| signature (string, lengths 29–44.1k) | implementation (string, lengths 0–85.2k) |
|---|---|
def read_int8(self, little_endian=True):
    """Read 1 byte as a signed integer value from the stream.

    Args:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the signed 8-bit value read from the stream.
    """
    order = '<' if little_endian else '>'
    return self.unpack('%sb' % order)
def setIndexes(self, indexes):
    """Sets the list of indexed lookups for this schema to the inputted list.

    :param indexes | [<orb.Index>, ..] mapping of index name to index
    """
    # Replace the index mapping wholesale, then point every index back
    # at this schema.
    self.__indexes = dict(indexes)
    for index in self.__indexes.values():
        index.setSchema(self)
def internal_minimum_spanning_tree(mr_distances):
    """Compute the 'internal' minimum spanning tree given a matrix of mutual
    reachability distances.

    Given a minimum spanning tree the 'internal' graph is the subgraph
    induced by vertices of degree greater than one.

    Parameters
    ----------
    mr_distances : array (cluster_size, cluster_size)
        The pairwise mutual reachability distances, inferred to be the edge
        weights of a complete graph. Since MSTs are computed per cluster
        this is the all-points-mutual-reachability for points within a
        single cluster.

    Returns
    -------
    internal_nodes : array
        An array listing the indices of the internal nodes of the MST.
    internal_edges : array (?, 3)
        An array of internal edges in weighted edge list format; that is,
        an edge is an array of length three listing the two vertices
        forming the edge and the weight of the edge.

    References
    ----------
    Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
    2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
    """
    single_linkage_data = mst_linkage_core(mr_distances)
    min_span_tree = single_linkage_data.copy()
    # Recover the source vertex of each edge: among previously linked
    # vertices, find one whose distance to this edge's target matches the
    # edge weight.  (`isclose` comes from the module namespace --
    # presumably numpy's; not visible in this view.)
    for index, row in enumerate(min_span_tree[1:], 1):
        candidates = np.where(isclose(mr_distances[int(row[1])], row[2]))[0]
        candidates = np.intersect1d(
            candidates, single_linkage_data[:index, :2].astype(int))
        candidates = candidates[candidates != row[1]]
        assert len(candidates) > 0
        row[0] = candidates[0]
    # Internal nodes: vertices appearing in more than one MST edge
    # (degree > 1).
    vertices = np.arange(mr_distances.shape[0])[
        np.bincount(min_span_tree.T[:2].flatten().astype(np.intp)) > 1]
    # A little "fancy": we select from the flattened array, reshape back
    # (Fortran format to get indexing right) and take the product to do an
    # "and", then convert back to boolean type.
    edge_selection = np.prod(
        np.in1d(min_span_tree.T[:2], vertices).reshape(
            (min_span_tree.shape[0], 2), order='F'), axis=1).astype(bool)
    # Density sparseness is not well defined if there are no internal
    # edges (as per the referenced paper). However, MATLAB code from the
    # original authors simply selects the largest of *all* the edges in
    # the case that there are no internal edges, so we do the same here.
    if np.any(edge_selection):
        # If there are any internal edges, then subselect them out
        edges = min_span_tree[edge_selection]
    else:
        # If there are no internal edges then we want to take the max over
        # all the edges that exist in the MST, so we simply do nothing and
        # return all the edges in the MST.
        edges = min_span_tree.copy()
    return vertices, edges
def profile_setting_default_args(ij):
    """Build the default args for this profile.

    Args:
        ij (dict): The install.json contents.

    Returns:
        dict: The default args for a Job or Playbook App.
    """
    # Defaults shared by both Job and Playbook Apps.  Note: '$envs.' (vs
    # '$env.') marks the sensitive/secret environment values.
    profile_default_args = OrderedDict(
        [
            ('api_default_org', '$env.API_DEFAULT_ORG'),
            ('api_access_id', '$env.API_ACCESS_ID'),
            ('api_secret_key', '$envs.API_SECRET_KEY'),
            ('tc_api_path', '$env.TC_API_PATH'),
            ('tc_docker', False),
            ('tc_in_path', 'log'),
            ('tc_log_level', 'debug'),
            ('tc_log_path', 'log'),
            ('tc_log_to_api', False),
            ('tc_out_path', 'log'),
            ('tc_proxy_external', False),
            ('tc_proxy_host', '$env.TC_PROXY_HOST'),
            ('tc_proxy_port', '$env.TC_PROXY_PORT'),
            ('tc_proxy_password', '$envs.TC_PROXY_PASSWORD'),
            ('tc_proxy_tc', False),
            ('tc_proxy_username', '$env.TC_PROXY_USERNAME'),
            ('tc_temp_path', 'log'),
        ]
    )
    if ij.get('runtimeLevel') == 'Playbook':
        # Playbook Apps additionally need Redis DB and output settings;
        # the context is a fresh UUID per profile.
        profile_default_args['tc_playbook_db_type'] = 'Redis'
        profile_default_args['tc_playbook_db_context'] = str(uuid4())
        profile_default_args['tc_playbook_db_path'] = '$env.DB_PATH'
        profile_default_args['tc_playbook_db_port'] = '$env.DB_PORT'
        profile_default_args['tc_playbook_out_variables'] = ''
    return profile_default_args
def _disc_kn ( clearness_index , airmass , max_airmass = 12 ) :
"""Calculate Kn for ` disc `
Parameters
clearness _ index : numeric
airmass : numeric
max _ airmass : float
airmass > max _ airmass is set to max _ airmass before being used
in calculating Kn .
Returns
Kn : numeric
am : numeric
airmass used in the calculation of Kn . am < = max _ airmass .""" | # short names for equations
kt = clearness_index
am = airmass
am = np . minimum ( am , max_airmass )
# GH 450
# powers of kt will be used repeatedly , so compute only once
kt2 = kt * kt
# about the same as kt * * 2
kt3 = kt2 * kt
# 5-10x faster than kt * * 3
bools = ( kt <= 0.6 )
a = np . where ( bools , 0.512 - 1.56 * kt + 2.286 * kt2 - 2.222 * kt3 , - 5.743 + 21.77 * kt - 27.49 * kt2 + 11.56 * kt3 )
b = np . where ( bools , 0.37 + 0.962 * kt , 41.4 - 118.5 * kt + 66.05 * kt2 + 31.9 * kt3 )
c = np . where ( bools , - 0.28 + 0.932 * kt - 2.048 * kt2 , - 47.01 + 184.2 * kt - 222.0 * kt2 + 73.81 * kt3 )
delta_kn = a + b * np . exp ( c * am )
Knc = 0.866 - 0.122 * am + 0.0121 * am ** 2 - 0.000653 * am ** 3 + 1.4e-05 * am ** 4
Kn = Knc - delta_kn
return Kn , am |
def addReadGroupSet(self):
    """Adds a new ReadGroupSet into this repo.

    Reads the data file, index file, name, reference set name and
    attributes from the parsed command-line arguments (``self._args``)
    and inserts the resulting ReadGroupSet into the repository.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    dataUrl = self._args.dataFile
    indexFile = self._args.indexFile
    parsed = urlparse.urlparse(dataUrl)
    # TODO, add https support and others when they have been tested.
    if parsed.scheme in ['http', 'ftp']:
        # Remote data: an explicit index file is mandatory.
        if indexFile is None:
            raise exceptions.MissingIndexException(dataUrl)
    else:
        # Local data: default to the conventional "<data>.bai" index name
        # and resolve both paths (possibly relative to the repo).
        if indexFile is None:
            indexFile = dataUrl + ".bai"
        dataUrl = self._getFilePath(self._args.dataFile, self._args.relativePath)
        indexFile = self._getFilePath(indexFile, self._args.relativePath)
    name = self._args.name
    if self._args.name is None:
        name = getNameFromPath(dataUrl)
    readGroupSet = reads.HtslibReadGroupSet(dataset, name)
    readGroupSet.populateFromFile(dataUrl, indexFile)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # Try to find a reference set name from the BAM header.
        referenceSetName = readGroupSet.getBamHeaderReferenceSetName()
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    readGroupSet.setReferenceSet(referenceSet)
    readGroupSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertReadGroupSet, readGroupSet)
def _set_url ( self , url ) :
"""Set a new URL for the data server . If we ' re unable to contact
the given url , then the original url is kept .""" | original_url = self . _url
try :
self . _update_index ( url )
except :
self . _url = original_url
raise |
def Parse(self, stat, file_object, knowledge_base):
    """Parse a ntp config into rdf.

    Args:
        stat: unused, kept for the parser interface.
        file_object: file-like object containing the ntp config bytes.
        knowledge_base: unused, kept for the parser interface.

    Yields:
        rdf_config_file.NtpConfig values built from the parsed config.
    """
    _, _ = stat, knowledge_base
    # TODO(hanuszczak): This parser only allows single use because it
    # messes with its state. This should be fixed.
    field_parser = NtpdFieldParser()
    # Feed every entry through the field parser, which accumulates the
    # parsed state on itself (config, keyed).
    for line in field_parser.ParseEntries(utils.ReadFileBytesAsUnicode(file_object)):
        field_parser.ParseLine(line)
    yield rdf_config_file.NtpConfig(
        config=field_parser.config,
        server=field_parser.keyed.get("server"),
        restrict=field_parser.keyed.get("restrict"),
        fudge=field_parser.keyed.get("fudge"),
        trap=field_parser.keyed.get("trap"),
        peer=field_parser.keyed.get("peer"),
        broadcast=field_parser.keyed.get("broadcast"),
        manycastclient=field_parser.keyed.get("manycastclient"))
def region_interface_areas(regions, areas, voxel_size=1, strel=None):
    r"""Calculates the interfacial area between all pairs of adjecent regions

    Parameters
    ----------
    regions : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that zeros in the image will not be considered for area
        calculation.
    areas : array_like
        A list containing the areas of each regions, as determined by
        ``region_surface_area``. Note that the region number and list index
        are offset by 1, such that the area for region 1 is stored in
        ``areas[0]``.
    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed. The
        default is 1.
    strel : array_like
        The structuring element used to blur the region. If not provided,
        then a spherical element (or disk) with radius 1 is used. See the
        docstring for ``mesh_region`` for more details, as this argument is
        passed to there.

    Returns
    -------
    result : named_tuple
        A named-tuple containing 2 arrays. ``conns`` holds the connectivity
        information and ``area`` holds the result for each pair. ``conns`` is
        a N-regions by 2 array with each row containing the region number of an
        adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and
        ``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
        area shared by regions 0 and 5.
    """
    print('_' * 60)
    print('Finding interfacial areas between each region')
    from skimage.morphology import disk, square, ball, cube
    im = regions.copy()
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    if im.ndim == 2:
        # For 2D images, swap in the 2D structuring elements.
        cube = square
        ball = disk
    # Get 'slices' into im for each region
    slices = spim.find_objects(im)
    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im) + 1)
    sa = sp.zeros_like(Ps, dtype=float)
    sa_combined = []
    # Difficult to preallocate since number of conns unknown
    cn = []
    # Start extracting area from im
    for i in tqdm(Ps):
        reg = i - 1
        if slices[reg] is not None:
            # Work on a padded sub-image around this region only.
            s = extend_slice(slices[reg], im.shape)
            sub_im = im[s]
            mask_im = sub_im == i
            sa[reg] = areas[reg]
            # Dilate the region by one voxel; any other region labels the
            # dilation overlaps are touching neighbors.
            im_w_throats = spim.binary_dilation(input=mask_im, structure=ball(1))
            im_w_throats = im_w_throats * sub_im
            Pn = sp.unique(im_w_throats)[1:] - 1
            for j in Pn:
                if j > reg:
                    # Record each adjacent pair once (reg < j).
                    cn.append([reg, j])
                    # Extract the bounding box covering both regions and
                    # mesh their union to get the combined surface area.
                    # NOTE(review): only the first two axes are sliced
                    # here -- presumably 2D-specific; confirm for 3D.
                    merged_region = im[
                        (min(slices[reg][0].start, slices[j][0].start)):
                        max(slices[reg][0].stop, slices[j][0].stop),
                        (min(slices[reg][1].start, slices[j][1].start)):
                        max(slices[reg][1].stop, slices[j][1].stop)]
                    merged_region = ((merged_region == reg + 1) +
                                     (merged_region == j + 1))
                    mesh = mesh_region(region=merged_region, strel=strel)
                    sa_combined.append(mesh_surface_area(mesh))
    # Interfacial area calculation
    cn = sp.array(cn)
    ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
    # NOTE(review): non-positive interfacial areas are clamped to 1 (not
    # 0) -- presumably to avoid zero-area throats downstream; confirm.
    ia[ia <= 0] = 1
    result = namedtuple('interfacial_areas', ('conns', 'area'))
    # NOTE(review): attributes are assigned on the namedtuple *class*,
    # not an instance -- works for attribute access but is unusual.
    result.conns = cn
    result.area = ia * voxel_size ** 2
    return result
def find_urls(observatory, frametype, start, end, on_gaps='error', connection=None, **connection_kw):
    """Find the URLs of files of a given data type in a GPS interval.

    See also
    --------
    gwdatafind.http.HTTPConnection.find_urls
    FflConnection.find_urls
        for details on the underlying method(s)
    """
    # NOTE(review): `connection` is used without a None check and
    # `connection_kw` is unused in this body -- presumably a decorator
    # (not visible here) opens a connection from `connection_kw` and
    # injects it; confirm before calling directly.
    return connection.find_urls(observatory, frametype, start, end, on_gaps=on_gaps)
def get_mcu_definition(self, project_file):
    """Parse project file to get mcu definition

    Reads an IAR ``.ewp`` project file (XML) and fills an MCU_TEMPLATE
    dict with the IAR tool-specific settings (chip selection and
    FPU/core variant states).

    :param project_file: path to the .ewp file, relative to the cwd.
    :returns: MCU_TEMPLATE-based dict; returned unmodified if the file
        does not look like a valid IAR project.
    """
    # TODO: check the extension here if it's valid IAR project or we
    # should at least check if syntax is correct check something IAR
    # defines and return error if not
    project_file = join(getcwd(), project_file)
    # NOTE(review): `file()` is a Python 2 builtin -- this code is
    # py2-only as written.
    ewp_dic = xmltodict.parse(file(project_file), dict_constructor=dict)
    mcu = MCU_TEMPLATE
    try:
        ewp_dic['project']['configuration']
    except KeyError:
        # validity check for iar project
        # NOTE(review): this log format has a %s but no argument -- the
        # literal string is logged; probably missing `project_file`.
        logging.debug("The project_file %s seems to be not valid .ewp file.")
        return mcu
    # Fill in only must-have values, fpu will be added if defined for mcu
    mcu['tool_specific'] = {'iar': {
        # MCU selection
        'OGChipSelectEditMenu': {'state': [], },
        # we use mcu
        'OGCoreOrChip': {'state': [1], }, }}
    # we take 0 configuration or just configuration, as multiple
    # configuration possible debug, release, for mcu - does not matter,
    # try and adjust
    try:
        index_general = self._get_option(ewp_dic['project']['configuration'][0]['settings'], 'General')
        configuration = ewp_dic['project']['configuration'][0]
    except KeyError:
        index_general = self._get_option(ewp_dic['project']['configuration']['settings'], 'General')
        configuration = ewp_dic['project']['configuration']
    # Chip selection string (tabs normalized to a single space once).
    index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'OGChipSelectEditMenu')
    OGChipSelectEditMenu = configuration['settings'][index_general]['data']['option'][index_option]
    mcu['tool_specific']['iar']['OGChipSelectEditMenu']['state'].append(OGChipSelectEditMenu['state'].replace('\t', ' ', 1))
    # we keep this as the internal version. FPU - version 1, FPU2 version 2.
    # TODO: We shall look at IAR versioning to get this right
    fileVersion = 1
    try:
        if self._get_option(configuration['settings'][index_general]['data']['option'], 'FPU2'):
            fileVersion = 2
    except TypeError:
        pass
    index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GBECoreSlave')
    GBECoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
    mcu['tool_specific']['iar']['GBECoreSlave'] = {'state': [int(GBECoreSlave['state'])]}
    if fileVersion == 2:
        # Newer project format: FPU/core settings live under *2 keys.
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave2')
        GFPUCoreSlave2 = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['GFPUCoreSlave2'] = {'state': [int(GFPUCoreSlave2['state'])]}
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'CoreVariant')
        CoreVariant = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['CoreVariant'] = {'state': [int(CoreVariant['state'])]}
    else:
        # Older project format.
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave')
        GFPUCoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['GFPUCoreSlave'] = {'state': [int(GFPUCoreSlave['state'])]}
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'Variant')
        Variant = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['Variant'] = {'state': [int(Variant['state'])]}
    return mcu
def _encrypt_data_key(self, data_key, algorithm, encryption_context):
    """Performs the provider-specific key encryption actions.

    :param data_key: Unencrypted data key
    :type data_key: :class:`aws_encryption_sdk.structures.RawDataKey`
        or :class:`aws_encryption_sdk.structures.DataKey`
    :param algorithm: Algorithm object which directs how this Master Key
        will encrypt the data key (not referenced here; the configured
        wrapping key determines the wrapping algorithm)
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param dict encryption_context: Encryption context to use in encryption
    :returns: Encrypted data key
    :rtype: aws_encryption_sdk.structures.EncryptedDataKey
    :raises EncryptKeyError: if Master Key is unable to encrypt data key
    """
    # Raw key bytes -> EncryptedData via the configured wrapping key.
    wrapped = self.config.wrapping_key.encrypt(
        plaintext_data_key=data_key.data_key,
        encryption_context=encryption_context,
    )
    # EncryptedData -> EncryptedDataKey.
    return aws_encryption_sdk.internal.formatting.serialize.serialize_wrapped_key(
        key_provider=self.key_provider,
        wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,
        wrapping_key_id=self.key_id,
        encrypted_wrapped_key=wrapped,
    )
def new_payment_query_listener(sender, order=None, payment=None, **kwargs):
    """Populate the two obligatory payment fields (amount and currency)
    from the order; everything else is left to the signal handler."""
    payment.amount, payment.currency = order.total, order.currency
    logger.debug("new_payment_query_listener, amount=%s, currency=%s",
                 payment.amount, payment.currency)
def randmio_und(R, itr, seed=None):
    '''This function randomizes an undirected network, while preserving the
    degree distribution. The function does not preserve the strength
    distribution in weighted networks.

    Parameters
    ----------
    R : NxN np.ndarray
        undirected binary/weighted connection matrix
    itr : int
        rewiring parameter. Each edge is rewired approximately itr times.
    seed : hashable, optional
        If None (default), use the np.random's global random state to
        generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with
        the given value.

    Returns
    -------
    R : NxN np.ndarray
        randomized network
    eff : int
        number of actual rewirings carried out
    '''
    if not np.all(R == R.T):
        raise BCTParamError("Input must be undirected")
    rng = get_rng(seed)
    # Work on a copy so the caller's matrix is untouched.
    R = R.copy()
    n = len(R)
    # Edge list taken from the lower triangle (each undirected edge once).
    i, j = np.where(np.tril(R))
    k = len(i)
    itr *= k
    # maximum number of rewiring attempts per iteration
    max_attempts = np.round(n * k / (n * (n - 1)))
    # actual number of successful rewirings
    eff = 0
    for it in range(int(itr)):
        att = 0
        while att <= max_attempts:  # while not rewired
            # Pick two distinct edges (a,b) and (c,d) spanning four
            # distinct vertices.
            while True:
                e1, e2 = rng.randint(k, size=(2,))
                while e1 == e2:
                    e2 = rng.randint(k)
                a = i[e1]
                b = j[e1]
                c = i[e2]
                d = j[e2]
                if a != c and a != d and b != c and b != d:
                    break
                # all 4 vertices must be different
            if rng.random_sample() > .5:
                i.setflags(write=True)
                j.setflags(write=True)
                i[e2] = d
                j[e2] = c
                # flip edge c-d with 50% probability
                c = i[e2]
                d = j[e2]
                # to explore all potential rewirings
            # rewiring condition: the replacement edges must not already
            # exist
            if not (R[a, d] or R[c, b]):
                # Swap endpoints: (a,b),(c,d) -> (a,d),(c,b), keeping the
                # matrix symmetric.
                R[a, d] = R[a, b]
                R[a, b] = 0
                R[d, a] = R[b, a]
                R[b, a] = 0
                R[c, b] = R[c, d]
                R[c, d] = 0
                R[b, c] = R[d, c]
                R[d, c] = 0
                j.setflags(write=True)
                j[e1] = d
                j[e2] = b
                # reassign edge indices
                eff += 1
                break
            att += 1
    return R, eff
def access_vlan(self, inter_type, inter, vlan_id):
    """Add a L2 Interface to a specific VLAN.

    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1
        vlan_id: ID for the VLAN interface being modified. Value of 2-4096.

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    # Build the payload:
    # config/interface/<inter_type>/{name, switchport/access/accessvlan}
    config = ET.Element('config')
    iface = ET.SubElement(
        config, 'interface',
        xmlns="urn:brocade.com:mgmt:brocade-interface")
    iface_type = ET.SubElement(iface, inter_type)
    ET.SubElement(iface_type, 'name').text = inter
    access_node = ET.SubElement(
        ET.SubElement(iface_type, 'switchport'), 'access')
    ET.SubElement(access_node, 'accessvlan').text = vlan_id
    try:
        self._callback(config)
        return True
    # TODO add logging and narrow exception window.
    except Exception as error:
        logging.error(error)
        return False
def on_message(self, opcode, message):
    """The primary dispatch function to handle incoming WebSocket messages.

    :param int opcode: The opcode of the message that was received.
    :param bytes message: The data contained within the message.
    """
    self.logger.debug("processing {0} (opcode: 0x{1:02x}) message".format(
        self._opcode_names.get(opcode, 'UNKNOWN'), opcode))
    if opcode == self._opcode_close:
        self.close()
        return
    if opcode == self._opcode_ping:
        # Oversized ping payloads (> 125 bytes) terminate the connection.
        if len(message) > 125:
            self.close()
            return
        self.send_message(self._opcode_pong, message)
        return
    if opcode == self._opcode_pong:
        # Pongs are acknowledged silently.
        return
    if opcode == self._opcode_binary:
        self.on_message_binary(message)
        return
    if opcode == self._opcode_text:
        try:
            text = self._decode_string(message)
        except UnicodeDecodeError:
            self.logger.warning('closing connection due to invalid unicode within a text message')
            self.close()
        else:
            self.on_message_text(text)
        return
    if opcode == self._opcode_continue:
        # Continuation frames are not supported.
        self.close()
        return
    self.logger.warning("received unknown opcode: {0} (0x{0:02x})".format(opcode))
    self.close()
def to_array(self):
    """Serializes this PassportElementErrorFiles to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    # Start from the superclass serialization and add this class's fields.
    array = super(PassportElementErrorFiles, self).to_array()
    # py2: type unicode, py3: type str
    array['source'] = u(self.source)
    # py2: type unicode, py3: type str
    array['type'] = u(self.type)
    # type list of str
    array['file_hashes'] = self._as_array(self.file_hashes)
    # py2: type unicode, py3: type str
    array['message'] = u(self.message)
    return array
def time_signature_event(self, meter=(4, 4)):
    """Return a time signature event for meter."""
    num_byte = a2b_hex('%02x' % meter[0])
    # MIDI encodes the denominator as its base-2 logarithm.
    denom_byte = a2b_hex('%02x' % int(log(meter[1], 2)))
    # '\x04' is the event data length; '\x18\x08' are the trailing
    # clocks-per-click / 32nd-notes-per-quarter bytes (the standard MIDI
    # defaults, 24 and 8).
    return (self.delta_time + META_EVENT + TIME_SIGNATURE
            + '\x04' + num_byte + denom_byte + '\x18\x08')
def targets_from_background(back_cnn, work_dir, data):
    """Retrieve target and antitarget BEDs from background CNN file.

    Runs CNVkit's ``reference2targets.py`` over the background ``.cnn``
    file to regenerate the target/antitarget BED files, caching the
    outputs in ``work_dir``.

    :param back_cnn: path to the CNVkit background/reference .cnn file
    :param work_dir: directory in which the BED outputs are written
    :param data: bcbio sample data dict (used for naming and transactions)
    :returns: tuple of (target BED path, antitarget BED path)
    """
    target_file = os.path.join(work_dir, "%s.target.bed" % dd.get_sample_name(data))
    anti_file = os.path.join(work_dir, "%s.antitarget.bed" % dd.get_sample_name(data))
    if not utils.file_exists(target_file):
        # Generate both BEDs transactionally so partial outputs are not
        # left behind on failure.
        with file_transaction(data, target_file) as tx_out_file:
            out_base = tx_out_file.replace(".target.bed", "")
            cmd = [_get_cmd("reference2targets.py"), "-o", out_base, back_cnn]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit targets from background")
            shutil.copy(out_base + ".antitarget.bed", anti_file)
    return target_file, anti_file
def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
    """Check directories above the remote module and issue a warning if
    they are not traversable by all users.

    The reasoning behind this is mainly aimed at set-ups with a
    centralized Hadoop cluster, accessed by all users, and where
    the Hadoop task tracker user is not a superuser; an example
    may be if you're running a shared Hadoop without HDFS (using
    only a POSIX shared file system). The task tracker correctly
    changes user to the job requester's user for most operations,
    but not when initializing the distributed cache, so jobs who
    want to place files not accessible by the Hadoop user into
    dist cache fail.
    """
    host, port, path = hdfs.path.split(abs_remote_path)
    if host == '' and port == 0:
        # local file system
        host_port = "file:///"
    else:
        # FIXME: this won't work with any scheme other than
        # hdfs:// (e.g., s3)
        host_port = "hdfs://%s:%s/" % (host, port)
    path_pieces = path.strip('/').split(os.path.sep)
    fs = hdfs.hdfs(host, port)
    # Walk the ancestor directories from the root down; warn on the first
    # one missing any execute (traverse) bit for user/group/other.
    for i in range(0, len(path_pieces)):
        part = os.path.join(host_port, os.path.sep.join(path_pieces[0:i + 1]))
        permissions = fs.get_path_info(part)['permissions']
        if permissions & 0o111 != 0o111:
            self.logger.warning(
                ("remote module %s may not be readable by the task "
                 "tracker when initializing the distributed cache. "
                 "Permissions on %s: %s"),
                abs_remote_path, part, oct(permissions))
            break
def have_same_chars(s0: str, s1: str) -> bool:
    """Checks if two strings have the same unique characters.

    :param s0: The first string to compare
    :param s1: The second string to compare
    :return: True if both strings have the same unique characters, False otherwise

    >>> have_same_chars('eabcdzzzz', 'dddzzzzzddeddabc')
    True
    >>> have_same_chars('abcd', 'dddddabc')
    True
    >>> have_same_chars('dddddabc', 'abcd')
    True
    >>> have_same_chars('eabcd', 'dddddabc')
    False
    >>> have_same_chars('abcd', 'dddddabce')
    False
    >>> have_same_chars('eabcdzzzz', 'dddzzzzzddddabc')
    False
    """
    # Duplicates are irrelevant, so simply compare the character sets.
    return set(s0) == set(s1)
def add_bucket_key_data(self, bucket, key, data, bucket_type=None):
    """Adds a bucket/key/keydata triple to the inputs.

    :param bucket: the bucket
    :type bucket: string
    :param key: the key or list of keys
    :type key: string
    :param data: the key-specific data
    :type data: string, list, dict, None
    :param bucket_type: Optional name of a bucket type
    :type bucket_type: string, None
    :rtype: :class:`RiakMapReduce`
    """
    # Mixing input styles is not allowed -- fail fast.
    if self._input_mode == 'bucket':
        raise ValueError("Already added a bucket, can't add an object.")
    if self._input_mode == 'query':
        raise ValueError("Already added a query, can't add an object.")
    # A non-string iterable means multiple keys; normalize to a list.
    if isinstance(key, Iterable) and not isinstance(key, string_types):
        keys = list(key)
    else:
        keys = [key]
    for each_key in keys:
        entry = [bucket, each_key, data]
        if bucket_type is not None:
            entry.append(bucket_type)
        self._inputs.append(entry)
    return self
def recall_at_k(model, test_interactions, train_interactions=None, k=10, user_features=None, item_features=None, preserve_rows=False, num_threads=1, check_intersections=True, ):
    """Measure the recall at k metric for a model: the number of positive items in
    the first k positions of the ranked list of results divided by the number
    of positive items in the test period. A perfect score is 1.0.

    Parameters
    ----------
    model : LightFM instance
        the fitted model to be evaluated
    test_interactions : np.float32 csr_matrix of shape [n_users, n_items]
        Non-zero entries representing known positives in the evaluation set.
    train_interactions : np.float32 csr_matrix of shape [n_users, n_items], optional
        Non-zero entries representing known positives in the train set. These
        will be omitted from the score calculations to avoid re-recommending
        known positives.
    k : integer, optional
        The k parameter.
    user_features : np.float32 csr_matrix of shape [n_users, n_user_features], optional
        Each row contains that user's weights over features.
    item_features : np.float32 csr_matrix of shape [n_items, n_item_features], optional
        Each row contains that item's weights over features.
    preserve_rows : boolean, optional
        When False (default), the number of rows in the output will be equal
        to the number of users with interactions in the evaluation set.
        When True, the number of rows in the output will be equal to the
        number of users.
    num_threads : int, optional
        Number of parallel computation threads to use. Should
        not be higher than the number of physical cores.
    check_intersections : bool, optional, True by default
        Only relevant when train_interactions are supplied.
        A flag that signals whether the test and train matrices should be
        checked for intersections to prevent optimistic ranks / wrong
        evaluation / bad data split.

    Returns
    -------
    np.array of shape [n_users with interactions or n_users,]
        Numpy array containing recall@k scores for each user. If there are no
        interactions for a given user having items in the test period, the
        returned recall will be 0.
    """
    if num_threads < 1:
        raise ValueError("Number of threads must be 1 or larger.")
    ranks = model.predict_rank(test_interactions, train_interactions=train_interactions, user_features=user_features, item_features=item_features, num_threads=num_threads, check_intersections=check_intersections, )
    # Flag (in place) the test positives ranked inside the top k.
    ranks.data = np.less(ranks.data, k, ranks.data)
    # Number of test positives per user.
    retrieved = np.squeeze(test_interactions.getnnz(axis=1))
    # Number of top-k hits per user.
    hit = np.squeeze(np.array(ranks.sum(axis=1)))
    if not preserve_rows:
        # Keep only users that actually have test interactions.
        hit = hit[test_interactions.getnnz(axis=1) > 0]
        retrieved = retrieved[test_interactions.getnnz(axis=1) > 0]
    # NOTE(review): with preserve_rows=True, users without any test
    # interactions divide by zero here -- confirm this is intended.
    return hit / retrieved
def sort_reverse_chronologically(self):
    """Order this buffer's measurements from newest to oldest, in place."""
    self.measurements.sort(key=lambda measurement: measurement.timestamp,
                           reverse=True)
def ndarr2str(arr, encoding='ascii'):
    """Return the raw contents of a numpy array as a ``str``.

    This is used to ensure that the return value of the array's raw-bytes
    serialization is actually a string, preventing lots of if-checks in
    calling code: on Python 3 numpy returns ``bytes``, not ``str``.

    :param arr: assumed (not checked) to be a numpy array -- the tobytes
        call will fail anyway if not
    :param encoding: codec used to decode the raw bytes on Python 3
    :returns: the array's buffer as ``str``
    """
    # `tobytes` is the modern name for `tostring`; the latter was
    # deprecated and removed in numpy 1.23.
    retval = arr.tobytes()
    # On py2, bytes *is* str, so this check is False and the raw value is
    # returned unchanged -- same behavior as the original PY3K flag test.
    if not isinstance(retval, str):
        return retval.decode(encoding)
    return retval
def modify_rest_retry(self, total=8, connect=None, read=None, redirect=None, status=None, method_whitelist=urllib3.util.retry.Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0.705883, raise_on_redirect=True, raise_on_status=True, respect_retry_after_header=True, adapter_url="https://"):
    """Reconfigure retry behavior for the SDK's underlying requests.Session.

    All parameters come directly from `urllib3.util.retry.Retry` and are
    applied by mounting a new HTTPAdapter on the session. The defaults
    (total=8, backoff_factor=0.705883) produce roughly 3 minutes of total
    retry delay across 10 attempts.

    **Parameters:**

      - **total:** int, Total number of retries to allow. Takes precedence over other counts.
      - **connect:** int, How many connection-related errors to retry on.
      - **read:** int, How many times to retry on read errors.
      - **redirect:** int, How many redirects to perform.
      - **status:** int, How many times to retry on bad status codes.
      - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on.
      - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on.
      - **backoff_factor:** float, A backoff factor to apply between attempts after the second try.
      - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response.
      - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses.
      - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes.
      - **adapter_url:** string, URL match for these retry values (default ``https://``)

    **Returns:** No return, mutates the session directly
    """
    if status_forcelist is None:
        # CloudGenix responses with 502/504 are usually recoverable; use
        # these (plus 413/429/503) when no explicit list is given.
        status_forcelist = (413, 429, 502, 503, 504)
    retry_config = urllib3.util.retry.Retry(
        total=total,
        connect=connect,
        read=read,
        redirect=redirect,
        status=status,
        method_whitelist=method_whitelist,
        status_forcelist=status_forcelist,
        backoff_factor=backoff_factor,
        raise_on_redirect=raise_on_redirect,
        raise_on_status=raise_on_status,
        respect_retry_after_header=respect_retry_after_header,
    )
    retry_adapter = requests.adapters.HTTPAdapter(max_retries=retry_config)
    self._session.mount(adapter_url, retry_adapter)
def random_alphanum(length):
    """Generate a random string drawn from ASCII letters and digits.

    :param int length: number of characters to generate
    :returns: a random alphanumeric string
    :rtype: str
    """
    alphanumeric_charset = string.ascii_letters + string.digits
    return random_string(length, alphanumeric_charset)
def hdepth(tag):
    """Compute an h tag's "outline depth".

    E.g., h1 at top level is 1, h1 in a section is 2, h2 at top level is 2.
    """
    if not _heading_re.search(tag.name):
        raise TaskError("Can't compute heading depth of non-heading {}".format(tag))
    # 'h2' -> 2; every enclosing <section> pushes the heading one level deeper.
    level = int(tag.name[1], 10)
    ancestor = tag
    while ancestor:
        if ancestor.name == 'section':
            level += 1
        ancestor = ancestor.parent
    return level
def update_offer_comment(self, offer_comment_id, offer_comment_dict):
    """Update an existing offer comment via a PUT request.

    :param offer_comment_id: the offer comment id
    :param offer_comment_dict: dict of fields to update
    :return: dict
    """
    put_kwargs = {
        'resource': OFFER_COMMENTS,
        'billomat_id': offer_comment_id,
        'send_data': offer_comment_dict,
    }
    return self._create_put_request(**put_kwargs)
def count(tex):
    """Extract all labels, then count the number of times each is referenced in
    the provided file. Does not follow ``\\include``\\ s.

    :param tex: LaTeX source as a string
    :return: dict mapping each label name to its reference count
    """
    # soupify
    soup = TexSoup(tex)
    # extract all unique labels
    labels = set(label.string for label in soup.find_all('label'))
    # Fixes vs. the original:
    # * the search pattern must be a raw string -- '\ref' contains a
    #   literal carriage return ('\r' escape) and could never match \ref;
    # * the documented contract is a *count*, so take len() of the match
    #   list instead of returning the list itself.
    return dict((label, len(soup.find_all(r'\ref{%s}' % label))) for label in labels)
def stop_event(self, event_type):
    """Stop dispatching the given event.

    Attempting to stop an event that was never started is not an error;
    the request is silently ignored.
    """
    if event_type not in self.__timers:
        return
    pyglet.clock.unschedule(self.__timers[event_type])
def extern_project_multi(self, context_handle, val, field_str_ptr, field_str_len):
    """Given a Key for `obj`, and a field name, project the field as a list of Keys."""
    context = self._ffi.from_handle(context_handle)
    source_obj = context.from_value(val[0])
    field_name = self.to_py_str(field_str_ptr, field_str_len)
    # Convert every element of the projected field back into a value handle.
    projected = tuple(context.to_value(item) for item in getattr(source_obj, field_name))
    return context.vals_buf(projected)
def delete_before(self, segment_info):
    """Delete all base backups and WAL before a given segment.

    This is the most commonly-used deletion operator; to delete
    old backups and WAL.

    :param segment_info: the WAL segment acting as the deletion cutoff;
        everything strictly before it is removed.
    """
    # This will delete all base backup data before segment_info.
    self._delete_base_backups_before(segment_info)
    # This will delete all WAL segments before segment_info.
    self._delete_wals_before(segment_info)
    # Close the deleter (if one is configured) so any queued deletions
    # complete before returning.
    if self.deleter:
        self.deleter.close()
def _one_prob_per_shard(args: Dict[str, Any]) -> float:
    """Returns the probability of getting a one measurement on a state shard."""
    shard_index = args['index']
    # Project the shard's amplitudes onto the |1> subspace at this index.
    projected_state = _state_shard(args) * _one_projector(args, shard_index)
    amplitude_norm = np.linalg.norm(projected_state)
    # The probability is the squared L2 norm of the projected amplitudes.
    return amplitude_norm * amplitude_norm
def call_on_each_endpoint(self, callback):
    """Find all server endpoints defined in the swagger spec and calls 'callback' for each,
    with an instance of EndpointData as argument.

    :param callback: callable taking one EndpointData argument; invoked once
        per (path, method) pair found under the spec's 'paths' section.
    :raises Exception: if an endpoint lacks 'x-bind-server' (without
        'x-no-bind-server'), has no 'produces' section, declares more than
        one produced content type, produces an unsupported type, or
        declares parameters in both body and query.
    """
    if 'paths' not in self.swagger_dict:
        return
    for path, d in list(self.swagger_dict['paths'].items()):
        for method, op_spec in list(d.items()):
            data = EndpointData(path, method)
            # Which server method handles this endpoint?
            if 'x-bind-server' not in op_spec:
                if 'x-no-bind-server' in op_spec:
                    # That route should not be auto-generated
                    log.info("Skipping generation of %s %s" % (method, path))
                    continue
                else:
                    raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path))
            data.handler_server = op_spec['x-bind-server']
            # Make sure that endpoint only produces 'application/json'
            if 'produces' not in op_spec:
                raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path))
            if len(op_spec['produces']) != 1:
                raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path))
            if op_spec['produces'][0] == 'application/json':
                data.produces_json = True
            elif op_spec['produces'][0] == 'text/html':
                data.produces_html = True
            else:
                raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path))
            # Which client method handles this endpoint?
            if 'x-bind-client' in op_spec:
                data.handler_client = op_spec['x-bind-client']
            # Should we decorate the server handler?
            if 'x-decorate-server' in op_spec:
                data.decorate_server = op_spec['x-decorate-server']
            # Should we manipulate the requests parameters?
            if 'x-decorate-request' in op_spec:
                data.decorate_request = op_spec['x-decorate-request']
            # Generate a bravado-core operation object
            data.operation = Operation.from_spec(self.spec, path, method, op_spec)
            # Figure out how parameters are passed: one json in body? one or
            # more values in query?
            if 'parameters' in op_spec:
                params = op_spec['parameters']
                for p in params:
                    if p['in'] == 'body':
                        data.param_in_body = True
                    if p['in'] == 'query':
                        data.param_in_query = True
                    if p['in'] == 'path':
                        data.param_in_path = True
                if data.param_in_path:
                    # Substitute {...} with <...> in path, to make a Flask friendly path
                    data.path = data.path.replace('{', '<').replace('}', '>')
                if data.param_in_body and data.param_in_query:
                    raise Exception("Cannot support params in both body and param (%s %s)" % (method, path))
            else:
                data.no_params = True
            callback(data)
def path2url(p):
    """Return a file:// URL for filename *p*.

    :param p: filesystem path (absolute paths yield standard file:// URLs)
    :returns: the path as a file:// URL string
    """
    # Python 3.4+ has pathlib, which does a better job of quoting.
    # The original check (`major >= 3 and minor >= 4`) was wrong: it
    # compares the minor version independently, so e.g. a hypothetical
    # 4.0 (minor 0) would be misrouted to the legacy branch. Comparing
    # the version tuple is the correct, robust test.
    if sys.version_info >= (3, 4):
        import pathlib
        return pathlib.Path(p).as_uri()
    else:
        return six.moves.urllib.parse.urljoin('file:', six.moves.urllib.request.pathname2url(p))
def start(self):
    """Start the OplogWatcher loop.

    Seeds ``self.ts`` from the newest entry of ``local.oplog.rs`` when it
    is unset, then tails the oplog forever (while ``self.running``),
    calling ``self.process_op(ns, op)`` for each new operation. On
    AutoReconnect/OperationFailure the loop sleeps ``self.poll_time``
    seconds and retries.
    """
    oplog = self.connection.local['oplog.rs']
    if self.ts is None:
        # Start from the most recent entry so only new operations are seen.
        cursor = oplog.find().sort('$natural', -1)
        obj = cursor[0]
        if obj:
            self.ts = obj['ts']
        else:
            # In case no oplogs are present.
            self.ts = None
    if self.ts:
        logging.info('Watching oplogs with timestamp > %s' % self.ts)
    else:
        logging.info('Watching all oplogs')
    while self.running:
        query = {'ts': {'$gt': self.ts}}
        try:
            logging.debug('Tailing over %r...' % query)
            cursor = oplog.find(query, tailable=True)
            # OplogReplay flag greatly improves scanning for ts performance.
            cursor.add_option(pymongo.cursor._QUERY_OPTIONS['oplog_replay'])
            while self.running:
                for op in cursor:
                    self.process_op(op['ns'], op)
                time.sleep(self.poll_time)
                if not cursor.alive:
                    break
        # Fix: `except Exc, e` is Python 2-only syntax and a SyntaxError
        # on Python 3; the `as` form works on both 2.6+ and 3.
        except AutoReconnect as e:
            logging.warning(e)
            time.sleep(self.poll_time)
        except OperationFailure as e:
            logging.exception(e)
            time.sleep(self.poll_time)
def hill_climbing_stochastic(problem, iterations_limit=0, viewer=None):
    '''Stochastic hill climbing.

    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.

    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.value.
    '''
    # With no iteration cap, the search stops only on a plateau
    # (no better neighbor found).
    stop_on_plateau = iterations_limit == 0
    return _local_search(problem,
                         _random_best_expander,
                         iterations_limit=iterations_limit,
                         fringe_size=1,
                         stop_when_no_better=stop_on_plateau,
                         viewer=viewer)
def run(self, sensor_graph, model):
    """Run this optimization pass on the sensor graph.

    If necessary, information on the device model being targeted
    can be found in the associated model argument.

    Args:
        sensor_graph (SensorGraph): The sensor graph to optimize
        model (DeviceModel): The device model we're using
    """
    # A node is convertible when it has exactly one input of the form
    # `count(input|unbuffered X) == 1`: that trigger fires for every new
    # reading, so it can be replaced with an always-true trigger.
    for node, inputs, outputs in sensor_graph.iterate_bfs():
        if node.num_inputs != 1:
            continue
        stream, trigger = node.inputs[0]
        if stream.selector.match_type not in (DataStream.InputType, DataStream.UnbufferedType):
            continue
        is_count_eq_one = (isinstance(trigger, InputTrigger)
                           and trigger.comp_string == u'=='
                           and trigger.use_count
                           and trigger.reference == 1)
        if not is_count_eq_one:
            continue
        node.inputs[0] = (stream, TrueTrigger())
def uintersect1d(arr1, arr2, assume_unique=False):
    """Find the sorted unique elements of the two input arrays.

    A wrapper around numpy.intersect1d that preserves units. All input
    arrays must have the same units. See the documentation of
    numpy.intersect1d for full details.

    Examples
    --------
    >>> from unyt import cm
    >>> A = [1, 2, 3]*cm
    >>> B = [2, 3, 4]*cm
    >>> uintersect1d(A, B)
    unyt_array([2, 3], 'cm')
    """
    intersection = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
    # Re-attach/validate units stripped by the raw numpy call.
    return _validate_numpy_wrapper_units(intersection, [arr1, arr2])
def new_connection(self):
    """Open and return a configured sqlite3 connection to ``self.path``."""
    if not self.prepared:
        # Lazily run one-time setup before the first connection is made.
        self.prepare()
    connection = sqlite3.connect(self.path, isolation_level=self.isolation)
    connection.row_factory = self.factory
    if self.text_fact:
        connection.text_factory = self.text_fact
    return connection
def call_temperature(*args, **kwargs):
    '''Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired

    Arguments:

    * **value**: 150~500.

    Options:

    * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.

    CLI Example:

    .. code-block:: bash

        salt '*' hue.temperature value=150
        salt '*' hue.temperature value=150 id=1
        salt '*' hue.temperature value=150 id=1,2,3
    '''
    res = dict()
    if 'value' not in kwargs:
        raise CommandExecutionError("Parameter 'value' (150~500) is missing")
    try:
        # Clamp to the valid mired range [150, 500].
        value = max(min(int(kwargs['value']), 500), 150)
    except (TypeError, ValueError):
        # Narrowed from a bare `except Exception`: only int() can fail here.
        raise CommandExecutionError("Parameter 'value' does not contains an integer")
    devices = _get_lights()
    # Fix: the old `A and B or C` idiom broke when no lights were present --
    # an empty sorted() list is falsy, so it incorrectly fell through to
    # _get_devices() even with 'id' omitted. Use a real conditional.
    if 'id' not in kwargs:
        target_ids = sorted(devices.keys())
    else:
        target_ids = _get_devices(kwargs)
    for dev_id in target_ids:
        res[dev_id] = _set(dev_id, {"ct": value})
    return res
def create(self, qname):
    '''Create RackSpace Queue.

    :param qname: name of the queue to create
    :return: True on success (or when the queue already exists),
        False when the RackSpace API reports an error
    '''
    try:
        if self.exists(qname):
            log.error('Queues "%s" already exists. Nothing done.', qname)
            return True
        self.conn.create(qname)
        return True
    # Fix: the original `except pyrax.exceptions` named a *module*, which
    # is not an exception class -- Python raises a TypeError at handling
    # time instead of catching anything. Catch pyrax's exception base
    # class instead.
    except pyrax.exceptions.PyraxException as err_msg:
        log.error('RackSpace API got some problems during creation: %s', err_msg)
        return False
def is_finished(self):
    """Returns whether all trials have finished running."""
    if self._total_time > self._global_time_limit:
        # Hard stop: the global wall-clock budget has been exhausted.
        logger.warning("Exceeded global time limit {} / {}".format(self._total_time, self._global_time_limit))
        return True
    all_trials_done = all(trial.is_finished() for trial in self._trials)
    # The search algorithm must also report completion before we stop.
    return all_trials_done and self._search_alg.is_finished()
def _parse_common(text, **options):
    """Tries to parse the string as a common datetime format.

    :param text: The string to parse.
    :type text: str

    :param options: parser options; reads ``options["day_first"]`` to decide
        whether the two numeric date groups mean day-month or month-day.
        NOTE(review): assumes the key is always present -- confirm callers.

    :rtype: dict or None
    """
    m = COMMON.match(text)
    has_date = False
    year = 0
    month = 1
    day = 1
    if not m:
        raise ParserError("Invalid datetime string")
    if m.group("date"):
        # A date has been specified
        has_date = True
        year = int(m.group("year"))
        if not m.group("monthday"):
            # No month and day
            month = 1
            day = 1
        else:
            if options["day_first"]:
                # day_first swaps the meaning of the two numeric groups.
                month = int(m.group("day"))
                day = int(m.group("month"))
            else:
                month = int(m.group("month"))
                day = int(m.group("day"))
        if not m.group("time"):
            # Date only: return a plain date object.
            return date(year, month, day)
    # Grabbing hh:mm:ss
    hour = int(m.group("hour"))
    minute = int(m.group("minute"))
    if m.group("second"):
        second = int(m.group("second"))
    else:
        second = 0
    # Grabbing subseconds, if any
    microsecond = 0
    if m.group("subsecondsection"):
        # Limiting to 6 chars
        subsecond = m.group("subsecond")[:6]
        # Right-pad to 6 digits so e.g. ".5" becomes 500000 microseconds.
        microsecond = int("{:0<6}".format(subsecond))
    if has_date:
        return datetime(year, month, day, hour, minute, second, microsecond)
    return time(hour, minute, second, microsecond)
def register(self, func):
    """Register *func* in the template function table, keyed by its __name__.

    Returns the function unchanged so it can be used as a decorator;
    non-callables are ignored (and None is returned).
    """
    if not callable(func):
        return None
    self.functions[func.__name__] = func
    return func
def dtw(x, y, dist, warp=1, w=inf, s=1.0):
    """Computes Dynamic Time Warping (DTW) of two sequences.

    :param array x: N1*M array
    :param array y: N2*M array
    :param func dist: distance used as cost measure
    :param int warp: how many shifts are computed.
    :param int w: window size limiting the maximal distance between indices of matched entries |i,j|.
    :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal

    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
    """
    assert len(x)
    assert len(y)
    # A finite window must at least cover the length difference, or no
    # complete path exists.
    assert isinf(w) or (w >= abs(len(x) - len(y)))
    assert s > 0
    r, c = len(x), len(y)
    if not isinf(w):
        # Banded DTW: cells inside the |i-j| <= w band start at 0,
        # everything else stays inf so the path cannot leave the band.
        D0 = full((r + 1, c + 1), inf)
        for i in range(1, r + 1):
            D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0
        D0[0, 0] = 0
    else:
        # Unwindowed: only the first row/column are inf (path boundary).
        D0 = zeros((r + 1, c + 1))
        D0[0, 1:] = inf
        D0[1:, 0] = inf
    D1 = D0[1:, 1:]
    # view
    # Fill the local cost matrix with pairwise distances (band-limited).
    for i in range(r):
        for j in range(c):
            if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):
                D1[i, j] = dist(x[i], y[j])
    # Keep a copy of the raw cost matrix before accumulation.
    C = D1.copy()
    jrange = range(c)
    for i in range(r):
        if not isinf(w):
            jrange = range(max(0, i - w), min(c, i + w + 1))
        for j in jrange:
            # Candidate predecessors: diagonal, plus up to `warp` shifted
            # moves; off-diagonal moves are weighted by s.
            min_list = [D0[i, j]]
            for k in range(1, warp + 1):
                i_k = min(i + k, r)
                j_k = min(j + k, c)
                min_list += [D0[i_k, j] * s, D0[i, j_k] * s]
            D1[i, j] += min(min_list)
    # Degenerate sequences have a trivial path; otherwise backtrack.
    if len(x) == 1:
        path = zeros(len(y)), range(len(y))
    elif len(y) == 1:
        path = range(len(x)), zeros(len(x))
    else:
        path = _traceback(D0)
    # Normalize the accumulated distance by the path-length proxy r+c.
    return D1[-1, -1] / sum(D1.shape), C, D1, path
def get_ansible_by_id(self, ansible_id):
    """Return the ansible host with the given id, or None if absent."""
    matches = (host for host in self.ansible_hosts if host.id == ansible_id)
    return next(matches, None)
def _run_callback(self, callback: Callable[[], Any]) -> None:
    """Runs a callback with error handling.

    .. versionchanged:: 6.0

       CancelledErrors are no longer logged.
    """
    try:
        ret = callback()
        if ret is not None:
            from tornado import gen
            # Functions that return Futures typically swallow all
            # exceptions and store them in the Future. If a Future
            # makes it out to the IOLoop, ensure its exception (if any)
            # gets logged too.
            try:
                ret = gen.convert_yielded(ret)
            except gen.BadYieldError:
                # It's not unusual for add_callback to be used with
                # methods returning a non-None and non-yieldable
                # result, which should just be ignored.
                pass
            else:
                self.add_future(ret, self._discard_future_result)
    except asyncio.CancelledError:
        # Cancellation is normal control flow; swallow it silently
        # (see the versionchanged note above).
        pass
    except Exception:
        app_log.error("Exception in callback %r", callback, exc_info=True)
def _set_sport_number_lt_udp(self, v, load=False):
    """Setter method for sport_number_lt_udp, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/sport_number_lt_udp (union)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_sport_number_lt_udp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sport_number_lt_udp() directly.

    NOTE: auto-generated pyangbind code; the union accepts either a
    well-known UDP service name (e.g. 'domain', 'ntp') or a port number
    in 1..65535.
    """
    # Unwrap a previously-wrapped YANG value back to its underlying type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/wrap the value against the union's two member types.
        t = YANGDynClass(v, base=[RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'pim-auto-rp': {'value': 496}, u'domain': {'value': 53}, u'tacacs': {'value': 49}, u'snmp': {'value': 161}, u'bootps': {'value': 67}, u'rip': {'value': 520}, u'echo': {'value': 7}, u'syslog': {'value': 514}, u'ntp': {'value': 123}, u'tftp': {'value': 69}, u'bootpc': {'value': 68}},), RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 65535']}),], is_leaf=True, yang_name="sport-number-lt-udp", rest_name="sport-number-lt-udp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='union', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """sport_number_lt_udp must be of a type compatible with union""", 'defined-type': "brocade-ipv6-access-list:union", 'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'pim-auto-rp': {'value': 496}, u'domain': {'value': 53}, u'tacacs': {'value': 49}, u'snmp': {'value': 161}, u'bootps': {'value': 67}, u'rip': {'value': 520}, u'echo': {'value': 7}, u'syslog': {'value': 514}, u'ntp': {'value': 123}, u'tftp': {'value': 69}, u'bootpc': {'value': 68}},),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 65535']}),], is_leaf=True, yang_name="sport-number-lt-udp", rest_name="sport-number-lt-udp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='union', is_config=True)""", })
    self.__sport_number_lt_udp = t
    # Trigger the parent's change notification hook if one is defined.
    if hasattr(self, '_set'):
        self._set()
def get_siblings(self):
    """Get a list of sibling accounts associated with provided account."""
    endpoint = '/rest/v1.1/users/{}/siblings'.format(self.client.sauce_username)
    return self.client.request('GET', endpoint)
def line_prompt4var(self, *args):
    """%%prompt4var - Prompt for macro variables that will
    be assigned to the SAS session. The variables will be
    prompted each time the line magic is executed.

    Example:
        %prompt4var libpath file1
        filename myfile "~&file1.";
        libname data "&libpath";
    """
    prompt_vars = OrderedDict()
    for var_name in args:
        assert isinstance(var_name, str)
        # NOTE(review): False appears to mark the variable as
        # non-hidden input -- confirm against the SAS kernel docs.
        prompt_vars[var_name] = False
    if not len(self.code):
        # No cell body: submit immediately, starting SAS first if needed.
        if self.kernel.mva is None:
            self.kernel._allow_stdin = True
            self.kernel._start_sas()
        self.kernel.mva.submit(code=self.code, results="html", prompt=prompt_vars)
    else:
        # Defer: the kernel will prompt when the cell body executes.
        self.kernel.promptDict = prompt_vars
def timesync_send(self, tc1, ts1, force_mavlink1=False):
    '''Time synchronization message.

    tc1 : Time sync timestamp 1 (int64_t)
    ts1 : Time sync timestamp 2 (int64_t)
    '''
    encoded = self.timesync_encode(tc1, ts1)
    return self.send(encoded, force_mavlink1=force_mavlink1)
async def answer_media_group(self, media: typing.Union[MediaGroup, typing.List], disable_notification: typing.Union[base.Boolean, None] = None, reply=False) -> typing.List[Message]:
    """Use this method to send a group of photos or videos as an album.

    Source: https://core.telegram.org/bots/api#sendmediagroup

    :param media: A JSON-serialized array describing photos and videos to be sent
    :type media: :obj:`typing.Union[types.MediaGroup, typing.List]`

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`

    :param reply: fill 'reply_to_message_id'

    :return: On success, an array of the sent Messages is returned.
    :rtype: typing.List[types.Message]
    """
    # Reply to this message only when explicitly requested.
    reply_to = self.message_id if reply else None
    return await self.bot.send_media_group(self.chat.id,
                                           media=media,
                                           disable_notification=disable_notification,
                                           reply_to_message_id=reply_to)
def set_laplacian_matrix(self, laplacian_mat):
    """Store a precomputed (possibly sparse) Laplacian matrix.

    Parameters
    ----------
    laplacian_mat : sparse matrix (N_obs, N_obs).
        The Laplacian matrix to input.
    """
    validated = check_array(laplacian_mat, accept_sparse=sparse_formats)
    n_rows, n_cols = validated.shape
    if n_rows != n_cols:
        raise ValueError("Laplacian matrix is not square")
    self.laplacian_matrix = validated
def do_transaction(args):
    """Runs the transaction list or show command, printing to the console

    Args:
        args: The parsed arguments sent to the command at runtime
    """
    rest_client = RestClient(args.url, args.user)
    if args.subcommand == 'list':
        transactions = rest_client.list_transactions()
        keys = ('transaction_id', 'family', 'version', 'size', 'payload')
        headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys)

        def parse_txn_row(transaction, decode=True):
            # Row formatter: size and content come from the base64-decoded
            # payload; decode=False keeps the raw b64 payload string.
            decoded = b64decode(transaction['payload'])
            return (transaction['header_signature'], transaction['header']['family_name'], transaction['header']['family_version'], len(decoded), str(decoded) if decode else transaction['payload'])

        # Dispatch on the requested output format.
        if args.format == 'default':
            fmt.print_terminal_table(headers, transactions, parse_txn_row)
        elif args.format == 'csv':
            fmt.print_csv(headers, transactions, parse_txn_row)
        elif args.format == 'json' or args.format == 'yaml':
            data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))} for b in transactions]
            if args.format == 'yaml':
                fmt.print_yaml(data)
            elif args.format == 'json':
                fmt.print_json(data)
            else:
                raise AssertionError('Missing handler: {}'.format(args.format))
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
    if args.subcommand == 'show':
        output = rest_client.get_transaction(args.transaction_id)
        if args.key:
            # Narrow the output to one key, searched first at the top
            # level, then inside the transaction header.
            if args.key == 'payload':
                output = b64decode(output['payload'])
            elif args.key in output:
                output = output[args.key]
            elif args.key in output['header']:
                output = output['header'][args.key]
            else:
                raise CliException('Key "{}" not found in transaction or header'.format(args.key))
        if args.format == 'yaml':
            fmt.print_yaml(output)
        elif args.format == 'json':
            fmt.print_json(output)
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
def _download_single_image(label_path: Path, img_tuple: tuple, i: int, timeout: int = 4) -> None:
    """Downloads a single image from Google Search results to `label_path`
    given an `img_tuple` that contains `(fname, url)` of an image to download.
    `i` is just an iteration number `int`.
    """
    url = img_tuple[1]
    # Pull the file extension from the URL (before any query string);
    # default to .jpg when none can be found.
    extensions = re.findall(r'\.\w+?(?=(?:\?|$))', url)
    ext = extensions[0].lower() if extensions else '.jpg'
    download_url(url, label_path / f"{i:08d}{ext}", timeout=timeout)
def capture_insert(self, *, exclude_fields=()):
    """Apply :meth:`.TriggerLogAbstract.capture_insert_from_model` for this log."""
    table, record = self.table_name, self.record_id
    return self.capture_insert_from_model(table, record, exclude_fields=exclude_fields)
def _nth_of_year(self, nth, day_of_week):
    """Modify to the given occurrence of a given day of the week
    in the current year. If the calculated occurrence is outside,
    the scope of the current year, then return False and no
    modifications are made. Use the supplied consts
    to indicate the desired day_of_week, ex. DateTime.MONDAY.

    :type nth: int

    :type day_of_week: int or None

    :rtype: DateTime
    """
    if nth == 1:
        # The 1st occurrence is exactly "first day_of_week of the year".
        return self.first_of("year", day_of_week)
    dt = self.first_of("year")
    year = dt.year
    # Step forward one matching weekday at a time. When Jan 1 already
    # falls on day_of_week it counts as the first occurrence, so one
    # fewer step is needed.
    for i in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
        dt = dt.next(day_of_week)
    if year != dt.year:
        # Walked past Dec 31: the requested occurrence doesn't exist
        # in this year.
        return False
    return self.on(self.year, dt.month, dt.day).start_of("day")
def selection_r(x_bounds, x_types, clusteringmodel_gmm_good, clusteringmodel_gmm_bad, num_starting_points=100, minimize_constraints_fun=None):
    '''Call selection'''
    # Draw the requested number of random starting points within bounds,
    # then delegate to the full selection routine.
    starting_points = [lib_data.rand(x_bounds, x_types) for _ in range(num_starting_points)]
    return selection(x_bounds, x_types, clusteringmodel_gmm_good, clusteringmodel_gmm_bad, starting_points, minimize_constraints_fun)
def reset(vm_, **kwargs):
    '''Reset a VM by emulating the reset button on a physical machine

    :param vm_: domain name
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.reset <domain>
    '''
    conn = __get_conn(**kwargs)
    domain = _get_domain(conn, vm_)
    # virDomainReset takes a flags argument that is not yet used, so 0 is
    # passed; the call returns 0 on success.
    # see: http://libvirt.org/html/libvirt-libvirt.html#virDomainReset
    succeeded = domain.reset(0) == 0
    conn.close()
    return succeeded
def CopyNote(self, part, measure_id, new_note):
    '''handles copying the latest note into the measure note list.

    done at end of note loading to make sure staff_id is right as staff id could be encountered
    any point during the note tag

    :param part: the part class to copy it into

    :param measure_id: the id of the measure in which the note belongs

    :param new_note: the new note class to be copied in

    :return: None, side effects modifying the piece tree
    '''
    # Ensure the target measure exists for this staff before copying.
    if part.getMeasure(measure_id, self.data["staff_id"]) is None:
        part.addEmptyMeasure(measure_id, self.data["staff_id"])
    measure = part.getMeasure(measure_id, self.data["staff_id"])
    # Likewise make sure the destination voice exists in the measure.
    voice_obj = measure.getVoice(self.data["voice"])
    if voice_obj is None:
        measure.addVoice(id=self.data["voice"])
        voice_obj = measure.getVoice(self.data["voice"])
    # Skip the copy when an equal note is already present in the voice.
    add = True
    notes = voice_obj.GetChildrenIndexes()
    for n in notes:
        no = voice_obj.GetChild(n)
        if new_note == no:
            add = False
            break
    if add:
        chord = False
        if hasattr(new_note, "chord"):
            chord = new_note.chord
        measure.addNote(new_note, self.data["voice"], chord=chord)
    # Propagate rest flags so later rendering knows this measure/voice
    # holds a rest. NOTE(review): this runs even when the note was a
    # duplicate and not added -- confirm that is intended.
    if hasattr(new_note, "BarlinesAndMarkersRest") and new_note.BarlinesAndMarkersRest:
        measure.rest = True
        voice_obj.rest = True
def _translate_src_oprnd(self, operand):
    """Translate source operand to a SMT expression."""
    if isinstance(operand, ReilRegisterOperand):
        return self._translate_src_register_oprnd(operand)
    if isinstance(operand, ReilImmediateOperand):
        # Immediates become SMT constants of the operand's bit width.
        return smtsymbol.Constant(operand.size, operand.immediate)
    raise Exception("Invalid operand type")
def remove_child_objective_bank(self, objective_bank_id, child_id):
    """Removes a child from an objective bank.

    arg:    objective_bank_id (osid.id.Id): the ``Id`` of an
            objective bank
    arg:    child_id (osid.id.Id): the ``Id`` of the child
    raise:  NotFound - ``objective_bank_id`` not a parent of
            ``child_id``
    raise:  NullArgument - ``objective_bank_id`` or ``child_id`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    # Prefer the pluggable catalog session when one is configured;
    # otherwise fall back to the raw hierarchy session.
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(catalog_id=objective_bank_id, child_id=child_id)
    return self._hierarchy_session.remove_child(id_=objective_bank_id, child_id=child_id)
def data_received(self, data):
    """Handle an RTSP response on this session.

    Cancels the pending time-out (a reply arrived in time), feeds the raw
    response into the session, then either continues the handshake,
    schedules a keep-alive while playing, or stops the session.
    """
    self.time_out_handle.cancel()
    self.session.update(data.decode())
    state = self.session.state
    if state == STATE_STARTING:
        # Handshake still in progress: send the next method and re-arm
        # the time-out guard.
        self.transport.write(self.method.message.encode())
        self.time_out_handle = self.loop.call_later(TIME_OUT_LIMIT, self.time_out)
    elif state == STATE_PLAYING:
        self.callback(SIGNAL_PLAYING)
        timeout = self.session.session_timeout
        if timeout != 0:
            # Keep alive slightly before the server-side session expires.
            self.loop.call_later(timeout - 5, self.keep_alive)
    else:
        self.stop()
def sysctl(
    state, host, name, value,
    persist=False, persist_file='/etc/sysctl.conf',
):
    '''Edit sysctl configuration.

    + name: name of the sysctl setting to ensure
    + value: the value or list of values the sysctl should be
    + persist: whether to write this sysctl to the config
    + persist_file: file to write the sysctl to persist on reboot
    '''
    # List values are space-joined for the sysctl command line.
    string_value = (' '.join(value) if isinstance(value, list) else value)
    existing_value = host.fact.sysctl.get(name)
    # NOTE(review): the comparison uses ``value`` while the command uses
    # ``string_value``; for list values this likely never matches the
    # fact's string form — confirm the type host.fact.sysctl returns.
    if not existing_value or existing_value != value:
        yield 'sysctl {0}={1}'.format(name, string_value)
    if persist:
        # Ensure the persisted line matches "<name> = <value>", replacing
        # any existing assignment regardless of surrounding whitespace.
        yield files.line(
            state, host, persist_file,
            '{0}[[:space:]]*=[[:space:]]*{1}'.format(name, string_value),
            replace='{0} = {1}'.format(name, string_value),
        )
def get(url, **kwargs):
    """Wrapper for `request.get` function to set params."""
    # Force our User-Agent, keeping any other caller-supplied headers.
    headers = kwargs.setdefault('headers', {})
    headers['User-Agent'] = config.USER_AGENT
    # Apply the default timeout unless the caller chose one.
    kwargs.setdefault('timeout', config.TIMEOUT)
    # Certificate verification is disabled deliberately (no SSLError).
    kwargs['verify'] = False
    logger.debug("Getting: %s", url)
    return requests.get(url, **kwargs)
def generate(env):
    """Add Builders and construction variables for lib to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)
    SCons.Tool.createSharedLibBuilder(env)
    SCons.Tool.createProgBuilder(env)
    # Metrowerks linker (mwld) settings, applied table-style.
    settings = (
        ('AR', 'mwld'),
        ('ARCOM', '$AR $ARFLAGS -library -o $TARGET $SOURCES'),
        ('LIBDIRPREFIX', '-L'),
        ('LIBDIRSUFFIX', ''),
        ('LIBLINKPREFIX', '-l'),
        ('LIBLINKSUFFIX', '.lib'),
        ('LINK', 'mwld'),
        ('LINKCOM', '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'),
        ('SHLINK', '$LINK'),
        ('SHLINKFLAGS', '$LINKFLAGS'),
        ('SHLINKCOM', shlib_action),
        ('SHLIBEMITTER', shlib_emitter),
        ('LDMODULEEMITTER', shlib_emitter),
    )
    for key, val in settings:
        env[key] = val
def add_monitor(self, pattern, callback, limit=80):
    """Call *callback* whenever *pattern* matches incoming data.

    .. HINT::
        To catch all incoming data regardless of a pattern, use the
        Protocol.data_received_event event instead.

    The callback receives the protocol instance, the index of the match,
    and the regular-expression match object.

    :type pattern: str|re.RegexObject|list(str|re.RegexObject)
    :param pattern: One or more regular expressions.
    :type callback: callable
    :param callback: The function that is called.
    :type limit: int
    :param limit: Maximum size, in bytes, of the buffer tail searched.
    """
    # Bind the protocol instance as the callback's first argument.
    bound_callback = partial(callback, self)
    self.buffer.add_monitor(pattern, bound_callback, limit)
def predicatesIn(G: Graph, n: Node) -> Set[TriplePredicate]:
    """predicatesIn(G, n) is the set of predicates in arcsIn(G, n)."""
    # subject_predicates yields (subject, predicate) pairs for arcs into n.
    return set(pred for _subj, pred in G.subject_predicates(n))
def push(collector, image, **kwargs):
    """Build an image and push it to its registry."""
    if not image.image_index:
        raise BadOption("The chosen image does not have a image_index configuration", wanted=image.name)
    # Resolve the tag: explicit artifact first, then the global harpoon tag.
    chosen_tag = kwargs["artifact"]
    if chosen_tag is NotSpecified:
        chosen_tag = collector.configuration["harpoon"].tag
    if chosen_tag is not NotSpecified:
        image.tag = chosen_tag
    Builder().make_image(image, collector.configuration["images"], pushing=True)
    Syncer().push(image)
def reset_aggregations(self):
    """Remove all aggregations added to the search object.

    Rebuilds ``self.search`` without its ``aggs`` section and resets all
    aggregation counters.
    """
    search_dict = self.search.to_dict()
    if 'aggs' in search_dict:
        del search_dict['aggs']
    # Bug fix: ``from_dict`` returns a new search object rather than
    # mutating in place, so its result must be assigned back — the
    # original discarded it, leaving the aggregations in place.
    self.search = self.search.from_dict(search_dict)
    self.parent_agg_counter = 0
    self.child_agg_counter = 0
    self.child_agg_counter_dict = defaultdict(int)
def _increment(sign, integer_part, non_repeating_part, base):
    """Return a Radix with ``non_repeating_part`` rounded up by one unit.

    :param int sign: -1, 0, or 1 as appropriate
    :param integer_part: the integer part (list of int digits)
    :param non_repeating_part: the fractional part (list of int digits)
    :param int base: the base
    :returns: a Radix object with ``non_repeating_part`` rounded up
    :rtype: Radix

    Complexity: O(len(non_repeating_part) + len(integer_part))
    """
    # Carry one into the fractional digits; propagate any overflow into
    # the integer digits.
    carry, fractional = Nats.carry_in(non_repeating_part, 1, base)
    carry, whole = Nats.carry_in(integer_part, carry, base)
    if carry != 0:
        # Overflow past the most significant digit grows the integer part.
        whole = [carry] + whole
    return Radix(sign, whole, fractional, [], base, False)
def register_task_with_maintenance_window(WindowId=None, Targets=None, TaskArn=None, ServiceRoleArn=None, TaskType=None, TaskParameters=None, Priority=None, MaxConcurrency=None, MaxErrors=None, LoggingInfo=None, ClientToken=None):
    """Adds a new task to a Maintenance Window.

    See also: AWS API Documentation.

    :type WindowId: string
    :param WindowId: [REQUIRED] The id of the Maintenance Window the task
        should be added to.
    :type Targets: list
    :param Targets: [REQUIRED] The targets (either instances or tags), as
        dicts of ``Key``/``Values``. Instances use ``Key='InstanceIds'``;
        tags use ``Key='tag:<name>'`` with the tag values in ``Values``.
    :type TaskArn: string
    :param TaskArn: [REQUIRED] The ARN of the task to execute.
    :type ServiceRoleArn: string
    :param ServiceRoleArn: [REQUIRED] The role assumed when executing the task.
    :type TaskType: string
    :param TaskType: [REQUIRED] The type of task being registered
        (e.g. 'RUN_COMMAND').
    :type TaskParameters: dict
    :param TaskParameters: Parameters passed to the task on execution; maps
        each parameter name to a dict with a ``Values`` list of strings
        (each 1 to 255 characters).
    :type Priority: integer
    :param Priority: Task priority in the Maintenance Window; lower numbers
        run first, equal priorities run in parallel.
    :type MaxConcurrency: string
    :param MaxConcurrency: [REQUIRED] Maximum number of targets this task
        can run on in parallel.
    :type MaxErrors: string
    :param MaxErrors: [REQUIRED] Maximum number of errors allowed before the
        task stops being scheduled.
    :type LoggingInfo: dict
    :param LoggingInfo: S3 logging destination: ``S3BucketName`` [REQUIRED],
        ``S3KeyPrefix`` (optional), ``S3Region`` [REQUIRED].
    :type ClientToken: string
    :param ClientToken: User-provided idempotency token (autopopulated if
        not provided).
    :rtype: dict
    :return: {'WindowTaskId': 'string'}
    """
    # Documentation stub: the actual request is dispatched by generated
    # client code elsewhere.
    pass
def list_file_volumes(self, datacenter=None, username=None, storage_type=None, **kwargs):
    """Returns a list of file volumes.

    :param datacenter: Datacenter short name (e.g.: dal09)
    :param username: Name of volume.
    :param storage_type: Type of volume: Endurance or Performance
    :param kwargs:
    :return: Returns a list of file volumes.
    """
    if 'mask' not in kwargs:
        # Default object mask: the fields the CLI/table output needs.
        kwargs['mask'] = ','.join([
            'id', 'username', 'capacityGb', 'bytesUsed',
            'serviceResource.datacenter[name]',
            'serviceResourceBackendIpAddress',
            'activeTransactionCount', 'fileNetworkMountAddress',
            'replicationPartnerCount',
        ])
    _filter = utils.NestedDict(kwargs.get('filter') or {})
    nas = _filter['nasNetworkStorage']
    # Exclude plain NAS resources; only FILE_STORAGE offerings qualify.
    nas['serviceResource']['type']['type'] = utils.query_filter('!~ NAS')
    if storage_type:
        nas['storageType']['keyName'] = utils.query_filter(
            '%s_FILE_STORAGE*' % storage_type.upper())
    else:
        nas['storageType']['keyName'] = utils.query_filter('*FILE_STORAGE*')
    if datacenter:
        nas['serviceResource']['datacenter']['name'] = utils.query_filter(datacenter)
    if username:
        nas['username'] = utils.query_filter(username)
    kwargs['filter'] = _filter.to_dict()
    return self.client.call('Account', 'getNasNetworkStorage', **kwargs)
def set_step(self, value, block_events=False):
    """Set the step size of the number box.

    When ``block_events`` is True, widget signals are temporarily
    suppressed while the value is applied.
    """
    if not block_events:
        self._widget.setSingleStep(value)
        return
    self.block_events()
    self._widget.setSingleStep(value)
    self.unblock_events()
def notify_state_name_change(self, model, prop_name, info):
    """Update the tab label when a state's name (or script text) changes."""
    # Ignore notifications that only reflect execution status updates.
    if is_execution_status_update_notification_from_state_machine_model(prop_name, info):
        return
    overview = NotificationOverview(info, False, self.__class__.__name__)
    target_model = overview['model'][-1]
    if not isinstance(target_model, AbstractStateModel):
        return
    if overview['method_name'][-1] in ('name', 'script_text'):
        self.update_tab_label(target_model)
def psdump(self, filename=None, **kargs):
    """psdump(filename=None, layer_shift=0, rebuild=1)

    Creates an EPS file describing a packet. If filename is not provided a
    temporary file is created and gs is called.
    """
    canvas = self.canvas_dump(**kargs)
    if filename is None:
        # No target given: render to a temp file and open it in the
        # configured PostScript viewer.
        fname = get_temp_file(autoext=".eps")
        canvas.writeEPSfile(fname)
        # NOTE(review): the viewer is launched on ``fname + ".eps"`` —
        # presumably get_temp_file returns a base name and writeEPSfile
        # appends the extension; confirm against get_temp_file/pyx.
        subprocess.Popen([conf.prog.psreader, fname + ".eps"])
    else:
        canvas.writeEPSfile(filename)
def union(self, another_is):
    """Return the union between self and ``another_is``.

    Parameters
    ----------
    another_is : `IntervalSet`
        an IntervalSet object.

    Returns
    -------
    interval : `IntervalSet`
        the union of self with ``another_is``.
    """
    result = IntervalSet()
    # Trivial cases: union with an empty set is the other operand.
    if another_is.empty():
        result._intervals = self._intervals
    elif self.empty():
        result._intervals = another_is._intervals
    else:
        # General case: merged list contains no overlapping intervals.
        result._intervals = IntervalSet.merge(
            self._intervals, another_is._intervals,
            lambda in_a, in_b: in_a or in_b)
    return result
def _basic_return ( self , args , msg ) :
"""return a failed message
This method returns an undeliverable message that was
published with the " immediate " flag set , or an unroutable
message published with the " mandatory " flag set . The reply
code and text provide information about the reason that the
message was undeliverable .
PARAMETERS :
reply _ code : short
The reply code . The AMQ reply codes are defined in AMQ
RFC 011.
reply _ text : shortstr
The localised reply text . This text can be logged as an
aid to resolving issues .
exchange : shortstr
Specifies the name of the exchange that the message
was originally published to .
routing _ key : shortstr
Message routing key
Specifies the routing key name specified when the
message was published .""" | reply_code = args . read_short ( )
reply_text = args . read_shortstr ( )
exchange = args . read_shortstr ( )
routing_key = args . read_shortstr ( )
self . returned_messages . put ( ( reply_code , reply_text , exchange , routing_key , msg ) ) |
def _next_channel ( self ) :
"""you are holding the lock""" | chanid = self . _channel_counter
while self . _channels . get ( chanid ) is not None :
self . _channel_counter = ( self . _channel_counter + 1 ) & 0xffffff
chanid = self . _channel_counter
self . _channel_counter = ( self . _channel_counter + 1 ) & 0xffffff
return chanid |
def detect_hooks():
    """Returns True if the import hooks are installed, False if not."""
    flog.debug('Detecting hooks ...')
    # A hook is recognised by its RENAMER attribute on sys.meta_path.
    present = any(hasattr(hook, 'RENAMER') for hook in sys.meta_path)
    if present:
        flog.debug('Detected.')
    else:
        flog.debug('Not detected.')
    return present
def retrieve(pdb_id, cache_dir=None, bio_cache=None):
    '''Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
    pdb_id = pdb_id.upper()
    # A bio_cache takes precedence over the filesystem cache and the RCSB.
    if bio_cache:
        return FASTA(bio_cache.get_fasta_contents(pdb_id))
    if cache_dir:
        # Look for a cached copy under either naming scheme
        # ("<id>.fasta" first, then legacy "<id>.fasta.txt").
        base = os.path.join(cache_dir, "%s.fasta" % pdb_id)
        if os.path.exists(base):
            return FASTA(read_file(base))
        legacy = base + ".txt"
        if os.path.exists(legacy):
            return FASTA(read_file(legacy))
    # Not cached: fetch from the RCSB.
    contents = rcsb.retrieve_fasta(pdb_id)
    if cache_dir:
        # Store a cached copy for next time.
        write_file(os.path.join(cache_dir, "%s.fasta" % pdb_id), contents)
    return FASTA(contents)
def echo_worker(self):
    """Drain ``self.received_transfers`` and spawn ``self.on_transfer``
    greenlets for every transfer not yet seen."""
    log.debug('echo worker', qsize=self.received_transfers.qsize())
    while self.stop_signal is None:
        if self.received_transfers.qsize() == 0:
            # Nothing queued: back off briefly before polling again.
            gevent.sleep(.5)
            continue
        transfer = self.received_transfers.get()
        if transfer in self.seen_transfers:
            log.debug('duplicate transfer ignored', initiator=pex(transfer.initiator), amount=transfer.amount, identifier=transfer.identifier, )
            continue
        self.seen_transfers.append(transfer)
        self.greenlets.add(gevent.spawn(self.on_transfer, transfer))
def permute(self, ba):
    """Permute the bitarray ``ba`` in place.

    Position ``i`` receives the bit previously at ``self.mapping[i]``.
    Returns ``ba`` for convenience.
    """
    snapshot = ba.copy()
    # Bug fix: ``xrange`` is Python-2-only and raises NameError under
    # Python 3 (which the rest of this file targets); iterate the mapping
    # directly with ``enumerate`` instead.
    for i, source_index in enumerate(self.mapping):
        ba[i] = snapshot[source_index]
    return ba
def _key_digest ( self , secret_key ) :
'''a helper method for creating a base 64 encoded secret key and digest
: param secret _ key : string with key to encrypt / decrypt data
: return : string with base64 key , string with base64 digest''' | from hashlib import md5 , sha256
from base64 import b64encode
key_bytes = sha256 ( secret_key . encode ( 'utf-8' ) ) . digest ( )
key_b64 = b64encode ( key_bytes ) . decode ( )
digest_bytes = md5 ( key_bytes ) . digest ( )
digest_b64 = b64encode ( digest_bytes ) . decode ( )
return key_b64 , digest_b64 |
def fetch_country_by_ip(ip):
    """Fetches country code by IP.

    Returns empty string if the request fails with a non-200 code.
    Uses the ipdata.co service (max 1500 requests per day).
    See: https://ipdata.co/docs.html#python-library
    """
    lookup_result = ipdata.ipdata().lookup(ip)
    if lookup_result.get('status') != 200:
        return ''
    response = lookup_result.get('response', {})
    return response.get('country_code', '')
def find_word_prob(word_string, word_total=sum(WORD_DISTRIBUTION.values())):
    '''Finds the relative probability of the word appearing given context of a base corpus.
    Returns this probability value as a float instance.'''
    # None is an accepted sentinel meaning "no word": probability zero.
    if word_string is None:
        return 0
    if not isinstance(word_string, str):
        raise InputError("string or none type variable not passed as argument to find_word_prob")
    return WORD_DISTRIBUTION[word_string] / word_total
def list_projects(self):
    """Lists all deployed projects. First class, maps to Scrapyd's
    list projects endpoint."""
    endpoint = self._build_url(constants.LIST_PROJECTS_ENDPOINT)
    response = self.client.get(endpoint, timeout=self.timeout)
    return response['projects']
def move_to_position(cls, resource_id, to_position, new_parent_id=noop, db_session=None, *args, **kwargs):
    """Moves node to new location in the tree.

    :param resource_id: resource to move
    :param to_position: new position (ordering slot under the parent)
    :param new_parent_id: new parent id; the ``noop`` sentinel means
        "keep the current parent" (distinct from ``None``, a valid root)
    :param db_session:
    :return: True on success
    """
    db_session = get_db_session(db_session)
    # lets lock rows to prevent bad tree states
    resource = ResourceService.lock_resource_for_update(resource_id=resource_id, db_session=db_session)
    ResourceService.lock_resource_for_update(resource_id=resource.parent_id, db_session=db_session)
    same_branch = False
    # reset if parent is same as old
    if new_parent_id == resource.parent_id:
        new_parent_id = noop
    if new_parent_id is not noop:
        cls.check_node_parent(resource_id, new_parent_id, db_session=db_session)
    else:
        same_branch = True
    if new_parent_id is noop:
        # it is not guaranteed that parent exists
        parent_id = resource.parent_id if resource else None
    else:
        parent_id = new_parent_id
    cls.check_node_position(parent_id, to_position, on_same_branch=same_branch, db_session=db_session)
    # move on same branch
    if new_parent_id is noop:
        order_range = list(sorted((resource.ordering, to_position)))
        move_down = resource.ordering > to_position
        # shift every sibling between the old and new slot by one,
        # direction depending on whether the node moves up or down
        query = db_session.query(cls.model)
        query = query.filter(cls.model.parent_id == parent_id)
        query = query.filter(cls.model.ordering.between(*order_range))
        if move_down:
            query.update({cls.model.ordering: cls.model.ordering + 1}, synchronize_session=False, )
        else:
            query.update({cls.model.ordering: cls.model.ordering - 1}, synchronize_session=False, )
        db_session.flush()
        db_session.expire(resource)
        resource.ordering = to_position
    # move between branches
    else:
        # close the ordering gap in the old branch, open one in the new
        cls.shift_ordering_down(resource.parent_id, resource.ordering, db_session=db_session)
        cls.shift_ordering_up(new_parent_id, to_position, db_session=db_session)
        db_session.expire(resource)
        resource.parent_id = new_parent_id
        resource.ordering = to_position
    db_session.flush()
    return True
def update(self, key_vals=None, overwrite=True):
    """Write ``key_vals`` into this node and commit.

    Locked keys will be overwritten unless ``overwrite=False``.
    Otherwise, written keys will be added to the "locked" list.
    """
    # Nothing to write: empty/None payloads are a no-op.
    if not key_vals:
        return
    written_items = self._update(key_vals, overwrite)
    self._root._root_set(self._path, written_items)
    self._root._write(commit=True)
def prepare_timestamp_millis(data, schema):
    """Converts datetime.datetime object to int timestamp with milliseconds."""
    if not isinstance(data, datetime.datetime):
        # Non-datetime values pass through unchanged.
        return data
    if data.tzinfo is not None:
        # Aware datetimes: measure the delta against the (aware) epoch.
        return int((data - epoch).total_seconds() * MLS_PER_SECOND)
    # Naive datetimes: interpret in local time via mktime, then add the
    # sub-second part as whole milliseconds.
    seconds = int(time.mktime(data.timetuple()))
    return seconds * MLS_PER_SECOND + int(data.microsecond / 1000)
def print_state(state: State, file: TextIO = None) -> None:
    """Print a state vector"""
    amplitudes = state.vec.asarray()
    for index, amplitude in np.ndenumerate(amplitudes):
        # Render the multi-index as a ket label, e.g. (0, 1, 1) -> "011".
        ket = "".join(str(n) for n in index)
        print(ket, ":", amplitude, file=file)
def clip_gradients(batch_result, model, max_grad_norm):
    """Clip model gradients to ``max_grad_norm`` and record the norm.

    The (pre-clip) gradient norm is stored under ``batch_result['grad_norm']``;
    when clipping is disabled (``max_grad_norm is None``) the recorded norm
    is 0.0.
    """
    if max_grad_norm is None:
        batch_result['grad_norm'] = 0.0
        return
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    batch_result['grad_norm'] = torch.nn.utils.clip_grad_norm_(
        trainable_params, max_norm=max_grad_norm)
def transitive_edges(graph):
    """Return a list of transitive edges.

    Example of transitivity within graphs: A -> B, B -> C, A -> C;
    in this case the transitive edge is: A -> C.

    @attention: This function is only meaningful for directed acyclic graphs.
    @type graph: digraph
    @param graph: Digraph
    @rtype: List
    @return: List containing tuples with transitive edges (or an empty array
        if the digraph contains a cycle)
    """
    # if the graph contains a cycle we return an empty array
    if not len(find_cycle(graph)) == 0:
        return []
    tranz_edges = []
    # create an empty array that will contain all the tuples
    # run through all the nodes in the graph in topological order
    for start in topological_sorting(graph):
        # find all the successors on the path for the current node
        successors = []
        for a in traversal(graph, start, 'pre'):
            successors.append(a)
        # we need all the nodes in its path except the start node itself
        del successors[0]
        for next in successors:
            # look for an intersection between all the neighbors of the
            # given node and all the neighbors from the given successor;
            # any common neighbor closes a transitive edge start -> a
            intersect_array = _intersection(graph.neighbors(next), graph.neighbors(start))
            for a in intersect_array:
                if graph.has_edge((start, a)):
                    # check for the detected edge and append it to the result
                    tranz_edges.append((start, a))
    return tranz_edges
def encode_positions(self, positions: mx.sym.Symbol, data: mx.sym.Symbol) -> mx.sym.Symbol:
    """Add fixed sinusoidal positional embeddings to ``data``.

    :param positions: (batch_size,)
    :param data: (batch_size, num_embed)
    :return: (batch_size, num_embed)
    """
    # (batch_size, 1)
    positions = mx.sym.expand_dims(positions, axis=1)
    # (num_embed/2,) — one frequency channel per sin/cos output pair
    channels = mx.sym.arange(0, self.num_embed // 2)
    # (1, num_embed/2): inverse wavelengths 1 / 10000^(2i/num_embed)
    scaling = mx.sym.expand_dims(1. / mx.sym.pow(10000, (2 * channels) / self.num_embed), axis=0)
    # (batch_size, num_embed/2)
    scaled_positions = mx.sym.dot(positions, scaling)
    sin = mx.sym.sin(scaled_positions)
    cos = mx.sym.cos(scaled_positions)
    # (batch_size, num_embed): first half sin, second half cos
    pos_embedding = mx.sym.concat(sin, cos, dim=1)
    if self.scale_up_input:
        data = data * (self.num_embed ** 0.5)
    if self.scale_down_positions:
        pos_embedding = pos_embedding * (self.num_embed ** -0.5)
    # Positional embeddings are fixed, not learned: block their gradient.
    pos_embedding = mx.sym.BlockGrad(pos_embedding)
    return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix)
def wrap_results(self, **kwargs):
    """Wrap a returned http response (HAR capture) into a well formatted dict.

    :param kwargs: this dict param should contain the following keys:
        fd: directory where the har files are written
        url: the test url for the result
        files_count: the number of files under the har/ directory
        external: (optional) dict-like store to place the result under ``url``
    :return (dict): the parsed results, or {} when required args are missing
    """
    if 'fd' not in kwargs or 'url' not in kwargs or 'files_count' not in kwargs:
        logging.error("Missing arguments in wrap_results function")
        return {}
    external = kwargs['external'] if 'external' in kwargs else None
    fd = kwargs['fd']
    url = kwargs['url']
    length = kwargs['files_count']
    results = {}
    files = []
    wait_time = 15  # seconds to wait for the har file before giving up
    host = self.divide_url(url)[0]
    time.sleep(0.5)
    # wait until the har file is generated
    while len(os.listdir(fd)) <= length + self.parsed:
        time.sleep(1)
        wait_time -= 1
        if wait_time == 0:
            logging.warning("%s waiting har file result timed out" % url)
            results['error'] = "wrap har file timeout"
            if external is not None:
                external[url] = results
            return results
    time.sleep(1)
    # find all har files under har/ directory that belong to this host
    for fn in os.listdir(fd):
        if fn.endswith(".har") and host in fn:
            path = os.path.join(fd, fn)
            files.append((fn, os.stat(path).st_mtime))
    # sort all har files by mtime and parse the latest one
    files.sort(key=lambda x: x[1])
    if len(files) > 0:
        with open(fd + '/' + files[-1][0]) as f:
            raw_data = json.load(f)['log']['entries']
        results = [{} for i in range(0, len(raw_data))]
        # copy the interesting request/response fields of every entry
        for i in range(0, len(results)):
            results[i]['request'] = {}
            results[i]['request']['method'] = raw_data[i]['request']['method']
            headers = {}
            for header in raw_data[i]['request']['headers']:
                headers[header['name']] = header['value']
            results[i]['request']['headers'] = headers
            results[i]['response'] = {}
            results[i]['response']['status'] = raw_data[i]['response']['status']
            results[i]['response']['reason'] = raw_data[i]['response']['statusText']
            headers = {}
            for header in raw_data[i]['response']['headers']:
                headers[header['name']] = header['value']
            results[i]['response']['headers'] = headers
            results[i]['response']['redirect'] = raw_data[i]['response']['redirectURL']
            results[i]['response']['body'] = raw_data[i]['response']['content']
        # increment the number of parsed har files
        self.parsed += 1
    else:
        logging.warning("Cannot find har file for %s" % url)
    # save test result of this url to the external result object or
    # return the result
    if external is not None:
        external[url] = results
    else:
        return results
def xdr(self):
    """Packs and base64 encodes this :class:`Transaction` as an XDR
    string."""
    packer = Xdr.StellarXDRPacker()
    packer.pack_Transaction(self.to_xdr_object())
    return base64.b64encode(packer.get_buffer())
def render_registered(url_id, remote_info):
    """Render the index template prefilled for a registered user.

    Args:
        url_id (str): Seeder URL id.
        remote_info (dict): Informations read from Seeder.

    Returns:
        str: Template filled with data.
    """
    return template(
        read_index_template(),
        registered=True,
        url=remote_info["url"],
        seeder_data=json.dumps(remote_info),
        url_id=url_id,
    )
def value_map(f, m, *args, **kwargs):
    '''value_map(f, mapping) yields a persistent map with the same keys as
    the given mapping, where each key k maps to f(mapping[k]).

    value_map(f, mapping, *args, **kw) additionally passes the given
    arguments to f, so each key k maps to f(mapping[k], *args, **kw).

    Unlike lazy_value_map, the result type follows the input: a lazy map
    yields a lazy map; anything else yields a persistent non-lazy map.
    '''
    if is_lazy_map(m):
        return lazy_value_map(f, m, *args, **kwargs)
    mapped = {k: f(v, *args, **kwargs) for (k, v) in six.iteritems(m)}
    return ps.pmap(mapped)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.