signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def wait_droplets(self, droplets, status=None, locked=None, wait_interval=None, wait_time=None):
    r"""Poll the server until every droplet in ``droplets`` reaches a final
    state, yielding each `Droplet`'s final value as it completes.

    Exactly one of ``status`` and ``locked`` must be non-`None`:

    * ``status`` — wait for each droplet's ``status`` field to equal the
      given value (one of `Droplet.STATUS_ACTIVE`, `Droplet.STATUS_ARCHIVE`,
      `Droplet.STATUS_NEW`, and `Droplet.STATUS_OFF`; any other value is
      accepted for forwards-compatibility).
    * ``locked`` — wait for each droplet's ``locked`` field to equal the
      truth value of the given value.

    If ``wait_time`` is exceeded, a `WaitTimeoutError` containing any
    remaining in-progress droplets is raised.  If a `KeyboardInterrupt` is
    caught, any remaining droplets are returned immediately without
    waiting for completion.

    .. versionchanged:: 0.2.0
        Raises `WaitTimeoutError` on timeout; ``locked`` parameter added;
        no longer waits for actions to complete

    :param iterable droplets: an iterable of `Droplet`\ s and/or other
        values that are acceptable arguments to :meth:`fetch_droplet`
    :param status: desired ``status`` value, or `None`
    :type status: string or `None`
    :param locked: desired ``locked`` value, or `None`
    :type locked: `bool` or `None`
    :param number wait_interval: seconds to sleep between requests;
        defaults to :attr:`wait_interval` if not specified or `None`
    :param number wait_time: total seconds before raising an error for
        droplets that have not completed, or a negative number to wait
        indefinitely; defaults to :attr:`wait_time`
    :rtype: generator of `Droplet`\ s
    :raises TypeError: if both or neither of ``status`` & ``locked`` are
        defined
    :raises DOAPIError: if the API endpoint replies with an error
    :raises WaitTimeoutError: if ``wait_time`` is exceeded
    """
    if (status is None) == (locked is None):
        # TODO: Is TypeError the right type of error?
        raise TypeError('Exactly one of "status" and "locked" must be specified')
    resolved = map(self._droplet, droplets)
    if status is not None:
        attr, target = "status", status
    else:
        attr, target = "locked", bool(locked)
    return self._wait(resolved, attr, target, wait_interval, wait_time)
|
def estimate(self, maxiter=250, convergence=1e-7):
    """Run the EM algorithm until the log-likelihood converges or
    ``maxiter`` iterations have been performed.  The per-iteration
    log-likelihoods are stored in ``self.loglik`` (trimmed to the
    negative entries on convergence)."""
    self.loglik = np.zeros(maxiter)
    step = 0
    while step < maxiter:
        self.loglik[step] = self.E_step()
        if np.isnan(self.loglik[step]):
            print("undefined log-likelihood")
            break
        self.M_step()
        gain = self.loglik[step] - self.loglik[step - 1]
        if gain < 0 and step > 0:
            # EM should be monotone; a drop usually indicates a bug or
            # numerical trouble.
            print("log-likelihood decreased by %f at iteration %d" % (gain, step))
        elif gain < convergence and step > 0:
            print("convergence at iteration %d, loglik = %f" % (step, self.loglik[step]))
            # Drop the unused zero-initialized tail.
            self.loglik = self.loglik[self.loglik < 0]
            break
        step += 1
|
def delta(self, mapping, prefix):
    """Return a DeltaSet describing how ``mapping`` differs from the
    values previously stored under ``prefix`` (added, removed and
    changed keys)."""
    previous = self.getrange(prefix, strip=True)
    old_keys = set(previous.keys()) if previous else set()
    new_keys = set(mapping.keys())
    changes = DeltaSet()
    for key in new_keys - old_keys:
        # added
        changes[key] = Delta(None, mapping[key])
    for key in old_keys - new_keys:
        # removed
        changes[key] = Delta(previous[key], None)
    for key in old_keys & new_keys:
        # changed
        new_val = mapping[key]
        old_val = previous[key]
        if new_val != old_val:
            changes[key] = Delta(old_val, new_val)
    return changes
|
def content_length(self) -> Optional[int]:
    """The value of the Content-Length HTTP header, or None if absent."""
    raw = self._headers.get(hdrs.CONTENT_LENGTH)  # type: ignore
    return None if raw is None else int(raw)
|
def varchar(self, field=None):
    """Return a random chunk of text between 1 and ``field.max_length``
    characters long, filtered through ``get_allowed_value``.

    :param field: model field providing ``max_length``; must be supplied.
    :raises AssertionError: if ``field`` is not passed.
    """
    assert field is not None, "The field parameter must be passed to the 'varchar' method."
    max_length = field.max_length
    def source():
        # randint picks directly instead of materializing a whole range
        # just to choose one element; range() replaces Python-2 xrange().
        length = random.randint(1, max_length)
        return "".join(random.choice(general_chars) for _ in range(length))
    return self.get_allowed_value(source, field)
|
def start_new_log(self):
    '''Open a new dataflash log file and reset all transfer/bookkeeping
    state (block counters, missing/ack tracking, status timers).'''
    filename = self.new_log_filepath()
    self.block_cnt = 0
    self.logfile = open(filename, 'w+b')
    print("DFLogger: logging started (%s)" % (filename))
    # Reset transfer counters.
    self.prev_cnt = 0
    self.download = 0
    self.prev_download = 0
    # Reset status timers.
    self.last_idle_status_printed_time = time.time()
    self.last_status_time = time.time()
    # Reset block-tracking state.
    self.missing_blocks = {}
    self.acking_blocks = {}
    self.blocks_to_ack_and_nack = []
    self.missing_found = 0
    self.abandoned = 0
|
def SetBalanceFor(self, assetId, fixed8_val):
    """Set the balance for an asset id.

    Args:
        assetId (UInt256): asset whose balance is set.
        fixed8_val (Fixed8): balance value.
    """
    # Update the entry for an equal existing key if present.  Returning
    # early replaces the original 'found' flag and stops scanning the
    # rest of the dict once a match is found.
    for key in self.Balances:
        if key == assetId:
            self.Balances[key] = fixed8_val
            return
    self.Balances[assetId] = fixed8_val
|
def metadata_response(self, request, full_url, headers):
    """Mock response for localhost metadata
    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
    """
    parsed_url = urlparse(full_url)
    tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1)
    credentials = dict(
        AccessKeyId="test-key",
        SecretAccessKey="test-secret-key",
        Token="test-session-token",
        Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ"),
    )
    # Strip prefix if it is there
    meta_data_prefix = "/latest/meta-data/"
    path = parsed_url.path
    if path.startswith(meta_data_prefix):
        path = path[len(meta_data_prefix):]
    # Dispatch table of the supported metadata paths.
    responses = {
        '': lambda: 'iam',
        'iam': lambda: json.dumps({'security-credentials': {'default-role': credentials}}),
        'iam/security-credentials/': lambda: 'default-role',
        'iam/security-credentials/default-role': lambda: json.dumps(credentials),
    }
    if path not in responses:
        raise NotImplementedError("The {0} metadata path has not been implemented".format(path))
    return 200, headers, responses[path]()
|
def peaks(data, method='max', axis='time', limits=None):
    """Return the values of an index where the data is at max or min.

    Parameters
    ----------
    data : instance of Data
        one of the datatypes
    method : str, optional
        'max' or 'min'
    axis : str, optional
        the axis where you want to detect the peaks
    limits : tuple of two values, optional
        the lowest and highest limits where to search for the peaks

    Returns
    -------
    instance of Data
        with one dimension less than the input data.  The actual values in
        the data can be non-numeric, for example if you look for the max
        value across electrodes.

    Notes
    -----
    Useful to find the frequency with the largest power, the time point at
    which the signal is largest, or the channel with the most activity.
    """
    idx_axis = data.index_of(axis)
    output = data._copy()
    output.axis.pop(axis)
    for trl in range(data.number_of('trial')):
        values = data.axis[axis][trl]
        dat = data(trial=trl)
        if limits is not None:
            # BUGFIX: the original rebound `limits` to this boolean mask,
            # so every trial after the first compared against the mask
            # instead of the user-supplied bounds.
            out_of_bounds = (values < limits[0]) | (values > limits[1])
            idx = [slice(None)] * len(data.list_of_axes)
            idx[idx_axis] = out_of_bounds
            # NumPy requires a tuple (not a list) for multi-axis indexing.
            dat[tuple(idx)] = nan
        if method == 'max':
            peak_val = nanargmax(dat, axis=idx_axis)
        elif method == 'min':
            peak_val = nanargmin(dat, axis=idx_axis)
        output.data[trl] = values[peak_val]
    return output
|
def noEmptyNests(node):
    '''Recursively remove empty "children" lists from every dict nested
    inside ``node``.  Lists and dicts are traversed; other values are left
    untouched.  ``node`` is modified in place and returned.'''
    if isinstance(node, list):
        for item in node:
            noEmptyNests(item)
    elif isinstance(node, dict):
        for value in node.values():
            noEmptyNests(value)
        # The original indexed node["children"] unconditionally, which
        # raised KeyError for dicts without a "children" key; .get()
        # makes the check safe.
        if node.get("children") == []:
            node.pop("children")
    return node
|
def drawHotspots(self, painter):
    """Render every visible hotspot and dropzone for this renderer.

    :param painter | <QPainter>
    """
    visible = (spot for spot in self._hotspots + self._dropzones
               if spot.style() != XNode.HotspotStyle.Invisible)
    for spot in visible:
        spot.render(painter, self)
|
def verification_resource_secure(self, verification_id, jwt, name):
    """Get a Verification Resource via GET /verifications/<verification_id>.

    Use this method rather than verification_resource when adding a second
    factor to your application.  See
    `this <https://cloud.knuverse.com/docs/integration/>`_ for more
    information.

    :Args:
        * *verification_id*: (str) Verification ID
        * *jwt*: (str) Completion token received from application
        * *name*: (str) Client name associated with the jwt.  Received
          from application.
    :Returns: (dict) Verification data as shown
        `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Get_verification_info>`_.
    """
    response = self._get(
        url.verifications_id.format(id=verification_id),
        params={"jwt": jwt, "name": name},
    )
    self._check_response(response, 200)
    return self._create_response(response)
|
def get_symbol_map(self):
    """Return the symbol map as a flat list of string pairs mapping
    common tokens to X Keysym strings, e.g. "alt" followed by "Alt_L".

    :return: array of strings.
    """
    # The C call returns a sequence that yields None past its last entry
    # (e.g. ['alt', 'Alt_L', ..., None, None, ...]); collect values up to
    # the first None.
    raw = _libxdo.xdo_get_symbol_map()
    symbols = []
    index = 0
    while raw[index] is not None:
        symbols.append(raw[index])
        index += 1
    return symbols
|
def _file_lists(load, form):
    '''
    Return a dict containing the file lists for files, dirs, emptydirs and
    symlinks for the saltenv named in ``load['saltenv']``; ``form`` selects
    which list is returned.  Results are cached on disk under
    ``<cachedir>/file_lists/svnfs``.
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')
    # Unknown or missing environment: nothing to list.
    if 'saltenv' not in load or load['saltenv'] not in envs():
        return []
    list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
    if not os.path.isdir(list_cachedir):
        try:
            os.makedirs(list_cachedir)
        except os.error:
            log.critical('Unable to make cachedir %s', list_cachedir)
            return []
    # Per-saltenv cache file and its write-lock marker.
    list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
    w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
    cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(__opts__, form, list_cache, w_lock)
    if cache_match is not None:
        # Cache hit that is still fresh; use it as-is.
        return cache_match
    if refresh_cache:
        ret = {'files': set(), 'dirs': set(), 'empty_dirs': set()}
        for repo in init():
            env_root = _env_root(repo, load['saltenv'])
            if env_root is None:
                # Environment not found, try the next repo
                continue
            if repo['root']:
                env_root = os.path.join(env_root, repo['root']).rstrip(os.path.sep)
                if not os.path.isdir(env_root):
                    # svnfs root (global or per-remote) does not exist in env
                    continue
            for root, dirs, files in salt.utils.path.os_walk(env_root):
                relpath = os.path.relpath(root, env_root)
                dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
                if relpath != '.':
                    ret['dirs'].add(dir_rel_fn)
                    if not dirs and not files:
                        ret['empty_dirs'].add(dir_rel_fn)
                for fname in files:
                    rel_fn = os.path.relpath(os.path.join(root, fname), env_root)
                    ret['files'].add(os.path.join(repo['mountpoint'], rel_fn))
            if repo['mountpoint']:
                ret['dirs'].add(repo['mountpoint'])
        # Convert all compiled sets to lists
        for key in ret:
            ret[key] = sorted(ret[key])
        if save_cache:
            salt.fileserver.write_file_list_cache(__opts__, ret, list_cache, w_lock)
        return ret.get(form, [])
    # Shouldn't get here, but if we do, this prevents a TypeError
    return []
|
def has_separate_working_tree(self):
    """:return: True if our git_dir is not at the root of our
        working_tree_dir, but a .git file with a platform agnostic
        symbolic link.  Our git_dir will be wherever the .git file points
        to.
    :note: bare repositories will always return False here
    """
    # Bare repositories have no working tree at all.
    if self.bare:
        return False
    # A separate working tree is signalled by '.git' being a file
    # (pointing at the real git dir) instead of a directory.
    dotgit = osp.join(self.working_tree_dir, '.git')
    return osp.isfile(dotgit)
|
def do_save(self, line):
    """save [config_file]
    Save session variables to file.
    save (without parameters): save session to default file
    ~/.dataone_cli.conf
    save <file>: save session to the specified file.
    """
    config_file = self._split_args(line, 0, 1)[0]
    session = self._command_processor.get_session()
    session.save(config_file)
    if config_file is None:
        config_file = session.get_default_pickle_file_path()
    self._print_info_if_verbose("Saved session to file: {}".format(config_file))
|
def _FromSpecs(self, specs):
    """Populate the parameter list from a specification.

    Arguments:
        specs -- either:
            (a) a list like [(name, {...}), ...]
                (see Parameter.FromSpec() for further information)
            (b) a dictionary like {"name": value, ...}
    """
    if isinstance(specs, dict):
        # Normalize the shorthand dict form into (name, spec-dict) pairs.
        normalized = [(name, {"value": value}) for name, value in specs.items()]
    else:
        normalized = specs
    for spec in normalized:
        self.params.append(Parameter(spec))
|
def _find_mirror(self, axis):
    """Looks for mirror symmetry of specified type about axis.  Possible
    types are "h" or "vd".  Horizontal (h) mirrors are perpendicular to
    the axis while vertical (v) or diagonal (d) mirrors are parallel.  v
    mirrors has atoms lying on the mirror plane while d mirrors do not.

    Returns "h", "v", "d", or "" when no mirror is found.
    """
    mirror_type = ""
    # First test whether the axis itself is the normal to a mirror plane.
    if self.is_valid_op(SymmOp.reflection(axis)):
        self.symmops.append(SymmOp.reflection(axis))
        mirror_type = "h"
    else:
        # Iterate through all pairs of atoms to find mirror
        for s1, s2 in itertools.combinations(self.centered_mol, 2):
            if s1.species == s2.species:
                # The vector joining two like atoms is a candidate mirror
                # normal; it must be (near-)perpendicular to the axis.
                normal = s1.coords - s2.coords
                if np.dot(normal, axis) < self.tol:
                    op = SymmOp.reflection(normal)
                    if self.is_valid_op(op):
                        self.symmops.append(op)
                        if len(self.rot_sym) > 1:
                            # Distinguish v from d by checking the other
                            # rotation axes against the mirror normal.
                            mirror_type = "d"
                            for v, r in self.rot_sym:
                                if not np.linalg.norm(v - axis) < self.tol:
                                    if np.dot(v, normal) < self.tol:
                                        mirror_type = "v"
                                        break
                        else:
                            # Only one rotation axis: parallel mirrors are v.
                            mirror_type = "v"
                        break
    return mirror_type
|
def get_config(self):
    """Return the configuration of BoltzmannQPolicy as a dict, extending
    the base policy configuration with ``tau`` and ``clip``.

    # Returns
        Dict of config
    """
    config = super(BoltzmannQPolicy, self).get_config()
    config.update(tau=self.tau, clip=self.clip)
    return config
|
def set_labels(self, labels, axis='rows'):
    '''Set the row/col labels.

    Note that this method doesn't check that enough labels were set for
    all the assigned positions.

    :param labels: the labels to assign
    :param axis: 'rows'/'row'/'r'/0 for rows, 'cols'/'col'/'c'/1 for cols
    :raises ValueError: when a row label set does not match the assigned
        positions
    :raises TypeError: on an unsupported ``axis`` value
    '''
    # The original called axis.lower() unconditionally, which raised
    # AttributeError for the documented integer axis values 0 and 1.
    key = axis.lower() if isinstance(axis, str) else axis
    if key in ('rows', 'row', 'r', 0):
        # .itervalues() was Python-2 only; .values() works everywhere.
        assigned_pos = set(v[0] for v in self._positions.values())
        not_assigned = set(labels) - assigned_pos
        if len(not_assigned) > 0:
            msg = 'New labels must contain all assigned positions'
            raise ValueError(msg)
        self.row_labels = labels
    elif key in ('cols', 'col', 'c', 1):
        self.col_labels = labels
    else:
        raise TypeError('Unsupported axis value %s' % axis)
|
async def scan_for_apple_tvs(loop, timeout=5, abort_on_found=False, device_ip=None, only_usable=True, protocol=None):
    """Scan for Apple TVs using zeroconf (bonjour) and return them."""
    semaphore = asyncio.Semaphore(value=0, loop=loop)
    listener = _ServiceListener(loop, abort_on_found, device_ip, protocol, semaphore)
    zeroconf = Zeroconf()
    try:
        # Browse every service type we know how to handle.
        for service in (HOMESHARING_SERVICE, DEVICE_SERVICE,
                        MEDIAREMOTE_SERVICE, AIRPLAY_SERVICE):
            ServiceBrowser(zeroconf, service, listener)
        _LOGGER.debug('Discovering devices for %d seconds', timeout)
        await asyncio.wait_for(semaphore.acquire(), timeout, loop=loop)
    except concurrent.futures.TimeoutError:
        # Will happen when timeout occurs (totally normal)
        pass
    finally:
        zeroconf.close()
    found = listener.found_devices.values()
    if only_usable:
        return [atv for atv in found if atv.is_usable()]
    return list(found)
|
def read_daemon(self):
    """Read thread: blocks on the socket forever, feeding every received
    chunk straight into the protocol parser."""
    while True:
        # recv blocks until data is available; up to 9999 bytes per read.
        data = self._socket.recv(9999)
        self.feed_parser(data)
|
def t_heredoc_END_HEREDOC ( t ) :
r'( ? < = \ n ) [ A - Za - z _ ] [ \ w _ ] *'
|
if t . value == t . lexer . heredoc_label :
del t . lexer . heredoc_label
t . lexer . pop_state ( )
else :
t . type = 'ENCAPSED_AND_WHITESPACE'
return t
|
def handle_readable(client):
    """Echo one received chunk back to the client.

    Return True: The client is re-registered to the selector object.
    Return False: The server disconnects the client.
    """
    chunk = client.recv(1028)
    # recv() returning b'' signals a closed connection.
    if not chunk:
        return False
    client.sendall(b'SERVER: ' + chunk)
    print(threading.active_count())
    return True
|
def _builder_connect_signals ( self , _dict ) :
"""Called by controllers which want to autoconnect their
handlers with signals declared in internal Gtk . Builder .
This method accumulates handlers , and books signal
autoconnection later on the idle of the next occurring gtk
loop . After the autoconnection is done , this method cannot be
called anymore ."""
|
assert not self . builder_connected , "Gtk.Builder not already connected"
if _dict and not self . builder_pending_callbacks : # this is the first call , book the builder connection for
# later gtk loop
GLib . idle_add ( self . __builder_connect_pending_signals )
for n , v in _dict . items ( ) :
if n not in self . builder_pending_callbacks :
_set = set ( )
self . builder_pending_callbacks [ n ] = _set
else :
_set = self . builder_pending_callbacks [ n ]
_set . add ( v )
|
def _json_path_search(self, json_dict, expr):
    """Scan a JSON dictionary using a json-path expression of the form
    ``$.element..element1[index]`` etc.

    *Args:*\n
        _json_dict_ - JSON dictionary;\n
        _expr_ - json-path search expression;\n
    *Returns:*\n
        List of DatumInContext objects:
        ``[DatumInContext(value=..., path=..., context=[DatumInContext])]``
        - value - found value
        - path - value selector inside context.value (in implementation
          of jsonpath-rw: class Index or Fields)
    *Raises:*\n
        JsonValidatorError - when nothing matches the expression.
    """
    path = parse(expr)
    results = path.find(json_dict)
    # The original used 'len(results) is 0', an identity comparison on an
    # int (SyntaxWarning on Python >= 3.8); an emptiness test is correct.
    if not results:
        raise JsonValidatorError(
            "Nothing found in the dictionary {0} using the given path {1}".format(
                str(json_dict), str(expr)))
    return results
|
def start(self):
    """Start the channel"""
    # observers for this channel only need to wait for one value
    self._observer_params.update(dict(once=True))
    super(CurrentResourceValue, self).start()
    # Bring up the notifications thread before issuing the read --
    # presumably so the async response is not missed (TODO confirm).
    self._api.ensure_notifications_thread()
    # Fire-and-forget device read; the response is matched on async_id,
    # hence _wrap_with_consumer=False.
    self._api._mds_rpc_post(device_id=self.device_id, method='GET',
                            uri=self.resource_path, async_id=self.async_id,
                            _wrap_with_consumer=False, )
|
def _handle_read_chunk(self):
    """Some data can be read.

    Drains the descriptor (up to MAX_BUFFER_SIZE total buffered bytes),
    appends the newly read data -- with CR normalized to LF -- to
    ``self.read_buffer``, and returns just the new data.
    """
    new_data = b''
    buffer_length = len(self.read_buffer)
    try:
        while buffer_length < self.MAX_BUFFER_SIZE:
            try:
                piece = self.recv(4096)
            except OSError as e:
                if e.errno == errno.EAGAIN:
                    # End of the available data
                    break
                elif e.errno == errno.EIO and new_data:
                    # Hopefully we could read an error message before the
                    # actual termination
                    break
                else:
                    raise
            if not piece:
                # A closed connection is indicated by signaling a read
                # condition, and having recv() return 0.
                break
            new_data += piece
            buffer_length += len(piece)
    finally:
        # Publish whatever was read even if an unexpected error escapes
        # the loop above.
        new_data = new_data.replace(b'\r', b'\n')
        self.read_buffer += new_data
    return new_data
|
def set_data(self, data, invsigma=None):
    """Set the data to be modeled.

    :param data: array-like of measured values; coerced to float and at
        least one-dimensional.
    :param invsigma: optional inverse-sigma weights -- a scalar or
        anything broadcastable to ``data``'s shape.  Defaults to ones.
    :raises ValueError: if the weights cannot be broadcast to the data
        shape.
    Returns *self*.
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the equivalent dtype.
    self.data = np.array(data, dtype=float, ndmin=1)
    if invsigma is None:
        self.invsigma = np.ones(self.data.shape)
    else:
        i = np.array(invsigma, dtype=float)
        # allow scalar invsigma
        self.invsigma = np.broadcast_arrays(self.data, i)[1]
    if self.invsigma.shape != self.data.shape:
        raise ValueError('data values and inverse-sigma values must have same shape')
    return self
|
def verify_rsa_signature(signature, signature_scheme, public_key, data):
    """<Purpose>
      Determine whether the corresponding private key of 'public_key'
      produced 'signature'.  verify_signature() will use the public key,
      signature scheme, and 'data' to complete the verification.

      >>> public, private = generate_rsa_public_and_private(2048)
      >>> data = b'The quick brown fox jumps over the lazy dog'
      >>> scheme = 'rsassa-pss-sha256'
      >>> signature, scheme = create_rsa_signature(private, data, scheme)
      >>> verify_rsa_signature(signature, scheme, public, data)
      True
      >>> verify_rsa_signature(signature, scheme, public, b'bad_data')
      False

    <Arguments>
      signature:
        A signature, as a string.  This is the signature returned by
        create_rsa_signature().
      signature_scheme:
        A string that indicates the signature scheme used to generate
        'signature'.  'rsassa-pss-sha256' is currently supported.
      public_key:
        The RSA public key, a string in PEM format.
      data:
        Data used by securesystemslib.keys.create_signature() to generate
        'signature'.  'data' (a string) is needed here to verify
        'signature'.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'signature',
      'signature_scheme', 'public_key', or 'data' are improperly formatted.
      securesystemslib.exceptions.UnsupportedAlgorithmError, if the
      signature scheme used by 'signature' is not one supported by
      securesystemslib.keys.create_signature().
      securesystemslib.exceptions.CryptoError, if the private key cannot
      be decoded or its key type is unsupported.

    <Side Effects>
      pyca/cryptography's RSAPublicKey.verify() called to do the actual
      verification.

    <Returns>
      Boolean.  True if the signature is valid, False otherwise.
    """
    # Does 'public_key' have the correct format?
    # This check will ensure 'public_key' conforms to
    # 'securesystemslib.formats.PEMRSA_SCHEMA'.  Raise
    # 'securesystemslib.exceptions.FormatError' if the check fails.
    securesystemslib.formats.PEMRSA_SCHEMA.check_match(public_key)
    # Does 'signature_scheme' have the correct format?
    securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(signature_scheme)
    # Does 'signature' have the correct format?
    securesystemslib.formats.PYCACRYPTOSIGNATURE_SCHEMA.check_match(signature)
    # What about 'data'?
    securesystemslib.formats.DATA_SCHEMA.check_match(data)
    # Verify whether the private key of 'public_key' produced 'signature'.
    # Before returning the 'valid_signature' Boolean result, ensure
    # 'RSASSA-PSS' was used as the signature scheme.
    valid_signature = False
    # Verify the RSASSA-PSS signature with pyca/cryptography.
    try:
        public_key_object = serialization.load_pem_public_key(public_key.encode('utf-8'), backend=default_backend())
        # verify() raises 'cryptography.exceptions.InvalidSignature' if the
        # signature is invalid.  'salt_length' is set to the digest size of
        # the hashing algorithm.
        try:
            public_key_object.verify(signature, data, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=hashes.SHA256().digest_size), hashes.SHA256())
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    # Raised by load_pem_public_key().
    except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e:
        raise securesystemslib.exceptions.CryptoError('The PEM could not be' ' decoded successfully, or contained an unsupported key type: ' + str(e))
|
def close(self):
    """Unmap the MMIO object's mapped physical memory.  Safe to call
    more than once."""
    if self.mapping is not None:
        self.mapping.close()
        self.mapping = None
        self._fd = None
|
def _process_media_status ( self , data ) :
"""Processes a STATUS message ."""
|
self . status . update ( data )
self . logger . debug ( "Media:Received status %s" , data )
# Update session active threading event
if self . status . media_session_id is None :
self . session_active_event . clear ( )
else :
self . session_active_event . set ( )
self . _fire_status_changed ( )
|
def on_finish(self):
    """Called regardless of success or failure; stamps the elapsed
    request time on the response and hands it to the callback, if any."""
    response = self.response
    response.request_time = time.time() - self.start_time
    cb = self.callback
    if cb:
        cb(response)
|
def by_median_household_income(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.median_household_income.name, ascending=False, returns=DEFAULT_LIMIT):
    """Search zipcode information by median household income.

    :param lower: minimum median household income (inclusive).
    :param upper: maximum median household income (inclusive).
    :param zipcode_type: which class of zipcodes to search.
    :param sort_by: column to sort results by; defaults to the
        median-household-income column.
    :param ascending: sort order.
    :param returns: maximum number of results to return.
    """
    # Thin wrapper: all filtering/sorting is delegated to query().
    return self.query(median_household_income_lower=lower, median_household_income_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
|
def normalize_locale(loc):
    '''Format a locale specifier according to the format returned by
    ``locale -a``.'''
    parts = split_locale(loc)
    # locale -a prints territory upper-cased and the codeset lower-cased
    # with dashes removed, and omits the charmap.
    parts['territory'] = parts['territory'].upper()
    parts['codeset'] = parts['codeset'].lower().replace('-', '')
    parts['charmap'] = ''
    return join_locale(parts)
|
def speech(self) -> str:
    """Report summary designed to be read by a text-to-speech program"""
    # Lazily fetch the report on first use.
    if not self.data:
        self.update()
    # 'speech' here resolves to the module-level speech helper; the
    # method name only shadows it inside the class namespace.
    return speech.taf(self.data, self.units)
|
def alias(*aliases):
    """Decorator to add aliases for Cmdln.do_* command handlers.

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.alias("!", "sh")
            def do_shell(self, argv):
                # ... implement 'shell' command
    """
    def decorate(func):
        # Extend an existing alias list in place, or start a fresh one.
        try:
            func.aliases += aliases
        except AttributeError:
            func.aliases = list(aliases)
        return func
    return decorate
|
def _enforce_space(self, item):
    """Enforce a space in certain situations.

    There are cases where we will want a space where normally we
    wouldn't put one.  This just enforces the addition of a space.
    """
    # Nothing to do if the last emitted element is already
    # whitespace/structure.
    if isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)):
        return
    if not self._prev_item:
        return
    # NOTE: unicode() is Python-2 only; this module targets Python 2.
    item_text = unicode(item)
    prev_text = unicode(self._prev_item)
    # Prefer a space around a '.' in an import statement, and between the
    # 'import' and '('.
    if ((item_text == '.' and prev_text == 'from') or (item_text == 'import' and prev_text == '.') or (item_text == '(' and prev_text == 'import')):
        self._lines.append(self._Space())
|
def json(src, dest=False, shift=4):
    """Beautify JSON.

    Args:
        src: JSON string or path-to-file with text to beautify (mandatory)
        dest: path-to-file to save the beautified json string (optional);
            if the file doesn't exist it is created automatically;
            if this arg is skipped the function returns a string
        shift: either an integer (number of spaces per indent level,
            e.g. shift=8) or a string pattern (e.g. shift='....')

    Returns:
        1) the beautified JSON string if dest is not provided
        2) the length of the saved file if dest is provided

    Example:
        json('path/to/file.json')
        json('path/to/file.json', 'path/to/save/result.json')
        json('path/to/file.json', 8)
        json('path/to/file.json', 'path/to/save/result.json', 2)
    """
    if not dest:
        # No destination: just return the beautified string.
        return _json(_text(src))
    else:
        if type(dest) is int:
            # dest is skept, custom pattern provided at dist place
            return _json(_text(src), dest)
        else:
            # NOTE(review): a string *pattern* passed in dest's place
            # (e.g. json('f.json', '....')) is indistinguishable from a
            # file path here and is treated as a destination file --
            # confirm against the intended usage.
            with open(dest, 'w') as f2:
                return f2.write(_json(_text(src), shift))
|
def set_chassis(self, chassis):
    """Sets the chassis.

    :param chassis: chassis string: 1720, 1721, 1750, 1751 or 1760
    """
    # Old-style (yield from) coroutine: send the command to the
    # hypervisor and wait until it is applied.
    yield from self._hypervisor.send('c1700 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
    log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name, id=self._id, chassis=chassis))
    self._chassis = chassis
    # Presumably rebuilds slots/adapters for the new chassis model --
    # TODO confirm against _setup_chassis().
    self._setup_chassis()
|
def load_plugins(plugin_dir: str, module_prefix: str) -> int:
    """Find all non-hidden modules or packages in a given directory,
    and import them with the given module prefix.

    :param plugin_dir: plugin directory to search
    :param module_prefix: module prefix used while importing
    :return: number of plugins successfully loaded
    """
    count = 0
    for name in os.listdir(plugin_dir):
        path = os.path.join(plugin_dir, name)
        # Skip private files and anything that is not a .py module.
        if os.path.isfile(path) and (name.startswith('_') or not name.endswith('.py')):
            continue
        # Skip private directories and directories that are not packages.
        if os.path.isdir(path) and (name.startswith('_') or not os.path.exists(os.path.join(path, '__init__.py'))):
            continue
        # The original pattern used an unescaped '.', so '(.py)?' matched
        # any character followed by 'py'; escape the dot so only a
        # literal '.py' suffix is recognized.
        m = re.match(r'([_A-Z0-9a-z]+)(\.py)?', name)
        if not m:
            continue
        if load_plugin(f'{module_prefix}.{m.group(1)}'):
            count += 1
    return count
|
def _getFirstPathExpression(name):
    """Returns the first metric path in an expression, or None if the
    parse tree contains no path expression."""
    tokens = grammar.parseString(name)
    # Walk down the parse tree until a pathExpression appears or the
    # tree bottoms out.
    while True:
        if tokens.pathExpression:
            return tokens.pathExpression
        if tokens.expression:
            tokens = tokens.expression
        elif tokens.call:
            # Descend into the first argument of a function call.
            tokens = tokens.call.args[0]
        else:
            return None
|
def parse_state_variable(self, node):
    """Parse a <StateVariable> element and register it with the current
    regime.

    @param node: Node containing the <StateVariable> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when the state variable does not specify a
        name or a dimension.
    """
    attribs = node.lattrib
    if 'name' not in attribs:
        self.raise_error('<StateVariable> must specify a name')
    name = attribs['name']
    if 'dimension' not in attribs:
        self.raise_error("State variable '{0}' must specify a dimension", name)
    dimension = attribs['dimension']
    exposure = attribs['exposure'] if 'exposure' in attribs else None
    self.current_regime.add_state_variable(StateVariable(name, dimension, exposure))
|
def find_packages():
    """Find all of mdtraj's python packages.

    Adapted from IPython's setupbase.py.  Copyright IPython contributors,
    licensed under the BSD license.
    """
    packages = ['mdtraj.scripts']
    # The original named the walk variable 'dir', shadowing the builtin.
    for dirpath, subdirs, files in os.walk('MDTraj'):
        if '__init__.py' not in files:
            # not a package
            continue
        package = dirpath.replace(os.path.sep, '.')
        packages.append(package.replace('MDTraj', 'mdtraj'))
    return packages
|
def create_basic_app(cls, bundles=None, _config_overrides=None):
    """Creates a "fake" app for use while developing"""
    bundles = bundles or []
    # Name the app after the last bundle, falling back to a generic name.
    if bundles:
        app_name = bundles[-1].module_name
    else:
        app_name = 'basic_app'
    templates = os.path.join(os.path.dirname(__file__), 'templates')
    app = FlaskUnchained(app_name, template_folder=templates)
    for bundle in bundles:
        bundle.before_init_app(app)
    unchained.init_app(app, DEV, bundles, _config_overrides=_config_overrides)
    for bundle in bundles:
        bundle.after_init_app(app)
    return app
|
def update_identity(self, identity, identity_id):
    """UpdateIdentity.

    :param :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>` identity:
    :param str identity_id:
    """
    route = {}
    if identity_id is not None:
        route['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
    payload = self._serialize.body(identity, 'Identity')
    self._send(http_method='PUT',
               location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
               version='5.0',
               route_values=route,
               content=payload)
|
def python_like_exts():
    """Return a list of all python-like extensions"""
    found = []
    for lang in languages.PYTHON_LIKE_LANGUAGES:
        found.extend(languages.ALL_LANGUAGES[lang])
    # Prefix each bare extension with a dot.
    return ['.' + ext for ext in found]
|
def get_repo_info(loader, sha, prov_g):
    """Generate swagger information from the repo being used.

    :param loader: repository loader exposing name/title/contact/commit
        metadata accessors.
    :param sha: commit sha to describe, or a falsy value for the newest
        commit in the list.
    :param prov_g: optional provenance graph; when given, the repo URI is
        recorded as a used entity.
    :return: tuple ``(prev_commit, next_commit, info, basePath)`` where the
        commits are the chronological neighbours of ``version`` (or None).
    """
    user_repo = loader.getFullName()
    repo_title = loader.getRepoTitle()
    contact_name = loader.getContactName()
    contact_url = loader.getContactUrl()
    commit_list = loader.getCommitList()
    licence_url = loader.getLicenceURL()
    # Add the API URI as a used entity by the activity
    if prov_g:
        prov_g.add_used_entity(loader.getRepoURI())
    prev_commit = None
    next_commit = None
    version = sha if sha else commit_list[0]
    # Hoisted: the original scanned the commit list three times with
    # commit_list.index(version); compute the index once.
    idx = commit_list.index(version)
    if idx < len(commit_list) - 1:
        prev_commit = commit_list[idx + 1]
    if idx > 0:
        next_commit = commit_list[idx - 1]
    info = {
        'version': version,
        'title': repo_title,
        'contact': {'name': contact_name, 'url': contact_url},
        'license': {'name': 'License', 'url': licence_url},
    }
    basePath = '/api/' + user_repo + '/'
    basePath += ('commit/' + sha + '/') if sha else ''
    return prev_commit, next_commit, info, basePath
|
def refreshWidgets(self):
    """Manually refresh every widget attached to this simulation.

    Call this after particle data has been changed by hand so that all
    attached widgets redraw with the new state.

    :raises RuntimeError: if no widgets have ever been attached.
    """
    if not hasattr(self, '_widgets'):
        raise RuntimeError("No widgets found")
    for widget in self._widgets:
        widget.refresh(isauto=0)
|
def solve_limited(self, assumptions=None):
    """Solve internal formula using given budgets for conflicts and
    propagations.

    :param assumptions: iterable of assumption literals handed to the
        backend solver (defaults to no assumptions).
    :return: solver status as reported by the backend, or None when no
        solver instance exists.
    """
    if assumptions is None:
        # Avoid a mutable default argument; [] is what the backend expects.
        assumptions = []
    if self.minisat:
        if self.use_timer:
            # time.clock() was removed in Python 3.8; use process_time()
            # (CPU time, like clock() on Unix) when available.
            timer = getattr(time, 'process_time', None) or time.clock
            start_time = timer()
        # saving default SIGINT handler
        def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
        self.status = pysolvers.minisatgh_solve_lim(self.minisat, assumptions)
        # recovering default SIGINT handler
        def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
        if self.use_timer:
            self.call_time = timer() - start_time
            self.accu_time += self.call_time
        return self.status
|
async def run_action(self, action_name, **params):
    """Run an action on this unit.

    :param str action_name: Name of action to run
    :param **params: Action parameters
    :returns: A :class:`juju.action.Action` instance.

    Note that this only enqueues the action.  You will need to call
    ``action.wait()`` on the resulting `Action` instance if you wish
    to block until the action is complete.
    """
    facade = client.ActionFacade.from_connection(self.connection)
    log.debug('Starting action `%s` on %s', action_name, self.name)
    spec = client.Action(name=action_name, parameters=params, receiver=self.tag, )
    res = await facade.Enqueue([spec])
    outcome = res.results[0]
    queued = outcome.action
    if outcome.error and outcome.error.code == 'not found':
        raise ValueError('Action `%s` not found on %s' % (action_name, self.name))
    elif outcome.error:
        raise Exception('Unknown action error: %s' % outcome.error.serialize())
    action_id = queued.tag[len('action-'):]
    log.debug('Action started as %s', action_id)
    # we mustn't use wait_for_action because that blocks until the
    # action is complete, rather than just being in the model
    return await self.model._wait_for_new('action', action_id)
|
def incremental(self, start_time, **kwargs):
    """Retrieve bulk data from the chat incremental API.

    :param fields: list of fields to retrieve. `Chat API Docs
        <https://developer.zendesk.com/rest_api/docs/chat/incremental_export#usage-notes-resource-expansion>`__.
    :param start_time: The time of the oldest object you are interested in.
    """
    endpoint = self.endpoint.incremental
    return self._query_zendesk(endpoint, self.object_type,
                               start_time=start_time, **kwargs)
|
def is_abstract(self, pass_is_abstract=True):
    """Check if the method is abstract.

    A method is considered abstract if any of the following is true:
    * The only statement is 'raise NotImplementedError'
    * The only statement is 'pass' and pass_is_abstract is True
    * The method is annotated with abc.abstractproperty/abc.abstractmethod

    :param pass_is_abstract: whether a body consisting of a ``pass``
        statement counts as abstract.
    :returns: True if the method is abstract, False otherwise.
    :rtype: bool
    """
    # An abc.abstractmethod / abc.abstractproperty decorator marks the
    # method abstract regardless of its body.
    if self.decorators:
        for node in self.decorators.nodes:
            try:
                inferred = next(node.infer())
            except exceptions.InferenceError:
                # Undecidable decorator -- ignore it and keep looking.
                continue
            if inferred and inferred.qname() in ("abc.abstractproperty", "abc.abstractmethod", ):
                return True
    for child_node in self.body:
        if isinstance(child_node, node_classes.Raise):
            if child_node.raises_not_implemented():
                return True
        # NOTE: this return sits inside the loop, so only the *first*
        # statement is inspected unless it is a Raise of
        # NotImplementedError (handled above).
        return pass_is_abstract and isinstance(child_node, node_classes.Pass)
    # empty function is the same as function with a single "pass" statement
    if pass_is_abstract:
        return True
|
def prepare(self, query, custom_payload=None, keyspace=None):
    """Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement`
    instance which can be used as follows::

        >>> session = cluster.connect("mykeyspace")
        >>> query = "INSERT INTO users (id, name, age) VALUES (?, ?, ?)"
        >>> prepared = session.prepare(query)
        >>> session.execute(prepared, (user.id, user.name, user.age))

    Or you may bind values to the prepared statement ahead of time::

        >>> prepared = session.prepare(query)
        >>> bound_stmt = prepared.bind((user.id, user.name, user.age))
        >>> session.execute(bound_stmt)

    Of course, prepared statements may (and should) be reused::

        >>> prepared = session.prepare(query)
        >>> for user in users:
        ...     bound = prepared.bind((user.id, user.name, user.age))
        ...     session.execute(bound)

    Alternatively, if :attr:`~.Cluster.protocol_version` is 5 or higher
    (requires Cassandra 4.0+), the keyspace can be specified as a
    parameter.  This will allow you to avoid specifying the keyspace in the
    query without specifying a keyspace in :meth:`~.Cluster.connect`.  It
    even will let you prepare and use statements against a keyspace other
    than the one originally specified on connection:

        >>> analyticskeyspace_prepared = session.prepare(
        ...     "INSERT INTO user_activity id, last_activity VALUES (?, ?)",
        ...     keyspace="analyticskeyspace")  # note the different keyspace

    **Important**: PreparedStatements should be prepared only once.
    Preparing the same query more than once will likely affect performance.

    `custom_payload` is a key value map to be passed along with the prepare
    message. See :ref:`custom_payload`.
    """
    message = PrepareMessage(query=query, keyspace=keyspace)
    # Synchronous round-trip: send PREPARE and block on the result, using
    # the session default timeout (no per-query timeout here).
    future = ResponseFuture(self, message, query=None, timeout=self.default_timeout)
    try:
        future.send_request()
        query_id, bind_metadata, pk_indexes, result_metadata, result_metadata_id = future.result()
    except Exception:
        log.exception("Error preparing query:")
        raise
    prepared_keyspace = keyspace if keyspace else None
    prepared_statement = PreparedStatement.from_message(query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, self._protocol_version, result_metadata, result_metadata_id)
    prepared_statement.custom_payload = future.custom_payload
    # Register the statement cluster-wide so it can be re-prepared later.
    self.cluster.add_prepared(query_id, prepared_statement)
    if self.cluster.prepare_on_all_hosts:
        host = future._current_host
        try:
            # Best-effort warm-up of the other hosts; a failure here must
            # not fail the prepare itself.
            self.prepare_on_all_hosts(prepared_statement.query_string, host, prepared_keyspace)
        except Exception:
            log.exception("Error preparing query on all hosts:")
    return prepared_statement
|
def use_comparative_assessment_part_view(self):
    """Pass through to provider AssessmentPartLookupSession.use_comparative_assessment_part_view"""
    self._object_views['assessment_part'] = COMPARATIVE
    # self._get_provider_session('assessment_part_lookup_session') # To make sure the session is tracked
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_assessment_part_view()
        except AttributeError:
            # This provider session does not support the view; skip it.
            pass
|
def increment_lineno(node, n=1):
    """Increment the line numbers of all nodes by `n` if they have line number
    attributes.  This is useful to "move code" to a different location in a
    file.

    :param node: root AST node; it and all its descendants are updated
        in place.
    :param n: amount to add to each ``lineno``.
    """
    # BUG FIX: the original iterated ``zip((node,), walk(node))``, which
    # yields a single (node, first_walked) tuple -- and tuples have no
    # ``_attributes`` -- so no node was ever updated.  Iterate the walk
    # itself instead.  (Assumes ``walk`` yields the root node first, as
    # ast.walk does -- confirm if ``walk`` is a custom variant.)
    for item in walk(node):
        if 'lineno' in item._attributes:
            item.lineno = getattr(item, 'lineno', 0) + n
|
def read_samples(self, sr=None, offset=0, duration=None):
    """Read the samples of the utterance.

    Args:
        sr (int): If None uses the sampling rate given by the track,
            otherwise resamples to the given sampling rate.
        offset (float): Offset in seconds to read samples from.
        duration (float): If not ``None`` read only this
            number of seconds in maximum.

    Returns:
        np.ndarray: A numpy array containing the samples
        as a floating point (numpy.float32) time series.
    """
    # Maximum duration available in this utterance, shrunk by the offset.
    available = self.duration
    if offset > 0 and available is not None:
        available -= offset
    if duration is None:
        effective = available
    elif available is None:
        effective = duration
    else:
        effective = min(duration, available)
    return self.track.read_samples(sr=sr,
                                   offset=self.start + offset,
                                   duration=effective)
|
def left_of(self, other):
    """Test if this range is strictly left of `other`.

        >>> intrange(1, 5).left_of(intrange(5, 10))
        True
        >>> intrange(1, 10).left_of(intrange(5, 10))
        False

    The bitwise right shift operator ``<<`` is overloaded for this
    operation too:

        >>> intrange(1, 5) << intrange(5, 10)
        True

    The choice of overloading ``<<`` mimicks PostgreSQL's range operators;
    as this is not obvious, its use is discouraged.

    :param other: Range to test against.
    :return: ``True`` if this range is completely to the left of ``other``.
    """
    if not self.is_valid_range(other):
        raise TypeError(
            ("Left of is not supported for '{}', provide a proper range "
             "class").format(other.__class__.__name__))
    # Strictly left: ordered before `other` and sharing no points with it.
    return self < other and not self.overlap(other)
|
def plot_baf_lrr(file_names, options):
    """Plot BAF and LRR for a list of files.

    :param file_names: contains the name of the input file for each sample.
    :param options: the options.

    :type file_names: dict
    :type options: argparse.Namespace

    Plots the BAF (B Allele Frequency) and LRR (Log R Ratio) of each sample.
    Only the sexual chromosomes are shown.
    """
    # importing important stuff
    import matplotlib as mpl
    if options.format != "X11" and mpl.get_backend() != "agg":
        # Headless backend when not rendering to an X11 display.
        mpl.use("Agg")
    import matplotlib.pyplot as plt
    if options.format != "X11":
        plt.ioff()
    # For each of the sample / files
    # NOTE(review): dict.iteritems() and str-based rstrip on a file opened
    # in 'rb' mode indicate this targets Python 2 -- confirm before running
    # under Python 3.
    for sample, file_name in file_names.iteritems():
        data = []
        # Reading the file (transparently handling gzip-compressed input)
        open_func = open
        if file_name.endswith(".gz"):
            open_func = gzip.open
        with open_func(file_name, 'rb') as input_file:
            # Map each column name to its index from the header line.
            header_index = dict([(col_name, i) for i, col_name in enumerate(input_file.readline().rstrip("\r\n").split("\t"))])
            for col_name in {"Chr", "Position", "B Allele Freq", "Log R Ratio"}:
                if col_name not in header_index:
                    msg = "{}: no column named {}".format(file_name, col_name)
                    raise ProgramError(msg)
            # Reading the dat
            for line in input_file:
                row = line.rstrip("\r\n").split("\t")
                # We only need X and Y chromosomes
                chromosome = encode_chromosome(row[header_index["Chr"]])
                if chromosome not in {"X", "Y"}:
                    continue
                # The position
                position = row[header_index["Position"]]
                try:
                    position = int(position)
                except ValueError:
                    msg = "{}: impossible position {}".format(file_name, position)
                    raise ProgramError(msg)
                # The BAF
                baf = row[header_index["B Allele Freq"]]
                try:
                    baf = float(baf)
                except ValueError:
                    msg = "{}: impossible baf {}".format(file_name, baf)
                    raise ProgramError(msg)
                # The LRR
                lrr = row[header_index["Log R Ratio"]]
                try:
                    lrr = float(lrr)
                except ValueError:
                    msg = "{}: impossible lrr {}".format(file_name, lrr)
                    raise ProgramError(msg)
                # Saving the data
                data.append((chromosome, position, lrr, baf))
        # Creating the numpy array (structured: chr, pos, lrr, baf)
        data = np.array(data, dtype=[("chr", "a1"), ("pos", int), ("lrr", float), ("baf", float)])
        # Creating the figure and axes
        fig, axes = plt.subplots(2, 2, figsize=(20, 8))
        plt.subplots_adjust(left=0.05, right=0.97, wspace=0.15, hspace=0.3)
        fig.suptitle(sample, fontsize=16, weight="bold")
        # Setting subplot properties
        for ax in axes.flatten():
            ax.xaxis.set_ticks_position("bottom")
            ax.yaxis.set_ticks_position("left")
            ax.spines["top"].set_visible(False)
            ax.spines["right"].set_visible(False)
            ax.spines["bottom"].set_position(("outward", 9))
            ax.spines["left"].set_position(("outward", 9))
        # Separating the axes (column-major: X chromosome left, Y right)
        x_lrr_ax, x_baf_ax, y_lrr_ax, y_baf_ax = axes.flatten(order='F')
        # Printing the X chromosome
        curr_chr = data["chr"] == "X"
        x_lrr_ax.plot(data["pos"][curr_chr] / 1000000.0, data["lrr"][curr_chr], "o", ms=1, mec="#0099CC", mfc="#0099CC")[0].set_clip_on(False)
        x_baf_ax.plot(data["pos"][curr_chr] / 1000000.0, data["baf"][curr_chr], "o", ms=1, mec="#669900", mfc="#669900")[0].set_clip_on(False)
        x_lrr_ax.axhline(y=0, color="#000000", ls="--", lw=1.2)
        x_baf_ax.axhline(y=0.5, color="#000000", ls="--", lw=1.2)
        x_lrr_ax.set_ylabel("LRR", weight="bold")
        x_baf_ax.set_ylabel("BAF", weight="bold")
        x_baf_ax.set_xlabel("Position (Mb)", weight="bold")
        x_lrr_ax.set_title("Chromosome X", weight="bold")
        # Printing the Y chromosome
        curr_chr = data["chr"] == "Y"
        y_lrr_ax.plot(data["pos"][curr_chr] / 1000000.0, data["lrr"][curr_chr], "o", ms=1, mec="#0099CC", mfc="#0099CC")[0].set_clip_on(False)
        y_baf_ax.plot(data["pos"][curr_chr] / 1000000.0, data["baf"][curr_chr], "o", ms=1, mec="#669900", mfc="#669900")[0].set_clip_on(False)
        y_lrr_ax.axhline(y=0, color="#000000", ls="--", lw=1.2)
        y_baf_ax.axhline(y=0.5, color="#000000", ls="--", lw=1.2)
        y_lrr_ax.set_ylabel("LRR", weight="bold")
        y_baf_ax.set_ylabel("BAF", weight="bold")
        y_baf_ax.set_xlabel("Position (Mb)", weight="bold")
        y_lrr_ax.set_title("Chromosome Y", weight="bold")
        # Saving the figure
        if options.format == "X11":
            plt.show()
        else:
            plt.savefig("{}_{}_lrr_baf.{}".format(options.out, sample, options.format), dpi=options.dpi, )
        # Closing the figure
        plt.close(fig)
|
def str_count(arr, pat, flags=0):
    """Count occurrences of pattern in each string of the Series/Index.

    This function is used to count the number of times a particular regex
    pattern is repeated in each of the string elements of the
    :class:`~pandas.Series`.

    Parameters
    ----------
    pat : str
        Valid regular expression.
    flags : int, default 0, meaning no flags
        Flags for the `re` module. For a complete list, `see here
        <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.

    Returns
    -------
    Series or Index
        Same type as the calling object containing the integer counts.
        Missing values propagate as NaN.

    See Also
    --------
    re : Standard library module for regular expressions.
    str.count : Standard library version, without regular expression support.

    Notes
    -----
    Some characters need to be escaped when passing in `pat`.
    eg. ``'$'`` has a special meaning in regex and must be escaped when
    finding this literal character.

    Examples
    --------
    >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
    >>> s.str.count('a')
    0    0.0
    1    0.0
    2    2.0
    3    2.0
    4    NaN
    5    0.0
    6    1.0
    dtype: float64
    """
    compiled = re.compile(pat, flags=flags)

    def _count_matches(value):
        return len(compiled.findall(value))

    return _na_map(_count_matches, arr, dtype=int)
|
def run_event_hooks(event, task):
    """Executes registered task event plugins for the provided event and task.

    `event`
        Name of the event to trigger for the plugin:
        ('task_start', 'task_run', 'task_end')
    `task`
        ``Task`` instance.
    """
    # get chain of classes registered for this event
    call_chain = _event_hooks.get(event)
    if not call_chain:
        return
    # lookup the associated class method for this event
    method_map = {'task_start': 'on_taskstart',
                  'task_run': 'on_taskrun',
                  'task_end': 'on_taskend'}
    method_name = method_map.get(event)
    if not method_name:
        return
    for _, get_plugin in call_chain:
        plugin = get_plugin()
        if _is_plugin_disabled(plugin):
            continue
        try:
            # execute
            getattr(plugin, method_name)(task)
        except Exception:
            # TODO: log these issues for plugin author or user
            pass
|
def _apply_base_theme(app):
    """Apply base theme to the application.

    Args:
        app (QApplication): QApplication instance.
    """
    # Qt 4 ships 'plastique'; Qt 5+ uses 'Fusion'.
    style_name = 'plastique' if QT_VERSION < (5, ) else 'Fusion'
    app.setStyle(style_name)
    with open(_STYLESHEET) as stylesheet:
        app.setStyleSheet(stylesheet.read())
|
def remove_option(self, mask):
    """Unset arbitrary query flags using a bitmask.

    To unset the tailable flag:
        cursor.remove_option(2)

    :param mask: integer bitmask of query flags to clear.
    :return: this cursor, to allow chaining.
    """
    if not isinstance(mask, int):
        raise TypeError("mask must be an int")
    self.__check_okay_to_chain()
    # Clearing the exhaust flag also turns off exhaust mode.
    if mask & _QUERY_OPTIONS["exhaust"]:
        self.__exhaust = False
    self.__query_flags &= ~mask
    return self
|
def _palette_cmd(self, event):
    """Respond to user click on a palette item.

    Reads the clicked label's background colour and propagates it to every
    part of the picker: RGB/HSV variables, hex entry, hue bar, gradient
    square, alpha bar (when enabled) and the preview.
    """
    label = event.widget
    label.master.focus_set()
    label.master.configure(relief="sunken")
    # winfo_rgb returns 16-bit components; scale them down to 0-255.
    r, g, b = self.winfo_rgb(label.cget("background"))
    r = round2(r * 255 / 65535)
    g = round2(g * 255 / 65535)
    b = round2(b * 255 / 65535)
    args = (r, g, b)
    if self.alpha_channel:
        # Keep the current alpha value and forward the RGBA to the alpha bar.
        a = self.alpha.get()
        args += (a, )
        self.alphabar.set_color(args)
    color = rgb_to_hexa(*args)
    h, s, v = rgb_to_hsv(r, g, b)
    # Push the new colour into every widget of the dialog.
    self.red.set(r)
    self.green.set(g)
    self.blue.set(b)
    self.hue.set(h)
    self.saturation.set(s)
    self.value.set(v)
    self.hexa.delete(0, "end")
    self.hexa.insert(0, color.upper())
    self.bar.set(h)
    self.square.set_hsv((h, s, v))
    self._update_preview()
|
def getJsonPath(name, moduleFile):
    """Locate a JSON configuration file.

    1. Look for the file in the current working directory first.
    2. Fall back to the directory containing ``moduleFile``.

    :param name: file name of the JSON configuration file.
    :param moduleFile: ``__file__`` of the calling module, used for the
        fallback lookup.
    :return: full path of the JSON file; the fallback path is returned
        even when the file does not exist there.
    """
    currentJsonPath = os.path.join(os.getcwd(), name)
    if os.path.isfile(currentJsonPath):
        return currentJsonPath
    moduleFolder = os.path.abspath(os.path.dirname(moduleFile))
    # The original joined with a redundant '.' component, yielding paths
    # like '/pkg/./name'; join directly instead.
    return os.path.join(moduleFolder, name)
|
def cover(self, match_set):
    """Return a new classifier rule that can be added to the match set,
    with a condition that matches the situation of the match set and an
    action selected to avoid duplication of the actions already
    contained therein.

    Usage:
        match_set = model.match(situation)
        if model.algorithm.covering_is_required(match_set):
            new_rule = model.algorithm.cover(match_set)
            assert new_rule.condition(situation)
            model.add(new_rule)
            match_set = model.match(situation)

    Arguments:
        match_set: A MatchSet instance.
    Return:
        A new ClassifierRule instance, appropriate for the addition to
        match_set and to the classifier set from which match_set was
        drawn.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.model.algorithm is self
    # Create a new condition that matches the situation.
    condition = bitstrings.BitCondition.cover(match_set.situation,
                                              self.wildcard_probability)
    # Pick a random action that (preferably) isn't already suggested by
    # some other rule for this situation.
    candidates = frozenset(match_set.model.possible_actions) - frozenset(match_set)
    if not candidates:
        candidates = match_set.model.possible_actions
    chosen_action = random.choice(list(candidates))
    # Create the new rule.
    return XCSClassifierRule(condition, chosen_action, self, match_set.time_stamp)
|
def from_dict(cls, d):
    """Construct a MSONable AdfTask object from the JSON dict.

    Parameters
    ----------
    d : dict
        A dict of saved attributes.

    Returns
    -------
    task : AdfTask
        An AdfTask object recovered from the JSON dict ``d``.
    """
    def _decode(raw):
        # None stays None; anything else is rebuilt as an AdfKey.
        return AdfKey.from_dict(raw) if raw is not None else None

    operation = d.get("operation")
    title = d.get("title")
    basis_set = _decode(d.get("basis_set"))
    xc = _decode(d.get("xc"))
    units = _decode(d.get("units"))
    scf = _decode(d.get("scf"))
    others = [AdfKey.from_dict(o) for o in d.get("others", [])]
    geo = _decode(d.get("geo"))
    # NOTE(review): if "geo" is missing, geo is None and geo.subkeys raises
    # AttributeError -- presumably "geo" is always serialized; confirm.
    return cls(operation, basis_set, xc, title, units, geo.subkeys, scf, others)
|
def get_as_nullable_parameters(self, key):
    """Converts map element into a Parameters or returns None if conversion is not possible.

    :param key: a key of element to get.
    :return: Parameters value of the element or None if conversion is not supported.
    """
    value = self.get_as_nullable_map(key)
    # Identity check: `value != None` invokes __ne__, which a map-like type
    # may override; `is not None` is the correct idiom.
    return Parameters(value) if value is not None else None
|
def release_apply(ui, repo, clname, **opts):
    """apply a CL to the release branch

    Creates a new CL copying a previously committed change
    from the main branch to the release branch.
    The current client must either be clean or already be in
    the release branch.

    The release branch must be created by starting with a
    clean client, disabling the code review plugin, and running:

        hg update weekly.YYYY-MM-DD
        hg branch release-branch.rNN
        hg commit -m 'create release-branch.rNN'
        hg push --new-branch

    Then re-enable the code review plugin.

    People can test the release branch by running

        hg update release-branch.rNN

    in a clean client.  To return to the normal tree,

        hg update default

    Move changes since the weekly into the release branch
    using hg release-apply followed by the usual code review
    process and hg submit.

    When it comes time to tag the release, record the
    final long-form tag of the release-branch.rNN
    in the *default* branch's .hgtags file.  That is, run

        hg update default

    and then edit .hgtags as you would for a weekly.
    """
    c = repo[None]
    if not releaseBranch:
        raise hg_util.Abort("no active release branches")
    if c.branch() != releaseBranch:
        if c.modified() or c.added() or c.removed():
            raise hg_util.Abort("uncommitted local changes - cannot switch branches")
        err = hg_clean(repo, releaseBranch)
        if err:
            raise hg_util.Abort(err)
    try:
        err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
        if err:
            raise hg_util.Abort(err)
    except Exception:
        # Was Python-2-only `except Exception, e` + `raise e`; the bare
        # `raise` re-raises with the original traceback intact.
        hg_clean(repo, "default")
        raise
|
def list_lattices(device_name: str = None, num_qubits: int = None, connection: ForestConnection = None):
    """Query the Forest 2.0 server for its knowledge of lattices.  Optionally filters by underlying
    device name and lattice qubit count.

    :param device_name: only return lattices on this device, when given.
    :param num_qubits: only return lattices with this qubit count, when given.
    :param connection: ForestConnection to use; a default one is built when None.
    :return: A dictionary keyed on lattice names and valued in dictionaries of the form
        {"device_name": device_name, "qubits": num_qubits}
    """
    if connection is None:
        # Fall back to the default connection (reads the user's QCS config).
        connection = ForestConnection()
    session = connection.session
    url = connection.forest_cloud_endpoint + "/lattices"
    try:
        response = get_json(session, url, params={"device_name": device_name, "num_qubits": num_qubits})
        return response["lattices"]
    except Exception as e:
        # Any failure (auth, network, bad endpoint) is wrapped in a single
        # user-facing error that explains the common causes.
        raise ValueError("""
    list_lattices encountered an error when querying the Forest 2.0 endpoint.
    Some common causes for this error include:
    * You don't have valid user authentication information. Very likely this is because you
      haven't yet been invited to try QCS. We plan on making our device information publicly
      accessible soon, but in the meanwhile, you'll have to use default QVM configurations and
      to use `list_quantum_computers` with `qpus = False`.
    * You do have user authentication information, but it is missing or modified. You can find
      this either in the environment variables FOREST_API_KEY and FOREST_USER_ID or in the
      config file (stored by default at ~/.qcs_config, but with location settable through the
      environment variable QCS_CONFIG), which contains the subsection
          [Rigetti Forest]
          user_id = your_user_id
          key = your_api_key
    * You're missing an address for the Forest 2.0 server endpoint, or the address is invalid.
      This too can be set through the environment variable FOREST_URL or by changing the
      following lines in the QCS config file:
          [Rigetti Forest]
          url = https://forest-server.qcs.rigetti.com
    For the record, here's the original exception: {}
    """.format(repr(e)))
|
def soma_surface_area(nrn, neurite_type=NeuriteType.soma):
    '''Get the surface area of a neuron's soma.

    Note:
        The surface area is calculated by assuming the soma is spherical.
    '''
    assert neurite_type == NeuriteType.soma, 'Neurite type must be soma'
    radius = nrn.soma.radius
    # Sphere surface area: 4 * pi * r^2.
    return 4 * math.pi * radius ** 2
|
def _prepare_body(self):
    """Prepare the request content for the (single) paramType=body parameter.

    :return: (content_type, marshalled payload), or (None, None) when the
        operation declares no body parameter.
    :raises errs.SchemaError: when the chosen content type is not listed in
        the operation's ``consumes``.
    """
    content_type = self.__consume
    if not content_type:
        if self.__op.consumes:
            content_type = self.__op.consumes[0]
        else:
            content_type = 'application/json'
    if self.__op.consumes and content_type not in self.__op.consumes:
        raise errs.SchemaError('content type {0} does not present in {1}'.format(content_type, self.__op.consumes))
    # according to spec, payload should be one and only,
    # so we just return the first value in dict.
    for param in self.__op.parameters:
        param = final(param)
        if getattr(param, 'in') != 'body':
            continue
        schema = deref(param.schema)
        body = self.__p['body'][param.name]
        return content_type, self.__op._mime_codec.marshal(
            content_type, body,
            _type=schema.type, _format=schema.format, name=schema.name)
    return None, None
|
def make_synthetic(self, srd=0, v_repl_seismic=2000, v_repl_log=2000, f=50, dt=0.001):
    """Early hack.  Use with extreme caution.

    Builds a zero-offset synthetic seismogram from the DT (sonic) and RHOB
    (density) curves and stores it in ``self.data['Synthetic']``.
    Hands-free.  There'll be a more granular version in synthetic.py.

    Assumes DT is in μs/m and RHOB is kg/m3.
    There is no handling yet for TVD.
    The datum handling is probably sketchy.

    :param srd: seismic reference datum (same units as kb).
    :param v_repl_seismic: replacement velocity above the datum.
    :param v_repl_log: replacement velocity above the log start.
    :param f: Ricker wavelet central frequency (Hz).
    :param dt: output sample interval (s).

    TODO:
        A lot.
    """
    kb = getattr(self.location, 'kb', None) or 0
    data0 = self.data['DT'].start
    # One-way time from datum to the log start, via replacement velocities.
    log_start_time = ((srd - kb) / v_repl_seismic) + (data0 / v_repl_log)
    # Basic log values.
    dt_log = self.data['DT'].despike()  # assume μs/m
    rho_log = self.data['RHOB'].despike()  # assume kg/m3
    if not np.allclose(dt_log.basis, rho_log.basis):
        # Resample density onto the sonic's depth basis.
        rho_log = rho_log.to_basis_like(dt_log)
    # Acoustic impedance: velocity (1e6/DT) times density.
    Z = (1e6 / dt_log) * rho_log
    # Two-way-time.
    scaled_dt = dt_log.step * np.nan_to_num(dt_log) / 1e6
    twt = 2 * np.cumsum(scaled_dt)
    t = twt + log_start_time
    # Move to time.
    t_max = t[-1] + 10 * dt
    t_reg = np.arange(0, t_max + 1e-9, dt)
    Z_t = np.interp(x=t_reg, xp=t, fp=Z)
    # Make RC series.
    rc_t = (Z_t[1:] - Z_t[:-1]) / (Z_t[1:] + Z_t[:-1])
    rc_t = np.nan_to_num(rc_t)
    # Convolve.
    _, ricker = utils.ricker(f=f, length=0.128, dt=dt)
    synth = np.convolve(ricker, rc_t, mode='same')
    params = {'dt': dt, 'z start': dt_log.start, 'z stop': dt_log.stop}
    self.data['Synthetic'] = Synthetic(synth, basis=t_reg, params=params)
    return None
|
def transform_entries(self, entries, terminal_compositions):
    """Method to transform all entries to the composition coordinate in the
    terminal compositions.  If the entry does not fall within the space
    defined by the terminal compositions, they are excluded.  For example,
    Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition.  The terminal
    compositions are represented by DummySpecies.

    Args:
        entries: Sequence of all input entries
        terminal_compositions: Terminal compositions of phase space.

    Returns:
        Sequence of TransformedPDEntries falling within the phase space.
    """
    if self.normalize_terminals:
        terminals = [c.fractional_composition for c in terminal_compositions]
    else:
        terminals = terminal_compositions
    # Map terminal compositions to unique dummy species (Xf, Xg, ...).
    sp_mapping = collections.OrderedDict(
        (comp, DummySpecie("X" + chr(102 + i)))
        for i, comp in enumerate(terminals))
    transformed = []
    for entry in entries:
        try:
            rxn = Reaction(terminals, [entry.composition])
            rxn.normalize_to(entry.composition)
            # We only allow reactions that have positive amounts of
            # reactants.
            if all(rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
                   for comp in terminals):
                newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
                           for comp in terminals}
                newcomp = {k: v for k, v in newcomp.items()
                           if v > CompoundPhaseDiagram.amount_tol}
                transformed.append(TransformedPDEntry(Composition(newcomp), entry))
        except ReactionError:
            # If the reaction can't be balanced, the entry does not fall
            # into the phase space.  We ignore them.
            pass
    return transformed, sp_mapping
|
def upset_union(self, featuresets):
    """Yield all featuresets that subsume any of the given ones."""
    lattice_concepts = [featureset.concept for featureset in featuresets]
    for concept in self.lattice.upset_union(lattice_concepts):
        yield self._featuresets[concept.index]
|
def decode(self, value):
    """Decode value.

    When an ``encoding`` is configured, the raw bytes are first decoded
    to text; the result is then passed through ``deserialize``.
    """
    text = value.decode(self.encoding) if self.encoding else value
    return self.deserialize(text)
|
def create_network(self):
    """Create a new network.

    Returns a DiscreteGenerational network configured from this
    experiment's ``generations`` and ``generation_size`` attributes, with
    the first generation drawing from an initial source.
    """
    return DiscreteGenerational(generations=self.generations, generation_size=self.generation_size, initial_source=True, )
|
def del_ostype(self, ostype, sync=True):
    """Delete OS type from this company.

    :param ostype: the OS type to be deleted from this company
    :param sync: If sync=True (default) synchronize with Ariane server.
        If sync=False, add the OS type object to the list to be removed
        on next save().
    :return:
    """
    LOGGER.debug("Company.del_ostype")
    if not sync:
        # Defer the removal; it is applied on the next save().
        self.ost_2_rm.append(ostype)
    else:
        # The OS type needs a server-side id before it can be unlinked.
        if ostype.id is None:
            ostype.sync()
        if self.id is not None and ostype.id is not None:
            params = {'id': self.id, 'ostypeID': ostype.id}
            args = {'http_operation': 'GET', 'operation_path': 'update/ostypes/delete', 'parameters': params}
            response = CompanyService.requester.call(args)
            if response.rc != 0:
                LOGGER.warning('Company.del_ostype - Problem while updating company ' + self.name + '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + " (" + str(response.rc) + ")")
            else:
                # Server accepted the removal: update the local id cache and
                # refresh the OS type object from the server.
                self.ost_ids.remove(ostype.id)
                ostype.sync()
        else:
            LOGGER.warning('Company.del_ostype - Problem while updating company ' + self.name + '. Reason: ostype ' + ostype.name + ' id is None or self.id is None')
|
def putrequest(self, method, url, *args, **kwargs):
    """httplib gives you more than one way to do it.  This is a way
    to start building up a request.  Usually followed by a bunch
    of putheader() calls.

    Instead of writing to the wire, record a pending Request on
    ``self._vcr_request`` so it can later be matched against a cassette.
    """
    # Body and headers start empty; they are filled in by subsequent
    # putheader()/send() calls.
    self._vcr_request = Request(method=method, uri=self._uri(url), body="", headers={})
    log.debug('Got {}'.format(self._vcr_request))
|
def _all_dicts ( bases , seen = None ) :
"""Yield each class in ` ` bases ` ` and each of their base classes ."""
|
if seen is None :
seen = set ( )
for cls in bases :
if cls in seen :
continue
seen . add ( cls )
yield cls . __dict__
for b in _all_dicts ( cls . __bases__ , seen ) :
yield b
|
def resolve_polytomy(self, default_dist=0.0, default_support=0.0, recursive=True):
    """Resolve all polytomies under the current node by creating an
    arbitrary dichotomic structure among the affected nodes.

    This function modifies the current tree topology and should only be
    used for compatibility reasons (i.e. programs rejecting multifurcated
    nodes in the newick representation).

    :param 0.0 default_dist: artificial branch distance of new nodes.
    :param 0.0 default_support: artificial branch support of new nodes.
    :param True recursive: Resolve any polytomy under this node.  When
        False, only the current node will be checked and fixed.
    """
    def _resolve(node):
        # Only nodes with more than two children are polytomies.
        if len(node.children) > 2:
            children = list(node.children)
            node.children = []
            # Build a "caterpillar" chain of new internal nodes: one new
            # node per extra child beyond the first two.
            next_node = root = node
            for i in range(len(children) - 2):
                next_node = next_node.add_child()
                next_node.dist = default_dist
                next_node.support = default_support
            # Re-attach the original children along the chain; the last
            # two children share the deepest internal node.
            next_node = root
            for ch in children:
                next_node.add_child(ch)
                if ch != children[-2]:
                    next_node = next_node.children[0]
    target = [self]
    if recursive:
        target.extend([n for n in self.get_descendants()])
    for n in target:
        _resolve(n)
|
def urlopen(url, headers=None, data=None, retries=RETRIES, timeout=TIMEOUT):
    """Open an HTTP connection and return the response object.

    ``headers`` is a dict merged on top of ``default_headers`` (User-Agent,
    Referer, etc. are provided by default and need not be repeated).

    Only suitable for HTTP requests; do not use it to download large files.
    If the server compressed the payload with gzip or deflate, it is
    transparently decompressed.

    ``req.data`` holds the final response body, usually UTF-8 encoded text.
    Returns ``None`` after ``retries`` failed attempts.

    :param url: the URL to open
    :param headers: extra request headers; ``None`` (default) means no extras
    :param data: optional request body (makes the request a POST)
    :param retries: number of attempts before giving up
    :param timeout: per-attempt socket timeout in seconds
    """
    # ``headers`` was a mutable default argument ({}); use None to avoid
    # sharing one dict across calls.
    if headers is None:
        headers = {}
    headers_merged = default_headers.copy()
    headers_merged.update(headers)
    opener = urllib.request.build_opener(ForbiddenHandler)
    opener.addheaders = [(k, v) for k, v in headers_merged.items()]
    for _ in range(retries):
        try:
            req = opener.open(url, data=data, timeout=timeout)
            encoding = req.headers.get('Content-encoding')
            req.data = req.read()
            if encoding == 'gzip':
                req.data = gzip.decompress(req.data)
            elif encoding == 'deflate':
                # Negative wbits: raw deflate stream without a zlib header.
                req.data = zlib.decompress(req.data, -zlib.MAX_WBITS)
            return req
        except Exception:
            # Was a bare ``except:`` (duplicated by a preceding identical
            # ``except OSError:``), which also swallowed KeyboardInterrupt
            # and SystemExit.  Log and retry on any ordinary error instead.
            logger.error(traceback.format_exc())
    return None
|
def get(self, request, bot_id, id, format=None):
    """Get KikBot by id.

    serializer: KikBotSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    # Pure delegation: the generic detail view does the work; this
    # subclass only narrows the docstring/schema to Kik bots.
    return super(KikBotDetail, self).get(request, bot_id, id, format)
|
def batch(data, batch_size, batch_size_fn=None):
    """Yield elements from data in chunks of batch_size.

    ``batch_size_fn(new, count, sofar)`` computes the effective size of
    the pending chunk after adding ``new``; by default it is simply the
    element count, so each chunk holds ``batch_size`` elements.
    """
    if batch_size_fn is None:
        def batch_size_fn(new, count, sofar):
            return count
    pending, pending_size = [], 0
    for example in data:
        pending.append(example)
        pending_size = batch_size_fn(example, len(pending), pending_size)
        if pending_size == batch_size:
            # Exact fit: emit the chunk and start fresh.
            yield pending
            pending, pending_size = [], 0
        elif pending_size > batch_size:
            # Overshoot: emit everything but the newest element, which
            # becomes the seed of the next chunk.
            yield pending[:-1]
            pending, pending_size = pending[-1:], batch_size_fn(example, 1, 0)
    if pending:
        yield pending
|
def tileBounds(self, zoom, tileCol, tileRow):
    """Return the bounds of a tile in LV03 (EPSG:21781).

    :param zoom: zoom level; must index into ``self.RESOLUTIONS``
    :param tileCol: tile column (x axis, increasing rightwards)
    :param tileRow: tile row, counted from ``self.originCorner``
    :return: ``[minX, minY, maxX, maxY]``
    :raises ValueError: if ``self.originCorner`` is not a supported value
    """
    assert zoom in range(0, len(self.RESOLUTIONS))
    # Tile addressing starts at 0,0 in the origin corner; x always grows
    # to the right, y direction depends on the corner.
    tileSize = self.tileSize(zoom)
    minX = self.MINX + tileCol * tileSize
    maxX = self.MINX + (tileCol + 1) * tileSize
    if self.originCorner == 'bottom-left':
        minY = self.MINY + tileRow * tileSize
        maxY = self.MINY + (tileRow + 1) * tileSize
    elif self.originCorner == 'top-left':
        minY = self.MAXY - (tileRow + 1) * tileSize
        maxY = self.MAXY - tileRow * tileSize
    else:
        # Previously an unrecognised corner fell through to a NameError on
        # minY/maxY; fail with an explicit message instead.
        raise ValueError('unsupported originCorner: %s' % self.originCorner)
    return [minX, minY, maxX, maxY]
|
def layers(self):
    """Return a list of layer classes (including subclasses) in this packet."""
    # noqa: E501
    result = []
    current = self
    # Walk the payload chain until it ends (falsy payload terminator).
    while current:
        result.append(type(current))
        current = current.payload.getlayer(0, _subclass=True)
    return result
|
def append_process_params(xmldoc, process, params):
    """xmldoc is an XML document tree, process is the row in the process
    table for which these are the parameters, and params is a list of
    (name, type, value) tuples one for each parameter.

    Returns ``process``.

    See also process_params_from_dict(), register_to_xmldoc().
    """
    try:
        paramtable = lsctables.ProcessParamsTable.get_table(xmldoc)
    except ValueError:
        # No process_params table in the document yet: create one and
        # attach it under the document root.
        paramtable = lsctables.New(lsctables.ProcessParamsTable)
        xmldoc.childNodes[0].appendChild(paramtable)
    for name, typ, value in params:
        row = paramtable.RowType()
        row.program = process.program
        row.process_id = process.process_id
        row.param = unicode(name)  # NOTE: Python 2 ``unicode`` builtin
        if typ is not None:
            row.type = unicode(typ)
            # Only LIGO_LW-recognised types are allowed in the table.
            if row.type not in ligolwtypes.Types:
                raise ValueError("invalid type '%s' for parameter '%s'" % (row.type, row.param))
        else:
            row.type = None
        if value is not None:
            row.value = unicode(value)
        else:
            row.value = None
        paramtable.append(row)
    return process
|
def parse_file_args(file_obj, file_type, resolver=None, **kwargs):
    """Given a file_obj and a file_type try to turn them into a file-like
    object and a lowercase string of file type.

    Parameters
    ----------
    file_obj :
      str: if string represents a file path, returns
        file_obj: an 'rb' opened file object of the path
        file_type: the extension from the file path
      str: if string is NOT a path, but has JSON-like special characters
        file_obj: the same string passed as file_obj
        file_type: set to 'json'
      str: string is a URL
        ValueError will be raised (callers must use load_remote instead)
      str: string is not an existing path or a JSON-like object
        ValueError will be raised as we can't do anything with input
      file like object: we cannot grab information on file_type
        automatically; ValueError will be raised if file_type is None
        file_obj: same as input
        file_type: same as input
      other object: like a shapely.geometry.Polygon, etc:
        file_obj: same as input
        file_type: if None initially, set to the class name
          (in lower case), otherwise passed through
    file_type : str, type of file and handled according to above

    Returns
    ----------
    file_obj : loadable object
    file_type : str, lower case of the type of file (eg 'stl', 'dae', etc)
    metadata : dict, any metadata
    opened : bool, did we open the file or not
    resolver : object used to locate files referenced by file_obj, or None
    """
    metadata = {}
    opened = False
    # Caller-supplied metadata is merged in first so path/name keys set
    # below can take precedence.
    if ('metadata' in kwargs and isinstance(kwargs['metadata'], dict)):
        metadata.update(kwargs['metadata'])
    if util.is_file(file_obj) and file_type is None:
        raise ValueError('file_type must be set when passing file objects!')
    if util.is_string(file_obj):
        try:
            # os.path.isfile will return False incorrectly
            # if we don't give it an absolute path
            file_path = os.path.expanduser(file_obj)
            file_path = os.path.abspath(file_path)
            exists = os.path.isfile(file_path)
        except BaseException:
            exists = False
        # file obj is a string which exists on filesystem
        if exists:
            # if not passed create a resolver to find other files
            if resolver is None:
                resolver = visual.resolvers.FilePathResolver(file_path)
            # save the file name and path to metadata
            metadata['file_path'] = file_path
            metadata['file_name'] = os.path.basename(file_obj)
            # if file_obj is a path that exists use extension as file_type
            if file_type is None:
                file_type = util.split_extension(file_path, special=['tar.gz', 'tar.bz2'])
            # actually open the file
            file_obj = open(file_path, 'rb')
            opened = True
        else:
            if '{' in file_obj:
                # if a dict bracket is in the string, it's probably straight
                # JSON
                file_type = 'json'
            elif 'https://' in file_obj or 'http://' in file_obj:
                # we've been passed a URL: refuse to do network calls via
                # this magical pipeline, point at the explicit function
                raise ValueError('use load_remote to load URL: {}'.format(file_obj))
            elif file_type is None:
                raise ValueError('string is not a file: {}'.format(file_obj))
    if file_type is None:
        # fall back to the class name of whatever object was passed
        file_type = file_obj.__class__.__name__
    if util.is_string(file_type) and '.' in file_type:
        # if someone has passed the whole filename as the file_type
        # use the file extension as the file_type
        if 'file_path' not in metadata:
            metadata['file_path'] = file_type
        metadata['file_name'] = os.path.basename(file_type)
        file_type = util.split_extension(file_type)
        if resolver is None and os.path.exists(file_type):
            resolver = visual.resolvers.FilePathResolver(file_type)
    # all our stored extensions reference in lower case
    file_type = file_type.lower()
    # if we still have no resolver try using file_obj name
    if (resolver is None and hasattr(file_obj, 'name') and len(file_obj.name) > 0):
        resolver = visual.resolvers.FilePathResolver(file_obj.name)
    return file_obj, file_type, metadata, opened, resolver
|
def config(self):
    """Return a string with the configuration: comma-separated
    ``key:value`` pairs taken from ``self.conf``."""
    pairs = ['%s:%s' % (key, value) for key, value in self.conf.items()]
    return ", ".join(pairs)
|
def authorize_client_credentials(self, client_id, client_secret=None, scope="private_agent"):
    """Authorize to platform with client credentials.

    This should be used if you possess a client_id/client_secret pair
    generated by the platform.

    :param client_id: OAuth client identifier
    :param client_secret: OAuth client secret, if any
    :param scope: requested OAuth scope (sent as a single-element list)
    """
    # Stash the OAuth2 client-credentials grant payload, then perform the
    # actual token exchange.
    self.auth_data = {"grant_type": "client_credentials", "scope": [scope], "client_id": client_id, "client_secret": client_secret}
    self._do_authorize()
|
def orderered_methods(self):
    """An ordered list of methods.

    The module's ``main`` method (if any) comes first, followed by the
    remaining methods in reverse order.

    :return: A list of ordered methods in this module
    :rtype: list
    """
    oms = [self.main] if self.main else []
    # Iterate a reversed *view* instead of calling self.methods.reverse():
    # the in-place reverse mutated shared state, so calling this method
    # twice returned different orders.
    for m in reversed(self.methods):
        if m == self.main:
            continue
        oms.append(m)
    return oms
|
def get_users_of_account_group(self, account_id, group_id, **kwargs):  # noqa: E501
    """Get users of a group.  # noqa: E501

    An endpoint for listing users of the group with details.
    **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_users_of_account_group(account_id, group_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str group_id: The ID of the group whose users are retrieved. (required)
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently supported: total_count
    :param str status__eq: An optional filter for getting users by status.
    :param str status__in: An optional filter for getting users with a specified set of statuses.
    :param str status__nin: An optional filter for excluding users with a specified set of statuses.
    :return: UserInfoRespList
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the same
    # *_with_http_info helper: when asynchronous=True it returns the
    # request thread, otherwise the response data -- so a single call
    # covers both branches.
    return self.get_users_of_account_group_with_http_info(account_id, group_id, **kwargs)  # noqa: E501
|
def _get_connection(self):
    """Returns our cached LDAPObject, which may or may not be bound."""
    if self._connection is None:
        uri = self.settings.SERVER_URI
        # SERVER_URI may be a callable that returns the URI; evaluate it
        # lazily, once per connection.
        if callable(uri):
            uri = uri()
        self._connection = self.backend.ldap.initialize(uri)
        # Apply any configured python-ldap connection options.
        for opt, value in self.settings.CONNECTION_OPTIONS.items():
            self._connection.set_option(opt, value)
        if self.settings.START_TLS:
            logger.debug("Initiating TLS")
            self._connection.start_tls_s()
    return self._connection
|
def _load_nonlink_level(handler, level, pathtable, pathname):
    """Loads level and builds the appropriate type, without handling softlinks.

    ``level`` is a PyTables node (Group, VLArray or Array); its title and
    attributes encode which Python object it represents (dict, list, tuple,
    None, SimpleNamespace, pandas frame, scipy sparse matrix, string array,
    scalar, ...).  ``pathtable`` caches already-loaded objects keyed by
    ``pathname`` so recursive references resolve to a single object.
    """
    if isinstance(level, tables.Group):
        # Choose the container type from the group's title/attributes.
        if _sns and (level._v_title.startswith('SimpleNamespace:') or DEEPDISH_IO_ROOT_IS_SNS in level._v_attrs):
            val = SimpleNamespace()
            dct = val.__dict__
        elif level._v_title.startswith('list:'):
            dct = {}
            val = []
        else:
            dct = {}
            val = dct
        # in case of recursion, object needs to be put in pathtable
        # before trying to fully load it
        pathtable[pathname] = val
        # Load sub-groups
        for grp in level:
            lev = _load_level(handler, grp, pathtable)
            n = grp._v_name
            # Check if it's a complicated pair or a string-value pair
            if n.startswith('__pair'):
                dct[lev['key']] = lev['value']
            else:
                dct[n] = lev
        # Load attributes
        for name in level._v_attrs._f_list():
            if name.startswith(DEEPDISH_IO_PREFIX):
                # Internal deepdish bookkeeping attribute, not user data.
                continue
            v = level._v_attrs[name]
            dct[name] = v
        if level._v_title.startswith('list:'):
            # The title carries the length; items were stored as i0, i1, ...
            N = int(level._v_title[len('list:'):])
            for i in range(N):
                val.append(dct['i{}'.format(i)])
            return val
        elif level._v_title.startswith('tuple:'):
            N = int(level._v_title[len('tuple:'):])
            lst = []
            for i in range(N):
                lst.append(dct['i{}'.format(i)])
            return tuple(lst)
        elif level._v_title.startswith('nonetype:'):
            return None
        elif is_pandas_dataframe(level):
            assert _pandas, "pandas is required to read this file"
            store = _HDFStoreWithHandle(handler)
            return store.get(level._v_pathname)
        elif level._v_title.startswith('sparse:'):
            # Rebuild a scipy sparse matrix from its stored components.
            frm = level._v_attrs.format
            if frm in ('csr', 'csc', 'bsr'):
                shape = tuple(level.shape[:])
                cls = {'csr': sparse.csr_matrix, 'csc': sparse.csc_matrix, 'bsr': sparse.bsr_matrix}
                matrix = cls[frm](shape)
                matrix.data = level.data[:]
                matrix.indices = level.indices[:]
                matrix.indptr = level.indptr[:]
                matrix.maxprint = level._v_attrs.maxprint
                return matrix
            elif frm == 'dia':
                shape = tuple(level.shape[:])
                matrix = sparse.dia_matrix(shape)
                matrix.data = level.data[:]
                matrix.offsets = level.offsets[:]
                matrix.maxprint = level._v_attrs.maxprint
                return matrix
            elif frm == 'coo':
                shape = tuple(level.shape[:])
                matrix = sparse.coo_matrix(shape)
                matrix.data = level.data[:]
                matrix.col = level.col[:]
                matrix.row = level.row[:]
                matrix.maxprint = level._v_attrs.maxprint
                return matrix
            else:
                raise ValueError('Unknown sparse matrix type: {}'.format(frm))
        else:
            # Plain dict (or SimpleNamespace) group.
            return val
    elif isinstance(level, tables.VLArray):
        if level.shape == (1,):
            # A single-element VLArray holds a pickled object.
            return _load_pickled(level)
        else:
            return level[:]
    elif isinstance(level, tables.Array):
        if 'zeroarray_dtype' in level._v_attrs:
            # Unpack zero-size arrays (shape is stored in an HDF5 array and
            # type is stored in the attibute 'zeroarray_dtype')
            dtype = level._v_attrs.zeroarray_dtype
            sh = level[:]
            return np.zeros(tuple(sh), dtype=dtype)
        if 'strtype' in level._v_attrs:
            # Restore the original string dtype of stored string arrays.
            strtype = level._v_attrs.strtype
            itemsize = level._v_attrs.itemsize
            if strtype == b'unicode':
                return level[:].view(dtype=(np.unicode_, itemsize))
            elif strtype == b'ascii':
                return level[:].view(dtype=(np.string_, itemsize))
        # This serves two purposes:
        # (1) unpack big integers: the only time we save arrays like this
        # (2) unpack non-deepdish "scalars"
        if level.shape == ():
            return level[()]
        return level[:]
|
def prepare_destruction(self, recursive=True):
    """Prepares the model for destruction.

    Recursively un-registers all observers and removes references to child
    models.  Extends the destroy method of the base class by the child
    elements of a container state.

    :param bool recursive: also prepare all child models (scoped
        variables, transitions, data flows and child states) for
        destruction
    """
    # logger.verbose("Prepare destruction container state...")
    if recursive:
        for scoped_variable in self.scoped_variables:
            scoped_variable.prepare_destruction()
        # Iterate over list copies ([:]) so mutation during destruction
        # cannot break the iteration.
        for connection in self.transitions[:] + self.data_flows[:]:
            connection.prepare_destruction()
        for state in self.states.values():
            state.prepare_destruction(recursive)
    # Empty the containers in place, then drop the references entirely so
    # the garbage collector can reclaim the children.
    del self.scoped_variables[:]
    del self.transitions[:]
    del self.data_flows[:]
    self.states.clear()
    self.scoped_variables = None
    self.transitions = None
    self.data_flows = None
    self.states = None
    super(ContainerStateModel, self).prepare_destruction(recursive)
|
def get_time(self, instance):
    """Return the current mission time for the specified instance.

    :param instance: instance name, interpolated into the request URL
    :rtype: ~datetime.datetime, or ``None`` when the instance reports no
        mission time
    """
    url = '/instances/{}'.format(instance)
    response = self.get_proto(url)
    # The response body is a serialized YamcsInstance protobuf message.
    message = yamcsManagement_pb2.YamcsInstance()
    message.ParseFromString(response.content)
    if message.HasField('missionTime'):
        return parse_isostring(message.missionTime)
    return None
|
def load_policy_config(filters=None, prepend=True, pillar_key='acl', pillarenv=None, saltenv=None, merge_pillar=True, only_lower_merge=False, revision_id=None, revision_no=None, revision_date=True, revision_date_format='%Y/%m/%d', test=False, commit=True, debug=False, **kwargs):  # pylint: disable=unused-argument
    '''
    Generate and load the configuration of the whole policy.

    .. note::
        The order of the filters and their terms is very important.
        The configuration loaded on the device respects the order
        defined in the ``filters`` and/or inside the pillar.

        When merging the ``filters`` with the pillar data, consider the
        ``prepend`` argument to make sure the order is correct!

    filters
        List of filters for this policy.
        If not specified or empty, will try to load the configuration from
        the pillar, unless ``merge_pillar`` is set as ``False``.

    prepend: ``True``
        When ``merge_pillar`` is set as ``True``, the final list of filters
        is generated by merging the filters from ``filters`` with those
        defined in the pillar (if any): new filters are prepended at the
        beginning, while existing ones preserve their position.  To add the
        new filters at the end of the list, set this argument to ``False``.

    pillar_key: ``acl``
        The key in the pillar containing the default attributes values.
        Default: ``acl``.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI variables with the pillar.  Default: ``True``.
        The merge logic depends on the ``prepend`` argument and the CLI
        has higher priority than the pillar.

    only_lower_merge: ``False``
        Specify if it should merge only the filters and terms fields.
        Otherwise it will try to merge everything at the policy level.
        Default: ``False``.  This option requires ``merge_pillar``,
        otherwise it is ignored.

    revision_id
        Add a comment in the policy config having the description for the
        changes applied.

    revision_no
        The revision count.

    revision_date: ``True``
        Boolean flag: display the date when the policy configuration was
        generated.  Default: ``True``.

    revision_date_format: ``%Y/%m/%d``
        The date format to be used when generating the perforce data.
        Default: ``%Y/%m/%d`` (<year>/<month>/<day>).

    test: ``False``
        Dry run?  If set as ``True``, will apply the config, discard and
        return the changes.  Default: ``False`` and will commit the
        changes on the device.

    commit: ``True``
        Commit?  Default: ``True``.

    debug: ``False``
        Debug mode.  Will insert a new key under the output dictionary,
        as ``loaded_config`` containing the raw configuration loaded on
        the device.

    The output is a dictionary having the same form as
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`:
    ``already_configured``, ``comment``, ``diff``, ``result`` and --
    when ``debug=True`` -- ``loaded_config`` with the generated ACL
    statements (e.g. ``ipv4 access-list ...`` lines on IOS-XR).

    CLI Example:

    .. code-block:: bash

        salt 'edge01.flw01' netacl.load_policy_config debug=True

    The policy configuration can be loaded from pillar data having the
    following structure:

    .. code-block:: yaml

        acl:
          - my-filter:
              terms:
                - my-term:
                    source_port:
                      - 1234
                      - 1235
                    protocol:
                      - tcp
                      - udp
                    source_address: 1.2.3.4
                    action: reject
                - my-other-term:
                    source_port:
                      - [5678, 5680]
                    protocol: tcp
                    action: accept
          - block-icmp:
              terms:
                - first-term:
                    protocol:
                      - icmp
                    action: reject
    '''
    if not filters:
        filters = []
    platform = _get_capirca_platform()
    # Build the raw policy text via the capirca execution module, then push
    # it to the device through net.load_config.
    policy_config = __salt__['capirca.get_policy_config'](platform, filters=filters, prepend=prepend, pillar_key=pillar_key, pillarenv=pillarenv, saltenv=saltenv, merge_pillar=merge_pillar, only_lower_merge=only_lower_merge, revision_id=revision_id, revision_no=revision_no, revision_date=revision_date, revision_date_format=revision_date_format)
    # NOTE(review): ``napalm_device`` is not defined in this function; it
    # appears to be injected into the module namespace by the NAPALM proxy
    # machinery -- confirm before reusing this code standalone.
    return __salt__['net.load_config'](text=policy_config, test=test, commit=commit, debug=debug, inherit_napalm_device=napalm_device)
|
def nice_pkg_name(name):
    """Strip a known "ugly" extension from a package file name.

    :param name: package file name, possibly carrying an extension
    :type name: str
    :return: ``name`` without its extension when the extension appears in
        the module-level ``ugly_ext`` collection; otherwise ``name``
        unchanged
    :rtype: str
    """
    logger.debug("%s", name)
    root, ext = os.path.splitext(name)
    logger.debug("root :'%s', ext: '%s'", root, ext)
    if ext in ugly_ext:
        logger.debug("remove ext %s to get %s", ext, root)
        return root
    logger.debug("no change %s", name)
    return name
|
def client_info(self, client):
    """Get client info.  Uses GET to /clients/<client> interface.

    :Args:
        * *client*: (str) Client's ID

    :Returns: (dict) Client dictionary
    """
    # Normalize whatever the caller passed into a concrete client ID.
    client = self._client_id(client)
    response = self._get(url.clients_id.format(id=client))
    # Expect HTTP 200; _check_response handles anything else.
    self._check_response(response, 200)
    return self._create_response(response)
|
def illumf(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint):
    """Compute the illumination angles --- phase, incidence, and
    emission --- at a specified point on a target body.  Return logical
    flags indicating whether the surface point is visible from the
    observer's position and whether the surface point is illuminated.

    The target body's surface is represented using topographic data
    provided by DSK files, or by a reference ellipsoid.

    The illumination source is a specified ephemeris object.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html

    :param method: Computation method.
    :type method: str
    :param target: Name of target body.
    :type target: str
    :param ilusrc: Name of illumination source.
    :type ilusrc: str
    :param et: Epoch in ephemeris seconds past J2000.
    :type et: float
    :param fixref: Body-fixed, body-centered target body frame.
    :type fixref: str
    :param abcorr: Desired aberration correction.
    :type abcorr: str
    :param obsrvr: Name of observing body.
    :type obsrvr: str
    :param spoint: Body-fixed coordinates of a target surface point.
    :type spoint: 3-Element Array of floats
    :return: Target surface point epoch, Vector from observer to target
        surface point, Phase angle at the surface point, Source incidence
        angle at the surface point, Emission angle at the surface point,
        Visibility flag, Illumination flag
    :rtype: tuple
    """
    # Marshal the Python inputs into the ctypes values CSPICE expects.
    method = stypes.stringToCharP(method)
    target = stypes.stringToCharP(target)
    ilusrc = stypes.stringToCharP(ilusrc)
    et = ctypes.c_double(et)
    fixref = stypes.stringToCharP(fixref)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    spoint = stypes.toDoubleVector(spoint)
    # Output buffers, filled in by the C call below.
    trgepc = ctypes.c_double(0)
    srfvec = stypes.emptyDoubleVector(3)
    phase = ctypes.c_double(0)
    incdnc = ctypes.c_double(0)
    emissn = ctypes.c_double(0)
    visibl = ctypes.c_int()
    lit = ctypes.c_int()
    libspice.illumf_c(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint, ctypes.byref(trgepc), srfvec, ctypes.byref(phase), ctypes.byref(incdnc), ctypes.byref(emissn), ctypes.byref(visibl), ctypes.byref(lit))
    # Unpack the ctypes outputs back into plain Python values.
    return trgepc.value, stypes.cVectorToPython(srfvec), phase.value, incdnc.value, emissn.value, bool(visibl.value), bool(lit.value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.