signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _process_cmap ( cmap ) :
'''Returns a kwarg dict suitable for a ColorScale'''
|
option = { }
if isinstance ( cmap , str ) :
option [ 'scheme' ] = cmap
elif isinstance ( cmap , list ) :
option [ 'colors' ] = cmap
else :
raise ValueError ( '''`cmap` must be a string (name of a color scheme)
or a list of colors, but a value of {} was given
''' . format ( cmap ) )
return option
|
def thread_local_property(name):
    '''Creates a thread local ``property``.

    The value lives in ``self._thread_local_<name>.value``; reading before
    that slot exists for the current thread yields ``None``.
    '''
    attr = '_thread_local_' + name

    def fget(self):
        try:
            return getattr(self, attr).value
        except AttributeError:
            return None

    def fset(self, value):
        getattr(self, attr).value = value

    return property(fget=fget, fset=fset)
|
def latlon_round(latlon, spacing=1000):
    '''Round to the nearest-lower grid corner.

    The position is converted to grid coordinates, easting and northing
    are floored to multiples of ``spacing``, and the corner is converted
    back to lat/lon.
    '''
    grid = latlon_to_grid(latlon)
    grid.easting = spacing * (grid.easting // spacing)
    grid.northing = spacing * (grid.northing // spacing)
    return grid.latlon()
|
def duplicates(base, items):
    """Get an iterator of items similar but not equal to the base.

    @param base: base item to perform comparison against
    @param items: iterable of items to compare to the base
    @return: generator of the similar-but-unequal items, in input order
    """
    return (item for item in items
            if item.similarity(base) and not item.equality(base))
|
def deprecated(msg):
    """Marks a function/method as deprecated.

    Args:
        msg (str): Deprecation message logged as a warning every time the
            wrapped callable is invoked.

    Returns:
        `callable`: decorator that logs ``msg`` and then delegates to the
        decorated function unchanged.
    """
    def decorator(func):
        # Same logger object either way; looked up once at decoration time.
        log = logging.getLogger(__name__)

        @wraps(func)
        def wrapper(*args, **kwargs):
            log.warning(msg)
            return func(*args, **kwargs)

        return wrapper
    return decorator
|
def alter_function(self, dbName, funcName, newFunc):
    """Replace an existing function definition on the server.

    Sends the alter_function request and blocks until the matching
    response has been received.

    Parameters:
     - dbName: name of the database containing the function
     - funcName: name of the function to alter
     - newFunc: new function definition to install
    """
    self.send_alter_function(dbName, funcName, newFunc)
    self.recv_alter_function()
|
def restrict_to(self, restriction):
    """Restrict list operations to the hosts given in restriction.

    This is used to exclude failed hosts in main playbook code, don't use
    this for other reasons.

    :param restriction: a single host or a list of hosts; a non-list value
        is wrapped in a one-element list.
    """
    # isinstance is the idiomatic type check (also accepts list subclasses).
    if not isinstance(restriction, list):
        restriction = [restriction]
    self._restriction = restriction
|
def processPreKeyBundle(self, preKey):
    """Build and store an Alice-side session from a retrieved pre-key bundle.

    :type preKey: PreKeyBundle
    :raises UntrustedIdentityException: if the bundle's identity key is not
        trusted for this recipient.
    :raises InvalidKeyException: if the signed pre-key signature does not
        verify, or if both the signed and unsigned pre-keys are absent.
    """
    if not self.identityKeyStore.isTrustedIdentity(self.recipientId, preKey.getIdentityKey()):
        raise UntrustedIdentityException(self.recipientId, preKey.getIdentityKey())

    # Verify the signed pre-key against the bundle's identity key before use.
    if preKey.getSignedPreKey() is not None and \
            not Curve.verifySignature(preKey.getIdentityKey().getPublicKey(),
                                      preKey.getSignedPreKey().serialize(),
                                      preKey.getSignedPreKeySignature()):
        raise InvalidKeyException("Invalid signature on device key!")

    if preKey.getSignedPreKey() is None and preKey.getPreKey() is None:
        raise InvalidKeyException("Both signed and unsigned prekeys are absent!")

    # Presence of a signed pre-key implies protocol version 3; otherwise v2.
    supportsV3 = preKey.getSignedPreKey() is not None
    sessionRecord = self.sessionStore.loadSession(self.recipientId, self.deviceId)
    ourBaseKey = Curve.generateKeyPair()
    theirSignedPreKey = preKey.getSignedPreKey() if supportsV3 else preKey.getPreKey()
    theirOneTimePreKey = preKey.getPreKey()
    theirOneTimePreKeyId = preKey.getPreKeyId() if theirOneTimePreKey is not None else None

    parameters = AliceAxolotlParameters.newBuilder()
    parameters.setOurBaseKey(ourBaseKey)\
        .setOurIdentityKey(self.identityKeyStore.getIdentityKeyPair())\
        .setTheirIdentityKey(preKey.getIdentityKey())\
        .setTheirSignedPreKey(theirSignedPreKey)\
        .setTheirRatchetKey(theirSignedPreKey)\
        .setTheirOneTimePreKey(theirOneTimePreKey if supportsV3 else None)

    # Keep any existing session state around as an archived state.
    if not sessionRecord.isFresh():
        sessionRecord.archiveCurrentState()

    RatchetingSession.initializeSessionAsAlice(sessionRecord.getSessionState(),
                                               3 if supportsV3 else 2,
                                               parameters.create())

    sessionRecord.getSessionState().setUnacknowledgedPreKeyMessage(theirOneTimePreKeyId, preKey.getSignedPreKeyId(), ourBaseKey.getPublicKey())
    sessionRecord.getSessionState().setLocalRegistrationId(self.identityKeyStore.getLocalRegistrationId())
    sessionRecord.getSessionState().setRemoteRegistrationId(preKey.getRegistrationId())
    sessionRecord.getSessionState().setAliceBaseKey(ourBaseKey.getPublicKey().serialize())

    # Persist the session and remember the identity key we just trusted.
    self.sessionStore.storeSession(self.recipientId, self.deviceId, sessionRecord)
    self.identityKeyStore.saveIdentity(self.recipientId, preKey.getIdentityKey())
|
def get_params(img, output_size):
    """Get parameters for ``crop`` for a random crop.

    Args:
        img: Image with a ``.shape`` attribute; the first two entries are
            unpacked as ``w, h`` (NOTE(review): for numpy/HWC arrays these
            would actually be height and width — confirm against callers).
        output_size (tuple): Expected output size ``(th, tw)`` of the crop.

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
    """
    w, h, *_ = img.shape
    th, tw = output_size
    # When the crop covers the whole image there is nothing random to pick.
    if (w, h) == (tw, th):
        return 0, 0, h, w
    return random.randint(0, h - th), random.randint(0, w - tw), th, tw
|
def get_protocol_version(protocol=None, target=None):
    """Return a suitable pickle protocol version for a given target.

    Arguments:
        target: The internals description of the targeted python version.
            If this is ``None`` the specification of the currently running
            python version will be used.
        protocol (None or int): The requested protocol version (or None for
            the default of the target python version).

    Returns:
        int: A suitable pickle protocol version.
    """
    target = get_py_internals(target)

    if protocol is None:
        protocol = target['pickle_default_protocol']

    # Clamp first to what the running interpreter can emit, then to what
    # the target interpreter can read, warning on each downgrade.
    limits = (
        (cPickle.HIGHEST_PROTOCOL,
         'Downgrading pickle protocol, running python supports up to %d.'),
        (target['pickle_highest_protocol'],
         'Downgrading pickle protocol, target python supports up to %d.'),
    )
    for limit, message in limits:
        if protocol > limit:
            warnings.warn(message % limit)
            protocol = limit

    return protocol
|
def run_sparser(fname, output_fmt, outbuf=None, timeout=600):
    """Return the path to reading output after running Sparser reading.

    Parameters
    ----------
    fname : str
        The path to an input file to be processed. Due to the Sparser
        executable's assumptions, the file name needs to start with PMC
        and should be an NXML formatted file.
    output_fmt : Optional[str]
        The format in which Sparser should produce its output, can either
        be 'json' or 'xml'.
    outbuf : Optional[file]
        A file like object that the Sparser output is written to.
    timeout : int
        The number of seconds to wait until giving up on this one reading.
        The default is 600 seconds (i.e. 10 minutes). Sparser is a fast
        reader and the typical time to read a single full text is a matter
        of seconds.

    Returns
    -------
    output_path : str
        The path to the output file created by Sparser, or None when the
        executable is missing or the output format is unknown.
    """
    # ``sparser_path`` / ``sparser_path_var`` are module-level configuration.
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_fmt == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_fmt == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_fmt)
        return None
    sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')
    # Sparser writes its result next to the input file.
    output_path = fname.split('.')[0] + '-semantics' + suffix
    for fpath in [sparser_exec_path, fname]:
        if not os.path.exists(fpath):
            raise Exception("'%s' is not a valid path." % fpath)
    cmd_list = [sparser_exec_path, format_flag, fname]
    # This is mostly a copy of the code found in subprocess.run, with the
    # key change that proc.kill is replaced with os.killpg. This allows the
    # process to be killed even if it has children. Solution developed from:
    # https://stackoverflow.com/questions/36952245/subprocess-timeout-failure
    with sp.Popen(cmd_list, stdout=sp.PIPE) as proc:
        try:
            stdout, stderr = proc.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            # Yes, this is about as bad as it looks. But it is the only way
            # to be sure the script actually dies.
            sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname])
            stdout, stderr = proc.communicate()
            raise sp.TimeoutExpired(proc.args, timeout, output=stdout,
                                    stderr=stderr)
        except BaseException:
            # See comment on above instance.
            sp.check_call(['pkill', '-f', fname])
            proc.wait()
            raise
        retcode = proc.poll()
        if retcode:
            raise sp.CalledProcessError(retcode, proc.args, output=stdout,
                                        stderr=stderr)
    if outbuf is not None:
        outbuf.write(stdout)
        outbuf.flush()
    assert os.path.exists(output_path), \
        'No output file \"%s\" created by sparser.' % output_path
    return output_path
|
def _get_ess(sample_array):
    """Compute the effective sample size for a 2D array.

    Applies Geyer's initial positive / initial monotone sequence
    truncation to the chain autocovariances.

    :param sample_array: array of shape (n_chain, n_draws); at least two
        chains are required.
    :raises TypeError: if the array is not 2D or has a single chain.
    :returns: int effective sample size, or ``np.nan`` if any estimated
        autocorrelation is NaN.
    """
    shape = sample_array.shape
    if len(shape) != 2:
        raise TypeError("Effective sample size calculation requires 2 dimensional arrays.")
    n_chain, n_draws = shape
    if n_chain <= 1:
        raise TypeError("Effective sample size calculation requires multiple chains.")
    # acov[c, t] is the lag-t autocovariance of chain c.
    acov = np.asarray([_autocov(sample_array[chain]) for chain in range(n_chain)])
    chain_mean = sample_array.mean(axis=1)
    chain_var = acov[:, 0] * n_draws / (n_draws - 1.0)
    acov_t = acov[:, 1] * n_draws / (n_draws - 1.0)
    mean_var = np.mean(chain_var)
    # var_plus combines within-chain variance with between-chain variance.
    var_plus = mean_var * (n_draws - 1.0) / n_draws
    var_plus += np.var(chain_mean, ddof=1)
    rho_hat_t = np.zeros(n_draws)
    rho_hat_even = 1.0
    rho_hat_t[0] = rho_hat_even
    rho_hat_odd = 1.0 - (mean_var - np.mean(acov_t)) / var_plus
    rho_hat_t[1] = rho_hat_odd
    # Geyer's initial positive sequence: keep accumulating (even, odd)
    # autocorrelation pairs while their sum stays non-negative.
    max_t = 1
    t = 1
    while t < (n_draws - 2) and (rho_hat_even + rho_hat_odd) >= 0.0:
        rho_hat_even = 1.0 - (mean_var - np.mean(acov[:, t + 1])) / var_plus
        rho_hat_odd = 1.0 - (mean_var - np.mean(acov[:, t + 2])) / var_plus
        if (rho_hat_even + rho_hat_odd) >= 0:
            rho_hat_t[t + 1] = rho_hat_even
            rho_hat_t[t + 2] = rho_hat_odd
            max_t = t + 2
        t += 2
    # Geyer's initial monotone sequence: force the pair sums to be
    # non-increasing in t.
    t = 3
    while t <= max_t - 2:
        if (rho_hat_t[t + 1] + rho_hat_t[t + 2]) > (rho_hat_t[t - 1] + rho_hat_t[t]):
            rho_hat_t[t + 1] = (rho_hat_t[t - 1] + rho_hat_t[t]) / 2.0
            rho_hat_t[t + 2] = rho_hat_t[t + 1]
        t += 2
    ess = (int((n_chain * n_draws) / (-1.0 + 2.0 * np.sum(rho_hat_t)))
           if not np.any(np.isnan(rho_hat_t)) else np.nan)
    return ess
|
def whois_list(request, format=None):
    """Retrieve basic whois information related to a layer2 or layer3
    network address.

    Builds one entry per known IP address (layer3) and one per interface
    MAC (layer2), each carrying the owning user, device and node names.
    """
    results = []
    # layer3 results
    for ip in Ip.objects.select_related().all():
        interface = ip.interface
        user = interface.device.node.user
        device = interface.device
        results.append({'address': str(ip.address),
                        'user': user.username,
                        'name': user.get_full_name(),
                        'device': device.name,
                        'node': device.node.name})
    # layer2 results
    for interface in Interface.objects.select_related().all():
        # interfaces without a MAC cannot be addressed at layer2
        if interface.mac is None:
            continue
        user = interface.device.node.user
        device = interface.device
        # normalize MAC separators to colons
        results.append({'address': str(interface.mac).replace('-', ':'),
                        'user': user.username,
                        'name': user.get_full_name(),
                        'device': device.name,
                        'node': device.node.name})
    return Response(results)
|
def _inputLoop(self):
    """Loop and copy console -> serial until EXIT_CHARACTER character is found."""
    # Switch statement for handling "special" characters
    actionChars = {self.EXIT_CHARACTER: self._exit,
                   self.EXIT_CHARACTER_2: self._exit,
                   console.CURSOR_LEFT: self._cursorLeft,
                   console.CURSOR_RIGHT: self._cursorRight,
                   console.CURSOR_UP: self._cursorUp,
                   console.CURSOR_DOWN: self._cursorDown,
                   '\n': self._doConfirmInput,
                   '\t': self._doCommandCompletion,
                   self.CTRL_Z_CHARACTER: self._handleCtrlZ,
                   self.ESC_CHARACTER: self._handleEsc,
                   self.BACKSPACE_CHARACTER: self._handleBackspace,
                   console.DELETE: self._handleDelete,
                   console.HOME: self._handleHome,
                   console.END: self._handleEnd}
    try:
        while self.alive:
            try:
                c = console.getkey()
            except KeyboardInterrupt:
                # Treat console Ctrl-C as a literal ^C byte (0x03).
                c = serial.to_bytes([3])
            if c in actionChars:  # Handle character directly
                actionChars[c]()
            elif len(c) == 1 and self._isPrintable(c):
                # Ordinary printable input: insert at the cursor position
                # and redraw the prompt.
                self.inputBuffer.insert(self.cursorPos, c)
                self.cursorPos += 1
                self._refreshInputPrompt()
            # else:
            #     for a in c:
            #         print('GOT:', a, '(', ord(a), ')')
    except:
        # NOTE(review): bare except — deliberately stops the loop on any
        # error and re-raises so the caller sees the failure.
        self.alive = False
        raise
|
def may_be_null_is_nullable():
    """If may_be_null returns nullable or if NULL can be passed in.

    This can still be wrong if the specific typelib is older than the linked
    libgirepository.
    https://bugzilla.gnome.org/show_bug.cgi?id=660879#c47

    :returns: bool — True when may_be_null reflects only (nullable).
    """
    repo = GIRepository()
    repo.require("GLib", "2.0", 0)
    info = repo.find_by_name("GLib", "spawn_sync")
    # this argument is (allow-none) and can never be (nullable)
    return not info.get_arg(8).may_be_null
|
def __merge_json_values(current, previous):
    """Merges the values between the current and previous run of the script.

    ``current`` is updated in place: flags are carried over from matching
    previous entries, and entries only seen previously are appended.
    """
    for entry in current:
        name = entry['name']
        # Find the previous value
        match = __find_and_remove_value(previous, entry)
        if match is None:
            logging.warning('Value %s is a new value', name)
            continue
        previous_flags = match['flags']
        if entry['flags'] != previous_flags:
            logging.warning('Flags for %s are different. Using previous value.', name)
            entry['flags'] = previous_flags
    # Anything left in ``previous`` was not produced by the current run.
    for entry in previous:
        logging.warning('Value %s not present in current run. Appending value.', entry['name'])
        current.append(entry)
|
def new_term(self, term, value, **kwargs):
    """Create a new root-level term in this section.

    The term class is looked up from the document by lowercased name,
    instantiated with this section as its parent section, registered with
    the document, and returned.
    """
    term_class = self.doc.get_term_class(term.lower())
    new = term_class(term, value, doc=self.doc, parent=None, section=self)
    new = new.new_children(**kwargs)
    self.doc.add_term(new)
    return new
|
def removeClass(self, cn):
    '''Remove one or more whitespace-separated class names; returns self.'''
    if cn:
        current = self._classes
        if current:
            # remove the first occurrence of each requested name
            for name in cn.split():
                if name in current:
                    current.remove(name)
    return self
|
def onLeftDown(self, event=None):
    """Left button down: report x, y coords, start zooming mode.

    :param event: mouse event; a None event is ignored.
    """
    if event is None:
        return
    self.cursor_mode_action('leftdown', event=event)
    # forward the underlying GUI event to any chained handlers
    self.ForwardEvent(event=event.guiEvent)
|
def metadata(self):
    """Retrieves the remote server metadata dictionary.

    :returns: Dictionary containing server metadata details
    """
    response = self.r_session.get(self.server_url)
    response.raise_for_status()
    return response_to_json_dict(response)
|
def extend(self, **kwargs):
    """Returns a new instance with this instance's data overlayed by the
    key-value args; the original is left untouched."""
    merged = self.copy()
    merged.update(kwargs)
    return TemplateData(**merged)
|
def _load(self, event):
    """Processes a load event by setting the properties of this record
    to the data restored from the database.

    :param event: <orb.events.LoadEvent>
    """
    if not event.data:
        return
    context = self.context()
    schema = self.schema()
    dbname = schema.dbname()
    clean = {}
    for col, value in event.data.items():
        try:
            # column keys may arrive qualified as "<table>.<column>"
            model_dbname, col_name = col.split('.')
        except ValueError:
            col_name = col
            model_dbname = dbname
        # make sure the value we're setting is specific to this model
        try:
            column = schema.column(col_name)
        except orb.errors.ColumnNotFound:
            column = None
        # skip foreign-model columns, and don't overwrite an already
        # restored Model instance for the same column
        if model_dbname != dbname or (column in clean and isinstance(clean[column], Model)):
            continue
        # look for preloaded reverse lookups and pipes
        elif not column:
            self.__preload[col_name] = value
        # extract the value from the database
        else:
            value = column.dbRestore(value, context=context)
            clean[column] = value
    # update the local values
    with WriteLocker(self.__dataLock):
        for col, val in clean.items():
            # keep an independent copy as the baseline for dicts so later
            # in-place edits can be detected
            default = val if not isinstance(val, dict) else val.copy()
            self.__values[col.name()] = (default, val)
            self.__loaded.add(col)
    if self.processEvent(event):
        self.onLoad(event)
|
def parallelize(mapfunc, workers=None):
    '''Parallelize the mapfunc with multithreading.

    mapfunc calls will be partitioned by the provided list of arguments.
    Each item in the list represents one call's arguments; tuples are
    splatted into multiple arguments, anything else is passed as a single
    argument.

    If the workers argument is not provided, the worker count is taken
    from the PYLT_NUM_WORKERS environment variable (default 10).

    Return: func(args_list: list[arg]) => dict[arg -> result]
    '''
    if not workers:
        workers = _get_default_workers()

    def wrapper(args_list):
        results = {}
        with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
            pending = {}
            for args in args_list:
                if isinstance(args, tuple):
                    future = executor.submit(mapfunc, *args)
                else:
                    future = executor.submit(mapfunc, args)
                pending[future] = args
            for future in concurrent.futures.as_completed(pending):
                results[pending[future]] = future.result()
        return results

    return wrapper
|
def to_threepoint(center, radius, angles=None):
    """For 2D arcs, given a center and radius convert them to three
    points on the arc.

    Parameters
    ----------
    center : (2,) float
        Center point on the plane
    radius : float
        Radius of arc
    angles : (2,) float
        Angles in radians for start and end angle;
        if not specified, will default to (0.0, pi)

    Returns
    -------
    three : (3, 2) float
        Arc control points
    """
    # if no angles provided assume we want a half circle
    if angles is None:
        angles = [0.0, np.pi]
    angles = np.asanyarray(angles, dtype=np.float64)
    if angles.shape != (2,):
        raise ValueError('angles must be (2,)!')
    start, end = angles
    # provide the wrap around
    if end < start:
        end += 2.0 * np.pi
    center = np.asanyarray(center, dtype=np.float64)
    if center.shape != (2,):
        raise ValueError('only valid on 2D arcs!')
    # sample the arc at start, midpoint and end angles
    sampled = np.array([start, (start + end) / 2.0, end], dtype=np.float64)
    # angles -> (3, 2) points on the circle, then translate to the center
    return radius * np.column_stack((np.cos(sampled), np.sin(sampled))) + center
|
def replace_suffix(name, new_suffix):
    """Replaces the suffix of name by new_suffix.

    If no suffix exists, the new one is added.

    Args:
        name (str): file name, possibly carrying an extension.
        new_suffix (str): replacement suffix, including any leading dot.

    Returns:
        str: ``name`` with its final extension replaced by ``new_suffix``.
    """
    # ``basestring`` was Python-2-only and is a NameError on Python 3;
    # ``str`` covers all text there.
    assert isinstance(name, str)
    assert isinstance(new_suffix, str)
    root, _old_suffix = os.path.splitext(name)
    return root + new_suffix
|
def until_connected(self, timeout=None):
    """Return future that resolves when the client is connected.

    :param timeout: overall budget in seconds covering both the wait for
        the client to start running and the wait for the connection;
        falsy means wait indefinitely.
    """
    t0 = self.ioloop.time()
    yield self.until_running(timeout=timeout)
    t1 = self.ioloop.time()
    # spend only the remaining part of the budget on the connected wait
    if timeout:
        timedelta = timeout - (t1 - t0)
    else:
        timedelta = None
    # must run on the ioloop's own thread
    assert get_thread_ident() == self.ioloop_thread_id
    yield self._connected.until_set(timeout=timedelta)
|
def addevensubodd(operator, operand):
    """Add even numbers, subtract odd ones. See http://1w6.org/w6"""
    try:
        # Sequence case: negate odd entries in place, return the sequence.
        for index, value in enumerate(operand):
            if value % 2:
                operand[index] = -value
    except TypeError:
        # Scalar case: negate when odd.
        return -operand if operand % 2 else operand
    return operand
|
def delete(self, key):
    '''Removes the object named by `key`.

    Removes the object from the collection corresponding to ``key.path``;
    deleting a key that is not present is deliberately a no-op.

    Args:
        key: Key naming the object to remove.
    '''
    try:
        collection = self._collection(key)
        del collection[key]
        # drop the collection itself once it becomes empty
        if len(collection) == 0:
            del self._items[str(key.path)]
    except KeyError:
        # Python 3 syntax: the original ``except KeyError, e`` form is a
        # SyntaxError on Python 3. Missing keys are ignored (best-effort).
        pass
|
def find_initial_offset(self, pyramids=6):
    """Estimate time offset.

    This sets and returns the initial time offset estimation.

    Parameters
    ----------
    pyramids : int
        Number of pyramids to use for ZNCC calculations.
        If initial estimation of time offset fails, try lowering this value.

    Returns
    -------
    float
        Estimated time offset
    """
    flow = self.video.flow
    # NOTE(review): the rate is read from ``self.parameter`` but the result
    # is stored in ``self.params`` — confirm both attributes are intended.
    gyro_rate = self.parameter['gyro_rate']
    # timestamps for video frames and gyro samples on a common clock
    frame_times = np.arange(len(flow)) / self.video.frame_rate
    gyro_times = np.arange(self.gyro.num_samples) / gyro_rate
    time_offset = timesync.sync_camera_gyro(flow, frame_times, self.gyro.data.T, gyro_times, levels=pyramids)
    logger.debug("Initial time offset: {:.4f}".format(time_offset))
    self.params['initialized']['time_offset'] = time_offset
    return time_offset
|
def wx_menu(self):
    '''return a wx.Menu() for this menu'''
    # imported lazily so the module loads without a GUI available
    from MAVProxy.modules.lib.wx_loader import wx

    menu = wx.Menu()
    for item in self.items:
        item._append(menu)
    return menu
|
def create_string(self, key, value):
    """Create method of CRUD operation for string data.

    Args:
        key (string): The variable to write to the DB.
        value (any): The data to write to the DB.

    Returns:
        (string): Result of DB write, or None if key/value was missing.
    """
    if key is None or value is None:
        self.tcex.log.warning(u'The key or value field was None.')
        return None
    # Coerce non-string primitives to their string representation first.
    if isinstance(value, (bool, list, int, dict)):
        value = u'{}'.format(value)
    return self.db.create(key.strip(), u'{}'.format(json.dumps(value)))
|
def set_job(user, path, mask, cmd):
    '''Sets an incron job up for a specified user.

    Returns 'present' if an identical job already exists, 'updated' if an
    overlapping job was replaced, 'new' if the job was appended, an error
    string for an invalid mask type, or the write's stderr text when
    committing the tab failed.

    CLI Example:

    .. code-block:: bash

        salt '*' incron.set_job root '/root' 'IN_MODIFY' 'echo "$$ $@ $# $% $&"'
    '''
    # Scrub the types
    mask = six.text_type(mask).upper()
    # Check for valid mask types
    for item in mask.split(','):
        if item not in _MASK_TYPES:
            return 'Invalid mask type: {0}'.format(item)
    updated = False
    # sort so mask comparison is order-insensitive
    arg_mask = mask.split(',')
    arg_mask.sort()
    lst = list_tab(user)
    updated_crons = []
    # Look for existing incrons that have cmd, path and at least one of the MASKS
    # remove and replace with the one we're passed
    for item, cron in enumerate(lst['crons']):
        if path == cron['path']:
            if cron['cmd'] == cmd:
                cron_mask = cron['mask'].split(',')
                cron_mask.sort()
                if cron_mask == arg_mask:
                    # identical job already present; nothing to write
                    return 'present'
                if any([x in cron_mask for x in arg_mask]):
                    # overlapping mask: drop the old entry and replace it
                    updated = True
                else:
                    updated_crons.append(cron)
            else:
                updated_crons.append(cron)
        else:
            updated_crons.append(cron)
    cron = {'cmd': cmd, 'path': path, 'mask': mask}
    updated_crons.append(cron)
    lst['crons'] = updated_crons
    comdat = _write_incron_lines(user, _render_tab(lst))
    if comdat['retcode']:
        # Failed to commit, return the error
        return comdat['stderr']
    if updated:
        return 'updated'
    else:
        return 'new'
|
def create_asset(json):
    """Create :class:`.resources.Asset` from JSON.

    :param json: JSON dict with 'sys' and 'fields' (incl. 'file') entries.
    :return: Asset instance.
    """
    asset = Asset(json['sys'])
    asset.fields = json['fields']
    file_info = json['fields']['file']
    asset.url = file_info['url']
    asset.mimeType = file_info['contentType']
    return asset
|
def _get_running_parameters(self, scale, f, loop=3):
    """Get the running parameters (e.g. quark masses and the strong
    coupling) at a given scale.

    :param scale: renormalization scale.
    :param f: flavour count (NOTE(review): the body reads ``self.f``
        instead of this argument — confirm which is intended).
    :param loop: loop order used for the QCD running.
    :returns: dict of running alpha_s and quark masses plus the
        scale-independent alpha_e and lepton masses.
    """
    p = {}
    p['alpha_s'] = qcd.alpha_s(scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_b'] = qcd.m_b(self.parameters['m_b'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_c'] = qcd.m_c(self.parameters['m_c'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_s'] = qcd.m_s(self.parameters['m_s'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    # NOTE(review): m_u and m_d are run with qcd.m_s — presumably because
    # light-quark mass running is flavour-universal; confirm this is not a
    # copy-paste slip.
    p['m_u'] = qcd.m_s(self.parameters['m_u'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_d'] = qcd.m_s(self.parameters['m_d'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    # running ignored for alpha_e and lepton mass
    p['alpha_e'] = self.parameters['alpha_e']
    p['m_e'] = self.parameters['m_e']
    p['m_mu'] = self.parameters['m_mu']
    p['m_tau'] = self.parameters['m_tau']
    return p
|
def reproject_on_template_raster(src_file, dst_file, template_file, resampling="near", compress=None, overwrite=False):
    """Reproject a one-band raster to fit the projection, extent, pixel size etc. of a template raster.

    Function based on https://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python

    Arguments:
        src_file {str} -- Filename of the source one-band raster.
        dst_file {str} -- Filename of the destination raster.
        template_file {str} -- Filename of the template raster.
        resampling {str} -- Resampling type:
            'near' (default), 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'average', 'mode', 'max', 'min', 'med', 'q1', 'q3',
            see https://www.gdal.org/gdalwarp.html -r parameter.
        compress {str} -- Compression type: None (default), 'lzw', 'packbits', 'deflate'.
        overwrite {bool} -- Overwrite an existing destination file.

    Returns:
        int -- 0 on success or when processing was skipped.
    """
    if not overwrite and Path(dst_file).exists():
        print("Processing skipped. Destination file exists.")
        return 0
    # map user-facing resampling names onto gdalconst attribute names
    GDAL_RESAMPLING_ALGORITHMS = {"bilinear": "GRA_Bilinear", "cubic": "GRA_Cubic", "cubicspline": "GRA_CubicSpline", "lanczos": "GRA_Lanczos", "average": "GRA_Average", "mode": "GRA_Mode", "max": "GRA_Max", "min": "GRA_Min", "med": "GRA_Med", "near": "GRA_NearestNeighbour", "q1": "GRA_Q1", "q3": "GRA_Q3"}
    compressions = ["lzw", "packbits", "deflate"]
    if resampling not in GDAL_RESAMPLING_ALGORITHMS.keys():
        raise ValueError(f"'resampling must be one of {', '.join(GDAL_RESAMPLING_ALGORITHMS.keys())}")
    if compress is None:
        options = []
    else:
        if compress.lower() not in compressions:
            raise ValueError(f"'compress must be one of {', '.join(compressions)}")
        else:
            options = [f'COMPRESS={compress.upper()}']
    # Source
    src = gdal.Open(src_file, gdalconst.GA_ReadOnly)
    src_band = src.GetRasterBand(1)
    src_proj = src.GetProjection()
    # We want a section of source that matches this:
    match_ds = gdal.Open(template_file, gdalconst.GA_ReadOnly)
    match_proj = match_ds.GetProjection()
    match_geotrans = match_ds.GetGeoTransform()
    wide = match_ds.RasterXSize
    high = match_ds.RasterYSize
    # Output / destination
    Path(dst_file).parent.mkdir(parents=True, exist_ok=True)
    dst = gdal.GetDriverByName('GTiff').Create(dst_file, wide, high, 1, src_band.DataType, options=options)
    dst.SetGeoTransform(match_geotrans)
    dst.SetProjection(match_proj)
    # Do the work
    gdal.ReprojectImage(src, dst, src_proj, match_proj, getattr(gdalconst, GDAL_RESAMPLING_ALGORITHMS[resampling]))
    # releasing the handle flushes the dataset to disk
    del dst
    # Flush
    return 0
|
def Images(self, run, tag):
    """Retrieve the image events associated with a run and tag.

    Args:
        run: A string name of the run for which values are retrieved.
        tag: A string name of the tag for which values are retrieved.

    Raises:
        KeyError: If the run is not found, or the tag is not available for
            the given run.

    Returns:
        An array of `event_accumulator.ImageEvents`.
    """
    return self.GetAccumulator(run).Images(tag)
|
def build_path(levels):
    """Make a linear directory structure from a list of path level names.

    ``build_path(["chefdir", "trees", "test"])`` creates (if needed) and
    returns ``chefdir/trees/test``.

    Args:
        levels: non-empty list of directory names, outermost first.

    Returns:
        str: the joined path, guaranteed to exist as a directory.
    """
    path = os.path.join(*levels)
    # exist_ok avoids the check-then-create race the previous
    # dir_exists() guard was subject to.
    os.makedirs(path, exist_ok=True)
    return path
|
def zrevrange(self, key, start, stop, withscores=False, encoding=_NOTSET):
    """Return a range of members in a sorted set, by index,
    with scores ordered from high to low.

    :raises TypeError: if start or stop is not int
    """
    for arg_name, arg in (('start', start), ('stop', stop)):
        if not isinstance(arg, int):
            raise TypeError("%s argument must be int" % arg_name)
    extra = [b'WITHSCORES'] if withscores else []
    fut = self.execute(b'ZREVRANGE', key, start, stop, *extra, encoding=encoding)
    # with scores the flat reply is converted into (member, score) pairs
    return wait_convert(fut, pairs_int_or_float) if withscores else fut
|
def register_options(cls, register):
    """Register an option to make capturing snapshots optional.

    This class is intended to be extended by Jvm resolvers (coursier and
    ivy), and the option name should reflect that.
    """
    super(JvmResolverBase, cls).register_options(register)
    # TODO This flag should be defaulted to True when we are doing hermetic execution,
    # and should probably go away as we move forward into that direction.
    register('--capture-snapshots', type=bool, default=False,
             # space added: the implicit concatenation used to render
             # "...jars.Note that..." in the help text
             help='Enable capturing snapshots to add directory digests to dependency jars. '
                  'Note that this is necessary when hermetic execution is enabled.')
|
def _url_search_builder(term, country='US', media='all', entity=None, attribute=None, limit=50):
    """Builds the URL to perform the search based on the provided data.

    :param term: String. The text string to search for; spaces are handled
        by the query parser so the caller does not have to encode them.
    :param country: String. Two-letter country code of the target store
        (see http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2).
    :param media: String. The media type to search for, e.g. music.
    :param entity: String. The type of results wanted, relative to media,
        e.g. musicArtist, musicTrack, album, musicVideo, mix, song.
    :param attribute: String. The attribute to search for in the stores,
        relative to the specified media type.
    :param limit: Integer. The number of search results to return.
    :return: The built URL as a string.
    """
    url = base_search_url + _parse_query(term)
    url += ampersand + parameters[1] + country
    url += ampersand + parameters[2] + media
    # entity and attribute are optional query parameters
    for index, optional in ((3, entity), (4, attribute)):
        if optional is not None:
            url += ampersand + parameters[index] + optional
    url += ampersand + parameters[5] + str(limit)
    return url
|
def ConsultarCTGActivosPorPatente(self, patente="ZZZ999"):
    """Query active CTGs by truck license plate (patente).

    Stores the result list in ``self.DatosCTG`` and loads the first
    record; returns True when any matching CTG was found, else False.
    """
    ret = self.client.consultarCTGActivosPorPatente(request=dict(
        auth={
            'token': self.Token,
            'sign': self.Sign,
            'cuitRepresentado': self.Cuit,
        },
        patente=patente,
    ))['response']
    # raises/records any service-level errors reported in the response
    self.__analizar_errores(ret)
    datos = ret.get('arrayConsultarCTGActivosPorPatenteResponse')
    if datos:
        self.DatosCTG = datos
        self.LeerDatosCTG(pop=False)
        return True
    else:
        self.DatosCTG = []
        return False
|
def getIndicesFromInstId(self, instId):
    """Return index values for instance identification.

    Walks the table's index columns, consuming sub-OIDs from ``instId`` and
    converting each into its value form.  Results are memoized in
    ``self._idToIdxCache``.

    :param instId: tuple of sub-OIDs identifying a table instance
    :return: tuple of index values
    :raises error.SmiError: if sub-OIDs remain after all indices are parsed
    """
    if instId in self._idToIdxCache:
        return self._idToIdxCache[instId]
    # Remember the original lookup key: the loop below reassigns `instId`
    # as sub-OIDs are consumed, so caching must not use the mutated value.
    # (Previously the result was stored under the leftover -- typically
    # empty -- tuple, so the cache never hit and `()` entries collided.)
    cacheId = instId
    indices = []
    for impliedFlag, modName, symName in self._indexNames:
        mibObj, = mibBuilder.importSymbols(modName, symName)
        try:
            syntax, instId = self.oidToValue(mibObj.syntax, instId, impliedFlag, indices)
        except PyAsn1Error as exc:
            debug.logger & debug.FLAG_INS and debug.logger(
                'error resolving table indices at %s, %s: %s' % (self.__class__.__name__, instId, exc))
            indices = [instId]
            instId = ()
            break
        indices.append(syntax)  # to avoid cyclic refs
    if instId:
        raise error.SmiError('Excessive instance identifier sub-OIDs left at %s: %s' % (self, instId))
    indices = tuple(indices)
    self._idToIdxCache[cacheId] = indices
    return indices
|
def x(self):
    """Block the calling thread until the future finishes, then return the result.

    Waits up to ``self._timeout`` seconds on the future's condition.  On
    timeout a ``TimeoutError`` is recorded via ``set_exception``.  A
    cancelled future yields a ``CancelledError`` result; a finished future
    yields its stored exception or value.  If the result is an exception it
    is either returned wrapped in ``FailureException`` (when
    ``self.catch_exception`` is set) or raised.
    """
    with self._condition:
        result = None
        if not self.done():
            # Block until notified by the worker or the timeout elapses.
            self._condition.wait(self._timeout)
        if not self.done():  # timeout
            self.set_exception(TimeoutError())
        if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:  # cancelled
            result = CancelledError()
        elif self._state == FINISHED:  # finished
            if self._exception:
                result = self._exception
            else:
                result = self._result
        if isinstance(result, Exception):
            if self.catch_exception:
                # Swallow the error: hand it back wrapped instead of raising.
                result = FailureException(result)
                return result
            else:
                raise result
        return result
|
def toLily(self):
    """Convert this text object and its attributes to lilypond markup.

    Renders an optional ``size`` attribute (numeric -> ``\\abs-fontsize``,
    otherwise treated as a named size command such as ``large``), an
    optional ``font`` attribute (unknown fonts fall back to a randomly
    chosen supported one), then the text itself, quoted unless a
    ``noquotes`` attribute is present.  Text containing no alphanumeric
    characters renders as the empty string.

    :return: str of lilypond code
    """
    lilystring = ""
    if hasattr(self, "size"):
        try:
            # A numeric size becomes an absolute font size; a non-numeric
            # one is emitted as a named lilypond command.
            float(self.size)
            lilystring += "\\abs-fontsize #" + str(self.size) + " "
        except (TypeError, ValueError):
            lilystring += "\\" + str(self.size) + " "
    if hasattr(self, "font"):
        fonts_available = ["sans", "typewriter", "roman"]
        if self.font in fonts_available:
            lilystring += "\\" + self.font + " "
        else:
            # Unknown font: pick one of the supported fonts at random.
            lilystring += "\\" + random.Random().choice(fonts_available) + " "
    # Only emit output if the text holds at least one alphanumeric character.
    if any(char in string.ascii_letters or char in string.digits for char in self.text):
        if not hasattr(self, "noquotes"):
            lilystring += "\""
        lilystring += self.text
        if not hasattr(self, "noquotes"):
            lilystring += "\" "
    else:
        lilystring = ""
    return lilystring
|
def move_object(self, container, obj, new_container, new_obj_name=None, new_reference=False, content_type=None):
    """Copy an object to a new location, then delete the original.

    Behaves like ``copy_object`` except that the source object is removed
    once the copy succeeds.  The stored ``content_type`` may optionally be
    changed.

    NOTE: any references to the original object become invalid; pass
    ``new_reference=True`` to receive a reference to the newly moved
    object instead of the moved object's etag.
    """
    etag = self.copy_object(container, obj, new_container,
                            new_obj_name=new_obj_name, content_type=content_type)
    if not etag:
        return
    # The copy landed safely; now remove the source object.
    self.delete_object(container, obj)
    if not new_reference:
        return etag
    moved_name = new_obj_name or utils.get_name(obj)
    return self.get_object(new_container, moved_name)
|
def _sign_block ( self , block ) :
"""The block should be complete and the final
signature from the publishing validator ( this validator ) needs to
be added ."""
|
block_header = block . block_header
header_bytes = block_header . SerializeToString ( )
signature = self . _identity_signer . sign ( header_bytes )
block . set_signature ( signature )
return block
|
def help_center_section_subscriptions(self, section_id, locale=None, **kwargs):
    """List subscriptions for a help-center section, optionally per locale.

    https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-section-subscriptions
    """
    if locale:
        template = "/api/v2/help_center/{locale}/sections/{section_id}/subscriptions.json"
    else:
        template = "/api/v2/help_center/sections/{section_id}/subscriptions.json"
    return self.call(template.format(section_id=section_id, locale=locale), **kwargs)
|
def product(*arrays):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays, where M is the product of the input
        lengths.  The dtype is taken from the first input array.
    """
    arrays = [np.asarray(x) for x in arrays]
    # Materialize the dimensions as a tuple: np.indices expects a concrete
    # sequence (a generator expression here is rejected by older numpy).
    shape = tuple(len(x) for x in arrays)
    dtype = arrays[0].dtype
    ix = np.indices(shape)
    ix = ix.reshape(len(arrays), -1).T
    out = np.empty_like(ix, dtype=dtype)
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]
    return out
|
def build(context, provider, **kwargs):  # pylint: disable=unused-argument
    """Build static site.

    Fingerprints the site source with a content hash, consults an SSM
    parameter (and S3) to decide whether that exact content is already
    deployed, and otherwise runs the configured build commands and uploads
    (or reuses) the zipped artifact.

    :param context: deployment context, passed through to rxref lookups
    :param provider: provider object supplying the AWS region
    :param kwargs: expects ``options`` (site options mapping) and
        ``artifact_bucket_rxref_lookup`` (lookup string for the artifact
        S3 bucket)
    :return: dict of build metadata (artifact names, content hash, app
        directory, and whether the deployed site is already current)
    """
    session = get_session(provider.region)
    options = kwargs.get('options', {})
    context_dict = {}
    context_dict['artifact_key_prefix'] = "%s-%s-" % (options['namespace'], options['name'])
    # noqa
    # Default SSM parameter name used to track the deployed content hash.
    default_param_name = "%shash" % context_dict['artifact_key_prefix']
    if options.get('build_output'):
        build_output = os.path.join(options['path'], options['build_output'])
    else:
        build_output = options['path']
    context_dict['artifact_bucket_name'] = RxrefLookup.handle(kwargs.get('artifact_bucket_rxref_lookup'), provider=provider, context=context)
    if options.get('pre_build_steps'):
        run_commands(options['pre_build_steps'], options['path'])
    # Content hash over the configured source directories.
    context_dict['hash'] = get_hash_of_files(root_path=options['path'], directories=options.get('source_hashing', {}).get('directories'))
    # Now determine if the current staticsite has already been deployed
    if options.get('source_hashing', {}).get('enabled', True):
        context_dict['hash_tracking_parameter'] = options.get('source_hashing', {}).get('parameter', default_param_name)
        ssm_client = session.client('ssm')
        try:
            old_parameter_value = ssm_client.get_parameter(Name=context_dict['hash_tracking_parameter'])['Parameter']['Value']
        except ssm_client.exceptions.ParameterNotFound:
            # First deploy in this environment: nothing tracked yet.
            old_parameter_value = None
    else:
        # Hash tracking disabled: always treat the deploy as out of date.
        context_dict['hash_tracking_disabled'] = True
        old_parameter_value = None
    context_dict['current_archive_filename'] = (context_dict['artifact_key_prefix'] + context_dict['hash'] + '.zip')
    if old_parameter_value:
        context_dict['old_archive_filename'] = (context_dict['artifact_key_prefix'] + old_parameter_value + '.zip')
    if old_parameter_value == context_dict['hash']:
        # Identical content already deployed here; skip the build entirely.
        LOGGER.info("staticsite: skipping build; app hash %s already deployed " "in this environment", context_dict['hash'])
        context_dict['deploy_is_current'] = True
        return context_dict
    if does_s3_object_exist(context_dict['artifact_bucket_name'], context_dict['current_archive_filename'], session):
        # Artifact already uploaded (e.g. by another environment): reuse it.
        context_dict['app_directory'] = download_and_extract_to_mkdtemp(context_dict['artifact_bucket_name'], context_dict['current_archive_filename'], session)
    else:
        if options.get('build_steps'):
            LOGGER.info('staticsite: executing build commands')
            run_commands(options['build_steps'], options['path'])
        zip_and_upload(build_output, context_dict['artifact_bucket_name'], context_dict['current_archive_filename'], session)
        context_dict['app_directory'] = build_output
    context_dict['deploy_is_current'] = False
    return context_dict
|
def compute_(self):
    """Main core-guided loop, which iteratively calls a SAT
    oracle, extracts a new unsatisfiable core and processes
    it. The loop finishes as soon as a satisfiable formula is
    obtained. If specified in the command line, the method
    additionally calls :meth:`adapt_am1` to detect and adapt
    intrinsic AtMost1 constraints before executing the loop.

    :rtype: bool
    """
    # trying to adapt (simplify) the formula
    # by detecting and using atmost1 constraints
    if self.adapt:
        self.adapt_am1()
    # main solving loop: keep extracting/relaxing cores while the formula
    # is UNSAT under the current selector and sum assumptions
    while not self.oracle.solve(assumptions=self.sels + self.sums):
        self.get_core()
        if not self.core:
            # core is empty, i.e. hard part is unsatisfiable
            return False
        self.process_core()
        if self.verbose > 1:
            print('c cost: {0}; core sz: {1}; soft sz: {2}'.format(self.cost, len(self.core), len(self.sels) + len(self.sums)))
    return True
|
def fit(model, params, X_train, y_train, X_test, y_test, additional_calls, fit_params=None, scorer=None, random_state=None):
    """Find a good model by searching a space of hyper-parameters.

    Many candidate models are created and fit incrementally on batches of
    training data; after each scoring pass, ``additional_calls`` decides how
    many further ``partial_fit`` calls each surviving model receives, so
    poorly scoring models are dropped over time while the rest train longer.

    Training data should be given as Dask arrays and may be large.  Testing
    data should be a numpy array or small dask array that fits on a single
    worker.

    Parameters
    ----------
    model : Estimator
    params : List[Dict]
        Parameters to start training on model.
    X_train, y_train : dask Array
    X_test, y_test : Array
        Numpy array or small dask array; should fit in a single node's
        memory.
    additional_calls : callable
        Maps per-model scoring history to the number of additional
        partial fit calls to run on each model.
    fit_params : dict
        Extra parameters to give to partial_fit.
    scorer : callable
        A scorer callable with signature ``scorer(estimator, X, y)``.
    random_state : int, RandomState instance or None, optional
        Seed or generator controlling randomness; ``None`` uses the
        ``np.random`` global instance.

    Returns
    -------
    info : Dict[int, List[Dict]]
        Scoring history of each successful model, keyed by model ID; holds
        parameters, scores, and timing information over time.
    models : Dict[int, Future]
        Dask futures pointing to trained models.
    history : List[Dict]
        A history of all models' scores over time.
    """
    client = default_client()
    return client.sync(
        _fit, model, params, X_train, y_train, X_test, y_test,
        additional_calls, fit_params=fit_params, scorer=scorer,
        random_state=random_state,
    )
|
def start(st_reg_number):
    """Check the validity of an Amazonas state registration number.

    The number must be nine digits long; the first eight digits are
    weighted by 2..9 and summed, and the final digit is validated as the
    modulus-11 check digit.

    :param st_reg_number: state registration number as a string of digits
    :return: True if the check digit is valid, False otherwise
    """
    if len(st_reg_number) != 9:
        return False
    weights = range(2, 10)
    digits = st_reg_number[0:len(st_reg_number) - 1]
    check_digit = st_reg_number[-1:]
    sum_total = 0
    for i in weights:
        sum_total = sum_total + i * int(digits[i - 2])
    if sum_total < 11:
        # Small sums: check digit is simply 11 minus the sum.  (This branch
        # previously returned an unassigned variable, raising a NameError.)
        digit_calculated = 11 - sum_total
    elif sum_total % 11 <= 1:
        return '0' == check_digit
    else:
        digit_calculated = 11 - sum_total % 11
    return str(digit_calculated) == check_digit
|
def check_model(self):
    """Check the model for various errors.

    Verifies that the CPD associated with each node lists exactly that
    node's parents as evidence.

    Returns
    -------
    check : boolean
        True if all the checks pass.

    Raises
    ------
    ValueError
        If a CPD's evidence set differs from the node's parents.
    """
    for node in self.nodes():
        cpd = self.get_cpds(node=node)
        if not isinstance(cpd, LinearGaussianCPD):
            continue
        if set(cpd.evidence) != set(self.get_parents(node)):
            raise ValueError("CPD associated with %s doesn't have "
                             "proper parents associated with it." % node)
    return True
|
def profile(ctx, filepath, calltree=False):
    """Run and profile a given Python script.

    :param ctx: invoke context used to run the profiling commands
    :param str filepath: The filepath of the script to profile
    :param bool calltree: if True, profile with cProfile and open the
        result in a calltree viewer; otherwise use vprof
    """
    script = pathlib.Path(filepath)
    if not script.is_file():
        log("profile", f"no such script {script!s}", LogLevel.ERROR)
        return
    if calltree:
        log("profile", f"profiling script {script!s} calltree")
        ctx.run(
            f"python -m cProfile -o .profile.cprof {script!s}"
            " && pyprof2calltree -k -i .profile.cprof"
            " && rm -rf .profile.cprof"
        )
    else:
        log("profile", f"profiling script {script!s}")
        ctx.run(f"vprof -c cmhp {script!s}")
|
def scaled_pressure3_encode(self, time_boot_ms, press_abs, press_diff, temperature):
    """Build a MAVLink message carrying readings from the 3rd barometer.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    press_abs    : Absolute pressure (hectopascal) (float)
    press_diff   : Differential pressure 1 (hectopascal) (float)
    temperature  : Temperature measurement (0.01 degrees celsius) (int16_t)
    """
    message = MAVLink_scaled_pressure3_message(
        time_boot_ms, press_abs, press_diff, temperature)
    return message
|
def deleteRole(self, *args, **kwargs):
    """Delete Role

    Delete a role.  This operation succeeds whether or not the role
    actually exists.

    This method is ``stable``
    """
    endpoint = self.funcinfo["deleteRole"]
    return self._makeApiCall(endpoint, *args, **kwargs)
|
def qemu_rebase(target, backing_file, safe=True, fail_on_error=True):
    """Change the backing file of ``target`` to ``backing_file``.

    If backing_file is specified as "" (the empty string), then the image
    is rebased onto no backing file (i.e. it will exist independently of
    any backing file).  (Taken from qemu-img man page.)

    Args:
        target (str): Path to the source disk
        backing_file (str): path to the base disk
        safe (bool): if False, allow unsafe rebase by passing ``-u``
            (check qemu-img docs for more info)
        fail_on_error (bool): whether a failing command should raise
    """
    cmd = ['qemu-img', 'rebase']
    if not safe:
        cmd.append('-u')
    cmd += ['-b', backing_file, target]
    return run_command_with_validation(
        cmd, fail_on_error,
        msg='Failed to rebase {target} onto {backing_file}'.format(
            target=target, backing_file=backing_file))
|
def distant_total_damped_rated_level(octave_frequencies, distance, temp, relhum, reference_distance=1.0):
    """Calculate the damped, A-rated total sound pressure level at a distance.

    Starting from per-octave-band levels measured at ``reference_distance``,
    each band is distance-adjusted, attenuated for atmospheric damping
    (temperature/humidity dependent), A-weighted, and the bands are then
    summed energetically.

    :param octave_frequencies: mapping of octave band key to level (dB);
        bands that are missing or None are skipped
    :param distance: listener distance
    :param temp: air temperature
    :param relhum: relative humidity
    :param reference_distance: distance the input levels refer to
    :return: total A-rated level in dB
    """
    damping_distance = distance - reference_distance
    energy_sum = 0.0
    for band in OCTAVE_BANDS:
        level_in = octave_frequencies.get(band)
        if level_in is None:
            continue
        # Distance-adjusted level for this band.
        band_level = distant_level(reference_level=float(level_in),
                                   distance=distance,
                                   reference_distance=reference_distance)
        # Atmospheric damping over the extra travel distance.
        per_meter = damping(temp=temp, relhum=relhum, freq=OCTAVE_BANDS[band][0])
        band_level -= damping_distance * per_meter
        # Apply the band's A-rating correction.
        band_level += OCTAVE_BANDS[band][1]
        energy_sum += pow(10.0, band_level / 10.0)
    return 10.0 * math.log10(energy_sum)
|
def on_connect(self, connection):
    """Called when the socket connects.

    Caches the raw socket from ``connection``, wraps it in a
    ``SocketBuffer`` for buffered reads, and adopts the connection's
    encoder.
    """
    raw_sock = connection._sock
    self._sock = raw_sock
    self._buffer = SocketBuffer(raw_sock, self.socket_read_size)
    self.encoder = connection.encoder
|
def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:
    """Validate nanopub against jsonschema for nanopub

    Args:
        nanopub (Mapping[str, Any]): nanopub dict
        schema (Mapping[str, Any]): nanopub schema

    Returns:
        Tuple[bool, List[Tuple[str, str]]]:
            bool: Is valid? Yes = True, No = False
            List[Tuple[str, str]]: Validation issues, empty if valid,
                tuple is ('Error|Warning', msg)
                e.g. [('ERROR', "'subject' is a required property")]
    """
    v = jsonschema.Draft4Validator(schema)
    messages = []
    errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)
    for error in errors:
        suberrors = sorted(error.context, key=lambda e: e.schema_path)
        for suberror in suberrors:
            messages.append(("ERROR", suberror.message))
        if not suberrors:
            # Errors raised outside anyOf/oneOf have no sub-errors;
            # previously they marked the nanopub invalid without producing
            # any message (and a debug print leaked to stdout).
            messages.append(("ERROR", error.message))
    is_valid = not errors
    return (is_valid, messages)
|
def add_zfs_apt_repository():
    """Add the ZFS (ppa:zfs-native/stable) apt repository on the remote host.

    Refreshes the apt index, installs development tools and the packages
    needed to build the ZFS DKMS modules, registers the PPA, and refreshes
    the index again.  Runs with ``warn_only=False``, so a failing command
    aborts instead of returning.

    :return: True on completion
    """
    with settings(hide('warnings', 'running', 'stdout'), warn_only=False, capture=True):
        sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update')
        install_ubuntu_development_tools()
        apt_install(packages=['software-properties-common', 'dkms', 'linux-headers-generic', 'build-essential'])
        # 'echo |' answers the confirmation prompt add-apt-repository shows.
        sudo('echo | add-apt-repository ppa:zfs-native/stable')
        sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update')
    return True
|
def add_activation_summary(x, types=None, name=None, collections=None):
    """Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.

    This function is a no-op if not calling from main training tower.

    Args:
        x (tf.Tensor): the tensor to summary.
        types (list[str]): summary types, defaults to
            ``['sparsity', 'rms', 'histogram']``.
        name (str): if is None, use x.name.
        collections (list[str]): collections of the summary ops.
    """
    if x.get_shape().ndims < 2:
        # Scalars carry no useful activation statistics.
        logger.warn("Cannot summarize scalar activation {}".format(x.name))
        return
    summary_types = ['sparsity', 'rms', 'histogram'] if types is None else types
    with cached_name_scope('activation-summary'):
        add_tensor_summary(x, summary_types, name=name, collections=collections)
|
def create_gzip(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a GZIP archive with the gzip Python module.

    Only a single input file is supported; any error during compression is
    re-raised as a ``util.PatoolError``.
    """
    if len(filenames) > 1:
        raise util.PatoolError('multi-file compression not supported in Python gzip')
    try:
        with gzip.GzipFile(archive, 'wb') as gzipfile:
            with open(filenames[0], 'rb') as srcfile:
                # Stream the source through in fixed-size chunks.
                for chunk in iter(lambda: srcfile.read(READ_SIZE_BYTES), b''):
                    gzipfile.write(chunk)
    except Exception as err:
        msg = "error creating %s: %s" % (archive, err)
        raise util.PatoolError(msg)
    return None
|
def to_fasta(self):
    """Render this sequence as a FASTA-format string.

    The numeric ``size=N`` field inside the label is rewritten with the
    (rounded) current frequency before formatting.

    Returns
    -------
    str
        The FASTA representation of the sequence
    """
    before_count, after_count = re.split(r'(?<=size=)\w+', self.label, maxsplit=1)
    rounded_count = int(round(self.frequency))
    updated_label = "%s%d%s" % (before_count, rounded_count, after_count)
    return ">%s\n%s\n" % (updated_label, self.sequence)
|
def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None, key_filename=None, timeout=None, allow_agent=True, look_for_keys=True, compress=False, sock=None):
    """Connect to an SSH server and authenticate to it.

    The server's host key is checked against the system host keys (see
    `load_system_host_keys`) and any local host keys (`load_host_keys`).
    If the server's hostname is not found in either set of host keys, the
    missing host key policy is used (see `set_missing_host_key_policy`).
    The default policy is to reject the key and raise an `.SSHException`.

    Authentication is attempted in the following order of priority:

        - The ``pkey`` or ``key_filename`` passed in (if any)
        - Any key we can find through an SSH agent
        - Any "id_rsa" or "id_dsa" key discoverable in ``~/.ssh/``
        - Plain username/password auth, if a password was given

    If a private key requires a password to unlock it, and a password is
    passed in, that password will be used to attempt to unlock the key.

    :param str hostname: the server to connect to
    :param int port: the server port to connect to
    :param str username:
        the username to authenticate as (defaults to the current local
        username)
    :param str password:
        a password to use for authentication or for unlocking a private key
    :param .PKey pkey: an optional private key to use for authentication
    :param str key_filename:
        the filename, or list of filenames, of optional private key(s) to
        try for authentication
    :param float timeout: an optional timeout (in seconds) for the TCP connect
    :param bool allow_agent: set to False to disable connecting to the SSH agent
    :param bool look_for_keys:
        set to False to disable searching for discoverable private key
        files in ``~/.ssh/``
    :param bool compress: set to True to turn on compression
    :param socket sock:
        an open socket or socket-like object (such as a `.Channel`) to use
        for communication to the target host

    :raises BadHostKeyException: if the server's host key could not be
        verified
    :raises AuthenticationException: if authentication failed
    :raises SSHException: if there was any other error connecting or
        establishing an SSH session
    :raises socket.error: if a socket error occurred while connecting
    """
    if not sock:
        for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            if socktype == socket.SOCK_STREAM:
                af = family
                addr = sockaddr
                break
        else:
            # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
            # getaddrinfo returns a *list* of 5-tuples; take the first entry
            # (unpacking the list itself raised a ValueError here before).
            af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
        sock = socket.socket(af, socket.SOCK_STREAM)
        if timeout is not None:
            try:
                sock.settimeout(timeout)
            except Exception:
                # Best effort: some socket-like objects don't support timeouts.
                pass
        retry_on_signal(lambda: sock.connect(addr))
    t = self._transport = Transport(sock)
    t.use_compression(compress=compress)
    if self._log_channel is not None:
        t.set_log_channel(self._log_channel)
    t.start_client()
    ResourceManager.register(self, t)
    server_key = t.get_remote_server_key()
    keytype = server_key.get_name()
    if port == SSH_PORT:
        server_hostkey_name = hostname
    else:
        server_hostkey_name = "[%s]:%d" % (hostname, port)
    our_server_key = self._system_host_keys.get(server_hostkey_name, {}).get(keytype, None)
    if our_server_key is None:
        our_server_key = self._host_keys.get(server_hostkey_name, {}).get(keytype, None)
    if our_server_key is None:
        # will raise exception if the key is rejected; let that fall out
        self._policy.missing_host_key(self, server_hostkey_name, server_key)
        # if the callback returns, assume the key is ok
        our_server_key = server_key
    if server_key != our_server_key:
        raise BadHostKeyException(hostname, server_key, our_server_key)
    if username is None:
        username = getpass.getuser()
    if key_filename is None:
        key_filenames = []
    elif isinstance(key_filename, string_types):
        key_filenames = [key_filename]
    else:
        key_filenames = key_filename
    self._auth(username, password, pkey, key_filenames, allow_agent, look_for_keys)
|
def _mmc_loop(self, rounds, temp=298.15, verbose=True):
    """The main MMC loop.

    Each round mutates one randomly chosen non-static parameter, rebuilds
    and repacks the model, evaluates its energy and accepts or rejects the
    move via ``self.check_move`` at temperature ``temp``, tracking the best
    model seen so far.

    Parameters
    ----------
    rounds : int
        The number of rounds of optimisation to perform.
    temp : float, optional
        The temperature (in K) used during the optimisation.
    verbose : bool, optional
        If true, prints information about the run to std out.
    """
    # TODO add weighted randomisation of altered variable
    current_round = 0
    while current_round < rounds:
        modifiable = [p for p in self.current_parameters
                      if p.parameter_type is not MMCParameterType.STATIC_VALUE]
        # Both the UNIFORM_DIST branch and the general branch previously
        # ran the identical call; collapsed into one.
        random.choice(modifiable).randomise_proposed_value()
        proposed_parameters = [
            p.current_value if p.proposed_value is None else p.proposed_value
            for p in self.current_parameters]
        model = self.specification(*proposed_parameters)
        model.pack_new_sequences(self.sequences)
        proposed_energy = self.eval_function(model)
        # Decide the move exactly once: check_move may be stochastic
        # (Metropolis criterion), so calling it again for the verbose
        # message could print one decision and act on another.
        accepted = self.check_move(proposed_energy, self.current_energy, t=temp)
        # TODO Add proper logging
        if verbose:
            sys.stdout.write(
                '\rRound: {}, Current energy: {}, Proposed energy: {} '
                '(best {}), {}. '.format(
                    current_round, float_f(self.current_energy),
                    float_f(proposed_energy), float_f(self.best_energy),
                    "ACCEPTED" if accepted else "DECLINED"))
            sys.stdout.flush()
        if accepted:
            for p in self.current_parameters:
                p.accept_proposed_value()
            self.current_energy = proposed_energy
            if self.current_energy < self.best_energy:
                self.best_energy = copy.deepcopy(self.current_energy)
                self.best_parameters = copy.deepcopy(self.current_parameters)
                self.best_model = model
        else:
            for p in self.current_parameters:
                p.reject_proposed_value()
        current_round += 1
    return
|
def _write_json ( self , message , extra ) :
'''The JSON logger doesn ' t obey log levels
@ param message : The message to write
@ param extra : The object to write'''
|
self . logger . info ( message , extra = extra )
|
def undobutton_action(self):
    """Revert the thematic map to the previous state when undo is clicked.

    Pops the most recent entry off the history stack (always keeping at
    least one state), restores it as the active selection, and schedules a
    canvas redraw.
    """
    if len(self.history) <= 1:
        return
    previous = self.history.pop(-1)
    self.selection_array = previous
    self.mask.set_data(previous)
    self.fig.canvas.draw_idle()
|
def xpointerNewLocationSetNodes(self, end):
    """Create a new xmlXPathObjectPtr of type LocationSet, initialized
    with the single range made of the two nodes @start (this node) and
    @end."""
    end__o = None if end is None else end._o
    ret = libxml2mod.xmlXPtrNewLocationSetNodes(self._o, end__o)
    if ret is None:
        raise treeError('xmlXPtrNewLocationSetNodes() failed')
    return xpathObjectRet(ret)
|
def load_page_buffer(self, buffer_number, address, bytes):
    """@brief Load data to a numbered page buffer.

    This method is used in conjunction with start_program_page_with_buffer()
    to implement double buffered programming.

    Note: the ``bytes`` parameter name shadows the builtin; it is kept for
    backward compatibility with existing callers.
    """
    assert buffer_number < len(self.page_buffers), "Invalid buffer number"
    # Prevent security settings from locking the device.
    data = self.override_security_bits(address, bytes)
    # Transfer the buffer into device RAM.
    self.target.write_memory_block8(self.page_buffers[buffer_number], data)
|
def _elidable_begin ( self , word ) :
"""Check word beginning to see if it is elidable . Elidable beginnings include :
1 ) A word begins with ' h '
2 ) A word begins with a vowel
3 ) A word begins with a diphthong
: param word : syllabified / ' qu ' fixed word
: return : True if the beginning of a word is elidable , otherwise False
: rtype : bool"""
|
if str ( word [ 0 ] ) . startswith ( 'h' ) :
return True
elif str ( word [ 0 ] [ 0 ] ) in self . long_vowels :
return True
elif str ( word [ 0 ] [ 0 ] + word [ 0 ] [ - 1 ] ) in self . diphthongs :
return True
elif str ( word [ 0 ] [ 0 ] ) in self . vowels :
return True
else :
return False
|
def create_set(self, project, json_data=None, **options):
    """Create a new article set.  Provide the needed arguments using
    post_data or with key-value pairs"""
    url = URL.articlesets.format(**locals())
    if json_data is None:
        # Plain form-encoded request built from the keyword arguments.
        return self.request(url, method="post", data=options)
    if not isinstance(json_data, string_types):
        json_data = json.dumps(json_data, default=serialize)
    headers = {'content-type': 'application/json'}
    return self.request(url, method='post', data=json_data, headers=headers)
|
def _ParseAbstractInteger ( text , is_long = False ) :
"""Parses an integer without checking size / signedness .
Args :
text : The text to parse .
is _ long : True if the value should be returned as a long integer .
Returns :
The integer value .
Raises :
ValueError : Thrown Iff the text is not a valid integer ."""
|
# Do the actual parsing . Exception handling is propagated to caller .
try : # We force 32 - bit values to int and 64 - bit values to long to make
# alternate implementations where the distinction is more significant
# ( e . g . the C + + implementation ) simpler .
if is_long :
return long ( text , 0 )
else :
return int ( text , 0 )
except ValueError :
raise ValueError ( 'Couldn\'t parse integer: %s' % text )
|
def sample_batch(self, nlive_new=500, update_interval=None, logl_bounds=None,
                 maxiter=None, maxcall=None, save_bounds=True):
    """Generate an additional series of nested samples that will be combined
    with the previous set of dead points. Works by hacking the internal
    `sampler` object.
    Instantiates a generator that will be called by the user.

    Parameters
    ----------
    nlive_new : int
        Number of new live points to be added. Default is `500`.
    update_interval : int or float, optional
        If an integer is passed, only update the bounding distribution
        every `update_interval`-th likelihood call. If a float is passed,
        update the bound after every `round(update_interval * nlive)`-th
        likelihood call. Larger update intervals can be more efficient
        when the likelihood function is quick to evaluate. If no value is
        provided, defaults to the value passed during initialization.
    logl_bounds : tuple of size (2,), optional
        The ln(likelihood) bounds used to bracket the run. If `None`,
        the default bounds span the entire range covered by the
        original run.
    maxiter : int, optional
        Maximum number of iterations. Iteration may stop earlier if the
        termination condition is reached. Default is `sys.maxsize`
        (no limit).
    maxcall : int, optional
        Maximum number of likelihood evaluations. Iteration may stop
        earlier if termination condition is reached. Default is
        `sys.maxsize` (no limit).
    save_bounds : bool, optional
        Whether or not to save past distributions used to bound
        the live points internally. Default is `True`.

    Returns
    -------
    worst : int
        Index of the live point with the worst likelihood. This is our
        new dead point sample. **Negative values indicate the index
        of a new live point generated when initializing a new batch.**
    ustar : `~numpy.ndarray` with shape (npdim,)
        Position of the sample.
    vstar : `~numpy.ndarray` with shape (ndim,)
        Transformed position of the sample.
    loglstar : float
        Ln(likelihood) of the sample.
    nc : int
        Number of likelihood calls performed before the new
        live point was accepted.
    worst_it : int
        Iteration when the live (now dead) point was originally proposed.
    boundidx : int
        Index of the bound the dead point was originally drawn from.
    bounditer : int
        Index of the bound being used at the current iteration.
    eff : float
        The cumulative sampling efficiency (in percent).
    """
    # Initialize default values.
    if maxcall is None:
        maxcall = sys.maxsize
    if maxiter is None:
        maxiter = sys.maxsize
    if nlive_new <= 2 * self.npdim:
        warnings.warn("Beware: `nlive_batch <= 2 * ndim`!")
    self.sampler.save_bounds = save_bounds

    # Initialize starting values.
    h = 0.0  # information, initially *0.*
    logz = -1.e300  # ln(evidence), initially *0.*
    logvol = 0.  # initially contains the whole prior (volume = 1.)

    # Grab results from base run.
    base_id = np.array(self.base_id)
    base_u = np.array(self.base_u)
    base_v = np.array(self.base_v)
    base_logl = np.array(self.base_logl)
    base_n = np.array(self.base_n)
    base_scale = np.array(self.base_scale)
    nbase = len(base_n)
    nblive = self.nlive_init

    # Reset "new" results.
    self.new_id = []
    self.new_u = []
    self.new_v = []
    self.new_logl = []
    self.new_nc = []
    self.new_it = []
    self.new_n = []
    self.new_boundidx = []
    self.new_bounditer = []
    self.new_scale = []
    self.new_logl_min, self.new_logl_max = -np.inf, np.inf

    # Initialize ln(likelihood) bounds.
    if logl_bounds is None:
        # Default: span everything up to the final set of base live points.
        logl_min, logl_max = -np.inf, max(base_logl[:-nblive])
    else:
        logl_min, logl_max = logl_bounds
    self.new_logl_min, self.new_logl_max = logl_min, logl_max

    # Check whether the lower bound encompasses all previous base samples.
    psel = np.all(logl_min <= base_logl)
    vol = 1. - 1. / nblive  # starting prior volume (linear, not log)

    if psel:
        # If the lower bound encompasses all base samples, we want
        # to propose a new set of points from the unit cube.
        live_u = self.rstate.rand(nlive_new, self.npdim)
        if self.use_pool_ptform:
            live_v = np.array(list(self.M(self.prior_transform, np.array(live_u))))
        else:
            live_v = np.array(list(map(self.prior_transform, np.array(live_u))))
        if self.use_pool_logl:
            live_logl = np.array(list(self.M(self.loglikelihood, np.array(live_v))))
        else:
            live_logl = np.array(list(map(self.loglikelihood, np.array(live_v))))
        # Convert all `-np.inf` log-likelihoods to finite large numbers.
        # Necessary to keep estimators in our sampler from breaking.
        for i, logl in enumerate(live_logl):
            if not np.isfinite(logl):
                if np.sign(logl) < 0:
                    live_logl[i] = -1e300
                else:
                    # +inf / NaN log-likelihoods cannot be repaired.
                    raise ValueError("The log-likelihood ({0}) of live "
                                     "point {1} located at u={2} v={3} "
                                     " is invalid.".format(logl, i, live_u[i], live_v[i]))
        live_bound = np.zeros(nlive_new, dtype='int')
        live_it = np.zeros(nlive_new, dtype='int') + self.it
        live_nc = np.ones(nlive_new, dtype='int')
        self.ncall += nlive_new
        # Return live points in generator format.
        for i in range(nlive_new):
            yield (-i - 1, live_u[i], live_v[i], live_logl[i], live_nc[i],
                   live_it[i], 0, 0, self.eff)
    else:
        # If the lower bound doesn't encompass all base samples, we need
        # to "rewind" our previous base run until we arrive at the
        # relevant set of live points (and scale) at the bound.
        live_u = np.empty((nblive, self.npdim))
        live_v = np.empty((nblive, base_v.shape[1]))
        live_logl = np.empty(nblive)
        live_u[base_id[-nblive:]] = base_u[-nblive:]
        live_v[base_id[-nblive:]] = base_v[-nblive:]
        live_logl[base_id[-nblive:]] = base_logl[-nblive:]
        # Walk backwards through the dead points, re-inserting each into its
        # original live-point slot, until we cross `logl_min`.
        for i in range(1, nbase - nblive):
            r = -(nblive + i)
            uidx = base_id[r]
            live_u[uidx] = base_u[r]
            live_v[uidx] = base_v[r]
            live_logl[uidx] = base_logl[r]
            if live_logl[uidx] <= logl_min:
                break
        live_scale = base_scale[r]

        # Hack the internal sampler by overwriting the live points
        # and scale factor.
        self.sampler.nlive = nblive
        self.sampler.live_u = np.array(live_u)
        self.sampler.live_v = np.array(live_v)
        self.sampler.live_logl = np.array(live_logl)
        self.sampler.scale = live_scale

        # Trigger an update of the internal bounding distribution based
        # on the "new" set of live points.
        vol = math.exp(-1. * (nbase + r) / nblive)
        loglmin = min(live_logl)
        if self.sampler._beyond_unit_bound(loglmin):
            bound = self.sampler.update(vol / nblive)
            if save_bounds:
                self.sampler.bound.append(copy.deepcopy(bound))
                self.sampler.nbound += 1
            self.sampler.since_update = 0

        # Sample a new batch of `nlive_new` live points using the
        # internal sampler given the `logl_min` constraint.
        live_u = np.empty((nlive_new, self.npdim))
        live_v = np.empty((nlive_new, base_v.shape[1]))
        live_logl = np.empty(nlive_new)
        live_bound = np.zeros(nlive_new, dtype='int')
        if self.sampler._beyond_unit_bound(loglmin):
            live_bound += self.sampler.nbound - 1
        live_it = np.empty(nlive_new, dtype='int')
        live_nc = np.empty(nlive_new, dtype='int')
        for i in range(nlive_new):
            (live_u[i], live_v[i], live_logl[i], live_nc[i]) = self.sampler._new_point(logl_min, math.log(vol))
            live_it[i] = self.it
            self.ncall += live_nc[i]
            # Return live points in generator format.
            yield (-i - 1, live_u[i], live_v[i], live_logl[i], live_nc[i],
                   live_it[i], live_bound[i], live_bound[i], self.eff)

    # Overwrite the previous set of live points in our internal sampler
    # with the new batch of points we just generated.
    self.sampler.nlive = nlive_new
    self.sampler.live_u = np.array(live_u)
    self.sampler.live_v = np.array(live_v)
    self.sampler.live_logl = np.array(live_logl)
    self.sampler.live_bound = np.array(live_bound)
    self.sampler.live_it = np.array(live_it)

    # Trigger an update of the internal bounding distribution (again).
    loglmin = min(live_logl)
    if self.sampler._beyond_unit_bound(loglmin):
        bound = self.sampler.update(vol / nlive_new)
        if save_bounds:
            self.sampler.bound.append(copy.deepcopy(bound))
            self.sampler.nbound += 1
        self.sampler.since_update = 0

    # Copy over bound reference.
    self.bound = self.sampler.bound

    # Update `update_interval` based on our new set of live points.
    if update_interval is None:
        update_interval = self.update_interval
    if isinstance(update_interval, float):
        update_interval = int(round(self.update_interval * nlive_new))
    if self.bounding == 'none':
        update_interval = np.inf  # no need to update with no bounds
    self.sampler.update_interval = update_interval

    # Update internal ln(prior volume)-based quantities used to set things
    # like `pointvol` that help to prevent constructing over-constrained
    # bounding distributions.
    if self.new_logl_min == -np.inf:
        bound_logvol = 0.
    else:
        # Use the saved log-volume closest to the new lower ln(L) bound.
        vol_idx = np.argmin(abs(self.saved_logl - self.new_logl_min))
        bound_logvol = self.saved_logvol[vol_idx]
    bound_dlv = math.log((nlive_new + 1.) / nlive_new)
    self.sampler.saved_logvol[-1] = bound_logvol
    self.sampler.dlv = bound_dlv

    # Tell the sampler *not* to try and remove the previous addition of
    # live points. All the hacks above make the internal results
    # garbage anyways.
    self.sampler.added_live = False

    # Run the sampler internally as a generator until we hit
    # the lower likelihood threshold. Afterwards, we add in our remaining
    # live points *as if* we had terminated the run. This allows us to
    # sample past the original bounds "for free".
    # NOTE(review): this single-pass loop executes exactly once; it appears
    # to be a historical artifact and has no effect on behavior.
    for i in range(1):
        for it, results in enumerate(self.sampler.sample(dlogz=0., logl_max=logl_max,
                                                         maxiter=maxiter - nlive_new - 1,
                                                         maxcall=maxcall - sum(live_nc),
                                                         save_samples=False,
                                                         save_bounds=save_bounds)):
            # Grab results.
            (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h,
             nc, worst_it, boundidx, bounditer, eff, delta_logz) = results
            # Save results.
            self.new_id.append(worst)
            self.new_u.append(ustar)
            self.new_v.append(vstar)
            self.new_logl.append(loglstar)
            self.new_nc.append(nc)
            self.new_it.append(worst_it)
            self.new_n.append(nlive_new)
            self.new_boundidx.append(boundidx)
            self.new_bounditer.append(bounditer)
            self.new_scale.append(self.sampler.scale)
            # Increment relevant counters.
            self.ncall += nc
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            yield (worst, ustar, vstar, loglstar, nc, worst_it, boundidx,
                   bounditer, self.eff)

        # Flush out the remaining live points as if the run had terminated.
        for it, results in enumerate(self.sampler.add_live_points()):
            # Grab results.
            (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h,
             nc, worst_it, boundidx, bounditer, eff, delta_logz) = results
            # Save results.
            self.new_id.append(worst)
            self.new_u.append(ustar)
            self.new_v.append(vstar)
            self.new_logl.append(loglstar)
            self.new_nc.append(live_nc[worst])
            self.new_it.append(worst_it)
            self.new_n.append(nlive_new - it)
            self.new_boundidx.append(boundidx)
            self.new_bounditer.append(bounditer)
            self.new_scale.append(self.sampler.scale)
            # Increment relevant counters (no new likelihood calls here:
            # the cost was already paid when the live point was proposed).
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            yield (worst, ustar, vstar, loglstar, live_nc[worst], worst_it,
                   boundidx, bounditer, self.eff)
|
def genenare_callmap_sif(self, filepath):
    """Generate a .sif file from the call map.

    :param filepath: Path of the .sif file to write.
    :raises AngrGirlScoutError: If the call map has not been generated yet.
    """
    # NOTE(review): "genenare" is a typo for "generate"; kept for API
    # compatibility with existing callers.
    graph = self.call_map
    if graph is None:
        raise AngrGirlScoutError('Please generate the call graph first.')
    # BUGFIX: open in text mode ("w", not "wb") since str data is written --
    # writing str to a binary file raises TypeError on Python 3. The context
    # manager also guarantees the file is closed if a write fails.
    with open(filepath, "w") as f:
        for src, dst in graph.edges():
            f.write("0x%x\tDirectEdge\t0x%x\n" % (src, dst))
|
def _determine_datatype(fields):
    """Determine the numpy dtype of the data.

    Maps the NRRD ``type`` field to a NumPy type string and prepends the
    byte-order character when the header requires one.
    """
    # Convert the NRRD type string identifier into a NumPy string identifier.
    dtype_str = _TYPEMAP_NRRD2NUMPY[fields['type']]

    # Endianness only matters for multi-byte types with a binary encoding
    # (ASCII/text encodings carry no byte order).
    needs_endian = (np.dtype(dtype_str).itemsize > 1
                    and fields['encoding'] not in ('ASCII', 'ascii', 'text', 'txt'))
    if needs_endian:
        if 'endian' not in fields:
            raise NRRDError('Header is missing required field: "endian".')
        endian = fields['endian']
        if endian == 'big':
            dtype_str = '>' + dtype_str
        elif endian == 'little':
            dtype_str = '<' + dtype_str
        else:
            raise NRRDError('Invalid endian value in header: "%s"' % endian)

    return np.dtype(dtype_str)
|
def set(self, level=None):
    """Set the default log level.

    If the level is not specified, environment variable DEBUG is used
    with the following meaning::

        DEBUG=0 ... LOG_WARN (default)
        DEBUG=1 ... LOG_INFO
        DEBUG=2 ... LOG_DEBUG
        DEBUG=3 ... LOG_DETAILS
        DEBUG=4 ... LOG_DATA
        DEBUG=5 ... LOG_ALL (log all messages)
    """
    # If level specified, use given
    if level is not None:
        Logging._level = level
    # Otherwise attempt to detect from the environment
    else:
        try:
            Logging._level = Logging.MAPPING[int(os.environ["DEBUG"])]
        # BUGFIX: `StandardError` does not exist on Python 3. Catch the
        # concrete failures instead: DEBUG unset (KeyError), not an int
        # (ValueError/TypeError), or out of MAPPING's range (KeyError or
        # IndexError depending on the MAPPING container).
        except (KeyError, ValueError, TypeError, IndexError):
            Logging._level = logging.WARN
    self.logger.setLevel(Logging._level)
|
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
    """Copy a file or directory recursively, and atomically, renaming the
    file or top-level dir when done.

    Unlike shutil.copytree, this will not fail on a file.
    """
    # Plain files go through the single-file atomic copy.
    if not os.path.isdir(source_path):
        copyfile_atomic(source_path, dest_path, make_parents=make_parents,
                        backup_suffix=backup_suffix)
        return
    # Directories: copy into a temp location, then rename into place.
    with atomic_output_file(dest_path, make_parents=make_parents,
                            backup_suffix=backup_suffix) as tmp_path:
        shutil.copytree(source_path, tmp_path, symlinks=symlinks)
|
def deploy(project, version, promote, quiet):
    """Deploy the app to the target environment.

    The target environments can be configured using the ENVIRONMENTS conf
    variable. This will also collect all static files and compile
    translation messages.
    """
    # Imported lazily so the CLI module stays cheap to import.
    from . import logic as deploy_logic
    deploy_logic.deploy(project, version, promote, quiet)
|
def Start(self, file_size=0, maximum_pending_files=1000, use_external_stores=False):
    """Initialize our state."""
    super(MultiGetFileLogic, self).Start()

    # Transfer settings.
    self.state.file_size = file_size
    self.state.use_external_stores = use_external_stores
    # The maximum number of files we are allowed to download concurrently.
    self.state.maximum_pending_files = maximum_pending_files

    # Progress counters.
    self.state.files_hashed = 0
    self.state.files_to_fetch = 0
    self.state.files_fetched = 0
    self.state.files_skipped = 0
    # Counter used to batch up hash checking in the filestore.
    self.state.files_hashed_since_check = 0

    # File trackers waiting to be checked by the file store. Keys are vfs
    # urns and values are FileTrack instances. Values are copied to
    # pending_files for download if not present in FileStore.
    self.state.pending_hashes = {}
    # File trackers currently being fetched. Keys are vfs urns and values
    # are FileTracker instances.
    self.state.pending_files = {}

    # As pathspecs are added to the flow they are appended to this array. We
    # then simply pass their index in this array as a surrogate for the full
    # pathspec. This allows us to use integers to track pathspecs in dicts etc.
    self.state.indexed_pathspecs = []
    self.state.request_data_list = []
    # The index of the next pathspec to start. Pathspecs wait in
    # indexed_pathspecs until there are free trackers for them; whenever the
    # number of pending_files falls below maximum_pending_files we increment
    # this index and start downloading another pathspec.
    self.state.next_pathspec_to_start = 0

    # Number of blob hashes we have received but not yet scheduled for download.
    self.state.blob_hashes_pending = 0
|
def cast_to_unstructured_grid(self):
    """Get a new representation of this object as a
    :class:`vtki.UnstructuredGrid`.
    """
    # vtkAppendFilter with a single input acts as a cast to UnstructuredGrid.
    append_filter = vtk.vtkAppendFilter()
    append_filter.AddInputData(self)
    append_filter.Update()
    return vtki.filters._get_output(append_filter)
|
def connect_head_namespaced_pod_proxy(self, name, namespace, **kwargs):
    """connect HEAD requests to proxy of Pod

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_head_namespaced_pod_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same *_with_http_info
    # call and return its result unchanged, so a single return suffices.
    return self.connect_head_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)
|
def show(self, **kwargs):
    """Shows the menu. Any `kwargs` supplied will be passed to `show_menu()`."""
    # Deep-copy the stored defaults so callers can never mutate them,
    # then let the per-call kwargs win.
    merged_kwargs = copy.deepcopy(self._show_kwargs)
    merged_kwargs.update(kwargs)
    return show_menu(self.entries, **merged_kwargs)
|
def RegisterCustomFieldCodec(encoder, decoder):
    """Register a custom encoder/decoder for this field."""
    def Register(field):
        # A fresh _Codec is built per registration, matching the
        # one-codec-per-field bookkeeping in _CUSTOM_FIELD_CODECS.
        _CUSTOM_FIELD_CODECS[field] = _Codec(decoder=decoder, encoder=encoder)
        return field

    return Register
|
def _read_msg_header(session):
    """Perform a read on input socket to consume headers and then return
    a tuple of message type, message length.

    :param session: Push Session to read data for.

    Returns response type (i.e. PUBLISH_MESSAGE) if header was completely
    read, otherwise None if header was not completely read.
    """
    remaining = 6 - len(session.data)
    try:
        chunk = session.socket.recv(remaining)
        if not chunk:
            # No data on socket; the peer likely closed the connection.
            return NO_DATA
        session.data += chunk
        if len(session.data) < 6:
            # Header still not completely read.
            return INCOMPLETE
    except ssl.SSLError:
        # select can trigger for an SSL socket before data is readable.
        return INCOMPLETE

    # Header layout: 2-byte unsigned type, 4-byte signed length (network order).
    session.message_length = struct.unpack('!i', session.data[2:6])[0]
    response_type = struct.unpack('!H', session.data[0:2])[0]
    # Clear out session data as header is consumed.
    session.data = six.b("")
    return response_type
|
def remove(self, value, count=1):
    """Remove occurrences of ``value`` from the list.

    :keyword count: Number of matching values to remove.
        Default is to remove a single value.
    :raises ValueError: If no matching value was found.
    """
    removed = self.client.lrem(self.name, value, num=count)
    if removed:
        return removed
    raise ValueError("%s not in list" % value)
|
def decorate(self, func, target, *anoop, **kwnoop):
    """Decorate the passed-in func, invoking target when func is called.

    :param func: the function being decorated
    :param target: the target that will be run when func is called
    :returns: the decorated func
    """
    if target:
        self.target = target

    def wrapper(wrapped_self, *args, **kwargs):
        # Run the configured target before delegating to the original func.
        self.handle_target(
            request=wrapped_self.request,
            controller_args=args,
            controller_kwargs=kwargs,
        )
        return func(wrapped_self, *args, **kwargs)

    return wrapper
|
def _parse_wenner_file ( filename , settings ) :
"""Parse a Geotom . wen ( Wenner configuration ) file
Parsing problems
Due to column overflows it is necessary to make sure that spaces are
present around the ; character . Example :
8.000 14.000 10835948.70 ; 0.001 - 123.1853 - 1.0 23.10.2014"""
|
# read data
with open ( filename , 'r' ) as fid2 :
geotom_data_orig = fid2 . read ( )
# replace all ' ; ' by ' ; '
geotom_data = geotom_data_orig . replace ( ';' , ' ; ' )
fid = StringIO ( )
fid . write ( geotom_data )
fid . seek ( 0 )
header = [ fid . readline ( ) for i in range ( 0 , 16 ) ]
header
df = pd . read_csv ( fid , delim_whitespace = True , header = None , names = ( 'elec1_wenner' , 'a_w' , 'rho_a' , 'c4' , 'c5' , 'c6' , 'c6' , 'c7' , 'c8' , 'c9' , ) , )
# compute geometric factor using the Wenner formula
df [ 'k' ] = 2 * np . pi * df [ 'a_w' ]
df [ 'r' ] = df [ 'rho_a' ] / df [ 'k' ]
Am = df [ 'elec1_wenner' ]
Bm = df [ 'elec1_wenner' ] + df [ 'a_w' ]
Mm = df [ 'elec1_wenner' ] + 3 * df [ 'a_w' ]
Nm = df [ 'elec1_wenner' ] + 2 * df [ 'a_w' ]
df [ 'a' ] = Am / 2.0 + 1
df [ 'b' ] = Bm / 2.0 + 1
df [ 'm' ] = Mm / 2.0 + 1
df [ 'n' ] = Nm / 2.0 + 1
# remove any nan values
df . dropna ( axis = 0 , subset = [ 'a' , 'b' , 'm' , 'n' , 'r' ] , inplace = True )
return df
|
def countries(self):
    """Access the countries

    :returns: twilio.rest.voice.v1.dialing_permissions.country.CountryList
    :rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryList
    """
    # Lazily construct the CountryList on first access and cache it.
    if self._countries is None:
        self._countries = CountryList(self._version)
    return self._countries
|
def content(self, value):
    """Setter for **self.__content** attribute.

    :param value: Attribute value.
    :type value: list
    """
    if value is not None:
        # Only list values are accepted; None clears the attribute.
        assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("content", value)
    self.__content = value
|
def file_find(self, load):
    '''Convenience function for calls made using the LocalClient'''
    path = load.get('path')
    # Empty/missing path short-circuits with an empty result.
    if not path:
        return {'path': '', 'rel': ''}
    return self.find_file(path, load.get('saltenv', 'base'))
|
def getApplicationSupportedMimeTypes(self, pchAppKey, pchMimeTypesBuffer, unMimeTypesBuffer):
    """Get the list of supported mime types for this application, comma-delimited"""
    # Thin delegation to the underlying OpenVR function table entry.
    fn = self.function_table.getApplicationSupportedMimeTypes
    return fn(pchAppKey, pchMimeTypesBuffer, unMimeTypesBuffer)
|
def as_bin(self, as_spendable=False):
    """Return the txo as binary."""
    # Serialize into an in-memory buffer and hand back the raw bytes.
    buffer = io.BytesIO()
    self.stream(buffer, as_spendable=as_spendable)
    return buffer.getvalue()
|
def get_ip_by_equip_and_vip(self, equip_name, id_evip):
    """Get an available IP in the Equipment related Environment VIP.

    :param equip_name: Equipment Name.
    :param id_evip: Vip environment identifier. Integer value and greater than zero.

    :return: Dictionary with the following structure:
        {'ipv4': [{'id': <id>, 'ip': <ip>, 'network': {'id': <id>, 'network': <network>, 'mask': <mask>}}...],
         'ipv6': [{'id': <id>, 'ip': <ip>, 'network': {'id': <id>, 'network': <network>, 'mask': <mask>}}...]}

    :raise InvalidParameterError: Vip environment identifier or equipment name is none or invalid.
    :raise EquipamentoNotFoundError: Equipment not registered.
    :raise EnvironmentVipNotFoundError: Vip environment not registered.
    :raise UserNotAuthorizedError: User dont have permission to perform operation.
    :raise XMLError: Networkapi failed to generate the XML response.
    :raise DataBaseError: Networkapi failed to access the database.
    """
    if not is_valid_int_param(id_evip):
        raise InvalidParameterError(u'Vip environment is invalid or was not informed.')
    # Build the request payload and submit it to the API endpoint.
    ip_map = {'equip_name': equip_name, 'id_evip': id_evip}
    code, xml = self.submit({'ip_map': ip_map}, 'POST', 'ip/getbyequipandevip/')
    return self.response(code, xml)
|
def parse_point_source_node(node, mfd_spacing=0.1):
    """Parses a "pointSource" node into an instance of
    :class:`mtkPointSource`.

    (Docstring fix: the previous text said "areaSource"/mtkAreaSource, but
    the code asserts a pointSource tag and returns an mtkPointSource.)

    :param node: XML node whose tag contains "pointSource"
    :param float mfd_spacing:
        Magnitude-frequency distribution spacing
        (currently unused in this function -- TODO confirm intended use)
    :returns: :class:`mtkPointSource` instance
    """
    assert "pointSource" in node.tag
    pnt_taglist = get_taglist(node)
    # Get metadata
    point_id, name, trt = (node.attrib["id"],
                           node.attrib["name"],
                           node.attrib["tectonicRegion"])
    # Defensive validation!
    assert point_id
    # Process geometry
    location, upper_depth, lower_depth = node_to_point_geometry(
        node.nodes[pnt_taglist.index("pointGeometry")])
    # Process scaling relation
    msr = node_to_scalerel(node.nodes[pnt_taglist.index("magScaleRel")])
    # Process aspect ratio
    aspect = float_(node.nodes[pnt_taglist.index("ruptAspectRatio")].text)
    # Process MFD
    mfd = node_to_mfd(node, pnt_taglist)
    # Process nodal planes
    npds = node_to_nodal_planes(node.nodes[pnt_taglist.index("nodalPlaneDist")])
    # Process hypocentral depths
    hdds = node_to_hdd(node.nodes[pnt_taglist.index("hypoDepthDist")])
    return mtkPointSource(point_id, name, trt,
                          geometry=location,
                          upper_depth=upper_depth,
                          lower_depth=lower_depth,
                          mag_scale_rel=msr,
                          rupt_aspect_ratio=aspect,
                          mfd=mfd,
                          nodal_plane_dist=npds,
                          hypo_depth_dist=hdds)
|
def _init_metadata(self, **kwargs):
    """Initialize form metadata."""
    # Parent populates self._mdata; must run first.
    osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
    # The first default id value serves as this form's default grade system.
    default_ids = self._mdata['grade_system']['default_id_values']
    self._grade_system_default = default_ids[0]
|
def get_aws_secrets_from_file(credentials_file):  # type: (str) -> Set[str]
    """Extract AWS secrets from configuration files.

    Read an ini-style configuration file and return a set with all found
    AWS secret access keys.
    """
    credentials_path = os.path.expanduser(credentials_file)
    if not os.path.exists(credentials_path):
        return set()

    parser = configparser.ConfigParser()
    try:
        parser.read(credentials_path)
    except configparser.MissingSectionHeaderError:
        # Not a valid ini file; nothing to extract.
        return set()

    secret_vars = ('aws_secret_access_key',
                   'aws_security_token',
                   'aws_session_token')
    found = set()
    for section in parser.sections():
        for var in secret_vars:
            try:
                value = parser.get(section, var).strip()
            except configparser.NoOptionError:
                continue
            if value:
                found.add(value)
    return found
|
def create_rule(self, txtrule=None, regex=None, extension=None, cmd=None, codes=[0, None], recurse=True):
    '''Adds a set of rules to the extraction rule list.

    @txtrule   - Rule string, or list of rule strings, in the format
                 <regular expression>:<file extension>[:<command to run>]
    @regex     - If rule string is not specified, this is the regular expression string to use.
    @extension - If rule string is not specified, this is the file extension to use.
    @cmd       - If rule string is not specified, this is the command to run.
                 Alternatively a callable object may be specified, which will be
                 passed one argument: the path to the file to extract.
    @codes     - A list of valid return codes for the extractor.
    @recurse   - If False, extracted directories will not be recursed into when
                 the matryoshka option is enabled.

    Returns the list of created rule dictionaries.
    '''
    created_rules = []

    # Process a single explicitly specified rule.
    if not txtrule and regex and extension:
        return [{
            'extension': extension,
            'cmd': cmd if cmd else '',
            'regex': re.compile(regex),
            'codes': codes,
            'recurse': recurse,
        }]

    # Process a rule string, or a list of rule strings.
    rules = txtrule if isinstance(txtrule, list) else [txtrule]

    for rule in rules:
        # BUGFIX: build a fresh dict and reset `match` for every rule.
        # Previously one dict was created before the loop and appended on
        # each iteration, so every entry in created_rules aliased the same
        # object (all rules ended up describing the last parsed rule), and
        # a stale `match` from a previous iteration could re-append it when
        # parsing failed.
        r = {
            'extension': '',
            'cmd': '',
            'regex': None,
            'codes': codes,
            'recurse': recurse,
        }
        match = False
        try:
            values = self._parse_rule(rule)
            match = values[0]
            r['regex'] = re.compile(values[0])
            r['extension'] = values[1]
            r['cmd'] = values[2]
            r['codes'] = values[3]
            r['recurse'] = values[4]
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            # Malformed rules are silently skipped (best-effort parsing).
            pass

        # Only keep the rule if the match string was successfully retrieved.
        if match:
            created_rules.append(r)

    return created_rules
|
def create_jsonable_registry(self):
    """Creates a JSON-able representation of this object.

    Returns:
      A list of 3-item entries mapping (device, tensor name) to JSON-able
      representations of NumericsAlertHistory.
    """
    # JSON does not support tuples as keys, only strings. Therefore, the
    # device name, tensor name, and dictionary data are stored as a
    # 3-item entry instead.
    triplets = []
    for (device_name, tensor_name), history in self._data.items():
        triplets.append(
            HistoryTriplet(device_name, tensor_name, history.create_jsonable_history()))
    return triplets
|
def calculate_oobatake_dG(seq, temp):
    """Get free energy of unfolding (dG) using the Oobatake method.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        temp (float): Temperature in degrees C

    Returns:
        float: Free energy of unfolding dG
            (NOTE(review): the original docstring gives both cal/mol and
            J/mol -- confirm units against the dH/dS helpers.)
    """
    temp_kelvin = temp + 273.15
    enthalpy = calculate_oobatake_dH(seq, temp)
    entropy = calculate_oobatake_dS(seq, temp)
    # 563.552 - a correction for N- and C-terminal group
    # (approximated from 7 examples in the paper)
    return enthalpy - temp_kelvin * entropy - 563.552
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.