signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _parse_decorated_functions(self, code):
    """Scan *code* for ``@rest(...)``-decorated functions.

    For every match, the decorator block is handed to
    ``self._add_function_paths`` (which derives URL rule and HTTP
    methods) and the captured docstring to ``self._parse_docstring``.

    :param code: source text containing one or more ``@rest(...)``
        decorators followed by a triple-double-quoted docstring.
    """
    matches = re.finditer(r"""
        # @rest decorators
        (?P<decorators>
            (?:@rest\(.+?\)\n)+         # one or more @rest decorators inside
        )
        # docstring delimited by 3 double quotes
        .+?"{3}(?P<docstring>.+?)"{3}
        """, code, re.VERBOSE | re.DOTALL)
    for function_match in matches:
        m_dict = function_match.groupdict()
        self._parse_docstring(m_dict['docstring'])
        self._add_function_paths(m_dict['decorators'])
|
def save_image(self, image, local_filename):
    """Identical to :meth:`dockermap.client.base.DockerClientWrapper.save_image`
    with additional logging.

    :param image: name or id of the image to export.
    :param local_filename: path of the tarball to write.
    """
    self.push_log("Receiving tarball for image '{0}' and storing as '{1}'".format(image, local_filename))
    super(DockerFabricClient, self).save_image(image, local_filename)
|
from typing import List
def can_sort_by_shifting(nums: List[int]) -> bool:
    """Decide whether *nums* can be sorted into non-decreasing order
    using only right circular shifts.

    A right circular shift moves every element one position to the
    right and wraps the last element to the front, so the reachable
    orderings are exactly the rotations of *nums*.  The list is
    therefore sortable iff the rotation beginning at its minimum
    element is already sorted.  An empty list is considered sortable.
    The input is guaranteed to consist of unique elements.

    >>> can_sort_by_shifting([3, 4, 5, 1, 2])
    True
    >>> can_sort_by_shifting([3, 5, 4, 1, 2])
    False

    Args:
        nums: List of unique integers.

    Returns:
        bool: True if some number of shifts sorts the list, False otherwise.
    """
    if not nums:
        return True
    pivot = nums.index(min(nums))
    rotation = nums[pivot:] + nums[:pivot]
    return rotation == sorted(nums)
|
def get_property(host=None, admin_username=None, admin_password=None, property=None):
    '''.. versionadded:: Fluorine

    Return a single chassis property.

    host
        The chassis host.
    admin_username
        The username used to access the chassis.
    admin_password
        The password used to access the chassis.
    property
        The property to retrieve.

    CLI Example:

    .. code-block:: bash

        salt dell dracr.get_property property=System.ServerOS.HostName
    '''
    # ``property`` shadows the builtin, but the name is part of the
    # public CLI interface and must be kept.
    if property is None:
        raise SaltException('No property specified!')
    return __execute_ret('get \'{0}\''.format(property),
                         host=host,
                         admin_username=admin_username,
                         admin_password=admin_password)
|
def fetch(clobber=False):
    """Downloads the IPHAS 3D dust map of Sale et al. (2014).

    Args:
        clobber (Optional[bool]): If ``True``, any existing file will be
            overwritten, even if it appears to match. If ``False`` (the
            default), ``fetch()`` will attempt to determine if the dataset
            already exists. This determination is not 100% robust against
            data corruption.
    """
    # Fixed: ``fname_pattern`` was previously also bound to the bare
    # directory here and then immediately rebound below.
    dest_dir = os.path.join(data_dir(), 'iphas')
    url_pattern = 'http://www.iphas.org/data/extinction/A_samp_{:03d}.tar.gz'
    fname_pattern = os.path.join(dest_dir, 'A_samp_') + '{:03d}.tar.gz'
    h5_fname = os.path.join(dest_dir, 'iphas.h5')

    # Check if file already exists
    if not clobber:
        h5_size = 227817543  # Guess, in Bytes
        h5_dsets = {'samples': (61130,)}
        if fetch_utils.h5_file_exists(h5_fname, h5_size, dsets=h5_dsets):
            print('File appears to exist already. Call `fetch(clobber=True)` '
                  'to force overwriting of existing file.')
            return

    # Expected MD5 sums of .samp files
    file_md5sum = {
        30: 'dd531e397622bc97d4ff92b6c7863ade',
        40: 'b0f925eb3e46b77876e4054a26ad5b52',
        50: 'ea3b9500f0419d66dd92d9f9c127c2b5',
        60: 'cccf136f4e2306a6038e8093499216fd',
        70: 'a05fe2f815086686056c18087cc5410b',
        80: '799bf618c8827b3d7250c884ec66ec49',
        90: 'd2a302d917da768bacf6ea74cb9dcfad',
        100: '2c75e31ad9320818556c4c9964b6af65',
        110: '742ea8de6f5f8a7e549f6c56b0088789',
        120: '9beabfa2c9634f953adadb5016eab072',
        130: '7cd7313f466eb60e8318d0f1bd32e035',
        140: 'fb6d09e4d939081b891e245c30b791f1',
        150: '8e9b6dc1561183aeadc64f41c85a64a8',
        160: '8a35828457b7b1d53d06998114553674',
        170: '7ffb29ec23e2f625dcfaaa84c293821d',
        180: 'c737da479d132b88483d6ddab5b25fc8',
        190: '9bc5fc7f7ba55f36a167473bb3679601',
        200: '7d8ffc4aa2f7c7026d8aa3ffb670d48e',
        210: 'e31b04964b7970b81fc90c120b4ebc24',
    }

    # Download the .samp files
    for key in file_md5sum:
        url = url_pattern.format(key)
        print('Downloading {}'.format(url))
        fetch_utils.download_and_verify(url, file_md5sum[key], fname_pattern.format(key))

    # Convert from ASCII to HDF5 format
    print('Repacking files...')
    ascii2h5(dest_dir, h5_fname)

    # Cleanup
    print('Removing original files...')
    for key in file_md5sum:
        os.remove(fname_pattern.format(key))
|
def url_as_file(url, ext=None):
    """Context manager that GETs a given `url` and provides it as a local file.

    The file is in a closed state upon entering the context, and
    removed when leaving it, if still there.  Use `ext` to give the
    file name a specific extension; the extension may include the
    separating dot, otherwise one is added.

    Parameters:
        url (str): URL to retrieve.
        ext (str, optional): Extension for the generated filename.

    Yields:
        str: The path to a temporary file with the content of the URL.

    Raises:
        requests.RequestException: Base exception of ``requests``, see its
            docs for more detailed ones.
    """
    # Normalize the extension: exactly one leading dot, or none at all.
    suffix = '.' + ext.strip('.') if ext else ''
    prefix = 'www-{}-'.format(urlparse(url).hostname or 'any')
    payload = requests.get(url).content
    tmp = tempfile.NamedTemporaryFile(suffix=suffix, prefix=prefix, delete=False)
    with tmp:
        tmp.write(payload)
    try:
        yield tmp.name
    finally:
        # The consumer may have moved/deleted the file already.
        if os.path.exists(tmp.name):
            os.remove(tmp.name)
|
def make_chart(self):
    '''Build a bar chart of task start/end spans.

    Returns
        altair.Chart
    '''
    # altair is imported lazily so it stays an optional dependency.
    import altair as alt
    frame = self.get_task_df()
    return alt.Chart(frame).mark_bar().encode(x='start', x2='end', y='term')
|
def _param_updated(self, pk):
    """Callback with data for an updated parameter.

    ``pk.data`` starts with the variable id (2 bytes little-endian when
    ``self._useV2``, a single byte otherwise) followed by the raw value,
    which is unpacked according to the TOC element's ``pytype``.
    """
    if self._useV2:
        var_id = struct.unpack('<H', pk.data[:2])[0]
    else:
        var_id = pk.data[0]
    element = self.toc.get_element_by_id(var_id)
    if element:
        if self._useV2:
            s = struct.unpack(element.pytype, pk.data[2:])[0]
        else:
            s = struct.unpack(element.pytype, pk.data[1:])[0]
        # Values are stored and delivered to callbacks as strings.
        s = s.__str__()
        complete_name = '%s.%s' % (element.group, element.name)
        # Save the value for synchronous access
        if element.group not in self.values:
            self.values[element.group] = {}
        self.values[element.group][element.name] = s
        logger.debug('Updated parameter [%s]' % complete_name)
        # Notify per-parameter, then per-group, then catch-all callbacks.
        if complete_name in self.param_update_callbacks:
            self.param_update_callbacks[complete_name].call(complete_name, s)
        if element.group in self.group_update_callbacks:
            self.group_update_callbacks[element.group].call(complete_name, s)
        self.all_update_callback.call(complete_name, s)
        # Once all the parameters are updated call the
        # callback for "everything updated" (after all the param
        # updated callbacks)
        if self._check_if_all_updated() and not self.is_updated:
            self.is_updated = True
            self.all_updated.call()
    else:
        logger.debug('Variable id [%d] not found in TOC', var_id)
|
def update_path(self, path):
    """Replace our path with the routers named in *path*.

    There are EXTENDED messages which don't include any routers at
    all, and any of the EXTENDED messages may have some arbitrary
    flags in them. So far, they're all upper-case and none start
    with $ luckily. The routers in the path should all be
    LongName-style router names (this depends on them starting
    with $).

    For further complication, it's possible to extend a circuit to
    a router which isn't in the consensus. nickm via #tor thought
    this might happen in the case of hidden services choosing a
    rendevouz point not in the current consensus.
    """
    oldpath = self.path
    self.path = []
    for p in path:
        # Anything not starting with $ is a flag, not a router (see above).
        if p[0] != '$':
            break
        # this will create a Router if we give it a router
        # LongName that doesn't yet exist
        router = self.router_container.router_from_id(p)
        self.path.append(router)
        # if the path grew, notify listeners
        if len(self.path) > len(oldpath):
            for x in self.listeners:
                x.circuit_extend(self, router)
            # NOTE(review): this makes ``oldpath`` an alias of
            # ``self.path``, so the length test is always False on later
            # iterations and listeners fire at most once per call —
            # confirm this is intended.
            oldpath = self.path
|
def clean(inst):
    """Routine to return FPMU data cleaned to the specified level.

    Parameters
    inst : (pysat.Instrument)
        Instrument class object, whose attribute clean_level is used to
        return the desired level of data selectivity.

    Returns
    Void : (NoneType)
        data in inst is modified in-place.

    Notes
    No cleaning currently available for FPMU.
    """
    # -999. marks bad Te samples, -9.9999998e+30 marks bad Ni samples;
    # both are mapped to NaN in a single pass.
    fill_values = [-999., -9.9999998e+30]
    inst.data.replace(fill_values, np.nan, inplace=True)
    return None
|
def set_configuration(self, command):
    """Apply a configuration *command* to the logger.

    May be called at any moment during runtime to modify the logger
    behavior.

    :param command: The command object that holds all the necessary
        information from the remote process.
    """
    self.permanent_progressbar_slots = command.permanent_progressbar_slots
    self.redraw_frequency_millis = command.redraw_frequency_millis
    self.console_level = command.console_level
    self.task_millis_to_removal = command.task_millis_to_removal
    self.console_format_strftime = command.console_format_strftime
    self.console_format = command.console_format
    self.file_handlers = command.file_handlers
    # If the logger has already been initialized, drop its handlers and
    # install the new ones (stdout/stderr stream handlers are refused:
    # those streams are reserved by the logger process).
    if self.log.handlers:
        self.log.handlers.clear()
    for handler in self.file_handlers:
        reserved_stream = isinstance(handler, StreamHandler) and handler.stream in (sys.stdout, sys.stderr)
        if reserved_stream:
            self.critical(LogMessageCommand(text='Cannot use logging.StreamHandler with \'sys.stdout\' nor \'sys.stderr\' because those are reserved by the logger process', level=logging.CRITICAL))
            continue
        self.log.addHandler(hdlr=handler)
    # Resize the exception list without clobbering existing entries:
    # shrink by dropping entries from the end, grow by padding the end.
    if self.exceptions:
        wanted = command.exception_number
        if wanted < len(self.exceptions):
            del self.exceptions[wanted:]
        elif wanted > len(self.exceptions):
            self.exceptions.extend([''] * (wanted - len(self.exceptions)))
    else:
        self.exceptions = [''] * command.exception_number
    # Resize the message list, keeping the most recent messages: shrink
    # by dropping the oldest entries at the front, grow by padding the front.
    if self.messages:
        wanted = command.message_number
        if wanted < len(self.messages):
            del self.messages[:len(self.messages) - wanted]
        elif wanted > len(self.messages):
            self.messages[:0] = [''] * (wanted - len(self.messages))
    else:
        self.messages = [''] * command.message_number
|
def tokenize(contents):
    """Parse a string called contents for CMake tokens."""
    # Scan, compress, then drop whitespace tokens in one pass.
    compressed = _compress_tokens(_scan_for_tokens(contents))
    return [tok for tok in compressed if tok.type != TokenType.Whitespace]
|
def __entropy(data):
    '''Compute entropy of the flattened data set (e.g. a density distribution).'''
    # Normalize so the values sum to one (a probability distribution).
    probabilities = data / float(numpy.sum(data))
    # A grey value g with p(g) == 0 contributes 0 to the entropy by
    # definition, so drop the zeros; this also flattens the array.
    probabilities = probabilities[numpy.nonzero(probabilities)]
    return -1. * numpy.sum(probabilities * numpy.log2(probabilities))
|
def onerror(self, message, source, lineno, colno):
    """Called when an error occurs.

    Returns the error details unchanged as a tuple so callers can
    inspect or log them.
    """
    details = (message, source, lineno, colno)
    return details
|
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
    """Parallel map for multiprocessing.

    Applies ``f`` to every item of ``X`` using ``nprocs`` daemon worker
    processes and returns the results in input order.

    :param f: function applied to each item.
    :param X: iterable of items.
    :param nprocs: number of worker processes (note: the default is
        evaluated once, at definition time).
    :return: list of results, ordered to match ``X``.
    """
    q_in = multiprocessing.Queue(1)
    q_out = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=fun, args=(f, q_in, q_out)) for _ in range(nprocs)]
    for p in workers:
        p.daemon = True
        p.start()
    # Fixed: side-effect list comprehensions built throwaway lists here,
    # and a full ``sent`` list was kept only for its length.
    n_sent = 0
    for i, x in enumerate(X):
        q_in.put((i, x))
        n_sent += 1
    # One sentinel per worker shuts the pool down.
    for _ in range(nprocs):
        q_in.put((None, None))
    results = [q_out.get() for _ in range(n_sent)]
    for p in workers:
        p.join()
    # Workers finish out of order; restore input order by index.
    return [x for i, x in sorted(results)]
|
def show_news_line(slug=None, limit=3, **kwargs):
    """Render the list of the most recent news items.

    Usage example::

        {% show_news_line 'news_section_slug' 3 class='news-class' %}

    :param slug: slug of the news section; when omitted, no section
        filtering is applied
    :param limit: number of news items to display
    :param kwargs: HTML attributes for the wrapping tag
    :return: template context dict
    """
    if slug is None:
        section = None
        queryset = News.objects.published()
    else:
        section = Section.objects.get(slug=slug)
        queryset = News.objects.published().filter(sections__slug=slug)
    latest = queryset.prefetch_related('sections').order_by('-date', '-id').all()[:limit]
    return {'models': latest, 'section': section, 'data': kwargs}
|
def is_website(url):
    """Check if given url string is a website.

    Usage::

        >>> is_website("http://www.domain.com")
        True
        >>> is_website("domain.com")
        False

    :param url: Data to check.
    :type url: unicode
    :return: Is website.
    :rtype: bool
    """
    # Fixed: both debug messages previously had an unbalanced quote
    # ("> {0}' ..."); the value is now quoted on both sides.  The
    # docstring also documented a non-existent ``data`` parameter.
    if re.match(r"(http|ftp|https)://([\w\-\.]+)/?", url):
        LOGGER.debug("> '{0}' is matched as website.".format(url))
        return True
    else:
        LOGGER.debug("> '{0}' is not matched as website.".format(url))
        return False
|
def to_pickle(self, filename):
    """Serialize this Camera to *filename* using :mod:`pickle`."""
    with open(filename, 'wb') as out_file:
        pickle.dump(self, out_file)
|
def _provision_network(self, port_id, net_uuid, network_type, physical_network, segmentation_id):
    """Provision the network with the received information."""
    LOG.info("Provisioning network %s", net_uuid)
    vswitch_name = self._get_vswitch_name(network_type, physical_network)
    if network_type in (h_constant.TYPE_VLAN, h_constant.TYPE_FLAT):
        # Nothing to do for these types.
        pass
    elif network_type == h_constant.TYPE_LOCAL:
        # TODO(alexpilotti): Check that the switch type is private
        # or create it if not existing.
        pass
    elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
        self._nvgre_ops.bind_nvgre_network(segmentation_id, net_uuid, vswitch_name)
    else:
        raise exception.NetworkingHyperVException(
            (_("Cannot provision unknown network type %(network_type)s for network %(net_uuid)s")
             % dict(network_type=network_type, net_uuid=net_uuid)))
    self._network_vswitch_map[net_uuid] = {
        'network_type': network_type,
        'vswitch_name': vswitch_name,
        'ports': [],
        'vlan_id': segmentation_id,
    }
|
def gather(self, futures, consume_exceptions=True):
    """Return a Future that waits for all the Futures in ``futures``.

    :param futures: a list of Futures (or coroutines?)
    :param consume_exceptions: if True, any errors are eaten and
        returned in the result list.
    """
    # asyncio docs: "If return_exceptions is True, exceptions in the
    # tasks are treated the same as successful results, and gathered in
    # the result list; otherwise, the first raised exception will be
    # immediately propagated to the returned future."
    aggregate = asyncio.gather(*futures, return_exceptions=consume_exceptions)
    return aggregate
|
def create_user(self, data):
    """Create a User.

    See http://teampasswordmanager.com/docs/api-users/#create_user

    :param data: mapping of user fields to POST.
    :return: id of the newly created user.
    """
    # Lazy %-args: the message is only formatted if the record is emitted.
    log.info('Create user with %s', data)
    new_id = self.post('users.json', data).get('id')
    log.info('User has been created with ID %s', new_id)
    return new_id
|
def run ( self ) :
'''Execute a single step and return results . The result for batch mode is the
input , output etc returned as alias , and for interactive mode is the return value
of the last expression .'''
|
# return value of the last executed statement
self . last_res = None
self . start_time = time . time ( )
self . completed = defaultdict ( int )
# prepare environments , namely variables that can be used by the step
# * step _ name : name of the step , can be used by step process to determine
# actions dynamically .
env . sos_dict . set ( 'step_name' , self . step . step_name ( ) )
env . sos_dict . set ( '__last_step__' , self . step . last_step )
self . log ( 'start' )
env . sos_dict . set ( 'step_id' , textMD5 ( f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}' ) )
env . sos_dict . set ( 'master_id' , env . config [ 'master_id' ] )
# used by nested workflow
env . sos_dict . set ( '__step_context__' , self . step . context )
env . sos_dict . set ( '_runtime' , { } )
# * input : input files , which should be _ _ step _ output _ _ if it is defined , or
# None otherwise .
# * _ input : first batch of input , which should be input if no input statement is used
# * output : None at first , can be redefined by output statement
# * _ output : None at first , can be redefined by output statement
# * depends : None at first , can be redefined by depends statement
# * _ depends : None at first , can be redefined by depends statement
if '__step_output__' not in env . sos_dict or env . sos_dict [ '__step_output__' ] . unspecified ( ) :
env . sos_dict . set ( 'step_input' , sos_targets ( [ ] ) )
else :
env . sos_dict . set ( 'step_input' , env . sos_dict [ '__step_output__' ] . _remove_empty_groups ( ) )
# input can be Undetermined from undetermined output from last step
env . sos_dict . set ( '_input' , copy . deepcopy ( env . sos_dict [ 'step_input' ] ) )
if '__default_output__' in env . sos_dict : # if step is triggered by sos _ step , it should not be considered as
# output of the step . # 981
env . sos_dict . set ( '__default_output__' , sos_targets ( [ x for x in env . sos_dict [ '__default_output__' ] . _targets if not isinstance ( x , sos_step ) ] ) )
env . sos_dict . set ( 'step_output' , copy . deepcopy ( env . sos_dict [ '__default_output__' ] ) )
env . sos_dict . set ( '_output' , copy . deepcopy ( env . sos_dict [ '__default_output__' ] ) )
else :
env . sos_dict . set ( 'step_output' , sos_targets ( [ ] ) )
# output is said to be unspecified until output : is used
env . sos_dict . set ( '_output' , sos_targets ( _undetermined = True ) )
env . sos_dict . set ( 'step_depends' , sos_targets ( [ ] ) )
env . sos_dict . set ( '_depends' , sos_targets ( [ ] ) )
# _ index is needed for pre - input action ' s active option and for debug output of scripts
env . sos_dict . set ( '_index' , 0 )
if 'STEP' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] :
env . log_to_file ( 'STEP' , f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}' )
if self . step . task_params :
try :
task_queue = get_value_of_param ( 'queue' , self . step . task_params , extra_dict = env . sos_dict . dict ( ) )
if task_queue :
env . sos_dict [ '_runtime' ] [ 'queue' ] = task_queue [ 0 ]
except Exception as e :
raise ValueError ( f'Failed to determine value of parameter queue of {self.step.task_params}: {e}' )
# check concurrent # 1134
try :
task_concurrency = get_value_of_param ( 'concurrent' , self . step . task_params , extra_dict = env . sos_dict . dict ( ) )
if task_concurrency :
env . sos_dict [ '_runtime' ] [ 'concurrent' ] = task_concurrency [ 0 ]
except Exception as e :
raise ValueError ( f'Failed to determine value of parameter queue of {self.step.task_params}: {e}' )
if ( env . config [ 'default_queue' ] in ( 'None' , 'none' , None ) and 'queue' not in env . sos_dict [ '_runtime' ] ) or ( 'queue' in env . sos_dict [ '_runtime' ] and env . sos_dict [ '_runtime' ] [ 'queue' ] in ( 'none' , 'None' , None ) ) : # remove task statement
if len ( self . step . statements ) >= 1 and self . step . statements [ - 1 ] [ 0 ] == '!' :
self . step . statements [ - 1 ] [ 1 ] += '\n' + self . step . task
else :
self . step . statements . append ( [ '!' , self . step . task ] )
self . step . task = None
elif 'queue' not in env . sos_dict [ '_runtime' ] or not env . sos_dict [ '_runtime' ] [ 'queue' ] :
if env . config [ 'default_queue' ] :
env . sos_dict [ '_runtime' ] [ 'queue' ] = env . config [ 'default_queue' ]
else :
env . sos_dict [ '_runtime' ] [ 'queue' ] = 'localhost'
# look for input statement .
input_statement_idx = [ idx for idx , x in enumerate ( self . step . statements ) if x [ 0 ] == ':' and x [ 1 ] == 'input' ]
if not input_statement_idx :
input_statement_idx = None
elif len ( input_statement_idx ) == 1 :
input_statement_idx = input_statement_idx [ 0 ]
else :
raise ValueError ( f'More than one step input are specified in step {self.step.step_name()}' )
# if shared is true , we have to disable concurrent because we
# do not yet return anything from shared .
self . concurrent_substep = 'shared' not in self . step . options and ( 'concurrent' not in env . sos_dict [ '_runtime' ] or env . sos_dict [ '_runtime' ] [ 'concurrent' ] is True )
if input_statement_idx is not None : # execute before input stuff
for statement in self . step . statements [ : input_statement_idx ] :
if statement [ 0 ] == ':' : # wait for all dependent targets to be resolved to be resolved
key , value = statement [ 1 : 3 ]
if key != 'depends' :
raise ValueError ( f'Step input should be specified before {key}' )
while True :
try :
args , kwargs = SoS_eval ( f'__null_func__({value})' , extra_dict = { '__null_func__' : __null_func__ , 'output_from' : __output_from__ , 'named_output' : __named_output__ , 'traced' : __traced__ } )
dfiles = expand_depends_files ( * args )
# dfiles can be Undetermined
runner = self . process_depends_args ( dfiles , ** kwargs )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration as e :
pass
except ( UnknownTarget , RemovedTarget ) as e :
runner = self . handle_unknown_target ( e )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration as e :
pass
continue
except UnavailableLock :
raise
except Exception as e :
raise RuntimeError ( f'Failed to process step {key} ({value.strip()}): {e}' )
break
else :
try :
self . execute ( statement [ 1 ] )
except StopInputGroup as e : # stop before substeps , because there is no output statement before it
# we do not have to worry about keep _ output
if e . message :
env . logger . info ( e . message )
return self . collect_result ( )
# input statement
stmt = self . step . statements [ input_statement_idx ] [ 2 ]
self . log ( 'input statement' , stmt )
while True : # wait for all targets to be resovled
try :
args , kwargs = SoS_eval ( f"__null_func__({stmt})" , extra_dict = { '__null_func__' : __null_func__ , 'output_from' : __output_from__ , 'named_output' : __named_output__ , 'traced' : __traced__ } )
# Files will be expanded differently with different running modes
input_files : sos_targets = expand_input_files ( * args , ** { k : v for k , v in kwargs . items ( ) if k not in SOS_INPUT_OPTIONS } )
runner = self . process_input_args ( input_files , ** { k : v for k , v in kwargs . items ( ) if k in SOS_INPUT_OPTIONS } )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration as e :
self . _substeps = e . value
if 'concurrent' in kwargs and kwargs [ 'concurrent' ] is False :
self . concurrent_substep = False
except ( UnknownTarget , RemovedTarget ) as e :
runner = self . handle_unknown_target ( e )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration as e :
pass
continue
except UnavailableLock :
raise
except Exception as e :
raise ValueError ( f'Failed to process input statement {stmt}: {e}' )
break
input_statement_idx += 1
elif env . sos_dict [ 'step_input' ] . groups : # if default has groups . . .
# default case
self . _substeps = env . sos_dict [ 'step_input' ] . groups
# assuming everything starts from 0 is after input
input_statement_idx = 0
else : # default case
self . _substeps = [ env . sos_dict [ 'step_input' ] ]
# assuming everything starts from 0 is after input
input_statement_idx = 0
self . proc_results = [ ]
self . vars_to_be_shared = set ( )
if 'shared' in self . step . options :
self . vars_to_be_shared = parse_shared_vars ( self . step . options [ 'shared' ] )
self . vars_to_be_shared = sorted ( [ x [ 5 : ] if x . startswith ( 'step_' ) else x for x in self . vars_to_be_shared if x not in ( 'step_' , 'step_input' , 'step_output' , 'step_depends' ) ] )
self . shared_vars = [ { } for x in self . _substeps ]
# run steps after input statement , which will be run multiple times for each input
# group .
env . sos_dict . set ( '__num_groups__' , len ( self . _substeps ) )
# determine if a single index or the whole step should be skipped
skip_index = False
# signatures of each index , which can remain to be None if no output
# is defined .
self . output_groups = [ sos_targets ( [ ] ) for x in self . _substeps ]
self . depends_groups = [ sos_targets ( [ ] ) for x in self . _substeps ]
# used to prevent overlapping output from substeps
self . _all_outputs = set ( )
self . _subworkflow_results = [ ]
if any ( 'sos_run' in x [ 1 ] for x in self . step . statements [ input_statement_idx : ] ) and 'shared' not in self . step . options and not self . step . task and self . step . statements [ - 1 ] [ 0 ] == '!' and ( len ( self . step . statements ) == 1 or self . step . statements [ - 2 ] [ 0 ] == ':' ) and is_sos_run_the_only_last_stmt ( self . step . statements [ - 1 ] [ 1 ] ) :
env . sos_dict . set ( '__concurrent_subworkflow__' , True )
if self . concurrent_substep :
if len ( self . _substeps ) <= 1 or env . config [ 'run_mode' ] == 'dryrun' :
self . concurrent_substep = False
elif len ( [ x for x in self . step . statements [ input_statement_idx : ] if x [ 0 ] != ':' ] ) > 1 :
self . concurrent_substep = False
env . logger . debug ( 'Substeps are executed sequentially because of existence of directives between statements.' )
elif any ( 'sos_run' in x [ 1 ] for x in self . step . statements [ input_statement_idx : ] ) :
self . concurrent_substep = False
env . logger . debug ( 'Substeps are executed sequentially because of existence of multiple nested workflow.' )
else :
self . prepare_substep ( )
try :
self . completed [ '__substep_skipped__' ] = 0
self . completed [ '__substep_completed__' ] = len ( self . _substeps )
self . _completed_concurrent_substeps = 0
# pending signatures are signatures for steps with external tasks
pending_signatures = [ None for x in self . _substeps ]
for idx , g in enumerate ( self . _substeps ) : # other variables
_vars = { }
# now , let us expose target level variables as lists
if len ( g ) > 1 :
names = set . union ( * [ set ( x . _dict . keys ( ) ) for x in g . _targets ] )
elif len ( g ) == 1 :
names = set ( g . _targets [ 0 ] . _dict . keys ( ) )
else :
names = set ( )
for name in names :
_vars [ name ] = [ x . get ( name ) for x in g . _targets ]
# then we expose all group level variables
_vars . update ( g . _dict )
_vars . update ( env . sos_dict [ 'step_input' ] . _dict )
env . sos_dict . update ( _vars )
env . sos_dict . set ( '_input' , copy . deepcopy ( g ) )
# set vars to _ input
# env . sos _ dict [ ' _ input ' ] . set ( * * v )
self . log ( '_input' )
env . sos_dict . set ( '_index' , idx )
# in interactive mode , because sos _ dict are always shared
# execution of a substep , especially when it calls a nested
# workflow , would change step _ name , _ _ step _ context _ _ etc , and
# we will have to reset these variables to make sure the next
# substep would execute normally . Batch mode is immune to this
# problem because nested workflows are executed in their own
# process / context etc
if env . config [ 'run_mode' ] == 'interactive' :
env . sos_dict . set ( 'step_name' , self . step . step_name ( ) )
env . sos_dict . set ( 'step_id' , hash ( ( env . sos_dict [ "workflow_id" ] , env . sos_dict [ "step_name" ] , self . step . md5 ) ) )
# used by nested workflow
env . sos_dict . set ( '__step_context__' , self . step . context )
pre_statement = [ ]
if not any ( st [ 0 ] == ':' and st [ 1 ] == 'output' for st in self . step . statements [ input_statement_idx : ] ) and '__default_output__' in env . sos_dict :
pre_statement = [ [ ':' , 'output' , '_output' ] ]
# if there is no statement , no task , claim success
post_statement = [ ]
if not any ( st [ 0 ] == '!' for st in self . step . statements [ input_statement_idx : ] ) :
if self . step . task : # if there is only task , we insert a fake statement so that it can be executed by the executor
post_statement = [ [ '!' , '' ] ]
else : # complete case : no step , no statement
send_message_to_controller ( [ 'progress' , 'substep_completed' , env . sos_dict [ 'step_id' ] ] )
all_statements = pre_statement + self . step . statements [ input_statement_idx : ] + post_statement
is_input_verified = True
for statement_idx , statement in enumerate ( all_statements ) :
is_last_runblock = statement_idx == len ( all_statements ) - 1
# if input is undertermined , we can only process output :
if not g . valid ( ) and statement [ 0 ] != ':' :
raise RuntimeError ( 'Undetermined input encountered' )
if statement [ 0 ] == ':' :
key , value = statement [ 1 : 3 ]
# output , depends , and process can be processed multiple times
while True : # loop for all unresolved targets to be resolved
try :
args , kwargs = SoS_eval ( f'__null_func__({value})' , extra_dict = { '__null_func__' : __null_func__ , 'output_from' : __output_from__ , 'named_output' : __named_output__ , 'traced' : __traced__ } )
# dynamic output or dependent files
if key == 'output' : # if output is defined , its default value needs to be cleared
if idx == 0 :
env . sos_dict . set ( 'step_output' , sos_targets ( ) )
ofiles : sos_targets = expand_output_files ( value , * args , ** { k : v for k , v in kwargs . items ( ) if k not in SOS_OUTPUT_OPTIONS } )
if g . valid ( ) and ofiles . valid ( ) :
if any ( x in g . _targets for x in ofiles if not isinstance ( x , sos_step ) ) :
raise RuntimeError ( f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}' )
# set variable _ output and output
self . process_output_args ( ofiles , ** { k : v for k , v in kwargs . items ( ) if k in SOS_OUTPUT_OPTIONS } )
self . output_groups [ idx ] = env . sos_dict [ '_output' ]
elif key == 'depends' :
try :
dfiles = expand_depends_files ( * args )
# dfiles can be Undetermined
runner = self . process_depends_args ( dfiles , ** kwargs )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration as e :
pass
self . depends_groups [ idx ] = env . sos_dict [ '_depends' ]
self . log ( '_depends' )
except Exception as e : # env . logger . info ( e )
raise
else :
raise RuntimeError ( f'Unrecognized directive {key}' )
# everything is ok , break
break
except ( UnknownTarget , RemovedTarget ) as e :
runner = self . handle_unknown_target ( e )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration as e :
pass
continue
except UnavailableLock :
raise
except Exception as e : # if input is Undertermined , it is possible that output cannot be processed
# due to that , and we just return
if not g . valid ( ) :
env . logger . debug ( e )
return self . collect_result ( )
raise RuntimeError ( f'Failed to process step {key} ({value.strip()}): {e}' )
elif is_last_runblock :
try :
if self . concurrent_substep :
if 'STEP' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] :
env . log_to_file ( 'STEP' , f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed' )
# the ignatures are supposed to be written by substep worker , however
# the substep worker might send tasks back to the step worker and
# we should write the signatures after the tasks are completed
if env . config [ 'sig_mode' ] != 'ignore' and not env . sos_dict [ '_output' ] . unspecified ( ) and self . step . task :
pending_signatures [ idx ] = RuntimeInfo ( statementMD5 ( [ statement [ 1 ] , self . step . task ] ) , env . sos_dict [ '_input' ] , env . sos_dict [ '_output' ] , env . sos_dict [ '_depends' ] , env . sos_dict [ '__signature_vars__' ] , shared_vars = self . vars_to_be_shared )
# step _ output : needed only when it is undetermined
# step _ input : not needed
# _ input , _ output , _ depends , _ index : needed
# step _ name : for debug scripts
# step _ id , workflow _ id : for reporting to controller
# ' _ _ signature _ vars _ _ ' to be used for signature creation
# _ _ step _ context _ _ is not needed because substep
# executor does not support nested workflow
proc_vars = env . sos_dict [ '__signature_vars__' ] | { '_input' , '_output' , '_depends' , '_index' , 'step_output' , 'step_name' , '_runtime' , 'step_id' , 'workflow_id' , '__num_groups__' , '__signature_vars__' }
self . proc_results . append ( { } )
self . submit_substep ( dict ( stmt = statement [ 1 ] , global_def = self . step . global_def , # 1225 : the step might contain large variables from global section , but
# we do not have to sent them if they are not used in substeps .
global_vars = { x : y for x , y in self . step . global_vars . items ( ) if x in env . sos_dict [ '__signature_vars__' ] } , task = self . step . task , task_params = self . step . task_params , proc_vars = env . sos_dict . clone_selected_vars ( proc_vars ) , shared_vars = self . vars_to_be_shared , config = env . config ) )
# we check if the previous task has been completed and process them
# because further steps might need to be done
try :
runner = self . process_returned_substep_result ( wait = False )
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration :
pass
else :
if env . config [ 'sig_mode' ] == 'ignore' or env . sos_dict [ '_output' ] . unspecified ( ) :
if 'STEP' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] :
env . log_to_file ( 'STEP' , f'Execute substep {env.sos_dict["step_name"]} without signature' )
try :
if is_input_verified :
verify_input ( )
is_input_verified = False
if env . sos_dict . get ( '__concurrent_subworkflow__' , False ) :
self . _subworkflow_results . append ( self . execute ( statement [ 1 ] , return_result = True ) )
else :
self . execute ( statement [ 1 ] )
finally :
if not self . step . task : # if no task , this step is _ _ completed
# complete case : local skip without task
send_message_to_controller ( [ 'progress' , 'substep_completed' , env . sos_dict [ 'step_id' ] ] )
if 'shared' in self . step . options :
try :
self . shared_vars [ env . sos_dict [ '_index' ] ] . update ( { x : env . sos_dict [ x ] for x in self . vars_to_be_shared if x in env . sos_dict } )
except Exception as e :
raise ValueError ( f'Missing shared variable {e}.' )
else :
sig = RuntimeInfo ( statementMD5 ( [ statement [ 1 ] , self . step . task ] ) , env . sos_dict [ '_input' ] , env . sos_dict [ '_output' ] , env . sos_dict [ '_depends' ] , env . sos_dict [ '__signature_vars__' ] , shared_vars = self . vars_to_be_shared )
if 'STEP' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] :
env . log_to_file ( 'STEP' , f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}' )
# if singaure match , we skip the substep even if
# there are tasks .
matched = validate_step_sig ( sig )
skip_index = bool ( matched )
if matched :
if env . sos_dict [ 'step_output' ] . undetermined ( ) :
self . output_groups [ env . sos_dict [ '_index' ] ] = matched [ "output" ]
if 'vars' in matched :
self . shared_vars [ env . sos_dict [ '_index' ] ] . update ( matched [ "vars" ] )
# complete case : local skip without task
send_message_to_controller ( [ 'progress' , 'substep_ignored' , env . sos_dict [ 'step_id' ] ] )
# do not execute the rest of the statement
break
else :
sig . lock ( )
try :
if is_input_verified :
verify_input ( )
is_input_verified = False
if env . sos_dict . get ( '__concurrent_subworkflow__' , False ) :
self . _subworkflow_results . append ( self . execute ( statement [ 1 ] , return_result = True ) )
else :
self . execute ( statement [ 1 ] )
if 'shared' in self . step . options :
try :
self . shared_vars [ env . sos_dict [ '_index' ] ] . update ( { x : env . sos_dict [ x ] for x in self . vars_to_be_shared if x in env . sos_dict } )
except Exception as e :
raise ValueError ( f'Missing shared variable {e}.' )
finally : # if this is the end of substep , save the signature
# otherwise we need to wait for the completion
# of the task .
if not self . step . task :
if env . sos_dict [ 'step_output' ] . undetermined ( ) :
output = reevaluate_output ( )
self . output_groups [ env . sos_dict [ '_index' ] ] = output
sig . set_output ( output )
sig . write ( )
# complete case : local execution without task
send_message_to_controller ( [ 'progress' , 'substep_completed' , env . sos_dict [ 'step_id' ] ] )
else :
pending_signatures [ idx ] = sig
sig . release ( )
except StopInputGroup as e :
if not e . keep_output :
clear_output ( )
self . output_groups [ idx ] = sos_targets ( [ ] )
if e . message :
env . logger . info ( e . message )
skip_index = True
break
except Exception as e :
clear_output ( )
raise
else : # if it is not the last statement group ( e . g . statements before : output )
# we execute locally without anything like signature
if is_input_verified :
verify_input ( )
is_input_verified = False
try :
self . execute ( statement [ 1 ] )
except StopInputGroup as e :
if not e . keep_output :
clear_output ( )
self . output_groups [ idx ] = sos_targets ( [ ] )
if e . message :
env . logger . info ( e . message )
skip_index = True
break
except Exception as e :
clear_output ( )
raise
# if there is no statement , but there are tasks , we should
# check signature here .
if not any ( x [ 0 ] == '!' for x in self . step . statements [ input_statement_idx : ] ) and self . step . task and not self . concurrent_substep and env . config [ 'sig_mode' ] != 'ignore' and not env . sos_dict [ '_output' ] . unspecified ( ) :
sig = RuntimeInfo ( statementMD5 ( [ self . step . task ] ) , env . sos_dict [ '_input' ] , env . sos_dict [ '_output' ] , env . sos_dict [ '_depends' ] , env . sos_dict [ '__signature_vars__' ] , shared_vars = self . vars_to_be_shared )
if 'STEP' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] :
env . log_to_file ( 'STEP' , f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}' )
matched = validate_step_sig ( sig )
skip_index = bool ( matched )
if matched :
if env . sos_dict [ 'step_output' ] . undetermined ( ) :
self . output_groups [ env . sos_dict [ '_index' ] ] = matched [ "output" ]
self . shared_vars [ env . sos_dict [ '_index' ] ] . update ( matched [ "vars" ] )
# complete case : step with task ignored
send_message_to_controller ( [ 'progress' , 'substep_ignored' , env . sos_dict [ 'step_id' ] ] )
pending_signatures [ idx ] = sig
# if this index is skipped , go directly to the next one
if skip_index :
self . completed [ '__substep_skipped__' ] += 1
self . completed [ '__substep_completed__' ] -= 1
skip_index = False
continue
# if concurrent input group , tasks are handled in substep
if self . concurrent_substep or not self . step . task :
continue
if env . config [ 'run_mode' ] == 'dryrun' and env . sos_dict [ '_index' ] != 0 :
continue
# check if the task is active
if 'active' in env . sos_dict [ '_runtime' ] :
active = env . sos_dict [ '_runtime' ] [ 'active' ]
if active is True :
pass
elif active is False :
continue
elif isinstance ( active , int ) :
if active >= 0 and env . sos_dict [ '_index' ] != active :
continue
if active < 0 and env . sos_dict [ '_index' ] != active + env . sos_dict [ '__num_groups__' ] :
continue
elif isinstance ( active , Sequence ) :
allowed_index = list ( [ x if x >= 0 else env . sos_dict [ '__num_groups__' ] + x for x in active ] )
if env . sos_dict [ '_index' ] not in allowed_index :
continue
elif isinstance ( active , slice ) :
allowed_index = list ( range ( env . sos_dict [ '__num_groups__' ] ) ) [ active ]
if env . sos_dict [ '_index' ] not in allowed_index :
continue
else :
raise RuntimeError ( f'Unacceptable value for option active: {active}' )
self . log ( 'task' )
try :
task_id , taskdef , task_vars = create_task ( self . step . global_def , self . step . global_vars , self . step . task , self . step . task_params )
task = self . submit_task ( { 'index' : env . sos_dict [ '_index' ] , 'task_id' : task_id , 'task_def' : taskdef , 'task_vars' : task_vars } )
self . proc_results . append ( task )
except Exception as e : # FIXME : cannot catch exception from subprocesses
if env . verbosity > 2 :
sys . stderr . write ( get_traceback ( ) )
raise RuntimeError ( f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}' )
# if not concurrent , we have to wait for the completion of the task
if 'concurrent' in env . sos_dict [ '_runtime' ] and env . sos_dict [ '_runtime' ] [ 'concurrent' ] is False : # in this case the steps must be executed not concurrently
runner = self . wait_for_results ( all_submitted = False )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration :
pass
# endfor loop for each input group
if self . _subworkflow_results :
try :
runner = self . wait_for_subworkflows ( self . _subworkflow_results )
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration :
pass
env . sos_dict . pop ( '__concurrent_subworkflow__' )
runner = self . wait_for_results ( all_submitted = True )
try :
yreq = next ( runner )
while True :
yres = yield yreq
yreq = runner . send ( yres )
except StopIteration :
pass
for idx , res in enumerate ( self . proc_results ) :
if 'sig_skipped' in res :
self . completed [ '__substep_skipped__' ] += 1
self . completed [ '__substep_completed__' ] -= 1
if 'output' in res :
self . output_groups [ idx ] = res [ "output" ]
# check results
for proc_result in [ x for x in self . proc_results if x [ 'ret_code' ] == 0 ] :
if 'stdout' in proc_result and proc_result [ 'stdout' ] :
sys . stdout . write ( proc_result [ 'stdout' ] )
if 'stderr' in proc_result and proc_result [ 'stderr' ] :
sys . stderr . write ( proc_result [ 'stderr' ] )
# now that output is settled , we can write remaining signatures
for idx , res in enumerate ( self . proc_results ) :
if pending_signatures [ idx ] is not None and res [ 'ret_code' ] == 0 and not 'sig_skipped' in res :
pending_signatures [ idx ] . write ( )
if res [ 'ret_code' ] != 0 and 'output' in res :
clear_output ( output = res [ 'output' ] )
for proc_result in [ x for x in self . proc_results if x [ 'ret_code' ] != 0 ] :
if 'stdout' in proc_result and proc_result [ 'stdout' ] :
sys . stdout . write ( proc_result [ 'stdout' ] )
if 'stderr' in proc_result and proc_result [ 'stderr' ] :
sys . stderr . write ( proc_result [ 'stderr' ] )
if 'exception' in proc_result :
excp = proc_result [ 'exception' ]
if isinstance ( excp , StopInputGroup ) :
if excp . message :
env . logger . info ( excp . message )
self . output_groups [ proc_result [ 'index' ] ] = sos_targets ( [ ] )
# elif isinstance ( e , RemovedTarget ) :
# in theory , we should be able to handled removed target from here
# by rerunning the substep , but we it is too much work for this
# corner case . Let us simply rerunt he entire step .
else :
raise excp
else :
raise RuntimeError ( f"Substep failed with return code {proc_result['ret_code']}" )
# if output is Undetermined , re - evalulate it
# finalize output from output _ groups because some output might be skipped
# this is the final version of the output but we do maintain output
# during the execution of step , for compatibility .
env . sos_dict . set ( 'step_output' , sos_targets ( [ ] ) . _add_groups ( self . output_groups ) )
env . sos_dict . set ( 'step_depends' , sos_targets ( [ ] ) . _add_groups ( self . depends_groups ) )
# if there exists an option shared , the variable would be treated as
# provides = sos _ variable ( ) , and then as step _ output
if 'shared' in self . step . options :
self . shared_vars = evaluate_shared ( self . shared_vars , self . step . options [ 'shared' ] )
env . sos_dict . quick_update ( self . shared_vars )
self . log ( 'output' )
self . verify_output ( )
substeps = self . completed [ '__substep_completed__' ] + self . completed [ '__substep_skipped__' ]
self . completed [ '__step_completed__' ] = self . completed [ '__substep_completed__' ] / substeps
self . completed [ '__step_skipped__' ] = self . completed [ '__substep_skipped__' ] / substeps
if self . completed [ '__step_completed__' ] . is_integer ( ) :
self . completed [ '__step_completed__' ] = int ( self . completed [ '__step_completed__' ] )
if self . completed [ '__step_skipped__' ] . is_integer ( ) :
self . completed [ '__step_skipped__' ] = int ( self . completed [ '__step_skipped__' ] )
def file_only ( targets ) :
if not isinstance ( targets , sos_targets ) :
env . logger . warning ( f"Unexpected input or output target for reporting. Empty list returned: {targets}" )
return [ ]
return [ ( str ( x ) , x . size ( ) ) for x in targets . _targets if isinstance ( x , file_target ) ]
step_info = { 'step_id' : self . step . md5 , 'start_time' : self . start_time , 'stepname' : self . step . step_name ( True ) , 'substeps' : len ( self . _substeps ) , 'input' : file_only ( env . sos_dict [ 'step_input' ] ) , 'output' : file_only ( env . sos_dict [ 'step_output' ] ) , 'completed' : dict ( self . completed ) , 'end_time' : time . time ( ) }
send_message_to_controller ( [ 'workflow_sig' , 'step' , env . sos_dict [ "workflow_id" ] , repr ( step_info ) ] )
return self . collect_result ( )
finally :
if self . concurrent_substep :
close_socket ( self . result_pull_socket , 'substep collector' )
|
def license_loader ( lic_dir = LIC_DIR ) :
    """Load every license file found in *lic_dir*.

    Each file in the directory is read in full and wrapped in a
    ``License`` object.

    :param lic_dir: directory to scan (defaults to ``LIC_DIR``)
    :return: list of ``License`` instances, one per file
    """
    licenses = [ ]
    for entry in os . listdir ( lic_dir ) :
        path = os . path . join ( lic_dir , entry )
        with open ( path ) as handle :
            licenses . append ( License ( handle . read ( ) ) )
    return licenses
|
def _format_batch_statuses ( statuses , batch_ids , tracker ) :
    """Takes a statuses dict and formats it for transmission with Protobuf and
    ZMQ .

    Args:
        statuses (dict of int): Dict with batch ids as the key, status as value
        batch_ids (list of str): The batch ids in their original order
        tracker (BatchTracker): A batch tracker with access to invalid info

    Returns:
        list of ClientBatchStatus protobuf messages, one per batch id,
        preserving the original ordering.
    """
    proto_statuses = [ ]
    for batch_id in batch_ids :
        status = statuses [ batch_id ]
        if status == client_batch_submit_pb2 . ClientBatchStatus . INVALID :
            invalid_txns = tracker . get_invalid_txn_info ( batch_id )
            # The protobuf field is 'transaction_id'; the tracker uses 'id'.
            for txn_info in invalid_txns :
                try :
                    txn_info [ 'transaction_id' ] = txn_info . pop ( 'id' )
                except KeyError as err :
                    LOGGER . debug ( err )
        else :
            invalid_txns = None
        proto_statuses . append ( client_batch_submit_pb2 . ClientBatchStatus (
            batch_id = batch_id ,
            status = status ,
            invalid_transactions = invalid_txns ) )
    return proto_statuses
|
def log_summary ( self ) :
    """Log a summary of all the participants' status codes.

    Tallies how many participants are in each status, logs the
    tally sorted by status code, and returns it.

    :return: list of (status, count) pairs sorted by status
    """
    status_rows = Participant . query . with_entities ( Participant . status ) . all ( )
    tally = Counter ( row . status for row in status_rows )
    summary = sorted ( tally . items ( ) , key = itemgetter ( 0 ) )
    self . log ( "Status summary: {}" . format ( str ( summary ) ) )
    return summary
|
def get_from_string ( cls , string_condition ) :
    """Convert string value obtained from k8s API to PodCondition enum value

    :param string_condition: str, condition value from Kubernetes API
    :return: PodCondition (``UNKNOWN`` for any unrecognized string)
    """
    # Map each Kubernetes condition string to the matching enum member name;
    # anything unrecognized falls back to UNKNOWN.
    member_names = {
        'PodScheduled' : 'SCHEDULED' ,
        'Ready' : 'READY' ,
        'Initialized' : 'INITIALIZED' ,
        'Unschedulable' : 'UNSCHEDULABLE' ,
        'ContainersReady' : 'CONTAINERS_READY' ,
    }
    return getattr ( cls , member_names . get ( string_condition , 'UNKNOWN' ) )
|
def nlmsg_find_attr ( nlh , hdrlen , attrtype ) :
    """Find a specific attribute in a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).
    attrtype -- type of attribute to look for (integer).

    Returns:
    The first attribute which matches the specified type (nlattr class instance).
    """
    attr_stream = nlmsg_attrdata ( nlh , hdrlen )
    stream_len = nlmsg_attrlen ( nlh , hdrlen )
    return nla_find ( attr_stream , stream_len , attrtype )
|
def get_block ( block_id , api_code = None ) :
    """Get a single block based on a block hash.

    :param str block_id: block hash to look up
    :param str api_code: Blockchain.info API code (optional)
    :return: an instance of :class:`Block` class
    """
    resource = 'rawblock/' + block_id
    if api_code is not None :
        # Append the API code as a query parameter when supplied.
        resource = resource + '?api_code=' + api_code
    payload = util . call_api ( resource )
    return Block ( json . loads ( payload ) )
|
def plot_imag ( fignum , Bimag , Mimag , s ) :
    """function to plot d ( Delta M ) / dB curves

    Draws Mimag against Bimag as a red curve on figure *fignum*,
    clearing any previous contents first.

    Parameters
    ----------
    fignum : int
        Matplotlib figure number to draw into.
    Bimag : sequence of float
        Field values for the x axis, in Tesla.
    Mimag : sequence of float
        Normalized magnetization (M/Ms) values for the y axis.
    s : str
        Title to place above the plot.
    """
    # Select (or create) the target figure and wipe previous contents.
    plt . figure ( num = fignum )
    plt . clf ( )
    # Version stamp is only shown for interactive (non-server) runs.
    if not isServer :
        plt . figtext ( .02 , .01 , version_num )
    plt . plot ( Bimag , Mimag , 'r' )
    plt . xlabel ( 'B (T)' )
    plt . ylabel ( 'M/Ms' )
    # Vertical reference line marking zero field.
    plt . axvline ( 0 , color = 'k' )
    plt . title ( s )
|
def get_certificates_v1 ( self ) :
    """Return a list of :class:`asn1crypto.x509.Certificate` which are found
    in the META-INF folder (v1 signing).

    Note that we simply extract all certificates regardless of the signer.
    Therefore this is just a list of all certificates found in all signers.
    """
    return [
        x509 . Certificate . load ( self . get_certificate_der ( signature_name ) )
        for signature_name in self . get_signature_names ( )
    ]
|
def resetSession ( self , username = None , password = None , verify = True ) :
    """resets the session

    Disconnects the current session (if any) and replaces it with a
    fresh ``AikidoSession`` built from the supplied credentials.

    :param username: user name for the new session (optional)
    :param password: password for the new session (optional)
    :param verify: whether the new session should verify TLS certificates
    """
    self . disconnectSession ( )
    self . session = AikidoSession ( username , password , verify )
|
def from_args ( cls , args , project_profile_name = None ) :
    """Given the raw profiles as read from disk and the name of the desired
    profile if specified, return the profile component of the runtime
    config.

    :param args argparse.Namespace: The arguments as parsed from the cli.
    :param project_profile_name Optional[str]: The profile name, if
        specified in a project.
    :raises DbtProjectError: If there is no profile name specified in the
        project or the command line arguments, or if the specified profile
        is not found
    :raises DbtProfileError: If the profile is invalid or missing, or the
        target could not be found.
    :returns Profile: The new Profile object.
    """
    # CLI flags are optional; fall back to sensible defaults when absent.
    threads = getattr ( args , 'threads' , None )
    target = getattr ( args , 'target' , None )
    cli_vars = parse_cli_vars ( getattr ( args , 'vars' , '{}' ) )
    selected_profile = cls . pick_profile_name ( args . profile , project_profile_name )
    return cls . from_raw_profiles (
        raw_profiles = read_profile ( args . profiles_dir ) ,
        profile_name = selected_profile ,
        cli_vars = cli_vars ,
        target_override = target ,
        threads_override = threads ,
    )
|
def ftdi_to_clkbits ( baudrate ) : # from libftdi
    """Encode *baudrate* into FTDI clock-divisor register values.

    Returns ``(actual_baud, value, index)`` where *value* and *index*
    are the low and high 16-bit halves of the encoded divisor, and
    *actual_baud* is the rate the hardware will actually produce.

    Reference encodings (value bytes => divisor => rate), from libftdi:
        10,27 => 10000 => 300       38,41 => 312.5 => 9,600
        9C,80 => 156   => 19,230    34,00 => 52    => 57,692
        1A,00 => 26    => 115,384   0D,00 => 13    => 230,769
    """
    clk = 48000000
    clk_div = 16
    # FTDI's encoding order for the 3 sub-integer divisor bits.
    frac_code = [ 0 , 3 , 2 , 4 , 1 , 5 , 6 , 7 ]

    if baudrate >= clk / clk_div :
        # Divisor 1 is special-cased and encoded as 0.
        enc_div = 0
        actual_baud = clk // clk_div
    elif baudrate >= clk / ( clk_div + clk_div / 2 ) :
        # Divisor 1.5 is special-cased and encoded as 1.
        enc_div = 1
        actual_baud = clk // ( clk_div + clk_div // 2 )
    elif baudrate >= clk / ( 2 * clk_div ) :
        # Divisor 2 is special-cased and encoded as 2.
        enc_div = 2
        actual_baud = clk // ( 2 * clk_div )
    else :
        # General case: work in 1/16ths of the divisor so we keep
        # 3 fractional bits plus one extra bit for rounding.
        sixteenths = clk * 16 // clk_div // baudrate
        best_div = ( sixteenths + 1 ) // 2
        if best_div > 0x20000 :
            best_div = 0x1ffff
        actual_baud = clk * 16 // clk_div // best_div
        actual_baud = ( actual_baud + 1 ) // 2
        enc_div = ( best_div >> 3 ) + ( frac_code [ best_div & 0x7 ] << 14 )

    return actual_baud , enc_div & 0xFFFF , enc_div >> 16
|
def list_alarms ( self , entity , limit = None , marker = None , return_next = False ) :
    """Returns a list of all the alarms created on the specified entity.

    Pagination arguments are forwarded unchanged to the entity's own
    ``list_alarms`` method.
    """
    pagination = dict ( limit = limit , marker = marker , return_next = return_next )
    return entity . list_alarms ( ** pagination )
|
def deserialize ( self , payload , obj_type ) :  # type: (str, Union[T, str]) -> Any
    """Deserializes payload into an instance of provided ``obj_type``.

    ``obj_type`` can be a primitive type, a generic model object, or a
    list/dict of model objects given in string form, e.g.
    ``'list[a.b.C]'`` or ``'dict(str, a.b.C)'``. Model classes declare
    which payload values to deserialize via a ``deserialized_types``
    dict, with optional name remapping in ``attribute_map``; see
    :py:class:`ask_sdk_model.request_envelope.RequestEnvelope` for an
    example implementation.

    :param payload: data to be deserialized.
    :type payload: str
    :param obj_type: resolved class name for deserialized object
    :type obj_type: Union[object, str]
    :return: deserialized object
    :rtype: object
    :raises: :py:class:`ask_sdk_core.exceptions.SerializationException`
    """
    if payload is None :
        return None
    try :
        parsed = json . loads ( payload )
    except Exception :
        # Any parse failure is surfaced as a serialization error.
        raise SerializationException ( "Couldn't parse response body: {}" . format ( payload ) )
    return self . __deserialize ( parsed , obj_type )
|
def _get_pk ( self ) :
    """Override the default _get_pk method to retrieve the real pk value if we
    have a SingleValueField or a RedisModel instead of a real PK value.
    """
    pk = super ( ExtendedCollectionManager , self ) . _get_pk ( )
    # Plain PK values (or no PK at all) pass straight through.
    if pk is None or not isinstance ( pk , RawFilter ) :
        return pk
    # A RawFilter wraps either a RedisModel (use its pk) or a
    # SingleValueField (use its value).
    wrapped = pk . value
    if isinstance ( wrapped , RedisModel ) :
        return wrapped . pk . get ( )
    if isinstance ( wrapped , SingleValueField ) :
        return wrapped . proxy_get ( )
    raise ValueError ( u'Invalide filter value for a PK: %s' % wrapped )
|
def reset_cmd_timeout ( self ) :
    """Reset timeout for command execution.

    Cancels any pending timeout handle, then schedules a new one that
    will close the transport once ``client.timeout`` seconds elapse.
    """
    pending = self . _cmd_timeout
    if pending :
        pending . cancel ( )
    self . _cmd_timeout = self . loop . call_later (
        self . client . timeout , self . transport . close )
|
def add_cidr_rules ( self , rules ) :
    """Add cidr rules to security group via boto.

    Args:
        rules (list): Allowed Security Group ports and protocols.

    Returns:
        True: Upon successful completion.

    Raises:
        SpinnakerSecurityGroupError: boto3 call failed to add CIDR block to
            Security Group.
    """
    session = boto3 . session . Session ( profile_name = self . env , region_name = self . region )
    client = session . client ( 'ec2' )
    group_id = get_security_group_id ( self . app_name , self . env , self . region )
    for rule in rules :
        permission = {
            'IpProtocol' : rule [ 'protocol' ] ,
            'FromPort' : rule [ 'start_port' ] ,
            'ToPort' : rule [ 'end_port' ] ,
            'IpRanges' : [ { 'CidrIp' : rule [ 'app' ] } ] ,
        }
        data = { 'DryRun' : False , 'GroupId' : group_id , 'IpPermissions' : [ permission ] }
        self . log . debug ( 'Security Group rule: %s' , data )
        try :
            client . authorize_security_group_ingress ( ** data )
        except botocore . exceptions . ClientError as error :
            # A pre-existing identical rule is fine; anything else is fatal.
            if 'InvalidPermission.Duplicate' in str ( error ) :
                self . log . debug ( 'Duplicate rule exist, that is OK.' )
            else :
                msg = 'Unable to add cidr rules to {}' . format ( rule . get ( 'app' ) )
                self . log . error ( msg )
                raise SpinnakerSecurityGroupError ( msg )
    return True
|
def get_process ( cmd ) :
    """Launch *cmd* as a subprocess with fully piped stdio and return it.

    On Windows a ``STARTUPINFO`` is supplied so that no console window
    flashes up when the child starts; on other platforms the process is
    started the same way minus that flag.

    :param cmd: command to execute (sequence of program + arguments)
    :return: the started :class:`subprocess.Popen` instance
    """
    # Build the shared keyword arguments once instead of duplicating the
    # Popen call per platform (the two branches previously differed only
    # by 'startupinfo').
    popen_kwargs = {
        'stdout' : subprocess . PIPE ,
        'stderr' : subprocess . STDOUT ,
        'stdin' : subprocess . PIPE ,
        'shell' : False ,
    }
    if sys . platform . startswith ( 'win' ) :
        # Suppress the console window that would otherwise appear.
        startupinfo = subprocess . STARTUPINFO ( )
        startupinfo . dwFlags |= subprocess . STARTF_USESHOWWINDOW
        popen_kwargs [ 'startupinfo' ] = startupinfo
    return subprocess . Popen ( cmd , ** popen_kwargs )
|
def get_module ( self , name , folder = None ) :
    """Returns a `PyObject` if the module was found.

    Builtin modules take precedence over filesystem modules; raises
    ``ModuleNotFoundError`` when the name cannot be resolved either way.
    """
    # Builtin modules short-circuit the filesystem lookup entirely.
    builtin = self . pycore . builtin_module ( name )
    if builtin is not None :
        return builtin
    resource = self . find_module ( name , folder )
    if resource is None :
        raise ModuleNotFoundError ( 'Module %s not found' % name )
    return self . pycore . resource_to_pyobject ( resource )
|
def lemmatize ( text_string ) :
    '''Returns base form of text_string using NLTK's WordNetLemmatizer as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed

    ``None`` and the empty string both yield ``""`` without touching
    the lemmatizer.
    '''
    if not isinstance ( text_string , str ) :
        # None is tolerated and mapped to the empty string; any other
        # non-string input is an error.
        if text_string is None :
            return ""
        raise InputError ( "string not passed as primary argument" )
    if text_string == "" :
        return ""
    return LEMMATIZER . lemmatize ( text_string )
|
def add_init_script ( self , file , name ) :
    """Add this file to the init.d directory.

    Writes the script content to ``/etc/init.d/<name>``, marks it
    owner-read/write/executable, and registers it with ``update-rc.d``
    so it runs at boot.

    :param file: full text of the init script to install
    :param name: target script name under /etc/init.d
    """
    f_path = os . path . join ( "/etc/init.d" , name )
    # Context manager guarantees the handle is closed even if the
    # write fails (the previous open/write/close leaked on error).
    with open ( f_path , "w" ) as script :
        script . write ( file )
    os . chmod ( f_path , stat . S_IREAD | stat . S_IWRITE | stat . S_IEXEC )
    self . run ( "/usr/sbin/update-rc.d %s defaults" % name )
|
def map_prop_value_as_index ( prp , lst ) :
    """Returns the given prop of each item in the list

    Builds a mapping keyed by each item's *prp* value, with the item
    itself as the value.

    :param prp: property name to read from every item
    :param lst: items to index
    :return: dict-like result of ``from_pairs``
    """
    return from_pairs ( [ ( prop ( prp , item ) , item ) for item in lst ] )
|
def Nu_Mokry ( Re , Pr , rho_w = None , rho_b = None ) :
    r'''Calculates internal convection Nusselt number for turbulent vertical
    upward flow in a pipe under supercritical conditions according to [1]_,
    and reviewed in [2]_.

    .. math::
        Nu_b = 0.0061 Re_b^{0.904} \bar{Pr}_b^{0.684}
        \left(\frac{\rho_w}{\rho_b}\right)^{0.564}

        \bar{Cp} = \frac{H_w - H_b}{T_w - T_b}

    Parameters
    ----------
    Re : float
        Reynolds number with bulk fluid properties, [-]
    Pr : float
        Prandtl number with bulk fluid properties and an average heat
        capacity between the wall and bulk temperatures [-]
    rho_w : float, optional
        Density at the wall temperature, [kg/m^3]
    rho_b : float, optional
        Density at the bulk temperature, [kg/m^3]

    Returns
    -------
    Nu : float
        Nusselt number with bulk fluid properties, [-]

    Notes
    -----
    Correlation developed at P = 20 MPa, D = 10 mm, G = 200-1500 kg/m^2/s,
    q = 0-1250 kW/m^2. Cp in the Prandtl number should be the average of
    the wall and bulk values. If the density pair is not provided, the
    density-ratio correction term is simply omitted.

    Examples
    --------
    >>> Nu_Mokry(1E5, 1.2, 330, 290.)
    246.1156319156992

    References
    ----------
    .. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
       Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
       Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
       and Design 241, no. 4 (April 2011): 1126-36.
    .. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment
       of Correlations of Forced Convection Heat Transfer to Water at
       Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
       451-60.
    '''
    nusselt = 0.0061 * Re ** 0.904 * Pr ** 0.684
    if rho_w and rho_b :
        # Wall-to-bulk density correction, only when both densities given.
        nusselt = nusselt * ( rho_w / rho_b ) ** 0.564
    return nusselt
|
def uniq ( pipe ) :
    '''Yield items from *pipe*, collapsing consecutive duplicates, like
    bash's ``uniq`` command: an item is only yielded when it differs
    from the previously yielded one.

    Fixes two defects of the original implementation:
    - an empty iterable raised RuntimeError (StopIteration escaping a
      generator, PEP 479) instead of simply yielding nothing;
    - comparison used identity (``is not``) rather than equality, so
      equal-but-distinct objects were not collapsed as documented.
    '''
    pipe = iter ( pipe )
    try :
        previous = next ( pipe )
    except StopIteration :
        # Empty input: yield nothing instead of crashing.
        return
    yield previous
    for item in pipe :
        if item != previous :
            previous = item
            yield item
|
def ensure_has_same_campaigns ( self ) :
    """Ensure that the 2 campaigns to merge have been generated
    from the same campaign.yaml
    """
    yaml_paths = [ osp . join ( root , 'campaign.yaml' ) for root in ( self . lhs , self . rhs ) ]
    # Both sides must carry a campaign.yaml ...
    for path in yaml_paths :
        assert osp . isfile ( path )
    # ... and the two files must be identical.
    assert filecmp . cmp ( * yaml_paths )
|
def _group_flat_tags ( tag , tags ) :
"""Extract tags sharing the same name as the provided tag . Used to collect
options for radio and checkbox inputs .
: param Tag tag : BeautifulSoup tag
: param list tags : List of tags
: return : List of matching tags"""
|
grouped = [ tag ]
name = tag . get ( 'name' , '' ) . lower ( )
while tags and tags [ 0 ] . get ( 'name' , '' ) . lower ( ) == name :
grouped . append ( tags . pop ( 0 ) )
return grouped
|
def rm_incomplete_des_asc(des_mask, asc_mask):
    '''Remove descents/ascents that lack a corresponding opposite phase.

    Args
    ----
    des_mask: ndarray
        Boolean mask of descents in the depth data
    asc_mask: ndarray
        Boolean mask of ascents in the depth data

    Returns
    -------
    des_mask: ndarray
        Descent mask with erroneous regions removed
    asc_mask: ndarray
        Ascent mask with erroneous regions removed
    '''
    from . import utils
    # Contiguous start/stop indices of every descent and ascent region
    d_start, d_stop = utils.contiguous_regions(des_mask)
    a_start, a_stop = utils.contiguous_regions(asc_mask)
    # First prune descents against the ascents, then prune ascents against
    # the already-pruned descents (order matters).
    des_mask = utils.rm_regions(des_mask, asc_mask, d_start, d_stop)
    asc_mask = utils.rm_regions(asc_mask, des_mask, a_start, a_stop)
    return des_mask, asc_mask
|
def statichttp(container=None):
    """Wrap a WSGI-style function into an HTTPRequest event handler."""
    def wrap(func):
        @functools.wraps(func)
        def on_request(event):
            # Delegate to the module-level dispatcher with the bound container
            return _handler(container, event, func)
        # Preserve the bound-method owner on the wrapper, if any
        if hasattr(func, '__self__'):
            on_request.__self__ = func.__self__
        return on_request
    return wrap
|
def get_keys_from_shelve(file_name, file_location):
    """Retrieve all keys stored in a shelve file.

    Args:
        file_name: Shelve storage file name
        file_location: The location of the file, derived from the os module

    Returns:
        a list of the keys
    """
    file = __os.path.join(file_location, file_name)
    shelve_store = __shelve.open(file)
    try:
        # Materialise the keys before the store is closed; iterating a
        # shelve yields its keys directly.
        return list(shelve_store)
    finally:
        # Always close the shelve, even if reading the keys raises —
        # the original leaked the handle on error.
        shelve_store.close()
|
def run_job(self, job_id, array_id=None):
    """Run the job with the given id (and array index, if applicable).

    Marks the job 'executing' in the database, runs its command line via
    subprocess, then records the result.  If the job failed and has
    ``stop_on_failure`` set, all transitively dependent jobs are stopped.

    :param job_id: database id of the job to run
    :param array_id: optional array index for array jobs
    """
    # Phase 1: set the job's status to 'executing' in the database.
    try:  # get the job from the database
        self.lock()
        jobs = self.get_jobs((job_id,))
        if not len(jobs):  # it seems that the job has been deleted in the meanwhile
            return
        job = jobs[0]
        # get the machine name we are executing on; this might only work at idiap
        machine_name = socket.gethostname()
        # set the 'executing' status to the job
        job.execute(array_id, machine_name)
        self.session.commit()
    except Exception as e:
        # Best-effort: a failed status update must not prevent execution.
        logger.error("Caught exception '%s'", e)
        pass
    finally:
        self.unlock()
    # Phase 2: read the command line of the job from the database; this
    # does not need write access, so the lock is held only briefly.
    self.lock()
    job = self.get_jobs((job_id,))[0]
    command_line = job.get_command_line()
    exec_dir = job.get_exec_dir()
    self.unlock()
    logger.info("Starting job %d: %s", job_id, " ".join(command_line))
    # Phase 3: execute the command line of the job and wait until it finishes.
    try:
        result = subprocess.call(command_line, cwd=exec_dir)
        logger.info("Job %d finished with result %s", job_id, str(result))
    except Exception as e:
        logger.error("The job with id '%d' could not be executed: %s", job_id, e)
        result = 69
        # ASCII: 'E'
    # Phase 4: set a new status and the results of the job.
    try:
        self.lock()
        jobs = self.get_jobs((job_id,))
        if not len(jobs):  # it seems that the job has been deleted in the meanwhile
            logger.error("The job with id '%d' could not be found in the database!", job_id)
            self.unlock()
            return
        job = jobs[0]
        job.finish(result, array_id)
        self.session.commit()
        # This might not be working properly, so use with care!
        if job.stop_on_failure and job.status == 'failure':  # the job has failed
            # stop this and all dependent jobs from execution; breadth-first
            # walk over the dependency graph collecting ids
            dependent_jobs = job.get_jobs_waiting_for_us()
            dependent_job_ids = set([dep.unique for dep in dependent_jobs] + [job.unique])
            while len(dependent_jobs):
                dep = dependent_jobs.pop(0)
                new = dep.get_jobs_waiting_for_us()
                dependent_jobs += new
                dependent_job_ids.update([dep.unique for dep in new])
            # NOTE(review): unlock() here plus the finally-unlock below may
            # double-unlock on this path — confirm lock semantics allow it.
            self.unlock()
            deps = sorted(list(dependent_job_ids))
            self.stop_jobs(deps)
            logger.warn("Stopped dependent jobs '%s' since this job failed.", str(deps))
    except Exception as e:
        logger.error("Caught exception '%s'", e)
        pass
    finally:
        if hasattr(self, 'session'):
            self.unlock()
|
def arbiter(**params):
    '''Obtain the ``arbiter``.

    Returns the arbiter instance only when called from the arbiter
    context domain; otherwise returns nothing.  If no actor exists yet,
    a new arbiter is spawned and installed.
    '''
    current = get_actor()
    if current is None:
        # No actor in this context: create and register the arbiter
        return set_actor(_spawn_actor('arbiter', None, **params))
    if current.is_arbiter():
        return current
    # Non-arbiter context: implicitly return None
|
def getdoc ( obj ) :
"""Get object docstring
: rtype : str"""
|
inspect_got_doc = inspect . getdoc ( obj )
if inspect_got_doc in ( object . __init__ . __doc__ , object . __doc__ ) :
return ''
# We never want this builtin stuff
return ( inspect_got_doc or '' ) . strip ( )
|
def resources_with_perms(cls, instance, perms, resource_ids=None, resource_types=None, db_session=None):
    """Return a query of all resources the user has any of *perms* for
    (at least one perm needs to be met).

    :param instance: the user whose accessible resources are searched
    :param perms: list of permission names, any of which grants access
    :param resource_ids: restricts the search to specific resources
    :param resource_types: restricts the search to specific resource types
    :param db_session: optional explicit session
    :return: SQLAlchemy query ordered by resource name
    """
    # owned entities have ALL permissions so we return those resources too
    # even without explicit perms set
    # TODO: implement admin superrule perm - maybe return all apps
    db_session = get_db_session(db_session, instance)
    query = db_session.query(cls.models_proxy.Resource).distinct()
    group_ids = [gr.id for gr in instance.groups]
    # if user has some groups lets try to join based on their permissions
    if group_ids:
        join_conditions = (cls.models_proxy.GroupResourcePermission.group_id.in_(group_ids), cls.models_proxy.Resource.resource_id == cls.models_proxy.GroupResourcePermission.resource_id, cls.models_proxy.GroupResourcePermission.perm_name.in_(perms),)
        query = query.outerjoin((cls.models_proxy.GroupResourcePermission, sa.and_(*join_conditions)))
        # ensure outerjoin permissions are correct -
        # dont add empty rows from join
        # conditions are - join ON possible group permissions
        # OR owning group/user
        query = query.filter(sa.or_(cls.models_proxy.Resource.owner_user_id == instance.id, cls.models_proxy.Resource.owner_group_id.in_(group_ids), cls.models_proxy.GroupResourcePermission.perm_name != None,)  # noqa
        )
    else:  # filter just by username (user owns the resource directly)
        query = query.filter(cls.models_proxy.Resource.owner_user_id == instance.id)
    # lets try by custom user permissions for resource
    query2 = db_session.query(cls.models_proxy.Resource).distinct()
    query2 = query2.filter(cls.models_proxy.UserResourcePermission.user_id == instance.id)
    query2 = query2.filter(cls.models_proxy.Resource.resource_id == cls.models_proxy.UserResourcePermission.resource_id)
    query2 = query2.filter(cls.models_proxy.UserResourcePermission.perm_name.in_(perms))
    # optional narrowing applies to both the group-based and the
    # user-permission-based queries
    if resource_ids:
        query = query.filter(cls.models_proxy.Resource.resource_id.in_(resource_ids))
        query2 = query2.filter(cls.models_proxy.Resource.resource_id.in_(resource_ids))
    if resource_types:
        query = query.filter(cls.models_proxy.Resource.resource_type.in_(resource_types))
        query2 = query2.filter(cls.models_proxy.Resource.resource_type.in_(resource_types))
    # combine both paths and return deterministically ordered results
    query = query.union(query2)
    query = query.order_by(cls.models_proxy.Resource.resource_name)
    return query
|
def find_globals_and_nonlocals(node, globs, nonlocals, code, version):
    """Walk a parse-tree node collecting variable names that require a
    'global' or 'nonlocal' declaration.

    :param node: parse tree node to search
    :param globs: set accumulating global names
    :param nonlocals: set accumulating nonlocal names
    :param code: code object providing co_freevars / co_name
    :param version: Python version of the bytecode being decompiled
    :return: the (possibly updated) (globs, nonlocals) sets
    """
    for child in node:
        if isinstance(child, SyntaxTree):
            # Recurse into subtrees, threading the accumulators through
            globs, nonlocals = find_globals_and_nonlocals(child, globs, nonlocals, code, version)
            continue
        if child.kind in read_global_ops:
            globs.add(child.pattr)
        elif (version >= 3.0
              and child.kind in nonglobal_ops
              and child.pattr in code.co_freevars
              and child.pattr != code.co_name
              and code.co_name != '<lambda>'):
            nonlocals.add(child.pattr)
    return globs, nonlocals
|
def on_task_init(self, task_id, task):
    """Called before every task; closes the database connection unless the
    task is being executed eagerly (in-process)."""
    # Missing request or is_eager attribute counts as "not eager"
    request = getattr(task, 'request', None)
    is_eager = getattr(request, 'is_eager', False)
    if not is_eager:
        self.close_database()
|
def get_asset_notification_session_for_repository(self, asset_receiver, repository_id, proxy):
    """Gets the asset notification session for the given repository.

    arg:    asset_receiver (osid.repository.AssetReceiver): the
            notification callback
    arg:    repository_id (osid.id.Id): the Id of the repository
    arg     proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetNotificationSession) - an
            AssetNotificationSession
    raise:  NotFound - repository_id not found
    raise:  NullArgument - asset_receiver or repository_id is null
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_asset_notification() or
            supports_visible_federation() is false
    compliance: optional - This method must be implemented if
            supports_asset_notification() and
            supports_visible_federation() are true.
    """
    if not repository_id or not asset_receiver:
        raise NullArgument()
    # NOTE(review): gates on supports_asset_lookup() although the contract
    # mentions supports_asset_notification() — confirm this is intended.
    if not self.supports_asset_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed('import error')
    proxy = self._convert_proxy(proxy)
    try:
        # NOTE(review): constructs an AssetAdminSession rather than an
        # AssetNotificationSession — looks like a copy/paste; verify
        # against the sessions module.
        session = sessions.AssetAdminSession(asset_receiver, repository_id, proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed('attribute error')
    return session
|
def make_entry_point(patches, original_entry_point):
    """Build a console_script entry point that applies pymonkey patches
    before dispatching to the real entry point.

    :param patches: iterable of pymonkey patches to apply, e.g. ('my-patch',)
    :param original_entry_point: the wrapped entry point name, such as 'pip'
    :return: a callable suitable for use as a console_script
    """
    def entry(argv=None):
        # Default to the process arguments (minus the program name)
        if argv is None:
            argv = sys.argv[1:]
        # Patches, then '--', then the real entry point and its args
        full_args = tuple(patches) + ('--', original_entry_point) + tuple(argv)
        return main(full_args)
    return entry
|
def set_pointlist(self, pointlist):
    """Overwrite the stored handwriting data with *pointlist*.

    Parameters
    ----------
    pointlist : list
        A non-empty list of strokes; each stroke is a list of points, and
        every point is a dictionary with 'x', 'y', 'time' keys.

    The data is persisted as JSON in ``self.raw_data_json``.
    Raises AssertionError when *pointlist* is not a non-empty list.
    """
    assert type(pointlist) is list, "pointlist is not of type list, but %r" % type(pointlist)
    assert len(pointlist) >= 1, "The pointlist of formula_id %i is %s" % (self.formula_id, self.get_pointlist())
    self.raw_data_json = json.dumps(pointlist)
|
def subscribe(self, connection, destination):
    """Subscribes a connection to the specified topic destination.

    @param connection: The client connection to subscribe.
    @type connection: L{coilmq.server.StompConnection}

    @param destination: The topic destination (e.g. '/topic/foo')
    @type destination: C{str}
    """
    self.log.debug("Subscribing %s to %s" % (connection, destination))
    # _topics maps destination -> set of subscribed connections
    self._topics[destination].add(connection)
|
def fatal(callingClass, astr_key, astr_extraMsg=""):
    '''Convenience dispatcher to the error_exit() method.

    Raises a "fatal" error, i.e. terminates the script (the third argument
    to report() requests an exit to the OS).
    '''
    report(callingClass, astr_key, True, astr_extraMsg)
|
def to_shcoeffs(self, nmax=None, normalization='4pi', csphase=1):
    """Return the spherical harmonic coefficients using the first n Slepian
    coefficients.

    Usage
    -----
    s = x.to_shcoeffs([nmax])

    Returns
    -------
    s : SHCoeffs class instance
        The spherical harmonic coefficients obtained from using the first
        n Slepian expansion coefficients.

    Parameters
    ----------
    nmax : int, optional, default = x.nmax
        The maximum number of expansion coefficients to use when
        calculating the spherical harmonic coefficients.
    normalization : str, optional, default = '4pi'
        Normalization of the output class: '4pi', 'ortho' or 'schmidt' for
        geodesy 4pi-normalized, orthonormalized, or Schmidt semi-normalized
        coefficients, respectively.
    csphase : int, optional, default = 1
        Condon-Shortley phase convention: 1 to exclude the phase factor,
        or -1 to include it.
    """
    # Validate arguments before doing any work
    if type(normalization) != str:
        raise ValueError('normalization must be a string. ' + 'Input type was {:s}'.format(str(type(normalization))))
    if normalization.lower() not in set(['4pi', 'ortho', 'schmidt']):
        raise ValueError("normalization must be '4pi', 'ortho' " + "or 'schmidt'. Provided value was {:s}".format(repr(normalization)))
    if csphase != 1 and csphase != -1:
        raise ValueError("csphase must be 1 or -1. Input value was {:s}".format(repr(csphase)))
    if nmax is None:
        nmax = self.nmax
    # Spherical-cap windows store their functions in 'coeffs'; other
    # concentration domains store them in 'tapers'.
    if self.galpha.kind == 'cap':
        shcoeffs = _shtools.SlepianCoeffsToSH(self.falpha, self.galpha.coeffs, nmax)
    else:
        shcoeffs = _shtools.SlepianCoeffsToSH(self.falpha, self.galpha.tapers, nmax)
    # Results come back 4pi-normalized; convert only if the caller asked
    # for a different convention.
    temp = SHCoeffs.from_array(shcoeffs, normalization='4pi', csphase=1)
    if normalization != '4pi' or csphase != 1:
        return temp.convert(normalization=normalization, csphase=csphase)
    else:
        return temp
|
def is_defined(self, objtxt, force_import=False):
    """Return True if *objtxt* names an object defined in this shell's
    local namespace (optionally importing it first)."""
    namespace = self.locals
    return isdefined(objtxt, force_import=force_import, namespace=namespace)
|
def sanity_check(args):
    """Verify that the work folder is a Django app.

    A valid Django app must always contain a models.py file; if it is
    missing, print an error and exit with status 1.

    :param args: dict with a 'django_application_folder' entry
    :return: None
    """
    models_path = os.path.join(args['django_application_folder'], 'models.py')
    if os.path.isfile(models_path):
        return
    print("django_application_folder is not a Django application folder")
    sys.exit(1)
|
def factorized_gaussian_noise(in_features, out_features, device):
    """Factorised (cheaper) gaussian noise from "Noisy Networks for
    Exploration" by Meire Fortunato, Mohammad Gheshlaghi Azar, Bilal Piot
    and others."""
    eps_in = scaled_noise(in_features, device=device)
    eps_out = scaled_noise(out_features, device=device)
    # Outer product gives the weight noise; eps_out doubles as bias noise
    return eps_out.ger(eps_in), eps_out
|
def absent(profile='pagerduty', subdomain=None, api_key=None, **kwargs):
    '''Ensure a pagerduty service does not exist.
    Name can be the service name or pagerduty service id.
    '''
    # Delegate to the shared helper; services are matched by name or id
    resource_absent = __salt__['pagerduty_util.resource_absent']
    return resource_absent('services', ['name', 'id'], profile, subdomain, api_key, **kwargs)
|
def check_availability(self, isos):
    '''Check whether the requested pair of isotopes is available in the
    dataset, in any of its four stored forms (ratio, inverted ratio,
    delta, inverted delta).

    Parameters
    ----------
    isos : list
        set of isotopes in format ['Si-28', 'Si-30'].

    Returns
    -------
    list
        [index, delta_b, ratio_b].
        index: where the entry lives (-1 when absent).
        delta_b: True when the stored value is a delta (None when absent).
        ratio_b: True when the stored ratio is inverted (None when absent).
    '''
    # Canonical names for both isotopes
    iso1name = iso_name_converter(isos[0])
    iso2name = iso_name_converter(isos[1])
    forward = iso1name + '/' + iso2name
    backward = iso2name + '/' + iso1name
    # Candidate keys in priority order: plain ratio, inverted ratio,
    # delta, inverted delta — with the flags each one implies.
    candidates = (
        (forward, False, False),
        (backward, False, True),
        ('d(' + forward + ')', True, False),
        ('d(' + backward + ')', True, True),
    )
    for key, delta_b, ratio_b in candidates:
        if key in self.datadict:
            return self.datadict[key], delta_b, ratio_b
    # Nothing matched
    return -1, None, None
|
def replace_namespaced_ingress(self, name, namespace, body, **kwargs):
    """Replace the specified Ingress.

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, pass async_req=True:
    >>> thread = api.replace_namespaced_ingress(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Ingress (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param NetworkingV1beta1Ingress body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :param str field_manager: name associated with the actor making these changes.
    :return: NetworkingV1beta1Ingress, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    call = self.replace_namespaced_ingress_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: hand back the request thread immediately
        return call(name, namespace, body, **kwargs)
    data = call(name, namespace, body, **kwargs)
    return data
|
def fetch_contributing_projects(self, **kwargs):
    """List projects the authenticated user can access as a contributor.

    Synchronous by default; supply a ``callback`` function to receive the
    response asynchronously:
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.fetch_contributing_projects(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :return: PaginatedProjectResults, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    call = self.fetch_contributing_projects_with_http_info
    if kwargs.get('callback'):
        # Asynchronous path: return the request thread
        return call(**kwargs)
    data = call(**kwargs)
    return data
|
def body(self):
    """Serialize and return the response body, caching the result so
    subsequent accesses return the same value."""
    if self._body is None:
        raw = self._raw_body
        if self._body_writer is not None:
            # A writer takes precedence over direct serialization
            self._body = self._body_writer(raw)
        elif callable(raw):
            self._body = raw()
        else:
            self._body = raw
    return self._body
|
def _find_bracket_position ( self , text , bracket_left , bracket_right , pos_quote ) :
"""Return the start and end position of pairs of brackets .
https : / / stackoverflow . com / questions / 29991917/
indices - of - matching - parentheses - in - python"""
|
pos = { }
pstack = [ ]
for idx , character in enumerate ( text ) :
if character == bracket_left and not self . is_char_in_pairs ( idx , pos_quote ) :
pstack . append ( idx )
elif character == bracket_right and not self . is_char_in_pairs ( idx , pos_quote ) :
if len ( pstack ) == 0 :
raise IndexError ( "No matching closing parens at: " + str ( idx ) )
pos [ pstack . pop ( ) ] = idx
if len ( pstack ) > 0 :
raise IndexError ( "No matching opening parens at: " + str ( pstack . pop ( ) ) )
return pos
|
def main():
    """Entry point for gns3-converter.

    Parses command-line arguments, configures logging, gathers the main
    topology plus any snapshot topologies, and converts each one.
    """
    arg_parse = setup_argparse()
    args = arg_parse.parse_args()
    if not args.quiet:
        print('GNS3 Topology Converter')
    # Verbosity: DEBUG when --debug, otherwise warnings only
    if args.debug:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.WARNING
    logging.basicConfig(level=logging_level, format=LOG_MSG_FMT, datefmt=LOG_DATE_FMT)
    logging.getLogger(__name__)
    # Add the main topology to the list of files to convert; the default
    # 'topology.net' is resolved relative to the current directory
    if args.topology == 'topology.net':
        args.topology = os.path.join(os.getcwd(), 'topology.net')
    topology_files = [{'file': topology_abspath(args.topology), 'snapshot': False}]
    # Add any snapshot topologies to be converted
    topology_files.extend(get_snapshots(args.topology))
    topology_name = name(args.topology, args.name)
    # Do the conversion
    for topology in topology_files:
        do_conversion(topology, topology_name, args.output, args.debug)
|
def _raise_unrecoverable_error_client(self, exception):
    """Raises an exceptions.ClientError with a message telling that the error
    probably comes from the client configuration.

    :param exception: Exception that caused the ClientError
    :type exception: Exception
    :raise exceptions.ClientError
    """
    # Build a user-facing hint naming the dependency library and embedding
    # the original exception's repr for support purposes.
    message = ('There was an unrecoverable error during the HTTP request which is probably related to your ' 'configuration. Please verify `' + self.DEPENDENCY + '` library configuration and update it. If the ' 'issue persists, do not hesitate to contact us with the following information: `' + repr(exception) + '`.')
    raise exceptions.ClientError(message, client_exception=exception)
|
def loads(self, content, **options):
    """Load config from the given string *content* after some checks.

    :param content: Config file content; a falsy value (None, '', ...)
        yields an empty container.
    :param options:
        options will be passed to backend specific loading functions.
        please note that options have to be sanitized w/
        :func:`anyconfig.utils.filter_options` later to filter out options
        not in _load_opts.
    :return: dict or dict-like object holding configurations
    """
    container = self._container_factory(**options)
    # 'not content' already covers None — the original's extra
    # 'or content is None' test was redundant.
    if not content:
        return container()
    options = self._load_options(container, **options)
    return self.load_from_string(content, container, **options)
|
def get_port_range(port_def):
    '''Given a port number or range, return a start and end to that range.

    Port ranges are defined as a string containing two numbers separated by
    a dash (e.g. '4505-4506').

    A ValueError will be raised if bad input is provided.
    '''
    if isinstance(port_def, six.integer_types):  # Single integer, start/end of range is the same
        return port_def, port_def
    try:
        # NOTE(review): `split` here is a module-level helper, not
        # str.split — confirm its signature matches (value, separator).
        comps = [int(x) for x in split(port_def, '-')]
        if len(comps) == 1:
            # A bare number like '4505' is a one-port range
            range_start = range_end = comps[0]
        else:
            range_start, range_end = comps
            if range_start > range_end:
                # Signal the inverted-range case to the except block below
                raise ValueError('start > end')
    except (TypeError, ValueError) as exc:
        if exc.__str__() == 'start > end':
            msg = ('Start of port range ({0}) cannot be greater than end of ' 'port range ({1})'.format(range_start, range_end))
        else:
            msg = '\'{0}\' is non-numeric or an invalid port range'.format(port_def)
        raise ValueError(msg)
    else:
        return range_start, range_end
|
def optOut(self, playback=None, library=None):
    """Opt in or out of sharing playback/library stats with plex.
    See: https://www.plex.tv/about/privacy-legal/
    """
    # Only include the flags the caller actually supplied, as 0/1 ints
    choices = (('optOutPlayback', playback), ('optOutLibraryStats', library))
    params = {key: int(value) for key, value in choices if value is not None}
    url = 'https://plex.tv/api/v2/user/privacy'
    return self.query(url, method=self._session.put, data=params)
|
def send_photo(self, photo: str, caption: str = None, reply: Message = None, on_success: callable = None, reply_markup: botapi.ReplyMarkup = None):
    """Send photo to this peer.

    :param photo: File path to photo to send.
    :param caption: Caption for photo
    :param reply: Message object or message_id to reply to.
    :param on_success: Callback to call when call is complete.
    :param reply_markup: Optional custom keyboard/markup for the message.

    :type reply: int or Message
    """
    # Delegate to the underlying client, binding this peer as the target
    self.twx.send_photo(peer=self, photo=photo, caption=caption, reply=reply, reply_markup=reply_markup, on_success=on_success)
|
def chained(self, text=None, fore=None, back=None, style=None):
    """Colorize a single string and append it to the accumulated data.

    The RESET_ALL code is appended to the string unless text is empty.
    Raises ValueError on invalid color names.

    Arguments:
        text:  String to colorize, or None for BG/Style change.
        fore:  Name of fore color to use.
        back:  Name of back color to use.
        style: Name of style to use.

    Returns self, so calls can be chained.
    """
    colorized = self.color(text=text, fore=fore, back=back, style=style)
    self.data = self.data + colorized
    return self
|
def fit_mle(self, init_vals, print_res=True, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None, just_point=False, **kwargs):
    """Estimate the MNL model by maximum likelihood.

    Parameters
    ----------
    init_vals : 1D ndarray.
        The initial values to start the optimization process with. There
        should be one value for each utility coefficient being estimated.
    print_res : bool, optional.
        Determines whether the timing and initial and final log likelihood
        results will be printed as they are determined.
    method : str, optional.
        Should be a valid string that can be passed to
        scipy.optimize.minimize. Determines the optimization algorithm that
        is used for this problem. If 'em' is passed, a custom coded EM
        algorithm will be used. Default `== 'newton-cg'`.
    loss_tol : float, optional.
        Tolerance on the difference in objective function values from one
        iteration to the next needed to declare convergence.
        Default `== 1e-06`.
    gradient_tol : float, optional.
        Tolerance on the difference in gradient values from one iteration
        to the next needed to declare convergence.
    maxiter : int, optional.
        Maximum number of optimizer iterations. Default == 1000.
    ridge : int, float, long, or None, optional.
        If a scalar is passed, that scalar determines the ridge penalty for
        the optimization. Default `== None` (no ridge regression).
    constrained_pos : list or None, optional.
        Positions of the estimated-parameter array that are not to change
        from their initial values. Elements must be integers smaller than
        `init_vals.size`. Default == None.
    just_point : bool, optional.
        If True, skip calculations that are non-critical for obtaining the
        maximum likelihood point estimate and return the results dictionary
        from scipy.optimize. Default == False.

    Returns
    -------
    None or dict.
        If `just_point` is False, None is returned and the estimation
        results are saved to the model instance. If `just_point` is True,
        the results dictionary from scipy.optimize() is returned.
    """
    # Check integrity of passed arguments: the MNL model takes a single
    # init_vals array, not the split-up kwargs other models accept.
    kwargs_to_be_ignored = ["init_shapes", "init_intercepts", "init_coefs"]
    if any([x in kwargs for x in kwargs_to_be_ignored]):
        msg = "MNL model does not use of any of the following kwargs:\n{}"
        msg_2 = "Remove such kwargs and pass a single init_vals argument"
        raise ValueError(msg.format(kwargs_to_be_ignored) + msg_2)
    if ridge is not None:
        warnings.warn(_ridge_warning_msg)
    # Store the optimization method
    self.optimization_method = method
    # Store the ridge parameter
    self.ridge_param = ridge
    # Construct the mappings from alternatives to observations and from
    # chosen alternatives to observations
    mapping_res = self.get_mappings_for_fit()
    # Create the estimation object; the zero vector is the reference point
    # for the parameter-splitting helper.
    zero_vector = np.zeros(init_vals.shape)
    mnl_estimator = MNLEstimator(self, mapping_res, ridge, zero_vector, split_param_vec, constrained_pos=constrained_pos)
    # Set the derivative functions for estimation
    mnl_estimator.set_derivatives()
    # Perform one final check on the length of the initial values
    mnl_estimator.check_length_of_initial_values(init_vals)
    # Get the estimation results
    estimation_res = estimate(init_vals, mnl_estimator, method, loss_tol, gradient_tol, maxiter, print_res, just_point=just_point)
    if not just_point:  # Store the estimation results
        self.store_fit_results(estimation_res)
        return None
    else:
        return estimation_res
|
def run_in_thread_pool(self, pool_size=None, func=None, *args):
    """Run ``func(*args)`` in an executor pool on this object's event loop.

    If `kwargs` needed, try like this: ``func=lambda: foo(*args, **kwargs)``

    :param pool_size: worker count passed to ``Pool``; None lets the pool
        pick its default.
    :param func: callable to execute.
    :param args: positional arguments forwarded to *func*.
    :return: the awaitable returned by ``loop.run_in_executor``.
    """
    # NOTE(review): `Pool` comes from a module-level import not visible
    # here; run_in_executor expects a concurrent.futures.Executor —
    # confirm Pool satisfies that interface. A new pool is created per
    # call, which may be wasteful; verify this is intended.
    executor = Pool(pool_size)
    return self.loop.run_in_executor(executor, func, *args)
|
async def login(cls, url, *, username=None, password=None, insecure=False):
    """Make an `Origin` by logging-in with a username and password.

    :return: A tuple of ``profile`` and ``origin``, where the former is an
        unsaved `Profile` instance, and the latter is an `Origin` instance
        made using the profile.
    """
    profile, session = await bones.SessionAPI.login(url=url, username=username, password=password, insecure=insecure)
    origin = cls(session)
    return profile, origin
|
def calcPunkProp(sNow):
    '''Calculates the proportion of punks in the population, given data from
    each type.

    Parameters
    ----------
    sNow : [np.array]
        List of arrays of binary data, representing the fashion choice of
        each agent in each type of this market (0=jock, 1=punk).

    Returns
    -------
    FashionMarketInfo
        Wraps the mean punk share across all agents of all types.
    '''
    # Flatten all types into one vector; the mean of 0/1 data is the
    # punk proportion.
    sNowX = np.asarray(sNow).flatten()
    pNow = np.mean(sNowX)
    return FashionMarketInfo(pNow)
|
def soft_target_update(self):
    """Polyak-average local parameters into the target network:

    .. math::
        \\theta_{target} = \\tau \\times \\theta_{local} + (1 - \\tau) \\times \\theta_{target},

    with \\tau \\ll 1.  See https://arxiv.org/pdf/1509.02971.pdf
    """
    tau = self.tau
    pairs = zip(self.target.parameters(), self.local.parameters())
    for tgt_param, loc_param in pairs:
        # In-place copy keeps optimizer references to the target tensors
        tgt_param.data.copy_(tau * loc_param.data + (1.0 - tau) * tgt_param.data)
|
def update_ar_listing_catalog(portal):
    """Add Indexes/Metadata to bika_catalog_analysisrequest_listing."""
    cat_id = CATALOG_ANALYSIS_REQUEST_LISTING
    catalog = api.get_tool(cat_id)
    logger.info("Updating Indexes/Metadata of Catalog '{}'".format(cat_id))
    indexes_to_add = [  # name, attribute, metatype
        ("getClientID", "getClientID", "FieldIndex"), ("is_active", "is_active", "BooleanIndex"), ("is_received", "is_received", "BooleanIndex"), ]
    metadata_to_add = ["getClientID", ]
    for index in indexes_to_add:
        add_index(portal, cat_id, *index)
    for metadata in metadata_to_add:
        # Only refresh the catalog when the metadata column is new
        refresh = metadata not in catalog.schema()
        add_metadata(portal, cat_id, metadata, refresh_catalog=refresh)
|
def btreeSearch(self, ip):
    """B-tree search method: resolve *ip* to its data record.

    :param ip: IP address, either dotted-quad string or numeric string;
        dotted form is converted to a long first.
    :return: the record via ``returnData``, or "N1"/"N2" sentinels when
        no header segment / no index entry matches.
    """
    if not ip.isdigit():
        ip = self.ip2Long(ip)
    # Lazily load and cache the header block on first use.
    if len(self.__headerSip) < 1:  # pass the super block
        self.__f.seek(8)
        # read the header block
        b = self.__f.read(8192)
        # parse the header block: pairs of (start-ip, file-pointer),
        # terminated by a zero pointer
        sip = None
        ptr = None
        for i in range(0, len(b) - 1, 8):
            sip = self.getLong(b, i)
            ptr = self.getLong(b, i + 4)
            if ptr == 0:
                break
            self.__headerSip.append(sip)
            self.__headerPtr.append(ptr)
    # Binary search the cached header for the segment [sptr, eptr]
    # containing ip.
    headerLen = len(self.__headerSip) - 1
    l, h, sptr, eptr = (0, headerLen, 0, 0)
    while l <= h:
        m = int((l + h) / 2)
        if ip == self.__headerSip[m]:
            if m > 0:
                sptr = self.__headerPtr[m - 1]
                eptr = self.__headerPtr[m]
                break
            else:
                sptr = self.__headerPtr[m]
                eptr = self.__headerPtr[m + 1]
                break
        if ip > self.__headerSip[m]:
            if m == headerLen:
                sptr = self.__headerPtr[m - 1]
                eptr = self.__headerPtr[m]
                break
            elif ip < self.__headerSip[m + 1]:
                sptr = self.__headerPtr[m]
                eptr = self.__headerPtr[m + 1]
                break
            l = m + 1
        else:
            if m == 0:
                sptr = self.__headerPtr[m]
                eptr = self.__headerPtr[m + 1]
                break
            elif ip > self.__headerSip[m - 1]:
                sptr = self.__headerPtr[m - 1]
                eptr = self.__headerPtr[m]
                break
            h = m - 1
    if sptr == 0:
        # No header segment covers this ip
        return "N1"
    # Read the whole index segment (+12 bytes for one trailing entry) and
    # binary search its 12-byte records: (start-ip, end-ip, data-pointer).
    indexLen = eptr - sptr
    self.__f.seek(sptr)
    b = self.__f.read(indexLen + 12)
    l, h, mixPtr = (0, int(indexLen / 12), 0)
    while l <= h:
        m = int((l + h) / 2)
        offset = m * 12
        if ip >= self.getLong(b, offset):
            if ip > self.getLong(b, offset + 4):
                l = m + 1
            else:
                mixPtr = self.getLong(b, offset + 8)
                break
        else:
            h = m - 1
    if mixPtr == 0:
        # Segment found but no index record matched
        return "N2"
    return self.returnData(mixPtr)
|
def validate_arrangement_version(self):
    """Warn when the configured arrangement_version is deprecated.

    Intended for autorebuilds so they can fail early instead of hitting
    osbs-client validation on the workers.  Must run after
    ``self.adjust_build_kwargs``.  A missing (``None``) version is
    accepted silently.

    :raises ValueError: when the version is not supported
    """
    version = self.build_kwargs['arrangement_version']
    # TODO: raise as ValueError in release 1.6.38+
    if version is not None and version <= 5:
        self.log.warning("arrangement_version <= 5 is deprecated and will be removed" " in release 1.6.38")
|
def validate_setup(transactions):
    """Check that the first two transactions set both the rate and the days.

    An empty transaction list is considered valid.  Both initial entries
    must carry the same date, and between them they must define the
    'rate' and the 'days' values.

    :param transactions: list of whitespace-separated entry strings
    :return: True when the setup entries are valid, False otherwise
    """
    if not transactions:
        return True
    if len(transactions) < 2:
        print('Error: vacationrc file must have both initial days and rates entries')
        return False
    first_parts = transactions[0].split()
    second_parts = transactions[1].split()
    # Dates must match
    if first_parts[0] != second_parts[0]:
        print('Error: First two entries in vacationrc must have the same date')
        return False
    kinds = (first_parts[1], second_parts[1])
    if 'rate' not in kinds or 'days' not in kinds:
        print('Error: First two entries in vacationrc must set days and rate')
        return False
    return True
|
def get_unixtime_registered(self):
    """Return the user's registration date as a UNIX timestamp (int)."""
    doc = self._request(self.ws_prefix + ".getInfo", True)
    registered_node = doc.getElementsByTagName("registered")[0]
    return int(registered_node.getAttribute("unixtime"))
|
def default_jardiff_options(updates=None):
    """Build an options object with jardiff's default values for API use.

    :param updates: optional dict used to override fields on the
        returned options object
    :return: options object produced by the jardiff option parser
    """
    options, _unused_args = create_optparser().parse_args([])
    if updates:
        # pylint: disable=W0212
        options._update_careful(updates)
    return options
|
def get_instance(self, payload):
    """Build a WorkflowCumulativeStatisticsInstance from an API payload.

    :param dict payload: payload response from the API
    :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsInstance
    """
    workspace_sid = self._solution['workspace_sid']
    workflow_sid = self._solution['workflow_sid']
    return WorkflowCumulativeStatisticsInstance(
        self._version,
        payload,
        workspace_sid=workspace_sid,
        workflow_sid=workflow_sid,
    )
|
def int_to_string(x):
    """Convert a non-negative integer ``x`` into a big-endian byte string,
    as per X9.62.

    The encoding is minimal-length (no leading zero bytes), except that
    zero itself encodes as a single NUL byte.

    :param x: non-negative integer to encode
    :return: ``bytes`` with the big-endian representation of ``x``
    :raises AssertionError: if ``x`` is negative (kept as ``assert`` for
        compatibility with the original implementation's contract)
    """
    assert x >= 0
    if x == 0:
        return b'\x00'
    # int.to_bytes replaces the former manual divide/append/reverse loop
    # and the Py2-compat helpers b()/int2byte, which are not needed on Py3.
    return x.to_bytes((x.bit_length() + 7) // 8, 'big')
|
def add(self, urls):
    """Add the given URL(s) to this purge request.

    Accepts a single string, a list/tuple of strings, a queryset, or a
    model instance; models must implement ``get_absolute_url()``.

    :raises TypeError: when the argument is none of the supported kinds
    """
    if isinstance(urls, (list, tuple)):
        self.urls.extend(urls)
        return
    if isinstance(urls, basestring):
        self.urls.append(urls)
        return
    if isinstance(urls, QuerySet):
        self.urls.extend(obj.get_absolute_url() for obj in urls)
        return
    if hasattr(urls, 'get_absolute_url'):
        self.urls.append(urls.get_absolute_url())
        return
    raise TypeError("Don't know how to handle %r" % urls)
|
def push_state(self, new_file=''):
    """Save the current error state so a subpackage can be parsed.

    The current detection/message/resource/metadata state is pushed onto
    ``self.subpackages`` and then reset; *new_file* is recorded on the
    package stack.
    """
    snapshot = {
        'detected_type': self.detected_type,
        'message_tree': self.message_tree,
        'resources': self.pushable_resources,
        'metadata': self.metadata,
    }
    self.subpackages.append(snapshot)
    self.message_tree = {}
    self.pushable_resources = {}
    # 'listed' is carried over from the previous metadata on purpose.
    self.metadata = {
        'requires_chrome': False,
        'listed': self.metadata.get('listed'),
        'validator_version': validator.__version__,
    }
    self.package_stack.append(new_file)
|
def parse_content(self, content):
    """Run every registered scanner over the log content.

    Stores *content* on ``self.lines`` and invokes each scanner with
    this parser instance so it can set its own properties.
    """
    self.lines = content
    for run_scanner in self.scanners:
        run_scanner(self)
|
def makeSoftwareVersion(store, version, systemVersion):
    """Find or create the SoftwareVersion item matching *version*.

    :param store: store providing ``findOrCreate``
    :param version: version object exposing package/short/major/minor/micro
    :param systemVersion: owning system version item
    :return: the existing or newly created SoftwareVersion
    """
    return store.findOrCreate(
        SoftwareVersion,
        systemVersion=systemVersion,
        package=unicode(version.package),
        version=unicode(version.short()),
        major=version.major,
        minor=version.minor,
        micro=version.micro,
    )
|
def appendPadding(str, blocksize=AES_blocksize, mode='CMS'):
    '''Pad (append padding to) string for use with symmetric encryption algorithm
    Input: (string) str - String to be padded
           (int) blocksize - block size of the encryption algorithm
           (string) mode - padding scheme one in (CMS, Bit, ZeroLen, Null, Space, Random)
    Return: (string) Padded string according to chosen padding mode'''
    if mode not in (0, 'CMS'):
        # Dispatch to the append<Mode>Padding helper whose MODES key
        # contains the requested mode name.
        for k in MODES.keys():
            if mode in k:
                return globals()['append' + k[1] + 'Padding'](str, blocksize)
        # NOTE(review): this 'else' is read as a for-else (runs only when no
        # MODES key matched, falling back to CMS). The alternative reading —
        # 'else' attached to the 'if' — would return on the very first
        # non-matching key, making the loop's behavior depend on dict order;
        # confirm against the upstream source.
        else:
            return appendCMSPadding(str, blocksize)
    else:
        # mode 0 / 'CMS' is the default CMS padding path.
        return appendCMSPadding(str, blocksize)
|
def dataset_create_version_cli(self, folder, version_notes, quiet=False, convert_to_csv=True, delete_old_versions=False, dir_mode='skip'):
    """Client wrapper for creating a version of a dataset.

    Parameters
    ----------
    folder: the folder with the dataset configuration / data files
        (defaults to the current working directory when falsy)
    version_notes: notes to add for the version
    quiet: suppress verbose output (default is False)
    convert_to_csv: on upload, if data should be converted to csv
    delete_old_versions: if True, do that (default False)
    dir_mode: what to do with directories: "skip" - ignore; "zip" - compress and upload
    """
    folder = folder or os.getcwd()
    result = self.dataset_create_version(
        folder,
        version_notes,
        quiet=quiet,
        convert_to_csv=convert_to_csv,
        delete_old_versions=delete_old_versions,
        dir_mode=dir_mode)
    # Bug fix: check for None BEFORE touching result.invalidTags — the
    # original dereferenced the attribute first and raised AttributeError
    # on a failed (None) result instead of printing the error message.
    if result is None:
        print('Dataset version creation error: See previous output')
        return
    if result.invalidTags:
        print(('The following are not valid tags and could not be added to ' 'the dataset: ') + str(result.invalidTags))
    if result.status.lower() == 'ok':
        print('Dataset version is being created. Please check progress at ' + result.url)
    else:
        print('Dataset version creation error: ' + result.error)
|
def _process_emerge_err ( stdout , stderr ) :
'''Used to parse emerge output to provide meaningful output when emerge fails'''
|
ret = { }
rexp = re . compile ( r'^[<>=][^ ]+/[^ ]+ [^\n]+' , re . M )
slot_conflicts = re . compile ( r'^[^ \n]+/[^ ]+:[^ ]' , re . M ) . findall ( stderr )
if slot_conflicts :
ret [ 'slot conflicts' ] = slot_conflicts
blocked = re . compile ( r'(?m)^\[blocks .+\] ' r'([^ ]+/[^ ]+-[0-9]+[^ ]+)' r'.*$' ) . findall ( stdout )
unsatisfied = re . compile ( r'Error: The above package list contains' ) . findall ( stderr )
# If there were blocks and emerge could not resolve it .
if blocked and unsatisfied :
ret [ 'blocked' ] = blocked
sections = re . split ( '\n\n' , stderr )
for section in sections :
if 'The following keyword changes' in section :
ret [ 'keywords' ] = rexp . findall ( section )
elif 'The following license changes' in section :
ret [ 'license' ] = rexp . findall ( section )
elif 'The following USE changes' in section :
ret [ 'use' ] = rexp . findall ( section )
elif 'The following mask changes' in section :
ret [ 'mask' ] = rexp . findall ( section )
return ret
|
def _index ( self ) :
"""Keys a list of file paths that have been pickled in this directory .
The index is stored in a json file in the same directory as the
pickled objects ."""
|
if self . __index is None :
try :
with open ( self . _get_path ( 'index.json' ) ) as f :
data = json . load ( f )
except ( IOError , ValueError ) :
self . __index = { }
else : # 0 means version is not defined ( = always delete cache ) :
if data . get ( 'version' , 0 ) != self . version :
self . clear_cache ( )
self . __index = { }
else :
self . __index = data [ 'index' ]
return self . __index
|
def to_dict(self):
    """Return the dataset as a plain dict (core fields plus kwargs)."""
    result = {
        'doses': self.doses,
        'ns': self.ns,
        'means': self.means,
        'stdevs': self.stdevs,
    }
    result.update(self.kwargs)
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.