signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def pop(string, encoding=None):
    """pop(string, encoding=None) -> (object, remain)

    Parse one tnetstring off the front of *string*.

    :param string: serialized tnetstring data
    :param encoding: if given, string payloads are decoded with it
    :returns: tuple of (parsed python object, unparsed remainder)
    :raises ValueError: if the data is not a valid tnetstring
    """
    # Parse out data length, type tag and remaining string.
    try:
        (dlen, rest) = string.split(":", 1)
        dlen = int(dlen)
        # Reject negative lengths, which int() happily parses but which
        # would silently corrupt the slicing below.
        if dlen < 0:
            raise ValueError
    except ValueError:
        raise ValueError("not a tnetstring: missing or invalid length prefix")
    try:
        # rest[dlen] raises IndexError when len(rest) <= dlen, so reaching
        # past this line guarantees the payload is exactly dlen long.
        (data, tag, remain) = (rest[:dlen], rest[dlen], rest[dlen + 1:])
    except IndexError:
        raise ValueError("not a tnetstring: invalid length prefix")
    # Dispatch on the one-character type tag.
    if tag == ",":  # string payload
        if encoding is not None:
            return (data.decode(encoding), remain)
        return (data, remain)
    if tag == "#":  # integer
        try:
            return (int(data), remain)
        except ValueError:
            raise ValueError("not a tnetstring: invalid integer literal")
    if tag == "^":  # float
        try:
            return (float(data), remain)
        except ValueError:
            raise ValueError("not a tnetstring: invalid float literal")
    if tag == "!":  # boolean
        if data == "true":
            return (True, remain)
        elif data == "false":
            return (False, remain)
        else:
            raise ValueError("not a tnetstring: invalid boolean literal")
    if tag == "~":  # null: payload must be empty
        if data:
            raise ValueError("not a tnetstring: invalid null literal")
        return (None, remain)
    if tag == "]":  # list: payload is a concatenation of tnetstrings
        l = []
        while data:
            (item, data) = pop(data, encoding)
            l.append(item)
        return (l, remain)
    if tag == "}":  # dict: payload alternates key/value tnetstrings
        d = {}
        while data:
            (key, data) = pop(data, encoding)
            (val, data) = pop(data, encoding)
            d[key] = val
        return (d, remain)
    raise ValueError("unknown type tag")
|
def _encode_string(string):
    """Return a C ``char[]`` buffer for *string*, encoding text as UTF-8."""
    # Bytes pass through unchanged; unicode text is encoded first.
    encoded = string if isinstance(string, bytes) else string.encode('utf8')
    return ffi.new('char[]', encoded)
|
def _neg(node):
    """Return the inverse (complement) of *node*."""
    # Terminal nodes simply swap.
    if node is BDDNODEONE:
        return BDDNODEZERO
    if node is BDDNODEZERO:
        return BDDNODEONE
    # Interior node: complement both branches recursively.
    return _bddnode(node.root, _neg(node.lo), _neg(node.hi))
|
def facets_on_hull(self):
    """Find which facets of the mesh are on the convex hull.

    Returns
    -------
    on_hull : (len(mesh.facets),) bool
        Whether each facet lies on the mesh's convex hull.
    """
    # Facet planes: per-facet origin point and normal.
    normals = self.facets_normal
    origins = self.facets_origin
    # (n, 3) convex hull vertices
    convex = self.convex_hull.vertices.view(np.ndarray).copy()
    # Boolean mask for which facets are on the convex hull.
    # Fixed: np.bool was deprecated in NumPy 1.20 and later removed;
    # the builtin bool is the correct dtype argument.
    on_hull = np.zeros(len(self.facets), dtype=bool)
    for i, normal, origin in zip(range(len(normals)), normals, origins):
        # A facet plane is on the convex hull if every vertex of the
        # convex hull is behind that plane, checked with dot products.
        dot = np.dot(normal, (convex - origin).T)
        on_hull[i] = (dot < tol.merge).all()
    return on_hull
|
def get_job_status(self, job_id):
    """Retrieve task statuses from the ECS API.

    :param job_id: AWS Batch job uuid (str)
    :returns: one of SUBMITTED|PENDING|RUNNABLE|STARTING|RUNNING|SUCCEEDED|FAILED
    """
    response = self._client.describe_jobs(jobs=[job_id])
    # Fail loudly on anything other than an HTTP 200 from the API.
    code = response['ResponseMetadata']['HTTPStatusCode']
    if code == 200:
        return response['jobs'][0]['status']
    msg = 'Job status request received status code {0}:\n{1}'
    raise Exception(msg.format(code, response))
|
def children(self, recursive=False):
    """Yield the child nodes for this item.

    :param recursive: if True, also yield children of children (depth-first)
    :return: generator of <QtGui.QTreeWidgetItem>
    """
    # Fixed: xrange is Python 2 only; this file targets Python 3
    # (f-strings are used elsewhere), so use range.
    for i in range(self.childCount()):
        child = self.child(i)
        yield child
        if recursive:
            for subchild in child.children(recursive=True):
                yield subchild
|
def _download(self, fmfile, destination, overwrite, callback):
    """The actual downloader streaming content from Filemail.

    :param fmfile: file description to download
    :param destination: destination path
    :param overwrite: replace existing files?
    :param callback: callback function that will receive total file size
        and written bytes as arguments
    :type fmfile: ``dict``
    :type destination: ``str`` or ``unicode``
    :type overwrite: ``bool``
    :type callback: ``func``
    """
    # NOTE(review): *overwrite* is accepted but never honoured — existing
    # files are always skipped; confirm intended behavior.
    fullpath = os.path.join(destination, fmfile.get('filename'))
    path, filename = os.path.split(fullpath)
    if os.path.exists(fullpath):
        # Fixed: the template had lost its {filename} placeholder, so the
        # .format(filename=...) call below logged nothing useful.
        msg = 'Skipping existing file: {filename}'
        logger.info(msg.format(filename=filename))
        return
    filesize = fmfile.get('filesize')
    if not os.path.exists(path):
        os.makedirs(path)
    url = fmfile.get('downloadurl')
    stream = self.session.get(url, stream=True)

    def pg_callback(bytes_written):
        # Prefer the CLI progress bar; otherwise fall back to the
        # user-supplied callback, if any.
        if pm.COMMANDLINE:
            bar.show(bytes_written)
        elif callback is not None:
            callback(filesize, bytes_written)

    if pm.COMMANDLINE:
        label = fmfile['filename'] + ': '
        bar = ProgressBar(label=label, expected_size=filesize)
    bytes_written = 0
    with open(fullpath, 'wb') as f:
        for chunk in stream.iter_content(chunk_size=1024 * 1024):
            if not chunk:
                break
            f.write(chunk)
            bytes_written += len(chunk)
            # Report progress after each chunk.
            pg_callback(bytes_written)
|
def agent_color(self, val):
    """Return a colour name for an agent identifier '0'-'9'.

    :param val: agent identifier, a one-character string
    :returns: colour name; falls back to 'blue' for unknown values
        (the original raised UnboundLocalError on unexpected input)
    """
    # Dispatch table replaces the long if/elif chain.
    colours = {
        '0': 'blue',
        '1': 'navy',
        '2': 'firebrick',
        '3': 'blue',  # NOTE(review): duplicate of '0' in the original — confirm intended
        '4': 'blue2',
        '5': 'blue4',
        '6': 'gray22',
        '7': 'gray57',
        '8': 'red4',
        '9': 'red3',
    }
    return colours.get(val, 'blue')
|
def process_response(self, request, response):
    """Convert HttpResponseRedirect to HttpResponse if request is via ajax
    to allow ajax request to redirect url.

    Browsers do not expose the final URL of a transparently-followed
    redirect to XHR callers, so the redirect target is instead handed to
    the client-side JS via the ``X-Horizon-Location`` header.
    """
    # Only intercept ajax requests that passed through horizon middleware.
    if request.is_ajax() and hasattr(request, 'horizon'):
        queued_msgs = request.horizon['async_messages']
        if type(response) == http.HttpResponseRedirect:
            # Drop our messages back into the session as per usual so they
            # don't disappear during the redirect.  Note that we explicitly
            # use django's messages methods here.
            for tag, message, extra_tags in queued_msgs:
                getattr(django_messages, tag)(request, message, extra_tags)
            # if response['location'].startswith(settings.LOGOUT_URL):
            #     redirect_response = http.HttpResponse(status=401)
            #     # This header is used for handling the logout in JS
            #     redirect_response['logout'] = True
            #     if self.logout_reason is not None:
            #         utils.add_logout_reason(
            #             request, redirect_response, self.logout_reason)
            # else:
            redirect_response = http.HttpResponse()
            # Use a set while checking if we want a cookie's attributes
            # copied.
            cookie_keys = set(('max_age', 'expires', 'path', 'domain',
                               'secure', 'httponly', 'logout_reason'))
            # Copy cookies from HttpResponseRedirect towards HttpResponse.
            for cookie_name, cookie in six.iteritems(response.cookies):
                cookie_kwargs = dict((
                    (key, value) for key, value in six.iteritems(cookie)
                    if key in cookie_keys and value))
                redirect_response.set_cookie(
                    cookie_name, cookie.value, **cookie_kwargs)
            # The client-side JS reads this header and performs the redirect.
            redirect_response['X-Horizon-Location'] = response['location']
            upload_url_key = 'X-File-Upload-URL'
            if upload_url_key in response:
                self.copy_headers(response, redirect_response,
                                  (upload_url_key, 'X-Auth-Token'))
            return redirect_response
        if queued_msgs:
            # TODO(gabriel): When we have an async connection to the
            # client (e.g. websockets) this should be pushed to the
            # socket queue rather than being sent via a header.
            # The header method has notable drawbacks (length limits,
            # etc.) and is not meant as a long-term solution.
            response['X-Horizon-Messages'] = json.dumps(queued_msgs)
    return response
|
def auth_required(self, view_func):
    """Decorator restricting *view_func* to authenticated users only.

    Note that authentication is skipped entirely when ``HAWK_ENABLED``
    is ``False``.
    """
    @wraps(view_func)
    def wrapped_view_func(*args, **kwargs):
        if current_app.config['HAWK_ENABLED']:
            # Cookie auth is attempted only when allowed AND a session exists;
            # otherwise fall back to signature-based auth.
            use_cookie = current_app.config['HAWK_ALLOW_COOKIE_AUTH'] and session
            if use_cookie:
                self._auth_by_cookie()
            else:
                self._auth_by_signature()
        return view_func(*args, **kwargs)
    return wrapped_view_func
|
def compute_srec_checksum(srec):
    """Compute the checksum byte of a given S-Record.

    :param srec: S-Record line without its checksum, e.g. ``"S9030000"``
    :returns: the checksum as an int in 0..255 — the 8-bit one's
        complement of the byte sum of everything after the "S*" tag
        (the original docstring claimed a hex string; the code has
        always returned an int)
    """
    # Skip the two-character "S<type>" record tag; the rest is hex bytes.
    data = srec[2:]
    total = 0
    # Step two characters at a time so each iteration consumes one byte.
    # ('total' instead of the original 'sum', which shadowed the builtin.)
    for position in range(0, len(data), 2):
        total += int(data[position:position + 2], 16)
    # One's complement of the least significant byte.  Replaces the
    # original's fragile hex-string slicing/char-replace round trip;
    # (~(total & 0xFF)) & 0xFF == (~total) & 0xFF.
    return (~total) & 0xFF
|
def build_imputation_loyers_proprietaires(temporary_store=None, year=None):
    """Build menage consumption by categorie fiscale dataframe.

    Imputes rents for owner-occupiers ("loyers imputes") from the
    Budget des Familles survey for the given *year* and stores the
    result, merged with the expense table, in *temporary_store*.

    :param temporary_store: HDF-like store holding intermediate tables
    :param year: survey year (1995, 2000, 2005 or 2011)
    """
    assert temporary_store is not None
    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection='budget_des_familles',
        config_files_directory=config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
    if year == 1995:
        imput00 = survey.get_values(table="socioscm")
        # Keep only records whose quality and veracity are trusted:
        #   exdep == 1 -> household expense data is fully filled in
        #   exrev == 1 -> household income data is fully filled in
        imput00 = imput00[(imput00.exdep == 1) & (imput00.exrev == 1)]
        # NOTE(review): the filter line is duplicated in the original;
        # the second application is a harmless no-op.
        imput00 = imput00[(imput00.exdep == 1) & (imput00.exrev == 1)]
        kept_variables = ['mena', 'stalog', 'surfhab', 'confort1', 'confort2',
            'confort3', 'confort4', 'ancons', 'sitlog', 'nbphab', 'rg', 'cc']
        imput00 = imput00[kept_variables]
        imput00.rename(columns={'mena': 'ident_men'}, inplace=True)
        # TODO: continue variable cleaning
        var_to_filnas = ['surfhab']
        for var_to_filna in var_to_filnas:
            imput00[var_to_filna] = imput00[var_to_filna].fillna(0)
        var_to_ints = ['sitlog', 'confort1', 'stalog', 'surfhab', 'ident_men',
            'ancons', 'nbphab']
        for var_to_int in var_to_ints:
            imput00[var_to_int] = imput00[var_to_int].astype(int)
        depenses = temporary_store['depenses_{}'.format(year)]
        depenses.reset_index(inplace=True)
        depenses_small = depenses[['ident_men', '04110', 'pondmen']].copy()
        depenses_small.ident_men = depenses_small.ident_men.astype('int')
        imput00 = depenses_small.merge(imput00, on='ident_men').set_index('ident_men')
        imput00.rename(columns={'04110': 'loyer_reel'}, inplace=True)
        # Indicator: the rent is known and the occupant is a tenant.
        imput00['observe'] = (imput00.loyer_reel > 0) & (imput00.stalog.isin([3, 4]))
        imput00['maison_appart'] = imput00.sitlog == 1
        # Surface category 1..8 from thresholds on inhabitable surface.
        imput00['catsurf'] = (
            1
            + (imput00.surfhab > 15)
            + (imput00.surfhab > 30)
            + (imput00.surfhab > 40)
            + (imput00.surfhab > 60)
            + (imput00.surfhab > 80)
            + (imput00.surfhab > 100)
            + (imput00.surfhab > 150)
            )
        assert imput00.catsurf.isin(range(1, 9)).all()
        # TODO: check what is being done here, in particular the value
        # catsurf == 2 omitted in the original stata code.
        # NOTE(review): the four assignments below each overwrite the
        # previous one, so only the last (cc == 4 / catsurf == 1) survives;
        # attribute-style assignment also does not create a DataFrame
        # column — confirm against the original stata code.
        imput00.maison = 1 - ((imput00.cc == 5) & (imput00.catsurf == 1) & (imput00.maison_appart == 1))
        imput00.maison = 1 - ((imput00.cc == 5) & (imput00.catsurf == 3) & (imput00.maison_appart == 1))
        imput00.maison = 1 - ((imput00.cc == 5) & (imput00.catsurf == 8) & (imput00.maison_appart == 1))
        imput00.maison = 1 - ((imput00.cc == 4) & (imput00.catsurf == 1) & (imput00.maison_appart == 1))
        try:
            parser = SafeConfigParser()
            config_local_ini = os.path.join(config_files_directory, 'config_local.ini')
            config_ini = os.path.join(config_files_directory, 'config.ini')
            parser.read([config_ini, config_local_ini])
            directory_path = os.path.normpath(
                parser.get("openfisca_france_indirect_taxation", "assets"))
            hotdeck = pandas.read_stata(os.path.join(directory_path, 'hotdeck_result.dta'))
        except:
            # NOTE(review): bare except silently falls back to the survey's
            # bundled hotdeck table on ANY failure, including typos above.
            hotdeck = survey.get_values(table='hotdeck_result')
        imput00.reset_index(inplace=True)
        hotdeck.ident_men = hotdeck.ident_men.astype('int')
        imput00 = imput00.merge(hotdeck, on='ident_men')
        # Observed tenants keep their real rent: zero out the imputed one.
        # NOTE(review): chained assignment — pandas may warn and/or fail to
        # write through; confirm with .loc[imput00.observe, 'loyer_impute'].
        imput00.loyer_impute[imput00.observe] = 0
        imput00.reset_index(inplace=True)
        loyers_imputes = imput00[['ident_men', 'loyer_impute']].copy()
        assert loyers_imputes.loyer_impute.notnull().all()
        loyers_imputes.rename(columns=dict(loyer_impute='0411'), inplace=True)
    # For BdF 2000 and 2005, use the imputed rents computed by INSEE.
    if year == 2000:
        # Keep the imputed rents (available in the households table).
        loyers_imputes = survey.get_values(table="menage", variables=['ident', 'rev81'])
        loyers_imputes.rename(
            columns={'ident': 'ident_men', 'rev81': 'poste_coicop_421', },
            inplace=True,
            )
    if year == 2005:
        # Keep the imputed rents (available in the households table).
        loyers_imputes = survey.get_values(table="menage")
        kept_variables = ['ident_men', 'rev801_d']
        loyers_imputes = loyers_imputes[kept_variables]
        loyers_imputes.rename(columns={'rev801_d': 'poste_coicop_421'}, inplace=True)
    if year == 2011:
        try:
            loyers_imputes = survey.get_values(table="MENAGE")
        except:
            # NOTE(review): bare except used to paper over differing
            # table-name case between survey releases.
            loyers_imputes = survey.get_values(table="menage")
        kept_variables = ['ident_me', 'rev801']
        loyers_imputes = loyers_imputes[kept_variables]
        loyers_imputes.rename(
            columns={'rev801': 'poste_coicop_421', 'ident_me': 'ident_men'},
            inplace=True)
    # Join with the expenses-by-COICOP table.
    loyers_imputes.set_index('ident_men', inplace=True)
    temporary_store['loyers_imputes_{}'.format(year)] = loyers_imputes
    depenses = temporary_store['depenses_{}'.format(year)]
    depenses.index = depenses.index.astype('int64')
    loyers_imputes.index = loyers_imputes.index.astype('int64')
    assert set(depenses.index) == set(loyers_imputes.index)
    assert len(set(depenses.columns).intersection(set(loyers_imputes.columns))) == 0
    depenses = depenses.merge(loyers_imputes, left_index=True, right_index=True)
    # Step 0-1-3: save the homogenised expense tables in the temporary store.
    temporary_store['depenses_bdf_{}'.format(year)] = depenses
|
def run_commands(self, commands):
    """Only useful for EOS."""
    # Guard clause: non-EOS profiles mimic the real driver's missing-RPC error.
    if "eos" not in self.profile:
        raise AttributeError("MockedDriver instance has not attribute '_rpc'")
    results = self.parent.cli(commands)
    # cli() returns a mapping; unwrap its first value.
    return list(results.values())[0]
|
def copy(self):
    """Returns a copy of the context."""
    other = ContextModel(self._context, self.parent())
    other._stale = self._stale
    other._modified = self._modified
    # Shallow-copy the request list so the two contexts do not share it.
    other.request = self.request[:]
    # Plain attribute copies.
    for attr in ('packages_path', 'implicit_packages', 'package_filter',
                 'caching', 'default_patch_lock'):
        setattr(other, attr, getattr(self, attr))
    # Patch locks are nested structures, so deep-copy them.
    other.patch_locks = copy.deepcopy(self.patch_locks)
    return other
|
def update_with_zero_body(self, uri=None, timeout=-1, custom_headers=None):
    """Makes a PUT request to update a resource when no request body is required.

    Args:
        uri: Allows to use a different URI other than resource URI
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.
        custom_headers: Allows to set custom HTTP headers.

    Returns:
        A dict with updated resource data.
    """
    # Default to this resource's own URI when none was supplied.
    target_uri = uri if uri else self.data['uri']
    logger.debug('Update with zero length body (uri = %s)' % target_uri)
    return self._helper.do_put(target_uri, None, timeout, custom_headers)
|
def doi(self):
    '''Returns ISBN number with segment hyphenation.

    Data obtained from https://www.isbn-international.org/
    https://www.isbn-international.org/export_rangemessage.xml

    @return: a DOI-style string built from the ISBN13: "10." + first
        three digits + "." + middle digits + "/" + trailing segment
    '''
    # Lazily build the shared hyphenation range table on first use.
    if not ISBN.hyphenRange:
        ISBN.hyphenRange = hyphen.ISBNRange()
    # seg[3] presumably holds the length of the final (publication)
    # segment — TODO confirm against hyphen.ISBNRange.hyphensegments.
    seg = ISBN.hyphenRange.hyphensegments(self._id)
    # Split _id around the last (1 + seg[3]) characters.
    return '10.' + self._id[0:3] + '.' + self._id[3:-(1 + seg[3])] + '/' + self._id[-(1 + seg[3]):]
|
def unpacktar(tarfile, destdir):
    """Unpack given tarball into the specified dir.

    :param tarfile: path to the .tar.gz archive
    :param destdir: directory to extract into
    :raises: re-raises any extraction failure after logging it
    """
    tarfile = cygpath(os.path.abspath(tarfile))
    log.debug("unpack tar %s into %s", tarfile, destdir)
    # Fixed: use a context manager so the devnull handle is closed even
    # when extraction fails (the original leaked it on the raise path).
    with open(os.devnull, "w") as nullfd:
        try:
            check_call([TAR, '-xzf', tarfile], cwd=destdir, stdout=nullfd,
                       preexec_fn=_noumask)
        except Exception:
            log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
            raise
|
def ascii_listing2program_dump(self, basic_program_ascii, program_start=None):
    """Convert an ASCII BASIC program listing into tokens.

    The resulting token list can be inserted into the emulator RAM.
    """
    # Fall back to the default load address when none is given.
    start = self.DEFAULT_PROGRAM_START if program_start is None else program_start
    lines = self.ascii_listing2basic_lines(basic_program_ascii, start)
    dump = self.listing.basic_lines2program_dump(lines, start)
    assert isinstance(dump, bytearray), (
        "is type: %s and not bytearray: %s" % (type(dump), repr(dump)))
    return dump
|
def _handle_retry(self, resp):
    """Handle any exceptions during API request or
    parsing its response status code.

    Must be called from inside an ``except`` block: it inspects
    ``sys.exc_info()`` to decide whether to retry or re-raise.

    Parameters:
        resp: requests.Response instance obtained during concerning request
            or None, when request failed

    Returns: True if should retry our request or raises original Exception
    """
    exc_t, exc_v, exc_tb = sys.exc_info()
    if exc_t is None:
        # Not inside an except block — caller misuse.
        raise TypeError('Must be called in except block.')
    # _retry_on may mix exception classes and integer error codes;
    # split it into the two kinds.
    retry_on_exc = tuple((x for x in self._retry_on if inspect.isclass(x)))
    retry_on_codes = tuple((x for x in self._retry_on if isinstance(x, int)))
    if issubclass(exc_t, ZendeskError):
        # API-level errors retry when either the exception class or its
        # error code is listed; otherwise re-raise with the original
        # traceback preserved.
        code = exc_v.error_code
        if exc_t not in retry_on_exc and code not in retry_on_codes:
            six.reraise(exc_t, exc_v, exc_tb)
    else:
        # Transport-level errors retry only on listed exception classes.
        if not issubclass(exc_t, retry_on_exc):
            six.reraise(exc_t, exc_v, exc_tb)
    if resp is not None:
        # Honour the server's Retry-After header when present and parsable.
        try:
            retry_after = float(resp.headers.get('Retry-After', 0))
            time.sleep(retry_after)
        except (TypeError, ValueError):
            pass
    return True
|
def get_attached_pipettes(self):
    """Mimic the behavior of robot.get_attached_pipettes."""
    # Bypass any attribute interception on self to reach the raw API object.
    api = object.__getattribute__(self, '_api')
    instrs = {}
    for mount, data in api.attached_instruments.items():
        entry = {
            'model': data.get('name', None),
            'id': data.get('pipette_id', None),
            'mount_axis': Axis.by_mount(mount),
            'plunger_axis': Axis.of_plunger(mount),
        }
        # Only instruments with a name report a tip length.
        if data.get('name'):
            entry['tip_length'] = data.get('tip_length', None)
        instrs[mount.name.lower()] = entry
    return instrs
|
def param_docstrings(self, info, max_col_len=100, only_changed=False):
    """Build a string to that presents all of the parameter
    docstrings in a clean format (alternating red and blue for
    readability).

    :param info: (params, val_dict, changed) triple
    :param max_col_len: unused in this method — kept for interface
        compatibility with siblings
    :param only_changed: if True, only show parameters listed in *changed*
    """
    (params, val_dict, changed) = info
    contents = []
    displayed_params = {}
    for name, p in params.items():
        if only_changed and not (name in changed):
            continue
        displayed_params[name] = p
    # Column where continuation lines start: longest name plus ": " padding.
    # NOTE(review): max() raises ValueError when displayed_params is empty
    # (e.g. only_changed with nothing changed) — confirm callers guard this.
    right_shift = max(len(name) for name in displayed_params.keys()) + 2
    for i, name in enumerate(sorted(displayed_params)):
        p = displayed_params[name]
        heading = "%s: " % name
        unindented = textwrap.dedent(
            "< No docstring available >" if p.doc is None else p.doc)
        if (WARN_MISFORMATTED_DOCSTRINGS
                and not unindented.startswith("\n")
                and len(unindented.splitlines()) > 1):
            param.main.warning(
                "Multi-line docstring for %r is incorrectly formatted "
                " (should start with newline)", name)
        # Strip any starting newlines.
        while unindented.startswith("\n"):
            unindented = unindented[1:]
        lines = unindented.splitlines()
        if len(lines) > 1:
            # First doc line follows the heading; the rest are indented
            # so they align under it.
            tail = ['%s%s' % (' ' * right_shift, line) for line in lines[1:]]
            all_lines = [heading.ljust(right_shift) + lines[0]] + tail
        elif len(lines) == 1:
            all_lines = [heading.ljust(right_shift) + lines[0]]
        else:
            all_lines = []
        # Alternate red and blue for docstrings.
        if i % 2:
            contents.extend([red % el for el in all_lines])
        else:
            contents.extend([blue % el for el in all_lines])
    return "\n".join(contents)
|
def delete(self, docids):
    """Delete documents from the current session."""
    self.check_session()
    outcome = self.session.delete(docids)
    # In autosession mode every mutation is committed immediately.
    if self.autosession:
        self.commit()
    return outcome
|
def construct(self, mapping: dict, **kwargs):
    """Construct an object from a mapping.

    :param mapping: the constructor definition, with ``__type__`` name
        and keyword arguments
    :param kwargs: additional keyword arguments to pass to the constructor
    """
    assert '__type__' not in kwargs and '__args__' not in kwargs
    # Merge without mutating the caller's mapping; kwargs win on clashes.
    merged = dict(mapping)
    merged.update(kwargs)
    factory = self.load_name(merged.pop('__type__'))
    positional = merged.pop('__args__', [])
    return factory(*positional, **merged)
|
async def jsk_debug(self, ctx: commands.Context, *, command_string: str):
    """Run a command timing execution and catching exceptions."""
    # Re-dispatch the given command string under a copy of this context.
    alt_ctx = await copy_context_with(ctx, content=ctx.prefix + command_string)
    if alt_ctx.command is None:
        return await ctx.send(f'Command "{alt_ctx.invoked_with}" is not found')
    start = time.perf_counter()
    # ReplResponseReactor reports exceptions raised inside its block to
    # the channel; submit() registers the running task with this cog.
    async with ReplResponseReactor(ctx.message):
        with self.submit(ctx):
            await alt_ctx.command.invoke(alt_ctx)
    end = time.perf_counter()
    return await ctx.send(f"Command `{alt_ctx.command.qualified_name}` finished in {end - start:.3f}s.")
|
def get_pv_args(name, session=None, call=None):
    '''Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    # This is an action, not a function: reject -f/--function invocations.
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.')
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    # Empty PV args are reported as None.
    return pv_args if pv_args else None
|
def _count_async(self, limit=None, **q_options):
    """Internal version of count_async().

    Yields datastore RPCs (tasklet style) and delivers the result count
    via ``raise tasklets.Return(...)``.
    """
    # TODO: Support offset by incorporating it to the limit.
    if 'offset' in q_options:
        raise NotImplementedError('.count() and .count_async() do not support '
                                  'offsets at present.')
    if 'limit' in q_options:
        raise TypeError('Cannot specify limit as a non-keyword argument and as a '
                        'keyword argument simultaneously.')
    elif limit is None:
        limit = _MAX_LIMIT
    if self._needs_multi_query():
        # _MultiQuery does not support iterating over result batches,
        # so just fetch results and count them.
        # TODO: Use QueryIterator to avoid materializing the results list.
        q_options.setdefault('batch_size', limit)
        q_options.setdefault('keys_only', True)
        results = yield self.fetch_async(limit, **q_options)
        raise tasklets.Return(len(results))
    # Issue a special query requesting 0 results at a given offset.
    # The skipped_results count will tell us how many hits there were
    # before that offset without fetching the items.
    q_options['offset'] = limit
    q_options['limit'] = 0
    options = self._make_options(q_options)
    conn = tasklets.get_context()._conn
    dsquery = self._get_query(conn)
    rpc = dsquery.run_async(conn, options)
    total = 0
    # A single batch may skip fewer results than requested, so keep
    # issuing follow-up batches with a correspondingly reduced offset
    # until the datastore reports no more.
    while rpc is not None:
        batch = yield rpc
        options = QueryOptions(offset=options.offset - batch.skipped_results,
                               config=options)
        rpc = batch.next_batch_async(options)
        total += batch.skipped_results
    raise tasklets.Return(total)
|
def color_range(startcolor, goalcolor, steps):
    """Wrapper for interpolate_tuple that accepts colors as html ("#CCCCC" and such)."""
    # Convert both endpoints to RGB tuples, then delegate.
    return interpolate_tuple(
        make_color_tuple(startcolor),
        make_color_tuple(goalcolor),
        steps,
    )
|
def add(self, key, val, minutes):
    """Store an item in the cache if it does not exist.

    :param key: The cache key
    :type key: str
    :param val: The cache value
    :type val: mixed
    :param minutes: The lifetime in minutes of the cached value
    :type minutes: int
    :rtype: bool
    """
    # memcache expects the TTL in seconds.
    seconds = minutes * 60
    return self._memcache.add(self._prefix + key, val, seconds)
|
def parameterstep(timestep=None):
    """Define a parameter time step size within a parameter control file.

    Argument:
        * timestep(|Period|): Time step size.

    Function parameterstep should usually be applied in a line
    immediately behind the model import.  Defining the step size of time
    dependent parameters is a prerequisite to access any model specific
    parameter.

    Note that parameterstep implements some namespace magic by
    means of the module |inspect|.  This makes things a little
    complicated for framework developers, but it eases the definition of
    parameter control files for framework users.
    """
    if timestep is not None:
        parametertools.Parameter.parameterstep(timestep)
    # Operate directly on the caller's namespace (the control file).
    namespace = inspect.currentframe().f_back.f_locals
    model = namespace.get('model')
    if model is None:
        # First call: instantiate the model and wire it into the namespace.
        model = namespace['Model']()
        namespace['model'] = model
        if hydpy.pub.options.usecython and 'cythonizer' in namespace:
            # Attach the cythonized counterparts of the model, its
            # parameters, and its sequences.
            cythonizer = namespace['cythonizer']
            namespace['cythonmodule'] = cythonizer.cymodule
            model.cymodel = cythonizer.cymodule.Model()
            namespace['cymodel'] = model.cymodel
            model.cymodel.parameters = cythonizer.cymodule.Parameters()
            model.cymodel.sequences = cythonizer.cymodule.Sequences()
            # Copy numerical constants/variables onto fresh cython objects.
            for numpars_name in ('NumConsts', 'NumVars'):
                if hasattr(cythonizer.cymodule, numpars_name):
                    numpars_new = getattr(cythonizer.cymodule, numpars_name)()
                    numpars_old = getattr(model, numpars_name.lower())
                    for (name_numpar, numpar) in vars(numpars_old).items():
                        setattr(numpars_new, name_numpar, numpar)
                    setattr(model.cymodel, numpars_name.lower(), numpars_new)
            # Shadow python-level public attributes with the cython versions.
            for name in dir(model.cymodel):
                if (not name.startswith('_')) and hasattr(model, name):
                    setattr(model, name, getattr(model.cymodel, name))
        if 'Parameters' not in namespace:
            namespace['Parameters'] = parametertools.Parameters
        model.parameters = namespace['Parameters'](namespace)
        if 'Sequences' not in namespace:
            namespace['Sequences'] = sequencetools.Sequences
        model.sequences = namespace['Sequences'](**namespace)
        # Expose parameters and sequences under their own names so the
        # control file can refer to them directly.
        namespace['parameters'] = model.parameters
        for pars in model.parameters:
            namespace[pars.name] = pars
        namespace['sequences'] = model.sequences
        for seqs in model.sequences:
            namespace[seqs.name] = seqs
        if 'Masks' in namespace:
            model.masks = namespace['Masks'](model)
            namespace['masks'] = model.masks
    try:
        namespace.update(namespace['CONSTANTS'])
    except KeyError:
        pass
    # Expose either all control parameters or only the focused one;
    # non-focused parameters become no-op callables.
    focus = namespace.get('focus')
    for par in model.parameters.control:
        try:
            if (focus is None) or (par is focus):
                namespace[par.name] = par
            else:
                namespace[par.name] = lambda *args, **kwargs: None
        except AttributeError:
            pass
|
def decode(self, data, erase_pos=None, only_erasures=False):
    '''Repair a message, whatever its size is, by using chunking.

    erase_pos is a list of positions where you know (or greatly suspect
    at least) there is an erasure (ie, wrong character but you know it's
    at this position).  Just input the list of all positions you know
    there are errors, and this method will automatically split the
    erasures positions to attach to the corresponding data chunk.
    '''
    if isinstance(data, str):
        data = bytearray(data, "latin-1")
    dec = bytearray()
    # Split the long message into nsize-long chunks.
    for i in xrange(0, len(data), self.nsize):
        chunk = data[i:i + self.nsize]
        # Extract the erasures for this chunk.
        e_pos = []
        if erase_pos:
            # First extract the erasures for this chunk (all erasures
            # below the maximum chunk length).
            # NOTE(review): the boundary tests use <= / > nsize while the
            # shift below subtracts nsize + 1 — confirm these off-by-one
            # choices against the encoder's chunk layout.
            e_pos = [x for x in erase_pos if x <= self.nsize]
            # Then remove the extracted erasures from the big list and
            # decrement all subsequent position values by the current
            # chunk's size, to realign them for the next iteration.
            erase_pos = [x - (self.nsize + 1) for x in erase_pos if x > self.nsize]
        # Decode/repair this chunk!
        dec.extend(rs_correct_msg(chunk, self.nsym, fcr=self.fcr,
            generator=self.generator, erase_pos=e_pos,
            only_erasures=only_erasures)[0])
    return dec
|
def sample(generator, min=-1, max=1, width=SAMPLE_WIDTH):
    '''Convert audio waveform generator into packed sample generator.

    :param generator: iterable of float samples within [min, max]
    :param min: lower bound of the input range (name shadows the builtin
        but is part of the caller-visible interface)
    :param max: upper bound of the input range
    :param width: sample width in bytes (1, 2 or 4)
    '''
    # Select the struct format by sample width.
    # NOTE(review): width 1 maps to '<B' (unsigned char) although the
    # original comment said "signed char" — confirm which encoding
    # downstream consumers expect.
    fmt = {1: '<B', 2: '<h', 4: '<i'}[width]
    # Clip to [min, max], rescale into the signed integer range of the
    # chosen width, then pack each sample little-endian.
    return (struct.pack(fmt, int(sample))
            for sample in normalize(hard_clip(generator, min, max),
                                    min, max,
                                    -2 ** (width * 8 - 1),
                                    2 ** (width * 8 - 1) - 1))
|
def files(self):
    """Return the names of files to be created."""
    project = self.project_name
    # Each entry: [directory, file name, template name].
    return [
        [project, project + '.tex', 'LaTeXBookFileTemplate'],
        [project, 'references.bib', 'BibTeXFileTemplate'],
        [project, 'Makefile', 'LaTeXMakefileFileTemplate'],
    ]
|
def getrefs(self, reflist):
    """Gather all the fields referred to by *reflist*.

    reflist comes from getobjectref in parse_idd.py: getobjectref
    returns a dictionary, and reflist is one item of that dictionary.
    """
    gathered = []
    for element in reflist:
        # element[0] is the object name, element[1] the field key.
        key = element[0].upper()
        if key in self.dt:
            gathered.extend(record[element[1]] for record in self.dt[key])
    return gathered
|
def find(self, name, menu=None):
    """Finds a menu item by name and returns it.

    :param name: The menu item name.
    :param menu: list of items to search; falls back to self.menu when
        empty or None.
    :returns: the matching item, or None when nothing matches.
    """
    # Preserve the original truthiness fallback: an empty list also
    # falls back to self.menu.
    items = menu or self.menu
    for item in items:
        if item.name == name:
            return item
        # Depth-first descent into this item's children.
        if item.childs:
            found = self.find(name, menu=item.childs)
            if found:
                return found
|
def edges(word: str, lang: str = "th"):
    """Get edges from the ConceptNet API.

    :param str word: word to look up
    :param str lang: language code
    """
    url = f"http://api.conceptnet.io/c/{lang}/{word}"
    response = requests.get(url)
    return response.json()["edges"]
|
def get_by(self, prop, val, raise_exc=False):
    '''Retrieve the first item whose metadata property ``prop`` matches
    ``val``.

    ``val`` is serialized before the lookup.  When no item matches,
    ``None`` is returned unless ``raise_exc`` is true, in which case the
    underlying ``KeyError``/``IndexError`` propagates.
    '''
    try:
        # Serialize inside the try so lookup-style failures from
        # serialize() follow the same raise/None contract.
        key = self.serialize(val)
        return self._meta[prop][key][0]
    except (KeyError, IndexError):
        if raise_exc:
            raise
        return None
|
def get_document(self, document_id):
    """Fetch a stored document by its identifier.

    :param document_id: Document unique identifier.
    :returns: a dictionary containing the document content and any
        associated metadata.
    """
    redis_key = 'doc.%s.%s' % (self.name, decode(document_id))
    return decode_dict(self.db.hgetall(redis_key))
|
def load_setuptools_entrypoints(self, group, name=None):
    """Load plugins advertised under the setuptools entry-point ``group``.

    :param str group: entry point group to load plugins from.
    :param str name: if given, load only plugins with the given ``name``.
    :rtype: int
    :return: the number of plugins loaded by this call.
    """
    from pkg_resources import (iter_entry_points, DistributionNotFound,
                               VersionConflict)
    loaded = 0
    for entry_point in iter_entry_points(group, name=name):
        # Skip anything already registered or explicitly blocked.
        if self.get_plugin(entry_point.name) or self.is_blocked(entry_point.name):
            continue
        try:
            plugin = entry_point.load()
        except DistributionNotFound:
            # The distribution backing the entry point is missing; not fatal.
            continue
        except VersionConflict as exc:
            raise PluginValidationError(
                plugin=None,
                message="Plugin %r could not be loaded: %s!" % (entry_point.name, exc),
            )
        self.register(plugin, name=entry_point.name)
        self._plugin_distinfo.append((plugin, entry_point.dist))
        loaded += 1
    return loaded
|
def subprocess_output(command, raise_on_empty_output):
    """Run ``command`` and collect its output through temporary files.

    This is a stub that lets checks requiring ``Popen`` run without an
    Agent (e.g. during tests or development); it is not supposed to be
    used anywhere outside the ``datadog_checks.utils`` package.

    :param command: argv list to execute.
    :param raise_on_empty_output: raise ``SubprocessOutputEmptyError``
        when stdout comes back empty.
    :returns: ``(stdout, stderr, returncode)``.
    """
    # Spool stdout/stderr to temp files: the subprocess.Popen docs warn
    # that data read through subprocess.PIPE is buffered in memory and
    # suggest avoiding PIPE when the data size is large or unlimited.
    with tempfile.TemporaryFile() as out_file, tempfile.TemporaryFile() as err_file:
        process = subprocess.Popen(command, stdout=out_file, stderr=err_file)
        process.wait()
        out_file.seek(0)
        err_file.seek(0)
        captured_out = out_file.read()
        captured_err = err_file.read()
    if not captured_out and raise_on_empty_output:
        raise SubprocessOutputEmptyError("get_subprocess_output expected output but had none.")
    return captured_out, captured_err, process.returncode
|
def main(argv):
    """Set up the command-line option parser and delegate all of the
    real work to fetch_and_write_mrca."""
    import argparse
    parser = argparse.ArgumentParser(
        prog='ot-tree-of-life-mrca',
        description='Uses Open Tree of Life web services to the MRCA for a set of OTT IDs.')
    parser.add_argument('ottid', nargs='*', type=int, help='OTT IDs')
    parser.add_argument('--subtree', action='store_true', default=False, required=False,
                        help='write a newick representation of the subtree rooted at this mrca')
    parser.add_argument('--induced-subtree', action='store_true', default=False, required=False,
                        help='write a newick representation of the topology of the requested taxa in the synthetic tree (the subtree pruned to just the queried taxa)')
    parser.add_argument('--details', action='store_true', default=False, required=False,
                        help='report more details about the mrca node')
    parsed = parser.parse_args(argv)
    ott_ids = parsed.ottid
    if not ott_ids:
        # No IDs supplied: fall back to a small demonstration query.
        sys.stderr.write('No OTT IDs provided. Running a dummy query with 770302 770315\n')
        ott_ids = [770302, 770315]
    fetch_and_write_mrca(ott_ids, parsed.details, parsed.subtree,
                         parsed.induced_subtree, sys.stdout, sys.stderr)
|
def validate(self, value):
    """Coerce *value* to a float and validate it.

    Accepts: float, int, long, str, unicode
    Returns: float
    Raises: ValueError if the validated result is not a float.
    """
    # Coerce the accepted scalar/string types before delegating to the
    # base class validator.
    if isinstance(value, (str, unicode, int, long)):
        value = float(value)
    value = super(Float, self).validate(value)
    if not isinstance(value, float):
        # Fixed grammar of the error message ("an float" -> "a float").
        raise ValueError("Not a float: %r" % (value,))
    return value
|
def delete(self, **options):
    """Permanently delete this blob from Blobstore.

    Args:
      **options: Options for create_rpc().
    """
    # Kick off the asynchronous delete, then block until it completes.
    delete_async(self.key(), **options).get_result()
|
def adjust(color, attribute, percent):
    """Return *color* with the given HSL *attribute* adjusted by *percent*."""
    red, green, blue, alpha, kind = parse_color(color)
    # Adjust in HSL space, then convert back to RGB for re-serialization.
    hsl = rgb_to_hsl(red, green, blue)
    red, green, blue = hsl_to_rgb(*_adjust(hsl, attribute, percent))
    return unparse_color(red, green, blue, alpha, kind)
|
def git_clone ( target_dir , repo_location , branch_or_tag = None , verbose = True ) :
    """Clone repo at repo_location to target_dir and checkout branch_or_tag.

    If branch_or_tag is not specified, the HEAD of the primary
    branch of the cloned repo is checked out.
    """
    # NOTE(review): pipes.quote() performs *shell* quoting, but the
    # command is assembled as an argv list -- if execute_git_command runs
    # the list without a shell, paths containing spaces would keep their
    # literal quote characters.  Verify how execute_git_command executes.
    target_dir = pipes . quote ( target_dir )
    command = [ 'git' , 'clone' ]
    if verbose :
        command . append ( '--verbose' )
    # Local-directory clones disable hardlinks so the clone is fully
    # independent of the source repository's object store.
    if os . path . isdir ( repo_location ) :
        command . append ( '--no-hardlinks' )
    command . extend ( [ pipes . quote ( repo_location ) , target_dir ] )
    if branch_or_tag :
        # --branch is appended after the positional arguments; git's
        # option parser accepts options in that position.
        command . extend ( [ '--branch' , branch_or_tag ] )
    return execute_git_command ( command )
|
def replace_header(self, header_text):
    """Rewrite ``self.outfile`` in place, replacing the pip-compile
    header with ``header_text`` while keeping the body untouched."""
    # Read phase: split off the existing header, keep only the body.
    with open(self.outfile, 'rt') as stream:
        _, body_lines = self.split_header(stream)
    # Write phase: new header first, then the preserved body.
    with open(self.outfile, 'wt') as stream:
        stream.write(header_text)
        stream.writelines(body_lines)
|
def parse_pgurl(self, url):
    """Parse a Postgres URL into its connection parts.

    Returns a dict with keys ``user``, ``password``, ``host``, ``port``
    (defaulting to 5432 when absent), and ``database``.
    """
    parts = urlsplit(url)
    return {
        'user': parts.username,
        'password': parts.password,
        'database': parts.path.lstrip('/'),
        'host': parts.hostname,
        'port': parts.port or 5432,
    }
|
def get(key, default=-1):
    """Backport support for original codes.

    Integer keys are converted directly to a TransType; unknown string
    keys are first registered on the enum with ``default`` as value.
    """
    if isinstance(key, int):
        return TransType(key)
    try:
        return TransType[key]
    except KeyError:
        # Unknown name: register it, then retry the lookup.
        extend_enum(TransType, key, default)
        return TransType[key]
|
def text_badness ( text ) :
    u'''Look for red flags that text is encoded incorrectly:

    Obvious problems:
    - The replacement character \ufffd, indicating a decoding error
    - Unassigned or private-use Unicode characters

    Very weird things:
    - Adjacent letters from two different scripts
    - Letters in scripts that are very rarely used on computers (and
      therefore, someone who is using them will probably get Unicode right)
    - Improbable control characters, such as 0x81

    Moderately weird things:
    - Improbable single-byte characters, such as ƒ or ¬
    - Letters in somewhat rare scripts

    Returns a weirdness score: 100 per error, 10 per very weird thing,
    1 per weird thing.
    '''
    assert isinstance ( text , str )
    errors = 0
    very_weird_things = 0
    weird_things = 0
    # Script of the previous letter, used to spot script changes
    # between adjacent letters.
    prev_letter_script = None
    # Hoist the attribute lookups: this loop runs once per character.
    unicodedata_name = unicodedata . name
    unicodedata_category = unicodedata . category
    for char in text :
        index = ord ( char )
        if index < 256 :
            # Deal quickly with the first 256 characters via the
            # precomputed single-byte tables.
            weird_things += SINGLE_BYTE_WEIRDNESS [ index ]
            if SINGLE_BYTE_LETTERS [ index ] :
                prev_letter_script = 'latin'
            else :
                prev_letter_script = None
        else :
            category = unicodedata_category ( char )
            if category == 'Co' : # Unassigned or private use
                errors += 1
            elif index == 0xfffd : # Replacement character
                errors += 1
            elif index in WINDOWS_1252_GREMLINS :
                # Map the gremlin back to its single-byte slot and reuse
                # the single-byte weirdness table, slightly discounted.
                lowchar = char . encode ( 'WINDOWS_1252' ) . decode ( 'latin-1' )
                weird_things += SINGLE_BYTE_WEIRDNESS [ ord ( lowchar ) ] - 0.5
            if category [ 0 ] == 'L' :
                # It's a letter. What kind of letter? This is typically found
                # in the first word of the letter's Unicode name.
                name = unicodedata_name ( char )
                scriptname = name . split ( ) [ 0 ]
                freq , script = SCRIPT_TABLE . get ( scriptname , ( 0 , 'other' ) )
                if prev_letter_script :
                    if script != prev_letter_script :
                        very_weird_things += 1
                    if freq == 1 :
                        weird_things += 2
                    elif freq == 0 :
                        very_weird_things += 1
                prev_letter_script = script
            else :
                prev_letter_script = None
    return 100 * errors + 10 * very_weird_things + weird_things
|
def _handle_attribute ( self , start ) :
    """Handle a case where a tag attribute is at the head of the tokens.

    *start* is the TagAttrStart token that opened the attribute; tokens
    are consumed until the attribute ends, then the finished Attribute
    node is returned.
    """
    name = quotes = None
    self . _push ( )
    while self . _tokens :
        token = self . _tokens . pop ( )
        if isinstance ( token , tokens . TagAttrEquals ) :
            # Everything written so far was the attribute name; start a
            # fresh buffer for the value.
            name = self . _pop ( )
            self . _push ( )
        elif isinstance ( token , tokens . TagAttrQuote ) :
            quotes = token . char
        elif isinstance ( token , ( tokens . TagAttrStart , tokens . TagCloseOpen , tokens . TagCloseSelfclose ) ) :
            # The attribute ended; push the terminator back for the caller.
            self . _tokens . append ( token )
            if name :
                value = self . _pop ( )
            else :
                # No '=' was seen: the accumulated text is a bare name.
                name , value = self . _pop ( ) , None
            return Attribute ( name , value , quotes , start . pad_first , start . pad_before_eq , start . pad_after_eq )
        else :
            self . _write ( self . _handle_token ( token ) )
    raise ParserError ( "_handle_attribute() missed a close token" )
|
def process_service_check_result(self, service, return_code, plugin_output):
    """Process service check result

    Format of the line that triggers function call::

    PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>

    :param service: service to process check to
    :type service: alignak.objects.service.Service
    :param return_code: exit code of plugin
    :type return_code: int
    :param plugin_output: plugin output
    :type plugin_output: str
    :return: None
    """
    now = time.time()
    cls = service.__class__
    # If globally disabled OR service disabled, do not launch..
    if not cls.accept_passive_checks or not service.passive_checks_enabled:
        return
    try:
        plugin_output = plugin_output.decode('utf8', 'ignore')
        logger.debug('%s > Passive service check plugin output: %s',
                     service.get_full_name(), plugin_output)
    except AttributeError:
        # Python 3 str has no decode(); output is already text.
        pass
    except UnicodeError:
        pass
    # Maybe the check is just too old, if so, bail out!
    if self.current_timestamp < service.last_chk:
        # Bug fix: log the actual age difference in seconds instead of
        # the boolean comparison result that was passed to %d before.
        logger.debug('%s > Passive service check is too old (%d seconds). '
                     'Ignoring, check output: %s',
                     service.get_full_name(),
                     service.last_chk - self.current_timestamp,
                     plugin_output)
        return
    # Create a check object from the external command
    chk = service.launch_check(now, self.hosts, self.services, self.timeperiods,
                               self.daemon.macromodulations,
                               self.daemon.checkmodulations,
                               self.daemon.checks, force=True)
    # Should not be possible to not find the check, but if so, don't crash
    if not chk:
        logger.error('%s > Passive service check failed. None check launched !?',
                     service.get_full_name())
        return
    # Now we 'transform the check into a result'
    # So exit_status, output and status is eaten by the service
    chk.exit_status = return_code
    chk.get_outputs(plugin_output, service.max_plugins_output_length)
    logger.debug('%s > Passive service check output: %s',
                 service.get_full_name(), chk.output)
    chk.status = ACT_STATUS_WAIT_CONSUME
    # We are using the external command timestamp, not the current time.
    chk.check_time = self.current_timestamp
    # Set the corresponding service's check type to passive
    chk.set_type_passive()
    # self.daemon.nb_check_received += 1
    self.send_an_element(chk)
    # Ok now this result will be read by the scheduler the next loop.
    # Raise a passive check log only if needed
    if self.my_conf.log_passive_checks:
        log_level = 'info'
        if return_code == 1:  # WARNING
            log_level = 'warning'
        if return_code == 2:  # CRITICAL
            log_level = 'error'
        self.send_an_element(make_monitoring_log(
            log_level,
            'PASSIVE SERVICE CHECK: %s;%s;%d;%s;%s;%s' % (
                self.hosts[service.host].get_name(), service.get_name(),
                return_code, chk.output, chk.long_output, chk.perf_data)))
|
def slugs_configuration_camera_send(self, target, idOrder, order, force_mavlink1=False):
    '''Control for camara.

    target  : The system setting the commands (uint8_t)
    idOrder : ID 0: brightness 1: aperture 2: iris 3: ICR 4: backlight (uint8_t)
    order   : 1: up/on 2: down/off 3: auto/reset/no action (uint8_t)
    '''
    # Encode the message first, then hand it to the transport layer.
    message = self.slugs_configuration_camera_encode(target, idOrder, order)
    return self.send(message, force_mavlink1=force_mavlink1)
|
def _parse_seq_preheader ( line ) :
"""$ 3 = 227(209 ) :"""
|
match = re . match ( r"\$ (\d+) = (\d+) \( (\d+) \):" , line , re . VERBOSE )
if not match :
raise ValueError ( "Unparseable header: " + line )
index , this_len , query_len = match . groups ( )
return map ( int , ( index , this_len , query_len ) )
|
def plot_predict ( self , h = 5 , past_values = 20 , intervals = True , ** kwargs ) :
    """Plots forecast with the estimated model

    Parameters
    ----------
    h : int (default: 5)
        How many steps ahead would you like to forecast?
    past_values : int (default: 20)
        How many past observations to show on the forecast graph?
    intervals : Boolean
        Would you like to show 95% prediction intervals for the forecast?

    Returns
    -------
    - Plot of the forecast
    - Error bars, forecasted_values, plot_values, plot_index
    """
    import matplotlib . pyplot as plt
    import seaborn as sns
    figsize = kwargs . get ( 'figsize' , ( 10 , 7 ) )
    if self . latent_variables . estimated is False :
        raise Exception ( "No latent variables estimated!" )
    else :
        predictions , variance , lower , upper = self . _construct_predict ( self . latent_variables . get_z_values ( ) , h )
        # Append the forecasts to the observed data so the plotted
        # series is contiguous across the forecast boundary.
        full_predictions = np . append ( self . data , predictions )
        full_lower = np . append ( self . data , lower )
        full_upper = np . append ( self . data , upper )
        date_index = self . shift_dates ( h )
        # Plot values (how far to look back), un-normalized for display.
        plot_values = full_predictions [ - h - past_values : ] * self . _norm_std + self . _norm_mean
        plot_index = date_index [ - h - past_values : ]
        # Lower and upper intervals, anchored at the last pre-forecast
        # value so the interval fan opens from the observed series.
        lower = np . append ( full_predictions [ - h - 1 ] , lower )
        upper = np . append ( full_predictions [ - h - 1 ] , upper )
        plt . figure ( figsize = figsize )
        if intervals == True :
            plt . fill_between ( date_index [ - h - 1 : ] , lower * self . _norm_std + self . _norm_mean , upper * self . _norm_std + self . _norm_mean , alpha = 0.2 )
        plt . plot ( plot_index , plot_values )
        plt . title ( "Forecast for " + self . data_name )
        plt . xlabel ( "Time" )
        plt . ylabel ( self . data_name )
        plt . show ( )
|
def enable_job(name, **kwargs):
    '''Enable a job in the minion's schedule

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.enable_job job1
    '''
    ret = {'comment': [], 'result': True}
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        # Bug fix: return immediately -- previously execution fell
        # through and the error comment was silently overwritten.
        return ret
    if 'test' in __opts__ and __opts__['test']:
        ret['comment'] = 'Job: {0} would be enabled in schedule.'.format(name)
    else:
        persist = kwargs.get('persist', True)
        if name in list_(show_all=True, where='opts', return_yaml=False):
            event_data = {'name': name, 'func': 'enable_job', 'persist': persist}
        elif name in list_(show_all=True, where='pillar', return_yaml=False):
            # Pillar-defined jobs are never persisted to disk.
            event_data = {'name': name, 'where': 'pillar',
                          'func': 'enable_job', 'persist': False}
        else:
            ret['comment'] = 'Job {0} does not exist.'.format(name)
            ret['result'] = False
            return ret
        try:
            eventer = salt.utils.event.get_event('minion', opts=__opts__)
            res = __salt__['event.fire'](event_data, 'manage_schedule')
            if res:
                event_ret = eventer.get_event(
                    tag='/salt/minion/minion_schedule_enabled_job_complete',
                    wait=30)
                if event_ret and event_ret['complete']:
                    schedule = event_ret['schedule']
                    # Check the item now exists in the schedule and is enabled.
                    if name in schedule and schedule[name]['enabled']:
                        ret['result'] = True
                        ret['comment'] = 'Enabled Job {0} in schedule.'.format(name)
                    else:
                        ret['result'] = False
                        ret['comment'] = 'Failed to enable job {0} in schedule.'.format(name)
                    return ret
        except KeyError:
            # Effectively a no-op, since we can't really return without
            # an event system.
            ret['comment'] = 'Event module not available. Schedule enable job failed.'
    return ret
|
def get_context_from_gdoc(self):
    """Return the context from Google Sheets through a simple TTL cache."""
    try:
        now = int(time.time())
        if not self.data or now > self.expires:
            # Cache empty or stale: refetch, then reset the expiry.
            self.data = self._get_context_from_gdoc(self.project.SPREADSHEET_KEY)
            ttl = getattr(self.project, 'SPREADSHEET_CACHE_TTL', SPREADSHEET_CACHE_TTL)
            self.expires = int(time.time()) + ttl
        return self.data
    except AttributeError:
        # Project has no spreadsheet configured: no context.
        return {}
|
def delete(self, using=None):
    """Delete this post instance.

    When the post is the only one embedded in its topic, the whole topic
    is deleted instead; otherwise only this post is removed and the
    topic trackers are refreshed.
    """
    if self.is_alone:
        self.topic.delete()
    else:
        super(AbstractPost, self).delete(using)
        self.topic.update_trackers()
|
def map_weights(self):
    """Return the weights reshaped for visualization.

    The weights are reshaped as
    ``(W.shape[0], prod(W.shape[1:-1]), W.shape[2])``, which makes
    patterns easy to see even for hyper-dimensional SOMs.  For
    one-dimensional SOMs the returned array has shape
    ``(W.shape[0], 1, W.shape[2])``.

    Returns
    -------
    w : numpy array
        A three-dimensional array containing the weights in a 2D array
        for easy visualization.
    """
    dims = self.map_dimensions
    # Collapse all trailing map dimensions into one axis.
    trailing = np.prod(dims[1:]) if len(dims) != 1 else 1
    return self.weights.reshape((dims[0], trailing, self.data_dimensionality))
|
def add_final_live(self, print_progress=True, print_func=None):
    """**A wrapper that executes the loop adding the final live points.**

    Adds the final set of live points to the pre-existing sequence of
    dead points from the current nested sampling run.

    Parameters
    ----------
    print_progress : bool, optional
        Whether or not to output a simple summary of the current run
        that updates with each iteration. Default is `True`.
    print_func : function, optional
        A function that prints out the current state of the sampler.
        If not provided, the default :meth:`results.print_fn` is used.
    """
    if print_func is None:
        print_func = print_fn
    total_calls = self.ncall
    iteration = self.it - 1
    for live_index, results in enumerate(self.add_live_points()):
        (worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar,
         h, nc, worst_it, boundidx, bounditer, eff, delta_logz) = results
        # Clamp runaway values so the progress printout stays readable.
        if delta_logz > 1e6:
            delta_logz = np.inf
        if logz <= -1e6:
            logz = -np.inf
        if print_progress:
            print_func(results, iteration, total_calls,
                       add_live_it=live_index + 1, dlogz=0.01)
|
def marksheet(self):
    """Build an empty marking sheet as a pandas DataFrame.

    One row is created per markable question part (entries after the
    first in each question whose maximum mark is positive), indexed as
    ``<question-number>_<part-number>``.  The frame can then be passed
    to a Google doc distributed to markers for editing with the mark
    for each section.
    """
    columns = ['Number', 'Question', 'Correct (a fraction)', 'Max Mark', 'Comments']
    records = []
    index = []
    for qu_number, question in enumerate(self.answers):
        part_no = 0
        for number, part in enumerate(question):
            # Entry 0 of each question is a header, not a markable part;
            # only parts with a positive maximum mark get a row.
            if number > 0 and part[2] > 0:
                part_no += 1
                label = str(qu_number + 1) + '_' + str(part_no)
                records.append({'Number': label,
                                'Question': part[0],
                                'Max Mark': part[2]})
                index.append(label)
    # DataFrame.append and DataFrame.sort were removed from pandas, and
    # chained .loc[i][col] assignment is unreliable; build the frame in
    # one shot and use sort_values instead.
    mark_sheet = pd.DataFrame(records, columns=columns, index=index)
    return mark_sheet.sort_values(by='Number')
|
def remotes ( cwd , user = None , password = None , redact_auth = True , ignore_retcode = False , output_encoding = None ) :
    '''
    Get fetch and push URLs for each remote in a git checkout

    cwd
        The path to the git checkout

    user
        User under which to run the git command. By default, the command is
        run by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will
        be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    redact_auth : True
        Set to ``False`` to include the username/password for authenticated
        remotes in the return data. Otherwise, this information will be
        redacted.

        .. warning::
            Setting this to ``False`` will not only reveal any HTTPS Basic
            Auth that is configured, but the return data will also be written
            to the job cache. When possible, it is recommended to use SSH for
            authentication.

        .. versionadded:: 2015.5.6

    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git command
        returns a nonzero exit status.

        .. versionadded:: 2015.8.0

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in
        most cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to
            handle Unicode characters.

        .. versionadded:: 2018.3.1

    CLI Example:

    .. code-block:: bash

        salt myminion git.remotes /path/to/repo
    '''
    cwd = _expand_path ( cwd , user )
    command = [ 'git' , 'remote' , '--verbose' ]
    ret = { }
    output = _git_run ( command , cwd = cwd , user = user , password = password , ignore_retcode = ignore_retcode , output_encoding = output_encoding ) [ 'stdout' ]
    # Each output line looks like: "<name>\t<url> (fetch)" or "... (push)".
    for remote_line in salt . utils . itertools . split ( output , '\n' ) :
        try :
            remote , remote_info = remote_line . split ( None , 1 )
        except ValueError :
            # Malformed line with no whitespace separator; skip it.
            continue
        try :
            remote_url , action = remote_info . rsplit ( None , 1 )
        except ValueError :
            # Malformed line with no trailing "(fetch)"/"(push)"; skip it.
            continue
        # Remove parenthesis
        action = action . lstrip ( '(' ) . rstrip ( ')' ) . lower ( )
        if action not in ( 'fetch' , 'push' ) :
            log . warning ( 'Unknown action \'%s\' for remote \'%s\' in git checkout ' 'located in %s' , action , remote , cwd )
            continue
        if redact_auth :
            remote_url = salt . utils . url . redact_http_basic_auth ( remote_url )
        ret . setdefault ( remote , { } ) [ action ] = remote_url
    return ret
|
def train(cls, data, lambda_=1.0):
    """Train a Naive Bayes model given an RDD of (label, features)
    vectors.

    This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can
    handle all kinds of discrete data.  For example, by converting
    documents into TF-IDF vectors, it can be used for document
    classification.  By making every vector a 0-1 vector, it can also be
    used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).  The input
    feature values must be nonnegative.

    :param data:
      RDD of LabeledPoint.
    :param lambda_:
      The smoothing parameter.
      (default: 1.0)
    """
    # Cheap element-type sanity check before shipping work to the JVM.
    if not isinstance(data.first(), LabeledPoint):
        raise ValueError("`data` should be an RDD of LabeledPoint")
    labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
    return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
|
def main ( ) :
    """Run all relevant aspects of ok.py.

    Handles version/update/token queries, loads the assignment, runs
    every configured protocol (re-authenticating and retrying on
    authentication failure), and finally performs a best-effort update
    check and test dump.
    """
    args = parse_input ( )
    log . setLevel ( logging . DEBUG if args . debug else logging . ERROR )
    log . debug ( args )
    # Checking user's Python bit version
    bit_v = ( 8 * struct . calcsize ( "P" ) )
    log . debug ( "Python {} ({}bit)" . format ( sys . version , bit_v ) )
    if args . version :
        print ( "okpy=={}" . format ( client . __version__ ) )
        exit ( 0 )
    elif args . update :
        print ( "Current version: {}" . format ( client . __version__ ) )
        did_update = software_update . check_version ( args . server , client . __version__ , client . FILE_NAME , timeout = 10 )
        # exit with error if ok failed to update
        exit ( not did_update )
    assign = None
    try :
        if args . get_token :
            access_token = auth . authenticate ( args , force = True )
            print ( "Token: {}" . format ( access_token ) )
            # exit with error if no access_token
            exit ( not access_token )
        # Instantiating assignment
        assign = assignment . load_assignment ( args . config , args )
        if args . tests :
            print ( 'Available tests:' )
            for name in assign . test_map :
                print ( ' ' + name )
            exit ( 0 )
        force_authenticate = args . authenticate
        retry = True
        while retry :
            retry = False
            if force_authenticate : # Authenticate and check for success
                if not assign . authenticate ( force = True ) :
                    exit ( 1 )
            try :
                msgs = messages . Messages ( )
                for name , proto in assign . protocol_map . items ( ) :
                    log . info ( 'Execute {}.run()' . format ( name ) )
                    proto . run ( msgs )
                msgs [ 'timestamp' ] = str ( datetime . now ( ) )
            except ex . AuthenticationException as e :
                # Retry strategy: first force a re-auth, then fall back
                # to no-browser auth; after that, re-raise to the outer
                # handler.
                if not force_authenticate :
                    force_authenticate = True
                    retry = True
                elif not args . no_browser :
                    args . no_browser = True
                    retry = True
                if retry :
                    msg = "without a browser" if args . no_browser else "with a browser"
                    log . warning ( 'Authentication exception occurred; will retry {0}' . format ( msg ) , exc_info = True )
                    print ( 'Authentication error; will try to re-authenticate {0}...' . format ( msg ) )
                else :
                    raise
                    # outer handler will be called
    except ex . LoadingException as e :
        log . warning ( 'Assignment could not load' , exc_info = True )
        print ( 'Error loading assignment: ' + str ( e ) )
    except ex . AuthenticationException as e :
        log . warning ( 'Authentication exception occurred' , exc_info = True )
        print ( 'Authentication error: {0}' . format ( e ) )
    except ex . EarlyExit as e :
        log . warning ( 'OK exited early (non-error)' )
        print ( str ( e ) )
    except ex . OkException as e :
        log . warning ( 'General OK exception occurred' , exc_info = True )
        print ( 'Error: ' + str ( e ) )
    except KeyboardInterrupt :
        log . info ( 'KeyboardInterrupt received.' )
    finally :
        # Best-effort update check and test dump, even after errors.
        if not args . no_update and not args . local :
            try :
                software_update . check_version ( args . server , client . __version__ , client . FILE_NAME )
            except KeyboardInterrupt :
                pass
        if assign :
            assign . dump_tests ( )
|
def count_above(errors, epsilon):
    """Count errors above ``epsilon`` and the continuous sequences of them.

    A sequence start is a position where the above-threshold mask is True
    while its shifted neighbour differs, so counting positions where the
    mask changed *and* is True yields the number of sequences.
    """
    mask = errors > epsilon
    total_above = len(errors[mask])
    mask = pd.Series(mask)
    changed = mask != mask.shift(1)
    total_consecutive = sum(mask & changed)
    return total_above, total_consecutive
|
def add_ppas_from_file(file_name, update=True):
    """Add every personal package archive listed in ``file_name``.

    The apt sources are refreshed once at the end (when ``update`` is
    true) rather than after each individual PPA.
    """
    for ppa_name in _read_lines_from_file(file_name):
        add_ppa(ppa_name, update=False)
    if update:
        update_apt_sources()
|
def request(self, command_string):
    """Send ``command_string`` over telnet and return the decoded reply."""
    self.send(command_string)
    if self.debug:
        print("Telnet Request: %s" % (command_string))
    while True:
        response = urllib.parse.unquote(self.tn.read_until(b"\n").decode())
        # Stop on a definitive reply: "success" (normal reply), "huh"
        # (something went wrong) or "connect" (special reply to "hello").
        # Anything else -- key, menu or visibility notifications -- is
        # skipped and we read again.
        if any(marker in response for marker in ("success", "huh", "connect")):
            break
    # TODO Keep track of which screen is displayed
    if "huh" in response or self.debug:
        print("Telnet Response: %s" % (response[:-1]))
    return response
|
def check_and_update_resources ( num_cpus , num_gpus , resources ) :
    """Sanity check a resource dictionary and add sensible defaults.

    Args:
        num_cpus: The number of CPUs.
        num_gpus: The number of GPUs.
        resources: A dictionary mapping resource names to resource
            quantities.

    Returns:
        A new resource dictionary.
    """
    if resources is None :
        resources = { }
    # Work on a copy so the caller's dictionary is never mutated.
    resources = resources . copy ( )
    # CPU/GPU must arrive through the dedicated arguments, not the dict.
    assert "CPU" not in resources
    assert "GPU" not in resources
    if num_cpus is not None :
        resources [ "CPU" ] = num_cpus
    if num_gpus is not None :
        resources [ "GPU" ] = num_gpus
    if "CPU" not in resources : # By default, use the number of hardware execution threads for the
        # number of cores.
        resources [ "CPU" ] = multiprocessing . cpu_count ( )
    # See if CUDA_VISIBLE_DEVICES has already been set.
    gpu_ids = ray . utils . get_cuda_visible_devices ( )
    # Check that the number of GPUs that the raylet wants doesn't
    # exceed the amount allowed by CUDA_VISIBLE_DEVICES.
    if ( "GPU" in resources and gpu_ids is not None and resources [ "GPU" ] > len ( gpu_ids ) ) :
        raise Exception ( "Attempting to start raylet with {} GPUs, " "but CUDA_VISIBLE_DEVICES contains {}." . format ( resources [ "GPU" ] , gpu_ids ) )
    if "GPU" not in resources : # Try to automatically detect the number of GPUs.
        resources [ "GPU" ] = _autodetect_num_gpus ( )
        # Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.
        if gpu_ids is not None :
            resources [ "GPU" ] = min ( resources [ "GPU" ] , len ( gpu_ids ) )
    # Drop zero-quantity resources entirely.
    resources = { resource_label : resource_quantity for resource_label , resource_quantity in resources . items ( ) if resource_quantity != 0 }
    # Check types: quantities must be whole, nonnegative, bounded numbers.
    for _ , resource_quantity in resources . items ( ) :
        assert ( isinstance ( resource_quantity , int ) or isinstance ( resource_quantity , float ) )
        if ( isinstance ( resource_quantity , float ) and not resource_quantity . is_integer ( ) ) :
            raise ValueError ( "Resource quantities must all be whole numbers. Received {}." . format ( resources ) )
        if resource_quantity < 0 :
            raise ValueError ( "Resource quantities must be nonnegative. Received {}." . format ( resources ) )
        if resource_quantity > ray_constants . MAX_RESOURCE_QUANTITY :
            raise ValueError ( "Resource quantities must be at most {}." . format ( ray_constants . MAX_RESOURCE_QUANTITY ) )
    return resources
|
def validate_layout_display ( self , table , display_condition ) :
    """Check to see if the display condition passes.

    Args:
        table (str): The name of the DB table which hold the App data.
        display_condition (str): The "where" clause of the DB SQL statement.

    Returns:
        bool: True if the condition is None or the row count is greater
        than 0.
    """
    display = False
    if display_condition is None :
        # No condition configured: always display.
        display = True
    else :
        # NOTE(review): the query is assembled with str.format from
        # ``table`` and ``display_condition``; if either value can come
        # from untrusted input this is SQL injection.  Confirm these are
        # app-controlled configuration values only.
        display_query = 'select count(*) from {} where {}' . format ( table , display_condition )
        try :
            cur = self . db_conn . cursor ( )
            # Double quotes are stripped from the query before execution.
            cur . execute ( display_query . replace ( '"' , '' ) )
            rows = cur . fetchall ( )
            if rows [ 0 ] [ 0 ] > 0 :
                display = True
        except sqlite3 . Error as e :
            print ( '"{}" query returned an error: ({}).' . format ( display_query , e ) )
            # A broken display clause is treated as fatal.
            sys . exit ( 1 )
    return display
|
def releaseInterface(self):
    r"""Release an interface previously claimed with claimInterface."""
    claimed = self.__claimed_interface
    util.release_interface(self.dev, claimed)
    # Mark that no interface is currently claimed.
    self.__claimed_interface = -1
|
def _listen ( self , uuid = None , session = None ) :
    """Open the long-polling events connection, optionally resuming from
    a previous connection ``uuid``.

    NOTE(review): ``async`` became a reserved keyword in Python 3.7, so
    the ``async=True`` keyword argument below cannot be parsed by modern
    interpreters -- this code is Python <= 3.6 only; confirm the
    intended replacement name against the NURESTConnection API.
    NOTE(review): the comment below says async is forced to False, but
    ``True`` is passed -- confirm which is intended.
    """
    if self . url is None :
        raise Exception ( "NURESTPushCenter needs to have a valid URL. please use setURL: before starting it." )
    events_url = "%s/events" % self . url
    if uuid :
        events_url = "%s?uuid=%s" % ( events_url , uuid )
    request = NURESTRequest ( method = 'GET' , url = events_url )
    # Force async to False so the push center will have only 1 thread running
    connection = NURESTConnection ( request = request , async = True , callback = self . _did_receive_event , root_object = self . _root_object )
    if self . _timeout :
        # Stop listening entirely once the overall deadline has passed;
        # otherwise propagate the remaining timeout to the connection.
        if int ( time ( ) ) - self . _start_time >= self . _timeout :
            pushcenter_logger . debug ( "[NURESTPushCenter] Timeout (timeout=%ss)." % self . _timeout )
            return
        else :
            connection . timeout = self . _timeout
    pushcenter_logger . info ( 'Bambou Sending >>>>>>\n%s %s' % ( request . method , request . url ) )
    # connection.ignore_request_idle = True
    connection . start ( )
|
def count_tokens(tokens, to_lower=False, counter=None):
    r"""Count the occurrences of each token in ``tokens``.

    Parameters
    ----------
    tokens : list of str
        The source tokens to count.
    to_lower : bool, default False
        Whether to lower-case each token before counting.
    counter : Counter or None, default None
        An existing Counter to update in place with the counts of
        ``tokens``. When None, a fresh Counter is created.

    Returns
    -------
    Counter
        ``counter`` after being updated, or a new Counter instance when
        ``counter`` is None.

    Examples
    --------
    >>> count_tokens(['Life', 'is', 'great', 'life', 'is', 'good'])
    Counter({'is': 2, 'Life': 1, 'great': 1, 'life': 1, 'good': 1})
    """
    source = [token.lower() for token in tokens] if to_lower else tokens
    if counter is None:
        return Counter(source)
    counter.update(source)
    return counter
|
def get_rendition_url(self, width=0, height=0):
    '''Return the rendition URL for a requested size, creating the
    rendition on demand if it does not exist yet.

    A width/height of 0/0 short-circuits to the master image URL.
    '''
    if width == 0 and height == 0:
        return self.get_master_url()
    # Snap the requested size to the actual rendition dimensions.
    target_width, target_height = self.get_rendition_size(width, height)
    key = '%sx%s' % (target_width, target_height)
    if not self.renditions:
        self.renditions = {}
    rendition_name = self.renditions.get(key, False)
    if not rendition_name:
        # Lazily generate the missing rendition.
        rendition_name = self.make_rendition(target_width, target_height)
    return default_storage.url(rendition_name)
|
def sharded_cluster_link(rel, cluster_id=None, shard_id=None, router_id=None, self_rel=False):
    """Helper for getting a ShardedCluster link document, given a rel.

    NOTE: the href template is filled from ``locals()``, so the parameter
    names (cluster_id, shard_id, router_id) and the local ``clusters_href``
    must match the placeholders used in _SHARDED_CLUSTER_LINKS -- do not
    rename them.
    """
    clusters_href = '/v1/sharded_clusters'
    link = _SHARDED_CLUSTER_LINKS[rel].copy()
    link['href'] = link['href'].format(**locals())
    # A self link always advertises rel 'self' regardless of the lookup key.
    link['rel'] = 'self' if self_rel else rel
    return link
|
def toggle_quick_open_command_line_sensitivity(self, chk):
    """Keep the quick-open widgets' sensitivity in sync with the checkbox.

    When the user unchecks 'enable quick open', the command line and the
    current-terminal option are disabled (greyed out).
    """
    enabled = chk.get_active()
    for widget_name in ('quick_open_command_line', 'quick_open_in_current_terminal'):
        self.get_widget(widget_name).set_sensitive(enabled)
|
def get_user_policy(self, user_name, policy_name):
    """Retrieve the specified policy document for the specified user.

    :type user_name: string
    :param user_name: The name of the user the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to get.
    """
    return self.get_response(
        'GetUserPolicy',
        {'UserName': user_name, 'PolicyName': policy_name},
        verb='POST',
    )
|
def get_neighbourhood(self, force, id, **attrs):
    """Get a specific neighbourhood. Uses the neighbourhood_ API call.

    .. _neighbourhood: https://data.police.uk/docs/method/neighbourhood/

    :param force: The force within which the neighbourhood resides (either
        by ID or :class:`forces.Force` object)
    :type force: str or Force
    :param str id: The ID of the neighbourhood to fetch.
    :rtype: Neighbourhood
    :return: The Neighbourhood object for the given force/ID.
    """
    # Accept either a Force instance or a raw force ID string.
    if not isinstance(force, Force):
        force = Force(self, id=force, **attrs)
    return Neighbourhood(self, force=force, id=id, **attrs)
|
def run_server(cls, args=None, **kwargs):
    """Run this class as a Tango device server.

    Thin wrapper around ``tango.server.run`` that supplies the server name
    (the class name) automatically and propagates the class green mode.

    Args:
        args (iterable): args as given in the tango.server.run method
            without the server name. If None, ``sys.argv[1:]`` is used.
        kwargs: the other keyword arguments are as given in the
            tango.server.run method.
    """
    cli_args = sys.argv[1:] if args is None else list(args)
    kwargs.setdefault("green_mode", getattr(cls, 'green_mode', None))
    return run((cls,), [cls.__name__] + cli_args, **kwargs)
|
def get_local_config_file(cls, filename):
    """Find the local configuration file with the highest priority.

    Search order (first match wins):

    1. **Local:** configuration file in the current working directory.
    2. **Project:** configuration file in the root of the current working
       ``git`` repository.
    3. **User:** configuration file in the user's ``$HOME``.

    :param str filename: Raw name of the configuration file.
    :return: Union[:class:`.str`, :data:`None`] -- configuration file with
        the highest priority, :data:`None` if no config file is found.
    """
    # 1. Local: the current working directory wins outright.
    if os.path.isfile(filename):
        return filename
    # 2. Project: root of the enclosing git repository, if any.
    # _get_repo may fail (or return '') outside a git repo; both cases
    # simply fall through to the home-directory lookup. (Previously this
    # was driven by `raise Exception()` as control flow.)
    try:
        repo_root = _get_repo()
    except Exception:
        repo_root = ''
    if repo_root:
        candidate = os.path.join(repo_root, filename)
        if os.path.isfile(candidate):
            return candidate
    # 3. User: fall back to $HOME (expanduser when HOME is unset).
    home_candidate = os.path.join(os.getenv("HOME", os.path.expanduser("~")), filename)
    if os.path.isfile(home_candidate):
        return home_candidate
    return None
|
def encode_to_py3bytes_or_py2str(s):
    """Take anything and attempt to return a py2 string or py3 bytes.

    This is typically used when creating command + arguments to be executed
    via os.exec*.
    """
    fallback_encoding = "utf8"
    if IS_PY3:
        # if we're already bytes, do nothing
        if isinstance(s, bytes):
            pass
        else:
            s = str(s)
            try:
                s = bytes(s, DEFAULT_ENCODING)
            except UnicodeEncodeError:
                s = bytes(s, fallback_encoding)
    else:
        # attempt to convert the thing to unicode from the system's encoding
        try:
            s = unicode(s, DEFAULT_ENCODING)
        # if the thing is already unicode, or it's a number, it can't be
        # coerced to unicode with an encoding argument, but if we leave out
        # the encoding argument, it will convert it to a string, then to
        # unicode
        except TypeError:
            s = unicode(s)
        # now that we have guaranteed unicode, encode to our system encoding,
        # but attempt to fall back to something
        try:
            s = s.encode(DEFAULT_ENCODING)
        # NOTE(review): deliberately broad bare except -- any encode failure
        # (not just UnicodeEncodeError) falls back to utf8 with replacement.
        except:
            s = s.encode(fallback_encoding, "replace")
    return s
|
def Page_searchInResource(self, frameId, url, query, **kwargs):
    """Function path: Page.searchInResource
    Domain: Page
    Method name: searchInResource

    WARNING: This function is marked 'Experimental'!

    Parameters:
        Required arguments:
            'frameId' (type: FrameId) -> Frame id for resource to search in.
            'url' (type: string) -> URL of the resource to search in.
            'query' (type: string) -> String to search for.
        Optional arguments:
            'caseSensitive' (type: boolean) -> If true, search is case sensitive.
            'isRegex' (type: boolean) -> If true, treats string parameter as regex.
    Returns:
        'result' (type: array) -> List of search matches.

    Description: Searches for given string in resource content.
    """
    # Runtime type validation mirroring the DevTools protocol specification.
    assert isinstance(url, (str,)), "Argument 'url' must be of type '['str']'. Received type: '%s'" % type(url)
    assert isinstance(query, (str,)), "Argument 'query' must be of type '['str']'. Received type: '%s'" % type(query)
    if 'caseSensitive' in kwargs:
        assert isinstance(kwargs['caseSensitive'], (bool,)), "Optional argument 'caseSensitive' must be of type '['bool']'. Received type: '%s'" % type(kwargs['caseSensitive'])
    if 'isRegex' in kwargs:
        assert isinstance(kwargs['isRegex'], (bool,)), "Optional argument 'isRegex' must be of type '['bool']'. Received type: '%s'" % type(kwargs['isRegex'])
    # Reject unknown keyword arguments up front.
    expected = ['caseSensitive', 'isRegex']
    passed_keys = list(kwargs.keys())
    assert all([(key in expected) for key in passed_keys]), "Allowed kwargs are ['caseSensitive', 'isRegex']. Passed kwargs: %s" % passed_keys
    subdom_funcs = self.synchronous_command('Page.searchInResource', frameId=frameId, url=url, query=query, **kwargs)
    return subdom_funcs
|
def refresh(self):
    """Refreshes the status information.

    For an array job that is still 'executing', derive the overall status
    from the members: 'success' only when every member finished with
    result 0, 'failure' when a failing result was recorded. While any
    member is unfinished (status neither 'success' nor 'failure'), the
    verdict is deferred and the status is left as 'executing'.
    """
    if self.status == 'executing' and self.array:
        # new_result: 0 = all good so far, None = verdict deferred,
        # anything else = a recorded failure result.
        new_result = 0
        for array_job in self.array:
            if array_job.status == 'failure' and new_result is not None:
                # Record the failing result (later failures overwrite
                # earlier ones; a failure seen after an unfinished member
                # is ignored since the verdict is deferred anyway).
                new_result = array_job.result
            elif array_job.status not in ('success', 'failure'):
                # An unfinished member blocks any final verdict.
                new_result = None
        if new_result is not None:
            self.status = 'success' if new_result == 0 else 'failure'
            self.result = new_result
|
def angle2d(self):
    """Return the angle of this point on a circle, measured in radians,
    treating the (x, y) coordinates as a vector.

    The result lies in [0, 2*pi).
    """
    # atan2 handles every quadrant and axis case; shift its (-pi, pi]
    # result into the [0, 2*pi) range the callers expect.
    angle = math.atan2(self.y, self.x)
    if angle < 0:
        angle += math.pi * 2.0
    return angle
|
def fieldAlphaHistogram(self, name, q='*:*', fq=None, nbins=10, includequeries=True):
    """Generate a histogram of values from a string field.

    Output is ``[[low, high, count, query], ...]``; bin edges are
    determined by equal division of the field's distinct values.

    :param name: the field to bin
    :param q: Solr query restricting the documents considered
    :param fq: optional filter query
    :param nbins: requested number of bins (reduced when there are fewer
        distinct values)
    :param includequeries: when True, append the facet query used for each
        bin as a fourth list element
    """
    oldpersist = self.persistent
    # Keep the connection open across the multiple requests made below.
    self.persistent = True
    bins = []
    qbin = []
    fvals = []
    try:
        # get total number of values for the field
        # TODO: this is a slow mechanism to retrieve the number of distinct
        # values. Need to replace this with something more efficient --
        # can probably use a range of alpha chars (check case sensitivity).
        fvals = self.fieldValues(name, q, fq, maxvalues=-1)
        # fvals[name] alternates value, count, value, count, ...
        # BUG FIX: use integer division; under Python 3 the former `/ 2`
        # produced a float, which breaks the list indexing below.
        nvalues = len(fvals[name]) // 2
        if nvalues < nbins:
            nbins = nvalues
        if nvalues == nbins:
            # Use equivalence instead of range queries to retrieve the values.
            for i in range(0, nbins):
                bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
                binq = '%s:%s' % (name, self.prepareQueryTerm(name, bin[0]))
                qbin.append(binq)
                bins.append(bin)
        else:
            # BUG FIX: integer (floor) division as in the original Python 2
            # semantics; the float branch below recomputes a true-division
            # delta when needed.
            delta = nvalues // nbins
            if delta == 1:
                # Use equivalence queries, except the last one which
                # includes the remainder of terms.
                for i in range(0, nbins - 2):
                    bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
                    binq = '%s:%s' % (name, self.prepareQueryTerm(name, bin[0]))
                    qbin.append(binq)
                    bins.append(bin)
                term = fvals[name][(nbins - 1) * 2]
                bin = [term, fvals[name][((nvalues - 1) * 2)], 0]
                binq = '%s:[%s TO *]' % (name, self.prepareQueryTerm(name, term))
                qbin.append(binq)
                bins.append(bin)
            else:
                # Use range queries for all terms: page through the values
                # and pick those at the bin edges.
                coffset = 0.0
                delta = float(nvalues) / float(nbins)
                for i in range(0, nbins):
                    idxl = int(coffset) * 2
                    idxu = (int(coffset + delta) * 2) - 2
                    bin = [fvals[name][idxl], fvals[name][idxu], 0]
                    binq = ''
                    try:
                        if i == 0:
                            binq = '%s:[* TO %s]' % (name, self.prepareQueryTerm(name, bin[1]),)
                        elif i == nbins - 1:
                            binq = '%s:[%s TO *]' % (name, self.prepareQueryTerm(name, bin[0]),)
                        else:
                            binq = '%s:[%s TO %s]' % (name, self.prepareQueryTerm(name, bin[0]), self.prepareQueryTerm(name, bin[1]),)
                    except Exception:
                        self.logger.exception('Exception 1 in fieldAlphaHistogram:')
                    qbin.append(binq)
                    bins.append(bin)
                    coffset = coffset + delta
        # now execute the facet query request
        params = {'q': q, 'rows': '0', 'facet': 'true', 'facet.field': name, 'facet.limit': '1', 'facet.mincount': 1, 'wt': 'python', }
        request = urllib.parse.urlencode(params, doseq=True)
        for sq in qbin:
            try:
                request = request + '&%s' % urllib.parse.urlencode({'facet.query': self.encoder(sq)[0]})
            except Exception:
                self.logger.exception('Exception 2 in fieldAlphaHistogram')
        rsp = self.doPost(self.solrBase + '', request, self.formheaders)
        # SECURITY NOTE: eval() on the response body trusts the Solr server
        # completely ('wt': 'python' responses are Python literals). Consider
        # ast.literal_eval or the JSON response writer instead.
        data = eval(rsp.read())
        for i in range(0, len(bins)):
            v = data['facet_counts']['facet_queries'][qbin[i]]
            bins[i][2] = v
            if includequeries:
                bins[i].append(qbin[i])
    finally:
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
    return bins
|
def rollback(self):
    """Ignore all changes made in the latest session (terminate the session)."""
    # Guard clause: nothing to roll back without an open session.
    if self.session is None:
        logger.warning("rollback called but there's no open session in %s" % self)
        return
    logger.info("rolling back transaction in %s" % self)
    self.session.close()
    self.session = None
    self.lock_update.release()
|
def transform_locus(region, window_center, window_size):
    """Transform an input genomic region into one suitable for the profile.

    The region is modified **in place**: it is centered on <window_center>
    of the input region and resized to be window_size long. Nothing is
    returned. (The previous docstring claimed a new interval was returned;
    the code mutates its argument instead.)

    :param region: input region to transform (mutated in place).
    :param window_center: which part of the input region to center on;
        only CENTRE is currently supported.
    :param window_size: how large the resultant region should be.
    :raises ValueError: if window_center is not a supported anchor.
    """
    if window_center == CENTRE:
        region.transform_center(window_size)
    else:
        raise ValueError("Don't know how to do this transformation: " + window_center)
|
def _write_pickle ( filepath , data , kwargs ) :
"""See documentation of mpu . io . write ."""
|
if 'protocol' not in kwargs :
kwargs [ 'protocol' ] = pickle . HIGHEST_PROTOCOL
with open ( filepath , 'wb' ) as handle :
pickle . dump ( data , handle , ** kwargs )
return data
|
def make_serviceitem_description(description, condition='contains', negate=False, preserve_case=False):
    """Create a node for ServiceItem/description.

    :return: An IndicatorItem represented as an Element node.
    """
    # The document/search/content-type triple is fixed for this item kind.
    return ioc_api.make_indicatoritem_node(
        condition,
        'ServiceItem',
        'ServiceItem/description',
        'string',
        description,
        negate=negate,
        preserve_case=preserve_case,
    )
|
def pretty_print_model(devicemodel):
    """Print a device model to the log by formatting its dict fields.

    Expects 'deviceModelId', 'projectId' and 'deviceType' keys in
    ``devicemodel``; logs each trait when a 'traits' list is present,
    otherwise logs 'No traits'.
    """
    # %-template keyed directly by the devicemodel dict.
    PRETTY_PRINT_MODEL = """Device Model ID: %(deviceModelId)s
Project ID: %(projectId)s
Device Type: %(deviceType)s"""
    logging.info(PRETTY_PRINT_MODEL % devicemodel)
    if 'traits' in devicemodel:
        for trait in devicemodel['traits']:
            logging.info(' Trait %s' % trait)
    else:
        logging.info('No traits')
    # Trailing blank line for readability between models.
    logging.info('')
|
def add_comment(self, text):
    """Comment on the submission using the specified text.

    :returns: A Comment object for the newly created comment.
    """
    # pylint: disable=W0212
    response = self.reddit_session._add_comment(self.fullname, text)
    # pylint: enable=W0212
    # Drop this submission from the cache so the new comment shows up on
    # the next fetch.
    self.reddit_session.evict(self._api_link)
    # pylint: disable=W0212
    return response
|
def update_agent_pool(self, pool, pool_id):
    """UpdateAgentPool.
    [Preview API] Update properties on an agent pool.

    :param :class:`<TaskAgentPool> <azure.devops.v5_1.task_agent.models.TaskAgentPool>` pool: Updated agent pool details
    :param int pool_id: The agent pool to update
    :rtype: :class:`<TaskAgentPool> <azure.devops.v5_1.task_agent.models.TaskAgentPool>`
    """
    route_values = {}
    if pool_id is not None:
        route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
    content = self._serialize.body(pool, 'TaskAgentPool')
    # PATCH against the fixed REST location id for agent pools.
    response = self._send(http_method='PATCH', location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be', version='5.1-preview.1', route_values=route_values, content=content)
    return self._deserialize('TaskAgentPool', response)
|
def get_reviews_summary(self, pub_name, ext_name, before_date=None, after_date=None):
    """GetReviewsSummary.
    [Preview API] Returns a summary of the reviews.

    :param str pub_name: Name of the publisher who published the extension
    :param str ext_name: Name of the extension
    :param datetime before_date: Use if you want to fetch summary of reviews older than the specified date, defaults to None
    :param datetime after_date: Use if you want to fetch summary of reviews newer than the specified date, defaults to None
    :rtype: :class:`<ReviewSummary> <azure.devops.v5_0.gallery.models.ReviewSummary>`
    """
    route_values = {}
    if pub_name is not None:
        route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
    if ext_name is not None:
        route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
    query_parameters = {}
    if before_date is not None:
        query_parameters['beforeDate'] = self._serialize.query('before_date', before_date, 'iso-8601')
    if after_date is not None:
        query_parameters['afterDate'] = self._serialize.query('after_date', after_date, 'iso-8601')
    response = self._send(http_method='GET', location_id='b7b44e21-209e-48f0-ae78-04727fc37d77', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters)
    return self._deserialize('ReviewSummary', response)
|
def RemoveMethod(self, function):
    """Remove the specified function's MethodWrapper from the
    added_methods list, so we don't re-bind it when making a clone.

    :param function: the underlying function whose wrapper(s) to drop;
        compared by identity against each wrapper's ``method`` attribute.
    """
    # Idiom fix: 'is not' instead of the non-idiomatic 'not ... is'.
    # Identity comparison is intentional: remove that exact function object.
    self.added_methods = [dm for dm in self.added_methods if dm.method is not function]
|
def setup_suspend(self):
    """Configure debugger state so that execution is suspended."""
    # Clear any frame-targeted stepping state.
    self.frame_calling = None
    self.frame_stop = None
    self.frame_return = None
    # Request a suspend and arm tracing so it takes effect.
    self.frame_suspend = True
    self.pending_stop = True
    self.enable_tracing()
|
def transform_sequence(f):
    """Decorator turning a point-wise function into a sequence transform.

    The decorated function, called with the transform's own arguments,
    returns a callable that applies ``f`` (with those arguments bound) to
    every point of a sequence via ``seq.map_points``. ``f`` must accept
    the point as its last positional argument (or a 'point' kwarg).
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # args/kwargs are the transform's parameters -- 'point' is supplied
        # later, per element, by map_points.
        def apply_to(seq):
            return seq.map_points(partial(f, *args, **kwargs))
        return apply_to
    return wrapper
|
def calculate_acl(data, m=5, dtype=int):
    r"""Calculates the autocorrelation length (ACL).

    Given a normalized autocorrelation function :math:`\rho[i]` (by
    normalized, we mean that :math:`\rho[0] = 1`), the ACL :math:`\tau` is:

    .. math::

        \tau = 1 + 2 \sum_{i=1}^{K} \rho[i].

    The number of samples used :math:`K` is found by using the first point
    such that:

    .. math::

        m \tau[K] \leq K,

    where :math:`m` is a tuneable parameter (default = 5). If no such point
    exists, then the given data set is too short to estimate the ACL; in
    this case ``inf`` is returned.

    This algorithm for computing the ACL is taken from:
    N. Madras and A. D. Sokal, J. Stat. Phys. 50, 109 (1988).

    Parameters
    ----------
    data : TimeSeries or array
        A TimeSeries of data.
    m : int
        The number of autocorrelation lengths to use for determining the
        window size :math:`K` (see above).
    dtype : int or float
        The datatype of the output. If the dtype was set to int, then the
        ceiling is returned.

    Returns
    -------
    acl : int or float
        The autocorrelation length. If the ACL cannot be estimated, returns
        ``numpy.inf``.
    """
    # sanity check output data type
    if dtype not in [int, float]:
        raise ValueError("The dtype must be either int or float.")
    # if we have only a single point, just return 1
    if len(data) < 2:
        return 1
    # calculate ACF that is normalized by the zero-lag value
    acf = calculate_acf(data)
    # Since acf[0] == 1, the running value 2*cumsum - 1 equals
    # 1 + 2*sum_{i=1..k} rho[i] at each lag k (i.e. tau[k]).
    cacf = 2 * acf.numpy().cumsum() - 1
    # First window K satisfying m * tau[K] <= K.
    win = m * cacf <= numpy.arange(len(cacf))
    if win.any():
        acl = cacf[numpy.where(win)[0][0]]
        if dtype == int:
            acl = int(numpy.ceil(acl))
    else:
        # Data set too short to estimate the ACL.
        acl = numpy.inf
    return acl
|
def search(self, **kwargs):
    """Search equipments based on extended search.

    :param search: Dict containing QuerySets to find equipments.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing equipments.
    """
    # All search options are serialized into the URL query string.
    return super(ApiEquipment, self).get(self.prepare_url('api/v3/equipment/', kwargs))
|
def get_custom_implementations(self):
    """Retrieve the custom implementations.

    Yields:
        (str, str, ImplementationProperty) tuples: the transition name,
        the name of the attribute the implementation lives at, and the
        related implementation. (The previous docstring listed the first
        two elements in the opposite order from the actual yield.)
    """
    for trname in self.custom_implems:
        attr = self.transitions_at[trname]
        implem = self.implementations[trname]
        yield (trname, attr, implem)
|
def _add_deflection(position, observer, deflector, rmass):
    """Correct a position vector for how one particular mass deflects light.

    Given the ICRS `position` [x,y,z] of an object (AU) together with the
    positions of an `observer` and a `deflector` of reciprocal mass
    `rmass`, this function updates `position` in-place to show how much
    the presence of the deflector will deflect the image of the object.
    """
    # Construct vector 'pq' from gravitating body to observed object and
    # construct vector 'pe' from gravitating body to observer.
    pq = observer + position - deflector
    pe = observer - deflector
    # Compute vector magnitudes and unit vectors.
    pmag = length_of(position)
    qmag = length_of(pq)
    emag = length_of(pe)
    # where() substitutes 1.0 to avoid divide-by-zero for null vectors.
    phat = position / where(pmag, pmag, 1.0)
    qhat = pq / where(qmag, qmag, 1.0)
    ehat = pe / where(emag, emag, 1.0)
    # Compute dot products of vectors.
    pdotq = dots(phat, qhat)
    qdote = dots(qhat, ehat)
    edotp = dots(ehat, phat)
    # If gravitating body is observed object, or is on a straight line
    # toward or away from observed object to within 1 arcsec, deflection
    # is set to zero.
    make_no_correction = abs(edotp) > 0.99999999999
    # Compute scalar factors; GS, C and AU_M are module-level constants
    # (gravitational parameter, speed of light, AU in meters -- presumably;
    # confirm against the module header).
    fac1 = 2.0 * GS / (C * C * emag * AU_M * rmass)
    fac2 = 1.0 + qdote
    # Correct position vector in place (element-wise masked by the
    # no-correction condition).
    position += where(make_no_correction, 0.0, fac1 * (pdotq * ehat - edotp * qhat) / fac2 * pmag)
|
def create_ondemand_streaming_locator(access_token, encoded_asset_id, pid, starttime=None):
    '''Create Media Service OnDemand Streaming Locator.

    Args:
        access_token (str): A valid Azure authentication token.
        encoded_asset_id (str): A Media Service Encoded Asset ID.
        pid (str): A Media Service Access Policy ID.
        starttime (str): An optional Media Service start time.

    Returns:
        HTTP response. JSON body.
    '''
    # Local import keeps this module's import surface unchanged.
    import json
    path = '/Locators'
    endpoint = ''.join([ams_rest_endpoint, path])
    # Build the payload with json.dumps instead of string concatenation so
    # that any special characters in the ids are escaped correctly.
    payload = {
        "AccessPolicyId": pid,
        "AssetId": encoded_asset_id,
        "Type": "2",
    }
    if starttime is not None:
        payload["StartTime"] = str(starttime)
    body = json.dumps(payload)
    return do_ams_post(endpoint, path, body, access_token, "json_only")
|
def _fill_request ( self , request , rdata ) :
"""Fills request with data from the jsonrpc call ."""
|
if not isinstance ( rdata , dict ) :
raise InvalidRequestError
request [ 'jsonrpc' ] = self . _get_jsonrpc ( rdata )
request [ 'id' ] = self . _get_id ( rdata )
request [ 'method' ] = self . _get_method ( rdata )
request [ 'params' ] = self . _get_params ( rdata )
|
def last_kstp_from_kper(hds, kper):
    """Find the last time step (kstp) for a given stress period (kper)
    in a modflow head save file.

    Parameters
    ----------
    hds : flopy.utils.HeadFile
    kper : int
        the zero-based stress period number

    Returns
    -------
    kstp : int
        the zero-based last time step during stress period kper in the
        head save file
    """
    # hds.kstpkper holds one-based (kstp, kper) pairs; collect the time
    # steps belonging to the requested (zero-based) stress period.
    matching = [kkstp for kkstp, kkper in hds.kstpkper if kkper == kper + 1]
    if not matching:
        raise Exception("kstp not found for kper {0}".format(kper))
    # Convert the largest one-based kstp back to zero-based.
    return max(matching) - 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.