def publish_results(self, dist_dir, use_basename_prefix, vt, bundle_dir, archivepath, id, archive_ext):
    """Publish a copy of the bundle and archive from the results dir in dist."""
    # TODO (from mateor): move distdir management somewhere more general purpose.
    name = vt.target.basename if use_basename_prefix else id
    bundle_copy = os.path.join(dist_dir, '{}-bundle'.format(name))
    absolute_symlink(bundle_dir, bundle_copy)
    self.context.log.info('created bundle copy {}'.format(os.path.relpath(bundle_copy, get_buildroot())))
    if archivepath:
        ext = archive.archive_extensions.get(archive_ext, archive_ext)
        archive_copy = os.path.join(dist_dir, '{}.{}'.format(name, ext))
        safe_mkdir_for(archive_copy)  # Ensure the parent dir exists
        atomic_copy(archivepath, archive_copy)
        self.context.log.info('created archive copy {}'.format(os.path.relpath(archive_copy, get_buildroot())))

def enqueue(self, payload, interval, job_id, queue_id, queue_type='default', requeue_limit=None):
    """Enqueues the job into the specified queue_id of a particular queue_type."""
    # validate all the input
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')
    if not is_valid_identifier(job_id):
        raise BadArgumentException('`job_id` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')
    if requeue_limit is None:
        requeue_limit = self._default_job_requeue_limit
    if not is_valid_requeue_limit(requeue_limit):
        raise BadArgumentException('`requeue_limit` has an invalid value.')
    try:
        serialized_payload = serialize_payload(payload)
    except TypeError as e:
        raise BadArgumentException(str(e))  # e.message is Python 2 only
    timestamp = str(generate_epoch())
    keys = [self._key_prefix, queue_type]
    args = [timestamp, queue_id, job_id, '"%s"' % serialized_payload, interval, requeue_limit]
    self._lua_enqueue(keys=keys, args=args)
    response = {'status': 'queued'}
    return response

def create_window(title, url=None, js_api=None, width=800, height=600, resizable=True, fullscreen=False, min_size=(200, 100), strings={}, confirm_quit=False, background_color='#FFFFFF', text_select=False, frameless=False, debug=False):
    """Create a web view window using a native GUI. The execution blocks after this function is invoked, so other
    program logic must be executed in a separate thread.
    :param title: Window title
    :param url: URL to load
    :param width: window width. Default is 800px
    :param height: window height. Default is 600px
    :param resizable: True if window can be resized, False otherwise. Default is True
    :param fullscreen: True if start in fullscreen mode. Default is False
    :param min_size: a (width, height) tuple that specifies a minimum window size. Default is 200x100
    :param strings: a dictionary with localized strings
    :param confirm_quit: Display a quit confirmation dialog. Default is False
    :param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
    :param text_select: Allow text selection on page. Default is False.
    :param frameless: Whether the window should have a frame.
    :return: The uid of the created window."""
    valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
    if not re.match(valid_color, background_color):
        raise ValueError('{0} is not a valid hex triplet color'.format(background_color))
    # Check if starting up from the main thread; if not, wait; finally raise an exception
    if current_thread().name == 'MainThread':
        uid = 'master'
        if not _initialized:
            _initialize_imports()
            localization.update(strings)
    else:
        uid = 'child_' + uuid4().hex[:8]
        if not _webview_ready.wait(5):
            raise Exception('Call create_window from the main thread first')
    _webview_ready.clear()  # Make API calls wait while the new window is created
    gui.create_window(uid, make_unicode(title), transform_url(url), width, height, resizable, fullscreen, min_size, confirm_quit, background_color, debug, js_api, text_select, frameless, _webview_ready)
    if uid == 'master':
        _webview_ready.clear()
    else:
        return uid

def post_copy_notes(self, post_id, other_post_id):
    """Function to copy notes (requires login).
    Parameters:
        post_id (int):
        other_post_id (int): The id of the post to copy notes to."""
    return self._get('posts/{0}/copy_notes.json'.format(post_id), {'other_post_id': other_post_id}, 'PUT', auth=True)

def _decrement_current_byte(self):
    """Decrements the value of the current byte at the pointer. If the result is below 0,
    then it will overflow to 255."""
    # If the current byte is uninitialized, then decrementing it will make it the max cell size.
    # Otherwise, if it's already at the minimum cell size, then it will also make it the max cell size.
    if self.tape[self.pointer] is None or self.tape[self.pointer] == self.MIN_CELL_SIZE:
        self.tape[self.pointer] = self.MAX_CELL_SIZE
    else:
        # decrement it
        self.tape[self.pointer] -= 1

def _handle_request_exception(self, e):
    """This method handles HTTPError exceptions the same way tornado does, and
    leaves other exceptions to be handled by user-defined handler functions
    mapped in the class attribute `EXCEPTION_HANDLERS`.
    Common HTTP status codes:
        200 OK
        301 Moved Permanently
        302 Found
        400 Bad Request
        401 Unauthorized
        403 Forbidden
        404 Not Found
        405 Method Not Allowed
        500 Internal Server Error
    It is suggested to use only the above HTTP status codes."""
    handle_func = self._exception_default_handler
    if self.EXCEPTION_HANDLERS:
        for excs, func_name in self.EXCEPTION_HANDLERS.items():
            if isinstance(e, excs):
                handle_func = getattr(self, func_name)
                break
    handle_func(e)
    if not self._finished:
        self.finish()

def crack_k_from_sigs(generator, sig1, val1, sig2, val2):
    """Given two signatures with the same secret exponent and K value, return that K value."""
    # s1 = v1/k1 + (se * r1)/k1
    # s2 = v2/k2 + (se * r2)/k2
    # and k = k1 = k2
    # so
    #   k * s1 = v1 + (se * r1)
    #   k * s2 = v2 + (se * r2)
    # so
    #   k * s1 * r2 = r2 * v1 + (se * r1 * r2)
    #   k * s2 * r1 = r1 * v2 + (se * r2 * r1)
    # so
    #   k * (s1 * r2 - s2 * r1) = r2 * v1 - r1 * v2
    # so
    #   k = (r2 * v1 - r1 * v2) / (s1 * r2 - s2 * r1)
    r1, s1 = sig1
    r2, s2 = sig2
    if r1 != r2:
        raise ValueError("r values of signature do not match")
    k = (r2 * val1 - r1 * val2) * generator.inverse(r2 * s1 - r1 * s2)
    return k % generator.order()

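# Illustrative sketch (not part of the original source): the nonce-recovery
# algebra derived in the comments above, using plain modular arithmetic in
# place of the `generator` object. n is the secp256k1 group order; the secret
# exponent, nonce, and message values below are made up for demonstration.
def _demo_crack_k():
    n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
    k, se, v1, v2, r = 54321, 999, 111, 222, 7  # a shared nonce k implies a shared r
    s1 = (v1 + se * r) * pow(k, -1, n) % n
    s2 = (v2 + se * r) * pow(k, -1, n) % n
    # k = (r2*v1 - r1*v2) / (s1*r2 - s2*r1) mod n, with r1 == r2 == r
    recovered = (r * v1 - r * v2) * pow(r * s1 - r * s2, -1, n) % n
    assert recovered == k
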
def floyd_warshall(self):
    '''API:
        floyd_warshall(self)
    Description:
        Finds all-pairs shortest paths and stores them in a list of lists.
        This is possible if the graph does not have negative cycles. It will
        return a tuple with 3 elements. The first element indicates whether
        the graph has a negative cycle: it is True if the graph does not
        have a negative cycle, i.e. the distances found are valid shortest
        distances. The second element is a dictionary of shortest distances
        between nodes. Keys are tuples of node pairs, i.e. (i, j). The third
        element is a dictionary that helps to retrieve the shortest path
        between nodes. The return value can then be represented as (validity,
        distance, nextn) where nextn is the dictionary to retrieve paths.
        distance and nextn can be used as inputs to other methods to get the
        shortest path between nodes.
    Pre:
        Arcs should have a 'cost' attribute.
    Return:
        Returns (validity, distance, nextn). The distances are valid if
        validity is True.'''
    nl = self.get_node_list()
    el = self.get_edge_list()
    # initialize distance
    distance = {}
    for i in nl:
        for j in nl:
            distance[(i, j)] = 'infinity'
    for i in nl:
        distance[(i, i)] = 0
    for e in el:
        distance[(e[0], e[1])] = self.get_edge_cost(e)
    # == end of distance initialization
    # initialize next
    nextn = {}
    for i in nl:
        for j in nl:
            if i == j or distance[(i, j)] == 'infinity':
                nextn[(i, j)] = None
            else:
                nextn[(i, j)] = i
    # == end of next initialization
    # compute shortest distances
    for k in nl:
        for i in nl:
            for j in nl:
                if distance[(i, k)] == 'infinity' or distance[(k, j)] == 'infinity':
                    continue
                elif distance[(i, j)] == 'infinity':
                    distance[(i, j)] = distance[(i, k)] + distance[(k, j)]
                    nextn[(i, j)] = nextn[(k, j)]
                elif distance[(i, j)] > distance[(i, k)] + distance[(k, j)]:
                    distance[(i, j)] = distance[(i, k)] + distance[(k, j)]
                    nextn[(i, j)] = nextn[(k, j)]
    # == end of shortest distance computation
    # check if the graph has negative cycles
    for i in nl:
        if distance[(i, i)] < 0:
            # graph has a negative cycle; shortest distances are not valid
            return (False, distance, nextn)
    return (True, distance, nextn)

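# Illustrative sketch (not part of the original class): how the `nextn`
# dictionary returned by floyd_warshall() can be unwound into an explicit node
# sequence. nextn[(i, j)] holds the predecessor of j on the shortest i -> j
# path, so the path is recovered by walking backwards from j to i.
def shortest_path_from_nextn(i, j, nextn):
    if i != j and nextn[(i, j)] is None:
        return None  # j is not reachable from i
    path = [j]
    while j != i:
        j = nextn[(i, j)]
        path.append(j)
    return list(reversed(path))
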
def _writer(self):
    """Indefinitely checks the writer queue for data to write
    to the socket."""
    while not self.closed:
        try:
            sock, data = self._write_queue.get(timeout=0.1)
            self._write_queue.task_done()
            sock.send(data)
        except Empty:
            pass  # nothing to write after timeout
        except socket.error as err:
            if err.errno == errno.EBADF:
                self._clean_dead_sessions()

def handle(cls, value, **kwargs):
    """Split the supplied string on the given delimiter, providing a list.
    Format of value:
        <delimiter>::<value>
    For example:
        Subnets: ${split ,::subnet-1,subnet-2,subnet-3}
    Would result in the variable `Subnets` getting a list consisting of:
        ["subnet-1", "subnet-2", "subnet-3"]
    This is particularly useful when getting an output from another stack
    that contains a list. For example, the standard vpc blueprint outputs
    the list of Subnets it creates as a pair of Outputs (PublicSubnets,
    PrivateSubnets) that are comma separated, so you could use this in your
    config:
        Subnets: ${split ,::${output vpc::PrivateSubnets}}"""
    try:
        delimiter, text = value.split("::", 1)
    except ValueError:
        raise ValueError("Invalid value for split: %s. Must be in <delimiter>::<text> format." % value)
    return text.split(delimiter)

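# Illustrative check (not part of the original source): the handler's parsing
# reduces to one split on "::" followed by a split on the declared delimiter;
# a self-contained demonstration of that behavior on a made-up value:
value = ",::subnet-1,subnet-2,subnet-3"
delimiter, text = value.split("::", 1)
assert text.split(delimiter) == ["subnet-1", "subnet-2", "subnet-3"]
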
def checktype(self, elt, ps):
    '''See if the type of the "elt" element is what we're looking for.
    Return the element's type.
    Parameters:
        elt -- the DOM element being parsed
        ps -- the ParsedSoap object.'''
    typeName = _find_type(elt)
    if typeName is None or typeName == "":
        return (None, None)
    # Parse the QNAME.
    prefix, typeName = SplitQName(typeName)
    uri = ps.GetElementNSdict(elt).get(prefix)
    if uri is None:
        raise EvaluateException('Malformed type attribute (bad NS)', ps.Backtrace(elt))
    # typeName = list[1]
    parselist, errorlist = self.get_parse_and_errorlist()
    if not parselist or (uri, typeName) in parselist or (_is_xsd_or_soap_ns(uri) and (None, typeName) in parselist):
        return (uri, typeName)
    raise EvaluateException('Type mismatch (%s namespace) (got %s wanted %s)' % (uri, typeName, errorlist), ps.Backtrace(elt))

def unpack_from(cls, payload, expected_parts):
    """Unpack parts from payload."""
    for num_part in iter_range(expected_parts):
        hdr = payload.read(cls.header_size)
        try:
            part_header = PartHeader(*cls.header_struct.unpack(hdr))
        except struct.error:
            raise InterfaceError("No valid part header")
        # Part payloads are padded up to an 8-byte boundary.
        if part_header.payload_size % 8 != 0:
            part_payload_size = part_header.payload_size + 8 - (part_header.payload_size % 8)
        else:
            part_payload_size = part_header.payload_size
        pl = payload.read(part_payload_size)
        part_payload = io.BytesIO(pl)
        try:
            _PartClass = PART_MAPPING[part_header.part_kind]
        except KeyError:
            raise InterfaceError("Unknown part kind %s" % part_header.part_kind)
        debug('%s (%d/%d): %s', _PartClass.__name__, num_part + 1, expected_parts, str(part_header))
        debug('Read %d bytes payload for part %d', part_payload_size, num_part + 1)
        init_arguments = _PartClass.unpack_data(part_header.argument_count, part_payload)
        debug('Part data: %s', init_arguments)
        part = _PartClass(*init_arguments)
        part.header = part_header
        part.attribute = part_header.part_attributes
        part.source = 'server'
        if pyhdb.tracing:
            part.trace_header = humanhexlify(hdr[:part_header.payload_size])
            part.trace_payload = humanhexlify(pl, 30)
        yield part

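# Illustrative sketch (not part of the original source): the alignment
# arithmetic above rounds each part's payload size up to the next multiple of
# 8 bytes; a compact equivalent for reference:
def _padded_size(payload_size):
    return payload_size + (-payload_size % 8)

assert [_padded_size(n) for n in (0, 1, 8, 9)] == [0, 8, 8, 16]
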
def reload_dependencies(force=False):
    """Reloads all python modules that law depends on. Currently, this is just *luigi* and *six*.
    Unless *force* is *True*, multiple calls to this function will not have any effect."""
    global _reloaded_deps
    if _reloaded_deps and not force:
        return
    _reloaded_deps = True
    for mod in deps:
        six.moves.reload_module(mod)
        logger.debug("reloaded module '{}'".format(mod))

def remove_tag_and_push_registries(tag_and_push_registries, version):
    """Remove matching entries from tag_and_push_registries (in-place).
    :param tag_and_push_registries: dict, uri -> dict
    :param version: str, 'version' to match against"""
    registries = [uri for uri, regdict in tag_and_push_registries.items() if regdict['version'] == version]
    for registry in registries:
        logger.info("removing %s registry: %s", version, registry)
        del tag_and_push_registries[registry]

def execute(self, limitRequest=350000, limitResMax=-1):
    """Executes the query."""
    query = self.getQueries()
    query = self.__buildLimit(query, limitResMax)
    if limitResMax == -1:
        query.setBaseUrl(self.__url + '/count')
        countUrl = query.getUrl()
        result_count = Util.retrieveJsonResponseFromServer(countUrl)
        nbr_results = result_count['total']
    else:
        nbr_results = limitResMax
    resultSearch = []
    if nbr_results < limitRequest:
        query.setBaseUrl(self.__url + '/records')
        url = query.getUrl()
        startVal = query._getParameters()['start']
        # Fetch the results page by page until nbr_results is reached.
        while (nbr_results - startVal) > 0:
            resultTemp = Util.retrieveJsonResponseFromServer(url)
            resultSearch.extend(self.__parseResponse(resultTemp))
            parameters = query._getParameters()
            startVal = parameters['start'] + parameters['limit']
            query = UpdateParameter(query, 'start', startVal)
            query.setBaseUrl(self.__url + '/records')
            url = query.getUrl()
    else:
        out_mess = "Not allowed\nNbr results (%d) exceeds limit_request param: %d\n" % (nbr_results, limitRequest)
        self.__logger.info(out_mess)
    return resultSearch

def main():
    """HAR converter: parse command line options and run commands."""
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument('-V', '--version', dest='version', action='store_true', help="show version")
    parser.add_argument('--log-level', default='INFO', help="Specify logging level, default is INFO.")
    parser.add_argument('har_source_file', nargs='?', help="Specify HAR source file")
    parser.add_argument('-2y', '--to-yml', '--to-yaml', dest='to_yaml', action='store_true', help="Convert to YAML format; if not specified, convert to JSON format by default.")
    parser.add_argument('--filter', help="Specify filter keyword; only urls that include the filter string will be converted.")
    parser.add_argument('--exclude', help="Specify exclude keyword; urls that include the exclude string will be ignored, multiple keywords can be joined with '|'")
    args = parser.parse_args()
    if args.version:
        print("{}".format(__version__))
        exit(0)
    log_level = getattr(logging, args.log_level.upper())
    logging.basicConfig(level=log_level)
    har_source_file = args.har_source_file
    if not har_source_file or not har_source_file.endswith(".har"):
        logging.error("HAR file not specified or not a .har file.")
        sys.exit(1)
    output_file_type = "YML" if args.to_yaml else "JSON"
    HarParser(har_source_file, args.filter, args.exclude).gen_testcase(output_file_type)
    return 0

def aggregate_region(self, variable, region='World', subregions=None, components=None, append=False):
    """Compute the aggregate of timeseries over a number of regions,
    including variable components only defined at the `region` level.
    Parameters
    ----------
    variable : str
        variable for which the aggregate should be computed
    region : str, default 'World'
        dimension
    subregions : list of str
        list of subregions, defaults to all regions other than `region`
    components : list of str
        list of variables, defaults to all sub-categories of `variable`
        included in `region` but not in any of `subregions`
    append : bool, default False
        append the aggregate timeseries to `data` and return None,
        else return aggregate timeseries"""
    # default subregions to all regions other than `region`
    if subregions is None:
        rows = self._apply_filters(variable=variable)
        subregions = set(self.data[rows].region) - set([region])
    if not len(subregions):
        msg = 'cannot aggregate variable `{}` to `{}` because it does not exist in any subregion'
        logger().info(msg.format(variable, region))
        return
    # compute aggregate over all subregions
    subregion_df = self.filter(region=subregions)
    cols = ['region', 'variable']
    _data = _aggregate(subregion_df.filter(variable=variable).data, cols)
    # add components at the `region` level, defaults to all variables one
    # level below `variable` that are only present in `region`
    region_df = self.filter(region=region)
    components = components or (set(region_df._variable_components(variable)).difference(subregion_df._variable_components(variable)))
    if len(components):
        rows = region_df._apply_filters(variable=components)
        _data = _data.add(_aggregate(region_df.data[rows], cols), fill_value=0)
    if append is True:
        self.append(_data, region=region, variable=variable, inplace=True)
    else:
        return _data

def load_auth_token(token, load=True):
    """Validate an auth0 token. Returns the token's payload, or an exception
    of the type:"""
    assert get_config().jwt_secret, "No JWT secret configured for pymacaron"
    assert get_config().jwt_issuer, "No JWT issuer configured for pymacaron"
    assert get_config().jwt_audience, "No JWT audience configured for pymacaron"
    log.info("Validating token, using issuer:%s, audience:%s, secret:%s***" % (get_config().jwt_issuer, get_config().jwt_audience, get_config().jwt_secret[1:8]))
    # First extract the issuer
    issuer = get_config().jwt_issuer
    try:
        headers = jwt.get_unverified_header(token)
    except jwt.DecodeError:
        raise AuthInvalidTokenError('token signature is invalid')
    log.debug("Token has headers %s" % headers)
    if 'iss' in headers:
        issuer = headers['iss']
    # Then validate the token against this issuer
    log.info("Validating token in issuer %s" % issuer)
    try:
        # Allow for a time difference of up to 5min (300sec)
        payload = jwt.decode(token, get_config().jwt_secret, audience=get_config().jwt_audience, leeway=300)
    except jwt.ExpiredSignature:
        raise AuthTokenExpiredError('Auth token is expired')
    except jwt.InvalidAudienceError:
        raise AuthInvalidTokenError('incorrect audience')
    except jwt.DecodeError:
        raise AuthInvalidTokenError('token signature is invalid')
    except jwt.InvalidIssuedAtError:
        raise AuthInvalidTokenError('Token was issued in the future')
    # Save payload to stack
    payload['token'] = token
    payload['iss'] = issuer
    if load:
        stack.top.current_user = payload
    return payload

def safe_mkdir_for_all(paths):
    """Make directories which would contain all of the passed paths.
    This avoids attempting to re-make the same directories, which may be noticeably expensive if many
    paths mostly fall in the same set of directories.
    :param list of str paths: The paths for which containing directories should be created."""
    created_dirs = set()
    for path in paths:
        dir_to_make = os.path.dirname(path)
        if dir_to_make not in created_dirs:
            safe_mkdir(dir_to_make)
            created_dirs.add(dir_to_make)

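# Illustrative sketch (not part of the original source): on Python 3 a similar
# effect can be had with the standard library alone, assuming safe_mkdir
# behaves like os.makedirs(..., exist_ok=True):
import os

def _mkdirs_for_all(paths):
    for d in {os.path.dirname(p) for p in paths}:  # dedupe the directories first
        if d:  # skip bare filenames that have no directory component
            os.makedirs(d, exist_ok=True)
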
def child(self, local_name=None, name=None, ns_uri=None, node_type=None, filter_fn=None):
    """:return: the first child node matching the given constraints, or *None* if there are no matching child nodes.
    Delegates to :meth:`NodeList.filter`."""
    return self.children(name=name, local_name=local_name, ns_uri=ns_uri, node_type=node_type, filter_fn=filter_fn, first_only=True)

def get_top_tags(self, limit=None, cacheable=True):
    """Returns the most used tags as a sequence of TopItem objects."""
    # Last.fm has no "limit" parameter for tag.getTopTags,
    # so we need to get all (250) and then limit locally.
    doc = _Request(self, "tag.getTopTags").execute(cacheable)
    seq = []
    for node in doc.getElementsByTagName("tag"):
        if limit and len(seq) >= limit:
            break
        tag = Tag(_extract(node, "name"), self)
        weight = _number(_extract(node, "count"))
        seq.append(TopItem(tag, weight))
    return seq

def FetchSizeOfSignedBinary(binary_urn, token=None):
    """Returns the size of the given binary (in bytes).
    Args:
        binary_urn: RDFURN that uniquely identifies the binary.
        token: ACL token to use with the legacy (non-relational) datastore.
    Raises:
        SignedBinaryNotFoundError: If no signed binary with the given URN exists."""
    if _ShouldUseLegacyDatastore():
        try:
            aff4_stream = aff4.FACTORY.Open(binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token)
            return aff4_stream.size
        except aff4.InstantiationError:
            raise SignedBinaryNotFoundError(binary_urn)
    else:
        try:
            references, _ = data_store.REL_DB.ReadSignedBinaryReferences(_SignedBinaryIDFromURN(binary_urn))
        except db.UnknownSignedBinaryError:
            raise SignedBinaryNotFoundError(binary_urn)
        last_reference = references.items[-1]
        return last_reference.offset + last_reference.size

def execute(self):
    """Convert the notebook to a python script and execute it, returning the local context
    as a dict."""
    from nbformat import read
    from nbconvert.exporters import export_script
    from io import StringIO  # io.StringIO replaces the Python 2-only cStringIO
    notebook = read(StringIO(self.record.unpacked_contents), 4)
    script, resources = export_script(notebook)
    env_dict = {}
    exec(compile(script.replace('# coding: utf-8', ''), 'script', 'exec'), env_dict)
    return env_dict

def underlying_symbol(self):
    """[str] Code of the underlying instrument. Currently this field is 'null' for all futures
    contracts other than the index futures (IH, IF, IC). (futures only)"""
    try:
        return self.__dict__["underlying_symbol"]
    except (KeyError, ValueError):
        raise AttributeError("Instrument(order_book_id={}) has no attribute 'underlying_symbol' ".format(self.order_book_id))

def find_root(node):
    """Find the top level namespace."""
    # Scamper up to the top level namespace
    while node.type != syms.file_input:
        node = node.parent
        if not node:
            raise ValueError("root found before file_input node was found.")
    return node

def is_member(self, rtc):
    '''Is the given component a member of this composition?
    rtc may be a Component object or a string containing a component's
    instance name. Component objects are more reliable.
    Returns False if the given component is not a member of this
    composition.
    Raises NotCompositeError if this component is not a composition.'''
    if not self.is_composite:
        raise exceptions.NotCompositeError(self.name)
    members = self.organisations[0].obj.get_members()
    if type(rtc) is str:
        for m in members:
            if m.get_component_profile().instance_name == rtc:
                return True
    else:
        for m in members:
            if m._is_equivalent(rtc.object):
                return True
    return False

def cartesian_to_homogeneous_vectors(cartesian_vector, matrix_type="numpy"):
    """Converts a cartesian vector to a homogeneous vector."""
    dimension_x = cartesian_vector.shape[0]
    # Vector
    if matrix_type == "numpy":
        homogeneous_vector = np.zeros(dimension_x + 1)
        # Last item is a 1
        homogeneous_vector[-1] = 1
        homogeneous_vector[:-1] = cartesian_vector
        return homogeneous_vector

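# Illustrative check (not part of the original source): the conversion above
# just appends a trailing 1 to the cartesian coordinates; a quick numpy
# demonstration of that behavior:
import numpy as np

v = np.array([2.0, 3.0])
h = np.append(v, 1.0)  # equivalent result: array([2., 3., 1.])
assert h.shape[0] == v.shape[0] + 1 and h[-1] == 1.0
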
def _append_dataframe(self, df, source_info="", units=None):
    """Appends a new data group from a Pandas data frame."""
    units = units or {}
    t = df.index
    index_name = df.index.name
    time_name = index_name or "time"
    version = self.version
    timestamps = t
    # if self.version < '3.10':
    #     if timestamps.dtype.byteorder == '>':
    #         timestamps = timestamps.byteswap().newbyteorder()
    #     for signal in signals:
    #         if signal.samples.dtype.byteorder == '>':
    #             signal.samples = signal.samples.byteswap().newbyteorder()
    if self.version >= "3.00":
        channel_size = v23c.CN_DISPLAYNAME_BLOCK_SIZE
    elif self.version >= "2.10":
        channel_size = v23c.CN_LONGNAME_BLOCK_SIZE
    else:
        channel_size = v23c.CN_SHORT_BLOCK_SIZE
    file = self._tempfile
    tell = file.tell
    kargs = {
        "module_nr": 0,
        "module_address": 0,
        "type": v23c.SOURCE_ECU,
        "description": b"Channel inserted by Python Script",
    }
    ce_block = ChannelExtension(**kargs)
    dg_cntr = len(self.groups)
    gp = {}
    gp.channels = gp_channels = []
    gp.channel_dependencies = gp_dep = []
    gp.signal_types = gp_sig_types = []
    gp.string_dtypes = []
    self.groups.append(gp)
    cycles_nr = len(timestamps)
    fields = []
    types = []
    parents = {}
    ch_cntr = 0
    offset = 0
    field_names = UniqueDB()
    if df.shape[0]:
        # conversion for time channel
        kargs = {
            "conversion_type": v23c.CONVERSION_TYPE_NONE,
            "unit": b"s",
            "min_phy_value": timestamps[0] if cycles_nr else 0,
            "max_phy_value": timestamps[-1] if cycles_nr else 0,
        }
        conversion = ChannelConversion(**kargs)
        conversion.unit = "s"
        source = ce_block
        # time channel
        t_type, t_size = fmt_to_datatype_v3(timestamps.dtype, timestamps.shape)
        kargs = {
            "short_name": time_name.encode("latin-1"),
            "channel_type": v23c.CHANNEL_TYPE_MASTER,
            "data_type": t_type,
            "start_offset": 0,
            "min_raw_value": timestamps[0] if cycles_nr else 0,
            "max_raw_value": timestamps[-1] if cycles_nr else 0,
            "bit_count": t_size,
            "block_len": channel_size,
            "version": version,
        }
        channel = Channel(**kargs)
        channel.name = name = time_name
        channel.conversion = conversion
        channel.source = source
        gp_channels.append(channel)
        self.channels_db.add(name, (dg_cntr, ch_cntr))
        self.masters_db[dg_cntr] = 0
        # data group record parents
        parents[ch_cntr] = name, 0
        # time channel doesn't have channel dependencies
        gp_dep.append(None)
        fields.append(timestamps)
        types.append((name, timestamps.dtype))
        field_names.add(name)
        offset += t_size
        ch_cntr += 1
        gp_sig_types.append(0)
    for signal in df:
        sig = df[signal]
        name = signal
        sig_type = v23c.SIGNAL_TYPE_SCALAR
        gp_sig_types.append(sig_type)
        new_source = ce_block
        # compute additional byte offset for large record sizes
        if offset > v23c.MAX_UINT16:
            additional_byte_offset = ceil((offset - v23c.MAX_UINT16) / 8)
            start_bit_offset = offset - additional_byte_offset * 8
        else:
            start_bit_offset = offset
            additional_byte_offset = 0
        s_type, s_size = fmt_to_datatype_v3(sig.dtype, sig.shape)
        kargs = {
            "channel_type": v23c.CHANNEL_TYPE_VALUE,
            "data_type": s_type,
            "min_raw_value": 0,
            "max_raw_value": 0,
            "start_offset": start_bit_offset,
            "bit_count": s_size,
            "additional_byte_offset": additional_byte_offset,
            "block_len": channel_size,
            "version": version,
        }
        if s_size < 8:
            s_size = 8
        channel = Channel(**kargs)
        channel.name = name
        channel.source = new_source
        unit = units.get(name, b"")
        if unit:
            if hasattr(unit, "encode"):
                unit = unit.encode("latin-1")
        # conversion for the channel
        kargs = {
            "conversion_type": v23c.CONVERSION_TYPE_NONE,
            "unit": unit,
            "min_phy_value": 0,
            "max_phy_value": 0,
        }
        conversion = ChannelConversion(**kargs)
        conversion.unit = unit
        gp_channels.append(channel)
        offset += s_size
        self.channels_db.add(name, (dg_cntr, ch_cntr))
        # update the parents as well
        field_name = field_names.get_unique_name(name)
        parents[ch_cntr] = field_name, 0
        if sig.dtype.kind == "S":
            gp.string_dtypes.append(sig.dtype)
        fields.append(sig)
        types.append((field_name, sig.dtype))
        ch_cntr += 1
        # simple channels don't have channel dependencies
        gp_dep.append(None)
    # channel group
    kargs = {"cycles_nr": cycles_nr, "samples_byte_nr": offset // 8, "ch_nr": ch_cntr}
    if self.version >= "3.30":
        kargs["block_len"] = v23c.CG_POST_330_BLOCK_SIZE
    else:
        kargs["block_len"] = v23c.CG_PRE_330_BLOCK_SIZE
    gp.channel_group = ChannelGroup(**kargs)
    gp.channel_group.comment = source_info
    # data group
    if self.version >= "3.20":
        block_len = v23c.DG_POST_320_BLOCK_SIZE
    else:
        block_len = v23c.DG_PRE_320_BLOCK_SIZE
    gp.data_group = DataGroup(block_len=block_len)
    # data block
    types = dtype(types)
    gp.types = types
    gp.parents = parents
    gp.sorted = True
    if df.shape[0]:
        samples = fromarrays(fields, dtype=types)
    else:
        samples = array([])
    block = samples.tostring()
    gp.data_location = v23c.LOCATION_TEMPORARY_FILE
    if cycles_nr:
        data_address = tell()
        gp.data_group.data_block_addr = data_address
        size = len(block)
        self._tempfile.write(block)
        gp.data_blocks.append(DataBlockInfo(address=data_address, block_type=0, raw_size=size, size=size, param=0))
    else:
        gp.data_location = v23c.LOCATION_TEMPORARY_FILE
    # data group trigger
    gp.trigger = None

def setlist(self, key, new_list):
    # type: (Hashable, List[Any]) -> None
    """Remove the old values for a key and add new ones. Note that the list
    you pass the values in will be shallow-copied before it is inserted in
    the dictionary.
    >>> d = MultiValueDict()
    >>> d.setlist('foo', ['1', '2'])
    >>> d['foo']
    >>> d.getlist('foo')
    ['1', '2']
    :param key: The key for which the values are set.
    :param new_list: An iterable with the new values for the key. Old values
        are removed first."""
    dict.__setitem__(self, key, list(new_list))

def r(self):
    """Extract read lock (r) counter if available (lazy)."""
    if not self._counters_calculated:
        self._counters_calculated = True
        self._extract_counters()
    return self._r

def saveCertPem(self, cert, path):
    '''Save a certificate in PEM format to a file outside the certdir.'''
    with s_common.genfile(path) as fd:
        fd.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))

def semActsSatisfied(acts: Optional[List[ShExJ.SemAct]], cntxt: Context) -> bool:
    """`5.7.1 Semantic Actions Semantics <http://shex.io/shex-semantics/#semantic-actions-semantics>`_
    The evaluation of semActsSatisfied on a list of SemActs returns success or failure. The evaluation of an individual
    SemAct is implementation-dependent."""
    return True

def agent_leave(consul_url=None, token=None, node=None):
    '''Used to instruct the agent to force a node into the left state.
    :param consul_url: The Consul server URL.
    :param node: The node the agent will force into left state
    :return: Boolean and message indicating success or failure.
    CLI Example:
    .. code-block:: bash
        salt '*' consul.agent_leave node='web1.example.com'
    '''
    ret = {}
    query_params = {}
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            ret['message'] = 'No Consul URL found.'
            ret['res'] = False
            return ret
    if not node:
        raise SaltInvocationError('Required argument "node" is missing.')
    function = 'agent/force-leave/{0}'.format(node)
    res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params)
    if res['res']:
        ret['res'] = True
        ret['message'] = 'Node {0} put in leave state.'.format(node)
    else:
        ret['res'] = False
        ret['message'] = 'Unable to change state for {0}.'.format(node)
    return ret

def getExportsList(self, enable=True):
    """Return the exports list.
    If enable is True, only return the active exporters (default).
    If enable is False, return all the exporters.
    Return: list of export module names."""
    if enable:
        return [e for e in self._exports]
    else:
        return [e for e in self._exports_all]

def _load_resource_listing(resource_listing):
    """Load the resource listing from file, handling errors.
    :param resource_listing: path to the api-docs resource listing file
    :type resource_listing: string
    :returns: contents of the resource listing file
    :rtype: dict"""
    try:
        with open(resource_listing) as resource_listing_file:
            return simplejson.load(resource_listing_file)
    # If not found, raise a more user-friendly error.
    except IOError:
        raise ResourceListingNotFoundError('No resource listing found at {0}. Note that your json file must be named {1}'.format(resource_listing, API_DOCS_FILENAME))

def losing_abbr(self):
    """Returns a ``string`` of the losing team's abbreviation, such as 'LAD'
    for the Los Angeles Dodgers."""
    if self.winner == HOME:
        return utils._parse_abbreviation(self._away_name)
    return utils._parse_abbreviation(self._home_name)

def _reduced_stack(istart=3, iend=5, ipython=True):
    """Returns the reduced function call stack that includes only relevant
    function calls (i.e., ignores any that are not part of the specified package
    or acorn).
    Args:
        package (str): name of the package that the logged method belongs to."""
    import inspect
    return [i[istart:iend] for i in inspect.stack() if _decorated_path(i[1])]

def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None, evals_result=None, verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None):
    # pylint: disable=too-many-statements,too-many-branches,attribute-defined-outside-init
    """Train a booster with given parameters.
    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round : int
        Number of boosting iterations.
    evals : list of pairs (DMatrix, string)
        List of items to be evaluated during training, this allows user to watch
        performance on the validation set.
    obj : function
        Customized objective function.
    feval : function
        Customized evaluation function.
    maximize : bool
        Whether to maximize feval.
    early_stopping_rounds : int
        Activates early stopping. Validation error needs to decrease at least
        every **early_stopping_rounds** round(s) to continue training.
        Requires at least one item in **evals**.
        If there's more than one, will use the last.
        Returns the model from the last iteration (not the best one).
        If early stopping occurs, the model will have three additional fields:
        ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
        (Use ``bst.best_ntree_limit`` to get the correct value if
        ``num_parallel_tree`` and/or ``num_class`` appears in the parameters)
    evals_result : dict
        This dictionary stores the evaluation results of all the items in watchlist.
        Example: with a watchlist containing
        ``[(dtest, 'eval'), (dtrain, 'train')]`` and
        a parameter containing ``('eval_metric': 'logloss')``,
        the **evals_result** returns
        .. code-block:: python
            {'train': {'logloss': ['0.48253', '0.35953']},
             'eval': {'logloss': ['0.480385', '0.357756']}}
    verbose_eval : bool or int
        Requires at least one item in **evals**.
        If **verbose_eval** is True then the evaluation metric on the validation set is
        printed at each boosting stage.
        If **verbose_eval** is an integer then the evaluation metric on the validation set
        is printed at every given **verbose_eval** boosting stage. The last boosting stage
        / the boosting stage found by using **early_stopping_rounds** is also printed.
        Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric
        is printed every 4 boosting stages, instead of every boosting stage.
    learning_rates : list or function (deprecated - use callback API instead)
        List of learning rates for each boosting round
        or a customized function that calculates eta in terms of the
        current number of rounds and the total number of boosting rounds (e.g. yields
        learning rate decay)
    xgb_model : file name of stored xgb model or 'Booster' instance
        Xgb model to be loaded before training (allows training continuation).
    callbacks : list of callback functions
        List of callback functions that are applied at the end of each iteration.
        It is possible to use predefined callbacks by using
        :ref:`Callback API <callback_api>`.
        Example:
        .. code-block:: python
            [xgb.callback.reset_learning_rate(custom_rates)]
    Returns
    -------
    Booster : a trained booster model"""
    callbacks = [] if callbacks is None else callbacks
    # Most of the legacy advanced options become callbacks
    if isinstance(verbose_eval, bool) and verbose_eval:
        callbacks.append(callback.print_evaluation())
    else:
        if isinstance(verbose_eval, int):
            callbacks.append(callback.print_evaluation(verbose_eval))
    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds, maximize=maximize, verbose=bool(verbose_eval)))
    if evals_result is not None:
        callbacks.append(callback.record_evaluation(evals_result))
    if learning_rates is not None:
        warnings.warn("learning_rates parameter is deprecated - use callback API instead", DeprecationWarning)
        callbacks.append(callback.reset_learning_rate(learning_rates))
    return _train_internal(params, dtrain, num_boost_round=num_boost_round, evals=evals, obj=obj, feval=feval, xgb_model=xgb_model, callbacks=callbacks)

def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0, edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
    """Display a scatter plot.
    Displays a scatter plot using the SAM projection or another input
    projection with or without annotations.
    Parameters
    ----------
    projection - ndarray of floats, optional, default None
        An N x 2 matrix, where N is the number of data points. If None,
        use an existing SAM projection (default t-SNE). Can take on values
        'umap' or 'tsne' to specify either the SAM UMAP embedding or
        SAM t-SNE embedding.
    c - ndarray or str, optional, default None
        Colors for each cell in the scatter plot. Can be a vector of
        floats or strings for cell annotations. Can also be a key
        for sam.adata.obs (i.e. 'louvain_clusters').
    axes - matplotlib axis, optional, default None
        Plot output to the specified, existing axes. If None, create new
        figure window.
    cmap - string, optional, default 'rainbow'
        The colormap to use for the input color values.
    colorbar - bool, optional, default True
        If True, display a colorbar indicating which values/annotations
        correspond to which color in the scatter plot.
    Keyword arguments -
        All other keyword arguments that can be passed into
        matplotlib.pyplot.scatter can be used."""
    if not PLOTTING:
        print("matplotlib not installed!")
    else:
        if isinstance(projection, str):
            try:
                dt = self.adata.obsm[projection]
            except KeyError:
                print('Please create a projection first using run_umap or run_tsne')
        elif projection is None:
            try:
                dt = self.adata.obsm['X_umap']
            except KeyError:
                try:
                    dt = self.adata.obsm['X_tsne']
                except KeyError:
                    print("Please create either a t-SNE or UMAP projection first.")
                    return
        else:
            dt = projection
        if axes is None:
            plt.figure()
            axes = plt.gca()
        if c is None:
            plt.scatter(dt[:, 0], dt[:, 1], s=s, linewidth=linewidth, edgecolor=edgecolor, **kwargs)
        else:
            if isinstance(c, str):
                try:
                    c = self.adata.obs[c].get_values()
                except KeyError:
                    pass  # do nothing
            if (isinstance(c[0], str) or isinstance(c[0], np.str_)) and (isinstance(c, np.ndarray) or isinstance(c, list)):
                i = ut.convert_annotations(c)
                ui, ai = np.unique(i, return_index=True)
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s, linewidth=linewidth, edgecolor=edgecolor, **kwargs)
                if colorbar:
                    cbar = plt.colorbar(cax, ax=axes, ticks=ui)
                    cbar.ax.set_yticklabels(c[ai])
            else:
                if not (isinstance(c, np.ndarray) or isinstance(c, list)):
                    colorbar = False
                i = c
                cax = axes.scatter(dt[:, 0], dt[:, 1], c=i, cmap=cmap, s=s, linewidth=linewidth, edgecolor=edgecolor, **kwargs)
                if colorbar:
                    plt.colorbar(cax, ax=axes)

def serialize_instance(instance):
    """Since Django 1.6 items added to the session are no longer pickled,
    but JSON encoded by default. We are storing partially complete models
    in the session (user, account, token, ...). We cannot use standard
    Django serialization, as these models are not "complete" yet.
    Serialization will start complaining about missing relations et al."""
    ret = dict([(k, v) for k, v in instance.__dict__.items() if not k.startswith('_')])
    return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))

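# Illustrative sketch (not part of the original source): the round-trip above
# with a plain object standing in for a Django model instance; here
# json.dumps(..., default=str) stands in for DjangoJSONEncoder, since both
# render dates as strings.
import datetime
import json

class _FakeInstance:
    def __init__(self):
        self.name = "alice"
        self.created = datetime.date(2020, 1, 1)
        self._state = object()  # underscore attributes are dropped by the filter

inst = _FakeInstance()
data = {k: v for k, v in inst.__dict__.items() if not k.startswith('_')}
assert json.loads(json.dumps(data, default=str)) == {'name': 'alice', 'created': '2020-01-01'}
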
def i18n_alternate_links():
    """Render the <link rel="alternate" hreflang /> tags
    if the page is in an I18nBlueprint."""
    if (not request.endpoint or not current_app.url_map.is_endpoint_expecting(request.endpoint, 'lang_code')):
        return Markup('')
    try:
        LINK_PATTERN = '<link rel="alternate" href="{url}" hreflang="{lang}" />'
        links = []
        current_lang = get_current_locale().language
        params = {}
        if request.args:
            params.update(request.args)
        if request.view_args:
            params.update(request.view_args)
        for lang in current_app.config['LANGUAGES']:
            if lang != current_lang:
                url = url_for(request.endpoint, lang_code=lang, **params)
                links.append(LINK_PATTERN.format(url=url, lang=lang))
        return Markup(''.join(links))
    except Exception:  # Never fails
        return Markup('')

def store_relation(self, src, name, dst):
    '''use this to store a relation between two objects'''
    self.__require_string__(name)
    # print('storing relation', src, name, dst)
    # make sure both items are stored
    self.store_item(src)
    self.store_item(dst)
    with self._write_lock:
        # print(locals())
        # run the insertion
        self._execute('insert into relations select ob1.id, ?, ob2.id from objects as ob1, objects as ob2 where ob1.code=? and ob2.code=?;', (name, self.serialize(src), self.serialize(dst)))
        self.autocommit()

def signmsg(msg, priv, iscompressed, k=0):
    '''Sign a message -- the message itself, not a hash -- with a given
    private key.
    Input private key must be hex, NOT WIF. Use wiftohex() found in
    .bitcoin in order to get the hex private key and whether it is
    (or rather, its public key is) compressed.
    'iscompressed' is a True/False bool for whether or not to indicate
    compression on the public key that corresponds to the input
    private key hex.
    'iscompressed' is not defaulted to True like it is in most other
    functions, because it really matters whether you use it. All
    software implementations treat uncompressed and compressed keys as
    entirely different, and a valid message signature will NOT
    validate if the public key compression is not correct. Whereas for
    transaction signatures, only the r-value is checked, message
    signature validation additionally checks/verifies public key
    compression. So you must manually set it!
    Also, note that message signatures are an entirely different
    format from DER-encoded transaction signatures.
    Sample message, which includes the quotation marks, and has a new
    line and 4 spaces after the new line:
    "You miss 100% of the shots you don't take. --Wayne Gretzky"
        --Michael Scott
    >>> msg = '"You miss 100% of the shots you don\'t take. --Wayne Gretzky"\n    --Michael Scott'
    >>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
    >>> k = 4  # chosen by fair dice roll, guaranteed to be random
    >>> signmsg(msg, p, True, k)
    'H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo='
    Your software should then translate that data set into something akin to:
    -----BEGIN BITCOIN SIGNED MESSAGE-----
    "You miss 100% of the shots you don't take. --Wayne Gretzky"
        --Michael Scott
    -----BEGIN BITCOIN SIGNATURE-----
    Address: 1AuZ7wby1rUVzwFvFgySeTFS7JcHN2TeGs
    H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo=
    -----END BITCOIN SIGNATURE-----'''
    omsg = msg
    # Stripping carriage returns is standard practice in every
    # implementation I found, including Bitcoin Core
    msg = msg.replace("\r\n", "\n")
    msg1 = hexstrlify(bytearray("\x18Bitcoin Signed Message:\n", 'utf-8'))
    msg2 = tovarint(len(msg))
    msg3 = hexstrlify(bytearray(msg, 'utf-8'))
    msg = hash256(msg1 + msg2 + msg3)
    sig = sign(msg, priv, k)
    # Bitcoin message signature format doesn't use DER leading '00's.
    # Although, r/s must be 64-char, so they are zfilled to that.
    rlen = 2 * int(sig[6:8], 16)
    r = sig[8:8 + rlen].lstrip("0").zfill(64)
    slen = 2 * int(sig[10 + rlen:12 + rlen], 16)
    s = sig[12 + rlen:12 + rlen + slen].lstrip("0").zfill(64)
    pubkey = privtopub(priv, iscompressed)
    for i in range(4):
        prefix = 27 + i
        if iscompressed:
            prefix = prefix + 4
        o = base64.b64encode(unhexlify(dechex(prefix, 1) + r + s))
        if str(o)[:2] == "b'":  # Python 3: strip the bytes-literal wrapper from str(o)
            o = str(o)[2:-1]
        if verifymsg(omsg, o) == pubkey:
            return o
    raise Exception("Unknown failure. This method should never reach the end.")

def create_branches(self, branches):
    """Create branches from a TreeBuffer or dict mapping names to type names.
    Parameters
    ----------
    branches : TreeBuffer or dict"""
    if not isinstance(branches, TreeBuffer):
        branches = TreeBuffer(branches)
    self.set_buffer(branches, create_branches=True)

def execute(self, *args, **kwargs):
    '''Executes all appropriate modules according to the options specified in args/kwargs.
    Returns a list of executed module objects.'''
    run_modules = []
    orig_arguments = self.arguments
    if args or kwargs:
        self._set_arguments(list(args), kwargs)
    # Run all modules
    for module in self.list():
        obj = self.run(module)
    # Add all loaded modules that marked themselves as enabled to the
    # run_modules list
    for (module, obj) in iterator(self.executed_modules):
        # Report the results if the module is enabled and if it is a
        # primary module or if it reported any results/errors
        if obj.enabled and (obj.PRIMARY or obj.results or obj.errors):
            run_modules.append(obj)
    self.arguments = orig_arguments
    return run_modules

def incrementKeySequenceCounter(self, iIncrementValue=1):
    """increment the key sequence with a given value
    Args:
        iIncrementValue: specific increment value to be added
    Returns:
        True: successful to increment the key sequence with a given value
        False: fail to increment the key sequence with a given value"""
    print('%s call incrementKeySequenceCounter' % self.port)
    print(iIncrementValue)
    currentKeySeq = ''
    try:
        currentKeySeq = self.getKeySequenceCounter()
        keySequence = int(currentKeySeq, 10) + iIncrementValue
        print(keySequence)
        return self.setKeySequenceCounter(keySequence)
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger('incrementKeySequenceCounter() Error: ' + str(e))

def _set_get_vnetwork_vswitches(self, v, load=False):
    """Setter method for get_vnetwork_vswitches, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vswitches (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_get_vnetwork_vswitches is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_get_vnetwork_vswitches() directly.
    YANG Description: Shows discovered Virtual Switches"""
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=get_vnetwork_vswitches.get_vnetwork_vswitches, is_leaf=True, yang_name="get-vnetwork-vswitches", rest_name="get-vnetwork-vswitches", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vs-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """get_vnetwork_vswitches must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_vnetwork_vswitches.get_vnetwork_vswitches, is_leaf=True, yang_name="get-vnetwork-vswitches", rest_name="get-vnetwork-vswitches", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vs-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)"""})
    self.__get_vnetwork_vswitches = t
    if hasattr(self, '_set'):
        self._set()

def linterp(self, setx, sety, x):
    """Linear interpolation of model data values between time steps."""
    if math.isnan(sety[0]) or math.isnan(setx[0]):
        return np.nan
    # if math.isnan(sety[0]):
    #     sety[0] = 0.
    # if math.isnan(sety[1]):
    #     sety[1] = 0.
    return sety[0] + (x - setx[0]) * ((sety[1] - sety[0]) / (setx[1] - setx[0]))

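# Illustrative check (not part of the original source): the two-point linear
# interpolation formula above, evaluated on made-up numbers; between (0, 10)
# and (4, 30), x = 1 gives 15.
setx, sety, x = (0.0, 4.0), (10.0, 30.0), 1.0
y = sety[0] + (x - setx[0]) * ((sety[1] - sety[0]) / (setx[1] - setx[0]))
assert y == 15.0
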
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
    """Given a document, tries to find its declared encoding.
    An XML encoding is declared at the beginning of the document.
    An HTML encoding is declared in a <meta> tag, hopefully near the
    beginning of the document."""
    if search_entire_document:
        xml_endpos = html_endpos = len(markup)
    else:
        xml_endpos = 1024
        html_endpos = max(2048, int(len(markup) * 0.05))
    declared_encoding = None
    declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
    if not declared_encoding_match and is_html:
        declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
    if declared_encoding_match is not None:
        declared_encoding = declared_encoding_match.groups()[0].decode('ascii', 'replace')
    if declared_encoding:
        return declared_encoding.lower()
    return None

def namedb_flatten_history(hist):
    """Given a name's history, flatten it into a list of deltas.
    They will be in *increasing* order."""
    ret = []
    block_ids = sorted(hist.keys())
    for block_id in block_ids:
        vtxinfos = hist[block_id]
        for vtxinfo in vtxinfos:
            info = copy.deepcopy(vtxinfo)
            ret.append(info)
    return ret

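# Illustrative check (not part of the original source): toy input/output for
# the flattening above; the block heights and delta payloads are made up, and
# the inline expression mirrors the function's sorted-keys traversal.
hist = {200: [{'op': 'update'}], 100: [{'op': 'register'}, {'op': 'transfer'}]}
flat = [d for block_id in sorted(hist) for d in hist[block_id]]
assert [d['op'] for d in flat] == ['register', 'transfer', 'update']
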
def _get_all_files(self, path, *exclude):
    '''Walk implementation. The stdlib version works differently in Python 2.x and 3.x.'''
    files = list()
    dirs = list()
    links = list()
    if os.access(path, os.R_OK):
        for obj in os.listdir(path):
            obj = os.path.join(path, obj)
            valid = True
            for ex_obj in exclude:
                if obj.startswith(str(ex_obj)):
                    valid = False
                    continue
            if not valid or not os.path.exists(obj) or not os.access(obj, os.R_OK):
                continue
            if salt.utils.path.islink(obj):
                links.append(obj)
            elif os.path.isdir(obj):
                dirs.append(obj)
                f_obj, d_obj, l_obj = self._get_all_files(obj, *exclude)
                files.extend(f_obj)
                dirs.extend(d_obj)
                links.extend(l_obj)
            elif os.path.isfile(obj):
                files.append(obj)
    return sorted(files), sorted(dirs), sorted(links)

def describe_export(self, export_type):
    """Fetch metadata for an export.
    - **export_type** is a string specifying which type of export to look
      up.
    Returns a :py:class:`dict` containing metadata for the export."""
    if export_type in TALK_EXPORT_TYPES:
        return talk.get_data_request('project-{}'.format(self.id), export_type.replace('talk_', ''))[0]
    return self.http_get(self._export_path(export_type))[0]

def resource_present(name, resource_id, resource_type, resource_options=None, cibname=None):
    '''Ensure that a resource is created.
    Should be run on one cluster node only
    (there may be races).
    Can only be run on a node with a functional pacemaker/corosync.
    name
        Irrelevant, not used (recommended: {{formulaname}}__resource_present_{{resource_id}})
    resource_id
        name for the resource
    resource_type
        resource type (f.e. ocf:heartbeat:IPaddr2 or VirtualIP)
    resource_options
        additional options for creating the resource
    cibname
        use a cached CIB-file named like cibname instead of the live CIB
    Example:
    .. code-block:: yaml
        mysql_pcs__resource_present_galera:
            pcs.resource_present:
                - resource_id: galera
                - resource_type: "ocf:heartbeat:galera"
                - resource_options:
                    - 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
                    - '--master'
                - cibname: cib_for_galera'''
    return _item_present(name=name, item='resource', item_id=resource_id, item_type=resource_type, extra_args=resource_options, cibname=cibname)

def initialize_ports(self) -> None:
    """Load IO port parameters for the device."""
    if not self.params:
        self.initialize_params(preload_data=False)
    self.params.update_ports()
    self.ports = Ports(self.params, self.request)

def _unlock(self):
    '''Unlocks the index.'''
    if self._devel:
        self.logger.debug("Unlocking Index")
    if self._is_locked():
        os.remove(self._lck)
        return True
    else:
        return True

def predict_pRF_radius(eccentricity, visual_area='V1', source='Wandell2015'):
    '''predict_pRF_radius(eccentricity) yields an estimate of the pRF size for a patch of cortex at the
    given eccentricity in V1.
    predict_pRF_radius(eccentricity, area) yields an estimate in the given visual area (may be given
    by the keyword visual_area).
    predict_pRF_radius(eccentricity, area, source) uses the given source to estimate the pRF size
    (may be given by the keyword source).
    The following visual areas can be specified:
      * 'V1' (default), 'V2', 'V3'
      * 'hV4'
      * 'V3a', 'V3b'
      * 'VO1', 'VO2'
      * 'LO1', 'LO2'
      * 'TO1', 'TO2'
    The following sources may be given:
      * 'Wandell2015': Wandell BA, Winawer J (2015) Computational neuroimaging and population
        receptive fields. Trends Cogn Sci. 19(6):349-57. doi:10.1016/j.tics.2015.03.009.
      * 'Kay2013': Kay KN, Winawer J, Mezer A, Wandell BA (2013) Compressive spatial summation in
        human visual cortex. J Neurophysiol. 110(2):481-94.
    The default source is 'Wandell2015'.'''
    visual_area = visual_area.lower()
    if pimms.is_str(source):
        source = source.lower()
        if source not in pRF_data:
            raise ValueError('Given source (%s) not found in pRF-size database' % source)
        dat = pRF_data[source]
        dat = dat[visual_area]
    else:
        dat = {'m': source[0], 'b': source[1]}
    return dat['m'] * eccentricity + dat['b']

def digest_manifest ( self , manifest , java_algorithm = "SHA-256" ) :
"""Create a main section checksum and sub - section checksums based off
of the data from an existing manifest using an algorithm given
by Java - style name ."""
|
# pick a line separator for creating checksums of the manifest
# contents . We want to use either the one from the given
# manifest , or the OS default if it hasn ' t specified one .
linesep = manifest . linesep or os . linesep
all_key = java_algorithm + "-Digest-Manifest"
main_key = java_algorithm + "-Digest-Manifest-Main-Attributes"
sect_key = java_algorithm + "-Digest"
digest = _get_digest ( java_algorithm )
accum = manifest . get_main_section ( )
self [ main_key ] = b64_encoded_digest ( accum , digest )
for sub_section in manifest . sub_sections . values ( ) :
sub_data = sub_section . get_data ( linesep )
sf_sect = self . create_section ( sub_section . primary ( ) )
sf_sect [ sect_key ] = b64_encoded_digest ( sub_data , digest )
accum += sub_data
self [ all_key ] = b64_encoded_digest ( accum , digest )
|
def column_vectors ( self ) :
"""The values of the transform as three 2D column vectors"""
|
a , b , c , d , e , f , _ , _ , _ = self
return ( a , d ) , ( b , e ) , ( c , f )
|
def mute_string ( text ) :
"""Replace contents with ' xxx ' to prevent syntax matching .
> > > mute _ string ( ' " abc " ' )
' " xxx " '
> > > mute _ string ( " ' ' ' abc ' ' ' " )
" ' ' ' xxx ' ' ' "
> > > mute _ string ( " r ' abc ' " )
" r ' xxx ' " """
|
start = 1
end = len ( text ) - 1
# String modifiers ( e . g . u or r )
if text . endswith ( '"' ) :
start += text . index ( '"' )
elif text . endswith ( "'" ) :
start += text . index ( "'" )
# Triple quotes
if text . endswith ( '"""' ) or text . endswith ( "'''" ) :
start += 2
end -= 2
return text [ : start ] + 'x' * ( end - start ) + text [ end : ]
|
def _parse_content ( self , text ) :
'''Parses the content of a response doc into the correct
format for . state .'''
|
try :
return json . loads ( text )
except ValueError :
raise exc . UnexpectedlyNotJSON ( "The resource at {.uri} wasn't valid JSON" , self )
|
def make_strain_from_inj_object ( self , inj , delta_t , detector_name , f_lower = None , distance_scale = 1 ) :
"""Make a h ( t ) strain time - series from an injection object as read from
a sim _ inspiral table , for example .
Parameters
inj : injection object
The injection object to turn into a strain h ( t ) .
delta _ t : float
Sample rate to make injection at .
detector _ name : string
Name of the detector used for projecting injections .
f _ lower : { None , float } , optional
Low - frequency cutoff for injected signals . If None , use value
provided by each injection .
distance _ scale : { 1 , float } , optional
Factor to scale the distance of an injection with . The default is
no scaling .
Returns
signal : float
h ( t ) corresponding to the injection ."""
|
detector = Detector ( detector_name )
if f_lower is None :
f_l = inj . f_lower
else :
f_l = f_lower
name , phase_order = legacy_approximant_name ( inj . waveform )
# compute the waveform time series
hp , hc = get_td_waveform ( inj , approximant = name , delta_t = delta_t , phase_order = phase_order , f_lower = f_l , distance = inj . distance , ** self . extra_args )
hp /= distance_scale
hc /= distance_scale
hp . _epoch += inj . get_time_geocent ( )
hc . _epoch += inj . get_time_geocent ( )
# taper the polarizations
hp_tapered = wfutils . taper_timeseries ( hp , inj . taper )
hc_tapered = wfutils . taper_timeseries ( hc , inj . taper )
# compute the detector response and add it to the strain
signal = detector . project_wave ( hp_tapered , hc_tapered , inj . longitude , inj . latitude , inj . polarization )
return signal
|
def scene_on ( self ) :
"""Trigger group / scene to ON level ."""
|
user_data = Userdata ( { 'd1' : self . _group , 'd2' : 0x00 , 'd3' : 0x00 , 'd4' : 0x11 , 'd5' : 0xff , 'd6' : 0x00 } )
self . _set_sent_property ( DIMMABLE_KEYPAD_SCENE_ON_LEVEL , 0xff )
cmd = ExtendedSend ( self . _address , COMMAND_EXTENDED_TRIGGER_ALL_LINK_0X30_0X00 , user_data )
cmd . set_checksum ( )
_LOGGER . debug ( 'Calling scene_on and sending response to ' '_received_scene_triggered' )
self . _send_method ( cmd , self . _received_scene_triggered )
|
def find_objects ( self , ObjectClass , ** kwargs ) :
"""Retrieve all objects of type ` ` ObjectClass ` ` ,
matching the specified filters in ` ` * * kwargs ` ` - - case sensitive ."""
|
filter = None
for k , v in kwargs . items ( ) :
cond = getattr ( ObjectClass , k ) == v
filter = cond if filter is None else filter & cond
return ObjectClass . scan ( filter )
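# A self-contained analogue of the same pattern (added for illustration),
# using plain dict records and predicates instead of ORM conditions; the
# names below are hypothetical, not part of the original API.
def find_dicts(records, **kwargs):
    # AND together one equality check per keyword, mirroring the `&` chain above.
    return [r for r in records if all(r.get(k) == v for k, v in kwargs.items())]

rows = [{'name': 'Ada', 'city': 'London'}, {'name': 'Bob', 'city': 'Paris'}]
assert find_dicts(rows, city='London') == [{'name': 'Ada', 'city': 'London'}]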
|
def levenshtein ( seq1 , seq2 , normalized = False , max_dist = - 1 ) :
"""Compute the absolute Levenshtein distance between the two sequences
` seq1 ` and ` seq2 ` .
The Levenshtein distance is the minimum number of edit operations necessary
for transforming one sequence into the other . The edit operations allowed are :
* deletion : ABC - > BC , AC , AB
* insertion : ABC - > ABCD , EABC , AEBC . .
* substitution : ABC - > ABE , ADC , FBC . .
The ` max _ dist ` parameter controls at which moment we should stop computing the
distance between the provided sequences . If it is a negative integer , the
distance will be computed until the sequences are exhausted ; otherwise , the
computation will stop at the moment the calculated distance is higher than
` max _ dist ` , and then return - 1 . For example :
> > > levenshtein ( " abc " , " abcd " , max _ dist = 1 ) # dist = 1
> > > levenshtein ( " abc " , " abcde " , max _ dist = 1 ) # dist = 2
This can be a time saver if you ' re not interested in the exact distance , but
only need to check if the distance between the given sequences is below a
given threshold .
The ` normalized ` parameter is here for backward compatibility ; providing
it will result in a call to ` nlevenshtein ` , which should be used directly
instead ."""
|
if normalized :
return nlevenshtein ( seq1 , seq2 , method = 1 )
if seq1 == seq2 :
return 0
len1 , len2 = len ( seq1 ) , len ( seq2 )
if max_dist >= 0 and abs ( len1 - len2 ) > max_dist :
return - 1
if len1 == 0 :
return len2
if len2 == 0 :
return len1
if len1 < len2 :
len1 , len2 = len2 , len1
seq1 , seq2 = seq2 , seq1
column = array ( 'L' , range ( len2 + 1 ) )
for x in range ( 1 , len1 + 1 ) :
column [ 0 ] = x
last = x - 1
for y in range ( 1 , len2 + 1 ) :
old = column [ y ]
cost = int ( seq1 [ x - 1 ] != seq2 [ y - 1 ] )
column [ y ] = min ( column [ y ] + 1 , column [ y - 1 ] + 1 , last + cost )
last = old
if max_dist >= 0 and min ( column ) > max_dist :
return - 1
if max_dist >= 0 and column [ len2 ] > max_dist : # stay consistent , even if we have the exact distance
return - 1
return column [ len2 ]
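# Quick runnable checks of the behaviour documented above (assumes
# `levenshtein` is in scope):
assert levenshtein("abc", "abc") == 0                  # identical sequences short-circuit
assert levenshtein("abc", "abcd") == 1                 # one insertion
assert levenshtein("abc", "abcde", max_dist=1) == -1   # length gap alone exceeds the cap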
|
def platform ( ) :
"""Return platform for the current shell , e . g . windows or unix"""
|
executable = parent ( )
basename = os . path . basename ( executable )
basename , _ = os . path . splitext ( basename )
if basename in ( "bash" , "sh" ) :
return "unix"
if basename in ( "cmd" , "powershell" ) :
return "windows"
raise SystemError ( "Unsupported shell: %s" % basename )
|
def request_encode_body ( self , method , url , fields = None , headers = None , encode_multipart = True , multipart_boundary = None , ** urlopen_kw ) :
"""Make a request using : meth : ` urlopen ` with the ` ` fields ` ` encoded in
the body . This is useful for request methods like POST , PUT , PATCH , etc .
When ` ` encode _ multipart = True ` ` ( default ) , then
: meth : ` urllib3 . filepost . encode _ multipart _ formdata ` is used to encode
the payload with the appropriate content type . Otherwise
: meth : ` urllib . urlencode ` is used with the
' application / x - www - form - urlencoded ' content type .
Multipart encoding must be used when posting files , and it ' s reasonably
safe to use it in other times too . However , it may break request
signing , such as with OAuth .
Supports an optional ` ` fields ` ` parameter of key / value strings AND
key / filetuple . A filetuple is a ( filename , data , MIME type ) tuple where
the MIME type is optional . For example : :
fields = {
' foo ' : ' bar ' ,
' fakefile ' : ( ' foofile . txt ' , ' contents of foofile ' ) ,
' realfile ' : ( ' barfile . txt ' , open ( ' realfile ' ) . read ( ) ) ,
' typedfile ' : ( ' bazfile . bin ' , open ( ' bazfile ' ) . read ( ) ,
' image / jpeg ' ) ,
' nonamefile ' : ' contents of nonamefile field ' ,
}
When uploading a file , providing a filename ( the first parameter of the
tuple ) is optional but recommended to best mimic behavior of browsers .
Note that if ` ` headers ` ` are supplied , the ' Content - Type ' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request . The random boundary
string can be explicitly set with the ` ` multipart _ boundary ` ` parameter ."""
|
if headers is None :
headers = self . headers
extra_kw = { 'headers' : { } }
if fields :
if 'body' in urlopen_kw :
raise TypeError ( "request got values for both 'fields' and 'body', can only specify one." )
if encode_multipart :
body , content_type = encode_multipart_formdata ( fields , boundary = multipart_boundary )
else :
body , content_type = urlencode ( fields ) , 'application/x-www-form-urlencoded'
extra_kw [ 'body' ] = body
extra_kw [ 'headers' ] = { 'Content-Type' : content_type }
extra_kw [ 'headers' ] . update ( headers )
extra_kw . update ( urlopen_kw )
return self . urlopen ( method , url , ** extra_kw )
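# Usage sketch with urllib3's public API; the endpoint URL is only an example.
import urllib3

http = urllib3.PoolManager()
# Multipart-encoded POST: urllib3 builds the body and the boundary-bearing
# Content-Type header from `fields` itself.
resp = http.request_encode_body(
    "POST",
    "https://httpbin.org/post",
    fields={"foo": "bar", "fakefile": ("foofile.txt", "contents of foofile")},
)
print(resp.status)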
|
def get_all_logger_names ( include_root = False ) :
"""Return ` ` list ` ` of names of all loggers than have been accessed .
Warning : this is sensitive to internal structures in the standard logging module ."""
|
# noinspection PyUnresolvedReferences
rv = list ( logging . Logger . manager . loggerDict . keys ( ) )
if include_root :
rv . insert ( 0 , '' )
return rv
|
def select_rows ( cols , rows , mode = 'list' , cast = True ) :
"""Yield data selected from rows .
It is sometimes useful to select a subset of data from a profile .
This function selects the data in * cols * from * rows * and yields it
in a form specified by * mode * . Possible values of * mode * are :
mode | description | example ( with cols = ` [ ' i - id ' , ' i - wf ' ] ` )
` ' list ' ` ( default ) | a list of values | ` [ 10 , 1 ] `
` ' dict ' ` | col to value map | ` { ' i - id ' : 10 , ' i - wf ' : 1 } `
` ' row ' ` | [ incr tsdb ( ) ] row | ` ' 10@1 ' `
Args :
cols : an iterable of column names to select data for
rows : the rows to select column data from
mode : the form yielded data should take
cast : if ` True ` , cast column values to their datatype
( requires * rows * to be : class : ` Record ` objects )
Yields :
Selected data in the form specified by * mode * ."""
|
mode = mode . lower ( )
if mode == 'list' :
modecast = lambda cols , data : data
elif mode == 'dict' :
modecast = lambda cols , data : dict ( zip ( cols , data ) )
elif mode == 'row' :
modecast = lambda cols , data : encode_row ( data )
else :
raise ItsdbError ( 'Invalid mode for select operation: {}\n' ' Valid options include: list, dict, row' . format ( mode ) )
for row in rows :
try :
data = [ row . get ( c , cast = cast ) for c in cols ]
except TypeError :
data = [ row . get ( c ) for c in cols ]
yield modecast ( cols , data )
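# A minimal sketch of the 'list' and 'dict' modes using plain dicts as rows;
# dict.get() rejects the `cast` keyword, so the TypeError fallback above kicks
# in and values come back uncast. 'row' mode additionally needs encode_row().
rows = [{'i-id': 10, 'i-wf': 1}]
assert list(select_rows(['i-id', 'i-wf'], rows, mode='list')) == [[10, 1]]
assert list(select_rows(['i-id', 'i-wf'], rows, mode='dict')) == [{'i-id': 10, 'i-wf': 1}]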
|
def run ( self ) :
"""Initializes the stream ."""
|
if not hasattr ( self , 'queue' ) :
raise RuntimeError ( "Audio queue is not intialized." )
self . stream . start_stream ( )
self . keep_listening = True
while self . keep_listening :
try :
frame = self . queue . get ( False ) # non-blocking ; a timeout has no effect when block is False
self . stream . write ( frame )
except Empty :
continue
time . sleep ( 0.01 )
self . stream . stop_stream ( )
self . stream . close ( )
self . pa . terminate ( )
|
def spline_functions ( lower_points , upper_points , degree = 3 ) :
"""Method that creates two ( upper and lower ) spline functions based on points lower _ points and upper _ points .
Args :
lower _ points :
Points defining the lower function .
upper _ points :
Points defining the upper function .
degree :
Degree for the spline function
Returns :
A dictionary with the lower and upper spline functions ."""
|
lower_xx = np . array ( [ pp [ 0 ] for pp in lower_points ] )
lower_yy = np . array ( [ pp [ 1 ] for pp in lower_points ] )
upper_xx = np . array ( [ pp [ 0 ] for pp in upper_points ] )
upper_yy = np . array ( [ pp [ 1 ] for pp in upper_points ] )
lower_spline = UnivariateSpline ( lower_xx , lower_yy , k = degree , s = 0 )
upper_spline = UnivariateSpline ( upper_xx , upper_yy , k = degree , s = 0 )
def lower ( x ) :
return lower_spline ( x )
def upper ( x ) :
return upper_spline ( x )
return { 'lower' : lower , 'upper' : upper }
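# Usage sketch: fit boundary splines through a few sample points and evaluate
# between the knots (numpy and scipy's UnivariateSpline as above; the point
# values are arbitrary illustrations).
lower_pts = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.5), (3.0, 1.5)]
upper_pts = [(0.0, 2.0), (1.0, 3.0), (2.0, 2.5), (3.0, 3.5)]
fns = spline_functions(lower_pts, upper_pts, degree=3)
print(fns['lower'](1.5), fns['upper'](1.5))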
|
def keras_tuples ( stream , inputs = None , outputs = None ) :
"""Reformat data objects as keras - compatible tuples .
For more detail : https : / / keras . io / models / model / # fit
Parameters
stream : iterable
Stream of data objects .
inputs : string or iterable of strings , None
Keys to use for ordered input data .
If not specified , returns ` None ` in its place .
outputs : string or iterable of strings , default = None
Keys to use for ordered output data .
If not specified , returns ` None ` in its place .
Yields
x : np . ndarray , list of np . ndarray , or None
If ` inputs ` is a string , ` x ` is a single np . ndarray .
If ` inputs ` is an iterable of strings , ` x ` is a list of np . ndarrays .
If ` inputs ` is a null type , ` x ` is None .
y : np . ndarray , list of np . ndarray , or None
If ` outputs ` is a string , ` y ` is a single np . ndarray .
If ` outputs ` is an iterable of strings , ` y ` is a list of np . ndarrays .
If ` outputs ` is a null type , ` y ` is None .
Raises
DataError
If the stream contains items that are not data - like ."""
|
flatten_inputs , flatten_outputs = False , False
if inputs and isinstance ( inputs , six . string_types ) :
inputs = [ inputs ]
flatten_inputs = True
if outputs and isinstance ( outputs , six . string_types ) :
outputs = [ outputs ]
flatten_outputs = True
inputs , outputs = ( inputs or [ ] ) , ( outputs or [ ] )
if not inputs + outputs :
raise PescadorError ( 'At least one key must be given for ' '`inputs` or `outputs`' )
for data in stream :
try :
x = list ( data [ key ] for key in inputs ) or None
if len ( inputs ) == 1 and flatten_inputs :
x = x [ 0 ]
y = list ( data [ key ] for key in outputs ) or None
if len ( outputs ) == 1 and flatten_outputs :
y = y [ 0 ]
yield ( x , y )
except TypeError :
raise DataError ( "Malformed data stream: {}" . format ( data ) )
|
def zncc ( ts1 , ts2 ) :
"""Zero mean normalised cross - correlation ( ZNCC )
This function does ZNCC of two signals , ts1 and ts2
Normalisation by very small values is avoided by doing
max ( nmin , nvalue )
Parameters
ts1 : ndarray
Input signal 1 to be aligned with
ts2 : ndarray
Input signal 2
Returns
best _ shift : float
The best shift of * ts1 * to align it with * ts2*
ts _ out : ndarray
The correlation result"""
|
# Output is the same size as ts1
Ns1 = np . size ( ts1 )
Ns2 = np . size ( ts2 )
ts_out = np . zeros ( ( Ns1 , 1 ) , dtype = 'float64' )
ishift = int ( np . floor ( Ns2 / 2 ) )
# origin of ts2
t1m = np . mean ( ts1 )
t2m = np . mean ( ts2 )
for k in range ( 0 , Ns1 ) :
lstart = int ( ishift - k )
if lstart < 0 :
lstart = 0
lend = int ( ishift - k + Ns2 )
imax = int ( min ( Ns2 , Ns1 - k + ishift ) )
if lend > imax :
lend = imax
csum = 0
ts1sum = 0
ts1sum2 = 0
ts2sum = 0
ts2sum2 = 0
Nterms = lend - lstart
for l in range ( lstart , lend ) :
csum += ts1 [ k + l - ishift ] * ts2 [ l ]
ts1sum += ts1 [ k + l - ishift ]
ts1sum2 += ts1 [ k + l - ishift ] * ts1 [ k + l - ishift ]
ts2sum += ts2 [ l ]
ts2sum2 += ts2 [ l ] * ts2 [ l ]
ts1sum2 = np . max ( [ t1m * t1m * 100 , ts1sum2 ] ) - ts1sum * ts1sum / Nterms
ts2sum2 = np . max ( [ t2m * t2m * 100 , ts2sum2 ] ) - ts2sum * ts2sum / Nterms
# ts _ out [ k ] = csum / np . sqrt ( ts1sum2 * ts2sum2)
ts_out [ k ] = ( csum - 2.0 * ts1sum * ts2sum / Nterms + ts1sum * ts2sum / Nterms / Nterms ) / np . sqrt ( ts1sum2 * ts2sum2 )
best_shift = np . argmax ( ts_out ) - ishift
return best_shift , ts_out
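# Usage sketch: recover a known lag between two copies of a Gaussian pulse
# (numpy as np, matching the function above; the O(N^2) loops keep N small here).
t = np.arange(200)
pulse = np.exp(-0.5 * ((t - 100.0) / 5.0) ** 2)
delayed = np.roll(pulse, 7)            # ts1 is ts2 delayed by 7 samples
best_shift, ts_out = zncc(delayed, pulse)
print(best_shift)                      # expected to come out near +7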
|
def unArrayify ( self , gene ) :
"""Copies gene bias values and weights to network bias values and
weights ."""
|
g = 0
# if gene is too small an IndexError will be thrown
for layer in self . layers :
if layer . type != 'Input' :
for i in range ( layer . size ) :
layer . weight [ i ] = float ( gene [ g ] )
g += 1
for connection in self . connections :
for i in range ( connection . fromLayer . size ) :
for j in range ( connection . toLayer . size ) :
connection . weight [ i ] [ j ] = gene [ g ]
g += 1
# if gene is too long we may have a problem
if len ( gene ) > g :
raise IndexError ( 'Argument to unArrayify is too long.' , len ( gene ) )
|
def register_elasticapm ( client , worker ) :
"""Given an ElasticAPM client and an RQ worker , registers exception handlers
with the worker so exceptions are logged to the apm server .
E . g . :
from elasticapm . contrib . django . models import client
from elasticapm . contrib . rq import register _ elasticapm
worker = Worker ( map ( Queue , listen ) )
register _ elasticapm ( client , worker )
worker . work ( )"""
|
def send_to_server ( job , * exc_info ) :
client . capture_exception ( exc_info = exc_info , extra = { "job_id" : job . id , "func" : job . func_name , "args" : job . args , "kwargs" : job . kwargs , "description" : job . description , } , )
worker . push_exc_handler ( send_to_server )
|
def revoke_session ( self , sid = '' , token = '' ) :
"""Mark session as revoked but also explicitly revoke all issued tokens
: param token : any token connected to the session
: param sid : Session identifier"""
|
if not sid :
if token :
sid = self . handler . sid ( token )
else :
raise ValueError ( 'Need one of "sid" or "token"' )
for typ in [ 'access_token' , 'refresh_token' , 'code' ] :
try :
self . revoke_token ( self [ sid ] [ typ ] , typ )
except KeyError : # If no such token has been issued
pass
self . update ( sid , revoked = True )
|
def load_stream ( handle , delimiter = None ) :
"""Creates a string generator from a stream ( file handle ) containing data
delimited by the delimiter strings . This is a stand - alone function and
should be used to feed external data into a pipeline .
Arguments :
- handle ( ` ` file ` ` ) A file handle open for reading .
- delimiter ( ` ` str ` ` ) [ default : ` ` None ` ` ] The default means that items will
be separated by two new - line characters i . e . : ` ` " \\ n \\ n " ` ` ."""
|
delimiter = ( delimiter or "" ) + "\n"
while True :
item = [ ]
while True :
line = handle . readline ( )
if line == "" :
raise StopIteration
elif line == delimiter :
if item :
break
elif line != '\n' :
item . append ( line )
yield "" . join ( item )
|
def plot_ac ( calc_id ) :
"""Aggregate loss curves plotter ."""
|
# read the aggregate loss curve data
dstore = util . read ( calc_id )
agg_curve = dstore [ 'agg_curve-rlzs' ]
plt = make_figure ( agg_curve )
plt . show ( )
|
def init ( self , value ) :
'''hash passwords given in the constructor'''
|
value = self . value_or_default ( value )
if value is None :
return None
if is_hashed ( value ) :
return value
return make_password ( value )
|
def fullsplit ( path , result = None , base_path = None ) :
"""Split a pathname into components ( the opposite of os . path . join ) in a
platform - neutral way ."""
|
if base_path :
path = path . replace ( base_path , '' )
if result is None :
result = [ ]
head , tail = os . path . split ( path )
if head == '' :
return [ tail ] + result
if head == path :
return result
return fullsplit ( head , [ tail ] + result )
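# Quick checks (POSIX-style separators assumed for the expected values):
assert fullsplit('a/b/c') == ['a', 'b', 'c']
assert fullsplit('/root/pkg/mod', base_path='/root/') == ['pkg', 'mod']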
|
def append ( self , ** kwargs ) :
"""Add commands at the end of the sequence .
Be careful : because this runs in Python 2 . x , the order of the kwargs dict may not match
the order in which the args were specified . Thus , if you care about specific ordering ,
you must make multiple calls to append in that order . Luckily , append returns
the Action so you can compose easily : Action ( . . . ) . append ( . . . ) . append ( . . . ) .
See also insert , below .
: param kwargs : the key / value pairs to add
: return : the action"""
|
for k , v in six . iteritems ( kwargs ) :
self . commands . append ( { k : v } )
return self
|
def resources ( self ) :
"""Retrieve contents of each page of PDF"""
|
return [ self . pdf . getPage ( i ) for i in range ( self . pdf . getNumPages ( ) ) ]
|
def maybe_print_as_json ( opts , data , page_info = None ) :
"""Maybe print data as JSON ."""
|
if opts . output not in ( "json" , "pretty_json" ) :
return False
root = { "data" : data }
if page_info is not None and page_info . is_valid :
meta = root [ "meta" ] = { }
meta [ "pagination" ] = page_info . as_dict ( num_results = len ( data ) )
if opts . output == "pretty_json" :
dump = json . dumps ( root , indent = 4 , sort_keys = True )
else :
dump = json . dumps ( root , sort_keys = True )
click . echo ( dump )
return True
|
def bundles ( ) :
"""Display bundles ."""
|
per_page = int ( request . args . get ( 'per_page' , 30 ) )
page = int ( request . args . get ( 'page' , 1 ) )
query = store . bundles ( )
query_page = query . paginate ( page , per_page = per_page )
data = [ ]
for bundle_obj in query_page . items :
bundle_data = bundle_obj . to_dict ( )
bundle_data [ 'versions' ] = [ version . to_dict ( ) for version in bundle_obj . versions ]
data . append ( bundle_data )
return jsonify ( bundles = data )
|
def remove_device ( self , device , id_override = None , type_override = None ) :
"""Remove a device .
Args :
device ( WinkDevice ) : The device the change is being requested for .
id _ override ( String , optional ) : A device ID used to override the
passed in device ' s ID . Used to make changes on sub - devices .
i . e . Outlet in a Powerstrip . The Parent device ' s ID .
type _ override ( String , optional ) : Used to override the device type
when a device inherits from a device other than WinkDevice .
Returns :
( boolean ) : True if the device was removed ."""
|
object_id = id_override or device . object_id ( )
object_type = type_override or device . object_type ( )
url_string = "{}/{}s/{}" . format ( self . BASE_URL , object_type , object_id )
try :
arequest = requests . delete ( url_string , headers = API_HEADERS )
if arequest . status_code == 204 :
return True
_LOGGER . error ( "Failed to remove device. Status code: %s" , arequest . status_code )
return False
except requests . exceptions . RequestException :
_LOGGER . error ( "Failed to remove device." )
return False
|
def calculateKey ( cls , target ) :
"""Calculate the reference key for this reference
Currently this is a two - tuple of the id ( ) ' s of the
target object and the target function respectively ."""
|
return ( id ( getattr ( target , im_self ) ) , id ( getattr ( target , im_func ) ) )
|
def _dry_message_received ( self , msg ) :
"""Report a dry state ."""
|
for callback in self . _dry_wet_callbacks :
callback ( LeakSensorState . DRY )
self . _update_subscribers ( 0x11 )
|
async def enqueue ( self , query , queue_index = None , stop_current = False , shuffle = False ) :
"""Queues songs based on either a YouTube search or a link
Args :
query ( str ) : Either a search term or a link
queue _ index ( str ) : The queue index to enqueue at ( None for end )
stop _ current ( bool ) : Whether to stop the current song after the songs are queued
shuffle ( bool ) : Whether to shuffle the added songs"""
|
if query is None or query == "" :
return
self . statuslog . info ( "Parsing {}" . format ( query ) )
self . logger . debug ( "Enqueueing from query" )
indexnum = None
if queue_index is not None :
try :
indexnum = int ( queue_index ) - 1
except ( TypeError , ValueError ) :
self . statuslog . error ( "Play index argument must be a number" )
return
if not self . vready :
self . parse_query ( query , indexnum , stop_current , shuffle )
else :
parse_thread = threading . Thread ( target = self . parse_query , args = [ query , indexnum , stop_current , shuffle ] )
# Run threads
parse_thread . start ( )
|
def get ( self , key , dt ) :
"""Get the value of a cached object .
Parameters
key : any
The key to lookup .
dt : datetime
The time of the lookup .
Returns
result : any
The value for ` ` key ` ` .
Raises
KeyError
Raised if the key is not in the cache or the value for the key
has expired ."""
|
try :
return self . _cache [ key ] . unwrap ( dt )
except Expired :
self . cleanup ( self . _cache [ key ] . _unsafe_get_value ( ) )
del self . _cache [ key ]
raise KeyError ( key )
|
def pop ( self , key , default = NO_DEFAULT ) :
"""If key is in the flat dictionary , remove it and return its value ,
else return default . If default is not given and key is not in the
dictionary , : exc : ` KeyError ` is raised .
: param mixed key : The key name
: param mixed default : The default value
: rtype : mixed"""
|
if key not in self and default is not NO_DEFAULT :
return default
value = self [ key ]
self . __delitem__ ( key )
return value
|
def _set_lsp_commit ( self , v , load = False ) :
"""Setter method for lsp _ commit , mapped from YANG variable / mpls _ config / router / mpls / mpls _ cmds _ holder / lsp / secondary _ path / lsp _ commit ( empty )
If this variable is read - only ( config : false ) in the
source YANG file , then _ set _ lsp _ commit is considered as a private
method . Backends looking to populate this variable should
do so via calling thisObj . _ set _ lsp _ commit ( ) directly ."""
|
if hasattr ( v , "_utype" ) :
v = v . _utype ( v )
try :
t = YANGDynClass ( v , base = YANGBool , is_leaf = True , yang_name = "lsp-commit" , rest_name = "commit" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'cli-full-command' : None , u'info' : u'Commit the changes to adaptive LSP' , u'alt-name' : u'commit' , u'cli-suppress-no' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-mpls' , defining_module = 'brocade-mpls' , yang_type = 'empty' , is_config = True )
except ( TypeError , ValueError ) :
raise ValueError ( { 'error-string' : """lsp_commit must be of a type compatible with empty""" , 'defined-type' : "empty" , 'generated-type' : """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-commit", rest_name="commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Commit the changes to adaptive LSP', u'alt-name': u'commit', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)""" , } )
self . __lsp_commit = t
if hasattr ( self , '_set' ) :
self . _set ( )
|
def run ( self ) :
"""Run git add and commit with message if provided ."""
|
import shlex
if os . system ( 'git add .' ) :
sys . exit ( 1 )
if self . message is not None :
# quote the message so embedded quotes cannot break out of the shell command
os . system ( 'git commit -a -m ' + shlex . quote ( self . message ) )
else :
os . system ( 'git commit -a' )
|
async def get_constraints ( self ) :
"""Return the machine constraints dict for this application ."""
|
app_facade = client . ApplicationFacade . from_connection ( self . connection )
log . debug ( 'Getting constraints for %s' , self . name )
result = ( await app_facade . Get ( self . name ) ) . constraints
return vars ( result ) if result else result
|
def update_holder ( self , holder ) :
"""Udpate the Holder state according to the occurrence .
This implementation is a example of how a Occurrence object
can update the Holder state ; this method should be overriden
by classes that inherit from the Occurrence class .
This sample implementation simply update the quantity and the average
price of the Subject in the Holder ' s possession every time objects
from this class are passed to Holder . trade ( ) .
This sample implementation considers the following signature for
the Holder . state dict :
. . code : : python
" SUBJECT SYMBOL " : {
" quantity " : 0,
" value " : 0
And the following signature for the Occurrance . details dict :
. . code : : python
" quantity " : 0,
" value " : 0"""
|
subject_symbol = self . subject . symbol
# If the Holder already have a state regarding this Subject ,
# update that state
if subject_symbol in holder . state : # If the Holder have zero units of this subject , the average
# value paid / received for the subject is the value of the trade itself
if not holder . state [ subject_symbol ] [ 'quantity' ] :
holder . state [ subject_symbol ] [ 'value' ] = self . details [ 'value' ]
# If the Holder owns units of this subject then the average value
# paid / received for the subject may need to be updated with
# this occurrence details
# If the occurrence have the same sign as the quantity in the Holder
# state , a new average value needs to be calculated for the subject
elif same_sign ( holder . state [ subject_symbol ] [ 'quantity' ] , self . details [ 'quantity' ] ) :
holder . state [ subject_symbol ] [ 'value' ] = average_price ( holder . state [ subject_symbol ] [ 'quantity' ] , holder . state [ subject_symbol ] [ 'value' ] , self . details [ 'quantity' ] , self . details [ 'value' ] )
# If the occurrence does not have the same sign of the quantity in the
# Holder state , then do other stuff .
# A trade app would normally implement some sort of profit / loss logic
# here .
# This sample implementation only checks if the average value
# of the subject needs to be updated and then update it as needed .
else :
if same_sign ( self . details [ 'quantity' ] , holder . state [ subject_symbol ] [ 'quantity' ] + self . details [ 'quantity' ] ) :
holder . state [ subject_symbol ] [ 'value' ] = self . details [ 'value' ]
# Update the quantity of the subject in the Holder ' s posession
holder . state [ subject_symbol ] [ 'quantity' ] += self . details [ 'quantity' ]
# If the Holder doesn ' t have a state with this occurrence ' s Subject ,
# then register this occurrence as the first state of the Subject
# in the Holder ' s possession
else :
holder . state [ subject_symbol ] = { 'quantity' : self . details [ 'quantity' ] , 'value' : self . details [ 'value' ] }
# If the Holder knows about this Subject but doesn ' t have any units
# of it , the paid value of the subject in the Holder state should
# be zero .
if not holder . state [ subject_symbol ] [ 'quantity' ] :
holder . state [ subject_symbol ] [ 'value' ] = 0
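# A numeric sketch of the same-sign averaging step above (plain Python, no
# Holder objects): holding 10 units at an average value of 5.0 and adding 10
# more at 7.0 should leave an average of 6.0, which is the quantity-weighted
# mean that average_price() is expected to compute.
old_qty, old_val = 10, 5.0
add_qty, add_val = 10, 7.0
new_val = (old_qty * old_val + add_qty * add_val) / (old_qty + add_qty)
assert new_val == 6.0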
|
def make_strain_from_inj_object ( self , inj , delta_t , detector_name , f_lower = None , distance_scale = 1 ) :
"""Make a h ( t ) strain time - series from an injection object .
Parameters
inj : injection object
The injection object to turn into a strain h ( t ) . Can be any
object which has waveform parameters as attributes , such as an
element in a ` ` WaveformArray ` ` .
delta _ t : float
Sample rate to make injection at .
detector _ name : string
Name of the detector used for projecting injections .
f _ lower : { None , float } , optional
Low - frequency cutoff for injected signals . If None , use value
provided by each injection .
distance _ scale : { 1 , float } , optional
Factor to scale the distance of an injection with . The default is
no scaling .
Returns
signal : float
h ( t ) corresponding to the injection ."""
|
detector = Detector ( detector_name )
if f_lower is None :
f_l = inj . f_lower
else :
f_l = f_lower
# compute the waveform time series
hp , hc = get_td_waveform ( inj , delta_t = delta_t , f_lower = f_l , ** self . extra_args )
hp /= distance_scale
hc /= distance_scale
hp . _epoch += inj . tc
hc . _epoch += inj . tc
# taper the polarizations
try :
hp_tapered = wfutils . taper_timeseries ( hp , inj . taper )
hc_tapered = wfutils . taper_timeseries ( hc , inj . taper )
except AttributeError :
hp_tapered = hp
hc_tapered = hc
# compute the detector response and add it to the strain
signal = detector . project_wave ( hp_tapered , hc_tapered , inj . ra , inj . dec , inj . polarization )
return signal
|
def classes ( self , name = None , function = None , header_dir = None , header_file = None , recursive = None , allow_empty = None ) :
"""returns a set of class declarations , that are matched defined
criteria"""
|
return ( self . _find_multiple ( self . _impl_matchers [ scopedef_t . class_ ] , name = name , function = function , decl_type = self . _impl_decl_types [ scopedef_t . class_ ] , header_dir = header_dir , header_file = header_file , recursive = recursive , allow_empty = allow_empty ) )
|
def find_hass_config ( ) :
"""Try to find HASS config ."""
|
if "HASSIO_TOKEN" in os . environ :
return "/config"
config_dir = default_hass_config_dir ( )
if os . path . isdir ( config_dir ) :
return config_dir
raise ValueError ( "Unable to automatically find the location of Home Assistant " "config. Please pass it in." )
|
def to_json ( self ) :
"""Serialize to JSON , which can be returned e . g . via RPC"""
|
ret = { 'address' : self . address , 'domain' : self . domain , 'block_number' : self . block_height , 'sequence' : self . n , 'txid' : self . txid , 'value_hash' : get_zonefile_data_hash ( self . zonefile_str ) , 'zonefile' : base64 . b64encode ( self . zonefile_str ) , 'name' : self . get_fqn ( ) , }
if self . pending is not None :
ret [ 'pending' ] = self . pending
if self . resolver is not None :
ret [ 'resolver' ] = self . resolver
return ret
|
def add_company_quarter ( self , company_name , quarter_name , dt , calendar_id = 'notices' ) :
'''Adds a company _ name quarter event to the calendar . dt should be a date object . Returns True if the event was added .'''
|
assert ( calendar_id in self . configured_calendar_ids . keys ( ) )
calendarId = self . configured_calendar_ids [ calendar_id ]
quarter_name = quarter_name . title ( )
quarter_numbers = { 'Spring' : 1 , 'Summer' : 2 , 'Fall' : 3 , 'Winter' : 4 }
assert ( quarter_name in quarter_numbers . keys ( ) )
start_time = datetime ( year = dt . year , month = dt . month , day = dt . day , hour = 0 , minute = 0 , second = 0 , tzinfo = self . timezone ) + timedelta ( days = - 1 )
end_time = start_time + timedelta ( days = 3 , seconds = - 1 )
summary = '%s %s Quarter begins' % ( company_name , quarter_name )
# Do not add the quarter multiple times
events = self . get_events ( start_time . isoformat ( ) , end_time . isoformat ( ) , ignore_cancelled = True )
for event in events :
if event . summary . find ( summary ) != - 1 :
return False
event_body = { 'summary' : summary , 'description' : summary , 'start' : { 'date' : dt . isoformat ( ) , 'timeZone' : self . timezone_string } , 'end' : { 'date' : dt . isoformat ( ) , 'timeZone' : self . timezone_string } , 'status' : 'confirmed' , 'gadget' : { 'display' : 'icon' , 'iconLink' : 'https://guybrush.ucsf.edu/images/Q%d_32.png' % quarter_numbers [ quarter_name ] , 'title' : summary , } , 'extendedProperties' : { 'shared' : { 'event_type' : '%s quarter' % company_name , 'quarter_name' : quarter_name } } }
colortext . warning ( '\n%s\n' % pprint . pformat ( event_body ) )
created_event = self . service . events ( ) . insert ( calendarId = calendarId , body = event_body ) . execute ( )
return True
|
def _qmed_from_pot_records ( self ) :
"""Return QMED estimate based on peaks - over - threshold ( POT ) records .
Methodology source : FEH , Vol . 3 , pp . 77-78
: return : QMED in m3 / s
: rtype : float"""
|
pot_dataset = self . catchment . pot_dataset
if not pot_dataset :
raise InsufficientDataError ( "POT dataset must be set for catchment {} to estimate QMED from POT data." . format ( self . catchment . id ) )
complete_year_records , length = self . _complete_pot_years ( pot_dataset )
if length < 1 :
raise InsufficientDataError ( "Insufficient POT flow records available for catchment {}." . format ( self . catchment . id ) )
position = 0.790715789 * length + 0.539684211
i = floor ( position )
w = 1 + i - position
# This is equivalent to table 12.1!
flows = [ record . flow for record in complete_year_records ]
flows . sort ( reverse = True )
return w * flows [ i - 1 ] + ( 1 - w ) * flows [ i ]
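# A worked sketch of the interpolation above with hypothetical flows and
# length = 10 complete POT years: position = 0.790715789 * 10 + 0.539684211
# = 8.4468..., so i = 8 and w = 1 + 8 - position = 0.5532..., and QMED is
# interpolated between the 8th and 9th largest recorded flows.
length = 10
position = 0.790715789 * length + 0.539684211
i, w = int(position), 1 + int(position) - position
flows = sorted([3.1, 2.7, 5.6, 4.2, 1.9, 6.3, 2.2, 4.8, 3.9, 5.1], reverse=True)
print(w * flows[i - 1] + (1 - w) * flows[i])   # ~2.48 m3/s for these numbers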
|
def update ( self , name = None , metadata = None ) :
"""Updates this webhook . One or more of the parameters may be specified ."""
|
return self . policy . update_webhook ( self , name = name , metadata = metadata )
|