signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def expand(self, nmax=None, grid='DH2', zeros=None):
    """Expand the function on a grid using the first n Slepian coefficients.

    Usage
    -----
    f = x.expand([nmax, grid, zeros])

    Returns
    -------
    f : SHGrid class instance

    Parameters
    ----------
    nmax : int, optional, default = x.nmax
        The number of expansion coefficients to use when calculating the
        spherical harmonic coefficients.
    grid : str, optional, default = 'DH2'
        'DH' or 'DH1' for an equisampled lat/lon grid with nlat=nlon, 'DH2'
        for an equidistant lat/lon grid with nlon=2*nlat, or 'GLQ' for a
        Gauss-Legendre quadrature grid.
    zeros : ndarray, optional, default = None
        The cos(colatitude) nodes used in the Gauss-Legendre Quadrature
        grids.

    Raises
    ------
    ValueError
        If grid is not a string or not one of the recognized grid names.
    """
    # isinstance() instead of comparing type() so str subclasses are accepted.
    if not isinstance(grid, str):
        raise ValueError('grid must be a string. ' +
                         'Input type was {:s}'.format(str(type(grid))))
    if nmax is None:
        nmax = self.nmax
    # 'cap' windows expose their SH expansion as .coeffs; other kinds as .tapers.
    if self.galpha.kind == 'cap':
        shcoeffs = _shtools.SlepianCoeffsToSH(self.falpha, self.galpha.coeffs, nmax)
    else:
        shcoeffs = _shtools.SlepianCoeffsToSH(self.falpha, self.galpha.tapers, nmax)
    if grid.upper() in ('DH', 'DH1'):
        gridout = _shtools.MakeGridDH(shcoeffs, sampling=1, norm=1, csphase=1)
        return SHGrid.from_array(gridout, grid='DH', copy=False)
    elif grid.upper() == 'DH2':
        gridout = _shtools.MakeGridDH(shcoeffs, sampling=2, norm=1, csphase=1)
        return SHGrid.from_array(gridout, grid='DH', copy=False)
    elif grid.upper() == 'GLQ':
        if zeros is None:
            zeros, weights = _shtools.SHGLQ(self.galpha.lmax)
        gridout = _shtools.MakeGridGLQ(shcoeffs, zeros, norm=1, csphase=1)
        return SHGrid.from_array(gridout, grid='GLQ', copy=False)
    else:
        raise ValueError("grid must be 'DH', 'DH1', 'DH2', or 'GLQ'. " +
                         "Input value was {:s}".format(repr(grid)))
|
def _get_peers ( self , child_self , parent_other ) :
'''_ get _ peers
Low - level api : Given a config node , find peers under a parent node .
Parameters
child _ self : ` Element `
An Element node on this side .
parent _ other : ` Element `
An Element node on the other side .
Returns
list
A list of children of parent _ other who are peers of child _ self .'''
|
peers = parent_other . findall ( child_self . tag )
s_node = self . device . get_schema_node ( child_self )
if s_node . get ( 'type' ) == 'leaf-list' :
return list ( filter ( lambda x : child_self . text == x . text , peers ) )
elif s_node . get ( 'type' ) == 'list' :
keys = self . _get_list_keys ( s_node )
return list ( filter ( lambda x : self . _is_peer ( keys , child_self , x ) , peers ) )
else :
return peers
|
def run_marionette_script(script, chrome=False, async_=False, host='localhost', port=2828):
    """Create a Marionette instance and run the provided script.

    :param script: JavaScript source to execute on the device.
    :param chrome: if True, switch the session to the chrome context first.
    :param async_: if True, run via execute_async_script instead of
        execute_script. (Renamed from ``async``, which became a reserved
        keyword in Python 3.7 and made this function a SyntaxError.)
    :param host: Marionette host to connect to.
    :param port: Marionette port to connect to.
    :return: the value returned by the executed script.
    """
    m = DeviceHelper.getMarionette(host, port)
    m.start_session()
    if chrome:
        m.set_context(marionette.Marionette.CONTEXT_CHROME)
    if not async_:
        result = m.execute_script(script)
    else:
        result = m.execute_async_script(script)
    m.delete_session()
    return result
|
def GroupsSensorsDelete(self, group_id, sensor_id):
    """Stop sharing a sensor within a group.

    @param group_id (int) - Id of the group to stop sharing the sensor with
    @param sensor_id (int) - Id of the sensor to stop sharing
    @return (bool) - Boolean indicating whether GroupsSensorsDelete was successful
    """
    url = "/groups/{0}/sensors/{1}.json".format(group_id, sensor_id)
    if not self.__SenseApiCall__(url, "DELETE"):
        # Record the failure so callers can inspect the last error.
        self.__error__ = "api call unsuccessful"
        return False
    return True
|
def _executor(self, jobGraph, stats, fileStore):
    """This is the core wrapping method for running the job within a worker.

    It sets up the stats and logging before yielding. After completion of the
    body, the function will finish up the stats and logging, and starts the
    async update process for the job.

    :param jobGraph: job wrapper; receives checkpointFilesToDelete when the
        job is a checkpoint job.
    :param stats: stats accumulator with a ``jobs`` list, or None to disable
        stats collection.
    :param fileStore: file store used to delete promise files and to start
        the asynchronous job update.
    """
    if stats is not None:
        # Baseline wall-clock and CPU time so deltas can be recorded below.
        startTime = time.time()
        startClock = getTotalCpuTime()
    baseDir = os.getcwd()
    # Control returns to the caller here; everything below runs after the
    # job body has completed.
    yield
    # If the job is not a checkpoint job, add the promise files to delete
    # to the list of jobStoreFileIDs to delete
    if not self.checkpoint:
        for jobStoreFileID in Promise.filesToDelete:
            fileStore.deleteGlobalFile(jobStoreFileID)
    else:
        # Else copy them to the job wrapper to delete later
        jobGraph.checkpointFilesToDelete = list(Promise.filesToDelete)
    Promise.filesToDelete.clear()
    # Now indicate the asynchronous update of the job can happen
    fileStore._updateJobWhenDone()
    # Change dir back to cwd dir, if changed by job (this is a safety issue)
    if os.getcwd() != baseDir:
        os.chdir(baseDir)
    # Finish up the stats
    if stats is not None:
        totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
        stats.jobs.append(
            Expando(time=str(time.time() - startTime),
                    clock=str(totalCpuTime - startClock),
                    class_name=self._jobName(),
                    memory=str(totalMemoryUsage)))
|
def get_client(self, service, region, public=True, cached=True):
    """Return the client object for the specified service and region.

    By default the public endpoint is used. If you wish to work with a
    service's internal endpoints, specify `public=False`.

    By default, if a client has already been created for the given service,
    region, and public values, that will be returned. To force a new client
    to be created, pass 'cached=False'.
    """
    # Cloud Networks currently uses nova-networks, so it doesn't appear as
    # a separate entry in the service catalog. This hack will allow context
    # objects to continue to work with Rackspace Cloud Networks. When the
    # Neutron service is implemented, this hack will have to be removed.
    network_aliases = ("compute:networks", "networks", "network",
                       "cloudnetworks", "cloud_networks")
    client_class = None
    if service in network_aliases:
        service = "compute"
        client_class = CloudNetworkClient
    return super(RaxIdentity, self).get_client(service, region, public=public,
                                               cached=cached,
                                               client_class=client_class)
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'environment_id' ) and self . environment_id is not None :
_dict [ 'environment_id' ] = self . environment_id
if hasattr ( self , 'customer_id' ) and self . customer_id is not None :
_dict [ 'customer_id' ] = self . customer_id
if hasattr ( self , 'document_type' ) and self . document_type is not None :
_dict [ 'document_type' ] = self . document_type
if hasattr ( self , 'natural_language_query' ) and self . natural_language_query is not None :
_dict [ 'natural_language_query' ] = self . natural_language_query
if hasattr ( self , 'document_results' ) and self . document_results is not None :
_dict [ 'document_results' ] = self . document_results . _to_dict ( )
if hasattr ( self , 'created_timestamp' ) and self . created_timestamp is not None :
_dict [ 'created_timestamp' ] = datetime_to_string ( self . created_timestamp )
if hasattr ( self , 'client_timestamp' ) and self . client_timestamp is not None :
_dict [ 'client_timestamp' ] = datetime_to_string ( self . client_timestamp )
if hasattr ( self , 'query_id' ) and self . query_id is not None :
_dict [ 'query_id' ] = self . query_id
if hasattr ( self , 'session_token' ) and self . session_token is not None :
_dict [ 'session_token' ] = self . session_token
if hasattr ( self , 'collection_id' ) and self . collection_id is not None :
_dict [ 'collection_id' ] = self . collection_id
if hasattr ( self , 'display_rank' ) and self . display_rank is not None :
_dict [ 'display_rank' ] = self . display_rank
if hasattr ( self , 'document_id' ) and self . document_id is not None :
_dict [ 'document_id' ] = self . document_id
if hasattr ( self , 'event_type' ) and self . event_type is not None :
_dict [ 'event_type' ] = self . event_type
if hasattr ( self , 'result_type' ) and self . result_type is not None :
_dict [ 'result_type' ] = self . result_type
return _dict
|
def update_from(self, source, incr=1, force=False):
    """Move a value to this counter from another counter.

    Args:
        source (:py:class:`SubCounter`): :py:class:`SubCounter` or
            :py:class:`Counter` to increment from
        incr (int): Amount to increment ``count`` (Default: 1)
        force (bool): Force refresh even if ``min_delta`` has not been reached

    ``source`` must be the parent :py:class:`Counter` instance or a
    :py:class:`SubCounter` with the same parent.
    """
    parent = self.parent
    # Reject anything that is not our parent or a sibling subcounter.
    if source is not parent and getattr(source, 'parent', None) is not parent:
        raise ValueError('source must be parent or peer')
    if self.count + incr < 0 or source.count - incr < 0:
        raise ValueError('Invalid increment: %s' % incr)
    if source is parent:
        # Moving from the parent's unallocated pool: make sure it has enough.
        if parent.count - parent.subcount - incr < 0:
            raise ValueError('Invalid increment: %s' % incr)
    else:
        # Moving from a sibling subcounter: take it out of the sibling.
        source.count -= incr
    self.count += incr
    parent.update(0, force)
|
def add_to_queue(self, series):
    """Add a series to the queue.

    @param crunchyroll.models.Series series
    @return bool
    """
    return self._android_api.add_to_queue(series_id=series.series_id)
|
def _make_netmask(cls, arg):
    """Make a (netmask, prefix_len) tuple from the given argument.

    Argument can be:
    - an integer (the prefix length)
    - a string representing the prefix length (e.g. "24")
    - a string representing the prefix netmask (e.g. "255.255.255.0")

    Results are memoized per class in ``_netmask_cache``.
    """
    if arg in cls._netmask_cache:
        return cls._netmask_cache[arg]
    if isinstance(arg, _compat_int_types):
        prefixlen = arg
    else:
        try:
            # Check for a netmask in prefix length form.
            prefixlen = cls._prefix_from_prefix_string(arg)
        except NetmaskValueError:
            # Check for a netmask or hostmask in dotted-quad form.
            # This may raise NetmaskValueError.
            prefixlen = cls._prefix_from_ip_string(arg)
    netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
    cls._netmask_cache[arg] = netmask, prefixlen
    return cls._netmask_cache[arg]
|
def fromJSON(value):
    """Load the GP object from a JSON string."""
    data = json.loads(value)
    gp = GPDouble()
    # 'defaultValue' takes precedence over 'value'.
    gp.value = data['defaultValue'] if 'defaultValue' in data else data['value']
    # 'paramName' takes precedence over 'name'.
    if 'paramName' in data:
        gp.paramName = data['paramName']
    elif 'name' in data:
        gp.paramName = data['name']
    return gp
|
def search_subscriptions(self, **kwargs):
    """Search for all subscriptions by parameters.

    Keyword arguments are sorted by name and sent as the query string.
    Raises DataFailureException on a non-200 response.
    """
    params = sorted(kwargs.items())
    url = "/notification/v1/subscription?{}".format(urlencode(params, doseq=True))
    response = NWS_DAO().getURL(url, self._read_headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    data = json.loads(response.data)
    return [self._subscription_from_json(item)
            for item in data.get("Subscriptions", [])]
|
def get_swagger_versions(settings):
    """Validate and return the versions of the Swagger Spec that this pyramid
    application supports.

    :type settings: dict
    :return: set of version strings, e.g. {'1.2', '2.0'}
    :raises: ValueError when no version is configured or an unsupported
        Swagger version is encountered.
    """
    raw_versions = settings.get('pyramid_swagger.swagger_versions',
                                DEFAULT_SWAGGER_VERSIONS)
    swagger_versions = set(aslist(raw_versions))
    if not swagger_versions:
        raise ValueError('pyramid_swagger.swagger_versions is empty')
    for version in swagger_versions:
        if version not in SUPPORTED_SWAGGER_VERSIONS:
            raise ValueError('Swagger version {0} is not supported.'.format(version))
    return swagger_versions
|
def managed(name, probes, defaults=None):
    '''
    Ensure the network device is configured as specified in the state SLS
    file. Probes not specified will be removed, while probes not configured
    as expected will trigger config updates.

    :param probes: Defines the probes as expected to be configured on the
        device. In order to ease the configuration and avoid repeating the
        same parameters for each probe, the next parameter (defaults) can be
        used, providing common characteristics.
    :param defaults: Specifies common parameters for the probes.

    SLS Example:

    .. code-block:: yaml

        rpmprobes:
            probes.managed:
                - probes:
                    probe_name1:
                        probe1_test1:
                            source: 192.168.0.2
                            target: 192.168.0.1
                        probe1_test2:
                            target: 172.17.17.1
                        probe1_test3:
                            target: 8.8.8.8
                            probe_type: http-ping
                    probe_name2:
                        probe2_test1:
                            test_interval: 100
                - defaults:
                    target: 10.10.10.10
                    probe_count: 15
                    test_interval: 3
                    probe_type: icmp-ping

    In the probes configuration, the only mandatory attribute is *target*
    (specified either in the probes configuration, either in the defaults
    dictionary). All the other parameters will use the operating system
    defaults, if not provided:

    - ``source`` - Specifies the source IP Address to be used during the
      tests. If not specified will use the IP Address of the logical
      interface loopback0.
    - ``target`` - Destination IP Address.
    - ``probe_count`` - Total number of probes per test (1..15). System
      defaults: 1 on both JunOS & Cisco.
    - ``probe_interval`` - Delay between tests (0..86400 seconds). System
      defaults: 3 on JunOS, 5 on Cisco.
    - ``probe_type`` - Probe request type. Available options:

      - icmp-ping
      - tcp-ping
      - udp-ping

    Using the example configuration above, after running the state, on the
    device will be configured 4 probes, with the following properties:

    .. code-block:: yaml

        probe_name1:
            probe1_test1:
                source: 192.168.0.2
                target: 192.168.0.1
                probe_count: 15
                test_interval: 3
                probe_type: icmp-ping
            probe1_test2:
                target: 172.17.17.1
                probe_count: 15
                test_interval: 3
                probe_type: icmp-ping
            probe1_test3:
                target: 8.8.8.8
                probe_count: 15
                test_interval: 3
                probe_type: http-ping
        probe_name2:
            probe2_test1:
                target: 10.10.10.10
                probe_count: 15
                test_interval: 3
                probe_type: icmp-ping
    '''
    ret = _default_ret(name)
    result = True
    comment = ''
    # Retrieve the current RPM probes configuration from the device.
    rpm_probes_config = _retrieve_rpm_probes()
    if not rpm_probes_config.get('result'):
        # NOTE: fixed typo in this user-facing message ("configurtion").
        ret.update({
            'result': False,
            'comment': 'Cannot retrieve configuration of the probes from the device: {reason}'.format(
                reason=rpm_probes_config.get('comment'))
        })
        return ret
    # Build the expected probes config dictionary, expanding the default
    # values into each probe test.
    configured_probes = rpm_probes_config.get('out', {})
    if not isinstance(defaults, dict):
        defaults = {}
    expected_probes = _expand_probes(probes, defaults)
    # Remove the unnecessary data from the configured probes...
    _clean_probes(configured_probes)
    # ...and also from the expected data.
    _clean_probes(expected_probes)
    # ----- Compare expected config with the existing config ----->
    diff = _compare_probes(configured_probes, expected_probes)
    # <---- Compare expected config with the existing config -----
    # ----- Call set_probes and delete_probes as needed ----->
    add_probes = diff.get('add')
    update_probes = diff.get('update')
    remove_probes = diff.get('remove')
    changes = {
        'added': _ordered_dict_to_dict(add_probes),
        'updated': _ordered_dict_to_dict(update_probes),
        'removed': _ordered_dict_to_dict(remove_probes)
    }
    ret.update({'changes': changes})
    if __opts__['test'] is True:
        # Test mode: report the computed diff without touching the device.
        ret.update({'comment': 'Testing mode: configuration was not changed!',
                    'result': None})
        return ret
    # Tracks whether something changed and a commit would be needed.
    config_change_expected = False
    if add_probes:
        added = _set_rpm_probes(add_probes)
        if added.get('result'):
            config_change_expected = True
        else:
            result = False
            comment += 'Cannot define new probes: {reason}\n'.format(reason=added.get('comment'))
    if update_probes:
        updated = _set_rpm_probes(update_probes)
        if updated.get('result'):
            config_change_expected = True
        else:
            result = False
            comment += 'Cannot update probes: {reason}\n'.format(reason=updated.get('comment'))
    if remove_probes:
        removed = _delete_rpm_probes(remove_probes)
        if removed.get('result'):
            config_change_expected = True
        else:
            result = False
            comment += 'Cannot remove probes! {reason}\n'.format(reason=removed.get('comment'))
    # <---- Call set_probes and delete_probes as needed -----
    # ----- Try to save changes ----->
    if config_change_expected:
        # If any changes are expected, try to commit them.
        result, comment = __salt__['net.config_control']()
    # <---- Try to save changes -----
    # ----- Try to schedule the probes ----->
    add_scheduled = _schedule_probes(add_probes)
    if add_scheduled.get('result'):
        # If able to load the template to schedule the probes, try to commit
        # the scheduling data (yes, a second commit is needed). On devices
        # such as Juniper, RPM probes do not need to be scheduled, therefore
        # the template is empty and won't try to commit empty changes.
        result, comment = __salt__['net.config_control']()
    if config_change_expected:
        if result and comment == '':
            # Changes were made and applied successfully.
            comment = 'Probes updated successfully!'
    ret.update({'result': result, 'comment': comment})
    return ret
|
def html_listify(tree, root_xl_element, extensions, list_type='ol'):
    """Convert a node tree into an xhtml nested list-of-lists.

    This will create 'li' elements under the root_xl_element, with
    additional sublists of the type passed as list_type. The contents
    of each li depends on the extensions dictionary: the keys of this
    dictionary are the ids of tree elements that are represented by files
    in the epub, with associated filename extensions as the value. Those
    nodes will be rendered as links to the reassembled filename: i.e.
    id='abc-2345-54e4' {'abc-2345-54e4': '.xhtml'} -> abc-2345-54e4.xhtml

    Other nodes will render as spans. If the node has id or short id values,
    the associated li will be populated with cnx-archive-uri and
    cnx-archive-shortid attributes, respectively.
    """
    for node in tree:
        li_elm = etree.SubElement(root_xl_element, 'li')
        if node['id'] not in extensions:
            # No extension -> no associated file: render title as a span.
            span_elm = lxml.html.fragment_fromstring(node['title'], create_parent='span')
            li_elm.append(span_elm)
        else:
            a_elm = lxml.html.fragment_fromstring(node['title'], create_parent='a')
            a_elm.set('href', ''.join([node['id'], extensions[node['id']]]))
            li_elm.append(a_elm)
        if node['id'] is not None and node['id'] != 'subcol':
            li_elm.set('cnx-archive-uri', node['id'])
        if node['shortId'] is not None:
            li_elm.set('cnx-archive-shortid', node['shortId'])
        if 'contents' in node:
            elm = etree.SubElement(li_elm, list_type)
            # BUG FIX: propagate list_type; previously the recursion silently
            # reverted nested sublists to the default 'ol'.
            html_listify(node['contents'], elm, extensions, list_type)
|
def get_least_distinct_words(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None):
    """Order the words from `vocab` by "distinctiveness score" (Chuang et al. 2012)
    from least to most distinctive. Optionally only return the `n` least
    distinctive words.

    J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques
    for Assessing Textual Topic Models"
    """
    # Delegates to the shared scorer with ascending ordering.
    return _words_by_distinctiveness_score(vocab, topic_word_distrib,
                                           doc_topic_distrib, doc_lengths,
                                           n, least_to_most=True)
|
def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    An enhanced variant of inspect.formatargspec to support code generation.

    fn
        An inspectable callable or tuple of inspect getargspec() results.
    grouped
        Defaults to True; include (parens, around, argument) lists

    Returns:

    args
        Full inspect.formatargspec for fn
    self_arg
        The name of the first positional argument, varargs[0], or None
        if the function defines no positional arguments.
    apply_pos
        args, re-written in calling rather than receiving syntax. Arguments are
        passed positionally.
    apply_kw
        Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

        >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
        {'args': '(self, a, b, c=3, **d)',
         'self_arg': 'self',
         'apply_kw': '(self, a, b, c=c, **d)',
         'apply_pos': '(self, a, b, c, **d)'}

    NOTE(review): inspect.getargspec and inspect.formatargspec were removed
    in Python 3.11 -- this helper only works on older interpreters.
    """
    # Accept either a callable or a pre-computed getargspec()-style tuple.
    spec = callable(fn) and inspect.getargspec(fn) or fn
    args = inspect.formatargspec(*spec)
    if spec[0]:
        # spec[0] is the list of positional argument names.
        self_arg = spec[0][0]
    elif spec[1]:
        # No positionals, but *varargs exists: refer to its first element.
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None
    # Receiving-syntax signature without defaults.
    apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
    # Names that carry defaults: the trailing len(defaults) positional args.
    defaulted_vals = spec[3] is not None and spec[0][0 - len(spec[3]):] or ()
    # Calling-syntax signature where defaulted args become name=name keywords.
    apply_kw = inspect.formatargspec(spec[0], spec[1], spec[2], defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
    if grouped:
        return dict(args=args, self_arg=self_arg, apply_pos=apply_pos, apply_kw=apply_kw)
    else:
        # Ungrouped: strip the surrounding parentheses.
        return dict(args=args[1:-1], self_arg=self_arg,
                    apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
|
def get_decoder_self_attention_bias(length):
    """Calculate bias for decoder that maintains model's autoregressive property.

    Creates a tensor that masks out locations that correspond to illegal
    connections, so prediction at position i cannot draw information from
    future positions.

    Args:
        length: int length of sequences in batch.

    Returns:
        float tensor of shape [1, 1, length, length]
    """
    with tf.name_scope("decoder_self_attention_bias"):
        # Lower-triangular matrix of ones: position i may attend to j <= i.
        lower_triangle = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
        lower_triangle = tf.reshape(lower_triangle, [1, 1, length, length])
        # Illegal (future) positions get a large negative bias.
        return _NEG_INF * (1.0 - lower_triangle)
|
def restore_definition(self, project, definition_id, deleted):
    """RestoreDefinition.

    Restores a deleted definition.

    :param str project: Project ID or project name
    :param int definition_id: The identifier of the definition to restore.
    :param bool deleted: When false, restores a deleted definition.
    :rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    query_parameters = {}
    if deleted is not None:
        query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
    response = self._send(http_method='PATCH',
                          location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('BuildDefinition', response)
|
def translate_x(self, d):
    """Translate mesh for x-direction.

    :param float d: Amount to translate
    :return: self, to allow call chaining
    """
    # Homogeneous 4x4 translation matrix; applied from the right, so the
    # offset lives in the last row.
    translation = numpy.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [d, 0, 0, 1],
    ])
    self.vectors = self.vectors.dot(translation)
    return self
|
def decrypt(ciphertext_blob, encryption_context=None, grant_tokens=None, region=None, key=None, keyid=None, profile=None):
    '''Decrypt ciphertext.

    CLI example::

        salt myminion boto_kms.decrypt encrypted_ciphertext
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = {}
    try:
        decrypted = conn.decrypt(ciphertext_blob,
                                 encryption_context=encryption_context,
                                 grant_tokens=grant_tokens)
        result['plaintext'] = decrypted['Plaintext']
    except boto.exception.BotoServerError as exc:
        # Surface the AWS error instead of raising.
        result['error'] = __utils__['boto.get_error'](exc)
    return result
|
def dividend_receivable(self):
    """[float] The portfolio's receivable dividend amount before the dividend
    cash is actually credited to the account. See the dividend section for
    details."""
    total = 0
    for entry in six.itervalues(self._dividend_receivable):
        total += entry['quantity'] * entry['dividend_per_share']
    return total
|
def batched_expiration_maintenance_dev(self, elapsed_time):
    """Batched version of expiration_maintenance().

    Runs expiration_maintenance() once for each maintenance interval that
    fits in elapsed_time.
    """
    for _ in range(self.num_batched_maintenance(elapsed_time)):
        self.expiration_maintenance()
|
def find_Note(data, freq, bits):
    """Get the frequencies, feed them to find_notes and then return the Note
    with the highest amplitude."""
    frequencies = find_frequencies(data, freq, bits)
    # Sort notes ascending by amplitude (element 1); take the loudest one.
    notes_by_amplitude = sorted(find_notes(frequencies), key=operator.itemgetter(1))
    return notes_by_amplitude[-1][0]
|
def sync(self, group, name=None, host=None, location=None, move=False, all=False):
    '''Sync the latest archive to the host on given location.

    CLI Example:

    .. code-block:: bash

        salt '*' support.sync group=test
        salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2
        salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan
        salt '*' support.sync group=test name=/tmp/myspecial-12345-67890.bz2 host=allmystuff.lan location=/opt/

    :param group: name of the local directory to which sync is going to put the result files
    :param name: name of the archive. Latest, if not specified.
    :param host: name of the destination host for rsync. Default is master, if not specified.
    :param location: local destination directory, default temporary if not specified
    :param move: move archive file[s]. Default is False.
    :param all: work with all available archives. Default is False (i.e. latest available)
    :return: formatted rsync stats including a per-archive 'files' status map
    '''
    # Temp file collects the archive basenames passed to rsync via --files-from.
    tfh, tfn = tempfile.mkstemp()
    processed_archives = []
    src_uri = uri = None
    last_arc = self.last_archive()
    # Select the archives to transfer: explicit name > all > latest.
    if name:
        archives = [name]
    elif all:
        archives = self.archives()
    elif last_arc:
        archives = [last_arc]
    else:
        archives = []
    for name in archives:
        err = None
        if not name:
            err = 'No support archive has been defined.'
        elif not os.path.exists(name):
            err = 'Support archive "{}" was not found'.format(name)
        if err is not None:
            log.error(err)
            raise salt.exceptions.SaltInvocationError(err)
        if not uri:
            # Source/destination are derived from the first archive; all
            # archives are assumed to share the same source directory.
            # Destination is <host>:<location>/<group>.
            src_uri = os.path.dirname(name)
            uri = '{host}:{loc}'.format(host=host or __opts__['master'],
                                        loc=os.path.join(location or tempfile.gettempdir(), group))
        os.write(tfh, salt.utils.stringutils.to_bytes(os.path.basename(name)))
        os.write(tfh, salt.utils.stringutils.to_bytes(os.linesep))
        processed_archives.append(name)
        log.debug('Syncing %s to %s', name, uri)
    os.close(tfh)
    if not processed_archives:
        raise salt.exceptions.SaltInvocationError('No archives found to transfer.')
    ret = __salt__['rsync.rsync'](src=src_uri, dst=uri,
                                  additional_opts=['--stats', '--files-from={}'.format(tfn)])
    ret['files'] = {}
    for name in processed_archives:
        if move:
            # Move semantics: delete the local archive after transfer.
            salt.utils.dictupdate.update(ret, self.delete_archives(name))
            log.debug('Deleting %s', name)
            ret['files'][name] = 'moved'
        else:
            ret['files'][name] = 'copied'
    try:
        os.unlink(tfn)
    except (OSError, IOError) as err:
        # Best effort cleanup; a leftover temp file is not fatal.
        log.error('Cannot remove temporary rsync file %s: %s', tfn, err)
    return self.format_sync_stats(ret)
|
def _parse_myinfo ( client , command , actor , args ) :
"""Parse MYINFO and update the Host object ."""
|
_ , server , version , usermodes , channelmodes = args . split ( None , 5 ) [ : 5 ]
s = client . server
s . host = server
s . version = version
s . user_modes = set ( usermodes )
s . channel_modes = set ( channelmodes )
|
def init(opts):
    '''Open the connection to the Nexus switch over the NX-API.

    As the communication is HTTP based, there is no connection to maintain;
    however, in order to test the connectivity and make sure we are able to
    bring up this Minion, we execute a very simple command (``show clock``)
    which doesn't come with much overhead and is sufficient to confirm we
    are indeed able to connect to the NX-API endpoint as configured.
    '''
    conn_args = copy.deepcopy(opts.get('proxy', {}))
    conn_args.pop('proxytype', None)
    # This is not an SSH-based proxy, so it should be safe to enable
    # multiprocessing.
    opts['multiprocessing'] = conn_args.pop('multiprocessing', True)
    try:
        # Cheap probe command to confirm connectivity; the reply is unused.
        __utils__['nxos_api.rpc']('show clock', **conn_args)
        nxos_device['conn_args'] = conn_args
        nxos_device['initialized'] = True
        nxos_device['up'] = True
    except SaltException:
        log.error('Unable to connect to %s', conn_args['host'], exc_info=True)
        raise
    return True
|
def _detect_encoding ( self , source_file ) :
"""Detect encoding ."""
|
encoding = self . _guess ( source_file )
# If we didn ' t explicitly detect an encoding , assume default .
if encoding is None :
encoding = self . default_encoding
return encoding
|
def wait_for(self, condition, timeout=None, interval=0.1, errmsg=None):
    '''Wait for a condition to be True.

    Wait for condition, a callable, to return True. If timeout is
    nonzero, raise a TimeoutError(errmsg) if the condition is not
    True after timeout seconds. Check the condition every
    interval seconds.
    '''
    start = time.time()
    while True:
        if condition():
            return
        if timeout and (time.time() - start) >= timeout:
            raise TimeoutError(errmsg)
        time.sleep(interval)
|
def set_camera_enabled(self, camera_id, is_enabled):
    """Turn Arlo camera On/Off.

    :param camera_id: identifier of the camera to toggle
    :param is_enabled: True to enable, False to disable
    """
    self.publish(action='set', resource='privacy', camera_id=camera_id,
                 mode=is_enabled, publish_response=True)
    # Refresh local state after the change.
    self.update()
|
def clear(self):
    """Delete Mentions of each class in the extractor from the given split."""
    # Create the set of candidate_subclasses associated with each
    # mention_subclass handled by this extractor.
    cand_subclasses = set()
    for value in candidate_subclasses.values():
        mention_list, cand_table = value[1][0], value[1][1]
        if any(m in self.mention_classes for m in mention_list):
            cand_subclasses.add(cand_table)
    # First, clear all the Mentions. This will cascade and remove the
    # mention_subclasses and corresponding candidate_subclasses.
    for mention_class in self.mention_classes:
        logger.info(f"Clearing table: {mention_class.__tablename__}")
        self.session.query(Mention).filter_by(
            type=mention_class.__tablename__
        ).delete(synchronize_session="fetch")
    # Next, clear the Candidates. This is done manually because we have
    # no cascading relationship from candidate_subclass to Candidate.
    for cand_subclass in cand_subclasses:
        logger.info(f"Cascading to clear table: {cand_subclass}")
        self.session.query(Candidate).filter_by(
            type=cand_subclass
        ).delete(synchronize_session="fetch")
|
def put_function(self, fn):
    """Register *fn* as the latest version of its function name.

    :param fn: Function
    :type fn: LambdaFunction
    """
    record = self._functions.get(fn.function_name)
    if record is not None:
        record['latest'] = fn
    else:
        self._functions[fn.function_name] = {
            'latest': fn,
            'versions': [],
            # Aliases must not keep functions alive on their own.
            'alias': weakref.WeakValueDictionary(),
        }
    self._arns[fn.function_arn] = fn
|
def query_module_funcs(self, module):
    """Query the functions in the specified module.

    :return: all Export rows whose module column matches *module*.
    """
    return self.session.query(Export).filter_by(module=module).all()
|
async def main():
    """Get the data from a *hole instance."""
    async with aiohttp.ClientSession() as session:
        hole_data = Hole('192.168.0.215', loop, session)
        await hole_data.get_data()
        # Dump the raw payload first, then a few convenience attributes.
        print(json.dumps(hole_data.data, indent=4, sort_keys=True))
        print("Status:", hole_data.status)
        print("Domains being blocked:", hole_data.domains_being_blocked)
|
def _extract_params_from_i0 ( only_variable_parameters , parameters_with_variability , initial_conditions_with_variability ) :
"""Used within the distance / cost function to create the current kinetic parameter and initial condition vectors
to be used during that interaction , using current values in i0.
This function takes i0 and complements it with additional information from variables that we do not want to vary
so the simulation function could be run and values compared .
: param only _ variable _ parameters : ` i0 ` list returned from ` make _ i0 `
: param param : list of starting values for kinetic parameters
: param vary : list to identify which values in ` param ` to vary during inference ( 0 = fixed , 1 = optimise )
: param initcond : list of starting values ( i . e . at t0 ) for moments
: param varyic : list to identify which values in ` initcond ` to vary ( 0 = fixed , 1 = optimise )
: return :"""
|
complete_params = [ ]
counter = 0
for param , is_variable in parameters_with_variability : # If param not variable , add it from param list
if not is_variable :
complete_params . append ( param )
else : # Otherwise add it from variable parameters list
complete_params . append ( only_variable_parameters [ counter ] )
counter += 1
complete_initial_conditions = [ ]
for initial_condition , is_variable in initial_conditions_with_variability :
if not is_variable :
complete_initial_conditions . append ( initial_condition )
else :
complete_initial_conditions . append ( only_variable_parameters [ counter ] )
counter += 1
return complete_params , complete_initial_conditions
|
def atlasdb_get_zonefile_bits(zonefile_hash, con=None, path=None):
    """What bit(s) in a zonefile inventory does a zonefile hash correspond to?
    Return their indexes in the bit field."""
    with AtlasDBOpen(con=con, path=path) as dbcon:
        cursor = dbcon.cursor()
        rows = atlasdb_query_execute(
            cursor,
            "SELECT inv_index FROM zonefiles WHERE zonefile_hash = ?;",
            (zonefile_hash,),
        )
        # inv_index is stored 1-based; callers expect zero-based bit offsets.
        return [row['inv_index'] - 1 for row in rows]
|
def OpenServerEndpoint(self, path, verify_cb=lambda x: True, data=None,
                       params=None, headers=None, method="GET", timeout=None):
    """Search through all the base URLs to connect to one that works.

    This is a thin wrapper around requests.request() so most parameters are
    documented there.

    Args:
      path: The URL path to access in this endpoint.
      verify_cb: A callback which should return True if the response is
        reasonable. This is used to detect if we are able to talk to the
        correct endpoint. If not we try a different endpoint/proxy
        combination.
      data: Parameters to send in POST bodies (See Requests documentation).
      params: Parameters to send in GET URLs (See Requests documentation).
      headers: Additional headers (See Requests documentation).
      method: The HTTP method to use. If not set we select one automatically.
      timeout: See Requests documentation.

    Returns:
      an HTTPObject() instance with the correct error code set.
    """
    last_error = HTTPObject(code=404)
    # Each failure advances last_base_url_index, so at most one full pass
    # over the configured base URLs is attempted per call.
    for _ in range(len(self.base_urls)):
        candidate = self.base_urls[self.last_base_url_index % len(self.base_urls)]
        result = self.OpenURL(
            self._ConcatenateURL(candidate, path),
            data=data,
            params=params,
            headers=headers,
            method=method,
            timeout=timeout,
            verify_cb=verify_cb,
        )
        if result.Success():
            # The URL worked - we record that.
            self.active_base_url = candidate
            return result
        self.last_base_url_index += 1
        last_error = result
    # No connection is possible at all.
    logging.info("Could not connect to GRR servers %s, directly or through "
                 "these proxies: %s.", self.base_urls, self.proxies)
    return last_error
|
def read(value, split=False):
    '''Get the value of an option interpreting as a file implicitly or
    explicitly and falling back to the value if not explicitly specified.

    If the value is '@name', then a file must exist with name and the returned
    value will be the contents of that file. If the value is '@-' or '-', then
    stdin will be read and returned as the value. Finally, if a file exists
    with the provided value, that file will be read. Otherwise, the value
    will be returned.

    :param value: the option value (any object; coerced to str for inspection)
    :param split: when True, strip and split the resulting text into a tuple
    :return: file contents, the original value, or None for an interactive tty
    '''
    v = str(value)
    retval = value
    # startswith is safe on an empty string; the previous v[0] check raised
    # IndexError when the option value was ''.
    explicit = v.startswith('@')
    if explicit or v == '-':
        fname = '-' if v == '-' else v[1:]
        try:
            with click.open_file(fname) as fp:
                if not fp.isatty():
                    retval = fp.read()
                else:
                    # Interactive terminal: nothing to read implicitly.
                    retval = None
        # @todo better to leave as IOError and let caller handle it
        # to better report in context of call (e.g. the option/type)
        except IOError as ioe:
            # if explicit and problems, raise
            if explicit:
                raise click.ClickException(str(ioe))
    elif path.exists(v) and path.isfile(v):
        with click.open_file(v) as fp:
            retval = fp.read()
    if retval and split and not isinstance(retval, tuple):
        retval = _split(retval.strip())
    return retval
|
def name(self):
    """Table name used in requests.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_table_name]
        :end-before: [END bigtable_table_name]

    .. note::
        This property will not change if ``table_id`` does not, but the
        return value is not cached.

    The table name is of the form
    ``"projects/../instances/../tables/{table_id}"``

    :rtype: str
    :returns: The table name.
    """
    client = self._instance._client
    return client.table_data_client.table_path(
        project=client.project,
        instance=self._instance.instance_id,
        table=self.table_id,
    )
|
def to_data(interval, conv=None, pinf=float('inf'), ninf=float('-inf')):
    """Export given interval (or atomic interval) to a list of 4-uples
    (left, lower, upper, right).

    :param interval: an Interval or AtomicInterval instance.
    :param conv: function that convert bounds to "lower" and "upper", default to identity.
    :param pinf: value used to encode positive infinity.
    :param ninf: value used to encode negative infinity.
    :return: a list of 4-uples (left, lower, upper, right)
    """
    if isinstance(interval, AtomicInterval):
        interval = Interval(interval)
    if conv is None:
        conv = lambda v: v

    def encode(bound):
        # The two infinities get their encoded stand-ins; every other
        # bound goes through the user-supplied converter.
        if bound == inf:
            return pinf
        if bound == -inf:
            return ninf
        return conv(bound)

    return [
        (item.left, encode(item.lower), encode(item.upper), item.right)
        for item in interval
    ]
|
def events_for_onchain_secretreveal_if_dangerzone(
    channelmap: ChannelMap,
    secrethash: SecretHash,
    transfers_pair: List[MediationPairState],
    block_number: BlockNumber,
    block_hash: BlockHash,
) -> List[Event]:
    """Reveal the secret on-chain if the lock enters the unsafe region and the
    secret is not yet on-chain.

    :param channelmap: mapping used to resolve each pair's payer channel
    :param secrethash: hash of the secret being mediated
    :param transfers_pair: mediation pairs for this transfer
    :param block_number: current block number
    :param block_hash: hash of the current block
    :return: the on-chain reveal events to emit (possibly empty)
    """
    events: List[Event] = list()
    # Collect the payer channels for every pair: these are the channels in
    # which this node could lose the locked amount if the secret is not
    # registered before the lock expires.
    all_payer_channels = []
    for pair in transfers_pair:
        channel_state = get_payer_channel(channelmap, pair)
        if channel_state:
            all_payer_channels.append(channel_state)
    # True when a secret-registration transaction is already in flight for
    # this secret, so we never send a duplicate transaction below.
    transaction_sent = has_secret_registration_started(
        all_payer_channels,
        transfers_pair,
        secrethash,
    )
    # Only consider the transfers which have a pair. This means if we have a
    # waiting transfer and for some reason the node knows the secret, it will
    # not try to register it. Otherwise it would be possible for an attacker to
    # reveal the secret late, just to force the node to send an unecessary
    # transaction.
    for pair in get_pending_transfer_pairs(transfers_pair):
        payer_channel = get_payer_channel(channelmap, pair)
        if not payer_channel:
            continue
        lock = pair.payer_transfer.lock
        safe_to_wait, _ = is_safe_to_wait(
            lock.expiration,
            payer_channel.reveal_timeout,
            block_number,
        )
        secret_known = channel.is_secret_known(
            payer_channel.partner_state,
            pair.payer_transfer.lock.secrethash,
        )
        if not safe_to_wait and secret_known:
            # The lock is inside the danger zone: mark the pair and, if no
            # registration transaction is pending yet, emit the on-chain
            # reveal for it.
            pair.payer_state = 'payer_waiting_secret_reveal'
            if not transaction_sent:
                secret = channel.get_secret(
                    payer_channel.partner_state,
                    lock.secrethash,
                )
                assert secret, 'the secret should be known at this point'
                reveal_events = secret_registry.events_for_onchain_secretreveal(
                    channel_state=payer_channel,
                    secret=secret,
                    expiration=lock.expiration,
                    block_hash=block_hash,
                )
                events.extend(reveal_events)
                transaction_sent = True
    return events
|
def remove(feature, remove_payload=False, restart=False):
    r'''Remove an installed feature

    .. note::
        Some features require a reboot after installation/uninstallation. If
        one of these features are modified, then other features cannot be
        installed until the server is restarted. Additionally, some features
        take a while to complete installation/uninstallation, so it is a good
        idea to use the ``-t`` option to set a longer timeout.

    Args:

        feature (str, list):
            The name of the feature(s) to remove. This can be a single
            feature, a string of features in a comma delimited list (no
            spaces), or a list of features.

            .. versionadded:: 2018.3.0
                Added the ability to pass a list of features to be removed.

        remove_payload (Optional[bool]):
            True will cause the feature to be removed from the side-by-side
            store (``%SystemDrive%:\Windows\WinSxS``). Default is False

        restart (Optional[bool]):
            Restarts the computer when uninstall is complete, if required by
            the role/feature removed. Default is False

    Returns:
        dict: A dictionary containing the results of the uninstall

    CLI Example:

    .. code-block:: bash

        salt -t 600 '*' win_servermanager.remove Telnet-Client
    '''
    # If it is a list of features, make it a comma delimited string
    if isinstance(feature, list):
        feature = ','.join(feature)
    # Use Uninstall-WindowsFeature on Windows 2012 (osversion 6.2) and later
    # minions. Default to Remove-WindowsFeature for earlier releases of Windows.
    # The newer command makes management tools optional so add them for parity
    # with old behavior.
    command = 'Remove-WindowsFeature'
    management_tools = ''
    _remove_payload = ''
    if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2') >= 0:
        command = 'Uninstall-WindowsFeature'
        management_tools = '-IncludeManagementTools'
        # Only available with the `Uninstall-WindowsFeature` command
        if remove_payload:
            _remove_payload = '-Remove'
    cmd = '{0} -Name {1} {2} {3} {4} ' \
          '-WarningAction SilentlyContinue'.format(command,
                                                   _cmd_quote(feature),
                                                   management_tools,
                                                   _remove_payload,
                                                   '-Restart' if restart else '')
    try:
        out = _pshell_json(cmd)
    except CommandExecutionError as exc:
        # PowerShell reports a bad feature name as an ArgumentNotValid
        # error; re-raise it with a clearer message for the caller.
        if 'ArgumentNotValid' in exc.message:
            raise CommandExecutionError('Invalid Feature Name', info=exc.info)
        raise
    # Results are stored in a list of dictionaries in `FeatureResult`
    if out['FeatureResult']:
        ret = {'ExitCode': out['ExitCode'],
               'RestartNeeded': False,
               'Restarted': False,
               'Features': {},
               'Success': out['Success']}
        for item in out['FeatureResult']:
            ret['Features'][item['Name']] = {
                'DisplayName': item['DisplayName'],
                'Message': item['Message'],
                'RestartNeeded': item['RestartNeeded'],
                'SkipReason': item['SkipReason'],
                'Success': item['Success']
            }
        # Only items that installed are in the list of dictionaries
        # Add 'Not installed' for features that aren't in the list of dicts
        for item in feature.split(','):
            if item not in ret['Features']:
                ret['Features'][item] = {'Message': 'Not installed'}
        return ret
    else:
        # If we get here then none of the features were installed
        ret = {'ExitCode': out['ExitCode'],
               'Features': {},
               'RestartNeeded': False,
               'Restarted': False,
               'Success': out['Success']}
        for item in feature.split(','):
            ret['Features'][item] = {'Message': 'Not installed'}
        return ret
|
def get_aoi(self, solar_zenith, solar_azimuth):
    """Get the angle of incidence on the system.

    Parameters
    ----------
    solar_zenith : float or Series
        Solar zenith angle.
    solar_azimuth : float or Series
        Solar azimuth angle.

    Returns
    -------
    aoi : Series
        The angle of incidence
    """
    return irradiance.aoi(self.surface_tilt, self.surface_azimuth,
                          solar_zenith, solar_azimuth)
|
def build_kcorrection_array(log, redshiftArray, snTypesArray, snLightCurves,
                            pathToOutputDirectory, plot=True):
    """*Given the random redshiftArray and snTypeArray, generate a dictionary
    of k-correction polynomials (one for each filter) for every object.*

    **Key Arguments:**
        - ``log`` -- logger
        - ``redshiftArray`` -- the pre-generated redshift array
        - ``snTypesArray`` -- the pre-generated array of random sn types
        - ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
        - ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
        - ``plot`` -- generate plot?

    **Return:**
        - ``kCorArray`` -- numpy array of per-object dictionaries mapping
          filter name to a ``numpy.poly1d`` (or None when no polynomial file
          could be read for that model/filter/redshift)
    """
    ## THIRD PARTY ##
    import yaml
    import numpy as np

    dataDir = pathToOutputDirectory + "/k_corrections/"
    filters = ['g', 'r', 'i', 'z']
    fileName = pathToOutputDirectory + "/transient_light_curves.yaml"
    # `file()` is Python 2 only; use open() in a context manager so the
    # handle is always closed (the old code leaked this stream). safe_load
    # is required: plain yaml.load() needs an explicit Loader on PyYAML >= 6.
    with open(fileName, 'r') as stream:
        generatedLCs = yaml.safe_load(stream)
    models = generatedLCs.keys()
    kCorList = []
    for redshift in redshiftArray:
        kCorDict = {}
        for model in models:
            for ffilter in filters:
                filterDir = dataDir + model + "/" + ffilter
                strRed = "%0.3f" % (redshift,)
                fileName = (filterDir + "/z"
                            + str(strRed).replace(".", "pt") + "_poly.yaml")
                try:
                    with open(fileName, 'r') as polyStream:
                        yamlContent = yaml.safe_load(polyStream)
                    flatPoly = np.poly1d(yamlContent['polyCoeffs'])
                except Exception:
                    # Missing or malformed polynomial file: record None so
                    # callers can skip this (model, filter) combination.
                    flatPoly = None
                kCorDict[ffilter] = flatPoly
        kCorList.append(kCorDict)
    kCorArray = np.array(kCorList)
    return kCorArray
|
def __filter_non_working_providers(self):
    '''Remove any mis-configured cloud providers from the available listing'''
    # Iterate over copies because provider aliases and drivers are popped
    # from self.opts['providers'] while looping.
    for alias, drivers in six.iteritems(self.opts['providers'].copy()):
        for driver in drivers.copy():
            fun = '{0}.get_configured_provider'.format(driver)
            if fun not in self.clouds:  # Mis-configured provider that got removed?
                log.warning(
                    'The cloud driver, \'%s\', configured under the '
                    '\'%s\' cloud provider alias, could not be loaded. '
                    'Please check your provider configuration files and '
                    'ensure all required dependencies are installed '
                    'for the \'%s\' driver.\n'
                    'In rare cases, this could indicate the \'%s()\' '
                    'function could not be found.\nRemoving \'%s\' from '
                    'the available providers list',
                    driver, alias, driver, fun, driver
                )
                self.opts['providers'][alias].pop(driver)
                if alias not in self.opts['providers']:
                    continue
                # Drop the alias entirely once its last driver is gone.
                if not self.opts['providers'][alias]:
                    self.opts['providers'].pop(alias)
                continue
            # Ask the driver itself whether its configuration is usable;
            # get_configured_provider() returning False means required
            # settings are missing.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                if self.clouds[fun]() is False:
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias is not properly '
                        'configured. Removing it from the available '
                        'providers list.', driver, alias
                    )
                    self.opts['providers'][alias].pop(driver)
        if alias not in self.opts['providers']:
            continue
        # Same cleanup at the alias level after the driver sweep.
        if not self.opts['providers'][alias]:
            self.opts['providers'].pop(alias)
|
def decrypt(self, key):
    """This method checks the signature on the state and decrypts it.

    :param key: the key to decrypt and sign with
    :raises HeartbeatError: if the stored HMAC does not match
    """
    # Verify the signature before touching any encrypted fields.
    if self.get_hmac(key) != self.hmac:
        raise HeartbeatError("Signature invalid on state.")
    if not self.encrypted:
        return
    # Decrypt both keys with a single CFB cipher so the stream state chains
    # across the two fields, exactly as they were encrypted.
    cipher = AES.new(key, AES.MODE_CFB, self.iv)
    self.f_key = cipher.decrypt(self.f_key)
    self.alpha_key = cipher.decrypt(self.alpha_key)
    self.encrypted = False
    # Re-sign now that the plaintext fields have changed.
    self.hmac = self.get_hmac(key)
|
def regenerate_recovery_code(self, user_id):
    """Removes the current recovery token, generates and returns a new one

    Args:
        user_id (str): The user_id of the user identity.

    See: https://auth0.com/docs/api/management/v2#!/Users/post_recovery_code_regeneration
    """
    endpoint = self._url('{}/recovery-code-regeneration'.format(user_id))
    return self.client.post(endpoint)
|
def connectSelection(self, cls=None):
    """Creates a connection between the currently selected nodes, provided
    there are only 2 nodes selected. If the cls parameter is supplied then
    that will be the class instance used when creating the connection.
    Otherwise, the default connection class will be used.

    :param cls: subclass of <XNodeConnection>
    :return: <XNodeConnection> || None
    """
    selection = self.selectedNodes()
    # A connection needs exactly one output node and one input node.
    if len(selection) != 2:
        return None
    output_node, input_node = selection
    connection = self.addConnection(cls)
    connection.setOutputNode(output_node)
    connection.setInputNode(input_node)
    connection.rebuild()
    return connection
|
def _get_points ( self ) :
"""Subclasses may override this method ."""
|
return tuple ( [ self . _getitem__points ( i ) for i in range ( self . _len__points ( ) ) ] )
|
def delete_snmp_template(auth, url, template_name=None, template_id=None):
    """Takes template_name as input to issue RESTUL call to HP IMC which will
    delete the specific snmp template from the IMC system

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param template_name: str value of template name
    :param template_id: str value template template_id value
    :return: int HTTP response code
    :rtype: int
    :raises ValueError: if neither template_name nor template_id is supplied
    """
    # The previous implementation dereferenced an undefined variable
    # (`snmp_template`) when both arguments were None; fail loudly instead
    # of issuing a DELETE against a bogus URL.
    if template_name is None and template_id is None:
        raise ValueError("Either template_name or template_id must be supplied")
    try:
        if template_id is None:
            # Resolve the template id from the name by scanning all templates.
            snmp_templates = get_snmp_templates(auth, url)
            for template in snmp_templates:
                if template['name'] == template_name:
                    template_id = template['id']
        f_url = url + "/imcrs/plat/res/snmp/%s/delete" % template_id
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " delete_snmp_template: An Error has occured"
|
def merge_corpus(self, corpus):
    """Merge the given corpus into this corpus. All assets (tracks, utterances,
    issuers, ...) are copied into this corpus. If any ids (utt-idx, track-idx,
    issuer-idx, subview-idx, ...) are occurring in both corpora, the ids from
    the merging corpus are suffixed by a number (starting from 1 until no
    other is matching).

    Args:
        corpus (CorpusView): The corpus to merge.
    """
    # Create a copy, so objects aren't changed in the original merging corpus
    merging_corpus = Corpus.from_corpus(corpus)
    # NOTE(review): tracks/issuers/utterances are imported from the original
    # ``corpus`` while subviews/feature containers come from the copy —
    # presumably import_* copies internally, but worth confirming.
    self.import_tracks(corpus.tracks.values())
    self.import_issuers(corpus.issuers.values())
    utterance_idx_mapping = self.import_utterances(corpus.utterances.values())
    for subview_idx, subview in merging_corpus.subviews.items():
        # Remap the utterance ids referenced by each subview filter to the
        # (possibly suffixed) ids assigned during import.
        for filter in subview.filter_criteria:
            if isinstance(filter, subset.MatchingUtteranceIdxFilter):
                new_filtered_utt_ids = set()
                for utt_idx in filter.utterance_idxs:
                    new_filtered_utt_ids.add(utterance_idx_mapping[utt_idx].idx)
                filter.utterance_idxs = new_filtered_utt_ids
        # Suffix the subview id as well if it already exists here.
        new_idx = naming.index_name_if_in_list(subview_idx, self.subviews.keys())
        self.import_subview(new_idx, subview)
    for feat_container_idx, feat_container in merging_corpus.feature_containers.items():
        self.new_feature_container(feat_container_idx, feat_container.path)
|
def state(self):
    """State of this instance. One of ``OFFLINE``, ``INITIALIZING``,
    ``INITIALIZED``, ``STARTING``, ``RUNNING``, ``STOPPING`` or
    ``FAILED``."""
    # An unset protobuf field means the state is unknown.
    if not self._proto.HasField('state'):
        return None
    return yamcsManagement_pb2.YamcsInstance.InstanceState.Name(self._proto.state)
|
def model_changed(self, model, prop_name, info):
    """This method notifies the model lists and the parent state about changes

    The method is called each time, the model is changed. This happens, when
    the state itself changes or one of its children (states, transitions,
    data flows) changes. Changes of the children cannot be observed directly,
    therefore children notify their parent about their changes by calling
    this method.

    This method then checks, what has been changed by looking at the model
    that is passed to it. In the following it notifies the list in which the
    change happened about the change.

    E.g. one child state changes its name. The model of that state observes
    itself and notifies the parent (i.e. this state model) about the change
    by calling this method with the information about the change. This method
    recognizes that the model is of type StateModel and therefore triggers a
    notify on the list of state models.

    "_notify_method_before" is used as trigger method when the changing
    function is entered and "_notify_method_after" is used when the changing
    function returns. This changing function in the example would be the
    setter of the property name.

    :param model: The model that was changed
    :param prop_name: The property that was changed
    :param info: Information about the change (e.g. the name of the changing function)
    """
    # if info.method_name == 'change_state_type':  # Handled in method 'change_state_type'
    #     return
    # If this model has been changed (and not one of its child states), then we have to update all child models
    # This must be done before notifying anybody else, because other may relay on the updated models
    if self.state == info['instance']:
        if 'after' in info:
            self.update_child_models(model, prop_name, info)
            # if there is and exception set is_about_to_be_destroyed_recursively flag to False again
            if info.method_name in ["remove_state"] and isinstance(info.result, Exception):
                # state_id may come in as a keyword or as the second positional arg
                state_id = info.kwargs['state_id'] if 'state_id' in info.kwargs else info.args[1]
                self.states[state_id].is_about_to_be_destroyed_recursively = False
        else:
            # while before notification mark all states which get destroyed recursively
            if info.method_name in ["remove_state"] and info.kwargs.get('destroy', True) and info.kwargs.get('recursive', True):
                state_id = info.kwargs['state_id'] if 'state_id' in info.kwargs else info.args[1]
                self.states[state_id].is_about_to_be_destroyed_recursively = True
    changed_list = None
    cause = None
    # If the change happened in a child state, notify the list of all child states
    if (isinstance(model, AbstractStateModel) and model is not self) or (  # The state was changed directly
            not isinstance(model, AbstractStateModel) and model.parent is not self):  # One of the member models was changed
        changed_list = self.states
        cause = 'state_change'
    # If the change happened in one of the transitions, notify the list of all transitions
    elif isinstance(model, TransitionModel) and model.parent is self:
        changed_list = self.transitions
        cause = 'transition_change'
    # If the change happened in one of the data flows, notify the list of all data flows
    elif isinstance(model, DataFlowModel) and model.parent is self:
        changed_list = self.data_flows
        cause = 'data_flow_change'
    # If the change happened in one of the scoped variables, notify the list of all scoped variables
    elif isinstance(model, ScopedVariableModel) and model.parent is self:
        changed_list = self.scoped_variables
        cause = 'scoped_variable_change'
    if not (cause is None or changed_list is None):
        # Relay the notification to the matching observable list, preserving
        # the before/after phase of the original change.
        if 'before' in info:
            changed_list._notify_method_before(self.state, cause, (self.state,), info)
        elif 'after' in info:
            changed_list._notify_method_after(self.state, cause, None, (self.state,), info)
    # Finally call the method of the base class, to forward changes in ports and outcomes
    super(ContainerStateModel, self).model_changed(model, prop_name, info)
|
def get_linked_deployments(deployments: Dict[str, Any]) -> Dict[str, Any]:
    """Returns all deployments found in a chain URI's deployment data that
    contain link dependencies."""
    linked = {}
    for name, data in deployments.items():
        if get_in(("runtime_bytecode", "link_dependencies"), data):
            linked[name] = data
    # A contract instance must never link against itself.
    for name, data in linked.items():
        dependencies = data["runtime_bytecode"]["link_dependencies"]
        if any(dependency["value"] == name for dependency in dependencies):
            raise BytecodeLinkingError(
                f"Link dependency found in {name} deployment that references its "
                "own contract instance, which is disallowed"
            )
    return linked
|
def condensedDistance(dupes):
    '''Convert the pairwise list of distances in dupes to "condensed
    distance matrix" required by the hierarchical clustering
    algorithms. Also return a dictionary that maps the distance matrix
    to the record_ids.

    The formula for an index of the condensed matrix is

    index = {N choose 2} - {N - row choose 2} + (col - row - 1)
          = N * (N - 1) / 2 - (N - row) * (N - row - 1) / 2 + col - row - 1
            matrix_length               row_step

    where (row, col) is index of an uncondensed square N X N distance matrix.

    See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
    '''
    unique_ids = numpy.unique(dupes['pairs'])
    index_to_id = dict(enumerate(unique_ids))
    positions = unique_ids.searchsorted(dupes['pairs'])
    row = positions[:, 0]
    col = positions[:, 1]
    n = len(unique_ids)
    full_length = n * (n - 1) / 2
    step = (n - row) * (n - row - 1) / 2
    condensed_index = full_length - step + col - row - 1
    # Unscored pairs default to the maximum distance of 1.
    condensed = numpy.ones(int(full_length), 'f4')
    condensed[condensed_index.astype(int)] = 1 - dupes['score']
    return index_to_id, condensed, n
|
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
    """Returns the ``metric`` for ``unique_identifier`` segmented by day
    starting from ``from_date``

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of days to retrive starting from ``from_date``
    :param kwargs: may contain ``connection``, an already-open backend
        connection to reuse instead of opening a new one
    :return: tuple ``(series, results)`` as produced by
        ``_parse_and_process_metrics``
    """
    conn = kwargs.get("connection", None)
    # Infinite generator of consecutive days starting at from_date.
    date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
    metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
    # generate a list of mondays in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
    # One hmget per bucket in the date range, each fetching all daily fields.
    metric_func = lambda conn: [
        conn.hmget(self._get_daily_metric_key(unique_identifier, metric_key_date), metric_keys)
        for metric_key_date in metric_key_date_range
    ]
    if conn is not None:
        # Reuse the caller-supplied connection.
        results = metric_func(conn)
    else:
        # Otherwise batch the commands through the backend's mapping context.
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
|
def _get_context_table(context):
    """Yields a formatted table to print context details.

    :param dict context: The tunnel context
    :return Table: Formatted for tunnel context output
    """
    # (display label, context key) in the exact order rows are printed.
    rows = [
        ('id', 'id'),
        ('name', 'name'),
        ('friendly name', 'friendlyName'),
        ('internal peer IP address', 'internalPeerIpAddress'),
        ('remote peer IP address', 'customerPeerIpAddress'),
        ('advanced configuration flag', 'advancedConfigurationFlag'),
        ('preshared key', 'presharedKey'),
        ('phase 1 authentication', 'phaseOneAuthentication'),
        ('phase 1 diffie hellman group', 'phaseOneDiffieHellmanGroup'),
        ('phase 1 encryption', 'phaseOneEncryption'),
        ('phase 1 key life', 'phaseOneKeylife'),
        ('phase 2 authentication', 'phaseTwoAuthentication'),
        ('phase 2 diffie hellman group', 'phaseTwoDiffieHellmanGroup'),
        ('phase 2 encryption', 'phaseTwoEncryption'),
        ('phase 2 key life', 'phaseTwoKeylife'),
        ('phase 2 perfect forward secrecy', 'phaseTwoPerfectForwardSecrecy'),
    ]
    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'
    for label, key in rows:
        table.add_row([label, context.get(key, '')])
    # The two date fields deliberately default to None rather than ''.
    table.add_row(['created', context.get('createDate')])
    table.add_row(['modified', context.get('modifyDate')])
    return table
|
def distance_matrix(self, leaf_labels=False):
    '''Return a distance matrix (2D dictionary) of the leaves of this ``Tree``

    Args:
        ``leaf_labels`` (``bool``): ``True`` to have keys be labels of leaf
        ``Node`` objects, otherwise ``False`` to have keys be ``Node`` objects

    Returns:
        ``dict``: Distance matrix (2D dictionary) of the leaves of this
        ``Tree``, where keys are labels of leaves; ``M[u][v]`` = distance
        from ``u`` to ``v``
    '''
    M = dict()
    # leaf_dists[node] = list of [leaf, distance-from-node-to-leaf] pairs
    # for every leaf below `node`; built bottom-up via postorder traversal.
    leaf_dists = dict()
    for node in self.traverse_postorder():
        if node.is_leaf():
            leaf_dists[node] = [[node, 0]]
        else:
            # Push each child's leaf distances up by that child's edge length.
            for c in node.children:
                if c.edge_length is not None:
                    for i in range(len(leaf_dists[c])):
                        leaf_dists[c][i][1] += c.edge_length
            # Every leaf pair spanning two different children meets at this
            # node, so their pairwise distance is the sum of the two depths.
            for c1 in range(0, len(node.children) - 1):
                leaves_c1 = leaf_dists[node.children[c1]]
                for c2 in range(c1 + 1, len(node.children)):
                    leaves_c2 = leaf_dists[node.children[c2]]
                    for i in range(len(leaves_c1)):
                        for j in range(len(leaves_c2)):
                            u, ud = leaves_c1[i]
                            v, vd = leaves_c2[j]
                            d = ud + vd
                            if leaf_labels:
                                u_key = u.label
                                v_key = v.label
                            else:
                                u_key = u
                                v_key = v
                            # Record the symmetric entry in both directions.
                            if u_key not in M:
                                M[u_key] = dict()
                            M[u_key][v_key] = d
                            if v_key not in M:
                                M[v_key] = dict()
                            M[v_key][u_key] = d
            # Merge the children's lists into this node's list and free the
            # children's entries to bound memory use.
            leaf_dists[node] = leaf_dists[node.children[0]]
            del leaf_dists[node.children[0]]
            for i in range(1, len(node.children)):
                leaf_dists[node] += leaf_dists[node.children[i]]
                del leaf_dists[node.children[i]]
    return M
|
def GetAuditLogEntries(offset, now, token):
    """Yield all audit log entries between now - offset and now.

    Args:
        offset: rdfvalue.Duration, how far back in time to look.
        now: rdfvalue.RDFDatetime for the current time.
        token: GRR access token.

    Yields:
        AuditEvents created during the time range.
    """
    # Start reading one rollover period earlier so events written to an older
    # log file are not missed; out-of-range events are filtered below.
    earliest = now - offset - audit.AUDIT_ROLLOVER_TIME
    for log_fd in audit.LegacyAuditLogsForTimespan(earliest, now, token):
        for entry in log_fd.GenerateItems():
            if now - offset < entry.timestamp < now:
                yield entry
|
def to_unicode(string):
    """Ensure a passed string is unicode"""
    if isinstance(string, six.text_type):
        return string
    if isinstance(string, six.binary_type):
        return string.decode('utf8')
    # Not a string at all: coerce via the interpreter's native text type.
    return unicode(string) if six.PY2 else str(string)
|
def get_word_at(self, index: int) -> Union[int, BitVec]:
    """Access a word from a specified memory index.

    :param index: integer representing the index to access
    :return: 32 byte word at the specified index
    """
    word_bytes = self[index:index + 32]
    try:
        # Fast path: every byte is concrete, so pack them into one value.
        concrete = bytes([util.get_concrete_int(b) for b in word_bytes])
        return symbol_factory.BitVecVal(
            util.concrete_int_from_bytes(concrete, 0), 256
        )
    except TypeError:
        # At least one byte is symbolic: concatenate 8-bit bit-vectors.
        parts = [
            b if isinstance(b, BitVec) else symbol_factory.BitVecVal(b, 8)
            for b in cast(List[Union[int, BitVec]], word_bytes)
        ]
        result = simplify(Concat(parts))
        assert result.size() == 256
        return result
|
def azm(self):
    """Corrected azimuth, taking into account backsight, declination, and compass corrections."""
    foresight = self.get('BEARING', None)
    backsight = self.get('AZM2', None)
    if foresight is None and backsight is None:
        return None
    if backsight is None:
        corrected = foresight
    elif foresight is None:
        # Reverse the backsight by 180 degrees.
        corrected = (backsight + 180) % 360
    else:
        # Average the foresight with the reversed backsight.
        corrected = (foresight + (backsight + 180) % 360) / 2.0
    return corrected + self.declination
|
def _get_peering_connection_ids(name, conn):
    '''
    :param name: The name of the VPC peering connection.
    :type name: String
    :param conn: The boto aws ec2 connection.
    :return: The ids associated with this peering connection

    Returns the VPC peering connection ids
    given the VPC peering connection name.
    '''
    # Match on the Name tag and restrict to connections that are still live.
    filters = [
        {'Name': 'tag:Name', 'Values': [name]},
        {'Name': 'status-code',
         'Values': [ACTIVE, PENDING_ACCEPTANCE, PROVISIONING]},
    ]
    described = conn.describe_vpc_peering_connections(Filters=filters)
    return [
        peering['VpcPeeringConnectionId']
        for peering in described.get('VpcPeeringConnections', [])
    ]
|
def get(self):
    """Get a JSON-ready representation of this Mail object.

    :returns: This Mail object, ready for use in a request body.
    :rtype: dict
    """
    mail = {
        'from': self._get_or_none(self.from_email),
        'subject': self._get_or_none(self.subject),
        'personalizations': [p.get() for p in self.personalizations or []],
        'content': [c.get() for c in self.contents or []],
        'attachments': [a.get() for a in self.attachments or []],
        'template_id': self._get_or_none(self.template_id),
        'sections': self._flatten_dicts(self.sections),
        'headers': self._flatten_dicts(self.headers),
        'categories': [c.get() for c in self.categories or []],
        'custom_args': self._flatten_dicts(self.custom_args),
        'send_at': self._get_or_none(self.send_at),
        'batch_id': self._get_or_none(self.batch_id),
        'asm': self._get_or_none(self.asm),
        'ip_pool_name': self._get_or_none(self.ip_pool_name),
        'mail_settings': self._get_or_none(self.mail_settings),
        'tracking_settings': self._get_or_none(self.tracking_settings),
        'reply_to': self._get_or_none(self.reply_to),
    }
    # Drop keys whose values are empty so the request body stays minimal.
    return {
        key: value
        for key, value in mail.items()
        if value is not None and value != [] and value != {}
    }
|
def scan_url(url, apikey):
    """Scan a URL with VirusTotal and log any detections.

    Polls VirusTotal until the report job has finished (or no report
    exists), then logs every scanner that flagged the URL.

    :param url: the URL to look up
    :param apikey: VirusTotal API key
    """
    logger.info('Found what I believe is a URL: %s', url)
    v_api = virus_total.VirusTotal()
    while True:
        url_report = v_api.url_report(url, apikey)
        response_code = url_report['response_code']
        # -2: report does not exist yet, job still queued -- keep polling
        if response_code == -2:
            logger.info('Report job still queued..')
        if response_code == 0:
            logger.info('No report for %s', url)
            break
        if response_code == 1:
            logger.info('Report found, job complete for %s.', url)
            break
    if response_code != 1:
        # Nothing to analyze when VirusTotal has no report for this URL.
        return
    try:
        # BUG FIX: `detected` was previously only assigned inside the loop,
        # so a report with zero hits raised NameError, which the old bare
        # `except: pass` silently swallowed and no result was logged.
        detected = False
        if url_report['positives'] > 0:
            for site, results in url_report['scans'].items():
                if results['detected']:
                    detected = True
                    logger.error("%s is recorded as a %s by %s",
                                 url, results['result'], site)
        if detected:
            logger.error("Full report available here: %s",
                         url_report['permalink'])
        else:
            logger.info("%s is recorded as a clean", url)
    except KeyError:
        # Malformed report payload; previously hidden by a bare except.
        logger.exception('Unexpected VirusTotal report format for %s', url)
|
def packet_synopsis(url_encoded_ivorn=None):
    """Return some key details for the packet specified by IVORN.

    The required IVORN should be appended to the URL after ``/synopsis/``
    in :ref:`URL-encoded <url-encoding>` form.

    Result:
        Nested dict with the packet's basic attributes (``voevent``), its
        citations (``refs``), extracted coordinates (``coords``) and any
        relevant URLs (``relevant_urls``).
    """
    ivorn = validate_ivorn(url_encoded_ivorn)
    voevent_row = db_session.query(Voevent).filter(Voevent.ivorn == ivorn).one()
    cites = db_session.query(Cite).filter(Cite.voevent_id == voevent_row.id).all()
    coords = db_session.query(Coord).filter(Coord.voevent_id == voevent_row.id).all()
    result = {
        'voevent': voevent_row.to_odict(exclude=('id', 'xml')),
        'refs': [c.to_odict(exclude=('id', 'voevent_id')) for c in cites],
        'coords': [c.to_odict(exclude=('id', 'voevent_id')) for c in coords],
        'relevant_urls': lookup_relevant_urls(voevent_row, cites),
    }
    return jsonify(make_response_dict(result))
|
def get_last_content(request, page_id):
    """Get the latest content for a particular type"""
    content_type = request.GET.get('content_type')
    language_id = request.GET.get('language_id')
    page = get_object_or_404(Page, pk=page_id)
    placeholders = get_placeholders(page.get_template())
    _template = template.loader.get_template(page.get_template())
    for placeholder in placeholders:
        if placeholder.name != content_type:
            continue
        context = RequestContext(request,
                                 {'current_page': page, 'lang': language_id})
        # Render inside the page's own template so block context resolves.
        with context.bind_template(_template.template):
            rendered = placeholder.render(context)
        return HttpResponse(rendered)
    raise Http404
|
def get_input_photo(photo):
    """Similar to :meth:`get_input_peer`, but for photos"""
    try:
        # Already an InputPhoto (or subclass): return it untouched.
        if photo.SUBCLASS_OF_ID == 0x846363e0:  # crc32(b'InputPhoto'):
            return photo
    except AttributeError:
        # No SUBCLASS_OF_ID at all: not a TL object we can cast.
        _raise_cast_fail(photo, 'InputPhoto')
    # photos.Photo is a thin wrapper; unwrap to the inner Photo first.
    if isinstance(photo, types.photos.Photo):
        photo = photo.photo
    if isinstance(photo, types.Photo):
        return types.InputPhoto(id=photo.id, access_hash=photo.access_hash,
                                file_reference=photo.file_reference)
    if isinstance(photo, types.PhotoEmpty):
        return types.InputPhotoEmpty()
    # Full chat/channel/user objects: recurse on their photo attribute.
    if isinstance(photo, types.messages.ChatFull):
        photo = photo.full_chat
    if isinstance(photo, types.ChannelFull):
        return get_input_photo(photo.chat_photo)
    elif isinstance(photo, types.UserFull):
        return get_input_photo(photo.profile_photo)
    elif isinstance(photo, (types.Channel, types.Chat, types.User)):
        return get_input_photo(photo.photo)
    # Empty/forbidden peers carry no usable photo.
    if isinstance(photo, (types.UserEmpty, types.ChatEmpty,
                          types.ChatForbidden, types.ChannelForbidden)):
        return types.InputPhotoEmpty()
    _raise_cast_fail(photo, 'InputPhoto')
|
def unpack_value(self, tup_tree):
    """Find VALUE or VALUE.ARRAY under tup_tree and convert to a Python value.

    Looks at the TYPE of the node to work out how to decode it.
    Handles nodes with no value (e.g. when representing NULL by omitting
    VALUE).
    """
    valtype = attrs(tup_tree)['TYPE']
    matches = self.list_of_matching(tup_tree, ('VALUE', 'VALUE.ARRAY'))
    if not matches:
        # NULL is represented by omitting the VALUE element entirely.
        return None
    if len(matches) > 1:
        raise CIMXMLParseError(
            _format("Element {0!A} has too many child elements {1!A} "
                    "(allowed is one of 'VALUE' or 'VALUE.ARRAY')",
                    name(tup_tree)),
            conn_id=self.conn_id)
    value = matches[0]
    # A VALUE.ARRAY yields a plain list (exact type check is deliberate).
    if type(value) == list:  # pylint: disable=unidiomatic-typecheck
        return [self.unpack_single_value(item, valtype) for item in value]
    return self.unpack_single_value(value, valtype)
|
def paginate_resources(cls, request, resources, on_fail_status):
    """Truncates a list of resources based on ClientPagingControls.

    Args:
        request (object): The parsed protobuf request object
        resources (list of objects): The resources to be paginated
        on_fail_status: status used when the paging controls are invalid

    Returns:
        list: The paginated list of resources
        object: The ClientPagingResponse to be sent back to the client
    """
    if not resources:
        return resources, client_list_control_pb2.ClientPagingResponse()

    paging = request.paging
    limit = min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE

    # Resolve the start marker to an index; any invalid marker (including
    # AssertionErrors raised while resolving it) fails the response.
    try:
        start = cls.index_by_id(paging.start, resources) if paging.start else 0
        if start < 0 or start >= len(resources):
            raise AssertionError
    except AssertionError:
        raise _ResponseFailed(on_fail_status)

    page = resources[start:start + limit]
    next_index = start + limit
    if next_index < len(resources):
        paging_response = client_list_control_pb2.ClientPagingResponse(
            next=cls.id_by_index(next_index, resources),
            start=cls.id_by_index(start, resources),
            limit=limit)
    else:
        # Last page: no `next` marker.
        paging_response = client_list_control_pb2.ClientPagingResponse(
            start=cls.id_by_index(start, resources),
            limit=limit)
    return page, paging_response
|
def _services(lancet):
    """List all currently configured services."""
    def configured_services(config):
        # A section counts as a service when it has both a URL and a username.
        for section in config.sections():
            if config.has_option(section, 'url') and config.has_option(section, 'username'):
                yield section

    for section in configured_services(lancet.config):
        click.echo('{}[Logout from {}]'.format(
            section, lancet.config.get(section, 'url')))
|
def find_by_typename(self, typename):
    """List of all objects whose type has the given name."""
    def matches(obj):
        return type(obj).__name__ == typename
    return self.find_by(matches)
|
def get_mv_impedance(grid):
    """Determine MV grid impedance (resistance and reactance separately).

    Parameters
    ----------
    grid : LVGridDing0

    Returns
    -------
    :any:`list`
        List containing resistance and reactance of MV grid
    """
    omega = 2 * math.pi * 50  # angular frequency at 50 Hz
    mv_grid = grid.grid_district.lv_load_area.mv_grid_district.mv_grid
    edges = mv_grid.find_path(grid._station, mv_grid._station, type='edges')
    r_total = 0.0
    x_total = 0.0
    # Accumulate R and X along the path; branch lengths are stored in metres,
    # hence the /1e3 conversions to km (R) and to henry-per-km scaling (X).
    for edge in edges:
        branch = edge[2]['branch']
        length_km = branch.length / 1e3
        r_total += branch.type['R'] * length_km
        x_total += branch.type['L'] / 1e3 * omega * length_km
    return [r_total, x_total]
|
def tag_provinces(tokens: List[str]) -> List[Tuple[str, str]]:
    """Recognize Thailand provinces in text.

    Input is a list of words; returns a list of (word, tag) tuples where
    province names are tagged ``B-LOCATION`` and everything else ``O``.

    Example::
        >>> text = ['หนองคาย', 'น่าอยู่']
        >>> tag_provinces(text)
        [('หนองคาย', 'B-LOCATION'), ('น่าอยู่', 'O')]
    """
    # Build a set once so each token's membership test is O(1) instead of
    # scanning the whole province list per token.
    province_set = set(provinces())
    return [
        (token, "B-LOCATION" if token in province_set else "O")
        for token in tokens
    ]
|
def _binary_exp(expression, op):
    # type: (QuilParser.ExpressionContext, Callable) -> Number
    """Apply an operator to two expressions. Start by evaluating both sides of the operator."""
    lhs, rhs = expression.expression()
    return op(_expression(lhs), _expression(rhs))
|
def _configure_buffer_sizes():
    """Set up module globals controlling buffer sizes.

    Sets PIPE_BUF_BYTES (default 64 KiB) and OS_PIPE_SZ (None when the OS
    maximum cannot be determined), probing Linux procfs when available.
    """
    global PIPE_BUF_BYTES
    global OS_PIPE_SZ
    PIPE_BUF_BYTES = 65536
    OS_PIPE_SZ = None
    # Teach the 'fcntl' module about 'F_SETPIPE_SZ', which is a Linux-ism,
    # but a good one that can drastically reduce the number of syscalls
    # when dealing with high-throughput pipes.
    if not hasattr(fcntl, 'F_SETPIPE_SZ'):
        import platform
        if platform.system() == 'Linux':
            # Hard-coded value of F_SETPIPE_SZ from the Linux headers.
            fcntl.F_SETPIPE_SZ = 1031
    # If Linux procfs (or something that looks like it) exposes its
    # maximum F_SETPIPE_SZ, adjust the default buffer sizes.
    try:
        with open('/proc/sys/fs/pipe-max-size', 'r') as f:
            # Figure out OS pipe size, but in case it is unusually large
            # or small restrain it to sensible values.
            OS_PIPE_SZ = min(int(f.read()), 1024 * 1024)
            PIPE_BUF_BYTES = max(OS_PIPE_SZ, PIPE_BUF_BYTES)
    except Exception:
        # Best-effort probe: keep the defaults when procfs is unavailable.
        pass
|
def _sub_hostname ( self , line ) :
'''This will replace the exact hostname and all instances of the domain name with the obfuscated alternatives .
Example :'''
|
try :
for od , d in self . dn_db . items ( ) : # regex = re . compile ( r ' \ w * \ . % s ' % d )
regex = re . compile ( r'(?![\W\-\:\ \.])[a-zA-Z0-9\-\_\.]*\.%s' % d )
hostnames = [ each for each in regex . findall ( line ) ]
if len ( hostnames ) > 0 :
for hn in hostnames :
new_hn = self . _hn2db ( hn )
self . logger . debug ( "Obfuscating FQDN - %s > %s" , hn , new_hn )
line = line . replace ( hn , new_hn )
if self . hostname :
line = line . replace ( self . hostname , self . _hn2db ( self . hostname ) )
# catch any non - fqdn instances of the system hostname
return line
except Exception as e : # pragma : no cover
self . logger . exception ( e )
raise Exception ( 'SubHostnameError: Unable to Substitute Hostname/Domainname' )
|
def sleep(self, seconds):
    """Sleep in simulated time."""
    started_at = self.time()
    while True:
        # Check elapsed simulated time first, then the stop flag, matching
        # the original short-circuit evaluation order.
        if self.time() - started_at >= seconds:
            break
        if self.need_to_stop.is_set():
            break
        self.need_to_stop.wait(self.sim_time)
|
def since(version):
    """A decorator that annotates a function to append the version of Spark the function was added."""
    import re
    leading_spaces = re.compile(r'\n( +)')

    def deco(f):
        # Match the docstring's smallest existing indentation so the
        # appended directive lines up with the rest of the text.
        matches = leading_spaces.findall(f.__doc__)
        pad = ' ' * (min(map(len, matches)) if matches else 0)
        f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (pad, version)
        return f
    return deco
|
def unfreeze(name):
    '''Unfreezes the container.'''
    if not exists(name):
        raise ContainerNotExists("The container (%s) does not exist!" % name)
    # Delegate to the lxc tooling; raises CalledProcessError on failure.
    subprocess.check_call(['lxc-unfreeze', '-n', name])
|
def autodoc_event_handlers(stream=sys.stdout):
    """Print to the given stream the documentation for the events
    and the associated handlers."""
    lines = []
    for handler_cls in all_subclasses(EventHandler):
        if handler_cls in _ABC_EVHANDLER_CLASSES:
            continue
        event_class = handler_cls.event_class
        lines.extend(handler_cls.cls2str().split("\n"))
        # Here we enforce the abstract protocol of the class.
        # The unit test in tests_events will detect the problem.
        if not hasattr(handler_cls, "can_change_physics"):
            raise RuntimeError("%s: can_change_physics must be defined" % handler_cls)
    stream.write("\n".join(lines) + "\n")
|
async def deregister(self, node, *, check=None, service=None, write_token=None):
    """Deregisters a node, service or check.

    Parameters:
        node (Object or ObjectID): Node
        check (ObjectID): Check ID
        service (ObjectID): Service ID
        write_token (ObjectID): Token ID
    Returns:
        bool: ``True`` on success

    The behavior of the endpoint depends on what keys are provided.
    **Node** is required; **Datacenter** defaults to that of the agent.
    If only **Node** is provided, the node and all associated services
    and checks are deleted. If **CheckID** is provided, only that check
    is removed. If **ServiceID** is provided, the service and its
    associated health check (if any) are removed. An optional ACL token
    may be provided via a **WriteRequest** block, e.g.::

        {"WriteRequest": {"Token": "foo"}}
    """
    if isinstance(node, str):
        payload = {"Node": node}
    else:
        # Copy over only the keys the endpoint understands.
        payload = {
            k: node[k]
            for k in ("Datacenter", "Node", "CheckID", "ServiceID", "WriteRequest")
            if k in node
        }
    service_id = extract_attr(service, keys=["ServiceID", "ID"])
    check_id = extract_attr(check, keys=["CheckID", "ID"])
    if service_id and check_id:
        # Checks belonging to a service are addressed as "service:check".
        payload["CheckID"] = "%s:%s" % (service_id, check_id)
    elif service_id:
        payload["ServiceID"] = service_id
    elif check_id:
        payload["CheckID"] = check_id
    if write_token:
        payload["WriteRequest"] = {"Token": extract_attr(write_token, keys=["ID"])}
    response = await self._api.put("/v1/catalog/deregister", data=payload)
    return response.status == 200
|
def FindDevice(self, address):
    '''Find a specific device by bluetooth address.'''
    target = dbus.String(address, variant_level=1)
    for path in mockobject.objects.keys():
        # Device object paths live under /org/bluez/ and contain 'dev_'.
        if not path.startswith('/org/bluez/') or 'dev_' not in path:
            continue
        device = mockobject.objects[path]
        if device.props[DEVICE_IFACE]['Address'] == target:
            return path
    raise dbus.exceptions.DBusException('No such device.',
                                        name='org.bluez.Error.NoSuchDevice')
|
async def _raise_for_status(response):
    """Raise an appropriate error for a given response.

    Arguments:
        response (:py:class:`aiohttp.ClientResponse`): The API response.

    Raises:
        :py:class:`aiohttp.ClientResponseError`: The appropriate
            error for the response's status, enriched with any
            Space-Track error message found in the body.

    This function was taken from the aslack project and modified. The original
    copyright notice:

        Copyright (c) 2015, Jonathan Sharpe

        Permission to use, copy, modify, and/or distribute this software for any
        purpose with or without fee is hereby granted, provided that the above
        copyright notice and this permission notice appear in all copies.

        THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
        WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
        MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
        ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
        WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
        ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
        OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    """
    try:
        response.raise_for_status()
    except aiohttp.ClientResponseError as exc:
        reason = response.reason
        # Space-Track may return a JSON body with an 'error' field on
        # failures; fall back to the raw body text when it is absent or
        # the body cannot be parsed.
        spacetrack_error_msg = None
        try:
            json = await response.json()
            if isinstance(json, Mapping):
                spacetrack_error_msg = json['error']
        except (ValueError, KeyError, aiohttp.ClientResponseError):
            pass
        if not spacetrack_error_msg:
            spacetrack_error_msg = await response.text()
        if spacetrack_error_msg:
            reason += '\nSpace-Track response:\n' + spacetrack_error_msg
        payload = dict(
            code=response.status,
            message=reason,
            headers=response.headers,
        )
        # history attribute is only aiohttp >= 2.1
        try:
            payload['history'] = exc.history
        except AttributeError:
            pass
        raise aiohttp.ClientResponseError(**payload)
|
def sma(self, n, array=False):
    """Simple moving average."""
    values = talib.SMA(self.close, n)
    # Return the whole series when requested, otherwise the latest value.
    return values if array else values[-1]
|
def getATR(self):
    """Return card ATR"""
    CardConnection.getATR(self)
    if self.hcard is None:
        raise CardConnectionException('Card not connected')
    # SCardStatus returns (result, reader, state, protocol, atr).
    hresult, reader, state, protocol, atr = SCardStatus(self.hcard)
    if hresult != 0:
        raise CardConnectionException(
            'Failed to get status: ' + SCardGetErrorMessage(hresult))
    return atr
|
def enforce_git_config(self):
    '''For the config options which need to be maintained in the git config,
    ensure that the git config file is configured as desired.

    Enforces the fetch URL, fetch refspecs, and http.sslVerify for this
    remote, writing the git config file back only when something changed.
    '''
    git_config = os.path.join(self.gitdir, 'config')
    conf = salt.utils.configparser.GitConfigParser()
    if not conf.read(git_config):
        log.error('Failed to read from git config file %s', git_config)
    else:
        # We are currently enforcing the following git config items:
        # 1. Fetch URL
        # 2. refspecs used in fetch
        # 3. http.sslVerify
        conf_changed = False
        remote_section = 'remote "origin"'
        # 1. URL
        try:
            url = conf.get(remote_section, 'url')
        except salt.utils.configparser.NoSectionError:
            # First time we've init'ed this repo, we need to add the
            # section for the remote to the git config
            conf.add_section(remote_section)
            conf_changed = True
            url = None
        log.debug('Current fetch URL for %s remote \'%s\': %s (desired: %s)', self.role, self.id, url, self.url)
        if url != self.url:
            conf.set(remote_section, 'url', self.url)
            log.debug('Fetch URL for %s remote \'%s\' set to %s', self.role, self.id, self.url)
            conf_changed = True
        # 2. refspecs (compared order-insensitively via sorting)
        try:
            refspecs = sorted(conf.get(remote_section, 'fetch', as_list=True))
        except salt.utils.configparser.NoOptionError:
            # No 'fetch' option present in the remote section. Should never
            # happen, but if it does for some reason, don't let it cause a
            # traceback.
            refspecs = []
        desired_refspecs = sorted(self.refspecs)
        log.debug('Current refspecs for %s remote \'%s\': %s (desired: %s)', self.role, self.id, refspecs, desired_refspecs)
        if refspecs != desired_refspecs:
            conf.set_multivar(remote_section, 'fetch', self.refspecs)
            log.debug('Refspecs for %s remote \'%s\' set to %s', self.role, self.id, desired_refspecs)
            conf_changed = True
        # 3. http.sslVerify (git stores booleans as lowercase text)
        try:
            ssl_verify = conf.get('http', 'sslVerify')
        except salt.utils.configparser.NoSectionError:
            conf.add_section('http')
            ssl_verify = None
        except salt.utils.configparser.NoOptionError:
            ssl_verify = None
        desired_ssl_verify = six.text_type(self.ssl_verify).lower()
        log.debug('Current http.sslVerify for %s remote \'%s\': %s (desired: %s)', self.role, self.id, ssl_verify, desired_ssl_verify)
        if ssl_verify != desired_ssl_verify:
            conf.set('http', 'sslVerify', desired_ssl_verify)
            log.debug('http.sslVerify for %s remote \'%s\' set to %s', self.role, self.id, desired_ssl_verify)
            conf_changed = True
        # Write changes, if necessary
        if conf_changed:
            with salt.utils.files.fopen(git_config, 'w') as fp_:
                conf.write(fp_)
            log.debug('Config updates for %s remote \'%s\' written to %s', self.role, self.id, git_config)
|
def _handle_blacklisted_tag(self):
    """Handle the body of an HTML tag that is parser-blacklisted.

    Consumes text verbatim (parsing only entities) until the matching
    closing tag is found, then emits the close-tag tokens and pops the
    current stack context.
    """
    # Compare tag names case-insensitively, ignoring trailing whitespace.
    strip = lambda text: text.rstrip().lower()
    while True:
        this, next = self._read(), self._read(1)
        if this is self.END:
            # Ran off the end of input without a closing tag.
            self._fail_route()
        elif this == "<" and next == "/":
            self._head += 3
            # Only a well-formed "</name>" matching the open tag closes us;
            # otherwise rewind and emit the "</" as literal text.
            if self._read() != ">" or (strip(self._read(-1)) != strip(self._stack[1].text)):
                self._head -= 1
                self._emit_text("</")
                continue
            self._emit(tokens.TagOpenClose())
            self._emit_text(self._read(-1))
            self._emit(tokens.TagCloseClose())
            return self._pop()
        elif this == "&":
            self._parse_entity()
        else:
            self._emit_text(this)
        self._head += 1
|
def stop(self):
    """Stop the workers (will block until they are finished)."""
    if not (self.running and self.num_workers):
        return
    # One sentinel per worker tells each thread to exit its loop.
    for _ in self.workers:
        self.queue.put(None)
    for worker in self.workers:
        worker.join()
    # Free up references held by workers
    del self.workers[:]
    self.queue.join()
    self.running = False
|
def setTitle(self, title):
    """Sets the title for this page to the inputed title.

    :param title: <str>
    """
    label = self._titleLabel
    label.setText(title)
    label.adjustSize()
    # Re-layout so the margins account for the new title size.
    self.adjustMargins()
|
def _separate_header_and_content ( self , text_lines ) :
"""From a given Org text , return the header separate from the content .
The given text must be separate line by line and be a list .
The return is a list of two items : header and content .
Theses two items are text separate line by line in format of a list
Keyword Arguments :
text _ lines - - A list , each item is a line of the texte
Return :
header - - A list , each item is a line of the texte
content - - A list , each item is a line of the texte"""
|
no_more_header = False
expr_metadata = re . compile ( r'^#\+[a-zA-Z]+:.*' )
header = [ ]
content = [ ]
for line in text_lines :
metadata = expr_metadata . match ( line )
if metadata and not no_more_header :
header . append ( line )
else :
no_more_header = True
content . append ( line )
return header , content
|
def extension(names):
    """Makes a function to be an extension."""
    # Validate every requested name up front; report the first invalid one.
    bad = next((n for n in names if not NAME_PATTERN.match(n)), None)
    if bad is not None:
        raise ValueError('invalid extension name: %s' % bad)

    def decorator(f, names=names):
        return Extension(f, names=names)
    return decorator
|
def _transport_interceptor(self, callback):
    """Takes a callback function and returns a function that takes headers and
    messages and places them on the main service queue."""
    def add_item_to_queue(header, message):
        # The insertion sequence number keeps messages in order within the
        # same priority level.
        seq = next(self._transport_interceptor_counter)
        self.__queue.put((Priority.TRANSPORT, seq, (callback, header, message)))
        # Block incoming transport until insertion completes
    return add_item_to_queue
|
def loadTargetState(targetStateConfig, existingTargetState=None):
    """Extracts a new TargetState object from the specified configuration.

    :param targetStateConfig: the config dict.
    :param existingTargetState: the existing state to update in place, or
        None to create a fresh TargetState.
    :return: the (possibly updated) TargetState.
    """
    from analyser.common.targetstatecontroller import TargetState
    targetState = TargetState() if existingTargetState is None else existingTargetState
    # FIXFIX validate
    if targetStateConfig is not None:
        # Copy each recognised key onto the state, skipping absent ones;
        # replaces six copy-pasted get/if blocks with one loop.
        for attr in ('fs', 'samplesPerBatch', 'gyroEnabled', 'gyroSens',
                     'accelerometerEnabled', 'accelerometerSens'):
            val = targetStateConfig.get(attr)
            if val is not None:
                setattr(targetState, attr, val)
    return targetState
|
def tokenize_words(string):
    """Tokenize input text to words.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: words
    :rtype: list of strings
    """
    text = six.text_type(string)
    # NOTE: WORD_TOKENIZATION_RULES may be a pattern string or a compiled
    # regex; re.findall accepts both, so keep the module-level call.
    return re.findall(WORD_TOKENIZATION_RULES, text)
|
def parse(cls, parser, token):
    """Parse the node syntax:

    .. code-block:: html+django

        {% page_placeholder parentobj slotname title="test" role="m" %}
    """
    bits, as_var = parse_as_var(parser, token)
    tag_name, args, kwargs = parse_token_kwargs(
        parser, bits,
        allowed_kwargs=cls.allowed_kwargs,
        compile_args=True, compile_kwargs=True)
    # Resolve positional arguments: either (parent, slot) or just (slot).
    if len(args) == 2:
        parent_expr, slot_expr = args
    elif len(args) == 1:
        # Allow 'page' by default. Works with most CMSes, including
        # django-fluent-pages.
        parent_expr = Variable('page')
        slot_expr = args[0]
    else:
        raise TemplateSyntaxError(
            """{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=".." role="..".""".format(tag_name))
    cls.validate_args(tag_name, *args, **kwargs)
    return cls(tag_name=tag_name, as_var=as_var, parent_expr=parent_expr,
               slot_expr=slot_expr, **kwargs)
|
def load_rdd_from_pickle(sc, path, min_partitions=None, return_type='images'):
    """Loads an rdd that was saved as one pickle file per partition.

    :param sc: Spark Context
    :param path: directory to load from
    :param min_partitions: minimum number of partitions.
        If None will be sc.defaultParallelism
    :param return_type: what to return:
        'rdd' - RDD
        'images' - Thunder Images object
        'series' - Thunder Series object
    :return: based on return_type.
    """
    if min_partitions is None:
        min_partitions = sc.defaultParallelism
    # Each pickle file holds a list of records; flatten back to records.
    rdd = sc.pickleFile(path, minPartitions=min_partitions).flatMap(lambda part: part)
    if return_type == 'images':
        result = td.images.fromrdd(rdd).repartition(min_partitions)
    elif return_type == 'series':
        result = td.series.fromrdd(rdd).repartition(min_partitions)
    elif return_type == 'rdd':
        result = rdd.repartition(min_partitions)
    else:
        raise ValueError('return_type not supported: %s' % return_type)
    logging.getLogger('pySparkUtils').info(
        'Loaded rdd from: %s as type: %s' % (path, return_type))
    return result
|
def find_node(self, node_list_pyxb, base_url):
    """Search NodeList for Node that has {base_url}.

    Return matching Node or None.
    """
    return next(
        (n for n in node_list_pyxb.node if n.baseURL == base_url),
        None,
    )
|
def __get_ws_distance(wstation, latitude, longitude):
    """Get the distance to the weather station from wstation section of xml.

    wstation: weerstation section of buienradar xml (dict)
    latitude: our latitude
    longitude: our longitude
    """
    if not wstation:
        return None
    try:
        station_lat = float(wstation[__BRLAT])
        station_lon = float(wstation[__BRLON])
        dist = vincenty((latitude, longitude), (station_lat, station_lon))
        log.debug("calc distance: %s (latitude: %s, longitude: "
                  "%s, wslat: %s, wslon: %s)", dist, latitude, longitude,
                  station_lat, station_lon)
        return dist
    except (ValueError, TypeError, KeyError):
        # value does not exist, or is not a float
        return None
|
def get_dirinfo(self, name: str):
    '''Get a `DirectoryInfo` for a child directory (without creating the actual directory).'''
    child_path = os.path.join(self._path, name)
    return DirectoryInfo(child_path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.