signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def prt_goids(self, goids=None, prtfmt=None, sortby=True, prt=sys.stdout):
    """Print descriptive information about each of the given GO IDs.

    Falls back to ``self.go_sources`` when no IDs are given and to the
    ``'fmta'`` format string from ``self.prt_attr`` when no format is given.
    Returns the list of namedtuples that were printed.
    """
    if goids is None:
        goids = self.go_sources
    nts = self.get_nts(goids, sortby)
    fmt = self.prt_attr['fmta'] if prtfmt is None else prtfmt
    for nt_go in nts:
        # Each record is a namedtuple; expand its fields into the format string.
        prt.write("{GO}\n".format(GO=fmt.format(**nt_go._asdict())))
    return nts
async def proposal(self):
    """Get the proposal in question.

    Actually just the first proposal with the same name, but the
    chance of a collision is tiny.

    Returns
    -------
    awaitable of :class:`aionationstates.Proposal`
        The proposal submitted.

    Raises
    ------
    aionationstates.NotFound
        If the proposal has since been withdrawn or promoted.
    """
    for candidate in await aionationstates.wa.proposals():
        if candidate.name == self.proposal_name:
            return candidate
    # Name matched nothing currently listed: it is gone from the queue.
    raise aionationstates.NotFound
def load_dictionary(self, filename, encoding="utf-8"):
    """Load in a pre-built word frequency list.

    Args:
        filename (str): The filepath to the json (optionally gzipped) file to be loaded
        encoding (str): The encoding used to read the dictionary file
    """
    with load_file(filename, encoding) as data:
        # ``data`` is already a decoded str from load_file(), so json.loads()
        # needs no encoding hint; its ``encoding`` kwarg was deprecated in
        # Python 3.1 and removed in 3.9 (it raised TypeError there).
        self._dictionary.update(json.loads(data.lower()))
        self._update_dictionary()
def srandmember(self, name, number=None):
    """Emulate srandmember.

    With no ``number``: return one random member (or None if the set is
    empty/missing). Positive ``number``: that many distinct members, capped
    at the set size. Negative ``number``: ``abs(number)`` members, possibly
    repeating.
    """
    redis_set = self._get_set(name, 'SRANDMEMBER')
    if not redis_set:
        return None if number is None else []
    if number is None:
        return choice(list(redis_set))
    elif number > 0:
        # Distinct members only, never more than the set holds.
        return sample(list(redis_set), min(number, len(redis_set)))
    else:
        # Members may repeat for a negative count.
        # (Fixed: ``xrange`` is Python 2 only and raised NameError on
        # Python 3; ``range`` is equivalent here.)
        return [choice(list(redis_set)) for _ in range(abs(number))]
def get_config_node(self):
    '''get_config_node

    High-level api: get_config_node returns an Element node in the config
    tree, which is corresponding to the URL in the Restconf GET reply.

    Returns
    -------
    tuple
        ``(config_node_parent, config_node)`` — the code returns both the
        copied parent of the deepest node and the deepest node itself.
        (NOTE(review): the original doc claimed a single Element; confirm
        which shape callers rely on.)

    NOTE(review): if ``self._url_pieces`` is empty, ``config_node_parent``
    is never bound and the final return raises NameError — presumably URLs
    always have at least one piece; confirm.
    '''
    default_ns = ''
    # Root <config> element in the netconf namespace.
    config_node = etree.Element(config_tag, nsmap={'nc': nc_url})
    for index, url_piece in enumerate(self._url_pieces):
        if index == len(self._url_pieces) - 1:
            # Last URL piece: snapshot the tree above the node we are about
            # to create, before descending into it.
            config_node_parent = self.copy(config_node)
        node_name, values = self.parse_url_piece(url_piece)
        # Translate the JSON-style name into an lxml.etree tag, tracking the
        # default namespace across pieces.
        default_ns, tag = self.convert_tag(default_ns, node_name,
                                           src=Tag.JSON_NAME,
                                           dst=Tag.LXML_ETREE)
        config_node = self.subelement(config_node, tag, None)
        schema_node = self.device.get_schema_node(config_node)
        if schema_node.get('type') == 'leaf-list' and len(values) > 0:
            # Leaf-list entry: the (single) value becomes the node text,
            # namespace-prefixed when the value carries a model name.
            model_name, text_value = self.get_name(values[0])
            if model_name:
                prefix = self._name_to_prefix[model_name]
                config_node.text = '{}:{}'.format(prefix, text_value)
            else:
                config_node.text = text_value
        elif schema_node.get('type') == 'list' and len(values) > 0:
            # List entry: emit one key child per (key tag, value) pair.
            key_tags = BaseCalculator._get_list_keys(schema_node)
            for key_tag, value in zip(key_tags, values):
                # Child is attached as a side effect; the returned handle is
                # unused.
                key = self.subelement(config_node, key_tag, value)
    return config_node_parent, config_node
def _validate_path(validation_context, path, end_entity_name_override=None):
    """
    Internal copy of validate_path() that allows overriding the name of the
    end-entity certificate as used in exception messages. This functionality is
    used during chain validation when dealing with indirect CRLs issuer or
    OCSP responder certificates.

    :param validation_context:
        A certvalidator.context.ValidationContext object to use for
        configuring validation behavior

    :param path:
        A certvalidator.path.ValidationPath object of the path to validate

    :param end_entity_name_override:
        A unicode string of the name to use for the final certificate in the
        path. This is necessary when dealing with indirect CRL issuers or
        OCSP responder certificates.

    :return:
        The final certificate in the path - an instance of
        asn1crypto.x509.Certificate

    NOTE(review): the step numbering in the comments follows the RFC 5280
    section 6.1 certification path processing algorithm — confirm against
    that spec when modifying.
    """
    if not isinstance(path, ValidationPath):
        raise TypeError(pretty_message(
            '''
            path must be an instance of certvalidator.path.ValidationPath,
            not %s
            ''',
            type_name(path)
        ))

    if not isinstance(validation_context, ValidationContext):
        raise TypeError(pretty_message(
            '''
            validation_context must be an instance of
            certvalidator.context.ValidationContext, not %s
            ''',
            type_name(validation_context)
        ))

    # The point in time at which validity windows are evaluated.
    moment = validation_context.moment

    if end_entity_name_override is not None and not isinstance(end_entity_name_override, str_cls):
        raise TypeError(pretty_message(
            '''
            end_entity_name_override must be a unicode string, not %s
            ''',
            type_name(end_entity_name_override)
        ))

    # Inputs

    trust_anchor = path.first

    # We skip the trust anchor when measuring the path since technically
    # the trust anchor is not part of the path
    path_length = len(path) - 1

    # We don't accept any certificate policy or name constraint values as input
    # and instead just start allowing everything during initialization

    # Step 1: initialization

    # Step 1 a
    valid_policy_tree = PolicyTreeRoot('any_policy', set(), set(['any_policy']))

    # Steps 1 b-c skipped since they relate to name constraints

    # Steps 1 d-f
    # We do not use initial-explicit-policy, initial-any-policy-inhibit or
    # initial-policy-mapping-inhibit, so they are all set to the path length + 1
    explicit_policy = path_length + 1
    inhibit_any_policy = path_length + 1
    policy_mapping = path_length + 1

    # Steps 1 g-i
    working_public_key = trust_anchor.public_key

    # Step 1 j
    working_issuer_name = trust_anchor.subject

    # Step 1 k
    max_path_length = path_length
    if trust_anchor.max_path_length is not None:
        max_path_length = trust_anchor.max_path_length

    # Step 2: basic processing
    index = 1
    last_index = len(path) - 1

    completed_path = ValidationPath(trust_anchor)
    validation_context.record_validation(trust_anchor, completed_path)

    # Walk every certificate after the trust anchor, ending at the end-entity
    # certificate (index == last_index).
    cert = trust_anchor
    while index <= last_index:
        cert = path[index]

        # Step 2 a 1
        signature_algo = cert['signature_algorithm'].signature_algo
        hash_algo = cert['signature_algorithm'].hash_algo

        if hash_algo in validation_context.weak_hash_algos:
            raise PathValidationError(pretty_message(
                '''
                The path could not be validated because the signature of %s
                uses the weak hash algorithm %s
                ''',
                _cert_type(index, last_index, end_entity_name_override, definite=True),
                hash_algo
            ))

        if signature_algo == 'rsassa_pkcs1v15':
            verify_func = asymmetric.rsa_pkcs1v15_verify
        elif signature_algo == 'dsa':
            verify_func = asymmetric.dsa_verify
        elif signature_algo == 'ecdsa':
            verify_func = asymmetric.ecdsa_verify
        else:
            raise PathValidationError(pretty_message(
                '''
                The path could not be validated because the signature of %s
                uses the unsupported algorithm %s
                ''',
                _cert_type(index, last_index, end_entity_name_override, definite=True),
                signature_algo
            ))

        # Verify this certificate's signature against the issuer key carried
        # forward from the previous iteration (initially the trust anchor's).
        try:
            key_object = asymmetric.load_public_key(working_public_key)
            verify_func(key_object, cert['signature_value'].native, cert['tbs_certificate'].dump(), hash_algo)
        except (oscrypto.errors.SignatureError):
            raise PathValidationError(pretty_message(
                '''
                The path could not be validated because the signature of %s
                could not be verified
                ''',
                _cert_type(index, last_index, end_entity_name_override, definite=True)
            ))

        # Step 2 a 2
        # Whitelisted certificates skip the validity-window check entirely.
        if not validation_context.is_whitelisted(cert):
            validity = cert['tbs_certificate']['validity']
            if moment < validity['not_before'].native:
                raise PathValidationError(pretty_message(
                    '''
                    The path could not be validated because %s is not valid
                    until %s
                    ''',
                    _cert_type(index, last_index, end_entity_name_override, definite=True),
                    validity['not_before'].native.strftime('%Y-%m-%d %H:%M:%SZ')
                ))
            if moment > validity['not_after'].native:
                raise PathValidationError(pretty_message(
                    '''
                    The path could not be validated because %s expired %s
                    ''',
                    _cert_type(index, last_index, end_entity_name_override, definite=True),
                    validity['not_after'].native.strftime('%Y-%m-%d %H:%M:%SZ')
                ))

        # Step 2 a 3 - CRL/OCSP
        if not validation_context._skip_revocation_checks:
            status_good = False
            revocation_check_failed = False
            matched = False
            soft_fail = False
            failures = []

            # OCSP is tried first when the certificate advertises OCSP URLs,
            # or unconditionally when revocation_mode == 'require'.
            if cert.ocsp_urls or validation_context.revocation_mode == 'require':
                try:
                    verify_ocsp_response(
                        cert,
                        path,
                        validation_context,
                        cert_description=_cert_type(index, last_index, end_entity_name_override, definite=True),
                        end_entity_name_override=end_entity_name_override
                    )
                    status_good = True
                    matched = True
                except (OCSPValidationIndeterminateError) as e:
                    failures.extend([failure[0] for failure in e.failures])
                    revocation_check_failed = True
                    matched = True
                except (SoftFailError):
                    soft_fail = True
                except (OCSPNoMatchesError):
                    pass

            # Fall back to CRLs when OCSP did not deliver a good status.
            if not status_good and (cert.crl_distribution_points or validation_context.revocation_mode == 'require'):
                try:
                    cert_description = _cert_type(index, last_index, end_entity_name_override, definite=True)
                    verify_crl(
                        cert,
                        path,
                        validation_context,
                        cert_description=cert_description,
                        end_entity_name_override=end_entity_name_override
                    )
                    revocation_check_failed = False
                    status_good = True
                    matched = True
                except (CRLValidationIndeterminateError) as e:
                    failures.extend([failure[0] for failure in e.failures])
                    revocation_check_failed = True
                    matched = True
                except (SoftFailError):
                    soft_fail = True
                except (CRLNoMatchesError):
                    pass

            # Soft failures (e.g. fetch problems the context tolerates) do not
            # fail the path; hard failures below do.
            if not soft_fail:
                if not matched and validation_context.revocation_mode == 'require':
                    raise PathValidationError(pretty_message(
                        '''
                        The path could not be validated because no revocation
                        information could be found for %s
                        ''',
                        _cert_type(index, last_index, end_entity_name_override, definite=True)
                    ))
                if not status_good and revocation_check_failed:
                    raise PathValidationError(pretty_message(
                        '''
                        The path could not be validated because the %s revocation
                        checks failed: %s
                        ''',
                        _cert_type(index, last_index, end_entity_name_override),
                        '; '.join(failures)
                    ))

        # Step 2 a 4
        if cert.issuer != working_issuer_name:
            raise PathValidationError(pretty_message(
                '''
                The path could not be validated because the %s issuer name
                could not be matched
                ''',
                _cert_type(index, last_index, end_entity_name_override),
            ))

        # Steps 2 b-c skipped since they relate to name constraints

        # Steps 2 d
        if cert.certificate_policies_value and valid_policy_tree is not None:
            cert_any_policy = None
            cert_policy_identifiers = set()

            # Step 2 d 1
            for policy in cert.certificate_policies_value:
                policy_identifier = policy['policy_identifier'].native

                if policy_identifier == 'any_policy':
                    cert_any_policy = policy
                    continue

                cert_policy_identifiers.add(policy_identifier)

                policy_qualifiers = policy['policy_qualifiers']

                policy_id_match = False
                parent_any_policy = None

                # Step 2 d 1 i
                for node in valid_policy_tree.at_depth(index - 1):
                    if node.valid_policy == 'any_policy':
                        parent_any_policy = node
                    if policy_identifier not in node.expected_policy_set:
                        continue
                    policy_id_match = True
                    node.add_child(policy_identifier, policy_qualifiers, set([policy_identifier]))

                # Step 2 d 1 ii
                if not policy_id_match and parent_any_policy:
                    parent_any_policy.add_child(policy_identifier, policy_qualifiers, set([policy_identifier]))

            # Step 2 d 2
            if cert_any_policy and (inhibit_any_policy > 0 or (index < path_length and cert.self_issued)):
                for node in valid_policy_tree.at_depth(index - 1):
                    for expected_policy_identifier in node.expected_policy_set:
                        if expected_policy_identifier not in cert_policy_identifiers:
                            node.add_child(
                                expected_policy_identifier,
                                cert_any_policy['policy_qualifiers'],
                                set([expected_policy_identifier])
                            )

            # Step 2 d 3
            # Prune childless nodes bottom-up; an empty root kills the tree.
            for node in valid_policy_tree.walk_up(index - 1):
                if not node.children:
                    node.parent.remove_child(node)
            if len(valid_policy_tree.children) == 0:
                valid_policy_tree = None

        # Step 2 e
        if cert.certificate_policies_value is None:
            valid_policy_tree = None

        # Step 2 f
        if valid_policy_tree is None and explicit_policy <= 0:
            raise PathValidationError(pretty_message(
                '''
                The path could not be validated because there is no valid set
                of policies for %s
                ''',
                _cert_type(index, last_index, end_entity_name_override, definite=True),
            ))

        if index != last_index:
            # Step 3: prepare for certificate index + 1

            if cert.policy_mappings_value:
                policy_map = {}
                for mapping in cert.policy_mappings_value:
                    issuer_domain_policy = mapping['issuer_domain_policy'].native
                    subject_domain_policy = mapping['subject_domain_policy'].native

                    if issuer_domain_policy not in policy_map:
                        policy_map[issuer_domain_policy] = set()
                    policy_map[issuer_domain_policy].add(subject_domain_policy)

                    # Step 3 a
                    if issuer_domain_policy == 'any_policy' or subject_domain_policy == 'any_policy':
                        raise PathValidationError(pretty_message(
                            '''
                            The path could not be validated because %s contains
                            a policy mapping for the "any policy"
                            ''',
                            _cert_type(index, last_index, end_entity_name_override, definite=True)
                        ))

                # Step 3 b
                if valid_policy_tree is not None:
                    for mapping in cert.policy_mappings_value:
                        issuer_domain_policy = mapping['issuer_domain_policy'].native

                        # Step 3 b 1
                        if policy_mapping > 0:
                            issuer_domain_policy_match = False
                            cert_any_policy = None

                            for node in valid_policy_tree.at_depth(index):
                                if node.valid_policy == 'any_policy':
                                    cert_any_policy = node
                                if node.valid_policy == issuer_domain_policy:
                                    issuer_domain_policy_match = True
                                    node.expected_policy_set = policy_map[issuer_domain_policy]

                            if not issuer_domain_policy_match and cert_any_policy:
                                cert_any_policy.parent.add_child(
                                    issuer_domain_policy,
                                    cert_any_policy.qualifier_set,
                                    policy_map[issuer_domain_policy]
                                )

                        # Step 3 b 2
                        elif policy_mapping == 0:
                            for node in valid_policy_tree.at_depth(index):
                                if node.valid_policy == issuer_domain_policy:
                                    node.parent.remove_child(node)
                            for node in valid_policy_tree.walk_up(index - 1):
                                if not node.children:
                                    node.parent.remove_child(node)
                            if len(valid_policy_tree.children) == 0:
                                valid_policy_tree = None

            # Step 3 c
            working_issuer_name = cert.subject

            # Steps 3 d-f

            # Handle inheritance of DSA parameters from a signing CA to the
            # next in the chain
            copy_params = None
            if cert.public_key.algorithm == 'dsa' and cert.public_key.hash_algo is None:
                if working_public_key.algorithm == 'dsa':
                    copy_params = working_public_key['algorithm']['parameters'].copy()

            working_public_key = cert.public_key

            if copy_params:
                working_public_key['algorithm']['parameters'] = copy_params

            # Step 3 g skipped since it relates to name constraints

            # Step 3 h
            if not cert.self_issued:
                # Step 3 h 1
                if explicit_policy != 0:
                    explicit_policy -= 1
                # Step 3 h 2
                if policy_mapping != 0:
                    policy_mapping -= 1
                # Step 3 h 3
                if inhibit_any_policy != 0:
                    inhibit_any_policy -= 1

            # Step 3 i
            if cert.policy_constraints_value:
                # Step 3 i 1
                require_explicit_policy = cert.policy_constraints_value['require_explicit_policy'].native
                if require_explicit_policy is not None and require_explicit_policy < explicit_policy:
                    explicit_policy = require_explicit_policy
                # Step 3 i 2
                inhibit_policy_mapping = cert.policy_constraints_value['inhibit_policy_mapping'].native
                if inhibit_policy_mapping is not None and inhibit_policy_mapping < policy_mapping:
                    policy_mapping = inhibit_policy_mapping

            # Step 3 j
            if cert.inhibit_any_policy_value:
                inhibit_any_policy = min(cert.inhibit_any_policy_value.native, inhibit_any_policy)

            # Step 3 k
            if not cert.ca:
                raise PathValidationError(pretty_message(
                    '''
                    The path could not be validated because %s is not a CA
                    ''',
                    _cert_type(index, last_index, end_entity_name_override, definite=True)
                ))

            # Step 3 l
            if not cert.self_issued:
                if max_path_length == 0:
                    raise PathValidationError(pretty_message(
                        '''
                        The path could not be validated because it exceeds the
                        maximum path length
                        '''
                    ))
                max_path_length -= 1

            # Step 3 m
            if cert.max_path_length is not None and cert.max_path_length < max_path_length:
                max_path_length = cert.max_path_length

            # Step 3 n
            if cert.key_usage_value and 'key_cert_sign' not in cert.key_usage_value.native:
                raise PathValidationError(pretty_message(
                    '''
                    The path could not be validated because %s is not allowed
                    to sign certificates
                    ''',
                    _cert_type(index, last_index, end_entity_name_override, definite=True)
                ))

        # Step 3 o
        # Check for critical unsupported extensions
        supported_extensions = set([
            'authority_information_access',
            'authority_key_identifier',
            'basic_constraints',
            'crl_distribution_points',
            'extended_key_usage',
            'freshest_crl',
            'key_identifier',
            'key_usage',
            'ocsp_no_check',
            'certificate_policies',
            'policy_mappings',
            'policy_constraints',
            'inhibit_any_policy',
        ])
        unsupported_critical_extensions = cert.critical_extensions - supported_extensions
        if unsupported_critical_extensions:
            raise PathValidationError(pretty_message(
                '''
                The path could not be validated because %s contains the
                following unsupported critical extension%s: %s
                ''',
                _cert_type(index, last_index, end_entity_name_override, definite=True),
                's' if len(unsupported_critical_extensions) != 1 else '',
                ', '.join(sorted(unsupported_critical_extensions)),
            ))

        # Record this certificate as validated so later lookups can reuse it.
        if validation_context:
            completed_path = completed_path.copy().append(cert)
            validation_context.record_validation(cert, completed_path)

        index += 1

    # Step 4: wrap-up procedure

    # Step 4 a
    if explicit_policy != 0:
        explicit_policy -= 1

    # Step 4 b
    if cert.policy_constraints_value:
        if cert.policy_constraints_value['require_explicit_policy'].native == 0:
            explicit_policy = 0

    # Steps 4 c-e skipped since this method doesn't output it
    # Step 4 f skipped since this method defers that to the calling application

    # Step 4 g
    # Step 4 g i
    if valid_policy_tree is None:
        intersection = None
    # Step 4 g ii
    else:
        intersection = valid_policy_tree
    # Step 4 g iii is skipped since the initial policy set is always any_policy

    if explicit_policy == 0 and intersection is None:
        raise PathValidationError(pretty_message(
            '''
            The path could not be validated because there is no valid set of
            policies for %s
            ''',
            _cert_type(last_index, last_index, end_entity_name_override, definite=True)
        ))

    return cert
def ordinal(value):
    '''Converts a number to its ordinal representation.

    :param value: number

    >>> print(ordinal(1))
    1st
    >>> print(ordinal(11))
    11th
    >>> print(ordinal(101))
    101st
    >>> print(ordinal(104))
    104th
    >>> print(ordinal(113))
    113th
    >>> print(ordinal(123))
    123rd'''
    try:
        value = int(value)
    except (TypeError, ValueError):
        raise ValueError
    # 11, 12 and 13 are irregular: always "th" regardless of the last digit.
    suffix_index = 0 if value % 100 in (11, 12, 13) else value % 10
    return '%d%s' % (value, ORDINAL_SUFFIX[suffix_index])
def convert_reshape(net, node, module, builder):
    """Converts a reshape layer from mxnet to coreml.

    This doesn't currently handle the deprecated parameters for the reshape layer.

    Parameters
    ----------
    network: net
        An mxnet network object.
    layer: node
        Node to convert.
    module: module
        A module for MXNet
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    target_shape = node['shape']

    # Fixed: the implicitly-concatenated message strings were missing the
    # separating space ("...yet.Feel free...") and contained a "by yet" typo.
    if any(item <= 0 for item in target_shape):
        raise NotImplementedError(
            'Special dimensional values less than or equal to 0 are not supported yet. '
            'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')

    if 'reverse' in node and node['reverse'] == 'True':
        raise NotImplementedError(
            '"reverse" parameter is not supported yet. '
            'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')

    mode = 0  # CHANNEL_FIRST
    builder.add_reshape(name, input_name, output_name, target_shape, mode)
def index_queryset(self, using=None):
    """Used when the entire index for model is updated."""
    filters = {"active": True}
    # if permissions are enabled then we want only public pages
    # https://github.com/leonardo-modules/leonardo-module-pagepermissions
    if hasattr(Page(), 'permissions'):
        filters['permissions__isnull'] = True
    # https://github.com/leonardo-modules/leonardo-page-search
    if hasattr(Page(), 'search_exclude'):
        filters['search_exclude'] = False
    return self.get_model().objects.filter(**filters)
def hybrid_forward(self, F, inputs):  # pylint: disable=arguments-differ
    """Compute context insensitive token embeddings for ELMo representations.

    Parameters
    ----------
    inputs : NDArray
        Shape (batch_size, sequence_length, max_character_per_token)
        of character ids representing the current batch.

    Returns
    -------
    token_embedding : NDArray
        Shape (batch_size, sequence_length, embedding_size) with context
        insensitive token representations.
    """
    # the character id embedding
    # (batch_size * sequence_length, max_chars_per_token, embed_dim)
    character_embedding = self._char_embedding(inputs.reshape((-1, self._max_chars_per_token)))
    # Swap the first two axes to
    # (max_chars_per_token, batch_size * sequence_length, embed_dim);
    # presumably the layout the convolution stack expects — TODO confirm
    # against self._convolutions.
    character_embedding = F.transpose(character_embedding, axes=(1, 0, 2))
    token_embedding = self._convolutions(character_embedding)
    # Build a (batch_size, sequence_length, output_size) shape reference from
    # `inputs` (keep one slot of the char axis, broadcast it to output_size)
    # so reshape_like can restore the batch and sequence axes.
    out_shape_ref = inputs.slice_axis(axis=-1, begin=0, end=1)
    out_shape_ref = out_shape_ref.broadcast_axes(axis=(2,), size=(self._output_size))
    return token_embedding.reshape_like(out_shape_ref)
def cmd_arp_sniff(iface):
    """Listen for ARP packets and show information for each device.

    Columns: Seconds from last packet | IP | MAC | Vendor

    Example:
        1 192.168.0.1 a4:08:f5:19:17:a4 Sagemcom Broadband SAS
        7 192.168.0.2 64:bc:0c:33:e5:57 LG Electronics (Mobile Communications)
        2 192.168.0.5 00:c2:c6:30:2c:58 Intel Corporate
        6 192.168.0.7 54:f2:01:db:35:58 Samsung Electronics Co., Ltd
    """
    # Silence scapy's own chatter; bind to the requested interface if any.
    conf.verb = False
    if iface:
        conf.iface = iface
    print("Waiting for ARP packets...", file=sys.stderr)
    # procpkt handles each captured packet; nothing is stored in memory.
    sniff(filter="arp", store=False, prn=procpkt)
def _argsort_and_resolve_ties ( time , random_state ) :
"""Like numpy . argsort , but resolves ties uniformly at random""" | n_samples = len ( time )
order = numpy . argsort ( time , kind = "mergesort" )
i = 0
while i < n_samples - 1 :
inext = i + 1
while inext < n_samples and time [ order [ i ] ] == time [ order [ inext ] ] :
inext += 1
if i + 1 != inext : # resolve ties randomly
random_state . shuffle ( order [ i : inext ] )
i = inext
return order |
def get_image(self, digest, blob, mime_type, index, size=500):
    """Return an image for the given content, only if it already exists in
    the image cache; None-like cache miss otherwise."""
    # Special case, for now (XXX): raw images get an empty string back
    # instead of a cache lookup.
    if mime_type.startswith("image/"):
        return ""
    key = f"img:{index}:{size}:{digest}"
    return self.cache.get(key)
def file_selection(multiple=False, directory=False, save=False, confirm_overwrite=False, filename=None, title="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
    """Open a file selection window.

    :param multiple: allow multiple file selection
    :type multiple: bool
    :param directory: only directory selection
    :type directory: bool
    :param save: save mode
    :type save: bool
    :param confirm_overwrite: confirm when a file is overwritten
    :type confirm_overwrite: bool
    :param filename: placeholder for the filename
    :type filename: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    :return: path of files selected.
    :rtype: string or list if multiple enabled
    """
    selection = ZFileSelection(multiple, directory, save, confirm_overwrite,
                               filename, title, width, height, timeout)
    # Blocks until the dialog is closed (or the timeout fires).
    selection.run()
    return selection.response
def _mute(self):
    """Toggle mute on the controlled VLC instance: silence it when audible,
    restore the remembered volume when muted."""
    if not self.muted:
        # Capture the current volume first so it can be restored later.
        if self.actual_volume == -1:
            self._get_volume()
        self._sendCommand("volume 0\n")
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('VLC muted: 0 (0%)')
        return
    self._sendCommand("volume {}\n".format(self.actual_volume))
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('VLC unmuted: {0} ({1}%)'.format(
            self.actual_volume,
            int(100 * self.actual_volume / self.max_volume)))
def remove_member_from(self, leaderboard_name, member):
    '''Remove a member, along with their stored member data, from the named
    leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.'''
    # Both deletions go through one pipeline so they hit Redis together.
    tx = self.redis_connection.pipeline()
    tx.zrem(leaderboard_name, member)
    tx.hdel(self._member_data_key(leaderboard_name), member)
    tx.execute()
def _get_efron_values_batch(self, X, T, E, weights, beta):  # pylint: disable=too-many-locals
    """
    Assumes sorted on ascending on T.

    Calculates the first and second order vector differentials, with respect to beta.

    A good explanation for how Efron handles ties. Consider three of five subjects who fail at the time.
    As it is not known a priori that who is the first to fail, so one-third of
    (φ1 + φ2 + φ3) is adjusted from sum_j^{5} φj after one fails. Similarly two-third
    of (φ1 + φ2 + φ3) is adjusted after first two individuals fail, etc.

    Parameters
    ----------
    X : (n, d) array of covariates
    T : (n,) durations — assumed sorted ascending (see above)
    E : (n,) boolean event indicators (used as a mask below)
    weights : (n,) per-sample weights
    beta : (d,) coefficient vector

    Returns
    -------
    hessian: (d, d) numpy array,
    gradient: (1, d) numpy array
    log_likelihood: float
    """
    n, d = X.shape
    hessian = np.zeros((d, d))
    gradient = np.zeros((d,))
    log_lik = 0
    # weights = weights[:, None]

    # Init risk and tie sums to zero
    risk_phi, tie_phi = 0, 0
    risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
    risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))

    # counts are sorted by -T, i.e. iteration proceeds from the largest T
    # backwards, growing the risk set as we go.
    _, counts = np.unique(-T, return_counts=True)
    # partial-hazard scores φ_i = w_i * exp(x_i·β)
    scores = weights * np.exp(np.dot(X, beta))
    pos = n
    for count_of_removals in counts:
        # The block of subjects sharing this event time.
        slice_ = slice(pos - count_of_removals, pos)
        X_at_t = X[slice_]
        weights_at_t = weights[slice_]

        phi_i = scores[slice_, None]
        phi_x_i = phi_i * X_at_t
        phi_x_x_i = np.dot(X_at_t.T, phi_x_i)

        # Calculate sums of Risk set
        risk_phi = risk_phi + array_sum_to_scalar(phi_i)
        risk_phi_x = risk_phi_x + matrix_axis_0_sum_to_array(phi_x_i)
        risk_phi_x_x = risk_phi_x_x + phi_x_x_i

        # Calculate the sums of Tie set
        deaths = E[slice_]
        tied_death_counts = array_sum_to_scalar(deaths.astype(int))
        if tied_death_counts == 0:
            # no deaths, can continue
            pos -= count_of_removals
            continue

        xi_deaths = X_at_t[deaths]
        weights_deaths = weights_at_t[deaths]
        x_death_sum = matrix_axis_0_sum_to_array(weights_deaths[:, None] * xi_deaths)

        weight_count = array_sum_to_scalar(weights_deaths)
        weighted_average = weight_count / tied_death_counts

        if tied_death_counts > 1:
            # a lot of this is now in Einstein notation for performance, but see original "expanded" code here
            # https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5dff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
            # it's faster if we can skip computing these when we don't need to.
            tie_phi = array_sum_to_scalar(phi_i[deaths])
            tie_phi_x = matrix_axis_0_sum_to_array(phi_x_i[deaths])
            tie_phi_x_x = np.dot(xi_deaths.T, phi_i[deaths] * xi_deaths)

            # Efron correction: subtract l/m of the tie-set sums for the
            # l-th of m tied deaths.
            increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
            denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
            numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)

            # computes outer products and sums them together.
            # Naive approach is to
            # 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
            # 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
            # 3) subtract them, and then sum to (d, d)
            # Alternatively, we can sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
            a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum("ab,i->ab", tie_phi_x_x, increasing_proportion * denom)
        else:
            # no tensors here, but do some casting to make it easier in the converging step next.
            denom = 1.0 / np.array([risk_phi])
            numer = risk_phi_x
            a1 = risk_phi_x_x * denom

        summand = numer * denom[:, None]
        # This is a batch outer product.
        # given a matrix t, for each row, m, compute it's outer product: m.dot(m.T), and stack these new matrices together.
        # which would be: np.einsum("Bi,Bj->Bij", t, t)
        a2 = summand.T.dot(summand)

        gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
        log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
        hessian = hessian + weighted_average * (a2 - a1)
        pos -= count_of_removals

    return hessian, gradient, log_lik
def is_homozygous(self):
    '''Returns true iff this variant has a GT field and is homozygous, which
    here means that the genotype is n/n (where n can be any number).'''
    if self.FORMAT is None:
        return False
    # A missing GT defaults to the heterozygous '0/1', which fails the check.
    alleles = set(self.FORMAT.get('GT', '0/1').split('/'))
    # Homozygous: exactly one distinct allele and no missing ('.') calls.
    return '.' not in alleles and len(alleles) == 1
def is_unused(input, model_file=None, model_proto=None, name=None):
    """Returns true if input id is unused piece.

    Args:
      input: An arbitrary tensor of int32.
      model_file: The sentencepiece model file path.
      model_proto: The sentencepiece model serialized proto.
        Either `model_file` or `model_proto` must be set.
      name: The name argument that is passed to the op function.

    Returns:
      A tensor of bool with the same shape as input.
    """
    # piece_type=2 selects the "unused" piece class in the underlying op —
    # presumably sibling wrappers pass other piece_type values; confirm
    # against the tf_sentencepiece op documentation.
    return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(input, model_file=model_file, model_proto=model_proto, name=name, piece_type=2)
def edges(self, tail_head_iter):
    """Create a bunch of edges.

    Args:
        tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.
    """
    template = self._edge_plain
    quoted = self._quote_edge
    # Lazily format each pair and append the lines to the body in one pass.
    self.body.extend(template % (quoted(tail), quoted(head))
                     for tail, head in tail_head_iter)
def define_hardware_breakpoint(self, dwThreadId, address, triggerFlag=BP_BREAK_ON_ACCESS, sizeFlag=BP_WATCH_DWORD, condition=True, action=None):
    """
    Creates a disabled hardware breakpoint at the given address.

    @see:
        L{has_hardware_breakpoint},
        L{get_hardware_breakpoint},
        L{enable_hardware_breakpoint},
        L{enable_one_shot_hardware_breakpoint},
        L{disable_hardware_breakpoint},
        L{erase_hardware_breakpoint}

    @note:
        Hardware breakpoints do not seem to work properly on VirtualBox.
        See U{http://www.virtualbox.org/ticket/477}.

    @type  dwThreadId: int
    @param dwThreadId: Thread global ID.

    @type  address: int
    @param address: Memory address to watch.

    @type  triggerFlag: int
    @param triggerFlag: Trigger of breakpoint. Must be one of the following:
         - L{BP_BREAK_ON_EXECUTION}
           Break on code execution.
         - L{BP_BREAK_ON_WRITE}
           Break on memory write.
         - L{BP_BREAK_ON_ACCESS}
           Break on memory read or write.
        (NOTE(review): the original docstring had the WRITE and ACCESS
        descriptions swapped relative to the flag names; corrected here per
        the names — confirm against the HardwareBreakpoint class.)

    @type  sizeFlag: int
    @param sizeFlag: Size of breakpoint. Must be one of the following:
         - L{BP_WATCH_BYTE} One (1) byte in size.
         - L{BP_WATCH_WORD} Two (2) bytes in size.
         - L{BP_WATCH_DWORD} Four (4) bytes in size.
         - L{BP_WATCH_QWORD} Eight (8) bytes in size.

    @type  condition: function
    @param condition: (Optional) Condition callback function.

        The callback signature is::

            def condition_callback(event):
                return True     # returns True or False

        Where B{event} is an L{Event} object,
        and the return value is a boolean
        (C{True} to dispatch the event, C{False} otherwise).

    @type  action: function
    @param action: (Optional) Action callback function.
        If specified, the event is handled by this callback instead of
        being dispatched normally.

        The callback signature is::

            def action_callback(event):
                pass        # no return value

        Where B{event} is an L{Event} object.

    @rtype:  L{HardwareBreakpoint}
    @return: The hardware breakpoint object.

    @raise KeyError: An existing hardware breakpoint for this thread
        overlaps the requested address range.
    """
    # Looks up the thread first — presumably validates dwThreadId exists
    # (raising if unknown); the result itself is otherwise unused here.
    thread = self.system.get_thread(dwThreadId)
    bp = HardwareBreakpoint(address, triggerFlag, sizeFlag, condition, action)
    begin = bp.get_address()
    end = begin + bp.get_size()

    if dwThreadId in self.__hardwareBP:
        bpSet = self.__hardwareBP[dwThreadId]
        # Reject the new breakpoint if its watched byte range overlaps any
        # breakpoint already registered for this thread.
        for oldbp in bpSet:
            old_begin = oldbp.get_address()
            old_end = old_begin + oldbp.get_size()
            if MemoryAddresses.do_ranges_intersect(begin, end, old_begin, old_end):
                msg = "Already exists (TID %d) : %r" % (dwThreadId, oldbp)
                raise KeyError(msg)
    else:
        # First breakpoint for this thread: create its registry entry.
        bpSet = set()
        self.__hardwareBP[dwThreadId] = bpSet

    bpSet.add(bp)
    return bp
def load_configuration(conf_path):
    """Load and validate test configuration.

    :param conf_path: path to YAML configuration file.
    :return: configuration as dict.
    :raises: whatever ``validate_config`` raises on an invalid config.
    """
    with open(conf_path) as f:
        # safe_load avoids arbitrary Python object construction from the
        # YAML stream; yaml.load() without an explicit Loader is unsafe
        # and deprecated since PyYAML 5.1.
        conf_dict = yaml.safe_load(f)
    validate_config(conf_dict)
    return conf_dict
def google_get_token(self, config, prefix):
    """Make request to Google API to get token."""
    # The redirect URI must match the one registered for this client.
    redirect_uri = self.scheme_host_port_prefix(
        'http', config.host, config.port, prefix) + '/home'
    form = {
        'code': self.request_args_get('code', default=''),
        'client_id': self.google_api_client_id,
        'client_secret': self.google_api_client_secret,
        'redirect_uri': redirect_uri,
        'grant_type': 'authorization_code',
    }
    request = Request(self.google_oauth2_url + 'token',
                      urlencode(form).encode('utf-8'))
    raw_body = urlopen(request).read()
    return json.loads(raw_body.decode('utf-8'))
def expandEntitiesFromEmail(e):
    """Create the linked entities derived from an email address.

    Args:
        e: Email to verify.

    Returns:
        list: Three entity dicts: email, alias and domain.
    """
    parts = e.split("@")

    def _entity(entity_type, value):
        # All three entities share the same i3visio structure.
        return {"type": entity_type, "value": value, "attributes": []}

    return [
        _entity("i3visio.email", e),
        _entity("i3visio.alias", parts[0]),
        _entity("i3visio.domain", parts[1]),
    ]
def press(self):
    '''press key via name or key code. Supported key name includes:
    home, back, left, right, up, down, center, menu, search, enter,
    delete(or del), recent(recent apps), volume_up, volume_down,
    volume_mute, camera, power.
    Usage:
    d.press.back()  # press back key
    d.press.menu()  # press menu key
    d.press(89)     # press keycode
    '''
    # param_to_property wraps _press so it supports both attribute-style
    # access (d.press.back()) and a direct call with a keycode
    # (d.press(89)).
    @param_to_property(
        key=["home", "back", "left", "right", "up", "down", "center",
             "menu", "search", "enter", "delete", "del", "recent",
             "volume_up", "volume_down", "volume_mute", "camera", "power"])
    def _press(key, meta=None):
        if isinstance(key, int):
            # Integer keys are raw Android key codes; the meta modifier
            # is only forwarded when supplied.
            return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
        else:
            return self.server.jsonrpc.pressKey(str(key))
    return _press
def _handle_subscribed(self, *args, chanId=None, channel=None, **kwargs):
    """Handles responses to subscribe() commands - registers a channel id
    with the client and assigns a data handler to it.

    :param chanId: int, channel id as assigned by the server
    :param channel: str, channel name
    :raises AlreadyRegisteredError: if chanId is already registered
    :raises UnknownChannelError: if no data handler exists for the channel
    """
    log.debug("_handle_subscribed: %s - %s - %s", chanId, channel, kwargs)
    if chanId in self.channels:
        raise AlreadyRegisteredError()
    self._heartbeats[chanId] = time.time()
    # Raw order books share the 'book' channel name but need their own
    # handler; a precision value starting with 'R' marks them as raw.
    if channel == 'book' and kwargs.get('prec', '').startswith('R'):
        channel_key = 'raw_' + channel
    else:
        channel_key = channel
    try:
        self.channels[chanId] = self._data_handlers[channel_key]
    except KeyError:
        raise UnknownChannelError()
    # Strip protocol bookkeeping fields so the remaining kwargs can be
    # used as a stable secondary label for the channel.
    # (Replaces three separate try/pop/except KeyError blocks.)
    for key in ('event', 'len', 'chanId'):
        kwargs.pop(key, None)
    self.channel_labels[chanId] = (channel_key, kwargs)
def _parse_metadata ( self , meta ) :
"""Return the dict containing document metadata""" | formatted_fields = self . settings [ 'FORMATTED_FIELDS' ]
output = collections . OrderedDict ( )
for name , value in meta . items ( ) :
name = name . lower ( )
if name in formatted_fields :
rendered = self . _render ( value ) . strip ( )
output [ name ] = self . process_metadata ( name , rendered )
else :
output [ name ] = self . process_metadata ( name , value )
return output |
def rect_to_cyl_vec(vx, vy, vz, X, Y, Z, cyl=False):
    """NAME:

       rect_to_cyl_vec

    PURPOSE:

       transform vectors from rectangular to cylindrical coordinates vectors

    INPUT:

       vx, vy, vz - rectangular velocity components

       X, Y, Z - position (rectangular, or cylindrical R, phi, z if cyl)

       cyl - if True, X, Y, Z are already cylindrical

    OUTPUT:

       vR, vT, vz

    HISTORY:

       2010-09-24 - Written - Bovy (NYU)
    """
    if cyl:
        # Already cylindrical: the second coordinate is the azimuth.
        phi = Y
    else:
        R, phi, Z = rect_to_cyl(X, Y, Z)
    cos_phi = sc.cos(phi)
    sin_phi = sc.sin(phi)
    vr = vx * cos_phi + vy * sin_phi
    vt = -vx * sin_phi + vy * cos_phi
    return (vr, vt, vz)
def RemoveProcessedTaskStorage(self, task):
    """Removes a processed task storage.

    Args:
        task (Task): task.

    Raises:
        IOError: if the task storage does not exist.
        OSError: if the task storage does not exist.
    """
    try:
        del self._task_storage_writers[task.identifier]
    except KeyError:
        raise IOError(
            'Storage writer for task: {0:s} does not exist.'.format(
                task.identifier))
def to_ipv6(key):
    """Get IPv6 address from a public key."""
    if not key.endswith('.k'):
        raise ValueError('Key does not end with .k')
    raw_key = base32.decode(key[:-2])
    # cjdns addresses are the first 16 bytes of a double SHA-512 of the
    # decoded key; take 32 hex chars and group them 4-by-4.
    double_hash = sha512(sha512(raw_key).digest()).hexdigest()
    return ':'.join(double_hash[i:i + 4] for i in range(0, 32, 4))
def eta_hms(seconds, always_show_hours=False, always_show_minutes=False, hours_leading_zero=False):
    """Converts seconds remaining into a human readable timestamp (e.g. hh:mm:ss, h:mm:ss, mm:ss, or ss).

    Positional arguments:
    seconds -- integer/float indicating seconds remaining.

    Keyword arguments:
    always_show_hours -- don't hide the 0 hours.
    always_show_minutes -- don't hide the 0 minutes.
    hours_leading_zero -- show 01:00:00 instead of 1:00:00.

    Returns:
    Human readable string.
    """
    # Round the total up-front so a fractional value such as 59.5 carries
    # into the minute. (Previously ceil() was applied per-field, which
    # could display ":60" seconds or "60" minutes.)
    total = int(ceil(seconds))
    final_hours, remainder = divmod(total, 3600)
    final_minutes, final_seconds = divmod(remainder, 60)
    # Determine which string template to use.
    if final_hours or always_show_hours:
        if hours_leading_zero:
            template = '{hour:02.0f}:{minute:02.0f}:{second:02.0f}'
        else:
            template = '{hour}:{minute:02.0f}:{second:02.0f}'
    elif final_minutes or always_show_minutes:
        template = '{minute:02.0f}:{second:02.0f}'
    else:
        template = '{second:02.0f}'
    return template.format(hour=final_hours, minute=final_minutes, second=final_seconds)
def _get_location_descriptor(self, location):
    """Get corresponding :class:`LocationDescriptor` object from a string or a :class:`LocationDescriptor` itself.

    Args:
        location: a string or a :class:`LocationDescriptor`.

    Returns:
        A corresponding :class:`LocationDescriptor` object. If ``location`` is a
        :class:`LocationDescriptor`, we simply return it.

    Raises:
        A ``RuntimeError`` is raised whenever the ``location`` object is neither
        a string nor a :class:`LocationDescriptor`.
    """
    if isinstance(location, basestring):
        return LocationDescriptor(location)
    if isinstance(location, LocationDescriptor):
        return location
    # The previous message leaked an unrelated attribute name
    # ("self._nbr_of_nodes") into the error text; say what we mean.
    raise RuntimeError("Argument is neither a string nor a LocationDescriptor")
def growth(interval, pricecol, eqdata):
    """Retrieve growth labels.

    Parameters
    ----------
    interval : int
        Number of sessions over which growth is measured. For example, if
        the value of 32 is passed for `interval`, the data returned will
        show the growth 32 sessions ahead for each data point.
    pricecol : str
        Column of `eqdata` to be used for prices (Normally 'Adj Close').
    eqdata : DataFrame
        Data for evaluating growth.

    Returns
    -------
    labels : DataFrame
        Growth labels for the specified period, indexed by the first
        ``len(eqdata) - interval`` rows of `eqdata`. Note: only this
        DataFrame is returned; any rows skipped at the end must be
        accounted for by the caller when synchronizing with features.

    Examples
    --------
    >>> from functools import partial
    >>> features, labels = pn.data.labeledfeatures(eqdata, 256,
    ...         partial(pn.data.lab.growth, 32, 'Adj Close'))
    """
    size = len(eqdata.index)
    # Ratio of the price `interval` sessions ahead to the current price.
    labeldata = eqdata.loc[:, pricecol].values[interval:] / eqdata.loc[:, pricecol].values[:(size - interval)]
    df = pd.DataFrame(data=labeldata, index=eqdata.index[:(size - interval)], columns=['Growth'], dtype='float64')
    return df
def add_reserved_switch_binding(switch_ip, state):
    """Add a reserved switch binding.

    Persists the switch state by creating a nexusport binding row whose
    VLAN/VNI fields are "no value" sentinels and whose device id is the
    reserved switch-state marker.
    """
    # overload port_id to contain switch state
    add_nexusport_binding(state, const.NO_VLAN_OR_VNI_ID, const.NO_VLAN_OR_VNI_ID, switch_ip, const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1)
def prj_create_user(self, *args, **kwargs):
    """Create a new user assigned to the current project and add it
    to the project user model.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    new_user = self.create_user(projects=[self.cur_prj])
    if not new_user:
        return
    item_data = djitemdata.UserItemData(new_user)
    treemodel.TreeItem(item_data, self.prj_user_model.root)
def chmod_plus_x(path):
    """Equivalent of unix `chmod a+x path`"""
    # Keep only the permission bits (drops setuid/setgid/sticky, as the
    # original 0o777 mask did).
    mode = os.stat(path).st_mode & 0o777
    # Grant execute wherever read is already granted.
    for read_bit, exec_bit in ((stat.S_IRUSR, stat.S_IXUSR),
                               (stat.S_IRGRP, stat.S_IXGRP),
                               (stat.S_IROTH, stat.S_IXOTH)):
        if mode & read_bit:
            mode |= exec_bit
    os.chmod(path, mode)
def to_dict(self):
    """Returns a dict representation of this instance suitable for
    conversion to YAML.
    """
    # Serialize each fitted sub-model under a YAML-safe scalar key.
    fitted_models = {
        yamlio.to_scalar_safe(name): self._process_model_dict(model.to_dict())
        for name, model in self._group.models.items()}
    # Key order is kept identical to the historical literal so any
    # order-sensitive YAML output is unchanged.
    return {
        'model_type': 'segmented_discretechoice',
        'name': self.name,
        'segmentation_col': self.segmentation_col,
        'sample_size': self.sample_size,
        'probability_mode': self.probability_mode,
        'choice_mode': self.choice_mode,
        'choosers_fit_filters': self.choosers_fit_filters,
        'choosers_predict_filters': self.choosers_predict_filters,
        'alts_fit_filters': self.alts_fit_filters,
        'alts_predict_filters': self.alts_predict_filters,
        'interaction_predict_filters': self.interaction_predict_filters,
        'estimation_sample_size': self.estimation_sample_size,
        'prediction_sample_size': self.prediction_sample_size,
        'choice_column': self.choice_column,
        'default_config': {'model_expression': self.default_model_expr},
        'remove_alts': self.remove_alts,
        'fitted': self.fitted,
        'models': fitted_models,
    }
def get_halfs_double(self, vertex_a1, vertex_b1, vertex_a2, vertex_b2):
    """Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)``

    Raise a GraphError when ``(vertex_a1, vertex_b1)`` and
    ``(vertex_a2, vertex_b2)`` do not separate the graph in two
    disconnected parts. The edges must be neighbors. If not a GraphError
    is raised. The four vertices must not coincide or a GraphError is
    raised.

    Returns the vertices of the two halfs and the four 'hinge' vertices
    in the correct order, i.e. both ``vertex_a1`` and ``vertex_a2`` are
    in the first half and both ``vertex_b1`` and ``vertex_b2`` are in the
    second half.
    """
    if vertex_a1 not in self.neighbors[vertex_b1]:
        raise GraphError("vertex_a1 must be a neighbor of vertex_b1.")
    if vertex_a2 not in self.neighbors[vertex_b2]:
        raise GraphError("vertex_a2 must be a neighbor of vertex_b2.")
    # Flood-fill the "a" half starting from vertex_a1, never crossing
    # either separating edge.
    # find vertex_a_part (and possibly switch vertex_a2, vertex_b2)
    vertex_a_new = set(self.neighbors[vertex_a1])
    vertex_a_new.discard(vertex_b1)
    if vertex_a1 == vertex_b2:
        # we know that we have to swap vertex_a2 and vertex_b2. The algo
        # below will fail otherwise in this 'exotic' case.
        vertex_a2, vertex_b2 = vertex_b2, vertex_a2
    # vertex_a_new.discard(vertex_a2)  # in case there is overlap
    if vertex_a1 == vertex_a2:
        vertex_a_new.discard(vertex_b2)
        # in case there is overlap
    vertex_a_part = set([vertex_a1])
    touched = False
    # True if (the switched) vertex_a2 has been reached.
    while len(vertex_a_new) > 0:
        pivot = vertex_a_new.pop()
        if pivot == vertex_b1:
            # Crossing to the other hinge means the edges do not cut
            # the graph in two.
            raise GraphError("The graph can not be separated in two halfs. " "vertex_b1 reached by vertex_a1.")
        vertex_a_part.add(pivot)
        # create a new set that we can modify
        pivot_neighbors = set(self.neighbors[pivot])
        pivot_neighbors -= vertex_a_part
        if pivot == vertex_a2 or pivot == vertex_b2:
            if pivot == vertex_b2:
                if touched:
                    raise GraphError("The graph can not be separated in " "two halfs. vertex_b2 reached by " "vertex_a1.")
                else:
                    # put them in the correct order
                    vertex_a2, vertex_b2 = vertex_b2, vertex_a2
            # Never walk across the second separating edge.
            pivot_neighbors.discard(vertex_b2)
            touched = True
        vertex_a_new |= pivot_neighbors
    if vertex_a2 not in vertex_a_part:
        raise GraphError("The graph can not be separated in two halfs. " "vertex_a1 can not reach vertex_a2 trough " "vertex_a_part")
    # find vertex_b_part: easy, is just the rest ...
    # vertex_b_part = set(xrange(self.num_vertices)) - vertex_a_part
    # ... but we also want that there is a path in vertex_b_part from
    # vertex_b1 to vertex_b2
    if vertex_b1 == vertex_b2:
        closed = True
    else:
        vertex_b_new = set(self.neighbors[vertex_b1])
        vertex_b_new.discard(vertex_a1)
        vertex_b_part = set([vertex_b1])
        closed = False
        while len(vertex_b_new) > 0:
            pivot = vertex_b_new.pop()
            if pivot == vertex_b2:
                closed = True
                break
            pivot_neighbors = set(self.neighbors[pivot])
            pivot_neighbors -= vertex_b_part
            vertex_b_new |= pivot_neighbors
            vertex_b_part.add(pivot)
    if not closed:
        raise GraphError("The graph can not be separated in two halfs. " "vertex_b1 can not reach vertex_b2 trough " "vertex_b_part")
    # finally compute the real vertex_b_part, the former loop might break
    # early for efficiency.
    vertex_b_part = set(range(self.num_vertices)) - vertex_a_part
    # done!
    return vertex_a_part, vertex_b_part, (vertex_a1, vertex_b1, vertex_a2, vertex_b2)
def random_dimer(molecule0, molecule1, thresholds, shoot_max):
    """Create a random dimer.

    molecule0 and molecule1 are placed in one reference frame at random
    relative positions. Interatomic distances are above the thresholds.
    Initially a dimer is created where one interatomic distance approximates
    the threshold value. Then the molecules are given an additional
    separation in the range [0, shoot_max].

    thresholds has the following format:
    {frozenset([atom_number1, atom_number2]): distance}
    """
    # apply a random rotation to molecule1
    center = np.zeros(3, float)
    angle = np.random.uniform(0, 2 * np.pi)
    axis = random_unit()
    rotation = Complete.about_axis(center, angle, axis)
    cor1 = np.dot(molecule1.coordinates, rotation.r)
    # select a random atom in each molecule
    atom0 = np.random.randint(len(molecule0.numbers))
    atom1 = np.random.randint(len(molecule1.numbers))
    # define a translation of molecule1 that brings both atoms in overlap
    delta = molecule0.coordinates[atom0] - cor1[atom1]
    cor1 += delta
    # define a random direction
    direction = random_unit()
    cor1 += 1 * direction
    # move molecule1 along this direction until all intermolecular atomic
    # distances are above the threshold values
    threshold_mat = np.zeros((len(molecule0.numbers), len(molecule1.numbers)), float)
    distance_mat = np.zeros((len(molecule0.numbers), len(molecule1.numbers)), float)
    for i1, n1 in enumerate(molecule0.numbers):
        for i2, n2 in enumerate(molecule1.numbers):
            # NOTE(review): .get() returns None when an element pair is
            # missing from `thresholds`, and None ** 2 below would raise
            # TypeError -- presumably `thresholds` must cover every pair
            # of elements present; confirm with callers.
            threshold = thresholds.get(frozenset([n1, n2]))
            threshold_mat[i1, i2] = threshold ** 2
    while True:
        # Step outward in 0.1 increments; distances are compared squared
        # to avoid a sqrt per iteration.
        cor1 += 0.1 * direction
        distance_mat[:] = 0
        for i in 0, 1, 2:
            distance_mat += np.subtract.outer(molecule0.coordinates[:, i], cor1[:, i]) ** 2
        if (distance_mat > threshold_mat).all():
            break
    # translate over a random distance [0, shoot] along the same direction
    # (if necessary repeat until no overlap is found)
    while True:
        cor1 += direction * np.random.uniform(0, shoot_max)
        distance_mat[:] = 0
        for i in 0, 1, 2:
            distance_mat += np.subtract.outer(molecule0.coordinates[:, i], cor1[:, i]) ** 2
        if (distance_mat > threshold_mat).all():
            break
    # done
    dimer = Molecule(np.concatenate([molecule0.numbers, molecule1.numbers]), np.concatenate([molecule0.coordinates, cor1]))
    dimer.direction = direction
    dimer.atom0 = atom0
    dimer.atom1 = atom1
    return dimer
def split_term(cls, term):
    """Split a term in to parent and record term components.

    :param term: combined term text
    :return: Tuple of parent and record term
    """
    if '.' not in term:
        # No explicit parent: the term belongs to the root.
        return ROOT_TERM, term.strip()
    raw_parent, raw_record = term.split('.')
    parent_term = raw_parent.strip()
    record_term = raw_record.strip()
    if not parent_term:
        # A leading dot means the parent was elided.
        parent_term = ELIDED_TERM
    return parent_term, record_term
def query(botcust2, message):
    """Sends a message to Mitsuku and retrieves the reply.

    Args:
        botcust2 (str): The botcust2 identifier
        message (str): The message to send to Mitsuku

    Returns:
        reply (str): The message Mitsuku sent back, or ``False`` when the
        reply could not be located in the returned HTML.
    """
    logger.debug("Getting Mitsuku reply")
    # Set up http request packages
    # NOTE(review): the 'amp;skin' key looks like an HTML-escaping artifact
    # ('&amp;skin' from a copied URL) -- presumably it should be 'skin';
    # confirm against the live endpoint before changing it.
    params = {'botid': 'f6a012073e345a08', 'amp;skin': 'chat'}
    headers = {'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US,en;q=0.8', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Content-Length': str(len(message) + 34), 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': 'botcust2=' + botcust2, 'DNT': '1', 'Host': 'kakko.pandorabots.com', 'Origin': 'https://kakko.pandorabots.com', 'Referer': 'https://kakko.pandorabots.com/pandora/talk?botid=f6a012073e345a08&skin=chat', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/58.0.3029.110 Safari/537.36'}
    data = {'botcust2': botcust2, 'message': message}
    # Get response from http POST request to url (module-level constant)
    logger.debug("Sending POST request")
    response = requests.post(url, params=params, headers=headers, data=data)
    logger.debug("POST response {}".format(response))
    # Parse response
    # NOTE(review): this fixed index path into the parsed HTML is brittle;
    # any upstream markup change lands in the IndexError branch below.
    parsed = lxml.html.parse(io.StringIO(response.text)).getroot()
    try:
        result = parsed[1][2][0][2].tail[1:]
        logger.debug("Getting botcust2 successful")
    except IndexError:
        result = False
        logger.critical("Getting botcust2 from html failed")
    return result
def format_stack_trace_json(self):
    """Convert a StackTrace object to json format."""
    result = {}
    # Only include the frames section when there is at least one frame;
    # the hash id is always present.
    if self.stack_frames:
        result['stack_frames'] = {
            'frame': self.stack_frames,
            'dropped_frames_count': self.dropped_frames_count,
        }
    result['stack_trace_hash_id'] = self.stack_trace_hash_id
    return result
def copy_to_file(self, name, fp_dest, callback=None):
    """Write cur_dir/name to file-like `fp_dest`.

    Args:
        name (str): file name, located in self.curdir
        fp_dest (file-like): must support write() method
        callback (function, optional):
            Called like `func(buf)` for every written chunk
    """
    assert compat.is_native(name)

    def _on_chunk(chunk):
        # Forward each retrieved chunk to the destination, then notify
        # the optional progress callback.
        fp_dest.write(chunk)
        if callback:
            callback(chunk)

    self.ftp.retrbinary("RETR {}".format(name), _on_chunk,
                        FtpTarget.DEFAULT_BLOCKSIZE)
def verification_events(self):
    """Events related to command verification.

    :type: List[:class:`.CommandHistoryEvent`]
    """
    # Assemble both known verifier events and keep only the ones that
    # actually exist for this command.
    candidates = (self._assemble_event('Verifier_Queued'),
                  self._assemble_event('Verifier_Started'))
    return [event for event in candidates if event]
def merge(self, graph, witness_sigil, witness_tokens, alignments=None):
    """Merge a witness's tokens into the variant graph.

    :type graph: VariantGraph
    :param witness_sigil: identifier of the witness being merged
    :param witness_tokens: tokens of the witness, in reading order
    :param alignments: optional mapping of token -> existing vertex
    :return: dict mapping newly created tokens to their new vertices
    """
    # A mutable default argument ({}) was previously shared across all
    # calls; use the None-sentinel idiom instead.
    if alignments is None:
        alignments = {}
    # NOTE: token_to_vertex only contains newly generated vertices
    token_to_vertex = {}
    last = graph.start
    for token in witness_tokens:
        vertex = alignments.get(token, None)
        if not vertex:
            vertex = graph.add_vertex(token, witness_sigil)
            token_to_vertex[token] = vertex
        else:
            # Aligned token: attach this witness to the existing vertex.
            vertex.add_token(witness_sigil, token)
        graph.connect(last, vertex, witness_sigil)
        last = vertex
    graph.connect(last, graph.end, witness_sigil)
    return token_to_vertex
def log_pdf(self, y, mu, weights=None):
    """computes the log of the pdf or pmf of the values under the current distribution

    Parameters
    ----------
    y : array-like of length n
        target values
    mu : array-like of length n
        expected values
    weights : array-like shape (n,) or None, default: None
        sample weights
        if None, defaults to array of ones

    Returns
    -------
    pdf/pmf : np.array of length n
    """
    if weights is None:
        weights = np.ones_like(mu)
    # Per-sample weights shrink the effective scale of each observation.
    return sp.stats.norm.logpdf(y, loc=mu, scale=self.scale / weights)
def from_internal(self, attribute_profile, internal_dict):
    """Converts the internal data to "type".

    :type attribute_profile: str
    :type internal_dict: dict[str, str]
    :rtype: dict[str, str]

    :param attribute_profile: To which external type to convert (ex: oidc, saml, ...)
    :param internal_dict: attributes to map
    :return: attribute values and names in the specified "profile"
    """
    external_dict = {}
    for internal_attribute_name in internal_dict:
        try:
            attribute_mapping = self.from_internal_attributes[internal_attribute_name]
        except KeyError:
            logger.debug("no attribute mapping found for the internal attribute '%s'", internal_attribute_name)
            continue
        if attribute_profile not in attribute_mapping:
            # skip this internal attribute if we have no mapping in the specified profile
            logger.debug("no mapping found for '%s' in attribute profile '%s'",
                         internal_attribute_name, attribute_profile)
            continue
        # Reuse the mapping fetched above instead of re-indexing the dict.
        external_attribute_names = attribute_mapping[attribute_profile]
        # select the first attribute name
        external_attribute_name = external_attribute_names[0]
        logger.debug("frontend attribute %s mapped from %s",
                     external_attribute_name, internal_attribute_name)
        if self.separator in external_attribute_name:
            # A separator in the name denotes a nested attribute path.
            nested_attribute_names = external_attribute_name.split(self.separator)
            nested_dict = self._create_nested_attribute_value(
                nested_attribute_names[1:], internal_dict[internal_attribute_name])
            external_dict[nested_attribute_names[0]] = nested_dict
        else:
            external_dict[external_attribute_name] = internal_dict[internal_attribute_name]
    return external_dict
def weekdays(start, end):
    """Returns the number of weekdays between the inputted start and end dates.
    This would be the equivalent of doing (end - start) to get the number of
    calendar days between the two dates.

    :param start | <datetime.date>
           end   | <datetime.date>

    :return <int>
    """
    # don't bother calculating anything for the same inputted date
    if start == end:
        return int(start.isoweekday() not in (6, 7))
    # normalize direction once; the original repeated this check in an
    # unreachable second branch further down
    if end < start:
        return -weekdays(end, start)

    strt_weekday = start.isoweekday()
    end_weekday = end.isoweekday()
    # calculate from the monday after the start
    if 5 < strt_weekday:
        start = start + datetime.timedelta(days=8 - strt_weekday)
    # calculate from the friday before the end
    if 5 < end_weekday:
        end = end - datetime.timedelta(days=end_weekday - 5)

    remainder = end.isoweekday() - start.isoweekday()
    end = end - datetime.timedelta(days=remainder)

    # if the end is now before the start, then both dates fell on a weekend
    if end < start:
        return 0
    # otherwise, if the dates normalized to each other, then return the
    # remainder
    elif end == start:
        return remainder + 1

    # remove the number of weekends from the start and end dates
    days = (end - start).days + 1
    total_days = abs(days)
    # floor division keeps the result an int on Python 3 (true division
    # previously made the function return a float)
    multiplier = days // total_days
    weekends = int(round(total_days / 7.0) * 2)
    return ((total_days - weekends) + remainder) * multiplier
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """Return a FunctionFS descriptor suitable for serialisation.

    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
        All (non-empty) lists must define the same number of interfaces
        and endpoints, and endpoint descriptors must be given in the same
        order, bEndpointAddress-wise.
        os_list:
            OSDesc
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    # For each speed (full/high/super) plus the MS OS descriptors: build
    # a per-speed ctypes struct holding the descriptors back-to-back and
    # record the corresponding count field.
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
            (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
            (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
            (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
            (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError('Descriptor %r of unexpected type: %r' % (index, type(descriptor), ), )
            # Name each descriptor desc_0, desc_1, ... so they can become
            # fields of the generated ctypes structure.
            descriptor_map = [('desc_%i' % x, y) for x, y in enumerate(descriptor_list)]
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            # Packed little-endian struct type holding this list's
            # descriptors contiguously, in list order.
            descr_type = type('t_' + descr_name, (ctypes.LittleEndianStructure, ), {'_pack_': 1, '_fields_': [(x, type(y)) for x, y in descriptor_map], })
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            # The caller claimed this speed via flags but supplied no
            # descriptors for it.
            raise ValueError('Flag %r set but descriptor list empty, cannot generate type.' % (FLAGS.get(flag), ))
    # Generate the final header type with counts followed by the per-speed
    # descriptor structs, then instantiate it.
    klass = type('DescsV2_0x%02x' % (flags & (HAS_FS_DESC | HAS_HS_DESC | HAS_SS_DESC | HAS_MS_OS_DESC), # XXX: include contained descriptors type information ? (and name ?)
    ), (DescsHeadV2, ), {'_fields_': count_field_list + descr_field_list, }, )
    return klass(magic=DESCRIPTORS_MAGIC_V2, length=ctypes.sizeof(klass), flags=flags, **kw)
def make_plot(self):
    """This method creates the waterfall plot."""
    # color levels for the main filled contour
    fill_colors = ['None', 'darkblue', 'blue', 'deepskyblue', 'aqua',
                   'greenyellow', 'orange', 'red', 'darkred']
    if len(self.contour_vals) > len(fill_colors) + 1:
        raise AttributeError("Reduce number of contours.")
    # produce the filled contour of SNR and hook it to the colorbar
    filled = self.axis.contourf(self.xvals[0], self.yvals[0], self.zvals[0],
                                levels=np.asarray(self.contour_vals),
                                colors=fill_colors)
    self.colorbar.setup_colorbars(filled)
    # optionally overlay a single highlighted contour line
    if self.snr_contour_value is not None:
        self.axis.contour(self.xvals[0], self.yvals[0], self.zvals[0],
                          np.array([self.snr_contour_value]),
                          colors='white', linewidths=1.5,
                          linestyles='dashed')
    return
def from_vega(cls, **kwargs):
    """Load :ref:`Vega spectrum <synphot-vega-spec>`.

    Parameters
    ----------
    kwargs : dict
        Keywords acceptable by :func:`~synphot.specio.read_remote_spec`.

    Returns
    -------
    vegaspec : `SourceSpectrum`
        Empirical Vega spectrum.
    """
    filename = conf.vega_file
    header, wavelengths, fluxes = specio.read_remote_spec(filename, **kwargs)
    # Record provenance in the metadata carried by the spectrum.
    header['filename'] = filename
    description = 'Vega from {0}'.format(os.path.basename(filename))
    meta = {'header': header, 'expr': description}
    return cls(Empirical1D, points=wavelengths, lookup_table=fluxes, meta=meta)
def display_reports(self, layout):
    """Issues the final PyLint score as a TeamCity build statistic value"""
    try:
        score = self.linter.stats['global_note']
    except (AttributeError, KeyError):
        # No score was computed for this run; emit nothing.
        return
    self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score))
def passthrough_device(self, name, controller_port, device, passthrough):
    """Sets the passthrough mode of an existing DVD device. Changing the
    setting while the VM is running is forbidden. The setting is only used
    if at VM start the device is configured as a host DVD drive, in all
    other cases it is ignored. The device must already exist; see
    :py:func:`IMachine.attach_device` for how to attach a new device.

    The @a controllerPort and @a device parameters specify the device slot
    and have the same meaning as with :py:func:`IMachine.attach_device`.

    in name of type str
        Name of the storage controller.

    in controller_port of type int
        Storage controller port.

    in device of type int
        Device slot in the given port.

    in passthrough of type bool
        New value for the passthrough setting.

    raises :class:`OleErrorInvalidarg`
        SATA device, SATA port, IDE port or IDE slot out of range.

    raises :class:`VBoxErrorInvalidObjectState`
        Attempt to modify an unregistered virtual machine.

    raises :class:`VBoxErrorInvalidVmState`
        Invalid machine state.
    """
    # Validate every argument up front; the messages match the historical
    # per-argument checks exactly.
    for arg_name, value, expected_type, type_name in (
            ('name', name, basestring, 'basestring'),
            ('controller_port', controller_port, baseinteger, 'baseinteger'),
            ('device', device, baseinteger, 'baseinteger'),
            ('passthrough', passthrough, bool, 'bool')):
        if not isinstance(value, expected_type):
            raise TypeError("%s can only be an instance of type %s"
                            % (arg_name, type_name))
    self._call("passthroughDevice", in_p=[name, controller_port, device, passthrough])
def get_hierarchy_design_session(self, proxy):
    """Gets the ``OsidSession`` associated with the hierarchy design service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.hierarchy.HierarchyDesignSession) - a
            ``HierarchyDesignSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_hierarchy_design()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_hierarchy_design()`` is ``true``.*
    """
    if self.supports_hierarchy_design():
        # pylint: disable=no-member
        return sessions.HierarchyDesignSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
def _resolve_to_func(self, what):
    """Normalise *what* into a plain function.

    Accepts a method name (string), a bound or unbound method, or a function,
    so internal handling of setters and getters is uniform.

    :raises TypeError: when *what* cannot be resolved to a function.
    """
    resolved = what
    if isinstance(resolved, str):
        # Resolve an attribute name against the adapted property object.
        resolved = getattr(Adapter._get_property(self), resolved)
    if type(resolved) is types.MethodType:
        # Unwrap a bound method into its underlying plain function.
        resolved = resolved.__func__
    if type(resolved) is not types.FunctionType:
        raise TypeError("Expected a method name, a method or a function")
    return resolved
def make_button_widget(cls, label, file_path=None, handler=None, style=None, layout=Layout(width='auto')):
    "Return a Button widget with specified `handler`."
    # Build the button, tag it with our bookkeeping attributes, then wire
    # optional styling and the click handler.
    btn = widgets.Button(description=label, layout=layout)
    btn.file_path = file_path
    btn.flagged_for_delete = False
    if style is not None:
        btn.button_style = style
    if handler is not None:
        btn.on_click(handler)
    return btn
def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10):
    """Estimate the length of a cubic curve.

    Samples ``precision + 1`` points along the curve and sums the straight-line
    distances between consecutive samples (flattening the curve).
    """
    step = 1.0 / precision
    samples = [_getCubicPoint(i * step, pt0, pt1, pt2, pt3)
               for i in range(precision + 1)]
    return sum(_distance(a, b) for a, b in zip(samples, samples[1:]))
def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):
    """Apply a function to each element in an iterable and return a result list.

    :param func: A function that returns a value
    :param iterable: A list or set of elements to be passed to the func as the singular parameter
    :param process_name: Name of the process, for printing purposes only
    :param cpus: Number of CPUs (note: default is evaluated once at import time)
    :return: Result list
    """
    with Timer('\t{0} ({1}) completed in'.format(process_name, str(func))):
        pool = Pool(cpus)
        try:
            vals = pool.map(func, iterable)
        finally:
            # BUGFIX: previously the pool was only closed on success, leaking
            # worker processes whenever ``func`` raised.
            pool.close()
        return vals
def verify_submit(session, queue_url, log_url, job_ids, timeout=_DEFAULT_TIMEOUT, delay=_DEFAULT_DELAY, **kwargs):
    """Verifies that the results were successfully submitted.

    Builds a queue object for the given endpoints and delegates the actual
    verification (with retry timing) to it.
    """
    queue = get_queue_obj(session=session, queue_url=queue_url, log_url=log_url)
    return queue.verify_submit(job_ids, timeout, delay, **kwargs)
def make_heading_authors(self, authors):
    """Constructs the Authors content for the Heading. This should display
    directly after the Article Title.

    Builds an ``<h3 class="authors">`` element listing each author (collab
    group, "Anonymous", or personal name) followed by superscripted links for
    their corresponding/affiliation cross-references.

    Metadata element, content derived from FrontMatter
    """
    author_element = etree.Element('h3', {'class': 'authors'})
    # Construct content for the author element
    first = True
    for author in authors:
        # Separate successive authors with a comma.
        if first:
            first = False
        else:
            append_new_text(author_element, ',', join_str='')
        collab = author.find('collab')
        anon = author.find('anon')
        if collab is not None:
            # Collaboration: copy its content wholesale.
            append_all_below(author_element, collab)
        elif anon is not None:  # If anonymous, just add "Anonymous"
            append_new_text(author_element, 'Anonymous')
        else:  # Author is neither Anonymous or a Collaboration
            author_name, _ = self.get_contrib_names(author)
            append_new_text(author_element, author_name)
        # TODO: Handle author footnote references, also put footnotes in the ArticleInfo
        # Example: journal.pbio.0040370.xml
        first = True
        for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
            _sup = xref.find('sup')
            sup_text = all_text(_sup) if _sup is not None else ''
            auth_sup = etree.SubElement(author_element, 'sup')
            sup_link = etree.SubElement(auth_sup, 'a',
                                        {'href': self.main_fragment.format(xref.attrib['rid'])})
            sup_link.text = sup_text
            # NOTE(review): the ', ' is appended *inside* the current sup,
            # after its link, rather than before it -- confirm the rendered
            # separator placement is what is intended.
            if first:
                first = False
            else:
                append_new_text(auth_sup, ', ', join_str='')
        # Earlier implementation kept for reference:
        # for xref in author.findall('xref'):
        #     if xref.attrs['ref-type'] in ['corresp', 'aff']:
        #         try:
        #             sup_element = xref.sup[0].node
        #         except IndexError:
        #             sup_text = ''
        #         else:
        #             sup_text = all_text(sup_element)
        #         new_sup = etree.SubElement(author_element, 'sup')
        #         sup_link = etree.SubElement(new_sup, 'a')
        #         sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])
        #         sup_link.text = sup_text
        #         if first:
        #             first = False
        #         else:
        #             new_sup.text = ', '
    return author_element
def token(self, adata, load):
    '''Determine if token auth is valid and yield the adata

    Yields matching auth entries as ``{'sub_auth': ..., 'token': ...}``
    dicts, and ``{}`` on failure or exhaustion.
    '''
    try:
        token = self.loadauth.get_tok(load['token'])
    except Exception as exc:
        log.error('Exception occurred when generating auth token: %s', exc)
        # NOTE(review): there is no ``return`` after this yield, so when the
        # generator is resumed ``token`` is unbound and the ``if not token``
        # below raises NameError -- confirm a ``return`` is intended here.
        yield {}
    if not token:
        log.warning('Authentication failure of type "token" occurred.')
        # NOTE(review): same concern -- execution continues past this yield.
        yield {}
    # NOTE(review): the nested loops iterate ``adata`` twice and never use
    # ``sub_adata``; this looks like an artifact -- verify against the
    # upstream implementation before relying on the iteration behavior.
    for sub_auth in adata:
        for sub_adata in adata:
            # Skip entries whose eauth backend is not configured.
            if token['eauth'] not in adata:
                continue
            # Accept when the token's user is listed, or a wildcard is.
            if not ((token['name'] in adata[token['eauth']]) | ('*' in adata[token['eauth']])):
                continue
            yield {'sub_auth': sub_auth, 'token': token}
    yield {}
def add_layer2image(grid2d, x_pos, y_pos, kernel, order=1):
    """Add a kernel onto a 2d image at (x_pos, y_pos) with sub-pixel accuracy.

    :param grid2d: 2d pixel grid (i.e. image)
    :param x_pos: x-position center (pixel coordinate) of the layer to be added
    :param y_pos: y-position center (pixel coordinate) of the layer to be added
    :param kernel: the layer to be added to the image
    :param order: interpolation order for sub-pixel shift of the kernel
    :return: image with added layer, cut to original size
    """
    # Nearest integer pixel, plus the sub-pixel residual to interpolate away.
    x_int = int(round(x_pos))
    y_int = int(round(y_pos))
    residual = [-(y_int - y_pos), -(x_int - x_pos)]
    kernel_shifted = interp.shift(kernel, residual, order=order)
    return add_layer2image_int(grid2d, x_int, y_int, kernel_shifted)
def _linalg_cho_factor(A, rho, lower=False, check_finite=True):
    """Patched version of :func:`sporco.linalg.cho_factor`.

    Factors the smaller of the two regularised Gram matrices:
    ``A^T A + rho*I`` (M x M) when N >= M, else ``A A^T + rho*I`` (N x N).
    """
    N, M = A.shape
    if N >= M:
        gram = A.T.dot(A) + rho * cp.identity(M, dtype=A.dtype)
    else:
        gram = A.dot(A.T) + rho * cp.identity(N, dtype=A.dtype)
    return _cho_factor(gram, lower=lower, check_finite=check_finite)
def create(self):
    """Create the subqueue to change the default behavior of Lock to semaphore."""
    # Register a sub-queue matching lock events for this (context, key).
    # ``maxdefault=self.size`` bounds concurrent dequeues, which is what turns
    # the Lock into a counting semaphore of capacity ``size``.
    # NOTE(review): exact semantics of AutoClassQueue.initHelper('locker',
    # subqueuelimit=1) come from the CBQueue API -- confirm there.
    self.queue = self.scheduler.queue.addSubQueue(
        self.priority,
        LockEvent.createMatcher(self.context, self.key),
        maxdefault=self.size,
        defaultQueueClass=CBQueue.AutoClassQueue.initHelper('locker', subqueuelimit=1))
def connect(self,
            # NOTE(review): mutable default list -- shared across calls;
            # confirm _setup_server_pool copies it rather than mutating it.
            servers=["nats://127.0.0.1:4222"],
            loop=None,
            # 'io_loop' and 'loop' are the same, but we have
            # both params to be consistent with asyncio client.
            io_loop=None,
            # Event Callbacks
            error_cb=None,
            disconnected_cb=None,
            reconnected_cb=None,
            closed_cb=None,
            # 'close_cb' is the same as 'closed_cb' but we have
            # both params to be consistent with the asyncio client.
            close_cb=None,
            # CONNECT options
            name=None,
            pedantic=False,
            verbose=False,
            no_echo=False,
            user=None,
            password=None,
            token=None,
            # Reconnect logic
            allow_reconnect=True,
            connect_timeout=DEFAULT_CONNECT_TIMEOUT,
            reconnect_time_wait=RECONNECT_TIME_WAIT,
            max_reconnect_attempts=MAX_RECONNECT_ATTEMPTS,
            dont_randomize=False,
            # Ping Interval
            ping_interval=DEFAULT_PING_INTERVAL,
            max_outstanding_pings=MAX_OUTSTANDING_PINGS,
            tls=None,
            max_read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
            max_write_buffer_size=DEFAULT_WRITE_BUFFER_SIZE,
            read_chunk_size=DEFAULT_READ_CHUNK_SIZE,
            tcp_nodelay=False,
            drain_timeout=DEFAULT_DRAIN_TIMEOUT,
            ):
    """Establishes a connection to a NATS server.

    Tries each server in the pool (with back-off between attempts) until the
    CONNECT handshake succeeds, then starts the PING timer.

    Examples:

      # Configure pool of NATS servers.
      nc = nats.io.client.Client()
      yield nc.connect({'servers': ['nats://192.168.1.10:4222', 'nats://192.168.2.10:4222']})

      # User and pass are to be passed on the uri to authenticate.
      yield nc.connect({'servers': ['nats://hello:world@192.168.1.10:4222']})

      # Simple URL can be used as well.
      yield nc.connect('demo.nats.io:4222')
    """
    self._setup_server_pool(servers)
    # Prefer an explicitly supplied loop; fall back to the current IOLoop.
    self._loop = io_loop or loop or tornado.ioloop.IOLoop.current()
    self._error_cb = error_cb
    self._closed_cb = closed_cb or close_cb
    self._reconnected_cb = reconnected_cb
    self._disconnected_cb = disconnected_cb
    self._max_read_buffer_size = max_read_buffer_size
    self._max_write_buffer_size = max_write_buffer_size
    self._read_chunk_size = read_chunk_size
    # Stash the CONNECT / reconnect options for later use.
    self.options["verbose"] = verbose
    self.options["pedantic"] = pedantic
    self.options["name"] = name
    self.options["no_echo"] = no_echo
    self.options["user"] = user
    self.options["password"] = password
    self.options["token"] = token
    self.options["max_outstanding_pings"] = max_outstanding_pings
    self.options["max_reconnect_attempts"] = max_reconnect_attempts
    self.options["reconnect_time_wait"] = reconnect_time_wait
    self.options["dont_randomize"] = dont_randomize
    self.options["allow_reconnect"] = allow_reconnect
    self.options["tcp_nodelay"] = tcp_nodelay
    # In seconds
    self.options["connect_timeout"] = connect_timeout
    self.options["ping_interval"] = ping_interval
    self.options["drain_timeout"] = drain_timeout
    # TLS customizations
    if tls is not None:
        self.options["tls"] = tls
    # Try servers from the pool until one completes the CONNECT handshake.
    while True:
        try:
            s = self._next_server()
            if s is None:
                raise ErrNoServers
            # Check when was the last attempt and back off before reconnecting
            if s.last_attempt is not None:
                now = time.time()
                if (now - s.last_attempt) < self.options["reconnect_time_wait"]:
                    yield tornado.gen.sleep(self.options["reconnect_time_wait"])
            # Mark that we have attempted to connect
            s.reconnects += 1
            s.last_attempt = time.time()
            yield self._server_connect(s)
            self._current_server = s
            # Mark that TCP connect worked against this server.
            s.did_connect = True
            # Established TCP connection at least and about
            # to send connect command, which might not succeed
            # in case TLS required and handshake failed.
            self._status = Client.CONNECTING
            yield self._process_connect_init()
            break
        except ErrNoServers:
            # Pool exhausted -- nothing left to try.
            raise
        except Exception as e:
            # Connection attempt failed; report it and either retry with the
            # next server or give up when reconnects are disabled.
            self._status = Client.DISCONNECTED
            self._err = e
            if self._error_cb is not None:
                self._error_cb(ErrServerConnect(e))
            if not self.options["allow_reconnect"]:
                raise ErrNoServers
    # Flush pending data before continuing in connected status.
    # FIXME: Could use future here and wait for an error result
    # to bail earlier in case there are errors in the connection.
    yield self._flush_pending()
    # First time connecting to NATS so if there were no errors,
    # we can consider to be connected at this point.
    self._status = Client.CONNECTED
    # Prepare the ping pong interval.
    self._ping_timer = tornado.ioloop.PeriodicCallback(
        self._ping_interval, self.options["ping_interval"] * 1000)
    self._ping_timer.start()
def add_log_entry(self, entry):
    """:db.model.job record holds event log, that can be accessed by MX
    this method adds a record and removes oldest one if necessary

    Newest entries live at index 0, so the oldest entry is the last one.
    """
    event_log = self.job_record.event_log
    # BUGFIX: use '>=' so the log is capped at MAX_NUMBER_OF_EVENTS entries
    # after the insert below; the previous '>' let it settle at MAX + 1.
    if len(event_log) >= job.MAX_NUMBER_OF_EVENTS:
        del event_log[-1]
    event_log.insert(0, entry)
def has_value_of_type(self, var_type):
    """Return True when the variable both has the given type and a usable value."""
    return bool(self.has_value() and self.has_type(var_type))
def __set_clear_button_visibility(self, text):
    """Sets the clear button visibility.

    :param text: Current field text.
    :type text: QString
    """
    # Show the button only while the field holds text.
    action = self.__clear_button.show if text else self.__clear_button.hide
    action()
def main(argv=None):
    """Takes crash data via args and generates a Socorro signature

    For every crash id (from argv or piped on stdin), fetches the raw and
    processed crash from the Socorro API, regenerates the signature locally,
    and prints old/new signatures through the chosen outputter.

    :param argv: argument list to parse; ``None`` means use ``sys.argv``.
    :returns: process exit code (0 on success/help, 1 on API or auth errors).
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
    parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
    parser.add_argument('--format', help='specify output format: csv, text (default)')
    parser.add_argument('--different-only', dest='different', action='store_true',
                        help='limit output to just the signatures that changed',)
    parser.add_argument('crashids', metavar='crashid', nargs='*',
                        help='crash id to generate signatures for')
    if argv is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(argv)
    # Anything other than 'csv' falls back to the plain-text outputter.
    if args.format == 'csv':
        outputter = CSVOutput
    else:
        outputter = TextOutput
    api_token = os.environ.get('SOCORRO_API_TOKEN', '')
    generator = SignatureGenerator()
    # Crash ids come from the command line, or from stdin when piped.
    if args.crashids:
        crashids_iterable = args.crashids
    elif not sys.stdin.isatty():
        # If a script is piping to this script, then isatty() returns False. If
        # there is no script piping to this script, then isatty() returns True
        # and if we do list(sys.stdin), it'll block waiting for input.
        crashids_iterable = list(sys.stdin)
    else:
        crashids_iterable = []
    if not crashids_iterable:
        parser.print_help()
        return 0
    with outputter() as out:
        for crash_id in crashids_iterable:
            crash_id = crash_id.strip()
            # Fetch the raw crash first; skip or bail depending on status.
            resp = fetch('/RawCrash/', crash_id, api_token)
            if resp.status_code == 404:
                out.warning('%s: does not exist.' % crash_id)
                continue
            if resp.status_code == 429:
                out.warning('API rate limit reached. %s' % resp.content)
                # FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
                # few minutes.
                return 1
            if resp.status_code == 500:
                out.warning('HTTP 500: %s' % resp.content)
                continue
            raw_crash = resp.json()
            # If there's an error in the raw crash, then something is wrong--probably with the API
            # token. So print that out and exit.
            if 'error' in raw_crash:
                out.warning('Error fetching raw crash: %s' % raw_crash['error'])
                return 1
            # Now fetch the processed crash, with the same status handling.
            resp = fetch('/ProcessedCrash/', crash_id, api_token)
            if resp.status_code == 404:
                out.warning('%s: does not have processed crash.' % crash_id)
                continue
            if resp.status_code == 429:
                out.warning('API rate limit reached. %s' % resp.content)
                # FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
                # few minutes.
                return 1
            if resp.status_code == 500:
                out.warning('HTTP 500: %s' % resp.content)
                continue
            processed_crash = resp.json()
            # If there's an error in the processed crash, then something is wrong--probably with the
            # API token. So print that out and exit.
            if 'error' in processed_crash:
                out.warning('Error fetching processed crash: %s' % processed_crash['error'])
                return 1
            old_signature = processed_crash['signature']
            crash_data = convert_to_crash_data(raw_crash, processed_crash)
            result = generator.generate(crash_data)
            # Emit unless --different-only was asked and nothing changed.
            if not args.different or old_signature != result.signature:
                out.data(crash_id, old_signature, result, args.verbose)
def position_target_global_int_send(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
    '''Reports the current commanded vehicle position, velocity, and
    acceleration as specified by the autopilot.  This should match the
    commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is
    being controlled this way.

    time_boot_ms      : Timestamp in milliseconds since system boot (uint32_t)
    coordinate_frame  : MAV_FRAME_GLOBAL_INT=5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT=6,
                        MAV_FRAME_GLOBAL_TERRAIN_ALT_INT=11 (uint8_t)
    type_mask         : Bitmask of ignored setpoint dimensions; bit 10 marks
                        afx/afy/afz as force instead of acceleration (uint16_t)
    lat_int, lon_int  : WGS84 position in 1e7 * degrees (int32_t)
    alt               : Altitude in meters, AMSL or above terrain (float)
    vx, vy, vz        : NED velocity in m/s (float)
    afx, afy, afz     : NED acceleration (or force, see type_mask) (float)
    yaw               : yaw setpoint in rad (float)
    yaw_rate          : yaw rate setpoint in rad/s (float)
    '''
    message = self.position_target_global_int_encode(
        time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt,
        vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
    return self.send(message, force_mavlink1=force_mavlink1)
def get_command_templates(command_tokens, file_tokens=None, path_tokens=None, job_options=None):
    """Given a list of tokens from the grammar, return a list of commands.

    :param command_tokens: grammar tokens describing the commands.
    :param file_tokens: grammar tokens describing files (default: none).
    :param path_tokens: grammar tokens describing paths (default: none).
    :param job_options: grammar tokens describing job options (default: none).
    :return: list of command templates with ``_dependencies`` populated.
    """
    # BUGFIX: the defaults were mutable lists ([]), shared across calls;
    # use the None-sentinel idiom instead.
    file_tokens = [] if file_tokens is None else file_tokens
    path_tokens = [] if path_tokens is None else path_tokens
    job_options = [] if job_options is None else job_options
    files = get_files(file_tokens)
    paths = get_paths(path_tokens)
    options = get_options(job_options)
    templates = _get_command_templates(command_tokens, files, paths, options)
    # Wire up preliminary inter-command dependencies after all templates exist.
    for command_template in templates:
        command_template._dependencies = _get_prelim_dependencies(command_template, templates)
    return templates
def select(*cases):
    """Select the first case that becomes ready.

    If a default case (:class:`goless.dcase`) is present,
    return that if no other cases are ready.
    If there is no default case and no case is ready,
    block until one becomes ready.

    See Go's ``reflect.Select`` method for an analog
    (http://golang.org/pkg/reflect/#Select).

    :param cases: List of case instances, such as
      :class:`goless.rcase`, :class:`goless.scase`, or :class:`goless.dcase`.
    :return: ``(chosen case, received value)``.
      If the chosen case is not a :class:`goless.rcase`, it will be None.
    """
    if not cases:
        return
    # If the first argument is a list, it should be the only argument
    if isinstance(cases[0], list):
        if len(cases) != 1:
            raise TypeError('Select can be called either with a list of cases '
                            'or multiple case arguments, but not both.')
        cases = cases[0]
    if not cases:
        # Handle the case of an empty list as an argument,
        # and prevent the raising of a SystemError by libev.
        return
    default = None
    # First pass: take the first ready case; remember a default if present.
    for c in cases:
        if c.ready():
            return c, c.exec_()
        if isinstance(c, dcase):
            # BUGFIX: corrected typo in the assertion message ('allowd').
            assert default is None, 'Only one default case is allowed.'
            default = c
    if default is not None:  # noinspection PyCallingNonCallable
        return default, None
    # We need to check for deadlocks before selecting.
    # We can't rely on the underlying backend to do it,
    # as we do for channels, since we don't do an actual send or recv here.
    # It's possible to still have a deadlock unless we move the check into
    # the loop, but since the check is slow
    # (gevent doesn't provide a fast way), let's leave it out here.
    if _be.would_deadlock():
        raise _Deadlock('No other tasklets running, cannot select.')
    # Busy-wait, yielding to the backend scheduler between sweeps.
    while True:
        for c in cases:
            if c.ready():
                return c, c.exec_()
        _be.yield_()
def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
    """Convert v2 endpoint data into v3, in place.

    Input, per service name::

        'service_name': [{'adminURL': ..., 'id': ..., 'region': ...,
                          'publicURL': ..., 'internalURL': ...}]

    Each entry is replaced by three v3-style endpoint dicts (admin, public,
    internal).  Returns the mutated ``ep_data``.
    """
    self.log.warn("Endpoint ID and Region ID validation is limited to not "
                  "null checks after v2 to v3 conversion")
    interface_map = (('adminURL', 'admin'),
                     ('publicURL', 'public'),
                     ('internalURL', 'internal'))
    for svc in ep_data.keys():
        assert len(ep_data[svc]) == 1, "Unknown data format"
        v2_ep = ep_data[svc][0]
        ep_data[svc] = [{'url': v2_ep[url_key],
                         'interface': interface,
                         'region': v2_ep['region'],
                         'region_id': self.not_null,
                         'id': self.not_null}
                        for url_key, interface in interface_map]
    return ep_data
def is_valid_endpoint(url):
    """Just ensures the url has a scheme (http/https), and a net location (IP or domain name).
    Can make more advanced or do on-network tests if needed, but this is really just to catch obvious errors.

    >>> is_valid_endpoint("https://34.216.72.29:6206")
    True
    >>> is_valid_endpoint("blahblah")
    False
    >>> is_valid_endpoint("blah://34.216.72.29")
    False
    >>> is_valid_endpoint("http://192.168.0.2:9999")
    True
    """
    try:
        parts = urlparse(url)
        if parts.port:
            # Accessing/converting .port raises ValueError for a malformed
            # port string, which the handler below turns into False.
            int(parts.port)
        return parts.scheme in ['http', 'https'] and all([parts.scheme, parts.netloc])
    except ValueError:
        return False
def localize_shapefile(shp_href, dirs):
    """Given a shapefile href and a set of directories, modify the shapefile
    name so it's correct with respect to the output and cache directories."""
    # support latest mapnik features of auto-detection
    # of image sizes and jpeg reading support...
    # http://trac.mapnik.org/ticket/508
    mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
    shp_href = urljoin(dirs.source.rstrip('/') + '/', shp_href)
    scheme, host, path, p, q, f = urlparse(shp_href)
    if scheme in ('http', 'https'):
        msg('%s | %s' % (shp_href, dirs.cache))
        # Remote shapefile: pull it into the local cache first.
        scheme, path = '', locally_cache_remote_file(shp_href, dirs.cache)
    else:
        host = None
    # collect drive for windows
    # BUGFIX: the normalised POSIX path was computed but discarded; assign it
    # so the Windows drive handling actually takes effect.
    path = to_posix(systempath.realpath(path))
    if scheme not in ('file', ''):
        raise Exception("Shapefile needs to be local, not %s" % shp_href)
    if mapnik_requires_absolute_paths:
        path = posixpath.realpath(path)
    path = dirs.output_path(path)
    if path.endswith('.zip'):
        # unzip_shapefile_into needs a path it can find
        path = posixpath.join(dirs.output, path)
        path = unzip_shapefile_into(path, dirs.cache, host)
    return dirs.output_path(path)
def similar_objects(self, num=None, **filters):
    """Find similar objects using related tags.

    :param num: maximum number of results to return (``None`` = no limit).
    :param filters: extra queryset filters applied to the tagged-item query.
    :returns: list of model instances ordered by shared-tag count; each result
        carries a ``similar_tags`` attribute holding that count.
    """
    tags = self.tags
    if not tags:
        return []
    content_type = ContentType.objects.get_for_model(self.__class__)
    filters['content_type'] = content_type
    # can't filter, see
    #  - https://github.com/alex/django-taggit/issues/32
    #  - https://django-taggit.readthedocs.io/en/latest/api.html#TaggableManager.similar_objects
    # Otherwise this would be possible:
    # return tags.similar_objects(**filters)
    lookup_kwargs = tags._lookup_kwargs()
    lookup_keys = sorted(lookup_kwargs)
    subq = tags.all()
    # Count shared tags per tagged object, excluding this object itself.
    qs = (tags.through.objects.values(*lookup_kwargs.keys())
          .annotate(n=models.Count('pk'))
          .exclude(**lookup_kwargs)
          .filter(tag__in=list(subq))
          .order_by('-n'))
    # from https://github.com/alex/django-taggit/issues/32#issuecomment-1002491
    if filters is not None:
        qs = qs.filter(**filters)
    if num is not None:
        qs = qs[:num]
    # Normal taggit code continues
    # TODO: This all feels like a bit of a hack.
    items = {}
    if len(lookup_keys) == 1:
        # Can we do this without a second query by using a select_related()
        # somehow?
        # NOTE(review): _meta.get_field_by_name() was removed in newer Django
        # versions -- this branch presumably targets older Django; confirm
        # before upgrading.
        f = tags.through._meta.get_field_by_name(lookup_keys[0])[0]
        objs = f.rel.to._default_manager.filter(
            **{"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]})
        for obj in objs:
            items[(getattr(obj, f.rel.field_name),)] = obj
    else:
        # Generic-FK case: bulk-load objects grouped by content type.
        preload = {}
        for result in qs:
            preload.setdefault(result['content_type'], set())
            preload[result["content_type"]].add(result["object_id"])
        for ct, obj_ids in preload.items():
            ct = ContentType.objects.get_for_id(ct)
            for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
                items[(ct.pk, obj.pk)] = obj
    results = []
    for result in qs:
        obj = items[tuple(result[k] for k in lookup_keys)]
        obj.similar_tags = result["n"]
        results.append(obj)
    return results
def nowarnings(func):
    """Create a function wrapped in a context that ignores warnings."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Suppress all warnings for the duration of the wrapped call only.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return func(*args, **kwargs)
    return wrapper
def get_resource_url(cls, resource, base_url):
    """Construct the URL for talking to this resource.

    i.e.: http://myapi.com/api/resource

    Note that this is NOT the method for calling individual instances i.e.
    http://myapi.com/api/resource/1

    Args:
        resource: The resource class instance
        base_url: The Base URL of this API service.
    returns:
        resource_url: The URL for this resource
    """
    resource_name = resource.Meta.resource_name
    if not resource_name:
        # No explicit resource name: derive it by pluralising the model name.
        resource_name = inflect.engine().plural(resource.Meta.name.lower())
    return cls._parse_url_and_validate('{}/{}'.format(base_url, resource_name))
def immerkaer_local(input, size, output=None, mode="reflect", cval=0.0):
    r"""Estimate the local noise.

    The input image is assumed to have additive zero mean Gaussian noise. The
    Immerkaer noise estimation is applied to the image locally over a
    N-dimensional cube of side-length size. The size of the region should be
    sufficiently high for a stable noise estimation.

    Parameters
    ----------
    input : array_like
        Array of which to estimate the noise.
    size : integer
        The local region's side length.
    output : ndarray, optional
        The `output` parameter passes an array in which to store the
        filter output.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0

    Returns
    -------
    sigmas : array_like
        Map of the estimated standard deviation of the images Gaussian noise
        per voxel.

    Notes
    -----
    Does not take the voxel spacing into account.
    Works good with medium to strong noise. Tends to underestimate for low
    noise levels.

    See also
    --------
    immerkaer
    """
    output = _ni_support._get_output(output, input)
    # Flat averaging footprint of length `size` used below to sum the local
    # laplacian magnitudes along every axis.
    footprint = numpy.asarray([1] * size)
    # build nd-kernel to acquire square root of sum of squared elements
    kernel = [1, -2, 1]
    for _ in range(input.ndim - 1):
        kernel = numpy.tensordot(kernel, [1, -2, 1], 0)
    divider = numpy.square(numpy.abs(kernel)).sum()
    # 36 for 1d, 216 for 3D, etc.
    # compute laplace of input
    laplace = separable_convolution(input, [1, -2, 1], numpy.double, mode, cval)
    # compute factor
    factor = numpy.sqrt(numpy.pi / 2.) * 1. / (numpy.sqrt(divider) * numpy.power(footprint.size, laplace.ndim))
    # locally sum laplacian values (written into `output` in place)
    separable_convolution(numpy.abs(laplace), footprint, output, mode, cval)
    output *= factor
    return output
def compile_timeoper(rule):
    """Compiler helper method: attempt to compile constant into object representing
    datetime or timedelta object to enable relations and thus simple comparisons
    using Python operators.

    :raises ValueError: when the constant cannot be interpreted as either.
    """
    # Already compiled to a datetime/timedelta value: nothing to do.
    if isinstance(rule.value, (datetime.datetime, datetime.timedelta)):
        return rule
    if isinstance(rule, NumberRule):
        return compile_timedelta(rule)
    if isinstance(rule, ConstantRule):
        # Try datetime first, then timedelta, as the original did.
        for compiler in (compile_datetime, compile_timedelta):
            try:
                return compiler(rule)
            except ValueError:
                continue
    raise ValueError("Wrong time operation constant '{}'".format(rule))
def dispatch(self, request, *args, **kwargs):
    """Does request processing for the return_url query parameter and
    redirects when it is missing.

    We can't do that in the get method, as it does not exist in the View base
    class and child mixins implementing get do not call super().get
    """
    self.return_url = request.GET.get('return_url', None)
    referrer = request.META.get('HTTP_REFERER', None)
    # leave alone POST and ajax requests and if return_url is explicitly left empty
    # NOTE(review): 'and' binds tighter than 'or', so the last clause groups as
    # (self.return_url is None and 'return_url' in request.GET) -- i.e. the
    # parameter is present but empty.  Confirm this grouping is intended.
    if (request.method != "GET" or request.is_ajax() or self.return_url
            or referrer is None
            or self.return_url is None and 'return_url' in request.GET):
        return super().dispatch(request, *args, **kwargs)
    if not self.return_url:
        url = request.get_full_path()
        # Append the referrer as return_url using the proper query separator.
        if url.find("?") < 0:
            url = "?return_url=".join((url, referrer))
        else:
            url = "&return_url=".join((url, referrer))
    return HttpResponseRedirect(url)
def newer_pairwise_group(sources_groups, targets):
    """Walk both arguments in parallel, testing if each source group is newer
    than its corresponding target. Returns a pair of lists (sources_groups,
    targets) where sources is newer than target, according to the semantics
    of 'newer_group()'.
    """
    if len(sources_groups) != len(targets):
        raise ValueError("'sources_group' and 'targets' must be the same length")
    # build a pair of lists (sources_groups, targets) where source is newer
    newer_sources = []
    newer_targets = []
    for group, target in zip(sources_groups, targets):
        if newer_group(group, target):
            newer_sources.append(group)
            newer_targets.append(target)
    return newer_sources, newer_targets
def purge(self, strategy="klogn", keep=None, deleteNonSnapshots=False, **kwargs):
    """Purge snapshot directory of snapshots according to some strategy,
    preserving however a given "keep" list or set of snapshot numbers.

    Available strategies are:
        "lastk": Keep last k snapshots (Default: k=10)
        "klogn": Keep every snapshot in the last k, 2k snapshots in
                 the last k**2, 3k snapshots in the last k**3, ...
                 (Default: k=4. k must be > 1).

    :param strategy: purge strategy name, "lastk" or "klogn"
    :param keep: list/set of snapshot numbers to preserve regardless of
        the strategy
    :param deleteNonSnapshots: also delete directory entries that are not
        snapshots
    :raises ValueError: if `strategy` names no known strategy
    Returns `self`.
    """
    assert isinstance(keep, (list, set)) or keep is None
    keep = set(keep or [])
    if self.haveSnapshots:
        if strategy == "lastk":
            keep.update(self.strategyLastK(self.latestSnapshotNum, **kwargs))
        elif strategy == "klogn":
            keep.update(self.strategyKLogN(self.latestSnapshotNum, **kwargs))
        else:
            # BUG FIX: the message previously embedded str(None) instead of
            # the actual unknown strategy name, hiding what the caller passed.
            raise ValueError("Unknown purge strategy " + str(strategy) + "!")
        # always preserve the newest snapshot and its "latest" alias
        keep.update(["latest", str(self.latestSnapshotNum)])
        keep = set(map(str, keep))
        snaps, nonSnaps = self.listSnapshotDir(self.snapDir)
        dirEntriesToDelete = set()
        dirEntriesToDelete.update(snaps)
        dirEntriesToDelete.update(nonSnaps if deleteNonSnapshots else set())
        # never delete anything the caller or the strategy wants kept
        dirEntriesToDelete.difference_update(keep)
        for dirEntry in dirEntriesToDelete:
            self.rmR(os.path.join(self.snapDir, dirEntry))
    return self
def on_mouse(self, event):
    '''implement dragging'''
    if not event.Dragging():
        # drag ended (or never started): forget the anchor point
        self._dragPos = None
        return
    if not self._dragPos:
        # drag just began: remember where it started
        self._dragPos = event.GetPosition()
        return
    # shift the window by the distance dragged since the anchor
    current = event.GetPosition()
    offset = self._dragPos - current
    self.SetPosition(self.GetPosition() - offset)
def _endCodeIfNeeded ( line , inCodeBlock ) :
"""Simple routine to append end code marker if needed .""" | assert isinstance ( line , str )
if inCodeBlock :
line = '# @endcode{0}{1}' . format ( linesep , line . rstrip ( ) )
inCodeBlock = False
return line , inCodeBlock |
def writeObject(self, obj, is_proxy=False):
    """Writes an object to the AMF3 stream.

    Handles object-reference reuse, inline vs. referenced class
    definitions, externalizable objects, and static/dynamic attribute
    encoding.

    @param obj: the object to serialise
    @param is_proxy: True when obj is already proxied, suppressing the
        proxy indirection below
    """
    if self.use_proxies and not is_proxy:
        # proxy mode: writeProxy re-enters this method with is_proxy=True
        self.writeProxy(obj)
        return
    self.stream.write(TYPE_OBJECT)
    ref = self.context.getObjectReference(obj)
    if ref != -1:
        # object already serialised: emit a reference (low bit 0 marks it)
        self._writeInteger(ref << 1)
        return
    self.context.addObject(obj)
    # object is not referenced, serialise it
    kls = obj.__class__
    definition = self.context.getClass(kls)
    alias = None
    class_ref = False
    # if the class definition is a reference
    if definition:
        class_ref = True
        alias = definition.alias
    else:
        alias = self.context.getClassAlias(kls)
        definition = ClassDefinition(alias)
        self.context.addClass(definition, alias.klass)
    if class_ref:
        self.stream.write(definition.reference)
    else:
        # build the inline class-trait header: attribute count in the high
        # bits, encoding in bits 2-3, and two REFERENCE_BITs marking an
        # inline (non-reference) class definition
        ref = 0
        if definition.encoding != ObjectEncoding.EXTERNAL:
            ref += definition.attr_len << 4
        final_reference = encode_int(ref | definition.encoding << 2 | REFERENCE_BIT << 1 | REFERENCE_BIT)
        self.stream.write(final_reference)
        definition.reference = encode_int(definition.reference << 2 | REFERENCE_BIT)
        if alias.anonymous:
            # 0x01 is the empty string: anonymous class name
            self.stream.write('\x01')
        else:
            self.serialiseString(alias.alias)
        # work out what the final reference for the class will be.
        # this is okay because the next time an object of the same
        # class is encoded, class_ref will be True and never get here
        # again.
    if alias.external:
        # externalizable objects serialise their own body
        obj.__writeamf__(DataOutput(self))
        return
    attrs = alias.getEncodableAttributes(obj, codec=self)
    if alias.static_attrs:
        if not class_ref:
            # first encounter of this class: emit the static attribute names
            [self.serialiseString(attr) for attr in alias.static_attrs]
        for attr in alias.static_attrs:
            value = attrs.pop(attr)
            self.writeElement(value)
    if definition.encoding == ObjectEncoding.STATIC:
        return
    if definition.encoding == ObjectEncoding.DYNAMIC:
        if attrs:
            for attr, value in attrs.iteritems():
                if type(attr) in python.int_types:
                    # AMF3 dynamic attribute keys must be strings
                    attr = str(attr)
                self.serialiseString(attr)
                self.writeElement(value)
        # empty string (0x01) terminates the dynamic attribute list
        self.stream.write('\x01')
def QA_fetch_user(user_cookie, db=DATABASE):
    """get the user

    Arguments:
        user_cookie: str the unique cookie_id for a user

    Keyword Arguments:
        db: database for query (defaults to the module-level DATABASE)

    Returns:
        list --- [ACCOUNT]
    """
    # BUG FIX: previously this queried the module-level DATABASE directly,
    # silently ignoring the `db` argument; honour the parameter instead.
    collection = db.account
    return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})]
def remove_capability(capability, image=None, restart=False):
    '''Uninstall a capability

    Args:
        capability (str): The capability to be removed
        image (Optional[str]): The path to the root directory of an offline
            Windows image. If ``None`` is passed, the running operating
            system is targeted. Default is None.
        restart (Optional[bool]): Reboot the machine if required by the install

    Raises:
        NotImplementedError: For all versions of Windows that are not
            Windows 10 and later. Server editions of Windows use
            ServerManager instead.

    Returns:
        dict: A dictionary containing the results of the command

    CLI Example:

    .. code-block:: bash

        salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0
    '''
    if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1:
        raise NotImplementedError(
            '`uninstall_capability` is not available on this version of '
            'Windows: {0}'.format(__grains__['osversion']))
    # target either an offline image or the running OS
    target = '/Image:{0}'.format(image) if image else '/Online'
    cmd = ['DISM',
           '/Quiet',
           target,
           '/Remove-Capability',
           '/CapabilityName:{0}'.format(capability)]
    if not restart:
        cmd.append('/NoRestart')
    return __salt__['cmd.run_all'](cmd)
def read(path):
    """Read the contents of a LockFile.

    Arguments:
        path (str): Path to lockfile.

    Returns:
        Tuple(int, datetime.date): The integer PID of the lock owner, and the
            date the lock was acquired. If the lock is not claimed, both
            values are None.
    """
    if fs.exists(path):
        with open(path) as infile:
            # lockfile format: "<pid> <unix-timestamp>"
            components = infile.read().split()
            pid = int(components[0])
            date = datetime.date.fromtimestamp(float(components[1]))
            return pid, date
    else:
        return None, None
def expand_dir(_dir, cwd=None):
    """Return path with environmental variables and tilde ~ expanded.

    :param _dir: path to expand
    :type _dir: str
    :param cwd: current working dir (for deciphering relative _dir paths);
        resolved to ``os.getcwd()`` at call time when omitted
    :type cwd: str
    :rtype: str
    """
    # BUG FIX: the old default `cwd=os.getcwd()` was evaluated once at import
    # time, so later chdir() calls were silently ignored; resolve per call.
    if cwd is None:
        cwd = os.getcwd()
    _dir = os.path.expanduser(os.path.expandvars(_dir))
    if not os.path.isabs(_dir):
        _dir = os.path.normpath(os.path.join(cwd, _dir))
    return _dir
def wait_until_complete(job_list):
    """Block until every GPJob in *job_list* has finished running.

    Args: Accepts a list of GPJob objects.
    Polls each job periodically with exponential backoff (capped at 10s)
    and returns only once all jobs report finished.
    """
    finished = [False] * len(job_list)
    delay = 1
    while not all(finished):
        time.sleep(delay)
        for idx, job in enumerate(job_list):
            if finished[idx]:
                continue
            finished[idx] = job.is_finished()
            if not finished[idx]:
                # first still-running job found: go back to sleep
                break
        delay = min(delay * 2, 10)
def set_model(model, tablename=None, created=None, appname=None, model_path=None):
    """Register an model and tablename to a global variable.

    model could be a string format, i.e., 'uliweb.contrib.auth.models.User'
    (lazy registration), or a Model subclass.

    :param appname: if no appname, then archive according to model

    item structure::

        created
        model
        model_path
        appname

    For dynamic model you should pass model_path with '' value
    """
    if isinstance(model, type) and issubclass(model, Model):
        # use alias first
        tablename = model._alias or model.tablename
    tablename = tablename.lower()
    # set global __models__
    d = __models__.setdefault(tablename, {})
    # engines may have been preset via a 'config' entry; default engine list
    engines = d.get('config', {}).pop('engines', ['default'])
    if isinstance(engines, (str, unicode)):
        engines = [engines]
    d['engines'] = engines
    item = {}
    if created is not None:
        item['created'] = created
    else:
        item['created'] = None
    if isinstance(model, (str, unicode)):
        # lazy registration by dotted path: keep the path, drop the class
        if model_path is None:
            model_path = model
        else:
            model_path = model_path
        if not appname:
            appname = model.rsplit('.', 2)[0]
        # for example 'uliweb.contrib.auth.models.User'
        model = None
    else:
        # concrete Model class: derive appname/model_path from its module
        appname = model.__module__.rsplit('.', 1)[0]
        if model_path is None:
            model_path = model.__module__ + '.' + model.__name__
        else:
            # NOTE(review): an explicitly passed model_path is discarded here
            # for concrete classes (dynamic-model case) — confirm intended.
            model_path = ''
        # for example 'uliweb.contrib.auth.models'
        model.__engines__ = engines
    item['model'] = model
    item['model_path'] = model_path
    item['appname'] = appname
    d['model_path'] = model_path
    d['appname'] = appname
    for name in engines:
        if not isinstance(name, (str, unicode)):
            raise BadValueError('Engine name should be string type, but %r found' % name)
        # each engine keeps its own copy of the registry item
        engine_manager[name].models[tablename] = item.copy()
def update(self):
    """Update |KI1| based on |EQI1| and |TInd|.

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> eqi1(5.0)
    >>> tind.value = 10.0
    >>> derived.ki1.update()
    >>> derived.ki1
    ki1(50.0)
    """
    control = self.subpars.pars.control
    product = control.eqi1 * control.tind
    self(product)
def width_aware_slice(self, index):
    """Slice based on the number of columns it would take to display the substring."""
    if wcswidth(self.s) == -1:
        # wcswidth returns -1 when the string contains non-printable chars
        raise ValueError('bad values for width aware slicing')
    index = normalize_slice(self.width, index)
    counter = 0  # display-column offset of the current chunk
    parts = []
    for chunk in self.chunks:
        if index.start < counter + chunk.width and index.stop > counter:
            # this chunk overlaps the requested column range
            start = max(0, index.start - counter)
            end = min(index.stop - counter, chunk.width)
            if end - start == chunk.width:
                # chunk lies fully inside the slice: reuse it unchanged
                parts.append(chunk)
            else:
                # partial overlap: column-slice the chunk's string,
                # preserving its attributes.
                # NOTE(review): this calls the module-level
                # width_aware_slice helper, not this method.
                s_part = width_aware_slice(chunk.s, max(0, index.start - counter), index.stop - counter)
                parts.append(Chunk(s_part, chunk.atts))
        counter += chunk.width
        if index.stop < counter:
            # already past the requested range: stop scanning chunks
            break
    return FmtStr(*parts) if parts else fmtstr('')
def add_bundle(self, prov_bundle, identifier):
    """Verbose method of adding a bundle.

    Can also be done as:

    >>> api = Api()
    >>> document = api.document.get(148)
    >>> document.bundles['identifier'] = prov_bundle

    :param prov_bundle: The bundle to be added
    :param str identifier: URI or QName for this bundle
    :type prov_bundle: :py:class:`prov.model.ProvDocument` or :py:class:`str`
    :raises AbstractDocumentException: when this document is abstract
    """
    if self.abstract:
        raise AbstractDocumentException()
    serialized = prov_bundle.serialize()
    self._api.add_bundle(self.id, serialized, identifier)
def seq_2_StdDoubleVector(seq, vec=None):
    """Converts a python sequence<float> object to a :class:`tango.StdDoubleVector`

    :param seq: the sequence of floats
    :type seq: sequence<:py:obj:`float`>
    :param vec: (optional, default is None) an :class:`tango.StdDoubleVector`
        to be filled. If None is given, a new :class:`tango.StdDoubleVector`
        is created
    :return: a :class:`tango.StdDoubleVector` filled with the same contents as seq
    :rtype: :class:`tango.StdDoubleVector`
    :raises TypeError: if vec is given but is not a StdDoubleVector
    """
    if vec is None:
        if isinstance(seq, StdDoubleVector):
            # already the right type: no copy needed
            return seq
        vec = StdDoubleVector()
    if not isinstance(vec, StdDoubleVector):
        raise TypeError('vec must be a tango.StdDoubleVector')
    for e in seq:
        # BUG FIX: elements were appended as str(e) (apparent copy/paste from
        # the StdStringVector variant); a double vector must hold floats.
        vec.append(float(e))
    return vec
def time_coef(tc, nc, tb, nb):
    """Return time coefficient relative to base numbers.

    @param tc: current test time
    @param nc: current test data size
    @param tb: base test time
    @param nb: base test data size
    @return: time coef.
    """
    current_time, current_size = float(tc), float(nc)
    base_time, base_size = float(tb), float(nb)
    # per-item time of the current run divided by per-item time of the base
    return (current_time * base_size) / (base_time * current_size)
def install_virtualbox(distribution, force_setup=False):
    """install virtualbox

    Installs VirtualBox 5.0 plus the Oracle extension pack on Ubuntu hosts
    (no-op for other distributions).

    :param distribution: distribution identifier; only acted on when it
        contains 'ubuntu'
    :param force_setup: when True, (re)start the vboxdrv service even if
        the kernel module is already loaded
    """
    if 'ubuntu' in distribution:
        with hide('running', 'stdout'):
            # refresh the package index and upgrade non-interactively,
            # keeping existing config files on conflicts
            sudo('DEBIAN_FRONTEND=noninteractive apt-get update')
            sudo("sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o " "Dpkg::Options::='--force-confdef' " "-o Dpkg::Options::='--force-confold' upgrade --force-yes")
        install_ubuntu_development_tools()
        # dkms + kernel headers are required to build the vboxdrv module
        apt_install(packages=['dkms', 'linux-headers-generic', 'build-essential'])
        # trust Oracle's apt signing key, then add their repository
        sudo('wget -q ' 'https://www.virtualbox.org/download/oracle_vbox.asc -O- |' 'sudo apt-key add -')
        # NOTE(review): local name 'os' shadows the os module inside this
        # function — confirm nothing below needs the stdlib os.
        os = lsb_release()
        apt_string = ' '.join(['deb', 'http://download.virtualbox.org/virtualbox/debian', '%s contrib' % os['DISTRIB_CODENAME']])
        apt_add_repository_from_apt_string(apt_string, 'vbox.list')
        apt_install(packages=['virtualbox-5.0'])
        loaded_modules = sudo('lsmod')
        if 'vboxdrv' not in loaded_modules or force_setup:
            # Vivid (15.04) manages vboxdrv via systemd; older releases
            # still use the init script
            if 'Vivid Vervet' in run('cat /etc/os-release'):
                sudo('systemctl start vboxdrv')
            else:
                sudo('/etc/init.d/vboxdrv start')
        # fetch and install the matching extension pack
        sudo('wget -c ' 'http://download.virtualbox.org/virtualbox/5.0.4/' 'Oracle_VM_VirtualBox_Extension_Pack-5.0.4-102546.vbox-extpack')
        # noqa
        sudo('VBoxManage extpack install --replace ' 'Oracle_VM_VirtualBox_Extension_Pack-5.0.4-102546.vbox-extpack')
def pad_sequences(self, sequences, fixed_sentences_seq_length=None, fixed_token_seq_length=None, padding='pre', truncating='post', padding_token="<PAD>"):
    """Pads each sequence to the same fixed length (length of the longest sequence or provided override).

    Args:
        sequences: list of list (samples, words) or list of list of list (samples, sentences, words)
        fixed_sentences_seq_length: The fix sentence sequence length to use. If None, largest sentence length is used.
        fixed_token_seq_length: The fix token sequence length to use. If None, largest word length is used.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger than fixed_sentences_seq_length or fixed_token_seq_length
            either in the beginning or in the end of the sentence or word sequence respectively.
        padding_token: The token to add for padding.

    Returns:
        Numpy array of (samples, max_sentences, max_tokens) or (samples, max_tokens) depending on the sequence input.

    Raises:
        ValueError: in case of invalid values for `truncating` or `padding`,
            or if `padding_token` is not one of the tokenizer's special tokens.
    """
    # BUG FIX: list.index never returns a negative number — it raises
    # ValueError on a miss — so the old `if value < 0` check was dead code
    # and the intended error message could never fire. Use EAFP instead.
    # (Also closed the unbalanced quote in the error message.)
    try:
        value = self.special_token.index(padding_token)
    except ValueError:
        raise ValueError('The padding token "' + padding_token + '" is not in the special tokens of the tokenizer.')
    # Determine if input is (samples, max_sentences, max_tokens) or not.
    if isinstance(sequences[0][0], list):
        x = utils._pad_sent_sequences(sequences, fixed_sentences_seq_length, fixed_token_seq_length, padding, truncating, value)
    else:
        x = utils._pad_token_sequences(sequences, fixed_token_seq_length, padding, truncating, value)
    return np.array(x, dtype='int32')
def run_bots(bots):
    """Run many bots in parallel.

    :param bots: IRC bots to run.
    :type bots: list
    """
    greenlets = []
    for bot in bots:
        greenlets.append(spawn(bot.run))
    try:
        joinall(greenlets)
    except KeyboardInterrupt:
        # let every bot say goodbye before the greenlets are killed
        for bot in bots:
            bot.disconnect()
    finally:
        killall(greenlets)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.