signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def __html_rep(self, game_key, rep_code):
    """Fetch the NHL HTML report for the given game key and report code.

    :param game_key: object exposing ``to_tuple()`` -> (season, game type, game number)
    :param rep_code: two-letter report code embedded in the report file name
    :return: whatever ``self.__open`` yields for the built report URL
    """
    season, game_type, game_num = game_key.to_tuple()
    # Reports live under e.g. <domain>scores/htmlreports/20152016/PL020001.HTM:
    # prior season + season, then report code, "0", game type and a 4-digit game number.
    report_url = "{}scores/htmlreports/{}{}/{}0{}{:04d}.HTM".format(
        self.__domain, season - 1, season, rep_code, game_type, game_num)
    return self.__open(report_url)
|
def unsetNsProp(self, node, name):
    """Remove an attribute carried by a node.

    :param node: wrapper whose ``_o`` is the underlying libxml2 node, or None
    :param name: name of the attribute to remove
    :return: the libxml2mod result code
    """
    # Unwrap to the raw libxml2 object; a missing node is passed through as None.
    raw_node = node._o if node is not None else None
    return libxml2mod.xmlUnsetNsProp(raw_node, self._o, name)
|
def load_addon(username, package_name, _globals):
    '''Load a fabsetup addon given by `package_name` and hook it into the
    base task namespace `username`.

    Args:
        username (str): base task namespace the addon is attached to.
        package_name (str): importable name of the addon package.
        _globals (dict): the globals() namespace of the fabric script.

    Return: None'''
    addon_module = get_or_create_module_r(username)
    package_module = __import__(package_name)
    add_tasks_r(addon_module, package_module, package_name)
    # Expose the namespace module inside the fabric script's globals.
    _globals[username] = addon_module
    # Drop local references; the addon stays reachable through _globals.
    del addon_module, package_module
|
def delete_topic_rule(ruleName, region=None, key=None, keyid=None, profile=None):
    '''Given a rule name, delete it.

    Returns {deleted: true} if the rule was deleted and returns
    {deleted: false} if the rule was not deleted.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.delete_rule myrule'''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_topic_rule(ruleName=ruleName)
    except ClientError as e:
        # Surface the boto error in the standard salt shape instead of raising.
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
    return {'deleted': True}
|
def merge(cls, predictors):
    """Merge the ensembles of two or more `Class1AffinityPredictor` instances.

    Note: the resulting merged predictor will NOT have calibrated percentile
    ranks. Call `calibrate_percentile_ranks` on it if these are needed.

    Parameters
    ----------
    predictors : sequence of `Class1AffinityPredictor`

    Returns
    -------
    `Class1AffinityPredictor` instance
    """
    assert len(predictors) > 0
    # Merging a single predictor is the identity.
    if len(predictors) == 1:
        return predictors[0]

    merged_allele_models = collections.defaultdict(list)
    merged_pan_models = []
    for predictor in predictors:
        for allele, networks in predictor.allele_to_allele_specific_models.items():
            merged_allele_models[allele].extend(networks)
        merged_pan_models.extend(predictor.class1_pan_allele_models)

    # NOTE(review): the allele-to-sequence map is taken from the first
    # predictor only, as in the original implementation.
    return Class1AffinityPredictor(
        allele_to_allele_specific_models=merged_allele_models,
        class1_pan_allele_models=merged_pan_models,
        allele_to_fixed_length_sequence=predictors[0].allele_to_fixed_length_sequence)
|
def sync_blockchain(working_dir, bt_opts, last_block, server_state, expected_snapshots={}, **virtualchain_args):
    """Synchronize state with the blockchain.

    Return True on success.
    Return False if we're supposed to stop indexing.
    Abort on error.
    """
    subdomain_index = server_state['subdomains']
    atlas_state = server_state['atlas']

    # Make this usable even if we haven't explicitly configured virtualchain.
    impl = sys.modules[__name__]

    log.info("Synchronizing database {} up to block {}".format(working_dir, last_block))

    # NOTE: this is the only place where a read-write handle should be
    # created, since this is the only place where the db should be modified.
    new_db = BlockstackDB.borrow_readwrite_instance(
        working_dir, last_block, expected_snapshots=expected_snapshots)

    # Propagate runtime state to virtualchain callbacks.
    new_db.subdomain_index = subdomain_index
    new_db.atlas_state = atlas_state

    rc = virtualchain.sync_virtualchain(
        bt_opts, last_block, new_db,
        expected_snapshots=expected_snapshots, **virtualchain_args)

    BlockstackDB.release_readwrite_instance(new_db, last_block)
    return rc
|
def _set_tunnel_dst(self, v, load=False):
    """Setter method for tunnel_dst, mapped from YANG variable
    /overlay_gateway/site/tunnel_dst (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_tunnel_dst is considered a private method. Backends looking to
    populate this variable should do so via calling
    thisObj._set_tunnel_dst() directly.

    YANG Description: Site IP address configuration represents the
    destination IP of the tunnel to the site. The tunnel will not be set up
    without the IP address configuration.
    """
    # Values wrapped in a pyangbind union type carry their own constructor;
    # unwrap before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # NOTE(review): auto-generated pyangbind code — the YANGDynClass
        # arguments mirror the YANG model metadata and should not be edited
        # by hand. A TypeError/ValueError here means `v` failed validation
        # against the list type.
        t = YANGDynClass(v, base=YANGListType("address", tunnel_dst.tunnel_dst, yang_name="tunnel-dst", rest_name="ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}), is_container='list', yang_name="tunnel-dst", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-type description so callers can see
        # exactly which YANG construct rejected the value.
        raise ValueError({'error-string': """tunnel_dst must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("address",tunnel_dst.tunnel_dst, yang_name="tunnel-dst", rest_name="ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}), is_container='list', yang_name="tunnel-dst", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""", })
    self.__tunnel_dst = t
    # Notify the change hook, if the generated class provides one.
    if hasattr(self, '_set'):
        self._set()
|
def start(self):
    """Start the worker threads if they are not already started.

    Thread-safe; called automatically when executing an operation.
    """
    if self._started.is_set():
        return
    # Not started yet: race for the lock without blocking.
    if self._lock.acquire(False):
        # We won the race: spawn the workers, flag started, release the lock.
        for index in range(self._size):
            thread_name = "riak.client.multi-worker-{0}-{1}".format(self._name, index)
            worker_thread = Thread(target=self._worker_method, name=thread_name)
            worker_thread.daemon = False
            worker_thread.start()
            self._workers.append(worker_thread)
        self._started.set()
        self._lock.release()
    else:
        # Another thread is starting the workers; wait for its signal.
        self._started.wait()
|
def get_key_value(parts, key_field_num, ignore_missing_keys, seen_keys, output_type):
    """Get the key value from the line and check it's not a dup or missing.

    Fields with only whitespace are considered empty (missing).

    :param ignore_missing_keys: if True, return None for missing keys. If
        False, missing keys cause an exception (MissingKeyError).
    :param seen_keys: a set of keys already seen.
    :return: the key value, or None if the field was empty.
    """
    key_val = parts[key_field_num]
    if not key_val.strip():
        # Whitespace-only fields count as missing.
        if ignore_missing_keys:
            return None
        raise MissingKeyError("missing key value")
    if key_val in seen_keys and output_type is OutputType.error_on_dups:
        raise DuplicateKeyError(key_val + " appears multiple times as key")
    return key_val
|
def table_server_status():
    """Return table string to be printed."""
    table_data = [
        [Color('Low Space'), Color('{autocyan}Nominal Space{/autocyan}'), Color('Excessive Space')],
        [Color('Low Load'), Color('Nominal Load'), Color('{autored}High Load{/autored}')],
        [Color('{autocyan}Low Free RAM{/autocyan}'), Color('Nominal Free RAM'), Color('High Free RAM')],
    ]
    status_table = SingleTable(table_data, '192.168.0.105')
    status_table.inner_heading_row_border = False
    status_table.inner_row_border = True
    status_table.justify_columns = {0: 'center', 1: 'center', 2: 'center'}
    return status_table.table
|
def get_if_exists(self, item_name, default_value=None):
    """Retrieve the value of an option if it exists, otherwise return
    *default_value* instead of raising an error.

    :param str item_name: The name of the option to retrieve.
    :param default_value: The value to return if *item_name* does not exist.
    :return: The value of *item_name* in the configuration.
    """
    if not self.has_option(item_name):
        return default_value
    return self.get(item_name)
|
def get_swagger_schema(settings):
    """Return a :class:`pyramid_swagger.model.SwaggerSchema` constructed from
    the swagger specs in `pyramid_swagger.schema_directory`. If
    `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema
    will be validated before returning it.

    :param settings: a pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :returns: a :class:`pyramid_swagger.model.SwaggerSchema`
    """
    schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs')
    generate_listing = settings.get('pyramid_swagger.generate_resource_listing', False)
    resource_listing = get_resource_listing(schema_dir, generate_listing)

    should_validate = settings.get('pyramid_swagger.enable_swagger_spec_validation', True)
    if should_validate:
        validate_swagger_schema(schema_dir, resource_listing)
    return compile_swagger_schema(schema_dir, resource_listing)
|
def get_inertia(self):
    """Calculate the inertia tensor and transforms along rotation axes.

    The unit is ``amu * length-unit-of-xyz-file**2``.

    Returns:
        dict: A dictionary with four keys:

            ``transformed_Cartesian``: a :class:`~chemcoord.Cartesian`
            transformed to the basis spanned by the eigenvectors of the
            inertia tensor (x-axis = lowest moment, z-axis = highest);
            also contains a column for the mass.

            ``diag_inertia_tensor``: vector of the ascendingly sorted
            inertia moments after diagonalization.

            ``inertia_tensor``: the inertia tensor in the old basis.

            ``eigenvectors``: eigenvectors of the inertia tensor in the old
            basis, returned as an orthonormal righthanded basis; the i-th
            eigenvector corresponds to the i-th entry of
            ``diag_inertia_tensor``.
    """
    def _inertia_of(frame):
        masses = frame.loc[:, 'mass'].values
        coords = frame.loc[:, ['x', 'y', 'z']].values
        # I = sum_i m_i * (|r_i|^2 * Id - r_i (outer) r_i)
        squared_norms = (coords ** 2).sum(axis=1)[:, None, None]
        outer_products = coords[:, :, None] * coords[:, None, :]
        tensor = np.sum(
            masses[:, None, None]
            * (squared_norms * np.identity(3)[None, :, :] - outer_products),
            axis=0)
        moments, vectors = np.linalg.eig(tensor)
        # Sort moments ascending and reorder eigenvectors to match.
        order = np.argsort(moments)
        return tensor, vectors[:, order], moments[order]

    molecule = self.add_data('mass')
    # Work in the center-of-mass frame.
    molecule = molecule - molecule.get_barycenter()
    inertia_tensor, eig_v, diag_inertia = _inertia_of(molecule)
    eig_v = xyz_functions.orthonormalize_righthanded(eig_v)
    molecule = molecule.basistransform(eig_v)
    return {'transformed_Cartesian': molecule,
            'eigenvectors': eig_v,
            'diag_inertia_tensor': diag_inertia,
            'inertia_tensor': inertia_tensor}
|
def create_cloudwatch_event(app_name, env, region, rules):
    """Create cloudwatch event for lambda from rules.

    Args:
        app_name (str): name of the lambda function
        env (str): Environment/Account for lambda function
        region (str): AWS region of the lambda function
        rules (dict): Trigger rules from the settings

    Raises:
        InvalidEventConfiguration: if the rules lack a schedule or rule name.
    """
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('events')

    rule_name = rules.get('rule_name')
    schedule = rules.get('schedule')
    rule_description = rules.get('rule_description')
    json_input = rules.get('json_input', {})

    if schedule is None:
        LOG.critical('Schedule is required and no schedule is defined!')
        raise InvalidEventConfiguration('Schedule is required and no schedule is defined!')
    if rule_name is None:
        LOG.critical('Rule name is required and no rule_name is defined!')
        raise InvalidEventConfiguration('Rule name is required and no rule_name is defined!')

    LOG.info('%s and %s', app_name, rule_name)
    # Namespace the rule by app and replace spaces for a valid rule name.
    rule_name = "{}_{}".format(app_name, rule_name.replace(' ', '_'))
    if rule_description is None:
        rule_description = "{} - {}".format(app_name, rule_name)
    lambda_arn = get_lambda_arn(app=app_name, account=env, region=region)

    # Grant CloudWatch Events permission to invoke the lambda.
    account_id = get_env_credential(env=env)['accountId']
    principal = "events.amazonaws.com"
    statement_id = '{}_cloudwatch_{}'.format(app_name, rule_name)
    source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(region, account_id, rule_name)
    add_lambda_permissions(
        function=lambda_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        source_arn=source_arn,
        env=env,
        region=region,
    )

    # Create the CloudWatch rule itself.
    cloudwatch_client.put_rule(
        Name=rule_name,
        ScheduleExpression=schedule,
        State='ENABLED',
        Description=rule_description,
    )

    # TODO: read this one from file event-config-*.json
    json_payload = '{}'.format(json.dumps(json_input))
    targets = [{
        "Id": app_name,
        "Arn": lambda_arn,
        "Input": json_payload,
    }]
    put_targets_response = cloudwatch_client.put_targets(Rule=rule_name, Targets=targets)
    LOG.debug('Cloudwatch put targets response: %s', put_targets_response)
    LOG.info('Created Cloudwatch event "%s" with schedule: %s', rule_name, schedule)
|
def all_subclasses(cls):
    """Return a list of all subclasses of `cls`, found recursively:
    direct subclasses first, then each subclass's own descendants."""
    found = list(cls.__subclasses__())
    for direct in cls.__subclasses__():
        found.extend(all_subclasses(direct))
    return found
|
def itemsbyscore(self, min='-inf', max='+inf', start=None, num=None, reverse=None):
    """Yield |(member, score)| pairs from the sorted set with scores
    between @min and @max; if @start and @num are given, yield a slice
    of that range.

    @min: #int minimum score, or #str '-inf'
    @max: #int maximum score, or #str '+inf'
    @start: #int starting range position
    @num: #int number of members to fetch
    @reverse: #bool sort the results descendingly (defaults to
        this set's configured order)
    -> yields |(member, score)| #tuple pairs
    """
    if reverse is None:
        reverse = self.reversed
    pairs = self.iterbyscore(min, max, start, num, withscores=True, reverse=reverse)
    for pair in pairs:
        yield pair
|
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
    """Run DistilledSGLD on the MNIST dataset."""
    X, Y, X_test, Y_test = load_mnist(num_training)
    minibatch_size = 100

    # Hyper-parameters depend on how much training data is available.
    if num_training >= 10000:
        num_hidden = 800
        total_iter_num = 1000000
        teacher_learning_rate = 1E-6
        perturb_deviation = 0.1
    else:
        num_hidden = 400
        total_iter_num = 20000
        teacher_learning_rate = 4E-5
        perturb_deviation = 0.001
    student_learning_rate = 0.0001
    teacher_prior = 1
    student_prior = 0.1

    teacher_net = get_mnist_sym(num_hidden=num_hidden)
    student_net = get_mnist_sym(output_op=LogSoftmax(), num_hidden=num_hidden)

    data_shape = (minibatch_size,) + X.shape[1::]
    ctx = dev(gpu_id)
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=ctx),
                           'softmax_label': nd.zeros((minibatch_size,), ctx=ctx)}
    # The student is trained against the teacher's 10-way soft labels.
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=ctx),
                           'softmax_label': nd.zeros((minibatch_size, 10), ctx=ctx)}
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)

    student_exe, student_params, _ = DistilledSGLD(
        teacher_sym=teacher_net, student_sym=student_net,
        teacher_data_inputs=teacher_data_inputs,
        student_data_inputs=student_data_inputs,
        X=X, Y=Y, X_test=X_test, Y_test=Y_test,
        total_iter_num=total_iter_num,
        student_initializer=student_initializer,
        teacher_initializer=teacher_initializer,
        student_optimizing_algorithm="adam",
        teacher_learning_rate=teacher_learning_rate,
        student_learning_rate=student_learning_rate,
        teacher_prior_precision=teacher_prior,
        student_prior_precision=student_prior,
        perturb_deviation=perturb_deviation,
        minibatch_size=100,
        dev=ctx)
|
def find_program_variables(code):
    """Return a dict describing program variables::

        {'var_name': ('uniform|attribute|varying', type), ...}
    """
    variables = {}
    declaration = re.compile(r"\s*" + re_prog_var_declaration + r"\s*(=|;)")
    for line in code.split('\n'):
        match = declaration.match(line)
        if match is None:
            continue
        vtype, dtype, names = match.groups()[:3]
        # A single declaration may list several comma-separated names.
        for name in names.split(','):
            variables[name.strip()] = (vtype, dtype)
    return variables
|
def aa(codon):
    """Translate a nucleotide codon to its one-letter amino acid code.

    The lookup is case-insensitive. Stop codons map to ``"*"``; any string
    that is not a valid codon yields ``"?"`` instead of raising.

    :param str codon: three-letter nucleotide codon, e.g. ``"ATG"``.
    :return: one-letter amino acid code, ``"*"`` for stop, or ``"?"``.
    """
    # Standard genetic code; renamed so it no longer shadows the function.
    codon_table = {"ATT": "I", "ATC": "I", "ATA": "I", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L", "TTA": "L", "TTG": "L", "GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V", "TTT": "F", "TTC": "F", "ATG": "M", "TGT": "C", "TGC": "C", "GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A", "GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G", "CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P", "ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T", "TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S", "TAT": "Y", "TAC": "Y", "TGG": "W", "CAA": "Q", "CAG": "Q", "AAT": "N", "AAC": "N", "CAT": "H", "CAC": "H", "GAA": "E", "GAG": "E", "GAT": "D", "GAC": "D", "AAA": "K", "AAG": "K", "CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R", "TAA": "*", "TAG": "*", "TGA": "*"}
    # dict.get gives the "unknown codon" fallback without try/except.
    return codon_table.get(codon.upper(), "?")
|
def input_check(self, name, label, multi_line=False):
    """Render a Jinja checkbox input block for *name*, e.g.::

        {% if multiple_choice_1 %}
            {% set checked = "checked" %}
        {% else %}
            {% set checked = "" %}
        {% endif %}
        <input type="checkbox" name="multiple_choice_1" value="multiple_choice_1" {{checked}}> multiple_choice_1
    """
    line_break = "<br>" if multi_line else ""
    rendered = [
        '{%% if %s %%}' % name,
        self.tab + '{% set checked = "checked" %}',
        '{% else %}',
        self.tab + '{% set checked = "" %}',
        '{% endif %}',
        '<input type="checkbox" name="%s" value="%s" {{checked}}> %s %s' % (name, name, label, line_break),
    ]
    return "\n".join(rendered)
|
def forget_fact(term):
    """Forget a fact by removing it from the database; return an ack phrase."""
    logger.info('Removing fact %s', term)
    removal_query = {'term': term_regex(term)}
    db.facts.remove(removal_query)
    return random.choice(ACKS)
|
def _plot(self):
    """Draw all the serie slices."""
    squares = self._squares()
    sq_dimensions = self.add_squares(squares)
    for index, serie in enumerate(self.series):
        self.gaugify(serie, squares, sq_dimensions,
                     self._current_square(squares, index))
|
def in_bulk(self, id_list):
    """Return a dictionary mapping each of the given IDs to the object with
    that ID.

    :param id_list: iterable of primary-key values; an empty/falsy iterable
        short-circuits to ``{}`` without touching the database.
    """
    if not id_list:
        return {}
    qs = self._clone()
    qs.add_filter(('pk__in', id_list))
    # Ordering is irrelevant for a pk lookup and would only add SQL cost.
    qs._clear_ordering(force_empty=True)
    # Dict comprehension instead of dict([...]) — same result, no throwaway list.
    return {obj._get_pk_val(): obj for obj in qs}
|
def is_site_available(self):
    """Return True if we can access LendingClub.com.

    This doubles as a simple check that a network connection exists.

    Returns
        boolean: True or False
    """
    try:
        status = requests.head(self.base_url).status_code
        # Success and redirect codes (2xx/3xx) count as available.
        return 200 <= status < 400
    except Exception:
        # Any network/DNS failure means the site is unreachable.
        return False
|
def decrypt(algorithm, key, encrypted_data, associated_data):
    """Decrypt a frame body.

    :param algorithm: Algorithm used to encrypt this body
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param bytes key: Plaintext data key
    :param encrypted_data: EncryptedData containing body data
    :type encrypted_data: :class:`aws_encryption_sdk.internal.structures.EncryptedData`,
        :class:`aws_encryption_sdk.internal.structures.FrameBody`,
        or :class:`aws_encryption_sdk.internal.structures.MessageNoFrameBody`
    :param bytes associated_data: AAD string generated for body
    :returns: Plaintext of body
    :rtype: bytes
    """
    decryptor = Decryptor(algorithm, key, associated_data,
                          encrypted_data.iv, encrypted_data.tag)
    plaintext = decryptor.update(encrypted_data.ciphertext)
    # finalize() verifies the auth tag and flushes any remaining plaintext.
    return plaintext + decryptor.finalize()
|
def get_alias(self, alias=None, manifest=None, verify=True, sizes=False, dcd=None):  # pylint: disable=too-many-arguments
    """Get the blob hashes assigned to an alias.

    :param alias: Alias name. You almost definitely will only need to pass this argument.
    :type alias: str
    :param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
    :type manifest: str
    :param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
    :type verify: bool
    :param sizes: Whether to return sizes of the blobs along with their hashes
    :type sizes: bool
    :param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
    :type dcd: str
    :rtype: list
    :returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash, size) tuples for each blob.
    """
    # Delegate to the private implementation; the trailing False selects the
    # public (non-digest) behaviour — see _get_alias for its meaning.
    result = self._get_alias(alias, manifest, verify, sizes, dcd, False)
    return result
|
def listtransactions(self, user_id="", count=10, start_at=0):
    """List all transactions associated with this account.

    Args:
        user_id (str): this user's unique identifier
        count (int): number of transactions to return (default=10)
        start_at (int): start the list at this transaction (default=0)

    Returns:
        list [dict]: transactions associated with this user's account
    """
    transactions = self.rpc.call("listtransactions", user_id, count, start_at)
    self.logger.debug("Got transaction list for " + str(user_id))
    return transactions
|
def check_status_mapping(self):
    """Ensure the status map does not contain status values which are not
    present in the DB."""
    self.verbose('checking status mapping...')
    if not self.status_mapping:
        self.message('no status mapping found')
        return
    # Python 2 codebase: iteritems/basestring are kept intentionally.
    for old_val, new_val in self.status_mapping.iteritems():
        # Strings are looked up by slug, anything else by primary key.
        if isinstance(new_val, basestring):
            lookup = {'slug': new_val}
        else:
            lookup = {'pk': new_val}
        try:
            self.status_mapping[old_val] = Status.objects.get(**lookup).id
        except Status.DoesNotExist:
            raise ImproperlyConfigured('Error! Status with slug %s not found in the database' % new_val)
    self.verbose('status map correct')
|
def select_host(self, metric):
    """Return the carbon host that has data for the given metric."""
    key = self.keyfunc(metric)
    candidates = []
    seen_servers = set()
    # Walk the hash ring, keeping one node per distinct server until the
    # replication factor is satisfied.
    for node in self.hash_ring.get_nodes(key):
        server, _instance = node
        if server in seen_servers:
            continue
        seen_servers.add(server)
        candidates.append(node)
        if len(seen_servers) >= self.replication_factor:
            break
    # Prefer nodes currently marked available; fall back to any candidate.
    up = [node for node in candidates if self.is_available(node)]
    return random.choice(up or candidates)
|
def _load_json_file(self, file, decoder=None):
    """Load data from json file.

    :param file: Readable file or path to file
    :type file: FileIO | str | unicode
    :param decoder: Use custom json decoder
    :type decoder: T <= flotils.loadable.DateTimeDecoder
    :return: Json data
    :rtype: None | int | float | str | list | dict
    :raises IOError: Failed to load
    """
    try:
        return load_json_file(file, decoder=decoder)
    except ValueError as e:
        # Normalize the py2-style json message into the documented IOError.
        if "{}".format(e) == "No JSON object could be decoded":
            raise IOError("Decoding JSON failed")
        self.exception("Failed to load from {}".format(file))
        raise IOError("Loading file failed")
    except Exception:
        # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed and re-raised as IOError.
        self.exception("Failed to load from {}".format(file))
        raise IOError("Loading file failed")
|
def create(cls, identifier_name, verbose_name, tag_filter_expression=''):
    """Create a new :class:`AnalysisSystem` on the server.

    :param identifier_name: Unique identifier string.
    :param verbose_name: A descriptive name of the AnalysisSystem.
    :param tag_filter_expression: Tag filters to automatically select samples
        for this AnalysisSystem.
    :return: The created :class:`AnalysisSystem` object.
    """
    return cls._create(
        identifier_name=identifier_name,
        verbose_name=verbose_name,
        tag_filter_expression=tag_filter_expression,
    )
|
def _set_declarations(self, declarations):
    """Set declarations local to this loop (e.g. `for` variables)."""
    assert isinstance(declarations, CodeStatement)
    # The declarations are scoped to this loop's body.
    declarations.scope = self.body
    self.declarations = declarations
|
def col_to_dt(df, col_name, set_format=None, infer_format=True, dest=False):
    """Coerce a column in a DataFrame to datetime.

    Parameters:
        df - DataFrame
            DataFrame to operate on
        col_name - string
            Name of column to coerce
        set_format - optional explicit datetime format
        infer_format - bool, default True
            Whether to let pandas infer the datetime format
        dest - bool, default False
            Whether to apply the result to the DataFrame or return it.
            True is apply, False is return.
    """
    converted = _pd.to_datetime(df[col_name], errors='coerce',
                                format=set_format,
                                infer_datetime_format=infer_format)
    if not dest:
        return converted
    set_col(df, col_name, converted)
|
def get_descriptor(self, number):
    """Create (and register) stdout/stderr file descriptors for process output.

    Any existing output file for this process number is removed first; the
    fresh 'w+' handles and their paths are stored in
    ``self.descriptors[number]``.

    :param number: process number used in the per-process file names.
    :return: (out_descriptor, err_descriptor) tuple of open file objects.
    """
    def _fresh_file(stream):
        # One file per process number and stream name, recreated from scratch
        # (was duplicated inline for stdout and stderr).
        path = os.path.join(self.config_dir,
                            'pueue_process_{}.{}'.format(number, stream))
        if os.path.exists(path):
            os.remove(path)
        return path, open(path, 'w+')

    stdout_path, out_descriptor = _fresh_file('stdout')
    stderr_path, err_descriptor = _fresh_file('stderr')
    self.descriptors[number] = {
        'stdout': out_descriptor,
        'stdout_path': stdout_path,
        'stderr': err_descriptor,
        'stderr_path': stderr_path,
    }
    return out_descriptor, err_descriptor
|
def http_range(self) -> slice:
    """The content of the Range HTTP header.

    Return a slice instance (slice(None, None, 1) when no header is set).
    """
    rng = self._headers.get(hdrs.RANGE)
    start = end = None
    if rng is not None:
        try:
            start, end = re.findall(r'^bytes=(\d*)-(\d*)$', rng)[0]
        except IndexError:
            # Header present but not in "bytes=a-b" form.
            raise ValueError("range not in acceptable format")
        start = int(start) if start else None
        end = int(end) if end else None
        if start is None and end is not None:
            # A suffix range ("-N") means the last N bytes.
            start = -end
            end = None
        if start is not None and end is not None:
            # HTTP end is inclusive; a slice end is exclusive.
            end += 1
            if start >= end:
                raise ValueError('start cannot be after end')
        if start is end is None:
            # "bytes=-" carries no usable bounds.
            raise ValueError('No start or end of range specified')
    return slice(start, end, 1)
|
def compute_backoff(attempts, *, factor=5, jitter=True, max_backoff=2000, max_exponent=32):
    """Compute an exponential backoff value based on some number of attempts.

    Parameters:
        attempts (int): The number of attempts there have been so far.
        factor (int): The number of milliseconds to multiply each backoff by.
        jitter (bool): Whether to randomize the backoff (uniformly within
            [backoff/2, backoff]).
        max_backoff (int): The max number of milliseconds to backoff by.
        max_exponent (int): The maximum backoff exponent.

    Returns:
        tuple: The new number of attempts and the backoff in milliseconds.
    """
    backoff = min(factor * 2 ** min(attempts, max_exponent), max_backoff)
    if jitter:
        # Spread retries out: halve, then add a random slice back.
        half = backoff / 2
        backoff = int(half + uniform(0, half))
    return attempts + 1, backoff
|
def localization_merge_back(updated_localizable_file, old_translated_file, new_translated_file, merged_translated_file):
    """Generate a file merging the old translations and the new ones.

    Args:
        updated_localizable_file (str): The path to the updated localization
            strings file, i.e. the strings that require translation.
        old_translated_file (str): The path to the strings file containing
            the previously translated strings.
        new_translated_file (str): The path to the strings file containing
            the newly translated strings.
        merged_translated_file (str): The path to the output file with the
            merged translations.
    """
    output_file_elements = []
    old_entries = generate_localization_key_to_entry_dictionary_from_file(old_translated_file)
    new_entries = generate_localization_key_to_entry_dictionary_from_file(new_translated_file)
    f = open_strings_file(updated_localizable_file, "r")
    try:
        for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(f):
            if len(header_comment) > 0:
                output_file_elements.append(Comment(header_comment))
            # Prefer a fresh translation matched by value, then fall back to
            # key matches in the old and then the new translation files.
            if value in new_entries:
                translation_value = new_entries[value].value
            elif key in old_entries:
                translation_value = old_entries[key].value
            elif key in new_entries:
                translation_value = new_entries[key].value
            else:
                translation_value = None
            if translation_value is not None:
                output_file_elements.append(LocalizationEntry(comments, key, translation_value))
    finally:
        # Close the input even if extraction raises (original leaked it on error).
        f.close()
    write_file_elements_to_strings_file(merged_translated_file, output_file_elements)
|
def forModule(cls, name):
    """Return an instance of this class representing the module of the given name.

    If the given module name is "__main__", it will be translated to the actual
    file name of the top-level script without the .py or .pyc extension. This
    method assumes that the module with the specified name has already been
    loaded.

    :param str name: name of an already-imported module (a key of sys.modules).
    :return: instance built with the module's directory, dotted name and a flag
        saying whether it was loaded from the active virtualenv.
    :raises Exception: if the module file does not end in .py/.pyc, or the
        computed module directory does not exist.
    """
    module = sys.modules[name]
    # Work on the path as a list of components so package directories can be
    # popped off one at a time below.
    filePath = os.path.abspath(module.__file__)
    filePath = filePath.split(os.path.sep)
    filePath[-1], extension = os.path.splitext(filePath[-1])
    if not extension in ('.py', '.pyc'):
        raise Exception('The name of a user script/module must end in .py or .pyc.')
    if name == '__main__':
        log.debug("Discovering real name of module")
        # User script/module was invoked as the main program
        if module.__package__:
            # Invoked as a module via `python -m foo.bar`: rebuild the dotted
            # name by walking the package chain back up the file path.
            log.debug("Script was invoked as a module")
            name = [filePath.pop()]
            for package in reversed(module.__package__.split('.')):
                dirPathTail = filePath.pop()
                assert dirPathTail == package
                name.append(dirPathTail)
            name = '.'.join(reversed(name))
            dirPath = os.path.sep.join(filePath)
        else:
            # Invoked as a script via `python foo/bar.py`
            name = filePath.pop()
            dirPath = os.path.sep.join(filePath)
            cls._check_conflict(dirPath, name)
    else:
        # User module was imported. Determine the directory containing the
        # top-level package.
        if filePath[-1] == '__init__':
            # module is a subpackage
            filePath.pop()
        for package in reversed(name.split('.')):
            dirPathTail = filePath.pop()
            assert dirPathTail == package
        dirPath = os.path.sep.join(filePath)
    log.debug("Module dir is %s", dirPath)
    if not os.path.isdir(dirPath):
        raise Exception('Bad directory path %s for module %s. Note that hot-deployment does not support .egg-link files yet, or scripts located in the root directory.' % (dirPath, name))
    fromVirtualEnv = inVirtualEnv() and dirPath.startswith(sys.prefix)
    return cls(dirPath=dirPath, name=name, fromVirtualEnv=fromVirtualEnv)
|
def consume_keys_asynchronous_processes(self):
    """Work through the keys to look up asynchronously using multiple processes.

    Starts up to 4x the CPU count of worker processes (never more than the
    number of queued keys), dispatches ``self.data_worker`` to each without
    blocking, then waits for the pool to drain.
    """
    print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
    # Never start more workers than there are keys waiting in the queue.
    jobs = min(multiprocessing.cpu_count() * 4, self.input_queue.qsize())
    pool = multiprocessing.Pool(processes=jobs, maxtasksperchild=10)
    for _ in range(jobs):
        # apply_async, not the blocking apply(): apply() waits for each
        # worker to finish, which made the original run the workers serially.
        pool.apply_async(self.data_worker, [], self.worker_args)
    pool.close()
    pool.join()
|
def position_scales(self):
    """Return a list of the position scales that are present"""
    def _is_position(scale):
        # A position scale is one that maps the x or y aesthetic.
        return 'x' in scale.aesthetics or 'y' in scale.aesthetics
    return Scales([scale for scale in self if _is_position(scale)])
|
def idna_encode(host):
    '''Encode hostname as internationalized domain name (IDN) according
    to RFC 3490.

    :param host: hostname; may be a (Python 2) unicode string.
    :return: tuple ``(host, changed)`` where ``changed`` is True only when
        the IDNA-encoded form differs from the input.
    :raise: UnicodeError if hostname is not properly IDN encoded.
    '''
    # NOTE(review): `unicode` is the Python 2 text type — this function is a
    # pass-through for byte strings and predates Python 3.
    if host and isinstance(host, unicode):
        try:
            # Pure-ASCII hostnames need no IDNA transformation.
            host.encode('ascii')
            return host, False
        except UnicodeError:
            # Non-ASCII hostname: apply the IDNA codec and report whether
            # the encoded form differs from the original.
            uhost = host.encode('idna').decode('ascii')
            return uhost, uhost != host
    return host, False
|
def get_admin_urls_for_registration(self):
    """Utilised by Wagtail's 'register_admin_urls' hook to register urls for
    the views that this class offers.

    Always registers index/create/edit/confirm_delete patterns; adds the
    inspect pattern when inspect is enabled, and page-tree patterns
    (choose_parent, unpublish, copy) for page models.
    """
    urls = (url(get_url_pattern(self.opts), self.index_view, name=get_url_name(self.opts)), url(get_url_pattern(self.opts, 'create'), self.create_view, name=get_url_name(self.opts, 'create')), url(get_object_specific_url_pattern(self.opts, 'edit'), self.edit_view, name=get_url_name(self.opts, 'edit')), url(get_object_specific_url_pattern(self.opts, 'confirm_delete'), self.confirm_delete_view, name=get_url_name(self.opts, 'confirm_delete')),)
    if self.inspect_view_enabled:
        # Inspect view is opt-in per model admin.
        urls = urls + (url(get_object_specific_url_pattern(self.opts, 'inspect'), self.inspect_view, name=get_url_name(self.opts, 'inspect')),)
    if self.is_pagemodel:
        # Page models get extra tree/publication management views.
        urls = urls + (url(get_url_pattern(self.opts, 'choose_parent'), self.choose_parent_view, name=get_url_name(self.opts, 'choose_parent')), url(get_object_specific_url_pattern(self.opts, 'unpublish'), self.unpublish_view, name=get_url_name(self.opts, 'unpublish')), url(get_object_specific_url_pattern(self.opts, 'copy'), self.copy_view, name=get_url_name(self.opts, 'copy')),)
    return urls
def construct_main_menu(self, request, menu_items):
    """Deprecated hook shim: emit a DeprecationWarning and return the
    menu items unchanged."""
    deprecation_message = ("The 'construct_main_menu' method is now deprecated. You " "should also remove the construct_main_menu hook from " "wagtail_hooks.py in your app folder.")
    warnings.warn(deprecation_message, DeprecationWarning)
    return menu_items
|
def send_file(self, sender, receiver_type, receiver_id, media_id):
    """Send a file message.

    Reference:
    https://qydev.weixin.qq.com/wiki/index.php?title=企业会话接口说明

    :param sender: user id of the sender
    :param receiver_type: receiver type: single | group (direct chat | group chat)
    :param receiver_id: receiver value, userid | chatid (member id | chat id)
    :param media_id: file id returned by the media-upload API; the file
        must be larger than 4 bytes
    :return: parsed JSON response
    """
    payload = {
        'receiver': {'type': receiver_type, 'id': receiver_id},
        'sender': sender,
        'msgtype': 'file',
        'file': {'media_id': media_id},
    }
    return self._post('chat/send', data=payload)
|
def makePickle(self, record):
    """Convert a :class:`logging.LogRecord` into a bytes object
    representing a GELF log

    :param record: :class:`logging.LogRecord` to convert into a
        Graylog GELF log.
    :type record: logging.LogRecord
    :return: A bytes object representing a GELF log.
    :rtype: bytes
    """
    payload = self._pack_gelf_dict(self._make_gelf_dict(record))
    if self.compress:
        return zlib.compress(payload)
    return payload
|
def _apply_snap_off(self, queue=None):
    r"""Add all the throats to the queue with snap off pressure

    This is probably wrong!!!! Each one needs to start a new cluster.

    :param queue: heap to push snap-off events onto; defaults to
        ``self.queue[0]`` when not given.
    """
    net = self.project.network
    phase = self.project.find_phase(self)
    # Name of the phase property that stores snap-off pressures.
    snap_off = self.settings['snap_off']
    if queue is None:
        queue = self.queue[0]
    try:
        Pc_snap_off = phase[snap_off]
        logger.info("Adding snap off pressures to queue")
        for T in net.throats():
            # Skip throats with no defined snap-off pressure (NaN entries).
            if not np.isnan(Pc_snap_off[T]):
                hq.heappush(queue, [Pc_snap_off[T], T, 'throat'])
    except KeyError:
        # The phase simply lacks the property; warn instead of failing.
        logger.warning("Phase " + phase.name + " doesn't have " + "property " + snap_off)
|
def _get_specifications(specifications):
    """Computes the list of strings corresponding to the given specifications

    :param specifications: A string, a class or a list of specifications
    :return: A list of strings
    :raise ValueError: Invalid specification found
    """
    if not specifications or specifications is object:
        raise ValueError("No specifications given")
    if inspect.isclass(specifications):
        if not Provides.USE_MODULE_QUALNAME:
            # Legacy behavior: use the bare class name.
            return [specifications.__name__]
        if sys.version_info < (3, 3, 0):
            raise ValueError("Qualified name capability requires Python 3.3+")
        # Qualify the class name with its module when one is set.
        module_name = specifications.__module__
        if not module_name:
            return [specifications.__qualname__]
        return ["{0}.{1}".format(module_name, specifications.__qualname__)]
    if is_string(specifications):
        # Specification given directly by name.
        spec_name = specifications.strip()
        if not spec_name:
            raise ValueError("Empty specification given")
        return [spec_name]
    if isinstance(specifications, (list, tuple)):
        # Normalize each element recursively and flatten the result.
        flattened = []
        for item in specifications:
            flattened.extend(_get_specifications(item))
        return flattened
    raise ValueError("Unhandled specifications type : {0}".format(type(specifications).__name__))
|
def vol_per_rev_3_stop(color="", inner_diameter=0):
    """Return the volume per revolution of an Ismatec 6 roller pump
    given the inner diameter (ID) of 3-stop tubing. The calculation is
    interpolated from the table found at
    http://www.ismatec.com/int_e/pumps/t_mini_s_ms_ca/tubing_msca2.htm.

    Note:
        1. Either input a string as the tubing color code or a number as the
           tubing inner diameter. If both are given, the function will default
           to using the color.
        2. The calculation is interpolated for inner diameters between 0.13
           and 3.17 mm. Accuracy is not guaranteed for tubes with smaller or
           larger diameters.

    :param color: Color code of the Ismatec 3-stop tubing
    :type color: string
    :param inner_diameter: Inner diameter of the Ismatec 3-stop tubing. Results will be most accurate for inner diameters between 0.13 and 3.17 mm.
    :type inner_diameter: float
    :return: Volume per revolution output by a 6-roller pump through the 3-stop tubing (mL/rev)
    :rtype: float

    :Examples:

    >>> from aguaclara.research.peristaltic_pump import vol_per_rev_3_stop
    >>> from aguaclara.core.units import unit_registry as u
    >>> round(vol_per_rev_3_stop(color="yellow-blue"), 6)
    <Quantity(0.148846, 'milliliter / rev')>
    >>> round(vol_per_rev_3_stop(inner_diameter=.20*u.mm), 6)
    <Quantity(0.003116, 'milliliter / rev')>
    """
    # A color code takes precedence over an explicit inner diameter.
    if color != "":
        inner_diameter = ID_colored_tube(color)
    # Effective travel per revolution, corrected by the empirical
    # non-linearity constant, times the tube's cross-sectional area.
    travel_per_rev = (R_pump * 2 * np.pi - k_nonlinear * inner_diameter) / u.rev
    cross_section = np.pi * inner_diameter ** 2 / 4
    return (travel_per_rev * cross_section).to(u.mL / u.rev)
|
def delete_os_dummy_rtr_nwk(self, rtr_id, net_id, subnet_id):
    """Delete the dummy interface to the router.

    Detaches the subnet from the router first; only when that succeeds are
    the network and all its subnets removed.
    """
    detached = self.os_helper.delete_intf_router(None, None, rtr_id, {subnet_id})
    if not detached:
        return detached
    return self.os_helper.delete_network_all_subnets(net_id)
|
def pack_rpc_response(response=None, exception=None):
    """Convert a response payload or exception to a status code and payload.

    This function will convert an Exception raised by an RPC implementation
    to the corresponding status code.
    """
    payload = response if response is not None else bytes()
    if exception is None:
        # Success: bit 6 set; bit 7 additionally flags a non-empty payload.
        status = (1 << 6)
        if payload:
            status |= (1 << 7)
    elif isinstance(exception, (RPCInvalidIDError, RPCNotFoundError)):
        status = 2
    elif isinstance(exception, BusyRPCResponse):
        status = 0
    elif isinstance(exception, TileNotFoundError):
        status = 0xFF
    elif isinstance(exception, RPCErrorCode):
        # Application error: success bit plus the low 6 bits of the code.
        status = (1 << 6) | (exception.params['code'] & ((1 << 6) - 1))
    else:
        status = 3
    return status, payload
|
def clean(self, value):
    """Clean

    Goes through each of the values in the dict, cleans it, stores it, and
    returns a new dict

    Arguments:
        value {dict} -- The value to clean

    Returns:
        dict
    """
    # Optional nodes accept a missing value outright.
    if value is None and self._optional:
        return None
    if not isinstance(value, dict):
        raise ValueError('value')
    cleaned = {}
    # Delegate each field to its node; unknown fields become ValueErrors.
    for field in value:
        try:
            cleaned[field] = self._nodes[field].clean(value[field])
        except KeyError:
            raise ValueError('%s is not a valid node in the parent' % field)
    return cleaned
|
def request(self, path, data=None, method='GET'):
    """Convenience Facebook request function.

    Utility function to request resources via the graph API, with the
    format expected by Facebook.
    """
    url = '{0}{1}?access_token={2}'.format('https://graph.facebook.com', path, self['oauth_token'])
    graph_request = Request(url, data=data)
    # Request only supports GET/POST natively; override the verb directly.
    graph_request.get_method = lambda: method
    return loads(urlopen(graph_request).read())
|
def _get_tag_query_tag(self, sampler, wmi_obj, tag_query):
    """Design a query based on the given WMIObject to extract a tag.

    :param sampler: WMI sampler whose connection settings are reused.
    :param wmi_obj: WMI object the tag query is derived from.
    :param tag_query: tag query specification to run.
    Returns: tag or TagQueryUniquenessFailure exception.
    """
    self.log.debug(u"`tag_queries` parameter found." " wmi_object={wmi_obj} - query={tag_query}".format(wmi_obj=wmi_obj, tag_query=tag_query))
    # Extract query information
    target_class, target_property, filters = self._format_tag_query(sampler, wmi_obj, tag_query)
    # Create a specific sampler reusing the parent sampler's connection.
    tag_query_sampler = WMISampler(self.log, target_class, [target_property], filters=filters, **sampler.connection)
    tag_query_sampler.sample()
    # The query must return exactly one row to yield an unambiguous tag.
    self._raise_on_invalid_tag_query_result(tag_query_sampler, wmi_obj, tag_query)
    # Normalize the value: lowercase, whitespace collapsed to underscores.
    link_value = str(tag_query_sampler[0][target_property]).lower()
    tag = "{tag_name}:{tag_value}".format(tag_name=target_property.lower(), tag_value="_".join(link_value.split()))
    self.log.debug(u"Extracted `tag_queries` tag: '{tag}'".format(tag=tag))
    return tag
|
def _remove_refs ( self , keys ) :
"""Removes all references to all entries in keys . This does not
cascade !
: param Iterable [ _ ReferenceKey ] keys : The keys to remove ."""
|
# remove direct refs
for key in keys :
del self . relations [ key ]
# then remove all entries from each child
for cached in self . relations . values ( ) :
cached . release_references ( keys )
|
def deserialize(cls, string):
    """Reconstruct a previously serialized string back into an instance of a ``CacheModel``."""
    payload = json.loads(string)
    # Convert each raw cached value through its field's cache_to_python hook.
    converted = {name: getattr(cls, name).cache_to_python(raw) for name, raw in payload.items()}
    return cls(**converted)
|
def DeserializeForImport(self, reader):
    """Deserialize full object.

    Args:
        reader (neo.IO.BinaryReader):

    Raises:
        Exception: if the deserialized block contains no transactions.
    """
    # Read the common block header fields first.
    super(Block, self).Deserialize(reader)
    self.Transactions = []
    # The transaction list is prefixed with a var-int count.
    transaction_length = reader.ReadVarInt()
    for i in range(0, transaction_length):
        tx = Transaction.DeserializeFrom(reader)
        self.Transactions.append(tx)
    # A valid block must carry at least one transaction.
    if len(self.Transactions) < 1:
        raise Exception('Invalid format %s ' % self.Index)
|
def verify(self, payload):
    """Verify payload authenticity via the supplied authenticator"""
    # Without an authenticator configured, accept the payload as-is.
    if not self.authenticator:
        return payload
    try:
        self.authenticator.auth(payload)
        return self.authenticator.unsigned(payload)
    except AuthenticatorInvalidSignature:
        # Signature failures propagate unchanged.
        raise
    except Exception as exc:
        # Any other failure is wrapped in a generic authentication error.
        raise AuthenticateError(str(exc))
|
def get_volume():
    '''Get the volume.

    Get the current volume.

    Returns:
        int: The current volume (percentage, between 0 and 100).
        Returns None on Windows, where lookup is not implemented yet.
    '''
    if system.get_name() == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif system.get_name() == 'mac':
        volume = system.get_cmd_out(['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol'])
        # NOTE(review): osascript's "output volume" is normally 0-100; the
        # *10 scaling assumes a 0-10 scale — confirm on a real Mac.
        return int(volume) * 10
    else:
        # Linux / Unix: parse the Master channel percentage out of `amixer`.
        volume = system.get_cmd_out(('amixer get Master |grep % |awk \'{print $5}\'|' 'sed -e \'s/\[//\' -e \'s/\]//\' | head -n1'))
        return int(volume.replace('%', ''))
|
def _jit_give_bond_array(pos, bond_radii, self_bonding_allowed=False):
    """Calculate a boolean array where ``A[i, j] is True`` indicates a
    bond between the i-th and j-th atom.

    :param pos: positions array indexed as pos[i, 0..2] — assumes shape
        (n_atoms, 3); TODO confirm dtype at the call site.
    :param bond_radii: per-atom bond radius, indexed in step with ``pos``.
    :param self_bonding_allowed: when False (default) the diagonal is cleared
        so atoms never bond to themselves.
    :return: symmetric (n, n) boolean array of bond flags.
    """
    n = pos.shape[0]
    bond_array = np.empty((n, n), dtype=nb.boolean)
    # Fill the upper triangle and mirror it: atoms bond when their squared
    # distance does not exceed the squared sum of their bond radii.
    for i in range(n):
        for j in range(i, n):
            D = 0
            for h in range(3):
                D += (pos[i, h] - pos[j, h]) ** 2
            B = (bond_radii[i] + bond_radii[j]) ** 2
            bond_array[i, j] = (B - D) >= 0
            bond_array[j, i] = bond_array[i, j]
    if not self_bonding_allowed:
        # Clear the diagonal (distance 0 always passes the test above).
        for i in range(n):
            bond_array[i, i] = False
    return bond_array
|
def enable_napps(cls, napps):
    """Enable a list of NApps.

    Args:
        napps (list): List of NApps.
    """
    manager = NAppsManager()
    for napp in napps:
        manager.set_napp(*napp)
        LOG.info('NApp %s:', manager.napp_id)
        cls.enable_napp(manager)
|
def values(self, section=None):
    """Provide dict like values method

    :param section: optional section name; falls back to ``self.section``.
    :return: the values of the selected section, or of the whole config
        when no section applies.
    """
    if not section and self.section:
        section = self.section
    # With a section, look it up (empty dict fallback); otherwise use all.
    selected = self.config.get(section, {}) if section else self.config
    return selected.values()
|
def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray:
    """Creates a color map for values in array

    :param array: color map to interpolate
    :param x: number of colors
    :return: interpolated color map
    """
    # Spacing (in output slots) between consecutive source colors.
    step = x / (len(array) - 1)
    interpolated = []
    for i in range(x):
        if i % step == 0:
            # Exactly on a source marker: copy it through unchanged.
            interpolated.append(array[int(i / step)])
        else:
            # Linear blend between the two bracketing source colors.
            lower = array[math.floor(i / step)]
            upper = array[math.ceil(i / step)]
            fraction = (i % step) / step
            interpolated.append(numpy.rint(lower + (upper - lower) * fraction))
    # Force the final entry to be the last source color.
    interpolated[-1] = array[-1]
    return numpy.array(interpolated).astype(numpy.uint8)
|
def netmiko_fun(fun, *args, **kwargs):
    '''.. versionadded:: 2019.2.0

    Call an arbitrary function from the :mod:`Netmiko <salt.modules.netmiko_mod>`
    module, passing the authentication details from the existing NAPALM
    connection.

    fun
        The name of the function from the :mod:`Netmiko <salt.modules.netmiko_mod>`
        to invoke.

    args
        List of arguments to send to the execution function specified in
        ``fun``.

    kwargs
        Key-value arguments to send to the execution function specified in
        ``fun``.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.netmiko_fun send_command 'show version'
    '''
    # Accept either the bare function name or the fully-qualified one.
    if 'netmiko.' not in fun:
        fun = 'netmiko.{fun}'.format(fun=fun)
    # Reuse the NAPALM connection's credentials for the Netmiko call.
    netmiko_kwargs = netmiko_args()
    kwargs.update(netmiko_kwargs)
    return __salt__[fun](*args, **kwargs)
|
def ex_literal(val):
    """An int, float, long, bool, string, or None literal with the given
    value.

    :param val: the Python value to embed.
    :return: an ``ast`` expression node that evaluates to ``val``.
    :raises TypeError: for unsupported value types.
    """
    if val is None:
        return ast.Name('None', ast.Load())
    elif isinstance(val, bool):
        # Must be tested before int: bool is a subclass of int, so the int
        # branch used to swallow True/False. Also use str(), not bytes():
        # bytes(True) is b'\x00', not the name 'True'.
        return ast.Name(str(val), ast.Load())
    elif isinstance(val, (int, float)):
        # float added to match the documented contract.
        return ast.Num(val)
    elif isinstance(val, str):
        return ast.Str(val)
    raise TypeError(u'no literal for {0}'.format(type(val)))
|
def add_signer(self, signer):
    """Simple dict of {'name': 'John Doe', 'email': 'name@example.com'}

    :param signer: a validated :class:`HelloSigner` instance to append.
    :raises Exception: when signer is not a HelloSigner, or fails validation.
    """
    # Type-check first: the original called signer.validate() before knowing
    # the type, which could raise AttributeError and mask the real problem.
    if not isinstance(signer, HelloSigner):
        raise Exception("add_signer signer must be an instance of class HelloSigner")
    if not signer.validate():
        raise Exception("HelloSigner Errors %s" % (signer.errors,))
    self.signers.append(signer)
|
def get_version():
    """Get version of pydf and wkhtmltopdf binary

    :return: version string
    """
    try:
        wk_version = _string_execute('-V')
    except Exception as exc:
        # Catch everything so a broken binary still yields a version report.
        wk_version = '{}: {}'.format(exc.__class__.__name__, exc)
    return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)
|
def submit(self, fn, *args, **kwargs):
    """Submit an operation

    In synchronous mode the callable runs immediately and its result is
    returned; asynchronous submission is not implemented.
    """
    if self.asynchronous:
        raise NotImplementedError
    return fn(*args, **kwargs)
|
def parse_list_multipart_uploads(data, bucket_name):
    """Parser for list multipart uploads response.

    :param data: Response data for list multipart uploads.
    :param bucket_name: Response for the bucket.
    :return: Replies back four distinctive components.
        - List of :class:`IncompleteUpload <IncompleteUpload>`
        - True if list is truncated, False otherwise.
        - Object name marker for the next request.
        - Upload id marker for the next request.
    """
    root = S3Element.fromstring('ListMultipartUploadsResult', data)
    # S3 encodes truncation as the literal string "true"/"false".
    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # Continuation markers may be absent on the last page, hence strict=False.
    key_marker = root.get_urldecoded_elem_text('NextKeyMarker', strict=False)
    upload_id_marker = root.get_child_text('NextUploadIdMarker', strict=False)
    uploads = [IncompleteUpload(bucket_name, upload.get_urldecoded_elem_text('Key'), upload.get_child_text('UploadId'), upload.get_localized_time_elem('Initiated')) for upload in root.findall('Upload')]
    return uploads, is_truncated, key_marker, upload_id_marker
|
def post_message(driver, message, msg_dur, style="info"):
    """A helper method to post a message on the screen with Messenger.
    (Should only be called from post_message() in base_case.py)

    :param driver: active WebDriver instance used to execute the JS.
    :param message: text to display; escaped before embedding in the script.
    :param msg_dur: seconds to show the message; falls back to the default.
    :param style: Messenger theme type (e.g. "info").
    """
    if not msg_dur:
        msg_dur = settings.DEFAULT_MESSAGE_DURATION
    msg_dur = float(msg_dur)
    # Escape the message so it can be safely embedded in the JS string below.
    message = re.escape(message)
    message = escape_quotes_if_needed(message)
    messenger_script = ('''Messenger().post({message: "%s", type: "%s", ''' '''hideAfter: %s, hideOnNavigate: true});''' % (message, style, msg_dur))
    try:
        driver.execute_script(messenger_script)
    except Exception:
        # Messenger may not be injected yet: activate it and retry, then
        # once more with settle delays before letting the error surface.
        activate_messenger(driver)
        set_messenger_theme(driver)
        try:
            driver.execute_script(messenger_script)
        except Exception:
            time.sleep(0.2)
            activate_messenger(driver)
            time.sleep(0.2)
            set_messenger_theme(driver)
            time.sleep(0.5)
            driver.execute_script(messenger_script)
|
def max_roi_pooling(attrs, inputs, proto_obj):
    """Max ROI Pooling.

    Maps the ONNX attribute names onto MXNet's ROIPooling operator.
    """
    attribute_mapping = {'pooled_shape': 'pooled_size', 'spatial_scale': 'spatial_scale'}
    fixed_attrs = translation_utils._fix_attribute_names(attrs, attribute_mapping)
    return 'ROIPooling', fixed_attrs, inputs
|
def select_visualization(n):
    """get viz choice based on numerical index

    :param n: 1-based index (string or int) of the visualization type.
    :return: the corresponding 0-based index into ``VISUALIZATIONS_LIST``.
    :raises SystemExit: when the input is not a valid 1-based index.
    """
    try:
        n = int(n) - 1
        if n < 0:
            # A negative index would silently wrap around to the end of the
            # list, so treat anything below 1 as invalid input.
            raise IndexError(n)
        test = VISUALIZATIONS_LIST[n]
        # throw exception if number wrong
        return n
    except (ValueError, TypeError, IndexError):
        # Narrowed from a bare except so unrelated bugs are not swallowed.
        printDebug("Invalid viz-type option. Valid options are:", "red")
        show_types()
        raise SystemExit(1)
|
def recall(y, y_pred):
    """Recall score

    recall = true_positives / (true_positives + false_negatives)

    Parameters:
        y : vector, shape (n_samples,)
            The target labels.
        y_pred : vector, shape (n_samples,)
            The predicted labels.

    Returns:
        recall : float
    """
    tp = true_positives(y, y_pred)
    fn = false_negatives(y, y_pred)
    if tp + fn == 0:
        # No positive samples at all: avoid ZeroDivisionError and follow
        # the common convention of reporting 0 recall.
        return 0.0
    return tp / (tp + fn)
|
def save(self, obj, label, format='text'):
    """Save or update obj as pkl file with name label

    format can be 'text' or 'pickle'.

    :param obj: object to persist under ``self.statedir``.
    :param label: file name within the state directory.
    :param format: 'text' writes str(obj); 'pickle' writes a binary pickle.
    """
    # initialize hidden state directory
    objloc = '{0}/{1}'.format(self.statedir, label)
    if format == 'pickle':
        # Pickle is a binary protocol, so the file must be opened in binary
        # mode (the original text-mode 'w' breaks under Python 3).
        with open(objloc, 'wb') as fp:
            pickle.dump(obj, fp)
    elif format == 'text':
        with open(objloc, 'w') as fp:
            fp.write(str(obj))
    else:
        # Report the bad format without creating/truncating the file.
        print('Format {0} not recognized. Please choose either pickle or text.'.format(format))
        return
    print('Saving {0} to label {1}'.format(obj, label))
|
def load(file, check_version=True):
    """Load VOEvent from file object.

    A simple wrapper to read a file before passing the contents to
    :py:func:`.loads`. Use with an open file object, e.g.::

        with open('/path/to/voevent.xml', 'rb') as f:
            v = vp.load(f)

    Args:
        file (io.IOBase): An open file object (binary mode preferred), see also
            http://lxml.de/FAQ.html:
            "Can lxml parse from file objects opened in unicode/text mode?"
        check_version (bool): (Default=True) Checks that the VOEvent is of a
            supported schema version - currently only v2.0 is supported.

    Returns:
        :py:class:`Voevent`: Root-node of the etree.
    """
    contents = file.read()
    return loads(contents, check_version)
|
def delete_volume_attachment(self, name, **kwargs):  # noqa: E501
    """delete_volume_attachment  # noqa: E501

    delete a VolumeAttachment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_volume_attachment(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the VolumeAttachment (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous call: hand back the request thread directly.
        return self.delete_volume_attachment_with_http_info(name, **kwargs)  # noqa: E501
    data = self.delete_volume_attachment_with_http_info(name, **kwargs)  # noqa: E501
    return data
|
def syllabify(word):
    '''Syllabify the given word, whether simplex or complex.

    Yields (syllabification, rules) pairs, padded with ('', '') entries so
    that a fixed total number of alternatives is always produced.
    '''
    word = split(word)
    # detect any non-delimited compounds
    is_compound = bool(re.search(r'-| |\.', word))
    # Avoid shadowing this generator's own name with the chosen helper.
    _do_syllabify = _syllabify_compound if is_compound else _syllabify
    syll, rules = _do_syllabify(word)
    yield syll, rules
    remaining = 3
    if 'T4' in rules:
        # Also offer the alternative syllabification with rule T4 disabled.
        yield _do_syllabify(word, T4=False)
        remaining -= 1
    # Pad with empty syllabifications so callers always get the same count.
    # The original looped `for n in range(3)`, ignoring the computed count
    # and yielding one pad too many whenever the T4 variant was emitted.
    for _ in range(remaining):
        yield '', ''
|
def GetPointWithDistanceTraveled(self, shape_dist_traveled):
    """Returns a point on the shape polyline with the input shape_dist_traveled.

    Args:
        shape_dist_traveled: The input shape_dist_traveled.

    Returns:
        The shape point as a tuple (lat, lng, shape_dist_traveled), where lat
        and lng is the location of the shape point, and shape_dist_traveled is
        an increasing metric representing the distance traveled along the
        shape. Returns None if there is data error in shape.
    """
    if not self.distance:
        return None
    # Clamp to the endpoints when the query falls outside the recorded range.
    if shape_dist_traveled <= self.distance[0]:
        return self.points[0]
    if shape_dist_traveled >= self.distance[-1]:
        return self.points[-1]
    index = bisect.bisect(self.distance, shape_dist_traveled)
    lat0, lng0, dist0 = self.points[index - 1]
    lat1, lng1, dist1 = self.points[index]
    # Linearly interpolate between the bracketing shape points:
    #   (lat0,lng0) ---- (lat,lng) ---- (lat1,lng1)
    #      dist0     ca    query    bc     dist1
    ca = shape_dist_traveled - dist0
    bc = dist1 - shape_dist_traveled
    ba = bc + ca
    if ba == 0:
        # Duplicate distances indicate bad shape data; bail out rather than
        # divide by zero.
        return None
    # Weighted average only; not valid across longitude 180 but a fine
    # approximation over short segments.
    lat = (lat1 * ca + lat0 * bc) / ba
    lng = (lng1 * ca + lng0 * bc) / ba
    return (lat, lng, shape_dist_traveled)
|
def _zforce(self, R, z, phi=0., t=0.):
    """NAME:
        _zforce
    PURPOSE:
        evaluate the vertical force at (R, z, phi)
    INPUT:
        R - Cylindrical Galactocentric radius
        z - vertical height
        phi - azimuth
        t - time
    OUTPUT:
        vertical force at (R, z, phi)
    HISTORY:
        2016-06-06 - Written - Aladdin
    """
    if not self.isNonAxi and phi is None:
        phi = 0.
    r, theta, phi = bovy_coords.cyl_to_spher(R, z, phi)
    # Chain-rule factors for d/dz in spherical coordinates (here x = z):
    dr_dz = nu.divide(z, r) ;
    dtheta_dz = nu.divide(-R, r ** 2) ;
    # Azimuth does not change with z.
    dphi_dz = 0
    return self._computeforceArray(dr_dz, dtheta_dz, dphi_dz, R, z, phi)
|
def blank(columns=1, name=None):
    """Creates the grammar for a blank field.

    These are for constant empty strings which should be ignored, as they are
    used just as fillers.

    :param columns: number of columns, which is the required number of
        whitespaces
    :param name: name for the field
    :return: grammar for the blank field
    """
    if name is None:
        name = 'Blank Field'
    field = pp.Regex('[ ]{' + str(columns) + '}')
    field.leaveWhitespace()
    # ParserElement.suppress() returns a new Suppress wrapper instead of
    # mutating in place; the original discarded the result, so the blanks
    # were never actually suppressed from the parse results.
    field = field.suppress()
    field.setName(name)
    return field
|
def place_module(self, module_name: str) -> Optional[str]:
    """Tries to determine if a module is a python std import, third party
    import, or project code.

    Delegates to the configured finder; when the finder cannot decide, the
    module is assumed to be project code.
    """
    finder = self.finder
    return finder.find(module_name)
|
def set_timestamp(cls, filename: str, response: HTTPResponse):
    '''Set the Last-Modified timestamp onto the given file.

    Args:
        filename: The path of the file
        response: Response
    '''
    last_modified = response.fields.get('Last-Modified')
    if not last_modified:
        return
    last_modified = email.utils.parsedate(last_modified)
    if last_modified is None:
        # parsedate() signals failure by returning None — it does not raise
        # ValueError as the original assumed — and time.mktime(None) below
        # would have crashed with a TypeError.
        _logger.error('Failed to parse date.')
        return
    last_modified = time.mktime(last_modified)
    # Keep the current access time; only set the modification time.
    os.utime(filename, (time.time(), last_modified))
|
def get_hash(self, image=None):
    '''return an md5 hash of the file based on a criteria level. This
    is intended to give the file a reasonable version. This only is
    useful for actual image files.

    Parameters
    ==========
    image: the image path to get hash for (first priority). Second
        priority is image path saved with image object, if exists.
    '''
    target = image or self.image
    if not os.path.exists(target):
        bot.warning('%s does not exist.' % target)
        return None
    digest = hashlib.md5()
    # Stream in 4K chunks so large images don't load fully into memory.
    with open(target, "rb") as stream:
        for chunk in iter(lambda: stream.read(4096), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
def set_parent(self, parent):
    '''Set the parent of this ``Node`` object. Use this carefully, otherwise you may damage the structure of this ``Tree`` object.

    Args:
        ``Node``: The new parent of this ``Node``
    '''
    if isinstance(parent, Node):
        self.parent = parent
    else:
        raise TypeError("parent must be a Node")
|
def _get_frameshift_start(self, variant_data):
    """Get starting position (AA ref index) of the last frameshift
    which affects the rest of the sequence, i.e. not offset by subsequent frameshifts

    :param variant_data: info on each variant
    :type variant_data: attrs
    :return variant data with additional field for AA index (1-based) of the frameshift start
    :rtype attrs
    """
    # DBG is a module-level debug flag — presumably defined at file top.
    if DBG:
        print("is_frameshift:{}".format(variant_data.is_frameshift))
        print("variant_start_aa:{}".format(variant_data.variant_start_aa))
    # Only frameshift variants get a frameshift_start; others pass through
    # unchanged.
    if variant_data.is_frameshift:
        variant_data.frameshift_start = variant_data.variant_start_aa
    return variant_data
|
def crop_avatar(self, filename, x, y, w, h):
    """Crop an avatar to the given region and save three scaled copies.

    :param filename: the raw image's filename; when falsy, a bundled
        default image is used instead.
    :param x: the x-pos to start crop.
    :param y: the y-pos to start crop.
    :param w: the crop width.
    :param h: the crop height.
    :return: a list of the saved file names:
        ``[filename_s, filename_m, filename_l]``.
    """
    x, y, w, h = int(x), int(y), int(w), int(h)
    sizes = current_app.config['AVATARS_SIZE_TUPLE']
    if filename:
        path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename)
    else:
        path = os.path.join(self.root_path, 'static/default/default_l.jpg')
    # (fixed) removed stray debug print(path) left over from development
    raw_img = Image.open(path)
    # Downscale very large sources before cropping so crop coordinates
    # computed against the preview width line up with the actual pixels.
    base_width = current_app.config['AVATARS_CROP_BASE_WIDTH']
    if raw_img.size[0] >= base_width:
        raw_img = self.resize_avatar(raw_img, base_width=base_width)
    cropped_img = raw_img.crop((x, y, x + w, y + h))
    stem = uuid4().hex
    filenames = []
    # One pass per target size: small, medium, large (order matches `sizes`).
    for suffix, width in zip(('_s', '_m', '_l'), sizes):
        scaled = self.resize_avatar(cropped_img, base_width=width)
        name = stem + suffix + '.png'
        scaled.save(os.path.join(current_app.config['AVATARS_SAVE_PATH'], name),
                    optimize=True, quality=85)
        filenames.append(name)
    return filenames
|
def get_world_list_url_tibiadata(cls, world):
    """Get the TibiaData.com URL for the guild list of a specific world.

    Parameters
    world : :class:`str`
        The name of the world.

    Returns
    :class:`str`
        The URL to the world's guild-list page.
    """
    # TibiaData expects title-case, ISO-8859-1 percent-encoded world names.
    encoded_world = urllib.parse.quote(world.title().encode('iso-8859-1'))
    return GUILD_LIST_URL_TIBIADATA % encoded_world
|
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
    """Return an iterator over alias annotation records matching the criteria.

    All arguments are optional and restrict the returned records; with no
    arguments, every alias is returned.  An argument containing ``%`` is
    matched with the SQL ``like`` operator, otherwise with equality.
    """
    def comparison_op(value):
        # SQL "like" when the caller supplied a wildcard, "=" otherwise.
        return "like" if "%" in value else "="

    if translate_ncbi_namespace is None:
        translate_ncbi_namespace = self.translate_ncbi_namespace

    clauses = []
    params = []
    if alias is not None:
        clauses.append("alias {} ?".format(comparison_op(alias)))
        params.append(alias)
    if namespace is not None:
        # issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
        # issue #31: later breaking change, translate database
        if namespace == "RefSeq":
            namespace = "NCBI"
        clauses.append("namespace {} ?".format(comparison_op(namespace)))
        params.append(namespace)
    if seq_id is not None:
        clauses.append("seq_id {} ?".format(comparison_op(seq_id)))
        params.append(seq_id)
    if current_only:
        clauses.append("is_current = 1")

    cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
    if translate_ncbi_namespace:
        # Present the internally-stored "NCBI" namespace as "RefSeq".
        cols.append("case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace")
    else:
        cols.append("namespace")

    sql = "select {cols} from seqalias".format(cols=", ".join(cols))
    if clauses:
        sql += " where " + " and ".join("(" + c + ")" for c in clauses)
    sql += " order by seq_id, namespace, alias"
    _logger.debug("Executing: " + sql)
    return self._db.execute(sql, params)
|
def loads(s, cls=BinaryQuadraticModel, vartype=None):
    """Load a COOrdinate-formatted binary quadratic model from a string."""
    lines = s.split('\n')
    return load(lines, cls=cls, vartype=vartype)
|
def generate_readme(self):
    """Print a README section demonstrating every supported way of
    supplying runtime parameters, one fenced code block per generator."""
    print("## Examples of settings runtime params")
    sections = (
        ("### Command-line parameters", self.generate_command),
        ("### Environment variables", self.generate_env),
        ("### ini file", self.generate_ini),
        ("### docker run", self.generate_docker_run),
        ("### docker compose", self.generate_docker_compose),
        ("### kubernetes", self.generate_kubernetes),
        ("### drone plugin", self.generate_drone_plugin),
    )
    for heading, emit in sections:
        print(heading)
        print("```")
        emit()
        print("```")
|
async def verify_authed(self):
    """Verify that this device is authorized to use AirPlay."""
    resp = await self._send(self.srp.verify1(), 'verify1')
    # First 32 bytes are the ATV's public secret; the rest is opaque data.
    atv_public_secret, data = resp[0:32], resp[32:]
    # TODO: what is this?
    await self._send(self.srp.verify2(atv_public_secret, data), 'verify2')
    return True
|
def patch(self, path=None, url_kwargs=None, **kwargs):
    """Send a PATCH request.

    :param path:
        The HTTP path (either absolute or relative).
    :param url_kwargs:
        Parameters to override in the generated URL. See `~hyperlink.URL`.
    :param **kwargs:
        Optional arguments that ``request`` takes.
    :return: response object
    """
    target = self._url(path, url_kwargs)
    return self._session.patch(target, **kwargs)
|
def read_data(self, dstart=None, dend=None):
    """Read data from `file` and return it as a Numpy array.

    Parameters
    dstart : int, optional
        Offset in bytes of the data field. By default, it is taken to
        be the header size as determined from reading the header.
        Backwards indexing with negative values is also supported.
        Use a value larger than the header size to extract a data subset.
    dend : int, optional
        End position in bytes until which data is read (exclusive).
        Backwards indexing with negative values is also supported.
        Use a value different from the file size to extract a data subset.

    Returns
    data : `numpy.ndarray`
        The data read from `file`.

    Raises
    ValueError
        If the resolved byte range is empty, overlaps the header, runs
        past the end of the file, or is not a whole number of items.

    See Also
    read_header
    """
    self.file.seek(0, 2)  # 2 means "from the end"
    filesize_bytes = self.file.tell()

    # Resolve `dstart`/`dend` to absolute byte offsets (negative values
    # count from the end of the file).
    if dstart is None:
        dstart_abs = int(self.header_size)
    elif dstart < 0:
        dstart_abs = filesize_bytes + int(dstart)
    else:
        dstart_abs = int(dstart)

    if dend is None:
        dend_abs = int(filesize_bytes)
    elif dend < 0:
        dend_abs = int(dend) + filesize_bytes
    else:
        dend_abs = int(dend)

    if dstart_abs >= dend_abs:
        raise ValueError('invalid `dstart` and `dend`, resulting in '
                         'absolute `dstart` >= `dend` ({} >= {})'
                         ''.format(dstart_abs, dend_abs))
    if dstart_abs < self.header_size:
        raise ValueError('invalid `dstart`, resulting in absolute '
                         '`dstart` < `header_size` ({} < {})'
                         ''.format(dstart_abs, self.header_size))
    if dend_abs > filesize_bytes:
        # Fixed: message previously printed '<' for a '>' comparison.
        raise ValueError('invalid `dend`, resulting in absolute '
                         '`dend` > `filesize_bytes` ({} > {})'
                         ''.format(dend_abs, filesize_bytes))

    # The byte range must hold a whole number of items of `data_dtype`.
    num_bytes = dend_abs - dstart_abs
    num_elems, remainder = divmod(num_bytes, self.data_dtype.itemsize)
    if remainder:
        raise ValueError('trying to read {} bytes, which is not a multiple of '
                         'the itemsize {} of the data type {}'
                         ''.format(num_bytes, self.data_dtype.itemsize,
                                   self.data_dtype))

    self.file.seek(dstart_abs)
    array = np.empty(num_elems, dtype=self.data_dtype)
    self.file.readinto(array.data)
    return array
|
def parse_error(output_dir):
    """Add contents of stderr and eplusout.err and put it in the exception message.

    :param output_dir: str, directory expected to contain ``eplusout.err``
    :return: str, a multi-line message combining captured stderr and the
        EnergyPlus error file contents
    """
    # NOTE(review): this assumes sys.stderr has been redirected to a
    # seekable *bytes* buffer by the caller -- a normal text-mode stderr
    # is not seekable and .read() would return str, which has no
    # .decode(). TODO confirm against the calling code.
    sys.stderr.seek(0)
    std_err = sys.stderr.read().decode('utf-8')
    err_file = os.path.join(output_dir, "eplusout.err")
    if os.path.isfile(err_file):
        with open(err_file, "r") as f:
            ep_err = f.read()
    else:
        # Keep going with a placeholder rather than failing while building
        # an error message.
        ep_err = "<File not found>"
    # format(**locals()) interpolates std_err, err_file and ep_err above.
    message = "\r\n{std_err}\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(**locals())
    return message
|
def data(self, index, role=Qt.DisplayRole):
    """Qt Override: provide display/alignment data for the shortcuts table."""
    row = index.row()
    if not index.isValid() or not (0 <= row < len(self.shortcuts)):
        return to_qvariant()
    shortcut = self.shortcuts[row]
    column = index.column()
    if role == Qt.DisplayRole:
        if column == CONTEXT:
            return to_qvariant(shortcut.context)
        if column == NAME:
            # Highlight the name only when this widget has focus and the
            # row is the currently selected one.
            if (self._parent == QApplication.focusWidget()
                    and self.current_index().row() == row):
                color = self.text_color_highlight
            else:
                color = self.text_color
            html = '<p style="color:{0}">{1}</p>'.format(color, self.rich_text[row])
            return to_qvariant(html)
        if column == SEQUENCE:
            sequence_text = QKeySequence(shortcut.key).toString(QKeySequence.NativeText)
            return to_qvariant(sequence_text)
        if column == SEARCH_SCORE:
            # Treating search scores as a table column simplifies sorting
            # once a score for a specific string in the finder has been
            # defined. This column should always remain hidden.
            return to_qvariant(self.scores[row])
    elif role == Qt.TextAlignmentRole:
        return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
    return to_qvariant()
|
def xml_to_string(elem, qualified_name=None, public_id=None, system_id=None):
    """Return a pretty-printed XML string for the Element.

    When ``qualified_name`` is given, a matching document type declaration
    (optionally carrying ``public_id``/``system_id``) is inserted before
    the document element.
    """
    from xml.dom import minidom
    parsed = minidom.parseString(ET.tostring(elem, 'utf-8'))
    if qualified_name:
        doctype = minidom.DOMImplementation().createDocumentType(
            qualified_name, public_id, system_id)
        parsed.insertBefore(doctype, parsed.documentElement)
    return parsed.toprettyxml(indent="  ")
|
def raw_writer(indent=None):
    """Return a raw text writer co-routine.

    Yields:
        DataEvent: serialization events to write out

    Receives :class:`amazon.ion.core.IonEvent` or ``None`` when the
    co-routine yields ``HAS_PENDING`` :class:`WriteEventType` events.
    """
    if indent is not None:
        # Only all-whitespace strings are acceptable indents.
        only_whitespace = (isinstance(indent, str)
                           and re.search(r'\A\s*\Z', indent, re.M) is not None)
        if not only_whitespace:
            raise ValueError('The indent parameter must either be None or a string containing only whitespace')
    indent_bytes = six.b(indent) if isinstance(indent, str) else indent
    return writer_trampoline(_raw_writer_coroutine(indent=indent_bytes))
|
def _get_time_override ( self ) :
"""Retrieves the datetime . time or None from the ` time _ override ` attribute ."""
|
if callable ( self . time_override ) :
time_override = self . time_override ( )
else :
time_override = self . time_override
if not isinstance ( time_override , datetime_time ) :
raise ValueError ( 'Invalid type. Must be a datetime.time instance.' )
return time_override
|
def determine_cons3rt_role_name_linux ( self ) :
"""Determines the CONS3RT _ ROLE _ NAME for this Linux system , and
Set the cons3rt _ role _ name member for this system
This method determines the CONS3RT _ ROLE _ NAME for this system
in the deployment by first checking for the environment
variable , if not set , determining the value from the
deployment properties .
: return : None
: raises : DeploymentError"""
|
log = logging . getLogger ( self . cls_logger + '.determine_cons3rt_role_name_linux' )
# Determine IP addresses for this system
log . info ( 'Determining the IPv4 addresses for this system...' )
try :
ip_addresses = get_ip_addresses ( )
except CommandError :
_ , ex , trace = sys . exc_info ( )
msg = 'Unable to get the IP address of this system, thus cannot determine the ' 'CONS3RT_ROLE_NAME\n{e}' . format ( e = str ( ex ) )
log . error ( msg )
raise DeploymentError , msg , trace
else :
log . info ( 'Found IP addresses: {a}' . format ( a = ip_addresses ) )
log . info ( 'Trying to determine IP address for eth0...' )
try :
ip_address = ip_addresses [ 'eth0' ]
except KeyError :
_ , ex , trace = sys . exc_info ( )
msg = 'Unable to determine the IP address for eth0. Found the ' 'following IP addresses: {i}\n{e}' . format ( i = ip_addresses , e = str ( ex ) )
log . error ( msg )
raise DeploymentError , msg , trace
else :
log . info ( 'Found IP address for eth0: {i}' . format ( i = ip_address ) )
pattern = '^cons3rt\.fap\.deployment\.machine.*0.internalIp=' + ip_address + '$'
try :
f = open ( self . properties_file )
except IOError :
_ , ex , trace = sys . exc_info ( )
msg = 'Could not open file {f}' . format ( f = self . properties_file )
log . error ( msg )
raise DeploymentError , msg , trace
prop_list_matched = [ ]
log . debug ( 'Searching for deployment properties matching pattern: {p}' . format ( p = pattern ) )
for line in f :
log . debug ( 'Processing deployment properties file line: {l}' . format ( l = line ) )
if line . startswith ( '#' ) :
continue
elif '=' in line :
match = re . search ( pattern , line )
if match :
log . debug ( 'Found matching prop: {l}' . format ( l = line ) )
prop_list_matched . append ( line )
log . debug ( 'Number of matching properties found: {n}' . format ( n = len ( prop_list_matched ) ) )
if len ( prop_list_matched ) == 1 :
prop_parts = prop_list_matched [ 0 ] . split ( '.' )
if len ( prop_parts ) > 5 :
self . cons3rt_role_name = prop_parts [ 4 ]
log . info ( 'Found CONS3RT_ROLE_NAME from deployment properties: {c}' . format ( c = self . cons3rt_role_name ) )
log . info ( 'Adding CONS3RT_ROLE_NAME to the current environment...' )
os . environ [ 'CONS3RT_ROLE_NAME' ] = self . cons3rt_role_name
return
else :
log . error ( 'Property found was not formatted as expected: %s' , prop_parts )
else :
log . error ( 'Did not find a unique matching deployment property' )
msg = 'Could not determine CONS3RT_ROLE_NAME from deployment properties'
log . error ( msg )
raise DeploymentError ( msg )
|
def rotate(self, angle, x=0, y=0):
    """Rotate element by given angle around given pivot.

    Parameters
    angle : float
        rotation angle in degrees
    x, y : float
        pivot coordinates in user coordinate system (defaults to top-left
        corner of the figure)
    """
    # Append the new rotation to whatever transform is already present.
    existing = self.root.get("transform") or ''
    self.root.set("transform", "%s rotate(%f %f %f)" % (existing, angle, x, y))
|
def group_structures(self, s_list, anonymous=False):
    """Group a list of structures by structural equality using ``fit``.

    Args:
        s_list ([Structure]): List of structures to be grouped
        anonymous (bool): Whether to use anonymous mode.

    Returns:
        A list of lists of matched structures.

    Assumption: if s1 == s2 but s1 != s3, then s2 and s3 will be put
    in different groups without comparison.
    """
    if self._subset:
        raise ValueError("allow_subset cannot be used with group_structures")
    original_s_list = list(s_list)
    s_list = self._process_species(s_list)

    # Pre-group by a cheap composition hash so expensive fitting only
    # runs within each bucket.
    if anonymous:
        def bucket_key(entry):
            return entry[1].composition.anonymized_formula
    else:
        def bucket_key(entry):
            return self._comparator.get_hash(entry[1].composition)

    indexed = sorted(enumerate(s_list), key=bucket_key)
    all_groups = []
    for _, bucket in itertools.groupby(indexed, key=bucket_key):
        pending = list(bucket)
        while pending:
            ref_index, ref_struct = pending.pop(0)
            if anonymous:
                hits = [j for j in range(len(pending))
                        if self.fit_anonymous(ref_struct, pending[j][1])]
            else:
                hits = [j for j in range(len(pending))
                        if self.fit(ref_struct, pending[j][1])]
            matched_ids = [ref_index] + [pending[j][0] for j in hits]
            pending = [pending[j] for j in range(len(pending)) if j not in hits]
            all_groups.append([original_s_list[j] for j in matched_ids])
    return all_groups
|
def _unschedule_sending_init_updates(self):
    """Un-schedule sending of initial updates.

    Stops the timer, if one is set, for sending initial updates.

    Returns:
        - True if a timer was stopped
        - False if the timer was already stopped and nothing was done
    """
    LOG.debug('Un-scheduling sending of initial Non-RTC UPDATEs'
              ' (init. UPDATEs already sent: %s)',
              self._sent_init_non_rtc_update)
    if not self._rtc_eor_timer:
        return False
    self._rtc_eor_timer.stop()
    self._rtc_eor_timer = None
    return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.