signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def push_json_file(json_file, url, dry_run=False, batch_size=100, anonymize_fields=None, remove_fields=None, rename_fields=None):
    """Read the JSON file provided and POST its contents to ``url``.

    List payloads are transformed per item (anonymize/remove/rename fields)
    and sent in batches no bigger than ``batch_size``; any non-list payload
    is POSTed as-is.

    :param json_file: open file-like object containing JSON text
    :param url: destination URL for the POST requests
    :param dry_run: passed through to ``post()`` to skip the actual request
    :param batch_size: maximum number of items per POST
    :param anonymize_fields: field names whose values are md5-hashed
    :param remove_fields: field names to delete from each item
    :param rename_fields: (old_name, new_name) pairs to rename per item
    """
    # BUGFIX: the field lists used to default to [] -- a mutable default
    # argument shared across calls. Default to None and normalise here.
    anonymize_fields = anonymize_fields or []
    remove_fields = remove_fields or []
    rename_fields = rename_fields or []
    batch = []
    json_data = json.loads(json_file.read())
    if isinstance(json_data, list):
        for item in json_data:
            # anonymize fields
            for field_name in anonymize_fields:
                if field_name in item:
                    item[field_name] = md5sum(item[field_name])
            # remove fields
            for field_name in remove_fields:
                if field_name in item:
                    del item[field_name]
            # rename fields
            for (field_name, new_field_name) in rename_fields:
                if field_name in item:
                    item[new_field_name] = item[field_name]
                    del item[field_name]
            batch.append(item)
            if len(batch) >= batch_size:
                post(batch, url, dry_run=dry_run)
                batch = []
        # flush the final partial batch
        if batch:
            post(batch, url, dry_run=dry_run)
    else:
        post(json_data, url, dry_run=dry_run)
def _print_split_model(self, path, apps_models):
    """Write each model in ``apps_models`` to its own .puml file.

    :param path: base output path; an existing extension is preserved
    :param apps_models: iterable of (app, models) pairs
    """
    for app_label, model_list in apps_models:
        for mdl in model_list:
            title = mdl().title
            if self._has_extension(path):
                # Inject "<app>.<model>" between the basename and extension.
                target = re.sub(r'^(.*)[.](\w+)$', r'\1.%s.%s.\2' % (app_label, title), path)
            else:
                target = '%s.%s.%s.puml' % (path, app_label, title)
            self._print_single_file(target, [(app_label, [mdl])])
def fromJSON(cls, jdata):
    """Generate a new column from the given JSON data.

    The data should already be loaded into a Python dictionary,
    not a JSON string.

    :param jdata: <dict>
    :return: <orb.Column> || None
    :raises orb.errors.ColumnTypeNotFound: when the type is unknown
    """
    type_name = jdata.get('type')
    column_class = cls.byName(type_name)
    # Guard clause: unknown column type names are a hard error.
    if not column_class:
        raise orb.errors.ColumnTypeNotFound(type_name)
    column = column_class()
    column.loadJSON(jdata)
    return column
def _format_capability_report ( self , data ) :
"""This is a private utility method .
This method formats a capability report if the user wishes to
send it to the console .
If log _ output = True , no output is generated
: param data : Capability report
: returns : None""" | if self . log_output :
return
else :
pin_modes = { 0 : 'Digital_Input' , 1 : 'Digital_Output' , 2 : 'Analog' , 3 : 'PWM' , 4 : 'Servo' , 5 : 'Shift' , 6 : 'I2C' , 7 : 'One Wire' , 8 : 'Stepper' , 9 : 'Encoder' }
x = 0
pin = 0
print ( '\nCapability Report' )
print ( '-----------------\n' )
while x < len ( data ) : # get index of next end marker
print ( '{} {}{}' . format ( 'Pin' , str ( pin ) , ':' ) )
while data [ x ] != 127 :
mode_str = ""
pin_mode = pin_modes . get ( data [ x ] )
mode_str += str ( pin_mode )
x += 1
bits = data [ x ]
print ( '{:>5}{}{} {}' . format ( ' ' , mode_str , ':' , bits ) )
x += 1
x += 1
pin += 1 |
def _flush(self):
    """Request an asynchronous flush of the handler queue.

    We skip any locking code due to the fact that this is now a single
    process per collector.
    """
    # Send a None down the queue to indicate a flush; the consumer treats
    # the None sentinel as a flush command.
    try:
        self.queue.put(None, block=False)
    except Queue.Full:
        # Queue is backed up -- log (rate-limited) instead of blocking.
        self._throttle_error('Queue full, check handlers for delays')
def listSubjectsResponse(self, query, status=None, start=None, count=None, vendorSpecific=None):
    """CNIdentity.listSubjects(session, query, status, start, count) → SubjectList

    https://releases.dataone.org/online/api-documentation-v2.0.1/apis/
    CN_APIs.html#CNIdentity.listSubjects

    Args:
      query: search string
      status: subject status filter
      start: paging offset
      count: page size
      vendorSpecific: extra headers to send with the request

    Returns: the raw GET response for the ``accounts`` endpoint.
    """
    params = {
        'query': query,
        'status': status,
        'start': start,
        'count': count,
    }
    return self.GET('accounts', query=params, headers=vendorSpecific)
def parse_cutadapt_logs(self, f):
    """Go through a log file looking for cutadapt output.

    Populates ``self.cutadapt_data`` with overview stats per sample and the
    ``self.cutadapt_length_*`` dicts with trimmed-length histograms.

    :param f: file dict with keys ``f`` (file handle), ``s_name``, ``root``
    """
    fh = f['f']
    # Stat labels differ between cutadapt <= 1.6 and >= 1.7 output.
    regexes = {
        '1.7': {
            'bp_processed': r"Total basepairs processed:\s*([\d,]+) bp",
            'bp_written': r"Total written \(filtered\):\s*([\d,]+) bp",
            'quality_trimmed': r"Quality-trimmed:\s*([\d,]+) bp",
            'r_processed': r"Total reads processed:\s*([\d,]+)",
            'r_with_adapters': r"Reads with adapters:\s*([\d,]+)"
        },
        '1.6': {
            'r_processed': r"Processed reads:\s*([\d,]+)",
            'bp_processed': r"Processed bases:\s*([\d,]+) bp",
            'r_trimmed': r"Trimmed reads:\s*([\d,]+)",
            'quality_trimmed': r"Quality-trimmed:\s*([\d,]+) bp",
            'bp_trimmed': r"Trimmed bases:\s*([\d,]+) bp",
            'too_short': r"Too short reads:\s*([\d,]+)",
            'too_long': r"Too long reads:\s*([\d,]+)",
        }
    }
    s_name = None
    cutadapt_version = '1.7'
    log_section = None
    for l in fh:
        # New log starting
        if 'cutadapt' in l:
            s_name = None
            c_version = re.match(r'This is cutadapt ([\d\.]+)', l)
            if c_version:
                try:
                    assert StrictVersion(c_version.group(1)) <= StrictVersion('1.6')
                    cutadapt_version = '1.6'
                except Exception:
                    cutadapt_version = '1.7'
            c_version_old = re.match(r'cutadapt version ([\d\.]+)', l)
            if c_version_old:
                try:
                    # BUGFIX: this previously read c_version.group(1), which is
                    # None/absent when only the old-style banner matched.
                    assert StrictVersion(c_version_old.group(1)) <= StrictVersion('1.6')
                    cutadapt_version = '1.6'
                except Exception:
                    # I think the pattern "cutadapt version XX" is only pre-1.6?
                    cutadapt_version = '1.6'
        # Get sample name from end of command line params
        if l.startswith('Command line parameters'):
            s_name = l.split()[-1]
            # Manage case where sample name is '-' (reading from stdin)
            if s_name == '-':
                s_name = f['s_name']
            else:
                s_name = self.clean_s_name(s_name, f['root'])
            if s_name in self.cutadapt_data:
                log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
            self.cutadapt_data[s_name] = dict()
        if s_name is not None:
            self.add_data_source(f, s_name)
            # Search regexes for overview stats
            for k, r in regexes[cutadapt_version].items():
                match = re.search(r, l)
                if match:
                    self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))
            # Starting a new section
            if '===' in l:
                log_section = l.strip().strip('=').strip()
            # Histogram showing lengths trimmed
            if 'length' in l and 'count' in l and 'expect' in l:
                plot_sname = s_name
                if log_section is not None:
                    plot_sname = '{} - {}'.format(s_name, log_section)
                self.cutadapt_length_counts[plot_sname] = dict()
                self.cutadapt_length_exp[plot_sname] = dict()
                self.cutadapt_length_obsexp[plot_sname] = dict()
                # Nested loop to read this section while the regex matches
                for l in fh:
                    r_seqs = re.search(r"^(\d+)\s+(\d+)\s+([\d\.]+)", l)
                    if r_seqs:
                        a_len = int(r_seqs.group(1))
                        self.cutadapt_length_counts[plot_sname][a_len] = int(r_seqs.group(2))
                        self.cutadapt_length_exp[plot_sname][a_len] = float(r_seqs.group(3))
                        if float(r_seqs.group(3)) > 0:
                            self.cutadapt_length_obsexp[plot_sname][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))
                        else:
                            # Cheating, I know. Infinity is difficult to plot.
                            self.cutadapt_length_obsexp[plot_sname][a_len] = float(r_seqs.group(2))
                    else:
                        break
    # Calculate a few extra numbers of our own
    for s_name, d in self.cutadapt_data.items():
        if 'bp_processed' in d and 'bp_written' in d:
            self.cutadapt_data[s_name]['percent_trimmed'] = (float(d['bp_processed'] - d['bp_written']) / d['bp_processed']) * 100
        elif 'bp_processed' in d and 'bp_trimmed' in d:
            self.cutadapt_data[s_name]['percent_trimmed'] = ((float(d.get('bp_trimmed', 0)) + float(d.get('quality_trimmed', 0))) / d['bp_processed']) * 100
def renew_token(self, token=None, increment=None, wrap_ttl=None):
    """POST /auth/token/renew  or  POST /auth/token/renew-self

    Renews the given token, or the calling token when ``token`` is None.

    :param token: token to renew; defaults to the current token
    :param increment: requested TTL increment
    :param wrap_ttl: response-wrapping TTL
    :return: decoded JSON response
    """
    payload = {'increment': increment}
    if token is None:
        # No explicit token: renew the token used for authentication.
        return self._adapter.post('/v1/auth/token/renew-self', json=payload, wrap_ttl=wrap_ttl).json()
    payload['token'] = token
    return self._adapter.post('/v1/auth/token/renew', json=payload, wrap_ttl=wrap_ttl).json()
def run_exitfuncs():
    """Run registered exit handlers in registration order.

    Behaves exactly like Python's atexit, except the functions run in the
    order in which they were registered, not reversed. The last exception
    raised by any handler is re-raised after all handlers have run.
    """
    pending_exc = None
    for handler, pos_args, kw_args in _exithandlers:
        try:
            handler(*pos_args, **kw_args)
        except SystemExit:
            pending_exc = sys.exc_info()
        except:  # noqa: E722 -- mirror atexit: record everything, keep going
            pending_exc = sys.exc_info()
    if pending_exc is not None:
        six.reraise(pending_exc[0], pending_exc[1], pending_exc[2])
def POST_AUTH(self):  # pylint: disable=arguments-differ
    """Parse course registration or course creation and display the course list page.

    Handles two mutually-exclusive form submissions:
    * ``register_courseid`` -- register the current user to a course;
    * ``new_courseid`` -- create a new course (superadmins only).
    ``success`` is None when neither action was requested.
    """
    username = self.user_manager.session_username()
    user_info = self.database.users.find_one({"username": username})
    user_input = web.input()
    success = None
    # Handle registration to a course
    if "register_courseid" in user_input and user_input["register_courseid"] != "":
        try:
            course = self.course_factory.get_course(user_input["register_courseid"])
            if not course.is_registration_possible(user_info):
                success = False
            else:
                success = self.user_manager.course_register_user(course, username, user_input.get("register_password", None))
        except:
            # Any failure (unknown course id, registration error, ...) is
            # reported to the user as a generic failure, not a 500 page.
            success = False
    # Handle creation of a new course (restricted to superadmins)
    elif "new_courseid" in user_input and self.user_manager.user_is_superadmin():
        try:
            courseid = user_input["new_courseid"]
            self.course_factory.create_course(courseid, {"name": courseid, "accessible": False})
            success = True
        except:
            # Invalid course id or a course that already exists.
            success = False
    return self.show_page(success)
def compute_neighbors(self, n_neighbors: int = 30, knn: bool = True, n_pcs: Optional[int] = None, use_rep: Optional[str] = None, method: str = 'umap', random_state: Optional[Union[RandomState, int]] = 0, write_knn_indices: bool = False, metric: str = 'euclidean', metric_kwds: Mapping[str, Any] = {}) -> None:
    """Compute distances and connectivities of neighbors.

    Parameters
    ----------
    n_neighbors
        Use this number of nearest neighbors.
    knn
        Restrict result to `n_neighbors` nearest neighbors.
    {n_pcs}
    {use_rep}

    Returns
    -------
    Writes sparse graph attributes `.distances` and `.connectivities`.
    Also writes `.knn_indices` and `.knn_distances` if
    `write_knn_indices == True`.
    """
    # NOTE(review): `metric_kwds={}` is a mutable default argument; it is
    # only read (never mutated) here, but a None default would be safer.
    if n_neighbors > self._adata.shape[0]:  # very small datasets
        n_neighbors = 1 + int(0.5 * self._adata.shape[0])
        logg.warn('n_obs too small: adjusting to `n_neighbors = {}`'.format(n_neighbors))
    if method == 'umap' and not knn:
        raise ValueError('`method = \'umap\' only with `knn = True`.')
    if method not in {'umap', 'gauss'}:
        raise ValueError('`method` needs to be \'umap\' or \'gauss\'.')
    if self._adata.shape[0] >= 10000 and not knn:
        logg.warn('Using high n_obs without `knn=True` takes a lot of memory...')
    self.n_neighbors = n_neighbors
    self.knn = knn
    X = choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs)
    # neighbor search: dense pairwise distances are feasible (and exact)
    # for small euclidean datasets, and required when knn is disabled
    use_dense_distances = (metric == 'euclidean' and X.shape[0] < 8192) or knn == False
    if use_dense_distances:
        _distances = pairwise_distances(X, metric=metric, **metric_kwds)
        knn_indices, knn_distances = get_indices_distances_from_dense_matrix(_distances, n_neighbors)
        if knn:
            self._distances = get_sparse_matrix_from_indices_distances_numpy(knn_indices, knn_distances, X.shape[0], n_neighbors)
        else:
            self._distances = _distances
    else:
        # non-euclidean case and approx nearest neighbors
        if X.shape[0] < 4096:
            # small enough to precompute exact distances for umap's search
            X = pairwise_distances(X, metric=metric, **metric_kwds)
            metric = 'precomputed'
        knn_indices, knn_distances, _ = compute_neighbors_umap(X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds)
        # self._rp_forest = _make_forest_dict(forest)
    # write indices as attributes
    if write_knn_indices:
        self.knn_indices = knn_indices
        self.knn_distances = knn_distances
    logg.msg('computed neighbors', t=True, v=4)
    if not use_dense_distances or method == 'umap':
        # we need self._distances also for method == 'gauss' if we didn't
        # use dense distances
        self._distances, self._connectivities = compute_connectivities_umap(knn_indices, knn_distances, self._adata.shape[0], self.n_neighbors)
    # overwrite the umap connectivities if method is 'gauss'
    # self._distances is unaffected by this
    if method == 'gauss':
        self._compute_connectivities_diffmap()
    logg.msg('computed connectivities', t=True, v=4)
    self._number_connected_components = 1
    if issparse(self._connectivities):
        from scipy.sparse.csgraph import connected_components
        self._connected_components = connected_components(self._connectivities)
        self._number_connected_components = self._connected_components[0]
def ite_burrowed(self):
    """Return an equivalent AST that "burrows" the ITE expressions as deep
    as possible into the AST, for simpler printing. The result is cached.
    """
    if self._burrowed is None:
        burrowed = self._burrow_ite()
        # The burrowed form is its own fixed point -- cache it on itself too.
        burrowed._burrowed = burrowed  # pylint:disable=attribute-defined-outside-init
        self._burrowed = burrowed  # pylint:disable=attribute-defined-outside-init
    return self._burrowed
def unpatch(self):
    """Un-applies this patch, restoring the original execute_sql hooks."""
    if not self._patched:
        return  # nothing to undo
    # Restore every compiler function we previously wrapped.
    for compiler_func in self._read_compilers + self._write_compilers:
        compiler_func.execute_sql = self._original[compiler_func]
    self.cache_backend.unpatch()
    self._patched = False
def _outer_distance_mod_n ( ref , est , modulus = 12 ) :
"""Compute the absolute outer distance modulo n .
Using this distance , d ( 11 , 0 ) = 1 ( modulo 12)
Parameters
ref : np . ndarray , shape = ( n , )
Array of reference values .
est : np . ndarray , shape = ( m , )
Array of estimated values .
modulus : int
The modulus .
12 by default for octave equivalence .
Returns
outer _ distance : np . ndarray , shape = ( n , m )
The outer circular distance modulo n .""" | ref_mod_n = np . mod ( ref , modulus )
est_mod_n = np . mod ( est , modulus )
abs_diff = np . abs ( np . subtract . outer ( ref_mod_n , est_mod_n ) )
return np . minimum ( abs_diff , modulus - abs_diff ) |
def assign_fields(meta, assignments):
    """Takes a list of C{key=value} strings and assigns them to the
    given metafile. If you want to set nested keys (e.g. "info.source"),
    you have to use a dot as a separator. For exotic keys *containing*
    a dot, double that dot ("dotted..key").

    Numeric values starting with "+" or "-" are converted to integers.
    If just a key name is given (no '='), the field is removed.

    NOTE(review): the error path below references ``self.options.debug``,
    but this function takes no ``self`` -- presumably it was extracted from
    a method. Confirm against the enclosing class; as-is, the debug check
    would raise NameError.
    """
    for assignment in assignments:
        assignment = fmt.to_unicode(assignment)
        try:
            if '=' in assignment:
                field, val = assignment.split('=', 1)
            else:
                # bare key name: mark the field for removal
                field, val = assignment, None
            if val and val[0] in "+-" and val[1:].isdigit():
                val = int(val, 10)
            # TODO: Allow numerical indices, and "+" for append
            namespace = meta
            # ".." escapes a literal dot inside a key; NUL is the scratch marker
            keypath = [i.replace('\0', '.') for i in field.replace('..', '\0').split('.')]
            for key in keypath[:-1]:
                # Create missing dicts as we go...
                namespace = namespace.setdefault(fmt.to_utf8(key), {})
        except (KeyError, IndexError, TypeError, ValueError) as exc:
            if self.options.debug:
                raise
            raise error.UserError("Bad assignment %r (%s)!" % (assignment, exc))
        else:
            if val is None:
                del namespace[fmt.to_utf8(keypath[-1])]
            else:
                namespace[fmt.to_utf8(keypath[-1])] = fmt.to_utf8(val)
    return meta
def is_monotonic(df, items=None, increasing=None, strict=False):
    """Asserts that the DataFrame is monotonic.

    Parameters
    ----------
    df : Series or DataFrame
    items : dict
        mapping columns to conditions (increasing, strict)
    increasing : None or bool
        None is either increasing or decreasing.
    strict : bool
        whether the comparison should be strict

    Returns
    -------
    df : DataFrame

    Raises
    ------
    AssertionError
        when any checked column violates its monotonicity condition
    """
    if items is None:
        items = {column: (increasing, strict) for column in df}
    for column, (inc, strct) in items.items():
        idx = pd.Index(df[column])
        if inc:
            ok = idx.is_monotonic_increasing
        elif inc is None:
            # direction unspecified: either way counts
            ok = idx.is_monotonic | idx.is_monotonic_decreasing
        else:
            ok = idx.is_monotonic_decreasing
        if strct:
            # strict monotonicity: consecutive differences must not be zero
            diffs = idx.to_series().diff().dropna()
            if inc:
                ok = ok & (diffs > 0).all()
            elif inc is None:
                ok = ok & ((diffs > 0).all() | (diffs < 0).all())
            else:
                ok = ok & (diffs < 0).all()
        if not ok:
            raise AssertionError
    return df
def SequenceOf(klass):
    """Function to return a class that can encode and decode a list of
    some other type.

    Results are cached in ``_sequence_of_map``, so repeated calls with the
    same ``klass`` return the same class object.
    """
    if _debug:
        SequenceOf._debug("SequenceOf %r", klass)
    global _sequence_of_map
    global _sequence_of_classes, _array_of_classes
    # if this has already been built, return the cached one
    if klass in _sequence_of_map:
        if _debug:
            SequenceOf._debug(" - found in cache")
        return _sequence_of_map[klass]
    # no SequenceOf(SequenceOf(...)) allowed
    if klass in _sequence_of_classes:
        raise TypeError("nested sequences disallowed")
    # no SequenceOf(ArrayOf(...)) allowed
    if klass in _array_of_classes:
        raise TypeError("sequences of arrays disallowed")

    # define a generic class for lists
    @bacpypes_debugging
    class _SequenceOf:
        # element type of the sequence; assigned via setattr() below
        subtype = None

        def __init__(self, value=None):
            if _debug:
                _SequenceOf._debug("(%r)__init__ %r (subtype=%r)", self.__class__.__name__, value, self.subtype)
            if value is None:
                self.value = []
            elif isinstance(value, list):
                self.value = value
            else:
                raise TypeError("invalid constructor datatype")

        def append(self, value):
            # Validate the element against the subtype before storing it.
            if issubclass(self.subtype, Atomic):
                pass
            elif issubclass(self.subtype, AnyAtomic) and not isinstance(value, Atomic):
                raise TypeError("instance of an atomic type required")
            elif not isinstance(value, self.subtype):
                raise TypeError("%s value required" % (self.subtype.__name__,))
            self.value.append(value)

        def __len__(self):
            return len(self.value)

        def __getitem__(self, item):
            return self.value[item]

        def __iter__(self):
            return iter(self.value)

        def encode(self, taglist):
            """Append one tag (or tag run) per element to ``taglist``."""
            if _debug:
                _SequenceOf._debug("(%r)encode %r", self.__class__.__name__, taglist)
            for value in self.value:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(value)
                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)
                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode(self, taglist):
            """Consume tags from ``taglist`` until a closing tag or exhaustion."""
            if _debug:
                _SequenceOf._debug("(%r)decode %r", self.__class__.__name__, taglist)
            while len(taglist) != 0:
                tag = taglist.Peek()
                # a closing tag ends the enclosing context -- stop here
                if tag.tagClass == Tag.closingTagClass:
                    return
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    if _debug:
                        _SequenceOf._debug(" - building helper: %r %r", self.subtype, tag)
                    taglist.Pop()
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(tag)
                    # save the value
                    self.value.append(helper.value)
                else:
                    if _debug:
                        _SequenceOf._debug(" - building value: %r", self.subtype)
                    # build an element
                    value = self.subtype()
                    # let it decode itself
                    value.decode(taglist)
                    # save what was built
                    self.value.append(value)

        def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
            """Write a human-readable dump of the elements to ``file``."""
            i = 0
            for value in self.value:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    file.write("%s[%d] = %r\n" % (" " * indent, i, value))
                elif isinstance(value, self.subtype):
                    file.write("%s[%d]" % (" " * indent, i))
                    value.debug_contents(indent + 1, file, _ids)
                else:
                    file.write("%s[%d] %s must be a %s" % (" " * indent, i, value, self.subtype.__name__))
                i += 1

        def dict_contents(self, use_dict=None, as_class=dict):
            # return sequences as arrays
            mapped_value = []
            for value in self.value:
                if issubclass(self.subtype, Atomic):
                    mapped_value.append(value)
                ### ambiguous
                elif issubclass(self.subtype, AnyAtomic):
                    mapped_value.append(value.value)
                ### ambiguous
                elif isinstance(value, self.subtype):
                    mapped_value.append(value.dict_contents(as_class=as_class))
            # return what we built
            return mapped_value

    # constrain it to a list of a specific type of item
    setattr(_SequenceOf, 'subtype', klass)
    _SequenceOf.__name__ = 'SequenceOf' + klass.__name__
    if _debug:
        SequenceOf._debug(" - build this class: %r", _SequenceOf)
    # cache this type
    _sequence_of_map[klass] = _SequenceOf
    _sequence_of_classes[_SequenceOf] = 1
    # return this new type
    return _SequenceOf
def impulse_deltav_hernquist_curvedstream(v, x, b, w, x0, v0, GM, rs):
    """NAME:
       impulse_deltav_hernquist_curvedstream
    PURPOSE:
       calculate the delta velocity due to an encounter with a Hernquist
       sphere in the impulse approximation; allows for arbitrary velocity
       vectors, and arbitrary position along the stream
    INPUT:
       v - velocity of the stream (nstar,3)
       x - position along the stream (nstar,3)
       b - impact parameter
       w - velocity of the Hernquist sphere (3)
       x0 - point of closest approach
       v0 - velocity of point of closest approach
       GM - mass of the Hernquist sphere (in natural units)
       rs - size of the Hernquist sphere
    OUTPUT:
       deltav (nstar,3)
    HISTORY:
       2015-08-13 - SANDERS, using Wyn Evans calculation
    """
    # Promote single-star (3,) inputs to (1, 3) row vectors.
    if len(v.shape) == 1:
        v = numpy.reshape(v, (1, 3))
    if len(x.shape) == 1:
        x = numpy.reshape(x, (1, 3))
    # Impact vector at closest approach, perpendicular to w and v0,
    # scaled to length b.
    b0 = numpy.cross(w, v0)
    b0 *= b / numpy.sqrt(numpy.sum(b0 ** 2))
    # Per-star offset from the perturber track.
    b_ = b0 + x - x0
    # Relative velocity of the perturber with respect to each star.
    w = w - v
    wmag = numpy.sqrt(numpy.sum(w ** 2, axis=1))
    bdotw = numpy.sum(b_ * w, axis=1) / wmag
    # Perpendicular impact distance for each star.
    B = numpy.sqrt(numpy.sum(b_ ** 2, axis=1) - bdotw ** 2)
    denom = wmag * (B ** 2 - rs ** 2)
    denom = 1. / denom
    s = numpy.sqrt(2. * B / (rs + B))
    # HernquistX is an external scalar helper -- vectorize for array input.
    HernquistXv = numpy.vectorize(HernquistX)
    Xfac = 1. - 2. * rs / (rs + B) * HernquistXv(s)
    return -2.0 * GM * ((b_.T - bdotw * w.T / wmag) * Xfac * denom).T
def _update_notification ( self , message = None ) :
"""Update the message area with blank or a message .""" | if message is None :
message = ''
message_label = self . _parts [ 'notification label' ]
message_label . config ( text = message )
self . _base . update ( ) |
def get_lan_ip(interface="default"):
    """Return the LAN IP address of the interface that routes WAN traffic.

    :param interface: interface name, or "default" for the gateway interface
    :return: dotted-quad address string, or None when it cannot be determined
    """
    # Normalise the interface name type across Python 2 / 3.
    if sys.version_info < (3, 0, 0):
        if type(interface) == str:
            interface = unicode(interface)
    else:
        if type(interface) == bytes:
            interface = interface.decode("utf-8")
    # Get ID of interface that handles WAN stuff.
    default_gateway = get_default_gateway(interface)
    gateways = netifaces.gateways()
    wan_id = None
    if netifaces.AF_INET in gateways:
        gw_list = gateways[netifaces.AF_INET]
        for gw_info in gw_list:
            if gw_info[0] == default_gateway:
                wan_id = gw_info[1]
                break
    # Find LAN IP of interface for WAN stuff.
    interfaces = netifaces.interfaces()
    if wan_id in interfaces:
        families = netifaces.ifaddresses(wan_id)
        if netifaces.AF_INET in families:
            if_info_list = families[netifaces.AF_INET]
            for if_info in if_info_list:
                if "addr" in if_info:
                    return if_info["addr"]
    # Execution may reach here if the host is using virtual interfaces on
    # Linux and there are no gateways, which suggests the host is a VPS or
    # server. In this case, ask the kernel routing table directly.
    if platform.system() == "Linux":
        # NOTE(review): `ip` is not defined in this function -- presumably a
        # module-level pyroute2 IPRoute handle imported elsewhere; confirm,
        # otherwise this branch raises NameError.
        if ip is not None:
            return ip.routes["8.8.8.8"]["prefsrc"]
    return None
def namedb_get_namespace_reveal(cur, namespace_id, current_block, include_history=True):
    """Get a namespace reveal, and optionally its history, given its namespace ID.

    Only return a namespace record if:
    * it is not ready
    * it is not expired
    """
    query = "SELECT * FROM namespaces WHERE namespace_id = ? AND op = ? AND reveal_block <= ? AND ? < reveal_block + ?;"
    query_args = (namespace_id, NAMESPACE_REVEAL, current_block, current_block, NAMESPACE_REVEAL_EXPIRE)
    result_rows = namedb_query_execute(cur, query, query_args)
    row = result_rows.fetchone()
    if row is None:
        # no such reveal
        return None
    reveal_rec = {}
    reveal_rec.update(row)
    if include_history:
        reveal_rec['history'] = namedb_get_history(cur, namespace_id)
    return op_decanonicalize('NAMESPACE_REVEAL', reveal_rec)
def report(self, item_id, report_format="json"):
    """Retrieves the specified report for the analyzed item, referenced by item_id.

    Available formats include: json, html.

    :type item_id: str
    :param item_id: File ID number
    :type report_format: str
    :param report_format: Return format
    :rtype: dict
    :return: Dictionary representing the JSON parsed data, or the raw text
        for other formats / on JSON parsing failure.
    """
    requested_format = report_format.lower()
    response = self._request("/report/{job_id}/summary".format(job_id=item_id))
    if response.status_code == 429:
        raise sandboxapi.SandboxError('API rate limit exceeded while fetching report')
    body = response.content.decode('utf-8')
    # if response is JSON, return it as an object
    if requested_format == "json":
        try:
            return json.loads(body)
        except ValueError:
            pass
    # otherwise, return the raw content.
    return body
def _get_pitcher(self, pitcher):
    """Build a pitcher attribute dict from a pitcher XML element.

    :param pitcher: Beautifulsoup object (pitcher element)
    :return: pitcher (OrderedDict)
    """
    result = OrderedDict()
    roster_player = self.players.rosters.get(pitcher.get('id'))
    # Attributes read straight off the element (with unknown fallback).
    for attr in ('pos', 'id'):
        result[attr] = pitcher.get(attr, MlbamConst.UNKNOWN_SHORT)
    # Attributes taken from the roster entry.
    result['first'] = roster_player.first
    result['last'] = roster_player.last
    result['box_name'] = roster_player.box_name
    result['rl'] = roster_player.rl
    result['bats'] = roster_player.bats
    # Pitching-line stats from the element.
    for attr in ('out', 'bf'):
        result[attr] = pitcher.get(attr, MlbamConst.UNKNOWN_SHORT)
    return result
def update(self, tickDict):
    '''Consume ticks: route each security's tick to its tracker.'''
    # Lazily create the per-security trackers on first use.
    if not self.__trakers:
        self.__setUpTrakers()
    for sec, tick_data in tickDict.items():
        if sec in self.__trakers:
            self.__trakers[sec].tickUpdate(tick_data)
def get(self, key, defaultvalue=None):
    """Support dict-like get (return a default value if not found)."""
    target, attr = self._getsubitem(key, False)
    if target is None:
        return defaultvalue
    return target.__dict__.get(attr, defaultvalue)
def update_default_channels(sender, instance, created, **kwargs):
    """Post-save hook to ensure that there is only one default channel."""
    if not instance.default:
        return
    # Demote every other channel currently marked as default.
    Channel.objects.filter(default=True).exclude(channel_id=instance.channel_id).update(default=False)
def disable_scanners(self, scanners):
    """Disable the provided scanners by group and/or IDs.

    :param scanners: iterable of scanner group names and/or numeric ID strings
    :raises ZAPError: when an entry is neither a known group nor numeric
    """
    scanner_ids = []
    for scanner in scanners:
        if scanner in self.scanner_groups:
            # group names are handled immediately, one group at a time
            self.disable_scanners_by_group(scanner)
        elif scanner.isdigit():
            # numeric IDs are collected and disabled in one batch below
            scanner_ids.append(scanner)
        else:
            raise ZAPError('Invalid scanner "{0}" provided. Must be a valid group or numeric ID.'.format(scanner))
    if scanner_ids:
        self.disable_scanners_by_ids(scanner_ids)
def send_reply_to(address, reply=EMPTY):
    """Reply to a message previously received.

    :param address: a nw0 address (eg from `nw0.advertise`)
    :param reply: any simple Python object, including text & tuples
    """
    _logger.debug("Sending reply %s to %s", reply, address)
    # Delegate to the module-level socket pool keyed by address.
    return sockets._sockets.send_reply_to(address, reply)
def integrate(self, dt):
    """Integrate gyro measurements to orientation using a uniform sample rate.

    Parameters
    ----------
    dt : float
        Sample distance in seconds

    Returns
    -------
    orientation : (4, N) ndarray
        Gyroscope orientation in quaternion form (s, q1, q2, q3)
    """
    # Re-integrate only when the sample spacing changed; otherwise return
    # the cached result from the previous call.
    if dt != self.__last_dt:
        self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt)
        self.__last_dt = dt
    return self.__last_q
def ge(self, other):
    """Greater than or overlaps. Returns True if no part of this Interval
    extends lower than other.

    :raises ValueError: if either self or other is a null Interval
    :param other: Interval or point
    :return: True or False
    :rtype: bool
    """
    self._raise_if_null(other)
    # A bare point compares as itself; an Interval compares by its begin.
    other_begin = getattr(other, 'begin', other)
    return self.begin >= other_begin
def local_histogram(image, bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0, mask=slice(None)):
    r"""Computes multi-dimensional histograms over a region around each voxel.

    Supply an image and (optionally) a mask and get the local histogram of
    the neighbourhood around each voxel. Neighbourhoods are cubic with a
    sidelength of ``size`` voxels or, when a shape is passed to ``size``,
    of this shape.

    If no argument is passed to ``output``, the returned array is of dtype
    float. Voxels along the image border are treated as defined by
    ``mode``; the possible values are the same as for scipy.ndimage filters
    without the 'constant' mode. Instead, "ignore" is the default and an
    additional mode, meaning areas outside of the image are ignored when
    computing the histogram.

    When a mask is supplied, the local histogram is extracted only for the
    voxels where the mask is True -- but voxels from outside the mask can
    still contribute to those histograms.

    The range of the histograms is set via ``rang``: 'image' uses one
    global range for all local histograms (from the image's min/max, or
    the ``cutoffp`` percentiles to filter outliers); a (min, max) tuple
    sets it manually; None uses per-neighbourhood local ranges, which are
    not comparable between voxels and should normally be avoided. Voxels
    outside the range do not contribute, so some local histograms may be
    built from fewer than the expected number of voxels.

    The local histograms are normalized by dividing them through the
    number of elements in the bins.

    Parameters
    ----------
    image : array_like or list/tuple of array_like
        A single image or a list/tuple of images (multi-spectral case).
    bins : integer
        The number of histogram bins.
    rang : 'image' or tuple of numbers or None
        The histogram range: manual, global ('image'), or local (None).
    cutoffp : tuple of numbers
        Cut-off percentiles to exclude outliers; only used if ``rang`` is
        'image'.
    size : scalar or tuple of integers
        See ``footprint``. ``size=(n, m)`` is equivalent to
        ``footprint=np.ones((n, m))``; a scalar is broadcast over all
        image dimensions.
    footprint : array
        Boolean array defining (implicitly) a shape and which elements
        within it are passed to the filter function. Either ``size`` or
        ``footprint`` must be defined.
    output : ndarray or dtype
        Array in which to store the filter output.
    mode : {'reflect', 'ignore', 'nearest', 'mirror', 'wrap'}
        How the array borders are handled. Default is 'ignore'.
    origin : number
        Placement of the filter. Default 0.
    mask : array_like
        A binary mask for the image.

    Returns
    -------
    local_histogram : ndarray
        The bin values of the local histograms for each voxel as a
        multi-dimensional image.
    """
    # Thin wrapper: the generic feature extractor handles masking and the
    # (multi-spectral) image list case.
    return _extract_feature(_extract_local_histogram, image, mask, bins=bins, rang=rang, cutoffp=cutoffp, size=size, footprint=footprint, output=output, mode=mode, origin=origin)
def open(self, filename):
    # type: (str) -> None
    '''Open up an existing ISO for inspection and modification.

    Parameters:
     filename - The filename containing the ISO to open up.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object')
    iso_fp = open(filename, 'r+b')
    # We opened the handle ourselves, so this object owns (and must close) it.
    self._managing_fp = True
    try:
        self._open_fp(iso_fp)
    except Exception:
        # Parsing failed: release the file handle before propagating.
        iso_fp.close()
        raise
def source_headers(self):
    """Return the headers for the resource source.

    Columns whose name equals the EMPTY_SOURCE_HEADER sentinel ('_NONE_')
    are excluded.  Returns None when there is no schema term.
    """
    schema = self.schema_term
    if not schema:
        return None
    headers = []
    for position, col in enumerate(schema.children, 1):
        if col.term_is("Table.Column") and col.get_value('name') != EMPTY_SOURCE_HEADER:
            headers.append(self._name_for_col_term(col, position))
    return headers
def from_payload(self, payload):
    """Initialize this frame's fields from raw binary payload data."""
    node_id, raw_name = payload[0], payload[1:65]
    self.node_id = node_id
    # Bytes 1..64 carry the (padded) node name.
    self.name = bytes_to_string(raw_name)
def getRnaQuantificationSet(self, id_):
    """Return the RnaQuantification set with the specified id, or raise
    a RnaQuantificationSetNotFoundException when it is unknown."""
    mapping = self._rnaQuantificationSetIdMap
    if id_ in mapping:
        return mapping[id_]
    raise exceptions.RnaQuantificationSetNotFoundException(id_)
def _init_action_list(self, action_filename):
    """Parse the Geneways action file and populate lookup structures.

    Populates ``self.actions`` (one GenewaysAction per data line) and
    ``self.hiid_to_action_index`` (hiid -> index into ``self.actions``).

    Raises:
        Exception: if two actions share the same hiid.
    """
    self.actions = []
    self.hiid_to_action_index = {}
    # 'with' guarantees the file handle is closed even if parsing raises;
    # the original implementation leaked the handle.
    with codecs.open(action_filename, 'r', encoding='latin-1') as f:
        for line_num, line in enumerate(f):
            if line_num == 0:
                # Ignore the header line.
                continue
            action = GenewaysAction(line.rstrip())
            if action.hiid in self.hiid_to_action_index:
                raise Exception('action hiid not unique: %d' % action.hiid)
            self.hiid_to_action_index[action.hiid] = len(self.actions)
            self.actions.append(action)
def retry(
        transport: 'UDPTransport',
        messagedata: bytes,
        message_id: UDPMessageID,
        recipient: Address,
        stop_event: Event,
        timeout_backoff: Iterable[int],
) -> bool:
    """Send messagedata until it's acknowledged.

    Exit when:
    - The message is delivered.
    - stop_event is set.
    - The iterator timeout_backoff runs out.

    Returns:
        bool: True if the message was acknowledged, False otherwise.
    """
    # async_result becomes ready once the recipient acknowledges message_id.
    async_result = transport.maybe_sendraw_with_result(recipient, messagedata, message_id,)
    # Composite event: fires on either an ACK or an external stop request.
    event_quit = event_first_of(async_result, stop_event,)
    for timeout in timeout_backoff:
        # wait() returning True means ACK or stop arrived within this backoff
        # interval -- no further retransmission is needed.
        if event_quit.wait(timeout=timeout) is True:
            break
        log.debug('retrying message', node=pex(transport.raiden.address), recipient=pex(recipient), msgid=message_id,)
        # Retransmit the same payload; the result object is keyed by
        # message_id, so the first async_result still tracks delivery.
        transport.maybe_sendraw_with_result(recipient, messagedata, message_id,)
    return async_result.ready()
def do_display(self, arg):
    """display [expression]

    Display the value of the expression if it changed, each time execution
    stops in the current frame.

    Without expression, list all display expressions for the current frame.
    """
    if not arg:
        # No expression given: list everything tracked for the current frame.
        self.message('Currently displaying:')
        for item in self.displaying.get(self.curframe, {}).items():
            # NOTE(review): bdb.safe_repr(item) appears to yield a 2-tuple
            # feeding both %s placeholders -- confirm against bdb's API.
            self.message('%s: %s' % bdb.safe_repr(item))
    else:
        # Evaluate without letting errors escape, then register the
        # expression (keyed per frame) with its current value.
        val = self._getval_except(arg)
        self.displaying.setdefault(self.curframe, {})[arg] = val
        self.message('display %s: %s' % (arg, bdb.safe_repr(val)))
def _param_toc_updated_cb(self):
    """Called once the parameter TOC download has completed."""
    logger.info('Param TOC finished updating')
    # Record the moment the connection became fully usable, then notify
    # everything registered on the `connected` callback.
    now = datetime.datetime.now()
    self.connected_ts = now
    self.connected.call(self.link_uri)
    # Kick off a refresh of every parameter value.
    self.param.request_update_of_all_params()
def execute_sql(self, sql, commit=False):
    """Log a SQL statement, run it, and optionally commit the transaction."""
    logger.info("Running sqlite query: \"%s\"", sql)
    conn = self.connection
    conn.execute(sql)
    if commit:
        conn.commit()
def SetModel(self, loader):
    """Set our overall model (a loader object) and populate sub-controls."""
    self.loader = loader
    # RootNode derives the adapter, the root tree node and the row records.
    self.adapter, tree, rows = self.RootNode()
    # Push the rows into the list view and the tree into the square map.
    self.listControl.integrateRecords(rows.values())
    self.activated_node = tree
    self.squareMap.SetModel(tree, self.adapter)
    # Record this state so the user can navigate back to it.
    self.RecordHistory()
def setting(self, opt, val):
    """Change an arbitrary synth setting, dispatching on the value's type.

    Parameters:
        opt: setting name; encoded to bytes for the C API.
        val: new value -- routed to the string, int or float FluidSynth
            setter depending on its Python type.

    NOTE(review): `basestring` exists only on Python 2 -- string values
    raise NameError on Python 3; confirm the supported interpreter.
    Values of any other type are silently ignored.
    """
    opt = opt.encode()
    if isinstance(val, basestring):
        fluid_settings_setstr(self.settings, opt, val)
    elif isinstance(val, int):
        # bool is a subclass of int, so booleans also take this path.
        fluid_settings_setint(self.settings, opt, val)
    elif isinstance(val, float):
        fluid_settings_setnum(self.settings, opt, val)
def _handle_tag_definesceneandframelabeldata(self):
    """Handle the DefineSceneAndFrameLabelData tag."""
    obj = _make_object("DefineSceneAndFrameLabelData")
    # Scenes: each one carries an offset and a name, 1-indexed attributes.
    obj.SceneCount = self._get_struct_encodedu32()
    for index in range(1, obj.SceneCount + 1):
        setattr(obj, 'Offset{}'.format(index), self._get_struct_encodedu32())
        setattr(obj, 'Name{}'.format(index), self._get_struct_string())
    # Frame labels: each one carries a frame number and a label.
    obj.FrameLabelCount = self._get_struct_encodedu32()
    for index in range(1, obj.FrameLabelCount + 1):
        setattr(obj, 'FrameNum{}'.format(index), self._get_struct_encodedu32())
        setattr(obj, 'FrameLabel{}'.format(index), self._get_struct_string())
    return obj
def _update_from_database(self):
    """Refresh this section's map from the latest state in the database.

    Should be called prior to major object events to assure that an
    assessment being taken on multiple devices stays reasonably
    synchronized.
    """
    store = JSONClientValidated('assessment', collection='AssessmentSection', runtime=self._runtime)
    my_id = self._my_map['_id']
    self._my_map = store.find_one({'_id': my_id})
def print_err(*args, end='\n'):
    """Mirror built-in print, but write to stderr and flush immediately."""
    print(*args, file=sys.stderr, end=end, flush=True)
def list_consumer_group_offsets(self, group_id, group_coordinator_id=None, partitions=None):
    """Fetch Consumer Group Offsets.

    Note:
        This does not verify that the group_id or partitions actually exist
        in the cluster.

        As soon as any error is encountered, it is immediately raised.

    :param group_id: The consumer group id name for which to fetch offsets.
    :param group_coordinator_id: The node_id of the group's coordinator
        broker. If set to None, will query the cluster to find the group
        coordinator. Explicitly specifying this can be useful to prevent
        that extra network round trip if you already know the group
        coordinator. Default: None.
    :param partitions: A list of TopicPartitions for which to fetch
        offsets. On brokers >= 0.10.2, this can be set to None to fetch all
        known offsets for the consumer group. Default: None.
    :return dictionary: A dictionary with TopicPartition keys and
        OffsetAndMetadata values. Partitions that are not specified and for
        which the group_id does not have a recorded offset are omitted. An
        offset value of `-1` indicates the group_id has no offset for that
        TopicPartition. A `-1` can only happen for partitions that are
        explicitly specified.
    """
    group_offsets_listing = {}
    if group_coordinator_id is None:
        group_coordinator_id = self._find_group_coordinator_id(group_id)
    version = self._matching_api_version(OffsetFetchRequest)
    if version <= 3:
        if partitions is None:
            if version <= 1:
                raise ValueError(
                    """OffsetFetchRequest_v{} requires specifying the
                    partitions for which to fetch offsets. Omitting the
                    partitions is only supported on brokers >= 0.10.2.
                    For details, see KIP-88.""".format(version))
            topics_partitions = None
        else:
            # transform from [TopicPartition("t1", 1), TopicPartition("t1", 2)]
            # to [("t1", [1, 2])]
            topics_partitions_dict = defaultdict(set)
            for topic, partition in partitions:
                topics_partitions_dict[topic].add(partition)
            topics_partitions = list(six.iteritems(topics_partitions_dict))
        request = OffsetFetchRequest[version](group_id, topics_partitions)
        response = self._send_request_to_node(group_coordinator_id, request)
        if version > 1:
            # OffsetFetchResponse_v1 lacks a top-level error_code
            error_type = Errors.for_code(response.error_code)
            if error_type is not Errors.NoError:
                # optionally we could retry if error_type.retriable
                raise error_type(
                    "Request '{}' failed with response '{}'.".format(request, response))
        # transform response into a dictionary with TopicPartition keys and
        # OffsetAndMetadata values -- this is what the Java AdminClient returns.
        # NOTE(review): the loop variable below shadows the `partitions`
        # parameter; harmless here since the parameter is not used afterwards.
        for topic, partitions in response.topics:
            for partition, offset, metadata, error_code in partitions:
                error_type = Errors.for_code(error_code)
                if error_type is not Errors.NoError:
                    raise error_type(
                        "Unable to fetch offsets for group_id {}, topic {}, partition {}".format(group_id, topic, partition))
                group_offsets_listing[TopicPartition(topic, partition)] = OffsetAndMetadata(offset, metadata)
    else:
        raise NotImplementedError(
            "Support for OffsetFetch v{} has not yet been added to KafkaAdminClient.".format(version))
    return group_offsets_listing
def unregisterDataItem(self, path):
    """Unregister a data item previously registered with the server's
    data store.

    Inputs:
       path - path to share folder
    Example:
       path = r"/fileShares/folder_share"
       print data.unregisterDataItem(path)
    """
    params = {
        "f": "json",
        "itempath": path,
        "force": "true",
    }
    return self._post(self._url + "/unregisterItem",
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
def start_heartbeat(self):
    """(Re)start the heartbeat timer, cancelling any running one first."""
    self.stop_heartbeat()
    timer = periodic.Callback(self._heartbeat, self._heartbeat_interval, self.server.io_loop)
    self._heartbeat_timer = timer
    timer.start()
def from_buffer(string, config_path=None):
    '''Detect the MIME type of the buffered content.

    :param string: buffered content whose type needs to be detected
    :return: the server's plain-text detection response
    '''
    # The status code is intentionally discarded; only the body is returned.
    _status, detected = callServer('put', ServerEndpoint, '/detect/stream', string,
                                   {'Accept': 'text/plain'}, False,
                                   config_path=config_path)
    return detected
def get_attribute_stats(cls, soup, key, data_type=str, unknown=None):
    """Fetch an attribute from a BeautifulSoup object and coerce its type.

    :param soup: Beautifulsoup object
    :param key: attribute key
    :param data_type: target type (int, float, etc...)
    :param unknown: value returned when the attribute is absent (default: None)
    :return: the attribute value converted via ``data_type``, or ``unknown``
    """
    raw = cls.get_attribute(soup, key, unknown)
    if not raw or raw == unknown:
        return unknown
    return data_type(raw)
def launch_thread(self, name, fn, *args, **kwargs):
    """Add a named daemon thread to the ``thread_pool`` dictionary.

    A daemon thread executing ``fn(*args, **kwargs)`` is started
    immediately and tracked in the ``thread_pool`` attribute under ``name``.
    """
    logger.debug("Launching thread '%s': %s(%s, %s)", name, fn, args, kwargs)
    worker = threading.Thread(target=fn, args=args, kwargs=kwargs)
    worker.daemon = True
    self.thread_pool[name] = worker
    worker.start()
def parse(self, tokens, debug=None):
    """This is the main entry point from outside.

    Passing in a debug dictionary changes the default debug
    setting.
    """
    self.tokens = tokens
    if debug:
        self.debug = debug
    # sets[i] holds the parser items reachable after consuming i tokens;
    # the two tuples are the initial items before any token is read.
    sets = [[(1, 0), (2, 0)]]
    self.links = {}
    if self.ruleschanged:
        # Rebuild all grammar-derived tables when the rule set changed.
        self.computeNull()
        self.newrules = {}
        self.new2old = {}
        self.makeNewRules()
        self.ruleschanged = False
        self.edges, self.cores = {}, {}
        self.states = {0: self.makeState0()}
        self.makeState(0, self._BOF)
    for i in range(len(tokens)):
        sets.append([])
        if sets[i] == []:
            # No surviving items: the input cannot be parsed past token i.
            break
        self.makeSet(tokens, sets, i)
    else:
        # Loop completed without break: build the final set past the input.
        sets.append([])
        self.makeSet(None, sets, len(tokens))
    finalitem = (self.finalState(tokens), 0)
    if finalitem not in sets[-2]:
        # The accepting item was never reached -- report a parse error.
        if len(tokens) > 0:
            if self.debug.get('errorstack', False):
                self.errorstack(tokens, i - 1, str(self.debug['errorstack']) == 'full')
            self.error(tokens, i - 1)
        else:
            self.error(None, None)
    if self.profile_info is not None:
        self.dump_profile_info()
    return self.buildTree(self._START, finalitem, tokens, len(sets) - 2)
def _psplit ( self ) :
"""Split ` self ` at both north and south poles .
: return : A list of split StridedIntervals""" | nsplit_list = self . _nsplit ( )
psplit_list = [ ]
for si in nsplit_list :
psplit_list . extend ( si . _ssplit ( ) )
return psplit_list |
def simhash(self, content):
    """Select policies for simhash on the different types of content.

    - None         -> sentinel hash of -1
    - str          -> tokenize, then build the hash from weighted features
    - iterable     -> build the hash directly from the given features
    - int          -> treated as a precomputed hash value
    Anything else raises Exception.
    """
    if content is None:
        self.hash = -1
        return
    if isinstance(content, str):
        features = self.tokenizer_func(content, self.keyword_weight_pari)
        self.hash = self.build_from_features(features)
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    # (str is also Iterable, but the str branch above takes precedence.)
    elif isinstance(content, collections.abc.Iterable):
        self.hash = self.build_from_features(content)
    elif isinstance(content, int):
        self.hash = content
    else:
        raise Exception("Unsupported parameter type %s" % type(content))
def add_key_val(keyname, keyval, keytype, filename, extnum):
    """Add/replace a FITS keyword.

    Add/replace the key keyname with value keyval of type keytype in
    filename.

    Parameters:
        keyname : str
            FITS keyword name.
        keyval : str
            FITS keyword value.
        keytype : str
            FITS keyword type: 'int', 'float', 'str' or 'bool'.
        filename : str
            FITS filename.
        extnum : int
            Extension number where the keyword will be inserted. Note that
            the first extension is number 1 (and not zero).
    """
    def _parse_bool(value):
        # bool('False') is True, so textual booleans must be mapped
        # explicitly instead of passed through the bool() constructor.
        if isinstance(value, str):
            lowered = value.strip().lower()
            if lowered in ('t', 'true', '1', 'yes'):
                return True
            if lowered in ('f', 'false', '0', 'no'):
                return False
            raise ValueError('Invalid boolean keyword value: ' + value)
        return bool(value)

    funtype = {'int': int, 'float': float, 'str': str, 'bool': _parse_bool}
    if keytype not in funtype:
        raise ValueError('Undefined keyword type: ' + str(keytype))
    # 'update' mode writes the modified header back on close.
    with fits.open(filename, "update") as hdulist:
        hdulist[extnum].header[keyname] = funtype[keytype](keyval)
    print('>>> Inserting ' + keyname + '=' + keyval + ' in ' + filename)
def post_operations(self, mode=None):
    """Return the post-operations registered for the requested mode only."""
    return self._get_version_mode(mode=mode).post_operations
def get_hdrgo2usrgos(self, hdrgos):
    """Return the subset of hdrgo2usrgos restricted to the given header GOs
    that are actually present."""
    present = self.get_hdrgos().intersection(hdrgos)
    return {hdrgo: self.hdrgo2usrgos.get(hdrgo) for hdrgo in present}
def list(self, filters=None):
    """List model instances.

    Currently this gets *everything* and iterates through all
    possible pages in the API. This may be unsuitable for production
    environments with huge databases, so finer grained page support
    should likely be added at some point.

    Args:
        filters (dict, optional): API query filters to apply to the
            request. For example:

            .. code-block:: python

                {'name__startswith': 'azure',
                 'user__in': [1, 2, 3, 4],}

            See saltant's API reference at
            https://saltant-org.github.io/saltant/ for each model's
            available filters.

    Returns:
        list:
            A list of :class:`saltant.models.resource.Model`
            subclass instances (for example, container task type
            model instances).
    """
    # Add in the page and page_size parameters to the filter, such
    # that our request gets *all* objects in the list. However,
    # don't do this if the user has explicitly included these
    # parameters in the filter.
    if not filters:
        filters = {}
    if "page" not in filters:
        filters["page"] = 1
    if "page_size" not in filters:
        # The below "magic number" is 2^63 - 1, which is the largest
        # number you can hold in a 64 bit integer. The main point
        # here is that we want to get everything in one page (unless
        # otherwise specified, of course).
        filters["page_size"] = 9223372036854775807
    # Form the request URL - first add in the query filters.
    # NOTE(review): filter values are interpolated without URL-encoding;
    # values containing '&', '=' or spaces would corrupt the query string.
    query_filter_sub_url = ""
    for idx, filter_param in enumerate(filters):
        # Prepend '?' or '&'
        if idx == 0:
            query_filter_sub_url += "?"
        else:
            query_filter_sub_url += "&"
        # Add in the query filter
        query_filter_sub_url += "{param}={val}".format(param=filter_param, val=filters[filter_param])
    # Stitch together all sub-urls
    request_url = (self._client.base_api_url + self.list_url + query_filter_sub_url)
    # Make the request
    response = self._client.session.get(request_url)
    # Validate that the request was successful
    self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_200_OK,)
    # Return a list of model instances
    return self.response_data_to_model_instances_list(response.json())
def load_overrides(introspection_module):
    """Loads overrides for an introspection module.

    Either returns the same module again in case there are no overrides or a
    proxy module including overrides. Doesn't cache the result.
    """
    namespace = introspection_module.__name__.rsplit(".", 1)[-1]
    module_keys = [prefix + "." + namespace for prefix in const.PREFIX]
    # We use sys.modules so overrides can import from gi.repository
    # but restore everything at the end so this doesn't have any side effects
    # NOTE(review): has_old/old_module end up reflecting only the LAST key in
    # module_keys; confirm whether per-key tracking was intended.
    for module_key in module_keys:
        has_old = module_key in sys.modules
        old_module = sys.modules.get(module_key)
    # Create a new sub type, so we can separate descriptors like
    # _DeprecatedAttribute for each namespace.
    proxy_type = type(namespace + "ProxyModule", (OverridesProxyModule,), {})
    proxy = proxy_type(introspection_module)
    for module_key in module_keys:
        sys.modules[module_key] = proxy
    try:
        override_package_name = 'pgi.overrides.' + namespace
        # http://bugs.python.org/issue14710
        try:
            override_loader = get_loader(override_package_name)
        except AttributeError:
            override_loader = None
        # Avoid checking for an ImportError, an override might
        # depend on a missing module thus causing an ImportError
        if override_loader is None:
            return introspection_module
        override_mod = importlib.import_module(override_package_name)
    finally:
        # Always undo the sys.modules changes, even on early return/raise.
        for module_key in module_keys:
            del sys.modules[module_key]
            if has_old:
                sys.modules[module_key] = old_module
    override_all = []
    if hasattr(override_mod, "__all__"):
        override_all = override_mod.__all__
    for var in override_all:
        try:
            item = getattr(override_mod, var)
        except (AttributeError, TypeError):
            # Gedit puts a non-string in __all__, so catch TypeError here
            continue
        # make sure new classes have a proper __module__
        try:
            if item.__module__.split(".")[-1] == namespace:
                item.__module__ = namespace
        except AttributeError:
            pass
        setattr(proxy, var, item)
    # Replace deprecated module level attributes with a descriptor
    # which emits a warning when accessed.
    for attr, replacement in _deprecated_attrs.pop(namespace, []):
        try:
            value = getattr(proxy, attr)
        except AttributeError:
            raise AssertionError("%s was set deprecated but wasn't added to __all__" % attr)
        delattr(proxy, attr)
        deprecated_attr = _DeprecatedAttribute(namespace, attr, value, replacement)
        setattr(proxy_type, attr, deprecated_attr)
    return proxy
def cli(ctx, hpo_term, check_terms, output, p_value_limit, verbose, username, password, to_json):
    "Give hpo terms either on the form 'HP:0001623', or '0001623'"
    # Map the -v count (capped at 3) to a logging level and set up logging.
    loglevel = LEVELS.get(min(verbose, 3))
    configure_stream(level=loglevel)
    if not hpo_term:
        logger.info("Please specify at least one hpo term with '-t/--hpo_term'.")
        ctx.abort()
    if not (username and password):
        logger.info("Please specify username with -u and password with -p.")
        logger.info("Contact sebastian.koehler@charite.de.")
        ctx.abort()
    # Normalize terms: allow both 'HP:0001623' and bare '0001623'.
    hpo_list = []
    for term in hpo_term:
        if len(term.split(':')) < 2:
            term = ':'.join(['HP', term])
        hpo_list.append(term)
    logger.info("HPO terms used: {0}".format(','.join(hpo_list)))
    if check_terms:
        # Validation-only mode: report existence of each term, then exit
        # without querying.
        for term in hpo_list:
            try:
                if not validate_term(username, password, term):
                    logger.info("HPO term : {0} does not exist".format(term))
                else:
                    logger.info("HPO term : {0} does exist!".format(term))
            except RuntimeError as err:
                click.echo(err)
                ctx.abort()
        ctx.abort()
    else:
        try:
            for result in query(username, password, *hpo_list):
                if to_json:
                    click.echo(json.dumps(result))
                else:
                    print_string = "{0}\t{1}:{2}\t{3}\t{4}".format(result['p_value'], result['disease_source'], result['disease_nr'], result['description'], ','.join(result['gene_symbols']))
                    p_value = result['p_value']
                    # Only print results passing the p-value cutoff.
                    if p_value <= p_value_limit:
                        click.echo(print_string)
        except RuntimeError as e:
            click.echo(e)
            ctx.abort()
def embedded_tweet(self):
    """Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet
    object.

    Returns:
        Tweet (or None, if the Tweet is neither a quote tweet or a Retweet):
            a Tweet representing the quote Tweet or the Retweet
            (see tweet_embeds.get_embedded_tweet, this is that value as a
            Tweet)

    Raises:
        NotATweetError: if the embedded tweet is malformed
    """
    payload = tweet_embeds.get_embedded_tweet(self)
    if payload is None:
        return None
    try:
        return Tweet(payload)
    except NotATweetError as nate:
        # The original code applied .format() only to the second string
        # literal (string concatenation binds after the method call), so
        # the first placeholder was never filled; format the full message.
        raise NotATweetError(
            "The embedded tweet payload {} appears malformed."
            " Failed with '{}'".format(payload, nate))
def _merge_with_defaults(params):
    """Perform a 2-level deep merge of params with _default_params, with
    correct merging of params for each mark.

    This is a bit complicated since params['marks'] is a list and every
    mark must receive the default mark params.
    """
    # Merge each supplied mark over the default mark params; without any
    # supplied marks, fall back to a single default mark.
    if 'marks' in params:
        marks_params = [tz.merge(_default_params['marks'], mark)
                        for mark in params['marks']]
    else:
        marks_params = [_default_params['marks']]
    # Merge everything except 'marks' two levels deep, then re-attach marks.
    merged_without_marks = tz.merge_with(tz.merge,
                                         tz.dissoc(_default_params, 'marks'),
                                         tz.dissoc(params, 'marks'))
    return tz.merge(merged_without_marks, {'marks': marks_params})
def _request_helper(self, url, request_body):
    """A helper method to assist in making a request and provide a parsed
    response.

    Raises ProtocolError for HTTP-level failures when a response arrived,
    re-raises Fault unchanged, and wraps any other failure in a
    BugzillaError that carries the original traceback.
    """
    response = None
    # pylint: disable=try-except-raise
    try:
        response = self.session.post(url, data=request_body, **self.request_defaults)
        # We expect utf-8 from the server
        response.encoding = 'UTF-8'
        # update/set any cookies
        if self._cookiejar is not None:
            for cookie in response.cookies:
                self._cookiejar.set_cookie(cookie)
            if self._cookiejar.filename is not None:
                # Save is required only if we have a filename
                self._cookiejar.save()
        response.raise_for_status()
        return self.parse_response(response)
    except requests.RequestException as e:
        if not response:
            # Failure happened before any response arrived (connection
            # error etc.) -- let it propagate untouched.
            raise
        raise ProtocolError(url, response.status_code, str(e), response.headers)
    except Fault:
        raise
    except Exception:
        # Wrap unexpected errors while preserving the original traceback.
        e = BugzillaError(str(sys.exc_info()[1]))
        # pylint: disable=attribute-defined-outside-init
        e.__traceback__ = sys.exc_info()[2]
        # pylint: enable=attribute-defined-outside-init
        raise e
def learn(self, sentence_list, token_master_list, hidden_neuron_count=1000, training_count=1, batch_size=100, learning_rate=1e-03, seq_len=5):
    '''Train an LSTM-RTRBM on the given token sequences.

    Args:
        sentence_list:          The `list` of sentences.
        token_master_list:      Unique `list` of tokens.
        hidden_neuron_count:    The number of units in hidden layer.
        training_count:         The number of training iterations.
        batch_size:             Batch size of Mini-batch.
        learning_rate:          Learning rate.
        seq_len:                The length of one sequence.
    '''
    # Encode the sentences against the token master list.
    observed_arr = self.__setup_dataset(sentence_list, token_master_list, seq_len)
    # Visible layer width = dimensionality of one encoded observation.
    visible_num = observed_arr.shape[-1]
    # `Builder` in `Builder Pattern` for LSTM-RTRBM.
    rnnrbm_builder = LSTMRTRBMSimpleBuilder()
    # Learning rate.
    rnnrbm_builder.learning_rate = learning_rate
    # Set units in visible layer.
    rnnrbm_builder.visible_neuron_part(LogisticFunction(), visible_num)
    # Set units in hidden layer.
    rnnrbm_builder.hidden_neuron_part(LogisticFunction(), hidden_neuron_count)
    # Set units in RNN layer.
    rnnrbm_builder.rnn_neuron_part(TanhFunction())
    # Set graph and approximation function, delegating `SGD` which is-a `OptParams`.
    rnnrbm_builder.graph_part(LSTMRTRBMCD(opt_params=SGD()))
    # Building.
    rbm = rnnrbm_builder.get_result()
    # Learning.
    rbm.learn(
        # The `np.ndarray` of observed data points.
        observed_arr,
        # Training count.
        training_count=training_count,
        # Batch size.
        batch_size=batch_size)
    # Keep the trained model and encoding context for later inference.
    self.__rbm = rbm
    self.__token_master_list = token_master_list
    self.__seq_len = seq_len
def save_task(task, broker):
    """Save the task package to Django or the cache.

    Skips saving successful tasks when saving is disabled; enqueues the
    next task of a chain; prunes the Success table when over SAVE_LIMIT.
    Database errors are logged rather than raised.
    """
    # SAVE LIMIT < 0: Don't save success
    if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
        return
    # enqueues next in a chain
    if task.get('chain', None):
        django_q.tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            # Drop the oldest stored success to stay within the limit.
            Success.objects.last().delete()
        # check if this task has previous results
        if Task.objects.filter(id=task['id'], name=task['name']).exists():
            existing_task = Task.objects.get(id=task['id'], name=task['name'])
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.save()
        else:
            Task.objects.create(id=task['id'], name=task['name'], func=task['func'], hook=task.get('hook'), args=task['args'], kwargs=task['kwargs'], started=task['started'], stopped=task['stopped'], result=task['result'], group=task.get('group'), success=task['success'])
    except Exception as e:
        # Persisting results is best-effort: log and continue.
        logger.error(e)
def is_email(self, address, diagnose=False):
    """Check that an address conforms to RFCs 5321, 5322 and others.

    More specifically, see the following RFCs:
    * http://tools.ietf.org/html/rfc5321
    * http://tools.ietf.org/html/rfc5322
    * http://tools.ietf.org/html/rfc4291#section-2.2
    * http://tools.ietf.org/html/rfc1123#section-2.1
    * http://tools.ietf.org/html/rfc3696 (guidance only)

    Keyword arguments:
    address  -- address to check.
    diagnose -- flag to report a diagnosis or a boolean (default False)

    When ``diagnose`` is True the most severe diagnosis object found is
    returned; otherwise a boolean (diagnosis below the THRESHOLD
    category) is returned.
    """
    threshold = BaseDiagnosis.CATEGORIES['VALID']
    return_status = [ValidDiagnosis()]
    parse_data = {}

    # Parse the address into components, character by character
    raw_length = len(address)
    context = Context.LOCALPART           # Where we are
    context_stack = [context]             # Where we've been
    context_prior = Context.LOCALPART     # Where we just came from
    token = ''                            # The current character
    token_prior = ''                      # The previous character
    parse_data[Context.LOCALPART] = ''    # The address' components
    parse_data[Context.DOMAIN] = ''
    atom_list = {
        Context.LOCALPART: [''],
        Context.DOMAIN: ['']
    }                                     # The address' dot-atoms
    element_count = 0
    element_len = 0
    hyphen_flag = False   # Hyphen cannot occur at the end of a subdomain
    end_or_die = False    # CFWS can only appear at the end of an element
    skip = False          # Skip flag that simulates i++
    crlf_count = -1       # crlf_count == -1 means "not yet set"

    for i in _range(raw_length):
        # Skip simulates the use of the ++ operator
        if skip:
            skip = False
            continue

        token = address[i]
        token = to_char(token)

        # `repeat` simulates re-dispatching the same token after leaving
        # an FWS run; needed because FWS is terminated by its follower
        repeat = True

        while repeat:
            repeat = False

            # Local part
            if context == Context.LOCALPART:
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   local-part     = dot-atom / quoted-string / obs-local-part
                #   dot-atom       = [CFWS] dot-atom-text [CFWS]
                #   dot-atom-text  = 1*atext *("." 1*atext)
                #   quoted-string  = [CFWS]
                #                    DQUOTE *([FWS] qcontent) [FWS] DQUOTE
                #                    [CFWS]
                #   obs-local-part = word *("." word)
                #   word           = atom / quoted-string
                #   atom           = [CFWS] 1*atext [CFWS]
                if token == Char.OPENPARENTHESIS:
                    if element_len == 0:
                        # Comments are OK at the beginning of an element
                        if element_count == 0:
                            return_status.append(CFWSDiagnosis('COMMENT'))
                        else:
                            return_status.append(DeprecatedDiagnosis('COMMENT'))
                    else:
                        return_status.append(CFWSDiagnosis('COMMENT'))
                        # We can't start a comment in the middle of an
                        # element, so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.COMMENT
                elif token == Char.DOT:
                    if element_len == 0:
                        # Another dot, already? Fatal error
                        if element_count == 0:
                            return_status.append(InvalidDiagnosis('DOT_START'))
                        else:
                            return_status.append(
                                InvalidDiagnosis('CONSECUTIVEDOTS'))
                    else:
                        # The entire local-part can be a quoted string for
                        # RFC 5321. If it's just one atom that is quoted
                        # then it's an RFC 5322 obsolete form
                        if end_or_die:
                            return_status.append(
                                DeprecatedDiagnosis('LOCALPART'))

                        # CFWS & quoted strings are OK again now we're at
                        # the beginning of an element (although they are
                        # obsolete forms)
                        end_or_die = False
                        element_len = 0
                        element_count += 1
                        parse_data[Context.LOCALPART] += token
                        atom_list[Context.LOCALPART].append('')
                elif token == Char.DQUOTE:
                    if element_len == 0:
                        # The entire local-part can be a quoted string for
                        # RFC 5321. If it's just one atom that is quoted
                        # then it's an RFC 5322 obsolete form
                        if element_count == 0:
                            return_status.append(
                                RFC5321Diagnosis('QUOTEDSTRING'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('LOCALPART'))

                        parse_data[Context.LOCALPART] += token
                        atom_list[Context.LOCALPART][element_count] += token
                        element_len += 1
                        end_or_die = True
                        context_stack.append(context)
                        context = Context.QUOTEDSTRING
                    else:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_ATEXT'))
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True
                        if (i + 1 == raw_length or
                                to_char(address[i + 1]) != Char.LF):
                            return_status.append(InvalidDiagnosis('CR_NO_LF'))
                            break

                    if element_len == 0:
                        if element_count == 0:
                            return_status.append(CFWSDiagnosis('FWS'))
                        else:
                            return_status.append(DeprecatedDiagnosis('FWS'))
                    else:
                        # We can't start FWS in the middle of an element,
                        # so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                elif token == Char.AT:
                    # At this point we should have a valid local-part
                    if len(context_stack) != 1:  # pragma: no cover
                        if diagnose:
                            return InvalidDiagnosis('BAD_PARSE')
                        else:
                            return False

                    if parse_data[Context.LOCALPART] == '':
                        # Fatal error
                        return_status.append(InvalidDiagnosis('NOLOCALPART'))
                    elif element_len == 0:
                        # Fatal error
                        return_status.append(InvalidDiagnosis('DOT_END'))
                    # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1
                    #   The maximum total length of a user name or other
                    #   local-part is 64 octets.
                    elif len(parse_data[Context.LOCALPART]) > 64:
                        return_status.append(RFC5322Diagnosis('LOCAL_TOOLONG'))
                    # http://tools.ietf.org/html/rfc5322#section-3.4.1
                    #   Comments and folding white space SHOULD NOT be used
                    #   around the "@" in the addr-spec.
                    # http://tools.ietf.org/html/rfc2119
                    #   4. SHOULD NOT: there may exist valid reasons in
                    #   particular circumstances when the particular
                    #   behavior is acceptable, but the full implications
                    #   should be understood and the case carefully weighed.
                    elif context_prior in [Context.COMMENT, Context.FWS]:
                        return_status.append(
                            DeprecatedDiagnosis('CFWS_NEAR_AT'))

                    # Clear everything down for the domain parsing
                    context = Context.DOMAIN
                    context_stack = []
                    element_count = 0
                    element_len = 0
                    # CFWS can only appear at the end of the element
                    end_or_die = False
                # atext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.3
                    #   atext = ALPHA / DIGIT /  ; Printable US-ASCII
                    #           "!" / "#" /      ;  characters not
                    #           "$" / "%" /      ;  including specials.
                    #           "&" / "'" /      ;  Used for atoms.
                    if end_or_die:
                        # We have encountered atext where it is no longer
                        # valid
                        if context_prior in [Context.COMMENT, Context.FWS]:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_CFWS'))
                        elif context_prior == Context.QUOTEDSTRING:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_QS'))
                        else:  # pragma: no cover
                            if diagnose:
                                return InvalidDiagnosis('BAD_PARSE')
                            else:
                                return False
                    else:
                        context_prior = context
                        o = ord(token)

                        if (o < 33 or o > 126 or o == 10 or
                                token in Char.SPECIALS):
                            return_status.append(
                                InvalidDiagnosis('EXPECTING_ATEXT'))

                        parse_data[Context.LOCALPART] += token
                        atom_list[Context.LOCALPART][element_count] += token
                        element_len += 1
            # Domain
            elif context == Context.DOMAIN:
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   domain         = dot-atom / domain-literal / obs-domain
                #   dot-atom       = [CFWS] dot-atom-text [CFWS]
                #   dot-atom-text  = 1*atext *("." 1*atext)
                #   domain-literal = [CFWS]
                #                    "[" *([FWS] dtext) [FWS] "]" [CFWS]
                #   dtext          = %d33-90 / %d94-126 / obs-dtext
                #   obs-domain     = atom *("." atom)
                #   atom           = [CFWS] 1*atext [CFWS]
                # http://tools.ietf.org/html/rfc5321#section-4.1.2
                #   Mailbox         = Local-part "@" (Domain / address-literal)
                #   Domain          = sub-domain *("." sub-domain)
                #   address-literal = "[" (IPv4-address-literal /
                #                          IPv6-address-literal /
                #                          General-address-literal) "]"
                # is_email() author's note: "addressing information" must
                # comply with RFC 5321 (and in turn RFC 1035); anything that
                # is "semantically invisible" must comply only with RFC 5322.
                # Comment
                if token == Char.OPENPARENTHESIS:
                    if element_len == 0:
                        # Comments at the start of the domain are deprecated
                        # in the text; comments at the start of a subdomain
                        # are obs-domain
                        # (http://tools.ietf.org/html/rfc5322#section-3.4.1)
                        if element_count == 0:
                            return_status.append(
                                DeprecatedDiagnosis('CFWS_NEAR_AT'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('COMMENT'))
                    else:
                        return_status.append(CFWSDiagnosis('COMMENT'))
                        # We can't start a comment in the middle of an
                        # element, so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.COMMENT
                # Next dot-atom element
                elif token == Char.DOT:
                    if element_len == 0:
                        # Another dot, already? Fatal error
                        if element_count == 0:
                            return_status.append(
                                InvalidDiagnosis('DOT_START'))
                        else:
                            return_status.append(
                                InvalidDiagnosis('CONSECUTIVEDOTS'))
                    elif hyphen_flag:
                        # Previous subdomain ended in a hyphen. Fatal error
                        return_status.append(
                            InvalidDiagnosis('DOMAINHYPHENEND'))
                    else:
                        # Nowhere in RFC 5321 does it say explicitly that the
                        # domain part of a Mailbox must be a valid domain
                        # according to the DNS standards set out in RFC 1035,
                        # but this *is* implied in several places. Hence we
                        # must conclude that the RFC 1035 restriction on
                        # label length also applies to RFC 5321 domains.
                        # http://tools.ietf.org/html/rfc1035#section-2.3.4
                        #   labels: 63 octets or less
                        if element_len > 63:
                            return_status.append(
                                RFC5322Diagnosis('LABEL_TOOLONG'))

                        # CFWS is OK again now we're at the beginning of an
                        # element (although it may be obsolete CFWS)
                        end_or_die = False
                        element_len = 0
                        element_count += 1
                        atom_list[Context.DOMAIN].append('')
                        parse_data[Context.DOMAIN] += token
                # Domain literal
                elif token == Char.OPENSQBRACKET:
                    if parse_data[Context.DOMAIN] == '':
                        # Domain literal must be the only component
                        end_or_die = True
                        element_len += 1
                        context_stack.append(context)
                        context = Context.LITERAL
                        parse_data[Context.DOMAIN] += token
                        atom_list[Context.DOMAIN][element_count] += token
                        parse_data['literal'] = ''
                    else:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_ATEXT'))
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True
                        if i + 1 == raw_length or (
                                to_char(address[i + 1]) != Char.LF):
                            # Fatal error
                            return_status.append(InvalidDiagnosis('CR_NO_LF'))
                            break

                    if element_len == 0:
                        if element_count == 0:
                            return_status.append(
                                DeprecatedDiagnosis('CFWS_NEAR_AT'))
                        else:
                            return_status.append(DeprecatedDiagnosis('FWS'))
                    else:
                        return_status.append(CFWSDiagnosis('FWS'))
                        # We can't start FWS in the middle of an element,
                        # so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # atext
                else:
                    # RFC 5322 allows any atext...
                    # http://tools.ietf.org/html/rfc5322#section-3.2.3
                    #   atext = ALPHA / DIGIT / "!" / "#" / "$" / "%" /
                    #           "&" / "'" / ...
                    # But RFC 5321 only allows letter-digit-hyphen to comply
                    # with DNS rules (RFCs 1034 & 1123)
                    # http://tools.ietf.org/html/rfc5321#section-4.1.2
                    #   sub-domain = Let-dig [Ldh-str]
                    #   Let-dig    = ALPHA / DIGIT
                    #   Ldh-str    = *( ALPHA / DIGIT / "-" ) Let-dig
                    if end_or_die:
                        # We have encountered atext where it is no longer
                        # valid
                        if context_prior in [Context.COMMENT, Context.FWS]:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_CFWS'))
                        elif context_prior == Context.LITERAL:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_DOMLIT'))
                        else:  # pragma: no cover
                            if diagnose:
                                return InvalidDiagnosis('BAD_PARSE')
                            else:
                                return False

                    o = ord(token)
                    # Assume this token isn't a hyphen unless we discover
                    # it is
                    hyphen_flag = False

                    if o < 33 or o > 126 or token in Char.SPECIALS:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_ATEXT'))
                    elif token == Char.HYPHEN:
                        if element_len == 0:
                            # Hyphens can't be at the beginning of a
                            # subdomain. Fatal error
                            return_status.append(
                                InvalidDiagnosis('DOMAINHYPHENSTART'))
                        hyphen_flag = True
                    elif not (47 < o < 58 or 64 < o < 91 or 96 < o < 123):
                        # Not an RFC 5321 subdomain, but still OK by RFC 5322
                        return_status.append(RFC5322Diagnosis('DOMAIN'))

                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1
            # Domain literal
            elif context == Context.LITERAL:
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   domain-literal = [CFWS]
                #                    "[" *([FWS] dtext) [FWS] "]" [CFWS]
                #   dtext          = %d33-90 / %d94-126 / obs-dtext
                #   obs-dtext      = obs-NO-WS-CTL / quoted-pair
                # End of domain literal
                if token == Char.CLOSESQBRACKET:
                    if (max(return_status) <
                            BaseDiagnosis.CATEGORIES['DEPREC']):
                        # Could be a valid RFC 5321 address literal, so
                        # let's check
                        # http://tools.ietf.org/html/rfc5321#section-4.1.3
                        #   IPv4-address-literal = Snum 3("." Snum)
                        #   IPv6-address-literal = "IPv6:" IPv6-addr
                        #   Snum     = 1*3DIGIT  ; 0-255
                        #   IPv6-addr = IPv6-full / IPv6-comp /
                        #               IPv6v4-full / IPv6v4-comp
                        #   IPv6-hex = 1*4HEXDIG
                        #   The "::" represents at least 2 16-bit groups of
                        #   zeros; group-count limits apply per form.
                        max_groups = 8
                        index = False
                        address_literal = parse_data['literal']

                        # Extract IPv4 part from the end of the
                        # address-literal (if there is one)
                        regex = (
                            r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)"
                            r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
                        match_ip = re.search(regex, address_literal)
                        if match_ip:
                            index = address_literal.rfind(match_ip.group(0))
                            if index != 0:
                                # Convert IPv4 part to IPv6 format for
                                # further testing
                                address_literal = (
                                    address_literal[0:index] + '0:0')

                        if index == 0 and index is not False:
                            # Nothing there except a valid IPv4 address
                            return_status.append(
                                RFC5321Diagnosis('ADDRESSLITERAL'))
                        elif not address_literal.startswith(Char.IPV6TAG):
                            return_status.append(
                                RFC5322Diagnosis('DOMAINLITERAL'))
                        else:
                            ipv6 = address_literal[5:]
                            # Revision 2.7: Daniel Marschall's new IPv6
                            # testing strategy
                            match_ip = ipv6.split(Char.COLON)
                            grp_count = len(match_ip)
                            index = ipv6.find(Char.DOUBLECOLON)

                            if index == -1:
                                # We need exactly the right number of groups
                                if grp_count != max_groups:
                                    return_status.append(
                                        RFC5322Diagnosis('IPV6_GRPCOUNT'))
                            else:
                                if index != ipv6.rfind(Char.DOUBLECOLON):
                                    return_status.append(
                                        RFC5322Diagnosis('IPV6_2X2XCOLON'))
                                else:
                                    if index in [0, len(ipv6) - 2]:
                                        # RFC 4291 allows :: at the start or
                                        # end of an address with 7 other
                                        # groups in addition
                                        max_groups += 1

                                    if grp_count > max_groups:
                                        return_status.append(
                                            RFC5322Diagnosis('IPV6_MAXGRPS'))
                                    elif grp_count == max_groups:
                                        # Eliding a single "::"
                                        return_status.append(
                                            RFC5321Diagnosis(
                                                'IPV6DEPRECATED'))

                            # Revision 2.7: Daniel Marschall's new IPv6
                            # testing strategy
                            if (ipv6[0] == Char.COLON and
                                    ipv6[1] != Char.COLON):
                                # Address starts with a single colon
                                return_status.append(
                                    RFC5322Diagnosis('IPV6_COLONSTRT'))
                            elif (ipv6[-1] == Char.COLON and
                                    ipv6[-2] != Char.COLON):
                                # Address ends with a single colon
                                return_status.append(
                                    RFC5322Diagnosis('IPV6_COLONEND'))
                            elif ([re.match(r"^[0-9A-Fa-f]{0,4}$", i)
                                   for i in match_ip].count(None) != 0):
                                # Check for unmatched characters
                                return_status.append(
                                    RFC5322Diagnosis('IPV6_BADCHAR'))
                            else:
                                return_status.append(
                                    RFC5321Diagnosis('ADDRESSLITERAL'))
                    else:
                        return_status.append(
                            RFC5322Diagnosis('DOMAINLITERAL'))

                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1
                    context_prior = context
                    context = context_stack.pop()
                elif token == Char.BACKSLASH:
                    return_status.append(
                        RFC5322Diagnosis('DOMLIT_OBSDTEXT'))
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True
                        if (i + 1 == raw_length or
                                to_char(address[i + 1]) != Char.LF):
                            return_status.append(InvalidDiagnosis('CR_NO_LF'))
                            break

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # dtext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.4.1
                    #   dtext         = %d33-90 / %d94-126 / obs-dtext
                    #   obs-dtext     = obs-NO-WS-CTL / quoted-pair
                    #   obs-NO-WS-CTL = %d1-8 / %d11 / %d12 / %d14-31 / %d127
                    o = ord(token)

                    # CR, LF, SP & HTAB have already been parsed above
                    if o > 127 or o == 0 or token == Char.OPENSQBRACKET:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_DTEXT'))
                        break
                    elif o < 33 or o == 127:
                        return_status.append(
                            RFC5322Diagnosis('DOMLIT_OBSDTEXT'))

                    parse_data['literal'] += token
                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1
            # Quoted string
            elif context == Context.QUOTEDSTRING:
                # http://tools.ietf.org/html/rfc5322#section-3.2.4
                #   quoted-string = [CFWS]
                #                   DQUOTE *([FWS] qcontent) [FWS] DQUOTE
                #                   [CFWS]
                #   qcontent      = qtext / quoted-pair
                # Quoted pair
                if token == Char.BACKSLASH:
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                # Inside a quoted string, spaces are allowed as regular
                # characters. It's only FWS if we include HTAB or CRLF
                elif token in [Char.CR, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True
                        if (i + 1 == raw_length or
                                to_char(address[i + 1]) != Char.LF):
                            return_status.append(InvalidDiagnosis('CR_NO_LF'))
                            break

                    # http://tools.ietf.org/html/rfc5322#section-3.2.2
                    #   Runs of FWS, comment, or CFWS that occur between
                    #   lexical tokens in a structured header field are
                    #   semantically interpreted as a single space character.
                    # http://tools.ietf.org/html/rfc5322#section-3.2.4
                    #   the CRLF in any FWS/CFWS that appears within the
                    #   quoted string [is] semantically "invisible" and
                    #   therefore not part of the quoted-string
                    parse_data[Context.LOCALPART] += Char.SP
                    atom_list[Context.LOCALPART][element_count] += Char.SP
                    element_len += 1

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # End of quoted string
                elif token == Char.DQUOTE:
                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    element_len += 1
                    context_prior = context
                    context = context_stack.pop()
                # qtext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.4
                    #   qtext         = %d33 / %d35-91 / %d93-126 /
                    #                   obs-qtext  ; not "\" or the quote
                    #   obs-qtext     = obs-NO-WS-CTL
                    #   obs-NO-WS-CTL = %d1-8 / %d11 / %d12 / %d14-31 / %d127
                    o = ord(token)

                    if o > 127 or o == 0 or o == 10:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_QTEXT'))
                    elif o < 32 or o == 127:
                        return_status.append(DeprecatedDiagnosis('QTEXT'))

                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    element_len += 1
            # Quoted pair
            elif context == Context.QUOTEDPAIR:
                # http://tools.ietf.org/html/rfc5322#section-3.2.1
                #   quoted-pair   = ("\" (VCHAR / WSP)) / obs-qp
                #   VCHAR         = %d33-126
                #   WSP           = SP / HTAB
                #   obs-qp        = "\" (%d0 / obs-NO-WS-CTL / LF / CR)
                #   i.e. obs-qp   = "\" (%d0-8, %d10-31 / %d127)
                o = ord(token)

                if o > 127:
                    # Fatal error
                    return_status.append(
                        InvalidDiagnosis('EXPECTING_QPAIR'))
                elif (o < 31 and o != 9) or o == 127:
                    # SP & HTAB are allowed
                    return_status.append(DeprecatedDiagnosis('QP'))

                # At this point we know where this qpair occurred so we
                # could check to see if the character actually needed to be
                # quoted at all.
                # http://tools.ietf.org/html/rfc5321#section-4.1.2
                #   the sending system SHOULD transmit the form that uses
                #   the minimum quoting possible.
                context_prior = context
                context = context_stack.pop()
                # End of qpair
                token = Char.BACKSLASH + token

                if context == Context.COMMENT:
                    pass
                elif context == Context.QUOTEDSTRING:
                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    # The maximum sizes specified by RFC 5321 are octet
                    # counts, so we must include the backslash
                    element_len += 2
                elif context == Context.LITERAL:
                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    # The maximum sizes specified by RFC 5321 are octet
                    # counts, so we must include the backslash
                    element_len += 2
                else:  # pragma: no cover
                    if diagnose:
                        return InvalidDiagnosis('BAD_PARSE')
                    else:
                        return False
            # Comment
            elif context == Context.COMMENT:
                # http://tools.ietf.org/html/rfc5322#section-3.2.2
                #   comment  = "(" *([FWS] ccontent) [FWS] ")"
                #   ccontent = ctext / quoted-pair / comment
                # Nested comment
                if token == Char.OPENPARENTHESIS:
                    # Nested comments are OK
                    context_stack.append(context)
                    context = Context.COMMENT
                # End of comment
                elif token == Char.CLOSEPARENTHESIS:
                    context_prior = context
                    context = context_stack.pop()
                # Quoted pair
                elif token == Char.BACKSLASH:
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True
                        if (i + 1 == raw_length or
                                to_char(address[i + 1]) != Char.LF):
                            return_status.append(InvalidDiagnosis('CR_NO_LF'))
                            break

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # ctext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.3
                    #   ctext         = %d33-39 / %d42-91 / %d93-126 /
                    #                   obs-ctext  ; not "(", ")", or "\"
                    #   obs-ctext     = obs-NO-WS-CTL
                    #   obs-NO-WS-CTL = %d1-8 / %d11 / %d12 / %d14-31 / %d127
                    o = ord(token)

                    if o > 127 or o == 0 or o == 10:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_CTEXT'))
                        break
                    elif o < 32 or o == 127:
                        return_status.append(DeprecatedDiagnosis('CTEXT'))
            # Folding White Space (FWS)
            elif context == Context.FWS:
                # http://tools.ietf.org/html/rfc5322#section-3.2.2
                #   FWS = ([*WSP CRLF] 1*WSP) / obs-FWS
                # But note the erratum:
                # http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908
                #   In the obsolete syntax, any amount of folding white
                #   space MAY be inserted where the obs-FWS rule is allowed.
                #   This creates the possibility of having two consecutive
                #   "folds" in a line.
                #   obs-FWS = 1*([CRLF] WSP)
                if token_prior == Char.CR:
                    if token == Char.CR:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('FWS_CRLF_X2'))
                        break

                    if crlf_count != -1:
                        crlf_count += 1
                        if crlf_count > 1:
                            # Multiple folds = obsolete FWS
                            return_status.append(DeprecatedDiagnosis('FWS'))
                    else:
                        crlf_count = 1

                # Skip simulates the use of ++ operator if the latter
                # check doesn't short-circuit
                if token == Char.CR:
                    skip = True
                    if (i + 1 == raw_length or
                            to_char(address[i + 1]) != Char.LF):
                        return_status.append(InvalidDiagnosis('CR_NO_LF'))
                        break
                elif token in [Char.SP, Char.HTAB]:
                    pass
                else:
                    if token_prior == Char.CR:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('FWS_CRLF_END'))
                        break

                    if crlf_count != -1:
                        crlf_count = -1

                    context_prior = context
                    # End of FWS
                    context = context_stack.pop()
                    # Look at this token again in the parent context
                    repeat = True

                token_prior = token
            # A context we aren't expecting
            else:  # pragma: no cover
                if diagnose:
                    return InvalidDiagnosis('BAD_PARSE')
                else:
                    return False

        # No point in going on if we've got a fatal error
        if max(return_status) > BaseDiagnosis.CATEGORIES['RFC5322']:
            break

    # Some simple final tests
    if max(return_status) < BaseDiagnosis.CATEGORIES['RFC5322']:
        if context == Context.QUOTEDSTRING:
            # Fatal error
            return_status.append(InvalidDiagnosis('UNCLOSEDQUOTEDSTR'))
        elif context == Context.QUOTEDPAIR:
            # Fatal error
            return_status.append(InvalidDiagnosis('BACKSLASHEND'))
        elif context == Context.COMMENT:
            # Fatal error
            return_status.append(InvalidDiagnosis('UNCLOSEDCOMMENT'))
        elif context == Context.LITERAL:
            # Fatal error
            return_status.append(InvalidDiagnosis('UNCLOSEDDOMLIT'))
        elif token == Char.CR:
            # Fatal error
            return_status.append(InvalidDiagnosis('FWS_CRLF_END'))
        elif parse_data[Context.DOMAIN] == '':
            # Fatal error
            return_status.append(InvalidDiagnosis('NODOMAIN'))
        elif element_len == 0:
            # Fatal error
            return_status.append(InvalidDiagnosis('DOT_END'))
        elif hyphen_flag:
            # Fatal error
            return_status.append(InvalidDiagnosis('DOMAINHYPHENEND'))
        # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2
        #   The maximum total length of a domain name or number is
        #   255 octets
        elif len(parse_data[Context.DOMAIN]) > 255:
            return_status.append(RFC5322Diagnosis('DOMAIN_TOOLONG'))
        # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3
        #   The maximum total length of a reverse-path or forward-path is
        #   256 octets (including the punctuation and element separators),
        #   so the Mailbox can only be 254 characters long. Confirmed by
        #   this verified erratum to RFC 3696:
        # http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690
        elif len(parse_data[Context.LOCALPART] + Char.AT +
                 parse_data[Context.DOMAIN]) > 254:
            return_status.append(RFC5322Diagnosis('TOOLONG'))
        # http://tools.ietf.org/html/rfc1035#section-2.3.4
        #   labels: 63 octets or less
        elif element_len > 63:
            return_status.append(RFC5322Diagnosis('LABEL_TOOLONG'))

    return_status = list(set(return_status))
    final_status = max(return_status)

    if len(return_status) != 1:
        # Remove redundant ValidDiagnosis
        return_status.pop(0)

    parse_data['status'] = return_status

    if final_status < threshold:
        final_status = ValidDiagnosis()

    if diagnose:
        return final_status
    else:
        return final_status < BaseDiagnosis.CATEGORIES['THRESHOLD']
def _prep_genome(out_dir, data):
    """Create prepped reference directory for pisces.

    Requires a custom GenomeSize.xml file present. Symlinks the reference
    FASTA into a per-genome subdirectory and writes the XML describing
    each contig's length and ploidy.
    """
    ref_file = dd.get_ref_file(data)
    ref_base = os.path.basename(ref_file)
    genome_name = utils.splitext_plus(ref_base)[0]
    genome_dir = utils.safe_makedir(os.path.join(out_dir, genome_name))
    utils.symlink_plus(ref_file, os.path.join(genome_dir, ref_base))
    dict_file = "%s.dict" % utils.splitext_plus(ref_file)[0]
    with open(os.path.join(genome_dir, "GenomeSize.xml"), "w") as out_handle:
        out_handle.write('<sequenceSizes genomeName="%s">' % genome_name)
        for contig in pysam.AlignmentFile(dict_file).header["SQ"]:
            contig_ploidy = ploidy.get_ploidy([data], region=[contig["SN"]])
            entry = ('<chromosome fileName="%s" contigName="%s" totalBases="%s" knownBases="%s" '
                     'isCircular="false" ploidy="%s" md5="%s"/>'
                     % (ref_base, contig["SN"], contig["LN"], contig["LN"],
                        contig_ploidy, contig["M5"]))
            out_handle.write(entry)
        out_handle.write('</sequenceSizes>')
    return genome_dir
def breadth_first(problem, graph_search=False, viewer=None):
    """Breadth first search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.is_goal.
    """
    # A FIFO fringe makes the generic search expand nodes breadth-first.
    fringe = FifoList()
    return _search(problem, fringe, graph_search=graph_search, viewer=viewer)
def init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from):
    """Initialize FCN-16s / FCN-8s parameters from a coarser FCN model.

    Uses zero initialization for the new score layers for better convergence,
    because the net then tends to output 0, and the label 0 stands for
    background, which may occupy most of one image.

    :param ctx: mx.Context the returned parameter arrays should live on
    :param fcnxs_symbol: symbol of the finer network being initialized
    :param fcnxs_args_from: argument params copied from the coarser model
    :param fcnxs_auxs_from: auxiliary params copied from the coarser model
    :return: (fcnxs_args, fcnxs_auxs) dicts for the finer model
    """
    fcnxs_args = fcnxs_args_from.copy()
    fcnxs_auxs = fcnxs_auxs_from.copy()
    # Move every copied parameter onto the requested context.
    for k, v in fcnxs_args.items():
        if (v.context != ctx):
            fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)
            v.copyto(fcnxs_args[k])
    for k, v in fcnxs_auxs.items():
        if (v.context != ctx):
            fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)
            v.copyto(fcnxs_auxs[k])
    # Dummy input used only to infer parameter shapes (500x500 RGB, as in the
    # FCN-Xs example -- presumably shape inference is size-independent here,
    # TODO confirm).
    data_shape = (1, 3, 500, 500)
    arg_names = fcnxs_symbol.list_arguments()
    arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)
    rest_params = {}
    deconv_params = {}
    # this is fcn8s init from fcn16s: new pool3 score layers start at zero.
    if 'score_pool3_weight' in arg_names:
        rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes) if x[0] in ['score_pool3_bias', 'score_pool3_weight']])
        deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] in ["bigscore_weight", 'score4_weight']])
    # this is fcn16s init from fcn32s: new pool4 score layers start at zero.
    elif 'score_pool4_weight' in arg_names:
        rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes) if x[0] in ['score_pool4_weight', 'score_pool4_bias']])
        deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] in ["bigscore_weight", 'score2_weight']])
    # this is fcn32s init: no coarser FCN exists, so bail out.
    else:
        logging.error("you are init the fcn32s model, so you should use init_from_vgg16()")
        sys.exit()
    fcnxs_args.update(rest_params)
    # Deconvolution (upsampling) weights: put one upsample_filt kernel per
    # channel on the diagonal so each channel upsamples itself independently.
    for k, v in deconv_params.items():
        filt = upsample_filt(v[3])
        initw = np.zeros(v)
        # careful: this is a slice assignment onto the (channel) diagonal
        initw[range(v[0]), range(v[1]), :, :] = filt
        fcnxs_args[k] = mx.nd.array(initw, ctx)
    return fcnxs_args, fcnxs_auxs
def align(self, sequence):
    """Aligns the primer to the given query sequence, returning a tuple of:
    hamming_distance, start, end

    Where hamming distance is the distance between the primer and aligned
    sequence, and start and end give the start and end index of the primer
    relative to the input sequence.
    """
    # Global alignment of primer vs. sequence with the instance's scoring
    # parameters; only the best alignment is kept.
    seq_aln, primer_aln, score, start, end = pairwise2.align.globalms(str(sequence).upper(), str(self.primer).upper(), self.match, self.difference, self.gap_open, self.gap_extend, one_alignment_only=True, penalize_end_gaps=self.penalize_end_gaps)[0]
    # Get an ungapped mapping on the sequence
    index_map = gap_index_map(seq_aln)
    ungap_map = ungap_index_map(primer_aln)
    # Trim to primer: start/end are alignment-column indices of the primer's
    # first and last (non-gap) positions.
    start = ungap_map[0]
    end = ungap_map[len(self.primer) - 1]
    trimmed = seq_aln[start:end + 1]
    # Hamming distance over the primer-covered window, IUPAC-ambiguity aware.
    ham_dist = hamming_distance(primer_aln[start:end + 1], trimmed, _iupac_ambiguous_equal)
    # assert primer_aln[start:end].replace('-', '') == str(self.primer)
    # TODO: handle start or end being gap better. For now, just give up
    # and return maxint for the hamming distance
    if trimmed.endswith('-'):
        # Primer overhangs the sequence's 3' end; map back past the gap run.
        tail = len(trimmed) - len(trimmed.rstrip('-'))
        end = index_map[end - tail] + 1
        ham_dist = sys.maxsize
    else:
        end = index_map[end]
    if trimmed.startswith('-'):
        # Primer overhangs the sequence's 5' end.
        start = 0
        ham_dist = sys.maxsize
    else:
        start = index_map[start]
    return ham_dist, start, end
def reset(self):
    """Empties all internal storage containers"""
    super(MorseComplex, self).reset()
    # Topology containers
    self.base_partitions = dict()
    self.merge_sequence = dict()
    self.persistences = list()
    self.max_indices = list()
    # State properties
    self.persistence = 0.0
def get_node_bundle(manager, handle_id=None, node=None):
    """Bundle a node's data, meta type and remaining labels into a dict.

    :param manager: Neo4jDBSessionManager
    :param handle_id: Unique id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: dict with 'data', 'meta_type' (if any) and 'labels' keys
    """
    if not node:
        node = get_node(manager, handle_id=handle_id, legacy=False)
    d = {'data': node.properties}
    labels = list(node.labels)
    # All nodes have this label for indexing
    labels.remove('Node')
    # Iterate over a snapshot: removing from a list while iterating it skips
    # the element after each removal, so a meta-type label could be missed.
    for label in list(labels):
        if label in META_TYPES:
            d['meta_type'] = label
            labels.remove(label)
    d['labels'] = labels
    return d
def _lemmatise_suffixe(self, f, *args, **kwargs):
    """Lemmatise the word ``f`` if it ends with a known suffix.

    :param f: Word to lemmatise
    :yield: Matches formatted like in _lemmatise()
    """
    for ending in self._suffixes:
        # Only strip a proper suffix: the suffix must not be the whole word.
        if f != ending and f.endswith(ending):
            stem = f[:-len(ending)]
            yield from self._lemmatise(stem, *args, **kwargs)
def _parse_section_links(self, id_tag):
    """Given a section id, parse the links that follow the section heading.

    :param id_tag: the ``id`` attribute of the section's headline span
    :return: list of parsed link info (via ``__parse_link_info``); empty
             list when the section is not present
    """
    soup = BeautifulSoup(self.html, "html.parser")
    info = soup.find("span", {"id": id_tag})
    all_links = list()
    if info is None:
        return all_links
    # Reuse the span we already located instead of a second soup.find(id=...)
    # lookup, which re-scans the document and (because it matches any tag)
    # could even return a different element carrying the same id.
    for node in info.parent.next_siblings:
        if not isinstance(node, Tag):
            continue
        elif node.get("role", "") == "navigation":
            continue
        elif "infobox" in node.get("class", []):
            continue
        # A new headline span means we've reached the next section: stop.
        # (this is actually the child node's class...)
        is_headline = node.find("span", {"class": "mw-headline"})
        if is_headline is not None:
            break
        elif node.name == "a":
            all_links.append(self.__parse_link_info(node))
        else:
            for link in node.findAll("a"):
                all_links.append(self.__parse_link_info(link))
    return all_links
def rm_rf(path, dry_run=False):
    """Remove a file or directory tree.

    Won't throw an exception, even if the removal fails.

    :param path: file or directory to delete
    :param dry_run: when True, only log what would be removed
    """
    log.info("removing %s" % path)
    if dry_run:
        return
    # Real directories get a recursive delete; files and symlinks (including
    # symlinks to directories) are removed as single entries.
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    remover = shutil.rmtree if is_real_dir else os.remove
    try:
        remover(path)
    except OSError:
        # Best-effort semantics: swallow removal failures.
        pass
def _len_lcs(x, y):
    """Returns the length of the Longest Common Subsequence between sequences
    x and y.

    Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

    :param x: sequence of words
    :param y: sequence of words
    :returns integer: Length of LCS between x and y
    """
    n, m = _get_index_of_lcs(x, y)
    # The DP table's bottom-right cell holds the LCS length.
    return _lcs(x, y)[n, m]
def expand(self, delta_width, delta_height):
    """Makes the cloud surface bigger. Maintains all word positions."""
    new_size = (self.width + delta_width, self.height + delta_height)
    enlarged = pygame.Surface(new_size)
    (self.width, self.height) = new_size
    # Copy the existing cloud into the top-left of the larger surface so
    # every word keeps its position.
    enlarged.blit(self.cloud, (0, 0))
    self.cloud = enlarged
def delete_sp_template_for_vlan(self, vlan_id):
    """Deletes SP Template for a vlan_id if it exists.

    :param vlan_id: VLAN id whose ServiceProfileTemplate rows are removed
    """
    with self.session.begin(subtransactions=True):
        try:
            # Bulk delete of every template row matching this VLAN.
            self.session.query(ucsm_model.ServiceProfileTemplate).filter_by(vlan_id=vlan_id).delete()
        except orm.exc.NoResultFound:
            # NOTE(review): Query.delete() does not raise NoResultFound (it
            # returns a row count), so this handler looks unreachable --
            # confirm before relying on it.
            return
def cartogram(df, projection=None, scale=None, limits=(0.2, 1), scale_func=None, trace=True, trace_kwargs=None, hue=None, categorical=False, scheme=None, k=5, cmap='viridis', vmin=None, vmax=None, legend=False, legend_values=None, legend_labels=None, legend_kwargs=None, legend_var="scale", extent=None, figsize=(8, 6), ax=None, **kwargs):
    """Self-scaling area plot: each polygon is redrawn scaled by a data value.

    Parameters
    ----------
    df : GeoDataFrame
        The data being plotted; geometries are (Multi)Polygons.
    projection : geoplot.crs object instance, optional
        A geographic projection.
    scale : str or iterable
        Column name or sequence of values used to scale the polygons
        (required; raises ValueError if omitted).
    limits : (min, max) tuple, optional
        Minimum and maximum scale factors for the default linear scaling.
    scale_func : ufunc, optional
        Factory ``(dmin, dmax) -> f(value)`` returning a custom scale factor
        per value; overrides ``limits``.
    trace : boolean, optional
        Whether to draw each polygon's original outline behind the scaled one.
    trace_kwargs : dict, optional
        Keyword arguments for the trace outline patches; ignored if ``trace``
        is False.
    hue, categorical, scheme, k, cmap, vmin, vmax :
        Colormap controls, with the same semantics as in ``pointplot``.
    legend, legend_values, legend_labels, legend_kwargs, legend_var :
        Legend controls; ``legend_var`` selects whether the legend reflects
        the ``scale`` or the ``hue`` variable.
    extent : None or (minx, maxx, miny, maxy), optional
        Manual axis limits.
    figsize : tuple, optional
        Size in inches of a newly created figure.
    ax : AxesSubplot or GeoAxesSubplot instance, optional
        Axis to draw on; a new one is created by default.
    kwargs : dict, optional
        Passed to the underlying matplotlib Polygon patches of the scaled
        geometries.

    Returns
    -------
    ``AxesSubplot`` or ``GeoAxesSubplot``
        The plot axis.
    """
    # Initialize the figure.
    # NOTE(review): `fig` is never used after creation.
    fig = _init_figure(ax, figsize)
    # Load the projection.
    if projection:
        # Center the projection on the mean of the geometry centroids.
        projection = projection.load(df, {'central_longitude': lambda df: np.mean(np.array([p.x for p in df.geometry.centroid])), 'central_latitude': lambda df: np.mean(np.array([p.y for p in df.geometry.centroid]))})
        # Set up the axis.
        if not ax:
            ax = plt.subplot(111, projection=projection)
    # Clean up patches.
    else:
        if not ax:
            ax = plt.gca()
    # Clean up patches.
    _lay_out_axes(ax, projection)
    # Immediately return if input geometry is empty.
    if len(df.geometry) == 0:
        return ax
    # Set extent.
    extrema = _get_envelopes_min_maxes(df.geometry.envelope.exterior)
    _set_extent(ax, projection, extent, extrema)
    # Check that the ``scale`` parameter is filled, and use it to fill a ``values`` name.
    if not scale:
        raise ValueError("No scale parameter provided.")
    elif isinstance(scale, str):
        values = df[scale]
    else:
        values = scale
    # Compute a scale function: linear between `limits` by default.
    dmin, dmax = np.min(values), np.max(values)
    if not scale_func:
        dslope = (limits[1] - limits[0]) / (dmax - dmin)
        dscale = lambda dval: limits[0] + dslope * (dval - dmin)
    else:
        dscale = scale_func(dmin, dmax)
    # Create a legend, if appropriate.
    if legend:
        _paint_carto_legend(ax, values, legend_values, legend_labels, dscale, legend_kwargs)
    # Validate hue input.
    hue = _validate_hue(df, hue)
    # Generate the coloring information, if needed. Follows one of two schemes, categorical or continuous,
    # based on whether or not ``k`` is specified (``hue`` must be specified for either to work).
    if k is not None and hue is not None:
        # Categorical colormap code path.
        categorical, k, scheme = _validate_buckets(categorical, k, scheme)
        # NOTE(review): this inner check is redundant -- the branch condition
        # already guarantees `hue is not None`.
        if hue is not None:
            cmap, categories, hue_values = _discrete_colorize(categorical, hue, scheme, k, cmap, vmin, vmax)
            colors = [cmap.to_rgba(v) for v in hue_values]
            # Add a legend, if appropriate.
            if legend and (legend_var != "scale" or scale is None):
                _paint_hue_legend(ax, categories, cmap, legend_labels, legend_kwargs)
        else:
            colors = ['None'] * len(df)
    elif k is None and hue is not None:
        # Continuous colormap code path.
        hue_values = hue
        cmap = _continuous_colormap(hue_values, cmap, vmin, vmax)
        colors = [cmap.to_rgba(v) for v in hue_values]
        # Add a legend, if appropriate.
        if legend and (legend_var != "scale" or scale is None):
            _paint_colorbar_legend(ax, hue_values, cmap, legend_kwargs)
    elif 'facecolor' in kwargs:
        colors = [kwargs.pop('facecolor')] * len(df)
    else:
        colors = ['None'] * len(df)
    # Manipulate trace_kwargs: default to light-gray, unfilled outlines.
    if trace:
        if trace_kwargs is None:
            trace_kwargs = dict()
        if 'edgecolor' not in trace_kwargs.keys():
            trace_kwargs['edgecolor'] = 'lightgray'
        if 'facecolor' not in trace_kwargs.keys():
            trace_kwargs['facecolor'] = 'None'
    # Draw traces first, if appropriate, so scaled shapes render on top.
    if trace:
        if projection:
            for polygon in df.geometry:
                features = ShapelyFeature([polygon], ccrs.PlateCarree())
                ax.add_feature(features, **trace_kwargs)
        else:
            for polygon in df.geometry:
                try:  # Duck test for MultiPolygon.
                    for subgeom in polygon:
                        feature = descartes.PolygonPatch(subgeom, **trace_kwargs)
                        ax.add_patch(feature)
                except (TypeError, AssertionError):  # Shapely Polygon.
                    feature = descartes.PolygonPatch(polygon, **trace_kwargs)
                    ax.add_patch(feature)
    # Finally, draw the scaled geometries.
    for value, color, polygon in zip(values, colors, df.geometry):
        scale_factor = dscale(value)
        scaled_polygon = shapely.affinity.scale(polygon, xfact=scale_factor, yfact=scale_factor)
        if projection:
            features = ShapelyFeature([scaled_polygon], ccrs.PlateCarree())
            ax.add_feature(features, facecolor=color, **kwargs)
        else:
            try:  # Duck test for MultiPolygon.
                for subgeom in scaled_polygon:
                    feature = descartes.PolygonPatch(subgeom, facecolor=color, **kwargs)
                    ax.add_patch(feature)
            except (TypeError, AssertionError):  # Shapely Polygon.
                feature = descartes.PolygonPatch(scaled_polygon, facecolor=color, **kwargs)
                ax.add_patch(feature)
    return ax
def check_path_satisfiability(code_analyzer, path, start_address):
    """Check satisfiability of a basic block path.

    :param code_analyzer: analyzer that accumulates SMT constraints and can
        be queried for satisfiability
    :param path: sequence of basic blocks forming the candidate path
    :param start_address: address of the first instruction to consider;
        earlier instructions in the first block are skipped
    :return: True when the accumulated path constraints are satisfiable
    """
    start_instr_found = False
    sat = False
    # Traverse basic blocks, translate its instructions to SMT
    # expressions and add them as assertions. Each (bb_curr, bb_next) pair
    # lets us pin the branch condition that leads into the next block.
    for bb_curr, bb_next in zip(path[:-1], path[1:]):
        logger.info("BB @ {:#x}".format(bb_curr.address))
        # For each instruction...
        for instr in bb_curr:
            # If the start instruction have not been found, keep looking...
            if not start_instr_found:
                if instr.address == start_address:
                    start_instr_found = True
                else:
                    continue
            logger.info("{:#x} {}".format(instr.address, instr))
            # For each REIL instruction...
            for reil_instr in instr.ir_instrs:
                logger.info("{:#x} {:02d} {}".format(reil_instr.address >> 0x8, reil_instr.address & 0xff, reil_instr))
                if reil_instr.mnemonic == ReilMnemonic.JCC:
                    # Check that the JCC is the last instruction of
                    # the basic block (skip CALL instructions.)
                    if instr.address + instr.size - 1 != bb_curr.end_address:
                        logger.error("Unexpected JCC instruction: {:#x} {} ({})".format(instr.address, instr, reil_instr))
                        # raise Exception()
                        continue
                    # Make sure branch target address from current
                    # basic block is the start address of the next.
                    assert (bb_curr.taken_branch == bb_next.address or bb_curr.not_taken_branch == bb_next.address or bb_curr.direct_branch == bb_next.address)
                    # Set branch condition accordingly: 1 for the taken edge,
                    # 0 for the fall-through; direct branches need no goal.
                    if bb_curr.taken_branch == bb_next.address:
                        branch_var_goal = 0x1
                    elif bb_curr.not_taken_branch == bb_next.address:
                        branch_var_goal = 0x0
                    else:
                        continue
                    # Add branch condition goal constraint.
                    code_analyzer.add_constraint(code_analyzer.get_operand_expr(reil_instr.operands[0]) == branch_var_goal)
                    # The JCC instruction was the last within the
                    # current basic block. End this iteration and
                    # start next one.
                    break
                # Translate and add SMT expressions to the solver.
                code_analyzer.add_instruction(reil_instr)
        # Query the solver after each block so an unsatisfiable prefix
        # short-circuits the traversal.
        sat = code_analyzer.check() == 'sat'
        logger.info("BB @ {:#x} sat? {}".format(bb_curr.address, sat))
        if not sat:
            break
    # Return satisfiability.
    return sat
def ip_address(address):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address. Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the *address* passed isn't either a v4 or a v6
          address
    """
    # Try IPv4 first so small integers parse as IPv4 addresses.
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            continue
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % address)
def fill(self, field, value):
    """Fill a specified form field in the current document.

    :param field: an instance of :class:`zombie.dom.DOMNode`
    :param value: any string value
    :return: self to allow function chaining.
    """
    args = (field, value)
    # Fire-and-forget: no response is awaited from the browser.
    self.client.nowait('browser.fill', args)
    return self
def get_inventory(self, resources):
    """Returns a JSON object with the requested resources and their
    properties, that are managed by the HMC.

    This method performs the 'Get Inventory' HMC operation.

    Parameters:
      resources (:term:`iterable` of :term:`string`):
        Resource classes and/or resource classifiers specifying the types
        of resources that should be included in the result. For valid
        values, see the 'Get Inventory' operation in the :term:`HMC API`
        book. Element resources of the specified resource types are
        automatically included as children. Must not be `None`.

    Returns:
      :term:`JSON object`:
        The resources with their properties, for the requested resource
        classes and resource classifiers.

    Example:
        result_dict = client.get_inventory(['partition', 'adapter'])

    Raises:
      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.ConnectionError`
    """
    request_body = {'resources': resources}
    return self.session.post('/api/services/inventory', body=request_body)
def run_with_werkzeug(self, **options):
    """Run with werkzeug simple wsgi container."""
    # Threading is enabled only for an explicit, positive thread count.
    use_threads = self.threads is not None and self.threads > 0
    self.app.run(host=self.host, port=self.port, debug=self.debug,
                 threaded=use_threads, **options)
def plot_probe(mea, ax=None, xlim=None, ylim=None, color_currents=False, top=None, bottom=None, cmap='viridis', type='shank', alpha_elec=0.7, alpha_prb=0.3):
    '''Plot a MEA probe: its outline plus one patch per electrode.

    Parameters
    ----------
    mea : probe object -- assumed to expose ``positions``, ``size``,
        ``main_axes``, ``type``, ``shape``, ``currents`` and
        ``number_electrodes`` (TODO confirm against the MEA class)
    ax : matplotlib axis, optional; a new figure/axis is created when None
    xlim, ylim : manual axis limits, optional
    color_currents : bool, color electrodes by normalized ``mea.currents``
    top, bottom : extra outline extent above/below the electrodes ('shank')
    cmap : colormap name used when ``color_currents`` is True
    type : 'shank' (outline with a pointed tip) or 'planar' (rectangle)
    alpha_elec, alpha_prb : transparency of electrodes / probe outline

    Returns
    -------
    ax : the matplotlib axis drawn on
    '''
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    n_elec = mea.positions.shape[0]
    elec_size = mea.size
    if mea.type == 'mea':
        # Project electrode positions onto the probe plane spanned by the
        # first two main axes.
        mea_pos = np.array([np.dot(mea.positions, mea.main_axes[0]), np.dot(mea.positions, mea.main_axes[1])]).T
        min_x, max_x = [np.min(np.dot(mea.positions, mea.main_axes[0])), np.max(np.dot(mea.positions, mea.main_axes[0]))]
        center_x = (min_x + max_x) / 2.
        min_y, max_y = [np.min(np.dot(mea.positions, mea.main_axes[1])), np.max(np.dot(mea.positions, mea.main_axes[1]))]
        # NOTE(review): center_y is computed but never used.
        center_y = (min_y + max_y) / 2.
        if type == 'shank':
            if top is None:
                probe_height = 200
            else:
                probe_height = top
            probe_top = max_y + probe_height
            if bottom is None:
                probe_bottom = min_y - probe_height
            else:
                probe_bottom = min_y - bottom
            probe_corner = min_y - 0.1 * probe_height
            probe_left = min_x - 0.1 * probe_height
            probe_right = max_x + 0.1 * probe_height
            # Outline polygon with a pointed tip at (center_x, probe_bottom).
            verts = [
                (min_x - 2 * elec_size, probe_top),  # left, bottom
                (min_x - 2 * elec_size, probe_corner),  # left, top
                (center_x, probe_bottom),  # right, top
                (max_x + 2 * elec_size, probe_corner),  # right, bottom
                (max_x + 2 * elec_size, probe_top),
                (min_x - 2 * elec_size, max_y + 2 * elec_size)  # ignored
            ]
            codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ]
        elif type == 'planar':
            probe_top = max_y + 2 * elec_size
            probe_bottom = min_y - 2 * elec_size
            probe_left = min_x - 2 * elec_size
            probe_right = max_x + 2 * elec_size
            # Simple rectangular outline around the electrode array.
            verts = [
                (min_x - 2 * elec_size, max_y + 2 * elec_size),  # left, bottom
                (min_x - 2 * elec_size, min_y - 2 * elec_size),  # left, top
                (max_x + 2 * elec_size, min_y - 2 * elec_size),  # right, bottom
                (max_x + 2 * elec_size, max_y + 2 * elec_size),  # ignored
                (max_x + 2 * elec_size, max_y + 2 * elec_size)  # ignored
            ]
            codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ]
        else:
            raise AttributeError("'type' can be 'shank' or 'planar'")
        path = Path(verts, codes)
        patch = patches.PathPatch(path, facecolor='green', edgecolor='k', lw=0.5, alpha=alpha_prb)
        ax.add_patch(patch)
        if color_currents:
            # Normalize currents to [-1, 1] before mapping to colors.
            norm_curr = mea.currents / np.max(np.abs(mea.currents))
            colormap = plt.get_cmap(cmap)
            elec_colors = colormap(norm_curr)
        else:
            # NOTE(review): uses mea.number_electrodes here but n_elec (from
            # positions.shape) elsewhere -- presumably equal; confirm.
            elec_colors = ['orange'] * mea.number_electrodes
        if mea.shape == 'square':
            for e in range(n_elec):
                elec = patches.Rectangle((mea_pos[e, 0] - elec_size, mea_pos[e, 1] - elec_size), 2 * elec_size, 2 * elec_size, alpha=alpha_elec, facecolor=elec_colors[e], edgecolor=[0.3, 0.3, 0.3], lw=0.5)
                ax.add_patch(elec)
        elif mea.shape == 'circle':
            for e in range(n_elec):
                elec = patches.Circle((mea_pos[e, 0], mea_pos[e, 1]), elec_size, alpha=alpha_elec, facecolor=elec_colors[e], edgecolor=[0.3, 0.3, 0.3], lw=0.5)
                ax.add_patch(elec)
    else:
        raise NotImplementedError('Wire type plotting not implemented')
    ax.axis('equal')
    if xlim:
        ax.set_xlim(xlim)
    else:
        ax.set_xlim(probe_left - 5 * elec_size, probe_right + 5 * elec_size)
    if ylim:
        ax.set_ylim(ylim)
    else:
        ax.set_ylim(probe_bottom - 5 * elec_size, probe_top + 5 * elec_size)
    return ax
def record_diff(lhs, rhs):
    """Diff an individual row.

    :param lhs: mapping with the 'from' values
    :param rhs: mapping with the 'to' values
    :return: dict mapping each differing key to {'from': ..., 'to': ...};
             keys present in only one side diff against None
    """
    delta = {}
    # Keys range over the union of both rows, so a key may be missing from
    # either side; .get() avoids the KeyError direct indexing would raise.
    for key in set(lhs).union(rhs):
        from_ = lhs.get(key)
        to_ = rhs.get(key)
        if from_ != to_:
            delta[key] = {'from': from_, 'to': to_}
    return delta
def _load_config(config_filepath: str) -> Dict[str, Any]:
    """Loads YAML config file to dictionary

    :param config_filepath: path to the YAML config file
    :returns: dict from YAML config file
    :raises Exception: when the file cannot be opened or parsed
    """
    try:
        # Use a context manager so the file handle is always closed; the
        # previous open() inside the yaml.load() call leaked it.
        with open(config_filepath, "r") as config_file:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects from untrusted input; consider
            # yaml.safe_load if configs are not fully trusted.
            config: Dict[str, Any] = yaml.load(stream=config_file)
    except Exception as e:
        raise Exception(f"Invalid DAG Factory config file; err: {e}")
    return config
def main(_):
    """Create or load configuration and launch the trainer."""
    utility.set_up_logging()
    if not FLAGS.config:
        raise KeyError('You must specify a configuration.')
    # Derive the run directory from timestamp and config name; keep the
    # falsy value (None/'') untouched when no logdir was given.
    if FLAGS.logdir:
        run_name = '{}-{}'.format(FLAGS.timestamp, FLAGS.config)
        logdir = os.path.expanduser(os.path.join(FLAGS.logdir, run_name))
    else:
        logdir = FLAGS.logdir
    try:
        config = utility.load_config(logdir)
    except IOError:
        # No saved config yet: build one from the named factory and persist it.
        config = tools.AttrDict(getattr(configs, FLAGS.config)())
        config = utility.save_config(config, logdir)
    for score in train(config, FLAGS.env_processes):
        tf.logging.info('Score {}.'.format(score))
def sadd(self, key, *members):
    """Add the specified members to the set stored at key. Specified
    members that are already a member of this set are ignored. If key does
    not exist, a new set is created before adding the specified members.
    An error is returned when the value stored at key is not a set.

    Returns :data:`True` if all requested members are added. If more
    than one member is passed in and not all members are added, the
    number of added members is returned.

    .. note::
       **Time complexity**: ``O(N)`` where ``N`` is the number of members
       to be added.

    :param key: The key of the set
    :type key: :class:`str`, :class:`bytes`
    :param members: One or more positional arguments to add to the set
    :type members: :class:`str`, :class:`bytes`
    :returns: Number of items added to the set
    :rtype: bool, int
    """
    command = [b'SADD', key]
    command.extend(members)
    # The expected reply count equals the number of members requested.
    return self._execute(command, len(members))
def _node_le(self, node_self, node_other):
    '''_node_le

    Low-level api: Return True if all descendants of one node exist in the
    other node. Otherwise False. This is a recursive method.

    Parameters
    ----------
    node_self : `Element`
        A node to be compared.
    node_other : `Element`
        Another node to be compared.

    Returns
    -------
    bool
        True if all descendants of node_self exist in node_other, otherwise
        False.
    '''
    # The node itself must match on tag and text content first.
    for x in ['tag', 'text', 'tail']:
        if node_self.__getattribute__(x) != node_other.__getattribute__(x):
            return False
    # Every attribute of node_self must exist with an equal value on the peer.
    for a in node_self.attrib:
        if a not in node_other.attrib or node_self.attrib[a] != node_other.attrib[a]:
            return False
    for child in node_self.getchildren():
        # Each child must have exactly one matching peer under node_other.
        peers = self._get_peers(child, node_other)
        if len(peers) < 1:
            return False
        elif len(peers) > 1:
            raise ConfigError('not unique peer of node {}'.format(self.device.get_xpath(child)))
        else:
            # For user-ordered lists/leaf-lists, relative order matters: the
            # child's immediately preceding same-tag sibling must map to a
            # preceding sibling of the peer as well.
            schma_node = self.device.get_schema_node(child)
            if schma_node.get('ordered-by') == 'user' and schma_node.get('type') == 'leaf-list' or schma_node.get('ordered-by') == 'user' and schma_node.get('type') == 'list':
                elder_siblings = list(child.itersiblings(tag=child.tag, preceding=True))
                if elder_siblings:
                    immediate_elder_sibling = elder_siblings[0]
                    peers_of_immediate_elder_sibling = self._get_peers(immediate_elder_sibling, node_other)
                    if len(peers_of_immediate_elder_sibling) < 1:
                        return False
                    elif len(peers_of_immediate_elder_sibling) > 1:
                        p = self.device.get_xpath(immediate_elder_sibling)
                        raise ConfigError('not unique peer of node {}'.format(p))
                    elder_siblings_of_peer = list(peers[0].itersiblings(tag=child.tag, preceding=True))
                    if peers_of_immediate_elder_sibling[0] not in elder_siblings_of_peer:
                        return False
            # Recurse into the matched child/peer pair.
            if not self._node_le(child, peers[0]):
                return False
    return True
def _report_container_spec_metrics(self, pod_list, instance_tags):
    """Reports pod requests & limits by looking at pod specs.

    For every running container with a ``resources`` section, emits one
    gauge per resource under ``<NAMESPACE>.<resource>.requests`` and
    ``<NAMESPACE>.<resource>.limits``, tagged with the container's tags
    plus *instance_tags*.

    :param pod_list: pod list payload; pods are under the 'items' key
        (presumably the kubelet pod-list endpoint — TODO confirm).
    :param instance_tags: list of tags appended to every emitted metric.
    """
    for pod in pod_list['items']:
        pod_name = pod.get('metadata', {}).get('name')
        pod_phase = pod.get('status', {}).get('phase')
        # Skip pods filtered out by name/phase (e.g. terminated pods).
        if self._should_ignore_pod(pod_name, pod_phase):
            continue
        for ctr in pod['spec']['containers']:
            # Nothing to report for containers without a resources section.
            if not ctr.get('resources'):
                continue
            c_name = ctr.get('name', '')
            cid = None
            # Find this container's ID by matching names in the statuses.
            for ctr_status in pod['status'].get('containerStatuses', []):
                if ctr_status.get('name') == c_name:
                    # it is already prefixed with 'runtime://'
                    cid = ctr_status.get('containerID')
                    break
            # Containers without a status/ID yet cannot be tagged; skip.
            if not cid:
                continue
            pod_uid = pod.get('metadata', {}).get('uid')
            # Honor container-level exclusion rules.
            if self.pod_list_utils.is_excluded(cid, pod_uid):
                continue
            tags = tagger.tag('%s' % cid, tagger.HIGH) + instance_tags
            # Requests and limits are reported independently so a failure
            # in one does not suppress the other.
            try:
                for resource, value_str in iteritems(ctr.get('resources', {}).get('requests', {})):
                    value = self.parse_quantity(value_str)
                    self.gauge('{}.{}.requests'.format(self.NAMESPACE, resource), value, tags)
            except (KeyError, AttributeError) as e:
                self.log.debug("Unable to retrieve container requests for %s: %s", c_name, e)
            try:
                for resource, value_str in iteritems(ctr.get('resources', {}).get('limits', {})):
                    value = self.parse_quantity(value_str)
                    self.gauge('{}.{}.limits'.format(self.NAMESPACE, resource), value, tags)
            except (KeyError, AttributeError) as e:
                self.log.debug("Unable to retrieve container limits for %s: %s", c_name, e)
def memory_read16(self, addr, num_halfwords, zone=None):
    """Reads memory from the target system in units of 16-bits.

    Thin convenience wrapper around ``memory_read`` with ``nbits``
    fixed at ``16``.

    Args:
      self (JLink): the ``JLink`` instance
      addr (int): start address to read from
      num_halfwords (int): number of half words to read
      zone (str): memory zone to read from

    Returns:
      List of halfwords read from the target system.

    Raises:
      JLinkException: if memory could not be read
    """
    halfwords = self.memory_read(addr, num_halfwords, zone=zone, nbits=16)
    return halfwords
def __add_kickoff_task(cls, job_config, mapreduce_spec):
    """Add kickoff task to taskqueue.

    Args:
      job_config: map_job.JobConfig.
      mapreduce_spec: model.MapreduceSpec,
    """
    job_id = job_config.job_id
    # The task is deliberately unnamed so it can be enqueued inside a
    # transaction.
    kickoff_task = taskqueue.Task(
        # TODO(user): Perhaps make this url a computed field of job_config.
        url=job_config._base_path + "/kickoffjob_callback/" + job_id,
        headers=util._get_task_headers(job_id),
        params={"mapreduce_id": job_id})
    if job_config._hooks_cls:
        hooks = job_config._hooks_cls(mapreduce_spec)
        try:
            hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name)
        except NotImplementedError:
            # Hooks do not handle the kickoff; fall through to the direct
            # transactional enqueue below.
            pass
        else:
            return
    kickoff_task.add(job_config.queue_name, transactional=True)
def set_maxsize(self, maxsize, **kwargs):
    """Resize the cache to *maxsize*.

    A fresh cache implementation is built with the new size, the current
    items are transferred into it, and it replaces the old cache.
    """
    replacement = self._get_cache_impl(self.impl_name, maxsize, **kwargs)
    self._populate_new_cache(replacement)
    self.cache = replacement
def get_bio(self, section, language=None):
    """Returns a section of the bio.

    section can be "content", "summary" or "published" (for published
    date). When *language* is given, it is forwarded as the "lang"
    request parameter.
    """
    params = None
    if language:
        params = self._get_params()
        params["lang"] = language
    return self._extract_cdata_from_request(
        self.ws_prefix + ".getInfo", section, params)
def get(self, name, section=None, fallback=False):
    """Returns a previously registered preference

    :param section: The section name under which the preference is registered
    :type section: str.
    :param name: The name of the preference. You can use dotted notation 'section.name' if you want to avoid providing section param
    :type name: str.
    :param fallback: Should we return a dummy preference object instead of raising an error if no preference is found?
    :type name: bool.
    :return: a :py:class:`prefs.BasePreference` instance
    """
    # Dotted 'section.name' notation takes precedence over the section
    # parameter; a name without exactly one separator raises ValueError
    # on unpacking and falls through to the standard lookup.
    try:
        dotted_section, dotted_name = name.split(
            preferences_settings.SECTION_KEY_SEPARATOR)
    except ValueError:
        pass
    else:
        return self[dotted_section][dotted_name]
    try:
        return self[section][name]
    except KeyError:
        if fallback:
            return self._fallback(section_name=section, pref_name=name)
        raise NotFoundInRegistry("No such preference in {0} with section={1} and name={2}".format(self.__class__.__name__, section, name))
def mapIdentity(self, primarySubject, secondarySubject, vendorSpecific=None):
    """Map *primarySubject* to *secondarySubject* and report success.

    See Also: mapIdentityResponse()

    Args:
      primarySubject:
      secondarySubject:
      vendorSpecific:

    Returns:
      bool parsed from the service response.
    """
    return self._read_boolean_response(
        self.mapIdentityResponse(
            primarySubject, secondarySubject, vendorSpecific))
def remove_atoms(self, indices):
    """Remove the atoms positioned at *indices*. The molecule
    containing the atom is removed as well.

    If you have a system of 10 water molecules (and 30 atoms), if
    you remove the atoms at indices 0, 1 and 29 you will remove
    the first and last water molecules.

    **Parameters**

    indices: np.ndarray((N,), dtype=int)
        Array of integers between 0 and System.n_atoms
    """
    # Map atom indices to their owning molecules, then rebuild this
    # system from the sub-system selecting those molecules.
    molecules_to_drop = self.atom_to_molecule_indices(indices)
    self.copy_from(self.sub(molecule_index=molecules_to_drop))
def p_common_scalar_magic_file(p):
    # NOTE: the string below is a PLY grammar production, not documentation.
    # ply.yacc reads it to bind this action to 'common_scalar : FILE';
    # it must not be reworded.
    'common_scalar : FILE'
    # Build a MagicConstant from the token (presumably the __FILE__ magic
    # constant — TODO confirm); its value is the lexer's current filename
    # when one is known, else None.
    value = getattr(p.lexer, 'filename', None)
    p[0] = ast.MagicConstant(p[1].upper(), value, lineno=p.lineno(1))
def trim_common_suffixes(strs, min_len=0):
    """Strip any suffix shared by all of *strs*.

    Delegates to ``trim_common_prefixes`` on the reversed strings and
    reverses the results back. Returns ``(trimmed, strs)`` where
    ``trimmed`` is the number of characters removed.

    >>> trim_common_suffixes('A', 1)
    (0, 'A')
    """
    # Fewer than two strings cannot share a suffix.
    if len(strs) < 2:
        return 0, strs
    reversed_strs = [s[::-1] for s in strs]
    trimmed, reversed_strs = trim_common_prefixes(reversed_strs, min_len)
    if trimmed:
        strs = [s[::-1] for s in reversed_strs]
    return trimmed, strs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.