signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def new_tbl(cls, rows, cols, width):
    """Return a new ``w:tbl`` element having *rows* rows and *cols* columns,
    with *width* distributed evenly between the columns."""
    # Build the table XML via the class helper, then parse it into an element.
    tbl_xml = cls._tbl_xml(rows, cols, width)
    return parse_xml(tbl_xml)
|
def turn_on(self, time):
    """(Helper) Turn on an output by sending the encoded command to the panel."""
    message = cn_encode(self._index, time)
    self._elk.send(message)
|
def text_base64decode(data):
    '''Base64 decode to unicode text.

    :param str data: String data to decode to unicode.
    :return: Base64 decoded string.
    :rtype: str
    '''
    try:
        raw = b64decode(data.encode('utf-8'))
        return raw.decode('utf-8')
    # ValueError for Python 3, TypeError for Python 2
    except (ValueError, TypeError):
        raise ValueError(_ERROR_MESSAGE_NOT_BASE64)
|
def LCHab_to_Lab(cobj, *args, **kwargs):
    """Convert from LCH(ab) to Lab."""
    # Polar (C, H) -> Cartesian (a, b); hue is stored in degrees.
    hue_radians = math.radians(cobj.lch_h)
    lab_l = cobj.lch_l
    lab_a = cobj.lch_c * math.cos(hue_radians)
    lab_b = cobj.lch_c * math.sin(hue_radians)
    return LabColor(lab_l, lab_a, lab_b,
                    illuminant=cobj.illuminant, observer=cobj.observer)
|
def run_mcl(matrix, expansion=2, inflation=2, loop_value=1, iterations=100,
            pruning_threshold=0.001, pruning_frequency=1,
            convergence_check_frequency=1, verbose=False):
    """Perform MCL on the given similarity matrix.

    :param matrix: The similarity matrix to cluster
    :param expansion: The cluster expansion factor
    :param inflation: The cluster inflation factor
    :param loop_value: Initialization value for self-loops
    :param iterations: Maximum number of iterations (the actual number of
        iterations will be less if convergence is reached)
    :param pruning_threshold: Threshold below which matrix elements will be
        set to 0
    :param pruning_frequency: Perform pruning every 'pruning_frequency'
        iterations
    :param convergence_check_frequency: Perform the check for convergence
        every convergence_check_frequency iterations
    :param verbose: Print extra information to the console
    :returns: The final matrix
    """
    assert expansion > 1, "Invalid expansion parameter"
    assert inflation > 1, "Invalid inflation parameter"
    assert loop_value >= 0, "Invalid loop_value"
    assert iterations > 0, "Invalid number of iterations"
    assert pruning_threshold >= 0, "Invalid pruning_threshold"
    assert pruning_frequency > 0, "Invalid pruning_frequency"
    assert convergence_check_frequency > 0, "Invalid convergence_check_frequency"

    printer = MessagePrinter(verbose)
    printer.print("-" * 50)
    printer.print("MCL Parameters")
    printer.print("Expansion: {}".format(expansion))
    printer.print("Inflation: {}".format(inflation))
    if pruning_threshold > 0:
        printer.print("Pruning threshold: {}, frequency: {} iteration{}".format(
            pruning_threshold, pruning_frequency,
            "s" if pruning_frequency > 1 else ""))
    else:
        printer.print("No pruning")
    printer.print("Convergence check: {} iteration{}".format(
        convergence_check_frequency,
        "s" if convergence_check_frequency > 1 else ""))
    printer.print("Maximum iterations: {}".format(iterations))
    printer.print("{} matrix mode".format("Sparse" if isspmatrix(matrix) else "Dense"))
    printer.print("-" * 50)

    # Initialize self-loops, then normalize to a stochastic matrix.
    if loop_value > 0:
        matrix = add_self_loops(matrix, loop_value)
    matrix = normalize(matrix)

    for i in range(iterations):
        printer.print("Iteration {}".format(i + 1))
        # Keep the pre-iteration matrix so convergence can be checked below.
        last_mat = matrix.copy()
        # One round of MCL expansion and inflation.
        matrix = iterate(matrix, expansion, inflation)
        # Prune every `pruning_frequency` iterations (if enabled).
        if pruning_threshold > 0 and i % pruning_frequency == pruning_frequency - 1:
            printer.print("Pruning")
            matrix = prune(matrix, pruning_threshold)
        # Check for convergence every `convergence_check_frequency` iterations.
        if i % convergence_check_frequency == convergence_check_frequency - 1:
            printer.print("Checking for convergence")
            if converged(matrix, last_mat):
                printer.print("Converged after {} iteration{}".format(
                    i + 1, "s" if i > 0 else ""))
                break
    printer.print("-" * 50)
    return matrix
|
def order_market_buy(self, **params):
    """Send in a new market buy order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int
    :returns: API response — see order endpoint for full response options
    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException,
        BinanceOrderMinAmountException, BinanceOrderMinPriceException,
        BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException,
        BinanceOrderInactiveSymbolException
    """
    # Force the side to BUY, then delegate to the generic market-order call.
    params['side'] = self.SIDE_BUY
    return self.order_market(**params)
|
def stop(self):
    """Shutdown pyro naming server."""
    shutdown_args = self.getShutdownArgs() + ['shutdown']
    Pyro.nsc.main(shutdown_args)
    # Wait for the server thread to finish.
    self.join()
|
def sub_list(self, from_index, to_index):
    """Return a sublist of this list from from_index (inclusive) to
    to_index (exclusive).

    The returned list is backed by this list, so non-structural changes in
    the returned list are reflected in this list, and vice-versa.

    :param from_index: (int), the start point (inclusive) of the sub_list.
    :param to_index: (int), the end point (exclusive) of the sub_list.
    :return: (Sequence), a view of the specified range within this list.
    """
    return self._encode_invoke(list_sub_codec, from_=from_index, to=to_index)
|
def get_stack(self, stack):
    """Fetch a service stack (group) and its attribute information.

    Args:
        - stack: name of the stack the service belongs to
    Returns:
        a tuple of (<result>, <ResponseInfo>):
        - result: stack info on success, {"error": "<errMsg string>"} on failure
        - ResponseInfo: response information for the request
    """
    url = '{0}/v3/stacks/{1}'.format(self.host, stack)
    return self.__get(url)
|
def from_events(self, instance, ev_args, ctx):
    """Like :meth:`.ChildList.from_events`, but the parsed object is appended
    to the list associated with its tag in the dict."""
    # The (namespace, localname) pair selects the class to parse with.
    tag = ev_args[0], ev_args[1]
    obj_class = self._tag_map[tag]
    obj = yield from obj_class.parse_events(ev_args, ctx)
    mapping = self.__get__(instance, type(instance))
    mapping[self.key(obj)].append(obj)
|
def from_name_re(cls, path: PathOrStr, fnames: FilePathList, pat: str, valid_pct: float = 0.2, **kwargs):
    "Create from list of `fnames` in `path` with re expression `pat`."
    pat = re.compile(pat)

    def _get_label(fn):
        # Use the POSIX string form so the regex always sees forward slashes.
        if isinstance(fn, Path):
            fn = fn.as_posix()
        res = pat.search(str(fn))
        assert res, f'Failed to find "{pat}" in "{fn}"'
        return res.group(1)

    return cls.from_name_func(path, fnames, _get_label, valid_pct=valid_pct, **kwargs)
|
def RB_to_OPLS(c0, c1, c2, c3, c4, c5):
    """Convert Ryckaert-Bellemans type dihedrals to OPLS type.

    Parameters
    ----------
    c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)

    Returns
    -------
    opls_coeffs : np.array, shape=(4,)
        Array containing the OPLS dihedral coeffs f1, f2, f3, and f4
        (in kcal/mol).
    """
    # NOTE(review): c2 and c5 do not enter this transformation — confirm
    # against the intended RB->OPLS convention.
    return np.array([
        -2.0 * c1 - 1.5 * c3,   # f1
        c0 + c1 + c3,           # f2
        -0.5 * c3,              # f3
        -0.25 * c4,             # f4
    ])
|
def _process_get_cal_resp(url, post_response, campus):
    """:return: a dictionary of {calendarid: TrumbaCalendar};
    None if error, {} if not exists.
    If the request is successful, process the response data and load the
    json data into the return object."""
    request_id = "%s %s" % (campus, url)
    calendar_dict = {}
    data = _load_json(request_id, post_response)
    calendars = data['d']['Calendars']
    if calendars is not None and len(calendars) > 0:
        _load_calendar(campus, calendars, calendar_dict, None)
    return calendar_dict
|
def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None, vbm_cbm_marker=False):
    """Return a plot composed of subplots along different elements and
    orbitals.

    Args:
        dictio: The element and orbitals you want a projection on. The
            format is {Element: [Orbitals]}; for instance
            {'Cu': ['d', 's'], 'O': ['p']} will give projections for Cu on
            d and s orbitals and on oxygen p.
            If you use this class to plot LobsterBandStructureSymmLine,
            the orbitals are named as in the FATBAND filename, e.g.
            "2p" or "2p_x".

    Returns:
        a pylab object with different subfigures for each projection.
        The blue and red colors are for spin up and spin down.
        The bigger the red or blue dot in the band structure, the higher
        character for the corresponding element and orbital.
    """
    band_linewidth = 1.0
    fig_number = sum(len(v) for v in dictio.values())
    proj = self._get_projections_by_branches(dictio)
    data = self.bs_plot_data(zero_to_efermi)
    plt = pretty_plot(12, 8)
    # Wider energy window for metals.
    e_min, e_max = (-10, 10) if self._bs.is_metal() else (-4, 4)
    count = 1
    for el in dictio:
        for o in dictio[el]:
            # Subplots arranged in a two-column grid.
            plt.subplot(100 * math.ceil(fig_number / 2) + 20 + count)
            self._maketicks(plt)
            for b in range(len(data['distances'])):
                for i in range(self._nb_bands):
                    plt.plot(data['distances'][b],
                             [data['energy'][b][str(Spin.up)][i][j]
                              for j in range(len(data['distances'][b]))],
                             'b-', linewidth=band_linewidth)
                    if self._bs.is_spin_polarized:
                        plt.plot(data['distances'][b],
                                 [data['energy'][b][str(Spin.down)][i][j]
                                  for j in range(len(data['distances'][b]))],
                                 'r--', linewidth=band_linewidth)
                        # Spin-down projection dots; marker size encodes
                        # the orbital character.
                        for j in range(len(data['energy'][b][str(Spin.up)][i])):
                            plt.plot(data['distances'][b][j],
                                     data['energy'][b][str(Spin.down)][i][j],
                                     'ro',
                                     markersize=proj[b][str(Spin.down)][i][j][str(el)][o] * 15.0)
                    # Spin-up projection dots.
                    for j in range(len(data['energy'][b][str(Spin.up)][i])):
                        plt.plot(data['distances'][b][j],
                                 data['energy'][b][str(Spin.up)][i][j],
                                 'bo',
                                 markersize=proj[b][str(Spin.up)][i][j][str(el)][o] * 15.0)
            if ylim is None:
                if self._bs.is_metal():
                    if zero_to_efermi:
                        plt.ylim(e_min, e_max)
                    else:
                        plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
                else:
                    if vbm_cbm_marker:
                        for cbm in data['cbm']:
                            plt.scatter(cbm[0], cbm[1], color='r', marker='o', s=100)
                        for vbm in data['vbm']:
                            plt.scatter(vbm[0], vbm[1], color='g', marker='o', s=100)
                    plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1] + e_max)
            else:
                plt.ylim(ylim)
            plt.title(str(el) + " " + str(o))
            count += 1
    return plt
|
def qualNorm(data, qualitative):
    """Generate starting points using binarized data.

    If qualitative data is missing for a given gene, all of its entries
    should be -1 in the qualitative matrix.

    Args:
        data (array): 2d array of genes x cells
        qualitative (array): 2d array of numerical data - genes x clusters

    Returns:
        Array of starting positions for state estimation or clustering,
        with shape genes x clusters
    """
    genes, cells = data.shape
    clusters = qualitative.shape[1]
    output = np.zeros((genes, clusters))
    missing_indices = []
    qual_indices = []
    # Midpoint between each gene's min and max qualitative value.
    thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1)) / 2.0
    for i in range(genes):
        # All -1 means qualitative data is missing for this gene.
        if qualitative[i, :].max() == -1 and qualitative[i, :].min() == -1:
            missing_indices.append(i)
            continue
        qual_indices.append(i)
        threshold = thresholds[i]
        data_i = data[i, :]
        if sparse.issparse(data):
            data_i = data_i.toarray().flatten()
        # Split this gene's expression into a low and a high component.
        assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
        means = means.flatten()
        high_i, low_i = (0, 1) if means[0] > means[1] else (1, 0)
        high_mean = np.median(data_i[assignments == high_i])
        low_mean = np.median(data_i[assignments == low_i])
        for k in range(clusters):
            output[i, k] = high_mean if qualitative[i, k] > threshold else low_mean
    if missing_indices:
        assignments, means = poisson_cluster(
            data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
        for ind in missing_indices:
            for k in range(clusters):
                # BUG FIX: the original tested `len(assignments == k) == 0`,
                # which is the length of a boolean array and therefore never 0;
                # count the members actually assigned to cluster k instead.
                if np.count_nonzero(assignments == k) == 0:
                    output[ind, k] = data[ind, :].mean()
                else:
                    output[ind, k] = data[ind, assignments == k].mean()
    return output
|
def get_current_project(self):
    """Get name of current project using `oc project` command.

    Raise ConuException in case of an error.
    :return: str, project name
    """
    try:
        command = self._oc_command(["project", "-q"])
        output = run_cmd(command, return_output=True)
    except subprocess.CalledProcessError as ex:
        raise ConuException("Failed to obtain current project name : %s" % ex)
    try:
        # strip the trailing '\n'
        return output.rstrip()
    except IndexError:
        raise ConuException("Failed to obtain project name")
|
def validate(self):
    """Validate process descriptor."""
    for field in ('slug', 'name', 'process_type', 'version'):
        if getattr(self.metadata, field, None) is None:
            raise ValidationError(
                "process '{}' is missing required meta attribute: {}".format(
                    self.metadata.slug or '<unknown>', field))
    if not PROCESSOR_TYPE_RE.match(self.metadata.process_type):
        raise ValidationError(
            "process '{}' has invalid type: {}".format(
                self.metadata.slug, self.metadata.process_type))
|
def purge_all(self, rate_limit_delay=60):
    '''Purge all pending URLs, waiting for API rate-limits if necessary!'''
    for batch, response in self.purge():
        # HTTP 507 signals a rate limit; back off and let purge() retry.
        if response.status_code == 507:
            details = response.json().get('detail', '<response did not contain "detail">')
            logger.info('Will retry request in %d seconds due to API rate-limit: %s',
                        rate_limit_delay, details)
            time.sleep(rate_limit_delay)
|
def post(self, action, data=None, headers=None):
    """Makes a POST request.

    (Fixed: the original docstring said "GET" while the method sends POST.)

    :param action: path component appended to the endpoint URL
    :param data: optional request payload
    :param headers: optional extra HTTP headers
    :return: the result of ``self.request`` for the response
    """
    return self.request(make_url(self.endpoint, action),
                        method='POST', data=data, headers=headers)
|
def searchadmin(self, searchstring=None):
    """Render the admin search-user page."""
    self._check_auth(must_admin=True)
    is_admin = self._check_admin()
    res = self._search(searchstring) if searchstring is not None else None
    attrs_list = self.attributes.get_search_attributes()
    return self.temp['searchadmin.tmpl'].render(
        searchresult=res,
        attrs_list=attrs_list,
        is_admin=is_admin,
        custom_js=self.custom_js,
        notifications=self._empty_notification(),
    )
|
def _ensure_format ( rule , attribute , res_dict ) :
"""Verifies that attribute in res _ dict is properly formatted .
Since , in the . ini - files , lists are specified as ' : ' separated text and
UUID values can be plain integers we need to transform any such values
into proper format . Empty strings are converted to None if validator
specifies that None value is accepted ."""
|
if rule == 'type:uuid' or ( rule == 'type:uuid_or_none' and res_dict [ attribute ] ) :
res_dict [ attribute ] = uuidify ( res_dict [ attribute ] )
elif rule == 'type:uuid_list' :
if not res_dict [ attribute ] :
res_dict [ attribute ] = [ ]
else :
temp_list = res_dict [ attribute ] . split ( ':' )
res_dict [ attribute ] = [ ]
for item in temp_list :
res_dict [ attribute ] . append = uuidify ( item )
elif rule == 'type:string_or_none' and res_dict [ attribute ] == "" :
res_dict [ attribute ] = None
|
def fix_display(self):
    """Switch Matplotlib to the Agg backend on headless systems.

    If no display is available, the Matplotlib backend is changed to one
    that does not need one.
    """
    try:
        tkinter.Tk()
    except (tkinter.TclError, NameError):
        # No display available (or tkinter itself could not be imported).
        try:
            import matplotlib as mpl
        except ImportError:
            pass
        else:
            print("Setting matplotlib backend to Agg")
            mpl.use('Agg')
|
def delete(self, force=False, volumes=False):
    """Delete the underlying image.

    :param force: bool - force delete, do not care about errors
    :param volumes: not used anyhow
    :return: None
    """
    try:
        self.image.rmi()
    except ConuException as ime:
        # When forcing, errors are deliberately ignored.
        if not force:
            raise ime
|
def speak(self, sentence):
    """Speak a sentence using Google TTS.

    The synthesized audio is written to a temporary mp3 file, played
    asynchronously, and the file is removed after playback.

    :param sentence: the sentence to speak.
    """
    temp_dir = "/tmp/"
    filename = "gtts.mp3"
    # os.path.join avoids the doubled slash the old "{}/{}" format produced.
    file_path = os.path.join(temp_dir, filename)
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)

    def delete_file():
        # Best-effort cleanup: never let a cleanup failure interrupt playback.
        # BUG FIX: the original used bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to OSError.
        try:
            os.remove(file_path)
            if not os.listdir(temp_dir):
                try:
                    os.rmdir(temp_dir)
                except OSError:
                    pass
        except OSError:
            pass

    if self.logger is not None:
        self.logger.info("Google TTS: {}".format(sentence))
    tts = gTTS(text=sentence, lang=self.locale)
    tts.save(file_path)
    AudioPlayer.play_async(file_path, delete_file)
|
def row_number(series, ascending=True):
    """Return row number based on column rank.

    Equivalent to `series.rank(method='first', ascending=ascending)`.

    Args:
        series: column to rank.
    Kwargs:
        ascending (bool): whether to rank in ascending order (default is `True`).

    Usage:
        diamonds >> head() >> mutate(rn=row_number(X.x))
    """
    return series.rank(method='first', ascending=ascending)
|
def acquire_reader(self):
    """Acquire a read lock; several threads can hold this type of lock."""
    with self.mutex:
        # Wait while a writer holds the lock (rwlock < 0), the reader limit
        # is reached, or writers are queued (writer preference).
        while (self.rwlock < 0
               or self.rwlock == self.max_reader_concurrency
               or self.writers_waiting):
            self.readers_ok.wait()
        self.rwlock += 1
|
def setMaximumWidth(self, width):
    """Set the maximum width value to the inputed width and emit the
    sizeConstraintChanged signal.

    :param width | <int>
    """
    super(XView, self).setMaximumWidth(width)
    if not self.signalsBlocked():
        self.sizeConstraintChanged.emit()
|
def where(self, field, value=None, operator=None):
    """Add filter conditions to the query, joined by AND.

    *field* may be a dict of {field: value} pairs, or a single field name
    together with *value*. Returns self for chaining.
    """
    if field is None:
        return self

    def _conjunction():
        # Join with AND only when a condition already exists.
        return 'AND' if self.where_criteria.size() > 0 else None

    if value is None and isinstance(field, dict):
        for f, v in field.items():
            self.where_criteria.append(
                expressions.ConditionExpression(
                    f, v, operator=operator, conjunction=_conjunction()))
    else:
        self.where_criteria.append(
            expressions.ConditionExpression(
                field, value, operator=operator, conjunction=_conjunction()))
    return self
|
def check_valid(var, key, expected):
    r"""Check that a variable's attribute has the expected value; warn the user otherwise."""
    att = getattr(var, key, None)
    if att is None:
        warn('Variable does not have a `{}` attribute.'.format(key))
    elif att != expected:
        warn('Variable has a non-conforming {}. Got `{}`, expected `{}`'.format(
            key, att, expected))
|
def copy_data_item(self, data_item: DataItem) -> DataItem:
    """Copy a data item.

    .. versionadded:: 1.0
    Scriptable: No
    """
    # Deep-copy the underlying item, register it with the document model,
    # and wrap the copy in a fresh facade object.
    copied = copy.deepcopy(data_item._data_item)
    self.__document_model.append_data_item(copied)
    return DataItem(copied)
|
def scaled_dot_product_attention_simple(q, k, v, bias, name=None):
    """Scaled dot-product attention. One head. One spatial dimension.

    Args:
        q: a Tensor with shape [batch, length_q, depth_k]
        k: a Tensor with shape [batch, length_kv, depth_k]
        v: a Tensor with shape [batch, length_kv, depth_v]
        bias: optional Tensor broadcastable to [batch, length_q, length_kv]
        name: an optional string

    Returns:
        A Tensor.
    """
    with tf.variable_scope(name, default_name="scaled_dot_product_attention_simple"):
        # Scale queries by 1/sqrt(depth_k) before the dot product.
        depth_k = common_layers.shape_list(q)[2]
        scalar = tf.rsqrt(tf.to_float(depth_k))
        logits = tf.matmul(q * scalar, k, transpose_b=True)
        if bias is not None:
            logits += bias
        weights = tf.nn.softmax(logits, name="attention_weights")
        if common_layers.should_generate_summaries():
            # Visualize attention weights (pow(0.2) boosts faint entries).
            tf.summary.image("attention",
                             tf.expand_dims(tf.pow(weights, 0.2), 3),
                             max_outputs=1)
        return tf.matmul(weights, v)
|
def show_queue(self, name=None, count=10, delete=False):
    """Show up to ``count`` messages from the queue named ``name``. If
    ``name`` is None, show for each queue in our config. If ``delete`` is
    True, delete the messages after showing them.

    :param name: queue name, or None for all queues in config.
    :type name: str
    :param count: maximum number of messages to get from queue
    :type count: int
    :param delete: whether or not to delete messages after receipt
    :type delete: bool
    """
    logger.debug('Connecting to SQS API')
    conn = client('sqs')
    queues = [name] if name is not None else self._all_queue_names
    for q_name in queues:
        try:
            self._show_one_queue(conn, q_name, count, delete=delete)
        except Exception:
            # One failing queue should not abort the others.
            logger.error("Error showing queue '%s'", q_name, exc_info=1)
|
async def _sem_crawl(self, sem, res):
    """Run one crawl_media call under the semaphore ``sem``.

    Bounding concurrency and sleeping briefly between crawls avoids turning
    the async crawler into a DDoS against the crawled server.

    :param sem: semaphore limiting concurrent crawls
    :param res: dict describing the resource to crawl
    :type res: dict
    """
    async with sem:
        succeeded = await self.crawl_raw(res)
        if succeeded:
            self.result['ok'] += 1
        else:
            self.result['fail'] += 1
        # take a little gap between crawl requests
        await asyncio.sleep(random.randint(0, 1))
|
def function_to_serializable_representation(fn):
    """Convert a Python function into a serializable representation.

    Does not currently work for methods or functions with closure data.
    """
    if type(fn) not in (FunctionType, BuiltinFunctionType):
        raise ValueError(
            "Can't serialize %s : %s, must be globally defined function" % (fn, type(fn),))
    if hasattr(fn, "__closure__") and fn.__closure__ is not None:
        raise ValueError("No serializable representation for closure %s" % (fn,))
    return {"__module__": get_module_name(fn), "__name__": fn.__name__}
|
def _yarn_init(self, rm_address, requests_config, tags):
    """Return a dictionary of {app_id: (app_name, tracking_url)} for running
    Spark applications."""
    running_apps = self._yarn_get_running_spark_apps(rm_address, requests_config, tags)
    # Report success only after gathering all metrics from ResourceManager.
    self.service_check(
        YARN_SERVICE_CHECK,
        AgentCheck.OK,
        tags=['url:%s' % rm_address] + tags,
        message='Connection to ResourceManager "%s" was successful' % rm_address,
    )
    return running_apps
|
def rrmdir(directory):
    """Recursively delete a directory.

    :param directory: directory to remove
    """
    # Walk bottom-up so children are removed before their parents.
    for root, dirs, files in os.walk(directory, topdown=False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            os.rmdir(os.path.join(root, name))
    os.rmdir(directory)
|
def delete_disk(kwargs=None, call=None):
    '''Permanently delete a persistent disk.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f delete_disk gce disk_name=pd
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_disk function must be called with -f or --function.')
    if not kwargs or 'disk_name' not in kwargs:
        log.error('A disk_name must be specified when deleting a disk.')
        return False

    conn = get_conn()
    disk = conn.ex_get_volume(kwargs.get('disk_name'))

    def _event_args():
        # Fresh dict per event so the two fire_event calls stay independent.
        return {'name': disk.name, 'location': disk.extra['zone'].name, 'size': disk.size}

    __utils__['cloud.fire_event'](
        'event', 'delete disk', 'salt/cloud/disk/deleting',
        args=_event_args(),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    try:
        result = conn.destroy_volume(disk)
    except ResourceInUseError as exc:
        log.error(
            'Disk %s is in use and must be detached before deleting.\n'
            'The following exception was thrown by libcloud:\n%s',
            disk.name, exc, exc_info_on_loglevel=logging.DEBUG)
        return False
    __utils__['cloud.fire_event'](
        'event', 'deleted disk', 'salt/cloud/disk/deleted',
        args=_event_args(),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    return result
|
def load_isd_hourly_temp_data(self, start, end, read_from_cache=True,
                              write_to_cache=True, error_on_missing_years=True):
    """Load resampled hourly ISD temperature data from start date to end date
    (inclusive).

    This is the primary convenience method for loading resampled hourly ISD
    temperature data.

    Parameters
    ----------
    start : datetime.datetime
        The earliest date from which to load data.
    end : datetime.datetime
        The latest date until which to load data.
    read_from_cache : bool
        Whether or not to load data from cache.
    write_to_cache : bool
        Whether or not to write newly loaded data to cache.
    """
    # Delegate to the module-level loader using this station's USAF id.
    return load_isd_hourly_temp_data(
        self.usaf_id, start, end,
        read_from_cache=read_from_cache,
        write_to_cache=write_to_cache,
        error_on_missing_years=error_on_missing_years,
    )
|
def _load_ini_based_io(path, recursive=False, ini=None, subini=None,
                       include_core=True, only_coefficients=False):
    """DEPRECATED: load an IOSystem or Extension from ini files (the format
    that preceded the json one).

    DataFrames (tables) are loaded from text or binary pickle files. For the
    latter the extension .pkl or .pickle is assumed; in all other cases the
    tables are assumed to be in .txt format.

    Parameters
    ----------
    path : string
        path or ini file name for the data to load
    recursive : boolean, optional
        If True, also load the data in first-order subfolders and add them as
        extensions to the IOSystem (in that case path must point to the
        root). Folders without an ini file are skipped. Use the subini
        parameter in case of multiple ini files in a subfolder. Attribute
        names of the extensions in the IOSystem are based on the subfolder
        name. Default is False.
    ini : string, optional
        If there are several ini files in the root folder, take this one for
        loading the data. If None (default) take the ini found in the
        folder, error if several are found.
    subini : dict, optional
        If there are multiple inis in a subfolder, use the ini given in the
        dict. Format: 'subfoldername': 'ininame'. If a key for a subfolder
        is not found or None (default), the ini found in the folder will be
        taken, error if several are found.
    include_core : boolean, optional
        If False, do not include the A, L and Z matrices. This significantly
        reduces the required memory if the purpose is only to analyse
        results calculated beforehand.

    Returns
    -------
    IOSystem or Extension class depending on systemtype in the ini file.

    Raises
    ------
    ReadError
        If the path does not exist, an ini file is missing or ambiguous, or
        a required file specification is missing.
    """
    # BUG FIX (cleanup): the original had an unreachable `return None` after
    # every `raise` (dead code, removed), used a mutable default argument
    # (`subini={}`), and a non-raw regex string with invalid escapes.
    if subini is None:
        subini = {}

    # check path and given parameters
    ini_file_name = None
    path = os.path.abspath(os.path.normpath(path))
    if os.path.splitext(path)[1] == '.ini':
        (path, ini_file_name) = os.path.split(path)
    if ini:
        ini_file_name = ini
    if not os.path.exists(path):
        raise ReadError('Given path does not exist')
    if not ini_file_name:
        _inifound = False
        for file in os.listdir(path):
            if os.path.splitext(file)[1] == '.ini':
                if _inifound:
                    raise ReadError('Found multiple ini files in folder - specify one')
                ini_file_name = file
                _inifound = True

    # read the ini
    io_ini = configparser.RawConfigParser()
    io_ini.optionxform = lambda option: option
    io_ini.read(os.path.join(path, ini_file_name))
    systemtype = io_ini.get('systemtype', 'systemtype', fallback=None)
    name = io_ini.get('meta', 'name', fallback=os.path.splitext(ini_file_name)[0])
    if systemtype == 'IOSystem':
        ret_system = IOSystem(name=name)
    elif systemtype == 'Extension':
        ret_system = Extension(name=name)
    else:
        raise ReadError('System not defined in ini')

    for key in io_ini['meta']:
        setattr(ret_system, key, io_ini.get('meta', key, fallback=None))

    for key in io_ini['files']:
        # *_nr_index_col / *_nr_header entries describe other entries
        if '_nr_index_col' in key:
            continue
        if '_nr_header' in key:
            continue
        if not include_core:
            not_to_load = ['A', 'L', 'Z']
            if key in not_to_load:
                continue
        if only_coefficients:
            _io = IOSystem()
            if key not in _io.__coefficients__ + ['unit']:
                continue
        file_name = io_ini.get('files', key)
        nr_index_col = io_ini.get('files', key + '_nr_index_col', fallback=None)
        nr_header = io_ini.get('files', key + '_nr_header', fallback=None)
        if (nr_index_col is None) or (nr_header is None):
            raise ReadError('Index or column specification missing for {}'.format(
                str(file_name)))
        _index_col = list(range(int(nr_index_col)))
        _header = list(range(int(nr_header)))
        if _index_col == [0]:
            _index_col = 0
        if _header == [0]:
            _header = 0
        file = os.path.join(path, file_name)
        logging.info('Load data from {}'.format(file))
        if (os.path.splitext(file)[1] == '.pkl'
                or os.path.splitext(file)[1] == '.pickle'):
            setattr(ret_system, key, pd.read_pickle(file))
        else:
            setattr(ret_system, key,
                    pd.read_table(file, index_col=_index_col, header=_header))

    if recursive:
        # look for subfolders in the given path
        subfolder_list = os.walk(path).__next__()[1]
        # loop over all subfolders and append extensions based on
        # the ini file in each subfolder
        for subfolder in subfolder_list:
            subini_file_name = subini.get(subfolder)
            subpath = os.path.abspath(os.path.join(path, subfolder))
            if not subini_file_name:
                _inifound = False
                for file in os.listdir(subpath):
                    if os.path.splitext(file)[1] == '.ini':
                        if _inifound:
                            raise ReadError(
                                'Found multiple ini files in subfolder '
                                '{} - specify one'.format(subpath))
                        subini_file_name = file
                        _inifound = True
                if not _inifound:
                    continue
            # read the ini
            subio_ini = configparser.RawConfigParser()
            subio_ini.optionxform = lambda option: option
            subio_ini.read(os.path.join(subpath, subini_file_name))
            systemtype = subio_ini.get('systemtype', 'systemtype', fallback=None)
            name = subio_ini.get('meta', 'name',
                                 fallback=os.path.splitext(subini_file_name)[0])
            if systemtype == 'IOSystem':
                raise ReadError('IOSystem found in subfolder {} - '
                                'only extensions expected'.format(subpath))
            elif systemtype == 'Extension':
                sub_system = Extension(name=name)
            else:
                raise ReadError('System not defined in ini')
            for key in subio_ini['meta']:
                setattr(sub_system, key, subio_ini.get('meta', key, fallback=None))
            for key in subio_ini['files']:
                if '_nr_index_col' in key:
                    continue
                if '_nr_header' in key:
                    continue
                if only_coefficients:
                    _ext = Extension('temp')
                    if key not in _ext.__coefficients__ + ['unit']:
                        continue
                file_name = subio_ini.get('files', key)
                nr_index_col = subio_ini.get('files', key + '_nr_index_col',
                                             fallback=None)
                nr_header = subio_ini.get('files', key + '_nr_header', fallback=None)
                if (nr_index_col is None) or (nr_header is None):
                    raise ReadError('Index or column specification missing '
                                    'for {}'.format(str(file_name)))
                _index_col = list(range(int(nr_index_col)))
                _header = list(range(int(nr_header)))
                if _index_col == [0]:
                    _index_col = 0
                if _header == [0]:
                    _header = 0
                file = os.path.join(subpath, file_name)
                logging.info('Load data from {}'.format(file))
                if (os.path.splitext(file)[1] == '.pkl'
                        or os.path.splitext(file)[1] == '.pickle'):
                    setattr(sub_system, key, pd.read_pickle(file))
                else:
                    setattr(sub_system, key,
                            pd.read_table(file, index_col=_index_col, header=_header))

            # get valid python name from folder
            def clean(varStr):
                return re.sub(r'\W|^(?=\d)', '_', str(varStr))

            setattr(ret_system, clean(subfolder), sub_system)
    return ret_system
|
def get_service_task_ids(service_name, task_predicate=None, inactive=False, completed=False):
    """Return the IDs of tasks associated with a service.

    :param service_name: the service name
    :type service_name: str
    :param task_predicate: optional filter; receives a task dict and returns a boolean
    :type task_predicate: function, or None
    :param inactive: whether to include inactive services
    :type inactive: bool
    :param completed: whether to include completed services
    :type completed: bool
    :return: a list of task ids
    :rtype: [str], or None
    """
    service_tasks = get_service_tasks(service_name, inactive, completed)
    if not task_predicate:
        # No filter supplied: accept every task.
        task_predicate = lambda _task: True
    return [task['id'] for task in service_tasks if task_predicate(task)]
|
def listFiletypes(targetfilename, directory):
    """Collect every extension under which ``targetfilename`` occurs in a directory.

    Everything after the first dot is treated as the file extension:
    ``"filename.txt" -> "txt"``, ``"filename.txt.zip" -> "txt.zip"``.

    :param targetfilename: a filename without any extensions
    :param directory: only files present in this directory are compared
        to the targetfilename
    :returns: a list of file extensions (str)
    """
    found_extensions = []
    for entry in os.listdir(directory):
        # Directories (and other non-files) never count as matches.
        if not os.path.isfile(joinpath(directory, entry)):
            continue
        base, _dot, extension = entry.partition('.')
        if base == targetfilename:
            found_extensions.append(extension)
    return found_extensions
|
def setDataRdyInt(self, int_cfg=0x20):
    """Enable the Data Ready interrupt.

    Writes ``int_cfg`` to the INT Pin / Bypass Enable Configuration
    register (Register 55, 0x37 -- see datasheet page 26), then writes
    ``VAL_INT_ENABLE_DATA_RDY`` to the interrupt-enable register.

    :param int_cfg: raw value for the INT pin / bypass configuration
        register (default 0x20).
    """
    # Configure the INT pin first, then enable the data-ready source.
    self._sendCmd(self.REG_INT_PIN_CFG, int_cfg)
    self._sendCmd(self.REG_INT_ENABLE, self.VAL_INT_ENABLE_DATA_RDY)
|
def get_quality(cell):
    """Gets the quality of a network/cell as a percentage string.

    @param string cell
        A network/cell from iwlist scan.
    @return string
        The quality of the network as an integer percentage string
        (e.g. "70"), or "" when no "Quality=" line is present.
    """
    quality = matching_line(cell, "Quality=")
    if quality is None:
        return ""
    # "Quality=XX/YY ..." -> ["XX", "YY"]
    # (Previously this value was computed twice: a second, redundant
    # matching_line() call discarded the first result.)
    quality = quality.split()[0].split("/")
    return str(int(round(float(quality[0]) / float(quality[1]) * 100)))
|
def _process_scrape_info(self, scraper: BaseScraper, scrape_result: ScrapeResult, item_session: ItemSession):
    '''Collect the URLs from the scrape info dict.

    Returns a ``(num_inline, num_linked)`` tuple counting the child URLs
    that passed parsing and the fetch-rule filters; each accepted URL is
    also added to the item session as a child URL.
    '''
    if not scrape_result:
        return 0, 0
    inline_count = 0
    linked_count = 0
    for context in scrape_result.link_contexts:
        parsed = self.parse_url(context.link)
        if not parsed:
            continue
        parsed = self.rewrite_url(parsed)
        child_record = item_session.child_url_record(parsed.url, inline=context.inline)
        # consult_filters returns a tuple whose first element is the verdict.
        if not self._fetch_rule.consult_filters(item_session.request.url_info, child_record)[0]:
            continue
        if context.inline:
            inline_count += 1
        else:
            linked_count += 1
        item_session.add_child_url(parsed.url, inline=context.inline, link_type=context.link_type)
    return inline_count, linked_count
|
def get_roles(self):
    """Return all the roles (IAM or User Groups) grantable on a safe deposit box.

    Roles are permission levels granted to IAM or User Groups; e.g.
    associating the id of the write role allows that IAM or User Group
    to write in the safe deposit box.
    """
    response = get_with_retry(self.cerberus_url + '/v1/role', headers=self.HEADERS)
    throw_if_bad_response(response)
    return response.json()
|
def get_parent(self):
    """Fetch the parent product if it exists, caching the result.

    Use ``parent_asin`` to check if a parent exists before fetching.

    :return:
        An instance of :class:`~.AmazonProduct` representing the
        parent product, or the cached value.
    """
    if self.parent:
        # Already fetched: return the cached parent.
        return self.parent
    parent_asin = self._safe_get_element('ParentASIN')
    if parent_asin:
        self.parent = self.api.lookup(ItemId=parent_asin)
    return self.parent
|
def execute(self):
    """Pause the cluster if it is running.

    Loads the cluster named by ``self.params.cluster``; when lookup or
    configuration fails, logs the error and returns ``os.EX_NOINPUT``.
    Unless ``--yes`` was given, asks for confirmation (aborting on
    refusal) before pausing.
    """
    cluster_name = self.params.cluster
    creator = make_creator(self.params.config, storage_path=self.params.storage)
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as e:
        log.error("Cannot load cluster `%s`: %s", cluster_name, e)
        # Conventional "missing input" exit code (sysexits.h).
        return os.EX_NOINPUT
    if not self.params.yes:
        # confirm_or_abort aborts on refusal, so falling through implies consent.
        confirm_or_abort("Do you want really want to pause cluster `{cluster_name}`?".format(cluster_name=cluster_name), msg="Aborting upon user request.")
    print("Pausing cluster `%s` ..." % cluster_name)
    cluster.pause()
|
def read_string(source, offset, length):
    """Reads a string from a byte string.

    :param bytes source: Source byte string
    :param int offset: Point in byte string to start reading
    :param int length: Length of string to read
    :returns: Read string and offset at point after read data
    :rtype: tuple of str and int
    :raises SerializationError: if unable to unpack
    """
    end = offset + length
    try:
        return (
            codecs.decode(source[offset:end], aws_encryption_sdk.internal.defaults.ENCODING),
            end,
        )
    except Exception as error:
        # Chain the underlying decode error so the root cause stays
        # visible in the traceback instead of being swallowed.
        raise SerializationError("Bad format of serialized context.") from error
|
def add_to_inventory(self):
    """Adds lb IPs to stack inventory."""
    if not self.lb_attrs:
        # Nothing known about the load balancer yet; nothing to add.
        return
    self.lb_attrs = self.consul.lb_details(self.lb_attrs[A.loadbalancer.ID])
    vip_address = self.lb_attrs['virtualIps'][0]['address']
    self.stack.add_lb_secgroup(self.name, [vip_address], self.backend_port)
    self.stack.add_host(vip_address, [self.name], self.lb_attrs)
|
def block(self,
          before='',          # type: typing.Text
          after='',           # type: typing.Text
          delim=('{', '}'),   # type: DelimTuple
          dent=None,          # type: typing.Optional[int]
          allman=False        # type: bool
          ):
    # type: (...) -> typing.Iterator[None]
    """A context manager emitting configurable lines around an indented block.

    Convenient for class and function definitions in some languages.

    Args:
        before (str): Unindented text emitted before the block.
        after (str): Unindented text emitted after the block.
        delim (str, str): The first element follows `before` and a space;
            the second precedes a space and `after`.
        dent (int): Indent amount for the block; the default increment
            (four spaces or one tab) is used when None.
        allman (bool): Use `Allman` style indentation instead of the
            default `K&R` style; ignored when `before` is empty. See
            http://en.wikipedia.org/wiki/Indent_style
    """
    assert len(delim) == 2, 'delim must be a tuple of length 2'
    assert (isinstance(delim[0], (six.text_type, type(None))) and
            isinstance(delim[1], (six.text_type, type(None)))), (
        'delim must be a tuple of two optional strings.')
    open_delim, close_delim = delim
    if before and not allman:
        # K&R: the opening delimiter shares the line with `before`.
        if open_delim is None:
            self.emit(before)
        else:
            self.emit('{} {}'.format(before, open_delim))
    else:
        # Allman (or no `before`): delimiter on its own line.
        if before:
            self.emit(before)
        if open_delim is not None:
            self.emit(open_delim)
    with self.indent(dent):
        yield
    if close_delim is None:
        self.emit(after)
    else:
        self.emit(close_delim + after)
|
def action(self, includes: dict, variables: dict) -> tuple:
    """Call external script and parse its JSON output.

    :param includes: testcase's includes
    :param variables: variables
    :return: tuple of (variables, parsed script output)
    :raises Exception: if the script exits with a non-zero status
    """
    json_args = fill_template_str(json.dumps(self.data), variables)
    # Use communicate() instead of wait()+read(): wait() can deadlock if
    # the child fills the stdout pipe buffer before exiting.
    process = subprocess.Popen([self.module, json_args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = process.communicate()[0].decode()
    if process.returncode == 0:
        debug(out)
        return variables, json.loads(out)
    warning(out)
    raise Exception('Execution failed.')
|
async def send_and_receive(self, message, generate_identifier=True, timeout=5):
    """Send a message and wait for a response.

    Some messages respond with the same identifier as the request. Others
    (e.g. the crypto messages used for pairing) never include one, but
    only one of those can be outstanding at a time, so a "fake"
    identifier derived from the message type is enough to match the
    response.
    """
    await self._connect_and_encrypt()
    if generate_identifier:
        message.identifier = str(uuid.uuid4())
        identifier = message.identifier
    else:
        identifier = 'type_' + str(message.type)
    self.connection.send(message)
    return await self._receive(identifier, timeout)
|
def register_precmd_hook(self, func: Callable[[plugin.PrecommandData], plugin.PrecommandData]) -> None:
    """Register a hook to be called before the command function.

    The callable receives a ``plugin.PrecommandData`` and returns one
    (possibly modified). Its signature is validated before it is
    appended to the hook list.

    :param func: the pre-command hook to register
    """
    # Validate first so an invalid hook is never registered.
    self._validate_prepostcmd_hook(func, plugin.PrecommandData)
    self._precmd_hooks.append(func)
|
def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext='.png'):
    """Search the icon dirs for a file named after the node's type.

    Tries, in order, the node's extension, mime subtype and mime type.
    For an MP3 file ("audio/mpeg") this looks for:
    "mp3.png" / "audio/mpeg.png" / "audio.png"
    """
    candidates = [
        value
        for value in (getattr(file_node, attr) for attr in ('extension', 'mimetype', 'mime_supertype'))
        if value
    ]
    if default_name:
        candidates.append(default_name)
    icon_path = StaticPathFinder.find(candidates, dirs, file_ext)
    if icon_path:
        return StaticIconFile(file_node, icon_path)
|
def shader_substring(body, stack_frame=1):
    """Dress up a literal shader string defined in a calling function.

    Two transformations are applied:
      1) a ``#line`` declaration is prepended, so glsl compile errors can
         point back at the correct python source line, and
      2) the body is un-indented, so the shader can be typed at a pleasing
         indent level in python while still producing an unindented GLSL
         string.

    :param body: the literal shader source string
    :param stack_frame: how many frames up the defining caller lives
        (default: the direct caller)
    """
    caller_line = inspect.stack()[stack_frame][2]
    first_body_line = caller_line + 1 - len(body.splitlines(True))
    return '#line %d\n%s\n' % (first_body_line, textwrap.dedent(body))
|
def get_capabilities(self):
    """Simulate a GetCapabilities WFS request.

    :returns: dict with ``LatLongBoundingBox`` and ``Name`` keys defined.
    """
    # @TODO if a filter is on this may give different results
    # OGR GetExtent() yields (minX, maxX, minY, maxY); round to 4 places.
    min_x, max_x, min_y, max_y = (round(float(v), 4) for v in self._layer.GetExtent())
    return {
        'LatLongBoundingBox': box(min_x, min_y, max_x, max_y),
        'Name': self._file.split('/')[-1].split('.')[0],
    }
|
def communicate_path(self, path):
    """Communicates `path` to this peer if it qualifies.

    Checks if `path` should be shared/communicated with this peer
    according to various conditions: bgp session state, route-family
    support, RTC filtering, transmit-side AS-path loop detection, iBGP
    split-horizon and community attributes (NO_ADVERTISE / NO_EXPORT /
    NO_EXPORT_SUBCONFED). A qualifying path is wrapped in an
    OutgoingRoute and queued on this peer's sink.

    :param path: the path to consider advertising; must be truthy
    :raises ValueError: if `path` is falsy
    """
    LOG.debug('Peer %s asked to communicate path', self)
    if not path:
        raise ValueError('Invalid path %s given.' % path)
    # We do not send anything to peer who is not in established state.
    if not self.in_established():
        LOG.debug('Skipping sending path as peer is not in ' 'ESTABLISHED state %s', path)
        return
    # Check if this session is available for given paths afi/safi
    path_rf = path.route_family
    if not (self.is_mpbgp_cap_valid(path_rf) or path_rf in [RF_IPv4_UC, RF_IPv6_UC]):
        LOG.debug('Skipping sending path as %s route family is not' ' available for this session', path_rf)
        return
    # If RTC capability is available and path afi/saif is other than RT
    # nlri
    if path_rf != RF_RTC_UC and self.is_mpbgp_cap_valid(RF_RTC_UC):
        rtfilter = self._peer_manager.curr_peer_rtfilter(self)
        # If peer does not have any rtfilter or if rtfilter does not have
        # any RTs common with path RTs we do not share this path with the
        # peer
        if rtfilter and not path.has_rts_in(rtfilter):
            LOG.debug('Skipping sending path as rffilter %s and path ' 'rts %s have no RT in common', rtfilter, path.get_rts())
            return
    # Transmit side loop detection: We check if leftmost AS matches
    # peers AS, if so we do not send UPDATE message to this peer.
    as_path = path.get_pattr(BGP_ATTR_TYPE_AS_PATH)
    if as_path and as_path.has_matching_leftmost(self.remote_as):
        LOG.debug('Skipping sending path as AS_PATH has peer AS %s', self.remote_as)
        return
    # If this peer is a route server client, we forward the path
    # regardless of AS PATH loop, whether the connection is iBGP or eBGP,
    # or path's communities.
    # NOTE(review): the AS-path loop check above has already returned
    # before this point, so RS clients do not actually bypass it; also
    # there is no early return here, so the branches below may enqueue
    # the same path a second time for an RS client -- confirm intended.
    if self.is_route_server_client:
        outgoing_route = OutgoingRoute(path)
        self.enque_outgoing_msg(outgoing_route)
    if self._neigh_conf.multi_exit_disc:
        med_attr = path.get_pattr(BGP_ATTR_TYPE_MULTI_EXIT_DISC)
        if not med_attr:
            # Attach the configured MED on a clone so the shared path
            # object is left untouched.
            path = bgp_utils.clone_path_and_update_med_for_target_neighbor(path, self._neigh_conf.multi_exit_disc)
    # For connected/local-prefixes, we send update to all peers.
    if path.source is None:
        # Construct OutgoingRoute specific for this peer and put it in
        # its sink.
        outgoing_route = OutgoingRoute(path)
        self.enque_outgoing_msg(outgoing_route)
    # If path from a bgp-peer is new best path, we share it with
    # all bgp-peers except the source peer and other peers in his AS.
    # This is default Junos setting that in Junos can be disabled with
    # 'advertise-peer-as' setting.
    elif (self != path.source or self.remote_as != path.source.remote_as):
        # When BGP speaker receives an UPDATE message from an internal
        # peer, the receiving BGP speaker SHALL NOT re-distribute the
        # routing information contained in that UPDATE message to other
        # internal peers (unless the speaker acts as a BGP Route
        # Reflector) [RFC4271].
        if (self.remote_as == self._core_service.asn and self.remote_as == path.source.remote_as and isinstance(path.source, Peer) and not path.source.is_route_reflector_client and not self.is_route_reflector_client):
            LOG.debug('Skipping sending iBGP route to iBGP peer %s AS %s', self.ip_address, self.remote_as)
            return
        # If new best path has community attribute, it should be taken into
        # account when sending UPDATE to peers.
        comm_attr = path.get_pattr(BGP_ATTR_TYPE_COMMUNITIES)
        if comm_attr:
            comm_attr_na = comm_attr.has_comm_attr(BGPPathAttributeCommunities.NO_ADVERTISE)
            # If we have NO_ADVERTISE attribute present, we do not send
            # UPDATE to any peers
            if comm_attr_na:
                LOG.debug('Path has community attr. NO_ADVERTISE = %s' '. Hence not advertising to peer', comm_attr_na)
                return
            comm_attr_ne = comm_attr.has_comm_attr(BGPPathAttributeCommunities.NO_EXPORT)
            comm_attr_nes = comm_attr.has_comm_attr(BGPPathAttributeCommunities.NO_EXPORT_SUBCONFED)
            # If NO_EXPORT_SUBCONFED/NO_EXPORT is one of the attribute, we
            # do not advertise to eBGP peers as we do not have any
            # confederation feature at this time.
            if ((comm_attr_nes or comm_attr_ne) and (self.remote_as != self._core_service.asn)):
                LOG.debug('Skipping sending UPDATE to peer: %s as per ' 'community attribute configuration', self)
                return
        # Construct OutgoingRoute specific for this peer and put it in
        # its sink.
        outgoing_route = OutgoingRoute(path)
        self.enque_outgoing_msg(outgoing_route)
        LOG.debug('Enqueued outgoing route %s for peer %s', outgoing_route.path.nlri, self)
|
def has_key(self, key):
    """Return True if ``key`` exists and has not expired.

    Looking the key up via ``self[key]`` triggers the expiry check;
    a lookup failing with KeyError or ValueError counts as absent.
    """
    if key not in self._dict:
        return False
    try:
        self[key]
    except (ValueError, KeyError):
        # Expired (ValueError) or vanished (KeyError) -> not present.
        return False
    return True
|
def get_version(version=None):
    """Returns a PEP 386-compliant version number from VERSION.

    :param version: A 5-tuple (major, minor, patch, status, build) where
        status is one of 'alpha', 'beta', 'rc', 'final'. When None, it is
        built from the module-level ``inasafe_version`` and
        ``inasafe_release_status``.
    :type version: tuple

    :returns: a PEP 386-compliant version number.
    :rtype: str

    :raises RuntimeError: if the tuple is not length 5 or the status
        element is unexpected.
    """
    if version is None:
        version_list = inasafe_version.split('.')
        version = tuple(version_list + [inasafe_release_status] + ['0'])
    if len(version) != 5:
        msg = 'Version must be a tuple of length 5. I got %s' % (version,)
        raise RuntimeError(msg)
    if version[3] not in ('alpha', 'beta', 'rc', 'final'):
        msg = 'Version tuple not as expected. I got %s' % (version,)
        raise RuntimeError(msg)
    # Now build the two parts of the version number:
    # main = X.Y[.Z]
    # sub = .devN - for pre-alpha releases
    #     | {a|b|c}N - for alpha, beta and rc releases
    parts = 2 if version[2] == 0 else 3
    main = '.'.join(str(x) for x in version[:parts])
    sub = ''
    if version[3] == 'alpha' and version[4] == '0':
        # Determining the git hash currently fails on windows and mac.
        if 'win32' in sys.platform or 'darwin' in sys.platform:
            sub = '.dev-master'
        else:
            try:
                git_hash = current_git_hash()
                if git_hash:
                    sub = '.dev-%s' % git_hash
            except OSError:
                # Was `except WindowsError`, which raises NameError on
                # non-Windows Python 3; WindowsError is an OSError alias.
                sub = '.dev-master'
    elif version[3] != 'final':
        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        sub = mapping[version[3]] + str(version[4])
    return main + sub
|
def run_analysis(self, argv):
    """Run this analysis.

    Parses ``argv``, loads SED data from the input file (FITS/npy or
    yaml), plots the castro data, and saves the figure when an output
    file is given.

    :param argv: command-line argument list
    :raises ValueError: if the input file extension is not recognized
    """
    args = self._parser.parse_args(argv)
    exttype = splitext(args.infile)[-1]
    if exttype in ['.fits', '.npy']:
        castro_data = CastroData.create_from_sedfile(args.infile)
    elif exttype in ['.yaml']:
        castro_data = CastroData.create_from_yamlfile(args.infile)
    else:
        # Fixed NameError: the message previously referenced the
        # undefined name `extype`.
        raise ValueError("Can not read file type %s for SED" % exttype)
    ylims = [1e-8, 1e-5]
    plot = plotCastro(castro_data, ylims)
    if args.outfile:
        plot[0].savefig(args.outfile)
|
def align_seqprop_to_structprop(self, seqprop, structprop, chains=None, outdir=None, engine='needle', structure_already_parsed=False, parse=True, force_rerun=False, **kwargs):
    """Run and store alignments of a SeqProp to chains in the ``mapped_chains`` attribute of a StructProp.

    Alignments are stored in the sequence_alignments attribute, with the IDs formatted as
    ``<SeqProp_ID>_<StructProp_ID>-<Chain_ID>``. Although it is more intuitive to align to individual ChainProps,
    StructProps should be loaded as little as possible to reduce run times so the alignment is done to the entire
    structure.

    Args:
        seqprop (SeqProp): SeqProp object with a loaded sequence
        structprop (StructProp): StructProp object with a loaded structure
        chains (str, list): Chain ID or IDs to map to. If not specified, ``mapped_chains`` attribute is inspected
            for chains. If no chains there, all chains will be aligned to.
        outdir (str): Directory to output sequence alignment files (only if running with needle)
        engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
            ``needle`` is the standard EMBOSS tool to run pairwise alignments.
            ``biopython`` is Biopython's implementation of needle. Results can differ!
        structure_already_parsed (bool): If the structure has already been parsed and the chain sequences are
            stored. Temporary option until Hadoop sequence file is implemented to reduce number of times a
            structure is parsed.
        parse (bool): Store locations of mutations, insertions, and deletions in the alignment object (as an annotation)
        force_rerun (bool): If alignments should be rerun
        **kwargs: Other alignment options

    Todo:
        * Document **kwargs for alignment options
    """
    if not outdir:
        outdir = self.sequence_dir
    if not structure_already_parsed:
        # Parse the structure so chain sequences are stored
        structprop.parse_structure()
        # XTODO: remove and use the "parsed" attribute in a structprop instead
    # Decide which chains to align against: explicit arg > mapped_chains > all.
    if chains:
        chains_to_align_to = ssbio.utils.force_list(chains)
    elif structprop.mapped_chains:
        chains_to_align_to = structprop.mapped_chains
    else:
        log.warning('{}-{}: no chains specified in structure to align to, all chains will be aligned to'.format(seqprop.id, structprop.id))
        chains_to_align_to = structprop.chains.list_attr('id')
    for chain_id in chains_to_align_to:
        full_structure_id = '{}-{}'.format(structprop.id, chain_id)
        aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
        outfile = '{}.needle'.format(aln_id)
        # Skip alignments already stored, unless a rerun was requested.
        if self.sequence_alignments.has_id(aln_id) and not force_rerun:
            log.debug('{}: alignment already completed, skipping'.format(aln_id))
            continue
        log.debug('{}: running pairwise alignment to structure {}, chain {}'.format(seqprop.id, structprop.id, chain_id))
        # Check if the chain ID actually exists
        if structprop.chains.has_id(chain_id):
            chain_prop = structprop.chains.get_by_id(chain_id)
            chain_seq_record = chain_prop.seq_record
        else:
            log.error('{}: chain not present in structure file!'.format(full_structure_id))
            continue
        # Check if the chain sequence was parsed
        if not chain_seq_record:
            log.error('{}: chain sequence not available, was structure parsed?'.format(full_structure_id))
            continue
        # Run the pairwise alignment
        try:
            aln = ssbio.protein.sequence.utils.alignment.pairwise_sequence_alignment(a_seq=seqprop, a_seq_id=seqprop.id, b_seq=chain_seq_record, b_seq_id=full_structure_id, engine=engine, outdir=outdir, outfile=outfile, force_rerun=force_rerun)
        except ValueError:
            log.error('{}: alignment failed to run, unable to check structure\'s chain'.format(full_structure_id))
            continue
        # Add an identifier to the MultipleSeqAlignment object for storage in a DictList
        aln.id = aln_id
        # Add annotations to keep track of what was aligned
        aln.annotations['a_seq'] = seqprop.id
        aln.annotations['b_seq'] = full_structure_id
        aln.annotations['structure_id'] = structprop.id
        aln.annotations['chain_id'] = chain_id
        aln.annotations['ssbio_type'] = 'structalign'
        # Optionally parse for locations of mutations, deletions, and insertions
        # Store mapping to chain index as letter annotations in the sequence
        # Store locations in the alignment's annotations
        if parse:
            aln_df = ssbio.protein.sequence.utils.alignment.get_alignment_df(a_aln_seq=str(list(aln)[0].seq), b_aln_seq=str(list(aln)[1].seq))
            # Chain positions that line up with non-gap seqprop positions.
            chain_indices = aln_df[pd.notnull(aln_df.id_a_pos)].id_b_pos.tolist()
            seqprop.letter_annotations['{}_chain_index'.format(aln_id)] = chain_indices
            aln.annotations['mutations'] = ssbio.protein.sequence.utils.alignment.get_mutations(aln_df)
            aln.annotations['deletions'] = ssbio.protein.sequence.utils.alignment.get_deletions(aln_df)
            aln.annotations['insertions'] = ssbio.protein.sequence.utils.alignment.get_insertions(aln_df)
        # Drop any stale stored alignment before appending the fresh one.
        if force_rerun and self.sequence_alignments.has_id(aln.id):
            self.sequence_alignments.remove(aln.id)
        self.sequence_alignments.append(aln)
|
def setLogSettings(state):
    """Update the current log settings.

    This can restore an old saved log settings object returned by
    getLogSettings.

    @param state: the settings to set; a 4-tuple of
        (debug value, categories, handlers, limited handlers).
    """
    global _DEBUG
    global _log_handlers
    global _log_handlers_limited
    (_DEBUG, categories, _log_handlers, _log_handlers_limited) = state
    # Re-register each saved category under the restored settings.
    for category in categories:
        registerCategory(category)
|
def _fetch_secret(pass_path):
    '''Fetch secret from pass based on pass_path. If there is
    any error, return back the original pass_path value.

    NOTE(review): on success the return value comes from the subprocess
    and is therefore ``bytes`` on Python 3, while the error fallback
    returns ``pass_path`` (typically ``str``) -- callers may receive
    either type; confirm whether decoding is expected here.
    '''
    cmd = "pass show {0}".format(pass_path.strip())
    log.debug('Fetching secret: %s', cmd)
    # Naive split: a pass_path containing spaces would be broken into
    # multiple arguments.
    proc = Popen(cmd.split(' '), stdout=PIPE, stderr=PIPE)
    pass_data, pass_error = proc.communicate()
    # The version of pass used during development sent output to
    # stdout instead of stderr even though its returncode was non zero.
    if proc.returncode or not pass_data:
        log.warning('Could not fetch secret: %s %s', pass_data, pass_error)
        pass_data = pass_path
    return pass_data.strip()
|
async def close(self, exception: BaseException = None) -> None:
    """Close this context and run any registered teardown callbacks.

    Callbacks run in reverse registration order; when one returns an
    awaitable, it is awaited before the next callback runs. Every
    callback is processed even if some raise; if at least one raised, a
    :exc:`~.TeardownError` is raised at the end. After this call,
    resources can no longer be requested or published on this context.

    :param exception: the exception, if any, that caused this context to be closed
    :raises .TeardownError: if one or more teardown callbacks raise an exception
    """
    self._check_closed()
    self._closed = True
    failures = []
    for callback, wants_exception in reversed(self._teardown_callbacks):
        try:
            result = callback(exception) if wants_exception else callback()
            if isawaitable(result):
                await result
        except Exception as exc:
            failures.append(exc)
    del self._teardown_callbacks
    if failures:
        raise TeardownError(failures)
|
def template_list(call=None):
    '''Return available Xen template information.

    This returns the details of each template to show number cores,
    memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    records = (session.xenapi.VM.get_record(vm) for vm in session.xenapi.VM.get_all())
    # Keep only the VM records flagged as templates, keyed by label.
    return {record['name_label']: record for record in records if record['is_a_template']}
|
def drawRightStatus ( self , scr , vs ) :
'Draw right side of status bar . Return length displayed .'
|
rightx = self . windowWidth - 1
ret = 0
for rstatcolor in self . callHook ( 'rstatus' , vs ) :
if rstatcolor :
try :
rstatus , coloropt = rstatcolor
rstatus = ' ' + rstatus
attr = colors . get_color ( coloropt ) . attr
statuslen = clipdraw ( scr , self . windowHeight - 1 , rightx , rstatus , attr , rtl = True )
rightx -= statuslen
ret += statuslen
except Exception as e :
self . exceptionCaught ( e )
if scr :
curses . doupdate ( )
return ret
|
def write_stems(audio, filename, rate=44100, bitrate=256000, codec=None, ffmpeg_params=None):
    """Write stems from numpy Tensor.

    Parameters
    ----------
    audio : array_like
        The tensor of Matrix of stems. The data shape is formatted as
        :code:`stems x channels x samples`.
    filename : str
        Output file_name of the stems file
    rate : int
        Output samplerate. Defaults to 44100 Hz.
    bitrate : int
        AAC Bitrate in Bits per second. Defaults to 256 Kbit/s
    codec : str
        AAC codec used. Defaults to `None` which automatically selects
        either `libfdk_aac` or `aac` in that order, determined by availability.
    ffmpeg_params : list(str)
        List of additional ffmpeg parameters

    Notes
    -----
    Output is written as 16bit/44.1 kHz
    """
    import os

    if int(stempeg.ffmpeg_version()[0]) < 3:
        warnings.warn("Writing STEMS with FFMPEG version < 3 is unsupported", UserWarning)
    if codec is None:
        avail = check_available_aac_encoders()
        if avail is not None and 'libfdk_aac' in avail:
            codec = 'libfdk_aac'
        else:
            codec = 'aac'
            # Encoder name fixed: it is spelled libfdk_aac, not libfdc_aac.
            warnings.warn("For better quality, please install libfdk_aac")
    if audio.shape[1] % 1024 != 0:
        warnings.warn("Number of samples does not divide by 1024, be aware that " "the AAC encoder add silence to the input signal")
    # One temporary wav file per stem; removed in the finally block below
    # (previously these delete=False files were leaked on disk).
    tmps = [tmp.NamedTemporaryFile(delete=False, suffix='.wav') for _ in range(audio.shape[0])]
    try:
        for k in range(audio.shape[0]):
            sf.write(tmps[k].name, audio[k], rate)
        cmd = (
            ['ffmpeg', '-y', "-f", 's%dle' % (16), "-acodec", 'pcm_s%dle' % (16), '-ar', "%d" % rate, '-ac', "%d" % 2]
            + list(chain.from_iterable([['-i', i.name] for i in tmps]))
            + list(chain.from_iterable([['-map', str(k)] for k, _ in enumerate(tmps)]))
            + ['-vn', '-acodec', codec, '-ar', "%d" % rate, '-strict', '-2', '-loglevel', 'error']
            + (['-ab', str(bitrate)] if (bitrate is not None) else [])
            + (ffmpeg_params if ffmpeg_params else [])
            + [filename]
        )
        sp.call(cmd)
    finally:
        # Close handles before removal (required on Windows) and clean up.
        for t in tmps:
            t.close()
            os.remove(t.name)
|
def _add_new_ide_controller_helper(ide_controller_label, controller_key, bus_number):
    '''Helper function for adding new IDE controllers.

    .. versionadded:: 2016.3.0

    Args:
        ide_controller_label: label of the IDE controller
        controller_key: if not None, the controller key to use; otherwise it is randomly generated
        bus_number: bus number

    Returns: created device spec for an IDE controller
    '''
    spec = vim.vm.device.VirtualDeviceSpec()
    spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    controller = vim.vm.device.VirtualIDEController()
    # Generate a (temporary) random key when none was supplied.
    controller.key = controller_key if controller_key is not None else randint(-200, 250)
    controller.busNumber = bus_number
    controller.deviceInfo = vim.Description()
    controller.deviceInfo.label = ide_controller_label
    controller.deviceInfo.summary = ide_controller_label
    spec.device = controller
    return spec
|
def sort_dict(dict_, part='keys', key=None, reverse=False):
    """Sorts a dictionary by its values or its keys.

    Args:
        dict_ (dict): a dictionary
        part (str): 'keys' to sort by keys, 'vals'/'values' to sort by values
        key (Optional[func]): a function that takes the specified part
            and returns a sortable value
        reverse (bool): (Defaults to False) - True for descending order,
            False for ascending order.

    Returns:
        OrderedDict: sorted dictionary

    Raises:
        ValueError: if ``part`` is not 'keys', 'vals' or 'values'

    Example:
        >>> sort_dict({'a': 3, 'c': 2, 'b': 1}, 'vals')
        OrderedDict([('b', 1), ('c', 2), ('a', 3)])
    """
    if part == 'keys':
        index = 0
    elif part in {'vals', 'values'}:
        index = 1
    else:
        raise ValueError('Unknown method part=%r' % (part,))
    if key is None:
        _key = op.itemgetter(index)
    else:
        def _key(item):
            return key(item[index])
    # six.iteritems was a Python-2 relic; .items() is equivalent on Python 3.
    sorted_items = sorted(dict_.items(), key=_key, reverse=reverse)
    return OrderedDict(sorted_items)
|
def hazeDriver():
    """Process the command line arguments and run the appropriate haze subcommand.

    We want to be able to do git-style handoffs to subcommands where if we
    do `haze aws foo bar` and the executable haze-aws-foo exists, we'll call
    it with the argument bar.

    We deliberately don't do anything with the arguments other than hand
    them off to the haze subcommand. Subcommands are responsible for their
    own argument parsing.
    """
    try:
        (command, args) = findSubCommand(sys.argv)
        # If we can't construct a subcommand from sys.argv, it'll still be able
        # to find this haze driver script, and re-running ourself isn't useful.
        if os.path.basename(command) == "haze":
            print("Could not find a subcommand for %s" % " ".join(sys.argv))
            sys.exit(1)
    except Exception:
        # Py3 compatibility: StandardError was removed in Python 3; Exception
        # is the closest portable equivalent (SystemExit still propagates).
        print("Could not find a subcommand for %s" % " ".join(sys.argv))
        sys.exit(1)
    check_call([command] + args)
|
def make_node(cls, node, *args):
    '''Creates an array BOUND LIST.'''
    if node is None:
        # No node supplied: start from a fresh BOUNDLIST symbol.
        return cls.make_node(SymbolBOUNDLIST(), *args)
    if node.token != 'BOUNDLIST':
        # Wrap a non-BOUNDLIST node by rebuilding with a fresh list.
        return cls.make_node(None, node, *args)
    # Append every non-None argument as a child of the list node.
    for child in (a for a in args if a is not None):
        node.appendChild(child)
    return node
|
def call_cmd(self, cmd, cwd):
    """Call a command with Popen.

    Writes stdout, stderr, and the command to separate files
    (self.outfile, self.errfile, self.cmdfile).

    :param cmd: A string or array of strings, as accepted by Popen.
    :param cwd: working directory for the child process.
    :return: The pid of the spawned command.
    """
    with open(self.cmdfile, 'w') as f:
        f.write(str(cmd))
    # Guard the join: ' '.join over a plain string would iterate characters.
    logging.info('Calling: %s', cmd if isinstance(cmd, str) else ' '.join(cmd))
    # Context managers guarantee both handles are closed even if Popen
    # raises (the original leaked them in that case).
    with open(self.outfile, 'w') as stdout, open(self.errfile, 'w') as stderr:
        process = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, close_fds=True, cwd=cwd)
    return process.pid
|
def write(self, presets_path):
    """Write this preset to disk in JSON notation.

    :param presets_path: the directory where the preset will be written.
    :raises TypeError: if this preset is marked as built-in.
    """
    if self.builtin:
        raise TypeError("Cannot write built-in preset")
    # Make dictionaries of PresetDefaults values
    odict = self.opts.dict()
    pdict = {self.name: {DESC: self.desc, NOTE: self.note, OPTS: odict}}
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(presets_path, mode=0o755, exist_ok=True)
    with open(os.path.join(presets_path, self.name), "w") as pfile:
        json.dump(pdict, pfile)
|
def moving_average_value(self, date):
    """Compute the n-day moving average of trade volume, scaled to
    thousands of shares, along with the current streak length.

    :param int date: the window size n, in days
    :rtype: tuple(sequence oldest->newest, consecutive-day count)
    """
    values, streak = self.__calculate_moving_average(date, 1)
    # Scale each raw volume down by 1000 and keep 3 decimals.
    scaled = [round(v / 1000, 3) for v in values]
    return scaled, streak
|
def length_of_overlap(first_start, first_end, second_start, second_end):
    """Find the length of the overlapping part of two segments.

    Args:
        first_start (float): Start of the first segment.
        first_end (float): End of the first segment.
        second_start (float): Start of the second segment.
        second_end (float): End of the second segment.

    Return:
        float: The amount of overlap or 0 if they don't overlap at all.
    """
    # The overlap of [a, b] and [c, d] is min(b, d) - max(a, c) when that
    # quantity is positive.  The original branch ladder implicitly returned
    # None when the two segments started at exactly the same coordinate
    # (neither '<' nor '>' matched); the closed-form expression also covers
    # that case.
    overlap = min(first_end, second_end) - max(first_start, second_start)
    return overlap if overlap > 0 else 0.0
|
def leaking(self, z, module, name, node, context, *data):
    '''Translate a call whose api handler "leaks" extra assignment nodes
    into the nearest enclosing block's list of nodes.

    (an expression leaking assignment nodes -- c++ guys, stay calm)

    :param z: api-translator factory for the called module/name
    :param module: module of the called api function
    :param name: name of the called api function
    :param node: the pseudo call node being translated
    :param context: one of 'expression', 'assignment' or 'block'
    :param data: extra context data; for 'assignment', data[0] is the
        assignment target
    '''
    # For method calls the receiver becomes the first argument.
    args = [node.receiver] + node.args if node.type == 'standard_method_call' else node.args
    z = z(module, name, args)
    if context == 'expression':
        if isinstance(z, NormalLeakingNode):
            leaked_nodes, exp = z.as_expression()
        else:
            # NOTE(review): this branch calls as_expression() and then
            # discards both results, re-deriving them via a temp local --
            # presumably as_expression() has required side effects; confirm.
            leaked_nodes, exp = z.as_expression()
            zz = local(z.temp_name(getattr(z, 'default', '')), node.pseudo_type)
            leaked_nodes = z.as_assignment(zz)
            exp = local(zz, node.pseudo_type)
        if exp is None or exp.pseudo_type == 'Void':
            raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
        self.leaked_nodes += leaked_nodes
        return exp
    elif context == 'assignment':
        if isinstance(z, NormalLeakingNode):
            leaked_nodes, exp = z.as_expression()
            if exp is None or exp.pseudo_type == 'Void':
                raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
            self.leaked_nodes += leaked_nodes
            return assignment(data[0], exp)
        else:
            # Special leaking nodes assign directly into the given target;
            # the caller gets no replacement expression.
            self.leaked_nodes += z.as_assignment(data[0])
            return None
    elif context == 'block':
        leaked_nodes, exp = z.as_expression()
        self.leaked_nodes += leaked_nodes
        return exp
|
def find_global(self, pattern):
    """Search the whole process memory space for *pattern* and return the
    first occurrence, or -1 when the pattern is not found.

    This is exhaustive!
    """
    hits = self.reader.search(pattern)
    if not hits:
        return - 1
    return hits[0]
|
def open(safe_file):
    """Return a SentinelDataSet object for *safe_file*.

    :raises IOError: when the path is neither an existing file nor
        an existing directory.
    """
    # SAFE archives may be either a directory tree or a single file.
    if not (os.path.isdir(safe_file) or os.path.isfile(safe_file)):
        raise IOError("file not found: %s" % safe_file)
    return SentinelDataSet(safe_file)
|
def encode_username_password(username: Union[str, bytes], password: Union[str, bytes]) -> bytes:
    """Encodes a username/password pair in the format used by HTTP auth.

    The return value is a byte string in the form ``username:password``.

    .. versionadded:: 5.1
    """
    # NFC-normalize any text input so visually identical credentials
    # compare equal regardless of the caller's Unicode composition,
    # then encode both halves to UTF-8 bytes.
    parts = []
    for part in (username, password):
        if isinstance(part, unicode_type):
            part = unicodedata.normalize("NFC", part)
        parts.append(utf8(part))
    return parts[0] + b":" + parts[1]
|
def explore(layer=None):
    """Function used to discover the Scapy layers and protocols.
    It helps to see which packets exists in contrib or layer files.

    params:
     - layer: If specified, the function will explore the layer. If not,
       the GUI mode will be activated, to browse the available layers

    examples:
      >>> explore()  # Launches the GUI
      >>> explore("dns")  # Explore scapy.layers.dns
      >>> explore("http2")  # Explore scapy.contrib.http2
      >>> explore(scapy.layers.bluetooth4LE)

    Note: to search a packet by name, use ls("name") rather than explore.
    """
    if layer is None:  # GUI MODE
        # The dialogs below block on user input, which is only meaningful
        # in an interactive session.
        if not conf.interactive:
            raise Scapy_Exception("explore() GUI-mode cannot be run in " "interactive mode. Please provide a " "'layer' parameter !")
        # 0 - Imports
        try:
            import prompt_toolkit
        except ImportError:
            raise ImportError("prompt_toolkit is not installed ! " "You may install IPython, which contains it, via" " `pip install ipython`")
        if not _version_checker(prompt_toolkit, (2, 0)):
            raise ImportError("prompt_toolkit >= 2.0.0 is required !")
        # Only available with prompt_toolkit > 2.0, not released on PyPi yet
        from prompt_toolkit.shortcuts.dialogs import radiolist_dialog, button_dialog
        from prompt_toolkit.formatted_text import HTML
        # 1 - Ask for layer or contrib
        action = button_dialog(title="Scapy v%s" % conf.version, text=HTML(six.text_type('<style bg="white" fg="red">Chose the type of packets' ' you want to explore:</style>')), buttons=[(six.text_type("Layers"), "layers"), (six.text_type("Contribs"), "contribs"), (six.text_type("Cancel"), "cancel")])
        # 2 - Retrieve list of Packets
        if action == "layers":
            # Get all loaded layers
            _radio_values = conf.layers.layers()
            # Restrict to layers-only (not contribs) + packet.py and asn1*.py
            _radio_values = [x for x in _radio_values if ("layers" in x[0] or "packet" in x[0] or "asn1" in x[0])]
        elif action == "contribs":
            # Get all existing contribs
            from scapy.main import list_contrib
            _radio_values = list_contrib(ret=True)
            _radio_values = [(x['name'], x['description']) for x in _radio_values]
            # Remove very specific modules
            _radio_values = [x for x in _radio_values if not ("can" in x[0])]
        else:
            # Escape/Cancel was pressed
            return
        # Python 2 compat: dialog values must be text, not bytes
        if six.PY2:
            _radio_values = [(six.text_type(x), six.text_type(y)) for x, y in _radio_values]
        # 3 - Ask for the layer/contrib module to explore
        result = radiolist_dialog(values=_radio_values, title="Scapy v%s" % conf.version, text=HTML(six.text_type('<style bg="white" fg="red">Please select a layer among' ' the following, to see all packets contained in' ' it:</style>')))
        if result is None:
            # User pressed "Cancel"
            return
        # 4 - (Contrib only): load contrib
        if action == "contribs":
            from scapy.main import load_contrib
            load_contrib(result)
            result = "scapy.contrib." + result
    else:  # NON-GUI MODE
        # We handle layer as a short layer name, full layer name
        # or the module itself
        if isinstance(layer, types.ModuleType):
            layer = layer.__name__
        if isinstance(layer, str):
            if layer.startswith("scapy.layers."):
                result = layer
            else:
                # Short name: try it both as a contrib and as a layer.
                if layer.startswith("scapy.contrib."):
                    layer = layer.replace("scapy.contrib.", "")
                from scapy.main import load_contrib
                load_contrib(layer)
                result_layer, result_contrib = (("scapy.layers.%s" % layer), ("scapy.contrib.%s" % layer))
                if result_layer in conf.layers.ldict:
                    result = result_layer
                elif result_contrib in conf.layers.ldict:
                    result = result_contrib
                else:
                    raise Scapy_Exception("Unknown scapy module '%s'" % layer)
        else:
            warning("Wrong usage ! Check out help(explore)")
            return
    # COMMON PART
    # Get the list of all Packets contained in that module
    try:
        all_layers = conf.layers.ldict[result]
    except KeyError:
        raise Scapy_Exception("Unknown scapy module '%s'" % layer)
    # Print a table of (class name, display name) for every packet class
    print(conf.color_theme.layer_name("Packets contained in %s:" % result))
    rtlst = [(lay.__name__ or "", lay._name or "") for lay in all_layers]
    print(pretty_list(rtlst, [("Class", "Name")], borders=True))
|
def get_valid_fns(self) -> Tuple[List[str], List[str]]:
    """Fetches the validation set of the corpus."""
    # Delegate to the shared prefix->filenames resolver.
    prefixes = self.valid_prefixes
    return self.prefixes_to_fns(prefixes)
|
def make_public(self, container, ttl=None):
    """Enable CDN access for *container*, optionally setting the TTL
    for the container."""
    cdn_kwargs = {"public": True, "ttl": ttl}
    return self._set_cdn_access(container, **cdn_kwargs)
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # Every requested stddev type must be supported by this GMPE.
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types)
    C = self.COEFFS[imt]
    # Convert the rupture magnitude to the scale used by the coefficients.
    mag = self._convert_magnitude(rup.mag)
    # Functional form:
    #   c1 + c2*M + c10*(M-6)^2 + (c6 + c7*M) * ln(Rjb + exp(c4))
    mean = (C['c1'] + C['c2'] * mag + C['c10'] * (mag - 6) ** 2 + (C['c6'] + C['c7'] * mag) * np.log(dists.rjb + np.exp(C['c4'])))
    # Clip the mean to the valid range for this intensity measure type.
    mean = clip_mean(imt, mean)
    stddevs = self._compute_stddevs(C, dists.rjb.size, stddev_types)
    return mean, stddevs
|
def __get_course_html(self):
    """Fetch the course-timetable page.

    :return: decoded HTML text
    """
    # The server checks for a same-site Referer before serving the page.
    self._headers['Referer'] = 'http://jwc.wyu.edu.cn/student/menu.asp'
    r = requests.get('http://jwc.wyu.edu.cn/student/f3.asp', headers=self._headers, cookies=self._cookies)
    # Detect the charset from the payload rather than assuming UTF-8.
    return r.content.decode(_.get_charset(r.content))
|
def _post_request(self, url, data=None, files=None):
    '''a helper method for sending post requests to telegram api

    https://core.telegram.org/bots/api#making-requests
    https://requests.readthedocs.io/en/master/user/quickstart/

    :param url: string with url for post request
    :param data: [optional] dictionary with data to add to request
    :param files: [optional] byte data to add to request
    :return: dictionary with response details
    '''
    import requests

    # Build the keyword arguments, skipping empty payload fields.
    request_kwargs = {'url': url}
    if data:
        request_kwargs['data'] = data
    if files:
        request_kwargs['files'] = files
    # Send the request; on failure, hand off to the injected transport
    # when one is configured, otherwise re-raise the original error.
    try:
        response = requests.post(**request_kwargs)
    except Exception:
        if not self.requests_handler:
            raise
        request_kwargs['method'] = 'POST'
        request_object = requests.Request(**request_kwargs)
        return self.requests_handler(request_object)
    # Normalize the raw HTTP response into a details dictionary.
    return self.telegram_handler.handle(response)
|
def show_fabric_trunk_info_input_rbridge_id(self, **kwargs):
    """Auto Generated Code.

    Build the show-fabric-trunk-info request XML filtered by rbridge-id
    and dispatch it through the RPC callback.

    :param kwargs: must contain 'rbridge_id'; may contain 'callback' to
        override the default ``self._callback``.
    :return: whatever the callback returns for the built XML config.
    """
    # The original also built an unused ET.Element("config") that was
    # immediately overwritten; that dead assignment is removed.
    show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
    config = show_fabric_trunk_info
    input = ET.SubElement(show_fabric_trunk_info, "input")
    rbridge_id = ET.SubElement(input, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def __slice_key_val(line):
    """Get the key and value items from a line by splitting at the first ":".

    :param str line:
    :return: (key, value) where value is the lstripped text after the
        colon, or (line, None) when the line contains no colon.
    """
    # str.partition does the find/slice dance of the original in one call.
    key, sep, value = line.partition(":")
    if not sep:
        # No colon found: the whole line is the key.
        return line, None
    return key, value.lstrip()
|
def check_subset(arr, size, divisor):
    """This function checks if a subset of 'arr' has a sum that can be
    divided by 'divisor' with no remainder.

    >>> check_subset([3, 1, 7, 5], 4, 6)
    True
    >>> check_subset([1, 7], 2, 5)
    False
    >>> check_subset([1, 6], 2, 5)
    False
    """
    # Pigeonhole: with more elements than residue classes, two prefix sums
    # share a residue, so some subset sums to a multiple of divisor.
    if size > divisor:
        return True
    # Residues achievable by the sum of some non-empty subset seen so far.
    reachable = set()
    for i in range(size):
        if 0 in reachable:
            return True
        residue = arr[i] % divisor
        # Extend every known subset with this element...
        reachable |= {(r + residue) % divisor for r in reachable}
        # ...and allow the element on its own.
        reachable.add(residue)
    return 0 in reachable
|
def check_next_match(self, match, new_relations, subject_graph, one_match):
    """Check if the (onset for a) match can be a valid (part of a) ring.

    Extends CustomPattern.check_next_match with an extra test for
    "strong" rings: each newly matched vertex must lie at the expected
    shortest-path distance(s) from the ring's central vertex.
    """
    if not CustomPattern.check_next_match(self, match, new_relations, subject_graph, one_match):
        return False
    if self.strong:
        # can this ever become a strong ring?
        vertex1_start = match.forward[self.pattern_graph.central_vertex]
        for vertex1 in new_relations.values():
            paths = list(subject_graph.iter_shortest_paths(vertex1, vertex1_start))
            if self.size % 2 == 0 and len(match) == self.size:
                # Completed even-sized ring: the vertex opposite the start
                # must be reachable by exactly two paths around the ring...
                if len(paths) != 2:
                    # print "NRingPattern.check_next_match: not strong a.1"
                    return False
                # ...each going half-way around.
                for path in paths:
                    if len(path) != len(match) // 2 + 1:
                        # print "NRingPattern.check_next_match: not strong a.2"
                        return False
            else:
                # Otherwise the shortest path must be unique and of the
                # expected half-ring length.
                if len(paths) != 1:
                    # print "NRingPattern.check_next_match: not strong b.1"
                    return False
                if len(paths[0]) != (len(match) + 1) // 2:
                    # print "NRingPattern.check_next_match: not strong b.2"
                    return False
    # print "RingPattern.check_next_match: no remarks"
    return True
|
def preflightInfo(info):
    """Returns a dict containing two items.  The value for each item is a
    set of info attribute names.

    missingRequired      Required data that is missing.
    missingRecommended   Recommended data that is missing.
    """
    def _missing(attribute_names):
        # An attribute counts as missing when absent or explicitly None.
        return {a for a in attribute_names if getattr(info, a, None) is None}

    return dict(missingRequired=_missing(requiredAttributes), missingRecommended=_missing(recommendedAttributes))
|
def monitor(name):
    '''Get the summary from module monit and try to see if service is
    being monitored. If not then monitor the service.

    name
        The name of the service as known to monit.
    '''
    ret = {'result': None, 'name': name, 'comment': '', 'changes': {}}
    result = __salt__['monit.summary'](name)
    try:
        for key, value in result.items():
            if 'Running' in value[name]:
                # Already monitored; nothing to change.
                # (fixed duplicated word "being being" in the message)
                ret['comment'] = ('{0} is being monitored.').format(name)
                ret['result'] = True
            else:
                if __opts__['test']:
                    ret['comment'] = 'Service {0} is set to be monitored.'.format(name)
                    ret['result'] = None
                    return ret
                __salt__['monit.monitor'](name)
                ret['comment'] = ('{0} started to be monitored.').format(name)
                ret['changes'][name] = 'Running'
                ret['result'] = True
                break
    except KeyError:
        ret['comment'] = ('{0} not found in configuration.').format(name)
        ret['result'] = False
    return ret
|
def get_db(db_name=None):
    """GetDB - simple function to wrap getting a database
    connection from the connection pool."""
    import pymongo

    # NOTE(review): pymongo.Connection was removed in pymongo 3.x in
    # favour of MongoClient -- confirm the pinned pymongo version.
    connection = pymongo.Connection(host=DB_HOST, port=DB_PORT)
    return connection[db_name]
|
def insert_completions(self, e):
    u"""Insert all completions of the text before point that would have
    been generated by possible-completions."""
    start = self.begidx
    end = self.endidx
    for completion in self._get_completions():
        # Replace the current word span with the completion plus a
        # trailing space, then advance past the inserted text.
        inserted = list(completion) + [' ']
        self.l_buffer[start:end] = inserted
        start += len(inserted)
        end = start
    self.line_cursor = start
    self.finalize()
|
def reset_index(self, dims_or_levels, drop=False, inplace=None):
    """Reset the specified index(es) or multi-index level(s).

    Parameters
    ----------
    dims_or_levels : str or list
        Name(s) of the dimension(s) and/or multi-index level(s) that will
        be reset.
    drop : bool, optional
        If True, remove the specified indexes and/or multi-index levels
        instead of extracting them as new coordinates (default: False).
    inplace : bool, optional
        If True, modify the dataarray in-place. Otherwise, return a new
        DataArray object.

    Returns
    -------
    obj : DataArray
        Another dataarray, with this dataarray's data but replaced
        coordinates.  Returns None when inplace is True.

    See Also
    --------
    DataArray.set_index
    """
    # _check_inplace normalizes/validates the inplace flag -- presumably
    # warning on deprecated usage; see its definition.
    inplace = _check_inplace(inplace)
    # Split the named indexes out of the coordinate mapping; when
    # drop=True they are discarded instead of kept as plain coordinates.
    coords, _ = split_indexes(dims_or_levels, self._coords, set(), self._level_coords, drop=drop)
    if inplace:
        # Mutate this DataArray's coordinates (returns None).
        self._coords = coords
    else:
        return self._replace(coords=coords)
|
def get_output(self):
    '''Get file content, selecting only lines we are interested in.

    Builds a small shell pipeline:
        sed -rf <default_sed_file> <real_path>
          [| grep -v -F -f <exclude-strings>]
          [| grep -F -f <pattern-strings>]
    Returns the decoded, stripped output, or None when the file does not
    exist.
    '''
    if not os.path.isfile(self.real_path):
        logger.debug('File %s does not exist', self.real_path)
        return
    cmd = []
    cmd.append('sed')
    cmd.append('-rf')
    cmd.append(constants.default_sed_file)
    cmd.append(self.real_path)
    sedcmd = Popen(cmd, stdout=PIPE)
    if self.exclude is not None:
        # Feed the exclusion strings to `grep -v` via a temp file; the
        # NamedTemporaryFile must stay referenced until grep has read it.
        exclude_file = NamedTemporaryFile()
        exclude_file.write("\n".join(self.exclude).encode('utf-8'))
        exclude_file.flush()
        cmd = "grep -v -F -f %s" % exclude_file.name
        args = shlex.split(cmd)
        proc = Popen(args, stdin=sedcmd.stdout, stdout=PIPE)
        # Close our copy of sed's stdout so sed sees SIGPIPE if grep exits.
        sedcmd.stdout.close()
        stdin = proc.stdout
        if self.pattern is None:
            output = proc.communicate()[0]
        else:
            # A pattern filter follows: chain the next grep after this one.
            sedcmd = proc
    if self.pattern is not None:
        # Feed the inclusion strings to `grep -F` via a temp file.
        pattern_file = NamedTemporaryFile()
        pattern_file.write("\n".join(self.pattern).encode('utf-8'))
        pattern_file.flush()
        cmd = "grep -F -f %s" % pattern_file.name
        args = shlex.split(cmd)
        proc1 = Popen(args, stdin=sedcmd.stdout, stdout=PIPE)
        sedcmd.stdout.close()
        if self.exclude is not None:
            # Drop the parent's handle on the intermediate pipe as well.
            stdin.close()
        output = proc1.communicate()[0]
    if self.pattern is None and self.exclude is None:
        # No filters: read sed's output directly.
        output = sedcmd.communicate()[0]
    return output.decode('utf-8', 'ignore').strip()
|
def list(self, marker=None, limit=None, prefix=None, delimiter=None, end_marker=None, full_listing=False, return_raw=False):
    """List the objects in this container, using the parameters to control
    the number and content of objects. Note that this is limited by the
    absolute request limits of Swift (currently 10,000 objects). If you
    need to list all objects in the container, use the `list_all()` method
    instead.
    """
    if full_listing:
        # list_all() pages past Swift's per-request limit; only the
        # prefix filter applies in that mode.
        return self.list_all(prefix=prefix)
    return self.object_manager.list(marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, end_marker=end_marker, return_raw=return_raw)
|
def brightness(frames):
    """Parse a brightness message.

    Returns a 1-tuple holding the brightness as a fraction (the wire
    value is in thousandths).
    """
    parsed = MessageReader(frames).string("command").uint32("brightness").assert_end().get()
    if parsed.command != "brightness":
        raise MessageParserError("Command is not 'brightness'")
    return (parsed.brightness / 1000,)
|
def make_data(self):
    """Return data of the field in a format that can be converted to JSON.

    Returns:
        data (dict): A dictionary of dictionaries, such that for a given
        x, y pair, data[x][y] = {"dx": dx, "dy": dy}. Note that this is
        transposed from the matrix representation in DX and DY.
    """
    # (removed leftover debugging `import pdb`)
    X, Y, DX, DY = self._calc_partials()
    data = {}
    for x in self.xrange:
        data[x] = {}
        for y in self.yrange:
            # DX/DY are indexed [row=y, col=x]; the output dict is keyed
            # [x][y], hence the transpose noted in the docstring.
            data[x][y] = {"dx": DX[y, x], "dy": DY[y, x]}
    return data
|
def sync_model(self, comment='', compact_central=False, release_borrowed=True, release_workset=True, save_local=False):
    """Append a sync model entry to the journal.

    This instructs Revit to sync the currently open workshared model.

    Args:
        comment (str): comment to be provided for the sync step
        compact_central (bool): if True compacts the central file
        release_borrowed (bool): if True releases the borrowed elements
        release_workset (bool): if True releases the borrowed worksets
        save_local (bool): if True saves the local file as well
    """
    # Collect the journal entries for the enabled options, then flush
    # them in order.
    entries = [templates.FILE_SYNC_START]
    if compact_central:
        entries.append(templates.FILE_SYNC_COMPACT)
    if release_borrowed:
        entries.append(templates.FILE_SYNC_RELEASE_BORROWED)
    if release_workset:
        entries.append(templates.FILE_SYNC_RELEASE_USERWORKSETS)
    if save_local:
        entries.append(templates.FILE_SYNC_RELEASE_SAVELOCAL)
    entries.append(templates.FILE_SYNC_COMMENT_OK.format(sync_comment=comment))
    for entry in entries:
        self._add_entry(entry)
|
def check_attr(node, n):
    """Check if ATTR has to be normalized after this instruction has been
    translated to intermediate code.

    Returns the n-th child of *node* when it exists, else None.
    """
    children = node.children
    return children[n] if len(children) > n else None
|
# (removed: non-code dataset-page residue — "Subsets and Splits" UI text)