signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def logging_syslog_server_port ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
logging = ET . SubElement ( config , "logging" , xmlns = "urn:brocade.com:mgmt:brocade-ras" )
syslog_server = ET . SubElement ( logging , "syslog-server" )
syslogip_key = ET . SubElement ( syslog_server , "syslogip" )
syslogip_key . text = kwargs . pop ( 'syslogip' )
use_vrf_key = ET . SubElement ( syslog_server , "use-vrf" )
use_vrf_key . text = kwargs . pop ( 'use_vrf' )
port = ET . SubElement ( syslog_server , "port" )
port . text = kwargs . pop ( 'port' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def autoscroll(self, autoscroll):
    """Autoscroll will 'right justify' text from the cursor if set True,
    otherwise it will 'left justify' the text."""
    # Toggle the entry-shift-increment bit, then push the updated entry
    # mode to the display controller.
    mode = self.displaymode
    if autoscroll:
        mode |= LCD_ENTRYSHIFTINCREMENT
    else:
        mode &= ~LCD_ENTRYSHIFTINCREMENT
    self.displaymode = mode
    self.write8(LCD_ENTRYMODESET | mode)
|
def _reads_per_position(bam_in, loci_file, out_dir):
    """Create input for compute entropy.

    Intersects reads (``bam_in``) with loci (``loci_file``) strand-aware,
    counts 5p/3p read positions per locus, and writes one entry per line to
    ``locus_readpos.counts`` in ``out_dir``.

    :param bam_in: path to BAM file with aligned reads
    :param loci_file: path to BED file with loci
    :param out_dir: output directory
    :return: path to the counts file written
    """
    data = Counter()
    a = pybedtools.BedTool(bam_in)
    b = pybedtools.BedTool(loci_file)
    # Strand-aware intersection, keeping the full overlap records (wo=True).
    c = a.intersect(b, s=True, bed=True, wo=True)
    for line in c:
        # 5p/3p assignment depends on the read strand.
        # NOTE(review): field indices (1, 2, 5, 15) assume the column layout
        # of pybedtools' -wo BED output -- confirm against the input files.
        end = int(line[1]) + 1 + int(line[2]) if line[5] == "+" else int(line[1]) + 1
        start = int(line[1]) + 1 if line[5] == "+" else int(line[1]) + 1 + int(line[2])
        side5 = "%s\t5p\t%s" % (line[15], start)
        side3 = "%s\t3p\t%s" % (line[15], end)
        data[side5] += 1
        data[side3] += 1
    counts_reads = op.join(out_dir, 'locus_readpos.counts')
    with open(counts_reads, 'w') as out_handle:
        for k in data:
            # Bug fix: the original used end="" which ran every entry together
            # on a single unparseable line; separate entries with newlines.
            print(k, file=out_handle)
    return counts_reads
|
def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches, est_velocities):
    """Checks that the input annotations have valid time intervals, pitches,
    and velocities, and throws helpful errors if not.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Array of reference notes time intervals (onset and offset times)
    ref_pitches : np.ndarray, shape=(n,)
        Array of reference pitch values in Hertz
    ref_velocities : np.ndarray, shape=(n,)
        Array of MIDI velocities (i.e. between 0 and 127) of reference notes
    est_intervals : np.ndarray, shape=(m, 2)
        Array of estimated notes time intervals (onset and offset times)
    est_pitches : np.ndarray, shape=(m,)
        Array of estimated pitch values in Hertz
    est_velocities : np.ndarray, shape=(m,)
        Array of MIDI velocities (i.e. between 0 and 127) of estimated notes
    """
    # Delegate interval/pitch validation to the base transcription checks.
    transcription.validate(ref_intervals, ref_pitches, est_intervals, est_pitches)
    pairs = (('Reference', ref_velocities, ref_pitches),
             ('Estimated', est_velocities, est_pitches))
    # Check that velocities have the same length as intervals/pitches.
    for label, velocities, pitches in pairs:
        if velocities.shape[0] != pitches.shape[0]:
            raise ValueError('%s velocities must have the same length as '
                             'pitches and intervals.' % label)
    # Check that the velocities are non-negative.
    for label, velocities, _ in pairs:
        if velocities.size > 0 and np.min(velocities) < 0:
            raise ValueError('%s velocities must be positive.' % label)
|
def emoticons(string):
    '''emot.emoticons is used to detect emoticons from text.

    >>> text = "I love python 👨 :-)"
    >>> emot.emoticons(text)
    {'value': [':-)'], 'location': [[16, 19]], 'mean': ['Happy face smiley'], 'flag': True}
    '''
    flag = True
    try:
        # Alternation over every known emoticon pattern.
        # NOTE(review): keys of emo_unicode.EMOTICONS are interpolated into a
        # regex unescaped -- presumably they are pre-escaped patterns; verify.
        pattern = u'(' + u'|'.join(k for k in emo_unicode.EMOTICONS) + u')'
        __value = []
        __location = []
        for et in re.finditer(r"%s" % pattern, str(string)):
            __value.append(et.group().strip())
            __location.append([et.start(), et.end()])
        __mean = [emo_unicode.EMOTICONS_EMO[each] for each in __value]
        if len(__value) < 1:
            flag = False
        # Removed dead code from the original: a duplicate `__entities = []`
        # initialization and an unreachable second `return` statement.
        return {'value': __value, 'location': __location, 'mean': __mean, 'flag': flag}
    except Exception:
        # Backward-compatible failure payload: a list, not a dict.
        return [{'flag': False}]
|
def learn_q(self, predicted_q_arr, real_q_arr):
    '''Learn from Q-Values (one inference/backpropagation step).

    Args:
        predicted_q_arr:    `np.ndarray` of predicted Q-Values.
        real_q_arr:         `np.ndarray` of real Q-Values.
    '''
    # Maintain a rolling window of the last `self.__seq_len` predicted arrays:
    # drop the oldest while too long, pad by repeating the newest while too short.
    self.__predicted_q_arr_list.append(predicted_q_arr)
    while len(self.__predicted_q_arr_list) > self.__seq_len:
        self.__predicted_q_arr_list = self.__predicted_q_arr_list[1:]
    while len(self.__predicted_q_arr_list) < self.__seq_len:
        self.__predicted_q_arr_list.append(self.__predicted_q_arr_list[-1])
    predicted_q_arr = np.array(self.__predicted_q_arr_list)
    # Reorder so the sequence axis is second -- assumes each stored array is
    # 2-D (batch, feature), giving (batch, seq, feature); TODO confirm.
    predicted_q_arr = predicted_q_arr.transpose((1, 0, 2))
    # Same rolling-window treatment for the real (target) Q-Values.
    self.__real_q_arr_list.append(real_q_arr)
    while len(self.__real_q_arr_list) > self.__seq_len:
        self.__real_q_arr_list = self.__real_q_arr_list[1:]
    while len(self.__real_q_arr_list) < self.__seq_len:
        self.__real_q_arr_list.append(self.__real_q_arr_list[-1])
    real_q_arr = np.array(self.__real_q_arr_list)
    real_q_arr = real_q_arr.transpose((1, 0, 2))
    # Compute loss and its gradient, then backpropagate through the LSTM's
    # output layer and (from the last time step) its hidden layers.
    loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)
    delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)
    delta_arr, lstm_output_grads_list = self.__lstm_model.output_back_propagate(predicted_q_arr, delta_arr)
    delta_arr, _, lstm_hidden_grads_list = self.__lstm_model.hidden_back_propagate(delta_arr[:, -1])
    # Output-layer gradients must precede hidden-layer gradients for optimize().
    lstm_grads_list = lstm_output_grads_list
    lstm_grads_list.extend(lstm_hidden_grads_list)
    self.__lstm_model.optimize(lstm_grads_list, self.__learning_rate, 1)
    # Track the loss history for later inspection.
    self.__loss_list.append(loss)
|
def export(self, file_path=None, export_format=None):
    """Write the users to a file.

    Supports 'yaml' and 'json' formats; any other format leaves the file
    empty. Always returns True.
    """
    with io.open(file_path, mode='w', encoding="utf-8") as export_file:
        if export_format == 'yaml':
            import yaml
            yaml.safe_dump(self.to_dict(), export_file, default_flow_style=False)
        elif export_format == 'json':
            serialized = json.dumps(self.to_dict(), ensure_ascii=False)
            export_file.write(text_type(serialized))
    return True
|
def list(self, bank):
    '''Lists entries stored in the specified bank.

    :param bank:
        The name of the location inside the cache which will hold the key
        and its associated data.
    :return:
        An iterable object containing all bank entries. Returns an empty
        iterator if the bank doesn't exist.
    :raises SaltCacheError:
        Raises an exception if cache driver detected an error accessing data
        in the cache backend (auth, permissions, etc).
    '''
    # Dispatch to the configured cache driver's `<driver>.list` function.
    driver_fun = '{0}.list'.format(self.driver)
    return self.modules[driver_fun](bank, **self._kwargs)
|
def createLearningRateScheduler(self, optimizer):
    """Creates the learning rate scheduler and attach the optimizer.

    Returns None when no scheduler class is configured.
    """
    scheduler_cls = self.lr_scheduler_class
    if scheduler_cls is None:
        return None
    return scheduler_cls(optimizer, **self.lr_scheduler_params)
|
def prepare_query(self, symbol, start_date, end_date):
    """Method returns prepared request query for Yahoo YQL API."""
    template = ('select * from yahoo.finance.historicaldata '
                'where symbol = "{0}" and startDate = "{1}" and endDate = "{2}"')
    return template.format(symbol, start_date, end_date)
|
def get_song(self, netease=False):
    """Fetch a song -- the unified external interface."""
    # Block until a song is available from the playlist queue.
    song = self._playlist.get(True)
    # Record the song id so duplicates can be skipped later (de-duplication).
    self.hash_sid[song['sid']] = True
    # Check whether to resolve the NetEase 320k source for this song.
    self.get_netease_song(song, netease)
    self._playingsong = song
    return song
|
def stop_proxy(self):
    """Stop the mitmproxy"""
    self.runner.info_log("Stopping proxy...")
    if not hasattr(self, 'proxy_pid'):
        # Proxy was never started; nothing to do.
        return
    try:
        kill_by_pid(self.proxy_pid)
    except psutil.NoSuchProcess:
        # Process already exited; treat as stopped.
        pass
|
def _extract_and_handle_mpbgp_withdraws(self, mp_unreach_attr):
    """Extracts withdraws advertised in the given update message's
    *MpUnReachNlri* attribute.

    Assumes MPBGP capability is enabled.

    Parameters:
        - update_msg: (Update) is assumed to be checked for all bgp
          message errors.

    Extracted withdraws are added to appropriate *Destination* for further
    processing."""
    msg_rf = mp_unreach_attr.route_family
    # Check if this route family is among supported route families.
    if msg_rf not in SUPPORTED_GLOBAL_RF:
        LOG.info('Received route family %s is not supported. '
                 'Ignoring withdraw routes on this UPDATE message.', msg_rf)
        return
    w_nlris = mp_unreach_attr.withdrawn_routes
    if not w_nlris:
        # An MpUnReachNlri with no routes is an End-of-RIB marker of some kind.
        # NOTE(review): execution falls through to the (empty) loop below
        # rather than returning -- harmless, but confirm this is intentional.
        self._handle_eor(msg_rf)
    for w_nlri in w_nlris:
        # Build a withdraw path and run it through the inbound filter.
        w_path = bgp_utils.create_path(self, w_nlri, is_withdraw=True)
        block, blocked_cause = self._apply_in_filter(w_path)
        received_route = ReceivedRoute(w_path, self, block)
        nlri_str = w_nlri.formatted_nlri_str
        # Drop the prefix from adj-RIB-in and notify listeners of the change.
        if nlri_str in self._adj_rib_in:
            del self._adj_rib_in[nlri_str]
            self._signal_bus.adj_rib_in_changed(self, received_route)
        if not block:
            # Update appropriate table with withdraws.
            tm = self._core_service.table_manager
            tm.learn_path(w_path)
        else:
            LOG.debug('prefix : %s is blocked by in-bound filter: %s', w_nlri, blocked_cause)
|
def fragmentate(self, give_only_index=False, use_lookup=None):
    """Get the indices of non bonded parts in the molecule.

    Args:
        give_only_index (bool): If ``True`` a set of indices is returned.
            Otherwise a new Cartesian instance.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``

    Returns:
        list: A list of sets of indices or new Cartesian instances."""
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    fragments = []
    pending = set(self.index)
    # Compute (and cache) the bond dictionary once; subsequent coordination
    # sphere queries can then use the lookup (use_lookup=True below).
    self.get_bonds(use_lookup=use_lookup)
    while pending:
        # Every atom reachable from an arbitrary pending atom (infinite
        # coordination sphere) forms one connected fragment.
        index = self.get_coordination_sphere(pending.pop(), use_lookup=True, n_sphere=float('inf'), only_surface=False, give_only_index=True)
        pending = pending - index
        if give_only_index:
            fragments.append(index)
        else:
            fragment = self.loc[index]
            # Restrict the cached bond dictionaries to this fragment's atoms.
            fragment._metadata['bond_dict'] = fragment.restrict_bond_dict(self._metadata['bond_dict'])
            try:
                fragment._metadata['val_bond_dict'] = (fragment.restrict_bond_dict(self._metadata['val_bond_dict']))
            except KeyError:
                # No valence bond dictionary cached; skip silently.
                pass
            fragments.append(fragment)
    return fragments
|
def parse_n_jobs(s):
    """This function parses a "math"-like string as a function of CPU count.
    It is useful for specifying the number of jobs.

    For example, on an 8-core machine::

        assert parse_n_jobs('0.5 * n') == 4
        assert parse_n_jobs('2n') == 16
        assert parse_n_jobs('n') == 8
        assert parse_n_jobs('4') == 4

    :param str s: string to parse for number of CPUs
    """
    total_cpus = cpu_count()
    if isinstance(s, int):
        n_jobs = s
    elif isinstance(s, float):
        n_jobs = int(s)
    elif isinstance(s, str):
        match = re.match(r'(\d*(?:\.\d*)?)?(\s*\*?\s*n)?$', s.strip())
        if match is None:
            raise ValueError('Unable to parse n_jobs="{}"'.format(s))
        factor = float(match.group(1)) if match.group(1) else 1
        # A trailing "n" means "multiple of CPU count"; a bare fraction < 1
        # is also interpreted as a fraction of the CPU count.
        if match.group(2) or factor < 1:
            n_jobs = factor * total_cpus
        else:
            n_jobs = int(factor)
    else:
        raise TypeError('n_jobs argument must be of type str, int, or float.')
    n_jobs = int(n_jobs)
    if n_jobs <= 0:
        warnings.warn('n_jobs={} is invalid. Setting n_jobs=1.'.format(n_jobs))
        n_jobs = 1
    return int(n_jobs)
|
def _run_program(self, bin, fastafile, params=None):
    """Run HMS and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    # Bug fix: the original merged user params into default_params but then
    # read params["width"] directly, crashing with KeyError when "width" was
    # not supplied and silently ignoring the default of 10.
    merged_params = {"width": 10}
    if params is not None:
        merged_params.update(params)
    fgfile, summitfile, outfile = self._prepare_files(fastafile)
    current_path = os.getcwd()
    os.chdir(self.tmpdir)
    try:
        cmd = "{} -i {} -w {} -dna 4 -iteration 50 -chain 20 -seqprop -0.1 -strand 2 -peaklocation {} -t_dof 3 -dep 2".format(
            bin, fgfile, merged_params['width'], summitfile)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
    finally:
        # Always restore the working directory, even if the tool fails.
        os.chdir(current_path)
    motifs = []
    if os.path.exists(outfile):
        with open(outfile) as f:
            motifs = self.parse(f)
    for i, m in enumerate(motifs):
        m.id = "HMS_w{}_{}".format(merged_params['width'], i + 1)
    return motifs, stdout, stderr
|
def get_chunks(sequence, chunk_size):
    """Split sequence into chunks.

    :param list sequence: the sequence to split
    :param int chunk_size: maximum number of items per chunk
    """
    chunks = []
    for offset in range(0, len(sequence), chunk_size):
        chunks.append(sequence[offset:offset + chunk_size])
    return chunks
|
def _apply_auth ( self , view_func ) :
"""Apply decorator to authenticate the user who is making the request .
: param view _ func : The flask view func ."""
|
@ functools . wraps ( view_func )
def decorated ( * args , ** kwargs ) :
if not self . auth :
return view_func ( * args , ** kwargs )
auth_data = self . auth . get_authorization ( )
if auth_data is None :
return self . _handle_authentication_error ( )
if not self . _authentication_callback or not self . _authentication_callback ( auth_data ) :
return self . _handle_authentication_error ( )
return view_func ( * args , ** kwargs )
return decorated
|
def build(tasks, worker_scheduler_factory=None, detailed_summary=False, **env_params):
    """Run internally, bypassing the cmdline parsing.

    Useful if you have some luigi code that you want to run internally.
    Example:

    .. code-block:: python

        luigi.build([MyTask1(), MyTask2()], local_scheduler=True)

    One notable difference is that `build` defaults to not using
    the identical process lock. Otherwise, `build` would only be
    callable once from each process.

    :param tasks:
    :param worker_scheduler_factory:
    :param env_params:
    :return: True if there were no scheduling errors, even if tasks may fail.
    """
    # Default to no process lock so build() can be called repeatedly.
    env_params.setdefault("no_lock", True)
    luigi_run_result = _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)
    if detailed_summary:
        return luigi_run_result
    return luigi_run_result.scheduling_succeeded
|
def extract_index(index_data, global_index=False):
    '''Instantiates and returns an AllIndex object given a valid index
    configuration

    CLI Example:
        salt myminion boto_dynamodb.extract_index index'''
    parsed_data = {}
    keys = []
    # Flatten the nested {index_name: [{field: value}, ...]} structure into
    # a flat parsed_data dict of recognized fields.
    for key, value in six.iteritems(index_data):
        for item in value:
            for field, data in six.iteritems(item):
                if field == 'hash_key':
                    parsed_data['hash_key'] = data
                elif field == 'hash_key_data_type':
                    parsed_data['hash_key_data_type'] = data
                elif field == 'range_key':
                    parsed_data['range_key'] = data
                elif field == 'range_key_data_type':
                    parsed_data['range_key_data_type'] = data
                elif field == 'name':
                    parsed_data['name'] = data
                elif field == 'read_capacity_units':
                    parsed_data['read_capacity_units'] = data
                elif field == 'write_capacity_units':
                    parsed_data['write_capacity_units'] = data
                elif field == 'includes':
                    parsed_data['includes'] = data
                elif field == 'keys_only':
                    parsed_data['keys_only'] = True
    # NOTE(review): direct indexing below ('hash_key', 'read_capacity_units',
    # 'write_capacity_units', 'name') raises KeyError when a field is absent
    # from the config -- presumably these are required; verify with callers.
    if parsed_data['hash_key']:
        keys.append(HashKey(parsed_data['hash_key'], data_type=parsed_data['hash_key_data_type']))
    if parsed_data.get('range_key'):
        keys.append(RangeKey(parsed_data['range_key'], data_type=parsed_data['range_key_data_type']))
    if (global_index and parsed_data['read_capacity_units'] and parsed_data['write_capacity_units']):
        parsed_data['throughput'] = {'read': parsed_data['read_capacity_units'], 'write': parsed_data['write_capacity_units']}
    if parsed_data['name'] and keys:
        if global_index:
            # A GSI may use exactly one projection type.
            if parsed_data.get('keys_only') and parsed_data.get('includes'):
                raise SaltInvocationError('Only one type of GSI projection can be used.')
            # NOTE(review): 'throughput' is only set when both capacity fields
            # were provided above; these constructors KeyError otherwise.
            if parsed_data.get('includes'):
                return GlobalIncludeIndex(parsed_data['name'], parts=keys, throughput=parsed_data['throughput'], includes=parsed_data['includes'])
            elif parsed_data.get('keys_only'):
                return GlobalKeysOnlyIndex(parsed_data['name'], parts=keys, throughput=parsed_data['throughput'], )
            else:
                return GlobalAllIndex(parsed_data['name'], parts=keys, throughput=parsed_data['throughput'])
        else:
            return AllIndex(parsed_data['name'], parts=keys)
|
def request(method='GET'):
    """send restful post http request decorator

    Provide a brief way to manipulate restful api.

    :param method: :class:`str`
    :return: :class:`func`
    """
    def decorator(func):
        @functools.wraps(func)
        def action(self, *args, **kwargs):
            url = furl(self.server)
            path, body = func(self, *args, **kwargs)
            # The wrapped func may return ((path, query), body).
            query = dict()
            if isinstance(path, tuple):
                path, query = path
            url.path.add(path)
            url.query.set(query)
            status_code, result = send_rest(url.url, method=method.upper(), body=body, session=self.session, headers=self.headers)
            if status_code != httplib.OK:
                self.logger.error("{impl} {url} headers: {headers}, code: {code}".format(impl=method, url=url.url, headers=self.headers, code=status_code))
            return status_code, result
        return action
    return decorator
|
def _check_configs(self):
    """Reloads the configuration files."""
    found = set(self._find_configs())
    known = set(self.configs.keys())
    # Forget configurations that disappeared from disk.
    for cfg in known - found:
        self.log.debug("Compass configuration has been removed: " + cfg)
        del self.configs[cfg]
    # Register configurations seen for the first time.
    for cfg in found - known:
        self.log.debug("Found new compass configuration: " + cfg)
        self.configs[cfg] = CompassConfig(cfg)
|
def user_tickets_assigned(self, user_id, external_id=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/tickets#allowed-for"
    api_path = "/api/v2/users/{user_id}/tickets/assigned.json".format(user_id=user_id)
    # Start from any caller-supplied query params, then add external_id.
    api_query = dict(kwargs.pop("query", {}))
    if external_id:
        api_query["external_id"] = external_id
    return self.call(api_path, query=api_query, **kwargs)
|
def detalhe(self, posicao=0):
    """Return the detail of one CEP from the last result list.

    :param posicao: zero-based position in the last result list
    """
    # The remote endpoint expects a 1-based position.
    params = {'Metodo': 'detalhe', 'TipoCep': 2, 'Posicao': posicao + 1, 'CEP': ''}
    handle = self._url_open('detalheCEPAction.do', params)
    return self._parse_detalhe(handle.read())
|
def list_distribute_contents_simple(input_list, function=lambda x: x):
    # type: (List, Callable[[Any], Any]) -> List
    """Distribute the contents of a list eg. [1, 1, 1, 2, 2, 3] -> [1, 2, 3, 1, 2, 1].

    List can contain complex types like dictionaries in which case the function
    can return the appropriate value eg. lambda x: x[KEY]

    Args:
        input_list (List): List to distribute values
        function (Callable[[Any], Any]): Return value to use for distributing. Defaults to lambda x: x.

    Returns:
        List: Distributed list
    """
    # Bucket the items by their distribution key.
    buckets = dict()
    for obj in input_list:
        dict_of_lists_add(buckets, function(obj), obj)
    # Emit one item per bucket per round (round-robin over sorted keys)
    # until every bucket is exhausted.
    output_list = list()
    position = 0
    while True:
        emitted = False
        for key in sorted(buckets):
            values = buckets[key]
            if position < len(values):
                output_list.append(values[position])
                emitted = True
        if not emitted:
            break
        position += 1
    return output_list
|
def _cnn_filter(in_file, vrn_files, data):
    """Perform CNN filtering on input VCF using pre-trained models."""
    # tensor_type = "reference"  # 1D, reference sequence
    # "read_tensor" selects the 2D model: reads, flags, mapping quality.
    tensor_type = "read_tensor"
    # Score the variants with the CNN, then apply tranche-based filtering.
    score_file = _cnn_score_variants(in_file, tensor_type, data)
    return _cnn_tranch_filtering(score_file, vrn_files, tensor_type, data)
|
def dissect(self, data):
    """Dissect the field.

    :param bytes data: The data to extract the field value from
    :return: The rest of the data not used to dissect the field value
    :rtype: bytes
    """
    header_size = struct.calcsize("B")
    if len(data) < header_size:
        raise NotEnoughData("Not enough data to decode field '%s' value" % self.name)
    (curve_type,) = struct.unpack("B", data[:header_size])
    # Only named curves (0x03) are supported.
    if curve_type != 0x03:
        raise NotImplementedError("Decoding of KeyExchange message for curve 0x%.2X not implemented" % curve_type)
    self._value = ECParametersNamedCurveField("none")
    return self._value.dissect(data)
|
def get_annotation(self, id_):
    """Data for a specific annotation."""
    return self._make_request("annotations/{id}".format(id=id_))
|
def compile_string(string, compiler_class=Compiler, **kwargs):
    """Compile a single string, and return a string of CSS.

    Keyword arguments are passed along to the underlying `Compiler`.
    """
    return compiler_class(**kwargs).compile_string(string)
|
def rpc_v2(self, cmd, arg_format, result_format, *args, **kw):
    """Send an RPC call to this module, interpret the return value
    according to the result_type kw argument. Unless raise keyword
    is passed with value False, raise an RPCException if the command
    is not successful.

    v2 enforces the use of arg_format and result_format
    v2 combines the feature + cmd chunks in to a single 2-byte chunk"""
    # Pack the positional arguments; an empty arg_format allows no args.
    if args:
        packed_args = pack_rpc_payload(arg_format, list(args))
    elif arg_format == "":
        packed_args = b''
    else:
        raise RPCInvalidArgumentsError("Arg format expects arguments to be present", arg_format=arg_format, args=args)
    # Only forward a caller-supplied timeout to the underlying stream.
    passed_kw = dict()
    if 'timeout' in kw:
        passed_kw['timeout'] = kw['timeout']
    try:
        should_retry = False
        payload = self.stream.send_rpc(self.addr, cmd, packed_args, **passed_kw)
    except BusyRPCResponse:
        # First busy response seeds the retry budget at 10.
        if "retries" not in kw:
            kw['retries'] = 10
        # Sleep 100 ms and try again unless we've exhausted our retry attempts.
        # NOTE(review): the message hardcodes "10 attempts" even if the caller
        # passed a different retries value -- confirm whether that's intended.
        if kw["retries"] == 0:
            raise BusyRPCResponse("Could not complete RPC %d:%04X after 10 attempts due to busy tile" % (self.addr, cmd))
        should_retry = True
    # If the tile was busy, automatically retry (recursively) with the
    # decremented budget carried in kw['retries'].
    if should_retry:
        kw['retries'] -= 1
        sleep(0.1)
        return self.rpc_v2(cmd, arg_format, result_format, *args, **kw)
    return unpack_rpc_payload(result_format, payload)
|
def _inspect_and_map(self, identifier):
    """Inspect a Docker image and map the response onto this object.

    Calls ``self.client.inspect_image(identifier)``, normalizes the response
    keys, and populates the image attributes (id, size, parent, os, etc.).
    ``container_config`` and ``config`` sub-documents are wrapped in
    :class:`ContainerConfig` when present, otherwise default to an empty one.

    :param identifier: image id or name to inspect
    """
    response = normalize_keys(self.client.inspect_image(identifier))
    self.comment = response['comment'] if response['comment'] else None
    self.id = response["id"]
    self.virtual_size = response['virtual_size']
    self.container = response['container']
    self.os = response['os']
    self.parent = response['parent']
    # 'author' may be absent for some images.
    self.author = response.get('author', None)
    # Fix: original had a duplicated `self.created_at = self.created_at =`.
    self.created_at = dateutil.parser.parse(response['created'], ignoretz=True)
    self.architecture = response['architecture']
    self.docker_version = response['docker_version']
    self.size = response['size']
    # Bug fix: container_config was gated on response.get('config') instead of
    # its own key, so an image with container_config but no config lost it.
    self.container_config = ContainerConfig(response.get('container_config')) if response.get('container_config') else ContainerConfig()
    self.config = ContainerConfig(response.get('config')) if response.get('config') else ContainerConfig()
|
def to_primitive(self, value, context=None):
    """Schematics serializer override.

    If ``epoch_date`` is set in *context*, convert the ``datetime.datetime``
    object into an epoch ``int``; if ``datetime_date`` is set, return the
    value unchanged; otherwise defer to the parent serializer.
    """
    ctx = context or {}
    if ctx.get('epoch_date'):
        # Seconds since the (naive) Unix epoch, truncated to int.
        epoch = dt(1970, 1, 1)
        return int((value - epoch).total_seconds())
    if ctx.get('datetime_date'):
        return value
    return super(Type, self).to_primitive(value, context)
|
def slamdunkFilterStatsTable(self):
    """Take the parsed filter stats from Slamdunk and add it to a separate table"""
    # Column definitions for the MultiQC table. Every column shares the
    # 'read_count' key and scales raw counts by config.read_count_multiplier.
    headers = OrderedDict()
    headers['mapped'] = {'namespace': 'Slamdunk', 'title': '{} Mapped'.format(config.read_count_prefix), 'description': '# mapped reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'suffix': config.read_count_prefix, 'scale': 'YlGn', 'modify': lambda x: float(x) * config.read_count_multiplier, }
    headers['multimapper'] = {'namespace': 'Slamdunk', 'title': '{} Multimap-Filtered'.format(config.read_count_prefix), 'description': '# multimap-filtered reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'suffix': config.read_count_prefix, 'scale': 'OrRd', 'modify': lambda x: float(x) * config.read_count_multiplier, }
    headers['nmfiltered'] = {'namespace': 'Slamdunk', 'title': '{} NM-Filtered'.format(config.read_count_prefix), 'description': '# NM-filtered reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'suffix': config.read_count_prefix, 'scale': 'OrRd', 'modify': lambda x: float(x) * config.read_count_multiplier, }
    headers['idfiltered'] = {'namespace': 'Slamdunk', 'title': '{} Identity-Filtered'.format(config.read_count_prefix), 'description': '# identity-filtered reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'suffix': config.read_count_prefix, 'scale': 'OrRd', 'modify': lambda x: float(x) * config.read_count_multiplier, }
    headers['mqfiltered'] = {'namespace': 'Slamdunk', 'title': '{} MQ-Filtered'.format(config.read_count_prefix), 'description': '# MQ-filtered reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'suffix': config.read_count_prefix, 'scale': 'OrRd', 'modify': lambda x: float(x) * config.read_count_multiplier, }
    pconfig = {'id': 'slamdunk_filtering_table', 'min': 0, }
    # Render the filter statistics as its own report section.
    self.add_section(name='Filter statistics', anchor='slamdunk_filtering', description='This table shows the number of reads filtered with each filter criterion during filtering phase of slamdunk.', plot=table.plot(self.slamdunk_data, headers, pconfig))
|
def expand_nested(self, cats):
    """Populate widget with nested catalogs"""
    # Box-drawing characters used to render the tree hierarchy.
    down = '│'
    right = '└──'
    def get_children(parent):
        # Instantiate each entry and keep only sub-catalog containers.
        return [e() for e in parent._entries.values() if e._container == 'catalog']
    if len(cats) == 0:
        return
    cat = cats[0]
    old = list(self.options.items())
    # Locate the display name and position of the catalog being expanded.
    name = next(k for k, v in old if v == cat)
    index = next(i for i, (k, v) in enumerate(old) if v == cat)
    if right in name:
        # Already nested: extend the parent's prefix one level deeper.
        prefix = f'{name.split(right)[0]}{down} {right}'
    else:
        prefix = right
    children = get_children(cat)
    # Insert each child immediately after its parent, preserving order.
    for i, child in enumerate(children):
        old.insert(index + i + 1, (f'{prefix} {child.name}', child))
    self.widget.options = dict(old)
|
def predict_size_distribution_component_models(self, model_names, input_columns, output_columns, metadata_cols,
                                               data_mode="forecast", location=6):
    """Make predictions using fitted size distribution models.

    Args:
        model_names: Name of the models for predictions
        input_columns: Data columns used for input into ML models
        output_columns: Names of output columns
        metadata_cols: Columns from input data that should be included in the
            data frame with the predictions.
        data_mode: Set of data used as input for prediction models
        location: Value of fixed location parameter

    Returns:
        pandas.DataFrame indexed like the input data, containing the metadata
        columns plus one ``<model>_shape``/``_location``/``_scale`` column
        triple per model name.
    """
    groups = self.size_distribution_models.keys()
    predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
    for group in groups:
        # Boolean mask selecting the rows belonging to this group.
        group_idxs = self.data[data_mode]["combo"][self.group_col] == group
        group_count = np.count_nonzero(group_idxs)
        # Fix: dropped the stray debug print of the whole model dict that ran
        # once per group (even for empty groups) and spammed stdout.
        if group_count > 0:
            log_mean = self.size_distribution_models[group]["lognorm"]["mean"]
            log_sd = self.size_distribution_models[group]["lognorm"]["sd"]
            for model_name in model_names:
                # One principal-component prediction per output column.
                raw_preds = np.zeros((group_count, len(output_columns)))
                for c in range(len(output_columns)):
                    raw_preds[:, c] = self.size_distribution_models[group]["pc_{0:d}".format(c)][model_name].predict(
                        self.data[data_mode]["combo"].loc[group_idxs, input_columns])
                # Map PC space back to standardized log-parameter space.
                log_norm_preds = self.size_distribution_models[group]["lognorm"]["pca"].inverse_transform(raw_preds)
                # Sign flip of the first component -- presumably matches the
                # convention used when the PCA was fitted; confirm there.
                log_norm_preds[:, 0] *= -1
                # Undo the log-space standardization.
                multi_predictions = np.exp(log_norm_preds * log_sd + log_mean)
                if multi_predictions.shape[1] == 2:
                    # Only shape and scale were predicted: splice in the fixed
                    # location parameter between them.
                    multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3))
                    multi_predictions_temp[:, 0] = multi_predictions[:, 0]
                    multi_predictions_temp[:, 1] = location
                    multi_predictions_temp[:, 2] = multi_predictions[:, 1]
                    multi_predictions = multi_predictions_temp
                for p, pred_col in enumerate(["shape", "location", "scale"]):
                    predictions.loc[group_idxs, model_name.replace(" ", "-") + "_" + pred_col] = multi_predictions[:, p]
    return predictions
|
def numRegisteredByRole(self):
    '''Return a dictionary listing registrations by all available roles (including no role)'''
    counts = {}
    # None represents "no role"; keyed by the role's name attribute.
    for role in list(self.availableRoles) + [None]:
        counts[getattr(role, 'name', None)] = self.numRegisteredForRole(role)
    return counts
|
def disable_auto_login():
    '''.. versionadded:: 2016.3.0

    Disables auto login on the machine

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' user.disable_auto_login
    '''
    # Delete the obfuscated autologin password file.
    __salt__['cmd.run']('rm -f /etc/kcpassword')
    # Drop the autologin user entry from the loginwindow preferences.
    __salt__['cmd.run']([
        'defaults', 'delete',
        '/Library/Preferences/com.apple.loginwindow.plist',
        'autoLoginUser',
    ])
    # Success means auto login no longer reports as enabled.
    return not get_auto_login()
|
def _GetRecord(self, offset, record_size):
    """Retrieve a single record from the file.

    Args:
      offset: offset from start of input_dat where header starts
      record_size: length of the header according to file (untrusted)

    Returns:
      A dict containing a single browser history record, or None when the
      record's URL-offset byte marks it as unusable.
    """
    # Fixed header layout: 4-byte signature, 32-bit block count, two 64-bit
    # timestamps, then one more 32-bit field.
    record_header = "<4sLQQL"
    # Read a little-endian uint32 at absolute offset x.
    get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
    # Byte at offset+52 gives the offset of the URL string within the record.
    url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0]
    if url_offset in [0xFF, 0xFE]:
        # 0xFF/0xFE presumably flag unused or deleted records -- skip them.
        return None
    data_offset = get4(offset + 68)
    data_size = get4(offset + 72)
    start_pos = offset + data_offset
    # Raw payload bytes referenced by the header.
    data = struct.unpack("{0}s".format(data_size), self.input_dat[start_pos:start_pos + data_size])[0]
    fmt = record_header
    # Unknown filler bytes between the fixed header and the URL string.
    unknown_size = url_offset - struct.calcsize(fmt)
    fmt += "{0}s".format(unknown_size)
    # Everything from the URL to the end of the record (record_size comes
    # from the file and is untrusted -- a bad value raises in unpack).
    fmt += "{0}s".format(record_size - struct.calcsize(fmt))
    dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])
    header, blocks, mtime, ctime, ftime, _, url = dat
    # The URL is NUL-terminated within its field.
    url = url.split(b"\x00")[0].decode("utf-8")
    # Rescale the Windows-epoch timestamps to the Unix epoch; the //10 plus
    # the MSECS-named constant suggests microsecond units from 100ns input --
    # TODO confirm the intended unit.
    if mtime:
        mtime = mtime // 10 - WIN_UNIX_DIFF_MSECS
    if ctime:
        ctime = ctime // 10 - WIN_UNIX_DIFF_MSECS
    return {"header": header,  # the header
            "blocks": blocks,  # number of blocks
            "urloffset": url_offset,  # offset of URL in file
            "data_offset": data_offset,  # offset for start of data
            "data_size": data_size,  # size of data
            "data": data,  # actual data
            "mtime": mtime,  # modified time
            "ctime": ctime,  # created time
            "ftime": ftime,  # file time
            "url": url  # the url visited
            }
|
def flux_components(self, kwargs_light, n_grid=400, delta_grid=0.01, deltaPix=0.05, type="lens"):
    """Compute the total flux and half-light radius of each model component.

    :param kwargs_light: list of keyword-argument dicts, one per component
    :param n_grid: number of grid points per axis for the evaluation grid
    :param delta_grid: grid spacing used for the evaluation grid
    :param deltaPix: pixel scale used to normalize the flux
    :param type: 'lens' or 'source' -- which light model to evaluate
    :return: (list of fluxes, list of half-light radii), one entry per component
    """
    x_grid, y_grid = util.make_grid(numPix=n_grid, deltapix=delta_grid)
    # Work on a copy so the components can be re-centred without mutating input.
    kwargs_copy = copy.deepcopy(kwargs_light)
    flux_list = []
    R_h_list = []
    for k in range(len(kwargs_light)):
        component = kwargs_copy[k]
        if 'center_x' in component:
            # Centre each component on the grid before measuring it.
            component['center_x'] = 0
            component['center_y'] = 0
        if type == 'lens':
            light = self.LensLightModel.surface_brightness(x_grid, y_grid, kwargs_copy, k=k)
        elif type == 'source':
            light = self.SourceModel.surface_brightness(x_grid, y_grid, kwargs_copy, k=k)
        else:
            raise ValueError("type %s not supported!" % type)
        # Convert the grid integral into pixel-normalized flux units.
        flux_list.append(np.sum(light) * delta_grid ** 2 / deltaPix ** 2)
        R_h_list.append(analysis_util.half_light_radius(light, x_grid, y_grid))
    return flux_list, R_h_list
|
def CopyToDateTimeString(self):
    """Copies the time elements to a date and time string.

    Returns:
      str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss" or
          "YYYY-MM-DD hh:mm:ss.#####" or None if time elements are missing.

    Raises:
      ValueError: if the precision value is unsupported.
    """
    # Without both a seconds count and a fraction there is nothing to format.
    if self._number_of_seconds is None or self.fraction_of_second is None:
        return None
    # The precision helper knows how many fractional digits to emit.
    helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(self._precision)
    return helper.CopyToDateTimeString(self._time_elements_tuple, self.fraction_of_second)
|
def tables_with_counts(self):
    """Return an iterable of (table, row count) pairs for every table."""
    counts = (self.count_rows(table) for table in self.tables)
    return zip(self.tables, counts)
|
def write(self, file):
    r"""Save CAPTCHA image in given filepath.

    Generates the CAPTCHA via ``self.image`` and saves the image contents
    to *file* using ``self.format``.

    :param file:
        Path to file, where CAPTCHA image will be saved.
    :returns: ``tuple`` (CAPTCHA text, filepath)
    """
    text, image = self.image
    image.save(file, format=self.format)
    return text, file
|
def get_client_ip(environ):
    # type: (Dict[str, str]) -> Optional[Any]
    """Infer the user IP address from various headers.

    This cannot be used in security sensitive situations since the value may
    be forged from a client, but it's good enough for the event payload.
    """
    forwarded = environ.get("HTTP_X_FORWARDED_FOR")
    if forwarded is not None:
        # First entry in the comma-separated chain is the originating client.
        return forwarded.split(",")[0].strip()
    real_ip = environ.get("HTTP_X_REAL_IP")
    if real_ip is not None:
        return real_ip
    # Last resort: the peer address as seen by the server.
    return environ.get("REMOTE_ADDR")
|
def format_phone_number(number):
    """Formats a phone number in international notation.

    :param number: str or phonenumber object
    :return: str
    """
    # Accept both raw strings and already-parsed PhoneNumber objects.
    parsed = number if isinstance(number, phonenumbers.PhoneNumber) else phonenumbers.parse(number)
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
|
def report(self, filename=None):
    """Write details of each versioned target to file.

    :param string filename: file to write out the report to

    Fields in the report:
      invocation_id: A sequence number that increases each time a task is invoked
      task_name: The name of the task
      targets_hash: an id from a hash of all target ids to identify a VersionedTargetSet
      target_id: target id
      cache_key_id: the Id for the cache key
      cache_key_hash: computed hash for the cache key
      phase: What part of the validation check the values were captured
      valid: True if the cache is valid for the VersionedTargetSet
    """
    # TODO(zundel) set report to stream to the file
    target = filename or self._filename
    if not target:
        return
    # Usually the directory exists from reporting initialization, but not if
    # clean-all was a goal.
    with safe_open(target, 'w') as writer:
        writer.write('invocation_id,task_name,targets_hash,target_id,cache_key_id,cache_key_hash,phase,valid' + '\n')
        for task_report in self._task_reports.values():
            task_report.report(writer)
|
def get_sites(self, entry):
    """Return the sites linked in HTML."""
    try:
        index_url = reverse('zinnia:entry_archive_index')
    except NoReverseMatch:
        # URL conf may not include the archive index; link to the bare site.
        index_url = ''
    links = [(settings.PROTOCOL, site.domain, index_url, conditional_escape(site.name))
             for site in entry.sites.all()]
    return format_html_join(', ', '<a href="{}://{}{}" target="blank">{}</a>', links)
|
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
    """Recursively scan a topic and return a list of all triggers.

    Arguments:
        rs (RiveScript): A reference to the parent RiveScript instance.
        topic (str): The original topic name.
        thats (bool): Are we getting triggers for 'previous' replies?
        depth (int): Recursion step counter.
        inheritance (int): The inheritance level counter, for topics that
            inherit other topics.
        inherited (bool): Whether the current topic is inherited by others.

    Returns:
        []str: List of all triggers found.
    """
    # Break if we're in too deep.
    if depth > rs._depth:
        rs._warn("Deep recursion while scanning topic inheritance")
        # Fix: actually stop here. Without this return, cyclic 'includes'/
        # 'inherits' relationships recursed until Python's recursion limit.
        return []

    # Keep in mind here that there is a difference between 'includes' and
    # 'inherits' -- topics that inherit other topics are able to OVERRIDE
    # triggers that appear in the inherited topic. All topics that inherit
    # other topics will have their triggers prefixed with a fictional
    # {inherits} tag, starting at {inherits=0} and incrementing per level,
    # so inheriting topics' triggers always sort above inherited ones.
    #
    # depth increments by 1 on each recursive call; inheritance increments
    # by 1 only when this topic inherits another topic. The inherited flag
    # is True on recursive calls for 'included' topics and forces the
    # {inherits} tag onto their triggers.
    rs._say("\tCollecting trigger list for topic " + topic + "(depth="
            + str(depth) + "; inheritance=" + str(inheritance) + "; "
            + "inherited=" + str(inherited) + ")")

    # Topic doesn't exist?
    if not topic in rs._topics:
        rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(topic))
        return []

    # Collect an array of triggers to return.
    triggers = []

    # Get those that exist in this topic directly.
    inThisTopic = []
    if not thats:
        # The non-that structure is {topic} -> [array of triggers]
        if topic in rs._topics:
            for trigger in rs._topics[topic]:
                inThisTopic.append([trigger["trigger"], trigger])
    else:
        # The 'that' structure is: {topic} -> {cur trig} -> {prev trig} -> {trig info}
        if topic in rs._thats.keys():
            for curtrig in rs._thats[topic].keys():
                for previous, pointer in rs._thats[topic][curtrig].items():
                    inThisTopic.append([pointer["trigger"], pointer])

    # Does this topic include others?
    if topic in rs._includes:
        # Check every included topic.
        for includes in rs._includes[topic]:
            rs._say("\t\tTopic " + topic + " includes " + includes)
            triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))

    # Does this topic inherit others?
    if topic in rs._lineage:
        # Check every inherited topic.
        for inherits in rs._lineage[topic]:
            rs._say("\t\tTopic " + topic + " inherits " + inherits)
            triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))

    # Collect the triggers for *this* topic. If this topic inherits any
    # other topics, its triggers have higher priority than those in any
    # inherited topics; enforce this with an {inherits} tag.
    if topic in rs._lineage or inherited:
        for trigger in inThisTopic:
            rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0])
            triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]])
    else:
        triggers.extend(inThisTopic)

    return triggers
|
def construct(path, name=None):
    "Selects an appropriate CGroup subclass for the given CGroup path."
    # Derive the subsystem name from the path when not given explicitly.
    subsystem = name or path.split("/")[4]
    registry = {"memory": Memory, "cpu": CPU, "cpuacct": CPUAcct}
    # Unknown subsystems fall back to the generic CGroup class.
    cls = registry.get(subsystem, CGroup)
    log.debug("Chose %s for: %s", cls.__name__, path)
    return cls(path, subsystem)
|
def adddeploykey(self, project_id, title, key):
    """Creates a new deploy key for a project.

    :param project_id: project id
    :param title: title of the key
    :param key: the key itself
    :return: parsed JSON response if created, ``False`` otherwise
    """
    payload = {'id': project_id, 'title': title, 'key': key}
    response = requests.post(
        '{0}/{1}/keys'.format(self.projects_url, project_id),
        headers=self.headers, data=payload,
        verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    # 201 Created is the only success status for this endpoint.
    if response.status_code != 201:
        return False
    return response.json()
|
def stc_system_info(stc_addr):
    """Return dictionary of STC and API information.

    A session is necessary to get STC information; if one already exists it
    is reused to avoid the cost of starting a new session.
    """
    stc = stchttp.StcHttp(stc_addr)
    sessions = stc.sessions()
    if sessions:
        # Piggy-back on an existing session.
        stc.join_session(sessions[0])
        return stc.system_info()
    # No session available: create a throw-away one.
    stc.new_session('anonymous')
    try:
        return stc.system_info()
    finally:
        # Make sure the temporary session is terminated.
        stc.end_session()
|
def extend(base: Dict[Any, Any], extension: Dict[Any, Any]) -> Dict[Any, Any]:
    '''Extend base by updating with the extension.

    **Arguments**
        :``base``: dictionary to have keys updated or added
        :``extension``: dictionary to update base with

    **Return Value(s)**
        Resulting dictionary from updating base with extension.
    '''
    # Deep-copy so neither input is mutated by the merge.
    merged = copy.deepcopy(base)
    merged.update(extension)
    return merged
|
def makedict(self, dictfile, fnamefobject):
    """Stuff file data into the blank dictionary.

    ``dictfile`` is either an already-parsed ``Idd`` (deep-copied so the
    original is untouched) or a path handed to ``initdict``. The IDF text is
    read from ``fnamefobject``, comments are stripped, and each
    ';'-terminated object is filed under its upper-cased first field.
    """
    if isinstance(dictfile, Idd):
        localidd = copy.deepcopy(dictfile)
        dt, dtls = localidd.dt, localidd.dtls
    else:
        dt, dtls = self.initdict(dictfile)
    astr = fnamefobject.read()
    try:
        # Binary-mode file objects yield bytes; decode them.
        astr = astr.decode('ISO-8859-2')
    except AttributeError:
        pass
    fnamefobject.close()
    nocom = removecomment(astr, '!')
    # Objects are ';'-terminated; fields within an object are ','-separated.
    records = [[field.strip() for field in chunk.split(',')]
               for chunk in nocom.split(';')]
    for record in records:
        node = record[0].upper()
        if node in dt:
            # Stuff data in this key.
            dt[node].append(record)
        elif node:
            # Unknown (non-empty) object type: report it.
            print('this node -%s-is not present in base dictionary' % (node))
    self.dt, self.dtls = dt, dtls
    return dt, dtls
|
def _delete_partition(self, tenant_id, tenant_name):
    """Function to delete a service partition.

    :param tenant_id: tenant identifier; not used in this body -- presumably
        kept for a uniform handler signature, confirm against callers.
    :param tenant_name: name of the tenant whose service partition is removed
    """
    # Delegates to the DCNM client using the fixed service-partition name
    # constant from fw_const.
    self.dcnm_obj.delete_partition(tenant_name, fw_const.SERV_PART_NAME)
|
def opens(self, tag=None, fromdate=None, todate=None):
    """Gets total counts of recipients who opened your emails.

    This is only recorded when open tracking is enabled for that email.
    """
    query = dict(tag=tag, fromdate=fromdate, todate=todate)
    return self.call("GET", "/stats/outbound/opens", **query)
|
def get_gradebook_column_ids_by_gradebooks(self, gradebook_ids):
    """Gets the list of ``GradebookColumn Ids`` corresponding to a list of ``Gradebooks``.

    arg:    gradebook_ids (osid.id.IdList): list of gradebook ``Ids``
    return: (osid.id.IdList) - list of gradebook column ``Ids``
    raise:  NullArgument - ``gradebook_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bins
    columns = self.get_gradebook_columns_by_gradebooks(gradebook_ids)
    return IdList([column.get_id() for column in columns])
|
def ssh(gandi, resource, login, identity, wipe_key, wait, args):
    """Spawn an SSH session to virtual machine.

    Resource can be a Hostname or an ID; a ``user@host`` resource overrides
    the *login* argument.
    """
    if '@' in resource:
        # Split off an embedded login at the first '@'.
        login, _, resource = resource.partition('@')
    if wipe_key:
        gandi.iaas.ssh_keyscan(resource)
    if wait:
        gandi.iaas.wait_for_sshd(resource)
    gandi.iaas.ssh(resource, login, identity, args)
|
def listFieldsFromWorkitem(self, copied_from, keep=False):
    """List all the attributes to be rendered directly from some
    to-be-copied workitems.

    More details, please refer to
    :class:`rtcclient.template.Templater.listFieldsFromWorkitem`
    """
    # Thin delegation to the underlying Templater instance.
    return self.templater.listFieldsFromWorkitem(copied_from, keep=keep)
|
def get_pfam_accession_numbers_from_pdb_id(self, pdb_id):
    '''Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.'''
    # Mapping keys are lower-case PDB ids.
    mapping = self.pdb_chain_to_pfam_mapping.get(pdb_id.lower())
    if mapping:
        # Return a copy so callers cannot mutate the cached mapping.
        return mapping.copy()
    # Implicitly returns None for unknown PDB ids.
|
def get_addon_id(addonxml):
    '''Parses an addon id from the given addon.xml filename.'''
    document = parse(addonxml)
    # The id lives on the first (and only) <addon> element.
    addon = document.getElementsByTagName('addon')[0]
    return addon.getAttribute('id')
|
def binary_shader(self, output_jar, main, jar, custom_rules=None, jvm_options=None):
    """Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.

    The default rules will ensure the `main` class name is un-changed along with a minimal set of
    support classes but that everything else will be shaded.

    Any `custom_rules` are given highest precedence and so they can interfere with this automatic
    binary shading. In general its safe to add exclusion rules to open up classes that need to be
    shared between the binary and the code it runs over.

    :param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
                               exists.
    :param unicode main: The main class in the `jar` to preserve as the entry point.
    :param unicode jar: The path to the jar file to shade.
    :param list custom_rules: An optional list of custom `Shader.Rule`s.
    :param list jvm_options: an optional sequence of options for the underlying jvm
    :returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
    :rtype: :class:`pants.java.executor.Executor.Runner`
    """
    rules = self.assemble_binary_rules(main, jar, custom_rules=custom_rules)
    return self.binary_shader_for_rules(output_jar, jar, rules, jvm_options=jvm_options)
|
def output(ret, **kwargs):
    '''Display the output as table.

    Args:
        * nested_indent: integer, specify the left alignment.
        * has_header: boolean specifying if header should be displayed. Default: True.
        * row_delimiter: character to separate rows. Default: ``_``.
        * delim: character to separate columns. Default: ``" | "``.
        * justify: text alignment. Default: ``center``.
        * separate_rows: boolean specifying if row separator will be displayed between consecutive rows. Default: True.
        * prefix: character at the beginning of the row. Default: ``"|"``.
        * suffix: character at the end of the row. Default: ``"|"``.
        * width: column max width. Default: ``50``.
        * rows_key: display the rows under a specific key.
        * labels_key: use the labels under a certain key. Otherwise will try to use the dictionary keys (if any).
        * title: display title when only one table is selected (using the ``rows_key`` argument).
    '''
    # to facilitate re-use: callers may inject the options dict instead of
    # relying on the module-level __opts__.
    if 'opts' in kwargs:
        global __opts__
        # pylint: disable=W0601
        __opts__ = kwargs.pop('opts')
    # Prefer kwargs before opts.
    # NOTE(review): the `or` fallback means falsy kwargs (0, False, '') are
    # ignored in favour of the __opts__ value -- e.g. nested_indent=0 cannot
    # override a non-zero 'out.table.nested_indent'. Presumably tolerated;
    # confirm before changing.
    base_indent = kwargs.get('nested_indent', 0) or __opts__.get('out.table.nested_indent', 0)
    rows_key = kwargs.get('rows_key') or __opts__.get('out.table.rows_key')
    labels_key = kwargs.get('labels_key') or __opts__.get('out.table.labels_key')
    title = kwargs.get('title') or __opts__.get('out.table.title')
    # Collect only the explicitly-provided display options for TableDisplay.
    class_kvargs = {}
    argks = ('has_header', 'row_delimiter', 'delim', 'justify', 'separate_rows', 'prefix', 'suffix', 'width')
    for argk in argks:
        argv = kwargs.get(argk) or __opts__.get('out.table.{key}'.format(key=argk))
        if argv is not None:
            class_kvargs[argk] = argv
    table = TableDisplay(**class_kvargs)
    out = []
    # The title is only meaningful when a single table is selected.
    if title and rows_key:
        out.append(table.ustring(base_indent, title, table.WHITE,  # pylint: disable=no-member
                                 suffix='\n'))
    return '\n'.join(table.display(salt.utils.data.decode(ret), base_indent, out, rows_key=rows_key, labels_key=labels_key))
|
def chat_post_message(self, channel, text, **params):
    """chat.postMessage

    This method posts a message to a channel.
    https://api.slack.com/methods/chat.postMessage
    """
    payload = dict(params)
    payload['channel'] = channel
    payload['text'] = text
    return self._make_request('chat.postMessage', payload)
|
def split_sentences_spacy(text, language_model='en'):
    r"""Split text into sentences with spaCy (requires `python -m download 'en'`).

    The default English language model for spacy tends to be a lot more agressive than NLTK's punkt:

    >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0")
    ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0']
    >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0")
    ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0']

    >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0")
    ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"]
    >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0")
    ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0']
    """
    doc = nlp(text)
    if not hasattr(doc, 'sents'):
        # Fall back when no sentence boundaries are available.
        logger.warning("Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded")
        return split_sentences_nltk(text)
    # Re-join each span's tokens, then drop empty results.
    pieces = (''.join(doc[i].string for i in range(span.start, span.end)).strip()
              for span in doc.sents)
    return [piece for piece in pieces if piece]
|
def hasField(cls, fieldName):
    """Return True/False whether the collection has field fieldName in its
    schema. Use the dot notation for the nested fields: ``address.street``.
    """
    node = cls._fields
    for part in fieldName.split("."):
        try:
            node = node[part]
        except (KeyError, TypeError):
            # KeyError: unknown field at this level.
            # TypeError: the path tries to index past a scalar leaf
            # (e.g. "name.x" when "name" maps to a non-dict) -- previously
            # this escaped as an uncaught TypeError instead of returning False.
            return False
    return True
|
def is_zero_user(self):
    """Return whether the current user is a "zero" user -- in fact four
    zeros: 0 upvotes, 0 thanks, 0 questions and 0 answers.

    :return: True if the user has no recorded activity at all
    :rtype: bool
    """
    total = self.upvote_num + self.thank_num + self.question_num + self.answer_num
    return total == 0
|
def getBox(box, pagesize):
    """Parse sizes by corners in the form:

        <X-Left> <Y-Upper> <Width> <Height>

    The last two values with negative values are interpreted as offsets from
    the right and lower border.

    :raises ValueError: if *box* does not contain exactly four values.
    """
    parts = str(box).split()
    if len(parts) != 4:
        # ValueError is the idiomatic type for malformed input; it is a
        # subclass of Exception, so existing `except Exception` handlers
        # keep working.
        raise ValueError("box not defined right way")
    x, y, w, h = [getSize(pos) for pos in parts]
    return getCoords(x, y, w, h, pagesize)
|
def createEditor(self, parent, option, index):
    """Returns the widget used to change data from the model and can be reimplemented to
    customize editing behavior.

    Reimplemented from QStyledItemDelegate.
    """
    logger.debug("ConfigItemDelegate.createEditor, parent: {!r}".format(parent.objectName()))
    assert index.isValid(), "sanity check failed: invalid index"
    # The config tree item knows how to build its own editor widget.
    cti = index.model().getItem(index)
    return cti.createEditor(self, parent, option)
|
def maximum_size_bytes(self):
    """Gets the biggest disk drive.

    :returns: size in bytes.
    """
    capacities = []
    for device in self.devices:
        capacity = device.get('CapacityBytes')
        # Devices without a reported capacity are skipped.
        if capacity is not None:
            capacities.append(capacity)
    return utils.max_safe(capacities)
|
def mutagen_call(action, path, func, *args, **kwargs):
    """Call a Mutagen function with appropriate error handling.

    `action` is a string describing what the function is trying to do,
    and `path` is the relevant filename. The rest of the arguments
    describe the callable to invoke.

    We require at least Mutagen 1.33, where `IOError` is *never* used,
    neither for internal parsing errors *nor* for ordinary IO error
    conditions such as a bad filename. Mutagen-specific parsing errors and IO
    errors are reraised as `UnreadableFileError`. Other exceptions
    raised inside Mutagen --- i.e., bugs --- are reraised as `MutagenError`.
    """
    try:
        return func(*args, **kwargs)
    except mutagen.MutagenError as exc:
        # Expected parsing/IO failure: surface it as an unreadable file.
        message = six.text_type(exc)
        log.debug(u'%s failed: %s', action, message)
        raise UnreadableFileError(path, message)
    except Exception as exc:
        # Isolate bugs in Mutagen.
        log.debug(u'%s', traceback.format_exc())
        log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
        raise MutagenError(path, exc)
|
def resolve(self, definitions):
    """Resolve named references to other WSDL objects.

    Can be safely called multiple times; only the first call does work.

    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    """
    if self.__resolved:
        return
    self.do_resolve(definitions)
    self.__resolved = True
|
def do_cancel(self, taskid: int) -> None:
    """Cancel an indicated task"""
    task = task_by_id(taskid, self._loop)
    if not task:
        self._sout.write('No task %d\n' % taskid)
        return
    # Cancellation must run on the event loop's thread; wait briefly for it.
    future = asyncio.run_coroutine_threadsafe(cancel_task(task), loop=self._loop)
    future.result(timeout=3)
    self._sout.write('Cancel task %d\n' % taskid)
|
def rms(self, x, params=()):
    """Returns root mean square value of f(x, params)"""
    internal_x, internal_params = self.pre_process(np.asarray(x), np.asarray(params))
    if internal_params.ndim > 1:
        raise NotImplementedError("Parameters should be constant.")
    out = np.empty(internal_x.size // self.nx)
    # One RMS value per row of the pre-processed input.
    for i, row in enumerate(internal_x):
        out[i] = np.sqrt(np.mean(np.square(self.f_cb(row, internal_params))))
    return out
|
def valid_conkey(self, conkey):
    """Check that the conkey is a valid one. Return True if valid. A
    condition key is valid if it is one in the _COND_PREFIXES
    list. With the prefix removed, the remaining string must be
    either a number or the empty string.
    """
    for prefix in _COND_PREFIXES:
        # NOTE(review): str.lstrip() strips any leading characters drawn
        # from the *character set* of `prefix`; it does not remove `prefix`
        # as a literal substring. E.g. 'ggtt5'.lstrip('gt') -> '5'. It also
        # means a key that never started with `prefix` may still validate
        # (lstrip is then a no-op and the int() check decides). Presumably
        # acceptable for the expected key space -- confirm before changing.
        trailing = conkey.lstrip(prefix)
        if trailing == '' and conkey:  # conkey is not empty
            # Every character belonged to the prefix set; counts as valid.
            return True
        try:
            int(trailing)
            return True
        except ValueError:
            # Not numeric for this prefix; try the next one.
            pass
    return False
|
def matches(self, a, b, **config):
    """The message must match by package"""
    # Compare the package extracted from each update title.
    title_a = a['msg']['update']['title']
    title_b = b['msg']['update']['title']
    return self.processor._u2p(title_a)[0] == self.processor._u2p(title_b)[0]
|
def main(argv=None):
    """The entry point of the application."""
    if argv is None:
        argv = sys.argv[1:]
    # The module docstring doubles as the docopt usage text (skip the lead-in).
    usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
    version = 'Gitpress ' + __version__
    # Parse options.
    arguments = docopt(usage, argv=argv, version=version)
    # Execute command.
    try:
        return execute(arguments)
    except RepositoryNotFoundError as ex:
        error('No Gitpress repository found at', ex.directory)
|
def almost_unitary(gate: Gate) -> bool:
    """Return true if gate tensor is (almost) unitary"""
    # A unitary satisfies U @ U^dagger == identity.
    product = (gate @ gate.H).asoperator()
    dim = 2 ** gate.qubit_nb
    return np.allclose(asarray(product), np.eye(dim), atol=TOLERANCE)
|
def extract_names(sender):
    """Tries to extract sender's names from `From:` header.

    It could extract not only the actual names but e.g.
    the name of the company, parts of email, etc.

    >>> extract_names('Sergey N.  Obukhov <serobnic@mail.ru>')
    ['Sergey', 'Obukhov', 'serobnic']
    >>> extract_names('')
    """
    sender = to_unicode(sender, precise=True)
    # Replace every non-alphabetic character with a space so words split cleanly.
    letters_only = ''.join(char if char.isalpha() else ' ' for char in sender)
    # Drop one-letter words and known non-name tokens
    # (words like `ru`, `gmail`, `com`, `org`, etc.).
    candidates = [word for word in letters_only.split()
                  if len(word) > 1 and word not in BAD_SENDER_NAMES]
    # De-duplicate (result order is unspecified).
    return list(set(candidates))
|
def validate_code_path(valid_paths, code_path, **kw):
    """Raise an exception if code_path is not one of our
    whitelisted valid_paths.
    """
    # code_path has the form "<root>:<name>"; split on the first colon only.
    root, name = code_path.split(':', 1)
    allowed = valid_paths[root]
    if name in allowed:
        return
    raise ValueError("%r is not a valid code_path" % code_path)
|
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
              Ps=101325., Zavg=1, E=1):
    r'''Calculation function for dealing with flow of a compressible gas in a
    pipeline with the Fritzsche formula. Can calculate any of the following,
    given all other inputs:

    * Flow rate
    * Upstream pressure
    * Downstream pressure
    * Diameter of pipe
    * Length of pipe

    A variety of different constants and expressions have been presented
    for the Fritzsche formula. Here, the form as in [1]_ is used but with
    all inputs in base SI units.

    .. math::
        Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
        {L \cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538} D^{2.69}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If more or fewer than exactly one of `L`, `D`, `P1`, `P2`, `Q`
        is left unspecified.

    Examples
    --------
    >>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    39.421535157535565

    References
    ----------
    .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    '''
    # Leading constant converted to base SI from the 2.827E-3 form (mm, kPa,
    # km, m^3/h): 2.827E-3/(3600*24)*1000**2.69*1000**0.538*1000/(1000**2)**0.538
    c5 = 93.50009798751128188757518688244137811221
    c2 = 0.8587  # specific-gravity exponent
    c3 = 0.538   # main pressure/length group exponent
    c4 = 2.69    # diameter exponent
    if Q is None and (None not in [L, D, P1, P2]):
        return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**c2*Tavg*L*Zavg))**c3*D**c4
    elif D is None and (None not in [L, Q, P1, P2]):
        return (Ps*Q*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*Ts*c5))**(1./c4)
    elif P1 is None and (None not in [L, Q, D, P2]):
        return (L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P2**2)**0.5
    elif P2 is None and (None not in [L, Q, D, P1]):
        return (-L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P1**2)**0.5
    elif L is None and (None not in [P2, Q, D, P1]):
        return SG**(-c2)*(D**(-c4)*Ps*Q/(E*Ts*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg)
    else:
        # ValueError (a subclass of the old bare Exception) is the
        # conventional type for bad argument combinations.
        raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
|
def _getPowerupInterfaces(self):
    """Collect powerup interfaces this object declares that it can be
    installed on.

    Reads the class attribute ``powerupInterfaces``, whose entries are
    either bare interfaces (given default priority 0) or
    ``(interface, priority)`` pairs, then lets an optional
    ``__getPowerupInterfaces__`` hook transform the list. The hook's
    result is validated to be an iterable of 2-tuples.

    :return: list of ``(interface, priority)`` tuples
    :raises ValueError: if the hook returns something that is not an
        iterable of 2-tuples
    """
    powerupInterfaces = getattr(self.__class__, "powerupInterfaces", ())
    pifs = []
    for x in powerupInterfaces:
        if isinstance(x, type(Interface)):  # just an interface
            pifs.append((x, 0))
        else:  # an interface and a priority
            pifs.append(x)
    m = getattr(self, "__getPowerupInterfaces__", None)
    if m is not None:
        pifs = m(pifs)
        try:
            pifs = [(i, p) for (i, p) in pifs]
        # Unpacking a wrong-length element raises ValueError, but a
        # non-iterable element raises TypeError -- catch both so the
        # promised diagnostic is actually produced.
        except (TypeError, ValueError):
            raise ValueError("return value from %r.__getPowerupInterfaces__"
                             " not an iterable of 2-tuples" % (self,))
    return pifs
|
def send_ether_over_wpa(self, pkt, **kwargs):
    """Send an Ethernet packet using the WPA channel.

    Extra keyword arguments are accepted for compatibility and ignored.
    Broadcast frames go to the group channel; anything else must be
    addressed to the single associated client.
    """
    payload = LLC() / SNAP() / pkt[Ether].payload
    destination = pkt.dst
    if destination == "ff:ff:ff:ff:ff:ff":
        self.send_wpa_to_group(payload, destination)
    else:
        assert destination == self.client
        self.send_wpa_to_client(payload)
|
def normalized_messages(self, no_field_name='_entity'):
    """Return all the error messages as a dictionary.

    A dict ``messages`` value is returned unchanged; otherwise the
    messages are mapped onto every known field name, or onto
    *no_field_name* when no field names are recorded.
    """
    messages = self.messages
    if isinstance(messages, dict):
        return messages
    if not self.field_names:
        return {no_field_name: messages}
    return {name: messages for name in self.field_names}
|
def as_dict(self, join='.'):
    """Returns the error as a path to message dictionary. Paths are joined
    with the ``join`` string; an empty path yields an empty-string key.
    """
    if self.path:
        key = join.join(str(node) for node in self.path)
    else:
        key = ''
    return {key: self.message}
|
def destroy(self, request, pk=None):
    '''For DELETE actions, actually deactivate the user, don't delete.

    Marks the account inactive and persists it, then replies with an
    empty 204 response as if the resource had been removed.
    '''
    account = self.get_object()
    account.is_active = False
    account.save()
    return Response(status=status.HTTP_204_NO_CONTENT)
|
def ytickangle(self, angle, index=1):
    """Set the angle of the y-axis tick labels.

    Parameters
    ----------
    angle : int
        Angle in degrees
    index : int, optional
        Y-axis index

    Returns
    -------
    Chart
    """
    axis_key = 'yaxis{0}'.format(index)
    self.layout[axis_key]['tickangle'] = angle
    return self
|
def make_response(status, headers, payload, environ):
    """This function generates an appropriate response object for this async
    mode.

    The numeric status code is taken from the leading token of the
    WSGI-style status string (e.g. ``"200 OK"`` -> 200).
    """
    status_code = int(status.split()[0])
    return Response(body=payload, status=status_code, headers=headers)
|
def is_transport_reaction_formulae(rxn):
    """Return boolean if a reaction is a transport reaction (from formulae).

    Parameters
    ----------
    rxn : cobra.Reaction
        The metabolic reaction under investigation.
    """
    # Formulas appearing on both sides of the reaction are candidates for
    # metabolites that are moved between compartments unchanged.
    reactant_formulas = {met.formula for met in rxn.reactants}
    product_formulas = {met.formula for met in rxn.products}
    transported = [formula for formula in reactant_formulas
                   if formula in product_formulas]
    # Elemental differences between compartments in the reaction.
    delta_dicts = find_transported_elements(rxn)
    nonzero_deltas = [v for (k, v) in iteritems(delta_dicts) if v != 0]
    # Exclude reactions such as oxidoreductases where no net transport of
    # Hydrogen occurs, but rather an exchange of electrons or charges
    # effecting a change in protonation.
    proton_exchange_only = (set(transported) != set('H')
                            and list(delta_dicts.keys()) == ['H'])
    if proton_exchange_only:
        return None
    # All other reactions with a non-zero amount of transported elements
    # (and not exchange/biomass) count as transport reactions, including
    # those where the metabolite reacts with a carrier molecule.
    if sum(nonzero_deltas):
        return True
|
def get_x(self, var, coords=None):
    """Get the centers of the triangles in the x-dimension

    Parameters
    ----------
    %(CFDecoder.get_y.parameters)s

    Returns
    -------
    %(CFDecoder.get_y.returns)s"""
    if coords is None:
        # fall back to the coordinates of the decoder's own dataset
        coords = self.ds.coords
    # first we try the super class
    ret = super(UGridDecoder, self).get_x(var, coords)
    # but if that doesn't work because we get the variable name in the
    # dimension of `var`, we use the means of the triangles
    if ret is None or ret.name in var.dims:
        bounds = self.get_cell_node_coord(var, axis='x', coords=coords)
        if bounds is not None:
            # triangle centers = mean of the node coordinates per cell
            centers = bounds.mean(axis=-1)
            x = self.get_nodes(self.get_mesh(var, coords), coords)[0]
            try:
                cls = xr.IndexVariable
            except AttributeError:  # xarray < 0.9
                cls = xr.Coordinate
            # reuse name/attrs of the node coordinate for the new variable
            return cls(x.name, centers, attrs=x.attrs.copy())
    # NOTE(review): when the superclass result is unusable and no cell
    # bounds are available, this implicitly returns None.
|
def nodes_to_dict_of_dataframes(grid, nodes, lv_transformer=True):
    """Creates dictionary of dataframes containing grid

    Parameters
    ----------
    grid : ding0.Network
    nodes : list of ding0 grid components objects
        Nodes of the grid graph
    lv_transformer : bool, True
        Toggle transformer representation in power flow analysis

    Returns
    -------
    components : dict of pandas.DataFrame
        DataFrames contain components attributes. Dict is keyed by components
        type
    components_data : dict of pandas.DataFrame
        DataFrame containing components time-varying data
    """
    generator_instances = [MVStationDing0, GeneratorDing0]
    # TODO: MVStationDing0 has a slack generator
    # Power-factor assumptions used to derive reactive power (Q) from
    # active power (P) below.
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
    cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
    srid = int(cfg_ding0.get('geo', 'srid'))
    load_in_generation_case = cfg_ding0.get('assumptions', 'load_in_generation_case')
    generation_in_load_case = cfg_ding0.get('assumptions', 'generation_in_load_case')
    Q_factor_load = tan(acos(cos_phi_load))
    Q_factor_generation = tan(acos(cos_phi_feedin))
    voltage_set_slack = cfg_ding0.get("mv_routing_tech_constraints", "mv_station_v_level_operation")
    # node power data is in kW; PyPSA expects MW
    kw2mw = 1e-3
    # define dictionaries (column name -> list of values; turned into
    # DataFrames at the end of the function)
    buses = {'bus_id': [], 'v_nom': [], 'geom': [], 'grid_id': []}
    bus_v_mag_set = {'bus_id': [], 'temp_id': [], 'v_mag_pu_set': [], 'grid_id': []}
    generator = {'generator_id': [], 'bus': [], 'control': [], 'grid_id': [], 'p_nom': []}
    generator_pq_set = {'generator_id': [], 'temp_id': [], 'p_set': [], 'grid_id': [], 'q_set': []}
    load = {'load_id': [], 'bus': [], 'grid_id': []}
    load_pq_set = {'load_id': [], 'temp_id': [], 'p_set': [], 'grid_id': [], 'q_set': []}
    # # TODO: consider other implications of `lv_transformer is True`
    # if lv_transformer is True:
    #     bus_instances.append(Transformer)
    # # TODO: only for debugging, remove afterwards
    # import csv
    # nodeslist = sorted([node.__repr__() for node in nodes
    #                     if node not in grid.graph_isolated_nodes()])
    # with open('/home/guido/ding0_debug/nodes_via_dataframe.csv', 'w', newline='') as csvfile:
    #     writer = csv.writer(csvfile, delimiter='\n')
    #     writer.writerow(nodeslist)
    # NOTE(review): the two-element p_set/q_set lists appear to hold the
    # values for the two worst-case scenarios (load case / generation
    # case), one per temp_id -- TODO confirm against the PyPSA exporter.
    for node in nodes:
        if node not in grid.graph_isolated_nodes():
            # buses only
            if isinstance(node, MVCableDistributorDing0):
                buses['bus_id'].append(node.pypsa_id)
                buses['v_nom'].append(grid.v_level)
                buses['geom'].append(from_shape(node.geo_data, srid=srid))
                buses['grid_id'].append(grid.id_db)
                bus_v_mag_set['bus_id'].append(node.pypsa_id)
                bus_v_mag_set['temp_id'].append(1)
                bus_v_mag_set['v_mag_pu_set'].append([1, 1])
                bus_v_mag_set['grid_id'].append(grid.id_db)
            # bus + generator
            elif isinstance(node, tuple(generator_instances)):
                # slack generator
                if isinstance(node, MVStationDing0):
                    logger.info('Only MV side bus of MVStation will be added.')
                    generator['generator_id'].append('_'.join(['MV', str(grid.id_db), 'slack']))
                    generator['control'].append('Slack')
                    generator['p_nom'].append(0)
                    bus_v_mag_set['v_mag_pu_set'].append([voltage_set_slack, voltage_set_slack])
                # other generators
                if isinstance(node, GeneratorDing0):
                    generator['generator_id'].append('_'.join(['MV', str(grid.id_db), 'gen', str(node.id_db)]))
                    generator['control'].append('PQ')
                    generator['p_nom'].append(node.capacity * node.capacity_factor)
                    generator_pq_set['generator_id'].append('_'.join(['MV', str(grid.id_db), 'gen', str(node.id_db)]))
                    generator_pq_set['temp_id'].append(1)
                    generator_pq_set['p_set'].append([node.capacity * node.capacity_factor * kw2mw * generation_in_load_case, node.capacity * node.capacity_factor * kw2mw])
                    generator_pq_set['q_set'].append([node.capacity * node.capacity_factor * kw2mw * Q_factor_generation * generation_in_load_case, node.capacity * node.capacity_factor * kw2mw * Q_factor_generation])
                    generator_pq_set['grid_id'].append(grid.id_db)
                    bus_v_mag_set['v_mag_pu_set'].append([1, 1])
                # bus entries shared by slack and PQ generators
                buses['bus_id'].append(node.pypsa_id)
                buses['v_nom'].append(grid.v_level)
                buses['geom'].append(from_shape(node.geo_data, srid=srid))
                buses['grid_id'].append(grid.id_db)
                bus_v_mag_set['bus_id'].append(node.pypsa_id)
                bus_v_mag_set['temp_id'].append(1)
                bus_v_mag_set['grid_id'].append(grid.id_db)
                generator['grid_id'].append(grid.id_db)
                generator['bus'].append(node.pypsa_id)
            # aggregated load at hv/mv substation
            elif isinstance(node, LVLoadAreaCentreDing0):
                load['load_id'].append(node.pypsa_id)
                load['bus'].append('_'.join(['HV', str(grid.id_db), 'trd']))
                load['grid_id'].append(grid.id_db)
                load_pq_set['load_id'].append(node.pypsa_id)
                load_pq_set['temp_id'].append(1)
                load_pq_set['p_set'].append([node.lv_load_area.peak_load * kw2mw, node.lv_load_area.peak_load * kw2mw * load_in_generation_case])
                load_pq_set['q_set'].append([node.lv_load_area.peak_load * kw2mw * Q_factor_load, node.lv_load_area.peak_load * kw2mw * Q_factor_load * load_in_generation_case])
                load_pq_set['grid_id'].append(grid.id_db)
                # generator representing generation capacity of aggregate LA
                # analogously to load, generation is connected directly to
                # HV-MV substation
                generator['generator_id'].append('_'.join(['MV', str(grid.id_db), 'lcg', str(node.id_db)]))
                generator['control'].append('PQ')
                generator['p_nom'].append(node.lv_load_area.peak_generation)
                generator['grid_id'].append(grid.id_db)
                generator['bus'].append('_'.join(['HV', str(grid.id_db), 'trd']))
                generator_pq_set['generator_id'].append('_'.join(['MV', str(grid.id_db), 'lcg', str(node.id_db)]))
                generator_pq_set['temp_id'].append(1)
                generator_pq_set['p_set'].append([node.lv_load_area.peak_generation * kw2mw * generation_in_load_case, node.lv_load_area.peak_generation * kw2mw])
                generator_pq_set['q_set'].append([node.lv_load_area.peak_generation * kw2mw * Q_factor_generation * generation_in_load_case, node.lv_load_area.peak_generation * kw2mw * Q_factor_generation])
                generator_pq_set['grid_id'].append(grid.id_db)
            # bus + aggregate load of lv grids (at mv/ls substation)
            elif isinstance(node, LVStationDing0):
                # Aggregated load representing load in LV grid
                load['load_id'].append('_'.join(['MV', str(grid.id_db), 'loa', str(node.id_db)]))
                load['bus'].append(node.pypsa_id)
                load['grid_id'].append(grid.id_db)
                load_pq_set['load_id'].append('_'.join(['MV', str(grid.id_db), 'loa', str(node.id_db)]))
                load_pq_set['temp_id'].append(1)
                load_pq_set['p_set'].append([node.peak_load * kw2mw, node.peak_load * kw2mw * load_in_generation_case])
                load_pq_set['q_set'].append([node.peak_load * kw2mw * Q_factor_load, node.peak_load * kw2mw * Q_factor_load * load_in_generation_case])
                load_pq_set['grid_id'].append(grid.id_db)
                # bus at primary MV-LV transformer side
                buses['bus_id'].append(node.pypsa_id)
                buses['v_nom'].append(grid.v_level)
                buses['geom'].append(from_shape(node.geo_data, srid=srid))
                buses['grid_id'].append(grid.id_db)
                bus_v_mag_set['bus_id'].append(node.pypsa_id)
                bus_v_mag_set['temp_id'].append(1)
                bus_v_mag_set['v_mag_pu_set'].append([1, 1])
                bus_v_mag_set['grid_id'].append(grid.id_db)
                # generator representing generation capacity in LV grid
                generator['generator_id'].append('_'.join(['MV', str(grid.id_db), 'gen', str(node.id_db)]))
                generator['control'].append('PQ')
                generator['p_nom'].append(node.peak_generation)
                generator['grid_id'].append(grid.id_db)
                generator['bus'].append(node.pypsa_id)
                generator_pq_set['generator_id'].append('_'.join(['MV', str(grid.id_db), 'gen', str(node.id_db)]))
                generator_pq_set['temp_id'].append(1)
                generator_pq_set['p_set'].append([node.peak_generation * kw2mw * generation_in_load_case, node.peak_generation * kw2mw])
                generator_pq_set['q_set'].append([node.peak_generation * kw2mw * Q_factor_generation * generation_in_load_case, node.peak_generation * kw2mw * Q_factor_generation])
                generator_pq_set['grid_id'].append(grid.id_db)
            elif isinstance(node, CircuitBreakerDing0):
                # TODO: remove this elif-case if CircuitBreaker are removed from graph
                continue
            else:
                raise TypeError("Node of type", node, "cannot be handled here")
        else:
            # isolated node: warn (circuit breakers are expected to be isolated)
            if not isinstance(node, CircuitBreakerDing0):
                add_info = "LA is aggr. {0}".format(node.lv_load_area.is_aggregated)
            else:
                add_info = ""
            logger.warning("Node {0} is not connected to the graph and will "
                           "be omitted in power flow analysis. {1}".format(node, add_info))
    # assemble the static component tables and their time-varying data
    components = {'Bus': DataFrame(buses).set_index('bus_id'),
                  'Generator': DataFrame(generator).set_index('generator_id'),
                  'Load': DataFrame(load).set_index('load_id')}
    components_data = {'Bus': DataFrame(bus_v_mag_set).set_index('bus_id'),
                       'Generator': DataFrame(generator_pq_set).set_index('generator_id'),
                       'Load': DataFrame(load_pq_set).set_index('load_id')}
    # with open('/home/guido/ding0_debug/number_of_nodes_buses.csv', 'a') as csvfile:
    #     csvfile.write(','.join(['\n', str(len(nodes)), str(len(grid.graph_isolated_nodes())), str(len(components['Bus']))]))
    return components, components_data
|
def is_domain_class_member_attribute(ent, attr_name):
    """Checks if the given attribute name is a entity attribute of the given
    registered resource (i.e. its kind is MEMBER).
    """
    kind = get_domain_class_attribute(ent, attr_name).kind
    return kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER
|
def coords_on_grid(self, x, y):
    """Snap coordinates on the grid with integer coordinates.

    Floats are rounded via ``self._round`` and truncated to int;
    integers pass through unchanged. When the y axis does not point
    down, y is flipped against ``self._extents``.
    """
    snapped_x = int(self._round(x)) if isinstance(x, float) else x
    snapped_y = int(self._round(y)) if isinstance(y, float) else y
    if not self._y_coord_down:
        snapped_y = self._extents - snapped_y
    return snapped_x, snapped_y
|
def colorize(text, ansi=True):
    """If the client wants ansi, replace the tokens with ansi sequences --
    otherwise, simply strip them out. ``^^`` escapes a literal caret.
    """
    if not ansi:
        return strip_caret_codes(text)
    # Protect escaped carets with a NUL placeholder while substituting.
    escaped = text.replace('^^', '\x00')
    for token, code in _ANSI_CODES:
        escaped = escaped.replace(token, code)
    return escaped.replace('\x00', '^')
|
def rename(dct, mapping):
    """Rename the keys of a dictionary with the given mapping

    >>> rename({"a": 1, "BBB": 2}, {"a": "AAA"})
    {'AAA': 1, 'BBB': 2}
    """
    # Start from a copy without any of the keys to be renamed, then copy
    # each mapped value over under its new name.
    renamed = omit(dct, *mapping.keys())
    for old_key, new_key in mapping.items():
        if old_key in dct:
            renamed[new_key] = dct[old_key]
    return renamed
|
def get_lowest_probable_prepared_certificate_in_view(self, view_no) -> Optional[int]:
    """Return lowest pp_seq_no of the view for which can be prepared but
    choose from unprocessed PRE-PREPAREs and PREPAREs.

    A pp_seq_no qualifies when a pending PRE-PREPARE exists for it and
    the matching PREPAREs reach the prepare quorum. Returns None when
    no such sequence number exists.
    """
    # TODO: Naive implementation, dont need to iterate over the complete
    # data structures, fix this later
    # pp_seq_no of PRE-PREPAREs, kept sorted so the first match below is
    # the lowest one
    seq_no_pp = SortedList()
    # pp_seq_no of PREPAREs that already have a quorum of PREPARE messages
    seq_no_p = set()
    for (v, p) in self.prePreparesPendingPrevPP:
        if v == view_no:
            seq_no_pp.add(p)
        if v > view_no:
            # NOTE(review): the early break assumes the container iterates
            # in (view, seq_no) order -- TODO confirm
            break
    for (v, p), pr in self.preparesWaitingForPrePrepare.items():
        if v == view_no and len(pr) >= self.quorums.prepare.value:
            seq_no_p.add(p)
    # first (lowest) PRE-PREPARE seq_no that also has a PREPARE quorum
    for n in seq_no_pp:
        if n in seq_no_p:
            return n
    return None
|
def average_series(self, *args, **kwargs) -> InfoArray:
    """Average the actual time series of the |Variable| object for all
    time points.

    Method |IOSequence.average_series| works similarly as method
    |Variable.average_values| of class |Variable|, from which we
    borrow some examples.  However, firstly, we have to prepare a
    |Timegrids| object to define the |IOSequence.series| length:

    >>> from hydpy import pub
    >>> pub.timegrids = '2000-01-01', '2000-01-04', '1d'

    As shown for method |Variable.average_values|, for 0-dimensional
    |IOSequence| objects the result of |IOSequence.average_series|
    equals |IOSequence.series| itself:

    >>> from hydpy.core.sequencetools import IOSequence
    >>> class SoilMoisture(IOSequence):
    ...     NDIM = 0
    >>> sm = SoilMoisture(None)
    >>> sm.activate_ram()
    >>> import numpy
    >>> sm.series = numpy.array([190.0, 200.0, 210.0])
    >>> sm.average_series()
    InfoArray([ 190.,  200.,  210.])

    For |IOSequence| objects with an increased dimensionality, a
    weighting parameter is required, again:

    >>> SoilMoisture.NDIM = 1
    >>> sm.shape = 3
    >>> sm.activate_ram()
    >>> sm.series = (
    ...     [190.0, 390.0, 490.0],
    ...     [200.0, 400.0, 500.0],
    ...     [210.0, 410.0, 510.0])
    >>> from hydpy.core.parametertools import Parameter
    >>> class Area(Parameter):
    ...     NDIM = 1
    ...     shape = (3,)
    ...     value = numpy.array([1.0, 1.0, 2.0])
    >>> area = Area(None)
    >>> SoilMoisture.refweights = property(lambda self: area)
    >>> sm.average_series()
    InfoArray([ 390.,  400.,  410.])

    The documentation on method |Variable.average_values| provides
    many examples on how to use different masks in different ways.
    Here we restrict ourselves to the first example, where a new
    mask enforces that |IOSequence.average_series| takes only the
    first two columns of the `series` into account:

    >>> from hydpy.core.masktools import DefaultMask
    >>> class Soil(DefaultMask):
    ...     @classmethod
    ...     def new(cls, variable, **kwargs):
    ...         return cls.array2mask([True, True, False])
    >>> SoilMoisture.mask = Soil()
    >>> sm.average_series()
    InfoArray([ 290.,  300.,  310.])
    """
    try:
        if not self.NDIM:
            # 0-dimensional: the series is already the per-timestep value
            array = self.series
        else:
            mask = self.get_submask(*args, **kwargs)
            if numpy.any(mask):
                # normalised weights over the selected entries
                weights = self.refweights[mask]
                weights /= numpy.sum(weights)
                series = self.series[:, mask]
                # sum over all non-time axes -> one value per time step
                axes = tuple(range(1, self.NDIM + 1))
                array = numpy.sum(weights * series, axis=axes)
            else:
                # empty mask: no entries to average
                return numpy.nan
        return InfoArray(array, info={'type': 'mean'})
    except BaseException:
        # re-raise with additional context about the affected sequence
        objecttools.augment_excmessage(
            'While trying to calculate the mean value of '
            'the internal time series of sequence %s'
            % objecttools.devicephrase(self))
|
async def terminateInstance(self, *args, **kwargs):
    """Terminate an instance.

    Terminate an instance in a specified region.

    This method is ``experimental``.
    """
    endpoint = self.funcinfo["terminateInstance"]
    return await self._makeApiCall(endpoint, *args, **kwargs)
|
def install(self, package, force=False, upgrade=False, options=None):
    """Installs the given package into this virtual environment, as
    specified in pip's package syntax or a tuple of ('name', 'ver'),
    only if it is not already installed. Some valid examples:

        'Django'
        'Django==1.5'
        ('Django', '1.5')
        '-r requirements.txt'

    If `force` is True, force an installation. If `upgrade` is True,
    attempt to upgrade the package in question. If both `force` and
    `upgrade` are True, reinstall the package and its dependencies.
    The `options` is a list of strings that can be used to pass to
    pip.

    :raises VirtualenvReadonlyException: if this environment is read-only
    :raises ValueError: if `options` is not a list
    :raises PackageInstallationException: if pip exits with an error
    """
    if self.readonly:
        raise VirtualenvReadonlyException()
    if options is None:
        options = []
    if isinstance(package, tuple):
        package = '=='.join(package)
    if package.startswith(('-e', '-r')):
        package_args = package.split()
    else:
        package_args = [package]
    if not (force or upgrade) and (package_args[0] != '-r'
                                   and self.is_installed(package_args[-1])):
        self._write_to_log('%s is already installed, skipping (use force=True to override)' % package_args[-1])
        return
    if not isinstance(options, list):
        raise ValueError("Options must be a list of strings.")
    # Copy before extending so the caller's list is never mutated
    # (the old `options += [...]` modified it in place).
    options = list(options)
    if upgrade:
        options += ['--upgrade']
        if force:
            options += ['--force-reinstall']
    elif force:
        options += ['--ignore-installed']
    try:
        self._execute_pip(['install'] + package_args + options)
    except subprocess.CalledProcessError as e:
        raise PackageInstallationException((e.returncode, e.output, package))
|
def input_files_to_stage(self):
    """Return a list of the input files needed by this link.

    For `Link` sub-classes this will return the union of all the input
    files of each internal `Link`. That is to say this will include
    files produced by one `Link` in a `Chain` and used as input to
    another `Link` in the `Chain`.
    """
    # For input files we only want files that were marked as input
    return [key for key, val in self.file_dict.items()
            if val & FileFlags.in_stage_mask == FileFlags.in_stage_mask]
|
def oei(cn, ns=None, lo=None, di=None, iq=None, ico=None, pl=None, fl=None,
        fs=None, ot=None, coe=None, moc=None):
    # pylint: disable=too-many-arguments, redefined-outer-name
    """Open an enumeration session to enumerate the instances of a class
    (including instances of its subclasses) in a namespace.

    This function is a wrapper for
    :meth:`~pywbem.WBEMConnection.OpenEnumerateInstances`.
    Use the :func:`~wbemcli.piwp` function to retrieve the next set of
    instances or the :func:`~wbcmeli.ce` function to close the enumeration
    session before it is complete.

    Parameters:

      cn (string or :class:`~pywbem.CIMClassName`): Name of the class to be
        enumerated (case independent); the `host` attribute of a
        `CIMClassName` is ignored.
      ns (string): CIM namespace (case independent). `None` defaults to the
        namespace of `cn` (if a `CIMClassName`) or the connection default.
      lo (bool): LocalOnly flag -- exclude inherited properties. `None` uses
        the server default of `True`. Deprecated in DSP0200; callers should
        pass `False`.
      di (bool): DeepInheritance flag -- include properties added by
        subclasses. `None` uses the server default of `True`.
      iq (bool): IncludeQualifiers flag. `None` uses the server default of
        `False`. Deprecated in DSP0200; clients cannot rely on qualifiers
        being returned.
      ico (bool): IncludeClassOrigin flag -- include class origin information
        for the properties. `None` uses the server default of `False`.
        Deprecated in DSP0200; servers may treat any value as `False`.
      pl (string or iterable of strings): PropertyList -- names of properties
        to be included; an empty iterable includes no properties; `None`
        includes all.
      fl (string): Filter query language for `fs` (e.g. "DMTF:FQL", see
        DSP0212). `None` disables filtering.
      fs (string): Filter query in the language given by `fl`. `None`
        disables filtering.
      ot (:class:`~pywbem.Uint32`): Operation timeout in seconds -- minimum
        time the server keeps the session open between requests; 0 means
        never time out (the server may reject it); `None` uses the server
        default.
      coe (bool): Continue-on-error flag. `None` uses the server default of
        `False`.
      moc (:class:`~pywbem.Uint32`): Maximum number of objects to return for
        this operation. `None` uses the server default of 0.

    Returns:

      A :func:`~py:collections.namedtuple` with items `instances` (list of
      :class:`~pywbem.CIMInstance`), `eos` (bool -- `True` when the session
      is exhausted) and `context` (tuple of server context and namespace to
      pass to the next pull or close operation).
    """
    return CONN.OpenEnumerateInstances(
        cn,
        ns,
        LocalOnly=lo,
        DeepInheritance=di,
        IncludeQualifiers=iq,
        IncludeClassOrigin=ico,
        PropertyList=pl,
        FilterQueryLanguage=fl,
        FilterQuery=fs,
        OperationTimeout=ot,
        ContinueOnError=coe,
        MaxObjectCount=moc)
|
def WalkChildren(elem):
    """Walk the XML tree of children below elem, returning each in order.

    Yields every descendant DOM node of `elem` in document (pre-)order;
    `elem` itself is not yielded.
    """
    for child in elem.childNodes:
        yield child
        # Recurse without rebinding the parameter name (the original
        # shadowed `elem` with the inner loop variable).
        yield from WalkChildren(child)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.