signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def align(time, time2, magnitude, magnitude2, error, error2):
    """Synchronize two light-curves observed in different bands.

    Builds one pandas DataFrame per band, indexed by observation time,
    and inner-joins them so only epochs present in both series survive.
    Missing errors default to zero arrays.

    Returns:
        aligned_time, aligned_magnitude, aligned_magnitude2,
        aligned_error, aligned_error2
    """
    if error is None:
        error = np.zeros(time.shape)
    if error2 is None:
        error2 = np.zeros(time2.shape)
    # The join treats the first frame as the shorter series; swap when
    # the first series is actually the longer one, and swap back below.
    swapped = len(time) > len(time2)
    short = pd.DataFrame({"mag": magnitude, "error": error}, index=time)
    long_ = pd.DataFrame({"mag": magnitude2, "error": error2}, index=time2)
    if swapped:
        short, long_ = long_, short
    merged = short.join(long_, how="inner", rsuffix='2')
    aligned_time = merged.index.values
    mag_a, mag_b = merged.mag.values, merged.mag2.values
    err_a, err_b = merged.error.values, merged.error2.values
    if swapped:
        mag_a, mag_b = mag_b, mag_a
        err_a, err_b = err_b, err_a
    return aligned_time, mag_a, mag_b, err_a, err_b
def set_querier_mode(self, dpid, server_port):
    """Configure which datapath acts as the IGMP querier.

    Only a single querier is supported: calling this several times keeps
    only the most recent (dpid, server_port) pair, and any querier thread
    started for a previous configuration is killed.
    """
    self.dpid = dpid
    self.server_port = server_port
    thread = self._querier_thread
    if thread:
        hub.kill(thread)
        self._querier_thread = None
def failed_request_exception(message, r):
    """Build a click.ClickException describing a failed HTTP request.

    Tries to extract a structured ``{"error": {"code": ..., "message": ...}}``
    payload from the response body; falls back to the raw status code and
    text when the body is not JSON or does not have the expected shape.
    The original only caught ValueError, so a JSON body without the
    ``error``/``code`` keys leaked a KeyError/TypeError to the caller.

    :param message: human-readable prefix for the error text
    :param r: the failed HTTP response object (``.text``, ``.status_code``)
    :return: a ``click.ClickException`` ready to be raised
    """
    try:
        resp = json.loads(r.text)
        message = '%s: %d\n%s' % (
            message, resp['error']['code'], resp['error']['message'])
        return click.ClickException(message)
    except (ValueError, KeyError, TypeError):
        # Fall back on the raw text response if the error is not structured.
        return click.ClickException(
            '%s: %d\n%s' % (message, r.status_code, r.text))
def update_file(filename, items):
    '''Edit *filename* in place, substituting ``{key}`` placeholders.

    Every occurrence of ``{key}`` is replaced with the matching value from
    the *items* dict. When the filename ends with ``addon.xml`` the values
    are quoted and escaped for use as XML attribute values.
    '''
    # TODO: Implement something in the templates to denote whether the value
    # being replaced is an XML attribute or a value. Perhaps move to dynamic
    # XML tree building rather than string replacement.
    escape_values = filename.endswith('addon.xml')
    with open(filename, 'r') as source:
        contents = source.read()
    for name, value in items.items():
        if escape_values:
            value = saxutils.quoteattr(value)
        contents = contents.replace('{%s}' % name, value)
    with open(filename, 'w') as sink:
        sink.write(contents)
def _evalTimeStr(self, datetimeString, sourceTime):
    """Evaluate text passed by L{_partialParseTimeStr()}.

    Resolves a time word against *sourceTime* and records the achieved
    accuracy ("now" vs. half-day natural-language times) on the current
    parse context.
    """
    s = datetimeString.strip()
    # Let the generic datetime evaluator establish a baseline time first.
    sourceTime = self._evalDT(datetimeString, sourceTime)
    if s in self.ptc.re_values['now']:
        self.currentContext.updateAccuracy(pdtContext.ACU_NOW)
    else:
        # Given string is a natural language time string like
        # lunch, midnight, etc
        sTime = self.ptc.getSource(s, sourceTime)
        if sTime:
            sourceTime = sTime
            self.currentContext.updateAccuracy(pdtContext.ACU_HALFDAY)
    return sourceTime
def add_method(self, loop, callback):
    """Register a coroutine function together with its event loop.

    Args:
        loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
            on which to schedule callbacks
        callback: The :term:`coroutine function` to add
    """
    func, owner = get_method_vars(callback)
    key = (func, id(owner))
    self[key] = owner
    self.event_loop_map[key] = loop
def go_to(self, url_or_text):
    """Load the page at *url_or_text* in the embedded web view.

    Accepts either a plain string (converted to a QUrl) or an existing
    QUrl object.
    """
    target = QUrl(url_or_text) if is_text_string(url_or_text) else url_or_text
    self.webview.load(target)
def get_description_metadata(self):
    """Gets the metadata for a description.

    return: (osid.Metadata) - metadata for the description
    *compliance: mandatory -- This method must be implemented.*
    """
    # Start from the template metadata and add the currently stored text.
    metadata = dict(
        self._mdata['description'],
        existing_string_values=self._my_map['description']['text'])
    return Metadata(**metadata)
def transform(self, v3):
    """Return *v3* rotated by this quaternion.

    :param v3: Vector3 (or length-3 sequence) to be transformed
    :returns: transformed vector, of the same kind as the input
    :raises TypeError: if *v3* is neither a Vector3 nor length-3
    """
    if isinstance(v3, Vector3):
        tx, ty, tz = super(Quaternion, self).transform([v3.x, v3.y, v3.z])
        return Vector3(tx, ty, tz)
    if len(v3) == 3:
        return super(Quaternion, self).transform(v3)
    raise TypeError("param v3 is not a vector type")
def __dict_to_deployment_spec(spec):
    '''Converts a dictionary into a kubernetes AppsV1beta1DeploymentSpec
    instance, copying over every key that maps to a spec attribute.'''
    deployment_spec = AppsV1beta1DeploymentSpec(
        template=spec.get('template', ''))
    for attr, val in iteritems(spec):
        if hasattr(deployment_spec, attr):
            setattr(deployment_spec, attr, val)
    return deployment_spec
def Planck_2015(flat=False, extras=True):
    """Planck 2015 cosmological parameters.

    Values from "Planck 2015: Cosmological parameters", Table 4, column
    TT,TE,EE+lowP+lensing+ext, Ade et al. (2015), arXiv:1502.01589.

    Parameters
    ----------
    flat : boolean
        If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure
        omega_k_0 = 0 exactly. Also sets omega_k_0 = 0 explicitly.
    extras : boolean
        If True, calls add_extras() which sets neutrino number N_nu = 0,
        neutrino density omega_n_0 = 0.0, Helium mass fraction
        Y_He = 0.24.
    """
    cosmo = {
        'omega_b_0': 0.02230 / (0.6774 ** 2),
        'omega_M_0': 0.3089,
        'omega_lambda_0': 0.6911,
        'h': 0.6774,
        'n': 0.9667,
        'sigma_8': 0.8159,
        'tau': 0.066,
        'z_reion': 8.8,
        't_0': 13.799,
    }
    if flat:
        cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
        cosmo['omega_k_0'] = 0.0
    if extras:
        add_extras(cosmo)
    return cosmo
def parse_account_key():
    """Parse the ACME account key and return its textual dump (bytes).

    Runs ``openssl rsa -noout -text`` on the ``account.key`` file stored
    in the system temp directory and returns the captured stdout.

    :raises subprocess.CalledProcessError: if openssl exits non-zero
    """
    LOGGER.info("Parsing account key...")
    cmd = ['openssl', 'rsa', '-in',
           os.path.join(gettempdir(), 'account.key'), '-noout', '-text']
    # Use a context manager so the devnull handle is always closed
    # (the original opened it and never closed it).
    with open(os.devnull, 'wb') as devnull:
        return subprocess.check_output(cmd, stderr=devnull)
def removeAll(self):
    """Remove every selectable from this reactor and return them as a list."""
    return self._removeAll(self._reads, self._writes)
def matrix_to_lines(matrix, x, y, incby=1):
    """Converts the `matrix` into an iterable of ((x1, y1), (x2, y2)) tuples
    which represent a sequence (horizontal line) of dark modules.

    The path starts at the 1st row of the matrix and moves down to the last
    row.

    :param matrix: An iterable of bytearrays.
    :param x: Initial position on the x-axis.
    :param y: Initial position on the y-axis.
    :param incby: Value to move along the y-axis (default: 1).
    :rtype: iterable of (x1, y1), (x2, y2) tuples
    """
    y -= incby  # Move along y-axis so we can simply increment y in the loop
    last_bit = 0x1
    for row in matrix:
        x1 = x
        x2 = x
        y += incby
        for bit in row:
            if last_bit != bit and not bit:
                # A dark run just ended on a light module: emit its segment.
                yield (x1, y), (x2, y)
                x1 = x2
            x2 += 1
            if not bit:
                # Light module: advance the pending segment start past it.
                x1 += 1
            last_bit = bit
        if last_bit:
            # Row ended while inside a dark run: emit the trailing segment.
            yield (x1, y), (x2, y)
        last_bit = 0x0
def subscribe(self, method, *params):
    '''Perform a remote command which will stream events/data to us.

    *method* must be a dotted name ending in ``subscribe``, e.g.
    ``server.peers.subscribe``; any arguments are positional.

    Returns a tuple ``(Future, asyncio.Queue)``: the future carries the
    result of the initial call, and the queue receives the additional
    responses as they happen.
    '''
    assert '.' in method and method.endswith('subscribe')
    return self._send_request(method, params, is_subscribe=True)
def price(self):
    """Current price.

    If the cached value is stale (an update is pending, or this node's
    clock lags its parent's), refresh from the root's time first.
    """
    # if accessing and stale - update first
    if self._needupdate or self.now != self.parent.now:
        self.update(self.root.now)
    return self._price
def request_entity(self, request_entity):
    """Sets the request_entity of this FileSourceCreateOrUpdateRequest.

    :param request_entity: The request_entity of this
        FileSourceCreateOrUpdateRequest.
    :type: str
    :raises ValueError: if the value exceeds 10000 characters
    """
    too_long = request_entity is not None and len(request_entity) > 10000
    if too_long:
        raise ValueError(
            "Invalid value for `request_entity`, length must be less than or equal to `10000`")
    self._request_entity = request_entity
def related_items_changed(self, instance, related_manager):
    """Store the number of related comments on *instance* and save it.

    A custom ``count_queryset`` on the manager is preferred when present,
    allowing managers to implement custom count logic; otherwise the
    manager's plain ``count()`` is used.
    """
    try:
        total = related_manager.count_queryset()
    except AttributeError:
        total = related_manager.count()
    field_template = list(self.fields.keys())[0]
    setattr(instance, field_template % self.related_field_name, total)
    instance.save()
def cmd_fence(self, args):
    '''Handle the "fence" console command and its sub-commands.

    Sub-commands: enable, disable, load <file>, list, move, remove,
    save <file>, show <file>, draw, clear.
    '''
    if len(args) < 1:
        self.print_usage()
        return
    if args[0] == "enable":
        self.set_fence_enabled(1)
    elif args[0] == "disable":
        self.set_fence_enabled(0)
    elif args[0] == "load":
        if len(args) != 2:
            print("usage: fence load <filename>")
            return
        self.load_fence(args[1])
    elif args[0] == "list":
        self.list_fence(None)
    elif args[0] == "move":
        self.cmd_fence_move(args[1:])
    elif args[0] == "remove":
        self.cmd_fence_remove(args[1:])
    elif args[0] == "save":
        if len(args) != 2:
            print("usage: fence save <filename>")
            return
        # list_fence with a filename writes the current fence to that file.
        self.list_fence(args[1])
    elif args[0] == "show":
        if len(args) != 2:
            print("usage: fence show <filename>")
            return
        self.fenceloader.load(args[1])
        self.have_list = True
    elif args[0] == "draw":
        if not 'draw_lines' in self.mpstate.map_functions:
            print("No map drawing available")
            return
        self.mpstate.map_functions['draw_lines'](self.fence_draw_callback)
        print("Drawing fence on map")
    elif args[0] == "clear":
        # Zeroing FENCE_TOTAL (with 3 retries) removes all fence points.
        self.param_set('FENCE_TOTAL', 0, 3)
    else:
        self.print_usage()
def revoke(self, paths: Union[str, Iterable[str]],
           users: Union[str, Iterable[str], User, Iterable[User]]):
    """Revokes all access controls that are associated to the given path
    or collection of paths.

    :param paths: the paths to remove access controls on
    :param users: the users to revoke access controls for. A user may be
        represented as a `User` object or in the form "name#zone"
    """
def sag_entropic_transport(a, b, M, reg, numItermax=10000, lr=None):
    r'''Compute the SAG algorithm to solve the regularized discrete measures
    optimal transport max problem.

    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)

        s.t. \gamma 1 = a

             \gamma^T 1 = b

             \gamma \geq 0

    Where:
    - M is the (ns, nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term with
      :math:`\Omega(\gamma)=\sum_{i,j}\gamma_{i,j}\log(\gamma_{i,j})`
    - a and b are source and target weights (sum to 1)

    The algorithm used for solving the problem is the SAG algorithm
    as proposed in [18]_ [alg. 1].

    Parameters
    ----------
    a : np.ndarray (ns,)
        source measure
    b : np.ndarray (nt,)
        target measure
    M : np.ndarray (ns, nt)
        cost matrix
    reg : float
        Regularization term > 0
    numItermax : int
        number of iterations
    lr : float
        learning rate; defaults to ``1. / max(a / reg)``

    Returns
    -------
    v : np.ndarray (nt,)
        dual variable

    References
    ----------
    [Genevay et al., 2016]:
    Stochastic Optimization for Large-scale Optimal Transport,
    Advances in Neural Information Processing Systems (2016),
    arXiv preprint arxiv:1605.08527.
    '''
    if lr is None:
        lr = 1. / max(a / reg)
    n_source = np.shape(M)[0]
    n_target = np.shape(M)[1]
    cur_beta = np.zeros(n_target)
    # SAG keeps every per-sample gradient plus their running sum.
    stored_gradient = np.zeros((n_source, n_target))
    sum_stored_gradient = np.zeros(n_target)
    for _ in range(numItermax):
        # Pick one random source sample per iteration.
        i = np.random.randint(n_source)
        cur_coord_grad = a[i] * coordinate_grad_semi_dual(b, M, reg,
                                                          cur_beta, i)
        # Replace sample i's stored gradient in the running sum.
        sum_stored_gradient += (cur_coord_grad - stored_gradient[i])
        stored_gradient[i] = cur_coord_grad
        # Averaged-gradient ascent step on the dual variable.
        cur_beta += lr * (1. / n_source) * sum_stored_gradient
    return cur_beta
def write_str2file(pathname, astr):
    """Write a string (or bytes) payload to the file at *pathname*.

    The original opened the file in binary mode and never closed the
    handle on error; under Python 3 writing a ``str`` to a binary-mode
    file raises TypeError. This version picks the mode from the payload
    type and uses a context manager so the handle is always closed.

    :param pathname: path of the file to (over)write
    :param astr: ``str`` or ``bytes`` content to write
    """
    mode = 'wb' if isinstance(astr, bytes) else 'w'
    with open(pathname, mode) as fhandle:
        fhandle.write(astr)
def _key(self):
    """A tuple key that uniquely describes this field.

    Used to compute this instance's hashcode and evaluate equality.

    Returns:
        Tuple[str]: The contents of this :class:`.RowRange`.
    """
    return (self.start_key, self.start_inclusive,
            self.end_key, self.end_inclusive)
def _add_ps2q(self, throat, queue):
    """Helper method to add pores to the cluster queue.

    Finds the pores connected to the newly invaded *throat*, drops the
    ones already invaded, marks the rest as interface pores and pushes
    [entry_pressure, pore_index, 'pore'] entries onto the heap *queue*
    (ordered by entry pressure, since that is the first list element).
    """
    net = self.project.network
    elem_type = 'pore'
    # Find pores connected to newly invaded throat
    Ps = net['throat.conns'][throat]
    # Remove already invaded pores from Ps
    Ps = Ps[self['pore.invasion_sequence'][Ps] <= 0]
    if len(Ps) > 0:
        self._interface_Ps[Ps] = True
        for P in Ps:
            data = []
            # Pc
            data.append(self["pore.entry_pressure"][P])
            # Element Index
            data.append(P)
            # Element Type (Pore of Throat)
            data.append(elem_type)
            hq.heappush(queue, data)
def is_valid_hostname(hostname):
    '''Return True if *hostname* is a valid DNS hostname, otherwise False.

    :raises TypeError: if *hostname* is not a string
    '''
    if not isinstance(hostname, str):
        raise TypeError('hostname must be a string')
    # Strip exactly one trailing dot, if present.
    if hostname.endswith('.'):
        hostname = hostname[:-1]
    if not hostname or len(hostname) > 253:
        return False
    labels = hostname.split('.')
    # The TLD must not be all-numeric.
    if re.match(NUMERIC_REGEX, labels[-1]):
        return False
    return all(LABEL_REGEX.match(label) for label in labels)
def _int_generator(descriptor, bitwidth, unsigned):
    '''Helper to create a basic integer value generator.

    Materializes the candidate integers for the given bit width and
    signedness, then wraps them in an IterValueGenerator named after
    the descriptor.
    '''
    vals = list(values.get_integers(bitwidth, unsigned))
    return gen.IterValueGenerator(descriptor.name, vals)
def get_extra_claims(self, claims_set):
    """Get claims holding extra identity info from the claims set.

    Standard JWT claims (iss, aud, exp, ...) and the configured userid
    claim are excluded.

    :param claims_set: set of claims which was included in the received
        token.
    :return: dict of extra claims, or None if there are none.
    """
    reserved = (self.userid_claim, "iss", "aud", "exp", "nbf", "iat",
                "jti", "refresh_until", "nonce")
    extras = {claim: claims_set[claim]
              for claim in claims_set if claim not in reserved}
    return extras or None
def depthfirstsearch(self, function):
    """Generic depth-first search using a callback function.

    *function* is applied to this node first and then, recursively, to
    each child in order; the walk stops at the first non-None result,
    which is returned. Returns None when every call returned None.
    """
    outcome = function(self)
    if outcome is not None:
        return outcome
    for child in self:
        outcome = child.depthfirstsearch(function)
        if outcome is not None:
            return outcome
    return None
def all_on_off(self, power):
    """Turn all zones on or off.

    Note that the all-on function is not supported by the Russound CAA66,
    although it does support the all-off. On and off are supported by the
    CAV6.6.

    Note: Not tested (acambitsis)

    :param power: power state forwarded into the @pr field of the message
    """
    send_msg = self.create_send_message(
        "F0 7F 00 7F 00 00 @kk 05 02 02 00 00 F1 22 00 00 @pr 00 00 01",
        None, None, power)
    self.send_data(send_msg)
    self.get_response_message()
def quantity_yXL(fig, left, bottom, top, quantity=params.L_yXL,
                 label=r'$\mathcal{L}_{yXL}$'):
    '''Make a 4x4 grid of image plots, each showing the spatial normalized
    connectivity of synapses for one postsynaptic cell type y.

    :param fig: matplotlib figure to draw into
    :param left, bottom, top: GridSpec placement fractions
    :param quantity: dict keyed by cell type in params.y; each value is a
        2D array (layers x presynaptic populations) -- assumed (5, 9) to
        match the tick counts below; TODO confirm against params
    :param label: colorbar label
    '''
    layers = ['L1', 'L2/3', 'L4', 'L5', 'L6']
    # Common color scale across all panels.
    vmin = 0
    vmax = 0
    for y in params.y:
        if quantity[y].max() > vmax:
            vmax = quantity[y].max()
    gs = gridspec.GridSpec(4, 4, left=left, bottom=bottom, top=top)
    for i, y in enumerate(params.y):
        # NOTE: floor division is required for the subplot index; the
        # original used `i / 4`, which is a float under Python 3 and
        # breaks GridSpec indexing.
        ax = fig.add_subplot(gs[i // 4, i % 4])
        # Mask zero entries so they render as background, not color 0.
        masked_array = np.ma.array(quantity[y], mask=quantity[y] == 0)
        im = ax.pcolormesh(masked_array, vmin=vmin, vmax=vmax, cmap=cmap)
        ax.invert_yaxis()
        ax.axis(ax.axis('tight'))
        ax.xaxis.set_ticks_position('top')
        ax.set_xticks(np.arange(9) + 0.5)
        ax.set_yticks(np.arange(5) + 0.5)
        # Only the leftmost column gets layer labels.
        if i % 4 == 0:
            ax.set_yticklabels(layers, )
            ax.set_ylabel('$L$', labelpad=0.)
        else:
            ax.set_yticklabels([])
        # Only the top row gets presynaptic population labels.
        if i < 4:
            ax.set_xlabel(r'$X$', labelpad=-1, fontsize=8)
            ax.set_xticklabels(params.X, rotation=270)
        else:
            ax.set_xticklabels([])
        ax.xaxis.set_label_position('top')
        ax.text(0.5, -0.13, r'$y=$' + y,
                horizontalalignment='center', verticalalignment='center',
                transform=ax.transAxes, fontsize=5.5)
    # Colorbar to the right of the last panel, spanning the grid height.
    rect = np.array(ax.get_position().bounds)
    rect[0] += rect[2] + 0.01
    rect[1] = bottom
    rect[2] = 0.01
    rect[3] = top - bottom
    cax = fig.add_axes(rect)
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label(label, labelpad=0)
def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0):
    """Send a trade-history request and return the HTTP response.

    Arguments:
        symbol -- currency symbol (default 'btcusd')
        limit_trades -- maximum number of trades to return (default 50)
        timestamp -- only return trades after this unix timestamp (default 0)
    """
    endpoint = '/v1/mytrades'
    payload = {
        'request': endpoint,
        'nonce': self.get_nonce(),
        'symbol': symbol,
        'limit_trades': limit_trades,
        'timestamp': timestamp,
    }
    return requests.post(self.base_url + endpoint,
                         headers=self.prepare(payload))
def _default(cls, opts):
    """Setup default logger.

    Configures the root logger at INFO level with the class log format.

    :param opts: parsed options; not used by this default setup
    :return: True on completion
    """
    logging.basicConfig(level=logging.INFO, format=cls._log_format)
    return True
def pairwise_permutations(i, j):
    '''Return all pairwise combinations of members within each group.

    This routine takes two vectors:
    i - the label of each group
    j - the members of the group.

    For instance, take a set of two groups with several members each:

    i | j
    1 | 1
    1 | 2
    1 | 3
    2 | 1
    2 | 4
    2 | 5
    2 | 6

    The output will be

    i | j1 | j2
    1 | 1  | 2
    1 | 1  | 3
    1 | 2  | 3
    2 | 1  | 4
    2 | 1  | 5
    2 | 1  | 6
    2 | 4  | 5
    2 | 4  | 6
    2 | 5  | 6
    '''
    if len(i) == 0:
        return (np.array([], int), np.array([], j.dtype), np.array([], j.dtype))
    # Sort by i then j
    index = np.lexsort((j, i))
    i = i[index]
    j = j[index]
    # map the values of i to a range r
    r_to_i = np.sort(np.unique(i))
    i_to_r_off = np.min(i)
    i_to_r = np.zeros(np.max(i) + 1 - i_to_r_off, int)
    i_to_r[r_to_i - i_to_r_off] = np.arange(len(r_to_i))
    # Advance the value of r by one each time i changes
    r = np.cumsum(np.hstack(([False], i[:-1] != i[1:])))
    # find the counts per item
    src_count = np.bincount(r)
    # The addresses of the starts of each item
    src_idx = np.hstack(([0], np.cumsum(src_count[:-1])))
    # The sizes of each destination item: n * (n - 1) / 2
    # This is the number of permutations of n items against themselves.
    dest_count = src_count * (src_count - 1) // 2
    # The indexes of the starts of each destination item (+ total sum at end)
    dest_idx = np.hstack(([0], np.cumsum(dest_count)))
    dest_size = dest_idx[-1]
    # Allocate the destination arrays
    d_r = np.zeros(dest_size, i.dtype)
    d_j1, d_j2 = np.zeros((2, dest_size), j.dtype)
    # Mark the first item in the destination and then do a cumulative
    # sum trick ((1 + 0 + 0 + 0 + 1 + 0 + 0) - 1 =
    #             0, 0, 0, 0, 1, 1, 1)
    # to label each member of d_i
    not_empty_indices = np.arange(0, len(dest_idx) - 1)
    not_empty_indices = not_empty_indices[dest_idx[:-1] != dest_idx[1:]]
    increments = not_empty_indices - np.hstack([[0], not_empty_indices[:-1]])
    d_r[dest_idx[not_empty_indices]] = increments
    d_r = np.cumsum(d_r)
    d_i = r_to_i[d_r]
    # Index each member of the destination array relative to its start. The
    # above array would look like this: [0, 1, 2, 3, 0, 1, 2]
    d_r_idx = np.arange(len(d_r)) - dest_idx[d_r]
    # We can use a 2x2 matrix to look up which j1 and j2 for each of
    # the d_r_idx. The first slot in the matrix is the number of values
    # to permute (from src_count[d_r]) and the second slot is d_r_idx
    # So here, we make a sparse array for the unique values of src_count
    unique_src_count = np.unique(src_count)
    unique_dest_len = (unique_src_count * (unique_src_count - 1) // 2).astype(int)
    # src_count repeated once per permutation
    i_sparse = np.hstack([np.ones(dlen, int) * c
                          for c, dlen in zip(unique_src_count, unique_dest_len)])
    # The indexes from zero to the # of permutations
    j_sparse = np.hstack([np.arange(dlen) for dlen in unique_dest_len])
    # Repeat 0 n-1 times, 1 n-2 times, etc to get the first indexes in
    # the permutation.
    v_j1_sparse = np.hstack([np.hstack([np.ones(n - x - 1, int) * x
                                        for x in range(n)])
                             for n in unique_src_count])
    # Spit out a range from 1 to n-1, 2 to n-1, etc
    v_j2_sparse = np.hstack([np.hstack([np.arange(x + 1, n)
                                        for x in range(n)])
                             for n in unique_src_count])
    if len(i_sparse) > 0:
        j1_sparse = scipy.sparse.coo_matrix(
            (v_j1_sparse, (i_sparse, j_sparse))).tocsc()
        j2_sparse = scipy.sparse.coo_matrix(
            (v_j2_sparse, (i_sparse, j_sparse))).tocsc()
    else:
        j1_sparse = j2_sparse = np.array([[]], j.dtype)
    # And now we can spit out the j1 and j2 dest. This is whatever element
    # from the group in j indexed by either j1 or j2 sparse. We find the
    # group's start by using d_r to look up the group's start.
    d_j1_idx = np.array(j1_sparse[src_count[d_r], d_r_idx]).flatten()
    d_j1 = j[src_idx[d_r] + d_j1_idx]
    d_j2_idx = np.array(j2_sparse[src_count[d_r], d_r_idx]).flatten()
    d_j2 = j[src_idx[d_r] + d_j2_idx]
    return (d_i, d_j1, d_j2)
def enter(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Press the Enter/Return key *n* times.

    (Original Chinese docstring: "Press the enter/newline key n times.")

    :param n: number of key taps
    :param interval: delay between consecutive taps
    :param pre_dl: optional delay before tapping
    :param post_dl: optional delay after tapping
    """
    self.delay(pre_dl)
    self.k.tap_key(self.k.enter_key, n, interval)
    self.delay(post_dl)
def get_nested(self, *args):
    """Follow *args* as a key path into ``self.data``.

    Returns the value found at the end of the path, or None as soon as
    any key along the way is missing.
    """
    node = self.data
    for part in args:
        if part not in node:
            return None
        node = node[part]
    return node
def get_sensor_data(self):
    """Get sensor reading objects.

    Iterates sensor reading objects pertaining to the currently
    managed BMC.

    :returns: Iterator of sdr.SensorReading objects
    """
    self.init_sdr()
    for sensor in self._sdr.get_sensor_numbers():
        # Get Sensor Reading (netfn 4, cmd 0x2d) for each SDR entry.
        rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,))
        if 'error' in rsp:
            if rsp['code'] == 203:
                # Sensor does not exist, optional dev
                continue
            raise exc.IpmiException(rsp['error'], code=rsp['code'])
        yield self._sdr.sensors[sensor].decode_sensor_reading(rsp['data'])
    self.oem_init()
    # OEM-specific readings are appended after the standard SDR set.
    for reading in self._oem.get_sensor_data():
        yield reading
async def graphql(
    schema: GraphQLSchema,
    source: Union[str, Source],
    root_value: Any = None,
    context_value: Any = None,
    variable_values: Dict[str, Any] = None,
    operation_name: str = None,
    field_resolver: GraphQLFieldResolver = None,
    type_resolver: GraphQLTypeResolver = None,
    middleware: Middleware = None,
    execution_context_class: Type[ExecutionContext] = ExecutionContext,
) -> ExecutionResult:
    """Execute a GraphQL operation asynchronously.

    This is the primary entry point function for fulfilling GraphQL
    operations by parsing, validating, and executing a GraphQL document
    alongside a GraphQL schema.

    More sophisticated GraphQL servers, such as those which persist
    queries, may wish to separate the validation and execution phases to
    a static time tooling step, and a server runtime step.

    Accepts the following arguments:

    :arg schema:
        The GraphQL type system to use when validating and executing a query.
    :arg source:
        A GraphQL language formatted string representing the requested
        operation.
    :arg root_value:
        The value provided as the first argument to resolver functions on
        the top level type (e.g. the query object type).
    :arg context_value:
        The context value is provided as an attribute of the second argument
        (the resolve info) to resolver functions. It is used to pass shared
        information useful at any point during query execution, for example
        the currently logged in user and connections to databases or other
        services.
    :arg variable_values:
        A mapping of variable name to runtime value to use for all variables
        defined in the request string.
    :arg operation_name:
        The name of the operation to use if the request string contains
        multiple possible operations. Can be omitted if the request string
        contains only one operation.
    :arg field_resolver:
        A resolver function to use when one is not provided by the schema.
        If not provided, the default field resolver is used (which looks for
        a value or method on the source value with the field's name).
    :arg type_resolver:
        A type resolver function to use when none is provided by the schema.
        If not provided, the default type resolver is used (which looks for
        a ``__typename`` field or alternatively calls the ``isTypeOf``
        method).
    :arg middleware:
        The middleware to wrap the resolvers with.
    :arg execution_context_class:
        The execution context class to use to build the context.
    """
    # Always return asynchronously for a consistent API.
    result = graphql_impl(
        schema,
        source,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
        type_resolver,
        middleware,
        execution_context_class,
    )
    if isawaitable(result):
        return await cast(Awaitable[ExecutionResult], result)
    return cast(ExecutionResult, result)
def to_tag(self) -> str:
    """Convert a Language back to a standard language tag, as a string.

    This is also the str() representation of a Language object. The
    result is cached on the instance after the first call.

    >>> Language.make(language='en', region='GB').to_tag()
    'en-GB'
    >>> Language.make(language='yue', script='Hant', region='HK').to_tag()
    'yue-Hant-HK'
    >>> Language.make(script='Arab').to_tag()
    'und-Arab'
    >>> str(Language.make(region='IN'))
    'und-IN'
    """
    if self._str_tag is not None:
        return self._str_tag
    # BCP 47 subtag order: language, extlangs, script, region, variants,
    # extensions, private use; 'und' stands in for a missing language.
    subtags = [self.language if self.language else 'und']
    if self.extlangs:
        subtags.extend(sorted(self.extlangs))
    if self.script:
        subtags.append(self.script)
    if self.region:
        subtags.append(self.region)
    if self.variants:
        subtags.extend(sorted(self.variants))
    if self.extensions:
        subtags.extend(self.extensions)
    if self.private:
        subtags.append(self.private)
    self._str_tag = '-'.join(subtags)
    return self._str_tag
def ProgramScanner(**kw):
    """Return a prototype Scanner instance for scanning executable
    files for static-lib dependencies.

    Library search paths are taken from the LIBPATH construction
    variable; any extra keyword arguments are forwarded to the base
    Scanner.
    """
    kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
    ps = SCons.Scanner.Base(scan, "ProgramScanner", **kw)
    return ps
def _lookup_unconflicted_symbol(self, symbol):
    """Attempt to find a unique asset whose symbol is the given string.

    If multiple assets have held the given symbol, return 0.
    If no asset has held the given symbol, return NaN.
    """
    try:
        uppered = symbol.upper()
    except AttributeError:
        # The mapping fails because symbol was a non-string
        return numpy.nan
    try:
        return self.finder.lookup_symbol(
            uppered,
            as_of_date=None,
            country_code=self.country_code,
        )
    except MultipleSymbolsFound:
        # Fill conflicted entries with zeros to mark that they need to be
        # resolved by date.
        return 0
    except SymbolNotFound:
        # Fill not found entries with nans.
        return numpy.nan
def _fitnesses_to_probabilities ( fitnesses ) :
"""Return a list of probabilities proportional to fitnesses .""" | # Do not allow negative fitness values
min_fitness = min ( fitnesses )
if min_fitness < 0.0 : # Make smallest fitness value 0
fitnesses = map ( lambda f : f - min_fitness , fitnesses )
fitness_sum = sum ( fitnesses )
# Generate probabilities
# Creates a list of increasing values .
# The greater the gap between two values , the greater the probability .
# Ex . [ 0.1 , 0.23 , 0.56 , 1.0]
prob_sum = 0.0
probabilities = [ ]
for fitness in fitnesses :
if fitness < 0 :
raise ValueError ( "Fitness cannot be negative, fitness = {}." . format ( fitness ) )
prob_sum += ( fitness / fitness_sum )
probabilities . append ( prob_sum )
probabilities [ - 1 ] += 0.0001
# to compensate for rounding errors
return probabilities |
def set_stream_rates():
    '''Set mavlink stream rates on all master links.

    No-op unless the periodic trigger fires or either configured
    streamrate changed since the last call. A rate of -1 means
    "leave this link's rate untouched".
    '''
    if (not msg_period.trigger() and
            mpstate.status.last_streamrate1 == mpstate.settings.streamrate and
            mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):
        return
    mpstate.status.last_streamrate1 = mpstate.settings.streamrate
    mpstate.status.last_streamrate2 = mpstate.settings.streamrate2
    for master in mpstate.mav_master:
        # Link 0 uses the primary streamrate, other links the secondary.
        if master.linknum == 0:
            rate = mpstate.settings.streamrate
        else:
            rate = mpstate.settings.streamrate2
        if rate != -1:
            master.mav.request_data_stream_send(
                mpstate.settings.target_system,
                mpstate.settings.target_component,
                mavutil.mavlink.MAV_DATA_STREAM_ALL,
                rate, 1)
def get_axes(self, projection=None):
    """Find all `Axes`, optionally matching the given projection.

    Parameters
    ----------
    projection : `str`
        name of axes types to return; None returns every axes

    Returns
    -------
    axlist : `list` of `~matplotlib.axes.Axes`
    """
    if projection is None:
        return self.axes
    wanted = projection.lower()
    return [axes for axes in self.axes if axes.name == wanted]
def delete_message(self, message_id):
    """Delete a message from this chat via the bot API.

    :param int message_id: ID of the message
    :return: result of the ``deleteMessage`` API call
    """
    return self.bot.api_call(
        "deleteMessage", chat_id=self.id, message_id=message_id)
def get_vault_form(self, *args, **kwargs):
    """Pass through to provider VaultAdminSession.get_vault_form_for_update.

    Dispatches to the *create* form when the call looks like a create
    request (the last positional argument is a list of record types, or
    ``vault_record_types`` was passed as a keyword); otherwise falls
    through to the *update* form.
    """
    # Implemented from kitosid template for -
    # osid.resource.BinAdminSession.get_bin_form_for_update_template
    # This method might be a bit sketchy. Time will tell.
    wants_create = isinstance(args[-1], list) or 'vault_record_types' in kwargs
    if wants_create:
        return self.get_vault_form_for_create(*args, **kwargs)
    return self.get_vault_form_for_update(*args, **kwargs)
def plot_spectrogram(self, fmin=None, fmax=None, method='scipy-fourier', deg=False, window='hann', detrend='linear', nperseg=None, noverlap=None, boundary='constant', padded=True, wave='morlet', invert=True, plotmethod='imshow', cmap_f=None, cmap_img=None, ms=4, ntMax=None, nfMax=None, Bck=True, fs=None, dmargin=None, wintit=None, tit=None, vmin=None, vmax=None, normt=False, draw=True, connect=True, returnspect=False, warn=True):
    """Plot the spectrogram of all channels with the chosen method.

    All non-plotting arguments are forwarded to the spectrogram
    computation; see ``self.calc_spectrogram?`` for details.

    Returns
    -------
    kh : tofu.utils.KeyHandler
        The tofu KeyHandler object handling figure interactivity.
    tf, f, lpsd, lang
        Only returned when ``returnspect`` is True: the values computed by
        ``_comp.spectrogram`` (time base, frequencies, power spectral
        densities and angles — confirm exact semantics in _comp).
    """
    # Spectral data classes are not supported by this plot yet.
    if self._isSpectral():
        msg = "spectrogram not implemented yet for spectral data class"
        raise Exception(msg)
    # Compute the spectrogram from the raw data and time base.
    tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t, fmin=fmin, deg=deg, method=method, window=window, detrend=detrend, nperseg=nperseg, noverlap=noverlap, boundary=boundary, padded=padded, wave=wave, warn=warn)
    # Build the interactive figure around the computed spectrogram.
    kh = _plot.Data_plot_spectrogram(self, tf, f, lpsd, lang, fmax=fmax, invert=invert, plotmethod=plotmethod, cmap_f=cmap_f, cmap_img=cmap_img, ms=ms, ntMax=ntMax, nfMax=nfMax, Bck=Bck, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit, vmin=vmin, vmax=vmax, normt=normt, draw=draw, connect=connect)
    if returnspect:
        return kh, tf, f, lpsd, lang
    else:
        return kh
def get_data(self, data_format):
    """Convert the internally stored common format to *data_format*.

    :param data_format: name of the output format; must appear in
        ``self.output_formats`` and have a matching ``to_<format>`` method.
    :raises Exception: when the requested format is unavailable.
    """
    converter_name = "to_" + data_format
    if data_format in self.output_formats:
        return getattr(self, converter_name)()
    raise Exception("Output format {0} not available with this class. Available formats are {1}.".format(data_format, self.output_formats))
def add_view(self, view_name, map_func, reduce_func=None, **kwargs):
    """Append a MapReduce view to the locally cached DesignDocument views.

    To create a JSON query index use
    :func:`~cloudant.database.CloudantDatabase.create_query_index` instead;
    attempting to add a query-language view through this method raises.

    :param str view_name: name used to identify the view.
    :param str map_func: JavaScript map function.
    :param str reduce_func: optional JavaScript reduce function.
    """
    # Refuse to silently overwrite an existing view of the same name.
    if self.get_view(view_name) is not None:
        raise CloudantArgumentError(107, view_name)
    # Query-language design documents cannot hold MapReduce views.
    if self.get('language', None) == QUERY_LANGUAGE:
        raise CloudantDesignDocumentException(101)
    self.views[view_name] = View(self, view_name, map_func, reduce_func, **kwargs)
def create_from_euler_angles(cls, rx, ry, rz, degrees=False):
    """Build a quaternion from the given Euler angles.

    :param rx, ry, rz: rotation angles about x, y and z
    :param degrees: when True, the angles are given in degrees
    :return: product quaternion qx * qy * qz
    """
    if degrees:
        rx, ry, rz = np.radians([rx, ry, rz])
    # One elementary rotation quaternion per axis.
    qx = Quaternion(np.cos(rx / 2), 0, 0, np.sin(rx / 2))
    qy = Quaternion(np.cos(ry / 2), 0, np.sin(ry / 2), 0)
    qz = Quaternion(np.cos(rz / 2), np.sin(rz / 2), 0, 0)
    # Compose the three axis rotations.
    return qx * qy * qz
def x10(cls, housecode, unitcode):
    """Create an X10 device address.

    :param housecode: house code letter 'a'-'p' (case-insensitive)
    :param unitcode: unit number 1-16, or 20/21/22 for the "fake"
        All Units Off / All Lights On / All Lights Off units
    :returns: an Address flagged as X10 (first byte 0x00)
    :raises ValueError: when either code is invalid
    """
    if housecode.lower() in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']:
        byte_housecode = insteonplm.utils.housecode_to_byte(housecode)
    else:
        # Distinguish "wrong letter" from "not a string at all" in the log.
        if isinstance(housecode, str):
            _LOGGER.error('X10 house code error: %s', housecode)
        else:
            _LOGGER.error('X10 house code is not a string')
        raise ValueError
    # 20, 21 and 22 for All Units Off, All Lights On and All Lights Off
    # 'fake' units
    if unitcode in range(1, 17) or unitcode in range(20, 23):
        byte_unitcode = insteonplm.utils.unitcode_to_byte(unitcode)
    else:
        if isinstance(unitcode, int):
            _LOGGER.error('X10 unit code error: %d', unitcode)
        else:
            _LOGGER.error('X10 unit code is not an integer 1 - 16')
        raise ValueError
    # Leading 0x00 byte marks the address as non-Insteon (X10).
    addr = Address(bytearray([0x00, byte_housecode, byte_unitcode]))
    addr.is_x10 = True
    return addr
def wtime_to_minutes(time_string):
    '''wtime_to_minutes

    Convert a standard wallclock time string to minutes.

    Args:
        - time_string in HH:MM:SS format

    Returns:
        (int) minutes; the seconds field is dropped and one extra minute
        is always added (a round-up safety margin, applied even when the
        seconds field is zero).
    '''
    hours, minutes, _seconds = time_string.split(':')
    total = int(hours) * 60 + int(minutes)
    return total + 1
def run_delete_sm(self, tenant_id, fw_dict, is_fw_virt):
    """Run the delete State Machine.

    Goes through every state function until the end or until one state
    returns failure.

    :param tenant_id: tenant whose firewall is being deleted
    :param fw_dict: firewall attribute dict handed to each state function
    :param is_fw_virt: whether the firewall is virtual
    :return: True when every executed state succeeded, False otherwise
    """
    # Read the current state from the DB
    ret = True
    serv_obj = self.get_service_obj(tenant_id)
    state = serv_obj.get_state()
    # Preserve the ordering of the next lines till while
    new_state = serv_obj.fixup_state(fw_const.FW_DEL_OP, state)
    serv_obj.store_local_final_result(fw_const.RESULT_FW_DELETE_INIT)
    if state != new_state:
        state = new_state
        serv_obj.store_state(state)
    while ret:
        try:
            # fabric_fsm maps state -> tuple whose second element is the
            # state handler callable.
            ret = self.fabric_fsm[state][1](tenant_id, fw_dict, is_fw_virt=is_fw_virt)
        except Exception as exc:
            LOG.error("Exception %(exc)s for state %(state)s", {'exc': str(exc), 'state': fw_const.fw_state_fn_del_dict.get(state)})
            ret = False
        if ret:
            LOG.info("State %s return successfully", fw_const.fw_state_fn_del_dict.get(state))
            # INIT_STATE is the terminal state of the delete machine.
            if state == fw_const.INIT_STATE:
                break
            state = self.get_next_state(state, ret, fw_const.FW_DEL_OP)
            serv_obj.store_state(state)
    return ret
def clear(self):
    """Remove all data from the buffer and zero every monitor's counter."""
    # Rewind, then drop everything after the (new) current position.
    self.io.seek(0)
    self.io.truncate()
    # Third slot of each monitor entry tracks a read position/counter —
    # presumably; reset it so monitors start from the empty buffer.
    for monitor in self.monitors:
        monitor[2] = 0
def _git_run(command, cwd=None, user=None, password=None, identity=None, ignore_retcode=False, failhard=True, redirect_stderr=False, saltenv='base', output_encoding=None, **kwargs):
    '''
    Run a git command, raising an exception carrying the error message on a
    non-zero return code (when ``failhard`` is set).

    simple, throw an exception with the error message on an error return code.

    this function may be moved to the command module, spliced with
    'cmd.run_all', and used as an alternative to 'cmd.run_all'. Some
    commands don't return proper retcodes, so this can't replace 'cmd.run_all'.
    '''
    env = {}
    if identity:
        _salt_cli = __opts__.get('__cli', '')
        errors = []
        missing_keys = []
        # if the statefile provides multiple identities, they need to be tried
        # (but also allow a string instead of a list)
        if not isinstance(identity, list):
            # force it into a list
            identity = [identity]
        # try each of the identities, independently
        tmp_identity_file = None
        for id_file in identity:
            if 'salt://' in id_file:
                # Key lives on the fileserver: fetch it into a private
                # (umask 077) temporary file first.
                with salt.utils.files.set_umask(0o077):
                    tmp_identity_file = salt.utils.files.mkstemp()
                    _id_file = id_file
                    id_file = __salt__['cp.get_file'](id_file, tmp_identity_file, saltenv)
                if not id_file:
                    log.error('identity %s does not exist.', _id_file)
                    __salt__['file.remove'](tmp_identity_file)
                    continue
                else:
                    if user:
                        # Let the runas user read the fetched key.
                        os.chown(id_file, __salt__['file.user_to_uid'](user), -1)
            else:
                if not __salt__['file.file_exists'](id_file):
                    missing_keys.append(id_file)
                    log.error('identity %s does not exist.', id_file)
                    continue
            env = {'GIT_IDENTITY': id_file}
            # copy wrapper to area accessible by ``runas`` user
            # currently no support in windows for wrapping git ssh
            ssh_id_wrapper = os.path.abspath(os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'git/ssh-id-wrapper'))
            tmp_ssh_wrapper = None
            if salt.utils.platform.is_windows():
                ssh_exe = _find_ssh_exe()
                if ssh_exe is None:
                    raise CommandExecutionError('Failed to find ssh.exe, unable to use identity file')
                env['GIT_SSH_EXE'] = ssh_exe
                # Use the windows batch file instead of the bourne shell script
                ssh_id_wrapper += '.bat'
                env['GIT_SSH'] = ssh_id_wrapper
            elif not user or _path_is_executable_others(ssh_id_wrapper):
                env['GIT_SSH'] = ssh_id_wrapper
            else:
                # Wrapper is not world-executable: give the runas user a
                # private executable copy.
                tmp_ssh_wrapper = salt.utils.files.mkstemp()
                salt.utils.files.copyfile(ssh_id_wrapper, tmp_ssh_wrapper)
                os.chmod(tmp_ssh_wrapper, 0o500)
                os.chown(tmp_ssh_wrapper, __salt__['file.user_to_uid'](user), -1)
                env['GIT_SSH'] = tmp_ssh_wrapper
            if 'salt-call' not in _salt_cli and __utils__['ssh.key_is_encrypted'](id_file):
                errors.append('Identity file {0} is passphrase-protected and cannot be '
                              'used in a non-interactive command. Using salt-call from '
                              'the minion will allow a passphrase-protected key to be '
                              'used.'.format(id_file))
                continue
            log.info('Attempting git authentication using identity file %s', id_file)
            try:
                result = __salt__['cmd.run_all'](command, cwd=cwd, runas=user, password=password, env=env, python_shell=False, log_callback=salt.utils.url.redact_http_basic_auth, ignore_retcode=ignore_retcode, redirect_stderr=redirect_stderr, output_encoding=output_encoding, **kwargs)
            finally:
                if tmp_ssh_wrapper:
                    # Cleanup the temporary ssh wrapper file
                    try:
                        __salt__['file.remove'](tmp_ssh_wrapper)
                        log.debug('Removed ssh wrapper file %s', tmp_ssh_wrapper)
                    except AttributeError:
                        # No wrapper was used
                        pass
                    except (SaltInvocationError, CommandExecutionError) as exc:
                        log.warning('Failed to remove ssh wrapper file %s: %s', tmp_ssh_wrapper, exc)
                if tmp_identity_file:
                    # Cleanup the temporary identity file
                    try:
                        __salt__['file.remove'](tmp_identity_file)
                        log.debug('Removed identity file %s', tmp_identity_file)
                    except AttributeError:
                        # No identify file was used
                        pass
                    except (SaltInvocationError, CommandExecutionError) as exc:
                        log.warning('Failed to remove identity file %s: %s', tmp_identity_file, exc)
            # If the command was successful, no need to try additional IDs
            if result['retcode'] == 0:
                return result
            else:
                err = result['stdout' if redirect_stderr else 'stderr']
                if err:
                    errors.append(salt.utils.url.redact_http_basic_auth(err))
        # We've tried all IDs and still haven't passed, so error out
        if failhard:
            msg = ('Unable to authenticate using identity file:\n\n{0}'.format('\n'.join(errors)))
            if missing_keys:
                if errors:
                    msg += '\n\n'
                msg += ('The following identity file(s) were not found: {0}'.format(', '.join(missing_keys)))
            raise CommandExecutionError(msg)
        return result
    else:
        # No identity file: single straightforward invocation.
        result = __salt__['cmd.run_all'](command, cwd=cwd, runas=user, password=password, env=env, python_shell=False, log_callback=salt.utils.url.redact_http_basic_auth, ignore_retcode=ignore_retcode, redirect_stderr=redirect_stderr, output_encoding=output_encoding, **kwargs)
        if result['retcode'] == 0:
            return result
        else:
            if failhard:
                gitcommand = ' '.join(command) if isinstance(command, list) else command
                msg = 'Command \'{0}\' failed'.format(salt.utils.url.redact_http_basic_auth(gitcommand))
                err = result['stdout' if redirect_stderr else 'stderr']
                if err:
                    msg += ': {0}'.format(salt.utils.url.redact_http_basic_auth(err))
                raise CommandExecutionError(msg)
            return result
def draw(self, **kwargs):
    """Draw the part-of-speech tag mapping as a bar chart.

    Called from the fit method; plots onto ``self.ax``.

    Parameters
    ----------
    kwargs : dict
        generic keyword arguments (unused here).

    Returns
    -------
    ax : matplotlib axes
        Axes on which the PosTagVisualizer was drawn.
    """
    counts = self.pos_tag_counts_
    colors = resolve_colors(
        n_colors=len(counts),
        colormap=self.colormap,
        colors=self.colors,
    )
    if self.frequency:
        # Order the tags by how often they occur in the corpus.
        tags = sorted(counts, key=counts.get, reverse=True)
        heights = [counts[tag] for tag in tags]
    else:
        tags = list(counts)
        heights = list(counts.values())
    self.ax.bar(range(len(tags)), heights, color=colors)
    return self.ax
def add_request_participants(self, issue_id_or_key, users_list):
    """Add users as participants to an existing customer request.

    The calling user must have permission to manage participants for this
    customer request.

    :param issue_id_or_key: str
    :param users_list: list of usernames to add
    :return: response of the POST call
    """
    endpoint = 'rest/servicedeskapi/request/{}/participant'.format(issue_id_or_key)
    payload = {'usernames': users_list}
    return self.post(endpoint, data=payload)
def ncbi_geneid(self):
    """Return this feature's NCBI GeneID if it's present, else None.

    NCBI GFF3 files encode gene IDs in **Dbxref** attributes
    (example: ``Dbxref=GeneID:103504972``).
    """
    dbxrefs = self.get_attribute('Dbxref', as_list=True)
    if dbxrefs is None:
        return None
    for xref in dbxrefs:
        if not xref.startswith('GeneID:'):
            continue
        # Two-element unpack: a malformed multi-colon value still raises,
        # matching the strictness of the previous implementation.
        _, geneid = xref.split(':')
        return geneid
    return None
def by_user_and_perm(cls, user_id, perm_name, db_session=None):
    """Fetch the first permission row matching user and permission name.

    :param user_id: user identifier to filter on
    :param perm_name: permission name to filter on
    :param db_session: optional explicit session; defaults to the global one
    :return: first matching model instance or None
    """
    session = get_db_session(db_session)
    model = cls.model
    query = (session.query(model)
             .filter(model.user_id == user_id)
             .filter(model.perm_name == perm_name))
    return query.first()
def _check_infinite_flows ( self , steps , flows = None ) :
"""Recursively loop through the flow _ config and check if there are any cycles .
: param steps : Set of step definitions to loop through
: param flows : Flows already visited .
: return : None""" | if flows is None :
flows = [ ]
for step in steps . values ( ) :
if "flow" in step :
flow = step [ "flow" ]
if flow == "None" :
continue
if flow in flows :
raise FlowInfiniteLoopError ( "Infinite flows detected with flow {}" . format ( flow ) )
flows . append ( flow )
flow_config = self . project_config . get_flow ( flow )
self . _check_infinite_flows ( flow_config . steps , flows ) |
def list_tokens(opts):
    '''
    List all token keys currently in the store.

    :param opts: Salt master config options
    :returns: list of token keys (str); an empty list when the redis
        client is unavailable or the listing fails
    '''
    # NOTE: the previous version created an unused ``ret`` list and an
    # unused ``salt.payload.Serial`` instance; both removed. The docstring
    # also promised "List of dicts (token_data)" but keys are what is
    # (and was) actually returned.
    redis_client = _redis_client(opts)
    if not redis_client:
        return []
    try:
        return [key.decode('utf8') for key in redis_client.keys()]
    except Exception as err:
        log.warning('Failed to list keys: %s', err)
        return []
def check_token(request):
    """Resource check: is the submitted token valid?

    request_serializer: serializers.CheckToken
        token (required, string): the token to validate

    responseMessages:
    - 200: Token is valid (body contains the owning username)
    - 400: Token is not valid
    - 401: Unauthorized
    """
    serializer = serializers.CheckToken(data=request.data)
    # Invalid tokens abort here with a validation error (HTTP 400).
    serializer.is_valid(raise_exception=True)
    token = serializer.validated_data['token']
    username = token.user.username
    logger.debug('Token correct', extra={'token': token, 'username': username})
    return Response({'username': username})
def get(self, rel):
    """Get the resource href by rel name.

    :param str rel: name of rel to look up
    :raises UnsupportedEntryPoint: entry point not found in this version
        of the API
    """
    matching = [entry for entry in self._entry_points if entry.get('rel') == rel]
    if matching:
        # First match wins; href may legitimately be absent (None).
        return matching[0].get('href')
    raise UnsupportedEntryPoint("The specified entry point '{}' was not found in this " "version of the SMC API. Check the element documentation " "to determine the correct version and specify the api_version " "parameter during session.login() if necessary.".format(rel))
def checkImportBindingsExtensions():
    """Check that nupic.bindings extension libraries can be imported.

    Imports each compiled extension module purely for the side effect of
    loading it; throws ImportError on failure.
    """
    import nupic.bindings.math
    import nupic.bindings.algorithms
    import nupic.bindings.engine_internal
def get_next_page(self):
    """Return the next page of results as a list of Track objects."""
    document = self._retrieve_next_page()
    tracks = []
    for node in document.getElementsByTagName("track"):
        track = Track(
            _extract(node, "artist"),
            _extract(node, "name"),
            self.network,
            info={"image": _extract_all(node, "image")},
        )
        # Listener count rides along as an extra attribute on the Track.
        track.listener_count = _number(_extract(node, "listeners"))
        tracks.append(track)
    return tracks
def score_for_task(properties, category, result):
    """Return the possible score of a task, depending on whether the result
    is correct or not; None when scoring does not apply."""
    assert result is not None
    if not properties:
        return None
    # Only SV-COMP property sets are scored.
    if Property.create_from_names(properties).is_svcomp:
        return _svcomp_score(category, result)
    return None
def text_to_data(self, text, elt, ps):
    '''Convert XML text into typecode-specific data.

    Splits *text* into a QName, resolves its prefix against the element's
    namespace dictionary and returns a ``(namespaceURI, localName)`` pair
    (wrapped in ``self.pyclass`` when one is set).

    :raises EvaluateException: when the prefix cannot be resolved.
    '''
    prefix, localName = SplitQName(text)
    nsdict = ps.GetElementNSdict(elt)
    # An absent prefix resolves through the default ('') namespace entry.
    prefix = prefix or ''
    try:
        namespaceURI = nsdict[prefix]
    except KeyError:
        # Was `except KeyError, ex` — Python-2-only syntax whose bound
        # exception was never used; `except KeyError` works on 2.6+/3.x.
        raise EvaluateException('cannot resolve prefix(%s)' % prefix, ps.Backtrace(elt))
    v = (namespaceURI, localName)
    if self.pyclass is not None:
        return self.pyclass(v)
    return v
def on_post(self, req, resp):
    """Send a POST request with id/nic/interval/filter/iters and it will start
    a container for collection with those specifications.

    Responds with a stringified ``(success, message)`` tuple in the body;
    HTTP status is always 200, success is encoded in the body text.
    """
    resp.content_type = falcon.MEDIA_TEXT
    resp.status = falcon.HTTP_200
    # verify payload is in the correct format
    # default to no filter
    payload = {}
    if req.content_length:
        try:
            payload = json.load(req.stream)
        except Exception as e:  # pragma: no cover
            resp.body = "(False, 'malformed payload')"
            return
    else:
        # An empty body is treated the same as unparseable JSON.
        resp.body = "(False, 'malformed payload')"
        return
    if 'filter' not in payload:
        payload['filter'] = ''
    # payload should have the following fields:
    # - id
    # - nic
    # - interval
    # - filter
    # - iters
    # should spin up a tcpdump container that writes out pcap files based
    # on the filter needs to be attached to the nic specified, if iters is
    # -1 then loops until killed, otherwise completes iters number of
    # captures (and creates that many pcap files) should keep track of
    # container id, container name, and id of filter and filter + whatever
    # verify payload has necessary information
    if 'nic' not in payload:
        resp.body = "(False, 'payload missing nic')"
        return
    if 'id' not in payload:
        resp.body = "(False, 'payload missing id')"
        return
    if 'interval' not in payload:
        resp.body = "(False, 'payload missing interval')"
        return
    if 'iters' not in payload:
        resp.body = "(False, 'payload missing iters')"
        return
    # connect to docker
    c = None
    try:
        c = docker.from_env()
    except Exception as e:  # pragma: no cover
        resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
        return
    # spin up container with payload specifications
    if c:
        # Capture container shares the host network and this container's
        # volumes so the pcap output lands in a shared location.
        tool_d = {'network_mode': 'host', 'volumes_from': [socket.gethostname()]}
        cmd = '/tmp/run.sh ' + payload['nic'] + ' ' + payload['interval']
        cmd += ' ' + payload['id'] + ' ' + payload['iters'] + ' "'
        cmd += payload['filter'] + '"'
        try:
            container = c.containers.run(image='cyberreboot/vent-ncapture:master', command=cmd, remove=True, detach=True, **tool_d)
            resp.body = "(True, 'successfully created and started filter: " + str(payload['id']) + ' on container: ' + str(container.id) + "')"
        except Exception as e:  # pragma: no cover
            resp.body = "(False, 'unable to start container because: " + str(e) + "')"
        return
    return
def get_molecule(self, index=0):
    """Get one frame of the trajectory as a Molecule.

    Optional argument:
    | ``index`` -- the frame index [default=0]
    """
    geometry = self.geometries[index]
    title = self.titles[index]
    return Molecule(self.numbers, geometry, title, symbols=self.symbols)
def reifyWidget(self, parent, item):
    '''Instantiate the Wx widget class named by item["type"] from its
    JSON description.'''
    from gooey.gui.components import widgets
    widget_cls = getattr(widgets, item['type'])
    return widget_cls(parent, item)
def pow(base, exp):
    """Element-wise power: raise `base` to the powers in `exp`.

    Either operand may be a Symbol or a scalar number. Broadcasting is not
    supported; use `broadcast_pow` instead. `sym.pow` is being deprecated,
    please use `sym.power` instead.

    Parameters
    ----------
    base : Symbol or scalar
        The base symbol
    exp : Symbol or scalar
        The exponent symbol

    Returns
    -------
    Symbol or scalar
        The bases raised to the exponents

    Raises
    ------
    TypeError
        When the operand types are not a supported Symbol/scalar pairing.
    """
    base_is_symbol = isinstance(base, Symbol)
    exp_is_symbol = isinstance(exp, Symbol)
    if base_is_symbol:
        if exp_is_symbol:
            return _internal._Power(base, exp)
        if isinstance(exp, Number):
            return _internal._PowerScalar(base, scalar=exp)
    elif isinstance(base, Number):
        if exp_is_symbol:
            return _internal._RPowerScalar(exp, scalar=base)
        if isinstance(exp, Number):
            return base ** exp
    raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp))))
def create_syslog(self, service_id, version_number, name, address, port=514, use_tls="0", tls_ca_cert=None, token=None, _format=None, response_condition=None):
    """Create a Syslog logging endpoint for a particular service version."""
    fields = {
        "name": name,
        "address": address,
        "port": port,
        "use_tls": use_tls,
        "tls_ca_cert": tls_ca_cert,
        "token": token,
        "format": _format,
        "response_condition": response_condition,
    }
    body = self._formdata(fields, FastlySyslog.FIELDS)
    endpoint = "/service/%s/version/%d/syslog" % (service_id, version_number)
    content = self._fetch(endpoint, method="POST", body=body)
    return FastlySyslog(self, content)
def get_max_seq_len(self) -> Optional[int]:
    """:return: The strictest (smallest) maximum length over all encoders
    that declare one, or None when no encoder restricts the length."""
    limits = [encoder.get_max_seq_len() for encoder in self.encoders]
    known_limits = [limit for limit in limits if limit is not None]
    return min(known_limits, default=None)
def revision(directory, message, autogenerate, sql, head, splice, branch_label, version_path, rev_id):
    """Create a new revision file.

    Thin CLI-facing wrapper: forwards every argument unchanged to
    ``_revision`` (the underlying implementation).
    """
    _revision(directory, message, autogenerate, sql, head, splice, branch_label, version_path, rev_id)
def bot_config(player_config_path: Path, team: Team) -> 'PlayerConfig':
    """Build a PlayerConfig for the common case of an RLBot-controlled bot.

    :param player_config_path: path to the bot's config file
    :param team: team the bot plays on
    """
    config = PlayerConfig()
    config.bot = True
    config.rlbot_controlled = True
    config.team = team.value
    # TODO: Refactor to use Path's
    config.config_path = str(player_config_path.absolute())
    bundle = get_bot_config_bundle(config.config_path)
    config.name = bundle.name
    config.loadout_config = load_bot_appearance(bundle.get_looks_config(), config.team)
    return config
def get(section, key):
    """Return the value of *key* in *section* of the main config file.

    The raw value is tried as float, then int, then boolean; if every cast
    fails the raw string is returned. (Note: because float is tried first,
    plain integers come back as floats — preserved for compatibility.)

    :param section: the section.
    :type section: str.
    :param key: the key.
    :type key: str.
    :returns: float, int, bool, or the raw string.
    """
    # FILE = 'config_misc'
    if not _loaded:
        init(FILE)
    # The previous version used two bare `except:` clauses, which also
    # swallowed KeyboardInterrupt/SystemExit and masked real errors such as
    # a missing section. Failed conversions raise ValueError, so catch
    # exactly that and let everything else propagate.
    try:
        return cfg.getfloat(section, key)
    except ValueError:
        pass
    try:
        return cfg.getint(section, key)
    except ValueError:
        pass
    try:
        return cfg.getboolean(section, key)
    except ValueError:
        return cfg.get(section, key)
def processPrePrepare(self, pre_prepare: PrePrepare, sender: str):
    """
    Validate and process the provided PRE-PREPARE; on success, create and
    broadcast a PREPARE for it. On failure, either report the sender as
    suspicious, stash the message, or request the missing pieces,
    depending on the specific check that failed.

    :param pre_prepare: message
    :param sender: name of the node that sent this message
    """
    key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
    self.logger.debug("{} received PRE-PREPARE{} from {}".format(self, key, sender))
    # TODO: should we still do it?
    # Converting each req_idrs from list to tuple
    req_idrs = {f.REQ_IDR.nm: [key for key in pre_prepare.reqIdr]}
    pre_prepare = updateNamedTuple(pre_prepare, **req_idrs)

    def report_suspicious(reason):
        # Flag the sending node as suspicious with the given reason code.
        ex = SuspiciousNode(sender, reason, pre_prepare)
        self.report_suspicious_node(ex)

    why_not = self._can_process_pre_prepare(pre_prepare, sender)
    if why_not is None:
        why_not_applied = self._process_valid_preprepare(pre_prepare, sender)
        if why_not_applied is not None:
            # Map each application-failure code onto its suspicion.
            if why_not_applied == PP_APPLY_REJECT_WRONG:
                report_suspicious(Suspicions.PPR_REJECT_WRONG)
            elif why_not_applied == PP_APPLY_WRONG_DIGEST:
                report_suspicious(Suspicions.PPR_DIGEST_WRONG)
            elif why_not_applied == PP_APPLY_WRONG_STATE:
                report_suspicious(Suspicions.PPR_STATE_WRONG)
            elif why_not_applied == PP_APPLY_ROOT_HASH_MISMATCH:
                report_suspicious(Suspicions.PPR_TXN_WRONG)
            elif why_not_applied == PP_APPLY_HOOK_ERROR:
                report_suspicious(Suspicions.PPR_PLUGIN_EXCEPTION)
            elif why_not_applied == PP_SUB_SEQ_NO_WRONG:
                report_suspicious(Suspicions.PPR_SUB_SEQ_NO_WRONG)
            elif why_not_applied == PP_NOT_FINAL:
                # this is fine, just wait for another
                return
            elif why_not_applied == PP_APPLY_AUDIT_HASH_MISMATCH:
                report_suspicious(Suspicions.PPR_AUDIT_TXN_ROOT_HASH_WRONG)
            elif why_not_applied == PP_REQUEST_ALREADY_ORDERED:
                report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)
    elif why_not == PP_CHECK_NOT_FROM_PRIMARY:
        report_suspicious(Suspicions.PPR_FRM_NON_PRIMARY)
    elif why_not == PP_CHECK_TO_PRIMARY:
        report_suspicious(Suspicions.PPR_TO_PRIMARY)
    elif why_not == PP_CHECK_DUPLICATE:
        report_suspicious(Suspicions.DUPLICATE_PPR_SENT)
    elif why_not == PP_CHECK_INCORRECT_POOL_STATE_ROOT:
        report_suspicious(Suspicions.PPR_POOL_STATE_ROOT_HASH_WRONG)
    elif why_not == PP_CHECK_OLD:
        self.logger.info("PRE-PREPARE {} has ppSeqNo lower " "then the latest one - ignoring it".format(key))
    elif why_not == PP_CHECK_REQUEST_NOT_FINALIZED:
        # Some requests referenced by this PRE-PREPARE are unknown here or
        # not yet finalized: collect them, check for already-ordered
        # conflicts, then stash the PRE-PREPARE and request PROPAGATEs.
        absents = set()
        non_fin = set()
        non_fin_payload = set()
        for key in pre_prepare.reqIdr:
            req = self.requests.get(key)
            if req is None:
                absents.add(key)
            elif not req.finalised:
                non_fin.add(key)
                non_fin_payload.add(req.request.payload_digest)
        absent_str = ', '.join(str(key) for key in absents)
        non_fin_str = ', '.join('{} ({} : {})'.format(str(key), str(len(self.requests[key].propagates)), ', '.join(self.requests[key].propagates.keys())) for key in non_fin)
        self.logger.warning("{} found requests in the incoming pp, of {} ledger, that are not finalized. " "{} of them don't have propagates: {}." "{} of them don't have enough propagates: {}.".format(self, pre_prepare.ledgerId, len(absents), absent_str, len(non_fin), non_fin_str))

        def signal_suspicious(req):
            self.logger.info("Request digest {} already ordered. Discard {} " "from {}".format(req, pre_prepare, sender))
            report_suspicious(Suspicions.PPR_WITH_ORDERED_REQUEST)

        # checking for payload digest is more effective
        for payload_key in non_fin_payload:
            if self.node.seqNoDB.get_by_payload_digest(payload_key) != (None, None):
                signal_suspicious(payload_key)
                return
        # for absents we can only check full digest
        for full_key in absents:
            if self.node.seqNoDB.get_by_full_digest(full_key) is not None:
                signal_suspicious(full_key)
                return
        bad_reqs = absents | non_fin
        self.enqueue_pre_prepare(pre_prepare, sender, bad_reqs)
        # TODO: An optimisation might be to not request PROPAGATEs
        # if some PROPAGATEs are present or a client request is
        # present and sufficient PREPAREs and PRE-PREPARE are present,
        # then the digest can be compared but this is expensive as the
        # PREPARE and PRE-PREPARE contain a combined digest
        self._schedule(partial(self.request_propagates_if_needed, bad_reqs, pre_prepare), self.config.PROPAGATE_REQUEST_DELAY)
    elif why_not == PP_CHECK_NOT_NEXT:
        # Out-of-order PRE-PREPARE: possibly request the gap, then stash.
        pp_view_no = pre_prepare.viewNo
        pp_seq_no = pre_prepare.ppSeqNo
        last_pp_view_no, last_pp_seq_no = self.__last_pp_3pc
        if pp_view_no >= last_pp_view_no and (self.isMaster or self.last_ordered_3pc[1] != 0):
            seq_frm = last_pp_seq_no + 1 if pp_view_no == last_pp_view_no else 1
            seq_to = pp_seq_no - 1
            if seq_to >= seq_frm >= pp_seq_no - CHK_FREQ + 1:
                self.logger.warning("{} missing PRE-PREPAREs from {} to {}, " "going to request".format(self, seq_frm, seq_to))
                self._request_missing_three_phase_messages(pp_view_no, seq_frm, seq_to)
        self.enqueue_pre_prepare(pre_prepare, sender)
        self._setup_last_ordered_for_non_master()
    elif why_not == PP_CHECK_WRONG_TIME:
        # Stash so it can be re-examined if the clock discrepancy resolves.
        key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
        item = (pre_prepare, sender, False)
        self.pre_prepares_stashed_for_incorrect_time[key] = item
        report_suspicious(Suspicions.PPR_TIME_WRONG)
    elif why_not == BlsBftReplica.PPR_BLS_MULTISIG_WRONG:
        report_suspicious(Suspicions.PPR_BLS_MULTISIG_WRONG)
    else:
        self.logger.warning("Unknown PRE-PREPARE check status: {}".format(why_not))
def get_lang(tweet):
    """Get the language that the Tweet is written in.

    Args:
        tweet (Tweet or dict): A Tweet object or dictionary

    Returns:
        str: 2-letter BCP 47 language code (or None if undefined)

    Example:
        >>> from tweet_parser.getter_methods.tweet_text import get_lang
        >>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017",
        ...             "lang": "en"}
        >>> get_lang(original)
        'en'
        >>> activity = {"postedTime": "2017-05-24T20:17:19.000Z",
        ...             "twitter_lang": "en"}
        >>> get_lang(activity)
        'en'
    """
    if is_original_format(tweet):
        lang_field = "lang"
    else:
        lang_field = "twitter_lang"
    # Use .get(): a payload without the language field previously raised
    # KeyError, contradicting the documented "None if undefined" contract.
    lang = tweet.get(lang_field)
    if lang is not None and lang != "und":
        return lang
    return None
def _insert_automodapi_configs ( c ) :
"""Add configurations related to automodapi , autodoc , and numpydoc to the
state .""" | # Don ' t show summaries of the members in each class along with the
# class ' docstring
c [ 'numpydoc_show_class_members' ] = False
c [ 'autosummary_generate' ] = True
c [ 'automodapi_toctreedirnm' ] = 'py-api'
c [ 'automodsumm_inherited_members' ] = True
# Docstrings for classes and methods are inherited from parents .
c [ 'autodoc_inherit_docstrings' ] = True
# Class documentation should only contain the class docstring and
# ignore the _ _ init _ _ docstring , account to LSST coding standards .
# c [ ' autoclass _ content ' ] = " both "
c [ 'autoclass_content' ] = "class"
# Default flags for automodapi directives . Special members are dunder
# methods .
# NOTE : We want to used ` inherited - members ` , but it seems to be causing
# documentation duplication in the automodapi listings . We ' re leaving
# this out for now . See https : / / jira . lsstcorp . org / browse / DM - 14782 for
# additional notes .
# NOTE : Without inherited members set , special - members doesn ' t need seem
# to have an effect ( even for special members where the docstrings are
# directly written in the class , not inherited .
# c [ ' autodoc _ default _ flags ' ] = [ ' inherited - members ' ]
c [ 'autodoc_default_flags' ] = [ 'show-inheritance' , 'special-members' ]
return c |
def p_expression_ulnot(self, p):
    'expression : LNOT expression %prec ULNOT'
    # NOTE: the string above is a PLY grammar rule, not documentation --
    # yacc reads the function's __doc__ to build the parse table, so it
    # must stay the first statement and must not be reworded.
    # Wrap the operand expression in a unary logical-not AST node, tagged
    # with the source line of the LNOT token.
    p[0] = Ulnot(p[2], lineno=p.lineno(1))
    # Propagate the operator's line number to the production result.
    p.set_lineno(0, p.lineno(1))
def deobfuscate_email(text):
    """Deobfuscate email addresses in the provided text.

    HTML entities are unescaped first, then common "dot" and "at"
    spellings are rewritten back to ``.`` and ``@``.
    """
    text = unescape(text)
    # Apply the "dot" rewrites first, then the "at" rewrites, in the same
    # order as the module-level patterns are declared.
    rewrites = (
        (_deobfuscate_dot1_re, '.'),
        (_deobfuscate_dot2_re, r'\1.\2'),
        (_deobfuscate_dot3_re, r'\1.\2'),
        (_deobfuscate_at1_re, '@'),
        (_deobfuscate_at2_re, r'\1@\2'),
        (_deobfuscate_at3_re, r'\1@\2'),
    )
    for pattern, replacement in rewrites:
        text = pattern.sub(replacement, text)
    return text
def run(exe, args, capturestd=False, env=None):
    """Run an executable with a list of arguments, optionally in the
    specified environment.

    :param exe: executable to invoke.
    :param args: list of command-line arguments.
    :param capturestd: capture stdout/stderr into temp files and return them.
    :param env: optional environment mapping for the child process.
    :return: ``(stdout, stderr)`` -- both ``None`` unless *capturestd*.
    :raises RunException: if the process exits with a non-zero status.
    """
    command = [exe] + args
    if env:
        log.info("Executing [custom environment]: %s", " ".join(command))
    else:
        log.info("Executing : %s", " ".join(command))
    start = time.time()
    # Temp files are deleted automatically on close().  If run() raises,
    # the garbage collector should call close(), so no try/finally needed.
    outfile = tempfile.TemporaryFile() if capturestd else None
    errfile = tempfile.TemporaryFile() if capturestd else None
    # Use call instead of Popen so that stdin stays connected to the
    # console, in case user input is required.
    # On Windows shell=True is needed, otherwise the modified environment
    # PATH variable is ignored.  On Unix shell=True breaks things.
    retcode = subprocess.call(command, env=env, stdout=outfile,
                              stderr=errfile, shell=WINDOWS)
    stdout = None
    stderr = None
    if capturestd:
        outfile.seek(0)
        stdout = outfile.read()
        outfile.close()
        errfile.seek(0)
        stderr = errfile.read()
        errfile.close()
    elapsed = time.time() - start
    if retcode != 0:
        log.error("Failed [%.3f s]", elapsed)
        raise RunException("Non-zero return code", exe, args, retcode,
                           stdout, stderr)
    log.info("Completed [%.3f s]", elapsed)
    return stdout, stderr
def atomic(self, func):
    """A decorator that wraps a function in an atomic block.

    Example::

        db = CustomSQLAlchemy()

        @db.atomic
        def f():
            write_to_db('a message')
            return 'OK'

        assert f() == 'OK'

    This code defines the function ``f``, which is wrapped in an
    atomic block. Wrapping a function in an atomic block gives
    several guarantees:

    1. The database transaction will be automatically committed if
       the function returns normally, and automatically rolled
       back if the function raises an unhandled exception.
    2. When the transaction is committed, all objects in
       ``db.session`` will be expunged. This means that no lazy
       loading will be performed on them.
    3. If a transaction serialization error occurs during the
       execution of the function, the function will be
       re-executed. (It might be re-executed several times.)

    Atomic blocks can be nested, but in this case the outermost
    block takes full control of the transaction's life-cycle, and
    inner blocks do nothing.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        session = self.session
        session_info = session.info
        # Nested atomic block: an outer wrapper already owns the
        # transaction life-cycle, so just call through.
        if session_info.get(_ATOMIC_FLAG_SESSION_INFO_KEY):
            return func(*args, **kwargs)
        # Outermost block: re-run the function on serialization errors.
        f = retry_on_deadlock(session)(func)
        session_info[_ATOMIC_FLAG_SESSION_INFO_KEY] = True
        try:
            result = f(*args, **kwargs)
            # Flush pending changes, then expunge everything so no lazy
            # loading can happen on returned objects after the commit.
            session.flush()
            session.expunge_all()
            session.commit()
            return result
        except Exception:
            session.rollback()
            raise
        finally:
            # Always clear the flag so later calls start fresh blocks.
            session_info[_ATOMIC_FLAG_SESSION_INFO_KEY] = False
    return wrapper
def can_route(self, endpoint, method=None, **kwargs):
    """Make sure we can route to the given endpoint or url.

    This checks for `http.get` permission (or other methods) on the ACL of
    route functions, attached via the `ACL` decorator.

    :param endpoint: A URL or endpoint to check for permission to access.
    :param method: The HTTP method to check; defaults to `'GET'`.
    :param **kwargs: The context to pass to predicates.
    :return: bool -- whether access to the view is permitted.
    """
    view = flask.current_app.view_functions.get(endpoint)
    if not view:
        # Not a registered endpoint name: treat the argument as a URL and
        # resolve it via the request context's URL adapter.
        # BUGFIX: ``flask._request_ctx.top.match`` does not exist; URL
        # matching lives on the request-context stack's ``url_adapter``.
        endpoint, args = flask._request_ctx_stack.top.url_adapter.match(endpoint)
        view = flask.current_app.view_functions.get(endpoint)
    if not view:
        return False
    return self.can('http.' + (method or 'GET').lower(), view, **kwargs)
def help(self):
    '''Render a table of exposed methods and their docstrings.'''
    cmds = self.get_exposed_cmds()
    table = text_helper.Table(fields=['command', 'doc'], lengths=[50, 85])
    # One row per exposed command: pretty-printed name plus its docstring.
    rows = [(reflect.formatted_function_name(cmd), cmd.__doc__)
            for cmd in cmds.values()]
    return table.render(rows)
def check_outputs(self):
    """Check for the existence of output files and decide whether to run.

    Expands ``self.outputs``, then returns True (task should run) when any
    output file is missing, when dependencies are newer than the outputs,
    or when ``self.force`` is set; returns False otherwise.  Progress is
    reported via ``print``.

    :return: bool -- whether the task should be run.
    """
    self.outputs = self.expand_filenames(self.outputs)
    result = False
    if self.files_exist(self.outputs):
        if self.dependencies_are_newer(self.outputs, self.inputs):
            result = True
            print("Dependencies are newer than outputs.")
            print("Running task.")
        elif self.force:
            print("Dependencies are older than inputs, but 'force' option present.")
            print("Running task.")
            result = True
        else:
            print("Dependencies are older than inputs.")
    else:
        # Missing outputs always force a run.
        # BUGFIX: message previously read "No ouput file(s)."
        print("No output file(s).")
        print("Running task.")
        result = True
    return result
def delete(all=False, *databases):
    '''Remove description snapshots from the system.

    :param all: Default: False. Remove all snapshots, if set to True.
    :param databases: snapshot IDs to remove when ``all`` is not set.

    CLI example:

    .. code-block:: bash

        salt myminion inspector.delete <ID> <ID1> <ID2>..
        salt myminion inspector.delete all=True
    '''
    # NOTE: ``all`` shadows the builtin, but it is the documented CLI
    # keyword argument, so the name cannot change.
    if not all and not databases:
        raise CommandExecutionError('At least one database ID required.')
    try:
        ret = dict()
        inspector = _("collector").Inspector(cachedir=__opts__['cachedir'],
                                             piddir=os.path.dirname(__opts__['pidfile']))
        # ``all and X or databases``: purge every stored snapshot when
        # all=True (falling back to the explicit IDs if the listing is
        # empty), otherwise purge only the given IDs.
        for dbid in all and inspector.db.list() or databases:
            ret[dbid] = inspector.db._db.purge(six.text_type(dbid))
        return ret
    except InspectorSnapshotException as err:
        raise CommandExecutionError(err)
    except Exception as err:
        # Unexpected failure: log the details and re-raise generically.
        log.error(_get_error_message(err))
        raise Exception(err)
def create_token(self, user):
    """Create a signed token from a user."""
    # The password is expected to be a secure hash but we hash it again
    # for additional safety.  We default to MD5 to minimize the length of
    # the token.  (Remember, if an attacker obtains the URL, he can
    # already log in.  This isn't high security.)
    revocation_hash = crypto.pbkdf2(
        self.get_revocation_key(user),
        self.salt,
        self.iterations,
        digest=self.digest,
    )
    payload = self.packer.pack_pk(user.pk) + revocation_hash
    return self.sign(payload)
def add_edge(self, u, v, **kwargs):
    """Add an edge between u and v.

    The nodes u and v will be automatically added if they are
    not already in the graph.

    Parameters
    ----------
    u, v : nodes
        Nodes can be any hashable Python object.

    Examples
    --------
    >>> from pgmpy.models import MarkovModel
    >>> G = MarkovModel()
    >>> G.add_nodes_from(['Alice', 'Bob', 'Charles'])
    >>> G.add_edge('Alice', 'Bob')
    """
    # Guard clause: self loops are not permitted in a Markov network.
    if u == v:
        raise ValueError('Self loops are not allowed')
    super(MarkovModel, self).add_edge(u, v, **kwargs)
def post_process(self, layer):
    """More processing after getting the impact layer with data.

    Runs every registered post processor against *layer* (when its
    preconditions hold) and records progress in the analysis state.

    :param layer: The vector layer to use for post processing.
    :type layer: QgsVectorLayer
    """
    LOGGER.info('ANALYSIS : Post processing')
    # Set the layer title
    purpose = layer.keywords['layer_purpose']
    if purpose != layer_purpose_aggregation_summary['key']:
        # On an aggregation layer, the default title does not make any sense.
        layer_title(layer)
    for post_processor in post_processors:
        # Each post processor declares whether it applies to this layer.
        run, run_message = should_run(layer.keywords, post_processor)
        if run:
            valid, message = enough_input(layer, post_processor['input'])
            name = post_processor['name']
            if valid:
                valid, message = run_single_post_processor(layer, post_processor)
                if valid:
                    self.set_state_process('post_processor', name)
                    message = '{name} : Running'.format(name=name)
                    LOGGER.info(message)
                else:
                    # Post processor failed; the reason is in ``message``
                    # but logging it is deliberately disabled.
                    # message = u'{name} : Could not run : {reason}'.format(
                    #     name=name, reason=message)
                    # LOGGER.info(message)
                    pass
        else:
            # Skipped; the reason is in ``run_message`` (logging disabled).
            # LOGGER.info(run_message)
            pass
    self.debug_layer(layer, add_to_datastore=False)
def open_state_machine(path=None, recent_opened_notification=False):
    """Open a state machine from the respective file system path.

    :param str path: file system path to the state machine; if ``None``
        the user is asked to pick a folder interactively.
    :param bool recent_opened_notification: flag that indicates that this
        call should also update the recently-opened list.
    :rtype: rafcon.core.state_machine.StateMachine
    :return: opened state machine, or ``None`` on abort/failure.
    """
    start_time = time.time()
    if path is None:
        # No path given: ask the user to choose a folder.
        if interface.open_folder_func is None:
            logger.error("No function defined for opening a folder")
            return
        load_path = interface.open_folder_func("Please choose the folder of the state machine")
        if load_path is None:
            # User aborted the dialog.
            return
    else:
        load_path = path
    if state_machine_manager.is_state_machine_open(load_path):
        # Already open: select the existing instance instead of reloading.
        logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path))
        sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
        gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id
        return state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
    state_machine = None
    try:
        state_machine = storage.load_state_machine_from_path(load_path)
        state_machine_manager.add_state_machine(state_machine)
        if recent_opened_notification:
            global_runtime_config.update_recently_opened_state_machines_with(state_machine)
        duration = time.time() - start_time
        stat = state_machine.root_state.get_states_statistics(0)
        # BUGFIX: use fixed-point ``{0:.2f}``; the former ``{0:.2}`` printed
        # two *significant* digits, e.g. ``1.2e+01s`` for durations >= 10s.
        logger.info("It took {0:.2f}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1]))
    except (AttributeError, ValueError, IOError) as e:
        logger.error('Error while trying to open state machine: {0}'.format(e))
    return state_machine
def is_intersect(line_a, line_b):
    """Determine whether line_a intersects line_b.

    :param line_a:
    :type line_a: models.Line
    :param line_b:
    :type line_b: models.Line
    :return: True if the two segments intersect.
    :rtype: bool
    """
    a1, a2 = line_a.endpoint_a, line_a.endpoint_b
    b1, b2 = line_b.endpoint_a, line_b.endpoint_b
    # The four orientations needed for the general and special cases.
    o1 = orientation(a1, a2, b1)
    o2 = orientation(a1, a2, b2)
    o3 = orientation(b1, b2, a1)
    o4 = orientation(b1, b2, a2)
    # General case: each segment's endpoints straddle the other segment.
    if o1 != o2 and o3 != o4:
        return True
    # Special cases: a collinear endpoint lying on the other segment.
    collinear_checks = (
        (o1, a1, b1, a2),
        (o2, a1, b2, a2),
        (o3, b1, a1, b2),
        (o4, b1, a2, b2),
    )
    return any(orient == 0 and on_segment(p, q, r)
               for orient, p, q, r in collinear_checks)
def _import(self, record_key, record_data, overwrite=True, last_modified=0.0, **kwargs):
    '''a helper method for other storage clients to import into appdata

    :param record_key: string with key for record
    :param record_data: byte data for body of record
    :param overwrite: [optional] boolean to overwrite existing records
    :param last_modified: [optional] float to record last modified date
    :param kwargs: [optional] keyword arguments from other import methods
    :return: boolean indicating whether record was imported
    :raises ValueError: if the record exceeds the maximum record size
    :raises DropboxConnectionError: if the upload request fails
    '''
    title = '%s._import' % self.__class__.__name__

    # check overwrite
    if not overwrite:
        if self.exists(record_key):
            return False

    # check max size
    import sys
    record_max = self.fields.metadata['record_max_bytes']
    record_size = sys.getsizeof(record_data)
    error_prefix = '%s(record_key="%s", record_data=b"...")' % (title, record_key)
    if record_size > record_max:
        raise ValueError('%s exceeds maximum record data size of %s bytes.' % (error_prefix, record_max))

    # TODO: apply session upload for files greater than record_max

    # construct upload kwargs
    upload_kwargs = {
        'f': record_data,
        'path': '/%s' % record_key,
        'mute': True,
        'mode': self.objects.WriteMode.overwrite
    }

    # modify file time: .drep records get a fixed epoch timestamp, other
    # records keep the caller-supplied modification time (if any)
    import re
    if re.search('\\.drep$', record_key):
        from labpack.records.time import labDT
        drep_time = labDT.fromEpoch(1)
        upload_kwargs['client_modified'] = drep_time
    elif last_modified:
        from labpack.records.time import labDT
        mod_time = labDT.fromEpoch(last_modified)
        upload_kwargs['client_modified'] = mod_time

    # send upload request
    try:
        self.dropbox.files_upload(**upload_kwargs)
    # BUGFIX: the previous bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors raised by the SDK call.
    except Exception:
        raise DropboxConnectionError(title)

    return True
def get_content_descendants_by_type(self, content_id, child_type, expand=None, start=None, limit=None, callback=None):
    """Returns the direct descendants of a piece of Content, limited to a single descendant type.

    The {@link ContentType}(s) of the descendants returned is specified by the "type" path parameter
    in the request.  Currently the only supported descendants are comment descendants of non-comment
    Content.

    :param content_id (string): A string containing the id of the content to retrieve descendants for
    :param child_type (string): A {@link ContentType} to filter descendants on.
    :param expand (string): OPTIONAL: A comma separated list of properties to expand on the descendants.
                            Default: Empty
    :param start (int): OPTIONAL: The index of the first item within the result set that should be
                        returned. Default: 0.
    :param limit (int): OPTIONAL: How many items should be returned after the start index.
                        Default: 25 or site limit.
    :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
                     Default: None (no callback, raw data returned).
    :return: The JSON data returned from the content/{id}/descendant/{type} endpoint, or the results of
             the callback. Will raise requests.HTTPError on bad input, potentially.
    """
    endpoint = "rest/api/content/{id}/descendant/{type}".format(id=content_id, type=child_type)
    params = {}
    if expand:
        params["expand"] = expand
    # start/limit are paging parameters; coerce to int when provided.
    for key, value in (("start", start), ("limit", limit)):
        if value is not None:
            params[key] = int(value)
    return self._service_get_request(endpoint, params=params, callback=callback)
def DbGetAliasAttribute(self, argin):
    """Get the attribute name from the given alias.

    If the given alias is not found in database, returns an empty string.

    :param argin: The attribute alias
    :type: tango.DevString
    :return: The attribute name (dev_name/att_name)
    :rtype: tango.DevString
    """
    self._log.debug("In DbGetAliasAttribute()")
    # NOTE(review): ``argin`` is indexed here, so it appears to arrive as
    # a sequence (e.g. DevVarStringArray) despite the DevString note in
    # the docstring -- confirm against the Tango command definition.
    alias_name = argin[0]
    return self.db.get_alias_attribute(alias_name)
def generate_secret_key():
    """Return a random 256-character alphanumeric secret key.

    Uses ``random.SystemRandom`` (OS entropy source), so the key is
    suitable for security-sensitive use.
    """
    import string
    import random
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(256))
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_action(self, **kwargs):
    """Auto Generated Code"""
    # Build the NETCONF payload:
    # config/ip/hide-ext-community-list-holder/extcommunity-list
    config = ET.Element("config")
    ip_el = ET.SubElement(config, "ip",
                          xmlns="urn:brocade.com:mgmt:brocade-common-def")
    holder = ET.SubElement(ip_el, "hide-ext-community-list-holder",
                           xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    ext_list = ET.SubElement(holder, "extcommunity-list")
    # List key identifying which extended-community list is addressed.
    num_key = ET.SubElement(ext_list, "extcommunity-list-num")
    num_key.text = kwargs.pop('extcommunity_list_num')
    action = ET.SubElement(ext_list, "ext-community-action")
    action.text = kwargs.pop('ext_community_action')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def construct_api_url(input, representation, resolvers=None, get3d=False, tautomers=False, xml=True, **kwargs):
    """Return the URL for the desired API endpoint.

    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(str) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :param bool xml: (Optional) Whether to return full XML response
    :returns: CIR API URL
    :rtype: str
    """
    # NOTE: ``input`` shadows the builtin but is part of the public
    # signature, so the name is kept.
    # File formats are requested as representation=file plus a ``format``
    # querystring parameter.
    if representation in FILE_FORMATS:
        kwargs['format'] = representation
        representation = 'file'
    # A 'tautomers:' prefix on the identifier requests all tautomers.
    if tautomers:
        input = 'tautomers:%s' % input
    segments = [API_BASE, quote(input), representation]
    if xml:
        segments.append('xml')
    url = '/'.join(segments)
    if resolvers:
        kwargs['resolver'] = ','.join(resolvers)
    if get3d:
        kwargs['get3d'] = True
    if kwargs:
        url = '%s?%s' % (url, urlencode(kwargs))
    return url
def h(self, *args, **kwargs):
    """Search, set, or print header values.

    * ``h()`` with no arguments and no keywords prints all headers.
    * ``h(n)`` with an integer returns the n'th header value.
    * ``h('frag')`` returns the value of the first header whose key
      equals or *contains* the supplied string.
    * Keyword arguments set header elements before any lookup.
    """
    # If no arguments, print everything
    if len(args) + len(kwargs) == 0:
        print("Headers")
        for n in range(len(self.hkeys)):
            print(' ' + str(n) + ': ' + str(self.hkeys[n]) + ' = ' + repr(self.h(n)))
        return

    # First loop over kwargs, if there are any, to set header elements.
    for k in list(kwargs.keys()):
        self.insert_header(k, kwargs[k])

    # Now search for a key if specified
    if len(args):
        # this can be shortened. Eventually, it'd be nice to get a tuple back!
        hkey = args[0]

        # if this is an index
        # BUGFIX: was ``type(hkey) in [int, int]`` -- a py2->py3 conversion
        # artifact of ``[int, long]`` with the entry duplicated.
        if type(hkey) is int:
            return self.headers[self.hkeys[hkey]]

        # if this is an exact match
        elif hkey in self.hkeys:
            return self.headers[hkey]

        # Look for a fragment.
        else:
            for k in self.hkeys:
                if k.find(hkey) >= 0:
                    return self.headers[k]

            print()
            print("ERROR: Couldn't find '" + str(hkey) + "' in header.")
            print("Possible values:")
            for k in self.hkeys:
                print(k)
            print()
            return None
def step(self, step, total, label='STEP', speed_label='STEPS/S', size=1):
    """Increase the step indicator, which is a sub progress circle of the
    actual main progress circle (epoch, progress() method).

    Updates are throttled: system info, speed and ETA are synced at most
    once per second, or when the final step of the epoch is reached.
    """
    self.lock.acquire()
    try:
        time_diff = time.time() - self.last_step_time
        if self.last_step > step:
            # it restarted (step counter went backwards -> new epoch/run)
            self.last_step = 0
        made_steps_since_last_call = step - self.last_step
        self.last_step = step
        # Accumulate steps (and samples = steps * size) until the next sync.
        self.made_steps_since_last_sync += made_steps_since_last_call
        self.made_steps_size_since_last_sync += made_steps_since_last_call * size
        if time_diff >= 1 or step == total:
            # only sync each second, or on the last batch
            self.set_system_info('step', step, True)
            self.set_system_info('steps', total, True)
            steps_per_second = self.made_steps_since_last_sync / time_diff
            samples_per_second = self.made_steps_size_since_last_sync / time_diff
            self.last_step_time = time.time()
            if size:
                self.report_speed(samples_per_second)
            # all batches
            epochs_per_second = steps_per_second / total
            self.set_system_info('epochsPerSecond', epochs_per_second, True)
            current_epochs = self.current_epoch if self.current_epoch else 1
            total_epochs = self.total_epochs if self.total_epochs else 1
            self.made_steps_since_last_sync = 0
            self.made_steps_size_since_last_sync = 0
            eta = 0
            if step < total:
                # time to end this epoch
                if steps_per_second != 0:
                    eta = (total - step) / steps_per_second
            # time until all epochs are done
            if total_epochs - current_epochs > 0:
                if epochs_per_second != 0:
                    eta += (total_epochs - (current_epochs)) / epochs_per_second
            self.git.store_file('aetros/job/times/eta.json', simplejson.dumps(eta))
            # Only re-publish labels when they actually changed.
            if label and self.step_label != label:
                self.set_system_info('stepLabel', label, True)
                self.step_label = label
            if speed_label and self.step_speed_label != speed_label:
                self.set_system_info('stepSpeedLabel', speed_label, True)
                self.step_speed_label = speed_label
    finally:
        self.lock.release()
def get_default_session(self):
    """The default session is nothing more than the first session added
    into the session handler pool.  This will likely change in the future,
    but for now each session identifies the domain and also manages
    domain switching within a single session.

    :rtype: Session
    """
    if not self._sessions:
        # No sessions registered yet: fall back to a fresh default session.
        return self.get_session()
    first_key = next(iter(self._sessions))
    return self.get_session(first_key)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.