signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def load_probe(name):
    """Load a probe layout, either from a PRB file path or by built-in name.

    :param name: path to an existing ``.prb`` file, or the name of a probe
        bundled in the ``probes/`` directory next to this module.
    :raises IOError: when no built-in probe of that name exists.
    """
    # A direct path to an existing file takes precedence; otherwise the
    # name is resolved against the bundled probes directory.
    if op.exists(name):
        path = name
    else:
        here = op.realpath(op.dirname(__file__))
        path = op.join(here, 'probes/{}.prb'.format(name))
        if not op.exists(path):
            raise IOError("The probe `{}` cannot be found.".format(name))
    return MEA(probe=_read_python(path))
def _solve_conflict(list_c, s2p, n_cluster):
    """Make sure sequences are counted once; resolve shared loci by
    most-vote, exclusion or bayes decision.

    :param list_c: dict of cluster objects
    :param s2p: dict of [loci].coverage = number of seqs
    :param n_cluster: number of clusters
    :return: dict with the new set of clusters
    """
    logger.debug("_solve_conflict: count once")
    if parameters.decision_cluster == "bayes":
        return decide_by_bayes(list_c, s2p)
    # Pairwise similarity (overlap) between clusters, most-overlapping
    # pair first.  (.iteritems: this is a Python 2 codebase.)
    loci_similarity = _calculate_similarity(list_c)
    loci_similarity = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True)
    # Total remaining overlap; loop until no pair of clusters shares sequences.
    common = sum([score for p, score in loci_similarity])
    while common > 0:
        n_cluster += 1
        logger.debug("_solve_conflict: ma %s" % loci_similarity)
        # Resolve the most-overlapping pair first.
        pairs = loci_similarity[0][0]
        score = loci_similarity[0][1]
        logger.debug("_solve_conflict: common %s, new %s" % (score, n_cluster))
        if parameters.decision_cluster.startswith("most-voted"):
            list_c = _split_cluster_by_most_vote(list_c, pairs)
        else:
            list_c = _split_cluster(list_c, pairs, n_cluster)
        # Drop clusters that lost all their loci in the split.
        list_c = {k: v for k, v in list_c.iteritems() if len(v.loci2seq) > 0}
        # Recompute overlap after the split and continue.
        loci_similarity = _calculate_similarity(list_c)
        loci_similarity = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True)
        common = sum([score for p, score in loci_similarity])
    logger.debug("_solve_conflict: solved clusters %s" % len(list_c.keys()))
    return list_c
def draw_multi_dispersion_chart(self, nan_locs):
    """Draw a multi-dimensional dispersion chart; each color corresponds
    to a different target variable."""
    for idx, (label, locations) in enumerate(nan_locs):
        # When explicit class labels were supplied, they take precedence
        # over the label carried alongside the NaN locations.
        if self.classes_ is not None:
            label = self.classes_[idx]
        xs, ys = zip(*locations)
        self.ax.scatter(xs, ys, alpha=self.alpha, marker=self.marker,
                        color=self.colors[idx], label=label)
def replace(self, new_node):
    """Replace the current node with *new_node*, first verifying the
    integrity of the node stack."""
    stack = self.nodestack
    current = stack.pop()
    parent_entry = stack[-1]
    slot = parent_entry[-1] - 1
    old_node, field_name = parent_entry[-2][slot]
    # Sanity check: the node being replaced must be both the tracked
    # current node and the child recorded in its parent's entry.
    assert current[0] is self.cur_node is old_node, (current[0], self.cur_node, parent_entry[-2], slot)
    parent = parent_entry[0]
    if isinstance(parent, list):
        parent[slot] = new_node
    else:
        setattr(parent, field_name, new_node)
def notes_placeholder(self):
    """Return the notes placeholder on this notes slide — the shape that
    contains the actual notes text — or |None| when none is present.
    That is uncommon, but can happen when the notes master has no body
    placeholder or the placeholder was deleted from the notes slide."""
    return next(
        (ph for ph in self.placeholders
         if ph.placeholder_format.type == PP_PLACEHOLDER.BODY),
        None,
    )
def _Rphideriv(self, R, phi=0., t=0.):
    """NAME:
       _Rphideriv
    PURPOSE:
       evaluate the mixed radial-azimuthal derivative
    INPUT:
       R - radius
       phi - azimuth
       t - time
    OUTPUT:
       d2Phi/dRdphi
    HISTORY:
       2016-06-02 - Written - Bovy (UofT)
    """
    # Delegate to the wrapped potential; the second positional argument is
    # 0. — presumably z=0 for a planar evaluation, confirm against the
    # wrapped potential's Rphideriv signature.
    return self._Pot.Rphideriv(R, 0., phi=phi, t=t, use_physical=False)
def translate_ref_to_url(self, ref, in_comment=None):
    """Translate an @see or @link reference to a URL.

    If the ref is of the form ``#methodName``, it looks for a method of
    that name on the class ``in_comment`` or parent class of method
    ``in_comment``.  In this case it returns a local hash URL, since the
    method is guaranteed to be on the same page:

    >>> doc = CodeBaseDoc(['examples'])
    >>> doc.translate_ref_to_url('#public_method', doc.all_methods['private_method'])
    '#public_method'
    >>> doc.translate_ref_to_url('#public_method', doc.all_classes['MySubClass'])
    '#public_method'

    If it doesn't find it there, it looks for a global function:

    >>> doc.translate_ref_to_url('#make_class')
    'module_closure.html#make_class'

    A reference of the form ClassName#method_name looks up a specific method:

    >>> doc.translate_ref_to_url('MyClass#first_method')
    'class.html#first_method'

    Finally, a reference of the form ClassName looks up a specific class:

    >>> doc.translate_ref_to_url('MyClass')
    'class.html#MyClass'
    """
    if ref.startswith('#'):
        method_name = ref[1:]
        # Pick the class to search: the owning class of a method comment,
        # or the class comment itself.
        if isinstance(in_comment, FunctionDoc) and in_comment.member:
            search_in = self.all_classes[in_comment.member]
        elif isinstance(in_comment, ClassDoc):
            search_in = in_comment
        else:
            search_in = None
        try:
            # Same-page method: return its local hash URL directly.
            return search_in.get_method(method_name).url
        except AttributeError:
            # search_in was None or has no such method — fall through to a
            # global function lookup below.
            pass
        def lookup_ref(file_doc):
            for fn in file_doc.functions:
                if fn.name == method_name:
                    return fn.url
            return None
    elif '#' in ref:
        # ClassName#method_name form.
        class_name, method_name = ref.split('#')
        def lookup_ref(file_doc):
            for cls in file_doc.classes:
                if cls.name == class_name:
                    try:
                        return cls.get_method(method_name).url
                    except AttributeError:
                        pass
            return None
    else:
        # Bare ClassName form.
        class_name = ref
        def lookup_ref(file_doc):
            for cls in file_doc.classes:
                if cls.name == class_name:
                    return cls.url
            return None
    # Search every known file doc with whichever lookup was defined above;
    # cross-page results are prefixed with the containing file's URL.
    for file_doc in list(self.values()):
        url = lookup_ref(file_doc)
        if url:
            return file_doc.url + url
    return ''
def parseLayoutFeatures(font):
    """Parse OpenType layout features in the UFO and return a
    feaLib.ast.FeatureFile instance."""
    featxt = tounicode(font.features.text or "", "utf-8")
    if not featxt:
        # No feature text at all: return an empty feature file.
        return ast.FeatureFile()
    buf = UnicodeIO(featxt)
    # The path is used by the lexer to resolve 'include' statements and to
    # print the file name in error messages.  Per the UFO spec, this should
    # be the path of the UFO itself, not the inner features.fea:
    # https://github.com/unified-font-object/ufo-spec/issues/55
    ufoPath = font.path
    if ufoPath is not None:
        buf.name = ufoPath
    glyphNames = set(font.keys())
    try:
        parser = Parser(buf, glyphNames)
        doc = parser.parse()
    except IncludedFeaNotFound as e:
        # If the missing include resolves relative to the UFO root, the
        # author used the (wrong) features.fea-relative form — say so
        # before re-raising.
        if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])):
            logger.warning("Please change the file name in the include(...); "
                           "statement to be relative to the UFO itself, "
                           "instead of relative to the 'features.fea' file "
                           "contained in it.")
        raise
    return doc
def get_importer(cls, name):
    """Return an importer instance registered under *name* for *cls*.

    :param cls: class to import
    :type cls: :class:`type`
    :param name: registered name of importer
    :type name: :class:`str`
    :return: an importer instance of the given type
    :rtype: :class:`Importer`
    :raises TypeError: if importer cannot be found
    """
    try:
        candidates = importer_index[name]
    except KeyError:
        raise TypeError(("importer type '%s' is not registered: " % name)
                        + ("registered types: %r" % sorted(importer_index.keys())))
    # First registered base class that cls derives from wins.
    for base_class in candidates:
        if issubclass(cls, base_class):
            return candidates[base_class](cls)
    raise TypeError("importer type '%s' for a %r is not registered" % (name, cls))
def cdf_info(self):
    """Return a dictionary with basic information about this CDF.

    Keys: 'CDF' (file name), 'Version', 'Encoding' (endianness),
    'Majority' (row/column), 'zVariables' / 'rVariables' (number->name
    dicts), 'Attributes' (number -> name/scope), 'Copyright', 'Checksum'
    indicator, 'Num_rdim' and 'rDim_sizes' (rVariables only),
    'Compressed' (file-level compression), plus 'LeapSecondUpdated'
    (last leap-second table update) for CDF version 3+.
    """
    rvars, zvars = self._get_varnames()
    info = {
        'CDF': self.file,
        'Version': self._version,
        'Encoding': self._encoding,
        'Majority': self._majority,
        'rVariables': rvars,
        'zVariables': zvars,
        'Attributes': self._get_attnames(),
        'Copyright': self._copyright,
        'Checksum': self._md5,
        'Num_rdim': self._num_rdim,
        'rDim_sizes': self._rdim_sizes,
        'Compressed': self._compressed,
    }
    # The leap-second field only exists in version 3+ files.
    if self.cdfversion > 2:
        info['LeapSecondUpdated'] = self._leap_second_updated
    return info
def Difference(left: vertex_constructor_param_types,
               right: vertex_constructor_param_types,
               label: Optional[str] = None) -> Vertex:
    """Subtracts one vertex from another.

    :param left: the vertex that will be subtracted from
    :param right: the vertex to subtract
    :param label: optional label for the resulting vertex
    :return: a Double vertex representing ``left - right``
    """
    # Both operands are coerced to double vertices before being handed to
    # the JVM-side DifferenceVertex constructor.
    return Double(context.jvm_view().DifferenceVertex, label,
                  cast_to_double_vertex(left), cast_to_double_vertex(right))
def timeout(self, duration=3600):
    """Time out the uploader of this file.

    :param duration: timeout length — presumably seconds (default 3600,
        i.e. one hour); confirm against the remote "timeoutFile" API.
    """
    # Only the room owner may time out an uploader; raises otherwise.
    self.room.check_owner()
    self.conn.make_call("timeoutFile", self.fid, duration)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """Convert notional amounts of tradeable instruments into integer
    numbers of instrument contracts.

    Parameters
    ----------
    instruments : pandas.Series
        Instrument holdings; index is instrument name, values are the
        notional amount on the instrument.
    prices : pandas.Series
        Instrument prices; index must be a superset of instruments.index.
    multipliers : pandas.Series
        Contract multipliers; index must be a superset of instruments.index.
    desired_ccy : str, optional
        Three-letter currency to convert notional values to (e.g. 'USD').
        When None, currency conversion is skipped.
    instr_fx : pandas.Series, optional
        Per-instrument currency denominations; index should match
        prices.index.
    fx_rates : pandas.Series, optional
        FX rates used for conversion to desired_ccy, indexed by pair name
        (e.g. 'AUDUSD').
    rounder : callable, optional
        Function rounding a pd.Series of contracts to integers;
        pd.Series.round is used when None.

    Returns
    -------
    pandas.Series
        Integer contract counts indexed by instrument name.
    """
    # False -> convert notional to contracts (as opposed to the reverse).
    notional = _instr_conv(instruments, prices, multipliers, False,
                           desired_ccy, instr_fx, fx_rates)
    round_fn = pd.Series.round if rounder is None else rounder
    return round_fn(notional).astype(int)
def get_traindata(self) -> np.ndarray:
    """Pull all available data arrays and concatenate them for model
    training, skipping the .mat metadata entries.

    :return: 2d array of points whose 5th column (index 4) is non-zero,
        or None when no data entries exist.
    """
    meta_keys = ('__header__', '__version__', '__globals__')
    # Keep only the rows whose label column (index 4) is non-zero.
    chunks = [value[np.where(value[:, 4] != 0)]
              for key, value in self.data.items() if key not in meta_keys]
    if not chunks:
        return None
    return chunks[0] if len(chunks) == 1 else np.concatenate(chunks)
def iteration(self, node_status=True):
    """Execute a single model iteration.

    :param node_status: when True, include the per-node status delta in
        the returned dict; otherwise return an empty "status" mapping.
    :return: dict with keys "iteration", "status", "node_count" and
        "status_delta".
    """
    self.clean_initial_status(self.available_statuses.values())
    # Snapshot of the current statuses; rules below write into the copy so
    # all transitions in one iteration read the same pre-iteration state.
    actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
    if self.actual_iteration == 0:
        # Iteration 0 just reports the initial configuration.
        self.actual_iteration += 1
        delta, node_count, status_delta = self.status_delta(actual_status)
        if node_status:
            return {"iteration": 0, "status": actual_status.copy(),
                    "node_count": node_count.copy(), "status_delta": status_delta.copy()}
        else:
            return {"iteration": 0, "status": {},
                    "node_count": node_count.copy(), "status_delta": status_delta.copy()}
    for u in self.graph.nodes():
        u_status = self.status[u]
        # Compartments are evaluated in order; the first rule whose source
        # status matches and whose condition fires wins for this node.
        for i in range(0, self.compartment_progressive):
            if u_status == self.available_statuses[self.compartment[i][0]]:
                rule = self.compartment[i][2]
                test = rule.execute(node=u, graph=self.graph, status=self.status,
                                    status_map=self.available_statuses, params=self.params)
                if test:
                    # Transition the node to the rule's target status.
                    actual_status[u] = self.available_statuses[self.compartment[i][1]]
                    break
    delta, node_count, status_delta = self.status_delta(actual_status)
    self.status = actual_status
    self.actual_iteration += 1
    if node_status:
        return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
                "node_count": node_count.copy(), "status_delta": status_delta.copy()}
    else:
        return {"iteration": self.actual_iteration - 1, "status": {},
                "node_count": node_count.copy(), "status_delta": status_delta.copy()}
def _get_offset ( text , visible_width , unicode_aware = True ) :
"""Find the character offset within some text for a given visible offset ( taking into account the
fact that some character glyphs are double width ) .
: param text : The text to analyze
: param visible _ width : The required location within that text ( as seen on screen ) .
: return : The offset within text ( as a character offset within the string ) .""" | result = 0
width = 0
if unicode_aware :
for c in text :
if visible_width - width <= 0 :
break
result += 1
width += wcwidth ( c )
if visible_width - width < 0 :
result -= 1
else :
result = min ( len ( text ) , visible_width )
return result |
def get_form_kwargs(self):
    """Return the keyword arguments to provide to the associated form."""
    kwargs = super(ModelFormMixin, self).get_form_kwargs()
    # Expose the object being edited to the form under the 'poll' kwarg.
    kwargs['poll'] = self.object
    return kwargs
def run(X_train, X_test, y_train, y_test, PARAMS):
    '''Train model and predict result, reporting the r2 score to NNI.

    NOTE(review): relies on module-level ``model``, ``LOG``, ``r2_score``
    and ``nni``.  PARAMS is accepted but unused here — presumably consumed
    where ``model`` is constructed; confirm with the caller.
    '''
    model.fit(X_train, y_train)
    predict_y = model.predict(X_test)
    score = r2_score(y_test, predict_y)
    LOG.debug('r2 score: %s' % score)
    # Report the final metric of this trial to the NNI tuner.
    nni.report_final_result(score)
def export_to_dict(session, recursive, back_references, include_defaults):
    """Export databases and druid clusters to a dictionary."""
    logging.info('Starting export')

    def _export(obj):
        # Both Database and DruidCluster share the same export signature.
        return obj.export_to_dict(recursive=recursive,
                                  include_parent_ref=back_references,
                                  include_defaults=include_defaults)

    databases = [_export(database) for database in session.query(Database)]
    logging.info('Exported %d %s', len(databases), DATABASES_KEY)
    clusters = [_export(cluster) for cluster in session.query(DruidCluster)]
    logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)
    data = dict()
    # Empty sections are omitted entirely rather than exported as [].
    if databases:
        data[DATABASES_KEY] = databases
    if clusters:
        data[DRUID_CLUSTERS_KEY] = clusters
    return data
def save(self, path, compress=True):
    """Write the ``.proteins`` and ``.peptides`` entries to disk as a
    ``proteindb`` file.

    .. note::
        If a ``proteindb`` file is already present in the specified path
        it is replaced, otherwise a new file is generated.

    :param path: directory the ``proteindb`` file is written to; the file
        name is taken from ``self.info['name']``.
    :param compress: bool, True to use zip file compression.
    """
    target = aux.joinpath(path, self.info['name'] + '.proteindb')
    # NOTE(review): PartiallySafeReplace presumably defers replacing any
    # existing file until the write completed successfully — confirm in aux.
    with aux.PartiallySafeReplace() as msr:
        with msr.open(target, mode='w+b') as openfile:
            self._writeContainer(openfile, compress=compress)
def _set_camera_properties(self, msg):
    """Set the camera intrinsics from an info msg."""
    # msg.K holds the 3x3 intrinsic matrix in row-major order (ROS
    # CameraInfo convention): K[0]=fx, K[4]=fy, K[2]=cx, K[5]=cy.
    fx, fy = msg.K[0], msg.K[4]
    cx, cy = msg.K[2], msg.K[5]
    self._camera_intr = CameraIntrinsics(self._frame, fx, fy, cx, cy,
                                         height=msg.height, width=msg.width)
def _do_cb(self, cb, error_cb, *args, **kw):
    """Called internally by callback(). Does cb and error_cb selection."""
    try:
        res = self.work(*args, **kw)
    except Exception as e:
        if error_cb is None:
            # No error handler supplied: fall back to the default display.
            show_err()
        elif error_cb:
            error_cb(e)
        # NOTE(review): a falsy-but-not-None error_cb silently swallows the
        # exception — presumably the deliberate "ignore errors" mode; confirm.
    else:
        # Success, let's call away!
        cb(res)
def hex2web(hex):
    """Converts HEX representation to WEB.

    :param hex: 3 hex char or 6 hex char string representation
    :rtype: web string representation (human readable if possible)

    WEB representation uses X11 rgb.txt to define conversion between RGB
    and english color names.

    Usage
    =====

    >>> from colour import hex2web
    >>> hex2web('#ff0000')
    'red'
    >>> hex2web('#aaaaaa')
    '#aaa'
    >>> hex2web('#abc')
    '#abc'
    >>> hex2web('#acacac')
    '#acacac'
    """
    dec_rgb = tuple(int(v * 255) for v in hex2rgb(hex))
    if dec_rgb in RGB_TO_COLOR_NAMES:
        # Take the first registered name for this RGB triplet.
        color_name = RGB_TO_COLOR_NAMES[dec_rgb][0]
        # Multi-word names (more than one capital letter) keep their
        # casing; single-word names are forced to lowercase.
        if len(re.sub(r"[^A-Z]", "", color_name)) > 1:
            return color_name
        return color_name.lower()
    # hex2rgb already validated the format, so hex is '#rgb' or '#rrggbb'.
    if len(hex) == 7 and hex[1::2] == hex[2::2]:
        # Doubled digits collapse to the short '#rgb' form.
        return '#' + hex[1] + hex[3] + hex[5]
    return hex
def communicate(self):
    """Block until the subprocess finishes, then collect, parse and emit
    its output.

    Returns either ``[stdout_bytes, stderr_bytes]`` or, when JSON parsing
    is enabled, ``(parsed_or_error_dict, error_string_or_None)``.
    """
    self._communicate_first = True
    self._process.waitForFinished()
    # Prefer any stdout captured incrementally; otherwise drain the
    # process buffer now.
    if self._partial_stdout is None:
        raw_stdout = self._process.readAllStandardOutput()
        stdout = handle_qbytearray(raw_stdout, _CondaAPI.UTF8)
    else:
        stdout = self._partial_stdout
    raw_stderr = self._process.readAllStandardError()
    stderr = handle_qbytearray(raw_stderr, _CondaAPI.UTF8)
    result = [stdout.encode(_CondaAPI.UTF8), stderr.encode(_CondaAPI.UTF8)]
    # FIXME: Why does anaconda client print to stderr???
    if PY2:
        stderr = stderr.decode()
    # The anaconda client banner on stderr is benign; anything else is
    # logged against the command that produced it.
    if 'using anaconda' not in stderr.lower():
        if stderr.strip() and self._conda:
            logger.error('{0}:\nSTDERR:\n{1}\nEND'.format(' '.join(self._cmd_list), stderr))
        elif stderr.strip() and self._pip:
            logger.error("pip error: {}".format(self._cmd_list))
    # Stderr has been handled above; clear it from the result pair.
    result[-1] = ''
    if self._parse and stdout:
        try:
            result = json.loads(stdout), result[-1]
        except Exception as error:
            # Not valid JSON: hand back the raw text plus the parse error.
            result = stdout, str(error)
        if 'error' in result[0]:
            # Normalize non-dict error payloads into a dict, then build a
            # command-prefixed error string.
            if not isinstance(result[0], dict):
                result = {'error': str(result[0])}, None
            error = '{0}: {1}'.format(" ".join(self._cmd_list), result[0]['error'])
            result = result[0], error
    if self._callback:
        result = self._callback(result[0], result[-1], **self._extra_kwargs), result[-1]
    self._result = result
    self.sig_finished.emit(self, result[0], result[-1])
    if result[-1]:
        logger.error(str(('error', result[-1])))
    self._fired = True
    return result
def regress(self, method='lstsq'):
    """Perform linear least-squares regression of the design matrix on
    the data.

    :param method: backend to be used for the regression analysis.
    :type method: string, one of ['lstsq', 'sm_ols']
    :returns: None; creates instance variables 'betas'
        (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples).
    """
    # BUG FIX: the original compared strings with 'is', which tests object
    # identity rather than equality.  That only worked by virtue of CPython
    # string interning and raises SyntaxWarning on Python >= 3.8.
    if method == 'lstsq':
        self.betas, residuals_sum, rank, s = LA.lstsq(self.design_matrix.T,
                                                      self.resampled_signal.T)
        self.residuals = self.resampled_signal - self.predict_from_design_matrix(self.design_matrix)
    elif method == 'sm_ols':
        import statsmodels.api as sm
        assert self.resampled_signal.shape[0] == 1, \
            'signal input into statsmodels OLS cannot contain multiple signals at once, present shape %s' % str(self.resampled_signal.shape)
        model = sm.OLS(np.squeeze(self.resampled_signal), self.design_matrix.T)
        results = model.fit()
        # Reshape betas and residuals to be compatible with the LA.lstsq path.
        self.betas = np.array(results.params).reshape(
            (self.design_matrix.shape[0], self.resampled_signal.shape[0]))
        self.residuals = np.array(results.resid).reshape(self.resampled_signal.shape)
    self.logger.debug('performed %s regression on %s design_matrix and %s signal'
                      % (method, str(self.design_matrix.shape), str(self.resampled_signal.shape)))
def encode(self, encoding='utf-8', errors='strict'):
    """Returns bytes.

    Encode S using the codec registered for encoding. Default encoding
    is 'utf-8'. errors may be given to set a different error handling
    scheme. Default is 'strict' meaning that encoding errors raise a
    UnicodeEncodeError. Other possible values are 'ignore', 'replace'
    and 'xmlcharrefreplace' as well as any other name registered with
    codecs.register_error that can handle UnicodeEncodeErrors.
    """
    from future.types.newbytes import newbytes
    # Py2 unicode.encode() takes encoding and errors as optional
    # positional parameters, not keyword arguments as in Python 3 str.
    #
    # For the surrogateescape error handling mechanism, the
    # codecs.register_error() function seems to be inadequate for an
    # implementation of it when encoding.  (Decoding seems fine, however.)
    # For example, in the case of
    #     u'\udcc3'.encode('ascii', 'surrogateescape_handler')
    # after registering the ``surrogateescape_handler`` function in
    # future.utils.surrogateescape, both Python 2.x and 3.x raise an
    # exception anyway after the function is called, because the unicode
    # string it has to return isn't encodable strictly as ASCII.
    if errors == 'surrogateescape':
        if encoding == 'utf-16':
            # Known to fail here. See test_encoding_works_normally().
            raise NotImplementedError('FIXME: surrogateescape handling is '
                                      'not yet implemented properly')
        # Encode char by char, building up a list of byte-strings.
        mybytes = []
        for c in self:
            code = ord(c)
            # NOTE(review): the guard accepts the whole 0xD800-0xDCFF range
            # but subtracts 0xDC00, which only yields a valid byte for the
            # low surrogates 0xDC00-0xDCFF that surrogateescape produces —
            # confirm whether 0xD800-0xDBFF is intentionally included.
            if 0xD800 <= code <= 0xDCFF:
                mybytes.append(newbytes([code - 0xDC00]))
            else:
                mybytes.append(c.encode(encoding=encoding))
        return newbytes(b'').join(mybytes)
    return newbytes(super(newstr, self).encode(encoding, errors))
async def mailed_confirm(self, **params):
    """Record news for a buyer after an offer is received.

    Accepts:
        - cid
        - buyer_address
        - price
        - offer_type
        - point
        - coinid
    :return: ``{"result": "ok"}`` on success, or an
        ``{"error": 400, "reason": ...}`` dict when required fields are
        missing.
    """
    if not params:
        return {"error": 400, "reason": "Missed required fields"}
    # Check if required fields exist.
    cid = params.get("cid")
    buyer_address = params.get("buyer_address")
    price = params.get("price")
    offer_type = params.get("offer_type")
    coinid = params.get("coinid")
    # BUG FIX: the original called params.get("coinid").upper() before any
    # presence check, raising AttributeError whenever "coinid" was omitted.
    if coinid is not None:
        # Normalize and strip the testnet marker (str.replace never raises,
        # so the original try/except around it was unnecessary).
        coinid = coinid.upper().replace("TEST", "")
    if not all([cid, buyer_address, price]):
        return {"error": 400, "reason": "Missed required fields"}
    # NOTE(review): the owner lookup via the coin bridge and the seller
    # e-mail notification were commented out in the original and remain
    # disabled here.
    # Send news for the seller about the buyer.
    buyer = await getaccountbywallet(wallet=buyer_address)
    if "error" in buyer.keys():
        buyer["public_key"] = None
    newsdata = {
        "event_type": "made offer",
        "cid": cid,
        "access_string": buyer["public_key"],
        "buyer_pubkey": buyer["public_key"],
        "buyer_address": buyer_address,
        "price": price,
        "offer_type": offer_type,
        "coinid": coinid,
    }
    await self.insert_news(**newsdata)
    return {"result": "ok"}
def get_species(taxdump_file, select_divisions=None, exclude_divisions=None, nrows=None):
    """Get a dataframe with species information.

    :param taxdump_file: path to an NCBI taxdump tar archive containing
        ``names.dmp``.
    :param select_divisions: divisions to include (mutually exclusive with
        exclude_divisions).
    :param exclude_divisions: divisions to drop.
    :param nrows: optional row limit when reading names.dmp (for testing).
    :return: DataFrame indexed by ``taxon_id`` with columns
        ``scientific_name`` and ``common_names`` ('|'-joined).
    """
    if select_divisions and exclude_divisions:
        raise ValueError('Cannot specify "select_divisions" and '
                         '"exclude_divisions" at the same time.')
    select_taxon_ids = _get_species_taxon_ids(taxdump_file,
                                              select_divisions=select_divisions,
                                              exclude_divisions=exclude_divisions)
    select_taxon_ids = set(select_taxon_ids)
    with tarfile.open(taxdump_file) as tf:
        with tf.extractfile('names.dmp') as fh:
            df = pd.read_csv(fh, header=None, sep='|', encoding='ascii', nrows=nrows)
    # only keep information we need: taxon id, name, name class
    df = df.iloc[:, [0, 1, 3]]
    # only keep the selected species
    df = df.loc[df.iloc[:, 0].isin(select_taxon_ids)]
    # remove tab characters flanking each "name class" entry
    df.iloc[:, 2] = df.iloc[:, 2].str.strip('\t')
    # select only "scientific name" and "common name" rows
    df = df.loc[df.iloc[:, 2].isin(['scientific name', 'common name'])]
    # remove tab characters flanking each "name" entry
    df.iloc[:, 1] = df.iloc[:, 1].str.strip('\t')
    # collapse all common names for each taxon id
    common_names = defaultdict(list)
    cn = df.loc[df.iloc[:, 2] == 'common name']
    for _, row in cn.iterrows():
        common_names[row.iloc[0]].append(row.iloc[1])
    # build the final dataframe (this is very slow)
    sn = df.loc[df.iloc[:, 2] == 'scientific name']
    species = []
    for i, row in sn.iterrows():
        species.append([row.iloc[0], row.iloc[1], '|'.join(common_names[row.iloc[0]])])
    species_df = pd.DataFrame(species).set_index(0)
    species_df.columns = ['scientific_name', 'common_names']
    species_df.index.name = 'taxon_id'
    return species_df
def _timer(self, state_transition_event=None):
    """Timer loop tracking time while roasting or cooling.

    When the remaining time reaches zero, the supplied state-transition
    event is set if given; otherwise the roaster is switched to idle.
    Runs until the teardown flag is raised.
    """
    while not self._teardown.value:
        if self.get_roaster_state() in ('roasting', 'cooling'):
            time.sleep(1)
            self.total_time += 1
            if self.time_remaining > 0:
                self.time_remaining -= 1
            elif state_transition_event is not None:
                state_transition_event.set()
            else:
                self.idle()
        else:
            # Poll quickly while inactive so a state change is noticed fast.
            time.sleep(0.01)
def find_unique(self, product_type, short_name, include_hidden=False):
    """Find the unique provider of a product by its short name.

    Ensures the product is provided by exactly one tile (this tile or one
    of its dependencies), raising BuildError otherwise.

    Args:
        product_type (str): type of product we are looking for, e.g.
            firmware_image, library.
        short_name (str): short name of the product, usually its
            os.path.basename().
        include_hidden (bool): also return products that are hidden and
            not selected as visible in this tile's module settings
            depends section. Defaults to False.

    Returns:
        ProductInfo: the one unique provider of this product.
    """
    matches = self.find_all(product_type, short_name, include_hidden)
    if not matches:
        raise BuildError("Could not find product by name in find_unique",
                         name=short_name, type=product_type)
    if len(matches) > 1:
        raise BuildError("Multiple providers of the same product in find_unique",
                         name=short_name, type=product_type, products=matches)
    product = matches[0]
    # Record the resolution when dependency tracking is enabled.
    if self._tracking:
        self._resolved_products.append(product)
    return product
def repr_parameter(param: inspect.Parameter) -> str:
    """Provide a ``repr``-style representation of a function parameter.

    :param param: the parameter to describe.
    :return: a string such as
        ``Parameter(name=x, annotation=int, kind=POSITIONAL_OR_KEYWORD, default=1)``.
    """
    # BUG FIX: the original format string was missing the closing
    # parenthesis, producing unbalanced output like "Parameter(name=x, ...".
    return ("Parameter(name={name}, annotation={annotation}, kind={kind}, "
            "default={default})".format(name=param.name,
                                        annotation=param.annotation,
                                        kind=param.kind,
                                        default=param.default))
def format_ft_def(func, full_name: str = None) -> str:
    "Format and link `func` definition to show in documentation"
    # NOTE(review): the annotation says -> str but the function actually
    # returns a 2-tuple of strings (name, name+signature) — confirm callers.
    sig = inspect.signature(func)
    name = f'<code>{full_name or func.__name__}</code>'
    # Drop the implicit self/cls parameters from the rendered signature.
    # (The comprehension's `name` variable is scoped to the comprehension
    # in Python 3, so it does not clobber the outer `name`.)
    fmt_params = [format_param(param) for name, param in sig.parameters.items()
                  if name not in ('self', 'cls')]
    arg_str = f"({', '.join(fmt_params)})"
    if sig.return_annotation and (sig.return_annotation != sig.empty):
        arg_str += f" → {anno_repr(sig.return_annotation)}"
    # Link fastai types to their own documentation pages.
    if is_fastai_class(type(func)):
        arg_str += f" :: {link_type(type(func))}"
    f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name
    return f'{f_name}', f'{name}{arg_str}'
def alwaysCalledWithExactly(self, *args, **kwargs):  # pylint: disable=invalid-name
    """Return True when the given args/kwargs are the ONLY fully matched
    args/kwargs across every previous call.

    Eg.
        f(1, 2, 3)
        spy.alwaysCalledWithExactly(1, 2, 3) -> True  (fully matched)
        f(1, 2, 4)
        spy.alwaysCalledWithExactly(1, 2, 3) -> False (not fully matched)

    Return: Boolean
    """
    self.__remove_args_first_item()
    # bool(x) is equivalent to the original "True if x else False" form.
    if args and kwargs:
        return bool(uch.tuple_in_list_always(self.args, args)
                    and uch.dict_in_list_always(self.kwargs, kwargs))
    if args:
        return bool(uch.tuple_in_list_always(self.args, args))
    if kwargs:
        return bool(uch.dict_in_list_always(self.kwargs, kwargs))
    # Neither args nor kwargs supplied: report the usage error.
    ErrorHandler.called_with_empty_error()
def set_data_path(self, pth):
    """Set the location of the measures data directory.

    :param pth: absolute path to the measures data directory; silently
        ignored when the path does not exist.
    :raises IOError: when the path exists but lacks a data/geodetic
        subdirectory.
    """
    if not os.path.exists(pth):
        return
    if not os.path.exists(os.path.join(pth, 'data', 'geodetic')):
        raise IOError("The given path doesn't contain a 'data' "
                      "subdirectory")
    # AIPSPATH expects three space-separated fields; only the first is used.
    os.environ["AIPSPATH"] = "%s dummy dummy" % pth
def get_meter(id=None, name=None, maxS=2, maxW=2, splitheavies=0,
              constraints=DEFAULT_CONSTRAINTS, return_dict=False):
    """Return a cached or newly-built Meter for the given configuration.

    Example of the config dict this function assembles::

        {'constraints': ['footmin-w-resolution/1',
                         'footmin-f-resolution/1',
                         'strength.w=>-p/1',
                         'headedness!=rising/1',
                         'number_feet!=5/1'],
         'id': 'iambic_pentameter',
         'maxS': 2,
         'maxW': 2,
         'name': 'Iambic Pentameter',
         'splitheavies': 0}
    """
    # If a Meter instance was passed in, return it unchanged (string check
    # avoids importing Meter's module just for isinstance).
    if 'Meter.Meter' in str(id.__class__):
        return id
    if not id:
        id = 'Meter_%s' % now()
    if not name:
        name = id + '[' + ' '.join(constraints) + ']'
    # NOTE: this snapshot of locals() *is* the config dict -- it must be
    # taken before any further local names (e.g. `prosodic`) are created.
    config = locals()
    import prosodic
    # Reuse an already-registered meter with the same id when available.
    if id in prosodic.config['meters']:
        return prosodic.config['meters'][id]
    if return_dict:
        return config
    return Meter(config)
def remove_handler(self, handler):
    """Detach active handler from the session.

    `handler`
        Handler to remove
    """
    super(Session, self).remove_handler(handler)
    # With one handler fewer, give another waiting connection a chance to
    # take over, then stop the keep-alive heartbeat for this session.
    self.promote()
    self.stop_heartbeat()
def empty(val):
    """Checks if value is empty.

    Only non-empty strings are considered non-empty; ``None`` and all
    unknown data types are considered empty values.

    @return: bool
    """
    # Identity check is the idiomatic (and safe) None test -- `== None`
    # could be fooled by a custom __eq__.
    if val is None:
        return True
    if isinstance(val, str) and len(val) > 0:
        return False
    return True
def check_unique(self):
    """Check the user's email is unique."""
    matches = yield self.view.values(key=self.email)
    current_id = getattr(self, 'id', None)
    # Any other (truthy) user id registered under this email is a conflict.
    conflicting = {uid for uid in matches if uid and uid != current_id}
    if conflicting:
        raise exceptions.ValidationError(
            "User with email '{}' already exists".format(self.email))
def scale_and_crop_with_subject_location(im, size, subject_location=False,
                                         zoom=None, crop=False, upscale=False,
                                         **kwargs):
    """Like ``easy_thumbnails.processors.scale_and_crop``, but will use the
    coordinates in ``subject_location`` to make sure that that part of the
    image is in the center or at least somewhere on the cropped image.
    Please note that this does *not* work correctly if the image has been
    resized by a previous processor (e.g ``autocrop``).
    ``crop`` needs to be set for this to work, but any special cropping
    parameters will be ignored.
    """
    subject_location = normalize_subject_location(subject_location)
    if not (subject_location and crop):
        # use the normal scale_and_crop
        return processors.scale_and_crop(im, size, zoom=zoom, crop=crop,
                                         upscale=upscale, **kwargs)
    # from here on we have a subject_location and cropping is on
    # --snip-- this is a copy and paste of the first few
    # lines of ``scale_and_crop``
    source_x, source_y = [float(v) for v in im.size]
    target_x, target_y = [float(v) for v in size]
    if crop or not target_x or not target_y:
        scale = max(target_x / source_x, target_y / source_y)
    else:
        scale = min(target_x / source_x, target_y / source_y)
    # Handle one-dimensional targets.
    if not target_x:
        target_x = source_x * scale
    elif not target_y:
        target_y = source_y * scale
    if zoom:
        if not crop:
            target_x = round(source_x * scale)
            target_y = round(source_y * scale)
        scale *= (100 + int(zoom)) / 100.0
    if scale < 1.0 or (scale > 1.0 and upscale):
        # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
        # Pillow (use Image.LANCZOS) -- confirm the pinned Pillow version.
        im = im.resize((int(source_x * scale), int(source_y * scale)),
                       resample=Image.ANTIALIAS)
    # --endsnip-- begin real code
    # subject location aware cropping
    # res_x, res_y: the resolution of the possibly already resized image
    res_x, res_y = [float(v) for v in im.size]
    # subj_x, subj_y: the position of the subject (maybe already re-scaled)
    subj_x = res_x * float(subject_location[0]) / source_x
    subj_y = res_y * float(subject_location[1]) / source_y
    # ex/ey: margins to shave off each side to reach the target size
    ex = (res_x - min(res_x, target_x)) / 2
    ey = (res_y - min(res_y, target_y)) / 2
    fx, fy = res_x - ex, res_y - ey
    # box_width, box_height: dimensions of the target image
    box_width, box_height = fx - ex, fy - ey
    # try putting the box in the center around the subject point
    # (this will be partially outside of the image in most cases)
    tex, tey = subj_x - (box_width / 2), subj_y - (box_height / 2)
    tfx, tfy = subj_x + (box_width / 2), subj_y + (box_height / 2)
    if tex < 0:
        # it's out of the img to the left, move both to the right until tex is 0
        tfx = tfx - tex  # tex is negative!
        tex = 0
    elif tfx > res_x:
        # it's out of the img to the right
        tex = tex - (tfx - res_x)
        tfx = res_x
    if tey < 0:
        # it's out of the img to the top, move both to the bottom until tey is 0
        tfy = tfy - tey  # tey is negative!
        tey = 0
    elif tfy > res_y:
        # it's out of the img to the bottom
        tey = tey - (tfy - res_y)
        tfy = res_y
    # Only crop when there is actually something to shave off.
    if ex or ey:
        crop_box = ((int(tex), int(tey), int(tfx), int(tfy)))
        if FILER_SUBJECT_LOCATION_IMAGE_DEBUG:
            # draw ellipse on focal point for Debugging
            draw = ImageDraw.Draw(im)
            esize = 10
            draw.ellipse(((subj_x - esize, subj_y - esize),
                          (subj_x + esize, subj_y + esize)), outline="#FF0000")
        im = im.crop(crop_box)
    return im
def mask_loss(input_tensor, binary_tensor):
    """Mask a loss tensor element-wise with a 0/1 tensor.

    :param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
    :param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
    :return: A float tensor of shape [batch_size, ...] representing the masked loss.
    """
    with tf.variable_scope("mask_loss"):
        # The bool round-trip collapses every non-zero mask entry to 1.0.
        keep = tf.cast(tf.cast(binary_tensor, tf.bool), tf.float32)
        return input_tensor * keep
def process(self, pair):  # TODO: check docstring
    """Processes a pair of nodes into the current solution.

    MUST CREATE A NEW INSTANCE, NOT CHANGE ANY INSTANCE ATTRIBUTES

    Args:
        pair: a 2-tuple of node identifiers whose routes may be merged.

    Returns:
        (new_solution, inserted): a deep copy of this solution with the
        merge applied when feasible, and a flag telling whether the two
        routes were actually joined.
    """
    a, b = pair
    new_solution = self.clone()
    i, j = new_solution.get_pair((a, b))
    route_i = i.route_allocation()
    route_j = j.route_allocation()
    inserted = False
    # A merge is only possible when both nodes are allocated and currently
    # sit on *different* routes.
    if ((route_i is not None and route_j is not None) and (route_i != route_j)):
        # Case 1: i starts route_i and j ends route_j -> append route_i
        # onto the tail of route_j.
        if route_i._nodes.index(i) == 0 and route_j._nodes.index(j) == len(route_j._nodes) - 1:
            if route_j.can_allocate(route_i._nodes):
                route_j.allocate(route_i._nodes)
                # Sanity check: both endpoints must now share a route.
                if i.route_allocation() != j.route_allocation():
                    raise Exception('wtf')
                inserted = True
        # Case 2 (mirror): j starts route_j and i ends route_i.
        elif route_j._nodes.index(j) == 0 and route_i._nodes.index(i) == len(route_i._nodes) - 1:
            if route_i.can_allocate(route_j._nodes):
                route_i.allocate(route_j._nodes)
                if i.route_allocation() != j.route_allocation():
                    raise Exception('wtf j')
                inserted = True
    # Drop any route emptied by the merge.
    new_solution._routes = [route for route in new_solution._routes if route._nodes]
    return new_solution, inserted
def fitPlaneSVD(XYZ):
    """Fit a plane z = a*x + b*y + c to point data using SVD.

    :param XYZ: numpy array of shape (n, 3), one point per row, n >= 3.
    :return: numpy array [a, b, c] with z ~= a*x + b*y + c.
    """
    rows = XYZ.shape[0]
    # Set up constraint equations of the form A @ B = 0, where B is a
    # column vector of the homogeneous plane coefficients in the form
    # b(1)*X + b(2)*Y + b(3)*Z + b(4) = 0.
    A = np.hstack([XYZ, np.ones((rows, 1))])
    # Full SVD: the right singular vectors then always span R^4, so the
    # minimal 3-point case works too (the previous reduced SVD indexed
    # row 3 of a 3x4 matrix and raised IndexError there).
    _, _, vh = np.linalg.svd(A)
    # Solution is the right singular vector of the smallest singular value.
    B = vh[-1, :]
    # Rearrange b0*x + b1*y + b2*z + b3 = 0 into z = a*x + b*y + c.
    return -B[[0, 1, 3]] / B[2]
def filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, on_error):
    """Filter subdirs that have already been visited.

    This is used to avoid loops in the search performed by os.walk() in
    index_files_by_size.

    curr_dir is the path of the current directory, as returned by os.walk().
    subdirs is the list of subdirectories for the current directory.
    already_visited is a set of (st_dev, st_ino) tuples of already visited
    directories; it is not modified.
    follow_dirlinks selects stat (follow symlinks) vs lstat.
    on_error is a function f(OSError) -> None called on failures.

    Returns a tuple: the filtered subdirs list, and a new set of already
    visited directories that now includes the kept subdirs.
    """
    stat_fn = os.stat if follow_dirlinks else os.lstat
    kept = []
    newly_seen = set()
    visited = already_visited.copy()
    try:
        # Mark the current directory as visited, so symlinks back to it are
        # caught immediately rather than one directory level later.
        info = stat_fn(curr_dir)
        visited.add((info.st_dev, info.st_ino))
    except OSError as exc:
        on_error(exc)
    for name in subdirs:
        full_path = os.path.join(curr_dir, name)
        try:
            info = stat_fn(full_path)
        except OSError as exc:
            on_error(exc)
            continue
        if not follow_dirlinks and stat.S_ISLNK(info.st_mode):
            # Following links to dirs is disabled; ignore this one.
            continue
        dev_inode = (info.st_dev, info.st_ino)
        if dev_inode in visited:
            on_error(OSError(errno.ELOOP, "directory loop detected", full_path))
        else:
            kept.append(name)
            newly_seen.add(dev_inode)
    return kept, visited.union(newly_seen)
def dependency_check(self, task_cls, skip_unresolved=False):
    """Check dependency of task for irresolvable conflicts (like task to task mutual dependency).

    :param task_cls: task to check
    :param skip_unresolved: when False, raise if a dependency tag cannot be
        found in the registry; when True, silently skip unresolvable tags.
    :return: None
    """
    def walk(current_cls, seen_tags):
        # A tag reappearing on its own dependency path means a cycle.
        if current_cls.__registry_tag__ in seen_tags:
            raise RuntimeError('Recursion dependencies for %s' % task_cls.__registry_tag__)
        branch = seen_tags + [current_cls.__registry_tag__]
        for tag in current_cls.__dependency__:
            dep_cls = self.tasks_by_tag(tag)
            if dep_cls is None:
                if not skip_unresolved:
                    raise RuntimeError("Task '%s' dependency unresolved (%s)" % (task_cls.__registry_tag__, tag))
            else:
                walk(dep_cls, branch)

    walk(task_cls, [])
def capitalize(string):
    """Capitalize a sentence.

    Parameters
    ----------
    string : `str`
        String to capitalize.

    Returns
    -------
    `str`
        Capitalized string: first character uppercased, rest lowercased.

    Examples
    --------
    >>> capitalize('worD WORD WoRd')
    'Word word word'
    """
    if not string:
        return string
    # The [:1] slice is safe for one-character strings as well.
    return string[:1].upper() + string[1:].lower()
def append_rows(self, rows, between, refresh_presision):
    """Transform the rows of data to Measurements.

    Keyword arguments:
    rows -- an array of arrays [datetime, integral_measurement]
    between -- time between integral_measurements in seconds
    refresh_presision -- time between sensor values that compose the integral_measurements
    """
    for row in rows:
        Measurement.register_or_check(
            finish=row[0],
            mean=row[1] / between,
            between=between,
            refresh_presision=refresh_presision,
            configuration=self,
        )
def subsample_input(infiles):
    """Returns a random subsample of the input files.

    - infiles: a list of input files for analysis

    ``args.subsample`` is interpreted as an absolute count when its integer
    value is > 1, otherwise as a proportion in (0, 1].  Exits the program
    when the value is not a positive number.
    """
    logger.info("--subsample: %s", args.subsample)
    try:
        samplesize = float(args.subsample)
    # float() raises ValueError for non-numeric strings and TypeError for
    # None -- the original only caught TypeError, so bad strings crashed.
    except (TypeError, ValueError):
        logger.error("--subsample must be int or float, got %s (exiting)", type(args.subsample))
        sys.exit(1)
    if samplesize <= 0:  # Not a positive value
        logger.error("--subsample must be positive value, got %s", str(args.subsample))
        sys.exit(1)
    if int(samplesize) > 1:
        logger.info("Sample size integer > 1: %d", samplesize)
        k = min(int(samplesize), len(infiles))
    else:
        logger.info("Sample size proportion in (0, 1]: %.3f", samplesize)
        k = int(min(samplesize, 1.0) * len(infiles))
    logger.info("Randomly subsampling %d sequences for analysis", k)
    # NOTE(review): a seed of 0 is treated here as "no seed" -- confirm
    # that is intended.
    if args.seed:
        logger.info("Setting random seed with: %s", args.seed)
        random.seed(args.seed)
    else:
        logger.warning("Subsampling without specified random seed!")
        logger.warning("Subsampling may NOT be easily reproducible!")
    return random.sample(infiles, k)
def install_sql_hook():
    # type: () -> None
    """If installed this causes Django's queries to be captured."""
    try:
        from django.db.backends.utils import CursorWrapper  # type: ignore
    except ImportError:
        # Older Django keeps the wrapper in `util` (no trailing `s`).
        from django.db.backends.util import CursorWrapper  # type: ignore
    try:
        real_execute = CursorWrapper.execute
        real_executemany = CursorWrapper.executemany
    except AttributeError:
        # This won't work on Django versions < 1.6
        return

    def record_many_sql(sql, param_list, cursor):
        # Record each parameter set of an executemany() individually.
        for params in param_list:
            record_sql(sql, params, cursor)

    def execute(self, sql, params=None):
        # Run the real query first; `finally` ensures the statement is
        # recorded even when it raises.
        try:
            return real_execute(self, sql, params)
        finally:
            record_sql(sql, params, self.cursor)

    def executemany(self, sql, param_list):
        try:
            return real_executemany(self, sql, param_list)
        finally:
            record_many_sql(sql, param_list, self.cursor)

    # Monkey-patch the wrapper methods; silence Django's own SQL logger
    # (presumably to avoid double-reporting the same queries -- confirm).
    CursorWrapper.execute = execute
    CursorWrapper.executemany = executemany
    ignore_logger("django.db.backends")
def get_default(self):
    """Return the default value to use when validating data if no input
    is provided for this field.

    If a default has not been set for this field then this will simply
    return `empty`, indicating that no value should be set in the
    validated data for this field.
    """
    default = self.default
    if default is empty:
        raise SkipField()
    if not callable(default):
        return default
    # Callable defaults may ask for the field as context before being called.
    if hasattr(default, 'set_context'):
        default.set_context(self)
    return default()
def remove_page(self, route):
    """Remove a proxied page from the Web UI.

    Parameters
    ----------
    route : str
        The route for the proxied page. Must be a valid path *segment* in a
        url (e.g. ``foo`` in ``/foo/bar/baz``). Routes must be unique
        across the application.
    """
    request = proto.RemoveProxyRequest(route=route)
    self._client._call('RemoveProxy', request)
def ChoiceHumanReadable(choices, choice):
    """Return the human readable representation for a list of choices.

    @see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices

    :param choices: iterable of (value, label) pairs.
    :param choice: the stored value to look up.
    :raises NoChoiceError: when `choice` is None.
    :raises NoChoiceMatchError: when `choice` is not among the values.
    """
    # Identity test for None -- `== None` can be fooled by custom __eq__.
    if choice is None:
        raise NoChoiceError()
    for option in choices:
        if option[0] == choice:
            return option[1]
    # Distinct comprehension variable: the original shadowed `choice`.
    raise NoChoiceMatchError("The choice '%s' does not exist in '%s'"
                             % (choice, ", ".join(c[0] for c in choices)))
def write_csv(self, file: str, table: str, libref: str = "", nosub: bool = False,
              dsopts: dict = None, opts: dict = None) -> 'The LOG showing the results of the step':
    """This method will export a SAS Data Set to a file in CSV format.

    file   - the OS filesystem path of the file to be created (exported from the SAS Data Set)
    table  - the name of the SAS Data Set you want to export to a CSV file
    libref - the libref for the SAS Data Set.
    nosub  - when True, only print the generated SAS code instead of submitting it
    dsopts - a dictionary containing any of the following SAS data set options (where, drop, keep, obs, firstobs)
    opts   - a dictionary containing any of the following Proc Export options (delimiter, putnames)
    """
    # Normalize the None defaults to fresh dicts (avoids mutable defaults).
    dsopts = dsopts if dsopts is not None else {}
    opts = opts if opts is not None else {}
    code = "options nosource;\n"
    code += "filename x \"" + file + "\";\n"
    code += "proc export data=" + libref + "." + table + self._sb._dsopts(dsopts) + " outfile=x dbms=csv replace; "
    # NOTE(review): the step terminator is emitted as "run\n;" (newline
    # before the semicolon) -- SAS appears to tolerate this, but confirm
    # it is intentional rather than a transposed "run;\n".
    code += self._sb._expopts(opts) + " run\n;"
    code += "options source;\n"
    if nosub:
        print(code)
    else:
        ll = self.submit(code, "text")
        return ll['LOG']
def get_version(module):
    """Attempts to read a version attribute from the given module that
    could be specified via several different names and formats.

    Checks __version__/get_version/version (and their uppercase forms),
    calls the attribute when callable, and joins iterable versions like
    (1, 2, 3) into "1.2.3".  Returns None when nothing is found.
    """
    version_names = ["__version__", "get_version", "version"]
    version_names.extend([name.upper() for name in version_names])
    for name in version_names:
        try:
            version = getattr(module, name)
        except AttributeError:
            continue
        if callable(version):
            version = version()
        # Join tuple/list versions, but never iterate a string: under
        # Python 3 str has __iter__, so the old code turned "1.2.3"
        # into "1...2...3".
        if not isinstance(version, str):
            try:
                version = ".".join(str(part) for part in version)
            except TypeError:
                pass  # not iterable (e.g. an int) -- return as-is
        return version
def updateStaticEC2Instances():
    """Generates a new python file of fetchable EC2 Instances by region with current prices and specs.

    Takes a few (~3+) minutes to run (you'll need decent internet).

    :return: Nothing. Writes a new 'generatedEC2Lists.py' file.
    """
    logger.info("Updating Toil's EC2 lists to the most current version from AWS's bulk API. "
                "This may take a while, depending on your internet connection.")
    dirname = os.path.dirname(__file__)
    # the file Toil uses to get info about EC2 instance types
    origFile = os.path.join(dirname, 'generatedEC2Lists.py')
    assert os.path.exists(origFile)
    # use a temporary file until all info is fetched
    genFile = os.path.join(dirname, 'generatedEC2Lists_tmp.py')
    assert not os.path.exists(genFile)
    # will be used to save a copy of the original when finished
    oldFile = os.path.join(dirname, 'generatedEC2Lists_old.py')
    # provenance note, copyright and imports
    with open(genFile, 'w') as f:
        f.write(textwrap.dedent('''
# !!! AUTOGENERATED FILE !!!
# Update with: src/toil/utils/toilUpdateEC2Instances.py
#
# Copyright (C) 2015-{year} UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems
from toil.lib.ec2nodes import InstanceType\n\n\n''').format(year=datetime.date.today().strftime("%Y"))[1:])
    currentEC2List = []
    instancesByRegion = {}
    # Fetch every region's instance dict; deduplicate the type objects but
    # remember which region offers which instance names.
    for regionNickname, _ in iteritems(EC2Regions):
        currentEC2Dict = fetchEC2InstanceDict(regionNickname=regionNickname)
        for instanceName, instanceTypeObj in iteritems(currentEC2Dict):
            if instanceTypeObj not in currentEC2List:
                currentEC2List.append(instanceTypeObj)
            instancesByRegion.setdefault(regionNickname, []).append(instanceName)
    # write header of total EC2 instance type list
    genString = "# {num} Instance Types. Generated {date}.\n".format(num=str(len(currentEC2List)), date=str(datetime.datetime.now()))
    genString = genString + "E2Instances = {\n"
    sortedCurrentEC2List = sorted(currentEC2List, key=lambda x: x.name)
    # write the list of all instances types
    for i in sortedCurrentEC2List:
        z = " '{name}': InstanceType(name='{name}', cores={cores}, memory={memory}, disks={disks}, disk_capacity={disk_capacity})," "\n".format(name=i.name, cores=i.cores, memory=i.memory, disks=i.disks, disk_capacity=i.disk_capacity)
        genString = genString + z
    genString = genString + '}\n\n'
    # per-region mapping of instance names
    genString = genString + 'regionDict = {\n'
    for regionName, instanceList in iteritems(instancesByRegion):
        genString = genString + " '{regionName}': [".format(regionName=regionName)
        for instance in sorted(instanceList):
            genString = genString + "'{instance}', ".format(instance=instance)
        # trim the trailing ", " before closing each region's list
        if genString.endswith(', '):
            genString = genString[:-2]
        genString = genString + '],\n'
    if genString.endswith(',\n'):
        genString = genString[:-len(',\n')]
    genString = genString + '}\n'
    with open(genFile, 'a+') as f:
        f.write(genString)
    # append key for fetching at the end
    regionKey = '\nec2InstancesByRegion = dict((region, [E2Instances[i] for i in instances]) for region, instances in iteritems(regionDict))\n'
    with open(genFile, 'a+') as f:
        f.write(regionKey)
    # preserve the original file unless it already exists
    if not os.path.exists(oldFile):
        os.rename(origFile, oldFile)
    # delete the original file if it's still there
    if os.path.exists(origFile):
        os.remove(origFile)
    # replace the instance list with a current list
    os.rename(genFile, origFile)
def get_indels(sbjct_seq, qry_seq, start_pos):
    """Find insertions and deletions in an aligned sequence pair using regex.

    Gaps ("-" runs) in the subject are insertions; gaps in the query are
    deletions.  Each record in the returned list holds: mutation type
    (ins/del), subject codon number, subject sequence position, the
    inserted/deleted nucleotide sequence, and the affected query codon
    number.  Records are sorted by codon then sequence position.
    """
    indels = []
    gap_pattern = re.compile(r"-+")
    sequences = [sbjct_seq, qry_seq]
    for idx, seq in enumerate(sequences):
        other_seq = sequences[1 - idx]
        mut_type = "ins" if idx == 0 else "del"
        for match in gap_pattern.finditer(seq):
            gap_start = match.start()
            gap_len = len(match.group())
            # Position relative to the ungapped subject/query sequences.
            sbj_pos = len(sbjct_seq[:gap_start].replace("-", "")) + start_pos
            qry_pos = len(qry_seq[:gap_start].replace("-", "")) + start_pos
            # The indel bases come from the *other* sequence of the pair.
            indel_seq = other_seq[gap_start:gap_start + gap_len]
            codon_no = int(math.ceil(sbj_pos / 3))
            qry_codon = int(math.ceil(qry_pos / 3))
            indels.append([mut_type, codon_no, sbj_pos, indel_seq, qry_codon])
    return sorted(indels, key=lambda rec: (rec[1], rec[2]))
def dtype_for(t):
    """Return my dtype mapping, whether number or name."""
    if t in dtype_dict:
        return dtype_dict[t]
    # np.typeDict was a deprecated alias removed in NumPy 1.24;
    # np.sctypeDict is the long-standing equivalent mapping.
    return np.sctypeDict.get(t, t)
async def get_access_token(self, code, loop=None, redirect_uri=None, **payload):
    """Get an access_token from OAuth provider.

    :param code: the authorization code (or refresh token), either as a raw
        string or a mapping keyed by ``self.shared_key``.
    :returns: (access_token, provider_data)
    """
    # Possibility to provide REQUEST DATA to the method
    payload.setdefault('grant_type', 'authorization_code')
    payload.update({'client_id': self.client_id, 'client_secret': self.client_secret})
    # Unwrap the code when a mapping (e.g. request args) was passed in.
    if not isinstance(code, str) and self.shared_key in code:
        code = code[self.shared_key]
    # Refresh flows carry the value under 'refresh_token', others 'code'.
    payload['refresh_token' if payload['grant_type'] == 'refresh_token' else 'code'] = code
    redirect_uri = redirect_uri or self.params.get('redirect_uri')
    if redirect_uri:
        payload['redirect_uri'] = redirect_uri
    # Clear any stale token before requesting a fresh one.
    self.access_token = None
    data = await self.request('POST', self.access_token_url, data=payload, loop=loop)
    try:
        self.access_token = data['access_token']
    except KeyError:
        self.logger.error('Error when getting the access token.\nData returned by OAuth server: %r', data,)
        raise web.HTTPBadRequest(reason='Failed to obtain OAuth access token.')
    return self.access_token, data
def get_color(self):
    """Return the array of rgba colors (same order as lStruct)."""
    colors = np.full((self._dStruct['nObj'], 4), np.nan)
    # lorder keys are '<class>_<obj>' pairs into the dObj mapping.
    for row, key in enumerate(self._dStruct['lorder']):
        cls_key, obj_key = key.split('_')
        colors[row, :] = self._dStruct['dObj'][cls_key][obj_key].get_color()
    return colors
def subscribe(self, objectID, varIDs=(tc.VAR_ROAD_ID, tc.VAR_LANEPOSITION), begin=0, end=2 ** 31 - 1):
    """subscribe(string, list(integer), int, int) -> None

    Subscribe to one or more object values for the given interval.
    Delegates to the generic Domain implementation with this domain's
    default variables (road id and lane position).
    """
    Domain.subscribe(self, objectID, varIDs, begin, end)
def subdivide(self):
    r"""Split the curve :math:`B(s)` into a left and right half.

    Takes the interval :math:`\left[0, 1\right]` and splits the curve into
    :math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and
    :math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In
    order to do this, also reparameterizes the curve, hence the resulting
    left and right halves have new nodes.

    .. image:: ../../images/curve_subdivide.png
       :align: center

    .. doctest:: curve-subdivide
       :options: +NORMALIZE_WHITESPACE

       >>> nodes = np.asfortranarray([
       ...     [0.0, 1.25, 2.0],
       ...     [0.0, 3.0, 1.0],
       ... ])
       >>> curve = bezier.Curve(nodes, degree=2)
       >>> left, right = curve.subdivide()
       >>> left.nodes
       array([[0.   , 0.625, 1.125],
              [0.   , 1.5  , 1.75 ]])
       >>> right.nodes
       array([[1.125, 1.625, 2.   ],
              [1.75 , 2.   , 1.   ]])

    .. testcleanup:: curve-subdivide

       import make_images
       make_images.curve_subdivide(curve, left, right)

    Returns:
        Tuple[Curve, Curve]: The left and right sub-curves.
    """
    # The numeric splitting lives in the helper; both halves keep the
    # original degree and take ownership of their node arrays (_copy=False).
    left_nodes, right_nodes = _curve_helpers.subdivide_nodes(self._nodes)
    left = Curve(left_nodes, self._degree, _copy=False)
    right = Curve(right_nodes, self._degree, _copy=False)
    return left, right
def encode_data(self):
    """Encode the data back into a dict (MMTF field names and codecs)."""
    # Single literal keeps key order identical to the incremental build.
    return {
        "groupTypeList": encode_array(self.group_type_list, 4, 0),
        "xCoordList": encode_array(self.x_coord_list, 10, 1000),
        "yCoordList": encode_array(self.y_coord_list, 10, 1000),
        "zCoordList": encode_array(self.z_coord_list, 10, 1000),
        "bFactorList": encode_array(self.b_factor_list, 10, 100),
        "occupancyList": encode_array(self.occupancy_list, 9, 100),
        "atomIdList": encode_array(self.atom_id_list, 8, 0),
        "altLocList": encode_array(self.alt_loc_list, 6, 0),
        "insCodeList": encode_array(self.ins_code_list, 6, 0),
        "groupIdList": encode_array(self.group_id_list, 8, 0),
        "groupList": self.group_list,
        "sequenceIndexList": encode_array(self.sequence_index_list, 8, 0),
        "chainNameList": encode_array(self.chain_name_list, 5, 4),
        "chainIdList": encode_array(self.chain_id_list, 5, 4),
        "bondAtomList": encode_array(self.bond_atom_list, 4, 0),
        "bondOrderList": encode_array(self.bond_order_list, 2, 0),
        "secStructList": encode_array(self.sec_struct_list, 2, 0),
        "chainsPerModel": self.chains_per_model,
        "groupsPerChain": self.groups_per_chain,
        "spaceGroup": self.space_group,
        "mmtfVersion": self.mmtf_version,
        "mmtfProducer": self.mmtf_producer,
        "structureId": self.structure_id,
        "entityList": self.entity_list,
        "bioAssemblyList": self.bio_assembly,
        "rFree": self.r_free,
        "rWork": self.r_work,
        "resolution": self.resolution,
        "title": self.title,
        "experimentalMethods": self.experimental_methods,
        "depositionDate": self.deposition_date,
        "releaseDate": self.release_date,
        "unitCell": self.unit_cell,
        "numBonds": self.num_bonds,
        "numChains": self.num_chains,
        "numModels": self.num_models,
        "numAtoms": self.num_atoms,
        "numGroups": self.num_groups,
    }
def _sql_expand_update ( self , spec , key_prefix = '' , col_prefix = '' ) :
"""Expand a dict so it fits in a INSERT clause""" | sql = ', ' . join ( col_prefix + key + ' = %(' + key_prefix + key + ')s' for key in spec )
params = { }
for key in spec :
params [ key_prefix + key ] = spec [ key ]
return sql , params |
def validate_tree(self):
    """Validate all nodes in this tree recursively.

    Args: None
    Returns: boolean indicating if tree is valid
    """
    self.validate()
    for child in self.children:
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would have skipped nothing here but made the
        # result check disappear.
        if not child.validate_tree():
            return False
    return True
def create_dialog(self):
    """Create the dialog.

    Builds three group boxes (Info, Parameters, Options) plus the button
    row, wires up the signals, and installs the overall layout.
    NOTE: some row labels intentionally start with a space (e.g.
    ' Max. trough duration (sec)') -- they are runtime strings and must
    not be "tidied".
    """
    # --- Info box: event name and channel/cycle/stage selectors ---
    box0 = QGroupBox('Info')
    self.name = FormStr()
    self.name.setText('sw')
    self.idx_group.activated.connect(self.update_channels)
    form = QFormLayout(box0)
    form.addRow('Event name', self.name)
    form.addRow('Channel group', self.idx_group)
    form.addRow('Channel(s)', self.idx_chan)
    form.addRow('Cycle(s)', self.idx_cycle)
    form.addRow('Stage(s)', self.idx_stage)
    # --- Parameters box: detection method and numeric thresholds ---
    box1 = QGroupBox('Parameters')
    mbox = QComboBox()
    method_list = SLOW_WAVE_METHODS
    for method in method_list:
        mbox.addItem(method)
    self.idx_method = mbox
    self.method = mbox.currentText()
    # Refresh the numeric defaults whenever the method selection changes.
    mbox.currentIndexChanged.connect(self.update_values)
    self.index['f1'] = FormFloat()
    self.index['f2'] = FormFloat()
    self.index['min_trough_dur'] = FormFloat()
    self.index['max_trough_dur'] = FormFloat()
    self.index['max_trough_amp'] = FormFloat()
    self.index['min_ptp'] = FormFloat()
    self.index['min_dur'] = FormFloat()
    self.index['max_dur'] = FormFloat()
    form = QFormLayout(box1)
    form.addRow('Method', mbox)
    form.addRow('Lowcut (Hz)', self.index['f1'])
    form.addRow('Highcut (Hz)', self.index['f2'])
    form.addRow('Min. trough duration (sec)', self.index['min_trough_dur'])
    form.addRow(' Max. trough duration (sec)', self.index['max_trough_dur'])
    form.addRow(' Max. trough amplitude (uV)', self.index['max_trough_amp'])
    form.addRow('Min. peak-to-peak amplitude (uV)', self.index['min_ptp'])
    form.addRow('Min. duration (sec)', self.index['min_dur'])
    form.addRow(' Max. duration (sec)', self.index['max_dur'])
    # --- Options box: detrending, inversion and artefact exclusion ---
    box3 = QGroupBox('Options')
    self.index['detrend'] = FormBool('Detrend (linear)')
    self.index['invert'] = FormBool('Invert detection (down-then-up)')
    self.index['excl_epoch'] = FormBool('Exclude Poor signal epochs')
    self.index['excl_event'] = FormMenu(['none', 'channel-specific', 'from any channel'])
    self.index['min_seg_dur'] = FormFloat(5)
    # Defaults: exclude poor-signal epochs and detrend.
    self.index['excl_epoch'].set_value(True)
    self.index['detrend'].set_value(True)
    form = QFormLayout(box3)
    form.addRow(self.index['excl_epoch'])
    form.addRow('Exclude Artefact events', self.index['excl_event'])
    form.addRow('Minimum subsegment duration', self.index['min_seg_dur'])
    form.addRow(self.index['detrend'])
    form.addRow(self.index['invert'])
    # --- Buttons and overall layout ---
    self.bbox.clicked.connect(self.button_clicked)
    btnlayout = QHBoxLayout()
    btnlayout.addStretch(1)
    btnlayout.addWidget(self.bbox)
    vlayout = QVBoxLayout()
    vlayout.addWidget(box1)
    vlayout.addWidget(box3)
    vlayout.addStretch(1)
    vlayout.addLayout(btnlayout)
    hlayout = QHBoxLayout()
    hlayout.addWidget(box0)
    hlayout.addLayout(vlayout)
    # Populate the numeric fields from the current method's defaults.
    self.update_values()
    self.setLayout(hlayout)
def set_iprouting(self, value=None, default=False, disable=False):
    """Configures the state of global ip routing.

    EosVersion:
        4.13.7M

    Args:
        value (bool): True if ip routing should be enabled or False if
            ip routing should be disabled
        default (bool): Controls the use of the default keyword
        disable (bool): Controls the use of the no keyword

    Returns:
        bool: True if the commands completed successfully otherwise False
    """
    # Disabling is expressed on the CLI as "no ip routing".
    if value is False:
        disable = True
    return self.configure(
        self.command_builder('ip routing', value=value, default=default,
                             disable=disable))
def _set_static_route_nh ( self , v , load = False ) :
    """Setter method for static_route_nh, mapped from YANG variable
    /rbridge_id/vrf/address_family/ipv6/unicast/ipv6/route/static_route_nh (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_static_route_nh is considered as a private method.  Backends
    looking to populate this variable should do so via calling
    thisObj._set_static_route_nh() directly.
    """
    # Unwrap a user-typed wrapper value into its base representation before
    # validation, when the wrapper advertises one.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        # Re-wrap the value in the generated YANG list type; construction
        # validates the value against the model's constraints.
        t = YANGDynClass ( v , base = YANGListType ( "static_route_dest static_route_next_hop" , static_route_nh . static_route_nh , yang_name = "static-route-nh" , rest_name = "static-route-nh" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'static-route-dest static-route-next-hop' , extensions = { u'tailf-common' : { u'info' : u'Route with nexthop IP address' , u'cli-no-key-completion' : None , u'cli-suppress-mode' : None , u'cli-suppress-list-no' : None , u'cli-full-no' : None , u'cli-drop-node-name' : None , u'callpoint' : u'Ipv6StaticRouteNh' } } ) , is_container = 'list' , yang_name = "static-route-nh" , rest_name = "static-route-nh" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Route with nexthop IP address' , u'cli-no-key-completion' : None , u'cli-suppress-mode' : None , u'cli-suppress-list-no' : None , u'cli-full-no' : None , u'cli-drop-node-name' : None , u'callpoint' : u'Ipv6StaticRouteNh' } } , namespace = 'urn:brocade.com:mgmt:brocade-ipv6-rtm' , defining_module = 'brocade-ipv6-rtm' , yang_type = 'list' , is_config = True )
    except ( TypeError , ValueError ) :
        # Report the expected model type when validation fails.
        raise ValueError ( { 'error-string' : """static_route_nh must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("static_route_dest static_route_next_hop",static_route_nh.static_route_nh, yang_name="static-route-nh", rest_name="static-route-nh", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='static-route-dest static-route-next-hop', extensions={u'tailf-common': {u'info': u'Route with nexthop IP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'Ipv6StaticRouteNh'}}), is_container='list', yang_name="static-route-nh", rest_name="static-route-nh", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route with nexthop IP address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-drop-node-name': None, u'callpoint': u'Ipv6StaticRouteNh'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='list', is_config=True)""" , } )
    self . __static_route_nh = t
    # Notify the parent container (if any) that a child value was set.
    if hasattr ( self , '_set' ) :
        self . _set ( )
def update(self, sha, force=False):
    """Update this reference.

    :param str sha: (required), sha of the reference
    :param bool force: (optional), force the update or not
    :returns: bool
    """
    payload = dumps({'sha': sha, 'force': force})
    response = self._json(self._patch(self._api, data=payload), 200)
    if not response:
        return False
    self._update_(response)
    return True
def get_unused_sauce_port():
    """Return an unused port among those that Sauce Connect forwards.

    Note that this function does **not** lock the port!

    :returns: A presumably free port that Sauce Connect forwards.
    :rtype: int
    :raises RuntimeError: if every candidate port appears to be in use.
    """
    # Candidate ports Sauce Connect forwards.  80, 443 and 888 are excluded
    # from the list since they are reserved.
    sc_ports = [8000, 8001, 8003, 8031, 8080, 8081, 8765, 8777, 8888,
                9000, 9001, 9080, 9090, 9876, 9877, 9999, 49221, 55001]
    for candidate in sc_ports:
        candidate_str = str(candidate)
        # lsof will report a hit whenever *either* the source *or* the
        # destination is using the port we are interested in.  There does
        # not seem to be a reliable way to tell lsof to just get ports used
        # on the source side (localhost).  We'd have to get all addresses on
        # which the host can listen and list them all.  Rather than do this,
        # we check the output to see whether the port is used locally.
        try:
            out = subprocess.check_output(
                ["lsof", "-n", "-P", "-i", ":" + candidate_str])
        except subprocess.CalledProcessError as ex:
            # When lsof returns an empty list, the exit code is 1, even if
            # there was no actual error.  We handle this here.
            if ex.returncode != 1:
                # A return code other than 1 is a real error...
                raise
            # ...else we swallow the exception.
            out = b""
        used = False
        # Slice in the next line to skip the header line.
        for line in out.splitlines(True)[1:]:
            # Grab the NAME column.  check_output() yields bytes on
            # Python 3; decode before comparing with the (str) port.
            # (The original code compared bytes with str, which always
            # raised TypeError on Python 3.)
            name = line.split()[8].decode("utf-8", "replace")
            # The field is of the form ``source->destination``.  We only
            # care about the source.
            src = name.split("->")[0]
            if src.endswith(":" + candidate_str):
                # We've found that we are actually using the port...
                used = True
                break
        if not used:
            return candidate
    # The original code fell off the loop and returned an unbound variable
    # (NameError) when every port was busy; fail explicitly instead.
    raise RuntimeError("no free Sauce Connect port found")
def run_spy(group, port, verbose):
    """Runs the multicast spy: receives multicast datagrams and prints
    per-sender statistics until interrupted with Ctrl+C.

    :param group: Multicast group
    :param port: Multicast port
    :param verbose: If True, prints a hex dump of each received packet
    :returns: 0 on normal (keyboard-interrupted) exit
    """
    # Create the socket (the helper may normalize the group address)
    socket, group = multicast.create_multicast_socket(group, port)
    print("Socket created:", group, "port:", port)
    # Set the socket as non-blocking; select() below drives the read loop
    socket.setblocking(0)
    # Prepare stats storage: global totals plus per-sender counters
    stats = {"total_bytes": 0, "total_count": 0, "sender_bytes": {}, "sender_count": {}, }
    print("Press Ctrl+C to exit")
    try:
        loop_nb = 0
        while True:
            # Heartbeat every 50 iterations (~5 s at the 0.1 s select
            # timeout) so the user can see the spy is still alive
            if loop_nb % 50 == 0:
                loop_nb = 0
                print("Reading...")
            loop_nb += 1
            ready = select.select([socket], [], [], .1)
            if ready[0]:  # Socket is ready
                # Read one datagram; payloads beyond 1024 bytes are truncated
                data, sender = socket.recvfrom(1024)
                len_data = len(data)
                # Store stats
                stats["total_bytes"] += len_data
                stats["total_count"] += 1
                # EAFP: the first packet from a sender raises KeyError
                try:
                    stats["sender_bytes"][sender] += len_data
                    stats["sender_count"][sender] += 1
                except KeyError:
                    stats["sender_bytes"][sender] = len_data
                    stats["sender_count"][sender] = 1
                print("Got", len_data, "bytes from", sender[0], "port", sender[1], "at", datetime.datetime.now())
                if verbose:
                    print(hexdump(data))
    except KeyboardInterrupt:  # Interrupt
        print("Ctrl+C received: bye !")
        # Print statistics
        print("Total number of packets:", stats["total_count"])
        print("Total read bytes.......:", stats["total_bytes"])
        for sender in stats["sender_count"]:
            print("\nSender", sender[0], "from port", sender[1])
            print("\tTotal packets:", stats["sender_count"][sender])
            print("\tTotal bytes..:", stats["sender_bytes"][sender])
    return 0
def hpd_credible_interval(mu_in, post, alpha=0.9, tolerance=1e-3):
    '''Returns the minimum and maximum rate values of the HPD
    (Highest Posterior Density) credible interval for a posterior
    post defined at the sample values mu_in.  Samples need not be
    uniformly spaced and posterior need not be normalized.

    Will not return a correct credible interval if the posterior
    is multimodal and the correct interval is not contiguous;
    in this case will over-cover by including the whole range from
    minimum to maximum mu.

    :param mu_in: sample values at which the posterior is defined
    :param post: posterior values at ``mu_in`` (need not be normalized)
    :param alpha: credible level; must satisfy 0 < alpha <= 1
    :param tolerance: tolerance forwarded to ``hpd_threshold``
    :returns: tuple ``(mu_low, mu_high)``
    :raises ValueError: if ``alpha`` lies outside (0, 1]
    '''
    if alpha == 1:
        # Full coverage: span every sample with nonzero posterior support.
        nonzero_samples = mu_in[post > 0]
        mu_low = numpy.min(nonzero_samples)
        mu_high = numpy.max(nonzero_samples)
    elif 0 < alpha < 1:
        # Determine the highest PDF for which the region with higher
        # density has sufficient coverage.
        pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance)
        samples_over_threshold = mu_in[post > pthresh]
        mu_low = numpy.min(samples_over_threshold)
        mu_high = numpy.max(samples_over_threshold)
    else:
        # The original code fell through both branches here and crashed
        # with UnboundLocalError at the return; raise a clear error instead.
        raise ValueError("alpha must satisfy 0 < alpha <= 1, got %r" % (alpha,))
    return mu_low, mu_high
def random_targets(gt, nb_classes):
    """Take in an array of correct labels and randomly select a different label
    for each label in the array.  This is typically used to randomly select a
    target class in targeted adversarial examples attacks (i.e., when the
    search algorithm takes in both a source class and target class to compute
    the adversarial example).

    :param gt: the ground truth (correct) labels.  They can be provided as a
               1D vector or 2D array of one-hot encoded labels.
    :param nb_classes: The number of classes for this task.  The random class
                       will be chosen between 0 and nb_classes such that it
                       is different from the correct class.
    :return: A numpy array holding the randomly-selected target classes
             encoded as one-hot labels.
    """
    # If the ground truth labels are encoded as one-hot, convert to labels.
    if len(gt.shape) == 2:
        gt = np.argmax(gt, axis=1)
    # This vector will hold the randomly selected labels.
    result = np.zeros(gt.shape, dtype=np.int32)
    # `range` instead of the Python 2-only `xrange`: iteration behavior is
    # identical and the function now also runs under Python 3 without
    # depending on six.moves.
    for class_ind in range(nb_classes):
        # Compute all indices in that class.
        in_cl = gt == class_ind
        size = np.sum(in_cl)
        # Compute the set of potential targets for this class.
        potential_targets = other_classes(nb_classes, class_ind)
        # Draw with replacement random targets among the potential targets.
        result[in_cl] = np.random.choice(potential_targets, size=size)
    # Encode vector of random labels as one-hot labels.
    result = to_categorical(result, nb_classes)
    result = result.astype(np.int32)
    return result
async def main():
    """Main code: create and publish a signed identity document to a
    Duniter node, prompting the user for credentials."""
    # Create Client from endpoint string in Duniter format
    client = Client(BMAS_ENDPOINT)
    # Get the node summary infos to test the connection
    response = await client(bma.node.summary)
    print(response)
    # capture current block to get version and currency and blockstamp
    current_block = await client(bma.blockchain.current)
    # prompt entry
    uid = input("Enter your Unique IDentifier (pseudonym): ")
    # prompt hidden user entry (salt half of the scrypt credentials)
    salt = getpass.getpass("Enter your passphrase (salt): ")
    # prompt hidden user entry (password half of the scrypt credentials)
    password = getpass.getpass("Enter your password: ")
    # create our signed identity document
    identity = get_identity_document(current_block, uid, salt, password)
    # send the identity document to the node
    response = await client(bma.wot.add, identity.signed_raw())
    # 200 means the node accepted the document; anything else is an error
    if response.status == 200:
        print(await response.text())
    else:
        print("Error while publishing identity : {0}".format(await response.text()))
    # Close client aiohttp session
    await client.close()
def create_call(self, raw_request, **kwargs):
    """Create a call object bundling endpoint-understandable request,
    response and router instances."""
    request = self.create_request(raw_request, **kwargs)
    response = self.create_response(**kwargs)
    router = self.create_router(**kwargs)
    return self.call_class(request, response, router)
def get(self, collection, doc_id, **kwargs):
    """:param str collection: The name of the collection for the request
    :param str doc_id: ID of the document to be retrieved.

    Retrieve document from Solr based on the ID. ::

        >>> solr.get('SolrClient_unittest', 'changeme')
    """
    resp, con_inf = self.transport.send_request(
        method='GET', endpoint='get', collection=collection,
        params={'id': doc_id}, **kwargs)
    document = resp['doc'] if 'doc' in resp else None
    if document:
        return document
    raise NotFoundError
def validate_object_id(object_id):
    """It's easy to make a mistake entering these, validate the format"""
    # Accept anything str()-able, but it must match the expected pattern.
    if re.match(OBJECT_ID_RE, str(object_id)) is None:
        print("'%s' appears not to be a valid 990 object_id" % object_id)
        raise RuntimeError(OBJECT_ID_MSG)
    return object_id
def all_time(self):
    """Access the all_time

    :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
    :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
    """
    # Lazily construct and memoize the list resource on first access.
    if self._all_time is not None:
        return self._all_time
    self._all_time = AllTimeList(
        self._version,
        account_sid=self._solution['account_sid'],
    )
    return self._all_time
def set_temp_url_key(self, key=None):
    """Sets the key for the Temporary URL for the account.  It should be a
    key that is secret to the owner.

    If no key is provided, a UUID value will be generated and used.  It can
    later be obtained by calling get_temp_url_key().
    """
    # Generate a random key when the caller did not supply one.
    key = uuid.uuid4().hex if key is None else key
    self.set_account_metadata({"Temp-Url-Key": key})
    self._cached_temp_url_key = key
def build(path=None, repository=None, tag=None, cache=True, rm=True,
          api_response=False, fileobj=None, dockerfile=None, buildargs=None,
          network_mode=None, labels=None, cache_from=None, target=None):
    '''.. versionchanged:: 2018.3.0
        If the built image should be tagged, then the repository and tag must
        now be passed separately using the ``repository`` and ``tag``
        arguments, rather than together in the (now deprecated) ``image``
        argument.

    Builds a docker image from a Dockerfile or a URL

    path
        Path to directory on the Minion containing a Dockerfile

    repository
        Optional repository name for the image being built

        .. versionadded:: 2018.3.0

    tag : latest
        Tag name for the image (required if ``repository`` is passed)

        .. versionadded:: 2018.3.0

    image
        .. deprecated:: 2018.3.0
            Use both ``repository`` and ``tag`` instead

    cache : True
        Set to ``False`` to force the build process not to use the Docker
        image cache, and pull all required intermediate image layers

    rm : True
        Remove intermediate containers created during build

    api_response : False
        If ``True``: an ``API_Response`` key will be present in the return
        data, containing the raw output from the Docker API.

    fileobj
        Allows for a file-like object containing the contents of the
        Dockerfile to be passed in place of a file ``path`` argument. This
        argument should not be used from the CLI, only from other Salt code.

    dockerfile
        Allows for an alternative Dockerfile to be specified. Path to
        alternative Dockefile is relative to the build path for the Docker
        container.

        .. versionadded:: 2016.11.0

    buildargs
        A dictionary of build arguments provided to the docker build process.

    network_mode
        networking mode (or name of docker network) to use when executing
        RUN commands.

    labels
        A dictionary of labels to set for the image

    cache_from
        list of image names to use a sources of cached layers (when cache
        is True)

    target
        Name of build stage to build for a multi-stage Dockerfile.

    **RETURN DATA**

    A dictionary containing one or more of the following keys:

    - ``Id`` - ID of the newly-built image
    - ``Time_Elapsed`` - Time in seconds taken to perform the build
    - ``Intermediate_Containers`` - IDs of containers created during the
      course of the build process

      *(Only present if rm=False)*
    - ``Images`` - A dictionary containing one or more of the following keys:

      - ``Already_Pulled`` - Layers that that were already present on the
        Minion
      - ``Pulled`` - Layers that that were pulled

      *(Only present if the image specified by the "repository" and "tag"
      arguments was not present on the Minion, or if cache=False)*
    - ``Status`` - A string containing a summary of the pull action (usually
      a message saying that an image was downloaded, or that it was up to
      date).

      *(Only present if the image specified by the "repository" and "tag"
      arguments was not present on the Minion, or if cache=False)*

    CLI Example:

    .. code-block:: bash

        salt myminion docker.build /path/to/docker/build/dir
        salt myminion docker.build https://github.com/myuser/myrepo.git repository=myimage tag=latest
        salt myminion docker.build /path/to/docker/build/dir dockerfile=Dockefile.different repository=myimage tag=dev
    '''
    _prep_pull()
    if repository or tag:
        # BUG FIX: the original test was `not repository and tag`, which
        # parses as `(not repository) and tag` and therefore only caught a
        # tag passed without a repository.  A repository passed without a
        # tag slipped through.  Per the intent ("have to have both or
        # neither"), reject whenever only one of the two is present.
        if not (repository and tag):
            raise SaltInvocationError(
                'If tagging, both a repository and tag are required')
        if not isinstance(repository, six.string_types):
            repository = six.text_type(repository)
        if not isinstance(tag, six.string_types):
            tag = six.text_type(tag)

    # For the build function in the low-level API, the "tag" refers to the
    # full tag (e.g. myuser/myimage:mytag). This is different than in other
    # functions, where the repo and tag are passed separately.
    image_tag = '{0}:{1}'.format(repository, tag) if repository and tag else None

    time_started = time.time()
    response = _client_wrapper('build',
                               path=path,
                               tag=image_tag,
                               quiet=False,
                               fileobj=fileobj,
                               rm=rm,
                               nocache=not cache,
                               dockerfile=dockerfile,
                               buildargs=buildargs,
                               network_mode=network_mode,
                               labels=labels,
                               cache_from=cache_from,
                               target=target)
    ret = {'Time_Elapsed': time.time() - time_started}
    _clear_context()

    if not response:
        raise CommandExecutionError(
            'Build failed for {0}, no response returned from Docker API'.format(path))

    stream_data = []
    for line in response:
        stream_data.extend(salt.utils.json.loads(line, cls=DockerJSONDecoder))
    errors = []
    # Iterate through API response and collect information
    for item in stream_data:
        try:
            item_type = next(iter(item))
        except StopIteration:
            continue
        if item_type == 'status':
            _pull_status(ret, item)
        if item_type == 'stream':
            _build_status(ret, item)
        elif item_type == 'errorDetail':
            _error_detail(errors, item)

    if 'Id' not in ret:
        # API returned information, but there was no confirmation of a
        # successful build.
        msg = 'Build failed for {0}'.format(path)
        log.error(msg)
        log.error(stream_data)
        if errors:
            msg += '. Error(s) follow:\n\n{0}'.format('\n\n'.join(errors))
        raise CommandExecutionError(msg)

    resolved_tag = resolve_tag(ret['Id'], all=True)
    if resolved_tag:
        ret['Image'] = resolved_tag
    else:
        ret['Warning'] = 'Failed to tag image as {0}'.format(image_tag)

    if api_response:
        ret['API_Response'] = stream_data

    if rm:
        ret.pop('Intermediate_Containers', None)
    return ret
def _value_loss(self, observ, reward, length):
    """Compute the loss function for the value baseline.

    The value loss is the difference between empirical and approximated
    returns over the collected episodes.  Returns the loss tensor and a
    summary string.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('value_loss'):
        # Current value estimates predicted by the network.
        value = self._network(observ, length).value
        # Empirical discounted return for each timestep.
        return_ = utility.discounted_return(reward, length, self._config.discount)
        advantage = return_ - value
        # Squared-error loss, masked beyond each sequence's valid length.
        value_loss = 0.5 * self._mask(advantage ** 2, length)
        summary = tf.summary.merge([
            tf.summary.histogram('value_loss', value_loss),
            tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
        value_loss = tf.reduce_mean(value_loss)
        # check_numerics guards against NaN/Inf silently propagating.
        return tf.check_numerics(value_loss, 'value_loss'), summary
def report(self, meter=None, include_bounded=False, reverse=True):
    """Print all parses and their violations in a structured format.

    :param meter: meter used for parsing; defaults to ``Meter.genDefault()``
    :param include_bounded: also include bounded (pruned) parses
    :param reverse: sort order forwarded to ``meter.printParses``
    :returns: the accumulated report string
    """
    ReportStr = ''
    if not meter:
        # Fall back to the default meter when none was supplied.
        from Meter import Meter
        meter = Meter.genDefault()
    if (hasattr(self, 'allParses')):
        # This node can parse directly: report each line's parse list.
        self.om(unicode(self))
        allparses = self.allParses(meter=meter, include_bounded=include_bounded)
        numallparses = len(allparses)
        # allparses = reversed(allparses) if reverse else allparses
        for pi, parseList in enumerate(allparses):
            line = self.iparse2line(pi).txt
            # parseList.sort(key=lambda P: P.score())
            # Header/footer frame each line's parse listing; body lines are
            # indented with a tab via the replace() below.
            hdr = "\n\n" + '=' * 30 + '\n[line #' + str(pi + 1) + ' of ' + str(numallparses) + ']: ' + line + '\n\n\t'
            ftr = '=' * 30 + '\n'
            ReportStr += self.om(hdr + meter.printParses(parseList, reverse=reverse).replace('\n', '\n\t')[:-1] + ftr, conscious=False)
    else:
        # Otherwise recurse into child nodes, skipping plain lists.
        for child in self.children:
            if type(child) == type([]):
                continue
            ReportStr += child.report()
    return ReportStr
def _types_match(type1, type2):
    """Returns False only if it can show that no value of type1
    can possibly match type2.

    Supports only a limited selection of types.
    """
    both_strings = (isinstance(type1, six.string_types)
                    and isinstance(type2, six.string_types))
    if both_strings:
        # Strip a trailing optionality marker before comparing names.
        return type1.rstrip('?') == type2.rstrip('?')
    # Non-string types: cannot prove a mismatch, so assume compatible.
    return True
def slowlog_get(self, length=None):
    """Returns the Redis slow queries log."""
    if length is None:
        return self.execute(b'SLOWLOG', b'GET')
    if not isinstance(length, int):
        raise TypeError("length must be int or None")
    return self.execute(b'SLOWLOG', b'GET', length)
def _fetch_contribution_info(self):
    """Build the list of information entries for contributions of the event."""
    self.contributions = {}
    # Eager-load the legacy mapping and timetable entry to avoid per-row
    # follow-up queries while iterating.
    query = (Contribution.query
             .with_parent(self.event)
             .options(joinedload('legacy_mapping'),
                      joinedload('timetable_entry').lazyload('*')))
    for contribution in query:
        # Unscheduled contributions have no start time and are skipped.
        if not contribution.start_dt:
            continue
        # Prefer the legacy id (imported events) over the current id.
        cid = (contribution.legacy_mapping.legacy_contribution_id
               if contribution.legacy_mapping else contribution.id)
        key = '{}t{}'.format(contribution.event_id, cid)
        self.contributions[key] = u'{} ({})'.format(
            contribution.title, to_unicode(format_time(contribution.start_dt)))
def estimate(self, upgrades):
    """Estimate the time needed to apply upgrades.

    If an upgrade does not specify an estimate it is assumed to be
    in the order of 1 second.

    :param upgrades: List of upgrades sorted in topological order.
    :returns: sum of the per-upgrade estimates
    """
    return sum(upgrade.estimate() for upgrade in upgrades)
def gunzip(gzipfile, template=None, runas=None, options=None):
    '''Uses the gunzip command to unpack gzip files

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution:

        .. code-block:: bash

            salt '*' archive.gunzip template=jinja /tmp/{{grains.id}}.txt.gz

    runas : None
        The user with which to run the gzip command line

    options : None
        Pass any additional arguments to gzip

        .. versionadded:: 2016.3.4

    CLI Example:

    .. code-block:: bash

        # Create /tmp/sourcefile.txt
        salt '*' archive.gunzip /tmp/sourcefile.txt.gz
        salt '*' archive.gunzip /tmp/sourcefile.txt options='--verbose'
    '''
    command = ['gunzip']
    if options:
        command.append(options)
    command.append('{0}'.format(gzipfile))
    output = __salt__['cmd.run'](command, template=template, runas=runas,
                                 python_shell=False)
    return output.splitlines()
def ensure_sink(self):
    """Ensure the log sink and its pub sub topic exist.

    Creates the sink if missing, updates it if its configuration drifted,
    and grants the sink's writer identity publish access on the topic.

    :returns: the fully-qualified sink path
    """
    topic_info = self.pubsub.ensure_topic()
    scope, sink_path, sink_info = self.get_sink(topic_info)
    client = self.session.client('logging', 'v2', '%s.sinks' % scope)
    try:
        sink = client.execute_command('get', {'sinkName': sink_path})
    except HttpError as e:
        # Any status other than 404 is a real API failure.
        if e.resp.status != 404:
            raise
        # The sink does not exist yet: create it.
        sink = client.execute_command('create', sink_info)
    else:
        # The sink exists: update only the fields that differ.
        delta = delta_resource(sink, sink_info['body'])
        if delta:
            sink_info['updateMask'] = ','.join(delta)
            sink_info['sinkName'] = sink_path
            sink_info.pop('parent')
            sink = client.execute_command('update', sink_info)
        else:
            # Nothing changed; IAM is presumed to already be in place
            # from the earlier create/update — confirm.
            return sink_path
    # A created or updated sink gets its writer identity authorized.
    self.pubsub.ensure_iam(publisher=sink['writerIdentity'])
    return sink_path
def _clear_context():
    '''Clear any lxc variables set in __context__'''
    # Snapshot the matching keys first so we never mutate __context__
    # while iterating over it.
    lxc_keys = [key for key in __context__ if key.startswith('lxc.')]
    for key in lxc_keys:
        log.trace('Clearing __context__[\'%s\']', key)
        __context__.pop(key, None)
def run_process(self, analysis, action_name, message='__nomessagetoken__'):
    """Executes an process in the analysis with the given message.

    It also handles the start and stop signals in case a process_id
    is given.  Runs as a Tornado generator-coroutine (note the ``yield``
    of ``tornado.gen.sleep``).
    """
    if action_name == 'connect':
        # Wire the analysis to this executable/publisher and busy-wait
        # (politely, 0.1 s at a time) until the ZMQ handshake completes.
        analysis.on_connect(self.executable, self.zmq_publish)
        while not analysis.zmq_handshake:
            yield tornado.gen.sleep(0.1)
    log.debug('sending action {}'.format(action_name))
    analysis.zmq_send({'signal': action_name, 'load': message})
    if action_name == 'disconnected':
        # Give kernel time to process disconnected message.
        yield tornado.gen.sleep(0.1)
        analysis.on_disconnected()
def client(self, container):
    """Return a client instance that is bound to that container.

    :param container: container id
    :return: Client object bound to the specified container id

    Return a ContainerResponse from container.create
    """
    # Validate the id before handing out a bound client.
    self._client_chk.check(container)
    container_id = int(container)
    return ContainerClient(self._client, container_id)
def is_instance_running(self, instance_id):
    """Check whether the instance is up and running.

    :param str instance_id: instance identifier
    :return: True if instance is running, False otherwise
    """
    matches = self.list_instances(filter=('name eq "%s"' % instance_id))
    return any(item['status'] == 'RUNNING' for item in matches)
def _getPOS(self, token, onlyFirst=True):
    '''Returns POS of the current token.'''
    analyses = token[ANALYSIS]
    if not onlyFirst:
        # All part-of-speech tags across the token's analyses.
        return [analysis[POSTAG] for analysis in analyses]
    # Only the tag of the first analysis.
    return analyses[0][POSTAG]
def random(self, size, n_and, max_in, n=1):
    """Generates `n` random logical networks with given size range, number
    of AND gates and maximum input signals for AND gates.  Logical networks
    are saved in the attribute :attr:`networks`.

    Parameters
    ----------
    size : (int, int)
        Minimum and maximum sizes
    n_and : (int, int)
        Minimum and maximum AND gates
    max_in : int
        Maximum input signals for AND gates
    n : int
        Number of random logical networks to be generated
    """
    # Constants handed to the ASP encodings.
    args = ['-c minsize=%s' % size[0], '-c maxsize=%s' % size[1],
            '-c minnand=%s' % n_and[0], '-c maxnand=%s' % n_and[1],
            '-c maxin=%s' % max_in]
    encodings = ['guess', 'random']
    # Drop any previously generated networks before solving again.
    self.networks.reset()
    clingo = self.__get_clingo__(args, encodings)
    clingo.conf.solve.models = str(n)
    # Fresh random seed so repeated calls produce different networks.
    clingo.conf.solver.seed = str(randint(0, 32767))
    # NOTE(review): sign_def='3' presumably selects a randomized sign
    # heuristic for model diversity — confirm against clasp options.
    clingo.conf.solver.sign_def = '3'
    clingo.ground([("base", [])])
    # Each model found is captured through the __save__ callback.
    clingo.solve(on_model=self.__save__)
def _imm_merge_class(cls, parent):
    '''_imm_merge_class(imm_class, parent) updates the given immutable class
    imm_class to have the appropriate attributes of its given parent class.
    The parents should be passed through this function in method-resolution
    order.
    '''
    # If this is not an immutable parent, ignore it
    if not hasattr(parent, '_pimms_immutable_data_'):
        return cls
    # otherwise, let's look at the data
    cdat = cls._pimms_immutable_data_
    pdat = parent._pimms_immutable_data_
    # for params, values, and checks, we add them to cls only if they do not
    # already exist in cls
    cparams = cdat['params']
    cvalues = cdat['values']
    cconsts = cdat['consts']
    for (param, (dflt, tx_fn, arg_lists, check_fns, deps)) in six.iteritems(pdat['params']):
        if param not in cparams and param not in cvalues:
            # Inherited params keep the default and transform but start
            # with empty dependency bookkeeping; presumably those lists
            # are rebuilt later in class construction — confirm.
            cparams[param] = (dflt, tx_fn, [], [], [])
    for (value, (arg_list, calc_fn, deps)) in six.iteritems(pdat['values']):
        if value in cparams:
            # A name cannot be both a parameter and a computed value.
            raise ValueError('cannot convert value into parameter: %s' % value)
        if value not in cvalues:
            cvalues[value] = (arg_list, calc_fn, [])
            if len(arg_list) == 0:
                # Zero-argument values are recorded as constants.
                cconsts[value] = ([], [])
    cchecks = cdat['checks']
    for (check, (arg_list, check_fn)) in six.iteritems(pdat['checks']):
        if check not in cchecks:
            cchecks[check] = (arg_list, check_fn)
    # That's it for now
    return cls
def draw_group_labels(self):
    """Renders group labels to the figure."""
    coords = self.group_label_coords
    aligns = self.group_label_aligns
    for index, label in enumerate(self.groups):
        # Position, alignment and color are parallel per-group lists.
        self.ax.text(
            s=label,
            x=coords["x"][index],
            y=coords["y"][index],
            ha=aligns["has"][index],
            va=aligns["vas"][index],
            color=self.group_label_color[index],
            fontsize=self.fontsize,
            family=self.fontfamily,
        )
def get_dependencies(self):
    """Returns the set of data dependencies as producer infos corresponding
    to data requirements."""
    # Union of producer infos across every required product type.
    return {info
            for product_type in self._dependencies
            for info in self._get_producer_infos_by_product_type(product_type)}
def do_child_watch(self, params):
    """\x1b[1mNAME\x1b[0m
    child_watch - Watch a path for child changes

    \x1b[1mSYNOPSIS\x1b[0m
    child_watch <path> [verbose]

    \x1b[1mOPTIONS\x1b[0m
    * verbose: prints list of znodes (default: false)

    \x1b[1mEXAMPLES\x1b[0m
    # only prints the current number of children
    > child_watch /

    # prints num of children along with znodes listing
    > child_watch / true
    """
    # Register (or refresh) a child watcher on the given path; output is
    # routed through this shell's show_output printer.
    get_child_watcher(self._zk, print_func=self.show_output).update(params.path, params.verbose)
def present(name, Name=None, ScheduleExpression=None, EventPattern=None,
            Description=None, RoleArn=None, State=None, Targets=None,
            region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the CloudWatch event rule exists.

    name
        The name of the state definition

    Name
        Name of the event rule. Defaults to the value of the 'name' param if
        not provided.

    ScheduleExpression
        The scheduling expression. For example, ``cron(0 20 * * ? *)``,
        "rate(5 minutes)"

    EventPattern
        The event pattern.

    Description
        A description of the rule

    State
        Indicates whether the rule is ENABLED or DISABLED.

    RoleArn
        The Amazon Resource Name (ARN) of the IAM role associated with the
        rule.

    Targets
        A list of resources to be invoked when the rule is triggered.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    # Resolve the rule name BEFORE building the return skeleton, so that
    # ret['name'] is never None when only `name` was supplied (the original
    # code read `Name` one line too early).
    Name = Name if Name else name
    ret = {'name': Name, 'result': True, 'comment': '', 'changes': {}}

    # Targets may arrive as a JSON string from the state file.
    if isinstance(Targets, six.string_types):
        Targets = salt.utils.json.loads(Targets)
    if Targets is None:
        Targets = []

    r = __salt__['boto_cloudwatch_event.exists'](Name=Name, region=region,
                                                 key=key, keyid=keyid,
                                                 profile=profile)
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Failed to create event rule: {0}.'.format(r['error']['message'])
        return ret

    if not r.get('exists'):
        # Rule does not exist yet: create it, then attach targets if any.
        if __opts__['test']:
            ret['comment'] = 'CloudWatch event rule {0} is set to be created.'.format(Name)
            ret['result'] = None
            return ret
        r = __salt__['boto_cloudwatch_event.create_or_update'](
            Name=Name, ScheduleExpression=ScheduleExpression,
            EventPattern=EventPattern, Description=Description,
            RoleArn=RoleArn, State=State, region=region, key=key,
            keyid=keyid, profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = 'Failed to create event rule: {0}.'.format(r['error']['message'])
            return ret
        _describe = __salt__['boto_cloudwatch_event.describe'](
            Name, region=region, key=key, keyid=keyid, profile=profile)
        if 'error' in _describe:
            ret['result'] = False
            ret['comment'] = 'Failed to create event rule: {0}.'.format(_describe['error']['message'])
            ret['changes'] = {}
            return ret
        ret['changes']['old'] = {'rule': None}
        ret['changes']['new'] = _describe
        # Fixed copy/paste from the cloudtrail state module: this state
        # manages CloudWatch event rules, not CloudTrail trails.
        ret['comment'] = 'CloudWatch event rule {0} created.'.format(Name)
        if bool(Targets):
            r = __salt__['boto_cloudwatch_event.put_targets'](
                Rule=Name, Targets=Targets, region=region, key=key,
                keyid=keyid, profile=profile)
            if 'error' in r:
                ret['result'] = False
                ret['comment'] = 'Failed to create event rule: {0}.'.format(r['error']['message'])
                ret['changes'] = {}
                return ret
            ret['changes']['new']['rule']['Targets'] = Targets
        return ret

    ret['comment'] = os.linesep.join([ret['comment'],
                                      'CloudWatch event rule {0} is present.'.format(Name)])
    ret['changes'] = {}

    # Rule exists: ensure its configuration (and target list) matches.
    _describe = __salt__['boto_cloudwatch_event.describe'](
        Name=Name, region=region, key=key, keyid=keyid, profile=profile)
    if 'error' in _describe:
        ret['result'] = False
        ret['comment'] = 'Failed to update event rule: {0}.'.format(_describe['error']['message'])
        ret['changes'] = {}
        return ret
    _describe = _describe.get('rule')
    r = __salt__['boto_cloudwatch_event.list_targets'](
        Rule=Name, region=region, key=key, keyid=keyid, profile=profile)
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Failed to update event rule: {0}.'.format(r['error']['message'])
        ret['changes'] = {}
        return ret
    _describe['Targets'] = r.get('targets', [])

    need_update = False
    # Maps this function's parameter names to keys in the describe output.
    # The parameter values are fetched via locals(), so these keys MUST
    # match the argument names in the signature exactly.
    rule_vars = {'ScheduleExpression': 'ScheduleExpression',
                 'EventPattern': 'EventPattern',
                 'Description': 'Description',
                 'RoleArn': 'RoleArn',
                 'State': 'State',
                 'Targets': 'Targets'}
    for invar, outvar in six.iteritems(rule_vars):
        if _describe[outvar] != locals()[invar]:
            need_update = True
            ret['changes'].setdefault('new', {})[invar] = locals()[invar]
            ret['changes'].setdefault('old', {})[invar] = _describe[outvar]

    if need_update:
        if __opts__['test']:
            ret['comment'] = 'CloudWatch event rule {0} set to be modified.'.format(Name)
            ret['result'] = None
            return ret
        ret['comment'] = os.linesep.join([ret['comment'],
                                          'CloudWatch event rule to be modified'])
        r = __salt__['boto_cloudwatch_event.create_or_update'](
            Name=Name, ScheduleExpression=ScheduleExpression,
            EventPattern=EventPattern, Description=Description,
            RoleArn=RoleArn, State=State, region=region, key=key,
            keyid=keyid, profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = 'Failed to update event rule: {0}.'.format(r['error']['message'])
            ret['changes'] = {}
            return ret
        if _describe['Targets'] != Targets:
            # Targets to delete: every Id currently attached that is not
            # re-declared in the requested Targets list.
            removes = [i.get('Id') for i in _describe['Targets']]
            # Demoted from log.error: this was leftover debug logging.
            log.debug(Targets)
            if bool(Targets):
                for target in Targets:
                    tid = target.get('Id', None)
                    if tid is not None and tid in removes:
                        ix = removes.index(tid)
                        removes.pop(ix)
                r = __salt__['boto_cloudwatch_event.put_targets'](
                    Rule=Name, Targets=Targets, region=region, key=key,
                    keyid=keyid, profile=profile)
                if 'error' in r:
                    ret['result'] = False
                    ret['comment'] = 'Failed to update event rule: {0}.'.format(r['error']['message'])
                    ret['changes'] = {}
                    return ret
            if bool(removes):
                r = __salt__['boto_cloudwatch_event.remove_targets'](
                    Rule=Name, Ids=removes, region=region, key=key,
                    keyid=keyid, profile=profile)
                if 'error' in r:
                    ret['result'] = False
                    ret['comment'] = 'Failed to update event rule: {0}.'.format(r['error']['message'])
                    ret['changes'] = {}
                    return ret
    return ret
def isAuthorized(self, request):
    """Is the user authorized for the requested action with this event?

    An event with no view restrictions is open to everyone. A restricted
    event requires a request object, and every restriction must accept it.
    """
    restrictions = self.get_view_restrictions()
    if not restrictions:
        # Unrestricted content: always authorized.
        return True
    if request is None:
        # Restricted content cannot be vetted without a request.
        return False
    for restriction in restrictions:
        if not restriction.accept_request(request):
            return False
    return True
def parse_commit_log(name, content, releases, get_head_fn):
    """Parses the given commit log

    :param name: str, package name
    :param content: list, (directory path, metadata) pairs for checkouts
    :param releases: list, releases
    :param get_head_fn: function
    :return: tuple, (parsed changelog, raw git log text)
    """
    generated = ""
    raw = ""
    for repo_path, _unused in content:
        repo_entries = changelog(repository=GitRepos(repo_path),
                                 tag_filter_regexp=r"v?\d+\.\d+(\.\d+)?")
        generated += "\n".join(repo_entries)
        git_output = subprocess.check_output(
            ["git", "-C", repo_path, "--no-pager", "log", "--decorate"])
        raw += "\n" + git_output.decode("utf-8")
        # Checkouts are temporary clones; remove each one once consumed.
        shutil.rmtree(repo_path)
    return parse(name, generated, releases, get_head_fn), raw
def mandelbrot(x, y, params):
    """Computes the number of iterations of the given plane-space coordinates.

    :param x: X coordinate on the plane.
    :param y: Y coordinate on the plane.
    :param params: Current application parameters.
    :type params: params.Params
    :return: Discrete number of iterations.
    """
    # Map screen-space (x, y) onto the complex plane, then iterate.
    real_part, imag_part = get_coords(x, y, params)
    point = real_part + 1j * imag_part
    result = mandelbrot_iterate(point, params.max_iterations, params.julia_seed)
    # Index 1 of the iterate result is the iteration count.
    return result[1]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.