signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def report_unknown(bytes_so_far, total_size, speed, eta):
    """Progress callback for downloads whose total size is unknown.

    Writes a carriage-return-terminated status line so that repeated
    calls overwrite each other on the same console line.
    """
    status = "Downloading: {0} / Unknown - {1}/s ".format(
        approximate_size(bytes_so_far), approximate_size(speed))
    sys.stdout.write(status)
    sys.stdout.write("\r")
    sys.stdout.flush()
def _acl_changes(name, id=None, type=None, rules=None, consul_url=None, token=None):
    """Return True if the ACL needs to be updated, False if it doesn't."""
    info = __salt__['consul.acl_info'](id=id, token=token, consul_url=consul_url)
    if not info['res']:
        return False
    current = info['data'][0]
    # Any mismatch in name, rules or type means an update is required.
    return (current['Name'] != name
            or current['Rules'] != rules
            or current['Type'] != type)
def safe_print(text, file=sys.stdout, flush=False):
    """Prints a (unicode) string to the console, encoded depending on
    the stdout/file encoding (eg. cp437 on Windows). This is to avoid
    encoding errors in case of funky path names.
    Works with Python 2 and 3."""
    # NOTE(review): the `flush` parameter is accepted but never used here.
    # Non-string objects are delegated to print(), which handles their
    # own conversion.  NOTE(review): `basestring` exists only on Python 2.
    if not isinstance(text, basestring):
        return print(text, file=file)
    try:
        file.write(text)
    except UnicodeEncodeError:
        # Fall back to a byte representation the target encoding can
        # express, escaping unencodable characters.
        bytes_string = text.encode(file.encoding, 'backslashreplace')
        if hasattr(file, 'buffer'):
            # Python 3: write raw bytes to the underlying binary buffer.
            file.buffer.write(bytes_string)
        else:
            # Python 2: decode back so file.write() receives text again.
            text = bytes_string.decode(file.encoding, 'strict')
            file.write(text)
    file.write("\n")
def delay(self, seconds=0, minutes=0):
    """Pause the robot in place for the requested duration, then resume.

    Parameters
    ----------
    seconds : float
        The number of seconds to freeze in place.
    minutes : float
        Additional minutes to wait, added to ``seconds``.
    """
    # Fold whole minutes out of `seconds`, then recombine into a single
    # float number of seconds.
    extra_minutes = int(seconds / 60)
    remainder = seconds % 60
    total_seconds = remainder + float((minutes + extra_minutes) * 60)
    self.robot.pause()
    if not self.robot.is_simulating():
        _sleep(total_seconds)
    self.robot.resume()
    return self
def _data_as_matrix(self, X_keys, y_key=None, alias=None, legend=None, match_only=None, field=None, field_function=None, legend_field=None, table=None, basis=None, step=None, window_length=None, window_step=1, uwis=None, include_basis=False, include_index=False, include=None, complete_only=False, ):
    """Make X (and optionally y) by stacking windowed curve data from
    every well that has all of the requested curves.

    NOTE(review): row/column semantics are inferred from the calls to
    ``w.data_as_matrix`` below — confirm against that method's docs.
    """
    alias = alias or self.alias
    if include is not None:
        include = np.array(include)
    if window_length is None:
        window_length = 1
    # Seed with known size.
    cols = window_length * len(X_keys)
    cols += sum([include_basis, include_index])
    def get_cols(q):
        # Extra columns contributed by `include`: 0 for None, the first
        # dimension's length for arrays, 1 for scalars.
        if q is None:
            return 0
        a = np.array(q)
        try:
            s = a.shape[0]
        except IndexError:
            s = 1
        return s
    cols += get_cols(include)
    X = np.zeros(cols)  # seed row, stripped before returning
    y = np.zeros(1)     # seed element, stripped before returning
    # Build up the data.
    for i, w in enumerate(self.get_wells(uwis)):
        print(w.uwi, end=' ')
        if not w.is_complete(X_keys, alias):
            # Skip wells that are missing any requested curve.
            continue
        _X, z = w.data_as_matrix(X_keys, basis=basis, step=step, window_length=window_length, window_step=window_step, return_basis=True, alias=alias)
        if include is not None:
            try:
                if np.ndim(include) == 0:
                    # Scalar: broadcast as a single constant column.
                    x = include * np.ones_like(z)
                    _X = np.hstack([np.expand_dims(x, 1), _X])
                elif np.ndim(include) == 1:
                    # 1-D: one constant column per element.
                    for c in include:
                        x = c * np.ones_like(z)
                        _X = np.hstack([np.expand_dims(x, 1), _X])
                elif np.ndim(include) == 2:
                    # 2-D: per-well values; row c indexed by well number i.
                    for c in include:
                        x = c[i] * np.ones_like(z)
                        _X = np.hstack([np.expand_dims(x, 1), _X])
                else:
                    raise IndexError('Too many dimensions in include.')
            except:  # NOTE(review): bare except masks the original error.
                raise WellError('Problem braodcasting include into X matrix.')
        if include_basis:
            _X = np.hstack([np.expand_dims(z, 1), _X])
        if include_index:
            index = i * np.ones_like(z)
            _X = np.hstack([np.expand_dims(index, 1), _X])
        X = np.vstack([X, _X])
        print(_X.shape[0])
        if y_key is None:
            continue
        # NOTE(review): y_key is overwritten with the resolved mnemonic,
        # so subsequent wells reuse the first well's resolution — verify
        # this is intended.
        y_key = w.get_mnemonic(y_key, alias=alias)
        if y_key is None:
            continue
        try:
            _y = w.data[y_key].to_basis(basis=z)
        except:
            # Fall back to log conversion when the curve cannot be
            # re-sampled directly onto basis z.
            _y = w.data[y_key].to_log(basis=z, legend=legend, match_only=match_only, field=field, field_function=field_function, table=table, legend_field=legend_field, )
        y = np.hstack([y, _y])
    # Get rid of the 'seed'.
    X = X[1:]
    if y_key is None:
        y = None
    else:
        y = y[1:]
    return X, y
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
    '''Calculate pairwise comparisons using Quade's post hoc test for
    unreplicated blocked data. This test is usually conducted if significant
    results were obtained by the omnibus test [1]_, [2]_, [3]_.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas
        DataFrame.
        If `melted` is set to False (default), `a` is a typical matrix of
        block design, i.e. rows are blocks, and columns are groups. In this
        case you do not need to specify col arguments.
        If `a` is an array and `melted` is set to True, y_col, block_col
        and group_col must specify the indices of columns containing
        elements of the corresponding type.
        If `a` is a Pandas DataFrame and `melted` is set to True, y_col,
        block_col and group_col must specify column names (string).
    y_col : str or int, optional
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains y data.
    block_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains blocking factor values.
    group_col : str or int
        Must be specified if `a` is a pandas DataFrame object.
        Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values. The default distribution is "t",
        else "normal".
    melted : bool, optional
        Specifies if data are given as melted columns "y", "blocks", and
        "groups".
    sort : bool, optional
        If True, sort data by block and group columns.
    p_adjust : str, optional
        Method for adjusting p values
        (see statsmodels.sandbox.stats.multicomp for details). Available:
        'bonferroni', 'sidak', 'holm-sidak', 'holm', 'simes-hochberg',
        'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', 'fdr_tsbky'.

    Returns
    -------
    Pandas DataFrame containing p values.

    References
    ----------
    .. [1] W. J. Conover (1999), Practical nonparametric Statistics,
       3rd Edition, Wiley.
    .. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148:
       Dataplot Reference Manual, Volume 2: Let Subcommands and Library
       Functions. NIST Handbook Series, June 2003.
    .. [3] D. Quade (1979), Using weighted rankings in the analysis of
       complete blocks with additive block effects. Journal of the
       American Statistical Association, 74, 680-683.

    Examples
    --------
    >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
    >>> sp.posthoc_quade(x)
    '''
    if melted and not all([block_col, group_col, y_col]):
        raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
    def compare_stats_t(i, j):
        # Quade t-statistic on the difference of per-group S sums.
        dif = np.abs(S[groups[i]] - S[groups[j]])
        tval = dif / denom
        pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
        return pval
    def compare_stats_norm(i, j):
        # Normal approximation on scaled per-group W sums.
        dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
        zval = dif / denom
        pval = 2. * ss.norm.sf(np.abs(zval))
        return pval
    x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
    if not sort:
        # Preserve first-appearance order via ordered categoricals so the
        # subsequent sort does not reorder groups/blocks alphabetically.
        x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
        x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
    x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
    x.dropna(inplace=True)
    groups = x[group_col].unique()
    k = len(groups)                  # number of treatment groups
    b = x[block_col].unique().size   # number of blocks
    x['r'] = x.groupby(block_col)[y_col].rank()  # within-block ranks
    # Block weights: rank of each block's sample range.
    q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
    x['rr'] = x['r'] - (k + 1) / 2
    # NOTE(review): these lambdas index q by x['blocks'], assuming
    # __convert_to_block_df always names the block column 'blocks' —
    # confirm, since block_col is used everywhere else.
    x['s'] = x.apply(lambda x, y: x['rr'] * y[x['blocks']], axis=1, args=(q,))
    x['w'] = x.apply(lambda x, y: x['r'] * y[x['blocks']], axis=1, args=(q,))
    A = (x['s'] ** 2).sum()
    S = x.groupby(group_col)['s'].sum()
    B = np.sum(S ** 2) / b
    W = x.groupby(group_col)['w'].sum()
    # NOTE(review): np.float is removed in NumPy >= 1.24; plain `float`
    # would be the modern spelling.
    vs = np.zeros((k, k), dtype=np.float)
    combs = it.combinations(range(k), 2)
    tri_upper = np.triu_indices(vs.shape[0], 1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0
    if dist == 't':
        denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
        for i, j in combs:
            vs[i, j] = compare_stats_t(i, j)
    else:
        n = b * k
        denom = np.sqrt((k * (k + 1) * (2 * n + 1) * (k - 1)) / (18 * n * (n + 1)))
        ff = 1. / (b * (b + 1) / 2)
        for i, j in combs:
            vs[i, j] = compare_stats_norm(i, j)
    if p_adjust:
        # Adjust only the upper triangle, then mirror below.
        vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
    vs[tri_lower] = vs.T[tri_lower]
    np.fill_diagonal(vs, -1)
    return DataFrame(vs, index=groups, columns=groups)
def tocimxml(self):
    """Return the CIM-XML representation of this CIM qualifier type,
    as an object of an appropriate subclass of :term:`Element`.

    The returned CIM-XML representation is a `QUALIFIER.DECLARATION`
    element consistent with :term:`DSP0201`.

    Returns:
        The CIM-XML representation, as an object of an appropriate
        subclass of :term:`Element`.
    """
    def _array_entry(v):
        # NULL array entries become VALUE.NULL when enabled, otherwise
        # an empty VALUE element; real values are converted atomically.
        if v is not None:
            return cim_xml.VALUE(atomic_to_cim_xml(v))
        if SEND_VALUE_NULL:
            return cim_xml.VALUE_NULL()
        return cim_xml.VALUE(None)

    if self.value is None:
        value_xml = None
    elif isinstance(self.value, (tuple, list)):
        value_xml = cim_xml.VALUE_ARRAY([_array_entry(v) for v in self.value])
    else:
        value_xml = cim_xml.VALUE(atomic_to_cim_xml(self.value))
    return cim_xml.QUALIFIER_DECLARATION(
        self.name, self.type, value_xml,
        is_array=self.is_array,
        array_size=self.array_size,
        qualifier_scopes=self.scopes,
        overridable=self.overridable,
        tosubclass=self.tosubclass,
        toinstance=self.toinstance,
        translatable=self.translatable)
def parse_tags(content, reference_id=None, canonicalize=True):
    """Returns the TAGS of a cable.

    Acc. to the U.S. SD every cable needs at least one tag.

    `content`
        The content of the cable.
    `reference_id`
        The reference identifier of the cable.
    `canonicalize`
        Indicates if duplicates should be removed and malformed
        TAGs like "ECONEFIN" should be corrected (becomes "ECON", "EFIN").
        ``False`` indicates that the TAGs should be returned as found in
        the cable.
    """
    # Limit the search window: TAGS must appear before the subject line
    # (or before the generic header cut-off).
    max_idx = _MAX_HEADER_IDX
    m = _SUBJECT_MAX_PATTERN.search(content)
    if m:
        max_idx = m.start()
    m = _SUBJECT_PATTERN.search(content, 0, max_idx)
    if m:
        max_idx = min(max_idx, m.start())
    m = _TAGS_PATTERN.search(content, 0, max_idx)
    if not m:
        # Some cables legitimately carry no TAGS; log only unexpected ones.
        if reference_id not in _CABLES_WITHOUT_TAGS:
            logger.debug('No TAGS found in cable ID "%r", content: "%s"' % (reference_id, content))
        return []
    tags = _TAGS_CLEANUP_PATTERN.sub(u' ', m.group(1))
    min_idx = m.end()
    # The TAG list may continue on the next line (trailing comma, or an
    # explicit continuation pattern match).
    if tags.endswith(',') or tags.endswith(', ') or _TAGS_CONT_NEXT_LINE_PATTERN.match(content, min_idx, max_idx):
        m2 = _TAGS_CONT_PATTERN.match(content, m.end(), max_idx)
        if m2:
            # NOTE: the `ur''` literal below is Python 2 only.
            tags = re.sub(ur'\s+', u' ', u' '.join([tags, _TAGS_CLEANUP_PATTERN.sub(u' ', m2.group(1))]))
    res = []
    if not canonicalize:
        return [u''.join(tag).upper() for tag in _TAG_PATTERN.findall(tags) if tag]
    for t in _TAG_PATTERN.findall(tags):
        tag = u''.join(t).upper().replace(u')', u'').replace(u'(', u'')
        if tag == u'SIPDIS':  # Found in 05OTTAWA3726 and 05OTTAWA3709. I think it's an error
            continue
        # Expand known malformed TAGs into their corrected forms.
        for tag in _TAG_FIXES.get(tag, (tag,)):
            if tag == u'ECONSOCIXR':  # 08BRASILIA1504
                # This fix itself expands into multiple tags.
                for tag in _TAG_FIXES[tag]:
                    if not tag in res:
                        res.append(tag)
                continue
            if not tag in res:
                res.append(tag)
    return res
def stream_file(self, url, folder=None, filename=None, overwrite=False):
    # type: (str, Optional[str], Optional[str], bool) -> str
    """Stream file from url and store in provided folder or temporary
    folder if no folder supplied. Must call setup method first.

    Args:
        url (str): URL to download
        filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).
        folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).
        overwrite (bool): Whether to overwrite existing file. Defaults to False.

    Returns:
        str: Path of downloaded file
    """
    path = self.get_path_for_url(url, folder, filename, overwrite)
    try:
        # Use a context manager instead of manual open/try/finally so the
        # handle is guaranteed to be closed even if iteration raises.
        with open(path, 'wb') as f:
            for chunk in self.response.iter_content(chunk_size=10240):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
            return f.name
    except Exception as e:
        raisefrom(DownloadError, 'Download of %s failed in retrieval of stream!' % url, e)
def compose(*coros):
    """Creates a coroutine function based on the composition of the
    passed coroutine functions.

    Each function consumes the yielded result of the coroutine that
    follows.  Composing coroutine functions f(), g() and h() produces
    the result of f(g(h())).

    Arguments:
        *coros (coroutinefunction): variadic coroutine functions to compose.

    Raises:
        RuntimeError: if cannot execute a coroutine function.

    Returns:
        coroutinefunction
    """
    # Materialise as a list so the sequence supports the built-in
    # container protocol expected by reduce().
    pipeline = list(coros)

    @asyncio.coroutine
    def _step(accumulated, coro_fn):
        return (yield from coro_fn(accumulated))

    @asyncio.coroutine
    def composed(seed):
        # NOTE: this `reduce` is the project's coroutine-aware reduce
        # (supporting `initializer`/`right`), not functools.reduce.
        return (yield from reduce(_step, pipeline, initializer=seed, right=True))

    return composed
def cancel_per_farm_db_replication():
    """Cancel replication of the per-farm databases from the local
    server to the cloud server."""
    # The cloud URL is read for parity with the setup path, although
    # cancellation itself only talks to the local server.
    cloud_url = config["cloud_server"]["url"]
    local_server = Server(config["local_server"]["url"])
    for database_name in per_farm_dbs:
        local_server.cancel_replication(database_name)
def drawDisplay(self, painter, option, rect, text):
    """Overloads the drawDisplay method to render HTML if the rich text
    information is set to true.

    :param painter | <QtGui.QPainter>
           option  | <QtGui.QStyleOptionItem>
           rect    | <QtCore.QRect>
           text    | <str>
    """
    if not self.showRichText():
        # Plain-text path: coerce to a native string, elide to the cell
        # width, and draw directly.
        if type(text).__name__ not in ('str', 'unicode', 'QString'):
            text = nativestring(text)
        metrics = QtGui.QFontMetrics(option.font)
        elided = metrics.elidedText(text, QtCore.Qt.TextElideMode(option.textElideMode), rect.width())
        painter.setFont(option.font)
        painter.drawText(rect, int(option.displayAlignment), elided)
        return
    # Rich-text path: render through a QTextDocument, translated into
    # the cell's coordinate space and back.
    doc = QtGui.QTextDocument()
    doc.setTextWidth(float(rect.width()))
    doc.setHtml(text)
    painter.translate(rect.x(), rect.y())
    doc.drawContents(painter, QtCore.QRectF(0, 0, float(rect.width()), float(rect.height())))
    painter.translate(-rect.x(), -rect.y())
def expected_information_gain(self, expparams):
    r"""Calculates the expected information gain for each hypothetical experiment.

    :param expparams: The experiments at which to compute expected
        information gain.
    :type expparams: :class:`~numpy.ndarray` of dtype given by the current
        model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype`
        property, and of shape ``(n,)``
    :return float: The expected information gain for each
        hypothetical experiment in ``expparams``.
    """
    # This is a special case of the KL divergence estimator (see below),
    # in which the other distribution is guaranteed to share support.
    # For models whose outcome number changes with experiment, we
    # take the easy way out and for-loop over experiments.
    n_eps = expparams.size
    if n_eps > 1 and not self.model.is_n_outcomes_constant:
        risk = np.empty(n_eps)
        for idx in range(n_eps):
            risk[idx] = self.expected_information_gain(expparams[idx, np.newaxis])
        return risk
    # Number of outcomes for the first experiment.
    # NOTE: `os` here shadows any `os` module imported at file level.
    os = self.model.domain(expparams[0, np.newaxis])[0].values
    # Compute the hypothetical weights, likelihoods and normalizations for
    # every possible outcome and expparam.
    # The likelihood over outcomes should sum to 1, so don't compute for
    # the last outcome; it is reconstructed as 1 - sum(others) below.
    w_hyp, L, N = self.hypothetical_update(os[:-1], expparams, return_normalization=True, return_likelihood=True)
    w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :]
    N = np.concatenate([N[:, :, 0], np.sum(w_hyp_last_outcome[np.newaxis, :, :], axis=2)], axis=0)
    w_hyp_last_outcome = w_hyp_last_outcome / N[-1, :, np.newaxis]
    w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis, :, :]], axis=0)
    # w_hyp.shape == (n_out, n_eps, n_particles)
    # N.shape == (n_out, n_eps)
    # Compute the Kullback-Leibler divergence for every experiment and
    # possible outcome.  KLD.shape == (n_out, n_eps)
    KLD = np.sum(w_hyp * np.log(w_hyp / self.particle_weights), axis=2)
    # Return the expected KLD (i.e. expected info gain) for every experiment.
    return np.sum(N * KLD, axis=0)
def _get_directory_path(context):
    """Get the storage path for the output."""
    default_path = os.path.join(settings.BASE_PATH, 'store')
    base = context.params.get('path', default_path)
    full_path = os.path.abspath(os.path.expandvars(os.path.join(base, context.crawler.name)))
    try:
        os.makedirs(full_path)
    except Exception:
        # Directory may already exist (or be uncreatable); either way we
        # fall through and return the computed path.
        pass
    return full_path
def _get_cur_remotes(path):
    """Retrieve remote references defined in the CWL.

    Recurses into lists/tuples and dict values; strings are matched
    against the known integration prefixes."""
    remotes = set()
    if isinstance(path, (list, tuple)):
        for item in path:
            remotes |= _get_cur_remotes(item)
    elif isinstance(path, dict):
        for item in path.values():
            remotes |= _get_cur_remotes(item)
    elif path and isinstance(path, six.string_types):
        # A remote reference looks like "<scheme>:..."; map the scheme
        # back to its integration.
        if path.startswith(tuple(INTEGRATION_MAP.keys())):
            remotes.add(INTEGRATION_MAP.get(path.split(":")[0] + ":"))
    return remotes
def set_logxticks_for_all(self, row_column_list=None, logticks=None):
    """Manually specify the x-axis log tick values.

    :param row_column_list: a list containing (row, column) tuples to
        specify the subplots, or None to indicate *all* subplots.
    :type row_column_list: list or None
    :param logticks: logarithm of the locations for the ticks along the
        axis.  For example, [1, 2, 3] places ticks at 10, 100 and 1000.
    """
    if row_column_list is not None:
        for row, column in row_column_list:
            self.set_logxticks(row, column, logticks)
    else:
        # Apply to every subplot through the shared tick specification.
        self.ticks['x'] = ['1e%d' % exponent for exponent in logticks]
def write_pad_codewords(buff, version, capacity, length):
    """Writes the pad codewords iff the data does not fill the capacity
    of the symbol.

    :param buff: The byte buffer.
    :param int version: The (Micro) QR Code version.
    :param int capacity: The total capacity of the symbol (incl. error correction)
    :param int length: Length of the data bit stream.
    """
    # ISO/IEC 18004:2015(E) -- 7.4.10 Bit stream to codeword conversion
    # (page 32): the bit stream is extended to the symbol's data capacity
    # by adding the pad codewords 11101100 and 00010001 alternately.
    if version in (consts.VERSION_M1, consts.VERSION_M3):
        # For Micro QR M1 / M3 the final data codeword is 4 bits long and
        # padding is represented as all zeros.
        buff.extend([0] * (capacity - length))
    else:
        pad_codewords = ((1, 1, 1, 0, 1, 1, 0, 0), (0, 0, 0, 1, 0, 0, 0, 1))
        for i in range(capacity // 8 - length // 8):
            buff.extend(pad_codewords[i % 2])
def HsvToRgb(h, s, v):
    '''Convert the color from HSV coordinates to RGB.

    Parameters:
      :h: The Hue component value in degrees [0...360]
      :s: The Saturation component value [0...1]
      :v: The Value component [0...1]

    Returns:
      The color as an (r, g, b) tuple in the range:
      r[0...1],
      g[0...1],
      b[0...1]

    >>> Color.HsvToRgb(30.0, 1.0, 1.0)
    (1.0, 0.5, 0.0)
    '''
    # (Docstring fixed: this converts HSV -> RGB, hue is in degrees, and
    # the previous example referenced HslToRgb.)
    if s == 0:
        return (v, v, v)  # achromatic (gray)
    # Map hue into one of six 60-degree sectors.
    h /= 60.0
    h = h % 6.0
    i = int(h)
    f = h - i
    if not (i & 1):
        f = 1 - f  # if i is even
    m = v * (1.0 - s)
    n = v * (1.0 - (s * f))
    if i == 0:
        return (v, n, m)
    if i == 1:
        return (n, v, m)
    if i == 2:
        return (m, v, n)
    if i == 3:
        return (m, n, v)
    if i == 4:
        return (n, m, v)
    return (v, m, n)
def combine_heads(self, x):
    """Combine tensor that has been split.

    Args:
        x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]

    Returns:
        A tensor with shape [batch_size, length, hidden_size]
    """
    with tf.name_scope("combine_heads"):
        dims = tf.shape(x)
        batch_size, length = dims[0], dims[2]
        # [batch, num_heads, length, depth] --> [batch, length, num_heads, depth]
        transposed = tf.transpose(x, [0, 2, 1, 3])
        # Merge the trailing (num_heads, depth) axes back into hidden_size.
        return tf.reshape(transposed, [batch_size, length, self.hidden_size])
def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
    """Similarity between two lines.

    Args:
        p1a ([float, float]): x and y coordinates. Line A start
        p1b ([float, float]): x and y coordinates. Line A end
        p2a ([float, float]): x and y coordinates. Line B start
        p2b ([float, float]): x and y coordinates. Line B end
    Returns:
        float: between 0 and 1, where 1 is very similar and 0 is
        completely different
    """
    distance_sim = line_distance_similarity(p1a, p1b, p2a, p2b, T=T)
    angle_sim = abs(angle_similarity(normalize(line(p1a, p1b)),
                                     normalize(line(p2a, p2b))))
    # Combine distance and angular agreement multiplicatively.
    return distance_sim * angle_sim
def _to_tonnetz(chromagram):
    """Project a chromagram on the tonnetz.

    Returned value is normalized to prevent numerical instabilities."""
    if np.sum(np.abs(chromagram)) == 0.:
        # Empty chord: return the zero vector.
        return np.zeros(6)
    projected = np.dot(__TONNETZ_MATRIX, chromagram)
    # Normalize by the one-norm of the projection.
    return projected / float(np.sum(np.abs(projected)))
def _drop_oldest_chunk ( self ) :
'''To handle the case when the items comming in the chunk
is more than the maximum capacity of the chunk . Our intent
behind is to remove the oldest chunk . So that the items come
flowing in .
> > > s = StreamCounter ( 5,5)
> > > data _ stream = [ ' a ' , ' b ' , ' c ' , ' d ' ]
> > > for item in data _ stream :
. . . s . add ( item )
> > > min ( s . chunked _ counts . keys ( ) )
> > > s . chunked _ counts
{0 : { ' a ' : 1 , ' b ' : 1 , ' c ' : 1 , ' d ' : 1 } }
> > > data _ stream = [ ' a ' , ' b ' , ' c ' , ' d ' , ' a ' , ' e ' , ' f ' ]
> > > for item in data _ stream :
. . . s . add ( item )
> > > min ( s . chunked _ counts . keys ( ) )
> > > s . chunked _ counts
{2 : { ' f ' : 1 } }''' | chunk_id = min ( self . chunked_counts . keys ( ) )
chunk = self . chunked_counts . pop ( chunk_id )
self . n_counts -= len ( chunk )
for k , v in list ( chunk . items ( ) ) :
self . counts [ k ] -= v
self . counts_total -= v |
def write(self, fptr):
    """Write a data entry url box to file."""
    # The URL field must be written out null-terminated.
    url = self.url if self.url[-1] == chr(0) else self.url + chr(0)
    encoded = url.encode()
    # Box length = 4-byte length + 4-byte type + version byte + 3 flag
    # bytes + payload.
    box_length = 8 + 1 + 3 + len(encoded)
    header = struct.pack('>I4sBBBB', box_length, b'url ', self.version,
                         self.flag[0], self.flag[1], self.flag[2])
    fptr.write(header)
    fptr.write(encoded)
def match(self, *command_tokens, **command_env):
    """:meth:`.WCommandProto.match` implementation

    True when the given tokens begin with this object's command."""
    expected = self.command()
    if len(command_tokens) < len(expected):
        return False
    return command_tokens[:len(expected)] == expected
def error(self, message=None):
    """Delegates to `ArgumentParser.error`"""
    parser = self.__parser__  # pylint: disable-msg=E1101
    if parser:
        parser.error(message)
    else:
        # No parser available: log the message and exit with status 2,
        # mirroring argparse's behaviour.
        self.logger.error(message)  # pylint: disable-msg=E1101
        sys.exit(2)
def FilePrinter(filename, mode='a', closing=True):
    """Opens the given file and returns a printer to it.

    :param filename: path to the output file; ``~`` and relative parts
        are expanded to an absolute path.
    :param mode: file open mode (appends by default).
    :param closing: whether the returned Printer owns/closes the file.
    """
    # (Fix: the docstring was previously placed after the first
    # statement, where it was a no-op expression, not a docstring.)
    path = os.path.abspath(os.path.expanduser(filename))
    f = open(path, mode)
    return Printer(f, closing)
def advance(self, word_id: int) -> 'ConstrainedHypothesis':
    """Updates the constraints object based on advancing on word_id.

    There is a complication, in that we may have started but not
    yet completed a multi-word constraint. We need to allow constraints
    to be added as unconstrained words, so if the next word is
    invalid, we must "back out" of the current (incomplete) phrase,
    re-setting all of its words as unmet.

    :param word_id: The word ID to advance on.
    :return: A deep copy of the object, advanced on word_id.
    """
    obj = copy.deepcopy(self)
    # First, check if we're updating a sequential constraint.
    if obj.last_met != -1 and obj.is_sequence[obj.last_met] == 1:
        if word_id == obj.constraints[obj.last_met + 1]:
            # Here, the word matches what we expect next in the
            # constraint, so we update everything.
            obj.met[obj.last_met + 1] = True
            obj.last_met += 1
        else:
            # Here, the word is not the expected next word of the
            # constraint, so we back out of the constraint, unmarking
            # every word of the incomplete phrase.
            index = obj.last_met
            while obj.is_sequence[index]:
                obj.met[index] = False
                index -= 1
            obj.last_met = -1
    # If not, check whether we're meeting a single-word constraint.
    else:
        # Build a list from all constraints of tuples of the form
        # (constraint, whether it's a non-initial sequential, whether
        # it's been met).
        constraint_tuples = list(zip(obj.constraints, [False] + obj.is_sequence[:-1], obj.met))
        # We are searching for an unmet constraint (word_id) that is not
        # the middle of a phrase and is not met.
        query = (word_id, False, False)
        try:
            pos = constraint_tuples.index(query)
            obj.met[pos] = True
            obj.last_met = pos
        except ValueError:
            # Query not found; identical but duplicated object will be
            # returned.
            pass
    return obj
def rename_window(self, new_name):
    """Rename the window and return the :class:`Window` object.

    ``$ tmux rename-window <new_name>``

    Parameters
    ----------
    new_name : str
        name of the window
    """
    # (Fix: removed a shlex lexer that was constructed and configured
    # but never used — the name is passed to tmux verbatim.)
    try:
        self.cmd('rename-window', new_name)
        self['window_name'] = new_name
    except Exception as e:
        # Best-effort: a failed rename is logged, not raised.
        logger.error(e)
    self.server._update_windows()
    return self
def spacetodepth(attrs, inputs, proto_obj):
    """Rearranges blocks of spatial data into depth."""
    # ONNX calls the attribute 'blocksize'; MXNet expects 'block_size'.
    renamed_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize': 'block_size'})
    return 'space_to_depth', renamed_attrs, inputs
def trigger(self, event: str, *args: T.Any, **kw: T.Any) -> bool:
    """Triggers all handlers which are subscribed to an event.

    Returns True when there were callbacks to execute, False otherwise."""
    # Copy the handler list so handlers that (un)subscribe during
    # dispatch do not affect this iteration.
    subscribed = list(self._events.get(event, []))
    for handler in subscribed:
        handler(*args, **kw)
    return bool(subscribed)
def format(self, record):
    """Overridden method that applies SGR codes to log messages."""
    # XXX: idea, colorize message arguments
    rendered = super(ANSIFormatter, self).format(record)
    if not hasattr(self.context, 'ansi'):
        return rendered
    return self.context.ansi(rendered, **self.get_sgr(record))
def export(self):
    """Return a dictionary pre-serialisation of the field.

    The result maps ``'keys'`` to a key->index mapping, plus one entry
    per declared attribute holding that attribute's exported values.
    """
    data = {}
    data['keys'] = {key: index for key, index in zip(self.keys(), range(len(self)))}
    # Export each declared attribute alongside the key index.
    for attr_name in self._attrs.keys():
        data[attr_name] = self.get_attribute(attr_name).export()
    return data
def ssh_accept_sec_context(self, hostname, recv_token, username=None):
    """Accept a GSS-API context (server mode).

    :param str hostname: The servers hostname
    :param str username: The name of the user who attempts to login
    :param str recv_token: The GSS-API Token received from the server,
        if it's not the initial call.
    :return: A ``String`` if the GSS-API has returned a token or ``None``
        if no token was returned
    """
    # hostname and username are not required for GSSAPI, but for SSPI
    self._gss_host = hostname
    self._username = username
    # Lazily create the server-side acceptor context on first use.
    ctx = self._gss_srv_ctxt
    if ctx is None:
        ctx = gssapi.AcceptContext()
        self._gss_srv_ctxt = ctx
    out_token = ctx.step(recv_token)
    self._gss_srv_ctxt_status = ctx.established
    return out_token
def from_dict(cls, dictionary):
    """Create a MetadataRb instance from a dict."""
    depends = set()
    # `groups` keeps statement kinds in a fixed output order.
    groups = [depends]
    for key, value in dictionary.items():
        if key == 'depends':
            depends.update(cls.depends_statement(cookbook, meta)
                           for cookbook, meta in value.items())
    body = ''
    for group in groups:
        if group:
            body += '\n' + '\n'.join(group)
    return cls.from_string(body)
def logging_file_config(self, config_file):
    """Setup logging via the logging module's fileConfig function with
    the specified ``config_file``, if applicable.

    ConfigParser defaults are specified for the special ``__file__``
    and ``here`` variables, similar to PasteDeploy config loading.
    """
    parser = ConfigParser.ConfigParser()
    parser.read([config_file])
    if not parser.has_section('loggers'):
        # Nothing to configure: the file declares no loggers.
        return
    abs_path = os.path.abspath(config_file)
    fileConfig(abs_path, dict(__file__=abs_path, here=os.path.dirname(abs_path)))
def load_scalars(filename, strict_type_checks=True):
    """Parse a YAML file containing the scalar definition.

    :param filename: the YAML file containing the scalars definition.
    :param strict_type_checks: whether ScalarType should strictly validate
        field types.
    :return: list of ScalarType instances, one per parsed probe.
    :raises ParserError: if the scalar file cannot be opened or parsed.
    """
    # Parse the scalar definitions from the YAML file.
    scalars = None
    try:
        with open(filename, 'r') as f:
            scalars = yaml.safe_load(f)
    except IOError as e:
        # str(e), not e.message: the .message attribute no longer exists in
        # Python 3, so the old code raised AttributeError inside the handler.
        raise ParserError('Error opening ' + filename + ': ' + str(e))
    except ValueError as e:
        raise ParserError('Error parsing scalars in {}: {}.\nSee: {}'
                          .format(filename, str(e), BASE_DOC_URL))

    scalar_list = []
    # Scalars are defined in a fixed two-level hierarchy within the definition
    # file. The first level contains the category name, while the second level
    # contains the probe name (e.g. "category.name: probe: ...").
    for category_name in scalars:
        category = scalars[category_name]
        # Make sure that the category has at least one probe in it.
        if not category or len(category) == 0:
            # Single format string: the previous code joined two literals with
            # '+', so .format() only applied to the second one -- the first
            # '{}' stayed literal and category_name filled the wrong slot.
            raise ParserError('Category "{}" must have at least one probe '
                              'in it.\nSee: {}'.format(category_name, BASE_DOC_URL))
        for probe_name in category:
            # We found a scalar type. Go ahead and parse it.
            scalar_info = category[probe_name]
            scalar_list.append(
                ScalarType(category_name, probe_name, scalar_info, strict_type_checks))
    return scalar_list
def _node_has_namespace_helper(node: BaseEntity, namespace: str) -> bool:
    """Return True when the node's namespace entry equals ``namespace``.

    Might have cross references in the future.
    """
    return node.get(NAMESPACE) == namespace
def _restore_coordinator(self):
    """Do the coordinator-only part of the restore.

    Pauses the device first, restores the queue, then reinstates whatever
    was playing: a queue position, a cloud queue (no-op), or a stream URI.
    """
    # Start by ensuring that the speaker is paused as we don't want
    # things all rolling back when we are changing them, as this could
    # include things like audio
    transport_info = self.device.get_current_transport_info()
    if transport_info is not None:
        if transport_info['current_transport_state'] == 'PLAYING':
            self.device.pause()
    # Check if the queue should be restored
    self._restore_queue()
    # Reinstate what was playing
    if self.is_playing_queue and self.playlist_position > 0:
        # was playing from playlist
        if self.playlist_position is not None:
            # The position in the playlist returned by
            # get_current_track_info starts at 1, but when
            # playing from playlist, the index starts at 0
            # if position > 0:
            self.playlist_position -= 1
        self.device.play_from_queue(self.playlist_position, False)
        if self.track_position is not None:
            if self.track_position != "":
                self.device.seek(self.track_position)
        # reinstate track, position, play mode, cross fade
        # Need to make sure there is a proper track selected first
        self.device.play_mode = self.play_mode
        self.device.cross_fade = self.cross_fade
    elif self.is_playing_cloud_queue:
        # was playing a cloud queue started by Alexa
        # No way yet to re-start this so prevent it throwing an error!
        pass
    else:
        # was playing a stream (radio station, file, or nothing)
        # reinstate uri and meta data; start=False keeps it paused here.
        if self.media_uri != "":
            self.device.play_uri(self.media_uri, self.media_metadata, start=False)
def ekrced(handle, segno, recno, column, nelts=_SPICE_EK_EKRCEX_ROOM_DEFAULT):
    """Read data from a double precision column in a specified EK record.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekrced_c.html

    :param handle: Handle attached to EK file.
    :type handle: int
    :param segno: Index of segment containing record.
    :type segno: int
    :param recno: Record from which data is to be read.
    :type recno: int
    :param column: Column name.
    :type column: str
    :param nelts: Room for this many values in the output buffer.
    :return:
        Number of values in column entry,
        Float values in column entry,
        Flag indicating whether column entry is null.
    :rtype: tuple
    """
    # Convert the Python arguments into the ctypes values CSPICE expects.
    c_handle = ctypes.c_int(handle)
    c_segno = ctypes.c_int(segno)
    c_recno = ctypes.c_int(recno)
    c_column = stypes.stringToCharP(column)
    n_vals = ctypes.c_int(0)
    values = stypes.emptyDoubleVector(nelts)
    is_null = ctypes.c_int()
    libspice.ekrced_c(c_handle, c_segno, c_recno, c_column,
                      ctypes.byref(n_vals), values, ctypes.byref(is_null))
    # Guard against the C side reporting more values than we had room for
    # (unless a SPICE error is already signalled).
    assert failed() or (n_vals.value <= nelts)
    return n_vals.value, stypes.cVectorToPython(values)[:n_vals.value], bool(is_null.value)
def Gregory_Scott(x, rhol, rhog):
    r'''Calculate the void fraction in two-phase flow with the Gregory &
    Scott (1969) drift-flux correlation, using :math:`C_0 = 1.19` and a
    zero drift velocity :math:`v_{gm} = 0`.

    .. math::
        \alpha = \frac{x}{\rho_g}\left[C_0\left(\frac{x}{\rho_g}
        + \frac{1-x}{\rho_l}\right)\right]^{-1}

    Parameters
    ----------
    x : float
        Quality at the specific tube interval []
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]

    Returns
    -------
    alpha : float
        Void fraction (area of gas / total area of channel), [-]

    Examples
    --------
    >>> Gregory_Scott(0.4, 800., 2.5)
    0.8364154370924108

    References
    ----------
    .. [1] Gregory, G. A., and D. S. Scott. "Correlation of Liquid Slug
       Velocity and Frequency in Horizontal Cocurrent Gas-Liquid Slug Flow."
       AIChE Journal 15, no. 6 (1969): 933-35. doi:10.1002/aic.690150623.
    .. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for
       Two-Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64
       (2014): 242-51. doi:10.1016/j.applthermaleng.2013.12.032.
    .. [3] Woldesemayat and Ghajar. Int. J. Multiphase Flow 33 (2007):
       347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004.
    '''
    C0 = 1.19
    gas_term = x / rhog
    liquid_term = (1.0 - x) / rhol
    return gas_term / (C0 * (gas_term + liquid_term))
def wcs_pix_transform(ct, i, format=0):
    """Compute the WCS-corrected pixel value given a coordinate
    transformation and the raw pixel value.

    Input:
        ct      coordinate transformation, instance of coord_tran
        i       raw pixel intensity
        format  format flag (optional); 'T'/'t' selects format 1
    Returns:
        WCS corrected pixel value
    """
    z1, z2 = float(ct.z1), float(ct.z2)
    i = float(i)
    yscale = 128.0 / (z2 - z1)
    if format in ('T', 't'):
        format = 1
    if i == 0:
        t = 0.
    elif ct.zt == W_LINEAR:
        # Map intensity 1..200 linearly onto [z1, z2], clamped to the range.
        t = ((i - 1) * (z2 - z1) / 199.0) + z1
        t = max(z1, min(z2, t))
    else:
        t = float(i)
    if format > 1:
        t = (z2 - t) * yscale
    return t
def encode(self,
           data: mx.sym.Symbol,
           data_length: Optional[mx.sym.Symbol],
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """Encode data given sequence lengths of individual examples and the
    maximum sequence length.

    :param data: Input data.
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded versions of input data (data, data_length, seq_len).
    """
    # Unroll the RNN over the full sequence, merging the per-step outputs
    # into a single symbol; the final hidden states are discarded.
    encoded, _states = self.rnn.unroll(seq_len,
                                       inputs=data,
                                       merge_outputs=True,
                                       layout=self.layout)
    return encoded, data_length, seq_len
def multi_process(func, data, num_process=None, verbose=True, **args):
    '''Function to use multiprocessing to process a pandas DataFrame.

    This function applies a function on each row of the input DataFrame by
    multiprocessing.

    Args:
        func (function): The function to apply on each row of the input
            Dataframe. The func must accept pandas.Series as the first
            positional argument and return a pandas.Series.
        data (pandas.DataFrame): A DataFrame to be processed.
        num_process (int, optional): The number of processes to run in
            parallel. Defaults to be the number of CPUs of the computer.
        verbose (bool, optional): Set to False to disable verbose output.
        args (dict): Keyword arguments to pass as keywords arguments to `func`

    return:
        A dataframe containing the results

    Raises:
        RuntimeError: if any worker reported an error.
    '''
    # Check arguments value
    assert isinstance(data, pd.DataFrame), 'Input data must be a pandas.DataFrame instance'
    if num_process is None:
        num_process = multiprocessing.cpu_count()
    # Establish communication queues
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    error_queue = multiprocessing.Queue()
    start_time = time.time()
    # Enqueue tasks: one pandas.Series per DataFrame row.
    num_task = len(data)
    for i in range(num_task):
        tasks.put(data.iloc[i, :])
    # Add a poison pill for each consumer so each worker shuts down cleanly.
    for i in range(num_process):
        tasks.put(None)
    logger.info('Create {} processes'.format(num_process))
    consumers = [Consumer(func, tasks, results, error_queue, **args) for i in range(num_process)]
    for w in consumers:
        w.start()
    # Add a task tracking process
    task_tracker = TaskTracker(tasks, verbose)
    task_tracker.start()
    # Wait for all input data to be processed
    tasks.join()
    # If there is any error in any process, output the error messages.
    # NOTE(review): Queue.qsize() is approximate and raises
    # NotImplementedError on macOS -- confirm the target platforms.
    num_error = error_queue.qsize()
    if num_error > 0:
        for i in range(num_error):
            logger.error(error_queue.get())
        raise RuntimeError('Multi process jobs failed')
    else:
        # Collect results; rows arrive in whatever order workers finished.
        result_table = []
        while num_task:
            result_table.append(results.get())
            num_task -= 1
        df_results = pd.DataFrame(result_table)
        logger.info("Jobs finished in {0:.2f}s".format(time.time() - start_time))
        return df_results
def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    if PY2:
        return d.iteritems(**kw)
    return iter(d.items(**kw))
def composite_qc(df_orig, size=(16, 12)):
    """Plot composite QC figures.

    Lays out six panels labelled A-F: age histogram, gender counts,
    ethnicity counts, mean-coverage histogram, sequencing-chemistry counts
    and cohort counts (with cohort names anonymized).
    """
    # Rename raw column names to human-readable plot titles.
    df = df_orig.rename(columns={"hli_calc_age_sample_taken": "Age", "hli_calc_gender": "Gender", "eth7_max": "Ethnicity", "MeanCoverage": "Mean coverage", "Chemistry": "Sequencing chemistry", "Release Client": "Cohort", })
    fig = plt.figure(1, size)
    # 2x7 grid: per row, two 2-column panels plus one 3-column panel.
    ax1 = plt.subplot2grid((2, 7), (0, 0), rowspan=1, colspan=2)
    ax2 = plt.subplot2grid((2, 7), (0, 2), rowspan=1, colspan=2)
    ax3 = plt.subplot2grid((2, 7), (0, 4), rowspan=1, colspan=3)
    ax4 = plt.subplot2grid((2, 7), (1, 0), rowspan=1, colspan=2)
    ax5 = plt.subplot2grid((2, 7), (1, 2), rowspan=1, colspan=2)
    ax6 = plt.subplot2grid((2, 7), (1, 4), rowspan=1, colspan=3)
    sns.distplot(df["Age"].dropna(), kde=False, ax=ax1)
    sns.countplot(x="Gender", data=df, ax=ax2)
    sns.countplot(x="Ethnicity", data=df, ax=ax3, order=df['Ethnicity'].value_counts().index)
    sns.distplot(df["Mean coverage"].dropna(), kde=False, ax=ax4)
    ax4.set_xlim(0, 100)
    sns.countplot(x="Sequencing chemistry", data=df, ax=ax5)
    sns.countplot(x="Cohort", data=df, ax=ax6, order=df['Cohort'].value_counts().index)
    # Anonymize the cohorts: keep "Health Nucleus", rename "Spector" to
    # "TwinsUK", and replace everything else with C<index>.
    cohorts = ax6.get_xticklabels()
    newCohorts = []
    for i, c in enumerate(cohorts):
        if c.get_text() == "Spector":
            c = "TwinsUK"
        elif c.get_text() != "Health Nucleus":
            c = "C{}".format(i + 1)
        newCohorts.append(c)
    ax6.set_xticklabels(newCohorts)
    # Rotate the (long) cohort labels so they stay readable.
    for ax in (ax6, ):
        ax.set_xticklabels(ax.get_xticklabels(), ha="right", rotation=30)
    # Promote each panel's x-label to its title.
    for ax in (ax1, ax2, ax3, ax4, ax5, ax6):
        ax.set_title(ax.get_xlabel())
        ax.set_xlabel("")
    plt.tight_layout()
    # Full-figure overlay axis used only to place the A-F panel labels.
    root = fig.add_axes((0, 0, 1, 1))
    labels = ((.02, .96, "A"), (.3, .96, "B"), (.6, .96, "C"), (.02, .52, "D"), (.3, .52, "E"), (.6, .52, "F"))
    panel_labels(root, labels)
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
def _serialize(self):
    """Build a dict of all mutable Properties of this object.

    Values that are themselves API objects (``Base`` instances) are
    collapsed to their ``id``.
    """
    props = type(self).properties
    result = {}
    for name in props:
        if not props[name].mutable:
            continue
        value = getattr(self, name)
        result[name] = value.id if isinstance(value, Base) else value
    return result
def get_distutils_display_options():
    """Return the set of all distutils "display option" arguments, in their
    long and short forms.

    These are the setup.py arguments such as --name or --version which
    print the project's metadata and then exit.

    Returns
    -------
    opts : set
        The long and short form display option arguments, including the
        - or -- prefix.
    """
    short_opts = {'-' + opt[1] for opt in Distribution.display_options if opt[1]}
    long_opts = {'--' + opt[0] for opt in Distribution.display_options}
    # Include -h and --help which are not explicitly listed in
    # Distribution.display_options (as they are handled by optparse).
    short_opts.add('-h')
    long_opts.add('--help')
    # This isn't the greatest approach to hardcode these commands.
    # However, there doesn't seem to be a good way to determine
    # whether build *will be* run as part of the command at this phase.
    display_commands = {'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias'}
    return short_opts | long_opts | display_commands
def message(message_type, payload, payload_length):
    """Build a message packet of the given type carrying ``payload``."""
    # Timestamps start zeroed; the transport fills them in later.
    body = Container(type=message_type,
                     id=1,
                     refer=0,
                     sent=Container(secs=0, usecs=0),
                     recv=Container(secs=0, usecs=0),
                     payload_length=payload_length,
                     payload=payload)
    return packet.build(body)
def one_vertical_total_stress(self, z_c):
    """Determine the vertical total stress at a single depth z_c.

    Walks the soil layers from the surface down, accumulating
    height * unit weight for each (partial) layer above z_c, using the
    saturated unit weight below the ground water level (self.gwl).

    :param z_c: depth from surface
    :raises AnalysisError: if a submerged layer has no saturated unit weight
    """
    total_stress = 0.0
    depths = self.depths
    end = 0  # becomes 1 once the layer containing z_c has been handled
    for layer_int in range(1, len(depths) + 1):
        l_index = layer_int - 1
        if z_c > depths[layer_int - 1]:
            if l_index < len(depths) - 1 and z_c > depths[l_index + 1]:
                # z_c lies below this layer: accumulate the full layer.
                height = depths[l_index + 1] - depths[l_index]
                bottom_depth = depths[l_index + 1]
            else:
                # z_c lies within this layer: accumulate the partial slice.
                end = 1
                height = z_c - depths[l_index]
                bottom_depth = z_c
            if bottom_depth <= self.gwl:
                # Entirely above the water table: dry unit weight only.
                total_stress += height * self.layer(layer_int).unit_dry_weight
            else:
                if self.layer(layer_int).unit_sat_weight is None:
                    raise AnalysisError("Saturated unit weight not defined for layer %i." % layer_int)
                # Split the slice at the ground water level.
                sat_height = bottom_depth - max(self.gwl, depths[l_index])
                dry_height = height - sat_height
                total_stress += dry_height * self.layer(layer_int).unit_dry_weight + sat_height * self.layer(layer_int).unit_sat_weight
        else:
            # z_c is at or above the top of this layer: nothing to add.
            end = 1
        if end:
            break
    return total_stress
def linesplit(string, columns):  # type: (Union[Text, FmtStr], int) -> List[FmtStr]
    """Returns a list of lines, split on the last possible space of each line.

    Split spaces will be removed. Whitespaces will be normalized to one
    space. Spaces will be the color of the first whitespace character of
    the normalized whitespace.

    If a word extends beyond the line, wrap it anyway.
    """
    if not isinstance(string, FmtStr):
        string = fmtstr(string)
    string_s = string.s
    matches = list(re.finditer(r'\s+', string_s))
    # Interior whitespace runs only; leading/trailing runs are discarded.
    spaces = [string[m.start():m.end()] for m in matches if m.start() != 0 and m.end() != len(string_s)]
    # The non-whitespace words, with their formatting preserved.
    # NOTE(review): an all-whitespace or empty input would make words empty
    # and words[0] below raise IndexError -- confirm callers never pass that.
    words = [string[start:end] for start, end in zip([0] + [m.end() for m in matches], [m.start() for m in matches] + [len(string_s)]) if start != end]
    # Hard-wrap a single word into column-sized FmtStr chunks.
    word_to_lines = lambda word: [word[columns * i:columns * (i + 1)] for i in range((len(word) - 1) // columns + 1)]
    lines = word_to_lines(words[0])
    for word, space in zip(words[1:], spaces):
        # NOTE(review): '<' (rather than '<=') leaves one column unused when
        # line + space + word would exactly fill it -- confirm intended.
        if len(lines[-1]) + len(word) < columns:
            # Normalized single space, carrying the formatting attributes of
            # the original whitespace run.
            lines[-1] += fmtstr(' ', **space.shared_atts)
            lines[-1] += word
        else:
            lines.extend(word_to_lines(word))
    return lines
def where_clause(self):
    """Convert self.restriction to the SQL WHERE clause."""
    condition = self._make_condition(self.restriction)
    if condition is True:
        # No restriction: omit the WHERE clause entirely.
        return ''
    return ' WHERE %s' % condition
def wait_for_boot_completion(self, timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):
    """Wait for the Android framework to broadcast ACTION_BOOT_COMPLETED.

    Args:
        timeout: float, the number of seconds to wait before timing out.

    Raises:
        DeviceError: if booting did not complete within the timeout.
    """
    deadline = time.time() + timeout
    self.adb.wait_for_device(timeout=timeout)
    while time.time() < deadline:
        try:
            if self.is_boot_completed():
                return
        except adb.AdbError:
            # adb shell calls may fail during certain periods of the
            # booting process, which is normal; ignore these errors.
            pass
        time.sleep(5)
    raise DeviceError(self, 'Booting process timed out')
def parse_json(json_file):
    """Parse a whole json record from the given file.

    Return None if the json file does not exist or an exception occurs.

    Args:
        json_file (str): File path to be parsed.

    Returns:
        A dict of json info, or None on failure.
    """
    if not os.path.exists(json_file):
        return None
    try:
        with open(json_file, "r") as f:
            # read() replaces the old readlines()+join round trip.
            json_info = json.loads(f.read())
        return unicode2str(json_info)
    except Exception as e:
        # str(e), not e.message: BaseException.message was removed in
        # Python 3, so the old handler itself raised AttributeError.
        # Also narrowed from BaseException so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        logging.error(str(e))
        return None
def isvalid(path, access=None, extensions=None, filetype=None, minsize=None):
    """Check whether file meets access, extension, size, and type criteria.

    Each criterion is skipped when its argument is None.
    """
    if access is not None and not os.access(path, access):
        return False
    if extensions is not None and not checkext(path, extensions):
        return False
    if filetype is not None:
        if filetype == 'all':
            type_ok = os.path.exists(path)
        elif filetype == 'dir':
            type_ok = os.path.isdir(path)
        elif filetype == 'file':
            type_ok = os.path.isfile(path)
        else:
            # Unknown filetype values never match.
            type_ok = False
        if not type_ok:
            return False
    if minsize is not None and os.path.isfile(path) and os.path.getsize(path) <= minsize:
        return False
    return True
def pick(source, keys, *, transform=None):
    """Return a dictionary including only specified keys from a source dict.

    :source: a dictionary
    :keys: a set of keys, or a predicate function accepting a key
    :transform: a function that transforms the values
    """
    if callable(keys):
        keep = keys
    else:
        keep = lambda key: key in keys
    result = {}
    for key, value in source.items():
        if keep(key):
            result[key] = transform(value) if transform else value
    return result
def hash_key(self, key):
    """'Hash' all keys in a timerange to the same value.

    Returns the first destination key (in ``self._dict`` iteration order)
    that is strictly greater than ``key``; if none is, returns ``key``.

    NOTE(review): this assumes ``self._dict`` iterates its keys in
    ascending order -- confirm with how the dict is populated.
    """
    # The old enumerate() index was never used; iterate keys directly.
    for destination_key in self._dict:
        if key < destination_key:
            return destination_key
    return key
def pp_xml(body):
    """Pretty print format some XML so it's readable."""
    document = xml.dom.minidom.parseString(body)
    return document.toprettyxml(indent=" ")
def scan(self, match="*", count=1000, cursor=0):
    """:see::RedisMap.scan"""
    # SSCAN returns (next_cursor, raw members); deserialize each member.
    next_cursor, members = self._client.sscan(
        self.key_prefix, cursor=cursor, match=match, count=count)
    return next_cursor, set(map(self._loads, members))
def cpp_flag(compiler):
    """Return the -std=c++[0x/11/14] compiler flag.

    c++14 is preferred over c++0x/11 (when it is available).
    """
    for flag in ('-std=c++14', '-std=c++11', '-std=c++0x'):
        if has_flag(compiler, [flag]):
            return flag
    raise RuntimeError('Unsupported compiler -- at least C++0x support is needed!')
def ensure_num_chosen_alts_equals_num_obs(obs_id_col, choice_col, df):
    """Check that the total number of recorded choices equals the total
    number of observations. If this is not the case, raise helpful
    ValueError messages.

    Parameters
    ----------
    obs_id_col : str.
        Denotes the column in `df` that contains the observation ID values
        for each row.
    choice_col : str.
        Denotes the column in `df` that contains a one if the alternative
        pertaining to the given row was the observed outcome for the
        observation pertaining to the given row and a zero otherwise.
    df : pandas dataframe.
        The dataframe whose choices and observations will be checked.

    Returns
    -------
    None.
    """
    total_obs = df[obs_id_col].unique().shape[0]
    total_choices = df[choice_col].sum()
    if total_choices < total_obs:
        raise ValueError("One or more observations have not chosen one "
                         "of the alternatives available to him/her")
    if total_choices > total_obs:
        raise ValueError("One or more observations has chosen multiple alternatives")
    return None
def _add_internal_event ( self , name , send_event = False , internal_event_factory = None ) :
"""This is only here to ensure my constant hatred for Python 2 ' s horrid variable argument support .""" | if not internal_event_factory :
internal_event_factory = self . internal_event_factory
return self . add_event ( names , send_event = send_event , event_factory = internal_event_factory ) |
def add_issue_comment(self, issue_id_or_key, content, extra_request_params=None):
    """Add a comment to an issue.

    client = BacklogClient("your_space_name", "your_api_key")
    client.add_issue_comment("YOUR_PROJECT-999", u"or...else e.")

    :param issue_id_or_key: issue id or key such as "PROJ-123"
    :param content: comment body
    :param extra_request_params: optional extra POST parameters (dict)
    """
    # Build a fresh dict per call: the old `={}` default was a shared
    # mutable default that was mutated below, leaking parameters across
    # calls (and into callers' dicts).
    request_params = dict(extra_request_params) if extra_request_params else {}
    request_params["content"] = content
    return self.do("POST",
                   "issues/{issue_id_or_key}/comments",
                   url_params={"issue_id_or_key": issue_id_or_key},
                   request_params=request_params)
def wait_for_settle_all_channels(raiden: 'RaidenService', retry_timeout: float) -> None:
    """Wait until all channels are settled.

    Note:
        This does not time out, use gevent.Timeout.
    """
    chain_state = views.state_from_raiden(raiden)
    for payment_network_id, payment_network_state in chain_state.identifiers_to_paymentnetworks.items():
        token_networks = payment_network_state.tokenidentifiers_to_tokennetworks
        for token_network_id, token_network_state in token_networks.items():
            # All channel ids currently known for this token network.
            channel_ids = cast(
                List[ChannelID],
                token_network_state.channelidentifiers_to_channels.keys(),
            )
            wait_for_settle(
                raiden=raiden,
                payment_network_id=payment_network_id,
                token_address=TokenAddress(token_network_id),
                channel_ids=channel_ids,
                retry_timeout=retry_timeout,
            )
def draw_commands(self, surf):
    """Draw the list of available commands."""
    used_abilities = {act.ability for act in self._past_actions if act.ability}
    commands = sorted(self._abilities(lambda c: c.name != "Smart"),
                      key=lambda c: c.name)
    for y, cmd in enumerate(commands, start=2):
        # Color encodes state: queued (green), partially typed hotkey
        # (dim green), already used (red), otherwise available (yellow).
        if self._queued_action and cmd == self._queued_action:
            color = colors.green
        elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
            color = colors.green * 0.75
        elif cmd.ability_id in used_abilities:
            color = colors.red
        else:
            color = colors.yellow
        # truncate "escape" -> "esc"
        hotkey = cmd.hotkey[:3]
        surf.write_screen(self._font_large, color, (0.2, y), hotkey)
        surf.write_screen(self._font_large, color, (3, y), cmd.name)
def features_properties_null_remove(obj):
    """Remove any properties of features in the collection that map to a
    null (i.e., None) value.

    Mutates ``obj`` in place and also returns it.
    """
    # Iterate the features directly (tqdm still shows total progress);
    # the previous index-based range(len(...)) loop was not idiomatic.
    for feature in tqdm(obj['features']):
        if 'properties' in feature:
            feature['properties'] = {
                key: value
                for key, value in feature['properties'].items()
                if value is not None
            }
    return obj
def cluster_reset(self, *, hard=False):
    """Reset a Redis Cluster node.

    :param hard: when True issue a HARD reset, otherwise a SOFT one.
    """
    # Conditional expression replaces the fragile `and/or` idiom.
    reset = b'HARD' if hard else b'SOFT'
    fut = self.execute(b'CLUSTER', b'RESET', reset)
    return wait_ok(fut)
def reduce(self, func, dim=None, keep_attrs=None, **kwargs):
    """Reduce the items in this group by applying `func` along some
    dimension(s).

    Parameters
    ----------
    func : function
        Function which can be called in the form
        `func(x, axis=axis, **kwargs)` to return the result of collapsing
        an np.ndarray over an integer valued axis.
    dim : str or sequence of str, optional
        Dimension(s) over which to apply `func`.
    axis : int or sequence of int, optional
        Axis(es) over which to apply `func`. Only one of the 'dimension'
        and 'axis' arguments can be supplied. If neither are supplied, then
        `func` is calculated over all dimension for each group item.
    keep_attrs : bool, optional
        If True, the datasets's attributes (`attrs`) will be copied from
        the original object to the new one. If False (default), the new
        object will be returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to `func`.

    Returns
    -------
    reduced : Array
        Array with summarized data and the indicated dimension(s)
        removed.
    """
    # Deprecation shim: callers that did not pass `dim` explicitly get the
    # sentinel DEFAULT_DIMS, which currently maps to ALL_DIMS with a warning.
    if dim == DEFAULT_DIMS:
        dim = ALL_DIMS
        # TODO change this to dim = self._group_dim after
        # the deprecation process. Do not forget to remove _reduce_method
        warnings.warn("Default reduction dimension will be changed to the "
                      "grouped dimension in a future version of xarray. To "
                      "silence this warning, pass dim=xarray.ALL_DIMS "
                      "explicitly.",
                      FutureWarning, stacklevel=2)
    elif dim is None:
        dim = self._group_dim
    if keep_attrs is None:
        keep_attrs = _get_keep_attrs(default=False)

    def reduce_dataset(ds):
        # Applied to each member dataset of the group by self.apply below.
        return ds.reduce(func, dim, keep_attrs, **kwargs)

    return self.apply(reduce_dataset)
def _detailTuples ( self , uriRefs ) :
"""Given a list of uriRefs , return a list of dicts :
{ ' subject ' : s , ' predicate ' : p , ' object ' : o }
all values are strings""" | details = [ ]
for uriRef in uriRefs :
for subject , predicate , object_ in self . _rdfGraph . triples ( ( uriRef , None , None ) ) :
details . append ( { 'subject' : subject . toPython ( ) , 'predicate' : predicate . toPython ( ) , 'object' : object_ . toPython ( ) } )
return details |
def main(title, authors, year, email, journal='', volume='', number='', pages='',
         publisher='', doi='', tags=[], DFT_code='Quantum ESPRESSO',
         DFT_functionals=['BEEF-vdW'],
         reactions=[{'reactants': ['2.0H2Ogas', '-1.5H2gas', 'star'],
                     'products': ['OOHstar@ontop']}],
         energy_corrections={}, bulk_compositions=['Pt', 'Ag'],
         crystal_structures=['fcc', 'hcp'], facets=['111'], custom_base=None):
    """Automatically generate an organized folder structure for a DFT
    calculation.

    Start by copying the script to a folder in your username and assign the
    right information to the arguments in the function. You can change the
    parameters and run the script several times if you, for example, are
    using different functionals or are doing different reactions on
    different surfaces.

    Remember to include the reaction that gives the adsorption energy of
    reaction intermediates, taking gas phase molecules as references
    (preferably H2O, H2, CH4, CO, NH3).

    Parameters
    ----------
    title : str
        Publication or working title if not yet published.
    authors : list
        Author names, e.g. ['Doe, John', 'Einstein, Albert']
    year : str
        Year of (submission?)
    email : str
        Email address of the person responsible for uploading. Login at
        catalysis-hub.org currently only supports @gmail or Slack login
        email addresses.
    journal, volume, number, pages, publisher : str
        Publication metadata.
    doi : str, optional
        DOI of publication.
    tags : list, optional
        User defined quire tags.
    DFT_code : str
        e.g. 'Quantum ESPRESSO'
    DFT_functionals : list of str
        Calculator functional used, e.g. 'BEEF-vdW'
    reactions : list of dict
        A new dictionary is required for each reaction, and should include
        two lists, 'reactants' and 'products'. Remember to include a minus
        sign and prefactor in the name when relevant. If your reaction is
        not balanced, you will receive an error when running the script.
        Include the phase if mixing gas phase and surface phase: 'star' for
        empty site or adsorbed phase, 'gas' if in gas phase. Include the
        adsorption site if relevant, e.g. star@top or star@bridge.
        For example, the adsorption of CH2: CH4(g) - H2(g) + * -> CH2* is
        {'reactants': ['CH4gas', '-H2gas', 'star'],
         'products': ['CH2star@bridge']}
    energy_corrections : dict, optional
        e.g. {'H2gas': 0.1}
    bulk_compositions : list of str
        e.g. ['Pt', 'Ag']
    crystal_structures : list of str
        e.g. ['fcc', 'hcp']
    facets : list
        For complicated structures use the term you would use in the
        publication, e.g. ['111']
    custom_base : str
        Optional root directory to generate the tree under. TODO
    """
    # Validate stoichiometry up front so no folders are created on error.
    for reaction in reactions:
        check_reaction(reaction['reactants'], reaction['products'])
    # Set up directories
    if custom_base is not None:
        base = custom_base + '/'
    else:
        catbase = os.path.abspath(os.path.curdir)
        base = '%s/%s/' % (catbase, username)
    if not os.path.exists(base):
        os.mkdir(base)
    publication_shortname = get_pub_id(title, authors, year)
    publication_base = base + publication_shortname + '/'
    if not os.path.exists(publication_base):
        os.mkdir(publication_base)
    # save publication info to publications.txt
    publication_dict = {'title': title, 'authors': authors, 'journal': journal,
                        'volume': volume, 'number': number, 'pages': pages,
                        'year': year, 'email': email, 'publisher': publisher,
                        'doi': doi, 'tags': tags}
    pub_txt = publication_base + 'publication.txt'
    with open(pub_txt, 'w') as f:
        yaml.dump(publication_dict, f)
    if not len(energy_corrections.keys()) == 0:
        energy_txt = publication_base + 'energy_corrections.txt'
        with open(energy_txt, 'w') as fe:
            yaml.dump(energy_corrections, fe)

    def create(path):
        # mkdir that tolerates an already-existing directory.
        if not os.path.exists(path):
            os.mkdir(path)
        return path

    base = create(publication_base + DFT_code + '/')
    bulk_bases = []
    gas_bases = []
    for DFT_functional in DFT_functionals:
        bulk_bases += [create(base + DFT_functional + '/')]
        gas_bases += [create(base + DFT_functional + '/gas/')]
    gas_names = []
    ads_names = []
    for i in range(len(reactions)):
        # Strip any @site suffix before classifying species by phase.
        # NOTE(review): the comprehension below reuses `i` as its loop
        # variable, shadowing the outer reaction index (and leaking under
        # Python 2) -- confirm intended / rename if this ever misbehaves.
        rnames = [r.split('@')[0] for r in reactions[i]['reactants'] + reactions[i]['products']]
        states = [get_state(r) for r in rnames]
        gas_names += [clear_state(clear_prefactor(rnames[i]))
                      for i in range(len(states)) if states[i] == 'gas']
    # One MISSING placeholder file per unique gas-phase species.
    for gas_base in gas_bases:
        for name in set(gas_names):
            with open(gas_base + 'MISSING:{}_gas'.format(name), 'w'):
                pass
    for bulk_base in bulk_bases:
        for bulk in bulk_compositions:
            for crystal_structure in crystal_structures:
                bulk_name = bulk + '_' + crystal_structure
                facet_base = create(bulk_base + bulk_name + '/')
                with open(facet_base + 'MISSING:{}_bulk'.format(bulk_name), 'w'):
                    pass
                for facet in facets:
                    reaction_base = create(facet_base + facet + '/')
                    # NOTE(review): 'MISSING:empty_slab' has no '{}', so the
                    # .format(bulk_name) call is a no-op -- confirm intended.
                    with open(reaction_base + 'MISSING:empty_slab'.format(bulk_name), 'w'):
                        pass
                    for i in range(len(reactions)):
                        rname = '_'.join(reactions[i]['reactants'])
                        pname = '_'.join(reactions[i]['products'])
                        reaction_name = '__'.join([rname, pname])
                        base = create(reaction_base + reaction_name + '/')
                        rnames = [r.split('@')[0] for r in reactions[i]['reactants'] + reactions[i]['products']]
                        states = [get_state(r) for r in rnames]
                        # Adsorbed ('star') species get slab placeholders.
                        ads_names = [clear_prefactor(clear_state(rnames[i]))
                                     for i in range(len(states)) if states[i] == 'star']
                        for ads in ads_names:
                            if ads == '':
                                continue
                            with open(base + 'MISSING:{}_slab'.format(ads), 'w'):
                                pass
                        # NOTE(review): 'MISSING:TS?' also has no '{}'; the
                        # .format(ads) argument is ignored.
                        with open(base + 'MISSING:TS?'.format(ads), 'w'):
                            pass
    print('Folders were succesfully created under {}'.format(publication_base))
def describe_spot_price_history ( DryRun = None , StartTime = None , EndTime = None , InstanceTypes = None , ProductDescriptions = None , Filters = None , AvailabilityZone = None , MaxResults = None , NextToken = None ) :
    """Describe the Spot price history (EC2 ``DescribeSpotPriceHistory``).

    When you specify a start and end time, this operation returns the prices
    of the instance types within the time range that you specified and the
    time when the price changed. The price is valid within the time period
    that you specified; the response merely indicates the last time that the
    price changed. See the AWS API Documentation for details.

    :type DryRun: boolean
    :param DryRun: Checks whether you have the required permissions for the
        action without actually making the request.
    :type StartTime: datetime
    :param StartTime: Start of the history window (up to 90 days back), in
        UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
    :type EndTime: datetime
    :param EndTime: End of the history window (up to the current date), UTC.
    :type InstanceTypes: list
    :param InstanceTypes: Filter the results by instance type
        (for example, ``'m3.medium'``).
    :type ProductDescriptions: list
    :param ProductDescriptions: Filter by basic product description
        (for example, ``'Linux/UNIX (Amazon VPC)'``).
    :type Filters: list
    :param Filters: One or more ``{'Name': ..., 'Values': [...]}`` filters;
        supported names: ``availability-zone``, ``instance-type``,
        ``product-description``, ``spot-price``, ``timestamp``.
    :type AvailabilityZone: string
    :param AvailabilityZone: Filter the results by Availability Zone.
    :type MaxResults: integer
    :param MaxResults: Maximum number of results per call (1-1000,
        default 1000); use ``NextToken`` to page through the rest.
    :type NextToken: string
    :param NextToken: The token for the next set of results.
    :rtype: dict
    :return: ``{'SpotPriceHistory': [{'InstanceType': ...,
        'ProductDescription': ..., 'SpotPrice': 'string',
        'Timestamp': datetime, 'AvailabilityZone': 'string'}, ...],
        'NextToken': 'string'}``
    """
    # NOTE(review): auto-generated client stub -- no implementation here.
    pass
def check_coverage ( ) :
    """Abort the fabric task unless test coverage is 100%.

    Greps the HTML coverage report for the 'Total' row, then reads the
    percentage a fixed number of lines below it and fails the build when
    it is under 100%.
    """
    with lcd ( settings . LOCAL_COVERAGE_PATH ) :
        # Locate the line number of the 'Total' row in the HTML report.
        total_line = local ( 'grep -n Total index.html' , capture = True )
        match = re . search ( r'^(\d+):' , total_line )
        total_line_number = int ( match . groups ( ) [ 0 ] )
        # Assumes the percentage cell sits exactly 5 lines below the
        # 'Total' marker in the generated HTML -- TODO confirm this holds
        # for the coverage version in use.
        percentage_line_number = total_line_number + 5
        percentage_line = local ( 'awk NR=={0} index.html' . format ( percentage_line_number ) , capture = True )
        # First try a pattern that allows a decimal point (e.g. "97.5%").
        match = re . search ( r'(\d.+)%' , percentage_line )
        try :
            percentage = float ( match . groups ( ) [ 0 ] )
        except ValueError : # If there's no dotting try another search
            match = re . search ( r'(\d+)%' , percentage_line )
            percentage = float ( match . groups ( ) [ 0 ] )
        if percentage < 100 :
            # abort() raises SystemExit, stopping the fabric run.
            abort ( red ( 'Coverage is {0}%' . format ( percentage ) ) )
    print ( green ( 'Coverage is {0}%' . format ( percentage ) ) )
def bbox_vert_aligned(box1, box2):
    """Return True when the two boxes overlap horizontally.

    Each box is shrunk by 1.5 units on both sides before the overlap test,
    so boxes that merely touch (or overlap by less than ~3 units) do not
    count as vertically aligned.  Returns False when either box is missing.
    """
    if not box1 or not box2:
        return False
    # Shrink both intervals to ignore hairline overlaps.
    pad = 1.5
    lo1, hi1 = box1.left + pad, box1.right - pad
    lo2, hi2 = box2.left + pad, box2.right - pad
    # Two intervals overlap iff each starts before the other one ends.
    return lo1 <= hi2 and lo2 <= hi1
def load_neighbour_info ( self , cache_dir , mask = None , ** kwargs ) :
    """Read index arrays from either the in-memory or disk cache.

    :param cache_dir: Directory holding the on-disk ``.npz`` cache; when
        falsy and nothing is in memory, :class:`IOError` is raised.
    :param mask: Optional mask whose ``name`` attribute keys the cache.
    :raises IOError: When no cached neighbour info is available.
    """
    mask_name = getattr ( mask , 'name' , None )
    filename = self . _create_cache_filename ( cache_dir , mask = mask_name , ** kwargs )
    # BUG FIX: the in-memory cache is stored under ``mask_name`` (below),
    # but was previously looked up with ``kwargs.get('mask')``.  Since
    # ``mask`` is an explicit parameter it never appears in ``kwargs``,
    # so the in-memory cache could never hit.
    if mask_name in self . _index_caches :
        self . _apply_cached_indexes ( self . _index_caches [ mask_name ] )
    elif cache_dir :
        cache = np . load ( filename , mmap_mode = 'r' )
        # copy the dict so we can modify its keys and close the file handle
        new_cache = dict ( cache . items ( ) )
        cache . close ( )
        self . _apply_cached_indexes ( new_cache )  # modifies cache dict in-place
        self . _index_caches [ mask_name ] = new_cache
    else :
        raise IOError
def cmd_iter_no_block ( self , tgt , fun , arg = ( ) , timeout = None , tgt_type = 'glob' , ret = '' , kwarg = None , show_jid = False , verbose = False , ** kwargs ) :
    '''
    Yields the individual minion returns as they come in, or None
    when no returns are available.

    The function signature is the same as :py:meth:`cmd` with the
    following exceptions.

    :returns: A generator yielding the individual minion returns, or None
        when no returns are available. This allows for actions to be
        injected in between minion returns.

    .. code-block:: python

        >>> ret = local.cmd_iter_no_block('*', 'test.ping')
        >>> for i in ret:
        ...     print(i)
        None
        {'jerry': {'ret': True}}
        {'dave': {'ret': True}}
        None
        {'stewart': {'ret': True}}
    '''
    # Remember whether the event bus was already publishing so the
    # finally block only closes it if this call opened it.
    was_listening = self . event . cpub
    try :
        pub_data = self . run_job ( tgt , fun , arg , tgt_type , ret , timeout , kwarg = kwarg , listen = True , ** kwargs )
        if not pub_data :
            # Publish failed -- surface the empty result to the caller.
            yield pub_data
        else :
            # block=False makes get_iter_returns yield None when no
            # return is currently available (hence "no_block").
            for fn_ret in self . get_iter_returns ( pub_data [ 'jid' ] , pub_data [ 'minions' ] , timeout = timeout , tgt = tgt , tgt_type = tgt_type , block = False , ** kwargs ) :
                if fn_ret and any ( [ show_jid , verbose ] ) :
                    # Annotate each minion's return with the job id.
                    for minion in fn_ret :
                        fn_ret [ minion ] [ 'jid' ] = pub_data [ 'jid' ]
                yield fn_ret
            self . _clean_up_subscriptions ( pub_data [ 'jid' ] )
    finally :
        if not was_listening :
            self . event . close_pub ( )
def coroutine(func):
    """Wrap a generator function into a plain-callable interface.

    The decorated generator is instantiated and primed on the first call;
    the returned callable sends its positional arguments into the
    generator as a tuple and returns whatever the generator yields next.
    Note that generator arguments therefore arrive as a tuple, so use
    ``num, = yield ...`` instead of ``num = yield ...``.

    >>> @coroutine
    ... def running_sum(count):
    ...     total = 0
    ...     for _ in range(count):
    ...         num, = yield total
    ...         total += num
    ...     yield total
    >>> add = running_sum(2)
    >>> add(2)
    2
    >>> add(3)
    5

    This keeps state between calls, as expected from a generator, while
    calling it looks like an ordinary function.  Once the generator is
    exhausted, further calls raise :class:`StopIteration`.
    """
    import functools

    # IMPROVEMENT: functools.wraps preserves the wrapped generator's
    # __name__/__doc__ on the factory, which the original lost.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        generator = func(*args, **kwargs)
        next(generator)  # prime the generator so it can receive values
        return lambda *call_args: generator.send(call_args)
    return wrapper
def source(self):
    """Return the package's download link(s) from SLACKBUILDS.TXT.

    Scans the metadata line by line, remembering the most recently seen
    SlackBuild name, and records the 32-bit and x86_64 download lines
    belonging to ``self.name``; arch selection is delegated to
    ``_select_source_arch``.
    """
    src, src64 = "", ""
    for row in self.SLACKBUILDS_TXT.splitlines():
        if row.startswith(self.line_name):
            # Track which SlackBuild the following lines describe.
            pkg = row[17:].strip()
        if row.startswith(self.line_down) and pkg == self.name and row[21:].strip():
            src = row[21:]
        if row.startswith(self.line_down_64) and pkg == self.name and row[28:].strip():
            src64 = row[28:]
    return self._select_source_arch(src, src64)
def _postprocess_response(self, result):
    """Apply fixups mimicking ActionBase._execute_module(); this is copied
    verbatim from action/__init__.py, the guts of _parse_returned_data are
    garbage and should be removed or reimplemented once tests exist.

    :param dict result:
        Dictionary with format::

            "rc": int,
            "stdout": "stdout data",
            "stderr": "stderr data"
    """
    parsed = self._parse_returned_data(result)
    # Mirror the base implementation: derive the *_lines variants when
    # only the raw stream text is present.
    for stream in ('stdout', 'stderr'):
        lines_key = stream + '_lines'
        if stream in parsed and lines_key not in parsed:
            parsed[lines_key] = (parsed[stream] or u'').splitlines()
    return parsed
def nl_socket_add_memberships ( sk , * group ) :
    """Join groups.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L417

    Joins the specified groups using the modern socket option. The list of
    groups has to be terminated by 0. Make sure to use the correct group
    definitions as the older bitmask definitions for nl_join_groups() are
    likely to still be present for backward compatibility reasons.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    group -- group identifier (integer).

    Returns:
    0 on success or a negative error code.
    """
    if sk . s_fd == - 1 :
        # Socket is not connected.
        return - NLE_BAD_SOCK
    for grp in group :
        if not grp :
            # A zero group terminates the list, mirroring the C API.
            break
        if grp < 0 :
            return - NLE_INVAL
        try :
            sk . socket_instance . setsockopt ( SOL_NETLINK , NETLINK_ADD_MEMBERSHIP , grp )
        except OSError as exc :
            # Translate the OS errno into a libnl error code.
            return - nl_syserr2nlerr ( exc . errno )
    return 0
def get_events_for_blocks(self, blocks, subscriptions):
    """Get a list of events associated with all the blocks.

    Args:
        blocks (list of BlockWrapper): The blocks to search for events
            that match each subscription.
        subscriptions (list of EventSubscriptions): EventFilter and
            event type to filter events.

    Returns (list of Events): The Events associated with each block id.

    Raises:
        KeyError: A receipt is missing from the receipt store.
    """
    # Flatten the per-block event lists, preserving block order.
    return [event
            for block in blocks
            for event in self.get_events_for_block(block, subscriptions)]
def from_cn(cls, common_name):
    """Retrieve certificate ids matching *common_name*.

    Searches pending/valid certificates by both cn and altname, then
    returns the list of ids whose cn/altnames include *common_name*,
    or None when nothing matches.
    """
    base_query = {'status': ['pending', 'valid'], 'items_per_page': 500}
    # Query once per searchable field, keeping (id, all fqdns) pairs.
    matches = []
    for field in ('cn', 'altname'):
        query = dict(base_query)
        query[field] = common_name
        for cert in cls.list(query):
            matches.append((cert['id'], [cert['cn']] + cert['altnames']))
    # Index every fqdn back to the certificate ids that carry it.
    by_fqdn = {}
    for cert_id, fqdns in matches:
        for fqdn in fqdns:
            by_fqdn.setdefault(fqdn, []).append(cert_id)
    ids = by_fqdn.get(common_name)
    if not ids:
        return
    return ids
def is_subnet_source_fw(cls, tenant_id, subnet):
    """Check if the subnet is created as a result of any FW operation.

    Compares *subnet* (CIDR prefix stripped) against the tenant's
    recorded 'in' and 'out' firewall subnets and the configured dummy
    service subnet.
    """
    cfg = config.CiscoDFAConfig().cfg
    target = subnet.split('/')[0]
    # Check the tenant's 'in' and 'out' firewall subnets in turn; a
    # missing record short-circuits to False, as in the original flow.
    for lookup in (cls.get_in_ip_addr, cls.get_out_ip_addr):
        sub_info = lookup(tenant_id)
        if not sub_info:
            return False
        if sub_info.get('subnet') == target:
            return True
    # Finally compare against the configured dummy service subnet.
    dummy = cfg.firewall.fw_service_dummy_ip_subnet.split('/')[0]
    return target == dummy
def _setup_locale(self, locale: str = locales.DEFAULT_LOCALE) -> None:
    """Validate *locale* and store it on the instance.

    Falls back to the default locale when *locale* is falsy; the check
    is case-insensitive.

    :param locale: Locale code.
    :raises UnsupportedLocale: When the locale is not supported.
    :return: Nothing.
    """
    chosen = (locale or locales.DEFAULT_LOCALE).lower()
    if chosen not in locales.SUPPORTED_LOCALES:
        raise UnsupportedLocale(chosen)
    self.locale = chosen
def inspect(self, refresh=True):
    """Provide metadata about the image.

    :param refresh: bool, when True (default) re-query the daemon even
        if cached metadata exists.
    :return: dict of inspect data.
    :raises ConuException: when the image has no usable identifier.
    """
    # Serve from cache only when allowed and something is cached.
    if self._inspect_data and not refresh:
        return self._inspect_data
    identifier = self._id or self.get_full_name()
    if not identifier:
        raise ConuException("This image does not have a valid identifier.")
    self._inspect_data = self.d.inspect_image(identifier)
    return self._inspect_data
def decompose_once(val: Any, default=RaiseTypeErrorIfNotProvided, **kwargs):
    """Decompose a value into operations exactly once, if possible.

    Unlike `cirq.decompose`, this calls the value's `_decompose_` method a
    single time rather than recursing on the result.

    Args:
        val: The value to call `_decompose_` on, if possible.
        default: Result to use when `val` has no `_decompose_` method or
            it returns `NotImplemented`/`None`.  If not specified,
            undecomposable values raise a `TypeError`.
        kwargs: Forwarded into `val._decompose_` (e.g. the qubits a gate
            is applied to).

    Returns:
        The flattened result of `val._decompose_(**kwargs)` when it
        produced something, otherwise `default` if given.

    Raises:
        TypeError: `val` could not be decomposed and no `default` was set.
    """
    decomposer = getattr(val, '_decompose_', None)
    result = NotImplemented if decomposer is None else decomposer(**kwargs)
    if result is not None and result is not NotImplemented:
        from cirq import ops  # HACK: avoids circular dependencies
        return list(ops.flatten_op_tree(result))
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    if decomposer is None:
        raise TypeError("object of type '{}' has no _decompose_ method.".format(type(val)))
    raise TypeError("object of type '{}' does have a _decompose_ method, but it returned NotImplemented or None.".format(type(val)))
def parse_yaml(self, node):
    '''Parse a YAML specification of a component group into this object.

    :param node: De-serialised YAML mapping describing the group; must
        contain 'groupId' and may contain a 'members' list.
    :returns: self, to allow chaining.
    '''
    # BUG FIX: the body previously read from an undefined name ``y``;
    # the parsed mapping is the ``node`` parameter.
    self.group_id = node['groupId']
    self._members = []
    if 'members' in node:
        for m in node.get('members'):
            self._members.append(TargetComponent().parse_yaml(m))
    return self
def set_api_rpc(self, rpc: str = None, rpctls: bool = False) -> None:
    """Set the RPC endpoint used by the client.

    :param rpc: one of 'ganache', 'infura-[mainnet|rinkeby|kovan|ropsten]'
        or a 'HOST:PORT' string.
    :param rpctls: use TLS when connecting to a HOST:PORT endpoint.
    :raises CriticalError: when *rpc* is missing or malformed.
    """
    if rpc == "ganache":
        rpcconfig = ("localhost", 8545, False)
    else:
        # BUG FIX: guard against rpc=None, which previously crashed
        # re.match()/str.split() with a TypeError/AttributeError instead
        # of raising the documented CriticalError.
        m = re.match(r"infura-(.*)", rpc or "")
        if m and m.group(1) in ["mainnet", "rinkeby", "kovan", "ropsten"]:
            rpcconfig = (m.group(1) + ".infura.io", 443, True)
        else:
            try:
                host, port = (rpc or "").split(":")
                rpcconfig = (host, int(port), rpctls)
            except ValueError:
                raise CriticalError("Invalid RPC argument, use 'ganache', 'infura-[network]' or 'HOST:PORT'")
    # rpcconfig is guaranteed set on every path that reaches here.
    log.info("Using RPC settings: %s" % str(rpcconfig))
    self.eth = EthJsonRpc(rpcconfig[0], int(rpcconfig[1]), rpcconfig[2])
def hgetall(key, host=None, port=None, db=None, password=None):
    '''
    Get all fields and values from a redis hash, returns dict

    CLI Example:

    .. code-block:: bash

        salt '*' redis.hgetall foo_hash
    '''
    # Connect to the configured server and delegate straight to the client.
    return _connect(host, port, db, password).hgetall(key)
def task_start(self, **kw):
    """Mark a task as started and return the refreshed task dict."""
    task_id, task = self.get_task(**kw)
    self._execute(task_id, 'start')
    # Re-fetch so the caller sees the post-start state.
    _, refreshed = self.get_task(uuid=task['uuid'])
    return refreshed
def clean_filter_dict(filter_dict, strip=False):
    '''Delete empty ``<field>__in`` entries from a Django ORM filter kwargs dict.

    Brute force processing of user-entered lists of query parameters can
    often produce null ``__in`` filters which would return no results if
    not "cleaned" by deleting these dict entries.  The dict is modified
    in place and also returned.

    :param filter_dict: the kwargs dict destined for ``QuerySet.filter()``.
    :param strip: False to treat values verbatim; a callable to normalise
        values before the emptiness test; any other truthy value to use
        ``str(v).strip()``.

    Examples:
        >>> clean_filter_dict({'acctno__in': None, 'serialno': None})
        {'serialno': None}
        >>> clean_filter_dict({'num__in': [0], 'bool__in': [False], 'str__in': ['\\t\\r\\n']}, strip=True) == {'bool__in': [False], 'num__in': [0]}
        True
    '''
    if not strip:
        strip = lambda s: s
    elif not callable(strip):
        strip = lambda s: str(s).strip()
    keys_to_del = set()
    # .items() works on both Python 2 and 3 (was py2-only iteritems()).
    for k, values_list in filter_dict.items():
        if k.endswith('__in'):
            # Delete when the list is empty or every value is None/blank.
            if not values_list or not any((v is not None and strip(v) != '') for v in values_list):
                keys_to_del.add(k)
    for k in keys_to_del:
        del filter_dict[k]
    # BUG FIX: removed leftover debug ``print`` statements (Python-2-only
    # syntax) that polluted stdout and broke the doctests.
    return filter_dict
def ip_address(self):
    """The IP address of the first interface listed in the droplet's
    ``networks`` field (ordering IPv4 before IPv6), or `None` if there
    are no interfaces
    """
    networks = self.get("networks", {})
    # IPv4 interfaces take precedence over IPv6 ones.
    interfaces = networks.get("v4", []) + networks.get("v6", [])
    if not interfaces:
        return None
    return interfaces[0].ip_address
def _call_in_reactor_thread ( self , f , * args , ** kwargs ) :
    """Schedule ``f(*args, **kwargs)`` to run in the reactor thread.

    Thin wrapper over the reactor's ``callFromThread`` (presumably
    Twisted's -- confirm against ``self._reactor``); safe to call from
    any thread and returns immediately without waiting for the result.
    """
    self . _reactor . callFromThread ( f , * args , ** kwargs )
def min ( a , axis = None ) :
    """Request the minimum of an Array over any number of axes.

    .. note:: Currently limited to operating on a single axis.

    Parameters
    ----------
    a : Array object
        The object whose minimum is to be found.
    axis : None, or int, or iterable of ints
        Axis or axes along which the operation is performed. The default
        (axis=None) is to perform the operation over all the dimensions of the
        input array. The axis may be negative, in which case it counts from
        the last to the first axis. If axis is a tuple of ints, the operation
        is performed over multiple axes.

    Returns
    -------
    out : Array
        The Array representing the requested minimum.
    """
    axes = _normalise_axis ( axis , a )
    # Only a single axis is supported for now (see note above).
    # NOTE(review): assert is stripped under ``python -O``; consider
    # raising ValueError for multi-axis requests instead.
    assert axes is not None and len ( axes ) == 1
    return _Aggregation ( a , axes [ 0 ] , _MinStreamsHandler , _MinMaskedStreamsHandler , a . dtype , { } )
def _make_request(self, url, params=None, opener=None):
    """Configure a HTTP request, fire it off and return the response body.

    :param url: Target URL.
    :param params: Optional request body; when truthy it is passed to
        ``urllib.request.Request``, turning the request into a POST.
    :param opener: Optional handler (e.g. a multipart handler used by
        upload requests) from which a custom opener is built.
    :returns: The raw response body (bytes).
    :raises DoesNotExistError: on HTTP 404.
    :raises CredentialsFailedError: on HTTP 401.
    """
    # Create the request object; ``params`` is only included when given.
    args = [i for i in [url, params] if i]
    request = urllib.request.Request(*args)
    # If the client has credentials, include them as a Basic auth header
    if self.username and self.password:
        credentials = '%s:%s' % (self.username, self.password)
        # BUG FIX: base64.encodestring() was deprecated and removed in
        # Python 3.9; encodebytes() is the supported equivalent.
        encoded_credentials = base64.encodebytes(credentials.encode("utf-8")).decode("utf-8").replace("\n", "")
        header = 'Basic %s' % encoded_credentials
        request.add_header('Authorization', header)
    # If the request provides a custom opener, like the upload request,
    # which relies on a multipart request, it is applied here.
    if opener:
        opener = urllib.request.build_opener(opener)
        request_method = opener.open
    else:
        request_method = urllib.request.urlopen
    # Make the request
    try:
        response = request_method(request)
    except Exception as e:  # modern idiom replaces sys.exc_info()[1]
        if getattr(e, 'code', None) == 404:
            raise DoesNotExistError("The resource you've requested does \
not exist or is unavailable without the proper credentials.")
        elif getattr(e, 'code', None) == 401:
            raise CredentialsFailedError("The resource you've requested \
requires proper credentials.")
        else:
            # Bare re-raise preserves the original traceback.
            raise
    # Read the response and return it
    return response.read()
def calibration(date, satellite):
    """Return the calibration dictionary.

    Keyword arguments:
    satellite -- the name of the satellite.
    date -- the datetime of an image.
    """
    # Assemble each calibration coefficient from its dedicated source;
    # only the post-launch coefficient depends on the image date.
    return {
        'counts_shift': CountsShift().coefficient(satellite),
        'space_measurement': SpaceMeasurement().coefficient(satellite),
        'prelaunch': PreLaunch().coefficient(satellite),
        'postlaunch': PostLaunch().coefficient(date, satellite),
    }
def get_formatted_value ( self , value ) :
    """Returns a string from datetime using :member:`parse_format`.

    :param value: Datetime to cast to string
    :type value: datetime
    :return: str
    """
    def get_formatter ( parser_desc ) :
        # Resolve a parser description into a formatter. The description
        # may be a dict with a 'formatter' (or 'parser') key, or a string
        # naming an entry in self.date_parsers / a strftime format.
        try :
            return parser_desc [ 'formatter' ]
        except TypeError :
            # Not subscriptable: a plain string either names a registered
            # parser (recurse into it) or is itself a format string.
            if isinstance ( parser_desc , str ) :
                try :
                    return get_formatter ( self . date_parsers [ parser_desc ] )
                except KeyError :
                    return parser_desc
            else :
                # Unknown description type: fall through, returning None.
                pass
        except KeyError :
            # Dict without 'formatter': fall back to a string 'parser'.
            try :
                if isinstance ( parser_desc [ 'parser' ] , str ) :
                    return parser_desc [ 'parser' ]
            except KeyError :
                pass
    formatter = get_formatter ( self . parse_format )
    if formatter is None :
        # No usable formatter resolved: default to str().
        return str ( value )
    if callable ( formatter ) :
        return formatter ( value )
    return value . strftime ( format = formatter )
def get_change_type(self, ref, a1, a2):
    """Classify a variant given ref, allele1, and allele2.

    A '.' reference marks an amino acid insertion; a '.' in either
    allele marks a deletion.  Any other combination yields None
    (implicitly), matching the original behaviour.
    """
    if ref == '.':
        return self.INSERTION
    if a1 == '.' or a2 == '.':
        return self.DELETION
    # Substitutions / other changes: implicit None.
def get_supprates(_, data):
    """Decode a supported-rates information element into labels.

    http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n227

    Positional arguments:
    data -- bytearray data to read.

    Returns a list of strings such as '1.0', '5.5*', 'HT*' or 'VHT*';
    a trailing '*' marks entries with the high (basic-rate) bit set.
    """
    labels = []
    for byte in data:
        rate = byte & 0x7f          # low 7 bits carry the rate value
        is_basic = bool(byte & 0x80)  # high bit: basic rate / selector flag
        if rate == BSS_MEMBERSHIP_SELECTOR_VHT_PHY and is_basic:
            text = 'VHT'
        elif rate == BSS_MEMBERSHIP_SELECTOR_HT_PHY and is_basic:
            text = 'HT'
        else:
            # Rates are encoded in units of 0.5 Mbps.
            text = '{0}.{1}'.format(int(rate / 2), int(5 * (rate & 1)))
        labels.append('{0}{1}'.format(text, '*' if is_basic else ''))
    return labels
def add_opinion ( self , opinion_obj ) :
    """Adds an opinion to the opinion layer

    @type opinion_obj: L{Copinion}
    @param opinion_obj: the opinion object
    """
    if self . opinion_layer is None :
        # Lazily create the opinions layer and attach its node to the
        # document root on first use.
        self . opinion_layer = Copinions ( )
        self . root . append ( self . opinion_layer . get_node ( ) )
    self . opinion_layer . add_opinion ( opinion_obj )
def plot ( self ) :
    """Plot the currently selected dataset of the databases tree.

    This method is a slot connected to the `export` QAction. See the
    :meth:`addEntry` method for details.  Dispatches to a 1D histogram,
    2D histogram or table plot depending on the leaf's name/type.
    """
    # The PyTables node tied to the current leaf of the databases tree
    current = self . vtgui . dbs_tree_view . currentIndex ( )
    leaf = self . vtgui . dbs_tree_model . nodeFromIndex ( current ) . node
    data_name = leaf . name
    # Known 1D and 2D histogram node names decide the plotting routine.
    hists_1d = [ 'HistRelBcid' , 'HistErrorCounter' , 'HistTriggerErrorCounter' , 'HistServiceRecord' , 'HistTot' , 'HistTdc' , 'HistClusterTot' , 'HistClusterSize' ]
    hists_2d = [ 'HistOcc' , 'Enable' , 'Imon' , 'C_High' , 'EnableDigInj' , 'C_Low' , 'FDAC' , 'TDAC' , 'HistTdcPixel' , 'HistTotPixel' , 'HistThreshold' , 'HistNoise' , 'HistThresholdFitted' , 'HistNoiseFitted' , 'HistThresholdFittedCalib' , 'HistNoiseFittedCalib' ]
    if data_name in hists_1d :
        plot_1d_hist ( hist = leaf [ : ] , title = data_name )
    elif data_name in hists_2d :
        if data_name == 'HistOcc' :
            # Collapse the third (parameter) axis into a 2D occupancy map.
            leaf = np . sum ( leaf [ : ] , axis = 2 )
        plot_2d_hist ( hist = leaf [ : ] , title = data_name )
    elif 'Table' in str ( type ( leaf ) ) and len ( leaf [ : ] . dtype . names ) <= 3 : # detect tables with less than 4 columns
        plot_table ( leaf [ : ] , title = data_name )
    elif data_name == 'HitOrCalibration' :
        # NOTE(review): Python 2 print statement; typo "Comming" left
        # unchanged because it is runtime output text.
        print 'Comming soon'
    else :
        print 'Plotting' , data_name , '(%s) is not supported!' % type ( leaf )
def audio_open(path, backends=None):
    """Open an audio file using a library that is available on this
    system.

    The optional `backends` parameter can be a list of audio file
    classes to try opening the file with. If it is not provided,
    `audio_open` tries all available backends. If you call this function
    many times, you can avoid the cost of checking for available
    backends every time by calling `available_backends` once and passing
    the result to each `audio_open` call.

    If all backends fail to read the file, a NoBackendError exception is
    raised.
    """
    candidates = available_backends() if backends is None else backends
    for backend_cls in candidates:
        try:
            return backend_cls(path)
        except DecodeError:
            # This backend cannot read the file; try the next one.
            continue
    # All backends failed!
    raise NoBackendError()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.