signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_mean(self, distribution_function):
    """Return the mean value for this object's distribution.

    For the analytically-known distributions (normal, uniform, choice,
    triangular) the mean is computed directly from the stored
    parameters.  For anything else, ``distribution_function`` is
    instantiated and its ``mean()`` is returned.

    :param distribution_function: zero-argument factory for the
        distribution; only used for the non-analytic fallback.
    :return: the mean as a scalar
    """
    params = self.random_function_params
    # Analytic mean formulas, keyed by distribution name.
    analytic = {
        'normal': lambda p: p[0],
        'uniform': lambda p: (p[0] + p[1]) / 2.,
        'choice': lambda p: p[0].mean(),
        'triangular': lambda p: (p[0] + p[1] + p[2]) / 3.,
    }
    handler = analytic.get(self.distribution_name)
    if handler is not None:
        return handler(params)
    return distribution_function().mean()
def guess_series(input_string):
    u"""Tries to convert ``input_string`` into a list of floats.

    Example:
        >>> guess_series("0.5 1.2 3.5 7.3 8 12.5, 13.2, "
        ...              "15.0, 14.2, 11.8, 6.1, 1.9")
        [0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1, 1.9]
    """
    float_finder = re.compile("([-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)")
    candidates = (_convert_to_float(token)
                  for token in float_finder.findall(input_string))
    # Remove entries we couldn't convert to a sensible value.
    return [value for value in candidates
            if value is not None
            and not math.isnan(value)
            and not math.isinf(value)]
def put_key(self, source, rel_path):
    '''Copy a file to the repository.

    Args:
        source: absolute path to the source file, or a file-like object
        rel_path: path relative to the root of the repository
    '''
    k = self._get_boto_key(rel_path)
    try:
        # File-like objects can be streamed directly by boto.
        k.set_contents_from_file(source)
    except AttributeError:
        # `source` has no file-object interface, so treat it as a path.
        # NOTE(review): both branches below are identical -- the >4.8GiB
        # case looks like a placeholder where multi-part upload was meant
        # to go but was never implemented; confirm before relying on it
        # for very large files.
        if os.path.getsize(source) > 4.8 * 1024 * 1024 * 1024:
            # Need to do multi-part uploads here
            k.set_contents_from_filename(source)
        else:
            k.set_contents_from_filename(source)
def build_transitive_closure(self, rel, tc_dict):
    """Build a transitive closure for a given relation in a given dict.

    :param rel: single-argument relation function over graph nodes
    :param tc_dict: dict mapping node URI strings to lists of reachable
        node URI strings; updated in place
    """
    # Make a function with the right argument structure expected by
    # transitiveClosure (node, graph).
    rel_fun = lambda node, graph: rel(node)
    for x in self.graph.all_nodes():
        rel_closure = self.graph.transitiveClosure(rel_fun, x)
        xs = x.toPython()  # plain Python value identifying the node
        for y in rel_closure:
            ys = y.toPython()
            if xs == ys:
                # Skip the trivial reflexive entry.
                continue
            try:
                tc_dict[xs].append(ys)
            except KeyError:
                tc_dict[xs] = [ys]
            # NOTE(review): this compares the relation object itself to a
            # specific bound method to trigger component bookkeeping --
            # confirm identity comparison is the intended dispatch.
            if rel == self.isa_or_partof_objects:
                self._add_component(xs, ys)
def get_vault_form_for_update(self, vault_id):
    """Gets the vault form for updating an existing vault.

    A new vault form should be requested for each update transaction.

    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
    return: (osid.authorization.VaultForm) - the vault form
    raise:  NotFound - ``vault_id`` is not found
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.get_bin_form_for_update_template
    if self._catalog_session is not None:
        # Delegate to the generic catalog session when one is configured.
        return self._catalog_session.get_catalog_form_for_update(catalog_id=vault_id)
    collection = JSONClientValidated('authorization', collection='Vault', runtime=self._runtime)
    if not isinstance(vault_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    result = collection.find_one({'_id': ObjectId(vault_id.get_identifier())})
    cat_form = objects.VaultForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
    # Track the issued form as not-yet-updated so a later update call can
    # verify the form is fresh.
    self._forms[cat_form.get_id().get_identifier()] = not UPDATED
    return cat_form
def add(self):
    """Add service definition to hierarchy.

    Creates the service node plus its ``/type``, ``/state`` and
    ``/machines`` children under ``self.path`` (presumably a
    ZooKeeper-style tree -- confirm against ``self.client``), yielding
    each create so the caller's coroutine machinery can drive it.
    """
    yield self.client.create(self.path)
    yield self.client.create(self.path + "/type", self.name)
    yield self.client.create(self.path + "/state")
    # Start with an empty (JSON) machine list.
    yield self.client.create(self.path + "/machines", "[]")
    log.debug("registered service '%s' at %s." % (self.name, self.path))
def flatten(repertoire, big_endian=False):
    """Flatten a repertoire, removing empty dimensions.

    By default the flattened repertoire is returned in little-endian
    (Fortran, column-major) order.

    Args:
        repertoire (np.ndarray or None): A repertoire.

    Keyword Args:
        big_endian (boolean): If ``True``, flatten in big-endian
            (C, row-major) order.

    Returns:
        np.ndarray: The flattened repertoire, or ``None`` if the input
        was ``None``.
    """
    if repertoire is None:
        return None
    # `ravel` returns a view where possible, avoiding the copy that
    # `flatten` would make.
    memory_order = 'C' if big_endian else 'F'
    return repertoire.squeeze().ravel(order=memory_order)
def get_symbol_dict(self, voigt=True, zero_index=False, **kwargs):
    """Creates a summary dict for the tensor with associated symbols.

    Args:
        voigt (bool): whether to build the dict from the voigt-notation
            tensor rather than full notation; defaults to True
        zero_index (bool): whether indices start at zero; defaults to
            False because tensor notation is conventionally one-indexed
        **kwargs: keyword args for np.isclose (e.g. atol, rtol)

    Returns:
        dict mapping symbol strings (e.g. ``c_11``) to the representative
        tensor value of each equivalence group; groups whose value is
        (numerically) zero are omitted.
    """
    symbol_map = {}
    source = self.voigt if voigt else self
    offset = 0 if zero_index else 1
    for index_group in self.get_grouped_indices(voigt=voigt, **kwargs):
        representative = index_group[0]
        label = self.symbol + '_' + ''.join(str(i + offset) for i in representative)
        # Zero-valued entries carry no information, so skip them.
        if not np.isclose(source[representative], 0):
            symbol_map[label] = source[representative]
    return symbol_map
def cost_zerg_corrected(self) -> "Cost":
    """This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively"""
    is_zerg_structure = (
        self.race == Race.Zerg and Attribute.Structure.value in self.attributes
    )
    if not is_zerg_structure:
        return self.cost
    # Zerg structures morph from a drone, so subtract its 50 minerals.
    return Cost(
        self._proto.mineral_cost - 50,
        self._proto.vespene_cost,
        self._proto.build_time,
    )
def setupModule(self):
    """*The setupModule method*

    **Return:**
        - ``log`` -- a logger
        - ``dbConn`` -- a database connection to a test database (details from yaml settings file)
        - ``pathToInputDir`` -- path to module's own test input directory
        - ``pathToOutputDir`` -- path to module's own test output directory
    """
    import pymysql as ms
    ## VARIABLES ##
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input -- confirm both configs are trusted.
    logging.config.dictConfig(yaml.load(self.loggerConfig))
    log = logging.getLogger(__name__)
    connDict = yaml.load(self.dbConfig)
    # MULTI_STATEMENTS lets test fixtures run several SQL statements per
    # call; local_infile enables LOAD DATA LOCAL for bulk test data.
    dbConn = ms.connect(
        host=connDict['host'],
        user=connDict['user'],
        passwd=connDict['password'],
        db=connDict['db'],
        use_unicode=True,
        charset='utf8',
        local_infile=1,
        client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
        connect_timeout=3600
    )
    dbConn.autocommit(True)
    return log, dbConn, self.pathToInputDir, self.pathToOutputDir
def _pairwise_chisq ( self ) :
"""Pairwise comparisons ( Chi - Square ) along axis , as numpy . ndarray .
Returns a list of square and symmetric matrices of test statistics for the null
hypothesis that each vector along * axis * is equal to each other .""" | return [ self . _chi_squared ( mr_subvar_proportions , self . _margin [ idx ] , self . _opposite_axis_margin [ idx ] / np . sum ( self . _opposite_axis_margin [ idx ] ) , ) for ( idx , mr_subvar_proportions ) in enumerate ( self . _proportions ) ] |
def intersperse(iterable, element):
    """Generator yielding all elements of `iterable`, but with `element`
    inserted between each two consecutive elements.

    :param iterable: any iterable; may be empty.
    :param element: the separator yielded between consecutive items.

    Note: the original implementation called ``next()`` unguarded, which
    crashed on empty input and -- under PEP 479 (Python 3.7+) -- turned
    the terminating ``StopIteration`` into a ``RuntimeError`` on normal
    exhaustion.  Both cases are handled here.
    """
    iterator = iter(iterable)
    try:
        yield next(iterator)
    except StopIteration:
        return  # empty iterable: yield nothing
    for item in iterator:
        yield element
        yield item
def _get_bookmark(repo, name):
    '''Find the requested bookmark in the specified repo.

    Returns the first ``(name, ...)`` bookmark tuple whose name matches,
    or ``False`` when no bookmark of that name exists.
    '''
    matches = [bookmark for bookmark in _all_bookmarks(repo) if bookmark[0] == name]
    if matches:
        return matches[0]
    return False
def qindex2index(index):
    """from a QIndex (row/column coordinate system), get the buffer index of the byte"""
    row = index.row()
    column = index.column()
    base = 0x10 * row  # 16 bytes per rendered row
    # NOTE(review): columns past 0x10 are shifted back by 0x11 --
    # presumably a separator column sits at position 0x10 in the view;
    # confirm against the model's column layout.
    if column > 0x10:
        return base + column - 0x11
    return base + column
def layer_register(log_shape=False, use_scope=True):
    """
    Args:
        log_shape (bool): log input/output shape of this layer
        use_scope (bool or None):
            Whether to call this layer with an extra first argument as
            variable scope.  When set to None, it can be called either
            with or without the scope name argument, depending on whether
            the first argument is a string or not.

    Returns:
        A decorator used to register a layer.

    Example:

    .. code-block:: python

        @layer_register(use_scope=True)
        def add10(x):
            return x + tf.get_variable('W', shape=[10])

        # use it:
        output = add10('layer_name', input)  # called under variable scope "layer_name"
    """
    def wrapper(func):
        @wraps(func)
        def wrapped_func(*args, **kwargs):
            assert args[0] is not None, args
            if use_scope:
                # First positional arg is mandatorily the scope name.
                name, inputs = args[0], args[1]
                args = args[1:]
                # actual positional args used to call func
                assert isinstance(name, six.string_types), "First argument for \"{}\" should be a string. ".format(func.__name__) + "Did you forget to specify the name of the layer?"
            else:
                assert not log_shape
                if isinstance(args[0], six.string_types):
                    if use_scope is False:
                        logger.warn("Please call layer {} without the first scope name argument, " "or register the layer with use_scope=None to allow " "two calling methods.".format(func.__name__))
                    # use_scope is None and a name was given: accept it.
                    name, inputs = args[0], args[1]
                    args = args[1:]
                    # actual positional args used to call func
                else:
                    inputs = args[0]
                    name = None
            if not (isinstance(inputs, (tf.Tensor, tf.Variable)) or (isinstance(inputs, (list, tuple)) and isinstance(inputs[0], (tf.Tensor, tf.Variable)))):
                raise ValueError("Invalid inputs to layer: " + str(inputs))
            # use kwargs from current argument scope
            actual_args = copy.copy(get_arg_scope()[func.__name__])
            # explicit kwargs overwrite argscope
            actual_args.update(kwargs)
            # if six.PY3:
            #     # explicit positional args also override argscope. only work in PY3
            #     posargmap = inspect.signature(func).bind_partial(*args).arguments
            #     for k in six.iterkeys(posargmap):
            #         if k in actual_args:
            #             del actual_args[k]
            if name is not None:  # use scope
                with tfv1.variable_scope(name) as scope:
                    # this name is only used to suppress logging,
                    # doesn't hurt to do some heuristics
                    scope_name = re.sub('tower[0-9]+/', '', scope.name)
                    do_log_shape = log_shape and scope_name not in _LAYER_LOGGED
                    if do_log_shape:
                        logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))
                    # run the actual function
                    outputs = func(*args, **actual_args)
                    if do_log_shape:
                        # log shape info and add activation
                        logger.info("{} output: {}".format(scope.name, get_shape_str(outputs)))
                        _LAYER_LOGGED.add(scope_name)
            else:
                # run the actual function
                outputs = func(*args, **actual_args)
            return outputs

        wrapped_func.symbolic_function = func
        # attribute to access the underlying function object
        wrapped_func.use_scope = use_scope
        _register(func.__name__, wrapped_func)
        return wrapped_func
    return wrapper
def get_form(formcls):
    """Get a form class, given either a Form subclass or its registered
    string name (looked up via ``settings.FORMS``).

    :param formcls: a ``Form`` subclass, or the string key of one.
    :return: the resolved ``Form`` subclass.
    :raises UliwebError: if the name is unknown or the argument is
        neither a Form subclass nor a string.

    Fixed: the original referenced the bare name ``unicode``, which is a
    ``NameError`` on Python 3.
    """
    from uliweb.form import Form
    import inspect
    if inspect.isclass(formcls) and issubclass(formcls, Form):
        return formcls
    # `unicode` only exists on Python 2; fall back to `str` alone on 3.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if isinstance(formcls, string_types):
        path = settings.FORMS.get(formcls)
        if path:
            return import_attr(path)
        raise UliwebError("Can't find formcls name %s in settings.FORMS" % formcls)
    raise UliwebError("formcls should be Form class object or string path format, but %r found!" % formcls)
def taper(path, length, final_width, final_distance, direction=None, layer=0, datatype=0):
    '''Linear tapers for the lazy.

    path          : `gdspy.Path` to append the taper
    length        : total length
    final_width   : final width of the taper
    final_distance: final distance between path centers
    direction     : taper direction
    layer         : GDSII layer number (int or list)
    datatype      : GDSII datatype number (int or list)

    Parameters `layer` and `datatype` must be of the same type.  If they
    are lists, they must have the same length; their length indicates
    the number of pieces that compose the taper.

    Return `path`.
    '''
    # Idiom fix: use isinstance instead of comparing __class__ objects.
    if isinstance(layer, list) and isinstance(datatype, list):
        assert len(layer) == len(datatype)
    elif isinstance(layer, int) and isinstance(datatype, int):
        layer = [layer]
        datatype = [datatype]
    else:
        raise ValueError('Parameters layer and datatype must have the same ' 'type (either int or list) and length.')
    n = len(layer)
    # Interpolate width/distance from the current values to the targets;
    # drop the first sample, which is the starting state itself.
    widths = numpy.linspace(2 * path.w, final_width, n + 1)[1:]
    distances = numpy.linspace(path.distance, final_distance, n + 1)[1:]
    segment_length = float(length) / n
    for i in range(n):
        path.segment(segment_length, direction, widths[i], distances[i], layer=layer[i], datatype=datatype[i])
    return path
def _mean_prediction ( self , mu , Y , h , t_z ) :
"""Creates a h - step ahead mean prediction
Parameters
mu : np . ndarray
The past predicted values
Y : np . ndarray
The past data
h : int
How many steps ahead for the prediction
t _ z : np . ndarray
A vector of ( transformed ) latent variables
Returns
h - length vector of mean predictions""" | # Create arrays to iteratre over
Y_exp = Y . copy ( )
# Loop over h time periods
for t in range ( 0 , h ) :
if self . ar != 0 :
Y_exp_normalized = ( Y_exp [ - self . ar : ] [ : : - 1 ] - self . _norm_mean ) / self . _norm_std
new_value = self . predict_new ( np . append ( 1.0 , Y_exp_normalized ) , self . latent_variables . get_z_values ( ) )
else :
new_value = self . predict_new ( np . array ( [ 1.0 ] ) , self . latent_variables . get_z_values ( ) )
Y_exp = np . append ( Y_exp , [ self . link ( new_value ) ] )
return Y_exp |
def _process_prb_strain_genotype_view(self, limit=None):
    """Link strains to their genotypes.

    Reads the raw ``prb_strain_genotype_view`` table and, for each
    (strain, genotype) row, records the mapping in
    ``self.strain_to_genotype_map`` and adds a ``has_genotype`` triple.

    NOTE(review): the original docstring described fetching free-text
    phenotype descriptions, which does not match this code -- rewritten
    to describe the actual behavior.

    :param limit: optional maximum number of rows to process
    :return: None
    """
    line_counter = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    LOG.info("Getting genotypes for strains")
    raw = '/'.join((self.rawdir, 'prb_strain_genotype_view'))
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            if line_counter == 1:
                # Skip the header row.
                continue
            (strain_key, genotype_key) = line
            if self.test_mode is True:
                # Only keep rows referencing the curated test keys.
                if int(genotype_key) not in self.test_keys.get('genotype') and int(strain_key) not in self.test_keys.get('strain'):
                    continue
            strain_id = self.idhash['strain'].get(strain_key)
            if strain_id is None:
                # Fall back to an internal identifier when unmapped.
                strain_id = self._makeInternalIdentifier('strain', strain_key)
            genotype_id = self.idhash['genotype'].get(genotype_key)
            if genotype_id is None:
                genotype_id = self._makeInternalIdentifier('genotype', genotype_key)
            if strain_id is not None and genotype_id is not None:
                self.strain_to_genotype_map[strain_id] = genotype_id
                graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)
            # TODO
            # verify if this should be contingent on the exactness or not
            # if qualifier == 'Exact':
            #     gu.addTriple(
            #         graph, strain_id,
            #         self.globaltt['has_genotype'],
            #         genotype_id)
            # else:
            #     gu.addXref(graph, strain_id, genotype_id)
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
def from_networkx(cls, graph, weight='weight'):
    r"""Import a graph from NetworkX.

    Edge weights are retrieved as an edge attribute under the name
    specified by the ``weight`` parameter.  Signals are retrieved from
    node attributes and stored in the :attr:`signals` dictionary under
    the attribute name.  `N`-dimensional signals that were broken during
    export are joined.

    Parameters
    ----------
    graph : :class:`networkx.Graph`
        A NetworkX graph object.
    weight : string or None, optional
        The edge attribute that holds the numerical values used as the
        edge weights.  All edge weights are set to 1 if None, or not
        found.

    Returns
    -------
    graph : :class:`~pygsp.graphs.Graph`
        A PyGSP graph object.

    Notes
    -----
    The nodes are ordered according to :meth:`networkx.Graph.nodes`.
    In NetworkX, node attributes need not be set for every node; when an
    attribute is missing, NaN is assigned to the corresponding signal
    for that node.  If the graph is a :class:`networkx.MultiGraph`,
    multiedges are aggregated by summation.

    Examples
    --------
    >>> import networkx as nx
    >>> graph = nx.Graph()
    >>> graph.add_edge(1, 2, weight=0.2)
    >>> graph.add_edge(2, 3, weight=0.9)
    >>> graph.add_node(4, sig=3.1416)
    >>> graph = graphs.Graph.from_networkx(graph)
    >>> graph.signals
    {'sig': array([   nan,    nan,    nan, 3.1416])}
    """
    nx = _import_networkx()
    from .graph import Graph
    adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
    graph_pg = Graph(adjacency)
    # Copy every node attribute into a per-name signal vector.
    for i, node in enumerate(graph.nodes()):
        for name in graph.nodes[node].keys():
            try:
                signal = graph_pg.signals[name]
            except KeyError:
                # First time this attribute is seen: initialise with NaNs
                # so nodes lacking the attribute stay NaN.
                signal = np.full(graph_pg.n_vertices, np.nan)
                graph_pg.set_signal(signal, name)
            try:
                signal[i] = graph.nodes[node][name]
            except KeyError:
                pass
                # attribute not set for node
    # Re-join N-dimensional signals that were split during export.
    graph_pg._join_signals()
    return graph_pg
def targets_by_artifact_set(self, targets):
    """Partitions the input targets by the sets of pinned artifacts they are managed by.

    :param collections.Iterable targets: the input targets (typically just JarLibrary targets).
    :return: a mapping of PinnedJarArtifactSet -> list of targets.
    :rtype: dict
    """
    grouped = defaultdict(list)
    for target in targets:
        grouped[self.for_target(target)].append(target)
    # Return a plain dict so callers don't observe defaultdict semantics.
    return dict(grouped)
def create_autosummary_file(modules, opts):
    # type: (List[unicode], Any) -> None
    """Create the module's index (an autosummary RST page listing all modules)."""
    lines = [
        'API Reference',
        '=============',
        '',
        '.. autosummary::',
        ' :template: api_module.rst',
        ' :toctree: {}'.format(opts.destdir),
        '',
    ]
    # Sort in place, preserving the original's side effect on the caller's list.
    modules.sort()
    for module in modules:
        lines.append(' {}'.format(module))
    lines.append('')
    fname = path.join(opts.srcdir, '{}.rst'.format(opts.docname))
    logger.info('[apigen] creating API docs file: {}'.format(fname))
    with FileAvoidWrite(fname) as f:
        f.write('\n'.join(lines))
def run_from_argv(self, argv):
    """Overridden in order to access the command line arguments.

    Stores the full command line as ``self.argv_string`` (presumably
    used when composing the notification email -- confirm against the
    class's email machinery) before delegating to the normal command
    processing.
    """
    self.argv_string = ' '.join(argv)
    super(EmailNotificationCommand, self).run_from_argv(argv)
def _initialize_pop(self, pop_size):
    """Assigns indices to individuals in population.

    Registers DEAP-style factories (``individual`` built by
    ``self._generate``; ``population`` repeating it into a list), builds
    the initial population, evaluates its fitnesses, and bumps the model
    counter.

    :param pop_size: number of individuals to create.
    """
    self.toolbox.register("individual", self._generate)
    self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
    self.population = self.toolbox.population(n=pop_size)
    self.assign_fitnesses(self.population)
    # Every evaluated individual counts as one model evaluation.
    self._model_count += len(self.population)
    return
def close(self):
    '''Closes the CDF Class.

    1. If compression was set, this is where the compressed file is
       written.
    2. If a checksum is needed, this will place the checksum at the end
       of the file.
    '''
    if self.compressed_file is None:
        # Uncompressed: patch the end-of-file offset into the GDR and
        # optionally append the MD5 checksum.
        with self.path.open('rb+') as f:
            f.seek(0, 2)
            eof = f.tell()
            # GDR+36 holds the 8-byte eof field of the CDF spec.
            self._update_offset_value(f, self.gdr_head + 36, 8, eof)
            if self.checksum:
                f.write(self._md5_compute(f))
        return
    # Compressed: finalise the plain file, then write magic numbers and
    # the compressed CCR into the compressed file.
    with self.path.open('rb+') as f:
        f.seek(0, 2)
        eof = f.tell()
        self._update_offset_value(f, self.gdr_head + 36, 8, eof)
        with self.compressed_file.open('wb+') as g:
            g.write(bytearray.fromhex(CDF.V3magicNUMBER_1))
            g.write(bytearray.fromhex(CDF.V3magicNUMBER_2c))
            self._write_ccr(f, g, self.compression)
            if self.checksum:
                g.seek(0, 2)
                g.write(self._md5_compute(g))
    # Replace the uncompressed file with the compressed one.
    self.path.unlink()
    # NOTE: for Windows this is necessary
    self.compressed_file.rename(self.path)
def configure(self, cnf={}, **kw):
    """Configure resources of the widget.

    To get the list of options for this widget, call the method
    :meth:`~TickScale.keys`.  See :meth:`~TickScale.__init__` for a
    description of the widget specific options.
    """
    # Fold the positional option dict in; note cnf entries overwrite kw.
    kw.update(cnf)
    reinit = False
    if 'orient' in kw:
        # Switching orientation also switches the ttk style and the
        # default tick position.
        if kw['orient'] == 'vertical':
            self._style_name = self._style_name.replace('Horizontal', 'Vertical')
            if 'tickpos' not in kw:
                self._tickpos = 'w'
        else:
            self._style_name = self._style_name.replace('Vertical', 'Horizontal')
            if 'tickpos' not in kw:
                self._tickpos = 's'
        self.scale.configure(style=self._style_name)
        reinit = True
    if 'showvalue' in kw:
        self._showvalue = bool(kw.pop('showvalue'))
        reinit = True
    if 'tickinterval' in kw:
        self._tickinterval = kw.pop('tickinterval')
        reinit = True
    if 'tickpos' in kw:
        tickpos = kw.pop('tickpos')
        orient = kw.get('orient', str(self.cget('orient')))
        if orient == 'vertical' and tickpos not in ['w', 'e']:
            raise ValueError("For a vertical TickScale, 'tickpos' must be 'w' or 'e'.")
        elif orient == 'horizontal' and tickpos not in ['n', 's']:
            raise ValueError("For a horizontal TickScale, 'tickpos' must be 'n' or 's'.")
        elif orient in ['vertical', 'horizontal']:
            self._tickpos = tickpos
            reinit = True
    if 'labelpos' in kw:
        labelpos = kw.pop('labelpos')
        if labelpos not in ['w', 'e', 'n', 's']:
            raise ValueError("'labelpos' must be 'n', 's', 'e' or 'w'.")
        else:
            self._labelpos = labelpos
            reinit = True
    if 'resolution' in kw:
        try:
            self._resolution = float(kw.pop('resolution'))
            # NOTE(review): this ValueError is swallowed by the except
            # below and re-raised as TypeError -- confirm that is the
            # intended behavior for negative resolutions.
            if self._resolution < 0:
                raise ValueError("'resolution' must be non negative.")
        except ValueError:
            raise TypeError("'resolution' must be a float.")
        # The tick interval can never be finer than the resolution.
        if self._tickinterval != 0 and self._resolution > self._tickinterval:
            self._tickinterval = self._resolution
        reinit = True
    if 'digits' in kw:
        digits = kw.pop('digits')
        if not isinstance(digits, int):
            raise TypeError("'digits' must be an integer.")
        elif digits < 0:
            # Negative digits means "general" formatting.
            self._digits = digits
            self._formatter = '{:g}'
            reinit = True
        else:
            self._digits = digits
            self._formatter = '{:.' + str(self._digits) + 'f}'
            # Clamp resolution/interval/bounds to the displayable precision.
            interv = self._get_precision(self._tickinterval)
            resol = self._get_precision(self._resolution)
            start = kw.get('from', kw.get('from_', self._start))
            end = kw.get('to', self.scale.cget('to'))
            from_ = self._get_precision(start)
            to = self._get_precision(end)
            d = max(interv, resol, from_, to)
            if self._digits < d:
                self._resolution = float('1e-{}'.format(self._digits))
                self._tickinterval = round(self._tickinterval, self._digits)
                if self._resolution > self._tickinterval:
                    self._tickinterval = self._resolution
                kw['to'] = round(end, self._digits)
                if 'from_' in kw:
                    del kw['from_']
                kw['from'] = round(start, self._digits)
            reinit = True
    elif self._digits > 0:
        # No explicit digits: widen precision to cover all current options.
        start = kw.get('from', kw.get('from_', self._start))
        end = kw.get('to', self.scale.cget('to'))
        from_ = self._get_precision(start)
        to = self._get_precision(end)
        interv = self._get_precision(self._tickinterval)
        resol = self._get_precision(self._resolution)
        digits = max(self._digits, interv, resol, from_, to)
        if digits != self._digits:
            self._digits = digits
            self._formatter = '{:.' + str(self._digits) + 'f}'
            reinit = True
    if 'variable' in kw:
        self._var = kw['variable']
        if not self._var:
            # Replace an empty variable with a fresh DoubleVar.
            self._var = tk.DoubleVar(self, self.get())
            kw['variable'] = self._var
        try:
            self._var.trace_add('write', self._increment)
        except AttributeError:
            # backward compatibility with older Tkinter trace API
            self._var.trace('w', self._increment)
    self.scale.configure(**kw)
    if 'from_' in kw or 'from' in kw or 'to' in kw:
        # Cache range info used by the tick layout code.
        self._extent = self.scale.cget('to') - self.scale.cget('from')
        self._start = self.scale.cget('from')
        reinit = True
    if 'style' in kw:
        self._style_name = kw['style']
        if not self._style_name:
            self._style_name = '%s.TScale' % (str(self.scale.cget('orient')).capitalize())
    if reinit:
        self._init()
    if 'orient' in kw:
        # needed after the reinitialization in case of orientation change
        self._apply_style()
def GetAlias(session=None):
    """Return specified alias or if none the alias associated with the provided credentials.

    >>> clc.v2.Account.GetAlias()
    u'BTDI'
    """
    if session is not None:
        return session['alias']
    # Lazily log in to populate the module-level alias if needed.
    if not clc.ALIAS:
        clc.v2.API._Login()
    return clc.ALIAS
def run_dot(dot):
    """Converts a graph in DOT format into an IPython displayable object."""
    global impl
    # Resolve the rendering implementation once, on first use.
    if impl is None:
        impl = guess_impl()
    if impl == "dot":
        return run_dot_dot(dot)
    if impl == "js":
        return run_dot_js(dot)
    raise ValueError("unknown implementation {}".format(impl))
def options(argv=[]):
    """A helper function that returns a dictionary of the default key-values pairs"""
    # NOTE: the mutable default is part of the public interface and the
    # argument is only read, never mutated.
    parsed_options, _remaining = HendrixOptionParser.parse_args(argv)
    return vars(parsed_options)
def error_unzip_helper(values, func, func_kwargs):
    '''Splits [(x1, y1), (x2, y2), ...] and gives to func.

    Returns func's result directly when it is a float, otherwise the
    first element of the (tuple-like) result.
    '''
    first_items = [pair[0] for pair in values]
    second_items = [pair[1] for pair in values]
    outcome = func(first_items, second_items, **func_kwargs)
    if isinstance(outcome, float):
        return outcome
    return outcome[0]
def filter(self, *args, **kwargs):
    """Adds WHERE arguments to the queryset, returning a new queryset.

    Positional args must be prebuilt ``WhereClause`` objects; keyword
    args are ``column[__op]=value`` filters resolved against the model.

    :rtype: AbstractQuerySet
    """
    # add arguments to the where clause filters
    if len([x for x in kwargs.values() if x is None]):
        raise CQLEngineException("None values on filter are not allowed")
    # Work on a deep copy so the original queryset stays immutable.
    clone = copy.deepcopy(self)
    for operator in args:
        if not isinstance(operator, WhereClause):
            raise QueryException('{} is not a valid query operator'.format(operator))
        clone._where.append(operator)
    for arg, val in kwargs.items():
        col_name, col_op = self._parse_filter_arg(arg)
        quote_field = True
        # resolve column and operator
        try:
            column = self.model._get_column(col_name)
        except KeyError:
            if col_name == 'pk__token':
                # Virtual column for token() comparisons on the partition key.
                if not isinstance(val, Token):
                    raise QueryException("Virtual column 'pk__token' may only be compared to Token() values")
                column = columns._PartitionKeysToken(self.model)
                quote_field = False
            else:
                raise QueryException("Can't resolve column name: '{}'".format(col_name))
        if isinstance(val, Token):
            if col_name != 'pk__token':
                raise QueryException("Token() values may only be compared to the 'pk__token' virtual column")
            partition_columns = column.partition_columns
            if len(partition_columns) != len(val.value):
                raise QueryException('Token() received {} arguments but model has {} partition keys'.format(len(val.value), len(partition_columns)))
            val.set_columns(partition_columns)
        # get query operator, or use equals if not supplied
        operator_class = BaseWhereOperator.get_operator(col_op or 'EQ')
        operator = operator_class()
        if isinstance(operator, InOperator):
            if not isinstance(val, (list, tuple)):
                raise QueryException('IN queries must use a list/tuple value')
            query_val = [column.to_database(v) for v in val]
        elif isinstance(val, BaseQueryFunction):
            # Query functions (e.g. MinTimeUUID) are passed through as-is.
            query_val = val
        else:
            query_val = column.to_database(val)
        clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field))
    return clone
def getPorts(self):
    """acquire ports to be used by the SC2 client launched by this process"""
    # Already-allocated ports are simply reused.
    if self.ports:
        return self.ports
    if not self._gotPorts:
        self.ports = [
            portpicker.pick_unused_port(),  # game_port
            portpicker.pick_unused_port(),  # base_port
            portpicker.pick_unused_port(),  # shared_port / init port
        ]
        self._gotPorts = True
    return self.ports
def _extract_cause(cls, exc_val):
    """Helper routine to extract nested cause (if any).

    Walks the ``__cause__``/``__context__`` chain of ``exc_val`` and
    rebuilds it via ``cls.from_exception``, innermost cause first.
    Returns the outermost rebuilt cause, or None when there is none.
    """
    # See: https://www.python.org/dev/peps/pep-3134/ for why/what
    # these are...
    # '__cause__' attribute for explicitly chained exceptions
    # '__context__' attribute for implicitly chained exceptions
    # '__traceback__' attribute for the traceback
    # See: https://www.python.org/dev/peps/pep-0415/ for why/what
    # the '__suppress_context__' is/means/implies...
    nested_exc_vals = []
    seen = [exc_val]  # cycle guard for pathological chains
    while True:
        suppress_context = getattr(exc_val, '__suppress_context__', False)
        if suppress_context:
            # 'raise X from Y' was used: only the explicit cause counts.
            attr_lookups = ['__cause__']
        else:
            attr_lookups = ['__cause__', '__context__']
        nested_exc_val = None
        # NOTE(review): because both attributes are checked in order and
        # the last non-None wins, '__context__' takes precedence over
        # '__cause__' when both are set -- confirm that is intended.
        for attr_name in attr_lookups:
            attr_val = getattr(exc_val, attr_name, None)
            if attr_val is None:
                continue
            nested_exc_val = attr_val
        if nested_exc_val is None or nested_exc_val in seen:
            break
        seen.append(nested_exc_val)
        nested_exc_vals.append(nested_exc_val)
        exc_val = nested_exc_val
    last_cause = None
    # Rebuild from the innermost cause outwards so each wrapper links to
    # the one beneath it.
    for exc_val in reversed(nested_exc_vals):
        f = cls.from_exception(exc_val, cause=last_cause, find_cause=False)
        last_cause = f
    return last_cause
def get_distance_function(distance):
    """Returns the distance function from the string name provided.

    :param distance: the string name of the distance, or a callable
        (returned unchanged).
    :return: the distance function
    :raises KeyError: when the name is not a supported distance.
    """
    # A caller-supplied function is used as-is.
    if callable(distance):
        return distance
    try:
        return _supported_distances_lookup()[distance]
    except KeyError:
        raise KeyError('Unsupported distance function {0!r}'.format(distance.lower()))
def _join_parameters ( base , nxt ) :
"""join parameters from the lhs to the rhs , if compatible .""" | if nxt is None :
return base
if isinstance ( base , set ) and isinstance ( nxt , set ) :
return base | nxt
else :
return nxt |
def error_response(self, kwargs_lens, kwargs_ps):
    """returns the 1d array of the error estimate corresponding to the data response

    :return: 1d numpy array of response, 2d array of additional errors
        (e.g. point source uncertainties)
    """
    model_error = self.error_map(kwargs_lens, kwargs_ps)
    # Flatten the 2d error map into data-response ordering.
    error_1d = self.ImageNumerics.image2array(model_error)
    response_error = self.ImageNumerics.C_D_response + error_1d
    return response_error, model_error
def complement_alleles(self):
    """Complement the alleles of this variant in place.

    Delegates to this module's ``complement_alleles`` function for each
    allele. Note that this does not create a new object; it modifies the
    state of the current instance.
    """
    flipped = list(map(complement_alleles, self.alleles))
    self.alleles = self._encode_alleles(flipped)
def _url_collapse_path ( path ) :
"""Given a URL path , remove extra ' / ' s and ' . ' path elements and collapse
any ' . . ' references and returns a colllapsed path .
Implements something akin to RFC - 2396 5.2 step 6 to parse relative paths .
The utility of this function is limited to is _ cgi method and helps
preventing some security attacks .
Returns : A tuple of ( head , tail ) where tail is everything after the final /
and head is everything before it . Head will always start with a ' / ' and ,
if it contains anything else , never have a trailing ' / ' .
Raises : IndexError if too many ' . . ' occur within the path .""" | # Similar to os . path . split ( os . path . normpath ( path ) ) but specific to URL
# path semantics rather than local operating system semantics .
path_parts = path . split ( '/' )
head_parts = [ ]
for part in path_parts [ : - 1 ] :
if part == '..' :
head_parts . pop ( )
# IndexError if more ' . . ' than prior parts
elif part and part != '.' :
head_parts . append ( part )
if path_parts :
tail_part = path_parts . pop ( )
if tail_part :
if tail_part == '..' :
head_parts . pop ( )
tail_part = ''
elif tail_part == '.' :
tail_part = ''
else :
tail_part = ''
splitpath = ( '/' + '/' . join ( head_parts ) , tail_part )
collapsed_path = "/" . join ( splitpath )
return collapsed_path |
def copy_data(self, project, logstore, from_time, to_time=None, to_client=None, to_project=None, to_logstore=None, shard_list=None, batch_size=None, compress=None, new_topic=None, new_source=None):
    """Copy data from one logstore to another (possibly in another region).

    Time values refer to the log's server-side receive time.

    :param project: source project name (string)
    :param logstore: source logstore name (string)
    :param from_time: cursor value: "begin", a timestamp, or a readable time
        like "%Y-%m-%d %H:%M:%S<time_zone>" (e.g. "2018-01-02 12:12:10+8:00");
        human readable strings such as "1 hour ago", "now", "yesterday 0:0:0"
        are also supported
    :param to_time: end cursor, default "end"; same formats as from_time
    :param to_client: target LogClient; source client is used when empty
    :param to_project: target project; source project is used when empty
    :param to_logstore: target logstore; source logstore is used when empty
    :param shard_list: shard numbers, comma separated list or ranges, e.g. "1,20,31-40"
    :param batch_size: batch size fetched per iteration, default 500
    :param compress: whether to use compression, default True
    :param new_topic: overwrite the copied topic with this value
    :param new_source: overwrite the copied source with this value
    :return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20}}
    """
    forwarded = dict(to_time=to_time, to_client=to_client, to_project=to_project,
                     to_logstore=to_logstore, shard_list=shard_list,
                     batch_size=batch_size, compress=compress,
                     new_topic=new_topic, new_source=new_source)
    # Delegate to the module-level copy_data implementation.
    return copy_data(self, project, logstore, from_time, **forwarded)
def _get_named_graph(context):
    """Return the NamedGraph for this context, or None when no context is given."""
    if context is None:
        return None
    graph, _created = models.NamedGraph.objects.get_or_create(identifier=context.identifier)
    return graph
def _split_response(self, data):
    """Convert a received XBee packet into a named-field dictionary.

    _split_response: binary data -> {'id': str, 'param': binary data, ...}

    Takes a data packet received from an XBee device and converts it into
    a dictionary, naming each segment of binary data as specified in the
    'api_responses' spec.
    """
    # Fetch the first byte to identify the packet type.
    # If the spec doesn't exist, raise an exception.
    packet_id = data[0:1]
    try:
        packet = self.api_responses[packet_id]
    except AttributeError:
        # 'api_responses' is expected to be defined by a derived class.
        raise NotImplementedError("API response specifications could not " "be found; use a derived class which " "defines 'api_responses'.")
    except KeyError:
        # Check whether this ID can be found among transmittable packets;
        # receiving a command frame usually means the device is not in API mode.
        for cmd_name, cmd in list(self.api_commands.items()):
            if cmd[0]['default'] == data[0:1]:
                raise CommandFrameException("Incoming frame with id {} " "looks like a command frame of " "type '{}' (these should not be" " received). Are you sure your " "devices are in " "API mode?".format(data[0], cmd_name))
        raise KeyError("Unrecognized response packet with id byte {0}".format(data[0]))
    # Current byte index in the data stream (byte 0 was the packet id).
    index = 1
    # Result info, seeded with the packet's symbolic name.
    info = {'id': packet['name']}
    packet_spec = packet['structure']
    # Parse the fields in the order the spec declares them.
    for field in packet_spec:
        if field['len'] == 'null_terminated':
            # Accumulate bytes until the NUL terminator, then skip past it.
            field_data = b''
            while data[index:index + 1] != b'\x00':
                field_data += data[index:index + 1]
                index += 1
            index += 1
            info[field['name']] = field_data
        elif field['len'] is not None:
            # Fixed-length field: store the specified number of bytes.
            # Guard against reading beyond the last data element.
            expected_len = index + field['len']
            if expected_len > len(data):
                raise ValueError("Response packet was shorter than " "expected; expected: {}, got: {} " "bytes".format(expected_len, len(data)))
            field_data = data[index:index + field['len']]
            info[field['name']] = field_data
            index += field['len']
        else:
            # Field with no declared length: consume any leftover bytes
            # and stop parsing.
            field_data = data[index:]
            # Were there any remaining bytes?
            if field_data:  # If so, store them
                info[field['name']] = field_data
                index += len(field_data)
            break
    # If there are more bytes than the spec accounts for, the packet is invalid.
    if index < len(data):
        raise ValueError("Response packet was longer than expected; " "expected: {}, got: {} bytes".format(index, len(data)))
    # Apply post-parsing rules, if the spec defines any.
    if 'parsing' in packet:
        for parse_rule in packet['parsing']:
            # Only apply a rule if its raw source field is present.
            if parse_rule[0] in info:
                # Replace the raw data with the parse function's result.
                info[parse_rule[0]] = parse_rule[1](self, info)
    return info
def create_signed_url(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None):
    """Create a signed CloudFront URL valid only within the given parameters.

    :type url: str
    :param url: The URL of the protected object.

    :type keypair_id: str
    :param keypair_id: The keypair ID of the Amazon KeyPair used to sign the
        URL. This ID MUST correspond to the private key specified with
        private_key_file or private_key_string.

    :type expire_time: int
    :param expire_time: Unix-epoch expiry time of the URL; if omitted the
        URL never expires. Use time.time() + duration_in_sec.

    :type valid_after_time: int
    :param valid_after_time: If provided, the URL is not valid until after
        this unix-epoch time. Use time.time() + secs_until_valid.

    :type ip_address: str
    :param ip_address: If provided, only allows access from this IP. Use
        '192.168.0.10' for a single IP or '192.168.0.0/24' CIDR notation
        for a subnet.

    :type policy_url: str
    :param policy_url: If provided, allows wildcard globs in the URL, e.g.
        'http://example.com/media/*' grants access to everything under the
        media subdirectory. Otherwise only the exact 'url' is allowed.

    :type private_key_file: str or file object
    :param private_key_file: Filename of, or open file object for, the
        private key used for signing. Mutually exclusive with
        private_key_string.

    :type private_key_string: str
    :param private_key_string: The private key contents used for signing.
        Mutually exclusive with private_key_file.

    :rtype: str
    :return: The signed URL.
    """
    # Compute the signing parameters (Expires/Policy/Signature/Key-Pair-Id).
    params = self._create_signing_params(url=url, keypair_id=keypair_id,
                                         expire_time=expire_time,
                                         valid_after_time=valid_after_time,
                                         ip_address=ip_address,
                                         policy_url=policy_url,
                                         private_key_file=private_key_file,
                                         private_key_string=private_key_string)
    # Append the signing params to the URL, respecting an existing query string.
    if "?" in url:
        sep = "&"
    else:
        sep = "?"
    signed_url_params = []
    # Emit parameters in CloudFront's canonical order; only those actually
    # produced by the signing step are included.
    for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
        if key in params:
            param = "%s=%s" % (key, params[key])
            signed_url_params.append(param)
    signed_url = url + sep + "&".join(signed_url_params)
    return signed_url
def nearest_point(query, root_id, get_properties, dist_fun=euclidean_dist):
    """Find the point in the tree that minimizes the distance to the query.

    This method implements the nearest_point query for any structure
    implementing a kd-tree. The only requirement is a function capable of
    extracting the relevant properties from a node representation of the
    particular implementation.

    Args:
        query (tuple of float or int): Position of the query point.
        root_id: Identifier of the root in the kd-tree implementation.
        get_properties (function): Extracts a node's properties as the
            tuple ``(point, region, axis, active, left, right)`` — its
            point, bounding region, split axis, active flag, and left and
            right child identifiers. If the implementation does not use
            the active attribute, the function should always return True
            for it.
        dist_fun (function, optional): Distance function; euclidean
            distance by default.

    Returns:
        tuple: (identifier of the nearest node, distance to the query).
    """
    k = len(query)
    dist = math.inf
    nearest_node_id = None
    # stack_node: identifiers of nodes within a region that CONTAINS the query.
    # stack_look: identifiers of nodes within a region that does NOT contain
    # the query (they only need to be examined if their region could still
    # hold a closer point).
    stack_node = deque([root_id])
    stack_look = deque()
    while stack_node or stack_look:
        if stack_node:
            node_id = stack_node.pop()
            look_node = False
        else:
            node_id = stack_look.pop()
            look_node = True
        point, region, axis, active, left, right = get_properties(node_id)
        # Should we consider this node? When it lies in a region that does
        # not contain the query, prune it unless its region is within the
        # current best distance on every axis.
        if look_node:
            inside_region = True
            for i in range(k):
                inside_region &= interval_condition(query[i], region[i][0], region[i][1], dist)
            if not inside_region:
                continue
        # Only active nodes can improve the current best distance.
        if active:
            node_distance = dist_fun(query, point)
            if nearest_node_id is None or dist > node_distance:
                nearest_node_id = node_id
                dist = node_distance
        # Descend first into the child on the query's side of the split;
        # the opposite child goes onto the "look" stack for later pruning.
        if query[axis] < point[axis]:
            side_node = left
            side_look = right
        else:
            side_node = right
            side_look = left
        if side_node is not None:
            stack_node.append(side_node)
        if side_look is not None:
            stack_look.append(side_look)
    return nearest_node_id, dist
def get_TRM_star(C, ptrms_vectors, start, end):
    """Compute TRM* vectors and their magnitudes (for the delta_pal statistic).

    input: C, ptrms_vectors, start, end
    output: TRM_star (corrected vectors), x_star (their norms), both
    sliced to the inclusive [start, end] range.
    """
    n = len(ptrms_vectors)
    TRM_star = numpy.zeros((n, 3))
    # Row 0 stays the zero vector; each later row is the pTRM vector
    # corrected by the corresponding C term.
    for idx in range(1, n):
        TRM_star[idx] = ptrms_vectors[idx] + C[idx - 1]
    x_star = numpy.array([numpy.linalg.norm(row) for row in TRM_star])
    return TRM_star[start:end + 1], x_star[start:end + 1]
def normalize(vectors):
    """Normalize a matrix of row vectors (or a single vector) to unit length.

    Takes a shortcut when no zero vectors are present; otherwise uses a
    boolean mask so zero vectors map to zero instead of dividing by 0.

    Parameters
    ----------
    vectors : np.array
        The vectors to normalize.

    Returns
    -------
    vectors : np.array
        The input vectors, normalized to unit length.
    """
    if np.ndim(vectors) == 1:
        length = np.linalg.norm(vectors)
        return np.zeros_like(vectors) if length == 0 else vectors / length
    lengths = np.linalg.norm(vectors, axis=1)
    if not np.any(lengths == 0):
        # Fast path: every row has a nonzero norm.
        return vectors / lengths[:, None]
    out = np.zeros_like(vectors)
    mask = lengths > 0
    out[mask] = vectors[mask] / lengths[mask][:, None]
    return out
def get_token_details(self, show_listing_details=False, show_inactive=False):
    """Fetch the tokens available to trade on the Switcheo exchange.

    Example calls::

        get_token_details()
        get_token_details(show_listing_details=True)
        get_token_details(show_inactive=True)
        get_token_details(show_listing_details=True, show_inactive=True)

    The expected return value maps token symbols to their details, e.g.::

        'NEO': {'hash': 'c56f...7c9b', 'decimals': 8}

    :param show_listing_details: Whether to include token listing details.
    :type show_listing_details: bool
    :param show_inactive: Whether to include tokens no longer traded on
        the Switcheo Exchange.
    :type show_inactive: bool
    :return: Dictionary (JSON message) of tokens tradable on Switcheo.
    """
    return self.request.get(path='/exchange/tokens',
                            params={"show_listing_details": show_listing_details,
                                    "show_inactive": show_inactive})
def get_port_bindings(container_config, client_config):
    """Generate the input dictionary contents for the ``port_bindings`` argument.

    :param container_config: Container configuration.
    :type container_config: dockermap.map.config.container.ContainerConfiguration
    :param client_config: Client configuration.
    :type client_config: dockermap.map.config.client.ClientConfiguration
    :return: Dictionary of ports with mapped port and, if applicable, bind address.
    :rtype: dict[unicode | str, list[unicode | str | int | tuple]]
    """
    bindings = {}
    ipv4_interfaces = client_config.interfaces
    ipv6_interfaces = client_config.interfaces_ipv6
    # groupby requires its input sorted by the same key.
    sorted_exposes = sorted(container_config.exposes, key=_get_ex_port)
    for port, exposed_group in itertools.groupby(sorted_exposes, _get_ex_port):
        entries = list(_get_port_bindings(exposed_group, ipv4_interfaces, ipv6_interfaces))
        # Only keep ports that resolved to at least one binding.
        if entries:
            bindings[port] = entries
    return bindings
def path(self):
    """Absolute path to the directory on the camera's filesystem."""
    # The root directory has no parent; everything else is resolved
    # recursively through the parent's path.
    if self.parent is None:
        return "/"
    return os.path.join(self.parent.path, self.name)
def open_scene(f, kwargs=None):
    """Open the given JB_File in Maya.

    :param f: the file to open
    :type f: :class:`jukeboxcore.filesys.JB_File`
    :param kwargs: keyword arguments for the command ``maya.cmds.file``.
        The default flag ``open=True`` is always applied. E.g. pass
        ``{'force': True}`` to force the open command.
    :type kwargs: dict | None
    :returns: An action status whose return value is the opened maya file.
    :rtype: :class:`ActionStatus`
    :raises: None
    """
    if kwargs is None:
        kwargs = {}
    # 'open' is forced on; it overrides any caller-supplied value.
    kwargs.update({'open': True})
    filepath = f.get_fullpath()
    opened = cmds.file(filepath, **kwargs)
    msg = "Successfully opened file %s with arguments: %s" % (filepath, kwargs)
    return ActionStatus(ActionStatus.SUCCESS, msg, returnvalue=opened)
def focal(self):
    """Get the focal length in pixels for the camera.

    Returns
    -------
    focal : (2,) float
        Focal length in pixels.
    """
    if self._focal is None:
        # Derive the focal length from the field of view, one axis at a time.
        values = []
        for pixels, fov_deg in zip(self._resolution, self.fov):
            values.append((pixels / 2.0) / np.tan(np.radians(fov_deg / 2.0)))
        # Cache as float64 so repeated accesses reuse the same array.
        self._focal = np.asanyarray(values, dtype=np.float64)
    return self._focal
def compile(self, X, verbose=False):
    """Validate and prepare data-dependent parameters.

    Parameters
    ----------
    X : array-like
        Input dataset.
    verbose : bool
        Whether to show warnings.

    Returns
    -------
    self : the compiled object, allowing call chaining.

    Raises
    ------
    ValueError
        If the ``by`` variable indexes a feature beyond X's width.
    """
    for term in self._terms:
        # Propagate the caller's verbosity; the previous code hard-coded
        # verbose=False, silently ignoring the argument.
        term.compile(X, verbose=verbose)
    if self.by is not None and self.by >= X.shape[1]:
        raise ValueError('by variable requires feature {}, '
                         'but X has only {} dimensions'.format(self.by, X.shape[1]))
    return self
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup=False):
    """Format the output of ListAll by filling a template.

    :param lines: template line specs as (placeholder, line template, keys)
    :param resources: dotted resource paths into aws_config['services']
    :param aws_config: AWS configuration dictionary
    :param template: template text containing the line placeholders
    :param arguments: values substituted for the _ARG_<i>_ placeholders
    :param nodup: unused by this implementation; duplicate rendered lines
        are always removed via set() below
    :return: the filled-in template text
    """
    for line in lines:
        rendered = []
        for resource in resources:
            resource_path = resource.split('.')
            text = line[1]
            for key in line[2]:
                text = text.replace('_KEY_(' + key + ')',
                                    get_value_at(aws_config['services'], resource_path, key, True))
            rendered.append(text)
        # De-duplicate and sort the rendered lines before substitution.
        joined = '\n'.join(entry for entry in sorted(set(rendered)))
        template = template.replace(line[0], joined)
    for i, argument in enumerate(arguments):
        template = template.replace('_ARG_%d_' % i, argument)
    return template
def manual(cls, node, tval, ns=None):
    """Set the node's xsi:type attribute to the given XSD type name.

    Also adds the referenced prefix(es) to the node's prefix mapping.

    @param node: XML node.
    @type node: L{sax.element.Element}
    @param tval: XSD schema type name.
    @type tval: str
    @param ns: I{tval} XML namespace as (prefix, URI).
    @return: the specified node.
    @rtype: L{sax.element.Element}
    """
    type_attr = ":".join((Namespace.xsins[0], "type"))
    node.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
    if ns is None:
        # Unqualified type name; only the xsi prefix is needed.
        node.set(type_attr, tval)
        return node
    ns = cls.genprefix(node, ns)
    node.set(type_attr, ":".join((ns[0], tval)))
    node.addPrefix(ns[0], ns[1])
    return node
def disable_paging(self, command="terminal length 999", delay_factor=1):
    """Disable paging, defaulting to a Cisco CLI method.

    :param command: CLI command that turns paging off
    :param delay_factor: multiplier applied to the pre-send sleep
    :return: device output read up to the prompt (ANSI-stripped if needed)
    """
    delay_factor = self.select_delay_factor(delay_factor)
    # Brief pause before clearing so any pending output lands in the buffer.
    time.sleep(delay_factor * 0.1)
    self.clear_buffer()
    command = self.normalize_cmd(command)
    log.debug("In disable_paging")
    log.debug("Command: {0}".format(command))
    self.write_channel(command)
    output = self.read_until_prompt()
    # Some platforms emit ANSI escape sequences; strip them when configured.
    if self.ansi_escape_codes:
        output = self.strip_ansi_escape_codes(output)
    log.debug("{0}".format(output))
    log.debug("Exiting disable_paging")
    return output
def _pipe_stdio(cls, sock, stdin_isatty, stdout_isatty, stderr_isatty, handle_stdin):
    """Handle stdio redirection in the case of pipes and/or mixed pipes and ttys.

    Yields a finalizer callable that flushes and tears down the redirected
    streams; the caller invokes it before sending an exit chunk.
    """
    # Pair each output chunk type with whether its stream is a tty.
    stdio_writers = ((ChunkType.STDOUT, stdout_isatty), (ChunkType.STDERR, stderr_isatty))
    types, ttys = zip(*(stdio_writers))

    @contextmanager
    def maybe_handle_stdin(want):
        if want:
            # TODO: Launching this thread pre-fork to handle @rule input currently results
            # in an unhandled SIGILL in `src/python/pants/engine/scheduler.py, line 313 in pre_fork`.
            # More work to be done here in https://github.com/pantsbuild/pants/issues/6005
            with NailgunStreamStdinReader.open(sock, stdin_isatty) as fd:
                yield fd
        else:
            # No stdin wanted: hand back /dev/null so reads see EOF.
            with open('/dev/null', 'rb') as fh:
                yield fh.fileno()

    with maybe_handle_stdin(handle_stdin) as stdin_fd, NailgunStreamWriter.open_multi(sock, types, ttys) as ((stdout_fd, stderr_fd), writer), stdio_as(stdout_fd=stdout_fd, stderr_fd=stderr_fd, stdin_fd=stdin_fd):
        # N.B. This will be passed to and called by the `DaemonExiter` prior to sending an
        # exit chunk, to avoid any socket shutdown vs write races.
        stdout, stderr = sys.stdout, sys.stderr

        def finalizer():
            try:
                stdout.flush()
                stderr.flush()
            finally:
                time.sleep(.001)
                # HACK: Sleep 1ms in the main thread to free the GIL.
                writer.stop()
                writer.join()
                stdout.close()
                stderr.close()
        yield finalizer
def cira_stretch(img, **kwargs):
    """Logarithmic stretch adapted to human vision.

    Applicable only for visible channels.
    """
    LOG.debug("Applying the cira-stretch")

    def func(band_data):
        # Reference point and normalisation factor of the log stretch.
        log_root = np.log10(0.0223)
        # Scale reflectances from percent to 0..1 in place.
        band_data *= 0.01
        # Clip away non-positive values before taking the logarithm.
        clipped = band_data.clip(np.finfo(float).eps)
        return (xu.log10(clipped) - log_root) / ((1.0 - log_root) * 0.75)

    return apply_enhancement(img.data, func)
def get_logging_dir(appname='default'):
    """Return the real path of the logging directory for *appname*.

    The default log dir lives under the system resource directory, but the
    utool global cache lets the user override where a specific app's logs
    are stored.

    Returns:
        log_dir_realpath (str): real path to the logging directory
    """
    from utool._internal import meta_util_cache
    from utool._internal import meta_util_cplat
    from utool import util_cache
    if appname is None or appname == 'default':
        appname = util_cache.get_default_appname()
    fallback = join(meta_util_cplat.get_resource_dir(), appname, 'logs')
    # Check the global cache for a custom logging dir, otherwise use the default.
    cached_dir = meta_util_cache.global_cache_read(logdir_cacheid, appname=appname, default=fallback)
    return realpath(cached_dir)
def _filter_orientdb_simple_optional_edge(query_metadata_table, optional_edge_location, inner_location_name):
    """Return an Expression that is False for rows that don't follow the @optional spec.

    OrientDB does not filter correctly within optionals: a result where the
    optional edge DOES EXIST is returned regardless of whether the inner
    filter is satisfied. To mitigate this, a final filter rejects such
    results. A valid result must satisfy either:
      - the location within the optional exists (its filter already applied), or
      - the optional edge field does not exist at the optional traverse's root.
    So if the inner location was never visited, the corresponding edge field
    must not exist at all.

    Example:
        A MATCH traversal starting at `Animal___1` following the optional
        edge `out_Animal_ParentOf` to `Animal__out_Animal_ParentOf___1`
        yields the filter:
            (Animal___1.out_Animal_ParentOf IS null)
            OR (Animal___1.out_Animal_ParentOf.size() = 0)
            OR (Animal__out_Animal_ParentOf___1 IS NOT null)
        Here `optional_edge_location` is `Animal___1.out_Animal_ParentOf`.

    Args:
        query_metadata_table: QueryMetadataTable with all metadata collected
            during query processing, including location metadata (e.g. which
            locations are folded or optional).
        optional_edge_location: Location of the optional edge field.
        inner_location_name: string name of the location within the
            corresponding optional traverse.

    Returns:
        Expression that evaluates to False for rows violating @optional.
    """
    # "Inner location exists" half of the disjunction.
    inner_local_field = LocalField(inner_location_name)
    inner_location_existence = BinaryComposition(u'!=', inner_local_field, NullLiteral)
    # The optional_edge_location here actually refers to the edge field itself.
    # This is definitely non-standard, but required to get the proper semantics.
    # To get its type, we construct the location of the vertex field on the
    # other side of the edge.
    vertex_location = (optional_edge_location.at_vertex().navigate_to_subpath(optional_edge_location.field))
    location_type = query_metadata_table.get_location_info(vertex_location).type
    edge_context_field = GlobalContextField(optional_edge_location, location_type)
    edge_field_non_existence = filter_edge_field_non_existence(edge_context_field)
    # Either the edge is absent, or the inner location was actually reached.
    return BinaryComposition(u'||', edge_field_non_existence, inner_location_existence)
def logpdf(self, mu):
    """Log PDF for the Laplace prior.

    Parameters
    ----------
    mu : float
        Latent variable over which the prior is formed.

    Returns
    -------
    float : log p(mu) under Laplace(loc0, scale0).
    """
    # Apply the optional reparameterisation before evaluating the density.
    value = mu if self.transform is None else self.transform(mu)
    return ss.laplace.logpdf(value, loc=self.loc0, scale=self.scale0)
def import_from_json(self, data):
    """Replace the current roster with an export_as_json-compatible dict.

    No events are fired during this activity; after it completes the whole
    roster contents are exchanged with the contents from `data`. No data is
    transferred to the server — this is intended for roster versioning (see
    the :class:`Service` docs).
    """
    self.version = data.get("ver", None)
    self.items.clear()
    self.groups.clear()
    for jid_str, item_data in data.get("items", {}).items():
        jid = structs.JID.fromstr(jid_str)
        entry = Item(jid)
        entry.update_from_json(item_data)
        self.items[jid] = entry
        # Index the item under every group it belongs to.
        for group_name in entry.groups:
            self.groups.setdefault(group_name, set()).add(entry)
def _set_get_flexports(self, v, load=False):
    """Setter method for get_flexports, mapped from YANG variable
    /brocade_hardware_rpc/get_flexports (rpc).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_get_flexports is considered a private method. Backends looking
    to populate this variable should do so by calling
    thisObj._set_get_flexports() directly.
    """
    # Allow wrapped values to unwrap themselves into their underlying type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Machine-generated YANG binding: wrap the value in the dynamic class
        # that enforces the rpc schema for get-flexports.
        t = YANGDynClass(v, base=get_flexports.get_flexports, is_leaf=True, yang_name="get-flexports", rest_name="get-flexports", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'connector_group_show'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Surface the expected generated type so callers can diagnose mismatches.
        raise ValueError({'error-string': """get_flexports must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_flexports.get_flexports, is_leaf=True, yang_name="get-flexports", rest_name="get-flexports", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'connector_group_show'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='rpc', is_config=True)""", })
    self.__get_flexports = t
    # Notify the parent object of the change, when supported.
    if hasattr(self, '_set'):
        self._set()
def run(cmd, env=None, return_object=False, block=True, cwd=None, verbose=False, nospin=False, spinner_name=None, combine_stderr=True, display_limit=200, write_to_stdout=True,):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance.
    :param bool block: When False, returns a potentially still-running
        :class:`subprocess.Popen` instance.
    :param str cwd: Current working directory context for the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: Name of the spinner to use, defaults to bouncingBar.
    :param bool combine_stderr: Optionally merge stdout and stderr in the
        subprocess; forced off when blocking (see below).
    :param int display_limit: Max width of output lines to display when using
        a spinner. NOTE(review): not referenced in this body — presumably
        consumed downstream by `_create_subprocess`; confirm.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking
        subprocess can cause errors in some cases and may not be ideal.
        Consider disabling this functionality.
    """
    # Start from a copy of the current environment and layer overrides on top.
    _env = os.environ.copy()
    if env:
        _env.update(env)
    # On Python 2 the environment must be byte strings; on 3, fs-encoded str.
    if six.PY2:
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    # Python 2 requires the command (string or sequence) encoded to bytes.
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # Blocking (or non-object) runs never merge the streams.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(spinner_name=spinner_name, start_text=start_text, nospin=nospin, write_to_stdout=write_to_stdout,) as sp:
        return _create_subprocess(cmd, env=_env, return_object=return_object, block=block, cwd=cwd, verbose=verbose, spinner=sp, combine_stderr=combine_stderr, start_text=start_text, write_to_stdout=True,)
def ParseOptions(self, options):
    """Parses tool specific options.

    Ordering matters in this method: "list" options (e.g. ``--hashers list``)
    are resolved before required options so that a listing request never
    fails on a missing required argument.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    # The extraction options are dependent on the data location.
    helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=['data_location'])
    self._ReadParserPresetsFromFile()
    # The output modules options are dependent on the preferred language
    # and preferred time zone options.
    self._ParseTimezoneOption(options)
    argument_helper_names = ['artifact_definitions', 'hashers', 'language', 'parsers']
    helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=argument_helper_names)
    # 'list' is a magic value: the tool should print available choices and exit.
    self.list_hashers = self._hasher_names_string == 'list'
    self.list_language_identifiers = self._preferred_language == 'list'
    self.list_parsers_and_plugins = self._parser_filter_expression == 'list'
    # Check the list options first otherwise required options will raise.
    if (self.list_hashers or self.list_language_identifiers or self.list_parsers_and_plugins or self.list_timezones):
        return
    # Check output modules after the other listable options, otherwise
    # it could raise with "requires an output file".
    helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=['output_modules'])
    self.list_output_modules = self._output_format == 'list'
    if self.list_output_modules:
        return
    self._ParseInformationalOptions(options)
    argument_helper_names = ['extraction', 'status_view']
    helpers_manager.ArgumentHelperManager.ParseOptions(options, self, names=argument_helper_names)
    self._ParseLogFileOptions(options)
    self._ParseStorageMediaOptions(options)
    self._ParsePerformanceOptions(options)
    self._ParseProcessingOptions(options)
    self._storage_file_path = getattr(options, 'storage_file', None)
    if not self._storage_file_path:
        # No explicit storage file was given: derive a default name.
        self._storage_file_path = self._GenerateStorageFileName()
    self._output_filename = getattr(options, 'write', None)
    if not self._output_filename:
        raise errors.BadConfigOption(('Output format: {0:s} requires an output file ' '(-w OUTPUT_FILE)').format(self._output_format))
    if os.path.exists(self._output_filename):
        # Refuse to clobber an existing output file.
        raise errors.BadConfigOption('Output file already exists: {0:s}.'.format(self._output_filename))
    self._EnforceProcessMemoryLimit(self._process_memory_limit)
    self._output_module = self._CreateOutputModule(options)
def _get_text ( self , text ) :
"""Returns the text content of ` text ` , with all multi - character tokens
replaced with a single character . Substitutions are recorded
in self . _ substitutes .
: param text : text to get content from
: type text : ` Text `
: rtype : ` str `""" | tokens = text . get_tokens ( )
for i , token in enumerate ( tokens ) :
if len ( token ) > 1 :
char = chr ( self . _char_code )
substitute = self . _substitutes . setdefault ( token , char )
if substitute == char :
self . _char_code += 1
tokens [ i ] = substitute
return self . _tokenizer . joiner . join ( tokens ) |
def authenticate(self, request):
    """Try each backend configured in :attr:`authentication` in order and
    return the first client one of them yields, or ``None`` when every
    backend declines the request."""
    attempts = (backend().authenticate(request) for backend in self.authentication)
    return next((client for client in attempts if client is not None), None)
def wait_for_stable_cluster(hosts, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit):
    """Block the caller until the cluster can be considered stable.

    The cluster is polled every ``check_interval`` seconds; it counts as
    stable once ``check_count`` consecutive checks report no under-replicated
    partitions and no missing brokers.

    :param hosts: list of brokers ip addresses
    :type hosts: list of strings
    :param jolokia_port: HTTP port for Jolokia
    :type jolokia_port: integer
    :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
    :type jolokia_prefix: string
    :param check_interval: seconds to wait between checks
    :type check_interval: integer
    :param check_count: consecutive positive checks required
    :type check_count: integer
    :param unhealthy_time_limit: max seconds to wait for stability before
        raising
    :type unhealthy_time_limit: integer
    """
    max_checks = int(math.ceil(unhealthy_time_limit / check_interval))
    stable_counter = 0
    checks_done = 0
    while True:
        partitions, brokers = read_cluster_status(hosts, jolokia_port, jolokia_prefix)
        if partitions or brokers:
            # Any problem resets the streak of healthy checks.
            stable_counter = 0
        else:
            stable_counter += 1
        print("Under replicated partitions: {p_count}, missing brokers: {b_count} ({stable}/{limit})".format(p_count=partitions, b_count=brokers, stable=stable_counter, limit=check_count))
        if stable_counter >= check_count:
            print("The cluster is stable")
            return
        if checks_done >= max_checks:
            raise WaitTimeoutException()
        checks_done += 1
        time.sleep(check_interval)
def reduce(self, values, inplace=True):
    """Reduce the distribution to the context of the given variable values.

    For a canonical form C(X, Y; K, h, g) partitioned over the kept
    variables X and the reduced variables Y, fixing Y = y yields:

    .. math:: K' = K_{XX}
    .. math:: h' = h_X - K_{XY} y
    .. math:: g' = g + h_Y^T y - 0.5 \\, y^T K_{YY} y

    Parameters
    ----------
    values: list, array-like
        A list of tuples of the form (variable name, variable value).
    inplace: boolean
        If True (default) this factor is modified and None is returned,
        otherwise a new reduced CanonicalDistribution is returned.

    Returns
    -------
    CanonicalDistribution or None
        None when inplace=True, the reduced distribution otherwise.
    """
    if not isinstance(values, (list, tuple, np.ndarray)):
        raise TypeError("variables: Expected type list or array-like, "
                        "got type {var_type}".format(var_type=type(values)))
    reduced_vars = [var for var, _ in values]
    if any(var not in self.variables for var in reduced_vars):
        raise ValueError("Variable not in scope.")
    phi = self if inplace else self.copy()
    # Positions of the surviving variables (original ordering preserved) ...
    keep = [pos for pos, var in enumerate(self.variables) if var not in reduced_vars]
    # ... and of the fixed ones, in the order their values were supplied.
    drop = [self.variables.index(var) for var in reduced_vars]
    K_keep = self.K[np.ix_(keep, keep)]
    K_cross = self.K[np.ix_(keep, drop)]
    K_drop = self.K[np.ix_(drop, drop)]
    h_keep = self.h[keep]
    h_drop = self.h[drop]
    # Column vector of the values assigned to the reduced variables.
    y = np.array([value for _, value in values]).reshape(len(drop), 1)
    phi.variables = [self.variables[pos] for pos in keep]
    phi.K = K_keep
    phi.h = h_keep - np.dot(K_cross, y)
    phi.g = self.g + (np.dot(h_drop.T, y) - 0.5 * np.dot(np.dot(y.T, K_drop), y))[0][0]
    if not inplace:
        return phi
def _generate_phrases(self, sentences):
    """Build the set of contender phrases for a text document.

    :param sentences: List of strings, each one sentence of the text.
    :return: Set of string tuples, each tuple the words of one contender
        phrase.
    """
    phrases = set()
    for sentence in sentences:
        lowered = [token.lower() for token in wordpunct_tokenize(sentence)]
        phrases.update(self._get_phrase_list_from_words(lowered))
    return phrases
def sim_givenAdj(self, Adj: np.array, model='line'):
    """Simulate data given only an adjacency matrix and a model.

    The model is a bivariate functional dependence.  The adjacency matrix
    needs to be acyclic.  Row ``gp`` of ``Adj`` lists the parents of node
    ``gp`` (source nodes are the rows summing to zero).

    Parameters
    ----------
    Adj
        adjacency matrix of shape (dim, dim).
    model
        key into ``self.funcs`` selecting the coupling function.

    Returns
    -------
    Data array of shape (n_samples, dim).
    """
    # nice examples -- reference settings kept by the author, unused below
    examples = [{'func': 'sawtooth', 'gdist': 'uniform', 'sigma_glob': 1.8, 'sigma_noise': 0.1}]
    # nr of samples
    n_samples = 100
    # noise: sigma_glob sets the width of the source distribution,
    # sigma_noise the additive noise added to every child node
    sigma_glob = 1.8
    sigma_noise = 0.4
    # coupling function / model
    func = self.funcs[model]
    # distribution used for source nodes ('gaussian' or 'uniform')
    sourcedist = 'uniform'
    # loop over source nodes
    dim = Adj.shape[0]
    X = np.zeros((n_samples, dim))
    # source nodes have no parents themselves
    nrpar = 0
    children = list(range(dim))
    parents = []
    for gp in range(dim):
        if Adj[gp, :].sum() == nrpar:
            if sourcedist == 'gaussian':
                X[:, gp] = np.random.normal(0, sigma_glob, n_samples)
            if sourcedist == 'uniform':
                X[:, gp] = np.random.uniform(-sigma_glob, sigma_glob, n_samples)
            parents.append(gp)
            children.remove(gp)
    # all of the following guarantees for 3 dim, that we generate the data
    # in the correct sequence:
    # first compute all nodes that have 1 parent, then those with 2 parents
    children_sorted = []
    nrchildren_par = np.zeros(dim)
    nrchildren_par[0] = len(parents)
    for nrpar in range(1, dim):
        # loop over child nodes
        for gp in children:
            if Adj[gp, :].sum() == nrpar:
                children_sorted.append(gp)
                nrchildren_par[nrpar] += 1
    # if there is more than a child with a single parent
    # order these children (there are two in three dim)
    # by distance to the source/parent
    if nrchildren_par[1] > 1:
        if Adj[children_sorted[0], parents[0]] == 0:
            # swap the first two entries (NOTE(review): ``help`` shadows the
            # builtin here; left unchanged on purpose)
            help = children_sorted[0]
            children_sorted[0] = children_sorted[1]
            children_sorted[1] = help
    for gp in children_sorted:
        for g in range(dim):
            if Adj[gp, g] > 0:
                # each parent contributes through the coupling function,
                # weighted by 1/number-of-parents
                X[:, gp] += 1. / Adj[gp, :].sum() * func(X[:, g])
        X[:, gp] += np.random.normal(0, sigma_noise, n_samples)
    # fig = pl.figure()
    # fig.add_subplot(311)
    # pl.plot(X[:, 0], X[:, 1], '.', mec='white')
    # fig.add_subplot(312)
    # pl.plot(X[:, 1], X[:, 2], '.', mec='white')
    # fig.add_subplot(313)
    # pl.plot(X[:, 2], X[:, 0], '.', mec='white')
    # pl.show()
    return X
def onlasso(self, verts):
    """Apply the currently selected theme to the lasso-drawn region.

    :param verts: the vertices selected by the lasso
    :return: nothing; updates the selection array so the lassoed region now
        has the selected theme, then redraws the canvas
    """
    lasso_path = path.Path(verts)
    inside = lasso_path.contains_points(self.pix, radius=1)
    # Snapshot the current state first so the edit can be undone.
    self.history.append(self.selection_array.copy())
    self.selection_array = self.updateArray(self.selection_array, inside, self.solar_class_var.get())
    self.mask.set_data(self.selection_array)
    self.fig.canvas.draw_idle()
def from_html_one(html_code, **kwargs):
    """Generate a PrettyTable from a string of HTML code which contains only
    a single <table>.

    :param html_code: HTML source expected to hold exactly one <table>.
    :raises Exception: if the HTML does not contain exactly one table.
    :returns: the single parsed table.
    """
    tables = from_html(html_code, **kwargs)
    # Explicit check instead of ``assert``: assertions are stripped when
    # Python runs with -O, which would silently disable this validation.
    if len(tables) != 1:
        raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
    return tables[0]
def list(self, status=values.unset, iccid=values.unset, rate_plan=values.unset, e_id=values.unset, sim_registration_code=values.unset, limit=None, page_size=None):
    """Lists SimInstance records from the API as a list.

    Unlike stream(), this operation is eager and loads up to ``limit``
    records into memory before returning.

    :param unicode status: The status
    :param unicode iccid: The iccid
    :param unicode rate_plan: The rate_plan
    :param unicode e_id: The e_id
    :param unicode sim_registration_code: The sim_registration_code
    :param int limit: Upper limit for the number of records to return;
        list() guarantees never to return more than limit. Default is no
        limit.
    :param int page_size: Number of records to fetch per request; defaults
        to 50. If only a limit is defined, the most efficient page size is
        used, i.e. min(limit, 1000).
    :returns: list of results, at most ``limit`` long
    :rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
    """
    records = self.stream(
        status=status,
        iccid=iccid,
        rate_plan=rate_plan,
        e_id=e_id,
        sim_registration_code=sim_registration_code,
        limit=limit,
        page_size=page_size,
    )
    # Materialize the lazy stream eagerly.
    return [record for record in records]
def zip(self, other):
    """Zip two sequences, unifying the corresponding points with ``%``."""
    paired = (first % second for first, second in zip(self, other))
    return self.__class__(paired)
def check_dir(directory, newly_created_files):
    """Return the list of header-check failure messages under ``directory``.

    Walks the tree and runs ``check_header`` on every ``*.py`` file except
    ``__init__.py``; files present in ``newly_created_files`` are checked
    with the rules for newly created files.
    """
    failures = []
    for root, _, files in os.walk(directory):
        for name in files:
            if not name.endswith('.py') or os.path.basename(name) == '__init__.py':
                continue
            file_path = os.path.join(root, name)
            try:
                check_header(file_path, file_path in newly_created_files)
            except HeaderCheckFailure as failure:
                failures.append(failure.message)
    return failures
def get(path, objectType, user=None):
    '''Get the ACL of an object, optionally filtered to a single user.

    Args:
        path: The path to the object
        objectType: The type of object (FILE, DIRECTORY, REGISTRY)
        user: A user name to filter by

    Returns (dict): A dictionary containing the ACL

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' win_dacl.get c:\\temp directory
    '''
    # Note: 'Path' reports the path exactly as the caller supplied it, even
    # though the lookup below uses the normalized form.
    result = {'Path': path, 'ACLs': []}
    sid_info = _getUserSid(user)
    if path and objectType:
        constants = daclConstants()
        type_bit = constants.getObjectTypeBit(objectType)
        normalized_path = constants.processPath(path, type_bit)
        dacl = _get_dacl(normalized_path, type_bit)
        if dacl:
            for index in range(dacl.GetAceCount()):
                ace = dacl.GetAce(index)
                # No sid filter means include every ACE.
                if not sid_info['sid'] or ace[2] == sid_info['sid']:
                    result['ACLs'].append(_ace_to_text(ace, type_bit))
    return result
def EnumType(enum_class, nested_type=_Undefined, **kwargs):
    """Create and return an :class:`EnumCDataType` or :class:`EnumElementType`,
    depending on the type of ``nested_type``.

    If ``nested_type`` is an :class:`AbstractCDataType` or omitted, an
    :class:`EnumCDataType` is constructed; otherwise an
    :class:`EnumElementType`.  All arguments are forwarded to the chosen
    class' constructor.

    .. versionadded:: 0.10

    .. deprecated:: 0.10
        Introduced only to ease the 0.10 transition from a unified
        :class:`EnumType` to the split :class:`EnumCDataType` and
        :class:`EnumElementType`; will be removed in 1.0.
    """
    if nested_type is _Undefined:
        return EnumCDataType(enum_class, **kwargs)
    factory = EnumCDataType if isinstance(nested_type, AbstractCDataType) else EnumElementType
    return factory(enum_class, nested_type, **kwargs)
def luminance(mycolour):
    r"""Determine the (relative) luminance of a colour.

    Luminance is a measure of how 'bright' a colour is, normalized so that
    the luminance of white is 1 and of black is 0, computed as:

    \[ lum = \sqrt{0.299 r^2 + 0.587 g^2 + 0.114 b^2} \]

    Args:
        mycolour (colourettu.Colour): a colour, or anything the ``Colour``
            constructor accepts (e.g. ``"#FFF"``).

    Raises:
        TypeError: if ``mycolour`` cannot be converted to a Colour.
    """
    # isinstance replaces the original exact-type comparison that built a
    # throwaway Colour() just to compare types.
    if isinstance(mycolour, Colour):
        colour = mycolour
    else:
        try:
            colour = Colour(mycolour)
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed by the conversion attempt.
        except Exception:
            raise TypeError("Must supply a colourettu.Colour")
    red, green, blue = colour.normalized_rgb()
    return math.sqrt(0.299 * red ** 2 + 0.587 * green ** 2 + 0.114 * blue ** 2)
def verify_password(self, user, password):
    """Return ``True`` if ``password`` is valid for the specified user.

    As a side effect, the stored hash is re-hashed and saved whenever the
    password context reports the hashing algorithm has changed.

    :param user: The user to verify against
    :param password: The plaintext password to verify
    """
    # Legacy double-hashed records are verified against the HMAC of the
    # plaintext; all other records against the plaintext itself.
    candidate = self.get_hmac(password) if self.use_double_hash(user.password) else password
    verified = self.security.pwd_context.verify(candidate, user.password)
    if verified and self.security.pwd_context.needs_update(user.password):
        user.password = password
        self.user_manager.save(user)
    return verified
def _GetUrl ( self , url_id , cache , database ) :
"""Retrieves an URL from a reference to an entry in the from _ visit table .
Args :
url _ id ( str ) : identifier of the visited URL .
cache ( SQLiteCache ) : cache .
database ( SQLiteDatabase ) : database .
Returns :
str : URL and hostname .""" | url_cache_results = cache . GetResults ( 'url' )
if not url_cache_results :
result_set = database . Query ( self . URL_CACHE_QUERY )
cache . CacheQueryResults ( result_set , 'url' , 'id' , ( 'url' , 'rev_host' ) )
url_cache_results = cache . GetResults ( 'url' )
url , reverse_host = url_cache_results . get ( url_id , [ '' , '' ] )
if not url :
return ''
hostname = self . _ReverseHostname ( reverse_host )
return '{0:s} ({1:s})' . format ( url , hostname ) |
def lab_to_xyz(l, a=None, b=None, wref=_DEFAULT_WREF):
    """Convert the color from CIE L*a*b* to CIE 1931 XYZ.

    Parameters:
      :l: The L component [0...100], or an (l, a, b) list/tuple.
      :a: The a component [-1...1]
      :b: The b component [-1...1]
      :wref: The whitepoint reference, default is 2° D65.

    Returns:
      The color as an (x, y, z) tuple, y and z in [0...1].

    >>> '(%g, %g, %g)' % lab_to_xyz(66.9518, 0.43084, 0.739692)
    '(0.488941, 0.365682, 0.0448137)'
    """
    # Allow passing a single (l, a, b) sequence as the first argument.
    if type(l) in (list, tuple):
        l, a, b = l
    fy = (l + 16) / 116
    fx = a / 5.0 + fy
    fz = fy - b / 2.0
    xyz = []
    for component, white in zip((fx, fy, fz), wref):
        # Invert the cube-root companding, with the linear segment for
        # small values, then scale by the whitepoint.
        if component > 0.206893:
            linear = component ** 3
        else:
            linear = (component - _sixteenHundredsixteenth) / 7.787
        xyz.append(linear * white)
    return tuple(xyz)
def add_email_addresses(self, addresses=None):
    """Add the email addresses in ``addresses`` to the authenticated
    user's account.

    :param list addresses: (optional), email addresses to be added
    :returns: list of email addresses
    """
    # ``addresses=None`` replaces the mutable-default ``[]`` anti-pattern;
    # both defaults are falsy, so callers observe identical behavior.
    # The local was also renamed so it no longer shadows the ``json`` module.
    result = []
    if addresses:
        url = self._build_url('user', 'emails')
        result = self._json(self._post(url, data=addresses), 201)
    return result
def wage(return_X_y=True):
    """wage dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y),
        otherwise returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y) OR Pandas DataFrame

    Notes
    -----
    X contains the year, age and education of each sampled person.
    The education category has been transformed to integers.
    y contains the wage.

    Source:
    https://github.com/JWarmenhoven/ISLR-python/blob/master/Notebooks/Data/Wage.csv
    """
    # y is real-valued; LinearGAM is the recommended model.
    frame = pd.read_csv(PATH + '/wage.csv', index_col=0)
    if not return_X_y:
        return frame
    X = frame[['year', 'age', 'education']].values
    # Encode the education category column as integer codes.
    X[:, -1] = np.unique(X[:, -1], return_inverse=True)[1]
    y = frame['wage'].values
    return _clean_X_y(X, y)
def rfcformat(dt, localtime=False):
    """Return the RFC822-formatted representation of a datetime object.

    :param datetime dt: The datetime.
    :param bool localtime: If ``True``, return the date relative to the
        local timezone instead of UTC, displaying the proper offset,
        e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
    """
    if localtime:
        return local_rfcformat(dt)
    return formatdate(timegm(dt.utctimetuple()))
def predict(self, name, payload, params=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):
    """Perform an online prediction; the result is returned directly in the
    response.

    Available for the following ML problems, with their expected request
    payloads:

    - Image Classification / Image Object Detection - image in .JPEG, .GIF
      or .PNG format, image_bytes up to 30MB.
    - Text Classification - TextSnippet, content up to 10,000 characters,
      UTF-8 encoded.
    - Text Extraction - TextSnippet, content up to 30,000 characters,
      UTF-8 NFC encoded.
    - Translation - TextSnippet, content up to 25,000 characters, UTF-8
      encoded.
    - Tables - Row, with column values matching the columns of the model,
      up to 5MB.
    - Text Sentiment - TextSnippet, content up to 500 characters, UTF-8
      encoded.

    Example:
        >>> from google.cloud import automl_v1beta1
        >>> client = automl_v1beta1.PredictionServiceClient()
        >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
        >>> payload = {}
        >>> response = client.predict(name, payload)

    Args:
        name (str): Name of the model requested to serve the prediction.
        payload (Union[dict, ~google.cloud.automl_v1beta1.types.ExamplePayload]):
            Required. Payload to perform a prediction on; must match the
            problem type the model was trained to solve. A dict must have
            the same form as the ``ExamplePayload`` protobuf message.
        params (dict[str -> str]): Additional domain-specific parameters,
            any string up to 25000 characters long. For Image
            Classification: ``score_threshold`` (float, 0.0-1.0, default
            0.5), the minimum confidence for returned results. For Image
            Object Detection: ``score_threshold`` (float, default 0.5) and
            ``max_bounding_box_count`` (int64, default 100, may be limited
            by the server).
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None``, requests are not retried.
        timeout (Optional[float]): Seconds to wait for the request to
            complete; with ``retry`` the timeout applies to each attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.cloud.automl_v1beta1.types.PredictResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to
            a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic; the wrapped
    # callable is cached in _inner_api_calls so this happens once per client.
    if "predict" not in self._inner_api_calls:
        self._inner_api_calls["predict"] = google.api_core.gapic_v1.method.wrap_method(self.transport.predict, default_retry=self._method_configs["Predict"].retry, default_timeout=self._method_configs["Predict"].timeout, client_info=self._client_info, )
    request = prediction_service_pb2.PredictRequest(name=name, payload=payload, params=params)
    if metadata is None:
        metadata = []
    # Copy so the caller's metadata sequence is not mutated below.
    metadata = list(metadata)
    # NOTE(review): GAPIC generator boilerplate -- the AttributeError guard
    # exists for request fields that do not support routing headers.
    try:
        routing_header = [("name", name)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
        metadata.append(routing_metadata)
    return self._inner_api_calls["predict"](request, retry=retry, timeout=timeout, metadata=metadata)
def _get_file_creation_time(file_path):
    """Return the creation time of the file at ``file_path`` in Microsoft
    FILETIME format (100-nanosecond intervals since 1601-01-01 UTC, see
    https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284.aspx),
    packed as an 8-byte unsigned integer bytearray.

    Raises:
        FileTimeOutOfRangeException: if the timestamp falls outside the
            range a FILETIME can represent (years 1601-9999).
    """
    ctime = getctime(file_path)
    # FILETIME cannot represent times before 1601-01-01 (-11644473600 in
    # Unix time) or from year 10000 onward (253402300800).
    if ctime < -11644473600 or ctime >= 253402300800:
        raise FileTimeOutOfRangeException(ctime)
    creation_datetime = datetime.utcfromtimestamp(ctime)
    epoch_offset = creation_datetime - datetime(1601, 1, 1)
    seconds_from_epoch = _convert_timedelta_to_seconds(epoch_offset)
    # FILETIME counts 100ns ticks.
    creation_time_filetime = int(seconds_from_epoch * (10 ** 7))
    file_creation_time = bytearray(8)
    # Explicit little-endian format: FILETIME is a little-endian Windows
    # structure, whereas the original native-order b"Q" would byte-swap the
    # value on big-endian hosts.
    pack_into(b"<Q", file_creation_time, 0, creation_time_filetime)
    return file_creation_time
def _comparator_approximate(filter_value, tested_value):
    """Test if the filter value is nearly equal to the tested value.

    If the tested value is a string or an iterable of strings, their
    lower-case forms are compared first.
    """
    folded = filter_value.lower()
    if is_string(tested_value):
        # Lower case comparison
        return _comparator_eq(folded, tested_value.lower())
    if hasattr(tested_value, "__iter__"):
        # Compare against the lower-cased string members first.
        lowered = [item.lower() for item in tested_value if is_string(item)]
        if _comparator_eq(folded, lowered):
            return True
    # Fall back to comparing the raw values.
    return _comparator_eq(filter_value, tested_value) or _comparator_eq(folded, tested_value)
def _fix_up_fields(cls):
    """Attach names to all of the Endpoint's Arguments.

    Called at class-declaration time by Endpoint's metaclass; the approach
    follows Google's NDB implementation.
    """
    cls._arguments = {}
    # Skip the base classes declared in this module itself.
    if cls.__module__ == __name__:
        return
    for attr_name in set(dir(cls)):
        candidate = getattr(cls, attr_name, None)
        if not isinstance(candidate, BaseArgument):
            continue
        if attr_name.startswith('_'):
            raise TypeError("Endpoint argument %s cannot begin with "
                            "an underscore, as these attributes are reserved "
                            "for instance variables of the endpoint object, "
                            "rather than for arguments to your HTTP Endpoint." % attr_name)
        candidate._fix_up(cls, attr_name)
        cls._arguments[candidate.name] = candidate
def add_parameters(self, template_file, in_file=None, pst_path=None):
    """add new parameters to a control file

    Parameters
    ----------
    template_file : str
        template file
    in_file : str (optional)
        model input file. If None, template_file.replace('.tpl','') is used
    pst_path : str (optional)
        the path to append to the template_file and in_file in the control
        file. If not None, then any existing path in front of the template
        or in file is split off and pst_path is prepended. Default is None

    Returns
    -------
    new_par_data : pandas.DataFrame
        the data for the new parameters that were added. If no new
        parameters are in the new template file, returns None

    Note
    ----
    populates the new parameter information with default values
    """
    assert os.path.exists(template_file), "template file '{0}' not found".format(template_file)
    # A template file must not also be the model input file it fills.
    assert template_file != in_file
    # get the parameter names in the template file
    parnme = pst_utils.parse_tpl_file(template_file)
    # find "new" parameters that are not already in the control file
    # NOTE(review): ``p not in self.parameter_data.parnme`` tests pandas
    # Series *index* membership, not values -- this relies on parameter_data
    # being indexed by parnme; verify against how parameter_data is built.
    new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]
    if len(new_parnme) == 0:
        warnings.warn("no new parameters found in template file {0}".format(template_file), PyemuWarning)
        new_par_data = None
    else:
        # extend parameter_data with default-valued rows for the new names
        new_par_data = pst_utils.populate_dataframe(new_parnme, pst_utils.pst_config["par_fieldnames"], pst_utils.pst_config["par_defaults"], pst_utils.pst_config["par_dtype"])
        new_par_data.loc[new_parnme, "parnme"] = new_parnme
        self.parameter_data = self.parameter_data.append(new_par_data)
    if in_file is None:
        # default model input file: the template file minus its .tpl suffix
        in_file = template_file.replace(".tpl", '')
    if pst_path is not None:
        # re-root both files at pst_path, dropping any existing directories
        template_file = os.path.join(pst_path, os.path.split(template_file)[-1])
        in_file = os.path.join(pst_path, os.path.split(in_file)[-1])
    self.template_files.append(template_file)
    self.input_files.append(in_file)
    return new_par_data
def StartsWith(self, value):
    """Set the type of the WHERE clause as "starts with".

    Args:
      value: The value to be used in the WHERE condition.

    Returns:
      The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, 'STARTS_WITH')
    self._awql = condition
    return self._query_builder
def _eap_check_config(eap_config: Dict[str, Any]) -> Dict[str, Any]:
    """Check the EAP-specific args and replace values where needed.

    Similar to _check_configure_args but for EAP only.
    """
    eap_type = eap_config.get('eapType')
    # Find the option schema for the requested EAP method.
    options = next((method['options'] for method in EAP_CONFIG_SHAPE['options'] if method['name'] == eap_type), None)
    if options is None:
        raise ConfigureArgsError('EAP method {} is not valid'.format(eap_type))
    _eap_check_no_extra_args(eap_config, options)
    for opt in options:  # type: ignore
        # (types around EAP_CONFIG_SHAPE are loose; inference on these dicts
        # is unreliable, hence the ignore.)
        _eap_check_option_ok(opt, eap_config)
        if opt['type'] == 'file' and opt['name'] in eap_config:
            # File options arrive as key ids; rewrite them to paths.
            eap_config[opt['name']] = _get_key_file(eap_config[opt['name']])
    return eap_config
def _get_heading_level ( self , element ) :
"""Returns the level of heading .
: param element : The heading .
: type element : hatemile . util . html . htmldomelement . HTMLDOMElement
: return : The level of heading .
: rtype : int""" | # pylint : disable = no - self - use
tag = element . get_tag_name ( )
if tag == 'H1' :
return 1
elif tag == 'H2' :
return 2
elif tag == 'H3' :
return 3
elif tag == 'H4' :
return 4
elif tag == 'H5' :
return 5
elif tag == 'H6' :
return 6
return - 1 |
def validate_gps(value):
    """Validate GPS value."""
    try:
        # Must split into exactly three comma-separated float fields.
        latitude, longitude, altitude = value.split(',')
        for component in (latitude, longitude, altitude):
            vol.Coerce(float)(component)
    except (TypeError, ValueError, vol.Invalid):
        raise vol.Invalid('GPS value should be of format "latitude,longitude,altitude"')
    return value
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test,
                  model_generator, metric, trained_model, random_state):
    """The model is revaluated for each test sample with the non-important
    features set to resample background values.

    For each test row, the top ``nkeep[i]`` features (ranked by the
    attribution scores in ``attr_test``) are kept; every other feature is
    overwritten with values resampled from the training set. The model's
    prediction is averaged over the resampled copies and scored with
    ``metric`` against ``y_test``.

    NOTE(review): ``y_train``, ``model_generator`` are accepted but unused
    here — presumably kept for a uniform benchmark-function signature;
    confirm against the callers.
    """
    # why broken? overwriting?
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # how many samples to take
    nsamples = 100
    # keep nkeep top features for each test explanation
    N, M = X_test.shape
    # Replicate each test row `nsamples` times: rows i*nsamples..(i+1)*nsamples-1
    # are copies of test row i.
    X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
    # Tiny deterministic noise breaks ties in the attribution ranking so that
    # argsort is stable across equal scores.
    tie_breaking_noise = const_rand(M) * 1e-6
    # Indices of training rows used as the resampled background.
    inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples,
                                  random_state=random_state)
    for i in range(N):
        if nkeep[i] < M:
            # Descending order of attribution: positions nkeep[i]: are the
            # "unimportant" features to be overwritten.
            ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
            X_test_tmp[i * nsamples:(i + 1) * nsamples, ordering[nkeep[i]:]] = \
                X_train[inds, :][:, ordering[nkeep[i]:]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1)
    # take the mean output over all samples
    return metric(y_test, yp_masked_test)
def _get_linewise_report ( self ) :
"""Returns a report each line of which comprises a pair of an input line
and an error . Unlike in the standard report , errors will appear as many
times as they occur .
Helper for the get _ report method .""" | d = defaultdict ( list )
# line : [ ] of errors
for error , lines in self . errors . items ( ) :
for line_num in lines :
d [ line_num ] . append ( error )
return '\n' . join ( [ '{:>3} → {}' . format ( line , error . string ) for line in sorted ( d . keys ( ) ) for error in d [ line ] ] ) |
def topological_sort(dag):
    """Topologically sort a directed acyclic graph.

    :param dag: directed acyclic graph, mapping each node to the collection
        of nodes it depends on (its incoming edges)
    :type dag: dict
    :return: list of nodes such that every node appears after all of its
        dependencies
    :raises CircularDependencyError: if the graph contains a cycle

    .. seealso:: `Topological Sorting
        <http://en.wikipedia.org/wiki/Topological_sorting>`_,
        `Directed Acyclic Graph (DAG)
        <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_
    """
    # NOTE: the original used Python-2-only dict.iteritems()/viewkeys(),
    # which raise AttributeError on Python 3; .items() and the .keys() view
    # (which supports set operations) are equivalent and work on both.
    # Seed with nodes that have no dependencies.
    topsort = [node for node, edge in dag.items() if not edge]
    # Loop until every node has been placed.
    while len(topsort) < len(dag):
        num_nodes = len(topsort)
        for node in dag.keys() - set(topsort):
            # A node is ready once all of its dependencies are sorted.
            if set(dag[node]) <= set(topsort):
                topsort.append(node)
                break
        # No progress in a full pass means a circular dependency.
        if len(topsort) == num_nodes:
            raise CircularDependencyError(dag.keys() - set(topsort))
    return topsort
def delete(self):
    """Delete this key from mist.io.

    Issues a DELETE request for this key's id, then refreshes the client's
    cached key list.
    """
    url = '{}/keys/{}'.format(self.mist_client.uri, self.id)
    request = self.request(url)
    request.delete()
    self.mist_client.update_keys()
def save(self, filename):
    """Save host keys into a file, in the format used by openssh. The order of
    keys in the file will be preserved when possible (if these keys were
    loaded from a file originally). The single exception is that combined
    lines will be split into individual key lines, which is arguably a bug.

    @param filename: name of the file to write
    @type filename: str
    @raise IOError: if there was an error writing the file
    @since: 1.6.1
    """
    # ``with`` guarantees the handle is closed even if a write raises
    # (the original open()/close() pair leaked the handle on error).
    with open(filename, 'w') as f:
        for e in self._entries:
            line = e.to_line()
            # Entries may serialize to a falsy line (nothing to save); skip.
            if line:
                f.write(line)
def clicked(self, event):
    """Call if an element of this plottype is clicked.
    Implement in sub class.

    Single click: print the log line(s) for the picked points.
    Double click (matplotlib >= 1.2): toggle a duration triangle for the
    first picked event.
    """
    group = event.artist._mt_group
    indices = event.ind
    # double click only supported on 1.2 or later
    major, minor, _ = mpl_version.split('.')
    if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick:
        for i in indices:
            print(self.groups[group][i].line_str)
    else:
        # toggle durline
        first = indices[0]
        logevent = self.groups[group][first]
        try:
            # remove triangle for this event.
            # list() is required: on Python 3, map() returns an iterator
            # that has no .index() method (the original py2 code crashed
            # here with AttributeError).
            idx = list(map(itemgetter(0), self.durlines)).index(logevent)
            _, poly = self.durlines[idx]
            poly.remove()
            plt.gcf().canvas.draw()
            del self.durlines[idx]
        except ValueError:
            # construct triangle and add to list of durlines; orientation
            # depends on whether times mark operation start or end
            if self.args['optime_start']:
                pts = [[date2num(logevent.datetime), 0],
                       [date2num(logevent.datetime), logevent.duration],
                       [date2num(logevent.datetime
                                 + timedelta(milliseconds=logevent.duration)), 0]]
            else:
                pts = [[date2num(logevent.datetime), 0],
                       [date2num(logevent.datetime), logevent.duration],
                       [date2num(logevent.datetime
                                 - timedelta(milliseconds=logevent.duration)), 0]]
            poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0,
                           facecolor=event.artist.get_markerfacecolor(),
                           edgecolor=None, zorder=-10000)
            ax = plt.gca()
            ax.add_patch(poly)
            plt.gcf().canvas.draw()
            self.durlines.append((logevent, poly))
def retry(num_attempts=3, exception_class=Exception, log=None, sleeptime=1):
    """Return a decorator that retries a function on failure.

    The wrapped function is called up to ``num_attempts`` times. If it
    raises ``exception_class``, the error is optionally logged via
    ``log.warn`` and the call is retried after ``sleeptime`` seconds.
    The exception from the final attempt propagates to the caller.

    :param num_attempts: total number of calls before giving up
    :param exception_class: exception type(s) that trigger a retry;
        anything else propagates immediately
    :param log: optional logger; its ``warn`` method is called between
        attempts
    :param sleeptime: seconds to sleep between attempts
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            last_attempt = num_attempts - 1
            for attempt in range(num_attempts):
                try:
                    return func(*args, **kwargs)
                except exception_class as exc:
                    # Out of attempts: let the exception propagate.
                    if attempt == last_attempt:
                        raise
                    if log:
                        log.warn('Failed with error %r, trying again', exc)
                    sleep(sleeptime)
        return wrapper
    return decorator
def basic_diff(source1, source2, start=None, end=None):
    """Perform a basic diff between two equal-sized binary strings and
    return a list of (offset, size) tuples denoting the differences.

    source1
        The first byte string source.
    source2
        The second byte string source.
    start
        Start offset to read from (default: start)
    end
        End offset to stop reading at (default: end)
    """
    lo = 0 if start is None else start
    hi = min(len(source1), len(source2)) if end is None else end
    # Never read past the shorter of the two sources.
    limit = min(hi, len(source1), len(source2))
    runs = []
    run_start = None
    for pos in range(lo, limit):
        if source1[pos] != source2[pos]:
            # Open a new run of differing bytes if not already in one.
            if run_start is None:
                run_start = pos
        elif run_start is not None:
            # Run just ended; record its offset and length.
            runs.append((run_start, pos - run_start))
            run_start = None
    # Close a run that extends to the end of the compared region.
    if run_start is not None:
        runs.append((run_start, limit - run_start))
    return runs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.