signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def do_reconfig(self, params):
    """\x1b[1mNAME\x1b[0m
    reconfig - Reconfigures a ZooKeeper cluster (adds/removes members)

    \x1b[1mSYNOPSIS\x1b[0m
    reconfig <add|remove> <arg> [from_config]

    \x1b[1mDESCRIPTION\x1b[0m
    reconfig add <members> [from_config]
      adds the given members (i.e.: 'server.100=10.0.0.10:2889:3888:observer;0.0.0.0:2181').

    reconfig remove <member_ids> [from_config]
      removes the members with the given ids (i.e.: '2,3,5').

    \x1b[1mEXAMPLES\x1b[0m
    > reconfig add server.100=0.0.0.0:56954:37866:observer;0.0.0.0:42969
    server.1=localhost:20002:20001:participant
    server.2=localhost:20012:20011:participant
    server.3=localhost:20022:20021:participant
    server.100=0.0.0.0:56954:37866:observer;0.0.0.0:42969
    version=1000003

    > reconfig remove 100
    server.1=localhost:20002:20001:participant
    server.2=localhost:20012:20011:participant
    server.3=localhost:20022:20021:participant
    version=1000004
    """
    if params.cmd not in ("add", "remove"):
        raise ValueError("Bad command: %s" % params.cmd)
    # Exactly one of joining/leaving is populated, depending on the subcommand.
    joining = params.args if params.cmd == "add" else None
    leaving = params.args if params.cmd == "remove" else None
    try:
        value, _ = self._zk.reconfig(
            joining=joining,
            leaving=leaving,
            new_members=None,
            from_config=params.from_config,
        )
        self.show_output(value)
    except NewConfigNoQuorumError:
        self.show_output("No quorum available to perform reconfig.")
    except ReconfigInProcessError:
        self.show_output("There's a reconfig in process.")
def AddFileDescriptor(self, file_desc):
    """Adds a FileDescriptor to the pool, non-recursively.

    If the FileDescriptor contains messages or enums, the caller must
    explicitly register them.

    Args:
      file_desc: A FileDescriptor.
    """
    self._AddFileDescriptor(file_desc)
    # TODO(jieluo): This is a temporary solution for FieldDescriptor.file.
    # Remove it when FieldDescriptor.file is added in code gen.
    for ext in file_desc.extensions_by_name.values():
        self._file_desc_by_toplevel_extension[ext.full_name] = file_desc
def file_detector_context(self, file_detector_class, *args, **kwargs):
    """Overrides the current file detector (if necessary) in limited context.

    Ensures the original file detector is set afterwards.

    Example:
        with webdriver.file_detector_context(UselessFileDetector):
            someinput.send_keys('/etc/hosts')

    :Args:
     - file_detector_class - Class of the desired file detector. If the class
       is different from the current file_detector, it is instantiated with
       args and kwargs and used as the file detector for the duration of the
       context manager.
     - args - Optional arguments passed to the file detector class during
       instantiation.
     - kwargs - Keyword arguments, passed the same way as args.
    """
    previous = self.file_detector
    swapped = not isinstance(previous, file_detector_class)
    if swapped:
        self.file_detector = file_detector_class(*args, **kwargs)
    try:
        yield
    finally:
        # Restore only if we actually replaced the detector.
        if swapped:
            self.file_detector = previous
def white_move(self):
    """Ask the white player's ``generate_move()`` for a move and update
    the board with the (legalized) move returned."""
    raw_move = self.player_white.generate_move(self.position)
    legal_move = make_legal(raw_move, self.position)
    self.position.update(legal_move)
def coord_to_pixel(self, lat, lon, width, ground_width, lat2, lon2):
    '''Return the pixel coordinate (px, py) for position (lat2, lon2)
    in an area image anchored at (lat, lon). The result is relative to
    the top-left corner and may fall outside the image.'''
    pixel_width = ground_width / float(width)
    if lat is None or lon is None or lat2 is None or lon2 is None:
        return (0, 0)
    # Ground distances along each axis; sign gives the direction.
    dx = mp_util.gps_distance(lat, lon, lat, lon2)
    dy = mp_util.gps_distance(lat, lon, lat2, lon)
    if lon2 < lon:
        dx = -dx
    if lat2 > lat:
        dy = -dy
    return (int(dx / pixel_width), int(dy / pixel_width))
def _resolve_duplicates(self):
    '''Merge variables connected by an identity operator to reduce the
    number of redundant variables in the graph.

    For every 'identity' operator, its output variable is replaced by its
    input variable everywhere, useful metadata is merged into the surviving
    variable, and the operator plus the duplicate variable are marked
    abandoned and deleted afterwards.
    '''
    self._initialize_graph_status_for_traversing()
    # Traverse the graph from roots to leaves
    for operator in self.topological_operator_iterator():
        if operator.type != 'identity':
            continue
        # Keep identities that bridge a graph input directly to a graph
        # output; removing them would drop an interface variable.
        if any(variable.is_root for variable in operator.inputs) and any(variable.is_leaf for variable in operator.outputs):
            continue
        # Replace the output variable with the input variable everywhere
        original = operator.inputs[0]
        duplicate = operator.outputs[0]
        for another_scope in self.scopes:
            for another_operator in another_scope.operators.values():
                for i in range(len(another_operator.inputs)):
                    if another_operator.inputs[i].onnx_name != duplicate.onnx_name:
                        continue
                    another_operator.inputs[i] = original
        # When the original variable's documentation string or denotation is
        # empty but the duplicate's is not, copy that field to the original
        # variable to avoid information loss.
        if not original.type.doc_string and duplicate.type.doc_string:
            original.type.doc_string = duplicate.type.doc_string
        if isinstance(original.type, TensorType) and isinstance(duplicate.type, TensorType):
            if not original.type.denotation and duplicate.type.denotation:
                original.type.denotation = duplicate.type.denotation
            if not original.type.channel_denotations:
                original.type.channel_denotations = duplicate.type.channel_denotations
            elif duplicate.type.channel_denotations:
                # Merge the channel denotations if available in both the
                # original and the duplicate.
                for i in range(len(original.type.channel_denotations)):
                    if original.type.channel_denotations[i]:
                        continue
                    original.type.channel_denotations[i] = duplicate.type.channel_denotations[i]
            # Sometimes, shapes of duplicates are different. We try to replace
            # the original variable's unknown dimensions as much as possible
            # because we will get rid of the duplicate.
            if len(original.type.shape) == len(duplicate.type.shape):
                for i in range(len(original.type.shape)):
                    if original.type.shape[i] != 'None':
                        continue
                    original.type.shape[i] = duplicate.type.shape[i]
        # Because we're iterating through the topology, we cannot delete any
        # operator or variable here. Otherwise, the traversing function may be
        # broken. We will delete those abandoned ones later.
        duplicate.is_abandoned = True
        operator.is_abandoned = True
    for scope in self.scopes:
        # Find out who is going to be abandoned
        abandoned_operator_names = set(onnx_name for onnx_name, operator in scope.operators.items() if operator.is_abandoned)
        abandoned_variable_names = set(onnx_name for onnx_name, variable in scope.variables.items() if variable.is_abandoned)
        # Remove abandoned operators
        for name in abandoned_operator_names:
            scope.delete_local_operator(name)
        # Remove abandoned variables
        for name in abandoned_variable_names:
            scope.delete_local_variable(name)
def lrun(command, *args, **kwargs):
    '''Run a local command from the project root.'''
    rooted_command = 'cd {0} && {1}'.format(ROOT, command)
    return run(rooted_command, *args, **kwargs)
def determine_opening_indent(indent_texts):
    '''Determine the opening indent level for a docstring.

    The opening indent level is the first non-zero indent level of a
    non-empty line in the docstring.

    Args:
        indent_texts: The lines of the docstring as a sequence of 2-tuples,
            each containing an integer indent level as the first element and
            the line text as the second element.

    Returns:
        The opening indent level as an integer.
    '''
    if not indent_texts:
        return 0
    first_indent = indent_texts[0][0]
    if len(indent_texts) == 1:
        return first_indent
    second_indent, second_text = indent_texts[1]
    # An empty second line tells us nothing; fall back to the first line.
    return first_indent if len(second_text) == 0 else second_indent
def get_base_type_of_signal(signal):
    # type: (canmatrix.Signal) -> typing.Tuple[str, int]
    """Get the signal's ARXML base type name and bit size from its properties.

    Floats map to single/double; integers map to the smallest
    (u)intN type (N in 8/16/32/64) that holds ``signal.size`` bits.
    """
    if signal.is_float:
        return ("double", 64) if signal.size > 32 else ("single", 32)
    # Round the bit count up to the next power-of-two storage size.
    if signal.size > 32:
        size = 64
    elif signal.size > 16:
        size = 32
    elif signal.size > 8:
        size = 16
    else:
        size = 8
    prefix = "sint" if signal.is_signed else "uint"
    return prefix + str(size), size
def getVersionNumber(self):
    """Get the OpenThread WPAN stack firmware version number string."""
    # Parenthesized print works under both Python 2 and 3; the original
    # Python-2-only print statement was a SyntaxError on Python 3.
    print('%s call getVersionNumber' % self.port)
    versionStr = self.__sendCommand(WPANCTL_CMD + 'getprop -v NCP:Version')[0]
    return self.__stripValue(versionStr)
def applies(self, dataset):
    """Determine whether the dim transform can be applied to the Dataset,
    i.e. whether all referenced dimensions can be resolved."""
    if isinstance(self.dimension, dim):
        resolved = self.dimension.applies(dataset)
    else:
        resolved = dataset.get_dimension(self.dimension) is not None
    # Graphs may resolve the dimension on their node data instead.
    if isinstance(dataset, Graph) and not resolved:
        resolved = dataset.nodes.get_dimension(self.dimension) is not None
    # Every dim argument of every op must also resolve.
    for op in self.ops:
        for arg in op.get('args') or ():
            if isinstance(arg, dim):
                resolved &= arg.applies(dataset)
    return resolved
def _get_nits(self, filename):
    """Iterate over the instance's style checkers and yield Nits.

    :param filename: str pointing to a file within the buildroot.
    """
    try:
        python_file = PythonFile.parse(filename, root=self._root_dir)
    except CheckSyntaxError as e:
        # A file that fails to parse yields exactly one syntax-error nit.
        yield e.as_nit()
        return
    if noqa_file_filter(python_file):
        # File-level noqa: all checks are suppressed for this file.
        return
    if self._excluder:
        # Filter out any suppressed plugins
        check_plugins = [(plugin_name, plugin_factory) for plugin_name, plugin_factory in self._plugin_factories.items() if self._excluder.should_include(filename, plugin_name)]
    else:
        check_plugins = self._plugin_factories.items()
    for plugin_name, plugin_factory in check_plugins:
        for i, nit in enumerate(plugin_factory(python_file)):
            if i == 0:
                # NB: Add debug log header for nits from each plugin, but only if there are nits from it.
                self.log.debug('Nits from plugin {} for {}'.format(plugin_name, filename))
            if not nit.has_lines_to_display:
                yield nit
                continue
            # Suppress the nit if any of its lines carries a noqa marker.
            if all(not line_contains_noqa(line) for line in nit.lines):
                yield nit
def get_rudder_scores(self, category):
    '''Compute Rudder scores for the given category.

    Parameters
    ----------
    category : str
        Category name to score.

    Returns
    -------
    np.array
    '''
    in_category = self._get_term_percentiles_in_category(category)
    out_of_category = self._get_term_percentiles_not_in_category(category)
    return self._get_rudder_scores_for_percentile_pair(in_category, out_of_category)
def _set_ldp_protocol_errors_instance_since_clear(self, v, load=False):
    """Setter method for ldp_protocol_errors_instance_since_clear, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_errors_instance_since_clear (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_ldp_protocol_errors_instance_since_clear is considered a private
    method. Backends looking to populate this variable should do so via
    calling thisObj._set_ldp_protocol_errors_instance_since_clear() directly.
    """
    # If the value is already a typed pyangbind object, unwrap it back to its
    # underlying native type before re-wrapping it below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated container class, carrying the full
        # YANG metadata (names, namespace, extensions, config flag).
        t = YANGDynClass(v, base=ldp_protocol_errors_instance_since_clear.ldp_protocol_errors_instance_since_clear, is_container='container', presence=False, yang_name="ldp-protocol-errors-instance-since-clear", rest_name="ldp-protocol-errors-instance-since-clear", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-errors-instance-ldp-protocol-errors-instance-since-clear-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Incompatible value: report the expected generated type to the caller.
        raise ValueError({'error-string': """ldp_protocol_errors_instance_since_clear must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ldp_protocol_errors_instance_since_clear.ldp_protocol_errors_instance_since_clear, is_container='container', presence=False, yang_name="ldp-protocol-errors-instance-since-clear", rest_name="ldp-protocol-errors-instance-since-clear", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-errors-instance-ldp-protocol-errors-instance-since-clear-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", })
    self.__ldp_protocol_errors_instance_since_clear = t
    # Notify the parent object, when present, that a child changed.
    if hasattr(self, '_set'):
        self._set()
def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient):
    '''Recursively copy a directory tree from HDFS to the local filesystem.'''
    if not os.path.exists(localDirectory):
        os.makedirs(localDirectory)
    try:
        listing = hdfsClient.list_status(hdfsDirectory)
    except Exception as exception:
        nni_log(LogType.Error, 'List hdfs directory {0} error: {1}'.format(hdfsDirectory, str(exception)))
        raise exception
    for entry in listing:
        # HDFS paths use posix separators; local paths use the OS separator.
        remote_path = posixpath.join(hdfsDirectory, entry.pathSuffix)
        local_path = os.path.join(localDirectory, entry.pathSuffix)
        if entry.type == 'DIRECTORY':
            copyHdfsDirectoryToLocal(remote_path, local_path, hdfsClient)
        elif entry.type == 'FILE':
            copyHdfsFileToLocal(remote_path, local_path, hdfsClient)
        else:
            raise AssertionError('unexpected type {}'.format(entry.type))
def open_session(self):
    """Open a new session to modify this server.

    You can either call this fnc directly, or turn on autosession which will
    open/commit sessions for you transparently.

    Raises:
        RuntimeError: if a session is already open.
    """
    if self.session is not None:
        msg = "session already open; commit it or rollback before opening another one in %s" % self
        logger.error(msg)
        raise RuntimeError(msg)
    logger.info("opening a new session")
    logger.info("removing %s" % self.loc_session)
    try:
        shutil.rmtree(self.loc_session)
    except OSError:
        # Narrowed from a bare `except:` — only filesystem errors (e.g. the
        # session directory not existing yet) are expected and best-effort.
        logger.info("failed to delete %s" % self.loc_session)
    logger.info("cloning server from %s to %s" % (self.loc_stable, self.loc_session))
    shutil.copytree(self.loc_stable, self.loc_session)
    self.session = SimServer(self.loc_session, use_locks=self.use_locks)
    self.lock_update.acquire()
def define_natives(cls):
    """Define the native functions for PFP.

    Imports every module in the ``pfp/native`` package directory and injects
    the PYVAL/PYSTR helpers into each. Idempotent: returns immediately if
    natives were already registered.
    """
    if len(cls._natives) > 0:
        return
    glob_pattern = os.path.join(os.path.dirname(__file__), "native", "*.py")
    for filename in glob.glob(glob_pattern):
        basename = os.path.basename(filename).replace(".py", "")
        if basename == "__init__":
            continue
        try:
            mod_base = __import__("pfp.native", globals(), locals(), fromlist=[basename])
        except Exception as e:
            sys.stderr.write("cannot import native module {} at '{}'".format(basename, filename))
            # Bug fix: an unreachable `continue` used to follow this raise;
            # the dead statement has been removed.
            raise e
        mod = getattr(mod_base, basename)
        setattr(mod, "PYVAL", fields.get_value)
        setattr(mod, "PYSTR", fields.get_str)
def has_response(self, beacon_config, request, client_address):
    """:meth:`.WBeaconMessengerBase.has_response` method implementation.
    Compares request headers the way :meth:`.WBeaconGouverneurMessenger.has_response`
    does and matches the specified group names against the internal ones."""
    try:
        groups, address = self._message_hostgroup_parse(request)
    except ValueError:
        # Unparseable request: no response.
        return False
    # With no configured hostgroups (or none requested) always respond.
    if len(self.__hostgroups) == 0 or len(groups) == 0:
        return True
    return any(group_name in self.__hostgroups for group_name in groups)
def polygon_to_geohashes(polygon, precision, inner=True):
    """Compute the set of geohashes that form a polygon.

    :param polygon: shapely polygon.
    :param precision: int. Geohashes' precision that form resulting polygon.
    :param inner: bool, default 'True'. If False, geohashes that are
        completely outside of the polygon are ignored (boundary-touching
        cells are kept); if True only fully contained cells are kept.
    :return: set. Set of geohashes that form the polygon.
    """
    inner_geohashes = set()
    outer_geohashes = set()
    envelope = polygon.envelope
    centroid = polygon.centroid
    # Breadth-first flood fill over geohash cells, seeded at the cell that
    # contains the polygon's centroid.
    testing_geohashes = queue.Queue()
    testing_geohashes.put(geohash.encode(centroid.y, centroid.x, precision))
    while not testing_geohashes.empty():
        current_geohash = testing_geohashes.get()
        if current_geohash not in inner_geohashes and current_geohash not in outer_geohashes:
            current_polygon = geohash_to_polygon(current_geohash)
            # Cheap bounding-box test first; only cells passing it are
            # classified against the real polygon and expanded.
            condition = envelope.contains(current_polygon) if inner else envelope.intersects(current_polygon)
            if condition:
                if inner:
                    if polygon.contains(current_polygon):
                        inner_geohashes.add(current_geohash)
                    else:
                        outer_geohashes.add(current_geohash)
                else:
                    if polygon.intersects(current_polygon):
                        inner_geohashes.add(current_geohash)
                    else:
                        outer_geohashes.add(current_geohash)
                # Grow the frontier to not-yet-visited neighbouring cells.
                for neighbor in geohash.neighbors(current_geohash):
                    if neighbor not in inner_geohashes and neighbor not in outer_geohashes:
                        testing_geohashes.put(neighbor)
    return inner_geohashes
def PopupGetFolder(message, title=None, default_path='', no_window=False, size=(None, None), button_color=None, background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None):
    """Display popup with text entry field and browse button. Browse for folder.

    :param message: text displayed above the entry field
    :param title: window title; defaults to ``message`` when None
    :param default_path: initial contents of the input field
    :param no_window: if True, skip the popup and show the native folder dialog directly
    :param size: (width, height) of the input element
    :param button_color: button color
    :param background_color: window background color
    :param text_color: message text color
    :param icon: window icon
    :param font: font for the window
    :param no_titlebar: hide the titlebar
    :param grab_anywhere: allow dragging the window from anywhere
    :param keep_on_top: keep the window above others
    :param location: (x, y) screen position for the window
    :param initial_folder: folder the browse dialog starts in
    :return: Contents of text field. None if closed using X or cancelled
    """
    if no_window:
        # No popup requested: show the bare Qt directory dialog directly,
        # creating the QApplication on first use.
        if Window.QTApplication is None:
            Window.QTApplication = QApplication(sys.argv)
        folder_name = QFileDialog.getExistingDirectory(dir=initial_folder)
        return folder_name
    layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)], [InputText(default_text=default_path, size=size), FolderBrowse(initial_folder=initial_folder)], [CloseButton('Ok', size=(60, 20), bind_return_key=True), CloseButton('Cancel', size=(60, 20))]]
    _title = title if title is not None else message
    window = Window(title=_title, icon=icon, auto_size_text=True, button_color=button_color, background_color=background_color, font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location)
    (button, input_values) = window.Layout(layout).Read()
    # Only the Ok button returns the entered path; X/Cancel return None.
    if button != 'Ok':
        return None
    else:
        path = input_values[0]
        return path
def encode_ndarray(obj):
    """Encode a numpy array as a base64 string of float64 column-major
    bytes, returned together with its (at least 2-D) shape."""
    shape = obj.shape
    if len(shape) == 1:
        # Report 1-D vectors as a single row.
        shape = (1, obj.shape[0])
    transposed = obj.T
    if not (obj.flags.c_contiguous or obj.flags.f_contiguous):
        # Force Fortran order so the byte stream is column-major.
        transposed = asfortranarray(transposed)
    obj = transposed
    try:
        data = obj.astype(float64).tobytes()
    except AttributeError:
        # Very old numpy spells tobytes() as tostring().
        data = obj.astype(float64).tostring()
    return base64.b64encode(data).decode('utf-8'), shape
def get_redditor(self, user_name, *args, **kwargs):
    """Return a Redditor instance for the user_name specified.

    Any additional positional and keyword arguments are forwarded to the
    :class:`.Redditor` constructor.
    """
    redditor = objects.Redditor(self, user_name, *args, **kwargs)
    return redditor
def data_type_to_numpy(datatype, unsigned=False):
    """Convert an ncstream datatype to a numpy one."""
    base = _dtypeLookup[datatype]
    # Variable-length types carry no byte order or signedness.
    if datatype in (stream.STRING, stream.OPAQUE):
        return np.dtype(base)
    if unsigned:
        base = base.replace('i', 'u')
    # Prefix '=' pins native byte order.
    return np.dtype('=' + base)
def describe_parameters(name, Source=None, MaxRecords=None, Marker=None, region=None, key=None, keyid=None, profile=None):
    '''Returns a list of `DBParameterGroup` parameters.

    CLI example to describe parameters::

        salt myminion boto_rds.describe_parameters parametergroupname region=us-east-1
    '''
    res = __salt__['boto_rds.parameter_group_exists'](name, tags=None, region=region, key=key, keyid=keyid, profile=profile)
    if not res.get('exists'):
        return {'result': False, 'message': 'Parameter group {0} does not exist'.format(name)}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            return {'result': False, 'message': 'Could not establish a connection to RDS'}
        kwargs = {'DBParameterGroupName': name}
        # Bug fix: the previous locals()-based lookup used a loop variable
        # named `key`, shadowing the `key` credential parameter. Reference
        # the optional arguments directly instead.
        # future lint: disable=blacklisted-function
        if Marker is not None:
            kwargs['Marker'] = str(Marker)
        if Source is not None:
            kwargs['Source'] = str(Source)
        # future lint: enable=blacklisted-function
        if MaxRecords is not None:
            kwargs['MaxRecords'] = int(MaxRecords)
        pag = conn.get_paginator('describe_db_parameters')
        pit = pag.paginate(**kwargs)
        # NOTE(review): boto3 spells this field 'IsModifiable'; the key below
        # ('IsModifieable') therefore always yields None. Kept as-is for
        # backward compatibility of the returned dict — confirm with callers.
        keys = ['ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifieable', 'MinimumEngineVersion', 'ApplyMethod']
        parameters = odict.OrderedDict()
        ret = {'result': True}
        for page in pit:
            for result in page['Parameters']:
                data = odict.OrderedDict()
                for k in keys:
                    data[k] = result.get(k)
                parameters[result.get('ParameterName')] = data
        ret['parameters'] = parameters
        return ret
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
def print_table(table, title='', delim='|', centering='center', col_padding=2, header=True, headerchar='-'):
    """Build a printable string from a list of lists representing table rows.

    Parameters
    ----------
    table : list
        list of lists, e.g. a table with 3 columns and 2 rows could be
        [['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2']]
    title : string
        Printed centered above the table
    delim : string
        character to delimit columns
    centering : {'left', 'right', 'center'}
        chooses justification for columns
    col_padding : int
        number of blank spaces to add to each column
    header : {True, False}
        Does the first entry of table contain column headers?
    headerchar : {string}
        character to separate column headers from rest of table

    Returns
    -------
    string representing the table that's ready to be printed

    Notes
    -----
    The string for the table will have correctly justified columns with extra
    padding added into each column entry to ensure columns align. The
    characters used to delimit the columns can be user defined. This should
    be useful for printing convergence data from tests.

    Examples
    --------
    >>> from pyamg.util.utils import print_table
    >>> table = [['cos(0)', 'cos(pi/2)', 'cos(pi)'], ['0.0', '1.0', '0.0']]
    >>> table1 = print_table(table)                 # string to print
    >>> table2 = print_table(table, delim='||')
    >>> table3 = print_table(table, headerchar='*')
    >>> table4 = print_table(table, col_padding=6, centering='left')
    """
    table_str = '\n'
    # sometimes, the table will be passed in as (title, table)
    if isinstance(table, tuple):
        title = table[0]
        table = table[1]
    # Calculate each column's width: widest entry in the column plus padding.
    colwidths = []
    for row in table:
        # Extend colwidths if this row has more columns than any seen so far.
        for _ in range(len(row) - len(colwidths)):
            colwidths.append(-1)
        for j, entry in enumerate(row):
            if len(entry) > colwidths[j]:
                colwidths[j] = len(entry)
    colwidths = [w + col_padding for w in colwidths]
    # Total table width, including the delimiters between columns.
    ttwidth = sum(colwidths) + len(delim) * (len(colwidths) - 1)
    # Print the (possibly multi-line) title centered over the table.
    if len(title) > 0:
        for line in title.split("\n"):
            table_str += str.center(line, ttwidth) + '\n'
        table_str += "\n"
    # Choose centering scheme
    centering = centering.lower()
    if centering == 'center':
        centering = str.center
    if centering == 'right':
        centering = str.rjust
    if centering == 'left':
        centering = str.ljust
    if header:
        # Append column headers.
        for elmt, elmtwidth in zip(table[0], colwidths):
            table_str += centering(str(elmt), elmtwidth) + delim
        if table[0] != []:
            table_str = table_str[:-len(delim)] + '\n'
        # Append the header separator row, long enough to span the table.
        if len(headerchar) == 0:
            headerchar = ' '
        # Integer ceiling division replaces the former scipy ceil call
        # (`sp.ceil` was removed from modern SciPy); the result is identical.
        table_str += headerchar * (-(-ttwidth // len(headerchar))) + '\n'
        table = table[1:]
    for row in table:
        for elmt, elmtwidth in zip(row, colwidths):
            table_str += centering(str(elmt), elmtwidth) + delim
        if row != []:
            table_str = table_str[:-len(delim)] + '\n'
        else:
            table_str += '\n'
    return table_str
def read_gpx(xml, gpxns=None):
    """Parse a GPX file into a GpxModel.

    Args:
        xml: A file-like object opened in binary mode - that is, containing
            bytes rather than characters. The root element of the XML should
            be a <gpx> element containing a version attribute. GPX version
            1.1 is supported.
        gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited
            by curly braces). If None (the default), the namespace used in
            the document will be determined automatically.
    """
    document = etree.parse(xml)
    root = document.getroot()
    return parse_gpx(root, gpxns=gpxns)
def command_cycles_per_sec(self, event=None):
    """TODO: refactor: move code to CPU!"""
    try:
        cycles_per_sec = self.cycles_per_sec_var.get()
    except ValueError:
        # Invalid entry: reset the widget to the current config value.
        self.cycles_per_sec_var.set(self.runtime_cfg.cycles_per_sec)
        return
    mhz = cycles_per_sec / 1000000
    self.cycles_per_sec_label_var.set(
        "cycles/sec / 1000000 = %f MHz CPU frequency * 16 = %f Mhz crystal" % (mhz, mhz * 16)
    )
    self.runtime_cfg.cycles_per_sec = cycles_per_sec
def mandelbrot_capture(x, y, w, h, params):
    """Computes the number of iterations of the given pixel-space coordinates,
    for high-res capture purposes.

    Contrary to :func:`mandelbrot`, this function returns a continuous
    number of iterations to avoid banding.

    :param x: X coordinate on the picture
    :param y: Y coordinate on the picture
    :param w: Width of the picture
    :param h: Height of the picture
    :param params: Current application parameters.
    :type params: params.Params
    :return: Continuous number of iterations.
    """
    # FIXME: Figure out why these corrections are necessary or how to make them perfect
    # Viewport is offset compared to window when capturing without these (found empirically)
    if params.plane_ratio >= 1.0:
        x -= params.plane_w
    else:
        x += 3.0 * params.plane_w
    # Map pixel coordinates to the normalized plane, preserving aspect ratio.
    ratio = w / h
    n_x = x * 2.0 / w * ratio - 1.0
    n_y = y * 2.0 / h - 1.0
    # Apply zoom and recenter on the current view center (mb_cx, mb_cy).
    mb_x = params.zoom * n_x + params.mb_cx
    mb_y = params.zoom * n_y + params.mb_cy
    mb = mandelbrot_iterate(mb_x + 1j * mb_y, params.max_iterations, params.julia_seed)
    z, iterations = mb
    # Continuous iteration count for no banding
    # https://en.wikipedia.org/wiki/Mandelbrot_set#Continuous_.28smooth.29_coloring
    nu = params.max_iterations
    if iterations < params.max_iterations:
        nu = iterations + 2 - abs(cmath.log(cmath.log(abs(z)) / cmath.log(params.max_iterations), 2))
    return clamp(nu, 0, params.max_iterations)
def walknode(top, skiphidden=True):
    """Return a recursive iterator over all Nodes under `top`.

    If `skiphidden` is True (the default), branches whose node name starts
    with '._' are skipped entirely (the node and everything below it).
    """
    if skiphidden and top.name.startswith('._'):
        return
    yield top
    for child in top:
        # Bug fix: propagate `skiphidden` into the recursion; previously the
        # default (True) was always used for children, so passing
        # skiphidden=False still hid nested branches.
        for node in walknode(child, skiphidden):
            yield node
def assess_quality(feed: "Feed") -> DataFrame:
    """Return a DataFrame of various feed indicators and values,
    e.g. number of trips missing shapes.

    Parameters
    ----------
    feed : Feed

    Returns
    -------
    DataFrame
        The columns are

        - ``'indicator'``: string; name of an indicator, e.g. 'num_routes'
        - ``'value'``: value of the indicator, e.g. 27

    Notes
    -----
    - An odd function, but useful to see roughly how broken a feed is
    - Not a GTFS validator
    """
    d = OrderedDict()
    # Count duplicate route short names
    r = feed.routes
    dup = r.duplicated(subset=["route_short_name"])
    n = dup[dup].count()
    d["num_route_short_names_duplicated"] = n
    d["frac_route_short_names_duplicated"] = n / r.shape[0]
    # Count stop times missing shape_dist_traveled values
    st = feed.stop_times.sort_values(["trip_id", "stop_sequence"])
    if "shape_dist_traveled" in st.columns:
        # Count missing distances
        n = st[st["shape_dist_traveled"].isnull()].shape[0]
        d["num_stop_time_dists_missing"] = n
        d["frac_stop_time_dists_missing"] = n / st.shape[0]
    else:
        # Column absent entirely: every stop time is missing a distance.
        d["num_stop_time_dists_missing"] = st.shape[0]
        d["frac_stop_time_dists_missing"] = 1
    # Count direction_ids missing
    t = feed.trips
    if "direction_id" in t.columns:
        n = t[t["direction_id"].isnull()].shape[0]
        d["num_direction_ids_missing"] = n
        d["frac_direction_ids_missing"] = n / t.shape[0]
    else:
        d["num_direction_ids_missing"] = t.shape[0]
        d["frac_direction_ids_missing"] = 1
    # Count trips missing shapes
    if feed.shapes is not None:
        n = t[t["shape_id"].isnull()].shape[0]
    else:
        n = t.shape[0]
    d["num_trips_missing_shapes"] = n
    d["frac_trips_missing_shapes"] = n / t.shape[0]
    # Count missing departure times
    n = st[st["departure_time"].isnull()].shape[0]
    d["num_departure_times_missing"] = n
    d["frac_departure_times_missing"] = n / st.shape[0]
    # Count missing first departure times
    g = st.groupby("trip_id").first().reset_index()
    n = g[g["departure_time"].isnull()].shape[0]
    d["num_first_departure_times_missing"] = n
    d["frac_first_departure_times_missing"] = n / st.shape[0]
    # Count missing last departure times
    g = st.groupby("trip_id").last().reset_index()
    n = g[g["departure_time"].isnull()].shape[0]
    d["num_last_departure_times_missing"] = n
    d["frac_last_departure_times_missing"] = n / st.shape[0]
    # Opine
    if ((d["frac_first_departure_times_missing"] >= 0.1) or (d["frac_last_departure_times_missing"] >= 0.1) or d["frac_trips_missing_shapes"] >= 0.8):
        d["assessment"] = "bad feed"
    elif (d["frac_direction_ids_missing"] or d["frac_stop_time_dists_missing"] or d["num_route_short_names_duplicated"]):
        d["assessment"] = "probably a fixable feed"
    else:
        d["assessment"] = "good feed"
    f = pd.DataFrame(list(d.items()), columns=["indicator", "value"])
    return f
def get_font(self, face, bold=False, italic=False):
    """Return a (cached) ``TextureFont`` for the given face/bold/italic combo.

    Fonts are created lazily and memoized in ``self._fonts``, keyed by the
    (face, bold, italic) triple, so repeated requests share one texture.
    """
    key = '%s-%s-%s' % (face, bold, italic)
    try:
        return self._fonts[key]
    except KeyError:
        spec = dict(face=face, bold=bold, italic=italic)
        font = TextureFont(spec, self._renderer)
        self._fonts[key] = font
        return font
def _succeed(self, result):
    """Fire the success chain.

    Invokes every registered callback with *result* plus the stored
    positional and keyword arguments, then records the result.
    """
    for callback, extra_args, extra_kwargs in self._callbacks:
        callback(result, *extra_args, **extra_kwargs)
    self._resulted_in = result
def returner(ret):
    '''Return data to a Cassandra ColumnFamily'''
    consistency_level = getattr(pycassa.ConsistencyLevel, __opts__['cassandra.consistency_level'])
    pool = pycassa.ConnectionPool(__opts__['cassandra.keyspace'], __opts__['cassandra.servers'])
    column_family = pycassa.ColumnFamily(pool, __opts__['cassandra.column_family'],
                                         write_consistency_level=consistency_level)
    columns = {'fun': ret['fun'], 'id': ret['id']}
    ret_data = ret['return']
    if isinstance(ret_data, dict):
        # Flatten nested return data into dotted column names.
        for key, value in six.iteritems(ret_data):
            columns['return.{0}'.format(key)] = six.text_type(value)
    else:
        columns['return'] = six.text_type(ret_data)
    log.debug(columns)
    column_family.insert(ret['jid'], columns)
def close(self):
    """Close any open file handle and flush pending state.

    Idempotent: only the first call while active does work.  Persists the
    sincedb position and dispatches any buffered event lines through the
    callback before shutting down.
    """
    if not self.active:
        return
    self.active = False
    if self._file:
        self._file.close()
    self._sincedb_update_position(force_update=True)
    if self._current_event:
        pending = '\n'.join(self._current_event)
        self._current_event.clear()
        self._callback_wrapper([pending])
def get_threads(self):
    """Return a dict describing every live thread, keyed by thread ident.

    Each value records the thread's ident and name plus flags marking the
    debugger thread and the thread currently being debugged.  Callers can
    use this map to swap which thread is being debugged.
    """
    threads = {}
    for thr in threading.enumerate():
        ident = thr.ident
        threads[ident] = {
            "ident": ident,
            "name": thr.name,
            "is_debugger": ident == self.debugger_thread_ident,
            "is_debugged": ident == self.debugged_thread_ident,
        }
    return threads
def pcre(tgt, minion_id=None):
    '''Return True if the minion ID matches the given pcre target

    minion_id
        Specify the minion ID to match against the target expression

        .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' match.pcre '.*'
    '''
    if minion_id is not None:
        opts = copy.copy(__opts__)
        if not isinstance(minion_id, six.string_types):
            minion_id = six.text_type(minion_id)
        opts['id'] = minion_id
    else:
        opts = __opts__
    matchers = salt.loader.matchers(opts)
    try:
        # BUGFIX: pass the (possibly minion_id-patched) opts, not the
        # module-global __opts__ -- otherwise the minion_id argument was
        # silently ignored and the match always ran against this minion.
        return matchers['pcre_match.match'](tgt, opts=opts)
    except Exception as exc:
        log.exception(exc)
        return False
def get_param(self):
    """Return the current parameter value of the cosine-annealing cycle.

    Interpolates between ``start_value`` and ``end_value`` following half a
    cosine wave over ``cycle_size`` events.
    """
    progress = self.event_index / self.cycle_size
    half_span = (self.end_value - self.start_value) / 2
    return self.start_value + half_span * (1 - math.cos(math.pi * progress))
def parse_from_xml(self, xml_spec):
    '''Parse a string or file containing an XML specification.

    *xml_spec* may be an XML string or an open file-like object; invalid
    XML raises ``xml.parsers.expat.ExpatError``.
    '''
    if type(xml_spec) in string_types():
        document = xml.dom.minidom.parseString(xml_spec)
    else:
        document = xml.dom.minidom.parse(xml_spec)
    self._parse_xml(document)
    # Release the DOM's cyclic references explicitly.
    document.unlink()
def dispatch_request(self, *args, **kwargs):
    """Dispatch the current request and serialize the result.

    Delegates to :class:`flask.views.MethodView` ``dispatch_request()``;
    if the handler did not already build a :class:`flask.Response`, the
    result is passed through ``make_response`` (unpacked when the handler
    returned a tuple/list of response parts).

    :rtype: :class:`flask.Response`
    :raises werkzeug.exceptions.NotAcceptable: if no media type matches
        the current Accept header.
    """
    result = super(ContentNegotiatedMethodView, self).dispatch_request(*args, **kwargs)
    if isinstance(result, Response):
        return result
    if isinstance(result, (list, tuple)):
        return self.make_response(*result)
    return self.make_response(result)
def _end_of_century(self):
    """Reset the date to the last day of the century
    and the time to 23:59:59.999999.

    :rtype: DateTime
    """
    # Century N runs (N-1)*100 + 1 .. N*100, so e.g. 1999 -> 2000.
    century_start = (self.year - 1) - (self.year - 1) % YEARS_PER_CENTURY + 1
    last_year = century_start + YEARS_PER_CENTURY - 1
    return self.set(last_year, 12, 31, 23, 59, 59, 999999)
def read(self, source_path):
    """Parse content and metadata of textile files."""
    with pelican_open(source_path) as text:
        chunks = text.split('----', 1)
        if len(chunks) == 2:
            # Front matter present: parse "key: value" header lines.
            headerdict = {}
            for line in chunks[0].splitlines():
                pair = line.split(':', 1)
                if len(pair) == 2:
                    headerdict[pair[0]] = pair[1].strip()
            metadata = self._parse_metadata(headerdict)
            content = textile(chunks[1])
        else:
            # No header block: whole file is content.
            metadata = {}
            content = textile(text)
    return content, metadata
def _internal_function_call(self, call_conf):
    '''Call an internal support function described by *call_conf*.

    :param call_conf: dict with ``fun``, ``arg`` and ``kwargs`` entries.
    :return: the function's result, or an error message if it is missing.
    '''
    def stub(*args, **kwargs):
        # Fallback used when the requested function does not exist.
        message = 'Function {} is not available'.format(call_conf['fun'])
        self.out.error(message)
        log.debug('Attempt to run "%s" with %s arguments and %s parameters.',
                  call_conf['fun'], call_conf['arg'], call_conf['kwargs'])
        return message

    handler = getattr(salt.cli.support.intfunc, call_conf['fun'], stub)
    return handler(self.collector, *call_conf['arg'], **call_conf['kwargs'])
def serialize(self):
    """Produce a YAML version of this catalog.

    Note that this is not the same as ``.yaml()``, which produces a YAML
    block referring to this catalog.
    """
    import yaml
    sources = {key: entry._captured_init_kwargs for key, entry in self.items()}
    payload = {"metadata": self.metadata, "sources": sources, "name": self.name}
    return yaml.dump(payload)
def _write_apt_gpg_keyfile(key_name, key_material):
    """Write GPG key material into /etc/apt/trusted.gpg.d/.

    :param key_name: A key name to use for a key file (could be a fingerprint)
    :type key_name: str
    :param key_material: A GPG key material (binary)
    :type key_material: (str, bytes)
    """
    keyfile_path = '/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name)
    with open(keyfile_path, 'wb') as key_file:
        key_file.write(key_material)
def readline(self) -> bytes:
    '''Read a single line of data from the connection.'''
    assert self._state == ConnectionState.created, 'Expect conn created. Got {}.'.format(self._state)
    with self._close_timer.with_timeout():
        line = yield from self.run_network_operation(
            self.reader.readline(), close_timeout=self._timeout, name='Readline')
    return line
def _read_utf(cls, data, pos, kind=None):
    """Read a 2-byte-length-prefixed UTF-8 string from *data* at *pos*.

    :param kind: Optional; a human-friendly identifier for the kind of
        UTF-8 data being loaded (e.g. a keystore alias, an algorithm
        identifier).  Used to build a more informative exception message
        when decoding fails.
    :return: (decoded string, position just past the string)
    """
    size = b2.unpack_from(data, pos)[0]
    pos += 2
    raw = data[pos:pos + size]
    try:
        return raw.decode('utf-8'), pos + size
    except (UnicodeEncodeError, UnicodeDecodeError) as e:
        if kind:
            raise BadKeystoreFormatException("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e)))
        raise BadKeystoreFormatException("Encountered bad UTF-8 data: %s" % str(e))
def get_data_item_for_reference_key(self, data_item_reference_key: str = None, create_if_needed: bool = False, large_format: bool = False) -> DataItem:
    """Get the data item associated with a data item reference key.

    :param data_item_reference_key: The data item reference key.
    :param create_if_needed: Whether to create a new data item if none is found.
    :param large_format: Passed through when creating a new data item.
    :return: The associated data item, wrapped; may be None.

    .. versionadded:: 1.0

    Status: Provisional
    Scriptable: Yes
    """
    document_model = self._document_model
    reference = document_model.get_data_item_reference(data_item_reference_key)
    data_item = reference.data_item
    if data_item is None and create_if_needed:
        # Lazily create, register and channel-bind a fresh data item.
        new_item = DataItemModule.DataItem(large_format=large_format)
        new_item.ensure_data_source()
        document_model.append_data_item(new_item)
        document_model.setup_channel(data_item_reference_key, new_item)
        new_item.session_id = document_model.session_id
        # Re-resolve through the reference so we return what is registered.
        data_item = document_model.get_data_item_reference(data_item_reference_key).data_item
    return DataItem(data_item) if data_item else None
def generate_voxel_grid(bbox, szval, use_cubes=False):
    """Generate a voxel grid with the desired size.

    :param bbox: bounding box as (min corner, max corner)
    :type bbox: list, tuple
    :param szval: number of samples in the x-, y-, z-directions
    :type szval: list, tuple
    :param use_cubes: use cube voxels instead of cuboid ones
    :type use_cubes: bool
    :return: voxel grid as [min corner, max corner] pairs
    :rtype: list
    :raises GeomdlException: if any size value is not bigger than 1
    """
    if szval[0] <= 1 or szval[1] <= 1 or szval[2] <= 1:
        raise GeomdlException("Size values must be bigger than 1", data=dict(sizevals=szval))
    # Per-axis step between voxel corners.
    steps = [float(bbox[1][axis] - bbox[0][axis]) / float(szval[axis] - 1) for axis in range(3)]
    if use_cubes:
        # Cubic voxels: the smallest step is used on every axis.
        edge = min(steps)
        steps = [edge, edge, edge]
    corner_ranges = [list(linalg.frange(bbox[0][axis], bbox[1][axis], steps[axis])) for axis in range(3)]
    return [
        [[u, v, w], [u + steps[0], v + steps[1], w + steps[2]]]
        for u in corner_ranges[0]
        for v in corner_ranges[1]
        for w in corner_ranges[2]
    ]
def _accumulate_sufficient_statistics(self, stats, X, framelogprob, posteriors, fwdlattice, bwdlattice):
    """Update sufficient statistics from a given sample.

    Parameters
    ----------
    stats : dict
        Sufficient statistics as returned by
        :meth:`~base._BaseHMM._initialize_sufficient_statistics`.
        Mutated in place.
    X : array, shape (n_samples, n_features)
        Sample sequence.
    framelogprob : array, shape (n_samples, n_components)
        Log-probabilities of each sample under each of the model states.
    posteriors : array, shape (n_samples, n_components)
        Posterior probabilities of each sample being generated by each
        of the model states.
    fwdlattice, bwdlattice : array, shape (n_samples, n_components)
        Log-forward and log-backward probabilities.
    """
    stats['nobs'] += 1
    if 's' in self.params:
        # Start-probability statistics: posterior of the first observation.
        stats['start'] += posteriors[0]
    if 't' in self.params:
        n_samples, n_components = framelogprob.shape
        # when the sample is of length 1, it contains no transitions
        # so there is no reason to update our trans. matrix estimate
        if n_samples <= 1:
            return
        # log_xi_sum accumulates, in log space, the expected transition
        # counts; filled in C by _hmmc for numerical stability.
        log_xi_sum = np.full((n_components, n_components), -np.inf)
        _hmmc._compute_log_xi_sum(n_samples, n_components, fwdlattice, log_mask_zero(self.transmat_), bwdlattice, framelogprob, log_xi_sum)
        with np.errstate(under="ignore"):
            # exp() of very negative logs may underflow to 0, which is the
            # intended result here -- silence the warning.
            stats['trans'] += np.exp(log_xi_sum)
def get(cls, action, suffix=None):
    """Get or register a handler for the given action.

    :param func action: Callback that is called when invoking the Handler
    :param func suffix: Optional suffix for the handler's ID
    """
    action_id = _action_id(action, suffix)
    handler = cls._HANDLERS.get(action_id)
    if handler is None:
        if LOG_OPTS['register']:
            hookenv.log('Registering reactive handler for %s' % _short_action_id(action, suffix), level=hookenv.DEBUG)
        handler = cls(action, suffix)
        cls._HANDLERS[action_id] = handler
    return handler
def find_or_new(self, id, columns=None):
    """Find a model by its primary key or return a new instance of the
    related model.

    :param id: The primary key
    :type id: mixed
    :param columns: The columns to retrieve
    :type columns: list
    :rtype: Collection or Model
    """
    if columns is None:
        columns = ["*"]
    found = self._query.find(id, columns)
    if found is not None:
        return found
    # Not found: build a fresh related instance pre-linked to the parent.
    fresh = self._related.new_instance()
    fresh.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
    return fresh
def rescue(env, identifier):
    """Reboot a virtual server into a rescue image."""
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
    confirmed = env.skip_confirmations or formatting.confirm("This action will reboot this VSI. Continue?")
    if not confirmed:
        raise exceptions.CLIAbort('Aborted')
    vsi.rescue(vs_id)
def raw_mod(opts, name, functions, mod='modules'):
    '''Returns a single module loaded raw and bypassing the __virtual__ function

    .. code-block:: python

        import salt.config
        import salt.loader

        __opts__ = salt.config.minion_config('/etc/salt/minion')
        testmod = salt.loader.raw_mod(__opts__, 'test', None)
        testmod['test.ping']()
    '''
    lazy = LazyLoader(
        _module_dirs(opts, mod, 'module'),
        opts,
        tag='rawmodule',
        virtual_enable=False,
        pack={'__salt__': functions},
    )
    # If the requested module is unknown, return an empty mapping.
    if name not in lazy.file_mapping:
        return {}
    # Load only the one module that was asked for.
    lazy._load_module(name)
    return dict(lazy._dict)
def plot_skyreg(header, data, **kwargs):
    """Plot the sky region defined by a FITS header and a data array.

    header: FITS header
    data: Data array
    Returns the axes and the image artist.
    """
    kwargs.setdefault('cmap', 'binary')
    plt.figure()
    ax = pywcsgrid2.subplot(111, header=header)
    ax.set_ticklabel_type("dms")
    im = ax.imshow(data, origin="center", **kwargs)
    ax.grid()
    # Compasses for both equatorial and galactic frames.
    ax.add_compass(loc=1, coord='fk5')
    ax.add_compass(loc=4, coord='gal')
    return ax, im
def redo(self, channel, image):
    """Update the contents tree highlights when an image is set in a channel.

    Recomputes the highlight key set for the (channel, image) pair and
    applies it either to the single tracked path or per-channel, depending
    on ``self.highlight_tracks_keyboard_focus``.
    """
    # BUGFIX: `image` can be None (no image has the focus) -- the original
    # called image.get() before the None check below, raising AttributeError.
    imname = image.get('name', 'none') if image is not None else 'none'
    chname = channel.name
    # is image in contents tree yet?
    in_contents = self.is_in_contents(chname, imname)
    # get old highlighted entries for this channel -- will be
    # an empty set or one key
    old_highlight = channel.extdata.contents_old_highlight
    # calculate new highlight keys -- again, an empty set or one key
    if image is not None:
        key = self._get_hl_key(chname, image)
        new_highlight = set([key])
    else:
        # no image has the focus
        new_highlight = set([])
    if self.highlight_tracks_keyboard_focus:
        # Only highlights active image in the current channel
        if in_contents:
            self.update_highlights(self._hl_path, new_highlight)
        self._hl_path = new_highlight
    else:
        # Highlight all active images in all channels
        if in_contents:
            self.update_highlights(old_highlight, new_highlight)
        channel.extdata.contents_old_highlight = new_highlight
    return True
def tile_and_reflect(input):
    """Return a 3x3 tiled copy of *input* whose outer tiles are reflections.

    The centre tile is the original array; the side tiles are mirrored
    left-right and the top/bottom tiles are mirrored up-down, giving
    continuous values across every tile boundary (useful for filtering
    without edge effects).

    Adapted from https://github.com/nicjhan/gaussian-filter
    """
    rows, cols = input.shape
    tiled = np.tile(input, (3, 3))
    # Mirror the left and right columns of tiles horizontally.
    for band in range(3):
        r0, r1 = band * rows, (band + 1) * rows
        tiled[r0:r1, 0:cols] = np.fliplr(tiled[r0:r1, 0:cols])
        tiled[r0:r1, -cols:] = np.fliplr(tiled[r0:r1, -cols:])
    # Mirror the top and bottom rows of tiles vertically.
    for band in range(3):
        c0, c1 = band * cols, (band + 1) * cols
        tiled[0:rows, c0:c1] = np.flipud(tiled[0:rows, c0:c1])
        tiled[-rows:, c0:c1] = np.flipud(tiled[-rows:, c0:c1])
    # Sanity checks: the centre tile is untouched, and each edge of the
    # centre tile matches the neighbouring border row/column (clockwise
    # from the top).  Can be checked visually by plotting `tiled`.
    assert (np.array_equal(input, tiled[rows:2 * rows, cols:2 * cols]))
    assert (np.array_equal(input[0, :], tiled[rows - 1, cols:2 * cols]))
    assert (np.array_equal(input[:, -1], tiled[rows:2 * rows, 2 * cols]))
    assert (np.array_equal(input[-1, :], tiled[2 * rows, cols:2 * cols]))
    assert (np.array_equal(input[:, 0], tiled[rows:2 * rows, cols - 1]))
    return tiled
def _add_custom_headers(self, dct):
    """Add the Client-ID header required by Cloud Queues"""
    client_id = self.client_id
    if client_id is None:
        # Fall back to the environment; cache the result on the instance.
        client_id = os.environ.get("CLOUD_QUEUES_ID")
        self.client_id = client_id
    if client_id:
        dct["Client-ID"] = client_id
def get_task_quota(self, task_name):
    """Get queueing quota info for the given task.

    Note that at least 30 seconds should elapse between two calls,
    otherwise an empty dict is returned.

    :param task_name: name of the task
    :return: quota info in dict format
    """
    query = OrderedDict([('instancequota', ''), ('taskname', task_name)])
    response = self._client.get(self.resource(), params=query)
    return json.loads(response.text)
def assertJsonContains(jsonStr=None, key=None, message=None):
    """Assert that jsonStr contains key.

    :param jsonStr: Json as string
    :param key: Key to look for
    :param message: Failure message
    :raises: TestStepFail if key is not in jsonStr, if loading jsonStr
        to a dictionary fails, or if jsonStr is None.
    """
    if jsonStr is None:
        raise TestStepFail(format_message(message) if message is not None else "Json string is empty")
    try:
        data = json.loads(jsonStr)
    except (TypeError, ValueError) as e:
        raise TestStepFail(format_message(message) if message is not None else "Unable to parse json " + str(e))
    if key not in data:
        raise TestStepFail(format_message(message) if message is not None else "Assert: " "Key : %s is not " "in : %s" % (str(key), str(jsonStr)))
def database_set_properties(object_id, input_params=None, always_retry=True, **kwargs):
    """Invokes the /database-xxxx/setProperties API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties

    :param object_id: the database object ID
    :param input_params: request payload; defaults to an empty dict
    :param always_retry: whether the request may be safely retried
    """
    # BUGFIX: avoid the mutable-default-argument pitfall ({} shared
    # across calls); use None as the sentinel instead.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs)
def get_atoms(self, inc_alt_states=False):
    """Return all atoms in the `Monomer`.

    Parameters
    ----------
    inc_alt_states : bool, optional
        If `True`, atoms from alternate states are chained in as well,
        ordered by state key.
    """
    if not inc_alt_states:
        return self.atoms.values()
    state_dicts = [state for _, state in sorted(self.states.items())]
    return itertools.chain(*[sd.values() for sd in state_dicts])
def _get_bottom_line_color(self):
    """Return the bottom border colour as an RGB tuple of floats in [0, 1]."""
    packed = self.cell_attributes[self.key]["bordercolor_bottom"]
    return tuple(channel / 255.0 for channel in color_pack2rgb(packed))
def _parse_message(self, data):
    """Interpret each message datagram from device and do the needful.

    This function receives datagrams from _assemble_buffer and interprets
    what they mean.  It's responsible for maintaining the internal state
    table for each device attribute and also for firing the update_callback
    function (if one was supplied).
    """
    recognized = False
    newdata = False
    # Error responses from the device: log and mark as handled.
    if data.startswith('!I'):
        self.log.warning('Invalid command: %s', data[2:])
        recognized = True
    elif data.startswith('!R'):
        self.log.warning('Out-of-range command: %s', data[2:])
        recognized = True
    elif data.startswith('!E'):
        self.log.warning('Cannot execute recognized command: %s', data[2:])
        recognized = True
    elif data.startswith('!Z'):
        self.log.warning('Ignoring command for powered-off zone: %s', data[2:])
        recognized = True
    else:
        # Attribute update: find the first LOOKUP prefix the datagram matches.
        for key in LOOKUP:
            if data.startswith(key):
                recognized = True
                value = data[len(key):]
                oldvalue = getattr(self, '_' + key)
                if oldvalue != value:
                    changeindicator = 'New Value'
                    newdata = True
                else:
                    changeindicator = 'Unchanged'
                # NOTE(review): this check is always true inside the loop
                # over LOOKUP; kept as-is from the original.
                if key in LOOKUP:
                    if 'description' in LOOKUP[key]:
                        if value in LOOKUP[key]:
                            self.log.info('%s: %s (%s) -> %s (%s)', changeindicator, LOOKUP[key]['description'], key, LOOKUP[key][value], value)
                        else:
                            self.log.info('%s: %s (%s) -> %s', changeindicator, LOOKUP[key]['description'], key, value)
                    else:
                        self.log.info('%s: %s -> %s', changeindicator, key, value)
                # Cache the new attribute value on the instance.
                setattr(self, '_' + key, value)
                # Zone-1 power transitions: on power-on, schedule a full
                # attribute refresh shortly after.
                if key == 'Z1POW' and value == '1' and oldvalue == '0':
                    self.log.info('Power on detected, refreshing all attributes')
                    self._poweron_refresh_successful = False
                    self._loop.call_later(1, self.poweron_refresh)
                if key == 'Z1POW' and value == '0' and oldvalue == '1':
                    self._poweron_refresh_successful = False
                break
    if data.startswith('ICN'):
        # Input-count notification: rebuild the set of known inputs.
        self.log.warning('ICN update received')
        recognized = True
        # NOTE(review): `value` here is the remainder bound in the LOOKUP
        # loop above; if no LOOKUP key matches an 'ICN...' datagram this
        # would raise NameError -- presumably 'ICN' is a LOOKUP prefix;
        # confirm against LOOKUP's definition.
        self._populate_inputs(int(value))
    if data.startswith('ISN'):
        # Input-name notification: "ISN<2-digit number><name>".
        recognized = True
        self._poweron_refresh_successful = True
        input_number = int(data[3:5])
        value = data[5:]
        oldname = self._input_names.get(input_number, '')
        if oldname != value:
            # Maintain both directions of the input number<->name mapping.
            self._input_numbers[value] = input_number
            self._input_names[input_number] = value
            self.log.info('New Value: Input %d is called %s', input_number, value)
            newdata = True
    if newdata:
        # Notify the consumer (if any) that state changed.
        if self._update_callback:
            self._loop.call_soon(self._update_callback, data)
    else:
        self.log.debug('no new data encountered')
    if not recognized:
        self.log.warning('Unrecognized response: %s', data)
def get(self, session, discount_id=None, ext_fields=None):
    '''taobao.fenxiao.discounts.get -- fetch discount information.

    Queries distributor discount details via the Taobao TOP API.

    :param session: authorized TOP session key
    :param discount_id: optional discount id to filter by
    :param ext_fields: optional extra fields to request
    :return: the populated ``self.discounts``
    '''
    request = TOPRequest('taobao.fenxiao.discounts.get')
    # Use identity comparison with None instead of `!= None` (PEP 8).
    if discount_id is not None:
        request['discount_id'] = discount_id
    if ext_fields is not None:
        request['ext_fields'] = ext_fields
    self.create(self.execute(request, session))
    return self.discounts
def denoise(self, bitthresh=0.5):
    """m.denoise(bitthresh=0.5) -- Set low-information positions (below bitthresh) to Ns"""
    for pos in range(self.width):
        info_bits = 0
        for letter in ACGT:
            # Probability of this letter at this position.
            if self.logP:
                prob = pow(2.0, self.logP[pos][letter])
            else:
                prob = pow(2.0, self.ll[pos][letter]) * self.background[letter]
            # Only letters with non-negligible probability contribute.
            if prob > 0.01:
                info_bits = info_bits + prob * self.ll[pos][letter]
        if info_bits < bitthresh:
            # Zero out the whole column (turns the position into an N).
            for letter in ACGT:
                self.ll[pos][letter] = 0.0
    self.compute_from_ll(self.ll)
def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, dep_predicate=None):
    """Returns the transitive dependency closure of `addresses` using BFS.

    :API: public

    :param list<Address> addresses: The closure of `addresses` will be walked.
    :param function predicate: If given, any Target which fails the predicate
        will not be walked, nor will its dependencies -- effectively trimming
        any subgraph only reachable through failing Targets.
    :param function dep_predicate: Takes (current target, dependency target);
        if given and it fails, the edge to that dependency is not expanded.
    """
    walk = self._walk_factory(dep_predicate)
    closure = OrderedSet()
    queue = deque((0, address) for address in addresses)
    while queue:
        depth, address = queue.popleft()
        if not walk.expand_once(address, depth):
            continue
        target = self._target_by_address[address]
        if predicate and not predicate(target):
            continue
        if walk.do_work_once(address):
            closure.add(target)
        for dep_address in self._target_dependencies_by_address[address]:
            if walk.expanded_or_worked(dep_address):
                continue
            if walk.dep_predicate(target, self._target_by_address[dep_address], depth):
                queue.append((depth + 1, dep_address))
    return closure
def update_node(self, char, node, patch):
    """Change a node's stats according to a dictionary.

    The ``patch`` dictionary should hold the new values of stats, keyed by
    the stats' names.  Passing ``None`` as the patch deletes the node.
    """
    character = self._real.character[char]
    if patch is None:
        del character.node[node]
    elif node in character.node:
        character.node[node].update(patch)
    else:
        # Node does not exist yet: the patch becomes its initial stats.
        character.node[node] = patch
def _filter_index_pages(docnames, base_dir):
    """Yield only document names of the form ``<base_dir>/<name>/index``.

    Parameters
    ----------
    docnames : list of str
        List of document names (``env.found_docs``).
    base_dir : str
        Base directory of all sub-directories containing index pages.

    Yields
    ------
    docname : str
        Document name that matches the pattern.
    """
    for docname in docnames:
        segments = docname.split('/')
        if len(segments) == 3 and segments[0] == base_dir and segments[-1] == 'index':
            yield docname
def set_runtime_value_bool(self, resourceid: int, value: bool) -> bool:
    """Set a boolean runtime value; returns True on confirmed success."""
    boolvalue = "true" if value else "false"
    payload = """
        <setResourceValue1 xmlns=\"utcs\"
        xmlns:i=\"http://www.w3.org/2001/XMLSchema-instance\">
        <value i:type=\"a:WSBooleanValue\" xmlns:a=\"utcs.values\">
        <a:value>{value}</a:value></value>
        <typeString/>
        <resourceID>{id}</resourceID>
        <isValueRuntime>true</isValueRuntime>
        </setResourceValue1>
        """.format(id=resourceid, value=boolvalue)
    xdoc = self.connection.soap_action('/ws/ResourceInteractionService', 'setResourceValue', payload)
    if not xdoc:
        return False
    result = xdoc.find(r'./SOAP-ENV:Body/ns1:setResourceValue2', IHCSoapClient.ihcns).text
    return result == "true"
def filter(self, datax, datay):
    """Return a boolean mask of (datax, datay) points inside ``self.points``.

    The mask is inverted when ``self.inverted`` is set.
    """
    inside = np.ones(datax.shape, dtype=bool)
    for idx, point in enumerate(zip(datax, datay)):
        inside[idx] = PolygonFilter.point_in_poly(point, self.points)
    if self.inverted:
        # In-place logical NOT of the mask.
        np.invert(inside, inside)
    return inside
def add(self, name, obj=None):
    """Append the view named `name` (or the repr of `obj`) to the report text."""
    if obj:
        text = '\n::\n\n' + indent(str(obj))
    else:
        text = views.view(name, self.dstore)
    if not text:
        return
    title = self.title[name]
    underline = '-' * len(title)
    self.text += '\n'.join(['\n\n' + title, underline, text])
def linearize_data_types(self):  # type: () -> typing.List[UserDefined]
    """Return all data types used in this namespace, parents first.

    Because data-type inheritance forms a DAG, the result is a
    linearization of that DAG: every composite type appears after its
    parent, so generated code can define types in this order without
    forward references.
    """
    ordered = []
    visited = set()  # type: typing.Set[UserDefined]

    def visit(data_type):  # type: (UserDefined) -> None
        if data_type in visited:
            return
        if data_type.namespace != self:
            # Only types defined in this namespace are of interest.
            return
        if is_composite_type(data_type) and data_type.parent_type:
            # Emit the parent before the child.
            visit(data_type.parent_type)
        ordered.append(data_type)
        visited.add(data_type)

    for data_type in self.data_types:
        visit(data_type)
    return ordered
def scan2(self, tablename, expr_values=None, alias=None, attributes=None, consistent=False, select=None, index=None, limit=None, return_capacity=None, filter=False, segment=None, total_segments=None, exclusive_start_key=None, **kwargs):
    """Perform a full-table scan.

    See the DynamoDB API for parameter semantics:
    http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html

    Parameters
    ----------
    tablename : str
        Name of the table to scan.
    expr_values : dict, optional
        ExpressionAttributeValues; if omitted, **kwargs are used instead
        (each key automatically prefixed with ':').
    alias : dict, optional
        ExpressionAttributeNames.
    attributes : str or list, optional
        ProjectionExpression; a list is joined with commas.
    consistent : bool, optional
        Perform a strongly consistent read (default False).
    select : str, optional
        Select mode; COUNT returns only the item count.
    index : str, optional
        Name of the index to scan.
    limit : int, optional
        Maximum number of items to return.
    return_capacity : {NONE, INDEXES, TOTAL}, optional
        Consumed-capacity reporting level (default NONE).
    filter : str, optional
        FilterExpression.
    segment : int, optional
        Parallel-scan thread id; requires total_segments.
    total_segments : int, optional
        Total number of parallel-scan threads.
    exclusive_start_key : dict, optional
        ExclusiveStartKey to resume a previous scan.

    Examples
    --------
    .. code-block:: python

        connection.scan2('mytable', filter='contains(tags, :search)', search='text')
        connection.scan2('mytable', filter='id = :id', expr_values={':id': 'dsa'})
    """
    scan_kwargs = {
        'TableName': tablename,
        'ReturnConsumedCapacity': self._default_capacity(return_capacity),
        'ConsistentRead': consistent,
    }
    expr_attr_values = build_expression_values(self.dynamizer, expr_values, kwargs)
    if expr_attr_values:
        scan_kwargs['ExpressionAttributeValues'] = expr_attr_values
    if attributes is not None:
        projection = attributes if isinstance(attributes, six.string_types) else ', '.join(attributes)
        scan_kwargs['ProjectionExpression'] = projection
    if index is not None:
        scan_kwargs['IndexName'] = index
    if alias:
        scan_kwargs['ExpressionAttributeNames'] = alias
    if select:
        scan_kwargs['Select'] = select
    if filter:
        scan_kwargs['FilterExpression'] = filter
    if segment is not None:
        scan_kwargs['Segment'] = segment
    if total_segments is not None:
        scan_kwargs['TotalSegments'] = total_segments
    if exclusive_start_key is not None:
        scan_kwargs['ExclusiveStartKey'] = self.dynamizer.maybe_encode_keys(exclusive_start_key)
    # Normalize the limit into a Limit object.
    if not isinstance(limit, Limit):
        limit = Limit(limit)
    if select == COUNT:
        return self._count('scan', limit, scan_kwargs)
    return ResultSet(self, limit, 'scan', **scan_kwargs)
def inject_func_as_unbound_method(class_, func, method_name=None):
    """Attach *func* to *class_* as a method.

    :param class_: class object to attach the function to.
    :param func: plain function to inject.
    :param method_name: attribute name to use; when ``None`` the name is
        derived via ``get_funcname(func)``.
    """
    attr_name = get_funcname(func) if method_name is None else method_name
    setattr(class_, attr_name, func)
def _set_traffic_class_dscp(self, v, load=False):
    """Setter method for traffic_class_dscp, mapped from YANG variable
    /qos/map/traffic_class_dscp (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_traffic_class_dscp is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_traffic_class_dscp() directly.
    """
    # Unwrap values that carry their own YANG coercion helper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the generated YANG list type; a TypeError/ValueError
        # here means the supplied value is not compatible with the list schema.
        t = YANGDynClass(v, base=YANGListType("name", traffic_class_dscp.traffic_class_dscp, yang_name="traffic-class-dscp", rest_name="traffic-class-dscp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-DSCP map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_dscp_map', u'cli-mode-name': u'traffic-class-dscp-$(name)'}}), is_container='list', yang_name="traffic-class-dscp", rest_name="traffic-class-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-DSCP map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_dscp_map', u'cli-mode-name': u'traffic-class-dscp-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-type description so the caller can see
        # what schema the value failed to match.
        raise ValueError({'error-string': """traffic_class_dscp must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",traffic_class_dscp.traffic_class_dscp, yang_name="traffic-class-dscp", rest_name="traffic-class-dscp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-DSCP map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_dscp_map', u'cli-mode-name': u'traffic-class-dscp-$(name)'}}), is_container='list', yang_name="traffic-class-dscp", rest_name="traffic-class-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-DSCP map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_dscp_map', u'cli-mode-name': u'traffic-class-dscp-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""", })
    self.__traffic_class_dscp = t
    # Trigger the parent's change-notification hook when it exists.
    if hasattr(self, '_set'):
        self._set()
def sign(self, keys):
    """Sign the current document with the first key in *keys*.

    Warning: any existing signatures are replaced with the new one.
    """
    signing_key = keys[0]
    # NOTE(review): only the last two characters of raw() are signed here —
    # confirm this is the intended payload.
    payload = self.raw()[-2:]
    encoded_sig = base64.b64encode(signing_key.signature(bytes(payload, 'ascii')))
    self.signatures = [encoded_sig.decode("ascii")]
def parse_route_attr_filter(route_attr_filter, debug=False):
    """Parse the raw command-line route-filter input.

    Args:
        route_attr_filter (str): The raw command-line input of the route
            filter.
        debug (bool): Passed through to the parser.
    Returns:
        Tuple[FilterExpr, List[str]]: The second element is a list of errors.
    """
    assert isinstance(route_attr_filter, six.text_type), type(route_attr_filter)
    return FilterExprParser(debug).parse(route_attr_filter)
def run(self):
    """Start and supervise services workers.

    This method starts and supervises all children processes until the
    master process is asked to shut down by a SIGTERM. All spawned
    processes are part of the same unix process group.
    """
    # Tell systemd (when present) that startup completed; only done once.
    self._systemd_notify_once()
    # Supervision happens in a separate thread spawned via _utils.
    self._child_supervisor = _utils.spawn(self._child_supervisor_thread)
    # Block the master process until shutdown is requested.
    self._wait_forever()
def insert_query(connection, publicId, aead, keyhandle, aeadobj):
    """Build an INSERT for the AEAD record and execute it.

    Returns the execution result, or ``None`` when the keyhandle does not
    match the AEAD or when the row already exists (IntegrityError).
    """
    # turn the keyhandle into an integer
    keyhandle = key_handle_to_int(keyhandle)
    if aead.key_handle != keyhandle:
        print("WARNING: keyhandle does not match aead.key_handle")
        return None
    try:
        # creates the query object and inserts it
        query = aeadobj.insert().values(
            public_id=publicId,
            keyhandle=aead.key_handle,
            nonce=aead.nonce,
            aead=aead.data,
        )
        return connection.execute(query)
    except sqlalchemy.exc.IntegrityError:
        # duplicate row: best-effort insert, report as "nothing inserted"
        return None
def rejectionOptionsList(self):
    """Return a sorted list with the rejection-reason options defined in
    bikasetup.

    :return: list of stripped reason strings, sorted by their field key;
             empty list when no reasons are configured.
    """
    plone = getSite()
    settings = plone.bika_setup
    # RejectionReasons will return something like:
    # [{'checkbox': u'on', 'textfield-2': u'b', 'textfield-1': u'c', 'textfield-0': u'a'}]
    if not settings.RejectionReasons:
        return []
    reject_reasons = settings.RejectionReasons[0]
    # The 'checkbox' entry is the enable flag, not a reason; drop it and sort
    # the remaining keys so the values come out in field order.
    sorted_keys = sorted(k for k in reject_reasons if k != 'checkbox')
    # Only the values are needed from here on.
    return [reject_reasons[key].strip() for key in sorted_keys]
def export_to_tf_tensor(self, x, laid_out_x):
    """Turn a Tensor into a tf.Tensor.

    Args:
      x: a Tensor
      laid_out_x: a LaidOutTensor
    Returns:
      a tf.Tensor
    """
    # Reassemble the distributed slices into one tensor with x's shape.
    return self.combine_slices(laid_out_x.all_slices, x.shape)
async def digital_write(self, command):
    """Write a zero or one to a digital pin.

    :param command: {"method": "digital_write", "params": [PIN, DIGITAL_DATA_VALUE]}
    :returns: No return message.
    """
    pin_number, pin_state = int(command[0]), int(command[1])
    await self.core.digital_write(pin_number, pin_state)
def _match ( self , kind ) :
"""The ' match ' primitive of RD parsers .
* Verifies that the current token is of the given kind ( kind can
be a tuple , in which the kind must match one of its members ) .
* Returns the value of the current token
* Reads in the next token""" | if ( isinstance ( kind , tuple ) and self . cur_token . kind in kind or self . cur_token . kind == kind ) :
value = self . cur_token . value
self . _advance ( )
return value
else :
raise ASDLSyntaxError ( 'Unmatched {} (found {})' . format ( kind , self . cur_token . kind ) , self . cur_token . lineno ) |
def current():
    """Returns the current environment manager for the projex system.

    Lazily instantiated on first call. The class used may be overridden via
    the PROJEX_ENVMGR_MODULE / PROJEX_ENVMGR_CLASS environment variables
    (optionally extending sys.path with PROJEX_ENVMGR_PATH); on any load
    failure it falls back to the EnvManager base class.

    :return     <EnvManager>
    """
    if not EnvManager._current:
        path = os.environ.get('PROJEX_ENVMGR_PATH')
        module = os.environ.get('PROJEX_ENVMGR_MODULE')
        clsname = os.environ.get('PROJEX_ENVMGR_CLASS')
        cls = EnvManager
        if module and clsname:
            # check if the user specified an import path
            if path:
                logger.info('Adding env manager path: %s' % path)
                sys.path.insert(0, path)
            logger.info('Loading env manager: %s.%s' % (module, clsname))
            try:
                __import__(module)
                mod = sys.modules[module]
                cls = getattr(mod, clsname)
            except ImportError:
                logger.error('Could not import env manager %s', module)
            except KeyError:
                # __import__ succeeded but the module never landed in sys.modules
                logger.error('Could not import env manager %s', module)
            except AttributeError:
                msg = '%s is not a valid class of %s' % (clsname, module)
                logger.error(msg)
        # cls is EnvManager unless a custom class loaded successfully above
        EnvManager._current = cls()
    return EnvManager._current
def scheme_specification(cls):
    """:meth:`.WSchemeHandler.scheme_specification` method implementation

    Declares the 'file' URI scheme, whose only (optional) component is
    the path.
    """
    return WSchemeSpecification('file', WURIComponentVerifier(WURI.Component.path, WURIComponentVerifier.Requirement.optional))
def move_users(self, user_id_list, group_id):
    """Move a batch of users into a group.

    :param user_id_list: list of user openids (at most 50 entries).
    :param group_id: target group id.
    :return: the parsed JSON response.
    """
    payload = {
        "openid_list": user_id_list,
        "to_groupid": group_id,
    }
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/groups/members/batchupdate",
        data=payload,
    )
def parse_version(ver, pre=False):
    """Parse a version string into a comparable ``Version`` tuple.

    :param ver: version string; matched against the module-level ``RE_VER``.
    :param pre: legacy parameter kept for interface compatibility; its value
                is unconditionally overwritten below and never read.
    :return: ``Version(major, minor, micro, release, pre, post, dev)``.
    """
    m = RE_VER.match(ver)
    # NOTE(review): a non-matching string leaves ``m`` as None and raises
    # AttributeError below — confirm callers pre-validate ``ver``.
    # Handle major, minor, micro
    major = int(m.group('major'))
    minor = int(m.group('minor')) if m.group('minor') else 0
    micro = int(m.group('micro')) if m.group('micro') else 0
    # Handle pre releases
    if m.group('type'):
        release = PRE_REL_MAP[m.group('type')]
        pre = int(m.group('pre'))
    else:
        release = "final"
        pre = 0
    # Handle development releases. (A previous pre-assignment of ``dev`` here
    # was dead code: both branches below always overwrite it.)
    if m.group('dev'):
        dev = int(m.group('dev'))
        release = '.dev-' + release if pre else '.dev'
    else:
        dev = 0
    # Handle post releases
    post = int(m.group('post')) if m.group('post') else 0
    return Version(major, minor, micro, release, pre, post, dev)
def patch_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
    """partially update status of the specified HorizontalPodAutoscaler

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the HorizontalPodAutoscaler (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes.
    :param bool force: Force is going to "force" Apply requests.
    :return: V2beta1HorizontalPodAutoscaler
             If the method is called asynchronously, returns the request thread.
    """
    # Callers of this wrapper always want only the deserialized data.
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info
    if kwargs.get('async_req'):
        # Async mode: hand back the thread; caller retrieves via .get().
        return delegate(name, namespace, body, **kwargs)
    return delegate(name, namespace, body, **kwargs)
def vb_list_machines(**kwargs):
    '''Which machines does the hypervisor have

    @param kwargs: Passed to vb_xpcom_to_attribute_dict to filter the attributes
    @type kwargs: dict
    @return: Untreated dicts of the machines known to the hypervisor
    @rtype: [{}]
    '''
    hypervisor = vb_get_manager()
    machines = hypervisor.getArray(vb_get_box(), 'machines')
    attribute_dicts = []
    for machine in machines:
        attribute_dicts.append(
            vb_xpcom_to_attribute_dict(machine, 'IMachine', **kwargs))
    return attribute_dicts
def libvlc_video_get_track_description(p_mi):
    '''Get the description of available video tracks.
    @param p_mi: media player.
    @return: list with description of available video tracks, or NULL on error.
    '''
    # Reuse the cached C entry point when available; otherwise bind it via
    # _Cfunction (one in-parameter, returns TrackDescription pointer).
    f = _Cfunctions.get('libvlc_video_get_track_description', None) or _Cfunction('libvlc_video_get_track_description', ((1,),), None, ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def trace_method(cls, method, tracer=tracer):
    """Traces given class method using given tracer.

    :param cls: Class of the method.
    :type cls: object
    :param method: Method to trace.
    :type method: object
    :param tracer: Tracer.
    :type tracer: object
    :return: Definition success.
    :rtype: bool
    """
    # Skip methods that are already traced or explicitly marked untracable.
    if is_traced(method):
        return False
    name = get_method_name(method)
    if is_untracable(method) or name in UNTRACABLE_NAMES:
        return False
    # Re-wrap the method, preserving its classmethod/staticmethod nature.
    if is_class_method(method):
        wrapped = classmethod(tracer(method.im_func))
    elif is_static_method(method):
        wrapped = staticmethod(tracer(method))
    else:
        wrapped = tracer(method)
    setattr(cls, name, wrapped)
    return True
def add_load_constant(self, name, output_name, constant_value, shape):
    """Add a load constant layer.

    Parameters
    ----------
    name: str
        The name of this layer.
    output_name: str
        The output blob name of this layer.
    constant_value: numpy.array
        Value of the constant as a numpy array.
    shape: [int]
        List of ints representing the shape of the constant.
        Must be of length 3: [C, H, W].

    Raises
    ------
    ValueError
        If ``shape`` is not of length 3, or its element count does not match
        ``constant_value``.

    See Also
    --------
    add_elementwise
    """
    # Validate before touching the spec so a bad call cannot leave a
    # half-built layer behind (previously the spec was mutated first and the
    # error raised afterwards).
    if len(shape) != 3:
        raise ValueError("'shape' must be of length 3")
    if constant_value.size != np.prod(shape):
        raise ValueError("Dimensions of 'shape' do not match the size of the provided constant")
    nn_spec = self.nn_spec
    # Add a new layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.loadConstant
    spec_layer_params.data.floatValue.extend(map(float, constant_value.flatten()))
    spec_layer_params.shape.extend(shape)
def extract_queries(self, args, kwargs):
    '''This function normalizes the config block into a set of queries we
    can use. The return is a list of consistently laid out dicts.'''
    # Pure delegation to the SQL base class (py2-style explicit super call).
    return super(POSTGRESExtPillar, self).extract_queries(args, kwargs)
def _valid(m, comment=VALID_RESPONSE, out=None):
    '''Return valid status.'''
    # Thin wrapper: mark the result container as passing with the given
    # comment/output.
    return _set_status(m, status=True, comment=comment, out=out)
def update_tags(self, idlist, tags_add=None, tags_remove=None):
    """Updates the 'tags' field for a bug."""
    tag_actions = {}
    if tags_add:
        tag_actions["add"] = self._listify(tags_add)
    if tags_remove:
        tag_actions["remove"] = self._listify(tags_remove)
    update = {
        "ids": self._listify(idlist),
        "tags": tag_actions,
    }
    return self._proxy.Bug.update_tags(update)
def circular_references(self) -> Set[str]:
    """Return the set of recursive (circular) references.

    A grammar element is circular when it appears in its own dependency
    closure.

    :return: set of element names
    """
    return {name for name in self.grammarelts.keys()
            if name in self.dependency_closure(name)}
def param_logx_diagram(run_list, **kwargs):
    """Creates diagrams of a nested sampling run's evolution as it iterates
    towards higher likelihoods, expressed as a function of log X, where X(L)
    is the fraction of the prior volume with likelihood greater than some
    value L.

    For a more detailed description and some example use cases, see
    'nestcheck: diagnostic tests for nested sampling calculations'
    (Higson et al. 2019).

    Parameters
    ----------
    run_list: dict or list of dicts
        Nested sampling run(s) to plot.
    fthetas: list of functions, optional
        Quantities to plot. Each must map a 2d theta array to 1d ftheta
        array - i.e. map every sample's theta vector (every row) to a scalar
        quantity. E.g. use lambda x: x[:, 0] to plot the first parameter.
    labels: list of strs, optional
        Labels for each ftheta.
    ftheta_lims: dict, optional
        Plot limits for each ftheta.
    threads_to_plot: list of ints, optional
        Thread labels to overplot as lines on the scatter plots
        (default [0]).
    plot_means: bool, optional
        Should the mean value of each ftheta be plotted?
    n_simulate: int, optional
        Number of bootstrap replications to use for the fgivenx
        distributions.
    random_seed: int, optional
        Seed to make sure results are consistent and fgivenx caching can be
        used.
    logx_min: float, optional
        Lower limit of logx axis.
    figsize: tuple, optional
        Matplotlib figure size (in inches).
    colors: list of strs, optional
        Colors to plot run scatter plots with.
    colormaps: list of strs, optional
        Colormaps to plot run fgivenx plots with.
    npoints: int, optional
        How many points to have in the logx array used to calculate and plot
        analytical weights.
    cache: str or None
        Root for fgivenx caching (no caching if None).
    parallel: bool, optional
        fgivenx parallel option.
    point_size: float, optional
        Size of markers on scatter plot (in pts).
    thin: float, optional
        Factor by which to reduce the number of samples before plotting the
        scatter plot. Must be in half-closed interval (0, 1].
    rasterize_contours: bool, optional
        fgivenx rasterize_contours option.
    tqdm_kwargs: dict, optional
        Keyword arguments to pass to the tqdm progress bar when it is used
        in fgivenx while plotting contours.

    Returns
    -------
    fig: matplotlib figure
    """
    fthetas = kwargs.pop('fthetas', [lambda theta: theta[:, 0],
                                     lambda theta: theta[:, 1]])
    labels = kwargs.pop('labels', [r'$\theta_' + str(i + 1) + '$'
                                   for i in range(len(fthetas))])
    ftheta_lims = kwargs.pop('ftheta_lims', [[-1, 1]] * len(fthetas))
    threads_to_plot = kwargs.pop('threads_to_plot', [0])
    plot_means = kwargs.pop('plot_means', True)
    n_simulate = kwargs.pop('n_simulate', 100)
    random_seed = kwargs.pop('random_seed', 0)
    logx_min = kwargs.pop('logx_min', None)
    figsize = kwargs.pop('figsize', (6.4, 2 * (1 + len(fthetas))))
    colors = kwargs.pop('colors', ['red', 'blue', 'grey', 'green', 'orange'])
    colormaps = kwargs.pop('colormaps', ['Reds_r', 'Blues_r', 'Greys_r',
                                         'Greens_r', 'Oranges_r'])
    # Options for fgivenx
    cache_in = kwargs.pop('cache', None)
    parallel = kwargs.pop('parallel', True)
    rasterize_contours = kwargs.pop('rasterize_contours', True)
    point_size = kwargs.pop('point_size', 0.2)
    thin = kwargs.pop('thin', 1)
    npoints = kwargs.pop('npoints', 100)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'disable': True})
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if not isinstance(run_list, list):
        run_list = [run_list]
    # Use random seed to make samples consistent and allow caching.
    # To avoid fixing seed use random_seed=None
    state = np.random.get_state()  # save initial random state
    np.random.seed(random_seed)
    if not plot_means:
        mean_colors = [None] * len(colors)
    else:
        mean_colors = ['dark' + col for col in colors]
    nlogx = npoints
    ny_posterior = npoints
    assert len(fthetas) == len(labels)
    assert len(fthetas) == len(ftheta_lims)
    thread_linestyles = ['-', '-.', ':']
    # make figure
    fig, axes = plt.subplots(nrows=1 + len(fthetas), ncols=2,
                             figsize=figsize,
                             gridspec_kw={'wspace': 0, 'hspace': 0,
                                          'width_ratios': [15, 40]})
    # make colorbar axes in top left corner
    axes[0, 0].set_visible(False)
    divider = mpl_toolkits.axes_grid1.make_axes_locatable(axes[0, 0])
    colorbar_ax_list = []
    for i in range(len(run_list)):
        colorbar_ax_list.append(divider.append_axes("left", size=0.05,
                                                    pad=0.05))
    # Reverse color bar axis order so when an extra run is added the other
    # colorbars stay in the same place
    colorbar_ax_list = list(reversed(colorbar_ax_list))
    # plot runs in reverse order to put the first run on top
    for nrun, run in reversed(list(enumerate(run_list))):
        # Weight Plot
        ax_weight = axes[0, 1]
        ax_weight.set_ylabel('posterior\nmass')
        samples = np.zeros((n_simulate, run['nlive_array'].shape[0] * 2))
        for i in range(n_simulate):
            logx_temp = nestcheck.ns_run_utils.get_logx(
                run['nlive_array'], simulate=True)[::-1]
            logw_rel = logx_temp + run['logl'][::-1]
            w_rel = np.exp(logw_rel - logw_rel.max())
            w_rel /= np.trapz(w_rel, x=logx_temp)
            # interleave (logx, weight) pairs for fgivenx's interp helper
            samples[i, ::2] = logx_temp
            samples[i, 1::2] = w_rel
        if logx_min is None:
            logx_min = samples[:, 0].min()
        logx_sup = np.linspace(logx_min, 0, nlogx)
        try:
            cache = cache_in + '_' + str(nrun) + '_weights'
        except TypeError:
            # cache_in is None: caching disabled
            cache = None
        interp_alt = functools.partial(alternate_helper, func=np.interp)
        y, pmf = fgivenx.drivers.compute_pmf(
            interp_alt, logx_sup, samples, cache=cache, ny=npoints,
            parallel=parallel, tqdm_kwargs=tqdm_kwargs)
        cbar = fgivenx.plot.plot(
            logx_sup, y, pmf, ax_weight,
            rasterize_contours=rasterize_contours,
            colors=plt.get_cmap(colormaps[nrun]))
        ax_weight.set_xlim([logx_min, 0])
        ax_weight.set_ylim(bottom=0)
        ax_weight.set_yticks([])
        ax_weight.set_xticklabels([])
        # color bar plot
        colorbar_plot = plt.colorbar(cbar, cax=colorbar_ax_list[nrun],
                                     ticks=[1, 2, 3])
        colorbar_ax_list[nrun].yaxis.set_ticks_position('left')
        colorbar_plot.solids.set_edgecolor('face')
        colorbar_plot.ax.set_yticklabels([])
        if nrun == 0:
            colorbar_plot.ax.set_yticklabels(
                [r'$1\sigma$', r'$2\sigma$', r'$3\sigma$'])
        # samples plot
        logx = nestcheck.ns_run_utils.get_logx(
            run['nlive_array'], simulate=False)
        scatter_x = logx
        scatter_theta = run['theta']
        if thin != 1:
            assert 0 < thin <= 1, (
                'thin={} should be in the half-closed interval(0, 1]'.format(
                    thin))
            # NOTE(review): rebinding ``state`` here clobbers the initial
            # random state saved at the top of the function, so the final
            # np.random.set_state(state) no longer restores the caller's
            # original state when thinning is used — confirm intent.
            state = np.random.get_state()  # save initial random state
            np.random.seed(random_seed)
            inds = np.where(np.random.random(logx.shape) <= thin)[0]
            np.random.set_state(state)  # return to original random state
            scatter_x = logx[inds]
            scatter_theta = run['theta'][inds, :]
        for nf, ftheta in enumerate(fthetas):
            ax_samples = axes[1 + nf, 1]
            ax_samples.scatter(scatter_x, ftheta(scatter_theta),
                               s=point_size, color=colors[nrun])
            if threads_to_plot is not None:
                for i in threads_to_plot:
                    thread_inds = np.where(run['thread_labels'] == i)[0]
                    ax_samples.plot(
                        logx[thread_inds],
                        ftheta(run['theta'][thread_inds]),
                        linestyle=thread_linestyles[nrun],
                        color='black', lw=1)
            ax_samples.set_xlim([logx_min, 0])
            ax_samples.set_ylim(ftheta_lims[nf])
        # Plot posteriors
        posterior_axes = [axes[i + 1, 0] for i in range(len(fthetas))]
        _ = plot_bs_dists(
            run, fthetas, posterior_axes, ftheta_lims=ftheta_lims,
            flip_axes=True, n_simulate=n_simulate,
            rasterize_contours=rasterize_contours, cache=cache_in,
            nx=npoints, ny=ny_posterior, colormap=colormaps[nrun],
            mean_color=mean_colors[nrun], parallel=parallel,
            tqdm_kwargs=tqdm_kwargs)
        # Plot means onto scatter plot
        if plot_means:
            w_rel = nestcheck.ns_run_utils.get_w_rel(run, simulate=False)
            w_rel /= np.sum(w_rel)
            means = [np.sum(w_rel * f(run['theta'])) for f in fthetas]
            for nf, mean in enumerate(means):
                axes[nf + 1, 1].axhline(y=mean, lw=1, linestyle='--',
                                        color=mean_colors[nrun])
    # Format axes
    for nf, ax in enumerate(posterior_axes):
        ax.set_ylim(ftheta_lims[nf])
        ax.invert_xaxis()  # only invert each axis once, not for every run!
    axes[-1, 1].set_xlabel(r'$\log X$')
    # Add labels
    for i, label in enumerate(labels):
        axes[i + 1, 0].set_ylabel(label)
        # Prune final ytick label so it doesn't overlap with next plot
        prune = 'upper' if i != 0 else None
        axes[i + 1, 0].yaxis.set_major_locator(
            matplotlib.ticker.MaxNLocator(nbins=3, prune=prune))
    for _, ax in np.ndenumerate(axes):
        if not ax.is_first_col():
            ax.set_yticklabels([])
        if not (ax.is_last_row() and ax.is_last_col()):
            ax.set_xticks([])
    np.random.set_state(state)  # return to original random state
    return fig
def parse_task(self, task):
    '''Parses a WDL task AST subtree.

    Currently looks at and parses 4 sections:
    1. Declarations (e.g. string x = 'helloworld')
    2. Commandline (a bash command with dynamic variables inserted)
    3. Runtime (docker image; disk; CPU; RAM; etc.)
    4. Outputs (expected return values/files)

    :param task: An AST subtree of a WDL "Task".
    :return: Returns nothing but adds a task to the self.tasks_dictionary
             necessary for much of the parser.
    '''
    task_name = task.attributes["name"].source_string
    # task declarations
    declaration_array = []
    for declaration_subAST in task.attr("declarations"):
        declaration_array.append(self.parse_task_declaration(declaration_subAST))
    self.tasks_dictionary.setdefault(task_name, OrderedDict())['inputs'] = declaration_array
    for section in task.attr("sections"):
        # task commandline entries section [command(s) to run]
        if section.name == "RawCommand":
            command_array = self.parse_task_rawcommand(section)
            self.tasks_dictionary.setdefault(task_name, OrderedDict())['raw_commandline'] = command_array
        # task runtime section (docker image; disk; CPU; RAM; etc.)
        if section.name == "Runtime":
            runtime_dict = self.parse_task_runtime(section.attr("map"))
            self.tasks_dictionary.setdefault(task_name, OrderedDict())['runtime'] = runtime_dict
        # task output filenames section (expected return values/files)
        if section.name == "Outputs":
            output_array = self.parse_task_outputs(section)
            self.tasks_dictionary.setdefault(task_name, OrderedDict())['outputs'] = output_array
def cwd(self, new_path):
    '''Sets the cwd during reads and writes, returning the previous cwd.'''
    previous = self._cwd
    self._cwd = new_path
    return previous
def get_cmd_help(self):
    """Get the single-line help of this command.

    :returns:
        ``self.help``, if defined
    :returns:
        The first line of the docstring, lowercased and without the trailing
        dot, if present.
    :returns:
        None, otherwise
    """
    _missing = object()
    help_text = getattr(self, 'help', _missing)
    if help_text is not _missing:
        return help_text
    try:
        docstring = get_localized_docstring(self, self.get_gettext_domain())
        return docstring.splitlines()[0].rstrip('.').lower()
    except (AttributeError, IndexError, ValueError):
        return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.