| signature (string, length 29–44.1k) | implementation (string, length 0–85.2k) |
|---|---|
def listen(self, addr=None):
    """Wait for a connection/reconnection from a DCC peer.

    Returns the DCCConnection object. The local IP address and port are
    available as ``self.localaddress`` and ``self.localport``. After a
    peer connects, its address and port are available as
    ``self.peeraddress`` and ``self.peerport``.
    """
    self.buffer = buffer.LineBuffer()
    self.handlers = {}
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.passive = True
    # Fall back to this host's primary address and an ephemeral port.
    fallback_addr = (socket.gethostbyname(socket.gethostname()), 0)
    try:
        self.socket.bind(addr or fallback_addr)
        self.localaddress, self.localport = self.socket.getsockname()
        self.socket.listen(10)
    except socket.error as err:
        raise DCCConnectionError("Couldn't bind socket: %s" % err)
    return self
def multi_option(*param_decls, **attrs):
    """Declare a click option that may be given multiple times.

    Appends a note to the option's help text (when one is present) and
    sets ``multiple=True`` before delegating to :func:`click.option`.

    :param param_decls: positional option declarations (e.g. ``'-x'``)
    :param attrs: keyword attributes forwarded to ``click.option``
    :return: the decorator produced by ``click.option``
    """
    help_text = attrs.get('help', None)
    if help_text is not None:
        attrs['help'] = help_text + " (multiple occurrence permitted)"
    attrs['multiple'] = True
    return click.option(*param_decls, **attrs)
def next_joystick_device():
    """Return the first unused ``/dev/input/jsN`` device path.

    Scans indices 0-99 and returns the first path that does not yet
    exist. Implicitly returns ``None`` when all 100 names are taken.
    """
    for index in range(100):
        candidate = "/dev/input/js{0}".format(index)
        if not os.path.exists(candidate):
            return candidate
def formdata_encode(fields):
    """Encode ``fields`` (a dict) as a multipart/form-data HTTP payload.

    :param fields: mapping of form field names to string values
    :return: a ``(content_type, body)`` pair for an HTTP request
    """
    BOUNDARY = '----form-data-boundary-ZmRkNzJkMjUtMjkyMC00'
    lines = []
    for key, value in fields.items():
        lines.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            value,
        ])
    lines.extend(['--' + BOUNDARY + '--', ''])
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, '\r\n'.join(lines)
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    unlock_fxn = _lock_fxn("unlock", lock_mode, False)
    proc_args = key if isinstance(key, (list, tuple)) else [key]
    return dax.get_scalar(dax.callproc(unlock_fxn, proc_args)[0])
def _create_target_dir_if_needed(self, target, depth_limit=20):
    """Create the directory that would contain ``target``, recursively
    creating missing parent directories first.

    :param target: remote path whose containing directory must exist
    :param depth_limit: recursion guard
    :raises FtpCreateDirsException: when ``depth_limit`` is exceeded
    """
    if depth_limit <= 0:
        raise FtpCreateDirsException('Depth limit exceeded')
    if not target:
        return
    target_dir = os.path.dirname(target)
    parent_dir, dir_name = os.path.split(target_dir)
    parent_dir_ls = []
    try:
        parent_dir_ls = self.ftp.nlst(parent_dir)
    except Exception:
        # Possibly a Microsoft server: they raise when listing
        # non-existing folders, so treat a failed listing as empty.
        # (Was a bare ``except:`` which also swallowed KeyboardInterrupt.)
        pass
    parent_dir_files = [os.path.basename(d) for d in parent_dir_ls]
    if dir_name not in parent_dir_files:
        if parent_dir and target_dir != '/':
            self._create_target_dir_if_needed(target_dir,
                                              depth_limit=depth_limit - 1)
        # Log the directory actually being created -- the original logged
        # ``target`` but then created ``target_dir``.
        self.logger.info('Will create dir: %s', target_dir)
        self.ftp.mkd(target_dir)
def normalized(beta, beta_list):
    """Min-max normalize ``beta`` against the candidate queue.

    :param beta: beta value of the current text line (float)
    :param beta_list: list of beta values of the title-candidate queue
    :return: normalized result in the interval [0, 1]
    """
    if len(beta_list) <= 2:
        # Too few samples to normalize; the caller falls back to the
        # Jaccard-similarity based decision in this case.
        return 1
    lo, hi = min(beta_list), max(beta_list)
    try:
        return (beta - lo) / (hi - lo)
    except ZeroDivisionError:
        # All candidates share the same beta value.
        return 1
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
    """Creates a `CSRNDArray`, a 2D array with compressed sparse row (CSR) format.

    The CSRNDArray can be instantiated in several ways:

    - ``csr_matrix(D)``: from a dense 2D array ``D`` (array_like). The
      default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray,
      float32 otherwise.
    - ``csr_matrix(S)``: from a sparse 2D array ``S`` (CSRNDArray or
      scipy.sparse.csr.csr_matrix). The default dtype is ``S.dtype``.
    - ``csr_matrix((M, N))``: an empty CSRNDArray with shape ``(M, N)``;
      ``M`` and ``N`` are the number of rows and columns. Default dtype is
      float32.
    - ``csr_matrix((data, indices, indptr))``: from standard CSR triplets,
      where the column indices for row ``i`` are stored in
      ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values in
      ``data[indptr[i]:indptr[i+1]]``. Column indices for a given row must
      be sorted in ascending order; duplicate column entries for the same
      row are not allowed. ``shape`` defaults to being inferred from the
      indices and indptr arrays.
    - ``csr_matrix((data, (row, col)))``: from COOrdinate-format arrays,
      where ``data[i]`` is the value at position ``(row[i], col[i])``.
      All missing elements are taken to be zero. ``shape`` defaults to
      being inferred from the ``row`` and ``col`` arrays.

    Parameters
    ----------
    arg1 : tuple of int, tuple of array_like, array_like, CSRNDArray,
           scipy.sparse.csr_matrix or scipy.sparse.coo_matrix
        The argument to help instantiate the csr matrix. See above.
    shape : tuple of int, optional
        The shape of the csr matrix.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array.

    Returns
    -------
    CSRNDArray
        A `CSRNDArray` with the `csr` storage representation.

    Example
    -------
    >>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
    >>> a.asnumpy()
    array([[ 0.,  1.,  0.],
           [ 2.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  3.]], dtype=float32)

    See Also
    --------
    CSRNDArray : MXNet NDArray in compressed sparse row format.
    """
    # construct a csr matrix from (M, N) or (data, indices, indptr)
    if isinstance(arg1, tuple):
        arg_len = len(arg1)
        if arg_len == 2:
            # construct a sparse csr matrix from
            # scipy coo matrix if input format is coo
            if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
                data, (row, col) = arg1
                # normalize NDArray inputs to numpy before handing to scipy
                if isinstance(data, NDArray):
                    data = data.asnumpy()
                if isinstance(row, NDArray):
                    row = row.asnumpy()
                if isinstance(col, NDArray):
                    col = col.asnumpy()
                coo = spsp.coo_matrix((data, (row, col)), shape=shape)
                _check_shape(coo.shape, shape)
                csr = coo.tocsr()
                return array(csr, ctx=ctx, dtype=dtype)
            else:
                # empty matrix with shape
                _check_shape(arg1, shape)
                return empty('csr', arg1, ctx=ctx, dtype=dtype)
        elif arg_len == 3:
            # data, indices, indptr
            return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2],
                                               shape=shape, ctx=ctx,
                                               dtype=dtype)
        else:
            raise ValueError("Unexpected length of input tuple: "
                             + str(arg_len))
    else:
        # construct a csr matrix from a sparse / dense one
        if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
            # construct a csr matrix from scipy or CSRNDArray
            _check_shape(arg1.shape, shape)
            return array(arg1, ctx=ctx, dtype=dtype)
        elif isinstance(arg1, RowSparseNDArray):
            raise ValueError("Unexpected input type: RowSparseNDArray")
        else:
            # construct a csr matrix from a dense one
            # prepare default ctx and dtype since mx.nd.array doesn't use
            # default values based on source_array
            dtype = _prepare_default_dtype(arg1, dtype)
            # create dns array with provided dtype. ctx is not passed since
            # copy across ctx requires dtype to be the same
            dns = _array(arg1, dtype=dtype)
            if ctx is not None and dns.context != ctx:
                dns = dns.as_in_context(ctx)
            _check_shape(dns.shape, shape)
            return dns.tostype('csr')
def random_tracing():
    """Create a new ``Tracing`` tuple with a random ID.

    The span and trace IDs share one freshly generated ID; ``parent_id``
    is 0, marking this as a root span.
    """
    generated = _uniq_id()
    return Tracing(
        span_id=generated,
        parent_id=0,
        trace_id=generated,
        traceflags=0,
    )
def _format_option_strings ( self , option , mvarfmt = ' <%s>' , optsep = ', ' ) :
"""Return a comma - separated list of option strings and metavars .
: param option : tuple of ( short opt , long opt ) , e . g : ( ' - f ' , ' - - format ' )
: param mvarfmt : metavar format string - evaluated as mvarfmt % metavar
: param optsep : separator""" | opts = [ ]
if option . _short_opts :
opts . append ( option . _short_opts [ 0 ] )
if option . _long_opts :
opts . append ( option . _long_opts [ 0 ] )
if len ( opts ) > 1 :
opts . insert ( 1 , optsep )
if option . takes_value ( ) :
metavar = option . metavar or option . dest . lower ( )
opts . append ( mvarfmt % metavar . lower ( ) )
return '' . join ( opts ) |
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
    """Insert a new job into a specific queue. Wrapper around :func:`put_job`.

    :param tube_name: Tube name
    :type tube_name: str
    :param data: Job body (str encoded as utf-8, or bytes already utf-8)
    :param pri: Priority for the job
    :type pri: int
    :param delay: Delay in seconds before the job reaches the ready queue
    :type delay: int
    :param ttr: Time to reserve: how long a worker may work on this job
        before we assume it is blocked and give the job to another worker
    :type ttr: int

    .. seealso::
        :func:`put_job()`
            Put a job into whatever the current tube is
        :func:`using()`
            Insert a job using an external guard
    """
    with self.using(tube_name) as tube_guard:
        return tube_guard.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
def group_by_types(self):
    """Iterate over species grouped by type.

    Yields each site whose ``specie`` equals the first entry of
    ``self.types_of_specie``, then each site matching the second, etc.
    """
    for specie_type in self.types_of_specie:
        for site in self:
            if site.specie == specie_type:
                yield site
def get_parameters(self, packet_count=None):
    """Assemble the special tshark command-line parameters implied by
    this capture's configuration.

    :param packet_count: optional maximum number of packets to capture
    :return: list of command-line argument strings
    """
    params = []
    if self._capture_filter:
        params.extend(['-f', self._capture_filter])
    if self._display_filter:
        params.extend([get_tshark_display_filter_flag(self.tshark_path),
                       self._display_filter])
    if self.include_raw:
        # Raw is only enabled when JSON is also enabled.
        params.append('-x')
    if packet_count:
        params.extend(['-c', str(packet_count)])
    if self._custom_parameters:
        for key, val in self._custom_parameters.items():
            params.extend([key, val])
    if all(self.encryption):
        params.extend(['-o', 'wlan.enable_decryption:TRUE',
                       '-o', 'uat:80211_keys:"' + self.encryption[1]
                       + '","' + self.encryption[0] + '"'])
    if self._override_prefs:
        for pref_name, pref_value in self._override_prefs.items():
            # Skip prefs already supplied via the --encryption options.
            if all(self.encryption) and pref_name in (
                    'wlan.enable_decryption', 'uat:80211_keys'):
                continue
            params.extend(['-o', '{0}:{1}'.format(pref_name, pref_value)])
    if self._output_file:
        params.extend(['-w', self._output_file])
    if self._decode_as:
        for criterion, proto in self._decode_as.items():
            params.extend(['-d',
                           ','.join([criterion.strip(), proto.strip()])])
    if self._disable_protocol:
        params.extend(['--disable-protocol', self._disable_protocol.strip()])
    return params
def handle_new_selection(self, models):
    """Handle the selection for generic widgets.

    Helper for generic widgets that want to modify the selection with a
    list of newly selected (or clicked on) models:

    * If no modifier key is pressed, the previous selection is replaced
      by the passed models.
    * If the extend-selection modifier is pressed, models not in the
      previous selection are selected and those already selected are
      deselected (toggle semantics).

    :param models: The list of models that are newly selected/clicked on
    """
    models = self._check_model_types(models)
    if extend_selection():
        # Toggling membership is exactly a symmetric difference:
        # deselect the overlap, select the remainder.
        self._selected.symmetric_difference_update(models)
    else:
        self._selected = models
    self._selected = reduce_to_parent_states(self._selected)
def execute(self, input_args=None, monitor=False):
    """Execute the workflow and collect the results of terminal tasks.

    :param input_args: External inputs to the workflow: a dict mapping
        each EOTask used in the workflow to a dict or tuple of arguments.
    :type input_args: dict(EOTask: dict(str: object) or tuple(object))
    :param monitor: If True, workflow execution will be monitored.
    :type monitor: bool
    :return: An immutable mapping containing results of terminal tasks.
    :rtype: WorkflowResults
    """
    parsed_args = self.parse_input_args(input_args)
    out_degrees = dict(self.dag.get_outdegrees())
    _, intermediate = self._execute_tasks(input_args=parsed_args,
                                          out_degs=out_degrees,
                                          monitor=monitor)
    return WorkflowResults(intermediate)
def remove_module(self, module):
    """Detach ``module`` from this engine; ownership of the module is
    returned to the caller."""
    with ffi.OutputString() as errmsg:
        if ffi.lib.LLVMPY_RemoveModule(self, module, errmsg):
            raise RuntimeError(str(errmsg))
    self._modules.remove(module)
    module._owned = False
def to_pixel(self, wcs, mode='all'):
    """Convert the aperture to a `CircularAnnulus` object defined in
    pixel coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.
    mode : {'all', 'wcs'}, optional
        Whether to do the transformation including distortions (``'all'``;
        default) or only the core WCS transformation (``'wcs'``).

    Returns
    -------
    aperture : `CircularAnnulus` object
        A `CircularAnnulus` object.
    """
    return CircularAnnulus(**self._to_pixel_params(wcs, mode=mode))
def set_empty_region(self, region_id, type_id, generated_at,
                     error_if_orders_present=True):
    """Prepare for the given region + item combo by instantiating a
    :py:class:`MarketItemsInRegionList` instance, which tracks region ID,
    type ID, and generated time. Mostly used during JSON deserialization
    in case there are no orders for the given region + item combo.

    :param int region_id: The region ID.
    :param int type_id: The item's type ID.
    :param datetime.datetime generated_at: The time that the order set
        was generated.
    :keyword bool error_if_orders_present: If True, raise an exception if
        an order already exists for this item + region combo when this is
        called. This failsafe may be disabled by passing False here.
    """
    key = '%s_%s' % (region_id, type_id)
    # ``dict.has_key`` was removed in Python 3; the ``in`` operator is
    # equivalent and works on both Python 2 and 3.
    if error_if_orders_present and key in self._orders:
        raise ItemAlreadyPresentError(
            "Orders already exist for the given region and type ID. "
            "Pass error_if_orders_present=False to disable this failsafe, "
            "if desired.")
    self._orders[key] = MarketItemsInRegionList(region_id, type_id,
                                                generated_at)
def curses_session():
    """Set up the terminal, initialize curses, and yield the screen.

    Most of this is copied from ``curses.wrapper`` in order to convert
    the wrapper into a context manager; terminal state is restored in
    the ``finally`` block on exit.
    """
    try:
        # Curses must wait for some time after the Escape key is pressed
        # to check if it is the beginning of an escape sequence indicating
        # a special key. The default wait time is 1 second.
        # http://stackoverflow.com/questions/27372068
        os.environ['ESCDELAY'] = '25'
        # Initialize curses
        stdscr = curses.initscr()
        # Turn off echoing of keys, and enter cbreak mode, where no
        # buffering is performed on keyboard input
        curses.noecho()
        curses.cbreak()
        # In keypad mode, escape sequences for special keys (like the
        # cursor keys) will be interpreted and a special value like
        # curses.KEY_LEFT will be returned
        stdscr.keypad(1)
        # Start color, too. Harmless if the terminal doesn't have color;
        # user can test with has_color() later on. The try/catch works
        # around a minor bit of over-conscientiousness in the curses
        # module -- the error return from C start_color() is ignorable.
        try:
            curses.start_color()
            curses.use_default_colors()
        except curses.error:  # was a bare ``except:``
            _logger.warning('Curses failed to initialize color support')
        # Hide the blinking cursor
        try:
            curses.curs_set(0)
        except curses.error:  # was a bare ``except:``
            _logger.warning('Curses failed to initialize the cursor mode')
        yield stdscr
    finally:
        # Only restore terminal state if initscr() actually succeeded.
        if 'stdscr' in locals():
            stdscr.keypad(0)
            curses.echo()
            curses.nocbreak()
            curses.endwin()
def _recognize_basic_types ( s ) :
"""If value of given string ` s ` is an integer ( or long ) , float or boolean , convert it
to a proper type and return it .""" | tps = [ int , float ]
if not six . PY3 : # compat for older versions of six that don ' t have PY2
tps . append ( long )
for tp in tps :
try :
return tp ( s )
except ValueError :
pass
if s . lower ( ) == 'true' :
return True
if s . lower ( ) == 'false' :
return False
if s . lower ( ) in [ 'none' , 'null' ] :
return None
return s |
def get_object_from_classbased_instance(instance, queryset, request,
                                        *args, **kwargs):
    """Get the model object handled by a classbased generic view.

    Parameters
    ----------
    instance : instance
        An instance of a classbased generic view
    queryset : instance
        A queryset instance (falsy to derive one from the view itself)
    request : instance
        An instance of HttpRequest

    Returns
    -------
    instance
        An instance of a model object, or None
    """
    from django.views.generic.edit import BaseCreateView
    # Most classbased-view methods assume request/args/kwargs attributes
    # exist, but they are normally initialized in ``dispatch``.
    instance.request = request
    instance.args = args
    instance.kwargs = kwargs
    # Derive a queryset from the view when none was supplied.
    if not queryset:
        if hasattr(instance, 'get_queryset'):
            queryset = instance.get_queryset()
        elif hasattr(instance, 'queryset'):
            queryset = instance.queryset
        elif hasattr(instance, 'model'):
            queryset = instance.model._default_manager.all()
    # Resolve the object itself.
    obj = None
    if hasattr(instance, 'get_object'):
        try:
            obj = instance.get_object(queryset)
        except AttributeError:
            # CreateView defines ``get_object`` but legitimately has no
            # object before creation, so treat that as "no object".
            if not isinstance(instance, BaseCreateView):
                raise
    elif hasattr(instance, 'object'):
        obj = instance.object
    return obj
def loads(buf, mutable=True, value_encoding=None, value_errors=None):
    """Deserialize a BSER-encoded blob.

    @param buf: The buffer to deserialize.
    @type buf: bytes
    @param mutable: Whether to return mutable results.
    @type mutable: bool
    @param value_encoding: Optional codec to use to decode values. If
        unspecified or None, return values as bytestrings.
    @type value_encoding: str
    @param value_errors: Optional error handler for the codec; 'strict'
        by default ('surrogateescape' is the other common choice on
        Python 3). Ignored when value_encoding is None.
    @type value_errors: str
    """
    info = _pdu_info_helper(buf)
    expected_len, pos = info[2], info[3]
    if expected_len + pos != len(buf):
        raise ValueError("bser data len %d != header len %d"
                         % (expected_len + pos, len(buf)))
    decoder = Bunser(mutable=mutable, value_encoding=value_encoding,
                     value_errors=value_errors)
    return decoder.loads_recursive(buf, pos)[0]
def classify_file(f):
    """Examine the column names to determine which type of file this is.

    Returns a tuple:
    retvalue[0] = "file is non-parameterized"
    retvalue[1] = "file contains error column"
    """
    cols = f[1].columns
    if len(cols) == 2:
        # A simple two-column file.
        return (True, False)
    names = cols.names
    if len(cols) == 3 and 'ERROR' in names:
        return (True, True)
    if len(cols) > 2 and 'ERROR' not in names:
        return (True, False)
    return (False, True)
def up(queue, host=None):
    '''Up a queue, by removing a down file -- if a queue has no down
    file, this function is a no-op.'''
    down_path = fsq_path.down(queue, host=host)
    _queue_ok(os.path.dirname(down_path))
    try:
        os.unlink(down_path)
    # ``except (...), e`` is Python 2-only syntax and a SyntaxError on
    # Python 3; ``as`` works on Python 2.6+ and Python 3 alike.
    except (OSError, IOError) as e:
        # A missing down file just means the queue is already up.
        if e.errno != errno.ENOENT:
            raise FSQConfigError(e.errno, wrap_io_os_err(e))
def _histplot_op ( values , values2 , rotated , ax , hist_kwargs ) :
"""Add a histogram for the data to the axes .""" | if values2 is not None :
raise NotImplementedError ( "Insert hexbin plot here" )
bins = hist_kwargs . pop ( "bins" )
if bins is None :
bins = get_bins ( values )
ax . hist ( values , bins = bins , ** hist_kwargs )
if rotated :
ax . set_yticks ( bins [ : - 1 ] )
else :
ax . set_xticks ( bins [ : - 1 ] )
if hist_kwargs [ "label" ] is not None :
ax . legend ( )
return ax |
def shorrocks_index(A):
    r"""Implements Shorrocks mobility index

    Parameters
    ----------
    A : array_like(float)
        Square matrix with transition probabilities (mobility matrix) of
        dimension m

    Returns
    -------
    Shorrocks index : float
        The Shorrocks mobility index calculated as

        .. math::

            s(A) = \frac{m - \sum_j a_{jj}}{m - 1} \in (0, 1)

        An index equal to 0 indicates complete immobility.

    References
    ----------
    .. [1] Wealth distribution and social mobility in the US: A
       quantitative approach (Benhabib, Bisin, Luo, 2017).
       https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
    """
    A = np.asarray(A)  # convert to array if not already
    m, n = A.shape
    if m != n:
        raise ValueError('A must be a square matrix')
    # np.trace is the sum of the diagonal entries.
    return (m - np.trace(A)) / (m - 1)
def redef(obj, key, value, **kwargs):
    '''A static constructor helper: build a :class:`Redef` replacing
    attribute ``key`` on ``obj`` with ``value``.'''
    redef_obj = Redef(obj, key, value=value, **kwargs)
    return redef_obj
def on_get(self, req, resp, **kwargs):
    """Respond on GET requests using the ``self._retrieve`` handler.

    Delegates to the parent class's ``on_get``, passing the retrieve
    handler along with any extra routing kwargs.
    """
    return super().on_get(req, resp, handler=self._retrieve, **kwargs)
def load_version(fname: str) -> str:
    """Loads version from file.

    :param fname: Name of file to load version from.
    :return: Version string; falls back to "1.0.3" when the file is
        missing.
    """
    if not os.path.exists(fname):
        logger.warning("No version file found. Defaulting to 1.0.3")
        return "1.0.3"
    with open(fname) as version_file:
        return version_file.read().strip()
def __new_submodule(self, name, obj):
    """Create a new submodule documentation object for this ``obj``,
    which must be a Python module object, and pass along any settings in
    this module.

    :param name: the public exported name of the submodule
    :param obj: the Python module object to document
    :return: a new ``Module`` documentation object
    """
    # Forcefully set the module name so that it is always the absolute
    # import path. We can't rely on ``obj.__name__``, since it doesn't
    # necessarily correspond to the public exported name of the module.
    obj.__dict__['__budoc_module_name'] = '%s.%s' % (self.refname, name)
    return Module(obj, docfilter=self._docfilter,
                  allsubmodules=self._allsubmodules)
def parse_DID(did, name_type=None):
    """Given a DID string, parse it into
    ``{'address': ..., 'index': ..., 'name_type': ...}``.

    :param did: DID string of the form ``did:stack:v0:<address>-<index>``
    :param name_type: unused; the type is derived from the address
        version byte and this argument is overwritten (kept for API
        compatibility)
    :raises AssertionError: on an invalid DID
    """
    did_pattern = '^did:stack:v0:({}{{25,35}})-([0-9]+)$'.format(
        OP_BASE58CHECK_CLASS)
    m = re.match(did_pattern, did)
    if m is None:
        # Explicit raise instead of ``assert``: validation must not be
        # stripped when running under ``python -O``. The exception type
        # is kept so existing callers' handlers still match.
        raise AssertionError('Invalid DID: {}'.format(did))
    original_address = str(m.groups()[0])
    name_index = int(m.groups()[1])
    vb = keylib.b58check.b58check_version_byte(original_address)
    if vb in [SUBDOMAIN_ADDRESS_VERSION_BYTE,
              SUBDOMAIN_ADDRESS_MULTISIG_VERSION_BYTE]:
        name_type = 'subdomain'
        # decode version: re-encode the address with the blockchain's
        # (multisig) version byte.
        if vb == SUBDOMAIN_ADDRESS_VERSION_BYTE:
            vb = bitcoin_blockchain.version_byte
        else:
            vb = bitcoin_blockchain.multisig_version_byte
        original_address = virtualchain.address_reencode(
            original_address, version_byte=vb)
    else:
        name_type = 'name'
        original_address = virtualchain.address_reencode(original_address)
    return {'address': original_address, 'index': name_index,
            'name_type': name_type}
def create(cls, name, engines, policy=None, comment=None, **kwargs):
    """Create a new validate policy task.

    If a policy is not specified, the engines' existing policy will be
    validated. Override default validation settings as kwargs.

    :param str name: name of task
    :param engines: list of engines to validate
    :type engines: list(Engine)
    :param Policy policy: policy to validate. Uses the engines assigned
        policy if none specified.
    :param kwargs: see :func:`~policy_validation_settings` for keyword
        arguments and default values.
    :raises ElementNotFound: engine or policy specified does not exist
    :raises CreateElementFailed: failure to create the task
    :return: the task
    :rtype: ValidatePolicyTask
    """
    payload = {
        'name': name,
        'resources': [eng.href for eng in engines],
        'policy': policy.href if policy is not None else policy,
        'comment': comment,
    }
    if kwargs:
        payload.update(policy_validation_settings(**kwargs))
    return ElementCreator(cls, payload)
def key_validation_check(tweet_keys_list, superset_keys, minset_keys):
    """Validates the keys present in a Tweet.

    Args:
        tweet_keys_list (list): the keys present in a tweet
        superset_keys (set): the set of all possible keys for a tweet
        minset_keys (set): the set of minimal keys expected in a tweet

    Returns:
        0 if no errors

    Raises:
        UnexpectedFormatError on any mismatch of keys.
    """
    tweet_keys = set(tweet_keys_list)
    # Every required key must be present.
    if minset_keys - tweet_keys:
        raise UnexpectedFormatError(
            "keys ({}) missing from Tweet (Public API data is not supported)"
            .format(minset_keys - tweet_keys))
    # No key outside the known superset may appear.
    unexpected_keys = tweet_keys - superset_keys
    if unexpected_keys:
        raise UnexpectedFormatError(
            "Unexpected keys ({}) are in this Tweet".format(unexpected_keys))
    return 0
def get_film(film_id):
    '''Return a single film looked up by its id.'''
    response = _get(film_id, settings.FILMS)
    return Film(response.content)
def _set_igp_sync(self, v, load=False):
    """Setter method for igp_sync, mapped from YANG variable
    /mpls_state/rsvp/igp_sync (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_igp_sync is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_igp_sync() directly.

    YANG Description: MPLS Rsvp IGP Synchronization information
    """
    # Unwrap a typed value to its underlying representation before
    # re-validating it against the YANG-derived class below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated container class; raises
        # TypeError/ValueError when ``v`` is incompatible.
        t = YANGDynClass(v, base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """igp_sync must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", })
    self.__igp_sync = t
    # Notify the parent tree of the change when a hook is registered.
    if hasattr(self, '_set'):
        self._set()
def is_email(string):
    """Return True when ``string`` looks like an e-mail address.

    Non-string input now returns False (previously any non-string value
    returned True), and the TLD must contain at least one letter
    (previously ``[a-zA-Z]*`` also accepted an empty TLD like ``'a@b.'``).

    >>> is_email('username@example.com')
    True
    >>> is_email('example.com')
    False
    >>> is_email('firstname.lastname@domain.co.uk')
    True
    """
    email_regex = r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+$'
    return isinstance(string, str) and re.match(email_regex, string) is not None
def _set_platform_specific_keyboard_shortcuts(self):
    """QtDesigner does not support QKeySequence::StandardKey enum based default keyboard shortcuts.

    This means that all default key combinations ("Save", "Quit", etc) have to be
    defined in code, which this method does by assigning each action its
    platform-appropriate standard key sequence.
    """
    standard_shortcuts = (
        (self.action_new_phrase, QKeySequence.New),
        (self.action_save, QKeySequence.Save),
        (self.action_close_window, QKeySequence.Close),
        (self.action_quit, QKeySequence.Quit),
        (self.action_undo, QKeySequence.Undo),
        (self.action_redo, QKeySequence.Redo),
        (self.action_cut_item, QKeySequence.Cut),
        (self.action_copy_item, QKeySequence.Copy),
        (self.action_paste_item, QKeySequence.Paste),
        (self.action_delete_item, QKeySequence.Delete),
        (self.action_configure_autokey, QKeySequence.Preferences),
    )
    for action, key_sequence in standard_shortcuts:
        action.setShortcuts(key_sequence)
def _call_java(sc, java_obj, name, *args):
    """Method copied from pyspark.ml.wrapper. Uses private Spark APIs.

    Converts each Python argument to its Java counterpart, invokes the named
    method on the Java object, and converts the result back to Python.
    """
    java_method = getattr(java_obj, name)
    converted_args = (_py2java(sc, arg) for arg in args)
    return _java2py(sc, java_method(*converted_args))
def reflect(source, model, cache=None):
    '''Finds an object of class `model` with the same identifier as the
    `source` object'''
    if source is None:
        return None
    if cache and source in cache:
        return cache[source]
    # Look up the primary key of `source` and fetch it as `model` through the
    # session that `source` is attached to.
    session = object_session(source)
    ident = identity_key(instance=source)[1]
    assert ident is not None
    return session.query(model).get(ident)
def scroll_mouse(self, mouse_x: int):
    """Scrolls the mouse if ROI Selection reaches corner of view

    :param mouse_x: current mouse x coordinate
    :return: None
    """
    view = self.view_rect()
    scrollbar = self.horizontalScrollBar()
    if mouse_x > view.x() + view.width():
        # Past the right edge: scroll right.
        scrollbar.setValue(scrollbar.value() + 5)
    elif mouse_x < view.x():
        # Past the left edge: scroll left.
        scrollbar.setValue(scrollbar.value() - 5)
def flownet2_sd(self, x):
    """FlowNetSD ("small displacement") sub-network.

    Architecture in Table 3 of FlowNet 2.0.

    Args:
        x: concatenation of two inputs, of shape [1, 2xC, H, W]

    Returns:
        The finest predicted flow (divided by DISP_SCALE) upsampled with
        nearest-neighbour interpolation.
    """
    # Shared defaults: encoder convs are 3x3 stride-2 with leaky ReLU and
    # 'valid' padding (explicit pad() calls supply the border); decoder
    # deconvs are 4x4 stride-2 'same' with identity activation.
    # Everything is channels-first.
    with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1), padding='valid', strides=2, kernel_size=3, data_format='channels_first'), argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity, data_format='channels_first', strides=2, kernel_size=4):
        # Encoder: alternating stride-2 (downsampling) and stride-1
        # (refining) convolutions, channels growing 64 -> 1024.
        x = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1)
        x = tf.layers.conv2d(pad(x, 1), 64, name='conv1')
        conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1)
        x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2')
        conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1)
        x = tf.layers.conv2d(pad(conv2, 1), 256, name='conv3')
        conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)
        x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')
        conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)
        x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')
        conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)
        x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')
        conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)
        # Decoder: at each scale, predict a 2-channel flow, upsample it, and
        # concatenate [encoder skip, deconv features, upsampled flow] along
        # the channel axis before refining with an 'inter_conv'.
        flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)
        flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5')
        x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')
        interconv5 = tf.layers.conv2d(pad(concat5, 1), 512, strides=1, name='inter_conv5', activation=tf.identity)
        flow5 = tf.layers.conv2d(pad(interconv5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)
        flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4')
        x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')
        interconv4 = tf.layers.conv2d(pad(concat4, 1), 256, strides=1, name='inter_conv4', activation=tf.identity)
        flow4 = tf.layers.conv2d(pad(interconv4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)
        flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3')
        x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')
        interconv3 = tf.layers.conv2d(pad(concat3, 1), 128, strides=1, name='inter_conv3', activation=tf.identity)
        flow3 = tf.layers.conv2d(pad(interconv3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)
        flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2')
        x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')
        interconv2 = tf.layers.conv2d(pad(concat2, 1), 64, strides=1, name='inter_conv2', activation=tf.identity)
        flow2 = tf.layers.conv2d(pad(interconv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)
        # Rescale the finest flow and upsample it toward input resolution.
        return resize(flow2 / DISP_SCALE, mode='nearest')
async def jsk_show(self, ctx: commands.Context):
    """Shows Jishaku in the help command."""
    if self.jsk.hidden:
        # Flip the flag and confirm to the invoker.
        self.jsk.hidden = False
        await ctx.send("Jishaku is now visible.")
        return
    return await ctx.send("Jishaku is already visible.")
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file):
    """Deduplicate and sort with samblaster, produces split read and discordant pair files.

    Builds (but does not execute) a single shell pipeline: samblaster streams
    its full output into a `samtools sort` command while split-read and
    discordant-read records are written through bash process substitution
    into two further `samtools sort` commands.

    :param data: bcbio sample data dictionary (provides config and resources)
    :param tx_out_file: transactional path for the full deduplicated BAM
    :param tx_sr_file: transactional path for the split-read BAM
    :param tx_disc_file: transactional path for the discordant-pair BAM
    :return: the fully formatted shell command string
    """
    samblaster = config_utils.get_program("samblaster", data["config"])
    samtools = config_utils.get_program("samtools", data["config"])
    tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
    # Template filled per-output via format(**locals()); the surrounding local
    # variable names (samtools, sort_opt, cores, mem, tmp_prefix) are part of
    # the contract -- do not rename them.
    tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} {out_file} -")
    # full BAM -- associate more memory and cores
    cores, mem = _get_cores_memory(data, downscale=2)
    # Potentially downsample to maximum coverage here if not splitting and whole genome sample
    ds_cmd = None if data.get("align_split") else bam.get_maxcov_downsample_cl(data, "samtools")
    # Name-sort only when alignments are split and duplicates are marked later.
    sort_opt = "-n" if data.get("align_split") and dd.get_mark_duplicates(data) else ""
    if ds_cmd:
        # Pipe the sorted stream through the downsampler before writing the file.
        dedup_cmd = "%s %s > %s" % (tobam_cmd.format(out_file="", dext="full", **locals()), ds_cmd, tx_out_file)
    else:
        dedup_cmd = tobam_cmd.format(out_file="-o %s" % tx_out_file, dext="full", **locals())
    # split and discordant BAMs -- give less memory/cores since smaller files
    sort_opt = ""
    cores, mem = _get_cores_memory(data, downscale=4)
    splitter_cmd = tobam_cmd.format(out_file="-o %s" % tx_sr_file, dext="spl", **locals())
    discordant_cmd = tobam_cmd.format(out_file="-o %s" % tx_disc_file, dext="disc", **locals())
    # samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem
    cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) " "| {dedup_cmd}")
    return cmd.format(**locals())
def retain_identities(retention_time, es_enrichment_url, sortinghat_db, data_source, active_data_sources):
    """Delete stale unique identities from SortingHat.

    Selects the unique identities not seen within the last `retention_time`
    minutes and deletes them from SortingHat. It also deletes the orphan
    unique identities: those stored in SortingHat but not in IDENTITIES_INDEX.

    :param retention_time: maximum number of minutes wrt the current date to retain the identities
    :param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored
    :param sortinghat_db: instance of the SortingHat database
    :param data_source: target data source (e.g., git, github, slack)
    :param active_data_sources: list of active data sources
    """
    cutoff = get_diff_current_date(minutes=retention_time)
    cutoff_str = cutoff.isoformat()
    es_conn = Elasticsearch([es_enrichment_url], timeout=120, max_retries=20,
                            retry_on_timeout=True, verify_certs=False)
    # Remove unique identities with no activity after the cutoff date.
    delete_inactive_unique_identities(es_conn, sortinghat_db, cutoff_str)
    # Remove unique identities of this data source missing from IDENTITIES_INDEX.
    delete_orphan_unique_identities(es_conn, sortinghat_db, data_source, active_data_sources)
def run(self):
    '''Run listener

    Consumes messages from self.recv(1) until the listener is stopped.
    A None message means nothing arrived in the interval; the loop exits
    only when the running flag has been cleared.
    '''
    self.running = True
    for message in self.recv(1):
        if message is None:
            if not self.running:
                break
            continue
        self.logger.debug("New message received: %s", str(message))
        self.add_to_queue(message)
def on_config_value_changed(self, config_m, prop_name, info):
    """Callback when a config value has been changed

    :param ConfigModel config_m: The config model that has been changed
    :param str prop_name: Should always be 'config'
    :param dict info: Information e.g. about the changed config key
    """
    changed_key = info['args'][1]
    # Only logging-related config keys affect the log button state.
    if "LOGGING" in changed_key:
        self.update_log_button_state()
def first(self):
    """Gets item with highest priority. Performance: O(1)

    :raises IndexError: with message 'DEPQ is empty' when there is no data.
    """
    with self.lock:
        if not self.data:
            raise IndexError('DEPQ is empty')
        # Entries are (item, priority) pairs; return only the item.
        return self.data[0][0]
def get_deserializer(serializer_format):
    """Get the deserializer for a specific format

    Returns the matching deserializer callable, or implicitly None when the
    format is not recognised.
    """
    if serializer_format == Format.PICKLE:
        return _deserialize_pickle
    if serializer_format == Format.JSON:
        return _deserialize_json
def __vCmdCamTrigger(self, args):
    '''Trigger Camera

    Fires every camera in self.camera_list and logs each trigger.
    '''
    for camera in self.camera_list:
        camera.take_picture()
        print("Trigger Cam %s" % camera)
def save(self, commit=True):
    """Save and send

    Persists the contact via the parent form, then emails a notification
    rendered from the configured subject/content templates.
    """
    contact = super(ContactFormBase, self).save()
    context = {'contact': contact}
    context.update(get_site_metas())
    # Subjects must be a single line; collapse any rendered newlines.
    rendered_subject = render_to_string(self.mail_subject_template, context)
    subject = ''.join(rendered_subject.splitlines())
    body = render_to_string(self.mail_content_template, context)
    send_mail(
        subject,
        body,
        settings.DEFAULT_FROM_EMAIL,
        settings.CONTACT_FORM_TO,
        fail_silently=not settings.DEBUG,
    )
    return contact
def _next_raw_dimension ( self ) :
"""_ RawDimension for next * dimension _ dict * in sequence or None for last .
Returns None if this dimension is the last in sequence for this cube .""" | dimension_dicts = self . _dimension_dicts
this_idx = dimension_dicts . index ( self . _dimension_dict )
if this_idx > len ( dimension_dicts ) - 2 :
return None
return _RawDimension ( dimension_dicts [ this_idx + 1 ] , self . _dimension_dicts ) |
def asizeof(self, *objs, **opts):
    '''Return the combined size of the given objects
    (with modified options, see method **set**).'''
    if opts:
        # Apply any per-call option overrides before sizing.
        self.set(**opts)
    total, _detail = self._sizes(objs, None)
    return total
def get_letters_iterable(word):
    """splits the word into a character-list of tamil/english
    characters present in the stream

    Grantha consonants followed by a combining symbol are emitted as a
    single two-character unit; everything else is emitted one code point
    at a time.
    """
    total, pos = len(word), 0
    while pos < total:
        ch = word[pos]
        if ch in uyir_letter_set or ch == ayudha_letter:
            pos += 1
            yield ch
        elif ch in grantha_agaram_set:
            nxt = pos + 1
            if nxt < total and word[nxt] in all_symbol_set:
                # Consonant + combining symbol form one logical letter.
                combined = ch + word[nxt]
                pos += 2
                yield combined
            else:
                pos += 1
                yield ch
        else:
            # Non-Tamil (e.g. English) characters pass through unchanged.
            pos += 1
            yield ch
    return
def dead_links(self):
    """Generate the coordinates of all dead links leaving working chips.

    Any link leading to a dead chip will also be included in the list of
    dead links. In non-toroidal SpiNNaker systems (e.g. single SpiNN-5
    boards), links on the periphery of the system will be marked as dead.

    Yields
    ------
    (x, y, :py:class:`rig.links.Links`)
        A dead link leaving a chip from the perspective of the chip. For
        example ``(0, 0, Links.north)`` would be the link going north from
        chip (0, 0) to chip (0, 1).
    """
    # A link is dead exactly when it is absent from the chip's set of
    # working links.
    for (x, y), chip_info in iteritems(self):
        for link in Links:
            if link not in chip_info.working_links:
                yield (x, y, link)
def json(self):
    """Load response body as json.

    :raises: :class:`ContentDecodingError` when the body is not valid JSON.
    """
    try:
        parsed = json.loads(self.text)
    except Exception as e:
        # Re-raise any parse failure under the library's own exception type.
        raise ContentDecodingError(e)
    else:
        return parsed
def getprefix(self, u):
    """Get the prefix for the specified namespace (uri)

    @param u: A namespace uri.
    @type u: str
    @return: The namespace prefix.
    @raise Exception: when the uri is not mapped anywhere.
    """
    # Check the global namespace table first, then locally declared
    # prefixes; each entry is a (prefix, uri) pair.
    for table in (Namespace.all, self.prefixes):
        for entry in table:
            if entry[1] == u:
                return entry[0]
    raise Exception('ns (%s) not mapped' % u)
def get_upstream_paths(self, port):
    """Retrieve a dictionary containing the full URLs of the upstream apps

    :param int port: The port used by the replay and cdx servers
    :return: A dictionary containing the upstream paths (replay, cdx-server, record [if enabled])
    :rtype: dict[str, str]
    """
    paths = {
        'replay': self.REPLAY_API % port,
        'cdx-server': self.CDX_API % port,
    }
    # Recording is optional; expose it only when a recorder is configured.
    if self.recorder_path:
        paths['record'] = self.recorder_path
    return paths
def write_hw_scgink(hw, filename='mathbrush-test.txt'):
    """Write a HandwrittenData object in SCG INK format.

    Parameters
    ----------
    hw : HandwrittenData object
    filename : string
        Path, where the SCG INK file gets written
    """
    strokes = hw.get_pointlist()
    with open(filename, 'w') as out:
        # Header: magic line followed by the stroke count.
        out.write('SCG_INK\n')
        out.write('%i\n' % len(strokes))
        # Each stroke: its point count, then one "x y" pair per line.
        for stroke in strokes:
            out.write('%i\n' % len(stroke))
            for point in stroke:
                out.write('%i %i\n' % (point['x'], point['y']))
def process_lines(self, input_lines, **kwargs):
    '''Executes the pipeline of subsequent VISL_CG3 commands.

    The first process in the pipeline gets input_lines as an input, and each
    subsequent process gets the output of the previous process as an input.

    The idea of how to construct the pipeline borrows from:
    https://github.com/estnltk/estnltk/blob/1.4.0/estnltk/syntax/tagger.py

    Returns the result of the last process in the pipeline, either as a string
    or, alternatively, as a list of strings (if split_result == True);

    Parameters
    ----------
    input_lines : list of str
        The input text for the pipeline; Should be in same format as the output
        of SyntaxPreprocessing;
    split_result : bool
        Optional argument specifying whether the result should be split by
        newlines, and returned as a list of strings/lines instead;
        Default: False
    remove_info : bool
        Optional argument specifying whether the additional information added
        during the preprocessing and syntactic processing should be removed
        from the results;
        Default: True;
        The method cleanup_lines() will be used for removing additional info,
        and all the parameters passed to this method will be also forwarded to
        the cleanup method;
    '''
    # Parse keyword options (several aliases are accepted for each flag).
    split_result_lines = False
    remove_info = True
    for argName, argVal in kwargs.items():
        if argName in ['split_result_lines', 'split_result'] and argVal in [True, False]:
            split_result_lines = argVal
        if argName in ['remove_info', 'info_remover', 'clean_up'] and argVal in [True, False]:
            remove_info = argVal
    # 1) Construct the input file for the first process in the pipeline
    temp_input_file = tempfile.NamedTemporaryFile(prefix='vislcg3_in.', mode='w', delete=False)
    temp_input_file.close()
    # We have to open separately here for writing, because Py 2.7 does not support
    # passing parameter encoding='utf-8' to the NamedTemporaryFile;
    out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')
    for line in input_lines:
        out_f.write(line.rstrip())
        out_f.write('\n')
    out_f.close()
    # TODO: tempfile is currently used to ensure that the input is in 'utf-8',
    # but perhaps we can somehow ensure it without using tempfile??
    # 2) Dynamically construct the pipeline and open processes
    pipeline = []
    for i in range(len(self.rules_pipeline)):
        rule_file = self.rules_pipeline[i]
        process_cmd = [self.vislcg_cmd, '-o', '-g', os.path.join(self.rules_dir, rule_file)]
        process = None
        if i == 0:
            # The first process takes input from the file
            process_cmd.extend(['-I', temp_input_file.name])
            process = Popen(process_cmd, stdin=PIPE, stdout=PIPE)
        else:
            # A subsequent process takes output of the last process as an input
            process = Popen(process_cmd, stdin=pipeline[-1]['process'].stdout, stdout=PIPE)
        # Record the process
        process_dict = {'process': process, 'cmd': process_cmd}
        pipeline.append(process_dict)
    # 3) Close all stdout streams, except the last one
    # (lets SIGPIPE propagate if a downstream process exits early)
    for i in range(len(pipeline)):
        if i != len(pipeline) - 1:
            pipeline[i]['process'].stdout.close()
    # 4) Communicate results from the last item in the pipeline
    result = as_unicode(pipeline[-1]['process'].communicate()[0])
    pipeline[-1]['process'].stdout.close()
    # Close the last process
    # Clean-up
    # 1) remove temp file
    os.remove(temp_input_file.name)
    # 2) remove additional info, if required
    if remove_info:
        result = '\n'.join(cleanup_lines(result.split('\n'), **kwargs))
    return result if not split_result_lines else result.split('\n')
def _dir_size(directory):
    """Returns total size (in bytes) of the given 'directory'.

    Recurses into sub-directories; file sizes come from gfile.Stat.
    """
    total = 0
    for name in tf_v1.gfile.ListDirectory(directory):
        full_path = os.path.join(directory, name)
        info = tf_v1.gfile.Stat(full_path)
        if info.is_directory:
            total += _dir_size(full_path)
        else:
            total += info.length
    return total
def _imm_new(cls):
    '''All immutable new classes use a hack to make sure the post-init cleanup occurs.

    Allocates the instance with object.__new__ (bypassing any overridden
    __new__), seeds defaulted parameters, clears values, and flags the
    instance as being mid-initialization.
    '''
    imm = object.__new__(cls)
    # Note that right now imm has a normal setattr method;
    # Give any parameter that has one a default value
    params = cls._pimms_immutable_data_['params']
    for (p, dat) in six.iteritems(params):
        dat = dat[0]
        if dat:
            # dat appears to be a one-element container holding the default
            # value when one was declared -- TODO confirm against the params
            # table builder.
            object.__setattr__(imm, p, dat[0])
    # Clear any values; they are not allowed yet
    _imm_clear(imm)
    # Note that we are initializing...
    dd = object.__getattribute__(imm, '__dict__')
    dd['_pimms_immutable_is_init'] = True
    # That should do it!
    return imm
def read_distributions_from_config(cp, section="prior"):
    """Returns a list of PyCBC distribution instances for a section in the
    given configuration file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        An open config file to read.
    section : {"prior", string}
        Prefix on section names from which to retrieve the distributions.

    Returns
    -------
    list
        A list of the parsed distributions.

    Raises
    ------
    ValueError
        If the same parameter appears in more than one distribution.
    """
    dists = []
    seen_params = []
    for subsection in cp.get_subsections(section):
        name = cp.get_opt_tag(section, "name", subsection)
        dist = distribs[name].from_config(cp, section, subsection)
        # Reject configs where two distributions claim the same parameter.
        if not set(dist.params).isdisjoint(seen_params):
            raise ValueError("Same parameter in more than one distribution.")
        dists.append(dist)
        seen_params += dist.params
    return dists
def process_request(self, request, client_address):
    """Call finish_request.

    Handles one request synchronously, then shuts the request down.
    """
    self.finish_request(request, client_address)
    self.shutdown_request(request)
def get_taf_alt_ice_turb(wxdata: [str]) -> ([str], str, [str], [str]):  # type: ignore
    """Returns the report list and removed: Altimeter string, Icing list, Turbulance list

    Scans the report in reverse so popped indexes stay valid while mutating
    wxdata in place.
    """
    altimeter = ''
    icing, turbulence = [], []
    for idx in range(len(wxdata) - 1, -1, -1):
        item = wxdata[idx]
        if len(item) > 6 and item.startswith('QNH') and item[3:7].isdigit():
            # e.g. 'QNH2992INS' -> altimeter '2992'
            altimeter = wxdata.pop(idx)[3:7]
        elif item.isdigit():
            # All-digit groups: leading '6' = icing code, leading '5' = turbulence code.
            if item.startswith('6'):
                icing.append(wxdata.pop(idx))
            elif item.startswith('5'):
                turbulence.append(wxdata.pop(idx))
    return wxdata, altimeter, icing, turbulence
def domain_add(self, domain, description=DESCRIPTION):
    """Sends a POST to /1.0/domains/ using this post-data:

        {"domain": "www.fogfu.com",
         "description": "Added by tagcube-api"}

    :param domain: The domain name to add as a new resource
    :param description: Free-text description stored with the domain
    :return: The newly created resource
    """
    payload = {"domain": domain, "description": description}
    return self.create_resource(self.build_full_url(self.DOMAINS), payload)
def append_on_chord(self, on_chord, root):
    """Append on chord

    To create Am7/G:
        q = Quality('m7')
        q.append_on_chord('G', root='A')

    :param str on_chord: bass note of the chord
    :param str root: root note of the chord
    """
    root_val = note_to_val(root)
    bass_val = note_to_val(on_chord) - root_val
    # Remove the first existing component matching the bass note
    # (octave-insensitive match via modulo 12).
    for component in list(self.components):
        if component % 12 == bass_val:
            self.components.remove(component)
            break
    # Shift the bass an octave down so it sits below the root.
    if bass_val > root_val:
        bass_val -= 12
    if bass_val not in self.components:
        self.components.insert(0, bass_val)
def start(self):
    """Launches a new SMTP client session on the server taken from the `self.options` dict.

    Connects and logs in to the target SMTP server, then sends bait mails
    (up to self.max_mails, pausing one second between sends), recording the
    whole interaction on the bait session object.
    """
    username = self.options['username']
    password = self.options['password']
    server_host = self.options['server']
    server_port = self.options['port']
    honeypot_id = self.options['honeypot_id']
    session = self.create_session(server_host, server_port, honeypot_id)
    logger.debug('Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('smtp', server_host, server_port, session.id))
    try:
        self.connect()
        session.did_connect = True
        # Record the ephemeral local port the client bound to.
        session.source_port = self.client.sock.getsockname()[1]
        self.login(username, password)
        # TODO: Handle failed login
        # TODO: password='' is a silly fix, this needs to be fixed server side...
        session.add_auth_attempt('plaintext', True, username=username, password='')
        session.did_login = True
    except smtplib.SMTPException as error:
        logger.debug('Caught exception: {0} ({1})'.format(error, str(type(error))))
    else:
        while self.sent_mails <= self.max_mails:
            from_addr, to_addr, mail_body = self.get_one_mail()
            try:
                # Skip entries with missing addresses or a non-string body.
                if from_addr and to_addr and isinstance(mail_body, str):
                    self.client.sendmail(from_addr, to_addr, mail_body)
                else:
                    continue
            except TypeError as e:
                logger.debug('Malformed email in mbox archive, skipping.')
                continue
            else:
                self.sent_mails += 1
                logger.debug('Sent mail from ({0}) to ({1})'.format(from_addr, to_addr))
            time.sleep(1)
        self.client.quit()
        session.did_complete = True
    finally:
        # Always finalize the session, even after connect/login failures.
        logger.debug('SMTP Session complete.')
        session.alldone = True
        session.end_session()
        self.client.close()
def delete_connection(self, **kwargs):
    """Remove a single connection to a provider for the specified user.

    :return: True when a matching connection was found and deleted,
        False otherwise.
    """
    connection = self.find_connection(**kwargs)
    if connection:
        self.delete(connection)
        return True
    return False
def nii_modify(nii, fimout='', outpath='', fcomment='', voxel_range=[]):
    '''Modify the NIfTI image given either as a file path or a dictionary,
    obtained by nimpa.getnii(file_path).

    Rescales the image intensities into voxel_range (one value: scale the
    max to that value; two values: min/max normalise into that range) and
    writes the result as a .nii.gz file.

    Returns a dict with the output file path ('fim'), the modified array
    ('im') and the affine, or None when voxel_range is empty/unsupported.

    NOTE(review): if `nii` is neither an existing file path nor a dict with
    an 'im' key, `dctnii`/`fnii` stay unbound and a NameError follows --
    callers must pass valid input. Verify against the callers.
    '''
    # `basestring` implies this module targets Python 2 (or defines a shim).
    if isinstance(nii, basestring) and os.path.isfile(nii):
        dctnii = imio.getnii(nii, output='all')
        fnii = nii
    if isinstance(nii, dict) and 'im' in nii:
        dctnii = nii
        if 'fim' in dctnii:
            fnii = dctnii['fim']
    # > output path
    if outpath == '' and fimout != '' and '/' in fimout:
        opth = os.path.dirname(fimout)
        if opth == '' and isinstance(fnii, basestring) and os.path.isfile(fnii):
            opth = os.path.dirname(nii)
        fimout = os.path.basename(fimout)
    elif outpath == '' and isinstance(fnii, basestring) and os.path.isfile(fnii):
        opth = os.path.dirname(fnii)
    else:
        opth = outpath
    imio.create_dir(opth)
    # > output floating and affine file names
    if fimout == '':
        if fcomment == '':
            fcomment += '_nimpa-modified'
        fout = os.path.join(opth, os.path.basename(fnii).split('.nii')[0] + fcomment + '.nii.gz')
    else:
        fout = os.path.join(opth, fimout.split('.')[0] + '.nii.gz')
    # > single value: rescale so the image maximum equals voxel_range[0]
    if voxel_range and len(voxel_range) == 1:
        im = voxel_range[0] * dctnii['im'] / np.max(dctnii['im'])
    elif voxel_range and len(voxel_range) == 2:
        # > normalise into range 0-1
        im = (dctnii['im'] - np.min(dctnii['im'])) / np.ptp(dctnii['im'])
        # > convert to voxel_range
        im = voxel_range[0] + im * (voxel_range[1] - voxel_range[0])
    else:
        # No (or unsupported) voxel_range: nothing to do.
        return None
    # > write the modified image, restoring the original axis order and flips
    imio.array2nii(im, dctnii['affine'], fout, trnsp=(dctnii['transpose'].index(0), dctnii['transpose'].index(1), dctnii['transpose'].index(2)), flip=dctnii['flip'])
    return {'fim': fout, 'im': im, 'affine': dctnii['affine']}
def tocimxml(value):  # pylint: disable=line-too-long
    """Return the CIM-XML representation of the input object,
    as an object of an appropriate subclass of :term:`Element`.

    The returned CIM-XML representation is consistent with :term:`DSP0201`.

    Parameters:

      value (:term:`CIM object`, :term:`CIM data type`, :term:`number`, :class:`py:datetime.datetime`, or tuple/list thereof):
        The input object.

        Specifying `None` has been deprecated in pywbem 0.12.

    Returns:

        The CIM-XML representation, as an object of an appropriate subclass of
        :term:`Element`.
    """  # noqa: E501
    # Arrays: render each element, mapping None to VALUE.NULL (or an empty
    # VALUE when VALUE.NULL support is disabled).
    if isinstance(value, (tuple, list)):
        array_xml = []
        for v in value:
            if v is None:
                if SEND_VALUE_NULL:
                    array_xml.append(cim_xml.VALUE_NULL())
                else:
                    array_xml.append(cim_xml.VALUE(None))
            else:
                array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v)))
        value_xml = cim_xml.VALUE_ARRAY(array_xml)
        return value_xml
    # CIM objects know how to render themselves.
    if hasattr(value, 'tocimxml'):
        return value.tocimxml()
    if value is None:
        # Deprecated input; still rendered below for backward compatibility.
        warnings.warn("A value of None for pywbem.tocimxml() has been " "deprecated.", DeprecationWarning, stacklevel=2)
    return cim_xml.VALUE(atomic_to_cim_xml(value))
def _shutdown_proc ( p , timeout ) :
"""Wait for a proc to shut down , then terminate or kill it after ` timeout ` .""" | freq = 10
# how often to check per second
for _ in range ( 1 + timeout * freq ) :
ret = p . poll ( )
if ret is not None :
logging . info ( "Shutdown gracefully." )
return ret
time . sleep ( 1 / freq )
logging . warning ( "Killing the process." )
p . kill ( )
return p . wait ( ) |
def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs):
    """Retrieve pandas objects from multiple tables

    Parameters
    ----------
    keys : a list of the tables
    selector : the table to apply the where criteria (defaults to keys[0]
        if not supplied)
    columns : the columns I want back
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection
    iterator : boolean, return an iterator, default False
    chunksize : nrows to include in iteration, return an iterator

    Raises
    ------
    KeyError if keys or selector is not found or keys is empty
    TypeError if keys is not a list or tuple
    ValueError if the tables are not ALL THE SAME DIMENSIONS
    """
    # default to single select when only one key is given
    where = _ensure_term(where, scope_level=1)
    if isinstance(keys, (list, tuple)) and len(keys) == 1:
        keys = keys[0]
    if isinstance(keys, str):
        return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs)
    if not isinstance(keys, (list, tuple)):
        raise TypeError("keys must be a list/tuple")
    if not len(keys):
        raise ValueError("keys must have a non-zero length")
    if selector is None:
        selector = keys[0]
    # collect the tables
    tbls = [self.get_storer(k) for k in keys]
    s = self.get_storer(selector)
    # validate rows: every table must exist, be a table, and share nrows
    nrows = None
    for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
        if t is None:
            raise KeyError("Invalid table [{key}]".format(key=k))
        if not t.is_table:
            raise TypeError("object [{obj}] is not a table, and cannot be used in all " "select as multiple".format(obj=t.pathname))
        if nrows is None:
            nrows = t.nrows
        elif t.nrows != nrows:
            raise ValueError("all tables must have exactly the same nrows!")
    # axis is the concatenation axis, taken from the tables' non-index axes
    axis = list({t.non_index_axes[0][0] for t in tbls})[0]

    def func(_start, _stop, _where):
        # retrieve the objs, _where is always passed as a set of
        # coordinates here
        objs = [t.read(where=_where, columns=columns, start=_start, stop=_stop, **kwargs) for t in tbls]
        # concat and return
        return concat(objs, axis=axis, verify_integrity=False)._consolidate()

    # create the iterator
    it = TableIterator(self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close)
    return it.get_result(coordinates=True)
def analyze(self, text):
    u"""Analyze the input text with custom CharFilters, Tokenizer and TokenFilters.

    :param text: unicode string to be tokenized
    :return: token generator. emitted element type depends on the output of the last TokenFilter. (e.g., ExtractAttributeFilter emits strings.)
    """
    # Char filters transform the raw text before tokenization.
    filtered_text = text
    for char_filter in self.char_filters:
        filtered_text = char_filter.filter(filtered_text)
    # Token filters are chained lazily over the token stream.
    token_stream = self.tokenizer.tokenize(filtered_text, stream=True, wakati=False)
    for token_filter in self.token_filters:
        token_stream = token_filter.filter(token_stream)
    return token_stream
def addScalarBar(self, c=None, title="", horizontal=False, vmin=None, vmax=None):
    """Add a 2D scalar bar to actor.

    The bar is not built here: the settings are only recorded on the actor
    and the actual widget is created later by Plotter.show().

    .. hint:: |mesh_bands| |mesh_bands.py|_
    """
    # book it, it will be created by Plotter.show() later
    self.scalarbar = [c, title, horizontal, vmin, vmax]
    return self
def factorization_machine_model(factor_size, num_features, lr_mult_config, wd_mult_config, init_config):
    """Builds a factorization machine network with the formulation:

    y = w_0 + \\sum(x_i w_i) + 0.5 * (\\sum\\sum <v_i, v_j> x_i x_j - \\sum <v_i, v_i> x_i^2)

    :param factor_size: dimensionality of each latent factor row of ``v``
    :param num_features: number of input features (rows of ``v`` and ``w``)
    :param lr_mult_config: dict of learning-rate multipliers keyed 'v', 'w', 'w0'
    :param wd_mult_config: dict of weight-decay multipliers keyed 'v', 'w', 'w0'
    :param init_config: dict of initializers keyed 'v', 'w', 'w0'
    :return: mxnet Symbol ending in a LogisticRegressionOutput
    """
    # Sparse (CSR) input rows.
    x = mx.symbol.Variable("data", stype='csr')
    # factor, linear and bias terms
    v = mx.symbol.Variable("v", shape=(num_features, factor_size), stype='row_sparse',
                           init=init_config['v'], lr_mult=lr_mult_config['v'],
                           wd_mult=wd_mult_config['v'])
    w = mx.symbol.Variable('w', shape=(num_features, 1), stype='row_sparse',
                           init=init_config['w'], lr_mult=lr_mult_config['w'],
                           wd_mult=wd_mult_config['w'])
    w0 = mx.symbol.Variable('w0', shape=(1,), init=init_config['w0'],
                            lr_mult=lr_mult_config['w0'], wd_mult=wd_mult_config['w0'])
    # Linear term plus bias: x.w + w0.
    w1 = mx.symbol.broadcast_add(mx.symbol.dot(x, w), w0)
    # squared terms for subtracting self interactions
    v_s = mx.symbol._internal._square_sum(data=v, axis=1, keepdims=True)
    x_s = x.square()
    bd_sum = mx.sym.dot(x_s, v_s)
    # interactions: (xV), squared and halved per the FM identity
    w2 = mx.symbol.dot(x, v)
    w2_squared = 0.5 * mx.symbol.square(data=w2)
    # putting everything together
    w_all = mx.symbol.Concat(w1, w2_squared, dim=1)
    sum1 = w_all.sum(axis=1, keepdims=True)
    sum2 = -0.5 * bd_sum
    model = sum1 + sum2
    y = mx.symbol.Variable("softmax_label")
    model = mx.symbol.LogisticRegressionOutput(data=model, label=y)
    return model
def Satisfy_Constraints(U, B, BtBinv):
    """U is the prolongator update. Project out components of U such that U*B = 0.

    Parameters
    ----------
    U : bsr_matrix
        m x n sparse bsr matrix; update to the prolongator.
    B : array
        n x k array of the coarse grid near nullspace vectors.
    BtBinv : array
        Local inv(B_i.H * B_i) matrices for each supernode, i.
        B_i is B restricted to the sparsity pattern of supernode i in U.

    Returns
    -------
    Updated U, so that U*B = 0.
    The update is computed by orthogonally (in 2-norm) projecting
    out the components of span(B) in U in a row-wise fashion.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother
    """
    RowsPerBlock = U.blocksize[0]
    ColsPerBlock = U.blocksize[1]
    num_block_rows = int(U.shape[0] / RowsPerBlock)
    # Precompute U*B once, flattened for the C helper.
    UB = np.ravel(U * B)
    # Apply constraints, noting that we need the conjugate of B
    # for use as Bi.H in local projection
    pyamg.amg_core.satisfy_constraints_helper(
        RowsPerBlock, ColsPerBlock, num_block_rows, B.shape[1],
        np.conjugate(np.ravel(B)), UB, np.ravel(BtBinv),
        U.indptr, U.indices, np.ravel(U.data))
    # The helper modifies U.data in place; return the same matrix object.
    return U
def randomArray(size, bound):
    """Returns an array initialized to random values between -bound and bound.

    :param size: int (1-D length) or shape tuple of the array to build
    :param bound: half-width of the target interval; values land in
        [-bound, bound)
    """
    # Accept a bare int as shorthand for a 1-D shape tuple.
    # isinstance is the idiomatic type check (and accepts int subclasses),
    # unlike the previous `type(size) == type(1)` comparison.
    if isinstance(size, int):
        size = (size,)
    # ndim(*size) presumably yields uniform values in [0, 1) -- TODO confirm;
    # scaling by 2*bound and shifting maps them into [-bound, bound).
    temp = Numeric.array(ndim(*size)) * (2.0 * bound)
    return temp - bound
def get(self, sid):
    """Constructs a FieldValueContext

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueContext
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueContext
    """
    solution = self._solution
    return FieldValueContext(
        self._version,
        assistant_sid=solution['assistant_sid'],
        field_type_sid=solution['field_type_sid'],
        sid=sid,
    )
def embedManifestExeCheck(target, source, env):
    """Function run by embedManifestExeCheckAction to check for existence of manifest
    and other conditions, and embed the manifest by calling embedManifestExeAction if so."""
    # Embedding is opt-in via the WINDOWS_EMBED_MANIFEST construction variable.
    if not env.get('WINDOWS_EMBED_MANIFEST', 0):
        return None
    manifestSrc = target[0].get_abspath() + '.manifest'
    if not os.path.exists(manifestSrc):
        print('(embed: no %s.manifest found; not embedding.)' % str(target[0]))
        return 0
    ret = embedManifestExeAction([target[0]], None, env)
    if ret:
        raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
    return ret
def get_declaration(self):
    """Returns the string for the declaration of the type."""
    name = str(self)
    if self.is_opaque:
        # Opaque types have no visible body.
        return "{strrep} = type opaque".format(strrep=name)
    return "{strrep} = type {struct}".format(strrep=name, struct=self.structure_repr())
def ask_string(*question: Token, default: Optional[str] = None) -> Optional[str]:
    """Ask the user to enter a string.

    :param question: tokens making up the prompt
    :param default: value returned when the user enters nothing
    :return: the entered string, or *default* on empty input
    """
    prompt = get_ask_tokens(question)
    if default:
        # Show the default alongside the question.
        prompt += ["(%s)" % default]
    info(*prompt)
    answer = read_input()
    return answer if answer else default
def set_(self, state):
    """Set new state for machine.

    Raises TransitionError when the transition from the current state
    is not allowed.
    """
    if self.can_be_(state):
        self.force_set(state)
        return
    # Translate only for the error message, so it names the canonical state.
    translated = self._meta['translator'].translate(state)
    raise TransitionError(
        "Cannot transit from '{actual_value}' to '{value}'.".format(
            actual_value=self.actual_state.value, value=translated.value))
def _save_obj_without_attr(obj, attr_list, path, values_to_save=None):
    """Serialize *obj* to *path* with selected attributes swapped out.

    Parameters
    ----------
    obj : obj
        Object of a class with a ``__dict__`` attribute.
    attr_list : list
        Attributes to exclude from the dill dump. An empty list saves
        the object unchanged.
    path : str
        Destination file for the dill object.
    values_to_save : list, optional
        Placeholder values written in place of the excluded attributes.
        Defaults to ``[None] * len(attr_list)``.
    """
    if values_to_save is None:
        values_to_save = [None] * len(attr_list)
    stashed = {}
    # Temporarily replace each listed attribute with its placeholder.
    for attr, placeholder in zip(attr_list, values_to_save):
        if attr in obj.__dict__:
            stashed[attr] = obj.__dict__.pop(attr)
            setattr(obj, attr, placeholder)
    with open(path, "wb") as out_file:
        dill.dump(obj, out_file)
    # Restore the original attributes after serialization.
    for attr, original in stashed.items():
        setattr(obj, attr, original)
def resource_id(**kwargs):
    """Create a valid resource id string from the given parts.

    This method builds the resource id from the left until the next required id parameter
    to be appended is not found. It then returns the built up id.

    :param dict kwargs: The keyword arguments that will make up the id.

    The method accepts the following keyword arguments:
        - subscription (required): Subscription id
        - resource_group: Name of resource group
        - namespace: Namespace for the resource provider (i.e. Microsoft.Compute)
        - type: Type of the resource (i.e. virtualMachines)
        - name: Name of the resource (or parent if child_name is also specified)
        - child_namespace_{level}: Namespace for the child resource of that level (optional)
        - child_type_{level}: Type of the child resource of that level
        - child_name_{level}: Name of the child resource of that level

    :returns: A resource id built from the given arguments.
    :rtype: str
    """
    # Drop explicit Nones so missing parts surface as KeyError during format.
    kwargs = {key: value for key, value in kwargs.items() if value is not None}

    def seg(template):
        # A KeyError here means the part was not supplied.
        return template.format(**kwargs)

    segments = [seg('/subscriptions/{subscription}')]
    try:
        try:
            segments.append(seg('resourceGroups/{resource_group}'))
        except KeyError:
            pass  # resource group is optional
        segments.append(seg('providers/{namespace}'))
        segments.append(seg('{type}/{name}'))
        level = 1
        while True:
            try:
                segments.append(seg('providers/{{child_namespace_{}}}'.format(level)))
            except KeyError:
                pass  # child namespace is optional at every level
            segments.append(seg('{{child_type_{0}}}/{{child_name_{0}}}'.format(level)))
            level += 1
    except KeyError:
        # First missing required part terminates the build.
        pass
    return '/'.join(segments)
def plot_feature_importances(clf, title='Feature Importance', feature_names=None,
                             max_num_features=20, order='descending',
                             x_tick_rotation=0, ax=None, figsize=None,
                             title_fontsize="large", text_fontsize="medium"):
    """Generates a plot of a classifier's feature importances.

    Args:
        clf: Classifier instance that implements ``fit`` and ``predict_proba``
            methods. The classifier must also have a ``feature_importances_``
            attribute.
        title (string, optional): Title of the generated plot. Defaults to
            "Feature Importance".
        feature_names (None, :obj:`list` of string, optional): Determines the
            feature names used to plot the feature importances. If None,
            feature names will be numbered.
        max_num_features (int): Determines the maximum number of features to
            plot. Defaults to 20.
        order ('ascending', 'descending', or None, optional): Determines the
            order in which the feature importances are plotted. Defaults to
            'descending'.
        x_tick_rotation (int, optional): Rotates x-axis tick labels by the
            specified angle. Useful when numerous labels overlap.
        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.
        figsize (2-tuple, optional): Tuple denoting figure size of the plot,
            e.g. (6, 6). Defaults to ``None``.
        title_fontsize (string or int, optional): Matplotlib-style fontsize
            for the title. Defaults to "large".
        text_fontsize (string or int, optional): Matplotlib-style fontsize
            for tick labels. Defaults to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot.plotters as skplt
        >>> rf = RandomForestClassifier()
        >>> rf.fit(X, y)
        >>> skplt.plot_feature_importances(
        ...     rf, feature_names=['petal length', 'petal width',
        ...                        'sepal length', 'sepal width'])
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()
    """
    if not hasattr(clf, 'feature_importances_'):
        raise TypeError('"feature_importances_" attribute not in classifier. '
                        'Cannot plot feature importances.')
    importances = clf.feature_importances_
    # Ensemble classifiers expose per-estimator importances; use their spread
    # as error bars when available.
    if hasattr(clf, 'estimators_') and isinstance(clf.estimators_, list) \
            and hasattr(clf.estimators_[0], 'feature_importances_'):
        std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0)
    else:
        std = None
    # Determine the plotting order of the bars.
    if order == 'descending':
        indices = np.argsort(importances)[::-1]
    elif order == 'ascending':
        indices = np.argsort(importances)
    elif order is None:
        indices = np.array(range(len(importances)))
    else:
        raise ValueError('Invalid argument {} for "order"'.format(order))
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    if feature_names is None:
        # Fall back to the (reordered) feature indices as labels.
        feature_names = indices
    else:
        # Reorder the supplied names to match the bar order.
        feature_names = np.array(feature_names)[indices]
    # Never plot more bars than there are features.
    max_num_features = min(max_num_features, len(importances))
    ax.set_title(title, fontsize=title_fontsize)
    if std is not None:
        ax.bar(range(max_num_features), importances[indices][:max_num_features],
               color='r', yerr=std[indices][:max_num_features], align='center')
    else:
        ax.bar(range(max_num_features), importances[indices][:max_num_features],
               color='r', align='center')
    ax.set_xticks(range(max_num_features))
    ax.set_xticklabels(feature_names[:max_num_features], rotation=x_tick_rotation)
    ax.set_xlim([-1, max_num_features])
    ax.tick_params(labelsize=text_fontsize)
    return ax
def get_gos_d0d1(self):
    """Return GO IDs whose depth is 0 (BP, MF, CC) or depth is 1."""
    depth2goobjs = self.gosubdag.rcntobj.depth2goobjs
    return {goobj.id for depth in [0, 1] for goobj in depth2goobjs.get(depth)}
def get_qualification_requests(self, qualification_type_id, sort_by='Expiration',
                               sort_direction='Ascending', page_size=10, page_number=1):
    """Retrieve pending requests for the given qualification type.

    :param qualification_type_id: id of the qualification type to query
    :param sort_by: property to sort results by (default 'Expiration')
    :param sort_direction: 'Ascending' or 'Descending'
    :param page_size: number of results per page
    :param page_number: 1-based page to fetch
    :return: result of ``_process_request`` for 'GetQualificationRequests'
    """
    request_params = {
        'QualificationTypeId': qualification_type_id,
        'SortProperty': sort_by,
        'SortDirection': sort_direction,
        'PageSize': page_size,
        'PageNumber': page_number,
    }
    marker_elems = [('QualificationRequest', QualificationRequest)]
    return self._process_request('GetQualificationRequests', request_params, marker_elems)
def total_charges(self):
    """Represents the 'goods' acquired in the invoice."""
    # Carried-forward entries are bookkeeping, not goods; exclude them.
    charges = Charge.objects.filter(invoice=self).charges()
    charges = charges.exclude(product_code=CARRIED_FORWARD)
    return total_amount(charges)
def _get_subject_info ( self , n_local_subj , data ) :
"""Calculate metadata for subjects allocated to this process
Parameters
n _ local _ subj : int
Number of subjects allocated to this process .
data : list of 2D array . Each in shape [ n _ voxel , n _ tr ]
Total number of MPI process .
Returns
max _ sample _ tr : 1D array
Maximum number of TR to subsample for each subject
max _ sample _ voxel : 1D array
Maximum number of voxel to subsample for each subject""" | max_sample_tr = np . zeros ( n_local_subj ) . astype ( int )
max_sample_voxel = np . zeros ( n_local_subj ) . astype ( int )
for idx in np . arange ( n_local_subj ) :
nvoxel = data [ idx ] . shape [ 0 ]
ntr = data [ idx ] . shape [ 1 ]
max_sample_voxel [ idx ] = min ( self . max_voxel , int ( self . voxel_ratio * nvoxel ) )
max_sample_tr [ idx ] = min ( self . max_tr , int ( self . tr_ratio * ntr ) )
return max_sample_tr , max_sample_voxel |
def send_peers(self, connection_id):
    """Sends a message containing our peers to the
    connection identified by connection_id.

    Args:
        connection_id (str): A unique identifier which identifies a
            connection on the network server socket.
    """
    with self._lock:
        # Advertise the endpoints of our peers, plus our own if known.
        endpoints = list(self._peers.values())
        if self._endpoint:
            endpoints.append(self._endpoint)
        response = GetPeersResponse(peer_endpoints=endpoints)
        try:
            # One-way send: the connection may be closed immediately
            # afterwards if it is a temporary connection.
            self._network.send(
                validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
                response.SerializeToString(),
                connection_id,
                one_way=True)
        except ValueError:
            LOGGER.debug("Connection disconnected: %s", connection_id)
def addvPPfunc(self, solution):
    '''Adds the marginal marginal value function to an existing solution, so
    that the next solver can evaluate vPP and thus use cubic interpolation.

    Parameters
    ----------
    solution : ConsumerSolution
        The solution to this single period problem, which must include the
        consumption function.

    Returns
    -------
    solution : ConsumerSolution
        The same solution passed as input, but with the marginal marginal
        value function for this period added as the attribute vPPfunc.
    '''
    # vPP is derived from the consumption function and the CRRA coefficient.
    solution.vPPfunc = MargMargValueFunc(solution.cFunc, self.CRRA)
    return solution
def cpe_disjoint(cls, source, target):
    """Compares two WFNs and returns True if the set-theoretic relation
    between the names is DISJOINT.

    :param CPE2_3_WFN source: first WFN CPE Name
    :param CPE2_3_WFN target: second WFN CPE Name
    :returns: True if the set relation between source and target
        is DISJOINT, otherwise False.
    :rtype: boolean
    """
    # The overall relationship is DISJOINT as soon as any pairwise
    # attribute comparison is DISJOINT.
    return any(result == CPESet2_3.LOGICAL_VALUE_DISJOINT
               for _att, result in CPESet2_3.compare_wfns(source, target))
def resolve(self, component_type, **kwargs):
    """Resolves an instance of the component type.

    :param component_type: The type of the component (e.g. a class).
    :param kwargs: Overriding arguments to use (by name) instead of resolving them.
    :return: An instance of the component.
    """
    # Resolution is serialized; each call gets a fresh context.
    with self._resolve_lock:
        return _ComponentContext(self).resolve(component_type, **kwargs)
def to_vars_dict(self):
    """Return local state which is relevant for the cluster setup process."""
    # Each attribute is exported under an 'azure_'-prefixed key.
    fields = ('client_id', 'location', 'secret', 'subscription_id', 'tenant_id')
    return {'azure_' + name: getattr(self, name) for name in fields}
def ImportBoarding(self, drop_off_file):
    "Reads the bedverb.mdv file."
    columns = ['FRT_FID', 'LI_LFD_NR', 'BEDVERB_CODE']
    for trip_id, seq, code in ReadCSV(drop_off_file, columns):
        # Sequence numbers in the file are 1-based; keys are 0-based.
        key = (trip_id, int(seq) - 1)
        if code == 'A':
            # 'A': no pick-up at this stop.
            self.pickup_type[key] = '1'
        elif code == 'E':
            # 'E': no drop-off at this stop.
            self.drop_off_type[key] = '1'
        elif code == 'B':
            # 'B' just means the rider must push a button to have the driver
            # stop. We don't encode this for now.
            pass
        else:
            raise ValueError('Unexpected code in bedverb.mdv; '
                             'FRT_FID=%s BEDVERB_CODE=%s' % (trip_id, code))
def get_web_element(self, element):
    """Return the web element from a page element or its locator.

    :param element: either a WebElement, PageElement or element locator
        as a tuple (locator_type, locator_value)
    :returns: WebElement object, or None when the input is none of those
    """
    # Imported lazily to avoid a circular import at module load.
    from toolium.pageelements.page_element import PageElement
    if isinstance(element, WebElement):
        return element
    if isinstance(element, PageElement):
        return element.web_element
    if isinstance(element, tuple):
        # A locator tuple: look the element up through the driver.
        return self.driver_wrapper.driver.find_element(*element)
    return None
def calc_widths_filter(self, next_filter):
    """Coroutine to analyze the incoming data stream for creating optimal
    column width choices. This may buffer some of the incoming stream if
    there isn't enough information to make good choices about column
    widths. Also it may resize widths if certain conditions are met such
    as the terminal width resize event being detected.
    """
    # True once the buffered prefill window has been forwarded downstream.
    window_sent = not not self.data_window
    # True once next_filter has been primed with next().
    next_primed = False
    genexit = None
    if not self.data_window:
        # Buffer rows until we have enough samples for width estimation,
        # bounded by both a row count and a wall-clock delay.
        start = time.monotonic()
        while len(self.data_window) < self.min_render_prefill or \
                (len(self.data_window) < self.max_render_prefill and
                 (time.monotonic() - start) < self.max_render_delay):
            try:
                self.data_window.append((yield))
            except GeneratorExit as e:
                # Remember the close request; re-raise after flushing below.
                genexit = e
                break
    while True:
        if self.width != self.desired_width:
            # Terminal was resized: force a header redraw.
            self.headers_drawn = False
            # TODO: make optional
            self.width = self.desired_width
        remaining = self.usable_width
        widths = [x['width'] for x in self.colspec]
        preformatted = [i for i, x in enumerate(self.colspec)
                        if x['overflow'] == 'preformatted']
        # Indexes of columns without an explicit width.
        unspec = []
        for i, width in enumerate(widths):
            fixed_width = self.width_normalize(width)
            if fixed_width is None:
                unspec.append(i)
            else:
                widths[i] = fixed_width
                remaining -= fixed_width
        if unspec:
            if self.table.flex and self.data_window:
                # Distribute leftover space based on observed content.
                for i, w in self.calc_flex(self.data_window, remaining,
                                           unspec, preformatted):
                    widths[i] = w
            else:
                # No sample data: split leftover space evenly.
                dist = self._uniform_dist(len(unspec), remaining)
                for i, width in zip(unspec, dist):
                    widths[i] = width
        self.widths = widths
        self.formatters = self.make_formatters()
        if not next_primed:
            next(next_filter)
            next_primed = True
        if not window_sent:
            # Flush the buffered prefill rows downstream exactly once.
            for x in self.data_window:
                next_filter.send(x)
            window_sent = True
        if genexit:
            # Propagate the deferred close after the window was flushed.
            raise genexit
        data = (yield)
        self.data_window.append(data)
        next_filter.send(data)
async def save(self):
    """Save the machine in MAAS."""
    # Snapshot owner_data before and after local edits so only the diff is
    # pushed through the dedicated set_owner_data handler.
    orig_owner_data = self._orig_data['owner_data']
    new_owner_data = dict(self._data['owner_data'])
    # owner_data is synced separately below; drop it from the generic diff.
    self._changed_data.pop('owner_data', None)
    await super(Machine, self).save()
    params_diff = calculate_dict_diff(orig_owner_data, new_owner_data)
    if len(params_diff) > 0:
        params_diff['system_id'] = self.system_id
        await self._handler.set_owner_data(**params_diff)
        # NOTE(review): this assignment is a no-op (assigns the value to
        # itself); presumably it was meant to write back `new_owner_data`
        # or refresh from the handler -- TODO confirm intent.
        self._data['owner_data'] = self._data['owner_data']
def get_term_by_name(self, name):
    """Get the GO term with the given GO term name.

    If the given name is not associated with any GO term, the function
    will search for it among synonyms.

    Parameters
    ----------
    name : str
        The name of the GO term.

    Returns
    -------
    `GOTerm`
        The GO term with the given name.

    Raises
    ------
    ValueError
        If the given name is found neither among the GO term names, nor
        among synonyms.
    """
    # Primary lookup by canonical name.
    try:
        return self.terms[self.name2id[name]]
    except KeyError:
        pass
    # Fall back to the synonym index.
    try:
        term = self.terms[self.syn2id[name]]
    except KeyError:
        term = None
    else:
        logger.info('GO term name "%s" is a synonym for "%s".', name, term.name)
    if term is None:
        raise ValueError('GO term name "%s" not found!' % name)
    return term
def insert(collection_name, docs, check_keys, safe, last_error_args):
    """Get an **insert** message."""
    # Legacy wire-protocol message body: reserved zero flags, the full
    # collection name as a C string, then the BSON-encoded documents.
    data = __ZERO
    data += bson._make_c_string(collection_name)
    bson_data = "".join([bson.BSON.encode(doc, check_keys) for doc in docs])
    if not bson_data:
        raise InvalidOperation("cannot do an empty bulk insert")
    data += bson_data
    if safe:
        # Safe mode: follow the insert with a getLastError query so the
        # server acknowledges the write. The returned request id is that
        # of the error query, whose reply the caller waits for.
        (_, insert_message) = __pack_message(2002, data)  # 2002 = OP_INSERT
        (request_id, error_message) = __last_error(last_error_args)
        return (request_id, insert_message + error_message)
    else:
        return __pack_message(2002, data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.