signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def find_bindmounts(self):
    """Yield every bind mountpoint whose path matches :attr:`re_pattern`."""
    pattern = self.re_pattern
    for mountpoint, (_orig, _fs, opts) in self.mountpoints.items():
        # Only bind mounts are of interest; skip everything else early.
        if 'bind' not in opts:
            continue
        if re.match(pattern, mountpoint):
            yield mountpoint
|
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    build_pxe_default
        /config_templates/build_pxe_default
    clone
        /config_templates/clone
    revision
        /config_templates/revision

    ``super`` is called otherwise.
    """
    special = ('build_pxe_default', 'clone', 'revision')
    if which not in special:
        return super(ConfigTemplate, self).path(which)
    # ``clone`` is resolved against the entity itself, the other two
    # against the collection.
    prefix = 'self' if which == 'clone' else 'base'
    base = super(ConfigTemplate, self).path(prefix)
    return '{0}/{1}'.format(base, which)
|
def on_compare(self, node):  # ('left', 'ops', 'comparators')
    """Evaluate a (possibly chained) comparison such as ``a < b <= c``.

    Each operator in ``node.ops`` is applied between the running left-hand
    value and the next comparator; the right operand of each step becomes
    the left operand of the next.  The loop exits early as soon as the
    overall result is decided.
    """
    lval = self.run(node.left)
    out = True
    for op, rnode in zip(node.ops, node.comparators):
        rval = self.run(rnode)
        out = op2func(op)(lval, rval)
        # Chained comparison: the right operand feeds the next step.
        lval = rval
        if self.use_numpy and isinstance(out, numpy.ndarray) and out.any():
            # NOTE(review): for ndarray results the chain stops as soon as
            # *any* element is truthy, so later comparators are not applied
            # element-wise — confirm this matches the intended numpy
            # chaining semantics.
            break
        elif not out:
            # A scalar comparison failed; the whole chain is False.
            break
    return out
|
def lal(self):
    """Produce a LAL time series object equivalent to self.

    Returns
    -------
    lal_data : {lal.*TimeSeries}
        LAL time series object containing the same data as self.  The
        concrete type depends on the sample dtype.  If the epoch of self
        is None, the epoch of the returned LAL object will be
        LIGOTimeGPS(0, 0); otherwise it is the same as that of self.

    Raises
    ------
    TypeError
        If the dtype is not one of float32, float64, complex64 or
        complex128 (e.g. when the series is stored in GPU memory).
    """
    if self._epoch is None:
        ep = _lal.LIGOTimeGPS(0, 0)
    else:
        ep = self._epoch
    # Map the numpy dtype onto the matching LAL constructor.
    creators = {
        _numpy.dtype(_numpy.float32): _lal.CreateREAL4TimeSeries,
        _numpy.dtype(_numpy.float64): _lal.CreateREAL8TimeSeries,
        _numpy.dtype(_numpy.complex64): _lal.CreateCOMPLEX8TimeSeries,
        _numpy.dtype(_numpy.complex128): _lal.CreateCOMPLEX16TimeSeries,
    }
    try:
        create = creators[self._data.dtype]
    except KeyError:
        # Previously an unsupported dtype fell through with lal_data still
        # None and crashed with AttributeError below; fail explicitly.
        raise TypeError(
            'unsupported dtype for LAL conversion: %r' % self._data.dtype)
    lal_data = create("", ep, 0, self.delta_t, _lal.SecondUnit, len(self))
    lal_data.data.data[:] = self.numpy()
    return lal_data
|
def crop(self, *args, **kwargs):
    """Return a new multiscene with every contained scene cropped.

    All positional and keyword arguments are forwarded to each scene's
    ``crop`` method.
    """
    scene_gen = self._scenes
    return self._generate_scene_func(scene_gen, 'crop', True, *args, **kwargs)
|
def nrmse_iqr(simulated_array, observed_array, replace_nan=None,
              replace_inf=None, remove_neg=False, remove_zero=False):
    """Compute the IQR normalized root mean square error between the
    simulated and observed data.

    **Range:** 0 ≤ NRMSE < inf.

    **Notes:** This metric is the RMSE normalized by the interquartile
    range of the observed time series (x).  Normalizing allows comparison
    between data sets with different scales.  The NRMSEquartile is the
    least sensitive to outliers of the three normalized RMSE metrics.

    Parameters
    ----------
    simulated_array : one dimensional ndarray
        An array of simulated data from the time series.
    observed_array : one dimensional ndarray
        An array of observed data from the time series.
    replace_nan : float, optional
        If given, value to replace NaN values with; if None, paired values
        are dropped whenever either array holds a NaN at that position.
    replace_inf : float, optional
        If given, value to replace Inf values with; if None, paired values
        are dropped whenever either array holds an Inf at that position.
    remove_neg : boolean, optional
        If True, drop the paired values whenever either entry is negative.
    remove_zero : boolean, optional
        If True, drop the paired values whenever either entry is zero.

    Returns
    -------
    float
        The IQR normalized root mean square error.

    References
    ----------
    - Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of
      information for multiple resolution comparison between maps that
      share a real variable. Environmental and Ecological Statistics
      15(2) 111-142.
    """
    # Checking and cleaning the data.
    simulated_array, observed_array = treat_values(
        simulated_array,
        observed_array,
        replace_nan=replace_nan,
        replace_inf=replace_inf,
        remove_neg=remove_neg,
        remove_zero=remove_zero,
    )
    rmse_value = np.sqrt(np.mean(np.square(simulated_array - observed_array)))
    # Normalize by the interquartile range of the observations.
    q1, q3 = np.percentile(observed_array, [25, 75])
    return rmse_value / (q3 - q1)
|
def _ReadMakefileAm ( self ) :
"""Reads Makefile . am to initialize the project information ."""
|
if not self . library_name :
raise RuntimeError ( "Missing library name" )
file_object = open ( "Makefile.am" , "rb" )
if not file_object :
raise IOError ( "Unable to open: Makefile.am" )
found_subdirs = False
for line in file_object . readlines ( ) :
line = line . strip ( )
if found_subdirs :
library_name , _ , _ = line . partition ( b" " )
if sys . version_info [ 0 ] >= 3 :
library_name = library_name . decode ( "ascii" )
self . include_directories . append ( library_name )
if library_name . startswith ( "lib" ) :
self . library_names . append ( library_name )
if library_name == self . library_name :
break
elif line . startswith ( b"SUBDIRS" ) :
found_subdirs = True
file_object . close ( )
if not self . include_directories or not self . library_names :
raise RuntimeError ( "Unable to find include directories and library names in: " "Makefile.am" )
|
def create_new(self):
    """Create a new kitten by querying /r/awww.

    When a kitten with the same URL already exists, the existing record
    is returned instead of creating a duplicate.
    """
    kitten = reddit_kitten()
    if self.filter(url=kitten.url).exists():
        return self.get(url=kitten.url)
    return self.create(
        url=kitten.url,
        thumbnail=kitten.thumbnail,
        title=kitten.title,
    )
|
def glibc_version_string():
    """Return the glibc version string, or None if not running on glibc."""
    # ctypes.CDLL(None) internally calls dlopen(NULL); as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for
    # the main program" — so the dynamic linker tells us which libc this
    # process is actually using.
    process_namespace = ctypes.CDLL(None)
    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> we are not linked against glibc.
        return None
    # gnu_get_libc_version() returns a C string such as "2.5".
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str = gnu_get_libc_version()
    # py2/py3 compatibility: decode bytes into a native str when needed.
    if isinstance(version_str, str):
        return version_str
    return version_str.decode("ascii")
|
def query(self, query):
    """Return an iterable of objects matching criteria expressed in `query`.

    LoggingDatastore logs the access before delegating to the parent
    datastore.
    """
    message = '%s: query %s' % (self, query)
    self.logger.info(message)
    return super(LoggingDatastore, self).query(query)
|
def history(self):
    """Return the list of changesets in which this file was changed.

    Raises:
        NodeError: if no changeset is attached to this FileNode.
    """
    changeset = self.changeset
    if changeset is None:
        raise NodeError('Unable to get changeset for this FileNode')
    return changeset.get_file_history(self.path)
|
def verify(df, check, *args, **kwargs):
    """Generic verify. Assert that ``check(df, *args, **kwargs)`` is true.

    Parameters
    ----------
    df : DataFrame
    check : function
        Should take DataFrame and **kwargs. Returns bool.

    Returns
    -------
    df : DataFrame
        Same as the input, so calls can be chained.

    Raises
    ------
    AssertionError
        With ``('<check name> is not true', df)`` as args when the check
        fails.
    """
    result = check(df, *args, **kwargs)
    if not result:
        # Raise explicitly instead of relying on the `assert` statement,
        # which is stripped when Python runs with -O and would silently
        # disable the verification.
        msg = '{} is not true'.format(check.__name__)
        raise AssertionError(msg, df)
    return df
|
def get_sep(self, key='param', target='in'):
    """Get the separator of the current docstring style.

    e.g.: in reST and javadoc style, it is ":".

    :param key: the key which separator is wanted (param, type, return,
        rtype, ..) (Default value = 'param')
    :param target: the target docstring is 'in' for the input or 'out'
        for the output to generate. (Default value = 'in')
    """
    if target != 'out':
        target = 'in'
    style = self.style[target]
    # numpydoc and google styles use no separator at all.
    if style in ('numpydoc', 'google'):
        return ''
    return self.opt[key][style]['sep']
|
def closeView(self, view=None):
    """Close the inputed view.

    :param view: | <int> index of the view to close
                 | <XView> the view itself
                 | None to close the current view
    :return: <bool> True if a view was closed, False when the view is not
        found in this container.
    """
    # Resolve the argument to an actual view widget.
    if isinstance(view, int):  # was `type(view) == int`
        view = self.widget(view)
    elif view is None:  # was `view == None`; identity check is correct here
        view = self.currentView()
    index = self.indexOf(view)
    if index == -1:
        return False
    # Close the whole panel when this is the last remaining view,
    # otherwise close just the one view.
    count = self.count()
    if count == 1:
        self.closePanel()
    else:
        view.close()
    return True
|
def get_abstract(pubmed_id, prepend_title=True):
    """Get the abstract of an article in the Pubmed database.

    Returns None when the article XML cannot be retrieved.
    """
    article = get_article_xml(pubmed_id)
    return (None if article is None
            else _abstract_from_article_element(article, prepend_title))
|
def getArguments(parser):
    """Provide additional validation of the arguments collected by argparse.

    Enforces that exactly one of --load-model / --save-model is supplied,
    validates the referenced files and directories, and parses the
    landmark / standard-space sequences in training mode.
    """
    args = parser.parse_args()
    # Check mutually exclusive and required arguments: the two model
    # options decide between application mode and training mode.
    if args.lmodel and args.smodel:
        parser.error('only one of --load-model and --save-model can be supplied, as they decide on whether to apply the application or the training mode')
    if not args.lmodel and not args.smodel:
        parser.error('exactly one of --load-model or --save-model has to be supplied')
    # Application mode: the model file must exist and output images are
    # required.
    if args.lmodel:
        if not os.path.isfile(args.lmodel):
            parser.error('the supplied model file {} does not exist'.format(args.lmodel))
        if not args.simages:
            parser.error('--save-images must be supplied when running the application mode')
    # Training mode: parse free-form sequences and refuse to overwrite an
    # existing model unless --force is given.
    if args.smodel:
        if not args.landmarkp in ('L2', 'L3', 'L4'):
            args.landmarkp = sequenceOfIntegersGeAscendingStrict(args.landmarkp)
        if not 'auto' == args.stdspace:
            args.stdspace = sequenceOfIntegersGeAscendingStrict(args.stdspace)
        if not args.force and os.path.isfile(args.smodel):
            parser.error('the target model file {} already exists'.format(args.smodel))
    # Checks that apply to both modes.
    if args.simages:
        if not os.path.isdir(args.simages):
            parser.error('--save-images must be a valid directory')
    if args.masks and len(args.masks) != len(args.images):
        parser.error('the same number of masks must be passed to --masks as images have been supplied')
    return args
|
def md5_of_file(path, nblocks=128):
    """Compute the md5 hash of a file.

    Parameters
    ----------
    path : string
        Path to the file to compute the hash of.
    nblocks : int, optional
        Number of md5 blocks to read per chunk (default 128).

    Returns
    -------
    hashlib md5 hash object. Use .digest() or .hexdigest() on the
    returned object to get a binary or hex encoded string.
    """
    digest = md5()
    chunk_size = nblocks * digest.block_size
    with open(path, 'rb') as stream:
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest
|
def _clone_block_and_wires(block_in):
    """Copy the WireVectors of a block for another round of synthesis.

    This does not split a WireVector with multiple wires.

    :param block_in: the block to copy
    :return: the resulting block and a {old wire: new wire} map
    """
    # Make sure that everything is valid before copying anything.
    block_in.sanity_check()
    block_out = block_in.__class__()
    temp_wv_map = {}
    with set_working_block(block_out, no_sanity_check=True):
        for wv in block_in.wirevector_subset():
            temp_wv_map[wv] = clone_wire(wv)
    return block_out, temp_wv_map
|
def pad(text, length):
    """Pad text to the given length, taking wide characters into account."""
    # wcswidth measures display columns, not code points.
    deficit = length - wcswidth(text)
    if deficit > 0:
        return text + ' ' * deficit
    return text
|
def canonical_key(self, key):
    """Return the canonical form of the given ``key``.

    Absolute paths are joined onto the base URI; anything else is
    expanded through the curies mapping.
    """
    if not key.startswith('/'):
        return self.curies.expand(key)
    return urlparse.urljoin(self.base_uri, key)
|
def _GetRecordValues(self, parser_mediator, table_name, record, value_mappings=None):
    """Retrieves the values from the record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      table_name (str): name of the table.
      record (pyesedb.record): ESE record.
      value_mappings (Optional[dict[str, str]]): value mappings, which map
          the column name to a callback method.

    Returns:
      dict[str, object]: values per column name.
    """
    record_values = {}
    for value_entry in range(0, record.number_of_values):
        # Stop processing when the user aborted the extraction.
        if parser_mediator.abort:
            break
        column_name = record.get_column_name(value_entry)
        # Ignore duplicate column names; only the first value is kept.
        if column_name in record_values:
            logger.warning('[{0:s}] duplicate column: {1:s} in table: {2:s}'.format(self.NAME, column_name, table_name))
            continue
        # Resolve an optional per-column callback that decodes the raw
        # value data.
        value_callback = None
        if value_mappings and column_name in value_mappings:
            value_callback_method = value_mappings.get(column_name)
            if value_callback_method:
                value_callback = getattr(self, value_callback_method, None)
                if value_callback is None:
                    logger.warning(('[{0:s}] missing value callback method: {1:s} for column: ' '{2:s} in table: {3:s}').format(self.NAME, value_callback_method, column_name, table_name))
        if value_callback:
            # Callback path: feed the raw value data to the callback and
            # fall back to None on any failure.
            try:
                value_data = record.get_value_data(value_entry)
                value = value_callback(value_data)
            except Exception as exception:  # pylint: disable=broad-except
                logger.error(exception)
                value = None
                parser_mediator.ProduceExtractionWarning(('unable to parse value: {0:s} with callback: {1:s} with error: ' '{2!s}').format(column_name, value_callback_method, exception))
        else:
            # Default path: use the generic typed value reader.
            try:
                value = self._GetRecordValue(record, value_entry)
            except ValueError as exception:
                value = None
                parser_mediator.ProduceExtractionWarning('unable to parse value: {0:s} with error: {1!s}'.format(column_name, exception))
        record_values[column_name] = value
    return record_values
|
def get_tree(self, process_name):
    """Return the tree managing time-periods for the given process.

    Returns None when no tree contains the process.
    """
    matching = (tree for tree in self.trees.values() if process_name in tree)
    return next(matching, None)
|
def copy(self, klass=_x):
    """Return a new chain beginning with the current chain tokens and argument."""
    source = super().copy()
    duplicate = klass(source._args[0])
    # Seed the new chain with a single composed token from the source.
    duplicate._tokens = [[source.compose, [], {}]]
    return duplicate
|
def _preserve_bonds(self, sliced_cartesian, use_lookup=None):
    """Reattach atoms whose bonds were cut by a geometric slice.

    Is called after cutting geometric shapes.  If you want to change the
    rules how bonds are preserved, when applying e.g.
    :meth:`Cartesian.cut_sphere` this is the function you have to modify.
    It is recommended to inherit from the Cartesian class to tailor it
    for your project, instead of modifying the source code of ChemCoord.

    Args:
        sliced_cartesian (Cartesian): the sliced subset of ``self``.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``

    Returns:
        Cartesian:
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    included_atoms_set = set(sliced_cartesian.index)
    assert included_atoms_set.issubset(set(self.index)), 'The sliced Cartesian has to be a subset of the bigger frame'
    bond_dic = self.get_bonds(use_lookup=use_lookup)
    # Collect atoms bonded to the slice but not themselves included.
    new_atoms = set([])
    for atom in included_atoms_set:
        new_atoms = new_atoms | bond_dic[atom]
    new_atoms = new_atoms - included_atoms_set
    # Grow the selection until no dangling bonds remain: every boundary
    # atom pulls in its complete (infinite) coordination sphere, excluding
    # atoms that are already part of the selection.
    while not new_atoms == set([]):
        index_of_interest = new_atoms.pop()
        included_atoms_set = (included_atoms_set | self.get_coordination_sphere(index_of_interest, n_sphere=float('inf'), only_surface=False, exclude=included_atoms_set, give_only_index=True, use_lookup=use_lookup))
        new_atoms = new_atoms - included_atoms_set
    molecule = self.loc[included_atoms_set, :]
    return molecule
|
def try_instance_init(self, instance, late_start=False):
    """Try to "initialize" the given module instance.

    :param instance: instance to init
    :type instance: object
    :param late_start: If late_start, don't look for last_init_try
    :type late_start: bool
    :return: True on successful init. False if instance init method raised
        any Exception.
    :rtype: bool
    """
    try:
        instance.init_try += 1
        # Maybe it's a retry
        if not late_start and instance.init_try > 1:
            # Do not retry too frequently — avoid a tight init loop.
            if instance.last_init_try > time.time() - MODULE_INIT_PERIOD:
                logger.info("Too early to retry initialization, retry period is %d seconds", MODULE_INIT_PERIOD)
                # logger.info("%s / %s", instance.last_init_try, time.time())
                return False
        instance.last_init_try = time.time()
        logger.info("Trying to initialize module: %s", instance.name)
        # If it's an external module, create/update Queues()
        if instance.is_external:
            instance.create_queues(self.daemon.sync_manager)
        # The module instance init function says if initialization is ok
        if not instance.init():
            logger.warning("Module %s initialisation failed.", instance.name)
            return False
        logger.info("Module %s is initialized.", instance.name)
    except Exception as exp:  # pylint: disable=broad-except
        # pragma: no cover, simple protection
        msg = "The module instance %s raised an exception " "on initialization: %s, I remove it!" % (instance.name, str(exp))
        self.configuration_errors.append(msg)
        logger.error(msg)
        logger.exception(exp)
        return False
    return True
|
def normalizeSparseWeights(m):
    """Initialize the weights using kaiming_uniform initialization
    normalized to the number of non-zeros in the layer instead of the
    whole input size.

    Similar to torch.nn.Linear.reset_parameters() but applying weight
    sparsity to the input size.
    """
    if isinstance(m, SparseWeightsBase):
        _, inputSize = m.module.weight.shape
        # The effective fan-in is only the non-zero fraction of the inputs.
        fan = int(inputSize * m.weightSparsity)
        gain = nn.init.calculate_gain('leaky_relu', math.sqrt(5))
        # Use math.sqrt directly: the `np.math` alias used here before was
        # removed in NumPy 1.25.
        std = gain / math.sqrt(fan)
        # Calculate uniform bounds from standard deviation.
        bound = math.sqrt(3.0) * std
        nn.init.uniform_(m.module.weight, -bound, bound)
        if m.module.bias is not None:
            bound = 1 / math.sqrt(fan)
            nn.init.uniform_(m.module.bias, -bound, bound)
|
def is_published(self):
    """``True`` if the post is published, ``False`` otherwise."""
    status_ok = self.status == Status.published
    # A post scheduled in the future is not yet published.
    return status_ok and self.timestamp <= arrow.now()
|
def document_type2marc(self, key, value):
    """Populate the ``980`` MARC field."""
    # Returns None implicitly for unknown or falsy-mapped values.
    mapped = DOCUMENT_TYPE_REVERSE_MAP.get(value)
    if mapped:
        return {'a': mapped}
|
def as_odict(self):
    """Return an odict version of the object, based on its attributes."""
    # A pre-built custom odict, when present, wins outright.
    if hasattr(self, 'cust_odict'):
        return self.cust_odict
    # Give the object a chance to validate its attributes first.
    if hasattr(self, 'attr_check'):
        self.attr_check()
    result = odict()
    for name in self.attrorder:
        result[name] = getattr(self, name)
    return result
|
def mask_surface(self, surface, surface_x=0, surface_y=0):
    """A drawing operator that paints the current source using the alpha
    channel of :obj:`surface` as a mask.

    (Opaque areas of :obj:`surface` are painted with the source,
    transparent areas are not painted.)

    :param surface: A :class:`Surface` object.
    :param surface_x: X coordinate at which to place the origin of surface.
    :param surface_y: Y coordinate at which to place the origin of surface.
    :type surface_x: float
    :type surface_y: float
    """
    cairo.cairo_mask_surface(self._pointer, surface._pointer, surface_x, surface_y)
    # Raise if the underlying cairo context entered an error state.
    self._check_status()
|
def search(self):
    """*Return the results of the database conesearch*

    **Return:**
        - ``matchIndies, matches`` -- indices of the queried sources and
          the matched database rows rendered as a list of dictionaries.

    **Usage:**
        See class usage.
    """
    self.log.debug('starting the ``get`` method')
    # Build and run the trixel-based cone-search SQL.
    sqlQuery = self._get_on_trixel_sources_from_database_query()
    databaseRows = self._execute_query(sqlQuery)
    # Crossmatch the returned rows against the query sources.
    matchIndies, matches = self._list_crossmatch(databaseRows)
    # Wrap the matches in a renderer so callers can export them in
    # multiple formats.
    from fundamentals.renderer import list_of_dictionaries
    matches = list_of_dictionaries(log=self.log, listOfDictionaries=matches, reDatetime=self.reDatetime)
    self.log.debug('completed the ``get`` method')
    return matchIndies, matches
|
def utc_to_tz(utc_datetime, tzinfo, keep_tzinfo=False):
    """Convert a UTC datetime to a timezone-aware local time.

    :param utc_datetime: datetime assumed to be in UTC (stamped with the
        module-level ``utc`` tzinfo before conversion)
    :param tzinfo: target timezone
    :param keep_tzinfo: when exactly False (the default), the tzinfo is
        stripped from the result
    """
    localized = utc_datetime.replace(tzinfo=utc).astimezone(tzinfo)
    if keep_tzinfo is False:
        localized = localized.replace(tzinfo=None)
    return localized
|
def removeRouterPrefix(self, prefixEntry):
    """Remove the configured prefix on a border router.

    Args:
        prefixEntry: an on-mesh prefix entry

    Returns:
        True: successful to remove the prefix entry from border router
        False: fail to remove the prefix entry from border router
        (None is returned implicitly when an exception occurs; it is
        only logged, not re-raised.)
    """
    print '%s call removeRouterPrefix' % self.port
    print prefixEntry
    prefix = self.__convertIp6PrefixStringToIp6Address(str(prefixEntry))
    try:
        # Prefix length is fixed at /64 for on-mesh prefixes here.
        prefixLen = 64
        cmd = 'prefix remove %s/%d' % (prefix, prefixLen)
        print cmd
        if self.__sendCommand(cmd)[0] == 'Done':
            # Send server data notification to the leader so the network
            # data gets updated.
            return self.__sendCommand('netdataregister')[0] == 'Done'
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("removeRouterPrefix() Error: " + str(e))
|
def zpopmax(self, name, count=None):
    """Remove and return up to ``count`` members with the highest scores
    from the sorted set ``name``.
    """
    # COUNT is an optional positional argument of the ZPOPMAX command.
    pieces = [] if count is None else [count]
    options = {'withscores': True}
    return self.execute_command('ZPOPMAX', name, *pieces, **options)
|
def grepPDF(self, path):
    """Parse a PDF file's text content for keywords.

    Args:
        path: PDF file path.
    Returns:
        match: set of unique (lowercased) occurrences of every match.
    """
    matches = set()
    text = ''
    with open(path, 'rb') as pdf_file_obj:
        reader = PyPDF2.PdfFileReader(pdf_file_obj)
        # Concatenate the text of every page, then scan once.
        for page_number in range(reader.numPages):
            text += '\n' + reader.getPage(page_number).extractText()
        matches.update(
            hit.lower()
            for hit in re.findall(self._keywords, text, re.IGNORECASE))
    return matches
|
def add_view(self, row=None, col=None, row_span=1, col_span=1, **kwargs):
    """Create a new ViewBox and add it as a child widget.

    Parameters
    ----------
    row : int
        The row in which to add the widget (0 is the topmost row).
    col : int
        The column in which to add the widget (0 is the leftmost column).
    row_span : int
        The number of rows to be occupied by this widget. Default is 1.
    col_span : int
        The number of columns to be occupied by this widget. Default is 1.
    **kwargs : dict
        Keyword arguments to pass to `ViewBox`.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .viewbox import ViewBox
    return self.add_widget(ViewBox(**kwargs), row, col, row_span, col_span)
|
def get_meshes(path='../../../models', cutoff=None):
    """Get a list of single-body meshes to test identifiers on.

    Parameters
    ----------
    path : str
        Location of model files to load.
    cutoff : int or None
        Number of meshes to stop loading at; when reached, the meshes
        loaded so far are returned without the procedural shapes below.

    Returns
    -------
    meshes : (n,) list of Trimesh objects
    """
    bodies = collections.deque()
    for file_name in os.listdir(path):
        try:
            mesh = trimesh.load(os.path.join(path, file_name))
            split = mesh.split()
            bodies.extend(split)
            # Keep the original multi-body mesh as a test case as well.
            if len(split) > 1:
                bodies.append(mesh)
        except BaseException:
            # Files trimesh cannot load are simply skipped.
            continue
        if cutoff is not None and len(bodies) > cutoff:
            return np.array(bodies)
    # Add procedurally generated primitives with randomized dimensions.
    for i in range(100):
        cylinder = trimesh.creation.cylinder(radius=np.random.random() * 100, height=np.random.random() * 1000, sections=int(np.clip(np.random.random() * 720, 20, 720)))
        capsule = trimesh.creation.capsule(radius=np.random.random() * 100, height=np.random.random() * 1000, count=np.clip(np.random.random(2) * 720, 20, 720).astype(int))
        bodies.append(cylinder)
        bodies.append(capsule)
    # A few random triangle soups and exact primitives round out the set.
    for i in range(10):
        bodies.append(trimesh.creation.random_soup(int(np.clip(np.random.random() * 1000, 20, 1000))))
    bodies.append(trimesh.creation.icosphere())
    bodies.append(trimesh.creation.uv_sphere())
    bodies.append(trimesh.creation.icosahedron())
    return np.array(bodies)
|
def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is an IIS log file.

    Args:
      parser_mediator (ParserMediator): mediates interactions between
          parsers and other components, such as storage and dfvfs.
      line (str): line from a text file.

    Returns:
      bool: True if the line contains the IIS signature.
    """
    # TODO: self._line_structures is a work-around and this needs
    # a structural fix.
    self._line_structures = self.LINE_STRUCTURES
    # Reset the cached date components before parsing a new file.
    self._day_of_month = None
    self._month = None
    self._year = None
    # TODO: Examine other versions of the file format and if this parser
    # should support them. For now just check for the IIS header.
    return self._SIGNATURE in line
|
def authenticate(username, password):
    """Authenticate against the API using the password grant.

    Returns:
        a dict with:
            pk: the pk of the user
            token: dict containing all the data from the api
                (access_token, refresh_token, expires_at etc.)
            user_data: dict containing user data such as
                first_name, last_name etc.
        if the authentication succeeds.
    Raises Unauthorized if the authentication fails.
    """
    oauth_client = LegacyApplicationClient(client_id=settings.API_CLIENT_ID)
    session = MoJOAuth2Session(client=oauth_client)
    token = session.fetch_token(
        token_url=get_request_token_url(),
        username=username,
        password=password,
        auth=HTTPBasicAuth(settings.API_CLIENT_ID, settings.API_CLIENT_SECRET),
        timeout=15,
        encoding='utf-8',
    )
    user_data = session.get('/users/{username}/'.format(username=username)).json()
    return {
        'pk': user_data.get('pk'),
        'token': token,
        'user_data': user_data,
    }
|
def load():
    """| Load the configuration file.
    | Add dynamically configuration to the module.

    :rtype: None
    """
    config = ConfigParser.RawConfigParser(DEFAULTS)
    # Use a context manager: the previous code passed an anonymous
    # open() handle to readfp() and never closed it.
    with open(CONF_PATH) as config_file:
        # NOTE(review): readfp() is deprecated in py3 configparser in
        # favor of read_file(); kept for py2 compatibility.
        config.readfp(config_file)
    # Expose each section as a module-level dict of its options.
    for section in config.sections():
        globals()[section] = {}
        for key, val in config.items(section):
            globals()[section][key] = val
|
def publish(self, synchronous=True, **kwargs):
    """Helper for publishing an existing content view.

    :param synchronous: What should happen if the server returns an HTTP
        202 (accepted) status code? Wait for the task to complete if
        ``True``. Immediately return the server's response otherwise.
    :param kwargs: Arguments to pass to requests.
    :returns: The server's response, with all JSON decoded.
    :raises: ``requests.exceptions.HTTPError`` If the server responds
        with an HTTP 4XX or 5XX message.
    """
    # Shadow the passed-in kwargs so the caller's dict is not mutated.
    kwargs = kwargs.copy()
    # Ensure the entity id is part of the payload.
    if 'data' in kwargs and 'id' not in kwargs['data']:
        kwargs['data']['id'] = self.id  # pylint:disable=no-member
    kwargs.update(self._server_config.get_client_kwargs())
    response = client.post(self.path('publish'), **kwargs)
    return _handle_response(response, self._server_config, synchronous)
|
def get_info(node_id, info_id):
    """Get a specific info.

    Both the node and info id must be specified in the url.
    """
    exp = experiment(session)
    # Check the node exists.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info, node does not exist")
    # Execute the experiment method:
    info = models.Info.query.get(info_id)
    if info is None:
        return error_response(
            error_type="/info GET, info does not exist",
            participant=node.participant)
    elif (info.origin_id != node.id and
          info.id not in [t.info_id for t in node.transmissions(
              direction="incoming", status="received")]):
        # The info neither originates from this node nor was it received
        # by it, so access is forbidden.
        return error_response(
            error_type="/info GET, forbidden info",
            status=403,
            participant=node.participant)
    try:  # ping the experiment
        exp.info_get_request(node=node, infos=info)
        session.commit()
    except Exception:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only regular exceptions.
        return error_response(
            error_type="/info GET server error",
            status=403,
            participant=node.participant)
    # Return the data.
    return success_response(
        field="info", data=info.__json__(), request_type="info get")
|
def ocvTypes(self):
    """Return the supported OpenCV type mapping.

    :return: a dictionary containing the OpenCV type mapping supported.

    .. versionadded:: 2.3.0
    """
    # Lazily fetch the mapping from the JVM once and cache it.
    if self._ocvTypes is None:
        jvm = SparkContext._active_spark_context._jvm
        self._ocvTypes = dict(jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
    return self._ocvTypes
|
def init_streams_pad(self, start_row=0):
    """Create a curses pad and populate it with one line per stream."""
    # The pad needs at least one row even when no streams are shown.
    pad = curses.newpad(max(1, len(self.filtered_streams)), self.pad_w)
    pad.keypad(1)
    for row, stream in enumerate(self.filtered_streams):
        pad.addstr(row, 0, self.format_stream_line(stream))
    self.offsets['streams'] = 0
    pad.move(start_row, 0)
    if not self.no_stream_shown:
        # Highlight the cursor row.
        pad.chgat(curses.A_REVERSE)
    self.pads['streams'] = pad
|
def _is_number_match_OS(numobj1, number2):
    """Wrapper variant of _is_number_match_OO that copes with one
    PhoneNumber object and one string."""
    # First see if the second number has an implicit country calling code,
    # by attempting to parse it.
    try:
        numobj2 = parse(number2, UNKNOWN_REGION)
        return _is_number_match_OO(numobj1, numobj2)
    except NumberParseException:
        _, exc, _ = sys.exc_info()
        if exc.error_type == NumberParseException.INVALID_COUNTRY_CODE:
            # The second number has no country calling code. EXACT_MATCH
            # is no longer possible. We parse it as if the region was the
            # same as that for the first number, and if EXACT_MATCH is
            # returned, we replace this with NSN_MATCH.
            region1 = region_code_for_country_code(numobj1.country_code)
            try:
                if region1 != UNKNOWN_REGION:
                    numobj2 = parse(number2, region1)
                    match = _is_number_match_OO(numobj1, numobj2)
                    if match == MatchType.EXACT_MATCH:
                        return MatchType.NSN_MATCH
                    else:
                        return match
                else:
                    # If the first number didn't have a valid country
                    # calling code, then we parse the second number
                    # without one as well.
                    numobj2 = parse(number2, None, keep_raw_input=False, _check_region=False, numobj=None)
                    return _is_number_match_OO(numobj1, numobj2)
            except NumberParseException:
                return MatchType.NOT_A_NUMBER
    # One or more of the phone numbers we are trying to match is not a
    # viable phone number.
    return MatchType.NOT_A_NUMBER
|
def get_request(self, request):
    """Get download requests.

    Create a list of DownloadRequests for all Sentinel-2 acquisitions within
    request's time interval and acceptable cloud coverage.

    :param request: OGC-type request with specified bounding box, time
        interval, and cloud coverage for specific product.
    :type request: OgcRequest or GeopediaRequest
    :return: list of DownloadRequests
    """
    download_requests = []
    for geometry in request.geometry_list:
        request_url = self.get_url(request=request, geometry=geometry)
        request_filename = self.get_filename(request, geometry)
        download_requests.append(DownloadRequest(url=request_url,
                                                 filename=request_filename,
                                                 data_type=MimeType.JSON,
                                                 headers=OgcConstants.HEADERS))
    return download_requests
|
def main():
    """Simple command-line program for listing the virtual machines on a system.

    Connects to the vSphere host given on the command line, walks every
    datacenter under the root folder and prints info for each VM found.
    Returns 0 on success, -1 when the connection fails.
    """
    args = GetArgs()
    if args.password:
        password = args.password
    else:
        # Prompt interactively when no password was supplied on the CLI.
        password = getpass.getpass(prompt='Enter password for host %s and ' 'user %s: ' % (args.host, args.user))
    context = None
    if hasattr(ssl, '_create_unverified_context'):
        # Skip certificate verification (typical for lab vCenter setups).
        context = ssl._create_unverified_context()
    si = SmartConnect(host=args.host, user=args.user, pwd=password, port=int(args.port), sslContext=context)
    if not si:
        print("Could not connect to the specified host using specified " "username and password")
        return -1
    # Ensure the session is torn down when the interpreter exits.
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()
    for child in content.rootFolder.childEntity:
        # Only datacenter entities expose a vmFolder.
        if hasattr(child, 'vmFolder'):
            datacenter = child
            vmFolder = datacenter.vmFolder
            vmList = vmFolder.childEntity
            for vm in vmList:
                PrintVmInfo(vm)
    return 0
|
def getProjectAreaID(self, projectarea_name, archived=False):
    """Get :class:`rtcclient.project_area.ProjectArea` id by its name.

    :param projectarea_name: the project area name
    :param archived: (default is False) whether the project area is archived
    :return: the :class:`string` object
    :rtype: string
    """
    self.log.debug("Get the ProjectArea id by its name: %s", projectarea_name)
    proj_area = self.getProjectArea(projectarea_name, archived=archived)
    # Guard clause: fail loudly when no matching project area exists.
    if not proj_area:
        raise exception.NotFound("No ProjectArea named %s" % projectarea_name)
    return proj_area.id
|
def maybe_resolve(object, resolve):
    """Call `resolve` on the `object`'s `$ref` value if it has one.

    :param object: An object; only dicts carrying a truthy ``'$ref'`` key
        are resolved.
    :param resolve: A resolving function.
    :return: The resolved object, or `object` unchanged. :sparkles:
    """
    has_ref = isinstance(object, dict) and bool(object.get('$ref'))
    if not has_ref:
        return object
    return resolve(object['$ref'])
|
def format_epilog_section(self, section, text):
    """Format a section for the epilog using the configured formatter.

    ``self.epilog_formatter`` is first looked up among the registered
    formatters; an unregistered but callable formatter is used directly.

    :raises KeyError: when the formatter is neither registered nor callable.
    """
    key = self.epilog_formatter
    if key in self._epilog_formatters:
        formatter = self._epilog_formatters[key]
    elif callable(key):
        formatter = key
    else:
        raise KeyError(key)
    return formatter(section, text)
|
def list(ctx):
    """List all config values.

    Echoes every ``key : value`` pair from the module-level ``config``
    object to stdout via click.
    # NOTE(review): shadows the builtin `list` — presumably intentional as a
    # click subcommand name; confirm before renaming.
    """
    log.debug('chemdataextractor.config.list')
    for k in config:
        click.echo('%s : %s' % (k, config[k]))
|
def areas(self):
    """A 1D array of areas (in pixel**2) of the non-zero labeled regions.

    The `~numpy.ndarray` starts with the *non-zero* label. The returned
    array has a length equal to the number of labels and matches the order
    of the ``labels`` attribute.
    """
    # bincount counts pixels per label; slice off label 0 (the background).
    label_pixel_counts = np.bincount(self.data.ravel())[1:]
    present = [count for count in label_pixel_counts if count != 0]
    return np.array(present)
|
def get_available_voices(self, language=None, gender=None):
    """Returns a list of available voices, via 'ListVoices' endpoint.

    Docs:
        http://developer.ivona.com/en/speechcloud/actions.html#ListVoices

    :param language: returned voices language
    :type language: str
    :param gender: returned voices gender
    :type gender: str
    """
    endpoint = 'ListVoices'
    # Build a single 'Voice' filter; the previous implementation replaced
    # the whole 'Voice' entry when both language and gender were given,
    # silently dropping the language filter. A stray debug print was also
    # removed.
    voice_filter = {}
    if language:
        voice_filter['Language'] = language
    if gender:
        voice_filter['Gender'] = gender
    data = {'Voice': voice_filter} if voice_filter else {}
    response = self._get_response('get', endpoint, data)
    return response.json()['Voices']
|
def schema_completer(prefix):
    """For tab-completion via argcomplete, return completion options.

    For the given prefix so far, return the possible options. Note that
    filtering via startswith happens after this list is returned.
    """
    # Local import to avoid paying the schema import cost at CLI startup.
    from c7n import schema
    load_resources()
    components = prefix.split('.')
    if components[0] in provider.clouds.keys():
        # Prefix starts with an explicit cloud name, e.g. 'gcp.instance'.
        cloud_provider = components.pop(0)
        provider_resources = provider.resources(cloud_provider)
    else:
        # No cloud prefix: default to AWS and normalize the first component.
        cloud_provider = 'aws'
        provider_resources = provider.resources('aws')
        components[0] = "aws.%s" % components[0]
    # Completions for resource
    if len(components) == 1:
        choices = [r for r in provider.resources().keys() if r.startswith(components[0])]
        if len(choices) == 1:
            # Single match: also offer the dotted form to continue completing.
            choices += ['{}{}'.format(choices[0], '.')]
        return choices
    if components[0] not in provider_resources.keys():
        return []
    # Completions for category
    if len(components) == 2:
        choices = ['{}.{}'.format(components[0], x) for x in ('actions', 'filters') if x.startswith(components[1])]
        if len(choices) == 1:
            choices += ['{}{}'.format(choices[0], '.')]
        return choices
    # Completions for item
    elif len(components) == 3:
        resource_mapping = schema.resource_vocabulary(cloud_provider)
        return ['{}.{}.{}'.format(components[0], components[1], x) for x in resource_mapping[components[0]][components[1]]]
    return []
|
def kill(self, signal=None):
    """Kill or send a signal to the container.

    Args:
        signal (str or int): The signal to send. Defaults to ``SIGKILL``

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Delegate to the low-level API client bound to this container's id.
    api_client = self.client.api
    return api_client.kill(self.id, signal=signal)
|
def generate_jwt_token(user, authsys, **kwargs):
    """Generate a new JWT token, with optional extra information. Any data
    provided in `**kwargs` will be added into the token object for auth
    specific usage.

    Args:
        user (:obj:`User`): User object to generate token for
        authsys (str): The auth system for which the token was generated
        **kwargs (dict): Any optional items to add to the token

    Returns:
        Encoded JWT token (str)
    """
    # Local import to prevent app startup failures
    from cloud_inquisitor.config import dbconfig
    # 'exp' is the standard JWT expiry claim, seconds since the epoch.
    token = {'auth_system': authsys, 'exp': time.time() + dbconfig.get('session_expire_time'), 'roles': [role.name for role in user.roles]}
    if kwargs:
        token.update(**kwargs)
    enc = jwt.encode(token, get_jwt_key_data(), algorithm='HS512')
    # jwt.encode returns bytes here; decode to return a plain string.
    return enc.decode()
|
def extend(self, **attributes):
    """Add the items to the instance of the environment if they do not exist
    yet. This is used by :ref:`extensions <writing-extensions>` to register
    callbacks and configuration values without breaking inheritance.
    """
    # `items()` works on both Python 2 and 3; the previous `iteritems()`
    # raised AttributeError on Python 3.
    for key, value in attributes.items():
        # Only set attributes that are not already defined, so subclass /
        # instance overrides are preserved.
        if not hasattr(self, key):
            setattr(self, key, value)
|
def is_directory(self, path):
    '''Is the node pointed to by @ref path a directory (name servers and
    naming contexts)?'''
    node = self.get_node(path)
    # Missing node -> False; otherwise defer to the node's own flag.
    return bool(node) and node.is_directory
|
def get_view(self, request, view_class, opts=None):
    """Instantiates and returns the view class that will generate the
    actual context for this plugin.

    :param request: the current HTTP request, attached to the view.
    :param view_class: class of the view to instantiate.
    :param opts: optional mapping (or object whose ``__dict__`` is used)
        of keyword arguments for the view; values missing from ``opts``
        fall back to same-named attributes on ``self``.
    :return: the instantiated view with ``request`` set and empty kwargs.
    """
    kwargs = {}
    if opts:
        if not isinstance(opts, dict):
            # Accept plain objects as option containers.
            opts = opts.__dict__
    else:
        opts = {}
    if not view_class in VALID_MIXIN_OPTIONS:
        # Build and memoize the set of attribute names this view class (and
        # its bases) accepts, so later calls skip the introspection.
        # NOTE(review): `dict.keys()` arithmetic below is Python-2 style
        # (`+=` on a dict_keys view fails on Python 3) — confirm target
        # interpreter before touching.
        valid_options = view_class.__dict__.keys()
        for cls in view_class.__bases__:
            if cls != object:
                valid_options += cls.__dict__.keys()
        VALID_MIXIN_OPTIONS[view_class] = valid_options
    for key in VALID_MIXIN_OPTIONS[view_class]:
        if key in opts:
            kwargs[key] = opts[key]
        elif hasattr(self, key):
            kwargs[key] = getattr(self, key)
    view = view_class(**kwargs)
    view.request = request
    view.kwargs = {}
    return view
|
def _get_longest ( value_lst : List ) -> List :
"""Get the longest match for overlap
Args :
value _ lst : List
Returns : List"""
|
value_lst . sort ( )
result = [ ]
pivot = value_lst [ 0 ]
start , end = pivot [ 0 ] , pivot [ 1 ]
pivot_e = end
pivot_s = start
for idx , ( s , e , v , rule_id , _ ) in enumerate ( value_lst ) :
if s == pivot_s and pivot_e < e :
pivot_e = e
pivot = value_lst [ idx ]
elif s != pivot_s and pivot_e < e :
result . append ( pivot )
pivot = value_lst [ idx ]
pivot_e = e
pivot_s = s
result . append ( pivot )
return result
|
def pencil2():
    '''Install or update latest Pencil version 2, a GUI prototyping tool.

    Tip: For svg exports displayed proper in other programs (eg. inkscape,
    okular, reveal.js presentations) only use the 'Common Shapes' and
    'Desktop - Sketchy GUI' elements.

    More info:
     github repo (forked version 2): https://github.com/prikhi/pencil
    '''
    repo_name = 'pencil2'
    repo_dir = flo('~/repos/{repo_name}')
    print_msg('## fetch latest pencil\n')
    # Clone or update the forked repo under ~/repos.
    checkup_git_repo_legacy(url='https://github.com/prikhi/pencil.git', name=repo_name)
    print_msg('\n## build properties\n')
    # Raise MAX_VERSION so the build accepts current Firefox/XULRunner.
    update_or_append_line(flo('{repo_dir}/build/properties.sh'), prefix='export MAX_VERSION=', new_line="export MAX_VERSION='100.*'")
    run(flo('cat {repo_dir}/build/properties.sh'))
    run(flo('cd {repo_dir}/build && ./build.sh linux'), msg='\n## build pencil\n')
    # Expose a 'pencil2' launcher on the user's PATH.
    install_user_command_legacy('pencil2', pencil2_repodir=repo_dir)
    print_msg('\nNow You can start pencil version 2 with this command:\n\n' '    pencil2')
|
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
    """Connect receiver to sender for signal.

    Arguments:

        receiver
            A function or an instance method which is to receive signals.
            Receivers must be hashable objects.

            If weak is True, then receiver must be weak referenceable.

            Receivers must be able to accept keyword arguments.

            If a receiver is connected with a dispatch_uid argument, it
            will not be added if another receiver was already connected
            with that dispatch_uid.

        sender
            The sender to which the receiver should respond. Must either be
            of type Signal, or None to receive events from any sender.

        weak
            Whether to use weak references to the receiver. By default, the
            module will attempt to use weak references to the receiver
            objects. If this parameter is false, then strong references will
            be used.

        dispatch_uid
            An identifier used to uniquely identify a particular instance of
            a receiver. This will usually be a string, though it may be
            anything hashable.
    """
    # The lookup key deduplicates registrations: either by explicit uid or
    # by the identity of the (receiver, sender) pair.
    if dispatch_uid:
        lookup_key = (dispatch_uid, _make_id(sender))
    else:
        lookup_key = (_make_id(receiver), _make_id(sender))
    if weak:
        ref = weakref.ref
        receiver_object = receiver
        # Check for bound methods
        if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
            # Bound methods need WeakMethod; a plain weakref would die
            # immediately because the bound-method object is transient.
            ref = WeakMethod
            receiver_object = receiver.__self__
        if six.PY3:
            receiver = ref(receiver)
            # Purge dead entries once the referent is garbage collected.
            weakref.finalize(receiver_object, self._remove_receiver)
        else:
            receiver = ref(receiver, self._remove_receiver)
    with self.lock:
        self._clear_dead_receivers()
        # Only append when no receiver with this lookup key exists yet.
        for r_key, _ in self.receivers:
            if r_key == lookup_key:
                break
        else:
            self.receivers.append((lookup_key, receiver))
        # Registration invalidates any cached sender -> receivers mapping.
        self.sender_receivers_cache.clear()
|
def ParseDepends(self, filename, must_exist=None, only_one=0):
    """Parse a mkdep-style file for explicit dependencies.

    This is completely abusable, and should be unnecessary in the "normal"
    case of proper SCons configuration, but it may help make the transition
    from a Make hierarchy easier for some people to swallow. It can also be
    genuinely useful when using a tool that can write a .d file, but for
    which writing a scanner would be too complicated.

    :param filename: dependency file; subjected to construction-variable
        substitution before opening.
    :param must_exist: when true, a missing file raises instead of being
        silently ignored.
    :param only_one: when true, the file may name only a single target.
    """
    filename = self.subst(filename)
    try:
        fp = open(filename, 'r')
    except IOError:
        if must_exist:
            raise
        return
    # Close the file deterministically; the original leaked the handle.
    with fp:
        lines = SCons.Util.LogicalLines(fp).readlines()
    # Drop comment lines.
    lines = [l for l in lines if l[0] != '#']
    tdlist = []
    for line in lines:
        try:
            target, depends = line.split(':', 1)
        except (AttributeError, ValueError):
            # Throws AttributeError if line isn't a string. Can throw
            # ValueError if line doesn't split into two or more elements.
            pass
        else:
            tdlist.append((target.split(), depends.split()))
    if only_one:
        targets = []
        for td in tdlist:
            targets.extend(td[0])
        if len(targets) > 1:
            raise SCons.Errors.UserError("More than one dependency target found in `%s': %s" % (filename, targets))
    for target, depends in tdlist:
        self.Depends(target, depends)
|
def asset(self, id):
    """Returns a single Asset.

    :param int id: (required), id of the asset
    :returns: :class:`Asset <github3.repos.release.Asset>`
    """
    # Non-positive ids can never exist server-side; skip the round trip.
    if int(id) <= 0:
        return None
    url = self._build_url('releases', 'assets', str(id), base_url=self._api)
    data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS), 200)
    return Asset(data, self) if data else None
|
def _random_subprocessor(self):
    """Creates a random subprocessor where there is a coupler between
    every pair of working qubits on opposite sides of the same cell.
    This is guaranteed to be minimal in that adding a qubit back in
    will reintroduce a bad coupler, but not to have minimum size.

    OUTPUT:
        an :class:`eden_processor` instance
    """
    deletion = set()
    # For every bad ("evil") coupler, delete one random endpoint unless an
    # earlier deletion already broke this coupler.
    for e in self._evil:
        if e[0] in deletion or e[1] in deletion:
            continue
        deletion.add(choice(e))
    return self._subprocessor(deletion)
|
def brty(self):
    """A string that combines bitrate and technology type, e.g. '106A'."""
    # Collapse to a single value when both directions use the same setting.
    if self._brty_send == self._brty_recv:
        return "{0}".format(self._brty_send)
    return "{0}/{1}".format(self._brty_send, self._brty_recv)
|
def imported_target_specs(cls, kwargs=None, payload=None):
    """:param kwargs: A kwargs dict representing Target.__init__(**kwargs) (Optional).
    :param payload: A Payload object representing the Target.__init__(payload=...) param. (Optional).
    :returns: list of target specs to be imported.
    :rtype: list of str
    """
    # Exactly one of kwargs/payload must be provided; pick the matching
    # source mapping and field name.
    if kwargs is not None:
        assert payload is None, 'may not provide both kwargs and payload'
        source = kwargs
        field = cls.imported_target_kwargs_field
    else:
        assert payload is not None, 'must provide either kwargs or payload'
        source = payload.as_dict()
        field = cls.imported_target_payload_field
    specs = []
    for item in source.get(field, ()):
        if not isinstance(item, string_types):
            raise cls.ExpectedAddressError('expected imports to contain string addresses, got {obj} (type: {found_class}) instead.'.format(obj=item, found_class=type(item).__name__))
        specs.append(item)
    return specs
|
def get_screen_info(self):
    """Retrieve information about the current and available configurations
    for the screen associated with this window.

    :return: the GetScreenInfo reply for this window's screen.
    """
    # `extname` is a module-level constant naming the X extension
    # (RandR); the major opcode is resolved from the display at call time.
    return GetScreenInfo(display=self.display, opcode=self.display.get_extension_major(extname), window=self, )
|
def clear_globals_reload_modules(self):
    """Clear globals, reload modules and drop cached evaluation results."""
    code_array = self.code_array
    code_array.clear_globals()
    code_array.reload_modules()
    # Results computed against the old globals/modules are now stale.
    code_array.result_cache.clear()
|
def disable_buttons(self):
    """Function disables buttons while a long-running operation is in
    progress: greys out the main button, hides back/link widgets and
    blocks window closing.
    """
    self.main_btn.set_sensitive(False)
    self.back_btn.hide()
    # Pango markup: orange "In progress..." status text.
    self.info_label.set_label('<span color="#FFA500">In progress...</span>')
    self.disable_close_window()
    if self.link is not None:
        self.link.hide()
|
def stage_pywbem_result(self, ret, exc):
    """Record the outcome of a pywbem call: its return value and any
    exception info, for later inspection."""
    # pylint: disable=attribute-defined-outside-init
    self._pywbem_result_ret, self._pywbem_result_exc = ret, exc
|
def iter_languages(self):
    """Iterate over list of languages.

    Yields ``(language_code, display_title)`` pairs: first the babel
    default locale, then every entry from the app's ``I18N_LANGUAGES``
    config.
    """
    default_lang = self.babel.default_locale.language
    # Title of the default language, localized to itself.
    default_title = self.babel.default_locale.get_display_name(default_lang)
    yield (default_lang, default_title)
    for l, title in current_app.config.get('I18N_LANGUAGES', []):
        yield l, title
|
def threshold(self, value, inclusive=False):
    """Return True if > than threshold value (or >= threshold value if
    inclusive=True)."""
    # Pick the comparison once; bool() mirrors the strict True/False result
    # of the original conditional expressions.
    if inclusive:
        compare = lambda x, y: bool(x >= y)
    else:
        compare = lambda x, y: bool(x > y)
    return self.operation(value, compare)
|
def menu(self, prompt, choices):
    """Presents a selection menu and returns the user's choice.

    Args:
        prompt (str): Text to ask the user what to select.
        choices (Sequence[str]): Values for the user to select from.

    Returns:
        The value selected by the user, or ``None``.

    Todo:
        Nice opportunity to provide a hook for Unite.vim, etc. here.
    """
    # Build "1. item" lines; inputlist() shows the prompt as line zero.
    menu = [prompt] + ["{0}. {1}".format(*choice) for choice in enumerate(choices, start=1)]
    command = 'inputlist({})'.format(repr(menu))
    choice = int(self._vim.eval(command))
    # Vim returns weird stuff if user clicks outside choices with mouse
    if not 0 < choice < len(menu):
        return
    # Map the 1-based menu index back onto the 0-based choices sequence.
    return choices[choice - 1]
|
def create(self, width=0, max_value=0, path=None, flags=0, seed=0):
    """Create new sketch.

    Thin wrapper over the madoka C extension; all work happens in
    ``_madoka.Sketch_create``.

    Params:
        <int> width
        <int> max_value
        <str> path
        <int> flags
        <int> seed
    """
    return _madoka.Sketch_create(self, width, max_value, path, flags, seed)
|
def from_dict(data, ctx):
    """Instantiate a new QuoteHomeConversionFactors from a dict (generally
    from loading a JSON response). The data used to instantiate the
    QuoteHomeConversionFactors is a shallow copy of the dict passed in,
    with any complex child types instantiated appropriately.
    """
    data = data.copy()
    # Both unit fields arrive as strings and must become decimal numbers.
    for field in ('positiveUnits', 'negativeUnits'):
        raw = data.get(field)
        if raw is not None:
            data[field] = ctx.convert_decimal_number(raw)
    return QuoteHomeConversionFactors(**data)
|
def frange(x, y, jump=1):
    """range() analogue for floats: yield x, x + jump, ... while below y,
    rounded to the precision of the step."""
    digits = get_sig_digits(jump)
    current = x
    while current < y:
        yield round(current, digits)
        current += jump
|
def eject_virtual_media(self, device):
    """Ejects the Virtual Media image if one is inserted.

    :param device: virtual media device
    :raises: IloError, on an error from iLO.
    :raises: IloInvalidInputError, if the device is not valid.
    """
    self._validate_virtual_media(device)
    manager = self._get_sushy_manager(PROLIANT_MANAGER_ID)
    try:
        vmedia_device = (manager.virtual_media.get_member_device(VIRTUAL_MEDIA_MAP[device]))
        if not vmedia_device.inserted:
            # Nothing mounted: ejecting is a no-op, not an error.
            LOG.debug(self._("No media available in the device '%s' to " "perform eject operation.") % device)
            return
        LOG.debug(self._("Ejecting the media image '%(url)s' from the " "device %(device)s.") % {'url': vmedia_device.image, 'device': device})
        vmedia_device.eject_media()
    except sushy.exceptions.SushyError as e:
        # Wrap the Redfish failure in the iLO driver's own exception type.
        msg = (self._("The Redfish controller failed to eject the virtual" " media device '%(device)s'. Error %(error)s.") % {'device': device, 'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
|
def reboot(name, call=None):
    '''Reboot a droplet in DigitalOcean.

    .. versionadded:: 2015.8.8

    name
        The name of the droplet to restart.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot droplet_name
    '''
    # salt-cloud dispatch guard: this function is an --action only.
    if call != 'action':
        raise SaltCloudSystemExit('The restart action must be called with -a or --action.')
    data = show_instance(name, call='action')
    if data.get('status') == 'off':
        # A powered-off droplet cannot be rebooted; report it as such.
        return {'success': True, 'action': 'stop', 'status': 'off', 'msg': 'Machine is already off.'}
    ret = query(droplet_id=data['id'], command='actions', args={'type': 'reboot'}, http_method='post')
    return {'success': True, 'action': ret['action']['type'], 'state': ret['action']['status']}
|
def extend_distribution_substation_overloading(network, critical_stations):
    """Reinforce MV/LV substations due to overloading issues.

    In a first step a parallel transformer of the same kind is installed.
    If this is not sufficient as many standard transformers as needed are
    installed.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
    critical_stations : :pandas:`pandas.DataFrame<dataframe>`
        Dataframe containing over-loaded MV/LV stations, their apparent power
        at maximal over-loading and the corresponding time step.
        Index of the dataframe are the over-loaded stations of type
        :class:`~.grid.components.LVStation`. Columns are 's_pfa'
        containing the apparent power at maximal over-loading as float and
        'time_index' containing the corresponding time step the over-loading
        occured in as :pandas:`pandas.Timestamp<timestamp>`. See
        :func:`~.flex_opt.check_tech_constraints.mv_lv_station_load` for more
        information.

    Returns
    -------
    dict
        Dictionary with lists of added and removed transformers.
    """
    # get parameters for standard transformer
    try:
        standard_transformer = network.equipment_data['lv_trafos'].loc[network.config['grid_expansion_standard_equipment']['mv_lv_transformer']]
    except KeyError:
        # NOTE(review): only prints and falls through — if the 'else' branch
        # below is later reached, `standard_transformer` is unbound and a
        # NameError will be raised; consider re-raising here. Left unchanged
        # to preserve behavior.
        print('Standard MV/LV transformer is not in equipment list.')
    transformers_changes = {'added': {}, 'removed': {}}
    for station in critical_stations.index:
        # list of maximum power of each transformer in the station
        s_max_per_trafo = [_.type.S_nom for _ in station.transformers]
        # maximum station load from power flow analysis
        s_station_pfa = critical_stations.s_pfa[station]
        # determine missing transformer power to solve overloading issue
        case = network.timeseries.timesteps_load_feedin_case.case[critical_stations.time_index[station]]
        load_factor = network.config['grid_expansion_load_factors']['lv_{}_transformer'.format(case)]
        s_trafo_missing = s_station_pfa - (sum(s_max_per_trafo) * load_factor)
        # check if second transformer of the same kind is sufficient
        # if true install second transformer, otherwise install as many
        # standard transformers as needed
        if max(s_max_per_trafo) >= s_trafo_missing:
            # if station has more than one transformer install a new
            # transformer of the same kind as the transformer that best
            # meets the missing power demand
            duplicated_transformer = min([_ for _ in station.transformers if _.type.S_nom > s_trafo_missing], key=lambda j: j.type.S_nom - s_trafo_missing)
            new_transformer = Transformer(id='LVStation_{}_transformer_{}'.format(str(station.id), str(len(station.transformers) + 1)), geom=duplicated_transformer.geom, mv_grid=duplicated_transformer.mv_grid, grid=duplicated_transformer.grid, voltage_op=duplicated_transformer.voltage_op, type=copy.deepcopy(duplicated_transformer.type))
            # add transformer to station and return value
            station.add_transformer(new_transformer)
            transformers_changes['added'][station] = [new_transformer]
        else:
            # get any transformer to get attributes for new transformer from
            station_transformer = station.transformers[0]
            # calculate how many parallel standard transformers are needed
            number_transformers = math.ceil(s_station_pfa / standard_transformer.S_nom)
            # add transformer to station
            new_transformers = []
            for i in range(number_transformers):
                new_transformer = Transformer(id='LVStation_{}_transformer_{}'.format(str(station.id), str(i + 1)), geom=station_transformer.geom, mv_grid=station_transformer.mv_grid, grid=station_transformer.grid, voltage_op=station_transformer.voltage_op, type=copy.deepcopy(standard_transformer))
                new_transformers.append(new_transformer)
            transformers_changes['added'][station] = new_transformers
            # old transformers are fully replaced by the standard ones
            transformers_changes['removed'][station] = station.transformers
            station.transformers = new_transformers
    return transformers_changes
|
def get_page_id(name, space):
    """Return id of a page based on passed page name and space.

    Parameters:
    - name: name of a Confluence page.
    - space: space the Confluence page is in.

    Returns the page id, or the string "Page not found!" when no page
    matches.
    """
    # Query the Confluence REST API; spaces in the title must be URL-encoded.
    data = _json.loads(_api.rest("?title=" + name.replace(" ", "%20") + "&spaceKey=" + space + "&expand=history"))
    try:
        return data["results"][0]["id"]
    # Narrowed from a bare `except:` — only an empty/missing results list is
    # an expected "not found"; other errors should propagate.
    except (KeyError, IndexError):
        return ("Page not found!")
|
def unpickle(self, parent):
    """Sets the parent pointer references for the type *and* all of its
    child classes that also have pointer references."""
    self.parent = parent
    # Restore pointer references in members first, then executables.
    for collection in (self.members, self.executables):
        self._unpickle_collection(collection)
    self.unpickle_docs()
|
def pub(topic_name, json_msg, repeat_rate=None, host=jps.env.get_master_host(), pub_port=jps.DEFAULT_PUB_PORT):
    '''publishes the data to the topic

    :param topic_name: name of the topic
    :param json_msg: data to be published
    :param repeat_rate: if None, publishes once. if not None, it is used as [Hz].

    # NOTE(review): the `host` default is evaluated once at import time
    # (jps.env.get_master_host()), not per call — confirm this is intended.
    '''
    pub = jps.Publisher(topic_name, host=host, pub_port=pub_port)
    # Brief pause so the underlying socket can connect before publishing.
    time.sleep(0.1)
    if repeat_rate is None:
        pub.publish(json_msg)
    else:
        try:
            # Re-publish at the requested frequency until Ctrl-C.
            while True:
                pub.publish(json_msg)
                time.sleep(1.0 / repeat_rate)
        except KeyboardInterrupt:
            pass
|
def _filterArgsAndKwargs ( self , originalConstructorExpectedArgList , syntheticMemberList , positionalArgumentKeyValueList , keywordedArgDict ) :
"""Returns a tuple with variadic args and keyworded args after removing arguments that have been used to
synthesize members and that are not expected by the original constructor .
If original constructor accepts variadic args , all variadic args are forwarded .
If original constructor accepts keyworded args , all keyworded args are forwarded .
: type originalConstructorExpectedArgList : list ( str )
: type syntheticMemberList : list ( SyntheticMember )
: type positionalArgumentKeyValueList : list ( tuple )
: type keywordedArgDict : dict ( string : * )"""
|
# List is initialized with all variadic arguments .
positionalArgumentKeyValueList = copy . copy ( positionalArgumentKeyValueList )
# Warning : we use this dict to simplify the usage of the key - value tuple list but be aware that this will
# merge superfluous arguments as they have the same key : None .
positionalArgumentDict = dict ( positionalArgumentKeyValueList )
# Dict is initialized with all keyworded arguments .
keywordedArgDict = keywordedArgDict . copy ( )
for syntheticMember in syntheticMemberList :
argumentName = syntheticMember . memberName ( )
# Argument is expected by the original constructor .
if argumentName in originalConstructorExpectedArgList :
continue
# We filter args only if original constructor does not expected variadic args .
if argumentName in positionalArgumentDict :
positionalArgumentKeyValueList = list ( filter ( lambda pair : pair [ 0 ] != argumentName , positionalArgumentKeyValueList ) )
# We filter args only if original constructor does not expected keyworded args .
if argumentName in keywordedArgDict :
del keywordedArgDict [ argumentName ]
positionalArgumentTuple = tuple ( [ value for _ , value in positionalArgumentKeyValueList ] )
return positionalArgumentTuple , keywordedArgDict
|
def touchPoint(self, x, y):
    '''Touches a point in the device screen.

    The generated operation will use the units specified in
    L{coordinatesUnit} and the orientation in L{vc.display['orientation']}.

    Python 2 module (uses `print >> sys.stderr`).
    '''
    if DEBUG:
        print >> sys.stderr, 'touchPoint(%d, %d)' % (x, y)
        print >> sys.stderr, 'touchPoint:', type(x), type(y)
    if self.areEventsDisabled:
        if DEBUG:
            print >> sys.stderr, "Ignoring event"
        self.canvas.update_idletasks()
        return
    if DEBUG:
        print >> sys.stderr, "Is touching point:", self.isTouchingPoint
    if self.isTouchingPoint:
        self.showVignette()
        if self.vc:
            self.vc.touch(x, y)
        if self.coordinatesUnit == Unit.DIP:
            # Convert pixels to density-independent pixels for the script.
            x = round(x / self.device.display['density'], 2)
            y = round(y / self.device.display['density'], 2)
        self.printOperation(None, Operation.TOUCH_POINT, x, y, self.coordinatesUnit, self.device.display['orientation'])
        self.printOperation(None, Operation.SLEEP, Operation.DEFAULT)
        # FIXME: can we reduce this sleep ? (was 5)
        time.sleep(1)
        # Stay in touch mode only when no ViewClient is attached.
        self.isTouchingPoint = self.vc is None
        self.takeScreenshotAndShowItOnWindow()
        # self.hideVignette()
        self.statusBar.clear()
        return
|
def add_ip_address(self, ip_address, sync=True):
    """add a ip address to this OS instance.

    :param ip_address: the ip address to add on this OS instance
    :param sync: If sync=True (default) synchronize with Ariane server.
        If sync=False, add the subnet object on list to be added on next
        save().
    :return: None
    """
    LOGGER.debug("OSInstance.add_ip_address")
    if not sync:
        # Defer: queue locally until the next save().
        self.ip_address_2_add.append(ip_address)
    else:
        # The IP address must exist server-side before it can be linked.
        if ip_address.id is None:
            ip_address.save()
        if self.id is not None and ip_address.id is not None:
            params = {'id': self.id, 'ipAddressID': ip_address.id}
            args = {'http_operation': 'GET', 'operation_path': 'update/ipAddresses/add', 'parameters': params}
            response = OSInstanceService.requester.call(args)
            if response.rc != 0:
                LOGGER.warning('OSInstance.add_ip_address - Problem while updating OS instance ' + self.name + '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + " (" + str(response.rc) + ")")
            else:
                # Keep both sides of the relation in sync locally.
                self.ip_address_ids.append(ip_address.id)
                ip_address.ipa_os_instance_id = self.id
        else:
            LOGGER.warning('OSInstance.add_ip_address - Problem while updating OS instance ' + self.name + '. Reason: IP Address ' + ip_address.ipAddress + ' id is None')
|
def add(self, value):
    """Create a safe directory from a value.

    Coerces *value* to a ``Path`` and materializes one nested
    ``DirectoryTree`` node per path component under ``self``.
    """
    path = value if isinstance(value, Path) else Path(str(value))
    # Skip empty paths and the filesystem root (root is its own parent).
    if path and path != path.parent:
        destination = self
        for part in path.parts:
            # Walk/create the tree one component at a time.
            destination = destination.setdefault(part, DirectoryTree())
|
def NewSection(self, token_type, section_name, pre_formatters):
    """For sections or repeated sections.

    Creates the block object and execution function matching *token_type*
    and pushes them via ``self._NewSection``.

    :raises AssertionError: on an unknown token type.
    """
    # Resolve formatter names into callables up front.
    pre_formatters = [self._GetFormatter(f) for f in pre_formatters]
    # TODO: Consider getting rid of this dispatching, and turn _Do* into methods
    if token_type == REPEATED_SECTION_TOKEN:
        new_block = _RepeatedSection(section_name, pre_formatters)
        func = _DoRepeatedSection
    elif token_type == SECTION_TOKEN:
        new_block = _Section(section_name, pre_formatters)
        func = _DoSection
    elif token_type == DEF_TOKEN:
        # Definitions never apply pre-formatters.
        new_block = _Section(section_name, [])
        func = _DoDef
    else:
        raise AssertionError('Invalid token type %s' % token_type)
    self._NewSection(func, new_block)
|
def interpret(self, startpos, args, addr=None, simfd=None):
    """
    implement scanf - extract formatted data from memory or a file according to the stored format
    specifiers and store them into the pointers extracted from `args`.

    :param startpos:    The index of the first argument corresponding to the first format element
    :param args:        A function which, given the index of an argument to the function, returns that argument
    :param addr:        The address in the memory to extract data from, or...
    :param simfd:       A file descriptor to use for reading data from
    :return:            The number of arguments parsed
    """
    if simfd is not None and isinstance(simfd.read_storage, SimPackets):
        # Packet-mode path: read fresh symbolic data from the packet stream and
        # constrain it to match each format component, instead of parsing bytes
        # that already exist in memory.
        argnum = startpos
        for component in self.components:
            if type(component) is bytes:
                # Literal text in the format string: the input must equal it.
                sdata, _ = simfd.read_data(len(component), short_reads=False)
                self.state.solver.add(sdata == component)
            elif isinstance(component, claripy.Bits):
                # Symbolic literal; its width is in bits, reads are in bytes.
                sdata, _ = simfd.read_data(len(component) // 8, short_reads=False)
                self.state.solver.add(sdata == component)
            elif component.spec_type == b's':
                # %s: read up to the length spec (or the configured symbolic
                # buffer cap) and forbid scanf delimiters inside the string.
                if component.length_spec is None:
                    sdata, slen = simfd.read_data(self.state.libc.buf_symbolic_bytes)
                else:
                    sdata, slen = simfd.read_data(component.length_spec)
                for byte in sdata.chop(8):
                    self.state.solver.add(claripy.And(*[byte != char for char in self.SCANF_DELIMITERS]))
                self.state.memory.store(args(argnum), sdata, size=slen)
                # NUL-terminate the stored string.
                self.state.memory.store(args(argnum) + slen, claripy.BVV(0, 8))
                argnum += 1
            elif component.spec_type == b'c':
                # %c: exactly one unconstrained byte.
                sdata, _ = simfd.read_data(1, short_reads=False)
                self.state.memory.store(args(argnum), sdata)
                argnum += 1
            else:
                # Numeric specifier: model the parse by constraining the ASCII
                # digits read to be the base-`base` expansion of a fresh
                # bitvector, then store that bitvector as the result.
                bits = component.size * 8
                if component.spec_type == b'x':
                    base = 16
                elif component.spec_type == b'o':
                    base = 8
                else:
                    base = 10

                # here's the variable representing the result of the parsing
                target_variable = self.state.solver.BVS('scanf_' + component.string.decode(), bits,
                        key=('api', 'scanf', argnum - startpos, component.string))
                negative = claripy.SLT(target_variable, 0)

                # how many digits does it take to represent this variable fully?
                max_digits = int(math.ceil(math.log(2**bits, base)))
                # how many digits does the format specify?
                spec_digits = component.length_spec
                # how many bits can we specify as input?
                available_bits = float('inf') if spec_digits is None else spec_digits * math.log(base, 2)
                not_enough_bits = available_bits < bits
                # how many digits will we model this input as?
                digits = max_digits if spec_digits is None else spec_digits

                # constrain target variable range explicitly if it can't take on all possible values
                if not_enough_bits:
                    self.state.solver.add(self.state.solver.And(
                        self.state.solver.SLE(target_variable, (base**digits) - 1),
                        self.state.solver.SGE(target_variable, -(base**(digits - 1) - 1))))

                # perform the parsing in reverse - constrain the input digits to be the string version of the input
                # this only works because we're reading from a packet stream and therefore nobody has the ability
                # to add other constraints to this data!
                # this makes z3's job EXTREMELY easy
                sdata, _ = simfd.read_data(digits, short_reads=False)
                for i, digit in enumerate(reversed(sdata.chop(8))):
                    # digit i counts from the least significant position.
                    digit_value = (target_variable // (base**i)) % base
                    digit_ascii = digit_value + ord('0')
                    if base > 10:
                        # digits >= 10 are rendered as lowercase letters
                        digit_ascii = claripy.If(digit_value >= 10, digit_value + (-10 + ord('a')), digit_ascii)

                    # if there aren't enough bits, we can increase the range by accounting for the possibility that
                    # the first digit is a minus sign
                    if not_enough_bits:
                        if i == digits - 1:
                            neg_digit_ascii = ord('-')
                        else:
                            neg_digit_value = (-target_variable // (base**i)) % base
                            neg_digit_ascii = neg_digit_value + ord('0')
                            if base > 10:
                                neg_digit_ascii = claripy.If(neg_digit_value >= 10, neg_digit_value + (-10 + ord('a')), neg_digit_ascii)

                        digit_ascii = claripy.If(negative, neg_digit_ascii, digit_ascii)

                    self.state.solver.add(digit == digit_ascii[7:0])

                self.state.memory.store(args(argnum), target_variable, endness=self.state.arch.memory_endness)
                argnum += 1

        return argnum - startpos

    # Generic path: parse data already present in a memory-like region.
    # TODO: we only support one format specifier in interpretation for now

    format_specifier_count = sum(1 for x in self.components if isinstance(x, FormatSpecifier))
    if format_specifier_count > 1:
        l.warning("We don't support more than one format specifiers in format strings.")

    if simfd is not None:
        region = simfd.read_storage
        addr = simfd._pos if hasattr(simfd, '_pos') else simfd._read_pos  # XXX THIS IS BAD
    else:
        region = self.parser.state.memory

    bits = self.parser.state.arch.bits
    # `failed` counts specifiers we could not parse; subtracted from the result.
    failed = self.parser.state.solver.BVV(0, bits)
    argpos = startpos
    position = addr
    for component in self.components:
        if isinstance(component, bytes):
            # TODO we skip non-format-specifiers in format string interpretation for now
            # if the region doesn't match the concrete component, we need to return immediately
            pass
        else:
            fmt_spec = component
            try:
                dest = args(argpos)
            except SimProcedureArgumentError:
                dest = None
            if fmt_spec.spec_type == b's':
                # set some limits for the find
                max_str_len = self.parser.state.libc.max_str_len
                max_sym_bytes = self.parser.state.libc.buf_symbolic_bytes

                # has the length of the format been limited by the string itself?
                if fmt_spec.length_spec is not None:
                    max_str_len = fmt_spec.length_spec
                    max_sym_bytes = fmt_spec.length_spec

                # TODO: look for limits on other characters which scanf is sensitive to, '\x00', '\x20'
                ohr, ohc, ohi = region.find(position, self.parser.state.solver.BVV(b'\n'), max_str_len, max_symbolic_bytes=max_sym_bytes)

                # if no newline is found, mm is position + max_strlen
                # If-branch will really only happen for format specifiers with a length
                mm = self.parser.state.solver.If(ohr == 0, position + max_str_len, ohr)
                # we're just going to concretize the length, load will do this anyways
                length = self.parser.state.solver.max_int(mm - position)
                src_str = region.load(position, length)

                # TODO all of these should be delimiters we search for above
                # add that the contents of the string cannot be any scanf %s string delimiters
                for delimiter in set(FormatString.SCANF_DELIMITERS):
                    delim_bvv = self.parser.state.solver.BVV(delimiter)
                    for i in range(length):
                        self.parser.state.add_constraints(region.load(position + i, 1) != delim_bvv)

                # write it out to the pointer
                self.parser.state.memory.store(dest, src_str)
                # store the terminating null byte
                self.parser.state.memory.store(dest + length, self.parser.state.solver.BVV(0, 8))

                position += length
            else:
                # XXX: atoi only supports strings of one byte
                if fmt_spec.spec_type in [b'd', b'i', b'u', b'x']:
                    base = 16 if fmt_spec.spec_type == b'x' else 10
                    status, i, num_bytes = self.parser._sim_atoi_inner(position, region, base=base, read_length=fmt_spec.length_spec)
                    # increase failed count if we were unable to parse it
                    failed = self.parser.state.solver.If(status, failed, failed + 1)
                    position += num_bytes
                elif fmt_spec.spec_type == b'c':
                    i = region.load(position, 1)
                    i = i.zero_extend(bits - 8)
                    position += 1
                else:
                    raise SimProcedureError("unsupported format spec '%s' in interpret" % fmt_spec.spec_type)

                # Truncate the parsed value to the specifier's width and store it.
                i = self.parser.state.solver.Extract(fmt_spec.size * 8 - 1, 0, i)
                self.parser.state.memory.store(dest, i, size=fmt_spec.size, endness=self.parser.state.arch.memory_endness)
            argpos += 1

    if simfd is not None:
        # Advance the file position past everything we consumed.
        simfd.read_data(position - addr)

    return (argpos - startpos) - failed
|
def print_typedefs(self, w=0, **print3opts):
    '''Print the types and dict tables.

           *w=0*           -- indentation for each line

           *print3options* -- print options, as in Python 3.0
    '''
    for k in _all_kinds:
        # XXX Python 3.0 doesn't sort type objects
        # Collect (repr'd name, typedef) pairs of this kind; `v.both` /
        # `self._code_` presumably gate code-only typedefs — TODO confirm.
        t = [(self._prepr(a), v) for a, v in _items(_typedefs) if v.kind == k and (v.both or self._code_)]
        if t:
            # Header line: '%*d' pads the count to width `w`.
            self._printf('%s%*d %s type%s: basicsize, itemsize, _len_(), _refs()', linesep, w, len(t), k, _plural(len(t)), **print3opts)
            for a, v in _sorted(t):
                self._printf('%*s %s: %s', w, '', a, v, **print3opts)
    # dict and dict-like classes
    # Total number of registered dict-like class variants.
    t = _sum([len(v) for v in _values(_dict_classes)])
    # [] for Python 2.2
    if t:
        self._printf('%s%*d dict/-like classes:', linesep, w, t, **print3opts)
        for m, v in _items(_dict_classes):
            self._printf('%*s %s: %s', w, '', m, self._prepr(v), **print3opts)
|
def ScheduleSystemCronJobs(names=None):
    """Schedules all system cron jobs."""
    errors = []
    disabled_classes = config.CONFIG["Cron.disabled_cron_jobs"]

    # Validate that every disabled name refers to a known cron job class;
    # unknown names are collected and reported at the end.
    for job_name in disabled_classes:
        try:
            registry.SystemCronJobRegistry.CronJobClassByName(job_name)
        except ValueError:
            errors.append("Cron job not found: %s." % job_name)

    if names is None:
        names = iterkeys(registry.SystemCronJobRegistry.SYSTEM_CRON_REGISTRY)

    for job_name in names:
        cls = registry.SystemCronJobRegistry.CronJobClassByName(job_name)
        system = rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION
        job_args = rdf_cronjobs.CronJobAction(
            action_type=system,
            system_cron_action=rdf_cronjobs.SystemCronAction(job_class_name=job_name))
        job = rdf_cronjobs.CronJob(
            cron_job_id=job_name,
            args=job_args,
            enabled=cls.enabled and job_name not in disabled_classes,
            frequency=cls.frequency,
            lifetime=cls.lifetime,
            allow_overruns=cls.allow_overruns)
        data_store.REL_DB.WriteCronJob(job)

    # Raised only after all valid jobs have been written.
    if errors:
        raise ValueError("Error(s) while parsing Cron.disabled_cron_jobs: %s" % errors)
|
def write_packet(self, payload):
    """Writes an entire "mysql packet" in its entirety to the network
    addings its length and sequence number.
    """
    # Internal note: when you build packet manually and calls _write_bytes()
    # directly, you should set self._next_seq_id properly.
    header = pack_int24(len(payload)) + int2byte(self._next_seq_id)
    self._write_bytes(header + payload)
    # Sequence ids wrap around at 256 (single unsigned byte).
    self._next_seq_id = (self._next_seq_id + 1) % 256
|
def do_pp(self, arg):
    """pp expression
    Pretty-print the value of the expression.
    """
    value = self._getval(arg)
    try:
        repr(value)
    except Exception:
        # repr() itself blew up; fall back to a crash-proof representation.
        text = bdb.safe_repr(value)
    else:
        text = pprint.pformat(value)
    self.message(text)
|
def delete(gandi, address, force):
    """Delete a domain mail forward."""
    source, domain = address
    # Unless forced, ask for interactive confirmation first.
    if not force:
        confirmed = click.confirm('Are you sure to delete the domain '
                                  'mail forward %s@%s ?' % (source, domain))
        if not confirmed:
            return
    return gandi.forward.delete(domain, source)
|
def set_trace(*args, **kwargs):
    """Call pdb.set_trace, making sure it receives the unwrapped stdout.

    This is so we don't keep drawing progress bars over debugger output.
    """
    # There's no stream attr if capture plugin is enabled:
    unwrapped = getattr(sys.stdout, 'stream', None)
    # Python 2.5 can't put an explicit kwarg and **kwargs in the same function
    # call.
    kwargs['stdout'] = unwrapped
    debugger = pdb.Pdb(*args, **kwargs)
    # Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
    # you pass it a stream on instantiation. Fix that:
    debugger.use_rawinput = True
    # Break in the caller's frame, not in this helper.
    debugger.set_trace(sys._getframe().f_back)
|
def _compute_document_meta(self):
    """Return documents meta information that can be used for fast document
    lookups. Meta information consists of documents titles, categories and
    positions in file.
    """
    meta = OrderedDict()
    start_re = r'<text id="(\d+)"[^>]*name="([^"]*)"'
    for match, bounds in xml_utils.bounds(self.filename, start_re=start_re, end_re=r'</text>'):
        doc_id = str(match.group(1))
        title = xml_utils.unescape_attribute(match.group(2))
        # cache categories
        chunk = xml_utils.load_chunk(self.filename, bounds)
        doc = Document(compat.ElementTree.XML(chunk.encode('utf8')))
        meta[doc_id] = _DocumentMeta(title, bounds, doc.categories())
    return meta
|
def get_rounded(self, digits):
    """Return a new vector whose elements are rounded to `digits` digits.

    The receiver is left unmodified; rounding happens on a copy.
    """
    rounded = self.copy()
    rounded.round(digits)
    return rounded
|
def prime_pairs(n: int) -> list:
    """This function finds all pairs of prime numbers x and y, such that:

    1. x and y are both less than or equal to n
    2. x + y equals n
    3. x and y are both prime numbers

    The result is a 2D list of these pairs, sorted in increasing order of the
    first element in each pair. If there are no such pairs, the function
    returns an empty list.

    Args:
        n (int): The integer to find prime pairs for.

    Returns:
        list: A 2D list of prime pairs, sorted by the first element of each
        pair. Each pair is a 2-element list where the first element is less
        than or equal to the second element. If there are no prime pairs,
        returns an empty list.

    Example:
        >>> prime_pairs(10)
        [[3, 7], [5, 5]]
    """
    # The smallest expressible sum of two primes is 2 + 2 = 4.
    if n < 4:
        return []

    # Sieve of Eratosthenes sized to the input (the original used a fixed
    # 1_000_001-entry sieve whose inner loop stopped at index 999_999,
    # leaving index 1_000_000 incorrectly marked prime, and capped n).
    sieve = [False, False] + [True] * (n - 1)
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            # Start at i*i: smaller multiples were cleared by smaller factors.
            sieve[i * i::i] = [False] * len(sieve[i * i::i])

    # Odd n can only be prime + prime if one addend is the sole even prime, 2;
    # n == 4 is the special even case 2 + 2.
    if n % 2 or n == 4:
        return [[2, n - 2]] if sieve[n - 2] else []

    # Even n >= 6: both addends must be odd primes, so step by 2 and stop at
    # n // 2 so each unordered pair appears exactly once, smaller element first.
    return [[i, n - i] for i in range(3, n // 2 + 1, 2)
            if sieve[i] and sieve[n - i]]
|
def _get_related_views_widgets ( self , item , orders = None , pages = None , page_sizes = None , widgets = None , ** args ) :
""": return :
Returns a dict with ' related _ views ' key with a list of
Model View widgets"""
|
widgets = widgets or { }
widgets [ "related_views" ] = [ ]
for view in self . _related_views :
if orders . get ( view . __class__ . __name__ ) :
order_column , order_direction = orders . get ( view . __class__ . __name__ )
else :
order_column , order_direction = "" , ""
widgets [ "related_views" ] . append ( self . _get_related_view_widget ( item , view , order_column , order_direction , page = pages . get ( view . __class__ . __name__ ) , page_size = page_sizes . get ( view . __class__ . __name__ ) , ) )
return widgets
|
def login(username, password, catalogId='EE'):
    """This method requires SSL be used due to the sensitive nature of
    users passwords. Upon a successful login, an API key will be
    returned. This key will be active for one hour and should be
    destroyed upon final use of the service by calling the logout
    method. Users must have "Machine to Machine" access based on
    a user-based role in the users profile.

    :param username:
    :param password:
    """
    credentials = {
        "username": username,
        "password": password,
        "authType": "",
        "catalogId": catalogId,
    }
    return json.dumps(credentials)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.