signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def dictionaries_walker(dictionary, path=()):
    """Generator that walks into nested dictionaries, yielding leaf entries.

    Usage::

        >>> nested = {"Level 1A": {"Level 2A": {"Level 3A": "Higher Level"}},
        ...           "Level 1B": "Lower level"}
        >>> for value in dictionaries_walker(nested):
        ...     print(value)
        (('Level 1A', 'Level 2A'), 'Level 3A', 'Higher Level')
        ((), 'Level 1B', 'Lower level')

    :param dictionary: Dictionary to walk.
    :type dictionary: dict
    :param path: Keys of the parent dictionaries accumulated so far.
    :type path: tuple
    :return: (path, key, value) tuples.
    :rtype: tuple
    :note: This generator won't / can't yield any dictionaries; if you want to
        be able to retrieve dictionaries anyway, you will have to either
        encapsulate them in another object, or mutate their base class.
    """

    for key, value in dictionary.items():
        if isinstance(value, dict):
            # Recurse with the current key appended to the walked path.
            yield from dictionaries_walker(value, path + (key,))
        else:
            yield path, key, value
|
def samples_multidimensional_uniform(bounds, num_data):
    """Generate a multidimensional grid uniformly distributed.

    :param bounds: tuple defining the box constraints, one (low, high)
        pair per dimension.
    :param num_data: number of data points to generate.
    :return: array of shape (num_data, len(bounds)).
    """

    dim = len(bounds)
    samples = np.zeros(shape=(num_data, dim))
    # Draw each dimension independently within its own box constraint.
    for axis, (low, high) in enumerate(bounds):
        samples[:, axis] = np.random.uniform(low=low, high=high, size=num_data)
    return samples
|
def get_interface(dev, bInterfaceNumber):
    r"""Get the current alternate setting of the interface.

    dev is the Device object to which the request will be sent to.
    """

    request_type = util.build_request_type(
        util.CTRL_IN,
        util.CTRL_TYPE_STANDARD,
        util.CTRL_RECIPIENT_INTERFACE,
    )
    # Standard GET_INTERFACE request (bRequest 0x0a) returns one byte
    # holding the alternate setting of the addressed interface.
    response = dev.ctrl_transfer(
        bmRequestType=request_type,
        bRequest=0x0a,
        wIndex=bInterfaceNumber,
        data_or_wLength=1,
    )
    return response[0]
|
def _ipopo_setup_field_callback(cls, context):
    # type: (type, FactoryContext) -> None
    """Sets up the class _field_callback dictionary.

    :param cls: The class to handle
    :param context: The factory class context
    """

    assert inspect.isclass(cls)
    assert isinstance(context, FactoryContext)

    # Work on a copy so the context is only updated once, at the end
    if context.field_callbacks is not None:
        callbacks = context.field_callbacks.copy()
    else:
        callbacks = {}

    for name, func in inspect.getmembers(cls, inspect.isroutine):
        if not hasattr(func, constants.IPOPO_METHOD_FIELD_CALLBACKS):
            # No attribute, get the next member
            continue

        method_callbacks = getattr(func, constants.IPOPO_METHOD_FIELD_CALLBACKS)
        if not isinstance(method_callbacks, list):
            # Invalid content
            _logger.warning(
                "Invalid attribute %s in %s",
                constants.IPOPO_METHOD_FIELD_CALLBACKS,
                name,
            )
            continue

        # Keeping the attribute allows inheritance: by removing it, only the
        # first child would see it -> don't remove it

        # Store the callbacks
        for kind, field, if_valid in method_callbacks:
            fields_cbs = callbacks.setdefault(field, {})
            if kind in fields_cbs and not is_from_parent(
                cls, fields_cbs[kind][0].__name__
            ):
                _logger.warning(
                    "Redefining the callback %s in '%s'. "
                    "Previous callback : '%s' (%s). "
                    "New callback : %s",
                    kind,
                    name,
                    fields_cbs[kind][0].__name__,
                    fields_cbs[kind][0],
                    func,
                )
            fields_cbs[kind] = (func, if_valid)

    # Update the factory context
    if context.field_callbacks is not None:
        context.field_callbacks.clear()
        context.field_callbacks.update(callbacks)
    else:
        # Bug fix: the original unconditionally called .clear() here and
        # crashed with AttributeError when field_callbacks was None -- a
        # case the copy logic above explicitly allows for.
        context.field_callbacks = callbacks
|
def handle_valid(self, form=None, *args, **kwargs):
    """Called after the form has validated."""

    # ModelForm subclasses expose save(); persist the data when possible.
    if hasattr(form, 'save'):
        form.save()
    # Give the form a chance to run its own post-validation hook too.
    if hasattr(form, 'handle_valid'):
        form.handle_valid(*args, **kwargs)
|
def bind_field(
    self,
    form: DynamicForm,
    unbound_field: UnboundField,
    options: Dict[Any, Any],
) -> Field:
    """Customize how fields are bound by stripping all whitespace.

    :param form: The form
    :param unbound_field: The unbound field
    :param options: The field options
    :returns: The bound field
    """

    # Append a whitespace-stripping filter to whatever filters the field
    # already declares; non-string values pass through untouched.
    field_filters = unbound_field.kwargs.get('filters', [])
    field_filters.append(lambda value: value.strip() if isinstance(value, str) else value)
    return unbound_field.bind(form=form, filters=field_filters, **options)
|
def run(self):
    """Statistics logger job callback."""

    try:
        proxy = config_ini.engine.open()
        self.LOG.info("Stats for %s - up %s, %s" % (
            config_ini.engine.engine_id,
            fmt.human_duration(
                proxy.system.time() - config_ini.engine.startup, 0, 2, True
            ).strip(),
            proxy,
        ))
    except (error.LoggableError, xmlrpc.ERRORS) as exc:
        # Bug fix: the original used Python 2-only "except (...), exc" syntax,
        # which is a SyntaxError on Python 3; "as exc" works on 2.6+ and 3.
        self.LOG.warn(str(exc))
|
def from_ast_file(cls, filename, index=None):
    """Create a TranslationUnit instance from a saved AST file.

    A previously-saved AST file (provided with -emit-ast or
    TranslationUnit.save()) is loaded from the filename specified.

    If the file cannot be loaded, a TranslationUnitLoadError will be raised.

    index is optional and is the Index instance to use. If not provided,
    a default Index will be created.
    """

    if index is None:
        index = Index.create()

    # A null pointer from libclang means the AST file could not be parsed.
    ast_ptr = conf.lib.clang_createTranslationUnit(index, filename)
    if not ast_ptr:
        raise TranslationUnitLoadError(filename)

    return cls(ptr=ast_ptr, index=index)
|
def _get_cached_response(self):
    """Returns a file object of the cached response."""

    # Populate the cache on a miss before reading back from it, so the
    # return path is uniform for hits and misses.
    if not self._is_cached():
        self.cache.set_xml(self._get_cache_key(), self._download_response())
    return self.cache.get_xml(self._get_cache_key())
|
def flatten(xs: Union[List, Tuple]) -> List:
    """Flatten a nested list or tuple into a flat list of leaf values."""

    if isinstance(xs, (list, tuple)):
        flat = []
        for item in xs:
            flat.extend(flatten(item))
        return flat
    # A non-container leaf becomes a single-element list.
    return [xs]
|
def getNetworkSummary(self, suid, verbose=None):
    """Returns summary of collection containing the specified network.

    :param suid: Cytoscape Collection/Subnetwork SUID
    :param verbose: print more
    :returns: 200: successful operation
    """

    surl = self.___url
    # Last path component is the API version (e.g. 'v1').
    sv = surl.split('/')[-1]
    # Bug fix: the original used surl.rstrip(sv + '/'), but str.rstrip strips
    # a *set of characters* rather than a suffix, so it could eat additional
    # trailing characters of the base URL. Drop the final path component
    # explicitly instead.
    base = surl.rsplit('/', 1)[0]

    response = api(
        url=base + '/cyndex2/' + sv + '/networks/' + str(suid),
        method="GET",
        verbose=verbose,
        parse_params=False,
    )
    return response
|
def Sample(self, n):
    """Generates a random sample from this distribution.

    n: int sample size
    """

    # numpy.random.beta accepts a shape tuple; (n,) yields a 1-D sample.
    return numpy.random.beta(self.alpha, self.beta, (n,))
|
def sample(self, n_samples):
    """Generate `n_samples` of new data from the model.

    `v ~ U[0,1]`, `u ~ C^-1(u|v)`.

    Args:
        n_samples: `int`, amount of samples to create.

    Returns:
        np.ndarray: Array of length `n_samples` with generated data from the model.
    """

    if not -1 <= self.tau <= 1:
        raise ValueError("The range for correlation measure is [-1,1].")

    v = np.random.uniform(0, 1, n_samples)
    c = np.random.uniform(0, 1, n_samples)
    # Invert the conditional copula to obtain u given v.
    u = self.percent_point(c, v)
    return np.column_stack((u, v))
|
def _parse_thead_tbody_tfoot(self, table_html):
    """Given a table, return parsed header, body, and foot.

    Parameters
    ----------
    table_html : node-like

    Returns
    -------
    tuple of (header, body, footer), each a list of list-of-text rows.

    Notes
    -----
    Header and body are lists-of-lists. Top level list is a list of rows.
    Each row is a list of str text.

    Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body,
    and footer, otherwise:
      - Put all rows into body
      - Move rows from top of body to header only if all elements inside
        row are <th>
      - Move rows from bottom of body to footer only if all elements inside
        row are <th>
    """

    header_rows = self._parse_thead_tr(table_html)
    body_rows = self._parse_tbody_tr(table_html)
    footer_rows = self._parse_tfoot_tr(table_html)

    def _all_th(row):
        return all(self._equals_tag(cell, 'th') for cell in self._parse_td(row))

    if not header_rows:
        # The table has no <thead>. Move the top all-<th> rows from
        # body_rows to header_rows. (This is a common case because many
        # tables in the wild have no <thead> or <tfoot>.)
        while body_rows and _all_th(body_rows[0]):
            header_rows.append(body_rows.pop(0))

    return (
        self._expand_colspan_rowspan(header_rows),
        self._expand_colspan_rowspan(body_rows),
        self._expand_colspan_rowspan(footer_rows),
    )
|
def _set_user_password(self, v, load=False):
    """Setter method for user_password, mapped from YANG variable
    /username/user_password (user-passwd).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_user_password is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_user_password() directly.
    """

    # Some pyangbind union types carry a _utype coercion hook; normalize the
    # value through it before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # NOTE(review): `unicode` is Python 2 only -- this generated code will
        # raise NameError on Python 3; confirm the targeted runtime.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..40']}), is_leaf=True, yang_name="user-password", rest_name="password", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Password of the user', u'alt-name': u'password'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='user-passwd', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the YANG type description so callers see what the
        # value should have looked like.
        raise ValueError({'error-string': """user_password must be of a type compatible with user-passwd""", 'defined-type': "brocade-aaa:user-passwd", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..40']}), is_leaf=True, yang_name="user-password", rest_name="password", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Password of the user', u'alt-name': u'password'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='user-passwd', is_config=True)""", })
    self.__user_password = t
    # Some generated parents expose a _set() hook to propagate changes.
    if hasattr(self, '_set'):
        self._set()
|
def get_tape(self):
    """Get content of tape."""

    lo, hi = min(self.tape), max(self.tape)
    cells = []
    for position in range(lo, hi + 1):
        symbol = self.tape[position]
        cells.append(' ' if symbol == self.EMPTY_SYMBOL else symbol)
    # Remove unnecessary empty symbols on tape
    return ''.join(cells).strip()
|
def perlin(self, key, **kwargs):
    """Return perlin noise seeded with the specified key.

    For parameters, check the PerlinNoise class.
    """

    # Strings must be reduced to bytes before checksumming.
    if hasattr(key, "encode"):
        key = key.encode('ascii')
    # Derive a deterministic per-key seed from the instance seed.
    derived_seed = zlib.adler32(key, self.seed)
    return PerlinNoise(derived_seed, **kwargs)
|
def blk_1d(blk, shape):
    """Iterate through the slices that recover a line.

    This function is used by :func:`blk_nd` as a base 1d case.
    The last slice is returned even if it is smaller than blk.

    :param blk: the size of the block
    :param shape: the size of the array
    :return: a generator that yields the slices
    """

    covered, remainder = blk_coverage_1d(blk, shape)
    for start in range(0, covered, blk):
        yield slice(start, start + blk)
    # Emit the short tail slice when blk does not divide shape evenly.
    if remainder != 0:
        yield slice(covered, shape)
|
def flag(self, key, env=None):
    """Feature flagging system: read flags written to redis.

    ``$ dynaconf write redis -s DASHBOARD=1 -e premiumuser``

    meaning: any premium user has the DASHBOARD feature enabled.

    In your program do::

        # premium user has access to dashboard?
        >>> if settings.flag('dashboard', 'premiumuser'):
        ...     activate_dashboard()

    The value is ensured to be loaded fresh from the redis server. It also
    works with file settings, but redis is recommended as the data can be
    loaded once it is updated.

    :param key: The flag name
    :param env: The env to look for
    """

    lookup_env = env or self.ENVVAR_PREFIX_FOR_DYNACONF or "DYNACONF"
    with self.using_env(lookup_env):
        flag_value = self.get_fresh(key)
        # Accept a literal True as well as any configured truthy string.
        return flag_value is True or flag_value in true_values
|
def face_angles_sparse(mesh):
    """A sparse matrix representation of the face angles.

    Returns
    -------
    sparse : scipy.sparse.coo_matrix with:
        dtype: float
        shape: (len(mesh.vertices), len(mesh.faces))
    """

    # Reuse the row/col layout of the existing vertex-face sparse matrix,
    # substituting face angles as the stored values.
    angles = mesh.face_angles.flatten()
    locations = (mesh.faces_sparse.row, mesh.faces_sparse.col)
    return coo_matrix((angles, locations), mesh.faces_sparse.shape)
|
def modifyPdpContextRequest():
    """MODIFY PDP CONTEXT REQUEST Section 9.5.6"""

    # Layer the mandatory information elements in protocol order;
    # message type 0x48 == 0b01001000 (Modify PDP Context Request).
    packet = (TpPd(pd=0x8)
              / MessageType(mesType=0x48)
              / RadioPriorityAndSpareHalfOctets()
              / LlcServiceAccessPointIdentifier()
              / QualityOfService())
    return packet
|
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
    """Filter text using XPath, regex keywords, and tag attributes.

    Keyword arguments:
        infile -- HTML or text content to parse (list)
        xpath -- an XPath expression (str)
        filter_words -- regex keywords (list)
        attributes -- HTML tag attributes (list)

    Return a list of strings of text.
    """

    infiles = []
    text = []
    if xpath is not None:
        # Narrow the document down to the XPath matches first; parse_html may
        # return a list of nodes, a list of strings, a single node, or text.
        infile = parse_html(infile, xpath)
        if isinstance(infile, list):
            # NOTE(review): an empty list here would raise IndexError on the
            # [0] access -- confirm parse_html never returns [].
            if isinstance(infile[0], lh.HtmlElement):
                infiles = list(infile)
            else:
                text = [line + '\n' for line in infile]
        elif isinstance(infile, lh.HtmlElement):
            infiles = [infile]
        else:
            text = [infile]
    else:
        infiles = [infile]
    if attributes is not None:
        # Normalize attribute names and drop any that clean_attr rejects.
        attributes = [clean_attr(x) for x in attributes]
        attributes = [x for x in attributes if x]
    else:
        # Default to extracting node text content.
        attributes = ['text()']
    if not text:
        # Extract from every node, skipping <script> and <style> content.
        text_xpath = '//*[not(self::script) and not(self::style)]'
        for attr in attributes:
            for infile in infiles:
                if isinstance(infile, lh.HtmlElement):
                    new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
                else:
                    # re.split preserves delimiters place in the list
                    new_text = [x for x in re.split('(\n)', infile) if x]
                text += new_text
    if filter_words is not None:
        text = re_filter(text, filter_words)
    # Drop non-printable characters and empty lines from the final result.
    return [''.join(x for x in line if x in string.printable)
            for line in remove_whitespace(text) if line]
|
def update(self, jump):
    """Update the lattice state by accepting a specific jump.

    Args:
        jump (Jump): The jump that has been accepted.

    Returns:
        None.
    """

    atom = jump.initial_site.atom
    displacement = jump.dr(self.cell_lengths)

    # Occupy the destination site.
    destination = jump.final_site
    destination.occupation = atom.number
    destination.atom = atom
    destination.is_occupied = True

    # Vacate the origin site.
    origin = jump.initial_site
    origin.occupation = 0
    origin.atom = None
    origin.is_occupied = False

    # TODO: updating atom counters could be contained in an atom.move_to(site) method
    atom.site = destination
    atom.number_of_hops += 1
    atom.dr += displacement
    atom.summed_dr2 += np.dot(displacement, displacement)
|
def _proxy(self):
    """Generate an instance context for the instance; the context is capable of
    performing various actions. All instance actions are proxied to the context.

    :returns: RatePlanContext for this RatePlanInstance
    :rtype: twilio.rest.preview.wireless.rate_plan.RatePlanContext
    """

    # Lazily build the context on first use and cache it afterwards.
    if self._context is None:
        self._context = RatePlanContext(self._version, sid=self._solution['sid'])
    return self._context
|
def from_dict(cls, d):
    """Create an instance from a dictionary."""

    sim = super(Simulation, cls).from_dict(d)
    # The instance's input_files and cmd_line_args members still point to
    # data structures in the original dictionary. Copy them to avoid
    # surprises if they are changed in the original dictionary.
    sim.input_files = dict(sim.input_files)
    sim.cmd_line_args = list(sim.cmd_line_args)
    return sim
|
def remove_term(self, t):
    """Only removes top-level terms. Child terms can be removed at the parent."""

    # Drop the term from the top-level list; absent terms are ignored.
    try:
        self.terms.remove(t)
    except ValueError:
        pass

    if t.section and t.parent_term_lc == 'root':
        # Re-resolve the section against this document, then detach the term
        # from it without touching the document list again.
        t.section = self.add_section(t.section)
        t.section.remove_term(t, remove_from_doc=False)

    if t.parent:
        # A parent that no longer tracks the child is not an error.
        try:
            t.parent.remove_child(t)
        except ValueError:
            pass
|
def get_post_reference_section_keyword_patterns():
    """Return a list of compiled regex patterns used to search for various
    keywords that can often be found after, and therefore suggest the end of,
    a reference section in a full-text document.

    @return: (list) of compiled regex patterns.
    """

    # Bug fix: the original used Python 2-only ur'' literals, which are a
    # SyntaxError on Python 3; plain r'' strings are equivalent there.
    opt = _create_regex_pattern_add_optional_spaces_to_word_characters
    patterns = [
        '(' + opt('prepared') + '|' + opt('created') + r').*(AAS\s*)?\sLATEX',
        r'AAS\s+?LATEX\s+?' + opt('macros') + 'v',
        r'^\s*' + opt('This paper has been produced using'),
        r'^\s*' + opt('This article was processed by the author using Springer-Verlag')
        + ' LATEX',
    ]
    return [re.compile(p, re.I | re.UNICODE) for p in patterns]
|
def xarray_derivative_wrap(func):
    """Decorate the derivative functions to make them work nicely with DataArrays.

    This will automatically determine if the coordinates can be pulled directly
    from the DataArray, or if a call to lat_lon_grid_deltas is needed.
    """

    @functools.wraps(func)
    def wrapper(f, **kwargs):
        if 'x' in kwargs or 'delta' in kwargs:
            # Use the usual DataArray to pint.Quantity preprocessing wrapper
            return preprocess_xarray(func)(f, **kwargs)
        elif isinstance(f, xr.DataArray):
            # Get axis argument, defaulting to first dimension
            axis = f.metpy.find_axis_name(kwargs.get('axis', 0))
            # Initialize new kwargs with the axis number
            new_kwargs = {'axis': f.get_axis_num(axis)}
            if f[axis].attrs.get('_metpy_axis') == 'T':
                # Time coordinate, need to convert to seconds from datetimes
                new_kwargs['x'] = f[axis].metpy.as_timestamp().metpy.unit_array
            elif CFConventionHandler.check_axis(f[axis], 'lon'):
                # Longitude coordinate, need to get grid deltas
                new_kwargs['delta'], _ = grid_deltas_from_dataarray(f)
            elif CFConventionHandler.check_axis(f[axis], 'lat'):
                # Latitude coordinate, need to get grid deltas
                _, new_kwargs['delta'] = grid_deltas_from_dataarray(f)
            else:
                # General coordinate, use as is
                new_kwargs['x'] = f[axis].metpy.unit_array
            # Calculate and return result as a DataArray, re-attaching the
            # units of the pint.Quantity result as a string attribute.
            result = func(f.metpy.unit_array, **new_kwargs)
            return xr.DataArray(result.magnitude, coords=f.coords, dims=f.dims, attrs={'units': str(result.units)})
        else:
            # Error: neither explicit positions nor a DataArray to infer from.
            raise ValueError('Must specify either "x" or "delta" for value positions when "f" ' 'is not a DataArray.')
    return wrapper
|
def validate(self, signature):
    '''Check if ofiles and ifiles match signatures recorded in md5file.

    Returns an error string describing the first mismatch found, or a dict
    with keys 'input', 'output', 'depends' and 'vars' when validation passes.
    '''

    if not signature:
        return 'Empty signature'
    # Every target referenced by this step must still exist on disk.
    sig_files = self.input_files._targets + self.output_files._targets + self.dependent_files._targets
    for x in sig_files:
        if not x.target_exists('any'):
            return f'Missing target {x}'
    files_checked = {x.target_name(): False for x in sig_files}
    res = {'input': [], 'output': [], 'depends': [], 'vars': {}}
    cur_type = 'input'
    # old signature
    if 'init_context' in signature:
        # Old-style signatures store the full variable values for comparison.
        for key, value in signature['init_context'].items():
            if key not in env.sos_dict:
                return f'Variable {key} not in running environment'
            try:
                if env.sos_dict[key] != value:
                    return f'Context variable {key} value mismatch: {short_repr(value)} saved, {short_repr(env.sos_dict[key])} current'
            except Exception as e:
                # Uncomparable variables are tolerated, not treated as mismatch.
                env.logger.debug(f"Variable {key} of type {type(value).__name__} cannot be compared: {e}")
    elif 'init_context_sig' in signature:
        # Newer signatures only store an MD5 digest of each variable.
        for key, value in signature['init_context_sig'].items():
            if key not in env.sos_dict:
                return f'Variable {key} not in running environment'
            try:
                if objectMD5(env.sos_dict[key]) != value:
                    return f'ID of context variable {key} ({objectMD5(env.sos_dict[key])}) mismatch: {short_repr(env.sos_dict[key])} does not match id {value}'
            except Exception as e:
                env.logger.debug(f"Variable {key} cannot be compared: {e}")
    res['vars'].update(signature['end_context'])
    for cur_type in ['input', 'output', 'depends']:
        for f, m in signature[cur_type].items():
            try:
                if '(' in f and ')' in f:
                    # this part is hard, because this can be a customized target.
                    target_type = f.split('(')[0]
                    target_class = None
                    if target_type in globals():
                        target_class = eval(target_type)
                    else:
                        # check registry
                        for entrypoint in pkg_resources.iter_entry_points(group='sos_targets'):
                            if entrypoint.name.strip() == target_type:
                                target_class = entrypoint.load()
                                break
                    if target_class is None:
                        raise ValueError(f'Failed to identify target class {target_type}')
                    # parameter of class?
                    # NOTE(review): eval() executes the signature expression;
                    # signatures are assumed to be trusted local files.
                    freal = eval(f, {target_type: target_class})
                else:
                    freal = file_target(f)
                if not freal.validate(m):
                    return f'Target {f} does not exist or does not match saved signature {m}'
                res[cur_type].append(freal.target_name() if isinstance(freal, file_target) else freal)
                files_checked[freal.target_name()] = True
            except Exception as e:
                # Individual failures are logged; the missing-checkmark test
                # below turns them into a validation error.
                env.logger.debug(f'Wrong md5 in signature: {e}')
    if not all(files_checked.values()):
        return f'No MD5 signature for {", ".join(x for x,y in files_checked.items() if not y)}'
    if 'input_obj' in signature:
        # for new style signature, the entire objects are kept
        res['input'] = signature['input_obj']
        res['depends'] = signature['depends_obj']
        res['output'] = signature['output_obj']
    return res
|
def process_additional_minidisks(self, userid, disk_info):
    '''Generate and punch the scripts used to process additional disk into
    target vm's reader.'''

    for index, disk in enumerate(disk_info):
        # Fall back to generated/default values for anything the disk
        # dictionary omits.
        vdev = disk.get('vdev') or self.generate_disk_vdev(offset=(index + 1))
        fmt = disk.get('format')
        mount_dir = disk.get('mntdir') or '/mnt/ephemeral%s' % vdev
        disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
        self.aemod_handler(userid, '/var/lib/zvmsdk/setupDisk', disk_parms)

    # trigger do-script only when the guest is already running
    if self.get_power_state(userid) == 'on':
        self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")
|
def sample_labels(self, n):
    """Returns a set of n labels sampled from the labels of the tree.

    :param n: Number of labels to sample
    :return: set of randomly sampled labels
    """

    if n >= len(self):
        # Fewer labels than requested: return them all.
        return self.labels
    # Bug fix: random.sample() requires a sequence since Python 3.11
    # (sampling from a set was deprecated in 3.9 and now raises TypeError),
    # so materialize the labels into a list first.
    return set(random.sample(list(self.labels), n))
|
def coarseMaximum(arr, shape):
    '''return an array of [shape]
    where every cell equals the localised maximum of the given array [arr]
    at the same (scaled) position'''

    out_rows, out_cols = shape
    in_rows, in_cols = arr.shape

    # Sampling positions of each coarse cell within the fine array.
    pos0 = linspace2(0, in_rows, out_rows, dtype=int)
    pos1 = linspace2(0, in_cols, out_cols, dtype=int)

    result = np.empty(shape, dtype=arr.dtype)
    _calc(arr, result, pos0, pos1, pos0[0], pos1[0], out_rows, out_cols)
    return result
|
def _mine(self, load, skip_verify=False):
    '''Return the mine data'''

    # Reject malformed payloads unless the caller vouches for them.
    if not skip_verify and ('id' not in load or 'data' not in load):
        return False

    if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
        cache_bank = 'minions/{0}'.format(load['id'])
        cache_key = 'mine'
        if not load.get('clear', False):
            # Merge new data on top of whatever is already cached.
            cached = self.cache.fetch(cache_bank, cache_key)
            if isinstance(cached, dict):
                cached.update(load['data'])
                load['data'] = cached
        self.cache.store(cache_bank, cache_key, load['data'])
    return True
|
def chrome_tracing_object_transfer_dump(self, filename=None):
    """Return a list of transfer events that can viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file
    by passing in "filename" or using json.dump, and then go to
    chrome://tracing in the Chrome web browser and load the dumped file.
    Make sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling
        events. Each profile event is a dictionary.
    """

    # Map each client ID to "address:port", used below as tracing row ids.
    client_id_to_address = {}
    for client_info in ray.global_state.client_table():
        client_id_to_address[client_info["ClientID"]] = "{}:{}".format(client_info["NodeManagerAddress"], client_info["ObjectManagerPort"])
    all_events = []
    for key, items in self.profile_table().items():
        # Only consider object manager events.
        if items[0]["component_type"] != "object_manager":
            continue
        for event in items:
            # All three known event types carry the object id and the remote
            # client id at the front of extra_data.
            if event["event_type"] == "transfer_send":
                object_id, remote_client_id, _, _ = event["extra_data"]
            elif event["event_type"] == "transfer_receive":
                object_id, remote_client_id, _, _ = event["extra_data"]
            elif event["event_type"] == "receive_pull_request":
                object_id, remote_client_id = event["extra_data"]
            else:
                assert False, "This should be unreachable."
            # Choose a color by reading the first couple of hex digits of
            # the object ID as an integer and turning that into a color.
            object_id_int = int(object_id[:2], 16)
            color = self._chrome_tracing_colors[object_id_int % len(self._chrome_tracing_colors)]
            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": client_id_to_address[key],
                # The identifier for the row that the event appears in.
                "tid": client_id_to_address[remote_client_id],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(event["end_time"] - event["start_time"]),
                # "X" marks a complete (duration) event in the chrome
                # tracing format.
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": color,
                # The extra user-defined data.
                "args": event["extra_data"],
            }
            all_events.append(new_event)
            # Add another box with a color indicating whether it was a send
            # or a receive event.
            if event["event_type"] == "transfer_send":
                additional_event = new_event.copy()
                additional_event["cname"] = "black"
                all_events.append(additional_event)
            elif event["event_type"] == "transfer_receive":
                additional_event = new_event.copy()
                additional_event["cname"] = "grey"
                all_events.append(additional_event)
            else:
                pass
    if filename is not None:
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
|
def add_colorbar(self, *args, **kwargs):
    """DEPRECATED, use `Plot.colorbar` instead."""

    # Warn on every call so downstream users migrate before removal.
    warnings.warn(
        "{0}.add_colorbar was renamed {0}.colorbar, this warnings will "
        "result in an error in the future".format(type(self).__name__),
        DeprecationWarning,
    )
    return self.colorbar(*args, **kwargs)
|
def napi_and(values, **kwargs):
    """Perform element-wise logical *and* operation on arrays.

    If *values* contains a non-array object with truth value **False**, the
    outcome will be an array of **False**\ s with suitable shape without
    arrays being evaluated. Non-array objects with truth value **True** are
    omitted. If array shapes do not match (after squeezing when enabled by
    user), :exc:`ValueError` is raised.

    This function uses :obj:`numpy.logical_and` or :obj:`numpy.all`.
    """

    arrays = []
    result = None
    shapes = set()
    # Partition inputs: real arrays are collected, falsy scalars remembered.
    # NOTE(review): a falsy value of None is stored in `result` but the later
    # `result is not None` test ignores it; and if `values` is empty, the
    # final `return value` raises NameError -- confirm intended behavior.
    for value in values:
        if isinstance(value, ndarray) and value.shape:
            arrays.append(value)
            shapes.add(value.shape)
        elif not value:
            result = value
    # Optional squeeze pass ('sq'/'squeeze') to reconcile mismatched shapes.
    if len(shapes) > 1 and kwargs.get('sq', kwargs.get('squeeze', False)):
        shapes.clear()
        for i, a in enumerate(arrays):
            a = arrays[i] = a.squeeze()
            shapes.add(a.shape)
        if len(shapes) > 1:
            raise ValueError('array shape mismatch, even after squeezing')
    if len(shapes) > 1:
        raise ValueError('array shape mismatch')
    shape = shapes.pop() if shapes else None
    if result is not None:
        # A falsy scalar was seen: short-circuit without evaluating arrays.
        if shape:
            return numpy.zeros(shape, bool)
        else:
            return result
    elif arrays:
        # Optional short-circuit evaluation for large arrays ('sc').
        sc = kwargs.get('sc', kwargs.get('shortcircuit', 0))
        if sc and numpy.prod(shape) >= sc:
            return short_circuit_and(arrays, shape)
        elif len(arrays) == 2:
            return numpy.logical_and(*arrays)
        else:
            return numpy.all(arrays, 0)
    else:
        # Only truthy scalars were seen: return the last one, like `and`.
        return value
|
def to_UNIXtime(timeobject):
    """Returns the UNIXtime corresponding to the time value conveyed by the
    specified object, which can be either a UNIXtime, a
    ``datetime.datetime`` object or an ISO8601-formatted string in the format
    ``YYYY-MM-DD HH:MM:SS+00``.

    :param timeobject: the object conveying the time value
    :type timeobject: int, ``datetime.datetime`` or ISO8601-formatted string
    :returns: an int UNIXtime
    :raises: *TypeError* when bad argument types are provided, *ValueError*
        when negative UNIXtimes are provided
    """

    if isinstance(timeobject, int):
        if timeobject < 0:
            raise ValueError("The time value is a negative number")
        return timeobject
    if isinstance(timeobject, datetime):
        return _datetime_to_UNIXtime(timeobject)
    if isinstance(timeobject, str):
        return _ISO8601_to_UNIXtime(timeobject)
    raise TypeError(
        'The time value must be expressed either by an int '
        'UNIX time, a datetime.datetime object or an '
        'ISO8601-formatted string'
    )
|
def delNode(self, address):
    """Delete a node from the NodeServer.

    :param address: Address of the node to remove.
    """

    LOGGER.info('Removing node {}'.format(address))
    # Ask the server to remove the node identified by its address.
    self.send({'removenode': {'address': address}})
|
def add(self, tagname, tagvalue):
    """:returns: numeric index associated to the tag"""

    index_map = getattr(self, tagname + '_idx')
    try:
        # Known value: return its existing index.
        return index_map[tagvalue]
    except KeyError:
        # First occurrence: assign the next index and record the value.
        index_map[tagvalue] = new_idx = len(index_map)
        getattr(self, tagname).append(tagvalue)
        if new_idx > TWO16:
            raise InvalidFile('contains more then %d tags' % TWO16)
        return new_idx
|
def L1(layer="input", constant=0, batch=None):
    """L1 norm of layer. Generally used as penalty."""

    if batch is None:
        def objective(T):
            return tf.reduce_sum(tf.abs(T(layer) - constant))
    else:
        # Restrict the penalty to a single batch element.
        def objective(T):
            return tf.reduce_sum(tf.abs(T(layer)[batch] - constant))
    return objective
|
def InitNornir(
    config_file: str = "",
    dry_run: bool = False,
    configure_logging: Optional[bool] = None,
    **kwargs: Dict[str, Any],
) -> Nornir:
    """Instantiate and configure a Nornir object.

    Arguments:
        config_file (str): Path to the configuration file (optional)
        dry_run (bool): Whether to simulate changes or not
        configure_logging: Whether to configure logging or not. This argument
            is being deprecated. Please use the logging.enabled parameter in
            the configuration instead.
        **kwargs: Extra information to pass to the
            :obj:`nornir.core.configuration.Config` object

    Returns:
        :obj:`nornir.core.Nornir`: fully instantiated and configured
    """
    register_default_connection_plugins()
    # The inventory plugin/transform_function may be passed as callables;
    # Config expects their dotted-path strings, so convert them first.
    if callable(kwargs.get("inventory", {}).get("plugin", "")):
        kwargs["inventory"]["plugin"] = cls_to_string(kwargs["inventory"]["plugin"])
    if callable(kwargs.get("inventory", {}).get("transform_function", "")):
        kwargs["inventory"]["transform_function"] = cls_to_string(
            kwargs["inventory"]["transform_function"])
    conf = Config.load_from_file(config_file, **kwargs)
    data = GlobalState(dry_run=dry_run)
    if configure_logging is not None:
        msg = ("'configure_logging' argument is deprecated, please use "
               "'logging.enabled' parameter in the configuration instead: "
               "https://nornir.readthedocs.io/en/stable/configuration/index.html")
        warnings.warn(msg, DeprecationWarning)
    # logging.enabled precedence: explicit configuration wins; otherwise fall
    # back to the deprecated argument, defaulting to enabled.
    if conf.logging.enabled is None:
        if configure_logging is not None:
            conf.logging.enabled = configure_logging
        else:
            conf.logging.enabled = True
    conf.logging.configure()
    inv = conf.inventory.plugin.deserialize(
        transform_function=conf.inventory.transform_function,
        transform_function_options=conf.inventory.transform_function_options,
        config=conf,
        **conf.inventory.options,
    )
    return Nornir(inventory=inv, config=conf, data=data)
|
def validate_collxml(*content_filepaths):
    """Validate the given COLLXML files against the collxml-jing.rng RNG schema.

    :param content_filepaths: one or more paths to COLLXML files
    :returns: the result of running ``jing`` against the resolved paths
    """
    resolved = tuple(Path(filepath).resolve() for filepath in content_filepaths)
    return jing(COLLXML_JING_RNG, *resolved)
|
def destroy(cls, *ids):
    """Delete the records with the given primary-key ids.

    :param ids: primary key ids of the records to delete
    :type ids: list
    """
    for pk in ids:
        cls.find(pk).delete()
    # Flush once after all deletions rather than per record.
    cls.session.flush()
|
def nation(self, nation_name, password=None, autologin=None):
    """Set up access to the Nation API via a Nation object.

    :param nation_name: Name of the nation
    :param password: (Optional) password for this nation
    :param autologin: (Optional) autologin for this nation
    :type nation_name: str
    :type password: str
    :type autologin: str
    :returns: Nation object based off nation_name
    :rtype: Nation
    """
    return Nation(nation_name, self, password=password, autologin=autologin)
|
def getAutoServiceLevelEnabled(self):
    """Return True if the auto service level is enabled, False if disabled."""
    settings = self.sendCommand('$GE')
    # The third returned field is a hexadecimal flag word; the enabled state
    # is the negation of flag bit 0x0020.
    flags = int(settings[2], 16)
    return (flags & 0x0020) == 0
|
def parse():
    """Parse command line options for Dynamic DynamoDB.

    Returns:
        dict: every command-line option that was actually supplied
        (options left at their ``None`` default are omitted).
    """
    parser = argparse.ArgumentParser(
        description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB')
    parser.add_argument(
        '-c', '--config',
        help='Read configuration from a configuration file')
    parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Run without making any changes to your DynamoDB table')
    parser.add_argument(
        '--run-once',
        action='store_true',
        help='Run once and then exit Dynamic DynamoDB, instead of looping')
    parser.add_argument(
        '--show-config',
        action='store_true',
        help='Parse config files, print parsed data and then exit Dynamic DynamoDB')
    parser.add_argument(
        '--check-interval',
        type=int,
        help="""How many seconds should we wait between
            the checks (default: 300)""")
    parser.add_argument(
        '--log-file',
        help='Send output to the given log file')
    parser.add_argument(
        '--log-level',
        choices=['debug', 'info', 'warning', 'error'],
        help='Log level to use (default: info)')
    parser.add_argument(
        '--log-config-file',
        help=('Use a custom Python logging configuration file. Overrides both '
              '--log-level and --log-file.'))
    parser.add_argument(
        '--version',
        action='store_true',
        help='Print current version number')
    parser.add_argument(
        '--aws-access-key-id',
        help="Override Boto configuration with the following AWS access key")
    parser.add_argument(
        '--aws-secret-access-key',
        help="Override Boto configuration with the following AWS secret key")
    daemon_ag = parser.add_argument_group('Daemon options')
    daemon_ag.add_argument(
        '--daemon',
        help=('Run Dynamic DynamoDB in daemon mode. Valid modes are '
              '[start|stop|restart|foreground]'))
    daemon_ag.add_argument(
        '--instance',
        default='default',
        help=('Name of the Dynamic DynamoDB instance. '
              'Used to run multiple instances of Dynamic DynamoDB. '
              'Give each instance a unique name and control them separately '
              'with the --daemon flag. (default: default)'))
    daemon_ag.add_argument(
        '--pid-file-dir',
        default='/tmp',
        help='Directory where pid file is located in. Defaults to /tmp')
    dynamodb_ag = parser.add_argument_group('DynamoDB options')
    dynamodb_ag.add_argument(
        '-r', '--region',
        help='AWS region to operate in (default: us-east-1')
    dynamodb_ag.add_argument(
        '-t', '--table-name',
        help=('Table(s) to target. '
              'The name is treated as a regular expression. '
              'E.g. "^my_table.*$" or "my_table"'))
    r_scaling_ag = parser.add_argument_group('Read units scaling properties')
    r_scaling_ag.add_argument(
        '--reads-upper-threshold',
        type=int,
        help="""Scale up the reads with --increase-reads-with if
            the currently consumed read units reaches this many
            percent (default: 90)""")
    r_scaling_ag.add_argument(
        '--throttled-reads-upper-threshold',
        type=int,
        help="""Scale up the reads with --increase-reads-with if
            the count of throttled read events exceeds this
            count (default: 0)""")
    r_scaling_ag.add_argument(
        '--reads-lower-threshold',
        type=int,
        help="""Scale down the reads with --decrease-reads-with if the
            currently consumed read units is as low as this
            percentage (default: 30)""")
    r_scaling_ag.add_argument(
        '--increase-reads-with',
        type=int,
        help="""How much should we increase the read units with?
            (default: 50, max: 100 if --increase-reads-unit = percent)""")
    r_scaling_ag.add_argument(
        '--decrease-reads-with',
        type=int,
        help="""How much should we decrease the read units with?
            (default: 50)""")
    r_scaling_ag.add_argument(
        '--increase-reads-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    r_scaling_ag.add_argument(
        '--decrease-reads-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    r_scaling_ag.add_argument(
        '--min-provisioned-reads',
        type=int,
        help="""Minimum number of provisioned reads""")
    r_scaling_ag.add_argument(
        '--max-provisioned-reads',
        type=int,
        help="""Maximum number of provisioned reads""")
    r_scaling_ag.add_argument(
        '--num-read-checks-before-scale-down',
        type=int,
        help="""Number of consecutive checks that must meet criteria
            before a scale down event occurs""")
    r_scaling_ag.add_argument(
        '--num-read-checks-reset-percent',
        type=int,
        help="""Percentage Value that will cause the num_read_checks_before
            scale_down var to reset back to 0""")
    w_scaling_ag = parser.add_argument_group('Write units scaling properties')
    w_scaling_ag.add_argument(
        '--writes-upper-threshold',
        type=int,
        help="""Scale up the writes with --increase-writes-with
            if the currently consumed write units reaches this
            many percent (default: 90)""")
    w_scaling_ag.add_argument(
        '--throttled-writes-upper-threshold',
        type=int,
        help="""Scale up the reads with --increase-writes-with if
            the count of throttled write events exceeds this
            count (default: 0)""")
    w_scaling_ag.add_argument(
        '--writes-lower-threshold',
        type=int,
        help="""Scale down the writes with --decrease-writes-with
            if the currently consumed write units is as low as this
            percentage (default: 30)""")
    w_scaling_ag.add_argument(
        '--increase-writes-with',
        type=int,
        help="""How much should we increase the write units with?
            (default: 50,
            max: 100 if --increase-writes-unit = 'percent')""")
    w_scaling_ag.add_argument(
        '--decrease-writes-with',
        type=int,
        help="""How much should we decrease the write units with?
            (default: 50)""")
    w_scaling_ag.add_argument(
        '--increase-writes-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    w_scaling_ag.add_argument(
        '--decrease-writes-unit',
        type=str,
        help='Do you want to scale in percent or units? (default: percent)')
    w_scaling_ag.add_argument(
        '--min-provisioned-writes',
        type=int,
        help="""Minimum number of provisioned writes""")
    w_scaling_ag.add_argument(
        '--max-provisioned-writes',
        type=int,
        help="""Maximum number of provisioned writes""")
    w_scaling_ag.add_argument(
        '--num-write-checks-before-scale-down',
        type=int,
        help="""Number of consecutive checks that must meet criteria
            before a scale down event occurs""")
    w_scaling_ag.add_argument(
        '--num-write-checks-reset-percent',
        type=int,
        help="""Percentage Value that will cause the num_write_checks_before
            scale_down var to reset back to 0""")
    args = parser.parse_args()
    # Print the version and quit
    if args.version:
        # Read the dynamic-dynamodb.conf configuration file shipped next to
        # the package to find the version string.
        internal_config_file = ConfigParser.RawConfigParser()
        # Keep option names case sensitive.
        internal_config_file.optionxform = lambda option: option
        internal_config_file.read(os.path.abspath(
            os.path.join(os.path.dirname(__file__), '../dynamic-dynamodb.conf')))
        # Fixed: this was a Python 2 ``print`` statement (a SyntaxError on
        # Python 3); the call form behaves identically on both versions.
        print('Dynamic DynamoDB version: {0}'.format(
            internal_config_file.get('general', 'version')))
        sys.exit(0)
    # Keep only the options the user actually supplied on the command line.
    configuration = {}
    for arg in args.__dict__:
        if args.__dict__.get(arg) is not None:
            configuration[arg] = args.__dict__.get(arg)
    return configuration
|
def rename_dupe_cols(cols):
    """Take a list of strings and append 2, 3, 4, etc. to duplicates.

    Never appends a 0 or a 1; the first occurrence of a name keeps its
    original spelling.  Wrapping this around a ``DataFrame.to_sql`` call
    guarantees no duplicate-column-name errors when importing data to SQL --
    you just have to check yourself which fields were renamed.

    :param cols: list of column names
    :returns: list of the same length with duplicates disambiguated
    """
    # Tally how often each name appears.
    occurrences = {}
    for name in cols:
        occurrences[name] = occurrences.get(name, 0) + 1
    renamed = list(cols)
    for name, total in occurrences.items():
        if total < 2:
            continue
        # All positions holding this duplicated name, in document order.
        spots = [i for i, fld in enumerate(cols) if fld == name]
        # The first occurrence stays untouched; later ones get 2, 3, ...
        suffix = 2
        for i in spots[1:]:
            renamed[i] = name + str(suffix)
            suffix += 1
    return renamed
|
def remove_task(cls, task):
    """Remove *task* from the list of tasks to run periodically.

    :param Task|callable task: task (or the callable it wraps) to remove
    """
    with cls._lock:
        if not isinstance(task, Task):
            # A bare callable was given: map it back to its registered Task.
            task = cls.resolved_task(task)
        if task:
            cls.tasks.remove(task)
            # Keep the task list ordered after removal.
            cls.tasks.sort()
|
def _set_ldp_fec_vcs(self, v, load=False):
    """Setter method for ldp_fec_vcs, mapped from YANG variable
    /mpls_state/ldp/fec/ldp_fec_vcs (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_ldp_fec_vcs is considered as a private method.  Backends
    looking to populate this variable should do so via calling
    thisObj._set_ldp_fec_vcs() directly.
    """
    if hasattr(v, "_utype"):
        # Unwrap a typed value into its underlying representation first.
        v = v._utype(v)
    try:
        # YANGDynClass validates the value against the container type.
        t = YANGDynClass(v, base=ldp_fec_vcs.ldp_fec_vcs, is_container='container', presence=False, yang_name="ldp-fec-vcs", rest_name="ldp-fec-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-vcs', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """ldp_fec_vcs must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ldp_fec_vcs.ldp_fec_vcs, is_container='container', presence=False, yang_name="ldp-fec-vcs", rest_name="ldp-fec-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-vcs', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", })
    self.__ldp_fec_vcs = t
    if hasattr(self, '_set'):
        self._set()
|
def OnPasteAs(self, event):
    """Clipboard "paste as" event handler.

    Reads the clipboard, pastes its content at the current cursor cell
    inside an undo group, refreshes the grid and lets the event propagate.
    """
    data = self.main_window.clipboard.get_clipboard()
    key = self.main_window.grid.actions.cursor
    # Group the paste into a single undoable action.
    with undo.group(_("Paste As...")):
        self.main_window.actions.paste_as(key, data)
    self.main_window.grid.ForceRefresh()
    event.Skip()
|
def cardinal_direction(self):
    """Return the cardinal direction of the wind as a string.

    Possible returned values are 'N', 'E', 'S', 'W', and None:
      315 (incl.) to 45 (excl.) degrees  -> N
      45 (incl.) to 135 (excl.) degrees  -> E
      135 (incl.) to 225 (excl.) degrees -> S
      225 (incl.) to 315 (excl.) degrees -> W
      None if no direction is set.

    :returns: one of 'N', 'E', 'S', 'W', or None
    :raises Exception: if the direction is outside the [0, 360] range
    """
    if self.direction is None:
        return None
    if self.direction > 360 or self.direction < 0:
        raise Exception('Direction out of range')
    # Fixed: the previous code read ``(315 <= self.direction) <= 360``, which
    # compared a boolean to 360 and was therefore always true, so every valid
    # direction was reported as 'N'.  A proper chained comparison is used now.
    if 315 <= self.direction <= 360 or 0 <= self.direction < 45:
        return 'N'
    elif 45 <= self.direction < 135:
        return 'E'
    elif 135 <= self.direction < 225:
        return 'S'
    elif 225 <= self.direction < 315:
        return 'W'
|
def _gorg(cls):
    """Return the generic origin of *cls*.

    This function exists for compatibility with old typing versions.
    """
    assert isinstance(cls, GenericMeta)
    # Newer typing versions expose the origin directly on the class.
    try:
        return cls._gorg
    except AttributeError:
        pass
    # Older versions: walk the __origin__ chain up to the unsubscripted class.
    while cls.__origin__ is not None:
        cls = cls.__origin__
    return cls
|
def summarize(self, text, length=5, binary_matrix=True):
    """Implements the method of summarization by relevance score, as described
    by Gong and Liu in the paper:

    Y. Gong and X. Liu (2001). Generic text summarization using relevance
    measure and latent semantic analysis. Proceedings of the 24th
    International Conference on Research in Information Retrieval
    (SIGIR '01), pp. 19-25.

    This method computes and ranks the cosine similarity between each
    sentence vector and the overall document.

    :param text: a string of text to be summarized, path to a text file, or
        URL starting with http
    :param length: the length of the output summary; either a number of
        sentences (e.g. 5) or a percentage of the original document (e.g. 0.5)
    :param binary_matrix: boolean value indicating whether the matrix of word
        counts should be binary (True by default)
    :return: list of sentences for the summary
    """
    text = self._parse_input(text)
    sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
    length = self._parse_summary_length(length, len(sentences))
    if length == len(sentences):
        # Summary as long as the document: nothing to rank.
        return unprocessed_sentences
    # NOTE(review): the .data/.indptr slicing below assumes a scipy CSR
    # sparse matrix -- confirm the return type of _compute_matrix.
    matrix = self._compute_matrix(sentences, weighting='frequency')
    # Sum occurrences of terms over all sentences to obtain document frequency
    doc_frequency = matrix.sum(axis=0)
    if binary_matrix:
        matrix = (matrix != 0).astype(int)
    summary_sentences = []
    for _ in range(length):
        # Take the inner product of each sentence vector with the document vector
        sentence_scores = matrix.dot(doc_frequency.transpose())
        sentence_scores = np.array(sentence_scores.T)[0]
        # Grab the top sentence and add it to the summary
        top_sentence = sentence_scores.argsort()[-1]
        summary_sentences.append(top_sentence)
        # Remove all terms that appear in the top sentence from the document
        terms_in_top_sentence = (matrix[top_sentence, :] != 0).toarray()
        doc_frequency[terms_in_top_sentence] = 0
        # Remove the top sentence from consideration by setting all its
        # elements to zero.  This does the same as matrix[top_sentence, :] = 0,
        # but is much faster for sparse matrices.
        matrix.data[matrix.indptr[top_sentence]:matrix.indptr[top_sentence + 1]] = 0
        matrix.eliminate_zeros()
    # Return the sentences in the order in which they appear in the document
    summary_sentences.sort()
    return [unprocessed_sentences[i] for i in summary_sentences]
|
def graphql_query(self, query_hash: str, variables: Dict[str, Any],
                  referer: Optional[str] = None,
                  rhx_gis: Optional[str] = None) -> Dict[str, Any]:
    """Do a GraphQL Query.

    :param query_hash: Query identifying hash.
    :param variables: Variables for the Query.
    :param referer: HTTP Referer, or None.
    :param rhx_gis: 'rhx_gis' variable as somewhere returned by Instagram,
        needed to 'sign' the request
    :return: The server's response dictionary.
    """
    with copy_session(self._session) as tmpsession:
        tmpsession.headers.update(self._default_http_header(empty_session_only=True))
        # Strip/override headers so the request resembles a browser call.
        del tmpsession.headers['Connection']
        del tmpsession.headers['Content-Length']
        tmpsession.headers['authority'] = 'www.instagram.com'
        tmpsession.headers['scheme'] = 'https'
        tmpsession.headers['accept'] = '*/*'
        if referer is not None:
            tmpsession.headers['referer'] = urllib.parse.quote(referer)
        # Compact JSON (no spaces): the signature below depends on the exact
        # serialized form of the variables.
        variables_json = json.dumps(variables, separators=(',', ':'))
        if rhx_gis:
            # self.log("rhx_gis {} query_hash {}".format(rhx_gis, query_hash))
            # 'Sign' the request: MD5 over "<rhx_gis>:<variables_json>".
            values = "{}:{}".format(rhx_gis, variables_json)
            x_instagram_gis = hashlib.md5(values.encode()).hexdigest()
            tmpsession.headers['x-instagram-gis'] = x_instagram_gis
        resp_json = self.get_json('graphql/query',
                                  params={'query_hash': query_hash,
                                          'variables': variables_json},
                                  session=tmpsession)
        if 'status' not in resp_json:
            self.error("GraphQL response did not contain a \"status\" field.")
        return resp_json
|
def get_version_requested(path):
    """Return a string listing the requested Terraform version.

    Reads the version file (``TF_VERSION_FILENAME``) in *path*; exits the
    process with an error message when the file is missing.
    """
    version_file = os.path.join(path, TF_VERSION_FILENAME)
    if not os.path.isfile(version_file):
        LOGGER.error("Terraform install attempted and no %s file present to "
                     "dictate the version. Please create it (e.g. write "
                     "\"0.11.13\" (without quotes) to the file and try again",
                     TF_VERSION_FILENAME)
        sys.exit(1)
    with open(version_file, 'r') as stream:
        return stream.read().rstrip()
|
def reindex_sortable_title(portal):
    """Reindex the ``sortable_title`` index of several catalogs.

    :param portal: the portal object, handed to commit_transaction when done
    """
    catalogs = [
        "bika_catalog",
        "bika_setup_catalog",
        "portal_catalog",
    ]
    for catalog_name in catalogs:
        logger.info("Reindexing sortable_title for {} ...".format(catalog_name))
        # steps=100: let ZLogHandler report reindexing progress in batches.
        handler = ZLogHandler(steps=100)
        catalog = api.get_tool(catalog_name)
        catalog.reindexIndex("sortable_title", None, pghandler=handler)
    commit_transaction(portal)
|
def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
    """Delete a specific crash report from the server.

    :param server: base URL of the server
    :param report_number: Report Number
    :param timeout: request timeout in seconds
    :return: the server response, or False when the request failed
    """
    url = server + "/reports/delete/%d" % report_number
    try:
        response = requests.post(url, timeout=timeout)
    except Exception as exc:
        logging.error(exc)
        return False
    return response
|
def request(self, method, url, params=None, headers=None):
    """Issue an API request; return a deferred.

    :param method: HTTP method to use
    :param url: target URL
    :param params: request parameters; defaults to ``self._retrieve_params``
    :param headers: optional extra headers
    """
    if params is None:
        params = self._retrieve_params
    return make_request(self, method, url, stripe_account=self.stripe_account,
                        params=params, headers=headers)
|
def read_raid(self, raid_config=None):
    """Read the logical drives from the system.

    :param raid_config: None, or a dictionary containing target raid
        configuration data, e.g.::

            raid_config = {'logical_disks': [
                {'raid_level': 1, 'size_gb': 100,
                 'physical_disks': ['6I:1:5'],
                 'controller': 'HPE Smart Array P408i-a SR Gen10'},
                <info-for-logical-disk-2>]}

        When supplied (read after a RAID create), the config is forwarded to
        the post-create read; otherwise (read after a RAID delete) the
        post-delete read is performed.
    :returns: a dictionary containing the list of logical disks
    """
    self.check_smart_storage_config_ids()
    if not raid_config:
        # No target config given: reading back after a RAID delete.
        return self._post_delete_read_raid()
    # A target config was given: reading back after a RAID create.
    return self._post_create_read_raid(raid_config=raid_config)
|
def peek_stack_dwords(self, count, offset=0):
    """Try to read DWORDs from the top of the stack.

    @type  count: int
    @param count: Number of DWORDs to read.
    @type  offset: int
    @param offset: Offset from the stack pointer to begin reading.
    @rtype:  tuple( int... )
    @return: Tuple of integers read from the stack.
        May be less than the requested number of DWORDs.
    """
    stackData = self.peek_stack_data(count * 4, offset)
    # Truncate to a whole number of DWORDs.  Fixed: the previous code computed
    # ``stackData[:-len(stackData) & 3]`` which, because unary minus binds
    # tighter than ``&``, sliced to ``(-len) & 3`` (0..3) bytes instead of
    # dropping only the partial trailing DWORD.
    usable = len(stackData) & ~3
    stackData = stackData[:usable]
    if not stackData:
        return ()
    # Unpack however many complete DWORDs were actually read (may be fewer
    # than requested); the old code always unpacked ``count`` DWORDs and
    # raised struct.error on short reads.
    return struct.unpack('<%dL' % (usable // 4), stackData)
|
def shell_out(cmd, timeout=30, chroot=None, runat=None):
    """Shell out to an external command and return the output or the empty
    string in case of error.

    :param cmd: command line to execute
    :param timeout: seconds to wait for the command before giving up
    :param chroot: optional root directory to confine the command to
    :param runat: optional working directory for the command
    """
    return sos_get_command_output(cmd, timeout=timeout, chroot=chroot,
                                  chdir=runat)['output']
|
def qbe_tree(graph, nodes, root=None):
    """Given a graph, nodes to explore and an optional root, traverse the
    graph in order to return the tree connecting the requested nodes.

    :param graph: mapping of node -> [(edge, neighbor, neighbor_edge), ...]
    :param nodes: nodes that must appear in the tree (consumed in place)
    :param root: optional starting node; a random requested node otherwise
    :returns: tuple ``(tree, all_found)`` where ``tree`` maps each node to a
        list of ``(edge, other_node, other_edge)`` links and ``all_found`` is
        True when every requested node was reached
    """
    if root:
        start = root
    else:
        # No root given: pick one of the requested nodes at random.
        index = random.randint(0, len(nodes) - 1)
        start = nodes[index]
    # A queue to BFS instead DFS
    # NOTE(review): ``to_visit.pop()`` below removes from the RIGHT end of the
    # deque, so despite the comment above this actually explores depth-first;
    # ``popleft()`` would give a true BFS -- confirm the intended order.
    to_visit = deque()
    cnodes = copy(nodes)
    visited = set()
    # Format is (parent, parent_edge, neighbor, neighbor_field)
    to_visit.append((None, None, start, None))
    tree = {}
    while len(to_visit) != 0 and nodes:
        parent, parent_edge, v, v_edge = to_visit.pop()
        # Prune: each requested node only needs to be found once.
        if v in nodes:
            nodes.remove(v)
        node = graph[v]
        if v not in visited and len(node) > 1:
            visited.add(v)
            # Preorder process: link v and its parent in both directions.
            if all((parent, parent_edge, v, v_edge)):
                if parent not in tree:
                    tree[parent] = []
                if (parent_edge, v, v_edge) not in tree[parent]:
                    tree[parent].append((parent_edge, v, v_edge))
                if v not in tree:
                    tree[v] = []
                if (v_edge, parent, parent_edge) not in tree[v]:
                    tree[v].append((v_edge, parent, parent_edge))
            # Iteration: schedule every neighbor of v for traversal.
            for node_edge, neighbor, neighbor_edge in node:
                value = (v, node_edge, neighbor, neighbor_edge)
                to_visit.append(value)
    # Prune branches via remove_leafs (defined elsewhere) before returning.
    remove_leafs(tree, cnodes)
    return tree, (len(nodes) == 0)
|
def get_cert_contents(kwargs):
    """Build parameters with server cert file contents.

    Args:
        kwargs (dict): The keyword args passed to ensure_server_cert_exists,
            optionally containing the paths to the cert, key and chain files
            (or file-like objects, as used by tests).

    Returns:
        dict: A dictionary containing the appropriate parameters to supply to
        upload_server_certificate.  Parts whose path is missing or skipped
        are simply left out.
    """
    paths = {
        "certificate": kwargs.get("path_to_certificate"),
        "private_key": kwargs.get("path_to_private_key"),
        "chain": kwargs.get("path_to_chain"),
    }
    # Interactively prompt for any part that was not supplied.
    for part in paths:
        if paths[part] is not None:
            continue
        entered = input("Path to %s (skip): " % (part,))
        if entered == "skip" or not entered.strip():
            continue
        paths[part] = entered
    parameters = {
        "ServerCertificateName": kwargs.get("cert_name"),
    }
    upload_keys = {
        "certificate": "CertificateBody",
        "private_key": "PrivateKey",
        "chain": "CertificateChain",
    }
    for part, path in paths.items():
        if not path:
            continue
        # Allow passing of file like object for tests
        try:
            contents = path.read()
        except AttributeError:
            with open(utils.full_path(path)) as read_file:
                contents = read_file.read()
        parameters[upload_keys[part]] = contents
    return parameters
|
def create(self, name, passphrase=None, wallet_data=None):
    """Create a new Wallet object and add it to this Wallets collection.

    This is only available in this library for Application wallets.  Users
    must add additional wallets in their User Console.

    Args:
        name (str): wallet name
        passphrase (str, optional): A passphrase with which to encrypt a user
            wallet.  If not supplied, wallet_data is mandatory.
        wallet_data (dict): Output from wallets.generate.
            For User Wallets, only the primary tree is used.
            For Application Wallets, the primary and backup trees are used.

    Returns:
        A tuple of the (backup_private_seed, round.Wallet).
    """
    if not self.application:
        raise RoundError("User accounts are limited to one wallet. Make an "
                         "account or shoot us an email <dev@gem.co> if you "
                         "have a compelling use case for more.")
    if not passphrase and not wallet_data:
        raise ValueError("Usage: wallets.create(name, passphrase [, "
                         "wallet_data])")
    elif passphrase:
        # A passphrase was supplied: derive fresh wallet trees from it.
        wallet_data = generate(passphrase,
                               trees=(['primary', 'backup']
                                      if (self.application) else ['primary']))
    wallet = dict(primary_private_seed=wallet_data['primary']['encrypted_seed'],
                  primary_public_seed=wallet_data['primary']['public_seed'],
                  name=name)
    if self.application:
        wallet['backup_public_seed'] = wallet_data['backup']['public_seed']
    resource = self.resource.create(wallet)
    wallet = self.wrap(resource)
    # NOTE(review): the guard at the top raises unless self.application is
    # truthy, so the non-application branches below are unreachable dead code.
    return ((wallet_data['backup']['private_seed'], self.add(wallet))
            if (self.application) else self.add(wallet))
|
def oauth_flow(s, oauth_url, username=None, password=None, sandbox=False):
    """Drive the Salesforce web OAuth login flow.

    :param s: a requests session
    :param oauth_url: OAuth authorization URL to start from
    :param username: Salesforce username
    :param password: Salesforce password
    :param sandbox: when True, authenticate against test.salesforce.com
    :returns: the final redirect URL extracted from the login response
    :raises RuntimeError: when the initial GET fails (status >= 300)
    :raises AssertionError: when the expected redirect expression is missing
        (e.g. wrong username or password)
    """
    r = s.get(oauth_url)
    if r.status_code >= 300:
        raise RuntimeError(r.text)
    # Carry over the query parameters (startURL etc.) of the login redirect.
    params = urlparse.parse_qs(urlparse.urlparse(r.url).query)
    # Form fields mimicking the interactive Salesforce login page.
    data = {"un": username, "width": 2560, "height": 1440, "hasRememberUn": True, "startURL": params['startURL'], "loginURL": "", "loginType": 6, "useSecure": True, "local": "", "lt": "OAUTH", "qs": "r=https%3A%2F%2Flocalhost%3A8443%2Fsalesforce%2F21", "locale": "", "oauth_token": "", "oauth_callback": "", "login": "", "serverid": "", "display": "popup", "username": username, "pw": password, "Login": ""}
    base = "https://login.salesforce.com" if not sandbox else "https://test.salesforce.com"
    r2 = s.post(base, data)
    # The next hop is set via a JavaScript ``window.location.href`` assignment.
    m = re.search("window.location.href\s*='(.[^']+)'", r2.text)
    assert m is not None, "Couldn't find location.href expression in page %s (Username or password is wrong)" % r2.url
    u3 = "https://" + urlparse.urlparse(r2.url).hostname + m.group(1)
    r3 = s.get(u3)
    m = re.search("window.location.href\s*='(.[^']+)'", r3.text)
    assert m is not None, "Couldn't find location.href expression in page %s:\n%s" % (r3.url, r3.text)
    return m.group(1)
|
def before_render(self):
    """Before template render hook.

    Adds the "Add" context action when the user may add clients, and shows
    the row-select checkboxes when the user may modify portal content.
    """
    # Render the Add button if the user has the AddClient permission
    if check_permission(AddClient, self.context):
        self.context_actions[_("Add")] = {
            "url": "createObject?type_name=Client",
            "icon": "++resource++bika.lims.images/add.png"}
    # Display a checkbox next to each client in the list if the user has
    # rights for ModifyPortalContent
    self.show_select_column = check_permission(ModifyPortalContent, self.context)
|
def run_nested_error_groups():
    """Run the nested-groups example where an error occurs in a nested main phase.

    In this example, the first main phase in the nested PhaseGroup errors out.
    The other inner main phase is skipped, as is the outer main phase.  Both
    PhaseGroups were entered, so both teardown phases are run.
    """
    test = htf.Test(
        htf.PhaseGroup(
            main=[
                # Inner group: errors in error_main_phase skip main_phase but
                # still run inner_teardown_phase.
                htf.PhaseGroup.with_teardown(inner_teardown_phase)(
                    error_main_phase, main_phase),
                main_phase,
            ],
            teardown=[teardown_phase],
        ))
    test.execute()
|
def transform(func, data):
    """Apply *func* on each element in *data* and return the list consisting
    of the return values from *func*.

    >>> data = [[10, 20], [30, 40], [50, 60]]
    >>> transform(lambda x: [x[0], x[1] + 1], data)
    [[10, 21], [30, 41], [50, 61]]

    :param func: callable applied to each element
    :param data: iterable of elements
    :returns: list of ``func(element)`` results, in input order
    """
    # Idiomatic list comprehension replaces the manual append loop.
    return [func(r) for r in data]
|
def adjust_ages(AgesIn):
    """Adjust a list of (age, age_unit) records to a common age unit.

    The target unit is the largest unit present among the records
    ("Ga" > "Ma" > "Ka" > years).  Fixes over the previous version: the "Ma"
    branch assigned a misspelled ``age_unt`` variable, and units were only
    upgraded when no unit had been seen yet, so e.g. a "Ka" record followed
    by a "Ma" record normalized to Ka instead of Ma.  The unused ``factors``
    list was removed.

    :param AgesIn: list of [age, age_unit] records; age_unit is one of "Ga",
        "Ma", "Ka", or a years label ("Years BP", "Years Cal BP",
        "Years AD (+/-)", "Years Cal AD (+/-)")
    :returns: tuple (AgesOut, age_unit) with the converted ages and the label
        of the common unit
    """
    # Collect the distinct units and determine the largest one seen.
    age_units, AgesOut = [], []
    maxunit, factor, age_unit = 1, 1, "Ma"
    for agerec in AgesIn:
        if agerec[1] not in age_units:
            age_units.append(agerec[1])
        if agerec[1] == "Ga":
            # Ga is the largest supported unit; it always wins.
            maxunit, age_unit, factor = 1e9, "Ga", 1e9
        if agerec[1] == "Ma" and maxunit < 1e6:
            maxunit, age_unit, factor = 1e6, "Ma", 1e6
        if agerec[1] == "Ka" and maxunit < 1e3:
            maxunit, age_unit, factor = 1e3, "Ka", 1e3
    if len(age_units) == 1:
        # All ages are of the same type: return them unchanged.
        for agerec in AgesIn:
            AgesOut.append(agerec[0])
    elif len(age_units) > 1:
        # Normalize every record to the largest age unit found above.
        for agerec in AgesIn:
            if agerec[1] == "Ga":
                AgesOut.append(agerec[0] * 1e9 / factor)
            if agerec[1] == "Ma":
                AgesOut.append(agerec[0] * 1e6 / factor)
            if agerec[1] == "Ka":
                AgesOut.append(agerec[0] * 1e3 / factor)
            if "Years" in agerec[1].split():
                if agerec[1] == "Years BP":
                    AgesOut.append(old_div(agerec[0], factor))
                if agerec[1] == "Years Cal BP":
                    AgesOut.append(old_div(agerec[0], factor))
                if agerec[1] == "Years AD (+/-)":
                    # Convert to years BP first (BP epoch is 1950).
                    AgesOut.append(old_div((1950 - agerec[0]), factor))
                if agerec[1] == "Years Cal AD (+/-)":
                    AgesOut.append(old_div((1950 - agerec[0]), factor))
    return AgesOut, age_unit
|
def round(col, scale=0):
    """Round the given value to `scale` decimal places using HALF_UP rounding
    mode if `scale` >= 0 or at integral part when `scale` < 0.

    NOTE: this deliberately shadows the builtin ``round`` within this module.

    >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
    [Row(r=3.0)]
    """
    sc = SparkContext._active_spark_context
    # Delegate to the JVM-side Spark SQL functions.round implementation.
    return Column(sc._jvm.functions.round(_to_java_column(col), scale))
|
def compute(datetimes, to_np=None):  # @NoSelf
    """Computes the provided date/time components into CDF epoch value(s).

    The epoch type is inferred from the number of components per record:

    * 7 components  -> CDF_EPOCH   (year, month, day, hour, minute, second,
      millisecond), dispatched to ``CDFepoch.compute_epoch``.
    * 10 components -> CDF_EPOCH16 (adds microsecond, nanosecond, picosecond),
      dispatched to ``CDFepoch.compute_epoch16``.
    * 9 components  -> TT2000      (up to nanosecond), dispatched to
      ``CDFepoch.compute_tt2000``.

    ``datetimes`` may be a single record (flat list of numbers) or a list of
    records (list of lists); only the first element is inspected to decide
    the shape.  Set ``to_np`` to True if the result should be a numpy object.

    :raises TypeError: if the input is not list-like, or if the number of
        components does not match any known epoch type.  (Previously such
        inputs were silently reported via ``print`` and ``None`` returned,
        which hid caller bugs.)
    """
    if not isinstance(datetimes, (list, tuple, np.ndarray)):
        raise TypeError('datetime must be in list form')
    if isinstance(datetimes[0], numbers.Number):
        # A single flat record: its length decides the epoch type.
        items = len(datetimes)
    elif isinstance(datetimes[0], (list, tuple, np.ndarray)):
        # A list of records: all records are assumed to share one shape.
        items = len(datetimes[0])
    else:
        raise TypeError('Unknown input')
    if items == 7:
        return CDFepoch.compute_epoch(datetimes, to_np)
    if items == 10:
        return CDFepoch.compute_epoch16(datetimes, to_np)
    if items == 9:
        return CDFepoch.compute_tt2000(datetimes, to_np)
    raise TypeError('Unknown input')
|
def find_stream(self, **kwargs):
    """Finds a single stream with the given meta data values.  Useful for
    debugging purposes.

    :param kwargs: The meta data as keyword arguments
    :return: The stream found
    :raises StreamNotFoundError: if no stream matches
    :raises MultipleStreamsFoundError: if more than one stream matches
    """
    matches = list(self.find_streams(**kwargs).values())
    if not matches:
        raise StreamNotFoundError(kwargs)
    if len(matches) > 1:
        raise MultipleStreamsFoundError(kwargs)
    return matches[0]
|
def splitDataset(dataset, groupby):
    """Split the given dataset into multiple datasets grouped by the given
    groupby function.  For example::

        # Split mnist dataset into 10 datasets, one dataset for each label
        splitDataset(mnist, groupby=lambda x: x[1])

        # Split mnist into 5 datasets, one per label pair: [0,1], [2,3], ...
        splitDataset(mnist, groupby=lambda x: x[1] // 2)

    :param dataset: Source dataset to split
    :param groupby: Group by function, mapping an item to its group key
    :return: List of datasets, ordered by sorted group key
    """
    # Accumulate the original indices of every item, keyed by its group.
    indicesByGroup = collections.defaultdict(list)
    for position, item in enumerate(dataset):
        indicesByGroup[groupby(item)].append(position)
    # Deterministic output order: sort by group key.
    orderedKeys = sorted(indicesByGroup)
    return [Subset(dataset, indices=indicesByGroup[key]) for key in orderedKeys]
|
def filterAcceptsRow(self, row_num, parent):
    """Qt override.

    Reimplemented from base class to allow the use of custom filtering:
    a row is accepted when ``self.pattern`` matches anywhere in the row's
    ``name``.
    """
    source = self.sourceModel()
    row_name = source.row(row_num).name
    return re.search(self.pattern, row_name) is not None
|
def result_str(self):
    """Return a string representing the totals contained herein.

    :return: str counts/types string, e.g. ``"2 projects, 1 folder, 5 files"``
    """
    parts = [
        LocalOnlyCounter.plural_fmt('project', self.projects),
        LocalOnlyCounter.plural_fmt('folder', self.folders),
        LocalOnlyCounter.plural_fmt('file', self.files),
    ]
    return '{}, {}, {}'.format(*parts)
|
def npoints_between(lon1, lat1, depth1, lon2, lat2, depth2, npoints):
    """Find a list of a specified number of points between two given ones,
    equally spaced along the great circle arc connecting them.

    :param float lon1, lat1, depth1:
        Coordinates of the start point (returned as the first point).
    :param float lon2, lat2, depth2:
        Coordinates of the end point (returned as the last point).
    :param npoints:
        Integer number of points to return.  First and last points count,
        so for two intervals ``npoints`` should be 3.
    :returns:
        Tuple of three 1d numpy arrays: longitudes, latitudes and depths.

    Computes the distance and azimuth between the endpoints and delegates to
    :func:`npoints_towards`.
    """
    horizontal = geodetic_distance(lon1, lat1, lon2, lat2)
    vertical = depth2 - depth1
    heading = azimuth(lon1, lat1, lon2, lat2)
    lons, lats, depths = npoints_towards(lon1, lat1, depth1, heading, horizontal, vertical, npoints)
    # Pin the final point exactly on the requested endpoint so floating-point
    # drift in the stepping cannot move it.
    lons[-1] = lon2
    lats[-1] = lat2
    depths[-1] = depth2
    return lons, lats, depths
|
def unbounded(self):
    """Returns a list of key dimensions that are unbounded, excluding
    stream parameters.  If any of these key dimensions are unbounded,
    the DynamicMap as a whole is also unbounded."""
    unbounded_dims = []
    # Dimensioned streams do not need to be bounded.
    stream_params = set(util.stream_parameters(self.streams))
    for kdim in self.kdims:
        label = str(kdim)
        # A dimension is bounded if a stream supplies it, if it has an
        # explicit list of values, or if both ends of its range are set.
        if label in stream_params or kdim.values:
            continue
        if None in kdim.range:
            unbounded_dims.append(label)
    return unbounded_dims
|
def Write(self, output_writer):
    """Writes the table to the output writer.

    Args:
      output_writer (CLIOutputWriter): output writer.
    """
    # Round every column size up to the next tab stop (an exact multiple
    # still gains a full extra tab, preserving at least one separator).
    tab = self._NUMBER_OF_SPACES_IN_TAB
    for column_index, column_size in enumerate(self._column_sizes):
        whole_tabs, _ = divmod(column_size, tab)
        self._column_sizes[column_index] = (whole_tabs + 1) * tab
    # Header row (bold) first, then the data rows.
    if self._columns:
        self._WriteRow(output_writer, self._columns, in_bold=True)
    for values in self._rows:
        self._WriteRow(output_writer, values)
|
def update_current_tags(self, tags):
    """Set a new set of tags for this executable.

    Update the set of tags that this job will use.  This updates default
    file naming and shared options.  It will *not* update the pegasus
    profile, which belongs to the executable and cannot be different for
    different nodes.

    Parameters
    ----------
    tags : list
        The new list of tags to consider.
    """
    if tags is None:
        tags = []
    # Tags are treated case-insensitively; normalise to upper case.
    tags = [tag.upper() for tag in tags]
    self.tags = tags
    if len(tags) > 6:
        # Purely informational: many tags make the permutation scan below
        # expensive and the file names unwieldy.
        warn_msg = "This job has way too many tags. "
        warn_msg += "Current tags are {}. ".format(' '.join(tags))
        warn_msg += "Current executable {}.".format(self.name)
        logging.info(warn_msg)
    # Build the tagged name used for output file naming:
    # <name>-<TAG1_TAG2...>[-<ifo_string>]
    if len(tags) != 0:
        self.tagged_name = "{0}-{1}".format(self.name, '_'.join(tags))
    else:
        self.tagged_name = self.name
    if self.ifo_string is not None:
        self.tagged_name = "{0}-{1}".format(self.tagged_name, self.ifo_string)
    # Determine the sections from the ini file that will configure
    # this executable.
    sections = [self.name]
    if self.ifo_list is not None:
        if len(self.ifo_list) > 1:
            # Multi-ifo job: individual ifos and the combined ifo string
            # may each carry their own config sections.
            sec_tags = tags + self.ifo_list + [self.ifo_string]
        else:
            sec_tags = tags + self.ifo_list
    else:
        sec_tags = tags
    # Accept a [name-tagA-tagB] section for every ordering and subset of
    # the tags.  NOTE(review): this is combinatorial in len(sec_tags).
    for sec_len in range(1, len(sec_tags) + 1):
        for tag_permutation in permutations(sec_tags, sec_len):
            joined_name = '-'.join(tag_permutation)
            section = '{0}-{1}'.format(self.name, joined_name.lower())
            if self.cp.has_section(section):
                sections.append(section)
    self.sections = sections
    # Basic sanity check: no option may be defined in two matched sections.
    for sec1, sec2 in combinations(sections, 2):
        self.cp.check_duplicate_options(sec1, sec2, raise_error=True)
    # Collect the options and profile information from the ini file
    # section(s), resetting any previously collected state first.
    self.common_options = []
    self.common_raw_options = []
    self.common_input_files = []
    for sec in sections:
        if self.cp.has_section(sec):
            self.add_ini_opts(self.cp, sec)
        else:
            warn_string = "warning: config file is missing section "
            warn_string += "[{0}]".format(sec)
            logging.warn(warn_string)
|
def xpatherror(self, file, line, no):
    """Formats an error message.

    Thin wrapper around ``libxml2mod.xmlXPatherror`` applied to this
    object's underlying C handle (``self._o``).

    :param file: source file name to report
    :param line: line number to report
    :param no: error number
    """
    libxml2mod.xmlXPatherror(self._o, file, line, no)
|
def _check_connection ( self ) :
"""Check if connection is alive every reconnect _ timeout seconds ."""
|
if ( ( self . tcp_disconnect_timer + 2 * self . reconnect_timeout ) < time . time ( ) ) :
self . tcp_disconnect_timer = time . time ( )
raise OSError ( 'No response from {}. Disconnecting' . format ( self . server_address ) )
if ( self . tcp_check_timer + self . reconnect_timeout ) >= time . time ( ) :
return
msg = Message ( ) . modify ( child_id = 255 , type = self . const . MessageType . internal , sub_type = self . const . Internal . I_VERSION )
self . add_job ( msg . encode )
self . tcp_check_timer = time . time ( )
|
def save_bin(self, bin_form, *args, **kwargs):
    """Pass through to provider ``BinAdminSession.update_bin``.

    Dispatches to ``update_bin`` when the form was issued for an update,
    otherwise to ``create_bin``.
    """
    # Implemented from kitosid template for -
    # osid.resource.BinAdminSession.update_bin
    if bin_form.is_for_update():
        return self.update_bin(bin_form, *args, **kwargs)
    return self.create_bin(bin_form, *args, **kwargs)
|
def parse_line_headers(self, line):
    """We must build headers carefully: there are multiple blank values
    in the header row, and the instrument may just add more for all
    we know.  Blank cells are replaced by their column index (as a string)
    so every header is unique and non-empty.
    """
    cells = line.split(",")
    self.headers = [cell if cell else str(position)
                    for position, cell in enumerate(cells)]
|
def drawLognormal(N, mu=0.0, sigma=1.0, seed=0):
    '''Generate arrays of mean one lognormal draws.  The sigma input can be a
    number or list-like.  If a number, output is a length N array of draws
    from the lognormal distribution with standard deviation sigma.  If a
    list, output is a length T list whose t-th entry is a length N array of
    draws from the lognormal with standard deviation sigma[t].

    Parameters
    ----------
    N : int
        Number of draws in each row.
    mu : float or [float]
        One or more means.  Number of elements T in mu determines number
        of rows of output.
    sigma : float or [float]
        One or more standard deviations.  Number of elements T in sigma
        determines number of rows of output.
    seed : int
        Seed for random number generator.

    Returns
    -------
    draws : np.array or [np.array]
        T-length list of arrays of mean one lognormal draws each of size N,
        or a single array of size N (if sigma is a scalar).
    '''
    # Set up the RNG
    RNG = np.random.RandomState(seed)
    # Accept any real scalar (int, float, numpy scalar), not just float:
    # the previous isinstance(sigma, float) check crashed on e.g. sigma=0.
    if np.isscalar(sigma):  # Return a single array of length N
        if sigma == 0:
            # Degenerate distribution: all mass at exp(mu).
            draws = np.exp(mu) * np.ones(N)
        else:
            draws = RNG.lognormal(mean=mu, sigma=sigma, size=N)
    else:  # Populate a list with one array of draws per sigma entry
        draws = []
        for j in range(len(sigma)):
            if sigma[j] == 0:
                draws.append(np.exp(mu[j]) * np.ones(N))
            else:
                draws.append(RNG.lognormal(mean=mu[j], sigma=sigma[j], size=N))
    return draws
|
async def create_student_container(self, job_id, parent_container_id, sockets_path,
                                   student_path, systemfiles_path,
                                   course_common_student_path, socket_id,
                                   environment_name, memory_limit, time_limit,
                                   hard_time_limit, share_network, write_stream):
    """Creates a new student container.

    On any failure a ``run_student_retval`` message with retval 254 is sent
    back on ``write_stream`` so the grading container is never left waiting.

    :param write_stream: stream on which to write the return value of the
        container (with a correctly formatted msgpack message)
    """
    try:
        self._logger.debug("Starting new student container... %s %s %s %s", environment_name, memory_limit, time_limit, hard_time_limit)
        if environment_name not in self._containers:
            # Unknown environment alias: report failure (254) and give up.
            self._logger.warning("Student container asked for an unknown environment %s (not in aliases)", environment_name)
            await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": 254, "socket_id": socket_id})
            return
        environment = self._containers[environment_name]["id"]
        try:
            # One unix socket per student container, named after its id.
            socket_path = path_join(sockets_path, str(socket_id) + ".sock")
            container_id = await self._docker.create_container_student(parent_container_id, environment, share_network, memory_limit, student_path, socket_path, systemfiles_path, course_common_student_path)
        except Exception as e:
            self._logger.exception("Cannot create student container!")
            await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": 254, "socket_id": socket_id})
            # Cancellation must still propagate after the failure is reported.
            if isinstance(e, asyncio.CancelledError):
                raise
            return
        # Track the container so job teardown can clean it up.
        self._student_containers_for_job[job_id].add(container_id)
        self._student_containers_running[container_id] = job_id, parent_container_id, socket_id, write_stream
        # send to the container that the sibling has started
        await self._write_to_container_stdin(write_stream, {"type": "run_student_started", "socket_id": socket_id})
        try:
            await self._docker.start_container(container_id)
        except Exception as e:
            self._logger.exception("Cannot start student container!")
            await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": 254, "socket_id": socket_id})
            if isinstance(e, asyncio.CancelledError):
                raise
            return
        # Verify the time limit
        await self._timeout_watcher.register_container(container_id, time_limit, hard_time_limit)
    except asyncio.CancelledError:
        raise
    except:
        # NOTE(review): bare except kept as-is — deliberately best-effort;
        # everything except cancellation (re-raised above) is logged and
        # suppressed so the agent loop survives.
        self._logger.exception("Exception in create_student_container")
|
def _check_package ( pkg_xml , zipfilename , zf ) :
"""Helper for ` ` build _ index ( ) ` ` : Perform some checks to make sure that
the given package is consistent ."""
|
# The filename must patch the id given in the XML file .
uid = os . path . splitext ( os . path . split ( zipfilename ) [ 1 ] ) [ 0 ]
if pkg_xml . get ( 'id' ) != uid :
raise ValueError ( 'package identifier mismatch (%s vs %s)' % ( pkg_xml . get ( 'id' ) , uid ) )
# Zip file must expand to a subdir whose name matches uid .
if sum ( ( name != uid and not name . startswith ( uid + '/' ) ) for name in zf . namelist ( ) ) :
raise ValueError ( 'Zipfile %s.zip does not expand to a single ' 'subdirectory %s/' % ( uid , uid ) )
|
def jacobi(A, x, b, iterations=1, omega=1.0):
    """Perform Jacobi iteration on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Sparse NxN matrix (BSR blocks must be square).
    x : ndarray
        Approximate solution (length N); updated in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    omega : scalar
        Damping parameter.

    Returns
    -------
    Nothing, x will be modified in place.

    Examples
    --------
    >>> # Use Jacobi as a Stand-Alone Solver
    >>> from pyamg.relaxation.relaxation import jacobi
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> A = poisson((10, 10), format='csr')
    >>> x0 = np.zeros((A.shape[0], 1))
    >>> b = np.ones((A.shape[0], 1))
    >>> jacobi(A, x0, b, iterations=10, omega=1.0)
    >>> print(norm(b - A * x0))
    5.83475132751
    >>> # Use Jacobi as the Multigrid Smoother
    >>> from pyamg import smoothed_aggregation_solver
    >>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0], 1)),
    ...         coarse_solver='pinv2', max_coarse=50,
    ...         presmoother=('jacobi', {'omega': 4.0/3.0, 'iterations': 2}),
    ...         postsmoother=('jacobi', {'omega': 4.0/3.0, 'iterations': 2}))
    >>> x0 = np.zeros((A.shape[0], 1))
    >>> residuals = []
    >>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
    """
    # Normalise inputs to a supported sparse format (CSR or BSR).
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    # Full sweep over all rows: start=0, stop=N, step=1.
    sweep = slice(None)
    (row_start, row_stop, row_step) = sweep.indices(A.shape[0])
    if (row_stop - row_start) * row_step <= 0:  # no work to do
        return
    # Scratch array for the out-of-place half of the Jacobi update.
    temp = np.empty_like(x)
    # Create uniform type, convert possibly complex scalars to length 1 arrays
    [omega] = type_prep(A.dtype, [omega])
    if sparse.isspmatrix_csr(A):
        for iter in range(iterations):
            amg_core.jacobi(A.indptr, A.indices, A.data, x, b, temp,
                            row_start, row_stop, row_step, omega)
    else:
        # BSR path: iterate over block rows instead of scalar rows.
        R, C = A.blocksize
        if R != C:
            raise ValueError('BSR blocks must be square')
        row_start = int(row_start / R)
        row_stop = int(row_stop / R)
        for iter in range(iterations):
            amg_core.bsr_jacobi(A.indptr, A.indices, np.ravel(A.data), x, b,
                                temp, row_start, row_stop, row_step, R, omega)
|
def exam_reliability(x_axis, x_axis_new, reliable_distance, precision=0.0001):
    """Evaluate how much interpolated data points can be trusted.

    When we do linear interpolation on ``x_axis`` and derive values for
    ``x_axis_new``, for each new point ``t`` find the distance ``dist`` to
    the closest point in ``x_axis``.  If ``dist - precision`` <=
    ``reliable_distance`` the interpolated value is trusted, otherwise not.
    ``precision`` absorbs floating point representation error (1.0 may
    actually be 1.000001 or 0.99999).

    Both ``x_axis`` and ``x_axis_new`` must be sorted ascending lists.
    This is an O(n) single-pass implementation using a pop/push stack.

    :param x_axis: sorted list of original sample positions
    :param x_axis_new: sorted list of query positions
    :param reliable_distance: maximum trusted distance to a sample
    :param precision: tolerance added to the comparison
    :return: list of bools, one per entry of ``x_axis_new``
    """
    # Reversed copy so pop() walks x_axis in ascending order; the sentinel
    # guarantees a "left" neighbour exists for every query point.
    stack = x_axis[::-1]
    stack.append(-2 ** 32)
    distance_to_closest_point = list()
    for t in x_axis_new:
        while 1:
            try:
                x = stack.pop()
                if x <= t:
                    left = x
                else:
                    right = x
                    # Push the bracketing pair back so the next (larger)
                    # query can reuse it.
                    stack.append(right)
                    stack.append(left)
                    left_dist, right_dist = (t - left), (right - t)
                    distance_to_closest_point.append(min(left_dist, right_dist))
                    break
            except IndexError:
                # Ran off the top of x_axis: t lies beyond the largest
                # sample, so only the left neighbour exists.  (Previously a
                # bare `except:` here hid unrelated errors.)
                distance_to_closest_point.append(t - left)
                break
    return [dist - precision - reliable_distance <= 0
            for dist in distance_to_closest_point]
|
def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:
    """Print a plain table with the details of the given trainings:
    directory name, model class names, age, duration and finished epochs.

    :param trainings: iterable of tuples (train_dir, configuration dict, trace)
    """
    rows = []
    for train_dir, config, trace in trainings:
        begin = trace[TrainingTraceKeys.TRAIN_BEGIN]
        end = trace[TrainingTraceKeys.TRAIN_END]
        # Age/duration are only known once the training has begun/ended.
        age = CXF_NA_STR
        duration = CXF_NA_STR
        if begin:
            age = format_timedelta(datetime.now() - begin) + ' ago'
            if end:
                duration = format_timedelta(end - begin)
        epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] or 0
        short_names = [fq_name.split('.')[-1] for fq_name in get_classes(config)]
        rows.append([path.basename(train_dir)] + short_names + [age, duration, epochs_done])
    print(tabulate(rows, tablefmt='plain'))
|
def _make_expr_empty_dict ( toplevel , stack_builders ) :
"""This should only be hit for empty dicts . Anything else should hit the
STORE _ MAP handler instead ."""
|
if toplevel . arg :
raise DecompilationError ( "make_expr() called with nonzero BUILD_MAP arg %d" % toplevel . arg )
if stack_builders :
raise DecompilationError ( "Unexpected stack_builders for BUILD_MAP(0): %s" % stack_builders )
return ast . Dict ( keys = [ ] , values = [ ] )
|
def hacking_no_old_style_class(logical_line, noqa):
    r"""Check for old style classes.

    Examples:
    Okay: class Foo(object):\n    pass
    Okay: class Foo(Bar, Baz):\n    pass
    Okay: class Foo(object, Baz):\n    pass
    Okay: class Foo(somefunc()):\n    pass
    H238: class Bar:\n    pass
    H238: class Bar():\n    pass
    """
    if noqa:
        return
    normalized = core.import_normalize(logical_line.strip())
    is_class_decl = normalized.startswith("class ")
    if is_class_decl and not RE_NEW_STYLE_CLASS.match(normalized):
        yield (0, "H238: old style class declaration, "
                  "use new style (inherit from `object`)")
|
def check_backends(self, service_id, version_number):
    """Performs a health check against each backend in version.

    If the backend has a specific type of healthcheck, that one is
    performed, otherwise a HEAD request to / is performed.  The first item
    is the details on the Backend itself.  The second item is details of
    the specific HTTP request performed as a health check.  The third item
    is the response details.
    """
    url = "/service/%s/version/%d/backend/check_all" % (service_id, version_number)
    # TODO: Use a strong-typed class for output?
    return self._fetch(url)
|
def add_detection_pattern(self, m):
    """This method adds the detection patterns to the QR code.  This lets
    the scanner orient the pattern.  It is required for all QR codes.
    The detection pattern consists of three boxes located at the upper
    left, upper right, and lower left corners of the matrix.  Also, two
    special lines called the timing pattern are necessary.  Finally,
    a single black pixel is added just above the lower left black box.

    ``m`` is the square QR matrix (list of lists) and is modified in
    place; negative indices mirror each drawing step into the other
    corners in one pass.
    """
    # Draw outer black box (7x7, mirrored into three corners via the
    # negative indices -1/-7 and inv).
    for i in range(7):
        inv = -(i + 1)
        for j in [0, 6, -1, -7]:
            m[j][i] = 1
            m[i][j] = 1
            m[inv][j] = 1
            m[j][inv] = 1
    # Draw inner white box
    for i in range(1, 6):
        inv = -(i + 1)
        for j in [1, 5, -2, -6]:
            m[j][i] = 0
            m[i][j] = 0
            m[inv][j] = 0
            m[j][inv] = 0
    # Draw inner black box (3x3 core of each finder pattern)
    for i in range(2, 5):
        for j in range(2, 5):
            inv = -(i + 1)
            m[i][j] = 1
            m[inv][j] = 1
            m[j][inv] = 1
    # Draw white border (separator) around each finder pattern
    for i in range(8):
        inv = -(i + 1)
        for j in [7, -8]:
            m[i][j] = 0
            m[j][i] = 0
            m[inv][j] = 0
            m[j][inv] = 0
    # To keep the code short, it draws an extra box
    # in the lower right corner, this removes it.
    for i in range(-8, 0):
        for j in range(-8, 0):
            m[i][j] = ' '
    # Add the timing pattern: alternating modules along row/column 6
    # between the finder patterns.
    bit = itertools.cycle([1, 0])
    for i in range(8, (len(m) - 8)):
        b = next(bit)
        m[i][6] = b
        m[6][i] = b
    # Add the extra black pixel (the fixed dark module above the
    # lower-left finder pattern).
    m[-8][8] = 1
|
def dump_collection(cfg, f, indent=0):
    '''Save a collection of attributes.

    Writes each value via ``dump_value`` and separates consecutive entries
    with ``,\\n`` (no trailing separator after the last one).
    '''
    last_index = len(cfg) - 1
    for index, value in enumerate(cfg):
        dump_value(None, value, f, indent)
        if index != last_index:
            f.write(u',\n')
|
def getProjectAreaIDs(self, projectarea_name=None, archived=False):
    """Get all :class:`rtcclient.project_area.ProjectArea` id(s)
    by project area name.

    If `projectarea_name` is `None`, the ids of all
    :class:`rtcclient.project_area.ProjectArea` objects are returned.

    :param projectarea_name: the project area name
    :param archived: (default is False) whether the project area
        is archived
    :return: a :class:`list` that contains all the :class:`ProjectArea` ids,
        or None if no project areas exist
    :rtype: list
    :raises exception.BadValue: for a non-string or empty name
    """
    if projectarea_name is None:
        projectareas = self.getProjectAreas(archived=archived)
        if projectareas is None:
            return None
        return [proj_area.id for proj_area in projectareas]
    if projectarea_name and isinstance(projectarea_name, six.string_types):
        return [self.getProjectAreaID(projectarea_name, archived=archived)]
    error_msg = "Invalid ProjectArea name: [%s]" % projectarea_name
    self.log.error(error_msg)
    raise exception.BadValue(error_msg)
|
def print_request(request):
    """Prints a prepared request to give the user info as to what they're
    sending.

    :param request.PreparedRequest request: PreparedRequest object to be printed
    :return: Nothing
    """
    start_banner = '-----------START-----------'
    request_line = request.method + ' ' + request.url
    header_block = '\n'.join('{}: {}'.format(k, v)
                             for k, v in request.headers.items())
    print('{}\n{}\n{}\n\n{}'.format(start_banner, request_line,
                                    header_block, request.body))
|
def list_team_members(team_name, profile="github", ignore_cache=False):
    '''Gets the names of team members in lower case.

    team_name
        The name of the team from which to list members.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    ignore_cache
        Bypasses the use of cached team members.

    CLI Example:

    .. code-block:: bash

        salt myminion github.list_team_members 'team_name'

    .. versionadded:: 2016.11.0
    '''
    cached_team = get_team(team_name, profile=profile)
    if not cached_team:
        log.error('Team %s does not exist.', team_name)
        return False
    # Return from cache if available
    if cached_team.get('members') and not ignore_cache:
        return cached_team.get('members')
    try:
        client = _get_client(profile)
        organization = client.get_organization(_get_config_value(profile, 'org_name'))
        team = organization.get_team(cached_team['id'])
    except UnknownObjectException:
        log.exception('Resource not found: %s', cached_team['id'])
        # BUG FIX: previously execution fell through here and referenced the
        # unbound local `team`, raising NameError.  Return an empty member
        # list, consistent with the handler below.
        return []
    try:
        cached_team['members'] = [member.login.lower() for member in team.get_members()]
        return cached_team['members']
    except UnknownObjectException:
        log.exception('Resource not found: %s', cached_team['id'])
        return []
|
def enable_parallel_lz4(mode):
    """Set the global multithread compression mode.

    Parameters
    ----------
    mode : `bool`
        True: Use parallel compression.  False: Use sequential compression.
    """
    global ENABLE_PARALLEL
    ENABLE_PARALLEL = bool(mode)
    mode_label = "multi-threaded" if mode else "single-threaded"
    logger.info("Setting parallelisation mode to {}".format(mode_label))
|
def disconnectMsToNet(Facility_presence=0, UserUser_presence=0,
                      SsVersionIndicator_presence=0):
    """Disconnect Section 9.3.7.2

    Builds the layer-3 DISCONNECT message (MS to network): mandatory
    protocol discriminator, message type 0x25 and Cause IE, followed by
    the optional Facility, User-user and SS-version IEs when the
    corresponding ``*_presence`` flag equals 1.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x25)
    # 00100101
    c = Cause()
    packet = a / b / c
    # BUG FIX: the original used `is 1` (identity comparison), which only
    # works via CPython's small-int cache and breaks for e.g. numpy ints;
    # use value equality instead.
    if Facility_presence == 1:
        d = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
        packet = packet / d
    if UserUser_presence == 1:
        e = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
        packet = packet / e
    if SsVersionIndicator_presence == 1:
        f = SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
        packet = packet / f
    return packet
|
def close(self):
    """Stop serving the :attr:`.Server.sockets` and close all
    concurrent connections."""
    # Detach the transport list first so re-entrant calls observe an
    # empty list and each transport is closed exactly once.
    active, self.transports = self.transports, []
    for transport in active:
        transport.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.