signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def diagnostics(self):
    """Dictionary access to all diagnostic variables.

    :type: dict
    """
    diag_dict = {}
    for key in self._diag_vars:
        # Use self.__dict__ rather than getattr(self, key) because
        # __dict__ does not pick up diagnostics defined as properties
        # (preserving the original author's intent, per their comment).
        try:
            diag_dict[key] = self.__dict__[key]
        except KeyError:
            # Was a bare `except:`; only a missing instance attribute
            # should be skipped — anything else ought to propagate.
            pass
    return diag_dict
|
def validate_document_class(option, value):
    """Validate the document_class option.

    :param option: name of the option being validated (used in the error message)
    :param value: the candidate document class
    :return: ``value`` unchanged when it is an acceptable document class
    :raises TypeError: if ``value`` is not a MutableMapping subclass or
        RawBSONDocument
    """
    if not issubclass(value, (abc.MutableMapping, RawBSONDocument)):
        # Fixed typo in the original message: "sublass" -> "subclass".
        raise TypeError("%s must be dict, bson.son.SON, "
                        "bson.raw_bson.RawBSONDocument, or a "
                        "subclass of collections.MutableMapping" % (option,))
    return value
|
def do_close(self, reason):
    """Tear down the channel when channel_close() is received.

    Idempotent: calls after the first are no-ops.
    """
    if self.closed:
        return  # already torn down
    self.closed = True
    self.reason = reason
    for queue in (self.incoming, self.responses):
        queue.close()
    # Only report a failure back to the client when the close was
    # not initiated locally.
    if not self._closing:
        self.client.channel_failed(self, Failure(reason))
|
def types_of_coordination_environments(self, anonymous=False):
    """Extract information on the different co-ordination environments
    present in the graph.

    :param anonymous: if True, replace specie names with A, B, C, etc.
    :return: a sorted list of co-ordination environments,
        e.g. ['Mo-S(6)', 'S-Mo(3)']
    """
    environments = set()
    for site_index, site in enumerate(self.structure):
        central = site.species_string
        neighbours = [conn.site.species_string
                      for conn in self.get_connected_sites(site_index)]
        # (count, species) pairs for each distinct neighbour, largest first.
        tallies = sorted(((neighbours.count(sp), sp) for sp in set(neighbours)),
                         reverse=True)
        if anonymous:
            # Relabel species: the centre is always 'A'; neighbours get
            # 'B'..'Z' in order of appearance in the sorted tally.
            relabel = {central: 'A'}
            letters = [chr(66 + i) for i in range(25)]
            for _, sp in tallies:
                if sp not in relabel:
                    relabel[sp] = letters.pop(0)
            central = 'A'
            tallies = [(count, relabel[sp]) for count, sp in tallies]
        formatted = ["{}({})".format(sp, count) for count, sp in tallies]
        environments.add('{}-{}'.format(central, ','.join(formatted)))
    return sorted(list(environments))
|
def read_galprop_rings_yaml(self, galkey):
    """Read the yaml file for a particular galprop key.

    :param galkey: galprop key identifying which rings yaml file to read
    :return: the parsed yaml content
    """
    galprop_rings_yaml = self._name_factory.galprop_rings_yaml(galkey=galkey,
                                                               fullpath=True)
    # Use a context manager so the file handle is closed deterministically;
    # the original passed a bare open() and left closing to the GC.
    with open(galprop_rings_yaml) as fin:
        return yaml.safe_load(fin)
|
def attention_lm_moe_24b_diet():
    """Unnecessarily large model with 24B params - because we can.

    Starts from the large diet configuration and scales up the
    mixture-of-experts dimensions and the batch size.
    """
    hparams = attention_lm_moe_large_diet()
    overrides = {
        "moe_hidden_sizes": "12288",
        "moe_num_experts": 1024,
        "batch_size": 4096,
    }
    for field, value in overrides.items():
        setattr(hparams, field, value)
    return hparams
|
def _file_type ( file ) :
"""Function intended for identification of the file type .
Parameters
file : file path
File path .
Returns
out : str
Identified file type ."""
|
# % % % % % Verification of file type % % % % %
if "." in file : # File with known extension .
file_type = file . split ( "." ) [ - 1 ]
else : # File without known extension .
file_type = magic . from_file ( file , mime = True ) . split ( "/" ) [ - 1 ]
return file_type
|
def print_object_results(obj_result):
    """Print the results of validating an object.

    Args:
        obj_result: An ObjectValidationResults instance.
    """
    print_results_header(obj_result.object_id, obj_result.is_valid)
    # Emit each non-empty result category with its dedicated printer.
    for findings, printer in ((obj_result.warnings, print_warning_results),
                              (obj_result.errors, print_schema_results)):
        if findings:
            printer(obj_result, 1)
|
def safe_power(a, b):
    """Compute a ** b, refusing operands that are too large.

    :param a: Number a
    :param b: Number b
    :return: a ** b
    :raises ValueError: if |a| or |b| exceeds MAX_POWER
    """
    # Guard against absurdly large exponentiations before evaluating.
    if max(abs(a), abs(b)) > MAX_POWER:
        raise ValueError('Number too high!')
    return a ** b
|
def _uncompress_file(self, path):
    '''Writes the current file into a file in the temporary directory.
    If that doesn't work, create a new file in the CDFs directory.'''
    with self.file.open('rb') as f:
        # Read the compression record (CCR); the record layout differs
        # between CDF version 3 and earlier versions.
        if (self.cdfversion == 3):
            data_start, data_size, cType, _ = self._read_ccr(8)
        else:
            data_start, data_size, cType, _ = self._read_ccr2(8)
        # cType 5 is gzip; any other compression type is not handled here.
        if cType != 5:
            return
        f.seek(data_start)
        decompressed_data = gzip.decompress(f.read(data_size))
        # NOTE(review): NamedTemporaryFile appears to be used only to reserve
        # a unique path; its handle is discarded immediately — confirm this
        # is intended (the file is re-created by newpath.open below).
        newpath = pathlib.Path(tempfile.NamedTemporaryFile(suffix='.cdf').name)
        with newpath.open('wb') as g:
            # Re-emit an uncompressed CDF v3 magic number ahead of the
            # decompressed payload.
            g.write(bytearray.fromhex('cdf30001'))
            g.write(bytearray.fromhex('0000ffff'))
            g.write(decompressed_data)
        return newpath
|
def clone(self, spec=None, **overrides):
    """Clones the Dimension with new parameters.

    Derive a new Dimension that inherits existing parameters
    except for the supplied, explicit overrides.

    Args:
        spec (tuple, optional): Dimension tuple specification
        **overrides: Dimension parameter overrides

    Returns:
        Cloned Dimension object
    """
    settings = dict(self.get_param_values(), **overrides)
    if spec is None:
        # No spec supplied: keep the current name; a 'label' override
        # (if any) replaces the current label.
        spec = (self.name, overrides.get('label', self.label))
    # NOTE(review): 'basestring' implies Python 2 (or a compat alias defined
    # elsewhere in this module) — confirm which applies.
    if 'label' in overrides and isinstance(spec, basestring):
        spec = (spec, overrides['label'])
    elif 'label' in overrides and isinstance(spec, tuple):
        if overrides['label'] != spec[1]:
            # The keyword label wins over the tuple-supplied label; warn.
            self.param.warning('Using label as supplied by keyword ({!r}), ignoring '
                               'tuple value {!r}'.format(overrides['label'], spec[1]))
        spec = (spec[0], overrides['label'])
    # 'name' and 'label' travel via the spec, so strip them from settings.
    return self.__class__(spec, **{k: v for k, v in settings.items()
                                   if k not in ['name', 'label']})
|
def readCache(self, filename):
    """Load the module graph from a pickle cache file.

    NOTE: pickle deserializes arbitrary objects — only load caches that
    this tool itself wrote.
    """
    with open(filename, 'rb') as cache_file:
        self.modules = pickle.load(cache_file)
|
def convertTime(self, time):
    """Convert a datetime object representing a time into a human-ready
    string that can be read, spoken aloud, etc.

    Args:
        time (datetime.date): A datetime object to be converted into text.

    Returns:
        A string representation of the input time, ignoring any day-related
        information.
    """
    # Omit the minutes entirely when they are zero ("7 PM", not "7:00 PM").
    fmt = "%I:%M %p" if time.minute else "%I %p"
    rendered = time.strftime(fmt)
    # %I zero-pads the hour; drop the leading zero ("07:30" -> "7:30").
    if rendered.startswith("0"):
        rendered = rendered[1:]
    return rendered
|
def toimage(self, width=None, height=None):
    '''Return the current scene as a PIL Image.

    **Example**

    You can build your molecular viewer as usual and dump an image
    at any resolution supported by the video card (up to the
    memory limits)::

        v = QtViewer()
        # Add the renderers
        v.add_renderer(...)
        # Add post processing effects
        v.add_post_processing(...)
        # Move the camera
        v.widget.camera.autozoom(...)
        v.widget.camera.orbit_x(...)
        v.widget.camera.orbit_y(...)
        # Save the image
        image = v.widget.toimage(1024, 768)
        image.save("mol.png")

    .. seealso::
        https://pillow.readthedocs.org/en/latest/PIL.html#module-PIL.Image
    '''
    from .postprocessing import NoEffect
    # Temporarily attach a no-op post-processing effect: its render target
    # exposes a texture containing the final composited frame.
    effect = NoEffect(self)
    self.post_processing.append(effect)
    oldwidth, oldheight = self.width(), self.height()
    # self.initializeGL()
    if None not in (width, height):
        # Render at the explicitly requested resolution.
        self.resize(width, height)
        self.resizeGL(width, height)
    else:
        width = self.width()
        height = self.height()
    self.paintGL()
    self.post_processing.remove(effect)
    coltex = effect.texture
    coltex.bind()
    glActiveTexture(GL_TEXTURE0)
    # Pull the rendered RGBA pixels back from the GPU.
    data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE)
    # Stride -1 flips vertically: OpenGL's origin is bottom-left,
    # PIL's is top-left.
    image = pil_Image.frombuffer('RGBA', (width, height), data, 'raw', 'RGBA', 0, -1)
    # self.resize(oldwidth, oldheight)
    # self.resizeGL(oldwidth, oldheight)
    return image
|
def check_errors(self, is_global=False):
    """Checks for errors and exits if any of them are critical.

    Args:
        is_global: If True, check the global_errors attribute. If False,
            check the errors attribute.
    """
    error_list = self.global_errors if is_global else self.errors
    if not error_list:
        return
    print('dfTimewolf encountered one or more errors:')
    for message, is_critical in error_list:
        prefix = 'CRITICAL: ' if is_critical else ''
        print('{0:s} {1:s}'.format(prefix, message))
        if is_critical:
            # A critical error aborts the whole run immediately.
            print('Critical error found. Aborting.')
            sys.exit(-1)
|
def export_lane_set(process, lane_set, plane_element):
    """Creates 'laneSet' element for exported BPMN XML file.

    :param process: an XML element ('process'), from exported BPMN 2.0 document,
    :param lane_set: dictionary with exported 'laneSet' element attributes and child elements,
    :param plane_element: XML object, representing 'plane' element of exported BPMN 2.0 XML.
    """
    lane_set_element = eTree.SubElement(process, consts.Consts.lane_set)
    # Export every lane in the set under the freshly created laneSet element.
    for lane_id, lane_attributes in lane_set[consts.Consts.lanes].items():
        BpmnDiagramGraphExport.export_lane(lane_set_element, lane_id,
                                           lane_attributes, plane_element)
|
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
         validator=lambda x: x != '', clean=lambda x: x.strip(),
         strict=True, default=None):
    """Start a read-validate-print loop.

    Reads user input, validates it, and loops until the entered value
    passes validation, then returns it.

    ``error`` may be a message or a callable invoked with the invalid value
    to build the message; exceptions raised by the callable are not trapped.
    When ``intro`` is given it is printed above the prompt. ``validator``
    checks the input (default: any non-empty value). ``clean`` is forwarded
    to ``read()``. When ``strict`` is False, the first invalid value returns
    ``default`` instead of looping.
    """
    if intro:
        self.pstd(utils.rewrap_long(intro))
    answer = self.read(prompt, clean)
    while not validator(answer):
        if not strict:
            # Non-strict mode: give up on the first invalid value.
            return default
        message = error(answer) if callable(error) else error
        self.perr(message)
        answer = self.read(prompt, clean)
    return answer
|
def cmd_zip(zip_file, sources, template=None, cwd=None, runas=None):
    '''
    .. versionadded:: 2015.5.0
        In versions 2014.7.x and earlier, this function was known as
        ``archive.zip``.

    Uses the ``zip`` command to create zip files. This command is part of the
    `Info-ZIP`_ suite of tools, and is typically packaged as simply ``zip``.

    .. _`Info-ZIP`: http://www.info-zip.org/

    zip_file
        Path of zip file to be created

    sources
        Comma-separated list of sources to include in the zip file. Sources
        can also be passed in a Python list. Globbing is supported
        (2017.7.0 and later).

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution.

    cwd : None
        Use this argument along with relative paths in ``sources`` to create
        zip files which do not contain the leading directories. If not
        specified, the zip file will be created as if the cwd was ``/``.

        .. versionadded:: 2014.7.1

    runas : None
        Create the zip file as the specified user. Defaults to the user under
        which the minion is running.

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' archive.cmd_zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2
    '''
    cmd = ['zip', '-r', '{0}'.format(zip_file)]
    cmd.extend(_expand_sources(sources))
    # python_shell=False: the argv list is passed directly, so no shell
    # interpolation of the paths can occur.
    return __salt__['cmd.run'](cmd,
                               cwd=cwd,
                               template=template,
                               runas=runas,
                               python_shell=False).splitlines()
|
def set_alternative(self, experiment_name, alternative):
    """Explicitly set the alternative the user is enrolled in for the
    specified experiment.

    This allows you to change a user between alternatives. The user and goal
    counts for the new alternative will be incremented, but those for the old
    one will not be decremented. The user will be enrolled in the experiment
    even if the experiment would not normally accept this user.
    """
    experiment = experiment_manager.get_experiment(experiment_name)
    if not experiment:
        # Unknown experiment: nothing to enroll in.
        return
    self._set_enrollment(experiment, alternative)
|
def bbox_horz_aligned(box1, box2):
    """Returns true if the vertical center point of either span is within the
    vertical range of the other."""
    if not (box1 and box2):
        return False
    # Shrink each box by 1.5 units at top and bottom so a tiny sliver of
    # overlap does not count as alignment.
    top1, bottom1 = box1.top + 1.5, box1.bottom - 1.5
    top2, bottom2 = box2.top + 1.5, box2.bottom - 1.5
    # Aligned iff the shrunken vertical ranges intersect (De Morgan of the
    # original disjointness test).
    return top1 <= bottom2 and top2 <= bottom1
|
def match_path_against(pathname, patterns, case_sensitive=True):
    """Determines whether the pathname matches any of the given wildcard
    patterns, optionally ignoring the case of the pathname and patterns.

    :param pathname:
        A path name that will be matched against a wildcard pattern.
    :param patterns:
        A list of wildcard patterns to match_path the filename against.
    :param case_sensitive:
        ``True`` if the matching should be case-sensitive; ``False`` otherwise.
    :returns:
        ``True`` if the pattern matches; ``False`` otherwise.

    Doctests::
        >>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False)
        True
        >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True)
        False
        >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False)
        True
    """
    if case_sensitive:
        matcher, normalize = fnmatchcase, (lambda w: w)
    else:
        # Case-insensitive: lowercase both the path and every pattern.
        matcher, normalize = fnmatch, _string_lower
        pathname = pathname.lower()
    # Deduplicate patterns and short-circuit on the first match.
    return any(matcher(pathname, normalize(pattern)) for pattern in set(patterns))
|
def list_by_group(self, id_egroup):
    """Search Group Equipment by its identifier.

    :param id_egroup: Identifier of the Group Equipment. Integer value and
        greater than zero.
    :return: Dictionary with an 'equipaments' list, each entry carrying the
        equipment's 'nome', 'grupos', 'mark', 'modelo', 'tipo_equipamento',
        'model', 'type' and 'id' fields.
    :raise InvalidParameterError: Group Equipment is null and invalid.
    :raise GrupoEquipamentoNaoExisteError: Group Equipment not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if id_egroup is None:
        raise InvalidParameterError(
            u'The identifier of Group Equipament is invalid or was not informed.')
    url = 'equipment/group/{0}/'.format(id_egroup)
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
|
def select(self, **kws):
    '''Find all servers with indicated protocol support. Shuffled.
    Filter by TOR support, and pruning level.'''
    matching = [server for server in self.values() if server.select(**kws)]
    # Shuffle so callers do not always hammer the same server first.
    random.shuffle(matching)
    return matching
|
def filter(self, value, model=None, context=None):
    """Filter

    Performs value filtering and returns filtered result.

    :param value: input value
    :param model: parent model being validated
    :param context: object, filtering context
    :return: filtered value
    """
    # Coerce to text first so non-string inputs (ints, etc.) are accepted,
    # then uppercase the result.
    return str(value).upper()
|
def constructor(self, name=None, function=None, return_type=None,
                arg_types=None, header_dir=None, header_file=None,
                recursive=None):
    """Returns reference to the constructor declaration that matches the
    defined criteria."""
    matcher = self._impl_matchers[scopedef_t.constructor]
    decl_type = self._impl_decl_types[scopedef_t.constructor]
    return self._find_single(matcher,
                             name=name,
                             function=function,
                             decl_type=decl_type,
                             return_type=return_type,
                             arg_types=arg_types,
                             header_dir=header_dir,
                             header_file=header_file,
                             recursive=recursive)
|
def get_int_relative(strings: Sequence[str], prefix1: str, delta: int,
                     prefix2: str,
                     ignoreleadingcolon: bool = False) -> Optional[int]:
    """Fetches an int parameter via :func:`get_string_relative`."""
    raw = get_string_relative(strings, prefix1, delta, prefix2,
                              ignoreleadingcolon=ignoreleadingcolon)
    return get_int_raw(raw)
|
def box(df, s=None, title_from=None, subplots=False, figsize=(18, 6),
        groups=None, fcol=None, ecol=None, hatch=None, ylabel="", xlabel=""):
    """Generate a box plot from pandas DataFrame with sample grouping.

    Plot group mean, median and deviations for specific values (proteins) in
    the dataset. Plotting is controlled via the `s` param, which is used as a
    search string along the y-axis. All matching values will be returned and
    plotted. Multiple search values can be provided as a `list` of `str` and
    these will be searched as an `and` query.

    Box fill and edge colors can be controlled on a full-index basis by
    passing a `dict` of indexer:color to `fcol` and `ecol` respectively.
    Box hatching can be controlled by passing a `dict` of indexer:hatch
    to `hatch`.

    :param df: Pandas `DataFrame`
    :param s: `str` search y-axis for matching values (case-insensitive)
    :param title_from: `list` of `str` of index levels to generate title from
    :param subplots: `bool` use subplots to separate plot groups
    :param figsize: `tuple` of `int` size of resulting figure
    :param groups:
    :param fcol: `dict` of `str` indexer:color where color is hex value or matplotlib color code
    :param ecol: `dict` of `str` indexer:color where color is hex value or matplotlib color code
    :param hatch: `dict` of `str` indexer:hatch where hatch is matplotlib hatch descriptor
    :param ylabel: `str` ylabel for boxplot
    :param xlabel: `str` xlabel for boxplot
    :return: `list` of `Figure`
    """
    df = df.copy()
    if type(s) == str:
        s = [s]
    if title_from is None:
        title_from = list(df.index.names)
    if groups is None:
        groups = list(set(df.columns.get_level_values(0)))
    # Build the combined name/info string using label_from; replace the index
    title_idxs = get_index_list(df.index.names, title_from)
    df.index = [build_combined_label(r, title_idxs) for r in df.index.values]
    if s:
        # Filter the table on the match string(s): every search term must
        # appear (case-insensitively) in the combined label.
        df = df.iloc[[all([str(si).lower() in l.lower() for si in s])
                      for l in df.index.values]]
    figures = []
    # Iterate each matching row, building the correct structure dataframe
    for ix in range(df.shape[0]):
        dfi = pd.DataFrame(df.iloc[ix]).T
        label = dfi.index.values[0]
        dfi = process.fold_columns_to_rows(dfi, levels_from=len(df.columns.names) - 1)
        # Decide subplot layout: explicit list, inferred from a MultiIndex,
        # or a single combined plot.
        if subplots:
            gs = gridspec.GridSpec(1, len(subplots),
                                   width_ratios=[dfi[sp].shape[1] for sp in subplots])
            subplotl = subplots
        elif isinstance(dfi.columns, pd.MultiIndex) and len(dfi.columns.levels) > 1:
            subplotl = dfi.columns.levels[0]
            gs = gridspec.GridSpec(1, len(subplotl),
                                   width_ratios=[dfi[sp].shape[1] for sp in subplotl])
        else:  # Subplots
            subplotl = [None]
            gs = gridspec.GridSpec(1, 1)
        first_ax = None
        fig = plt.figure(figsize=figsize)
        for n, sp in enumerate(subplotl):
            if sp is None:
                dfp = dfi
            else:
                dfp = dfi[sp]
            # All subplots share the y axis of the first one.
            ax = fig.add_subplot(gs[n], sharey=first_ax)
            # print(dfp.median(axis=1, level=0).reset_index())
            medians = dfp.median(axis=1, level=0).reset_index()
            # .set_index('Replicate')  # .dropna(axis=1)
            if groups and all([g in medians.columns.get_level_values(0) for g in groups]):
                medians = medians[groups]
            ax, dic = medians.plot(kind='box', return_type='both',
                                   patch_artist=True, ax=ax, )
            ax.set_xlabel('')
            # NOTE(review): this inner loop reuses `n`, shadowing the subplot
            # index from the enclosing loop — confirm that is intentional.
            for n, c in enumerate(medians.columns.values):
                if sp is None:
                    hier = []
                else:
                    hier = [sp]
                if type(c) == tuple:
                    hier.extend(c)
                else:
                    hier.append(c)
                # Apply per-box styling resolved through the hierarchy key.
                if fcol:
                    color = hierarchical_match(fcol, hier, None)
                    if color:
                        dic['boxes'][n].set_color(color)
                if ecol:
                    color = hierarchical_match(ecol, hier, None)
                    if color:
                        dic['boxes'][n].set_edgecolor(color)
                if hatch:
                    dic['boxes'][n].set_hatch(hierarchical_match(hatch, hier, ''))
            ax.set_xlabel(xlabel)
            ax.tick_params(axis='both', which='major', labelsize=12)
            if first_ax is None:
                first_ax = ax
            else:
                # Hide duplicate y tick labels on the shared axis.
                for yl in ax.get_yticklabels():
                    yl.set_visible(False)
        first_ax.set_ylabel(ylabel, fontsize=14)
        fig.subplots_adjust(wspace=0.05)
        fig.suptitle(label)
        figures.append(fig)
    return figures
|
async def _receive_reconfig_param(self, param):
    """Handle a RE-CONFIG parameter."""
    self.__log_debug('<< %s', param)
    if isinstance(param, StreamResetOutgoingParam):
        # mark closed inbound streams
        for stream_id in param.streams:
            self._inbound_streams.pop(stream_id, None)
            # close data channel
            channel = self._data_channels.get(stream_id)
            if channel:
                self._data_channel_close(channel)
        # send response
        response_param = StreamResetResponseParam(
            response_sequence=param.request_sequence, result=1)
        self._reconfig_response_seq = param.request_sequence
        await self._send_reconfig_param(response_param)
    elif isinstance(param, StreamAddOutgoingParam):
        # increase inbound streams
        self._inbound_streams_count += param.new_streams
        # send response
        response_param = StreamResetResponseParam(
            response_sequence=param.request_sequence, result=1)
        self._reconfig_response_seq = param.request_sequence
        await self._send_reconfig_param(response_param)
    elif isinstance(param, StreamResetResponseParam):
        # Only act on a response that matches our own outstanding request.
        if (self._reconfig_request and
                param.response_sequence == self._reconfig_request.request_sequence):
            # mark closed streams
            for stream_id in self._reconfig_request.streams:
                self._outbound_stream_seq.pop(stream_id, None)
                self._data_channel_closed(stream_id)
            # Request fulfilled; allow the next queued reconfig to go out.
            self._reconfig_request = None
            await self._transmit_reconfig()
|
def volumes(val, **kwargs):  # pylint: disable=unused-argument
    '''Should be a list of absolute paths'''
    val = helpers.translate_stringlist(val)
    # Reject any entry that is not an absolute path.
    for path in val:
        if os.path.isabs(path):
            continue
        raise SaltInvocationError('\'{0}\' is not an absolute path'.format(path))
    return val
|
def start(self, measurementId, durationInSeconds=None):
    """Initialises the device if required then enters a read loop taking data
    from the provider and passing it to the handler. It will continue until
    either breakRead is true or the duration (if provided) has passed.

    :param measurementId: identifier used in logging and handler callbacks.
    :param durationInSeconds: optional maximum measurement length; when None
        the loop runs until breakRead is set.
    :return:
    """
    logger.info(">> measurement " + measurementId +
                ((" for " + str(durationInSeconds)) if durationInSeconds is not None
                 else " until break"))
    self.failureCode = None
    self.measurementOverflowed = False
    self.dataHandler.start(measurementId)
    self.breakRead = False
    self.startTime = time.time()
    self.doInit()
    # this must follow doInit because doInit sets status to INITIALISED
    self.status = RecordingDeviceStatus.RECORDING
    elapsedTime = 0
    try:
        self._sampleIdx = 0
        while True:
            logger.debug(measurementId + " provideData ")
            self.dataHandler.handle(self.provideData())
            elapsedTime = time.time() - self.startTime
            if self.breakRead or durationInSeconds is not None and elapsedTime > durationInSeconds:
                logger.debug(measurementId + " breaking provideData")
                self.startTime = 0
                break
    except Exception:
        # Was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should mark a failure.
        self.status = RecordingDeviceStatus.FAILED
        self.failureCode = str(sys.exc_info())
        logger.exception(measurementId + " failed")
    finally:
        # Sanity-check that we collected roughly the expected sample count.
        expectedSamples = self.fs * (durationInSeconds if durationInSeconds is not None else elapsedTime)
        if self._sampleIdx < expectedSamples:
            self.status = RecordingDeviceStatus.FAILED
            self.failureCode = "Insufficient samples " + str(self._sampleIdx) + \
                " for " + str(elapsedTime) + " second long run, expected " + str(expectedSamples)
        self._sampleIdx = 0
        if self.measurementOverflowed:
            self.status = RecordingDeviceStatus.FAILED
            self.failureCode = "Measurement overflow detected"
        if self.status == RecordingDeviceStatus.FAILED:
            logger.error("<< measurement " + measurementId + " - FAILED - " + self.failureCode)
        else:
            self.status = RecordingDeviceStatus.INITIALISED
            logger.info("<< measurement " + measurementId + " - " + self.status.name)
        self.dataHandler.stop(measurementId, self.failureCode)
        if self.status == RecordingDeviceStatus.FAILED:
            # Leave the device usable for the next measurement attempt.
            logger.warning("Reinitialising device after measurement failure")
            self.doInit()
|
def _GetNormalizedTimestamp ( self ) :
"""Retrieves the normalized timestamp .
Returns :
decimal . Decimal : normalized timestamp , which contains the number of
seconds since January 1 , 1970 00:00:00 and a fraction of second used
for increased precision , or None if the normalized timestamp cannot be
determined ."""
|
if self . _normalized_timestamp is None :
if ( self . _timestamp is not None and self . _timestamp >= 0 and self . _timestamp <= self . _UINT32_MAX ) :
self . _normalized_timestamp = ( decimal . Decimal ( self . _timestamp ) - self . _HFS_TO_POSIX_BASE )
return self . _normalized_timestamp
|
def app_run(app_name_or_id, alias=None, input_params={}, always_retry=True, **kwargs):
    """Invokes the /app-xxxx/run API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method:-/app-xxxx%5B/yyyy%5D/run
    """
    input_params_cp = Nonce.update_nonce(input_params)
    # A truthy alias selects a specific version/tag of the app.
    parts = [app_name_or_id]
    if alias:
        parts.append(alias)
    fully_qualified_version = '/'.join(parts)
    return DXHTTPRequest('/%s/run' % fully_qualified_version, input_params_cp,
                         always_retry=always_retry, **kwargs)
|
def run(self):
    """Starts Pyro naming server with command line arguments (see
    pyro documentation)"""
    args = []
    for arg in self.args:
        args.append(arg)
    Args = Pyro.util.ArgParser()
    Args.parse(args, 'hkmrvxn:p:b:c:d:s:i:1:2:')
    hostname = Args.getOpt('n', None)
    identification = Args.getOpt('i', None)
    # NOTE(review): port is hard-coded to None and never read from the
    # parsed options (the 'p' option is parsed but unused), so the int()
    # conversion below is dead code — confirm whether this is intended.
    port = None
    useNameServer = True
    if port:
        port = int(port)
    # norange is only True when an explicit port of 0 was given.
    norange = (port == 0)
    self.starter = Pyro.EventService.Server.EventServiceStarter(
        identification=identification)
    self.starter.start(hostname, port, useNameServer=useNameServer, norange=norange)
|
def create_view(self, dataset, view, query, use_legacy_sql=None, project_id=None):
    """Create a new view in the dataset.

    Parameters
    ----------
    dataset : str
        The dataset to create the view in
    view : str
        The name of the view to create
    query : dict
        A query that BigQuery executes when the view is referenced.
    use_legacy_sql : bool, optional
        If False, the query will use BigQuery's standard SQL
        (https://cloud.google.com/bigquery/sql-reference/)
    project_id : str, optional
        The project to create the view in

    Returns
    -------
    Union[bool, dict]
        bool indicating if the view was successfully created or not,
        or response from BigQuery if swallow_results is set to False.
    """
    project_id = self._get_project_id(project_id)
    view_definition = {'query': query}
    if use_legacy_sql is not None:
        view_definition['useLegacySql'] = use_legacy_sql
    body = {
        'tableReference': {
            'tableId': view,
            'projectId': project_id,
            'datasetId': dataset,
        },
        'view': view_definition,
    }
    try:
        response = self.bigquery.tables().insert(
            projectId=project_id,
            datasetId=dataset,
            body=body,
        ).execute(num_retries=self.num_retries)
        return True if self.swallow_results else response
    except HttpError as e:
        logger.error(('Cannot create view {0}.{1}\n'
                      'Http Error: {2}').format(dataset, view, e.content))
        return False if self.swallow_results else {}
|
def _add_parameters ( self , parameter_map , parameter_list ) :
"""Populates the given parameter map with the list of parameters provided , resolving any reference objects encountered .
Args :
parameter _ map : mapping from parameter names to parameter objects
parameter _ list : list of either parameter objects or reference objects"""
|
for parameter in parameter_list :
if parameter . get ( '$ref' ) : # expand parameter from $ ref if not specified inline
parameter = self . specification [ 'parameters' ] . get ( parameter . get ( '$ref' ) . split ( '/' ) [ - 1 ] )
parameter_map [ parameter [ 'name' ] ] = parameter
|
def i2osp(self, long_integer, block_size):
    'Convert a long integer into an octet string.'
    encoded = '%X' % long_integer
    # Each octet holds two hex digits; round up for an odd digit count.
    if (len(encoded) + 1) // 2 > block_size:
        raise ValueError('integer %i too large to encode in %i octets'
                         % (long_integer, block_size))
    # Left-pad with zeros to exactly block_size octets, then decode.
    return a2b_hex(encoded.zfill(2 * block_size))
|
def process_text(text, citation=None, offline=False,
                 output_fname=default_output_fname, timeout=None):
    """Return a ReachProcessor by processing the given text.

    Parameters
    ----------
    text : str
        The text to be processed.
    citation : Optional[str]
        A PubMed ID passed to be used in the evidence for the extracted INDRA
        Statements. This is used when the text to be processed comes from
        a publication that is not otherwise identified. Default: None
    offline : Optional[bool]
        If set to True, the REACH system is ran offline. Otherwise (by
        default) the web service is called. Default: False
    output_fname : Optional[str]
        The file to output the REACH JSON output to.
        Defaults to reach_output.json in current working directory.
    timeout : Optional[float]
        This only applies when reading online (`offline=False`). Only wait
        for `timeout` seconds for the api to respond.

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements
        in rp.statements.
    """
    if offline:
        # Offline path: drive the REACH JVM directly through its ApiRuler.
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as e:
            logger.error(e)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateText(text, 'fries')
        except JavaException as e:
            logger.error('Could not process text.')
            logger.error(e)
            return None
        # REACH version < 1.3.3
        json_str = result_map.get('resultJson')
        if not json_str:
            # REACH version >= 1.3.3
            json_str = result_map.get('result')
        if not isinstance(json_str, bytes):
            json_str = json_str.encode('utf-8')
    else:
        # Online path: POST the text to the REACH web service.
        data = {'text': text.encode('utf-8')}
        try:
            res = requests.post(reach_text_url, data, timeout=timeout)
        except requests.exceptions.RequestException as e:
            logger.error('Could not connect to REACH service:')
            logger.error(e)
            return None
        # TODO: we could use res.json() here to get a dict
        # directly
        # This is a byte string
        json_str = res.content
    if not isinstance(json_str, bytes):
        raise TypeError('{} is {} instead of {}'.format(json_str,
                                                        json_str.__class__,
                                                        bytes))
    # Persist the raw REACH JSON alongside returning the processed result.
    with open(output_fname, 'wb') as fh:
        fh.write(json_str)
    return process_json_str(json_str.decode('utf-8'), citation)
|
def is_functional_group(self, atom, group):
    """Check whether a pybel atom belongs to the named functional group.

    :param atom: pybel Atom to classify (its OBAtom neighbors are inspected).
    :param group: one of 'quartamine', 'tertamine', 'sulfonium',
        'sulfonicacid', 'sulfate', 'phosphate', 'carboxylate', 'guanidine',
        'halocarbon'.
    :return: True/False when the atom's element matches the group family;
        implicitly None (falsy) when the element does not match at all.
    """
    # Atomic numbers of all explicit neighbors of this atom.
    n_atoms = [a_neighbor.GetAtomicNum() for a_neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom)]
    if group in ['quartamine', 'tertamine'] and atom.atomicnum == 7:  # Nitrogen
        # It's a nitrogen, so could be a protonated amine or quaternary ammonium.
        # BUGFIX: hydrogen is atomic number 1 (int); the previous check used the
        # string '1', which can never be a member of a list of ints, so the
        # "no hydrogen neighbor" condition was a no-op.
        if 1 not in n_atoms and len(n_atoms) == 4:
            # Quaternary ammonium (N with 4 residues != H)
            return group == 'quartamine'
        elif atom.OBAtom.GetHyb() == 3 and len(n_atoms) >= 3:
            # sp3-hybridized, so could pick up a hydrogen
            return group == 'tertamine'
        else:
            return False
    if group in ['sulfonium', 'sulfonicacid', 'sulfate'] and atom.atomicnum == 16:  # Sulfur
        if 1 not in n_atoms and len(n_atoms) == 3:
            # Sulfonium (S with 3 residues != H); same string-vs-int BUGFIX as above
            return group == 'sulfonium'
        elif n_atoms.count(8) == 3:  # Sulfonate or sulfonic acid
            return group == 'sulfonicacid'
        elif n_atoms.count(8) == 4:  # Sulfate
            return group == 'sulfate'
    if group == 'phosphate' and atom.atomicnum == 15:  # Phosphorus
        if set(n_atoms) == {8}:  # All neighbors are oxygen: phosphate
            return True
    if group in ['carboxylate', 'guanidine'] and atom.atomicnum == 6:  # Carbon
        if n_atoms.count(8) == 2 and n_atoms.count(6) == 1:  # Carboxylate group
            return group == 'carboxylate'
        elif n_atoms.count(7) == 3 and len(n_atoms) == 3:  # Guanidine group
            nitro_partners = []
            for nitro in pybel.ob.OBAtomAtomIter(atom.OBAtom):
                nitro_partners.append(len([b_neighbor for b_neighbor in pybel.ob.OBAtomAtomIter(nitro)]))
            # One nitrogen is only connected to the carbon, so it can pick up a H
            if min(nitro_partners) == 1:
                return group == 'guanidine'
    if group == 'halocarbon' and atom.atomicnum in [9, 17, 35, 53]:  # Halogen atoms
        c_neighbors = [na for na in pybel.ob.OBAtomAtomIter(atom.OBAtom) if na.GetAtomicNum() == 6]
        # Halogen bound to exactly one carbon: halocarbon
        return len(c_neighbors) == 1
|
def decode_safely(self, encoded_data):
    """Inverse for the `encode_safely` function: base-decode then unpickle.

    Returns ``settings.null`` (and emits a RuntimeWarning) when the payload
    cannot be decoded or deserialized, instead of raising.

    :param encoded_data: data previously produced by `encode_safely`.
    :return: the deserialized object, or ``settings.null`` on failure.
    """
    decoder = self.base_decoder
    result = settings.null
    try:
        # NOTE(review): pickle.loads on data from an external source is unsafe
        # for untrusted input -- confirm callers only feed trusted payloads.
        result = pickle.loads(decoder(encoded_data))
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any decode/unpickle failure still falls
        # back to the null sentinel.
        warnings.warn("Could not load and deserialize the data.", RuntimeWarning)
    return result
|
def attach(self, image_in, sampler=None, show=True):
    """Attach the relevant cross-sections of ``image_in`` to each axis.

    Parameters
    ----------
    image_in : ndarray
        The image to be attached to the collage. Must be at least 3D.
    sampler : str or list or callable
        Slice-selection strategy, applied between the first and last
        non-empty slice in each view/dimension:
        - 'linear': linearly spaced slices;
        - list: percentages (in [1-100], not [0-1]) at which to sample,
          e.g. ``range(40, 60, 5)`` or ``[5, 10, 15, 85, 90, 95]``;
        - callable: receives a 2D non-empty slice and returns True/False
          to indicate whether to select it.
        If the sampler yields more than the requested ``num_slices``, only
        the first ``num_slices`` are used. Defaults to ``self.sampler``.
    show : bool
        Flag to request immediate display of the collage.

    Raises
    ------
    ValueError
        If the image is not at least 3D, or its slices cannot be attached
        to the current collage.
    """
    if len(image_in.shape) < 3:
        raise ValueError('Image must be atleast 3D')
    # Allow the choice of a new sampling strategy per invocation.
    if sampler is None:
        temp_sampler = self.sampler
    else:
        temp_sampler = sampler
    slicer = SlicePicker(image_in=image_in, view_set=self.view_set, num_slices=self.num_slices, sampler=temp_sampler)
    try:
        for img_obj, slice_data in zip(self.images, slicer.get_slices()):
            img_obj.set_data(slice_data)
    except Exception as exc:
        self._data_attached = False
        # Chain the underlying error instead of silently discarding it
        # (previously a bare ``except`` hid the actual cause).
        raise ValueError('unable to attach the given image data to current collage') from exc
    else:
        self._data_attached = True
    # show all the axes
    if show:
        self.show()
|
def binary_operator(op):
    """Factory for binary operator methods on a Filter subclass.

    Returns a function suitable for implementing methods such as
    ``__and__`` or ``__or__``.
    """
    # When combining a Filter with a NumericalExpression, this attrgetter
    # defers to the commuted (right-binding) interpretation of the
    # NumericalExpression operator.
    commuted_getter = attrgetter(method_name_for_op(op, commute=True))

    def binary_operator(self, other):
        if isinstance(self, NumericalExpression):
            self_expr, other_expr, new_inputs = self.build_binary_op(op, other)
            expr = "({left}) {op} ({right})".format(
                left=self_expr,
                op=op,
                right=other_expr,
            )
            return NumExprFilter.create(expr, new_inputs)
        elif isinstance(other, NumericalExpression):
            # NumericalExpression overrides numerical ops to correctly handle
            # merging of inputs, so look up and call the appropriate
            # right-binding operator with ourself as the input.
            return commuted_getter(other)(self)
        elif isinstance(other, Term):
            if other.dtype != bool_dtype:
                raise BadBinaryOperator(op, self, other)
            if self is other:
                return NumExprFilter.create("x_0 {op} x_0".format(op=op), (self,))
            return NumExprFilter.create("x_0 {op} x_1".format(op=op), (self, other))
        elif isinstance(other, int):
            # bool is a subclass of int, so this covers booleans too.
            return NumExprFilter.create(
                "x_0 {op} {constant}".format(op=op, constant=int(other)),
                binds=(self,),
            )
        raise BadBinaryOperator(op, self, other)

    binary_operator.__doc__ = "Binary Operator: '%s'" % op
    return binary_operator
|
def simplify(graph):
    """Simplify the CFG by merging/deleting statement nodes when possible:
    If statement B follows statement A and if B has no other predecessor
    besides A, then we can merge A and B into a new statement node.
    We also remove nodes which do nothing except redirecting the control
    flow (nodes which only contains a goto).
    """
    redo = True
    # Keep passing over the graph until a full pass makes no change.
    while redo:
        redo = False
        node_map = {}
        to_update = set()
        # Iterate over a copy since nodes are removed while iterating.
        for node in graph.nodes[:]:
            if node.type.is_stmt and node in graph:
                sucs = graph.all_sucs(node)
                # Only nodes with exactly one successor are candidates.
                if len(sucs) != 1:
                    continue
                suc = sucs[0]
                if len(node.get_ins()) == 0:
                    # Empty node (pure goto): redirect predecessors to the
                    # successor and drop the node.
                    # Switch predecessors rely on edge order; don't touch them.
                    if any(pred.type.is_switch for pred in graph.all_preds(node)):
                        continue
                    if node is suc:
                        continue
                    node_map[node] = suc
                    for pred in graph.all_preds(node):
                        pred.update_attribute_with(node_map)
                        # Preds not in the regular successor list reached this
                        # node via an exception edge; preserve that edge kind.
                        if node not in graph.sucs(pred):
                            graph.add_catch_edge(pred, suc)
                            continue
                        graph.add_edge(pred, suc)
                    redo = True
                    if node is graph.entry:
                        graph.entry = suc
                    graph.remove_node(node)
                elif (suc.type.is_stmt and len(graph.all_preds(suc)) == 1 and not (suc in graph.catch_edges) and not ((node is suc) or (suc is graph.entry))):
                    # Successor is a statement with this node as its only
                    # predecessor: merge its instructions into this node.
                    ins_to_merge = suc.get_ins()
                    node.add_ins(ins_to_merge)
                    for var in suc.var_to_declare:
                        node.add_variable_declaration(var)
                    new_suc = graph.sucs(suc)[0]
                    if new_suc:
                        graph.add_edge(node, new_suc)
                    # Exception edges of the merged node move to this node.
                    for exception_suc in graph.catch_edges.get(suc, []):
                        graph.add_catch_edge(node, exception_suc)
                    redo = True
                    graph.remove_node(suc)
                else:
                    # No structural change; remember to refresh attributes
                    # with the redirections collected this pass.
                    to_update.add(node)
        for node in to_update:
            node.update_attribute_with(node_map)
|
def main(vocab_path: str, elmo_config_path: str, elmo_weights_path: str, output_dir: str, batch_size: int, device: int, use_custom_oov_token: bool = False) -> None:
    """Creates ELMo word representations from a vocabulary file. These
    word representations are _independent_ - they are the result of running
    the CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
    ELMo requires 2 additional tokens: <S> and </S>. The first token
    in this file is assumed to be an unknown token.
    This script produces two artifacts: A new vocabulary file
    with the <S> and </S> tokens inserted and a glove formatted embedding
    file containing word : vector pairs, one per line, with all values
    separated by a space.
    """
    # Load the vocabulary words and convert to char ids
    with open(vocab_path, 'r') as vocab_file:
        tokens = vocab_file.read().strip().split('\n')
    # Insert the sentence boundary tokens which elmo uses at positions 1 and 2.
    if tokens[0] != DEFAULT_OOV_TOKEN and not use_custom_oov_token:
        raise ConfigurationError("ELMo embeddings require the use of a OOV token.")
    tokens = [tokens[0]] + ["<S>", "</S>"] + tokens[1:]
    indexer = ELMoTokenCharactersIndexer()
    indices = indexer.tokens_to_indices([Token(token) for token in tokens], Vocabulary(), "indices")["indices"]
    # Pack the word indices into fixed-length "sentences" of 50 tokens so the
    # encoder sees uniformly shaped input.
    sentences = []
    for k in range((len(indices) // 50) + 1):
        sentences.append(indexer.pad_token_sequence(indices[(k * 50):((k + 1) * 50)], desired_num_tokens=50, padding_lengths={}))
    # Number of padding slots in the final sentence; stripped again below.
    # NOTE(review): when len(indices) is an exact multiple of 50 this is 50,
    # i.e. the whole trailing all-padding sentence -- confirm intended.
    last_batch_remainder = 50 - (len(indices) % 50)
    if device != -1:
        elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path, elmo_weights_path).cuda(device)
    else:
        elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path, elmo_weights_path)
    all_embeddings = []
    for i in range((len(sentences) // batch_size) + 1):
        array = numpy.array(sentences[i * batch_size:(i + 1) * batch_size])
        if device != -1:
            batch = torch.from_numpy(array).cuda(device)
        else:
            batch = torch.from_numpy(array)
        token_embedding = elmo_token_embedder(batch)['token_embedding'].data
        # Reshape back to a list of words of shape (batch_size * 50, encoding_dim)
        # We also need to remove the <S>, </S> tokens appended by the encoder.
        per_word_embeddings = token_embedding[:, 1:-1, :].contiguous().view(-1, token_embedding.size(-1))
        all_embeddings.append(per_word_embeddings)
    # Remove the embeddings associated with padding in the last batch.
    all_embeddings[-1] = all_embeddings[-1][:-last_batch_remainder, :]
    embedding_weight = torch.cat(all_embeddings, 0).cpu().numpy()
    # Write out the embedding in a glove format (word then space-separated values).
    os.makedirs(output_dir, exist_ok=True)
    with gzip.open(os.path.join(output_dir, "elmo_embeddings.txt.gz"), 'wb') as embeddings_file:
        for i, word in enumerate(tokens):
            string_array = " ".join([str(x) for x in list(embedding_weight[i, :])])
            embeddings_file.write(f"{word} {string_array}\n".encode('utf-8'))
    # Write out the new vocab with the <S> and </S> tokens.
    _, vocab_file_name = os.path.split(vocab_path)
    with open(os.path.join(output_dir, vocab_file_name), "w") as new_vocab_file:
        for word in tokens:
            new_vocab_file.write(f"{word}\n")
|
def query(self, table, hash_key, range_key_condition=None, attributes_to_get=None, request_limit=None, max_results=None, consistent_read=False, scan_index_forward=True, exclusive_start_key=None, item_class=Item):
    """Perform a query on the table, yielding matching items.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being queried.

    :type hash_key: int|long|float|str|unicode
    :param hash_key: The HashKey of the requested item. The type of the
        value must match the type defined in the schema for the table.

    :type range_key_condition: dict
    :param range_key_condition: A dict mapping a RangeKey value (or a tuple
        of values, for 'BETWEEN') to one of the conditions:
        'EQ'|'LE'|'LT'|'GE'|'GT'|'BEGINS_WITH'|'BETWEEN'.
        Only 'BETWEEN' expects a tuple; otherwise use a scalar key.

    :type attributes_to_get: list
    :param attributes_to_get: If supplied, only these attribute names are
        returned; otherwise all attributes are returned.

    :type request_limit: int
    :param request_limit: Maximum number of items to retrieve per request;
        by default as many as possible are retrieved per request.

    :type max_results: int
    :param max_results: Maximum number of results the generator will yield
        in total, regardless of how many are available.

    :type consistent_read: bool
    :param consistent_read: If True, issue a consistent read; otherwise an
        eventually consistent read is issued.

    :type scan_index_forward: bool
    :param scan_index_forward: Forward (True, default) or backward
        traversal of the index.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from which to
        continue an earlier query (the LastEvaluatedKey of that query).

    :type item_class: Class
    :param item_class: Override for the class used to generate the items;
        should be a subclass of :class:`boto.dynamodb.item.Item`.

    :rtype: generator
    """
    rkc = self.dynamize_range_key_condition(range_key_condition)
    response = True
    n = 0
    while response:
        if response is True:
            # First iteration: no previous page to continue from.
            pass
        elif 'LastEvaluatedKey' in response:
            # BUGFIX: ``dict.has_key`` is Python-2-only; use membership test.
            lek = response['LastEvaluatedKey']
            exclusive_start_key = self.dynamize_last_evaluated_key(lek)
        else:
            break
        response = self.layer1.query(table.name, self.dynamize_value(hash_key), rkc, attributes_to_get, request_limit, consistent_read, scan_index_forward, exclusive_start_key, object_hook=item_object_hook)
        for item in response['Items']:
            if max_results and n == max_results:
                # Stop the generator outright; the previous ``break`` only
                # exited the inner loop and kept issuing useless paginated
                # requests that yielded nothing.
                return
            yield item_class(table, attrs=item)
            n += 1
|
def add_axes_at_origin(self, loc=None):
    """Add an axes actor at the origin of a render window.

    Parameters
    ----------
    loc : int, tuple, or list
        Index of the renderer to add the actor to, e.g. ``loc=2`` or
        ``loc=(1, 1)``. When None, defaults to the active render window.

    Returns
    -------
    marker_actor : vtk.vtkAxesActor
        vtkAxesActor actor
    """
    index = self.loc_to_index(loc)
    # Remember which renderer is active before delegating to it.
    self._active_renderer_index = index
    active_renderer = self.renderers[index]
    return active_renderer.add_axes_at_origin()
|
def __update_in_toto_layout_pubkeys(self):
    '''Download every top-level TUF target that looks like a public key and
    return the list of their relative paths.

    NOTE: We assume that all the public keys needed to verify any in-toto
    root layout, or sublayout, metadata file has been directly signed by
    the top-level TUF targets role using *OFFLINE* keys. This is a
    reasonable assumption, as TUF does not offer meaningful security
    guarantees if _ALL_ targets were signed using _online_ keys.
    '''
    target_relpaths = []
    targets = self.__updater.targets_of_role('targets')
    for target in targets:
        target_relpath = target['filepath']
        # Download this target only if it _looks_ like a public key.
        if target_relpath.endswith('.pub'):
            # NOTE: Avoid recursively downloading in-toto metadata for
            # in-toto root layout pubkeys themselves, and so on ad
            # infinitum.
            self.__get_target(target_relpath, download_in_toto_metadata=False)
            target_relpaths.append(target_relpath)
    return target_relpaths
|
def update_lbaas_member(self, lbaas_member, lbaas_pool, body=None):
    """Update a lbaas_member via a PUT on its resource path."""
    member_path = self.lbaas_member_path % (lbaas_pool, lbaas_member)
    return self.put(member_path, body=body)
|
def failedToGetPerspective(self, why, broker):
    """The login process failed, most likely because of an authorization
    failure (bad password), but it is also possible that we lost the new
    connection before we managed to send our credentials.
    """
    log.msg("ReconnectingPBClientFactory.failedToGetPerspective")
    # put something useful in the logs
    if why.check(pb.PBConnectionLost):
        log.msg("we lost the brand-new connection")
        # fall through
    elif why.check(error.UnauthorizedLogin):
        log.msg("unauthorized login; check worker name and password")
        # fall through
    else:
        # Unexpected failure: log it and shut the reactor down entirely.
        log.err(why, 'While trying to connect:')
        reactor.stop()
        return
    # lose the current connection, which will trigger a retry
    broker.transport.loseConnection()
|
def add_edge(self, edge):
    """Adds Edges to the probnet dict.

    Parameters
    ----------
    edge : <Element Link at Links Node in XML>
        etree Element consisting Variable tag.

    Examples
    --------
    >>> reader = ProbModelXMLReader()
    >>> reader.add_edge(edge)
    """
    variables = edge.findall('Variable')
    var1 = variables[0].attrib['name']
    var2 = variables[1].attrib['name']
    # Register the edge entry first, then fill it in incrementally.
    edge_data = self.probnet['edges'][(var1, var2)] = {}
    edge_data['directed'] = edge.attrib['directed']
    # TODO: check for the case of undirected graphs if we need to add to both elements of the dic for a single edge.
    comment = edge.find('Comment')
    if comment is not None:
        edge_data['Comment'] = comment.text
    label = edge.find('Label')
    if label is not None:
        edge_data['Label'] = label.text
    if edge.find('AdditionalProperties/Property') is not None:
        edge_data['AdditionalProperties'] = {}
        for prop in edge.findall('AdditionalProperties/Property'):
            edge_data['AdditionalProperties'][prop.attrib['name']] = prop.attrib['value']
|
def run_forever(self, *args, **kwargs):
    """Poll updates and dispatch them to the handler, blocking forever.

    :type relax: float
    :param relax: seconds between each :meth:`.getUpdates`
    :type offset: int
    :param offset: initial ``offset`` parameter supplied to :meth:`.getUpdates`
    :type timeout: int
    :param timeout: ``timeout`` parameter supplied to :meth:`.getUpdates`,
        controlling how long to poll.
    :type allowed_updates: array of string
    :param allowed_updates: ``allowed_updates`` parameter supplied to
        :meth:`.getUpdates`, controlling which types of updates to receive.

    Calling this method will block forever. Use :meth:`.run_as_thread` to
    run it non-blockingly.
    """
    collect_loop = CollectLoop(self._handle)

    def feed_message(update):
        # feed messages to collect loop
        collect_loop.input_queue.put(_extract_message(update)[1])

    updates_loop = GetUpdatesLoop(self._bot, feed_message)
    # feed events to collect loop
    self._bot.scheduler.on_event(collect_loop.input_queue.put)
    self._bot.scheduler.run_as_thread()
    updates_loop.run_as_thread(*args, **kwargs)
    collect_loop.run_forever()
|
def getInitialArguments(self):
    """Include L{organizer}'s C{storeOwnerPerson}'s name, and the name of
    L{initialPerson} and the value of L{initialState}, if they are set.
    """
    arguments = [self.organizer.storeOwnerPerson.name]
    if self.initialPerson is not None:
        arguments.extend([self.initialPerson.name, self.initialState])
    return tuple(arguments)
|
def permute_iter(elements):
    """Lazily generate every permutation of *elements*, one per iteration.

    Works on any sliceable sequence (list, tuple, str); each yielded value
    has the same type as the input.
    """
    if len(elements) <= 1:
        # Zero or one items: the only permutation is the sequence itself.
        yield elements
    else:
        head = elements[0:1]
        for sub_perm in permute_iter(elements[1:]):
            # Insert the first element at every possible position of each
            # permutation of the remaining elements.
            for pos in range(len(elements)):
                yield sub_perm[:pos] + head + sub_perm[pos:]
|
def override_params(opening_char='{', closing_char='}', separator_char='|'):
    """Override the module-level delimiter character settings.

    @type opening_char: str
    @param opening_char: Opening character. Default: '{'
    @type closing_char: str
    @param closing_char: Closing character. Default: '}'
    @type separator_char: str
    @param separator_char: Separator char. Default: '|'
    """
    global char_separator, char_opening, char_closing
    char_opening = opening_char
    char_closing = closing_char
    char_separator = separator_char
|
def expand(self, basedir, config, sourcedir, targetdir, cwd):
    """Expand and normalize the given paths into absolute ones.

    Home prefixes (``~``) are expanded; a relative base dir is anchored at
    ``cwd``; the other paths, when relative, are anchored at the expanded
    base dir; every result is normalized.

    Args:
        basedir (string): Project base directory used to prepend relative
            paths. If empty or equal to '.', it resolves to ``cwd``.
        config (string): Settings file path.
        sourcedir (string): Source directory path.
        targetdir (string): Compiled files target directory path.
        cwd (string): Current directory path to prepend base dir if relative.

    Returns:
        tuple: Expanded ``(basedir, config, sourcedir, targetdir)``.
    """
    def _resolve(path, root):
        # Expand "~", anchor relative paths at `root`, then normalize.
        path = os.path.expanduser(path)
        if not os.path.isabs(path):
            path = os.path.join(root, path)
        return os.path.normpath(path)

    expanded_basedir = _resolve(basedir, cwd)
    return (
        expanded_basedir,
        _resolve(config, expanded_basedir),
        _resolve(sourcedir, expanded_basedir),
        _resolve(targetdir, expanded_basedir),
    )
|
def load_agent(self):
    """Loads and initializes an agent using instance variables, registers for quick chat and sets render functions.

    :return: An instance of an agent, and the agent class file.
    """
    # Instantiate the agent from its dynamically loaded class and feed it
    # the match and bot configuration.
    agent_class = self.agent_class_wrapper.get_loaded_class()
    agent = agent_class(self.name, self.team, self.index)
    agent.init_match_config(self.match_config)
    agent.load_config(self.bot_configuration.get_header("Bot Parameters"))
    self.update_metadata_queue(agent)
    self.set_render_manager(agent)
    agent_class_file = self.agent_class_wrapper.python_file
    # Wire framework callbacks into the agent (quick chat, game state I/O,
    # ball prediction, physics tick access).
    agent._register_quick_chat(self.send_quick_chat_from_agent)
    agent._register_field_info(self.get_field_info)
    agent._register_set_game_state(self.set_game_state)
    agent._register_ball_prediction(self.get_ball_prediction)
    agent._register_ball_prediction_struct(self.get_ball_prediction_struct)
    agent._register_get_rigid_body_tick(self.get_rigid_body_tick)
    # Stop any previous quick-chat listener before registering a new one.
    if self.quick_chat_quit_event:
        self.quick_chat_quit_event.set()
    self.quick_chat_quit_event = mp.Event()
    register_for_quick_chat(self.quick_chat_queue_holder, agent.handle_quick_chat, self.quick_chat_quit_event)
    # Busy-wait until the game reports valid field info.
    while not self.is_valid_field_info():
        time.sleep(0.1)
    # Once all engine setup is done, do the agent-specific initialization, if any:
    agent.initialize_agent()
    return agent, agent_class_file
|
def add_bus(self, bus):
    """Add a bus for notification.

    :param can.BusABC bus:
        CAN bus instance.
    """
    has_fd = self._loop is not None and hasattr(bus, 'fileno') and bus.fileno() >= 0
    if has_fd:
        # Use the bus file descriptor to watch for messages on the event loop.
        reader = bus.fileno()
        self._loop.add_reader(reader, self._on_message_available, bus)
    else:
        # No usable file descriptor / event loop: fall back to a daemon
        # receive thread.
        reader = threading.Thread(
            target=self._rx_thread,
            args=(bus,),
            name='can.notifier for bus "{}"'.format(bus.channel_info))
        reader.daemon = True
        reader.start()
    self._readers.append(reader)
|
def show(data, negate=False):
    """Show the data stretched linearly to the full 8-bit range.

    Parameters
    ----------
    data : numpy.ndarray
        Image data; rescaled so that min maps to 0 and max to 255.
    negate : bool
        If True, invert the stretched image before display.
    """
    from PIL import Image as pil
    dynamic_range = data.max() - data.min()
    if dynamic_range == 0:
        # Guard against division by zero: a constant image is shown as black.
        data = np.zeros(data.shape, np.uint8)
    else:
        data = np.array((data - data.min()) * 255.0 / dynamic_range, np.uint8)
    if negate:
        data = 255 - data
    img = pil.fromarray(data)
    img.show()
|
def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):
    """Determines date values.

    Args:
      number_of_days (int): number of days since epoch.
      date_time_epoch (DateTimeEpoch): date and time of the epoch.

    Returns:
      tuple[int, int, int]: year, month, day of month.
    """
    epoch = date_time_epoch
    return self._GetDateValues(
        number_of_days, epoch.year, epoch.month, epoch.day_of_month)
|
def _DeepCopy(self, obj):
    """Creates an object copy by a serialize/deserialize round-trip.

    RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
    hard to catch bugs.

    Args:
      obj: RDFValue to be copied.

    Returns:
      A deep copy of the passed RDFValue.
    """
    precondition.AssertType(obj, rdfvalue.RDFValue)
    serialized = obj.SerializeToString()
    return obj.__class__.FromSerializedString(serialized)
|
def add_edge(self, u, v, key=None, attr_dict=None, **attr):
    """Version of add_edge that only writes to the database once."""
    # Merge keyword attributes into the explicit attribute dict.
    if attr_dict is None:
        attr_dict = attr
    else:
        try:
            attr_dict.update(attr)
        except AttributeError:
            raise NetworkXError("The attr_dict argument must be a dictionary.")
    # Make sure both endpoints exist as nodes.
    for endpoint in (u, v):
        if endpoint not in self.node:
            self.node[endpoint] = {}
    if v in self.succ[u]:
        # Parallel edge: pick the next free integer key if none was given.
        existing = self.adj[u][v]
        if key is None:
            key = len(existing)
            while key in existing:
                key += 1
        edge_data = existing.get(key, {})
        edge_data.update(attr_dict)
        existing[key] = edge_data
    else:
        # First edge between u and v.
        if key is None:
            key = 0
        edge_data = {}
        edge_data.update(attr_dict)
        self.succ[u][v] = {key: edge_data}
    return key
|
def _property_detect_type ( name , values ) :
'''Detect the datatype of a property'''
|
value_type = 'str'
if values . startswith ( 'on | off' ) :
value_type = 'bool'
elif values . startswith ( 'yes | no' ) :
value_type = 'bool_alt'
elif values in [ '<size>' , '<size> | none' ] :
value_type = 'size'
elif values in [ '<count>' , '<count> | none' , '<guid>' ] :
value_type = 'numeric'
elif name in [ 'sharenfs' , 'sharesmb' , 'canmount' ] :
value_type = 'bool'
elif name in [ 'version' , 'copies' ] :
value_type = 'numeric'
return value_type
|
def format_items(self, data, many):
    """Format data as a Resource object or list of Resource objects.

    See: http://jsonapi.org/format/#document-resource-objects
    """
    if not many:
        return self.format_item(data)
    return [self.format_item(item) for item in data]
|
def add_filter(self, filter_id, description):
    """Add a FILTER line to the header.

    Arguments:
        filter_id (str): The id of the filter line
        description (str): A description of the filter line
    """
    filter_line = '##FILTER=<ID={0},Description="{1}">'.format(filter_id, description)
    logger.info("Adding filter line to vcf: {0}".format(filter_line))
    # Reuse the regular meta-data parser so the new line is validated and
    # stored exactly like lines read from an input file.
    self.parse_meta_data(filter_line)
    return
|
def _latch_file_info(self):
    """Internal function to update the dictionaries
    keeping track of input and output files
    """
    # Re-map the arguments, then refresh this link's own file bookkeeping.
    self._map_arguments(self.args)
    self.files.latch_file_info(self.args)
    # Rebuild the aggregated sub-file dict from scratch: first this link's
    # own files, then those of every child link and their sub-links.
    self.sub_files.file_dict.clear()
    self.sub_files.update(self.files.file_dict)
    for link in self._links.values():
        self.sub_files.update(link.files.file_dict)
        self.sub_files.update(link.sub_files.file_dict)
|
def acknowledge_incident(self, service_key, incident_key, description=None, details=None):
    """Causes the referenced incident to enter the acknowledged state.

    Send an acknowledge event when someone is presently working on the
    incident.
    """
    event_type = "acknowledge"
    return self.create_event(
        service_key, description, event_type, details, incident_key)
|
def install_package(self, client, package):
    """Install *package* on the instance over SSH.

    :param client: SSH client connected to the instance.
    :param package: Name of the package to install.
    :return: Output of the install command.
    :raises IpaDistroException: If the install command fails.
    """
    install_cmd = "{sudo} '{install} {package}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        install=self.get_install_cmd(),
        package=package)
    try:
        out = ipa_utils.execute_ssh_command(client, install_cmd)
    except Exception as error:
        # Chain the original exception so the underlying SSH failure
        # cause is preserved in the traceback.
        raise IpaDistroException(
            'An error occurred installing package {package} '
            'on instance: {error}'.format(package=package, error=error)
        ) from error
    return out
|
def p_levelsig(self, p):
    'levelsig : levelsig_base'
    # NOTE: the docstring above is the PLY grammar rule for this production;
    # it is consumed by the parser generator and must not be reworded.
    # Wrap the parsed base signal in a 'level' sensitivity marker, keeping
    # the source line number of the first symbol.
    p[0] = Sens(p[1], 'level', lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
|
def siget ( fullname = "" ) :
"""Returns a softimage object given its fullname ."""
|
fullname = str ( fullname )
if not len ( fullname ) :
return None
return sidict . GetObject ( fullname , False )
|
def delta(var, key, tusec=None):
    '''Calculate the slope (rate of change) of *var* for the given *key*.

    Consecutive calls with the same key return
    ``(var - last_var) / (tnow - last_time)``. The first call for a key
    returns 0; a repeated call at the same timestamp returns the cached
    result (also avoiding a division by zero).

    :param var: current value of the signal.
    :param key: identifier used to look up the previous sample.
    :param tusec: current time in microseconds; when None, the global
        mavlink timestamp is used instead.
    '''
    global last_delta
    if tusec is not None:
        tnow = tusec * 1.0e-6
    else:
        import mavutil
        tnow = mavutil.mavfile_global.timestamp
    ret = 0
    if key in last_delta:
        (last_v, last_t, last_ret) = last_delta[key]
        if last_t == tnow:
            # Same timestamp as the stored sample: return the cached slope
            # without updating state (guards the division below).
            return last_ret
        ret = (var - last_v) / (tnow - last_t)
    last_delta[key] = (var, tnow, ret)
    return ret
|
def _normalized_serial ( self ) :
"""Normalized serial name for usage in log filename .
Some Android emulators use ip : port as their serial names , while on
Windows ` : ` is not valid in filename , it should be sanitized first ."""
|
if self . _serial is None :
return None
normalized_serial = self . _serial . replace ( ' ' , '_' )
normalized_serial = normalized_serial . replace ( ':' , '-' )
return normalized_serial
|
def obo(self):
    """str: the `Term` serialized in an Obo ``[Term]`` stanza.

    Note:
        The following guide was used:
        ftp://ftp.geneontology.org/pub/go/www/GO.format.obo-1_4.shtml
    """
    def add_tags(stanza_list, tags):
        # Append "tag: value" lines for each requested tag found in the
        # free-form ``self.other`` mapping (values may be scalars or lists).
        for tag in tags:
            if tag in self.other:
                if isinstance(self.other[tag], list):
                    for attribute in self.other[tag]:
                        stanza_list.append("{}: {}".format(tag, attribute))
                else:
                    stanza_list.append("{}: {}".format(tag, self.other[tag]))
    # Tags are emitted in the canonical obo-1.4 metatag order:
    # id, is_anonymous, name, namespace, alt_id, def, comment, subset,
    # synonym, xref, builtin, property_value, is_a, intersection_of,
    # union_of, equivalent_to, disjoint_from, relationship, created_by,
    # creation_date, is_obsolete, replaced_by, consider.
    stanza_list = ["[Term]"]
    # id
    stanza_list.append("id: {}".format(self.id))
    # name
    if self.name is not None:
        stanza_list.append("name: {}".format(self.name))
    else:
        stanza_list.append("name: ")
    add_tags(stanza_list, ['is_anonymous', 'alt_id'])
    # def
    if self.desc:
        stanza_list.append(self.desc.obo)
    # comment, subset
    add_tags(stanza_list, ['comment', 'subset'])
    # synonyms (sorted for a deterministic stanza)
    for synonym in sorted(self.synonyms, key=str):
        stanza_list.append(synonym.obo)
    add_tags(stanza_list, ['xref'])
    # is_a
    if Relationship('is_a') in self.relations:
        for companion in self.relations[Relationship('is_a')]:
            stanza_list.append("is_a: {} ! {}".format(companion.id, companion.name))
    add_tags(stanza_list, ['intersection_of', 'union_of', 'disjoint_from'])
    # Remaining bottom-up relationships (is_a already emitted above).
    for relation in self.relations:
        if relation.direction == "bottomup" and relation is not Relationship('is_a'):
            stanza_list.extend("relationship: {} {} ! {}".format(relation.obo_name, companion.id, companion.name) for companion in self.relations[relation])
    add_tags(stanza_list, ['is_obsolete', 'replaced_by', 'consider', 'builtin', 'created_by', 'creation_date'])
    return "\n".join(stanza_list)
|
def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
    # type: (str) -> None
    """Update dataset metadata with static metadata from a JSON file.

    Args:
        path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.

    Returns:
        None
    """
    # Let the base class merge in the static JSON metadata, then re-split
    # the resources the merge may have folded back into the dataset dict.
    super(Dataset, self).update_from_json(path)
    self.separate_resources()
|
def generate(self, field_name, field):
    """Dispatch to a formfield generator named after the field's class.

    Looks up a ``generate_<lowercased field classname>`` method on ``self``
    and calls it; raises NotImplementedError when no generator exists.
    """
    generator_name = 'generate_%s' % field.__class__.__name__.lower()
    try:
        generator = getattr(self, generator_name)
    except AttributeError:
        raise NotImplementedError('%s is not supported by MongoForm' % field.__class__.__name__)
    label = (field.verbose_name or field_name).capitalize()
    return generator(field_name, field, label)
|
def export_image_to_uri(self, image_id, uri, ibm_api_key=None):
    """Export an image template into the given object storage.

    :param int image_id: The ID of the image
    :param string uri: The URI for object storage of the format
        swift://<objectStorageAccount>@<cluster>/<container>/<objectPath>
        or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud
        Object Storage
    :param string ibm_api_key: Ibm Api Key needed to communicate with IBM
        Cloud Object Storage (only used for cos:// targets)
    """
    if 'cos://' not in uri:
        # Plain swift / external object storage export.
        return self.vgbdtg.copyToExternalSource({'uri': uri}, id=image_id)
    # IBM Cloud Object Storage needs the API key alongside the URI.
    payload = {'uri': uri, 'ibmApiKey': ibm_api_key}
    return self.vgbdtg.copyToIcos(payload, id=image_id)
|
def should_highlight(self, req):
    """Decide whether result highlighting was requested.

    Reads the optional ``es_highlight`` request argument, whose value is
    expected to be '0' or '1'.

    :param req: request object with a dict-like ``args`` attribute
    :returns: True if highlighting was requested, False otherwise
    """
    try:
        # bool() collapses a missing/falsy args mapping and a 0 value alike.
        return bool(req.args and int(req.args.get('es_highlight', 0)))
    except (AttributeError, TypeError, ValueError):
        # AttributeError: req has no .args
        # TypeError: args is not dict-like / value is not int-convertible
        # ValueError: non-numeric string value (e.g. 'yes') -- previously
        # uncaught, which crashed on malformed query strings
        return False
|
def subscribe(self, feedUrl):
    """Add a feed to the top-level subscription list.

    Subscribing appears to be idempotent: you can subscribe multiple times
    without error.

    :returns: True on success, False otherwise; may raise HTTPError
    """
    response = self.httpPost(ReaderUrl.SUBSCRIPTION_EDIT_URL, {'ac': 'subscribe', 's': feedUrl})
    # FIXME - need better return API
    return bool(response and 'OK' in response)
|
def igphyml(input_file=None, tree_file=None, root=None, verbose=False):
    '''Computes a phylogenetic tree using IgPhyML.

    .. note::

        IgPhyML must be installed. It can be downloaded from https://github.com/kbhoehn/IgPhyML.

    Args:

        input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required.

        tree_file (str): Path to the output tree file.

        root (str): Name of the root sequence. Required.

        verbose (bool): If `True`, prints the standard output and standard error for each IgPhyML run.
            Default is `False`.

    Returns:

        str: Path to the final IgPhyML tree file.

    Raises:

        RuntimeError: if the ``igphyml`` executable is not on PATH.
    '''
    if shutil.which('igphyml') is None:
        raise RuntimeError('It appears that IgPhyML is not installed.\nPlease install and try again.')
    # first, tree topology is estimated with the M0/GY94 model
    # (fixed: previously referenced an undefined name 'aln_file')
    igphyml_cmd1 = 'igphyml -i {} -m GY -w M0 -t e --run_id gy94'.format(input_file)
    # Pass an argument list so Popen works without a shell; universal_newlines
    # makes communicate() return str, so the verbose prints below are valid.
    p1 = sp.Popen(igphyml_cmd1.split(), stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
    stdout1, stderr1 = p1.communicate()
    if verbose:
        print(stdout1 + '\n')
        print(stderr1 + '\n\n')
    # The GY94 run writes the intermediate topology next to the input file.
    intermediate = input_file + '_igphyml_tree.txt_gy94'
    # now we fit the HLP17 model once the tree topology is fixed
    # (fixed: the original format string mixed automatic '{}' with manual
    # '{0}' field numbering, which raises ValueError at runtime)
    igphyml_cmd2 = 'igphyml -i {0} -m HLP17 --root {1} -o lr -u {2} -o {3}'.format(input_file, root, intermediate, tree_file)
    p2 = sp.Popen(igphyml_cmd2.split(), stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
    stdout2, stderr2 = p2.communicate()
    if verbose:
        print(stdout2 + '\n')
        print(stderr2 + '\n')
    return tree_file + '_igphyml_tree.txt'
|
def clear_source(self):
    """Reset the source text to its metadata default (stub)."""
    metadata = self.get_source_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise NoAccess()
    default = self._source_metadata['default_string_values'][0]
    self.my_osid_object_form._my_map['texts']['source'] = default
|
def accept_freeware_license():
    '''Tab through the Eagle freeware-license dialog and confirm it.

    Different Eagle versions need a different TAB count:
    6.5 -> 2
    6.6 -> 3
    7.4 -> 2
    '''
    tab_count = 3 if version().startswith('6.6.') else 2
    for _ in range(tab_count):
        EasyProcess('xdotool key KP_Tab').call()
        time.sleep(0.5)
    EasyProcess('xdotool key KP_Space').call()
    time.sleep(0.5)
    # say OK to any more question
    EasyProcess('xdotool key KP_Space').call()
|
def add_circuit_breaker(self, circ_breaker):
    """Register a circuit breaker with this grid.

    Args
        circ_breaker: CircuitBreakerDing0 instance; silently ignored when it
            is already registered or is not a CircuitBreakerDing0.
    """
    already_known = circ_breaker in self._circuit_breakers
    if not already_known and isinstance(circ_breaker, CircuitBreakerDing0):
        self._circuit_breakers.append(circ_breaker)
        self.graph_add_node(circ_breaker)
|
def time(self):
    """Returns numpy array of datetime.time. The time part of the Timestamps."""
    # Aware, non-UTC timestamps must be converted through their wall-clock
    # (local) i8 representation so the extracted times reflect the timezone
    # instead of UTC.
    needs_local = self.tz is not None and not timezones.is_utc(self.tz)
    timestamps = self._local_timestamps() if needs_local else self.asi8
    return tslib.ints_to_pydatetime(timestamps, box="time")
|
def _new_stream ( self , idx ) :
'''Randomly select and create a new stream .
Parameters
idx : int , [ 0 : n _ streams - 1]
The stream index to replace'''
|
# Don ' t activate the stream if the weight is 0 or None
if self . stream_weights_ [ idx ] :
self . streams_ [ idx ] = self . streamers [ idx ] . iterate ( )
else :
self . streams_ [ idx ] = None
# Reset the sample count to zero
self . stream_counts_ [ idx ] = 0
|
def anonymous(self):
    """Gets the anonymous handler.

    Also tries to grab a class if the `anonymous` value is a string, so that
    we can define anonymous handlers that aren't defined yet (like when
    you're subclassing your basehandler into an anonymous one).
    """
    if not hasattr(self.handler, 'anonymous'):
        return None
    anon = self.handler.anonymous
    if callable(anon):
        return anon
    # String form: resolve against the registered handler classes by name.
    for klass in typemapper.keys():
        if anon == klass.__name__:
            return klass
    return None
|
def line ( self , text = '' ) :
'''A simple helper to write line with ` \n `'''
|
self . out . write ( text )
self . out . write ( '\n' )
|
def input_variables(self, exclude_specials=True):
    """Get all variables that are read but never written to.

    :param exclude_specials: if True, skip variables with a category set.
    :return: A list of variables that are never written to.
    """
    result = []
    for variable, accesses in self._variable_accesses.items():
        written = any(a for a in accesses if a.access_type == 'write')
        read = any(a for a in accesses if a.access_type == 'read')
        if written or not read:
            continue
        if exclude_specials and variable.category:
            continue
        result.append(variable)
    return result
|
def _set_extended(self, v, load=False):
    """Setter method for extended, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_extended is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_extended() directly.

    :param v: value to assign; coerced/validated against the generated
        YANG list type for 'extended'
    :param load: presumably True when loading from stored config -- unused
        in this generated setter's own logic (TODO confirm against the
        pyangbind base class)
    :raises ValueError: when v is not compatible with the expected list type
    """
    # Unwrap values that carry their own user-type coercion hook.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap v in the generated YANG list class; this validates keys,
        # ordering and CLI extensions against the YANG model.
        t = YANGDynClass(v,base=YANGListType("name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Extended IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_ext_acl_cp', u'cli-mode-name': u'conf-ip6acl-ext'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Extended IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_ext_acl_cp', u'cli-mode-name': u'conf-ip6acl-ext'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({'error-string': """extended must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Extended IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_ext_acl_cp', u'cli-mode-name': u'conf-ip6acl-ext'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Extended IP ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'ip6_ext_acl_cp', u'cli-mode-name': u'conf-ip6acl-ext'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='list', is_config=True)""",})
    self.__extended = t
    # Notify the containing object (if it defines _set) that a child changed.
    if hasattr(self, '_set'):
        self._set()
|
def keyPressEvent(self, event):
    """Overloads the key press event to control keystroke modifications for
    the console widget.

    :param      event | <QKeyEvent>
    """
    # enter || return keys will apply the command
    if event.key() in (Qt.Key_Return, Qt.Key_Enter):
        self.applyCommand()
        event.accept()
    # home key will move the cursor to the home position
    elif event.key() == Qt.Key_Home:
        self.gotoHome()
        event.accept()
    # backspace/delete: let the editor delete first, then re-check whether
    # the fragment under the cursor still warrants an active completion
    elif event.key() in (Qt.Key_Backspace, Qt.Key_Delete):
        super(XConsoleEdit, self).keyPressEvent(event)

        # update the completer: take the word fragment left of the cursor
        cursor = self.textCursor()
        text = projex.text.nativestring(cursor.block().text())
        text = text[:cursor.columnNumber()].split(' ')[-1]
        # without a dotted path there is nothing left to complete
        if not '.' in text:
            self.cancelCompletion()

    # period key will trigger a completion popup; letters also feed the
    # completer, but the popup is only forced for '.'
    elif event.key() == Qt.Key_Period or (Qt.Key_A <= event.key() <= Qt.Key_Z):
        super(XConsoleEdit, self).keyPressEvent(event)
        self.startCompletion(force=event.key() == Qt.Key_Period)

    # space will cancel the completion
    elif event.key() == Qt.Key_Space:
        self.cancelCompletion()
        super(XConsoleEdit, self).keyPressEvent(event)

    # left parenthesis will start method help
    elif event.key() == Qt.Key_ParenLeft:
        self.cancelCompletion()
        self.showMethodToolTip()
        super(XConsoleEdit, self).keyPressEvent(event)

    # Up (or Ctrl+Up outside command-line mode) will load previous commands
    elif event.key() == Qt.Key_Up:
        if self.commandLineInteraction() or event.modifiers() & Qt.ControlModifier:
            self.insertPreviousCommand()
            event.accept()
        else:
            super(XConsoleEdit, self).keyPressEvent(event)

    # Down (or Ctrl+Down outside command-line mode) will load future commands
    elif event.key() == Qt.Key_Down:
        if self.commandLineInteraction() or event.modifiers() & Qt.ControlModifier:
            self.insertNextCommand()
            event.accept()
        else:
            super(XConsoleEdit, self).keyPressEvent(event)

    # otherwise, handle the event like normal
    else:
        super(XConsoleEdit, self).keyPressEvent(event)
|
def tqx(mt, x, t):
    """tqx: probability that a life aged x dies within t years."""
    alive_at_x = mt.lx[x]
    alive_at_x_plus_t = mt.lx[x + t]
    return (alive_at_x - alive_at_x_plus_t) / alive_at_x
|
def import_assist(self, starting):
    """Return a list of ``(name, module)`` tuples.

    This function tries to find modules that have a global name
    that starts with `starting`.
    """
    # XXX: breaking if gave up! use generators
    return [
        (global_name, module)
        for module in self.names
        for global_name in self.names[module]
        if global_name.startswith(starting)
    ]
|
def select(self, domain_or_name, query='', next_token=None, consistent_read=False):
    """Returns a set of Attributes for item names within domain_name that
    match the query. The query must be expressed in using the SELECT
    style syntax rather than the original SimpleDB query language.
    Even though the select request does not require a domain object,
    a domain object must be passed into this method so the Item objects
    returned can point to the appropriate domain.

    :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
    :param domain_or_name: Either the name of a domain or a Domain object

    :type query: string
    :param query: The SimpleDB query to be performed.

    :type next_token: string
    :param next_token: Token to resume a previous paginated result set.

    :type consistent_read: bool
    :param consistent_read: When set to true, ensures that the most recent
        data is returned.

    :rtype: ResultSet
    :return: An iterator containing the results.
    """
    domain, domain_name = self.get_domain_and_name(domain_or_name)
    params = {'SelectExpression': query}
    if consistent_read:
        params['ConsistentRead'] = 'true'
    if next_token:
        params['NextToken'] = next_token
    try:
        return self.get_list('Select', params, [('Item', self.item_cls)], parent=domain)
    # Fixed: 'except SDBResponseError, e' is Python-2-only syntax; the
    # 'as e' form works on Python 2.6+ and 3.x alike.
    except SDBResponseError as e:
        # Annotate the error with the offending query, then re-raise with
        # the original traceback intact (bare raise, not 'raise e').
        e.body = "Query: %s\n%s" % (query, e.body)
        raise
|
def _populate_field_defaults(self):
    """Populate the defaults of each field. This is done in a separate pass
    because defaults that specify a union tag require the union to have
    been defined.

    :raises InvalidSpec: when a field's declared default does not type-check
        against the field's data type.
    """
    for namespace in self.api.namespaces.values():
        for data_type in namespace.data_types:
            # Only struct fields can have default
            if not isinstance(data_type, Struct):
                continue
            for field in data_type.fields:
                if not field._ast_node.has_default:
                    continue
                if isinstance(field._ast_node.default, AstTagRef):
                    # Default names a union tag: resolve it to a TagRef now
                    # that all unions are guaranteed to be defined.
                    default_value = TagRef(field.data_type, field._ast_node.default.tag)
                else:
                    default_value = field._ast_node.default
                # A nullable field defaulting to None needs no type check.
                if not (field._ast_node.type_ref.nullable and default_value is None):
                    # Verify that the type of the default value is correct for this field
                    try:
                        if field.data_type.name in ('Float32', 'Float64'):
                            # You can assign int to the default value of float type
                            # However float type should always have default value in float
                            default_value = float(default_value)
                        field.data_type.check(default_value)
                    except ValueError as e:
                        raise InvalidSpec('Field %s has an invalid default: %s' % (quote(field._ast_node.name), e), field._ast_node.lineno, field._ast_node.path)
                field.set_default(default_value)
|
def log_status(self):
    '''show download status'''
    if self.download_filename is None:
        print("No download")
        return
    elapsed = time.time() - self.download_start
    bytes_done = os.path.getsize(self.download_filename)
    # kbyte/s over the whole transfer so far
    speed = bytes_done / (1000.0 * elapsed)
    entry = self.entries.get(self.download_lognum, None)
    size = 0 if entry is None else entry.size
    # Sequence numbers below the highest seen that were never received.
    highest = max(self.download_set)
    diff = set(range(highest)).difference(self.download_set)
    print("Downloading %s - %u/%u bytes %.1f kbyte/s (%u retries %u missing)" % (self.download_filename, bytes_done, size, speed, self.retries, len(diff)))
|
def t_to_min(x):
    """Convert XML 'xs:duration type' to decimal minutes, e.g.:
    t_to_min('PT1H2M30S') == 62.5
    """
    hours, minutes, seconds = re.match('PT(?:(.*)H)?(?:(.*)M)?(?:(.*)S)?', x).groups()
    total = 0
    for value, factor in ((hours, 60.), (minutes, 1.), (seconds, 1. / 60.)):
        if value is not None:
            total += float(value) * factor
    return total
|
def get_ids(self, request_data, parameter_name='ids'):
    """Extract a non-empty list of integers from request data.

    :raises ParseError: when the parameter is missing, not a list, empty,
        or contains non-integer elements.
    """
    if parameter_name not in request_data:
        raise ParseError("`{}` parameter is required".format(parameter_name))
    ids = request_data.get(parameter_name)
    if not isinstance(ids, list):
        raise ParseError("`{}` parameter not a list".format(parameter_name))
    if not ids:
        raise ParseError("`{}` parameter is empty".format(parameter_name))
    if not all(isinstance(entry, int) for entry in ids):
        raise ParseError("`{}` parameter contains non-integers".format(parameter_name))
    return ids
|
def make_placeholders(seq, start=1):
    """Generate a comma-separated placeholder string for *seq* using the
    current context's DB-API parameter style.

    :raises ValueError: when *seq* is empty
    :raises NotSupported: when the param style cannot express *seq*'s type
    """
    if len(seq) == 0:
        raise ValueError('Sequence must have at least one element.')
    param_style = Context.current().param_style
    placeholders = None
    if isinstance(seq, dict):
        # Keyed styles only: each placeholder names a dict key.
        if param_style == 'named':
            placeholders = (':%s' % key for key in six.iterkeys(seq))
        elif param_style == 'pyformat':
            placeholders = ('%%(%s)s' % key for key in six.iterkeys(seq))
    elif isinstance(seq, (list, tuple)):
        if param_style == 'numeric':
            placeholders = (':%d' % i for i in xrange(start, start + len(seq)))
        elif param_style == 'qmark':
            placeholders = itertools.repeat('?', len(seq))
        elif param_style in ('format', 'pyformat'):
            placeholders = itertools.repeat('%s', len(seq))
    if placeholders is None:
        raise NotSupported("Param style '%s' does not support sequence type '%s'" % (param_style, seq.__class__.__name__))
    return ', '.join(placeholders)
|
def update_xml_element(self):
    """Sync the cached XML element with the instance's current contents.

    Creates ``self.xml_element`` on first use, then mirrors the optional
    ``time``/``update`` attributes and the ``text`` payload onto it.

    :returns: Updated XML element
    :rtype: lxml.etree._Element
    """
    if not hasattr(self, 'xml_element'):
        self.xml_element = etree.Element(self.name, nsmap=NSMAP)
    element = self.xml_element
    if hasattr(self, 'time'):
        element.set('time', self.time_to_str())
    if hasattr(self, 'update'):
        element.set('update', str(self.update))
    element.text = self.text
    return element
|
def stop_process(self):
    """Stop the process.

    Escalates through SIGINT, SIGTERM, SIGKILL, waiting up to 5 seconds for
    the process to exit after each signal.

    :raises: EnvironmentError if stopping fails due to unknown environment
             TestStepError if process stops with non-default returncode and
             return code is not ignored.
    """
    # Stop the reader thread first so it does not race with teardown.
    if self.read_thread is not None:
        self.logger.debug("stop_process::readThread.stop()-in")
        self.read_thread.stop()
        self.logger.debug("stop_process::readThread.stop()-out")
    returncode = None
    if self.proc:
        self.logger.debug("os.killpg(%d)", self.proc.pid)
        # Escalating signal ladder: gentle first, then harder.
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
            timeout = 5  # seconds to wait for exit after each signal
            try:
                try:
                    self.logger.debug("Trying signal %s", sig)
                    os.killpg(self.proc.pid, sig)
                except AttributeError:
                    self.logger.debug("os.killpg::AttributeError")
                    # Failed most likely because in windows,
                    # so use taskkill to kill whole process tree of proc
                    if platform.system() == "Windows":
                        subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)])
                    else:
                        self.logger.debug("os.killpg::unknown env")
                        raise EnvironmentError("Unknown platform, " "don't know how to terminate process")
                # Poll once per second until the process exits or we give up
                # and escalate to the next signal.
                while self.proc.poll() is None and timeout > 0:
                    time.sleep(1)
                    timeout -= 1
                returncode = self.proc.poll()
                if returncode is not None:
                    # Process exited; no further escalation needed.
                    break
            except OSError as error:
                # e.g. the process group is already gone; try the next signal.
                self.logger.info("os.killpg::OSError: %s", error)
        self.proc = None
    if returncode is not None:
        self.logger.debug("Process stopped with returncode %s" % returncode)
        if returncode != self.default_retcode and not self.__ignore_return_code:
            raise TestStepError("Process stopped with returncode %d" % returncode)
    self.logger.debug("stop_process-out")
|
def find_matching_link(self, mode, group, addr):
    """Find a matching link in the current device.

    mode: 'c'|'r' -- the mode of the link in the *linked* device; this
    method searches for a corresponding link in the reverse direction.
    group: All-Link group number
    addr: Insteon address of the linked device

    Returns the last matching record, or None when mode is invalid or no
    record matches.
    """
    if mode.lower() not in ['c', 'r']:
        return None
    # Reverse direction: a remote responder ('r') matches our controller.
    want_controller = mode.lower() == 'r'
    target_group = int(group)
    target_addr = Address(addr)
    match = None
    for mem_addr in self:
        rec = self[mem_addr]
        if want_controller:
            direction_ok = rec.control_flags.is_controller
        else:
            direction_ok = rec.control_flags.is_responder
        if direction_ok and rec.group == target_group and rec.address == target_addr:
            match = rec
    return match
|
def add_records(self, domain, records):
    """Adds the specified DNS records to a domain.

    :param domain: the domain to add the records to
    :param records: the records to add
    :returns: True when the PATCH request completed without raising
    """
    endpoint = self.API_TEMPLATE + self.RECORDS.format(domain=domain)
    self._patch(endpoint, json=records)
    self.logger.debug('Added records @ {}'.format(records))
    # If we didn't get any exceptions, return True to let the user know
    return True
|
def add_affect(self, name, src, dest, val, condition=None):
    """Record that param 'src' affects param 'dest' in the affects list."""
    affect = ParamAffects(name, src, dest, val, condition)
    self.affects.append(affect)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.