signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def handle_format(self, t, i):
    """Handle a format brace character.

    ``t`` is the brace just read ('{' or '}') and ``i`` is the input
    iterator.  A doubled brace ('{{' or '}}') escapes to a literal brace;
    a single '{' opens a replacement field; a lone '}' is a syntax error.
    """
    following = self.format_next(i)
    if t == '{':
        if following == '{':
            # '{{' -> literal '{'.
            self.get_single_stack()
            self.result.append(following)
        else:
            # Start of a replacement field; parse and handle it.
            field, text = self.get_format(following, i)
            self.handle_format_group(field, text)
    elif following == '}':
        # '}}' -> literal '}'.
        self.get_single_stack()
        self.result.append(following)
    else:
        raise SyntaxError("Unmatched '}' at %d!" % (i.index - 2))
def get_repository_lookup_session(self, proxy, *args, **kwargs):
    """Gets the repository lookup session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.RepositoryLookupSession) - a
            RepositoryLookupSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_repository_lookup() is false
    compliance: optional - This method must be implemented if
            supports_repository_lookup() is true.
    """
    if not self.supports_repository_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # OperationFailed()
        raise
    # NOTE: the original also wrapped construction in
    # ``except AttributeError: raise`` which is a no-op re-raise,
    # so it is equivalent to not catching at all.
    return sessions.RepositoryLookupSession(
        self._convert_proxy(proxy), runtime=self._runtime, **kwargs)
def start_supporting_containers(self, log_syslog=False):
    """Start all supporting containers (containers required for CKAN to
    operate) if they aren't already running.

    :param log_syslog: A flag to redirect all container logs to host's syslog
    """
    # In production we always use the syslog driver (to aggregate all the logs).
    if self.always_prod:
        log_syslog = True
    task.start_supporting_containers(
        self.sitedir,
        self.target,
        self.passwords,
        self._get_container_name,
        self.extra_containers,
        log_syslog=log_syslog,
    )
def complete_event(self, event_id: str):
    """Move a processed event to the completed list.

    Raises KeyError if the event is not in the processed list.
    """
    processed = DB.get_list(self._processed_key)
    if event_id not in processed:
        raise KeyError('Unable to complete event. Event {} has not been '
                       'processed (ie. it is not in the processed '
                       'list).'.format(event_id))
    # Atomically (via pipeline) move the id from processed -> completed.
    DB.remove_from_list(self._processed_key, event_id, pipeline=True)
    completed_key = _keys.completed_events(self._object_type, self._subscriber)
    DB.append_to_list(completed_key, event_id, pipeline=True)
    DB.execute()
def _drop_definition(self):
    """Remove header definition associated with this section."""
    # Detach the headerReference element from sectPr, then discard the
    # now-orphaned header part it referenced.
    rel_id = self._sectPr.remove_headerReference(self._hdrftr_index)
    self._document_part.drop_header_part(rel_id)
def _digits(self):
    """Handle a digit key (0-9): accumulate it into the current numeric
    selection and move list focus to the matching item."""
    # Append the digit just typed to the running selection string.
    self.number += self.key
    try:
        if self.compact is False:
            # Non-compact view: the 1-based selection indexes items_com,
            # then is mapped back to a position in items.
            self.top.body.focus_position = self.items.index(self.items_com[max(int(self.number) - 1, 0)])
        else:
            self.top.body.focus_position = self.items.index(self.items[max(int(self.number) - 1, 0)])
    except IndexError:
        # Selection is out of range: drop the digit just typed.
        # NOTE(review): the placement of the keypress call inside this
        # except block was reconstructed from a flattened source -- confirm.
        self.number = self.number[:-1]
        self.top.keypress(self.size, "")
    # Trick urwid into redisplaying the cursor
    if self.number:
        self._footer_start_thread("Selection: {}".format(self.number), 1)
def insert_entity(self, entity):
    '''Adds an insert entity operation to the batch. See
    :func:`~azure.storage.table.tableservice.TableService.insert_entity` for more
    information on inserts.

    The operation will not be executed until the batch is committed.

    :param entity:
        The entity to insert. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`azure.storage.table.models.Entity`
    '''
    # Build the insert request first, then queue it under the entity's keys.
    op_request = _insert_entity(entity)
    self._add_to_batch(entity['PartitionKey'], entity['RowKey'], op_request)
def run ( configobj ) :
"""Primary Python interface for image registration code
This task replaces ' tweakshifts '""" | print ( 'TweakReg Version %s(%s) started at: %s \n' % ( __version__ , __version_date__ , util . _ptime ( ) [ 0 ] ) )
util . print_pkg_versions ( )
# make sure ' updatewcs ' is set to False when running from GUI or if missing
# from configObj :
if 'updatewcs' not in configobj :
configobj [ 'updatewcs' ] = False
# Check to see whether or not the imagefindpars parameters have
# already been loaded , as done through the python interface .
# Repeat for refimagefindpars
if PSET_SECTION not in configobj : # Manage PSETs for source finding algorithms
_managePsets ( configobj , PSET_SECTION , imagefindpars . __taskname__ )
# print configobj [ PSET _ SECTION ]
if PSET_SECTION_REFIMG not in configobj : # Manage PSETs for source finding algorithms in reference image
_managePsets ( configobj , PSET_SECTION_REFIMG , refimagefindpars . __taskname__ )
log . debug ( '' )
log . debug ( "==== TweakReg was invoked with the following parameters: ====" )
log . debug ( '' )
util . print_cfg ( configobj , log . debug )
# print out user set input parameter values for running this task
log . info ( '' )
log . info ( "USER INPUT PARAMETERS common to all Processing Steps:" )
util . printParams ( configobj , log = log )
# start interpretation of input parameters
input_files = configobj [ 'input' ]
# Start by interpreting the inputs
use_catfile = True
expand_refcat = configobj [ 'expand_refcat' ]
enforce_user_order = configobj [ 'enforce_user_order' ]
filenames , catnames = tweakutils . parse_input ( input_files , sort_wildcards = not enforce_user_order )
catdict = { }
for indx , f in enumerate ( filenames ) :
if catnames is not None and len ( catnames ) > 0 :
catdict [ f ] = catnames [ indx ]
else :
catdict [ f ] = None
if not filenames :
print ( 'No filenames matching input %r were found.' % input_files )
raise IOError
# Verify that files are writable ( based on file permissions ) so that
# they can be updated if either ' updatewcs ' or ' updatehdr ' have
# been turned on ( 2 cases which require updating the input files )
if configobj [ 'updatewcs' ] or configobj [ 'UPDATE HEADER' ] [ 'updatehdr' ] :
filenames = util . verifyFilePermissions ( filenames )
if filenames is None or len ( filenames ) == 0 :
raise IOError
if configobj [ 'UPDATE HEADER' ] [ 'updatehdr' ] :
wname = configobj [ 'UPDATE HEADER' ] [ 'wcsname' ]
# verify that a unique WCSNAME has been specified by the user
if not configobj [ 'UPDATE HEADER' ] [ 'reusename' ] :
for fname in filenames :
uniq = util . verifyUniqueWcsname ( fname , wname )
if not uniq :
errstr = 'WCSNAME "%s" already present in "%s". ' % ( wname , fname ) + 'A unique value for the "wcsname" parameter needs to be ' + 'specified. \n\nQuitting!'
print ( textutil . textbox ( errstr , width = 60 ) )
raise IOError
if configobj [ 'updatewcs' ] :
print ( '\nRestoring WCS solutions to original state using updatewcs...\n' )
updatewcs . updatewcs ( filenames )
if catnames in [ None , '' , ' ' , 'INDEF' ] or len ( catnames ) == 0 :
catfile_par = configobj [ 'COORDINATE FILE DESCRIPTION' ] [ 'catfile' ]
# check to see whether the user specified input catalogs through other parameters
if catfile_par not in [ None , '' , ' ' , 'INDEF' ] : # read in catalog file list provided by user
catnames , catdict = tweakutils . parse_atfile_cat ( '@' + catfile_par )
else :
use_catfile = False
if 'exclusions' in configobj and configobj [ 'exclusions' ] not in [ None , '' , ' ' , 'INDEF' ] :
if os . path . exists ( configobj [ 'exclusions' ] ) :
excl_files , excl_dict = tweakutils . parse_atfile_cat ( '@' + configobj [ 'exclusions' ] )
# make sure the dictionary is well formed and that keys are base
# file names and that exclusion files have been expanded :
exclusion_files = [ ]
exclusion_dict = { }
rootpath = os . path . abspath ( os . path . split ( configobj [ 'exclusions' ] ) [ 0 ] )
for f in excl_dict . keys ( ) :
print ( f )
bf = os . path . basename ( f )
exclusion_files . append ( bf )
reglist = excl_dict [ f ]
if reglist is None :
exclusion_dict [ bf ] = None
continue
new_reglist = [ ]
for regfile in reglist :
if regfile in [ None , 'None' , '' , ' ' , 'INDEF' ] :
new_reglist . append ( None )
else :
abs_regfile = os . path . normpath ( os . path . join ( rootpath , regfile ) )
new_reglist . append ( abs_regfile )
exclusion_dict [ bf ] = new_reglist
else :
raise IOError ( 'Could not find specified exclusions file "{:s}"' . format ( configobj [ 'exclusions' ] ) )
else :
exclusion_files = [ None ] * len ( filenames )
exclusion_dict = { }
for f in filenames :
exclusion_dict [ os . path . basename ( f ) ] = None
# Verify that we have the same number of catalog files as input images
if catnames is not None and ( len ( catnames ) > 0 ) :
missed_files = [ ]
for f in filenames :
if f not in catdict :
missed_files . append ( f )
if len ( missed_files ) > 0 :
print ( 'The input catalogs does not contain entries for the following images:' )
print ( missed_files )
raise IOError
else : # setup array of None values as input to catalog parameter for Image class
catnames = [ None ] * len ( filenames )
use_catfile = False
# convert input images and any provided catalog file names into Image objects
input_images = [ ]
# copy out only those parameters needed for Image class
catfile_kwargs = tweakutils . get_configobj_root ( configobj )
# define default value for ' xyunits ' assuming sources to be derived from image directly
catfile_kwargs [ 'xyunits' ] = 'pixels'
# initialized here , required by Image class
del catfile_kwargs [ 'exclusions' ]
if use_catfile : # reset parameters based on parameter settings in this section
catfile_kwargs . update ( configobj [ 'COORDINATE FILE DESCRIPTION' ] )
for sort_par in imgclasses . sortKeys :
catfile_kwargs [ 'sort_' + sort_par ] = catfile_kwargs [ sort_par ]
# Update parameter set with ' SOURCE FINDING PARS ' now
catfile_kwargs . update ( configobj [ PSET_SECTION ] )
uphdr_par = configobj [ 'UPDATE HEADER' ]
hdrlet_par = configobj [ 'HEADERLET CREATION' ]
objmatch_par = configobj [ 'OBJECT MATCHING PARAMETERS' ]
catfit_pars = configobj [ 'CATALOG FITTING PARAMETERS' ]
catfit_pars [ 'minobj' ] = objmatch_par [ 'minobj' ]
objmatch_par [ 'residplot' ] = catfit_pars [ 'residplot' ]
hdrlet_par . update ( uphdr_par )
# default hdrlet name
catfile_kwargs [ 'updatehdr' ] = uphdr_par [ 'updatehdr' ]
shiftpars = configobj [ 'OPTIONAL SHIFTFILE OUTPUT' ]
# verify a valid hdrname was provided , if headerlet was set to True
imgclasses . verify_hdrname ( ** hdrlet_par )
print ( '' )
print ( 'Finding shifts for: ' )
for f in filenames :
print ( ' {}' . format ( f ) )
print ( '' )
log . info ( "USER INPUT PARAMETERS for finding sources for each input image:" )
util . printParams ( catfile_kwargs , log = log )
log . info ( '' )
try :
minsources = max ( 1 , catfit_pars [ 'minobj' ] )
omitted_images = [ ]
all_input_images = [ ]
for imgnum in range ( len ( filenames ) ) : # Create Image instances for all input images
try :
regexcl = exclusion_dict [ os . path . basename ( filenames [ imgnum ] ) ]
except KeyError :
regexcl = None
pass
img = imgclasses . Image ( filenames [ imgnum ] , input_catalogs = catdict [ filenames [ imgnum ] ] , exclusions = regexcl , ** catfile_kwargs )
all_input_images . append ( img )
if img . num_sources < minsources :
warn_str = "Image '{}' will not be aligned " "since it contains fewer than {} sources." . format ( img . name , minsources )
print ( '\nWARNING: {}\n' . format ( warn_str ) )
log . warning ( warn_str )
omitted_images . append ( img )
continue
input_images . append ( img )
except KeyboardInterrupt :
for img in input_images :
img . close ( )
print ( 'Quitting as a result of user request (Ctrl-C)...' )
return
# create set of parameters to pass to RefImage class
kwargs = tweakutils . get_configobj_root ( configobj )
# Determine a reference image or catalog and
# return the full list of RA / Dec positions
# Determine what WCS needs to be used for reference tangent plane
refcat_par = configobj [ 'REFERENCE CATALOG DESCRIPTION' ]
if refcat_par [ 'refcat' ] not in [ None , '' , ' ' , 'INDEF' ] : # User specified a catalog to use
# Update kwargs with reference catalog parameters
kwargs . update ( refcat_par )
# input _ images list can be modified below .
# So , make a copy of the original :
input_images_orig_copy = copy ( input_images )
do_match_refimg = False
# otherwise , extract the catalog from the first input image source list
if configobj [ 'refimage' ] not in [ None , '' , ' ' , 'INDEF' ] : # User specified an image to use
# A hack to allow different source finding parameters for
# the reference image :
ref_sourcefind_pars = tweakutils . get_configobj_root ( configobj [ PSET_SECTION_REFIMG ] )
ref_catfile_kwargs = catfile_kwargs . copy ( )
ref_catfile_kwargs . update ( ref_sourcefind_pars )
ref_catfile_kwargs [ 'updatehdr' ] = False
log . info ( '' )
log . info ( "USER INPUT PARAMETERS for finding sources for " "the reference image:" )
util . printParams ( ref_catfile_kwargs , log = log )
# refimg = imgclasses . Image ( configobj [ ' refimage ' ] , * * catfile _ kwargs )
# Check to see whether the user specified a separate catalog
# of reference source positions and replace default source list with it
if refcat_par [ 'refcat' ] not in [ None , '' , ' ' , 'INDEF' ] : # User specified a catalog to use
ref_source = refcat_par [ 'refcat' ]
cat_src = ref_source
xycat = None
cat_src_type = 'catalog'
else :
try :
regexcl = exclusion_dict [ configobj [ 'refimage' ] ]
except KeyError :
regexcl = None
pass
refimg = imgclasses . Image ( configobj [ 'refimage' ] , exclusions = regexcl , ** ref_catfile_kwargs )
ref_source = refimg . all_radec
cat_src = None
xycat = refimg . xy_catalog
cat_src_type = 'image'
try :
if 'use_sharp_round' in ref_catfile_kwargs :
kwargs [ 'use_sharp_round' ] = ref_catfile_kwargs [ 'use_sharp_round' ]
refimage = imgclasses . RefImage ( configobj [ 'refimage' ] , ref_source , xycatalog = xycat , cat_origin = cat_src , ** kwargs )
refwcs_fname = refimage . wcs . filename
if cat_src is not None :
refimage . name = cat_src
except KeyboardInterrupt :
refimage . close ( )
for img in input_images :
img . close ( )
print ( 'Quitting as a result of user request (Ctrl-C)...' )
return
if len ( input_images ) < 1 :
warn_str = "Fewer than two images are available for alignment. " "Quitting..."
print ( '\nWARNING: {}\n' . format ( warn_str ) )
log . warning ( warn_str )
for img in input_images :
img . close ( )
return
image = _max_overlap_image ( refimage , input_images , expand_refcat , enforce_user_order )
elif refcat_par [ 'refcat' ] not in [ None , '' , ' ' , 'INDEF' ] : # a reference catalog is provided but not the reference image / wcs
if len ( input_images ) < 1 :
warn_str = "No images available for alignment. Quitting..."
print ( '\nWARNING: {}\n' . format ( warn_str ) )
log . warning ( warn_str )
for img in input_images :
img . close ( )
return
if len ( input_images ) == 1 :
image = input_images . pop ( 0 )
else :
image , image2 = _max_overlap_pair ( input_images , expand_refcat , enforce_user_order )
input_images . insert ( 0 , image2 )
# Workaround the defect described in ticket :
# http : / / redink . stsci . edu / trac / ssb / stsci _ python / ticket / 1151
refwcs = [ ]
for i in all_input_images :
refwcs . extend ( i . get_wcs ( ) )
kwargs [ 'ref_wcs_name' ] = image . get_wcs ( ) [ 0 ] . filename
# A hack to allow different source finding parameters for
# the reference image :
ref_sourcefind_pars = tweakutils . get_configobj_root ( configobj [ PSET_SECTION_REFIMG ] )
ref_catfile_kwargs = catfile_kwargs . copy ( )
ref_catfile_kwargs . update ( ref_sourcefind_pars )
ref_catfile_kwargs [ 'updatehdr' ] = False
log . info ( '' )
log . info ( "USER INPUT PARAMETERS for finding sources for " "the reference image (not used):" )
util . printParams ( ref_catfile_kwargs , log = log )
ref_source = refcat_par [ 'refcat' ]
cat_src = ref_source
xycat = None
try :
if 'use_sharp_round' in ref_catfile_kwargs :
kwargs [ 'use_sharp_round' ] = ref_catfile_kwargs [ 'use_sharp_round' ]
kwargs [ 'find_bounding_polygon' ] = True
refimage = imgclasses . RefImage ( refwcs , ref_source , xycatalog = xycat , cat_origin = cat_src , ** kwargs )
refwcs_fname = refimage . wcs . filename
refimage . name = cat_src
cat_src_type = 'catalog'
except KeyboardInterrupt :
refimage . close ( )
for img in input_images :
img . close ( )
print ( 'Quitting as a result of user request (Ctrl-C)...' )
return
else :
if len ( input_images ) < 2 :
warn_str = "Fewer than two images available for alignment. " "Quitting..."
print ( '\nWARNING: {}\n' . format ( warn_str ) )
log . warning ( warn_str )
for img in input_images :
img . close ( )
return
kwargs [ 'use_sharp_round' ] = catfile_kwargs [ 'use_sharp_round' ]
cat_src = None
refimg , image = _max_overlap_pair ( input_images , expand_refcat , enforce_user_order )
refwcs = [ ]
# refwcs . extend ( refimg . get _ wcs ( ) )
# refwcs . extend ( image . get _ wcs ( ) )
# for i in input _ images :
# refwcs . extend ( i . get _ wcs ( ) )
# Workaround the defect described in ticket :
# http : / / redink . stsci . edu / trac / ssb / stsci _ python / ticket / 1151
for i in all_input_images :
refwcs . extend ( i . get_wcs ( ) )
kwargs [ 'ref_wcs_name' ] = refimg . get_wcs ( ) [ 0 ] . filename
try :
ref_source = refimg . all_radec
refimage = imgclasses . RefImage ( refwcs , ref_source , xycatalog = refimg . xy_catalog , ** kwargs )
refwcs_fname = refimg . name
cat_src_type = 'image'
except KeyboardInterrupt :
refimage . close ( )
for img in input_images :
img . close ( )
print ( 'Quitting as a result of user request (Ctrl-C)...' )
return
omitted_images . insert ( 0 , refimg )
# refimage * must * be first
do_match_refimg = True
print ( "\n{0}\nPerforming alignment in the projection plane defined by the " "WCS\nderived from '{1}'\n{0}\n" . format ( '=' * 63 , refwcs_fname ) )
if refimage . outxy is not None :
if cat_src is None :
cat_src = refimage . name
try :
log . info ( "USER INPUT PARAMETERS for matching sources:" )
util . printParams ( objmatch_par , log = log )
log . info ( '' )
log . info ( "USER INPUT PARAMETERS for fitting source lists:" )
util . printParams ( configobj [ 'CATALOG FITTING PARAMETERS' ] , log = log )
if hdrlet_par [ 'headerlet' ] :
log . info ( '' )
log . info ( "USER INPUT PARAMETERS for creating headerlets:" )
util . printParams ( hdrlet_par , log = log )
if shiftpars [ 'shiftfile' ] :
log . info ( '' )
log . info ( "USER INPUT PARAMETERS for creating a shiftfile:" )
util . printParams ( shiftpars , log = log )
# Now , apply reference WCS to each image ' s sky positions as well as the
# reference catalog sky positions ,
# then perform the fit between the reference catalog positions and
# each image ' s positions
quit_immediately = False
xycat_lines = ''
xycat_filename = None
for img in input_images_orig_copy :
if xycat_filename is None :
xycat_filename = img . rootname + '_xy_catfile.list'
# Keep a record of all the generated input _ xy catalogs
xycat_lines += img . get_xy_catnames ( )
retry_flags = len ( input_images ) * [ 0 ]
objmatch_par [ 'cat_src_type' ] = cat_src_type
while image is not None :
print ( '\n' + '=' * 20 )
print ( 'Performing fit for: {}\n' . format ( image . name ) )
image . match ( refimage , quiet_identity = False , ** objmatch_par )
assert ( len ( retry_flags ) == len ( input_images ) )
if not image . goodmatch : # we will try to match it again once reference catalog
# has expanded with new sources :
# if expand _ refcat :
input_images . append ( image )
retry_flags . append ( 1 )
if len ( retry_flags ) > 0 and retry_flags [ 0 ] == 0 :
retry_flags . pop ( 0 )
image = input_images . pop ( 0 )
# try to match next image in the list
continue
else : # no new sources have been added to the reference
# catalog and we have already tried to match
# images to the existing reference catalog
# input _ images . append ( image ) # < - add it back for later reporting
# retry _ flags . append ( 1)
break
image . performFit ( ** catfit_pars )
if image . quit_immediately :
quit_immediately = True
image . close ( )
break
# add unmatched sources to the reference catalog
# ( to expand it ) :
if expand_refcat :
refimage . append_not_matched_sources ( image )
image . updateHeader ( wcsname = uphdr_par [ 'wcsname' ] , reusename = uphdr_par [ 'reusename' ] )
if hdrlet_par [ 'headerlet' ] :
image . writeHeaderlet ( ** hdrlet_par )
if configobj [ 'clean' ] :
image . clean ( )
image . close ( )
if refimage . dirty and len ( input_images ) > 0 : # The reference catalog has been updated with new sources .
# Clear retry flags and get next image :
image = _max_overlap_image ( refimage , input_images , expand_refcat , enforce_user_order )
retry_flags = len ( input_images ) * [ 0 ]
refimage . clear_dirty_flag ( )
elif len ( input_images ) > 0 and retry_flags [ 0 ] == 0 :
retry_flags . pop ( 0 )
image = input_images . pop ( 0 )
else :
break
assert ( len ( retry_flags ) == len ( input_images ) )
if not quit_immediately : # process images that have not been matched in order to
# update their headers :
si = 0
if do_match_refimg :
image = omitted_images [ 0 ]
image . match ( refimage , quiet_identity = True , ** objmatch_par )
si = 1
# process omitted ( from start ) images separately :
for image in omitted_images [ si : ] :
image . match ( refimage , quiet_identity = False , ** objmatch_par )
# add to the list of omitted images , images that could not
# be matched :
omitted_images . extend ( input_images )
if len ( input_images ) > 0 :
print ( "\nUnable to match the following images:" )
print ( "-------------------------------------" )
for image in input_images :
print ( image . name )
print ( "" )
# update headers :
for image in omitted_images :
image . performFit ( ** catfit_pars )
if image . quit_immediately :
quit_immediately = True
image . close ( )
break
image . updateHeader ( wcsname = uphdr_par [ 'wcsname' ] , reusename = uphdr_par [ 'reusename' ] )
if hdrlet_par [ 'headerlet' ] :
image . writeHeaderlet ( ** hdrlet_par )
if configobj [ 'clean' ] :
image . clean ( )
image . close ( )
if configobj [ 'writecat' ] and not configobj [ 'clean' ] : # Write out catalog file recording input XY catalogs used
# This file will be suitable for use as input to ' tweakreg '
# as the ' catfile ' parameter
if os . path . exists ( xycat_filename ) :
os . remove ( xycat_filename )
f = open ( xycat_filename , mode = 'w' )
f . writelines ( xycat_lines )
f . close ( )
if expand_refcat :
base_reg_name = os . path . splitext ( os . path . basename ( cat_src ) ) [ 0 ]
refimage . write_skycatalog ( 'cumulative_sky_refcat_{:s}.coo' . format ( base_reg_name ) , show_flux = True , show_id = True )
# write out shiftfile ( if specified )
if shiftpars [ 'shiftfile' ] :
tweakutils . write_shiftfile ( input_images_orig_copy , shiftpars [ 'outshifts' ] , outwcs = shiftpars [ 'outwcs' ] )
except KeyboardInterrupt :
refimage . close ( )
for img in input_images_orig_copy :
img . close ( )
del img
print ( 'Quitting as a result of user request (Ctrl-C)...' )
return
else :
print ( 'No valid sources in reference frame. Quitting...' )
return |
def to_credentials(arg):
    '''to_credentials(arg) converts arg into a pair (key, secret) if arg can be
    coerced into such a pair and otherwise raises an error.

    Possible inputs include:
      * A tuple (key, secret)
      * A mapping with the keys 'key' and 'secret'
      * The name of a file that can load credentials via the
        load_credentials() function
      * A string that separates the key and secret by ':',
        e.g., 'mykey:mysecret'
      * A string that separates the key and secret by a "\\n",
        e.g., "mykey\\nmysecret"
    '''
    if pimms.is_str(arg):
        # A string may name a credentials file; try that first.
        try:
            return load_credentials(arg)
        except Exception:
            pass
        # Otherwise the string may itself encode the credentials.
        try:
            return str_to_credentials(arg)
        except Exception:
            raise ValueError('String "%s" is neither a file containing credentials nor a valid'
                             ' credentials string itself.' % arg)
    if pimms.is_map(arg) and 'key' in arg and 'secret' in arg:
        return (arg['key'], arg['secret'])
    if pimms.is_vector(arg, str) and len(arg) == 2:
        return tuple(arg)
    raise ValueError('given argument cannot be coerced to credentials: %s' % arg)
def read(url, encoding=None, cache=None, mode="rb"):
    """Read from any URL.

    Internally differentiates between URLs supported by tf.gfile, such as URLs
    with the Google Cloud Storage scheme ('gs://...') or local paths, and HTTP
    URLs. This way users don't need to know about the underlying fetch mechanism.

    Args:
      url: a URL including scheme or a local path
      mode: mode in which to open the file. defaults to binary ('rb')
      encoding: if specified, encoding that should be used to decode read data
      cache: whether to attempt caching the resource. Defaults to True only if
        the given URL specifies a remote resource.

    Returns:
      All bytes from the specified resource, or a decoded string of those.
    """
    with read_handle(url, cache, mode=mode) as handle:
        raw = handle.read()
    return raw.decode(encoding) if encoding else raw
def _addToHosts(self, node, destinationIP=None):
    """Add a "privateIP hostname" line to the /etc/hosts file. If destinationIP
    is given, do this on the remote machine.

    Azure VMs sometimes fail to initialize, causing the appliance to fail;
    the DNS service may not be able to resolve the node's name. This method
    is a fix for that.

    :param node: Node to add to /etc/hosts.
    :param destinationIP: A remote host's address
    """
    cmd = "echo %s %s | sudo tee --append /etc/hosts > /dev/null" % (node.privateIP, node.name)
    logger.debug("Running command %s on %s" % (cmd, destinationIP))
    # NOTE(review): fire-and-forget -- the Popen handles are not waited on.
    if not destinationIP:
        subprocess.Popen(cmd, shell=True)
    else:
        subprocess.Popen(["ssh", "-oStrictHostKeyChecking=no",
                          "core@%s" % destinationIP, cmd])
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None, timeout_for_request=None, allow_fallback=True):
    """Purchase options for stable spot instances.

    `maximum_bid_price_percentage`: Maximum value to bid for stable node spot
        instances, expressed as a percentage of the base price
        (applies to both master and slave nodes).
    `timeout_for_request`: Timeout for a stable node spot instance request
        (Unit: minutes)
    `allow_fallback`: Whether to fallback to on-demand instances for
        stable nodes if spot instances are not available
    """
    settings = {
        'maximum_bid_price_percentage': maximum_bid_price_percentage,
        'timeout_for_request': timeout_for_request,
        'allow_fallback': allow_fallback,
    }
    self.hadoop_settings['stable_spot_instance_settings'] = settings
def flatten_union(table):
    """Extract all union queries from `table`.

    Parameters
    table : TableExpr

    Returns
    Iterable[Union[TableExpr, bool]]
    """
    node = table.op()
    if not isinstance(node, ops.Union):
        # Leaf: not a union, yield the table itself.
        return [table]
    # Recurse into both sides, interleaving the union's distinct flag.
    return toolz.concatv(
        flatten_union(node.left), [node.distinct], flatten_union(node.right))
def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
    """Identify "stationary" epochs within a time series, based on a
    continuous measure of variability.

    Epochs are defined to contain the points of minimal variability, and to
    extend as wide as possible with variability not exceeding the threshold.

    Args:
      ts          Timeseries of m variables, shape (n, m).
      variability (optional) Timeseries of shape (n, m, q), giving q scalar
                  measures of the variability of timeseries `ts` near each
                  point in time. (if None, we will use variability_fp())
                  Epochs require the mean of these to be below the threshold.
      threshold   The maximum variability permitted in stationary epochs.
      minlength   Shortest acceptable epoch length (in seconds)
      plot        bool Whether to display the output

    Returns: (variability, allchannels_epochs)
      variability: as above (reshaped to (n, m, q))
      allchannels_epochs: (list of) list of tuples
          For each variable, a list of tuples (start, end) that give the
          starting and ending indices of stationary epochs.
          (epochs are inclusive of start point but not the end point)
    """
    if variability is None:
        variability = ts.variability_fp(plot=False)
    orig_ndim = ts.ndim
    # Bug fix: integer comparisons used `is` (e.g. `ts.ndim is 1`), which is
    # implementation-dependent and a SyntaxWarning since Python 3.8; use `==`.
    # Normalize shapes so ts is (n, channels) and variability is (n, channels, q).
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    if variability.ndim == 1:
        variability = variability[:, np.newaxis, np.newaxis]
    elif variability.ndim == 2:
        variability = variability[:, np.newaxis, :]
    channels = ts.shape[1]
    n = len(ts)
    dt = (1.0 * ts.tspan[-1] - ts.tspan[0]) / (n - 1)
    fs = 1.0 / dt
    allchannels_epochs = []
    for i in range(channels):
        v = variability[:, i, :]
        # mean of q different variability measures
        v = np.nanmean(v, axis=1)
        # then smooth the variability with a low-pass filter
        nonnan_ix = np.nonzero(~np.isnan(v))[0]
        crit_freq = 1.0  # Hz
        b, a = signal.butter(3, 2.0 * crit_freq / fs)
        v[nonnan_ix] = signal.filtfilt(b, a, v[nonnan_ix])
        # find all local minima of the variability not exceeding the threshold
        mid = v[1:-1]
        left = v[0:-2]
        right = v[2:]
        minima = np.nonzero(~np.isnan(mid) & ~np.isnan(left) & ~np.isnan(right) &
                            (mid <= threshold) & (mid - left < 0) &
                            (right - mid > 0))[0] + 1
        if len(minima) == 0:
            print(u'Channel %d: no epochs found using threshold %g' % (i, threshold))
            allchannels_epochs.append([])
        else:
            # Sort the list of minima by ascending variability
            minima = minima[np.argsort(v[minima])]
            epochs = []
            for m in minima:
                # Check this minimum is not inside an existing epoch
                overlap = False
                for e in epochs:
                    if m >= e[0] and m <= e[1]:
                        overlap = True
                        break
                if not overlap:
                    # Get largest subthreshold interval surrounding the minimum
                    startix = m - 1
                    endix = m + 1
                    for startix in range(m - 1, 0, -1):
                        if np.isnan(v[startix]) or v[startix] > threshold:
                            startix += 1
                            break
                    # NOTE(review): index 0 can never be included in an epoch
                    # because the backward scan stops at index 1 -- confirm
                    # this is intended before "fixing" it.
                    for endix in range(m + 1, len(v), 1):
                        if np.isnan(v[endix]) or v[endix] > threshold:
                            break
                    if (endix - startix) * dt >= minlength:
                        epochs.append((startix, endix))
            allchannels_epochs.append(epochs)
    if plot:
        _plot_variability(ts, variability, threshold, allchannels_epochs)
    if orig_ndim == 1:
        # 1-D input: unwrap the single channel's epoch list.
        allchannels_epochs = allchannels_epochs[0]
    return (variability, allchannels_epochs)
def step(self, action):
    """Pass action to underlying environment(s) or perform special action."""
    if action in self._player_actions():
        # Special codes are handled by their registered callbacks.
        step_tuples = self._player_actions()[action]()
    elif self._wait and action == self.name_to_action_num["NOOP"]:
        # In wait mode a no-op is ignored: replay the previous step result
        # instead of advancing the environment(s).
        step_tuples = self._last_step_tuples
    else:
        # Translate the wait-mode no-op code, then act on the environment(s).
        if action == self.WAIT_MODE_NOOP_ACTION:
            action = self.name_to_action_num["NOOP"]
        step_tuples = self._step_envs(action)
        self._update_statistics(step_tuples)
    self._last_step_tuples = step_tuples
    ob, reward, done, info = self._player_step_tuple(step_tuples)
    return ob, reward, done, info
def read_config(self):
    """Reads the configuration.

    This method can be overloaded to integrate with your application's own
    configuration mechanism. By default, a single 'status' file is read
    from the reports' directory.

    This should set `self.status` to one of the state constants, and make
    sure `self.location` points to a writable directory where the reports
    will be written.

    The possible values for `self.status` are:
    - `UNSET`: nothing has been selected and the user should be prompted
    - `ENABLED`: collect and upload reports
    - `DISABLED`: don't collect or upload anything, stop prompting
    - `ERRORED`: something is broken, and we can't do anything in this
      session (for example, the configuration directory is not writable)
    """
    if self.enabled and not os.path.isdir(self.location):
        try:
            os.makedirs(self.location, 0o700)
        except OSError:
            logger.warning("Couldn't create %s, usage statistics won't be "
                           "collected", self.location)
            self.status = Stats.ERRORED
    status_file = os.path.join(self.location, 'status')
    if self.enabled and os.path.exists(status_file):
        with open(status_file, 'r') as fp:
            status = fp.read().strip()
        # Map the stored token to the corresponding state constant;
        # any other content leaves self.status untouched.
        known = {'ENABLED': Stats.ENABLED, 'DISABLED': Stats.DISABLED}
        if status in known:
            self.status = known[status]
def value(self):
    """Return the spatial reference as a JSON-serializable dict.

    Prefers the well-known ID; falls back to well-known text only when no
    WKID is set but a WKT is available.

    Returns:
        dict: ``{"wkt": ...}`` when only WKT is known, otherwise
        ``{"wkid": ...}`` (``wkid`` may be ``None`` when neither is set,
        preserving the original behavior).
    """
    # Fix: compare to None with identity (`is`), not equality (PEP 8);
    # `== None` can misbehave with objects overriding __eq__.
    if self._wkid is None and self._wkt is not None:
        return {"wkt": self._wkt}
    return {"wkid": self._wkid}
def parse(url):
    """Parse a database URL in this format:

    [database type]://[username]:[password]@[host]:[port]/[database name]

    or, for cloud SQL:

    [database type]://[username]:[password]@[project_id]:[instance_name]/[database name]

    Returns a Django-style settings dict with NAME/USER/PASSWORD/HOST/PORT
    (plus INSTANCE for the local 'rdbms' stub and ENGINE for known schemes).
    """
    parsed = urlparse.urlparse(url)
    # Database name: strip the leading '/' and any query string.
    name = parsed.path[1:].split('?', 2)[0]
    try:
        port = parsed.port
        hostname = parsed.hostname
    except ValueError:
        # Malformed (non-numeric) port: treat as unset.
        port = None
    host_part = parsed.netloc.split('@')[-1]
    config = {}
    if parsed.scheme == 'rdbms':
        # Local appengine stub requires the INSTANCE parameter instead of a host.
        config['INSTANCE'] = host_part
        hostname = None
    else:
        hostname = "/cloudsql/{}".format(host_part)
    config.update({
        'NAME': name,
        'USER': parsed.username,
        'PASSWORD': parsed.password,
        'HOST': hostname,
        'PORT': port,
    })
    if parsed.scheme in SCHEMES:
        config['ENGINE'] = SCHEMES[parsed.scheme]
    return config
def _configure_root_logger(self):
    """Initialise the logging system.

    The root logger accepts everything (DEBUG); a single INFO-level
    handler writes either to stdout (verbose mode) or to a rotating
    log file.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if self.args.verbose:
        sink = logging.StreamHandler(sys.stdout)
    else:
        sink = logging.handlers.RotatingFileHandler(
            common.LOG_FILE,
            maxBytes=common.MAX_LOG_SIZE,
            backupCount=common.MAX_LOG_COUNT)
    sink.setLevel(logging.INFO)
    sink.setFormatter(logging.Formatter(common.LOG_FORMAT))
    root.addHandler(sink)
def is_list_of_list_of_states(self, arg):
    """Return True if *arg* is a list of lists of states.

    A "state" is a ``(variable, value)`` tuple, so a valid argument looks
    like::

        [[('x1', 'easy'), ('x2', 'hard')], [('x1', 'hard'), ('x2', 'medium')]]

    Args:
        arg: candidate object (may be None).

    Returns:
        bool: True if arg is a list of lists whose elements are all
        tuples, else False.
    """
    if arg is None:
        return False
    # Bug fix: the innermost check used to be
    # all((isinstance(i, tuple) for i in lst) for lst in arg) -- an
    # all() over *generator objects*, which are always truthy, so
    # malformed inner elements were never rejected.  Evaluate each inner
    # generator with its own all() instead.
    return (
        isinstance(arg, list)
        and all(isinstance(inner, list) for inner in arg)
        and all(all(isinstance(state, tuple) for state in inner) for inner in arg)
    )
def get_primary_keys(conn, table: str, schema='public'):
    """Yield the primary-key column names of *table*, in key order.

    Args:
        conn: open database connection.
        table: table name to inspect.
        schema: schema containing the table (default ``'public'``).

    Yields:
        str: each primary-key column name, ordered by ordinal position.
    """
    query = """\
SELECT
c.constraint_name AS pkey_constraint_name,
c.column_name AS column_name
FROM
information_schema.key_column_usage AS c
JOIN information_schema.table_constraints AS t
ON t.constraint_name = c.constraint_name
AND t.table_catalog = c.table_catalog
AND t.table_schema = c.table_schema
AND t.table_name = c.table_name
WHERE t.constraint_type = 'PRIMARY KEY'
AND c.table_schema=%s
AND c.table_name=%s
ORDER BY c.ordinal_position"""
    rows = select_dict(conn, query, params=(schema, table))
    yield from (row['column_name'] for row in rows)
def execute(self, timeSeries):
    """Smooth *timeSeries* with Holt's linear (double exponential) method.

    Uses the ``smoothingFactor`` (alpha) and ``trendSmoothingFactor``
    (beta) parameters; if ``valuesToForecast`` is positive, that many
    extrapolated points are appended, spaced by the last observed time
    increment.

    :return: TimeSeries object containing the smoothed TimeSeries,
        including the forecasted values.
    :rtype: TimeSeries
    :note: The first normalized value is chosen as the starting point.
    """
    # determine the number of values to forecast, if necessary
    self._calculate_values_to_forecast(timeSeries)
    # extract the required parameters once (performance)
    alpha = self._parameters["smoothingFactor"]
    beta = self._parameters["trendSmoothingFactor"]
    # initialize some variables
    resultList = []
    estimator = None
    trend = None
    lastT = None
    # bind the append method once -- avoids attribute lookups in the loop
    append = resultList.append
    # smooth the existing TimeSeries data
    # NOTE: xrange implies this module targets Python 2
    for idx in xrange(len(timeSeries)):
        # fetch the current entry once per iteration
        t = timeSeries[idx]
        # first data point: seed the level estimator with the observed value
        if estimator is None:
            estimator = t[1]
            lastT = t
            continue
        # second data point: emit the seed uncorrected and initialize the trend
        if 0 == len(resultList):
            append([t[0], estimator])
            trend = t[1] - lastT[1]
            # store current values for next iteration
            lastT = t
            lastEstimator = estimator
            continue
        # Holt update: new level from the observation plus the projected
        # (level + trend); new trend from the change in level
        estimator = alpha * t[1] + (1 - alpha) * (estimator + trend)
        trend = beta * (estimator - lastEstimator) + (1 - beta) * trend
        # add an entry to the result
        append([t[0], estimator])
        # store current values for next iteration
        lastT = t
        lastEstimator = estimator
    # forecast additional values if requested
    if self._parameters["valuesToForecast"] > 0:
        currentTime = resultList[-1][0]
        # assume uniform spacing: reuse the last observed time increment
        normalizedTimeDiff = currentTime - resultList[-2][0]
        for idx in xrange(1, self._parameters["valuesToForecast"] + 1):
            currentTime += normalizedTimeDiff
            # linear extrapolation from the final level using the final trend
            forecast = estimator + idx * trend
            # add a forecasted value
            append([currentTime, forecast])
    # return a TimeSeries containing the result
    return TimeSeries.from_twodim_list(resultList)
def to_smart_columns(data, headers=None, padding=2):
    """Format a 2-dimensional list of strings into aligned text columns.

    Args:
        data: rows of cell strings.
        headers: optional list of column header strings; when given, a
            header line and a dashed separator are prepended.
        padding: number of spaces added to each column's width.

    Returns:
        str: the formatted table, each line terminated by a newline
        (empty string for empty input).
    """
    col_widths = []

    def _grow_widths(row):
        # Widen col_widths so every cell seen so far fits its column.
        # (Previously this loop was duplicated for data and headers.)
        for idx, word in enumerate(row):
            if idx < len(col_widths):
                col_widths[idx] = max(len(word), col_widths[idx])
            else:
                col_widths.append(len(word))

    for row in data:
        _grow_widths(row)
    if headers:
        _grow_widths(headers)
    # Add padding
    col_widths = [width + padding for width in col_widths]
    total_width = sum(col_widths)
    # Build lines in a list and join once, instead of quadratic `+=`.
    lines = []
    if headers:
        lines.append("".join(word.ljust(col_widths[idx]) for idx, word in enumerate(headers)))
        lines.append('-' * total_width)
    for row in data:
        lines.append("".join(word.ljust(col_widths[idx]) for idx, word in enumerate(row)))
    return "".join(line + "\n" for line in lines)
def run_ppm_server(pdb_file, outfile, force_rerun=False):
    """Run the PPM server from OPM to predict transmembrane residues.

    Submits *pdb_file* to the PPM web service (or reuses previously saved
    HTML results) and scrapes the returned HTML tables into a dict.

    Args:
        pdb_file (str): Path to PDB file
        outfile (str): Path to output HTML results file
        force_rerun (bool): Flag to rerun PPM if HTML results file already exists

    Returns:
        dict: Dictionary of information from the PPM run, including a link
        to download the membrane protein file
    """
    if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
        # POST the PDB file to the PPM upload endpoint.
        url = 'http://sunshine.phar.umich.edu/upload_file.php'
        # NOTE(review): this file handle is never explicitly closed.
        files = {'userfile': open(pdb_file, 'rb')}
        r = requests.post(url, files=files)
        info = r.text
        # Save results in raw HTML format
        with open(outfile, 'w') as f:
            f.write(info)
    else:
        # Utilize existing saved results
        with open(outfile, 'r') as f:
            info = f.read()
    # Strip newlines/carriage returns/tabs so the parser sees one continuous document.
    t = info.replace('\n', '')
    tt = t.replace('\r', '')
    ttt = tt.replace('\t', '')
    soup = BeautifulSoup(ttt, "lxml")
    # Find all result tables (class="data") in the HTML code
    tables = soup.find_all("table", attrs={"class": "data"})
    info_dict = {}
    # Several tables are returned; their *order* determines their meaning
    # (0: thickness/energy/tilt, 1: embedded residues + TM segments,
    # 2: output messages, 3: download link) -- presumably stable for this
    # service; verify if the site layout changes.
    table_index = 0
    for t in tables:
        data_index = 0
        # rows with class "row1" carry the data cells
        for data in t.find_all('tr', attrs={"class": "row1"}):
            data_list = list(data.strings)
            if table_index == 0:
                info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
                info_dict['deltaG_transfer'] = data_list[2]
                info_dict['Tilt Angle'] = data_list[3]
            if table_index == 1 and data_index == 0:
                info_dict['Embedded_residues_Tilt'] = data_list[0]
                info_dict['Embedded_residues'] = data_list[1]
            if table_index == 1 and data_index == 1:
                info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
                info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]
            if table_index == 2:
                info_dict['Output Messages'] = data_list[1]
            if table_index == 3:
                # Make the relative download href absolute.
                baseurl = 'http://sunshine.phar.umich.edu/'
                a = data.find('a', href=True)
                download_url = baseurl + a['href'].replace('./', '')
                info_dict['Output file download link'] = download_url
            data_index += 1
        table_index += 1
    return info_dict
def get_assessment_taken_form_for_create(self, assessment_offered_id, assessment_taken_record_types):
    """Gets the assessment taken form for creating new assessments taken.

    A new form should be requested for each create transaction.

    arg:    assessment_offered_id (osid.id.Id): the ``Id`` of the
            related ``AssessmentOffered``
    arg:    assessment_taken_record_types (osid.type.Type[]): array
            of assessment taken record types to be included in the
            create operation or an empty list if none
    return: (osid.assessment.AssessmentTakenForm) - the assessment
            taken form
    raise:  NotFound - ``assessment_offered_id`` is not found
    raise:  NullArgument - ``assessment_offered_id`` or
            ``assessment_taken_record_types`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred (also
            raised when the offered assessment's deadline has passed)
    raise:  Unsupported - unable to get form for requested record types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Validate argument types before touching the provider.
    if not isinstance(assessment_offered_id, ABCId):
        raise errors.InvalidArgument('argument is not a valid OSID Id')
    for arg in assessment_taken_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    # Look up the referenced offered assessment through a federated bank view.
    am = self._get_provider_manager('ASSESSMENT')
    aols = am.get_assessment_offered_lookup_session(proxy=self._proxy)
    aols.use_federated_bank_view()
    offered = aols.get_assessment_offered(assessment_offered_id)
    # Enforce the deadline, if one is set on the offered assessment.
    try:
        deadline = offered.get_deadline()
        nowutc = DateTime.utcnow()
        if nowutc > deadline:
            raise errors.PermissionDenied('you are passed the deadline for the offered')
    except errors.IllegalState:  # no deadline set
        pass
    if assessment_taken_record_types == []:
        # WHY are we passing bank_id=self._catalog_id below, seems redundant:
        obj_form = objects.AssessmentTakenForm(bank_id=self._catalog_id, assessment_offered_id=assessment_offered_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy)
    else:
        obj_form = objects.AssessmentTakenForm(bank_id=self._catalog_id, record_types=assessment_taken_record_types, assessment_offered_id=assessment_offered_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy)
    # Mark the form as a creation (not update) form and track it so the
    # matching create call can verify it has not been used yet.
    obj_form._for_update = False
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
def __get_git_bin():
    """Locate the git executable.

    :return: an absolute path from the known alternatives when one
        exists on disk, otherwise the bare command name ``'git'``
        (resolved through PATH at invocation time).
    """
    alternatives = ['/usr/bin/git']
    # Fall back to the bare name so a PATH lookup still works.
    return next((alt for alt in alternatives if os.path.exists(alt)), 'git')
def merge(args):
    """%prog merge map1 map2 map3 ...

    Convert csv maps to bed format.

    Each input map is csv formatted, for example:
    ScaffoldID, ScaffoldPosition, LinkageGroup, GeneticPosition
    scaffold_2707,11508,1,0
    scaffold_2707,11525,1,1.2
    scaffold_759,81336,1,9.7
    """
    # NOTE: this docstring doubles as the CLI usage text below.
    p = OptionParser(merge.__doc__)
    p.add_option("-w", "--weightsfile", default="weights.txt", help="Write weights to file")
    p.set_outfile("out.bed")
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())
    maps = args
    outfile = opts.outfile
    # must_open over a list iterates lines from all files while tracking
    # which file the current line came from (fp.filename()).
    fp = must_open(maps)
    b = Bed()
    mapnames = set()
    for row in fp:
        mapname = filename_to_mapname(fp.filename())
        mapnames.add(mapname)
        try:
            m = CSVMapLine(row, mapname=mapname)
            if m.cm < 0:
                logging.error("Ignore marker with negative genetic distance")
                print(row.strip(), file=sys.stderr)
            else:
                b.append(BedLine(m.bedline))
        except (IndexError, ValueError):  # header or mal-formed line
            continue
    b.print_to_file(filename=outfile, sorted=True)
    logging.debug("A total of {0} markers written to `{1}`.".format(len(b), outfile))
    # Every input file must map to a distinct map name.
    assert len(maps) == len(mapnames), "You have a collision in map names"
    write_weightsfile(mapnames, weightsfile=opts.weightsfile)
def parse_connection_string(connect_str):
    """Parse a connection string such as those provided by the Azure portal.

    Connection string should be formatted like: `Key=Value;Key=Value;Key=Value`.
    The connection string will be parsed into a dictionary.  Only the first
    ``=`` in each segment splits key from value, so values may themselves
    contain ``=``.  Empty segments (e.g. from a trailing ``;``, common in
    portal-copied strings) are ignored instead of raising ``ValueError``.

    :param connect_str: The connection string.
    :type connect_str: str
    :rtype: dict[str, str]
    """
    connect_info = {}
    for field in connect_str.split(';'):
        # Robustness fix: skip empty segments from a trailing/duplicated ';'.
        if not field:
            continue
        key, value = field.split('=', 1)
        connect_info[key] = value
    return connect_info
def create_variable_with_length(self):
    """Emit code defining ``{variable}_len`` — the length of the current
    variable (e.g. of a list or dictionary).

    Safe to call repeatedly: the helper variable is generated only the
    first time, tracked through ``self._variables``.
    """
    length_variable = '{}_len'.format(self._variable)
    if length_variable not in self._variables:
        self._variables.add(length_variable)
        self.l('{variable}_len = len({variable})')
def color_grid(data, palette, denom=9.0, mask_zeros=True):
    """Convert the given data (2d array of numbers or binary strings) to a 2d
    array of RGB or RGBA values which can then be visualized as a heat map.

    Arguments:
    data - 2d array of numbers or binary strings
    palette - a seaborn palette (list of RGB values) indicating how to convert
              data to colors. Will be converted to a continuous colormap if
              necessary. This should generally be the length of the longest
              binary string or the highest possible number
    denom - if the data is composed of numbers rather than binary strings,
            this number will indicate how to normalize the data to [0, 1]
            should it be necessary.
    mask_zeros - Boolean indicating whether 0s should be colored white rather
                 than the color specified by the palette. -1s always yield
                 -1 so that missing data can be handled appropriately.
    """
    grid = []
    try:
        # Probe the first cell: if it is numeric, the data is continuous and
        # the palette must become a colormap. If it isn't, skip this block.
        float(data[0][0])
        # This is continuous data - we need a colormap rather than palette
        palette = matplotlib.colors.LinearSegmentedColormap.from_list("color_grid", palette)
        palette.set_bad(alpha=0)
    except:
        # Broad on purpose: non-numeric (binary-string) data falls through
        # to the hue-mix path below.
        pass
    for row in range(len(data)):
        grid.append([])
        for col in range(len(data[row])):
            try:
                # Numeric path: map the value through the colormap.
                rgb = color_array_by_value(data[row][col], palette, denom, mask_zeros)
            except:
                # Broad on purpose: any failure means binary-string data,
                # colored by mixing hues instead.
                rgb = color_array_by_hue_mix(data[row][col], palette)
            grid[row].append(rgb)
    return grid
def send_messages(self, messages):
    """Send one or more EmailMessage objects.

    Opens a connection when needed, sends each message, and closes the
    connection again only if this call opened it.

    Returns:
        int: Number of email messages sent (``None`` when there is
        nothing to send or no connection is available).
    """
    if not messages:
        return
    opened_here = self.open()
    if not self.connection:
        # open() failed silently; trying to send would be pointless.
        return
    sent_count = sum(1 for message in messages if self._send(message))
    if opened_here:
        self.close()
    return sent_count
def register(name=EopDb.DEFAULT_DBNAME):
    """Decorator for registering an Eop Database.

    Usable both bare and with a name argument:

    .. code-block:: python

        @register
        class SqliteEnvDatabase:
            # sqlite implementation
            # this database will be known as 'default'

        @register('json')
        class JsonEnvDatabase:
            # JSON implementation

        EopDb.get(58090.2)                    # get Eop from SqliteEnvDatabase
        EopDb.get(58090.2, dbname='default')  # same as above
        EopDb.get(58090.2, dbname='json')     # get Eop from JsonEnvDatabase
    """
    # When used bare (``@register``), Python calls us with the class itself
    # instead of a name string -- detect that case by type.
    if not isinstance(name, str):
        klass = name
        EopDb.register(klass)
        return klass

    # ``@register('mydatabase')`` usage: return the actual decorator.
    def decorator(klass):
        EopDb.register(klass, name)
        return klass
    return decorator
def send(signal=Any, sender=Anonymous, *arguments, **named):
    """Send signal from sender to all connected receivers.

    signal -- (hashable) signal value, see connect for details
    sender -- the sender of the signal.
        If Any, only receivers registered for Any will receive the
        message. If Anonymous, only receivers registered to receive
        messages from Anonymous or Any will receive the message.
        Otherwise can be any python object (normally one registered
        with a connect if you actually want something to occur).
    arguments -- positional arguments passed to *all* receivers (may
        raise TypeError if a receiver does not accept them; applied
        before named arguments, so use with care).
    named -- named arguments, filtered per-receiver so each receiver
        only gets those it accepts.

    Return a list of tuple pairs [(receiver, response), ...].  If any
    receiver raises, the error propagates back through send and
    terminates the dispatch loop, so later receivers may not be called.
    """
    # Dispatch to every live receiver; robustApply filters the keyword
    # arguments down to what each receiver's signature accepts.
    return [
        (receiver, robustapply.robustApply(receiver, *arguments, signal=signal, sender=sender, **named))
        for receiver in liveReceivers(getAllReceivers(sender, signal))
    ]
def find_optconf(self, pconfs):
    """Select and return the optimal parallel configuration.

    The candidate configurations are stored via ``set_pconfs`` for
    future reference before the manager picks the partition and the
    MPI/OMP core layout to run with.
    """
    self.set_pconfs(pconfs)  # keep the candidates for later inspection
    return self.manager.select_qadapter(pconfs)
def _dataset_load_from_hdx(self, id_or_name):
    # type: (str) -> bool
    """Loads the dataset given by either id or name from HDX.

    Args:
        id_or_name (str): Either id or name of dataset

    Returns:
        bool: True if loaded, False if not
    """
    if self._load_from_hdx('dataset', id_or_name):
        # Base load succeeded: materialise the dataset's resources too.
        self._dataset_create_resources()
        return True
    return False
def _obj_index(self, uri, base_path, marked_path, headers, spr=False):
    """Return an index of objects from within the container.

    Walks the marker-based pagination of the container listing,
    accumulating object entries (optionally filtered by a time offset)
    until a page no longer advances the last-seen object name.

    :param uri: parsed container URI (result of urlparse)
    :param base_path: base path used when building the next marker path
    :param marked_path: initial (possibly marker-qualified) listing path
    :param headers: HTTP headers sent with each listing request
    :param spr: "single page return" Limit the returned data to one page
    :type spr: ``bool``
    :return: list of object entries, or the raw first page when ``spr``
    """
    object_list = list()
    # Name of the last object seen on the previous page; loop terminator.
    l_obj = None
    container_uri = uri.geturl()
    while True:
        marked_uri = urlparse.urljoin(container_uri, marked_path)
        resp = self.http.get(url=marked_uri, headers=headers)
        self._resp_exception(resp=resp)
        return_list = resp.json()
        if spr:
            # Caller only wants a single page -- return it unfiltered.
            return return_list
        time_offset = self.job_args.get('time_offset')
        for obj in return_list:
            if time_offset:
                # Get the last_modified data from the Object.
                # NOTE(review): `last_modified` is passed the *offset*, not
                # the object's own timestamp -- looks suspicious; confirm
                # against cloud_utils.TimeDelta's signature.
                time_delta = cloud_utils.TimeDelta(job_args=self.job_args, last_modified=time_offset)
                if time_delta:
                    object_list.append(obj)
            else:
                object_list.append(obj)
        if object_list:
            last_obj_in_list = object_list[-1].get('name')
        else:
            last_obj_in_list = None
        if l_obj == last_obj_in_list:
            # No progress since the previous page: pagination exhausted.
            return object_list
        else:
            # Advance the marker to the last object and fetch the next page.
            l_obj = last_obj_in_list
            marked_path = self._last_marker(base_path=base_path, last_object=l_obj)
def should_run(keywords, post_processor):
    """Check if the postprocessor should run for the current hazard and exposure.

    :param keywords: impact layer keywords
    :type keywords: dict

    :param post_processor: the post processor instance to check
    :type post_processor: dict

    :returns: Tuple with True if success, else False with an error message.
    :rtype: (bool, str)
    """
    exposure = keywords['exposure_keywords']['exposure']
    hazard = keywords['hazard_keywords']['hazard']
    if 'run_filter' not in post_processor:
        # No run_filter defined: the post processor always runs.
        return True, None
    run_filter = post_processor['run_filter']
    msg = tr(
        'Postprocessor "{name}" did not run because hazard "{hazard}" '
        'and exposure "{exposure}" are not in its run_filter '
        '"{run_filter}"'.format(
            name=post_processor['name'], hazard=hazard,
            exposure=exposure, run_filter=run_filter))
    # When a hazard (resp. exposure) filter exists, the current hazard
    # (resp. exposure) must be listed in it.
    hazard_excluded = 'hazard' in run_filter and hazard not in run_filter['hazard']
    exposure_excluded = 'exposure' in run_filter and exposure not in run_filter['exposure']
    if hazard_excluded or exposure_excluded:
        return False, msg
    return True, None
def _batch_gvcfs(data, region, vrn_files, ref_file, out_file=None):
    """Perform batching of gVCF files if above recommended input count.

    Recursively combines ``vrn_files`` into intermediate gVCFs until the
    file count drops to the configured joint group size or below, then
    returns the (possibly combined) list of files.
    """
    if out_file is None:
        out_file = vrn_files[0]
    # group to get below the maximum batch size, using 200 as the baseline
    max_batch = int(dd.get_joint_group_size(data))
    if len(vrn_files) > max_batch:
        out = []
        num_batches = int(math.ceil(float(len(vrn_files)) / max_batch))
        # NOTE(review): partition_all's first argument is the *chunk size*,
        # so each chunk here holds num_batches files, producing many small
        # combines merged hierarchically by the recursion below -- confirm
        # this (rather than max_batch-sized chunks) is the intent.
        for i, batch_vrn_files in enumerate(tz.partition_all(num_batches, vrn_files)):
            base, ext = utils.splitext_plus(out_file)
            batch_out_file = "%s-b%s%s" % (base, i, ext)
            out.append(run_combine_gvcfs(batch_vrn_files, region, ref_file, batch_out_file, data))
        # Recurse on the combined outputs until few enough files remain.
        return _batch_gvcfs(data, region, out, ref_file)
    else:
        return vrn_files
def mergeWindows(data, dimOrder, maxWindowSize, overlapPercent, batchSize, transform, progressCallback=None):
    """Apply *transform* over sliding windows of *data*, averaging overlaps.

    Generates sliding windows for the specified dataset and applies the
    specified transformation function to each window. Where multiple
    overlapping windows include an element of the input dataset, the
    overlap is resolved by computing the mean transform result value for
    that element.

    Irrespective of the order of the dimensions of the input dataset, the
    transformation function should return a NumPy array with dimensions
    [batch, height, width, resultChannels].

    If a progress callback is supplied, it will be called immediately
    before applying the transformation function to each batch of windows.
    The callback should accept the current batch index and number of
    batches as arguments.

    Returns an ndarray of shape (height, width, resultChannels) with the
    per-element mean transform values.
    """
    # Determine the dimensions of the input data
    sourceWidth = data.shape[dimOrder.index('w')]
    sourceHeight = data.shape[dimOrder.index('h')]
    # Generate the sliding windows and group them into batches
    windows = generate(data, dimOrder, maxWindowSize, overlapPercent)
    batches = batchWindows(windows, batchSize)
    # Probe the first batch to determine the result dimensionality
    exemplarResult = transform(data, batches[0])
    resultDimensions = exemplarResult.shape[len(exemplarResult.shape) - 1]
    # Accumulators for the per-element result sums and hit counts.
    # Fix: `np.float` was removed in NumPy 1.24; use np.float64 explicitly
    # (the same dtype the old alias resolved to).
    sums = np.zeros((sourceHeight, sourceWidth, resultDimensions), dtype=np.float64)
    counts = np.zeros((sourceHeight, sourceWidth), dtype=np.uint32)
    # Iterate over the batches and apply the transformation function to each
    for batchNum, batch in enumerate(batches):
        # If a progress callback was supplied, call it (idiom fix: compare
        # to None with `is not None`, not `!=`).
        if progressCallback is not None:
            progressCallback(batchNum, len(batches))
        # Apply the transformation function to the current batch
        batchResult = transform(data, batch)
        for windowNum, window in enumerate(batch):
            # Views into the accumulators covering this window's footprint
            windowIndices = window.indices(False)
            sumsView = sums[windowIndices]
            countsView = counts[windowIndices]
            # Update the result sums for each dataset element in the window
            sumsView[:] += batchResult[windowNum]
            countsView[:] += 1
    # Use the sums and the counts to compute the mean values
    for dim in range(0, resultDimensions):
        sums[:, :, dim] /= counts
    # Return the mean values
    return sums
def display_initialize(self):
    """Display 'please wait' message, and narrow build warning."""
    # Clear the screen and center the status message vertically.
    echo(self.term.home + self.term.clear)
    echo(self.term.move_y(self.term.height // 2))
    echo(self.term.center('Initializing page data ...').rstrip())
    flushout()
    # An upper bound of 0x10000 indicates a narrow (UCS-2) Python build:
    # warn that code points above the BMP cannot be shown.
    if LIMIT_UCS == 0x10000:
        echo('\n\n')
        echo(self.term.blink_red(self.term.center('narrow Python build: upperbound value is {n}.'.format(n=LIMIT_UCS)).rstrip()))
        echo('\n\n')
        flushout()
def set_matrix(self, matrix):
    """Modifies the current transformation matrix (CTM)
    by setting it equal to :obj:`matrix`.

    :param matrix:
        A transformation :class:`Matrix` from user space to device space.
    """
    cairo.cairo_set_matrix(self._pointer, matrix._pointer)
    # cairo reports errors via status codes; raise if one was flagged.
    self._check_status()
def create_with_validation(raw_properties):
    """Creates new 'PropertySet' instances after checking
    that all properties are valid and converting implicit
    properties into gristed form.

    :param raw_properties: iterable of property strings.
    """
    assert is_iterable_typed(raw_properties, basestring)
    # Parse each raw string into a Property (adding grist where implicit)...
    properties = [property.create_from_string(s) for s in raw_properties]
    # ...and validate the whole set before interning the PropertySet.
    property.validate(properties)
    return create(properties)
def _load_neighbors_from_external_source(self) -> None:
    """Loads the neighbors of the node from the igraph `Graph` instance that is
    wrapped by the graph that has this node, querying the Spotify client
    for similar artists and adding an edge per resolved neighbor."""
    graph: SpotifyArtistGraph = self._graph
    items: List[NameExternalIDPair] = graph.client.similar_artists(self.external_id)
    # Cap the neighbor list at the configured count (or the class default).
    limit: int = graph.neighbor_count if graph.neighbor_count > 0 else self._NEIGHBORS_TO_LOAD
    if len(items) > limit:
        del items[limit:]
    for item in items:
        neighbor: SpotifyArtistNode = graph.nodes.get_node_by_name(item.name, can_validate_and_load=True, external_id=item.external_id)
        # Strangely we need this guard because the Spotify API's search method
        # doesn't recognise certain artist names.
        # Actually it could also be a bug in SpotifyClient.search_artists_by_name():
        # the artist name sent as a request parameter may not be encoded 100%
        # correctly... Anyway, this is a working hotfix.
        if neighbor is not None:
            graph.add_edge(self, neighbor)
def list_permissions(vhost, runas=None):
    '''
    Lists permissions for vhost via rabbitmqctl list_permissions

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_permissions /myvhost
    '''
    # Default to running as the current (non-root) user on POSIX platforms.
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    # -q suppresses the informational header so the output stays parseable.
    res = __salt__['cmd.run_all']([RABBITMQCTL, 'list_permissions', '-q', '-p', vhost], reset_system_locale=False, runas=runas, python_shell=False)
    return _output_to_dict(res)
def create_session_config(log_device_placement=False, enable_graph_rewriter=False, gpu_mem_fraction=0.95, use_tpu=False, xla_jit_level=tf.OptimizerOptions.OFF, inter_op_parallelism_threads=0, intra_op_parallelism_threads=0):
    """Build the TensorFlow Session config to use.

    Graph options depend on the target: plain options on TPU, the
    rewriter when enabled, otherwise L1 optimizer options with the
    requested XLA JIT level.
    """
    if use_tpu:
        # TPU path: no rewriter or XLA tweaks.
        graph_options = tf.GraphOptions()
    elif enable_graph_rewriter:
        rewrite_options = rewriter_config_pb2.RewriterConfig()
        rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
        graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
    else:
        graph_options = tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(
                opt_level=tf.OptimizerOptions.L1,
                do_function_inlining=False,
                global_jit_level=xla_jit_level))
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
    return tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_options,
        gpu_options=gpu_options,
        log_device_placement=log_device_placement,
        inter_op_parallelism_threads=inter_op_parallelism_threads,
        intra_op_parallelism_threads=intra_op_parallelism_threads,
        isolate_session_state=True)
def _integrate_storage(self, timeseries, position, params, voltage_level,
                       reactive_power_timeseries, **kwargs):
    """Integrate storage units in the grid.

    Parameters
    ----------
    timeseries : :obj:`str` or :pandas:`pandas.Series<series>`
        Parameter used to obtain time series of active power the storage
        is charged (negative) or discharged (positive) with. Can either
        be a given time series or an operation strategy. See class
        definition for more information.
    position : :obj:`str` or :class:`~.grid.components.Station` or :class:`~.grid.components.BranchTee` or :class:`~.grid.components.Generator` or :class:`~.grid.components.Load`
        Parameter used to place the storage. See class definition for
        more information.
    params : :obj:`dict`
        Dictionary with storage parameters for one storage. See class
        definition for more information on what parameters must be
        provided.
    voltage_level : :obj:`str` or None
        `voltage_level` defines which side of the LV station the storage
        is connected to. Valid options are 'lv' and 'mv'. Default: None.
        See class definition for more information.
    reactive_power_timeseries : :pandas:`pandas.Series<series>` or None
        Reactive power time series in kvar (generator sign convention).
        Index of the series needs to be a
        :pandas:`pandas.DatetimeIndex<datetimeindex>`.

    Raises
    ------
    ValueError
        If `position` is 'distribute_storages_mv' but `timeseries` is not
        a pandas Series.
    KeyError
        If `position` or `timeseries` is not a valid option.
    """
    # make sure the storage parameters contain a nominal power
    params = self._check_nominal_power(params, timeseries)
    # place storage
    if isinstance(position, (Station, BranchTee, Generator, Load)):
        storage = storage_integration.set_up_storage(
            node=position, parameters=params, voltage_level=voltage_level)
        line = storage_integration.connect_storage(storage, position)
    elif isinstance(position, str) and position == 'hvmv_substation_busbar':
        storage, line = storage_integration.storage_at_hvmv_substation(
            self.edisgo.network.mv_grid, params)
    elif isinstance(position, str) and position == 'distribute_storages_mv':
        # check active power time series
        if not isinstance(timeseries, pd.Series):
            raise ValueError(
                "Storage time series needs to be a pandas Series if "
                "`position` is 'distribute_storages_mv'.")
        timeseries = pd.DataFrame(data={'p': timeseries},
                                  index=timeseries.index)
        self._check_timeindex(timeseries)
        # check reactive power time series
        if reactive_power_timeseries is not None:
            self._check_timeindex(reactive_power_timeseries)
            timeseries['q'] = reactive_power_timeseries.loc[timeseries.index]
        else:
            timeseries['q'] = 0
        # start storage positioning method
        storage_positioning.one_storage_per_feeder(
            edisgo=self.edisgo, storage_timeseries=timeseries,
            storage_nominal_power=params['nominal_power'], **kwargs)
        return
    else:
        # BUG FIX: the message previously interpolated `timeseries`
        # although the value being rejected here is `position`.
        message = 'Provided storage position option {} is not ' \
                  'valid.'.format(position)
        logging.error(message)
        raise KeyError(message)
    # implement operation strategy (active power)
    if isinstance(timeseries, pd.Series):
        timeseries = pd.DataFrame(data={'p': timeseries},
                                  index=timeseries.index)
        self._check_timeindex(timeseries)
        storage.timeseries = timeseries
    elif isinstance(timeseries, str) and timeseries == 'fifty-fifty':
        storage_operation.fifty_fifty(self.edisgo.network, storage)
    else:
        message = 'Provided storage timeseries option {} is not ' \
                  'valid.'.format(timeseries)
        logging.error(message)
        raise KeyError(message)
    # reactive power
    if reactive_power_timeseries is not None:
        self._check_timeindex(reactive_power_timeseries)
        storage.timeseries = pd.DataFrame(
            {'p': storage.timeseries.p,
             'q': reactive_power_timeseries.loc[storage.timeseries.index]},
            index=storage.timeseries.index)
    # update pypsa representation
    if self.edisgo.network.pypsa is not None:
        pypsa_io.update_pypsa_storage(
            self.edisgo.network.pypsa, storages=[storage],
            storages_lines=[line])
def neighborSelect(a, x, y):
    """finds (local) minima in a 2d grid

    :param a: 1d array of displacements from the source positions
    :type a: numpy array with length numPix**2 in float
    :param x: 1d array of x coordinates matching ``a``
    :param y: 1d array of y coordinates matching ``a``
    :returns: arrays of x positions, y positions and values of the minima
    """
    dim = int(np.sqrt(len(a)))
    n = dim ** 2
    # direct 8-neighbourhood; indices stay in range since i is interior
    ring1 = (-1, 1, -dim, dim, -(dim - 1), -(dim + 1), dim - 1, dim + 1)
    # second and third diagonal rings, checked with wrap-around modulo n
    ring2 = (-2 * dim - 1, -2 * dim + 1, -dim - 2, -dim + 2,
             dim - 2, dim + 2, 2 * dim - 1, 2 * dim + 1)
    ring3 = (-3 * dim - 1, -3 * dim + 1, -dim - 3, -dim + 3,
             dim - 3, dim + 3, 3 * dim - 1, 3 * dim + 1)
    x_mins, y_mins, values = [], [], []
    for i in range(dim + 1, len(a) - dim - 1):
        if not all(a[i] < a[i + off] for off in ring1):
            continue
        if not all(a[i] < a[(i + off) % n] for off in ring2):
            continue
        if not all(a[i] < a[(i + off) % n] for off in ring3):
            continue
        x_mins.append(x[i])
        y_mins.append(y[i])
        values.append(a[i])
    return np.array(x_mins), np.array(y_mins), np.array(values)
def render_tooltip(self, tooltip, obj):
    """Render the tooltip for this column for an object"""
    # resolve the value: attribute of the object wins over a fixed value
    if self.tooltip_attr:
        value = getattr(obj, self.tooltip_attr)
    elif self.tooltip_value:
        value = self.tooltip_value
    else:
        return False
    setter = getattr(tooltip, TOOLTIP_SETTERS.get(self.tooltip_type))
    # image-like tooltip types additionally take a size argument
    args = ((value, self.tooltip_image_size)
            if self.tooltip_type in TOOLTIP_SIZED_TYPES else (value,))
    setter(*args)
    return True
def DeregisterCredentials(cls, credentials):
    """Deregisters a path specification credentials.

    Args:
      credentials (Credentials): credentials.

    Raises:
      KeyError: if credential object is not set for the corresponding
          type indicator.
    """
    type_indicator = credentials.type_indicator
    try:
        del cls._credentials[type_indicator]
    except KeyError:
        raise KeyError(
            'Credential object not set for type indicator: {0:s}.'.format(
                type_indicator))
def start_agent(self, cfgin=True):
    """CLI interface to start 12-factor service.

    :param cfgin: when True, read the JSON configuration from stdin;
        otherwise fall back to the REFLEX_MONITOR_CONFIG environment
        variable (raw JSON or base64-encoded JSON) or built-in defaults.
    """
    default_conf = {
        "threads": {
            "result": {"number": 0, "function": None},
            "worker": {"number": 0, "function": None},
        },
        "interval": {"refresh": 900, "heartbeat": 300,
                     "reporting": 300, "test": 60},
        "heartbeat-hook": False
    }
    indata = {}
    if cfgin:
        indata = json.load(sys.stdin)
    elif os.environ.get("REFLEX_MONITOR_CONFIG"):
        raw = os.environ.get("REFLEX_MONITOR_CONFIG")
        # payload may be base64-encoded JSON; raw JSON starts with '{'
        if raw[0] != "{":
            raw = base64.b64decode(raw)
        # BUG FIX: the env-var payload was previously never parsed,
        # leaving `indata` a str/bytes which dictlib.union() cannot
        # merge with the default dict.
        indata = json.loads(raw)
    else:
        self.NOTIFY("Using default configuration")
    conf = dictlib.union(default_conf, indata)
    conf['threads']['result']['function'] = self.handler_thread
    conf['threads']['worker']['function'] = self.worker_thread
    self.NOTIFY("Starting monitor Agent")
    try:
        self.configure(conf).start()
    except KeyboardInterrupt:
        # signal all helper threads to shut down cleanly
        self.thread_stopper.set()
        if self.refresh_stopper:
            self.refresh_stopper.set()
        if self.heartbeat_stopper:
            self.heartbeat_stopper.set()
        if self.reporting_stopper:
            self.reporting_stopper.set()
def connect(self):
    """|coro|
    Connect to ubisoft, automatically called when needed"""
    if time.time() < self._login_cooldown:
        raise FailedToConnect("login on cooldown")
    request_headers = {
        "Content-Type": "application/json",
        "Ubi-AppId": self.appid,
        "Authorization": "Basic " + self.token,
    }
    resp = yield from self.session.post(
        "https://connect.ubi.com/ubiservices/v2/profiles/sessions",
        headers=request_headers,
        data=json.dumps({"rememberMe": True}))
    data = yield from resp.json()
    if "ticket" not in data:
        raise FailedToConnect
    self.key = data.get("ticket")
    self.sessionid = data.get("sessionId")
    self.uncertain_spaceid = data.get("spaceId")
def policy_list(request, **kwargs):
    """List of QoS Policies."""
    client = neutronclient(request)
    return [QoSPolicy(policy)
            for policy in client.list_qos_policies(**kwargs).get('policies')]
def firstElementChild(self) -> Optional[AbstractNode]:
    """First Element child node.

    If this node has no element child, return None.
    """
    return next(
        (child for child in self.childNodes
         if child.nodeType == Node.ELEMENT_NODE),
        None,
    )
def get_user(self, username="~"):
    """get info about user (if no user specified, use the one initiating request)

    :param username: str, name of user to get info about, default="~"
    :return: dict
    """
    url = self._build_url("users/%s/" % username, _prepend_namespace=False)
    response = self._get(url)
    check_response(response)
    return response
def get_students(self):
    """Get user objects that are students (quickly)."""
    qs = User.objects.filter(
        user_type="student",
        graduation_year__gte=settings.SENIOR_GRADUATION_YEAR,
    )
    return qs.exclude(id__in=EXTRA)
def get_domain(value):
    """domain = dot-atom / domain-literal / obs-domain
    obs-domain = atom *("." atom))

    Parse the domain part of an addr-spec at the start of *value*.

    :param value: string positioned at the start of a domain.
    :return: (Domain token, unparsed remainder of value) tuple.
    :raises errors.HeaderParseError: if no domain can be parsed.
    """
    domain = Domain()
    leader = None
    # Consume (and remember) any leading comment/folding white space so it
    # can be re-attached to the front of the parsed token below.
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    if not value:
        raise errors.HeaderParseError(
            "expected domain but found '{}'".format(value))
    # Domain literal form, e.g. "[192.168.0.1]".
    if value[0] == '[':
        token, value = get_domain_literal(value)
        if leader is not None:
            token[:0] = [leader]
        domain.append(token)
        return domain, value
    # Otherwise try a dot-atom first, falling back to a bare atom
    # (the obsolete obs-domain form).
    try:
        token, value = get_dot_atom(value)
    except errors.HeaderParseError:
        token, value = get_atom(value)
    if leader is not None:
        token[:0] = [leader]
    domain.append(token)
    # A remaining '.' means the dot-atom parse stopped early: continue
    # parsing as obs-domain (atoms joined by dots, CFWS permitted) and
    # record the defect.
    if value and value[0] == '.':
        domain.defects.append(errors.ObsoleteHeaderDefect(
            "domain is not a dot-atom (contains CFWS)"))
        # Flatten an initial dot-atom token into its components so the
        # following DOT/atom tokens join a uniform list.
        if domain[0].token_type == 'dot-atom':
            domain[:] = domain[0]
        while value and value[0] == '.':
            domain.append(DOT)
            token, value = get_atom(value[1:])
            domain.append(token)
    return domain, value
def optimized_binary_search_lower(tab, logsize):
    """Binary search in a table using bit operations

    :param tab: boolean monotone table
        of size :math:`2^\\textrm{logsize}`
        with tab[0] = False
    :param int logsize:
    :returns: last i such that not tab[i]
    :complexity: O(logsize)
    """
    lo = 0
    # set candidate bits from most significant to least significant
    for shift in reversed(range(logsize)):
        candidate = lo | (1 << shift)
        if not tab[candidate]:
            lo = candidate
    return lo
def get_versions_from_webpage(self):
    """Get version details from Zenodo webpage (it is not available in the REST api)"""
    page = requests.get('https://zenodo.org/record/' + self.data['conceptrecid'])
    soup = BeautifulSoup(page.text, 'html.parser')
    version_rows = soup.select('.well.metadata > table.table tr')
    if not version_rows:
        # only one version exists; synthesize its entry from this record
        return [{
            'recid': self.data['id'],
            'name': '1',
            'doi': self.data['doi'],
            'date': self.data['created'],
            'original_version': self.original_version(),
        }]
    return [self._row_to_version(row)
            for row in version_rows if len(row.select('td')) > 1]
def get_probes_config(self):
    """Return the configuration of the RPM probes."""
    probes = {}
    probes_table = junos_views.junos_rpm_probes_config_table(self.device)
    probes_table.get()
    for probe_test in probes_table.items():
        test_name = py23_compat.text_type(probe_test[0])
        details = dict(probe_test[1])
        probe_name = napalm_base.helpers.convert(
            py23_compat.text_type, details.pop('probe_name'))
        test_config = {
            'probe_type': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('probe_type', '')),
            'target': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('target', '')),
            'source': napalm_base.helpers.convert(
                py23_compat.text_type, details.pop('source_address', '')),
            'probe_count': napalm_base.helpers.convert(
                int, details.pop('probe_count', '0')),
            'test_interval': napalm_base.helpers.convert(
                int, details.pop('test_interval', '0')),
        }
        # group tests under their owning probe
        probes.setdefault(probe_name, {})[test_name] = test_config
    return probes
def get(self, request, *args, **kwargs):
    """Return a :class:`.django.http.JsonResponse`.

    Example::

        'results': [
            'text': "foo",
            'id': 123
        'more': true
    """
    self.widget = self.get_widget_or_404()
    self.term = kwargs.get('term', request.GET.get('term', ''))
    self.object_list = self.get_queryset()
    context = self.get_context_data()
    results = [
        {'text': self.widget.label_from_instance(obj), 'id': obj.pk}
        for obj in context['object_list']
    ]
    return JsonResponse({
        'results': results,
        'more': context['page_obj'].has_next(),
    })
def _add_edge(self, s_a, s_b, **edge_labels):
    """Add an edge in the graph from `s_a` to statement `s_b`, where `s_a` and `s_b` are tuples of statements of the
    form (irsb_addr, stmt_idx)."""
    # Skip edges already recorded; encountering a new edge means we are
    # not redoing the same path again.
    if (s_a, s_b) in self.graph.edges():
        return
    self.graph.add_edge(s_a, s_b, **edge_labels)
    self._new = True
    l.info("New edge: %s --> %s", s_a, s_b)
def team_status(self, team, event):
    """Get status of a team at an event.

    :param team: Team whose status to get.
    :param event: Event team is at.
    :return: Status object.
    """
    path = 'team/%s/event/%s/status' % (self.team_key(team), event)
    return Status(self._get(path))
def long_description(*filenames):
    """Provide a long description."""
    chunks = ['']
    for filename in filenames:
        with open(filename) as fp:
            # indent every source line by one space
            chunks.extend(' ' + line for line in fp)
        # separate files with a blank entry and a newline
        chunks.append('')
        chunks.append('\n')
    return EMPTYSTRING.join(chunks)
def delete_user(name, runas=None):
    '''Deletes a user via rabbitmqctl delete_user.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.delete_user rabbit_user'''
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    res = __salt__['cmd.run_all'](
        [RABBITMQCTL, 'delete_user', name],
        reset_system_locale=False,
        python_shell=False,
        runas=runas)
    return _format_response(res, 'Deleted')
def get_environ(self, sock):
    """Create WSGI environ entries to be merged into each request."""
    # sock.cipher() returns (cipher_name, protocol_version, secret_bits)
    cipher = sock.cipher()
    return {
        "wsgi.url_scheme": "https",
        "HTTPS": "on",
        'SSL_PROTOCOL': cipher[1],
        'SSL_CIPHER': cipher[0]
        # # SSL_VERSION_INTERFACE string The mod_ssl program version
        # # SSL_VERSION_LIBRARY string The OpenSSL program version
    }
def release_branches(self):
    """A dictionary that maps branch names to :class:`Release` objects."""
    self.ensure_release_scheme('branches')
    return {release.revision.branch: release
            for release in self.releases.values()}
def value_type(self):
    """The attribute's type, note that this is the type of the attribute's
    value and not its affect on the item (i.e. negative or positive). See
    'type' for that."""
    prefix = "value_is_"
    vtype = self._attribute.get("description_format")
    # strip the redundant "value_is_" prefix when present
    if not vtype or not vtype.startswith(prefix):
        return vtype
    return vtype[len(prefix):]
def get_all_for_resource(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> List['ResourceView']
    """Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects

    Args:
        identifier (str): Identifier of resource
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        List[ResourceView]: List of ResourceView objects
    """
    reader = ResourceView(configuration=configuration)
    success, result = reader._read_from_hdx(
        'resource view', identifier, 'id', ResourceView.actions()['list'])
    if not success:
        return []
    return [ResourceView(data, configuration=configuration) for data in result]
def filter_to_pass_and_reject(in_file, paired, out_dir=None):
    """Filter VCF to only those with a strict PASS/REJECT: somatic + germline.

    Removes low quality calls filtered but also labeled with REJECT.

    :param in_file: path to the input bgzipped VCF.
    :param paired: paired tumor/normal sample object (``tumor_data`` is
        used for transactions and bgzip configuration).
    :param out_dir: optional output directory; defaults to the input
        file's directory.
    :return: path to the filtered, bgzipped and indexed VCF.
    """
    from bcbio.heterogeneity import bubbletree
    out_file = "%s-prfilter.vcf.gz" % utils.splitext_plus(in_file)[0]
    if out_dir:
        out_file = os.path.join(out_dir, os.path.basename(out_file))
    # only regenerate when the output is missing or older than the input
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            # normal-sample depth cutoff used when rescuing germline REJECTs
            max_depth = bubbletree.max_normal_germline_depth(in_file, bubbletree.PARAMS, paired)
            tx_out_plain = tx_out_file.replace(".vcf.gz", ".vcf")
            with contextlib.closing(cyvcf2.VCF(in_file)) as reader:
                reader = _add_db_to_header(reader)
                with contextlib.closing(cyvcf2.Writer(tx_out_plain, reader)) as writer:
                    for rec in reader:
                        filters = rec.FILTER.split(";") if rec.FILTER else []
                        # filters outside the strict PASS/REJECT vocabulary
                        # mark low-quality calls we drop entirely
                        other_filters = [x for x in filters if x not in ["PASS", ".", "REJECT"]]
                        if len(other_filters) == 0 or bubbletree.is_info_germline(rec):
                            # Germline, check if we should include based on frequencies
                            if "REJECT" in filters or bubbletree.is_info_germline(rec):
                                stats = bubbletree._is_possible_loh(rec, reader, bubbletree.PARAMS, paired,
                                                                    use_status=True, max_normal_depth=max_depth)
                                if stats:
                                    # rescue as a passing database (germline) call
                                    rec.FILTER = "PASS"
                                    rec.INFO["DB"] = True
                                    writer.write_record(rec)
                            # Somatic, always include
                            else:
                                writer.write_record(rec)
            vcfutils.bgzip_and_index(tx_out_plain, paired.tumor_data["config"])
    return out_file
def update(self, **kwargs):
    """Updates the Dict with the given values. Turns internal dicts into Dicts."""
    def convert_list(items):
        # convert list elements: dicts become Dicts, nested lists are
        # handled by make_list, bytes are decoded to text
        converted = []
        for item in items:
            if type(item) == dict:
                converted.append(Dict(**item))
            elif type(item) == list:
                converted.append(make_list(item))
            elif type(item) == bytes:
                converted.append(item.decode('UTF-8'))
            else:
                converted.append(item)
        return converted

    for key in list(kwargs.keys()):
        value = kwargs[key]
        if type(value) == dict:
            self[key] = Dict(**value)
        elif type(value) == list:
            self[key] = convert_list(value)
        else:
            self[key] = value
def transform(self, func):
    """Apply a transformation to tokens in this :class:`.FeatureSet`\\.

    Parameters
    ----------
    func : callable
        Should take four parameters: token, value in document (e.g. count),
        value in :class:`.FeatureSet` (e.g. overall count), and document
        count (i.e. number of documents in which the token occurs). Should
        return a new numeric (int or float) value, or None. If value is 0
        or None, the token will be excluded.

    Returns
    -------
    :class:`.FeatureSet`

    Examples
    --------
    Apply a tf*idf transformation.

    .. code-block:: python

       >>> words = corpus.features['words']
       >>> def tfidf(f, c, C, DC):
       ...     tf = float(c)
       ...     idf = log(float(len(words.features))/float(DC))
       ...     return tf*idf
       >>> corpus.features['words_tfidf'] = words.transform(tfidf)
    """
    features = {}
    # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; this file already uses Python-3-only syntax (`yield from`),
    # so use items(), which works on both.
    for i, feature in self.features.items():
        feature_ = []
        for f, v in feature:
            t = self.lookup[f]
            v_ = func(f, v, self.counts[t], self.documentCounts[t])
            # exclude tokens whose transformed value is 0 or None
            if v_:
                feature_.append((f, v_))
        features[i] = Feature(feature_)
    return FeatureSet(features)
def configure(self, src_dir, build_dir, **kwargs):
    """This function builds the cmake configure command."""
    del kwargs  # accepted for interface compatibility, unused
    # an optional sub-directory inside the source tree holds the project
    project_path = self._ex_args.get("project_path")
    source = os.path.join(src_dir, project_path) if project_path else src_dir
    return [{"args": ["cmake", source] + self._config_args, "cwd": build_dir}]
def main(show_details: ['-l'] = False, cols: ['-w', '--width'] = '', *files):
    '''List information about a particular file or set of files

    :param show_details: Whether to show detailed info about files
    :param cols: specify screen width'''
    # echo the parsed arguments, one per line
    for value in (files, show_details, cols):
        print(value)
def forum_post_list(self, creator_id=None, creator_name=None, topic_id=None,
                    topic_title_matches=None, topic_category_id=None,
                    body_matches=None):
    """Return a list of forum posts.

    Parameters:
        creator_id (int):
        creator_name (str):
        topic_id (int):
        topic_title_matches (str):
        topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs &
            Features respectively).
        body_matches (str): Can be part of the post content.
    """
    search = {
        'creator_id': creator_id,
        'creator_name': creator_name,
        'topic_id': topic_id,
        'topic_title_matches': topic_title_matches,
        'topic_category_id': topic_category_id,
        'body_matches': body_matches,
    }
    params = {'search[%s]' % key: value for key, value in search.items()}
    return self._get('forum_posts.json', params)
def colorize(lead, num, color):
    """Print 'lead'='num' in 'color'"""
    use_color = num != 0 and ANSIBLE_COLOR and color is not None
    if use_color:
        # wider padding compensates for invisible ANSI escape codes
        parts = (stringc(lead, color), stringc("=", color),
                 stringc(str(num), color))
        return "%s%s%-15s" % parts
    return "%s=%-4s" % (lead, str(num))
def to_map_with_default(value, default_value):
    """Converts value into map object or returns default when conversion is not possible

    :param value: the value to convert.
    :param default_value: the default value.
    :return: map object or default_value when conversion is not supported.
    """
    result = MapConverter.to_nullable_map(value)
    # FIX: compare to None with identity (`is not None`) rather than `!=`,
    # which invokes __ne__ and can misbehave for container-like results.
    return result if result is not None else default_value
def parse_date(self, value):
    """A lazy method to parse anything to date.

    If input data type is:
    - string: parse date from it
    - integer: use from ordinal
    - datetime: use date part
    - date: just return it
    """
    if value is None:
        raise Exception("Unable to parse date from %r" % value)
    if isinstance(value, string_types):
        return self.str2date(value)
    if isinstance(value, int):
        return date.fromordinal(value)
    # datetime must be checked before date: datetime subclasses date
    if isinstance(value, datetime):
        return value.date()
    if isinstance(value, date):
        return value
    raise Exception("Unable to parse date from %r" % value)
def u_distance_stats_sqr(x, y, **kwargs):
    """u_distance_stats_sqr(x, y, *, exponent=1)

    Compute the unbiased estimators of the squared distance covariance and
    squared distance correlation between two random vectors, together with
    the individual squared distance variances.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual
        random variables while the rows are individual instances.
    y: array_like
        Second random vector. The columns correspond with the individual
        random variables while the rows are individual instances.
    exponent: float
        Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
        Equivalently, it is twice the Hurst parameter of fractional
        Brownian motion.

    Returns
    -------
    Stats
        Squared distance covariance, squared distance correlation, and the
        squared distance variances of ``x`` and ``y``.

    See Also
    --------
    u_distance_covariance_sqr
    u_distance_correlation_sqr

    Notes
    -----
    Computing the statistics together shares work between them and is more
    efficient than computing each separately. The fast distance covariance
    algorithm proposed in :cite:`b-fast_distance_correlation` is used when
    possible.
    """
    # use the O(n log n) algorithm when the inputs allow it
    if _can_use_fast_algorithm(x, y, **kwargs):
        return _u_distance_stats_sqr_fast(x, y)
    return _distance_sqr_stats_naive_generic(
        x, y, matrix_centered=_u_distance_matrix, product=u_product, **kwargs)
def get_request_token ( cls , consumer_key , redirect_uri = 'http://example.com/' , state = None ) :
'''Returns the request token that can be used to fetch the access token''' | headers = { 'X-Accept' : 'application/json' , }
url = 'https://getpocket.com/v3/oauth/request'
payload = { 'consumer_key' : consumer_key , 'redirect_uri' : redirect_uri , }
if state :
payload [ 'state' ] = state
return cls . _make_request ( url , payload , headers ) [ 0 ] [ 'code' ] |
def optimal_orientation(self, t_gps):
    """Return the optimal orientation in right ascension and declination
    for a given GPS time.

    Parameters
    ----------
    t_gps: float
        Time in gps seconds

    Returns
    -------
    ra: float
        Right ascension that is optimally oriented for the detector
    dec: float
        Declination that is optimally oriented for the detector
    """
    # wrap sidereal time into [0, 2*pi) before offsetting by longitude
    gmst = self.gmst_estimate(t_gps) % (2.0 * np.pi)
    return self.longitude + gmst, self.latitude
def pywt_pad_mode(pad_mode, pad_const=0):
    """Convert ODL-style padding mode to pywt-style padding mode.

    Parameters
    ----------
    pad_mode : str
        The ODL padding mode to use at the boundaries.
    pad_const : float, optional
        Value to use outside the signal boundaries when ``pad_mode`` is
        'constant'. Only a value of 0. is supported by PyWavelets.

    Returns
    -------
    pad_mode_pywt : str
        The corresponding name of the requested padding mode in
        PyWavelets. See `signal extension modes`_.

    References
    ----------
    .. _signal extension modes:
       https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html
    """
    mode = str(pad_mode).lower()
    # PyWavelets only supports zero-valued constant padding
    if mode == 'constant' and pad_const != 0.0:
        raise ValueError('constant padding with constant != 0 not supported '
                         'for `pywt` back-end')
    try:
        return PAD_MODES_ODL2PYWT[mode]
    except KeyError:
        raise ValueError("`pad_mode` '{}' not understood".format(mode))
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt",
                     result_constructor=FilePath):
    """Generate a temporary filename and return as a FilePath object

    tmp_dir: the directory to house the tmp_filename
    prefix: string to append to beginning of filename
        Note: It is very useful to have prefix be descriptive of the
        process which is creating the temporary file. For example, if
        your temp file will be used to build a temporary blast database,
        you might pass prefix=TempBlastDB
    suffix: the suffix to be appended to the temp filename
    result_constructor: the constructor used to build the result filename
        (default: cogent.app.parameters.FilePath). Note that joining
        FilePath objects with one another or with strings, you must use
        the + operator. If this causes trouble, you can pass str as the
        the result_constructor.
    """
    # check not none
    if not tmp_dir:
        tmp_dir = ""
    # if not current directory, append "/" if not already on path
    elif not tmp_dir.endswith("/"):
        tmp_dir += "/"
    # FIX: the original alphabet had typos -- "abcdefghig..." repeated 'g'
    # and dropped 'j', and "0123456790" doubled '0' and dropped '8' --
    # reducing the random-name space; use the complete character sets.
    chars = "abcdefghijklmnopqrstuvwxyz"
    picks = chars + chars.upper() + "0123456789"
    random_part = ''.join([choice(picks) for i in range(20)])
    return (result_constructor(tmp_dir) + result_constructor(prefix) +
            result_constructor("%s%s" % (random_part, suffix)))
def get(self, name):
    """Get the vrrp configurations for a single node interface

    Args:
        name (string): The name of the interface for which vrrp
            configurations will be retrieved.

    Returns:
        A dictionary containing the vrrp configurations on the interface.
        Returns None if no vrrp configurations are defined or
        if the interface is not configured.
    """
    # Validate the interface is specified
    if not name:
        raise ValueError("Vrrp.get(): interface must contain a value.")
    # Return None when the interface itself is not configured
    config = self.get_block('interface %s' % name)
    if config is None:
        return None
    # Collect the unique vrid numbers appearing in this interface
    vrids = set(re.findall(r'^\s+(?:no |)vrrp (\d+)', config, re.M))
    if not vrids:
        return None
    # Each parser extracts one aspect of a vrid's configuration
    parsers = (
        self._parse_delay_reload,
        self._parse_description,
        self._parse_enable,
        self._parse_ip_version,
        self._parse_mac_addr_adv_interval,
        self._parse_preempt,
        self._parse_preempt_delay_min,
        self._parse_preempt_delay_reload,
        self._parse_primary_ip,
        self._parse_priority,
        self._parse_secondary_ip,
        self._parse_timers_advertise,
        self._parse_track,
        self._parse_bfd_ip,
    )
    result = dict()
    for vrid in vrids:
        subd = dict()
        for parse in parsers:
            subd.update(parse(config, vrid))
        result[int(vrid)] = subd
    return result if result else None
def force_disconnect(self, session_id, connection_id):
    """Sends a request to disconnect a client from an OpenTok session

    :param String session_id: The session ID of the OpenTok session from which the
        client will be disconnected
    :param String connection_id: The connection ID of the client that will be disconnected
    """
    endpoint = self.endpoints.force_disconnect_url(session_id, connection_id)
    response = requests.delete(
        endpoint, headers=self.json_headers(), proxies=self.proxies,
        timeout=self.timeout)
    status = response.status_code
    if status == 204:
        # success, nothing to return
        return
    if status == 400:
        raise ForceDisconnectError(
            'One of the arguments - sessionId or connectionId - is invalid.')
    if status == 403:
        raise AuthError(
            'You are not authorized to forceDisconnect, check your authentication credentials.')
    if status == 404:
        raise ForceDisconnectError(
            'The client specified by the connectionId property is not connected to the session.')
    raise RequestError('An unexpected error occurred', status)
def create_list_str(help_string=NO_HELP, default=NO_DEFAULT):
    # type: (str, Union[List[str], NO_DEFAULT_TYPE]) -> List[str]
    """Create a List[str] parameter

    :param help_string:
    :param default:
    :return:
    """
    # converters between the string form and the typed List[str] form
    conversions = dict(
        function_s2t=convert_string_to_list_str,
        function_t2s=convert_list_str_to_string,
    )
    # noinspection PyTypeChecker
    return ParamFunctions(help_string=help_string, default=default,
                          type_name="List[str]", **conversions)
def get_any_node(self, addr, is_syscall=None, anyaddr=False, force_fastpath=False):
    """Get an arbitrary CFGNode (without considering their contexts) from our graph.

    :param int addr:            Address of the beginning of the basic block. Set anyaddr
                                to True to support arbitrary addresses.
    :param bool is_syscall:     Whether you want the syscall node or any other node. This
                                matters because syscall SimProcedures share the address of
                                the target they return to. None means either, True means a
                                syscall node, False means a non-syscall node.
    :param bool anyaddr:        If True, addr does not have to be the beginning of a basic
                                block; the entire graph is scanned and the first node
                                containing the address is returned (slow).
    :param bool force_fastpath: If True, only perform a dict lookup in _nodes_by_addr.
    :return:                    A CFGNode satisfying the conditions, or None.
    """
    # Fast path: nodes indexed by their exact starting address.
    if not anyaddr:
        try:
            return self._nodes_by_addr[addr][0]
        except (KeyError, IndexError):
            pass
    if force_fastpath:
        return None

    # Slow path: scan every node in the graph.
    # For CFGEmulated graphs, only consider the base (non-looping) copies.
    filter_looping = self.ident == "CFGEmulated"
    for node in self.graph.nodes():
        if filter_looping and node.looping_times != 0:
            continue
        if anyaddr and node.size is not None:
            # Accept any address inside the node's byte range.
            if not (node.addr <= addr < node.addr + node.size):
                continue
        elif addr != node.addr:
            continue
        if is_syscall is None or node.is_syscall == is_syscall:
            return node
    return None
def words(query):
    """words(query) -- print the number of words in a given file.

    :param query: user query string from which the file name is extracted
    """
    filename = support.get_file_name(query)
    if os.path.isfile(filename):
        # Count whitespace-delimited tokens; the file is closed automatically.
        with open(filename) as openfile:
            print(len(openfile.read().split()))
    else:
        print('File not found : ' + filename)
def get_family_hierarchy_design_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with the family hierarchy design service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.relationship.FamilyHierarchyDesignSession) - a
            ``HierarchyDesignSession`` for families
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_family_hierarchy_design()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_family_hierarchy_design()`` is ``true``.*
    """
    # Refuse early when the service is not supported by this manager.
    if not self.supports_family_hierarchy_design():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    converted_proxy = self._convert_proxy(proxy)
    try:
        return sessions.FamilyHierarchyDesignSession(
            proxy=converted_proxy, runtime=self._runtime)
    except AttributeError:
        # The sessions module does not provide this session class.
        raise OperationFailed()
def _parse_engine(engine):
    """Parse the engine URI to determine where to store logs.

    :param engine: engine URI string, e.g. ``"backend://path"``; None is
        treated as an empty string
    :return: ``(backend, path)`` tuple extracted from the URI
    :raises ValueError: if the URI does not match the expected format
    :raises NotImplementedError: if the backend is not supported
    """
    engine = (engine or '').strip()
    match = URI_RE.match(engine)
    if match is None:
        # A clear error beats the opaque AttributeError that calling
        # .groups() on None used to raise for malformed URIs.
        raise ValueError("Invalid engine URI: {0!r}".format(engine))
    backend, path = match.groups()
    if backend not in SUPPORTED_BACKENDS:
        raise NotImplementedError("Logg supports only {0} for now.".format(SUPPORTED_BACKENDS))
    log.debug('Found engine: {0}'.format(engine))
    return backend, path
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before the
    arrow), excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []
    types = []
    depth = 0   # nesting level inside [...] brackets
    start = 0   # index just past the previous top-level comma
    for index, char in enumerate(parameters):
        if char == "[":
            depth += 1
        elif char == "]":
            depth -= 1
        elif char == "," and depth == 0:
            # Only top-level commas separate parameters; commas inside
            # brackets (e.g. Dict[str, int]) are part of a single type.
            types.append(parameters[start:index].strip())
            start = index + 1
    # The final segment after the last top-level comma.
    types.append(parameters[start:].strip())
    return types
def start_child_span(operation_name, tracer=None, parent=None, tags=None):
    """Start a new span as a child of *parent*. If *parent* is None,
    start a new root span.

    :param operation_name: operation name
    :param tracer: Tracer or None (defaults to opentracing.tracer)
    :param parent: parent Span or None
    :param tags: optional tags
    :return: new span
    """
    # Fall back to the globally registered tracer when none is given.
    active_tracer = tracer or opentracing.tracer
    parent_context = parent.context if parent else None
    return active_tracer.start_span(
        operation_name=operation_name,
        child_of=parent_context,
        tags=tags,
    )
def wiki_request(self, params):
    """Make a request to the MediaWiki API using the given search parameters.

    Args:
        params (dict): Request parameters
    Returns:
        A parsed dict of the JSON response
    Note:
        Useful when wanting to query the MediaWiki site for some value
        that is not part of the wrapper API
    """
    params["format"] = "json"
    if "action" not in params:
        params["action"] = "query"
    limit = self._rate_limit
    last_call = self._rate_limit_last_call
    if limit and last_call and last_call + self._min_wait > datetime.now():
        # Call came too quickly for rate-limited API requests: wait.
        wait_time = (last_call + self._min_wait) - datetime.now()
        # Sleep the full fractional amount; int() would truncate sub-second
        # waits to zero and silently defeat the rate limit.
        time.sleep(wait_time.total_seconds())
    req = self._get_response(params)
    if self._rate_limit:
        self._rate_limit_last_call = datetime.now()
    return req
def output_file(self, _container):
    """Find and write the output path of a chroot container."""
    container_path = local.path(_container)
    # If the path exists, only proceed when the user confirms the overwrite;
    # ui.ask is short-circuited away for non-existing paths.
    if container_path.exists() and not ui.ask(
            "Path '{0}' already exists. Overwrite?".format(container_path)):
        sys.exit(0)
    CFG["container"]["output"] = str(container_path)
def _ValidateFSM ( self ) :
"""Checks state names and destinations for validity .
Each destination state must exist , be a valid name and
not be a reserved name .
There must be a ' Start ' state and if ' EOF ' or ' End ' states are specified ,
they must be empty .
Returns :
True if FSM is valid .
Raises :
TextFSMTemplateError : If any state definitions are invalid .""" | # Must have ' Start ' state .
if 'Start' not in self . states :
raise TextFSMTemplateError ( "Missing state 'Start'." )
# ' End / EOF ' state ( if specified ) must be empty .
if self . states . get ( 'End' ) :
raise TextFSMTemplateError ( "Non-Empty 'End' state." )
if self . states . get ( 'EOF' ) :
raise TextFSMTemplateError ( "Non-Empty 'EOF' state." )
# Remove ' End ' state .
if 'End' in self . states :
del self . states [ 'End' ]
self . state_list . remove ( 'End' )
# Ensure jump states are all valid .
for state in self . states :
for rule in self . states [ state ] :
if rule . line_op == 'Error' :
continue
if not rule . new_state or rule . new_state in ( 'End' , 'EOF' ) :
continue
if rule . new_state not in self . states :
raise TextFSMTemplateError ( "State '%s' not found, referenced in state '%s'" % ( rule . new_state , state ) )
return True |
def _netstat_linux():
    '''Return netstat information for Linux distros'''
    results = []
    output = __salt__['cmd.run']('netstat -tulpnea')
    # TCP lines carry a 'state' column that UDP lines lack.
    tcp_fields = ('proto', 'recv-q', 'send-q', 'local-address', 'remote-address',
                  'state', 'user', 'inode', 'program')
    udp_fields = ('proto', 'recv-q', 'send-q', 'local-address', 'remote-address',
                  'user', 'inode', 'program')
    for line in output.splitlines():
        comps = line.split()
        # startswith('tcp') also matches 'tcp6'; likewise for 'udp6'.
        if line.startswith('tcp'):
            results.append({key: comps[idx] for idx, key in enumerate(tcp_fields)})
        elif line.startswith('udp'):
            results.append({key: comps[idx] for idx, key in enumerate(udp_fields)})
    return results
def __gen_node_href(self, layer, node_id):
    """generates a complete xlink:href for any node (token node,
    structure node etc.) in the docgraph. This will only work AFTER
    the corresponding PAULA files have been created (and their file names
    are registered in ``self.paulamap``).
    """
    # Token nodes live in the tokenization file; all others in the
    # hierarchy file of their layer.
    base_paula_id = (self.paulamap['tokenization'] if istoken(self.dg, node_id)
                     else self.paulamap['hierarchy'][layer])
    return '{0}.xml#{1}'.format(base_paula_id, node_id)
def validate():
    """Display error messages and exit if no lore environment can be found."""
    init_py = os.path.join(ROOT, APP, '__init__.py')
    if not os.path.exists(init_py):
        message = ansi.error() + ' Python module not found.'
        if os.environ.get('LORE_APP') is None:
            message += ' $LORE_APP is not set. Should it be different than "%s"?' % APP
        else:
            message += ' $LORE_APP is set to "%s". Should it be different?' % APP
        sys.exit(message)
    if exists():
        return
    # Report which command was attempted; default to the bare 'lore' name.
    command = sys.argv[1] if len(sys.argv) > 1 else 'lore'
    sys.exit(ansi.error() + ' %s is only available in lore app directories (missing %s)'
             % (ansi.bold(command), ansi.underline(VERSION_PATH)))
def get_set(self, project, articleset, **filters):
    """Get a single articleset in a project.

    :param project: id of the project containing the set
    :param articleset: id of the articleset to retrieve
    :param filters: extra query parameters passed through to the request
    :return: the parsed API response for the articleset
    """
    # Explicit kwargs instead of **locals(): clearer, and avoids feeding
    # unrelated names (self, filters) into str.format.
    url = URL.articleset.format(project=project, articleset=articleset)
    return self.request(url, **filters)
async def getNodeByNdef(self, ndef):
    '''Return a single Node by (form, valu) tuple.

    Args:
        ndef ((str, obj)): A (form, valu) ndef tuple. valu must be
            normalized.

    Returns:
        (synapse.lib.node.Node): The Node or None.
    '''
    # The buid is a stable hash of the normalized ndef.
    return await self.getNodeByBuid(s_common.buid(ndef))
def _load_profile(self, profile_name):
    """Load a profile by name

    Called by load_user_options

    :param profile_name: display_name of the profile to apply; a falsy
        value means "use the default profile".
    :raises ValueError: if profile_name is given but matches no profile.
    """
    # find the profile
    # The default is the first profile unless one is explicitly flagged
    # with 'default': True (the last such flag encountered wins).
    default_profile = self._profile_list[0]
    for profile in self._profile_list:
        if profile.get('default', False):
            # explicit default, not the first
            default_profile = profile
        if profile['display_name'] == profile_name:
            break
    else:
        # Loop finished without `break`: no profile matched profile_name.
        if profile_name:
            # name specified, but not found
            raise ValueError("No such profile: %s. Options include: %s" % (profile_name, ', '.join(p['display_name'] for p in self._profile_list)))
        else:
            # no name specified, use the default
            profile = default_profile
    self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
    kubespawner_override = profile.get('kubespawner_override', {})
    for k, v in kubespawner_override.items():
        if callable(v):
            # Callable overrides are resolved with the spawner instance.
            v = v(self)
            self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
        else:
            self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
        setattr(self, k, v)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.