signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def tier(self, *args, append=True, source=None, **kwargs):
    """Function decorator registering a tier coroutine.

    Must be used *called* (``@obj.tier()``), never bare (``@obj.tier``).
    If the decorated function is not already a coroutine function it is
    wrapped in one.

    :param append: when True (and ``source`` is None) register via
        ``append_tier``; otherwise via ``add_tier``.
    :param source: optional source forwarded to ``add_tier``.
    :raises TypeError: when used as an uncalled decorator.
    """
    if len(args) == 1 and not kwargs and callable(args[0]):
        raise TypeError('Uncalled decorator syntax is invalid')

    def decorator(coro):
        func = coro
        if not asyncio.iscoroutinefunction(func):
            # asyncio.coroutine() was removed in Python 3.11; wrap the
            # plain callable in an equivalent async adapter instead.
            import functools
            import inspect

            @functools.wraps(coro)
            async def wrapper(*wargs, **wkwargs):
                result = coro(*wargs, **wkwargs)
                if inspect.isawaitable(result):
                    result = await result
                return result

            func = wrapper
        if append and source is None:
            self.append_tier(func, *args, **kwargs)
        else:
            self.add_tier(func, *args, source=source, **kwargs)
        return func
    return decorator
|
def run_migrations_online():
    """Run migrations in 'online' mode.

    Uses a connection supplied via ``config.attributes`` when available;
    otherwise builds an Engine from the ini-file section and associates a
    fresh connection with the Alembic context.
    """
    connectable = context.config.attributes.get("connection", None)
    if connectable is None:
        # No externally supplied connection: construct an engine from config.
        section = context.config.get_section(context.config.config_ini_section)
        connectable = create_engine(section.pop("url"), poolclass=pool.NullPool)
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=db.metadata,
            compare_server_default=True,
        )
        with context.begin_transaction():
            context.run_migrations()
|
def square(self, n_coeffs, do_overlap_add=False):
    """Compute a "square" view of the frequency adaptive transform, by
    resampling each frequency band such that they all contain the same
    number of samples, and performing an overlap-add procedure in the
    case where the sample frequency and duration differ.

    :param n_coeffs: The common size to which each frequency band should
        be resampled
    :param do_overlap_add: when True, collapse overlapping chunks into a
        single continuous 2-d (time, frequency) array
    :return: a ConstantRateTimeSeries (no overlap-add) or an
        ArrayWithUnits with (time, frequency) dimensions
    """
    # Resample every band to the common length, then stack to
    # shape (n_coeffs * n_chunks, n_bands) after the transpose.
    resampled_bands = [self._resample(band, n_coeffs) for band in self.iter_bands()]
    stacked = np.vstack(resampled_bands).T
    fdim = FrequencyDimension(self.scale)
    # TODO: This feels like it could be wrapped up nicely elsewhere
    # Duration of one resampled coefficient, in picoseconds.
    chunk_frequency = Picoseconds(int(np.round(self.time_dimension.duration / Picoseconds(1) / n_coeffs)))
    td = TimeDimension(frequency=chunk_frequency)
    # 3-d view: (original time chunks, resampled coeffs, bands).
    arr = ConstantRateTimeSeries(ArrayWithUnits(stacked.reshape(-1, n_coeffs, self.n_bands), dimensions=[self.time_dimension, td, fdim]))
    if not do_overlap_add:
        return arr
    # Begin the overlap add procedure
    overlap_ratio = self.time_dimension.overlap_ratio
    if overlap_ratio == 0:
        # no overlap add is necessary
        return ArrayWithUnits(stacked, [td, fdim])
    # Hop size between successive chunks, in resampled samples.
    step_size_samples = int(n_coeffs * overlap_ratio)
    # Total output length: overlapped content plus the tail of the last chunk.
    first_dim = int(np.round((stacked.shape[0] * overlap_ratio) + (n_coeffs * overlap_ratio)))
    output = ArrayWithUnits(np.zeros((first_dim, self.n_bands)), dimensions=[td, fdim])
    # Accumulate each chunk into the output at its hop-aligned offset.
    for i, chunk in enumerate(arr):
        start = step_size_samples * i
        stop = start + n_coeffs
        output[start:stop] += chunk.reshape((-1, self.n_bands))
    return output
|
def get_favicon(self, article):
    """Extract the favicon href from a website's document.

    http://en.wikipedia.org/wiki/Favicon
    <link rel="shortcut icon" type="image/png" href="favicon.png" />
    <link rel="icon" type="image/png" href="favicon.png" />

    :return: the href of the first matching <link rel=...icon> tag,
        or '' when none is present.
    """
    links = self.parser.getElementsByTag(article.doc, tag='link', attr='rel', value='icon')
    if not links:
        return ''
    return self.parser.getAttribute(links[0], 'href')
|
def safe_mkdir(path, uid=-1, gid=-1):
    """Create ``path`` if it doesn't exist.

    An already-existing directory is silently accepted; ownership is only
    adjusted on a directory this call actually created (-1 leaves the
    respective id unchanged, as with ``os.chown``).
    """
    try:
        os.mkdir(path)
    except FileExistsError:
        # Directory already present: nothing to do, ownership untouched.
        return
    os.chown(path, uid, gid)
|
def to_string(type):
    """Converts a TypeCode into its string name.

    :param type: the TypeCode to convert into a string (may be None).
    :return: the name of the TypeCode passed, as a string value;
        "unknown" for None, TypeCode.Unknown, or any unrecognized value.
    """
    # Guard first: the mapping below touches TypeCode, which must not be
    # required just to answer the None case.
    if type is None:
        return "unknown"
    # Dispatch table replaces the former 14-branch elif chain.
    names = {
        TypeCode.Unknown: "unknown",
        TypeCode.String: "string",
        TypeCode.Integer: "integer",
        TypeCode.Long: "long",
        TypeCode.Float: "float",
        TypeCode.Double: "double",
        TypeCode.Duration: "duration",
        TypeCode.DateTime: "datetime",
        TypeCode.Object: "object",
        TypeCode.Enum: "enum",
        TypeCode.Array: "array",
        TypeCode.Map: "map",
    }
    return names.get(type, "unknown")
|
def observe(self, ob):
    """Add the Observe option, replacing any previous one.

    :param ob: observe count
    """
    number = defines.OptionRegistry.OBSERVE.number
    # Remove any stale Observe option before attaching the new value.
    self.del_option_by_number(number)
    option = Option()
    option.number = number
    option.value = ob
    self.add_option(option)
|
def add_record(self, orcid_id, token, request_type, data, content_type='application/orcid+json'):
    """Add a record to a profile.

    :param orcid_id: string
        Id of the author.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param request_type: string
        One of 'activities', 'education', 'employment', 'funding',
        'peer-review', 'work'.
    :param data: dict | lxml.etree._Element
        The record in Python-friendly format, as either a JSON-compatible
        dictionary (content_type == 'application/orcid+json') or
        XML (content_type == 'application/orcid+xml').
    :param content_type: string
        MIME type of the passed record.
    :returns: string
        Put-code of the new work.
    """
    http_method = requests.post
    return self._update_activities(
        orcid_id, token, http_method, request_type, data,
        content_type=content_type,
    )
|
def _make_intersection(edge_info, all_edge_nodes):
    """Convert a description of edges into a curved polygon.

    .. note::
        This is a helper used only by :meth:`.Surface.intersect`.

    Args:
        edge_info (Tuple[Tuple[int, float, float], ...]): Information
            describing each edge of the curved polygon: which surface/edge
            it lies on, and the start and end parameters along that edge.
            (See :func:`.ends_to_curve`.)
        all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of the three
            edges of the first surface being intersected, followed by the
            nodes of the three edges of the second.

    Returns:
        .CurvedPolygon: The intersection corresponding to ``edge_info``.
    """
    edges = []
    for index, start, end in edge_info:
        specialized = _curve_helpers.specialize_curve(all_edge_nodes[index], start, end)
        # Degree is one less than the number of control-point columns.
        degree = specialized.shape[1] - 1
        edges.append(_curve_mod.Curve(specialized, degree, _copy=False))
    return curved_polygon.CurvedPolygon(*edges, metadata=edge_info, _verify=False)
|
def _proxy ( self ) :
"""Generate an instance context for the instance , the context is capable of
performing various actions . All instance actions are proxied to the context
: returns : ExportContext for this ExportInstance
: rtype : twilio . rest . preview . bulk _ exports . export . ExportContext"""
|
if self . _context is None :
self . _context = ExportContext ( self . _version , resource_type = self . _solution [ 'resource_type' ] , )
return self . _context
|
def subscribe(self, subscription):
    """Create the given `Subscription` for this existing account.

    Posts the subscription to this account's /subscriptions endpoint and
    returns whatever the subscription's ``post`` yields.
    """
    endpoint = urljoin(self._url, '/subscriptions')
    return subscription.post(endpoint)
|
def customize(self, customize):
    """Special handling for opcodes, such as those that take a variable number
    of arguments -- we add a new entry for each in TABLE_R.

    :param customize: mapping of opcode name -> argument count collected by
        the scanner; each unseen key gains a format-template entry in the
        module-level TABLE_R.
    """
    for k, v in list(customize.items()):
        # Already has a template: nothing to do.
        if k in TABLE_R:
            continue
        # Base opcode name with the trailing _<count> suffix stripped.
        op = k[:k.rfind('_')]
        if k.startswith('CALL_METHOD'):
            # This happens in PyPy only
            TABLE_R[k] = ('%c(%P)', 0, (1, -1, ', ', 100))
        elif self.version >= 3.6 and k.startswith('CALL_FUNCTION_KW'):
            TABLE_R[k] = ('%c(%P)', 0, (1, -1, ', ', 100))
        elif op == 'CALL_FUNCTION':
            TABLE_R[k] = ('%c(%P)', 0, (1, -1, ', ', 100))
        elif op in ('CALL_FUNCTION_VAR', 'CALL_FUNCTION_VAR_KW', 'CALL_FUNCTION_KW'):
            # FIXME: handle everything in customize.
            # Right now, some of this is here, and some in that.
            # NOTE: `str` here shadows the builtin; kept as-is since this
            # block only documents, it does not change code.
            if v == 0:
                str = '%c(%C'
                # '%C' is a dummy here...
                p2 = (0, 0, None)
                # ..because of the None in this
            else:
                str = '%c(%C, '
                p2 = (1, -2, ', ')
            if op == 'CALL_FUNCTION_VAR':
                # Python 3.5 only puts optional args (the VAR part)
                # lowest down the stack
                if self.version == 3.5:
                    if str == '%c(%C, ':
                        entry = ('%c(*%C, %c)', 0, p2, -2)
                    elif str == '%c(%C':
                        entry = ('%c(*%C)', 0, (1, 100, ''))
                elif self.version == 3.4:
                    # CALL_FUNCTION_VAR's top element of the stack contains
                    # the variable argument list
                    if v == 0:
                        str = '%c(*%c)'
                        entry = (str, 0, -2)
                    else:
                        str = '%c(%C, *%c)'
                        entry = (str, 0, p2, -2)
                else:
                    str += '*%c)'
                    entry = (str, 0, p2, -2)
            elif op == 'CALL_FUNCTION_KW':
                str += '**%c)'
                entry = (str, 0, p2, -2)
            elif op == 'CALL_FUNCTION_VAR_KW':
                str += '*%c, **%c)'
                # Python 3.5 only puts optional args (the VAR part)
                # lowest down the stack
                na = (v & 0xff)
                # positional parameters
                if self.version == 3.5 and na == 0:
                    if p2[2]:
                        p2 = (2, -2, ', ')
                    entry = (str, 0, p2, 1, -2)
                else:
                    if p2[2]:
                        p2 = (1, -3, ', ')
                    entry = (str, 0, p2, -3, -2)
                pass
            else:
                assert False, "Unhandled CALL_FUNCTION %s" % op
            TABLE_R[k] = entry
            pass
        # handled by n_dict:
        # if op == 'BUILD_SLICE':  TABLE_R[k] = ('%C', (0, -1, ':'))
        # handled by n_list:
        # if op == 'BUILD_LIST':   TABLE_R[k] = ('[%C]', (0, -1, ', '))
        # elif op == 'BUILD_TUPLE': TABLE_R[k] = ('(%C%,)', (0, -1, ', '))
        pass
    return
|
def current_ioloop(io_loop):
    '''A context manager that will set the current ioloop to io_loop for the context'''
    # NOTE(review): this is a generator meant to be wrapped by
    # contextlib.contextmanager at the definition site; the decorator is
    # not visible here -- confirm upstream.
    previous_loop = tornado.ioloop.IOLoop.current()
    io_loop.make_current()
    try:
        yield
    finally:
        # Always restore the loop that was current before entry.
        previous_loop.make_current()
|
def write(self, len, buf):
    """Write the content of the array in the output I/O buffer.

    This routine handles the I18N transcoding from internal UTF-8.
    The buffer is lossless, i.e. will store in case of partial or
    delayed writes.

    NOTE: ``len`` shadows the builtin but is part of the public signature.
    """
    return libxml2mod.xmlOutputBufferWrite(self._o, len, buf)
|
async def send_async(self, message, callback, timeout=0):
    """Add a single message to the internal pending queue to be processed
    by the Connection without waiting for it to be sent.

    :param message: The message to send.
    :type message: ~uamqp.message.Message
    :param callback: The callback to be run once a disposition is received
     in receipt of the message. The callback must take three arguments, the
     message, the send result and the optional delivery condition (exception).
    :type callback:
     callable[~uamqp.message.Message, ~uamqp.constants.MessageSendResult, ~uamqp.errors.MessageException]
    :param timeout: An expiry time for the message added to the queue. If the
     message is not sent within this timeout it will be discarded with an error
     state. If set to 0, the message will not expire. The default is 0.
    """
    # pylint: disable=protected-access
    # Sentinel pattern: when no error is pending, self._error is None and
    # `raise None` raises TypeError, which is swallowed. A real stored
    # exception is logged and re-raised before any send is attempted.
    try:
        raise self._error
    except TypeError:
        pass
    except Exception as e:
        _logger.warning("%r", e)
        raise
    c_message = message.get_message()
    # Register the disposition callback on the message before queuing.
    message._on_message_sent = callback
    try:
        # Serialize access to the underlying connection for the send.
        await self._session._connection.lock_async(timeout=None)
        return self._sender.send(c_message, timeout, message)
    finally:
        self._session._connection.release_async()
|
def float_range(string, minimum, maximum, inf, sup):
    """Require ``string`` to parse as a float within a certain range.

    :param string: Value to validate
    :param minimum: Minimum value to accept
    :param maximum: Maximum value to accept
    :param inf: Infimum value to accept
    :param sup: Supremum value to accept
    :type string: str
    :type minimum: float
    :type maximum: float
    :type inf: float
    :type sup: float
    """
    value = float(string)
    return _inrange(value, minimum, maximum, inf, sup)
|
def _assign_to_field(obj, name, val):
    """Helper to assign an arbitrary value to a protobuf field.

    Repeated scalar fields get the value appended; repeated composite
    fields get a new element copied from it; plain scalars are set via
    setattr; sub-messages are copied into; anything else is an error.
    """
    target = getattr(obj, name)
    if isinstance(target, containers.RepeatedScalarFieldContainer):
        target.append(val)
    elif isinstance(target, containers.RepeatedCompositeFieldContainer):
        target.add().CopyFrom(val)
    elif isinstance(target, (int, float, bool, str, bytes)):
        setattr(obj, name, val)
    elif isinstance(target, message.Message):
        target.CopyFrom(val)
    else:
        raise RuntimeError("Unsupported type: {}".format(type(target)))
|
def q_gram(self, qrange=qtransform.DEFAULT_QRANGE, frange=qtransform.DEFAULT_FRANGE, mismatch=qtransform.DEFAULT_MISMATCH, snrthresh=5.5, **kwargs):
    """Scan a `TimeSeries` using the multi-Q transform and return an
    `EventTable` of the most significant tiles.

    Parameters
    ----------
    qrange : `tuple` of `float`, optional
        `(low, high)` range of Qs to scan
    frange : `tuple` of `float`, optional
        `(low, high)` range of frequencies to scan
    mismatch : `float`, optional
        maximum allowed fractional mismatch between neighbouring tiles
    snrthresh : `float`, optional
        lower inclusive threshold on individual tile SNR to keep in the
        table
    **kwargs
        other keyword arguments to be passed to :meth:`QTiling.transform`,
        including ``'epoch'`` and ``'search'``

    Returns
    -------
    qgram : `EventTable`
        a table of time-frequency tiles on the most significant `QPlane`

    Notes
    -----
    Only tiles with signal energy greater than or equal to
    ``snrthresh ** 2 / 2`` will be stored in the output `EventTable`. The
    table columns are ``'time'``, ``'duration'``, ``'frequency'``,
    ``'bandwidth'``, and ``'energy'``.
    """
    # q_scan returns (QGram, far); only the significant plane is needed.
    qscan = qtransform.q_scan(self, mismatch=mismatch, qrange=qrange,
                              frange=frange, **kwargs)[0]
    return qscan.table(snrthresh=snrthresh)
|
def find(self, upload_id, **kwargs):
    """Find an upload by ID.

    NOTE(review): extra ``kwargs`` are accepted but deliberately not
    forwarded to the parent -- confirm this discard is intended.
    """
    return super(UploadsProxy, self).find(upload_id, file_upload=True)
|
def _copy(self, other, copy_func):
    """Copy the contents of another Any object into this one.

    :param other:
        Another instance of the same class
    :param copy_func:
        A reference to copy.copy() or copy.deepcopy() to use when copying
        lists, dicts and objects
    """
    # Let the base class copy the shared state, then clone the parsed value.
    super(Any, self)._copy(other, copy_func)
    self._parsed = copy_func(other._parsed)
|
def delete_all_pipelines(self):
    '''Delete every pipeline.

    :returns: (code, data) -- OK for overall success or the last error
        code and its response data.
    '''
    code, data = self.get_pipeline()
    if code != requests.codes.ok:
        # Could not even list the pipelines: report that failure.
        return code, data
    for pipeline in data:
        del_code, del_data = self.delete_pipeline(pipeline['pipelineKey'])
        if del_code != requests.codes.ok:
            # Remember the last failing delete.
            code, data = del_code, del_data
    return code, data
|
def align_generation(self, file_nm, padding=75):
    """Load the ``.align`` file for ``file_nm`` and return the padded
    sentence as an nd.array (lip-position alignment)."""
    align_path = '{}/{}.align'.format(self._align_root, file_nm)
    alignment = Align(align_path)
    return nd.array(alignment.sentence(padding))
|
def reset_state(self):
    """All forked dataflows should only be reset **once and only once** in
    spawned processes. Subclasses should call this method with super.

    Registers an atexit cleanup because ``__del__`` is not guaranteed to
    run at interpreter exit.
    """
    # Guard against double reset; the flag is flipped before any work so a
    # re-entrant call fails fast.
    assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
    self._reset_done = True
    # __del__ not guaranteed to get called at exit; weakref avoids keeping
    # the instance alive just for cleanup.
    atexit.register(del_weakref, weakref.ref(self))
|
def evalRanges(self, datetimeString, sourceTime=None):
    """Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type  datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # Pad the range separator with spaces, then collapse doubled spaces,
    # so the separator can be located reliably below.
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # Try each range regex in priority order; rangeFlag records which
    # pattern matched (times: 1, 2, 7, 3; dates: 4, 5, 6).
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        # Fixed: Python 2 print statement -> print() call (same output).
        print('evalRanges: rangeFlag =', rangeFlag, '[%s]' % s)

    if m is not None:
        if (m.group() != s):
            # capture remaining string
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]
            s = '%s %s' % (chunk1, chunk2)
            flag = 1

            sourceTime, flag = self.parse(s, sourceTime)
            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg: "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()
        return (sourceTime, sourceTime, 0)
|
def construct_meta(need_data, env):
    """Construct the node structure for a need's meta container.

    :param need_data: need_info dictionary for a single need
    :param env: Sphinx build environment (provides config)
    :return: a ``nodes.line_block`` holding status, tags, links and options
    :raises SphinxError: when ``needs_hide_options`` is not a list
    """
    hide_options = env.config.needs_hide_options
    if not isinstance(hide_options, list):
        raise SphinxError('Config parameter needs_hide_options must be of type list')

    node_meta = nodes.line_block(classes=['needs_meta'])

    # Status line
    if need_data["status"] is not None and 'status' not in hide_options:
        status_line = nodes.line(classes=['status'])
        status_label = "status: "
        status_line.append(nodes.inline(status_label, status_label, classes=['status']))
        status_line.append(nodes.inline(need_data["status"], need_data["status"],
                                        classes=["needs-status", str(need_data['status'])]))
        node_meta.append(status_line)

    # Tags line
    if need_data["tags"] and 'tags' not in hide_options:
        tag_line = nodes.line(classes=['tags'])
        tags_label = "tags: "
        tag_line.append(nodes.inline(tags_label, tags_label, classes=['tags']))
        for tag in need_data['tags']:
            tag_line.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
            tag_line.append(nodes.inline(' ', ' '))
        node_meta.append(tag_line)

    # Incoming links
    if need_data['links_back'] and 'links_back' not in hide_options:
        incoming_line = nodes.line(classes=['links', 'incoming'])
        prefix = "links incoming: "
        incoming_line.append(nodes.inline(prefix, prefix))
        incoming_links = Need_incoming(reftarget=need_data['id'])
        incoming_links.append(nodes.inline(need_data['id'], need_data['id']))
        incoming_line.append(incoming_links)
        node_meta.append(incoming_line)

    # Outgoing links
    if need_data['links'] and 'links' not in hide_options:
        outgoing_line = nodes.line(classes=['links', 'outgoing'])
        prefix = "links outgoing: "
        outgoing_line.append(nodes.inline(prefix, prefix))
        outgoing_links = Need_outgoing(reftarget=need_data['id'])
        outgoing_links.append(nodes.inline(need_data['id'], need_data['id']))
        outgoing_line.append(outgoing_links)
        node_meta.append(outgoing_line)

    # Extra (user-defined) options
    extra_options = getattr(env.config, 'needs_extra_options', {})
    for key in extra_options:
        if key in hide_options:
            continue
        param_data = need_data[key]
        if not param_data:
            continue
        param_option = '{}: '.format(key)
        option_line = nodes.line(classes=['extra_option'])
        option_line.append(nodes.inline(param_option, param_option, classes=['extra_option']))
        option_line.append(nodes.inline(param_data, param_data,
                                        classes=["needs-extra-option", str(key)]))
        node_meta.append(option_line)

    # Global options
    global_options = getattr(env.config, 'needs_global_options', {})
    for key in global_options:
        # A locally overwritten global is already part of extra_options,
        # so it was rendered during extra-option handling above.
        if key in extra_options or key in hide_options:
            continue
        param_data = need_data[key]
        if not param_data:
            continue
        param_option = '{}: '.format(key)
        global_option_line = nodes.line(classes=['global_option'])
        global_option_line.append(nodes.inline(param_option, param_option, classes=['global_option']))
        global_option_line.append(nodes.inline(param_data, param_data,
                                               classes=["needs-global-option", str(key)]))
        node_meta.append(global_option_line)

    return node_meta
|
def verify_all(num):
    """Verify all problem files in the current directory and print an
    overview of the status of each problem.

    :param num: the current problem number; drives the number of overview
        rows and the '-skipped' suffix handling.
    """
    # Define various problem statuses as coloured one-character symbols
    keys = ('correct', 'incorrect', 'error', 'skipped', 'missing')
    symbols = ('C', 'I', 'E', 'S', '.')
    colours = ('green', 'red', 'yellow', 'cyan', 'white')
    status = OrderedDict(
        (key, click.style(symbol, fg=colour, bold=True))
        for key, symbol, colour in zip(keys, symbols, colours)
    )
    overview = {}

    # Search through problem files using glob module
    files = problem_glob()

    # No Project Euler files in the current directory
    if not files:
        click.echo("No Project Euler files found in the current directory.")
        sys.exit(1)

    for file in files:
        # Catch KeyboardInterrupt during verification to allow the user to
        # skip the verification of a specific problem if it takes too long
        try:
            is_correct = verify(file.num, filename=str(file), exit=False)
        except KeyboardInterrupt:
            overview[file.num] = status['skipped']
        else:
            if is_correct is None:
                # error was returned by problem file
                overview[file.num] = status['error']
            elif is_correct:
                overview[file.num] = status['correct']
            else:
                overview[file.num] = status['incorrect']
                # Attempt to add "skipped" suffix to the filename if the
                # problem file is not the current problem. This is useful
                # when --verify-all is used in a directory containing
                # files generated pre-v1.1 (before files with suffixes)
                if file.num != num:
                    file.change_suffix('-skipped')

    # Separate each verification with a newline
    click.echo()

    # Print overview of the status of each problem
    legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items())
    click.echo('-' * 63)
    click.echo(legend + '\n')

    # Rows needed for overview is based on the current problem number
    num_of_rows = (num + 19) // 20
    for row in range(1, num_of_rows + 1):
        low, high = (row * 20) - 19, (row * 20)
        click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False)
        for problem in range(low, high + 1):
            # Fixed: use a distinct name ('mark') instead of rebinding
            # 'status', which shadowed the status-symbol dict above.
            # Missing files get the plain '.' indicator.
            mark = overview.get(problem, '.')
            # Separate problem indicators into groups of 5
            spacer = '   ' if (problem % 5 == 0) else ' '
            # Start a new line at the end of each row
            click.secho(mark + spacer, nl=(problem % 20 == 0))
    click.echo()
|
def encode(cls, line):
    """Backslash-escape (or base64-encode) ``line.value`` in place.

    No-op when the line is already marked encoded; sets ``line.encoded``
    afterwards so repeated calls are safe.
    """
    if line.encoded:
        return
    encoding = getattr(line, 'encoding_param', None)
    if encoding and encoding.upper() == cls.base64string:
        line.value = b64encode(line.value).decode('utf-8')
    else:
        line.value = backslashEscape(str_(line.value))
    line.encoded = True
|
def visitObjectMacro(self, ctx: jsgParser.ObjectExprContext):
    """objectMacro : ID EQUALS membersDef SEMI"""
    macro_name = as_token(ctx)
    # Register the named object expression in the grammar-element table.
    self._context.grammarelts[macro_name] = JSGObjectExpr(
        self._context, ctx.membersDef(), macro_name
    )
|
def Max(a, axis, keep_dims):
    """Max reduction op.

    Returns a 1-tuple (op-wrapper convention) holding the amax of ``a``
    over ``axis``; an ndarray axis is converted to a tuple of ints.
    """
    reduce_axes = tuple(axis) if isinstance(axis, np.ndarray) else axis
    return (np.amax(a, axis=reduce_axes, keepdims=keep_dims),)
|
def fft_convolve(in1, in2, conv_device="cpu", conv_mode="linear", store_on_gpu=False):
    """Determine the convolution of two inputs using the FFT. Contains an
    implementation for both CPU and GPU.

    INPUTS:
    in1             (no default):   Array containing one set of data, possibly an image.
    in2             (no default):   Gpuarray containing the FFT of the PSF.
    conv_device     (default="cpu"): Parameter which allows specification of "cpu" or "gpu".
    conv_mode       (default="linear"): Mode specifier for the convolution - "linear" or "circular".
    store_on_gpu    (default=False): When True, GPU results stay on the device.
    """
    # NOTE: Circular convolution assumes a periodic repetition of the input.
    # This can cause edge effects. Linear convolution pads the input with
    # zeros to avoid this problem but is consequently heavier on computation
    # and memory.
    if conv_device == 'gpu':
        if conv_mode == "linear":
            fft_in1 = pad_array(in1)
            fft_in1 = gpu_r2c_fft(fft_in1, store_on_gpu=True)
            fft_in2 = in2

            conv_in1_in2 = fft_in1 * fft_in2
            conv_in1_in2 = contiguous_slice(fft_shift(gpu_c2r_ifft(conv_in1_in2, is_gpuarray=True, store_on_gpu=True)))

            if store_on_gpu:
                return conv_in1_in2
            else:
                return conv_in1_in2.get()
        elif conv_mode == "circular":
            fft_in1 = gpu_r2c_fft(in1, store_on_gpu=True)
            fft_in2 = in2

            conv_in1_in2 = fft_in1 * fft_in2
            conv_in1_in2 = fft_shift(gpu_c2r_ifft(conv_in1_in2, is_gpuarray=True, store_on_gpu=True))

            if store_on_gpu:
                return conv_in1_in2
            else:
                return conv_in1_in2.get()
    else:
        if conv_mode == "linear":
            fft_in1 = pad_array(in1)
            fft_in2 = in2
            # Fixed: slice bounds must be ints -- float bounds
            # (0.5*sz, 1.5*sz) raise TypeError on Python 3 / modern NumPy.
            # This extracts the central region of the zero-padded result.
            out1_slice = tuple(slice(int(0.5 * sz), int(1.5 * sz)) for sz in in1.shape)
            return np.require(np.fft.fftshift(np.fft.irfft2(fft_in2 * np.fft.rfft2(fft_in1)))[out1_slice], np.float32, 'C')
        elif conv_mode == "circular":
            return np.fft.fftshift(np.fft.irfft2(in2 * np.fft.rfft2(in1)))
|
def unlink_parameter(self, param):
    """Remove ``param`` from being a parameter of this parameterized object.

    :param param: param object to remove from being a parameter of this
        parameterized object.
    :raises HierarchyError: when ``param`` does not belong to this object
        (or is not a parameter at all).
    """
    if not param in self.parameters:
        # Distinguish "wrong parent" from "not a parameter": a real param
        # has _short(); anything else raises AttributeError inside format.
        try:
            raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
        except AttributeError:
            raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
    # Flat offset of param's slice within this object's parameter vector.
    start = sum([p.size for p in self.parameters[:param._parent_index_]])
    self.size -= param.size
    del self.parameters[param._parent_index_]
    # Detach naming, parent link and observer plumbing.
    self._remove_parameter_name(param)
    param._disconnect_parent()
    param.remove_observer(self, self._pass_through_notify_observers)
    # Close the gap left by the removed slice in every index operation.
    for name, iop in self._index_operations.items():
        iop.shift_left(start, param.size)
    self._connect_parameters()
    self._notify_parent_change()
    # Propagate the size reduction up the parent chain.
    parent = self._parent_
    while parent is not None:
        parent.size -= param.size
        parent = parent._parent_
    # Rewire the whole hierarchy from the root.
    self._highest_parent_._connect_parameters()
    self._highest_parent_._connect_fixes()
    self._highest_parent_._notify_parent_change()
|
def trim_data(x):
    """Trim leading and trailing NaNs from a dataset.

    Browses the array from each end, finds the first non-NaN index on each
    side, and returns the corresponding slice of the array.

    :param x: 1-D array-like of floats (indexable, sliceable).
    :return: slice of ``x`` with leading/trailing NaNs removed. An empty or
        all-NaN input yields an empty slice (the original raised IndexError
        by scanning past the array bounds).
    """
    n = len(x)
    first = 0
    # Bounds check added: without it an all-NaN (or empty) input walks past
    # the end of the array and raises IndexError.
    while first < n and np.isnan(x[first]):
        first += 1
    last = n
    while last > first and np.isnan(x[last - 1]):
        last -= 1
    return x[first:last]
|
def plot_coupling_matrix(self, lmax, nwin=None, weights=None, mode='full', axes_labelsize=None, tick_labelsize=None, show=True, ax=None, fname=None):
    """Plot the multitaper coupling matrix.

    This matrix relates the global power spectrum to the expectation of
    the localized multitaper spectrum.

    Usage
    -----
    x.plot_coupling_matrix(lmax, [nwin, weights, mode, axes_labelsize,
                                  tick_labelsize, show, ax, fname])

    Parameters
    ----------
    lmax : int
        Spherical harmonic bandwidth of the global power spectrum.
    nwin : int, optional, default = x.nwin
        Number of tapers used in the multitaper spectral analysis.
    weights : ndarray, optional, default = x.weights
        Taper weights used with the multitaper spectral analyses.
    mode : str, optional, default = 'full'
        'full' returns a biased output spectrum of size lmax + lwin + 1.
        'same' returns a biased output spectrum with the same size
        (lmax + 1) as the input spectrum.
        'valid' returns a biased spectrum with size lmax - lwin + 1,
        containing only the part not influenced by degrees l > lmax.
    axes_labelsize : int, optional, default = None
        The font size for the x and y axes labels.
    tick_labelsize : int, optional, default = None
        The font size for the x and y tick labels.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    ax : matplotlib axes object, optional, default = None
        An existing axes object in which the plot will appear.
    fname : str, optional, default = None
        If present, save the image to the specified file.

    Returns
    -------
    (fig, axes) when a new figure was created (``ax is None``); ``None``
    when plotting into a caller-supplied ``ax``.
    """
    # Square figure: both dimensions taken from the configured width.
    figsize = (_mpl.rcParams['figure.figsize'][0], _mpl.rcParams['figure.figsize'][0])
    if axes_labelsize is None:
        axes_labelsize = _mpl.rcParams['axes.labelsize']
    if tick_labelsize is None:
        tick_labelsize = _mpl.rcParams['xtick.labelsize']
    if ax is None:
        fig = _plt.figure(figsize=figsize)
        axes = fig.add_subplot(111)
    else:
        axes = ax
    axes.imshow(self.coupling_matrix(lmax, nwin=nwin, weights=weights, mode=mode), aspect='auto')
    axes.set_xlabel('Input power', fontsize=axes_labelsize)
    axes.set_ylabel('Output power', fontsize=axes_labelsize)
    axes.tick_params(labelsize=tick_labelsize)
    axes.minorticks_on()
    if ax is None:
        fig.tight_layout(pad=0.5)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        # Bug fix: the original returned ``fig, axes`` unconditionally, which
        # raised NameError when the caller supplied ``ax`` (fig never bound).
        return fig, axes
|
def execute_closing_transaction(statements: Iterable):
    """Open a connection, execute the statements in one transaction, close it.

    :param statements: iterable of SQL statement strings to execute in order.
    """
    with closing(connect()) as conn, conn.cursor() as cursor:
        for stmt in statements:
            cursor.execute(stmt)
|
def cheby_rect(G, bounds, signal, **kwargs):
    r"""Fast filtering using Chebyshev polynomial for a perfect rectangle filter.

    Parameters
    ----------
    G : Graph
    bounds : array_like
        The bounds of the pass-band filter (length-2: lower and upper edge).
    signal : array_like
        Signal to filter (one column per signal if 2-D).
    order : int (optional)
        Order of the Chebyshev polynomial (default: 30).

    Returns
    -------
    r : array_like
        Result of the filtering.
    """
    if not (isinstance(bounds, (list, np.ndarray)) and len(bounds) == 2):
        raise ValueError('Bounds of wrong shape.')
    bounds = np.array(bounds)
    # Number of Chebyshev terms: requested order plus the constant term.
    m = int(kwargs.pop('order', 30) + 1)
    # Fix the output shape according to whether the signal is 1-D or 2-D.
    # NOTE(review): this ``r`` is immediately overwritten below; it only
    # documents the intended result shape.
    try:
        Nv = np.shape(signal)[1]
        r = np.zeros((G.N, Nv))
    except IndexError:
        r = np.zeros((G.N))
    # Map the band edges onto the Chebyshev domain via arccos of the
    # spectrum rescaled to [-1, 1] (assumes G.lmax is available — standard
    # for pygsp graphs).
    b1, b2 = np.arccos(2. * bounds / G.lmax - 1.)
    # Rescaled Laplacian operator whose spectrum lies in [-2, 2].
    factor = 4. / G.lmax * G.L - 2. * sparse.eye(G.N)
    # Chebyshev recurrence seeds: T_0 applied to the signal, then T_1.
    T_old = signal
    T_cur = factor.dot(signal) / 2.
    # Closed-form Chebyshev coefficients of the ideal band-pass indicator:
    # constant term plus first-order term.
    r = (b1 - b2) / np.pi * signal + 2. / np.pi * (np.sin(b1) - np.sin(b2)) * T_cur
    for k in range(2, m):
        # Standard three-term recurrence (factor carries the needed 2x).
        T_new = factor.dot(T_cur) - T_old
        r += 2. / (k * np.pi) * (np.sin(k * b1) - np.sin(k * b2)) * T_new
        T_old = T_cur
        T_cur = T_new
    return r
|
def marvcli_query(ctx, list_tags, collections, discarded, outdated, path, tags, null):
    """Query datasets.

    Use --collection=* to list all datasets across all collections.
    """
    # With no filter at all, show the command help and abort.
    if not any([collections, discarded, list_tags, outdated, path, tags]):
        click.echo(ctx.get_help())
        ctx.exit(1)

    sep = '\x00' if null else '\n'
    site = create_app().site

    if '*' in collections:
        collections = None
    else:
        unknown = [col for col in collections if col not in site.collections]
        if unknown:
            ctx.fail('Unknown collection: {}'.format(unknown[0]))

    if list_tags:
        tags = site.listtags(collections)
        if tags:
            click.echo(sep.join(tags), nl=not null)
        else:
            click.echo('no tags', err=True)
        return

    setids = site.query(collections, discarded, outdated, path, tags)
    if setids:
        click.echo(sep.join(setids), nl=not null)
|
def readline_check_physical(self):
    """Return the next physical line, running the physical-line checks on it.

    Suitable as a ``readline`` callable for ``tokenize.generate_tokens``.
    """
    physical_line = self.readline()
    if physical_line:
        self.check_physical(physical_line)
    return physical_line
|
def _read_file(self, filename):
    """Return the meaningful lines of ``filename``.

    Strips ``#`` comments (inline and full-line) and whitespace, and drops
    lines that end up empty.
    """
    kept = []
    with open(filename, 'r') as handle:
        for raw in handle.read().split('\n'):
            content = raw.strip().split('#')[0].strip()
            if content:
                kept.append(content)
    return kept
|
def generate_acl(config, model_cls, raml_resource, es_based=True):
    """Generate an ACL class for ``model_cls``.

    The generated ACL class has an ``item_model`` attribute set to
    :model_cls:. ACLs used for collection and item access control are
    generated from the first security scheme with type ``x-ACL``. If
    :raml_resource: has no x-ACL security schemes defined, or the
    ``collection``/``item`` settings are empty, ALLOW_ALL ACL is used.

    :param config: configurator whose ``registry.database_acls`` flag selects
        the guards mixin.
    :param model_cls: Generated model class.
    :param raml_resource: Instance of ramlfications.raml.ResourceNode
        for which ACL is being generated.
    :param es_based: Boolean indicating whether ACL should query ES or
        not when getting an object.
    :return: dynamically created ``GeneratedACL`` class.
    """
    schemes = raml_resource.security_schemes or []
    schemes = [sch for sch in schemes if sch.type == 'x-ACL']
    if not schemes:
        collection_acl = item_acl = []
        log.debug('No ACL scheme applied. Using ACL: {}'.format(item_acl))
    else:
        sec_scheme = schemes[0]
        log.debug('{} ACL scheme applied'.format(sec_scheme.name))
        settings = sec_scheme.settings or {}
        collection_acl = parse_acl(acl_string=settings.get('collection'))
        item_acl = parse_acl(acl_string=settings.get('item'))

    class GeneratedACLBase(object):
        item_model = model_cls

        def __init__(self, request, es_based=es_based):
            super(GeneratedACLBase, self).__init__(request=request)
            self.es_based = es_based
            self._collection_acl = collection_acl
            self._item_acl = item_acl

    bases = [GeneratedACLBase]
    if config.registry.database_acls:
        from nefertari_guards.acl import DatabaseACLMixin as GuardsMixin
        # Bug fix: the original did ``bases += [DatabaseACLMixin, GuardsMixin]``,
        # but ``DatabaseACLMixin`` is not bound here (it was imported *as*
        # GuardsMixin), so enabling database ACLs raised NameError.
        bases.append(GuardsMixin)
    bases.append(BaseACL)
    return type('GeneratedACL', tuple(bases), {})
|
def add_effect(effect_id, *args, **kwargs):
    '''If inside a side-effect, adds an effect to it.

    Returns True when an enclosing side-effect was found and the effect was
    recorded, False otherwise.
    '''
    side_effect = fiber.get_stack_var(SIDE_EFFECT_TAG)
    if side_effect is None:
        return False
    side_effect.add_effect(effect_id, *args, **kwargs)
    return True
|
def getHeader(self):
    """Return the file header as a dict.

    Each field is read through the corresponding getter on ``self``.
    """
    field_getters = (
        ("technician", self.getTechnician),
        ("recording_additional", self.getRecordingAdditional),
        ("patientname", self.getPatientName),
        ("patient_additional", self.getPatientAdditional),
        ("patientcode", self.getPatientCode),
        ("equipment", self.getEquipment),
        ("admincode", self.getAdmincode),
        ("gender", self.getGender),
        ("startdate", self.getStartdatetime),
        ("birthdate", self.getBirthdate),
    )
    return {name: getter() for name, getter in field_getters}
|
def plot_script(self, script):
    """Call the script's plot function, then redraw both plot widgets.

    Args:
        script: script to be plotted.
    """
    widgets = (self.matplotlibwidget_1, self.matplotlibwidget_2)
    script.plot([widget.figure for widget in widgets])
    for widget in widgets:
        widget.draw()
|
def locale_check():
    """Checks if this application runs with a correct locale (i.e. supports
    UTF-8 encoding) and attempts to fix it if this is not the case.

    This is to prevent UnicodeEncodeError with unicode paths when using
    standard library I/O operation methods (e.g. os.stat() or os.path.*)
    which rely on the system or user locale.

    More information can be found there:
    http://seasonofcode.com/posts/unicode-i-o-and-locales-in-python.html
    or there:
    http://robjwells.com/post/61198832297/get-your-us-ascii-out-of-my-face

    Raises SystemExit when no usable UTF-8 locale can be configured.
    """
    # no need to check on Windows or when this application is frozen
    if sys.platform.startswith("win") or hasattr(sys, "frozen"):
        return
    language = encoding = None
    try:
        language, encoding = locale.getlocale()
    except ValueError as e:
        # getlocale() can raise on an unparsable locale string.
        log.error("Could not determine the current locale: {}".format(e))
    if not language and not encoding:
        # No locale configured at all: fall back to C.UTF-8.
        try:
            log.warning("Could not find a default locale, switching to C.UTF-8...")
            locale.setlocale(locale.LC_ALL, ("C", "UTF-8"))
        except locale.Error as e:
            log.error("Could not switch to the C.UTF-8 locale: {}".format(e))
            raise SystemExit
    elif encoding != "UTF-8":
        # Keep the user's language but force a UTF-8 encoding.
        log.warning("Your locale {}.{} encoding is not UTF-8, switching to the UTF-8 version...".format(language, encoding))
        try:
            locale.setlocale(locale.LC_ALL, (language, "UTF-8"))
        except locale.Error as e:
            log.error("Could not set an UTF-8 encoding for the {} locale: {}".format(language, e))
            raise SystemExit
    else:
        # Already UTF-8: nothing to do.
        log.info("Current locale is {}.{}".format(language, encoding))
|
def object_table(self, object_id=None):
    """Fetch and parse the object table info for one or more object IDs.

    Args:
        object_id: An object ID to fetch information about. If this is
            None, then the entire object table is fetched.

    Returns:
        Information from the object table.
    """
    self._check_connected()
    if object_id is not None:
        # Single object ID requested.
        return self._object_table(object_id)
    # Otherwise fetch every entry in the object table.
    prefix = ray.gcs_utils.TablePrefix_OBJECT_string
    binary_ids = {key[len(prefix):] for key in self._keys(prefix + "*")}
    return {
        binary_to_object_id(binary_id): self._object_table(binary_to_object_id(binary_id))
        for binary_id in binary_ids
    }
|
def get_negative_log_likelihood(self, y_true, X, mask):
    """Compute the loss, i.e. negative log likelihood (normalized by number of
    time steps).

    likelihood = 1/Z * exp(-E)  ->  neg_log_like = -log(1/Z * exp(-E)) = logZ + E
    """
    # Unary (per-position) potentials from a dense projection of the input.
    input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
    if self.use_boundary:
        # Add start/end boundary potentials so sequence edges are modelled.
        input_energy = self.add_boundary_energy(input_energy, mask, self.left_boundary, self.right_boundary)
    # E: energy of the gold label sequence; logZ: log partition function
    # over all label sequences.
    energy = self.get_energy(y_true, input_energy, mask)
    logZ = self.get_log_normalization_constant(input_energy, mask, input_length=K.int_shape(X)[1])
    nloglik = logZ + energy
    # Normalize by the number of (unmasked) time steps.
    if mask is not None:
        nloglik = nloglik / K.sum(K.cast(mask, K.floatx()), 1)
    else:
        nloglik = nloglik / K.cast(K.shape(X)[1], K.floatx())
    return nloglik
|
def _close_connections(self, connection=None, timeout=5):
    """Close ``connection`` if specified, otherwise close all connections.

    :param connection: optional single connection to close; when ``None``,
        every connection in ``self._concurrent_connections`` is closed.
    :param timeout: seconds passed to :func:`asyncio.wait`.
    :return: the :func:`asyncio.wait` awaitable over the ``connection_lost``
        waiters, or ``None`` when there was nothing to wait for.
    """
    def _close(conn, waiters):
        # Grab the connection_lost waiter *before* closing so we can await
        # the close completing.
        waiter = conn.event('connection_lost').waiter()
        if waiter:
            waiters.append(waiter)
        conn.close()

    # Renamed from ``all`` to avoid shadowing the builtin.
    waiters = []
    if connection:
        _close(connection, waiters)
    else:
        # Snapshot then reset the set so new connections are not affected.
        connections = list(self._concurrent_connections)
        self._concurrent_connections = set()
        for conn in connections:
            _close(conn, waiters)
    if waiters:
        self.logger.info('%s closing %d connections', self, len(waiters))
        # NOTE(review): the ``loop`` keyword was removed from asyncio.wait in
        # Python 3.10; kept for compatibility with the original runtime.
        return asyncio.wait(waiters, timeout=timeout, loop=self._loop)
|
def _init_glyph(self, plot, mapping, properties):
    """Returns a Bokeh glyph object and optionally creates a colorbar.

    Delegates glyph creation to the parent class, then draws a colorbar for
    every color mapper registered in ``self.handles``.
    """
    result = super(ColorbarPlot, self)._init_glyph(plot, mapping, properties)
    if self.colorbar:
        suffix = 'color_mapper'
        for key, mapper in list(self.handles.items()):
            if key.endswith(suffix):
                # Strip the 'color_mapper' suffix to recover the dim prefix.
                self._draw_colorbar(plot, mapper, key[:-len(suffix)])
    return result
|
def _get_chart_info(df, vtype, cat, prep, callers):
    """Retrieve values for a specific variant type, category and prep method.

    :param df: pandas DataFrame with columns ``value.floor``, ``variant.type``,
        ``category``, ``bamprep``, ``caller`` and ``value``.
    :param vtype: variant type to select.
    :param cat: category to select.
    :param prep: bam prep method to select.
    :param callers: ordered list of caller names to report.
    :return: ``(vals, labels, maxval_raw)`` — one floor value and one label per
        caller (placeholder 1/"" when a caller has no row), plus the global
        maximum floor value for axis scaling.
    """
    maxval_raw = max(list(df["value.floor"]))
    curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat) & (df["bamprep"] == prep)]
    vals = []
    labels = []
    for c in callers:
        # Bug fix: the mask must come from the filtered frame (curdf), not
        # from df — a df-based boolean Series is misaligned with curdf's
        # index and either warns or raises in modern pandas.
        row = curdf[curdf["caller"] == c]
        if len(row) > 0:
            vals.append(list(row["value.floor"])[0])
            labels.append(list(row["value"])[0])
        else:
            # Callers with no data get a placeholder bar of height 1.
            vals.append(1)
            labels.append("")
    return vals, labels, maxval_raw
|
def solve_ng(self, structure, wavelength_step=0.01, filename="ng.dat"):
    r"""Solve for the group index, :math:`n_g`, of a structure at a particular
    wavelength.

    Uses a central finite difference of the effective index around the nominal
    wavelength: :math:`n_g = n_{eff} - \lambda\,dn_{eff}/d\lambda`.

    Args:
        structure (Structure): The target structure to solve for modes.
        wavelength_step (float): The step to take below and above the nominal
            wavelength, used for approximating the gradient of
            :math:`n_\mathrm{eff}` at the nominal wavelength. Default is 0.01.
        filename (str): The nominal filename to use when saving the
            group indices. Defaults to 'ng.dat'. Falsy to skip saving.

    Returns:
        list: A list of the group indices found for each mode.
    """
    wl_nom = structure._wl
    # Effective indices at the nominal wavelength.
    self.solve(structure)
    n_ctrs = self.n_effs
    # ... one step below ...
    structure.change_wavelength(wl_nom - wavelength_step)
    self.solve(structure)
    n_bcks = self.n_effs
    # ... and one step above, for a central difference.
    structure.change_wavelength(wl_nom + wavelength_step)
    self.solve(structure)
    n_frws = self.n_effs
    n_gs = []
    for n_ctr, n_bck, n_frw in zip(n_ctrs, n_bcks, n_frws):
        # n_g = n_eff - wl * dn_eff/dwl (central difference over 2*step).
        n_gs.append(n_ctr - wl_nom * (n_frw - n_bck) / (2 * wavelength_step))
    if filename:
        with open(self._modes_directory + filename, "w") as fs:
            fs.write("# Mode idx, Group index\n")
            for idx, n_g in enumerate(n_gs):
                fs.write("%i,%.3f\n" % (idx, np.round(n_g.real, 3)))
    return n_gs
|
def validate_username_for_new_account(person, username):
    """Validate the new username for a new account. If the username is invalid
    or in use, raises :py:exc:`UsernameInvalid` or :py:exc:`UsernameTaken`.

    :param person: Owner of new account.
    :param username: Username to validate.
    """
    # This is much the same as validate_username_for_new_person, except we
    # don't care if the username is used by the person owning the account.
    # Is the username valid?
    validate_username(username)
    taken_msg = six.u(
        'The username is already taken. Please choose another. '
        'If this was the name of your old account please email %s'
    ) % settings.ACCOUNTS_EMAIL
    # Any *other* person already using this username?
    other_people = Person.objects.filter(username__exact=username).exclude(pk=person.pk)
    if other_people.count() >= 1:
        raise UsernameTaken(taken_msg)
    # Any account with this username not belonging to this person?
    other_accounts = Account.objects.filter(username__exact=username).exclude(person__pk=person.pk)
    if other_accounts.count() >= 1:
        raise UsernameTaken(taken_msg)
    # Check datastore, in case username was created outside Karaage; make
    # sure we don't count the entry for person.
    own_entries = Person.objects.filter(username__exact=username).filter(pk=person.pk)
    if own_entries.count() == 0 and account_exists(username):
        raise UsernameTaken(six.u('Username is already in external personal datastore.'))
|
def __update(self, breakpoint_graph, merge_edges=False):
    """Updates a current :class`BreakpointGraph` object with information from a
    supplied :class`BreakpointGraph` instance.

    Every edge of the supplied graph is deep-copied before insertion, so the
    source graph stays untouched. When ``merge_edges`` is set, edges between
    similar vertices are merged into already existing ones.

    :param breakpoint_graph: a breakpoint graph to extract information from
    :type breakpoint_graph: :class`BreakpointGraph`
    :param merge_edges: flag to indicate if added edges are to be merged into
        already existing ones
    :type merge_edges: ``Boolean``
    :return: ``None``, performs inplace changes
    """
    for edge in breakpoint_graph.edges():
        self.__add_bgedge(bgedge=deepcopy(edge), merge=merge_edges)
|
def create_attributes(klass, attributes, previous_object=None):
    """Attributes for resource creation.

    Starts from ``previous_object``'s JSON representation (minus the 'sys'
    metadata key) when one is given, then overlays ``attributes``.
    """
    if previous_object is None:
        merged = {}
    else:
        merged = {key: value for key, value in previous_object.to_json().items() if key != 'sys'}
    merged.update(attributes)
    return merged
|
def get_protein_coding_genes(path_or_buffer, include_polymorphic_pseudogenes=True, remove_duplicates=True, **kwargs):
    r"""Get list of all protein-coding genes based on Ensembl GTF file.

    Parameters
    ----------
    See :func:`get_genes` function.

    Returns
    -------
    `pandas.DataFrame`
        Table with rows corresponding to protein-coding genes.
    """
    valid_biotypes = {'protein_coding'}
    if include_polymorphic_pseudogenes:
        valid_biotypes.add('polymorphic_pseudogene')
    return get_genes(path_or_buffer, valid_biotypes, remove_duplicates=remove_duplicates, **kwargs)
|
def get_showcases(self):
    # type: () -> List[hdx.data.showcase.Showcase]
    """Get any showcases the dataset is in.

    Returns:
        List[Showcase]: list of showcases (empty when lookup fails).
    """
    assoc_result, showcase_dicts = self._read_from_hdx(
        'showcase', self.data['id'], fieldname='package_id',
        action=hdx.data.showcase.Showcase.actions()['list_showcases'])
    if not assoc_result:
        return list()
    return [
        hdx.data.showcase.Showcase(showcase_dict, configuration=self.configuration)
        for showcase_dict in showcase_dicts
    ]
|
def protocol_names(self):
    """Returns all registered protocol names as strings."""
    return [str(proto.name) for proto in self.protocols()]
|
def __update_rating(uid, rating):
    '''Update the rating value for the record with the given uid.'''
    query = TabRating.update(rating=rating).where(TabRating.uid == uid)
    query.execute()
|
def make_multi_lagger(lags, groupby_kwargs=None):
    """Return a union of transformers that apply different lags.

    Args:
        lags (Collection[int]): collection of lags to apply
        groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
    """
    laggers = [SingleLagger(lag, groupby_kwargs=groupby_kwargs) for lag in lags]
    return FeatureUnion([(repr(lagger), lagger) for lagger in laggers])
|
def verify_ws2p_head(self, head: Any) -> bool:
    """Verify the signature of a WS2P HEAD document.

    :param Any head: document exposing ``signature`` (base64 str) and
        ``inline()``
    :return: True when the signature verifies, False otherwise
    """
    decoded_signature = base64.b64decode(head.signature)
    signed_message = decoded_signature + bytes(head.inline(), 'ascii')
    try:
        self.verify(signed_message)
    except ValueError:
        return False
    return True
|
def list_builds(page_size=200, page_index=0, sort="", q=""):
    """List all builds.

    :param page_size: number of builds returned per query
    :param page_index: index of the page to fetch
    :param sort: RSQL sorting query
    :param q: RSQL query
    :return: build content, or None when the API returned nothing
    """
    response = utils.checked_api_call(
        pnc_api.builds_running, 'get_all',
        page_size=page_size, page_index=page_index, sort=sort, q=q)
    if not response:
        return None
    return response.content
|
def apply_bparams(fn):
    """Run ``bparams -a`` and apply ``fn`` to the list of its output lines.

    :param fn: callable taking the list of output lines.
    :return: ``fn``'s result, or None when bparams is missing or fails.
    """
    cmd = ["bparams", "-a"]
    try:
        output = subprocess.check_output(cmd).decode('utf-8')
    except (OSError, subprocess.CalledProcessError):
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only command failures yield None.
        return None
    return fn(output.split("\n"))
|
def getHook(self, repo_user, repo_name, hook_id):
    """GET /repos/:owner/:repo/hooks/:id

    Returns the Hook.
    """
    path = ['repos', repo_user, repo_name, 'hooks', str(hook_id)]
    return self.api.makeRequest(path, method='GET')
|
def get_tac_resource(url):
    """Get the requested resource or update resource using Tacoma account.

    :returns: http response with content in xml
    """
    response = TrumbaTac_DAO().getURL(url, {"Accept": "application/xml"})
    _log_xml_resp("Tacoma", url, response)
    return response
|
def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im():
    """TPU related imagenet model."""
    hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
    update_hparams_for_tpu(hparams)
    overrides = {
        'batch_size': 4,
        'optimizer': "Adafactor",
        'learning_rate_schedule': "rsqrt_decay",
        'learning_rate_warmup_steps': 6000,
        'layer_prepostprocess_dropout': 0.1,
    }
    for name, value in overrides.items():
        setattr(hparams, name, value)
    return hparams
|
def set_position(self, resource_id, to_position, db_session=None, *args, **kwargs):
    """Sets node position for new node in the tree.

    Thin wrapper delegating to the underlying service.

    :param resource_id: resource to move
    :param to_position: new position
    :param db_session:
    :return: result of the service call
    """
    return self.service.set_position(
        resource_id=resource_id, to_position=to_position,
        db_session=db_session, *args, **kwargs)
|
def get_user(self, sAMAccountName):
    """Fetches one user object from the AD, based on the sAMAccountName
    attribute (read: username).

    :param sAMAccountName: account name to look up.
    :yields: MSADUser objects built from matching LDAP entries.
    """
    logger.debug('Polling AD for user %s' % sAMAccountName)
    # Bug fix: the filter string was missing its closing parenthesis for the
    # outer '&' term, producing an invalid LDAP search filter (RFC 4515).
    ldap_filter = r'(&(objectClass=user)(sAMAccountName=%s))' % sAMAccountName
    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):  # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
|
def compatible_firmware_version(self):
    """Returns the DLL's compatible J-Link firmware version.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        The firmware version of the J-Link that the DLL is compatible with.

    Raises:
        JLinkException: on error.
    """
    # Everything before 'compiled' identifies the firmware family.
    identifier = self.firmware_version.split('compiled')[0]
    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
    result = self._dll.JLINKARM_GetEmbeddedFWString(identifier.encode(), buf, self.MAX_BUF_SIZE)
    if result < 0:
        raise errors.JLinkException(result)
    return ctypes.string_at(buf).decode()
|
def flag_based_complete(self, text: str, line: str, begidx: int, endidx: int, flag_dict: Dict[str, Union[Iterable, Callable]], all_else: Union[None, Iterable, Callable] = None) -> List[str]:
    """Tab completion driven by the flag that precedes the token being completed.

    :param text: the string prefix we are attempting to match (all returned
        matches must begin with it)
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param flag_dict: maps flags (ex: -c, --create) to either an iterable of
        candidate strings or a function that performs tab completion
        (ex: path_complete)
    :param all_else: optional matcher for tokens not preceded by a known flag
    :return: a list of possible tab completions
    """
    tokens, _ = self.tokens_for_completion(line, begidx, endidx)
    if not tokens:
        return []
    # Default matcher; overridden when the previous token is a known flag
    # (needs at least two tokens for a flag to precede the completed one).
    match_against = all_else
    if len(tokens) > 1:
        match_against = flag_dict.get(tokens[-2], all_else)
    # Collection of candidates -> prefix matching; callable -> delegate.
    if isinstance(match_against, Collection):
        return self.basic_complete(text, line, begidx, endidx, match_against)
    if callable(match_against):
        return match_against(text, line, begidx, endidx)
    return []
|
def class_space(classlevel=3):
    "returns the calling class' name and dictionary"
    target_frame = sys._getframe(classlevel)
    return target_frame.f_code.co_name, target_frame.f_locals
|
def with_matching_args(self, *args, **kwargs):
    """Set the last call to expect specific argument values if those
    arguments exist.

    Unlike :func:`fudge.Fake.with_args`, only the supplied positional and
    keyword values are declared as expectations — any other keyword argument
    used by the app under test is still allowed. For example, you can declare
    positional arguments but ignore keyword arguments::

        >>> import fudge
        >>> db = fudge.Fake('db').expects('transaction').with_matching_args('insert')
        >>> db.transaction('insert', isolation_level='lock')
        >>> fudge.clear_expectations()

    .. note::
        you may get more mileage out of :mod:`fudge.inspector` functions as
        described in :func:`fudge.Fake.with_args`
    """
    current_call = self._get_current_call()
    if args:
        current_call.expected_matching_args = args
    if kwargs:
        current_call.expected_matching_kwargs = kwargs
    return self
|
def has_conflicting_update(self, update):
    """Checks if there are conflicting updates.

    Conflicting updates are updates that have the same requirement but
    different target versions (or commit messages) to update to.

    :param update: Update to check
    :return: bool - True if conflict found
    """
    # We explicitly want a flat list of updates here, which is why
    # iter_updates is called with both ``initial`` and ``scheduled`` False.
    for _, _, _, updates in self.iter_updates(initial=False, scheduled=False):
        for other in updates:
            if update.requirement.key != other.requirement.key:
                continue
            same_message = update.commit_message == other.commit_message
            same_version = (update.requirement.latest_version_within_specs ==
                            other.requirement.latest_version_within_specs)
            if not (same_message and same_version):
                logger.info("{} conflicting with {}/{}".format(
                    update.requirement.key,
                    update.requirement.latest_version_within_specs,
                    other.requirement.latest_version_within_specs))
                return True
    return False
|
def load(fh, model):
    """Deserialize PENMAN graphs from a file (handle or filename).

    Args:
        fh: filename or file object
        model: Xmrs subclass instantiated from decoded triples
    Returns:
        a list of objects (of class *model*)
    """
    decoded = penman.load(fh, cls=XMRSCodec)
    return [model.from_triples(graph.triples()) for graph in decoded]
|
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Checks out each tagged release between ``osa_old_commit`` and
    ``osa_new_commit`` in the git repository at ``osa_repo_dir`` and runs
    ``reno report`` for each, concatenating the output. Headers are rewritten
    to match osa-differ's formatting.

    :param osa_repo_dir: path to the checked-out openstack-ansible repo.
    :param osa_old_commit: older git ref (SHA or tag).
    :param osa_new_commit: newer git ref (SHA or tag).
    :return: concatenated, reformatted release notes as a string.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and b tags. We need to
    # fix the list so that major tags are printed before rc and b releases.
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA. The tag found here is the tag
    # that was cut either on or before the given SHA.
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then 'git describe' will
    # return a tag in form of <tag>-<commitNum>-<sha>. For example:
    #   14.0.2-3-g6931e26
    # Since reno does not support this format, we need to strip away the
    # commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit.
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions between old_sha and
    # new_sha. The latest release is not included in this list; that version
    # will be printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest releasenotes
    # that have been created or updated between the latest release and this
    # new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno', 'report', '--earliest-version', nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command, cwd=osa_repo_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so the tags
    # list is reversed.
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major release tag, then
        # skip it. There is no need to print release notes for an rc or b
        # release unless we are comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here.
        reno_report_command = ['reno', 'report', '--branch', version, '--earliest-version', version]
        reno_report_p = subprocess.Popen(reno_report_command, cwd=osa_repo_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about. This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for each
    # tagged release.
    release_notes = release_notes.replace("=============\nRelease Notes\n=============", "")
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting.
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting.
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
|
def row_content_length(row):
    '''Return the length of non-empty content in a given row.

    Scans the row from the right and returns ``index + 1`` of the last
    cell that is not empty (per ``is_empty_cell``).  An empty/falsy row
    yields 0.  If every cell is empty the original fell through to
    ``len(row)``; that behavior is preserved here.

    Fix: the original called ``.next()`` on a generator expression, which
    only exists in Python 2 — use the builtin ``next()`` instead.
    '''
    if not row:
        return 0
    try:
        return next(index + 1
                    for index, cell in reversed(list(enumerate(row)))
                    if not is_empty_cell(cell))
    except StopIteration:
        # All cells empty — NOTE(review): returning len(row) here looks
        # odd (0 might be expected) but matches the original behavior.
        return len(row)
|
def flush(self):
    """Append the latest updates to file, or optionally to stdout instead.

    See the constructor for logging options.  Reads ``self._latest()`` for
    the pending text, adds its length to ``self._chars_flushed``, and writes
    it (unbuffered, no trailing newline) to stdout or to ``self.logpath()``.

    Fix: the log file is now opened with a ``with`` block so the handle is
    closed even if the write raises (the original leaked it on error).
    """
    latest = self._latest()
    self._chars_flushed += len(latest)
    if self._use_stdout:
        print(latest, file=sys.stdout, flush=True, end='')
    else:
        # Context manager guarantees the file is closed on any exit path.
        with open(self.logpath(), 'a') as file:
            print(latest, file=file, flush=True, end='')
|
def save(filename, n_frames=1, axis=np.array([0., 0., 1.]), clf=True, **kwargs):
    """Save frames from the viewer out to a file.

    Parameters
    ----------
    filename : str
        Output image filename; must end in ``.gif`` when more than one
        frame is requested.
    n_frames : int
        Number of frames to render; more than one produces an animation.
    axis : (3,) float or None
        World-coordinate rotation axis for the animation; when None the
        animation rotates in azimuth.
    clf : bool
        When True, the Visualizer is cleared after rendering.
    kwargs : dict
        Extra keyword arguments forwarded to the SceneViewer instance.
    """
    animated = n_frames > 1
    if animated and os.path.splitext(filename)[1] != '.gif':
        raise ValueError('Expected .gif file for multiple-frame save.')
    viewer = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size,
                         animate=animated, animate_axis=axis,
                         max_frames=n_frames, **kwargs)
    frames = [frame.data for frame in viewer.saved_frames]
    if len(frames) > 1:
        imageio.mimwrite(filename, frames, fps=viewer._animate_rate,
                         palettesize=128, subrectangles=True)
    else:
        imageio.imwrite(filename, frames[0])
    if clf:
        Visualizer3D.clf()
|
def big_bounding_box(paths_n_stuff):
    """Finds a BB containing a collection of paths, Bezier path segments, and
    points (given as complex numbers).

    Returns the tuple ``(xmin, xmax, ymin, ymax)``.

    Fix: ``complex(thing)`` raises ``TypeError`` (not ``ValueError``) for
    non-string, non-numeric inputs such as lists, so the original let those
    escape with an unhelpful message — both are now caught and re-raised
    with the intended error.
    """
    bbs = []
    for thing in paths_n_stuff:
        if is_path_segment(thing) or isinstance(thing, Path):
            # Path-like objects provide their own bounding box.
            bbs.append(thing.bbox())
        elif isinstance(thing, complex):
            # A point: degenerate box at (real, imag).
            bbs.append((thing.real, thing.real, thing.imag, thing.imag))
        else:
            try:
                complexthing = complex(thing)
            except (TypeError, ValueError):
                raise TypeError("paths_n_stuff can only contains Path, CubicBezier, " "QuadraticBezier, Line, and complex objects.")
            bbs.append((complexthing.real, complexthing.real, complexthing.imag, complexthing.imag))
    # Transpose the per-item boxes and take the extremes.
    xmins, xmaxs, ymins, ymaxs = list(zip(*bbs))
    return min(xmins), max(xmaxs), min(ymins), max(ymaxs)
|
def cipher(self):
    """Retrieve information about the current cipher.

    Returns a ``(name, protocol_version, secret_bits)`` triple describing
    the cipher negotiated for this connection, or ``None`` when the
    handshake has not completed yet.
    """
    if not self._handshake_done:
        return None
    active = SSL_get_current_cipher(self._ssl.value)
    return (SSL_CIPHER_get_name(active),
            SSL_CIPHER_get_version(active),
            SSL_CIPHER_get_bits(active))
|
def from_pickle(cls, filename):
    """Load and return a Camera from a pickle file, given a filename."""
    # NOTE(review): pickle.load executes arbitrary code during
    # deserialization — only load files from trusted sources.
    with open(filename, 'rb') as handle:
        loaded = pickle.load(handle)
    return cls(projection=loaded.projection.copy(),
               position=loaded.position.xyz,
               rotation=loaded.rotation.__class__(*loaded.rotation[:]))
|
def to_key_val_list(value, sort=False, insensitive=False):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    >>> to_key_val_list([('key', 'val')])
    [('key', 'val')]
    >>> to_key_val_list({'key': 'val'})
    [('key', 'val')]
    >>> to_key_val_list({'key': 'val'}, sort=True)
    [('key', 'val')]
    >>> to_key_val_list('string')
    Traceback (most recent call last):
        ...
    ValueError: cannot encode objects that are not 2-tuples

    :param value: mapping or iterable of 2-tuples (None passes through).
    :param sort: sort the resulting pairs by key.
    :param insensitive: with ``sort``, compare keys case-insensitively.
    :raises ValueError: for scalar values that cannot form 2-tuples.
    """
    # Local import keeps the block self-contained; ``collections.Mapping``
    # was removed in Python 3.10 — ``collections.abc.Mapping`` is correct.
    from collections.abc import Mapping

    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    if isinstance(value, Mapping):
        value = value.items()
    if sort and not insensitive:
        values = sorted(value)
    elif sort:
        # Case-insensitive ordering: compare lower-cased keys.
        values = sorted(value, key=lambda t: t[0].lower())
    else:
        values = value
    return list(values)
|
def mangle_volume(citation_elements):
    """Make sure the volume letter is before the volume number,
    e.g. transforms 100B to B100.

    Mutates and returns ``citation_elements``; only elements whose
    ``type`` is ``'JOURNAL'`` are touched.

    Fix: the original used a ``ur"..."`` literal, which is a SyntaxError
    in Python 3 — a plain raw string with the same flags is equivalent.
    """
    volume_re = re.compile(r"(\d+)([A-Z])", re.U | re.I)
    for el in citation_elements:
        if el['type'] == 'JOURNAL':
            matches = volume_re.match(el['volume'])
            if matches:
                # Swap number and letter: "100B" -> "B100".
                el['volume'] = matches.group(2) + matches.group(1)
    return citation_elements
|
def get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]:
    """Return a mapping of contract name to relative path as defined in compiler output."""
    name_to_path: Dict[str, str] = {}
    for path, contracts in compiler_output.items():
        relative = make_path_relative(path)
        for contract_name in contracts:
            name_to_path[contract_name] = relative
    return name_to_path
|
def check_version_info(redis_client):
    """Check if various version info of this process is correct.

    Detects workers or drivers started with mismatched versions of Python,
    pyarrow, or Ray.  When the key is absent from Redis no check happens.

    Args:
        redis_client: A client for the primary Redis shard.

    Raises:
        Exception: raised when Ray or Python versions differ; a pyarrow
            mismatch alone is only logged as a warning.
    """
    redis_reply = redis_client.get("VERSION_INFO")
    if redis_reply is None:
        # No version info recorded — skip the check (this keeps manual
        # process startup easy).
        return
    expected = tuple(json.loads(ray.utils.decode(redis_reply)))
    actual = _compute_version_info()
    if actual == expected:
        return
    node_ip_address = ray.services.get_node_ip_address()
    error_message = ("Version mismatch: The cluster was started with:\n" " Ray: " + expected [ 0 ] + "\n" " Python: " + expected [ 1 ] + "\n" " Pyarrow: " + str ( expected [ 2 ] ) + "\n" "This process on node " + node_ip_address + " was started with:" + "\n" " Ray: " + actual [ 0 ] + "\n" " Python: " + actual [ 1 ] + "\n" " Pyarrow: " + str ( actual [ 2 ] ) )
    # Ray/Python mismatch is fatal; a pyarrow-only mismatch just warns.
    if actual[:2] != expected[:2]:
        raise Exception(error_message)
    logger.warning(error_message)
|
def create_dev_vlan(vlanid, vlan_name, auth, url, devid=None, devip=None):
    """Add an 802.1q VLAN to the target device via the IMC REST interface.

    The VLAN name MUST be valid on the target device.

    :param vlanid: int or str value of target 802.1q VLAN
    :param vlan_name: str value of the target 802.1q VLAN name. MUST be valid name on target device.
    :param auth: requests auth object  # usually auth.creds from pyhpeimc.auth class
    :param url: base url of IMC RS interface  # usually auth.url from pyhpeimc.auth class
    :param devid: str requires devid of the target device
    :param devip: str of ipv4 address of the target device
    :return: int HTTP response code (201 on success, 409 on conflict), or a
        str error description when the request itself fails
    :rtype: int or str
    """
    if devip is not None:
        # Resolve the device id from its IP when only the IP was given.
        devid = get_dev_details(devip, auth, url)['id']
    create_dev_vlan_url = "/imcrs/vlan?devId=" + str(devid)
    f_url = url + create_dev_vlan_url
    payload = '''{"vlanId":"%s", "vlanName":"%s"}''' % (str(vlanid), vlan_name)
    try:
        # Fix: the POST must be inside the try block — the original issued
        # it outside, so RequestException (timeouts, connection errors)
        # could never be caught by the handler below.
        response = requests.post(f_url, data=payload, auth=auth, headers=HEADERS)
        if response.status_code == 201:
            print('Vlan Created')
            return 201
        elif response.status_code == 409:
            print('''Unable to create VLAN.\nVLAN Already Exists\nDevice does not support VLAN
function''')
            return 409
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " create_dev_vlan: An Error has occured"
|
def dump_stats(myStats):
    """Show stats when pings are done"""
    print("\n----%s PYTHON PING Statistics----" % (myStats.thisIP))
    sent = myStats.pktsSent
    received = myStats.pktsRcvd
    if sent > 0:
        # Record the fraction of lost packets on the stats object.
        myStats.fracLoss = (sent - received) / sent
        print(("%d packets transmitted, %d packets received, "
               "%0.1f%% packet loss") % (sent, received, 100.0 * myStats.fracLoss))
    if received > 0:
        print("round-trip (ms) min/avg/max = %d/%0.1f/%d" % (
            myStats.minTime, myStats.totTime / received, myStats.maxTime))
    print("")
    return
|
def register_actions(self, shortcut_manager):
    """Register callback methods for triggered actions in all child controllers.

    :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object
        holding mappings between shortcuts and actions.
    """
    assert isinstance(shortcut_manager, ShortcutManager)
    self.__shortcut_manager = shortcut_manager
    for controller in list(self.__child_controllers.values()):
        if controller in self.__action_registered_controllers:
            # Already registered — skip.
            continue
        try:
            controller.register_actions(shortcut_manager)
        except Exception as e:
            logger.error("Error while registering action for {0}: {1}".format(
                controller.__class__.__name__, e))
        self.__action_registered_controllers.append(controller)
|
def find_module(self, fullname, path=None):
    """Looks up the table based on the module path.

    Part of the PEP 302 finder protocol: returns a loader object for quilt
    submodules, or None when ``fullname`` is outside this importer's
    namespace or no matching package/directory exists.
    """
    if not fullname.startswith(self._module_name + '.'):
        # Not a quilt submodule.
        return None
    # Strip the importer's own prefix, e.g. "quilt.data.user.pkg" -> "user.pkg".
    submodule = fullname[len(self._module_name) + 1:]
    parts = submodule.split('.')
    # Pop the team prefix if this is a team import.
    if self._teams:
        team = parts.pop(0)
    else:
        team = None
    # Handle full paths first.
    if len(parts) == 2:
        store, pkg = PackageStore.find_package(team, parts[0], parts[1])
        if pkg is not None:
            return PackageLoader(store, pkg)
        else:
            return None
    # Return fake loaders for partial paths.
    for store_dir in PackageStore.find_store_dirs():
        store = PackageStore(store_dir)
        if len(parts) == 0:
            # Only reachable for team imports (team-only path).
            assert self._teams
            path = store.team_path(team)
        elif len(parts) == 1:
            path = store.user_path(team, parts[0])
        # NOTE(review): for len(parts) > 2 neither branch assigns ``path``,
        # so the isdir check sees the incoming ``path`` argument (usually
        # None) — confirm callers can never reach this with > 2 parts.
        if os.path.isdir(path):
            return FakeLoader(path)
    # Nothing is found.
    return None
|
def muc_set_affiliation(self, jid, affiliation, *, reason=None):
    """Convenience wrapper around :meth:`.MUCClient.set_affiliation`.

    See there for details; the ``mucjid`` argument is fixed to
    :attr:`mucjid`.
    """
    # Generator-based coroutine: delegate and forward the result.
    result = yield from self.service.set_affiliation(
        self._mucjid, jid, affiliation, reason=reason)
    return result
|
def printmp(msg):
    """Print temporarily, until next print overrides it."""
    # Pad to 80 columns so the previous message is fully overwritten,
    # then return the cursor to the start of the line.
    print(msg.ljust(80), end='\r')
    sys.stdout.flush()
|
def do_loop_turn(self):  # pylint: disable=too-many-branches,too-many-statements,too-many-locals
    """Loop turn for Arbiter

    If not a master daemon, wait for my master death...
    Else, run:
    * Check satellites are alive
    * Check and dispatch (if needed) the configuration
    * Get broks and external commands from the satellites
    * Push broks and external commands to the satellites

    :return: None
    """
    # If I am a spare, I only wait for the master arbiter to die...
    if not self.is_master:
        logger.debug("Waiting for my master death...")
        self.wait_for_master_death()
        return
    # Periodically refresh the overall Alignak status for the monitor.
    if self.loop_count % self.alignak_monitor_period == 1:
        self.get_alignak_status(details=True)
    # Maybe an external process requested Alignak stop...
    if self.kill_request:
        logger.info("daemon stop mode ...")
        if not self.dispatcher.stop_request_sent:
            logger.info("entering daemon stop mode, time before exiting: %s", self.conf.daemons_stop_timeout)
            self.dispatcher.stop_request()
        # Grace period elapsed: force an immediate stop of all daemons.
        if time.time() > self.kill_timestamp + self.conf.daemons_stop_timeout:
            logger.info("daemon stop mode delay reached, immediate stop")
            self.dispatcher.stop_request(stop_now=True)
            time.sleep(1)
            self.interrupted = True
            logger.info("exiting...")
    if not self.kill_request:  # Main loop treatment
        # Try to see if one of my module is dead, and restart previously dead modules
        self.check_and_del_zombie_modules()
        # Call modules that manage a starting tick pass
        _t0 = time.time()
        self.hook_point('tick')
        statsmgr.timer('hook.tick', time.time() - _t0)
        # Look for logging timeperiods activation change (active/inactive)
        self.check_and_log_tp_activation_change()
        # Check that my daemons are alive
        if not self.daemons_check():
            if self.conf.daemons_failure_kill:
                self.request_stop(message="Some Alignak daemons cannot be checked.", exit_code=4)
            else:
                logger.warning("Should have killed my children if " "'daemons_failure_kill' were set!")
        # Now the dispatcher job - check if all daemons are reachable and have a configuration
        if not self.daemons_reachability_check():
            logger.warning("A new configuration dispatch is required!")
            # Prepare and dispatch the monitored configuration
            self.configuration_dispatch(self.dispatcher.not_configured)
        # Now get things from our module instances
        _t0 = time.time()
        self.get_objects_from_from_queues()
        statsmgr.timer('get-objects-from-queues', time.time() - _t0)
        # Maybe our satellites raised new broks. Reap them...
        _t0 = time.time()
        self.get_broks_from_satellites()
        statsmgr.timer('broks.got.time', time.time() - _t0)
        # One broker is responsible for our broks, we give him our broks
        _t0 = time.time()
        self.push_broks_to_broker()
        statsmgr.timer('broks.pushed.time', time.time() - _t0)
        # # We push our external commands to our schedulers...
        # _t0 = time.time()
        # self.push_external_commands_to_schedulers()
        # statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
        # Periodic system health report: CPU, disk and memory perfdata
        # gathered through psutil and emitted as log lines.
        if self.system_health and (self.loop_count % self.system_health_period == 1):
            perfdatas = []
            cpu_count = psutil.cpu_count()
            perfdatas.append("'cpu_count'=%d" % cpu_count)
            logger.debug(" . cpu count: %d", cpu_count)
            cpu_percents = psutil.cpu_percent(percpu=True)
            cpu = 1
            for percent in cpu_percents:
                perfdatas.append("'cpu_%d_percent'=%.2f%%" % (cpu, percent))
                cpu += 1
            cpu_times_percent = psutil.cpu_times_percent(percpu=True)
            cpu = 1
            # NOTE(review): the loop variable shadows the iterated list —
            # works, but each iteration rebinds the same name.
            for cpu_times_percent in cpu_times_percent:
                logger.debug(" . cpu time percent: %s", cpu_times_percent)
                for key in cpu_times_percent._fields:
                    perfdatas.append("'cpu_%d_%s_percent'=%.2f%%" % (cpu, key, getattr(cpu_times_percent, key)))
                cpu += 1
            logger.info("%s cpu|%s", self.name, " ".join(perfdatas))
            perfdatas = []
            disk_partitions = psutil.disk_partitions(all=False)
            for disk_partition in disk_partitions:
                logger.debug(" . disk partition: %s", disk_partition)
                disk = getattr(disk_partition, 'mountpoint')
                disk_usage = psutil.disk_usage(disk)
                logger.debug(" . disk usage: %s", disk_usage)
                for key in disk_usage._fields:
                    if 'percent' in key:
                        perfdatas.append("'disk_%s_percent_used'=%.2f%%" % (disk, getattr(disk_usage, key)))
                    else:
                        perfdatas.append("'disk_%s_%s'=%dB" % (disk, key, getattr(disk_usage, key)))
            logger.info("%s disks|%s", self.name, " ".join(perfdatas))
            perfdatas = []
            virtual_memory = psutil.virtual_memory()
            logger.debug(" . memory: %s", virtual_memory)
            for key in virtual_memory._fields:
                if 'percent' in key:
                    perfdatas.append("'mem_percent_used_%s'=%.2f%%" % (key, getattr(virtual_memory, key)))
                else:
                    perfdatas.append("'mem_%s'=%dB" % (key, getattr(virtual_memory, key)))
            swap_memory = psutil.swap_memory()
            logger.debug(" . memory: %s", swap_memory)
            for key in swap_memory._fields:
                if 'percent' in key:
                    perfdatas.append("'swap_used_%s'=%.2f%%" % (key, getattr(swap_memory, key)))
                else:
                    perfdatas.append("'swap_%s'=%dB" % (key, getattr(swap_memory, key)))
            logger.info("%s memory|%s", self.name, " ".join(perfdatas))
|
def send(self, data):
    """Sends data to the `AlarmDecoder`_ device.

    :param data: data to send
    :type data: string
    """
    if not self._device:
        return
    if isinstance(data, str):
        data = str.encode(data)
    # Hack to support unicode under Python 2.x
    if sys.version_info < (3,):
        if isinstance(data, unicode):
            data = bytes(data)
    self._device.write(data)
|
def generate_start_command(server, options_override=None, standalone=False):
    """Build the command line used to start a mongod server.

    Prefixes the command with numactl when running on a NUMA box, as
    recommended by 10gen. For more info, see
    http://www.mongodb.org/display/DOCS/NUMA
    """
    command = []
    if mongod_needs_numactl():
        log_info("Running on a NUMA machine...")
        command = apply_numactl(command)
    # The mongod executable, followed by its command-line arguments.
    command.append(get_server_executable(server))
    cmd_options = server.export_cmd_options(options_override=options_override,
                                            standalone=standalone)
    command += options_to_command_args(cmd_options)
    return command
|
def status_messages(self):
    """Returns status messages if any"""
    shown = IStatusMessage(self.request).show()
    # Attach a normalized id to each message for rendering.
    for message in shown:
        message.id = idnormalizer.normalize(message.message)
    return shown
|
def arg_parser(*args, **kwargs):
    """Return a parser with common options used in the prawtools commands."""
    help_text = {
        'site': 'The site to connect to defined in your praw.ini file.',
        'update': 'Prevent the checking for prawtools package updates.',
    }
    kwargs['version'] = 'BBoe\'s PRAWtools {}'.format(__version__)
    parser = OptionParser(*args, **kwargs)
    parser.add_option('-v', '--verbose', action='count', default=0,
                      help='Increase the verbosity by 1 each time')
    parser.add_option('-U', '--disable-update-check', action='store_true',
                      help=help_text['update'])
    auth_group = OptionGroup(parser, 'Site/Authentication options')
    auth_group.add_option('-S', '--site', help=help_text['site'])
    parser.add_option_group(auth_group)
    return parser
|
def get_parent(self, context):
    """Load the parent template using our own ``find_template``, which
    will cause its absolute path to not be used again. Then peek at
    the first node, and if its parent arg is the same as the
    current parent arg, we know circular inheritance is going to
    occur, in which case we try and find the template again, with
    the absolute directory removed from the search list.
    """
    parent = self.parent_name.resolve(context)
    if hasattr(parent, "render"):
        # Already a template object — nothing more to resolve.
        return parent
    template = self.find_template(parent, context)
    for node in template.nodelist:
        extends_same_parent = (isinstance(node, ExtendsNode)
                               and node.parent_name.resolve(context) == parent)
        if extends_same_parent:
            # Circular inheritance detected: re-find while peeking past
            # the absolute directory.
            return self.find_template(parent, context, peeking=True)
    return template
|
def validate(cls, mapper_spec):
    """Validate mapper specification.

    Args:
        mapper_spec: an instance of model.MapperSpec

    Raises:
        BadReaderParamsError: if the specification is invalid for any reason such
            as missing the bucket name or providing an invalid bucket name.

    Fix: converted Python-2-only constructs — ``except ValueError, error``
    (a SyntaxError on Python 3) and ``basestring`` (undefined on Python 3).
    """
    reader_spec = cls.get_params(mapper_spec, allow_old=False)
    # Bucket Name is required
    if cls.BUCKET_NAME_PARAM not in reader_spec:
        raise errors.BadReaderParamsError("%s is required for Google Cloud Storage" % cls.BUCKET_NAME_PARAM)
    try:
        cloudstorage.validate_bucket_name(reader_spec[cls.BUCKET_NAME_PARAM])
    except ValueError as error:
        raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
    # Object Name(s) are required
    if cls.OBJECT_NAMES_PARAM not in reader_spec:
        raise errors.BadReaderParamsError("%s is required for Google Cloud Storage" % cls.OBJECT_NAMES_PARAM)
    filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
    if not isinstance(filenames, list):
        raise errors.BadReaderParamsError("Object name list is not a list but a %s" % filenames.__class__.__name__)
    for filename in filenames:
        if not isinstance(filename, str):
            raise errors.BadReaderParamsError("Object name is not a string but a %s" % filename.__class__.__name__)
    # Delimiter is optional, but must be a string when present.
    if cls.DELIMITER_PARAM in reader_spec:
        delimiter = reader_spec[cls.DELIMITER_PARAM]
        if not isinstance(delimiter, str):
            raise errors.BadReaderParamsError("%s is not a string but a %s" % (cls.DELIMITER_PARAM, type(delimiter)))
|
def serialize_quantity(o):
    """Serializes an :obj:`astropy.units.Quantity`, for JSONification.

    Args:
        o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    return {
        '_type': 'astropy.units.Quantity',
        'value': o.value,
        'unit': o.unit.to_string(),
    }
|
def heating_remaining(self):
    """Return seconds of heat time remaining, or None if unavailable.

    Reads ``self.device.device_data`` for the side (``'left'`` or
    ``'right'``) this instance represents.  Returns None when the device
    data is not yet populated (TypeError from indexing None) or when the
    side is unrecognized.

    Fix: an unknown ``self.side`` previously left the local unbound and
    raised UnboundLocalError (not caught by ``except TypeError``); it now
    returns None explicitly.
    """
    try:
        if self.side == 'left':
            return self.device.device_data['leftHeatingDuration']
        if self.side == 'right':
            return self.device.device_data['rightHeatingDuration']
        return None
    except TypeError:
        # device_data is None (device not yet synced).
        return None
|
def authorized(self):
    """This is the route/function that the user will be redirected to by
    the provider (e.g. Twitter) after the user has logged into the
    provider's website and authorized your app to access their account.

    Parses the OAuth 1 authorization response, exchanges it for an access
    token, notifies ``oauth_authorized`` subscribers, and redirects to the
    configured next URL.  On token errors the ``oauth_error`` signal is
    sent and the user is redirected without a token being stored.
    """
    # Resolve where to send the user afterwards, in priority order.
    if self.redirect_url:
        next_url = self.redirect_url
    elif self.redirect_to:
        next_url = url_for(self.redirect_to)
    else:
        next_url = "/"
    try:
        self.session.parse_authorization_response(request.url)
    except TokenMissing as err:
        message = err.args[0]
        response = getattr(err, "response", None)
        log.warning("OAuth 1 access token error: %s", message)
        oauth_error.send(self, message=message, response=response)
        return redirect(next_url)
    try:
        token = self.session.fetch_access_token(self.access_token_url, should_load_token=False)
    except ValueError as err:
        # can't proceed with OAuth, have to just redirect to next_url
        message = err.args[0]
        response = getattr(err, "response", None)
        log.warning("OAuth 1 access token error: %s", message)
        oauth_error.send(self, message=message, response=response)
        return redirect(next_url)
    # Let subscribers veto token storage or short-circuit with a response.
    results = oauth_authorized.send(self, token=token) or []
    set_token = True
    for func, ret in results:
        if isinstance(ret, (Response, current_app.response_class)):
            return ret
        # NOTE(review): ``ret == False`` also matches 0 — presumably
        # ``ret is False`` was intended; confirm before changing.
        if ret == False:
            set_token = False
    if set_token:
        self.token = token
    return redirect(next_url)
|
def NoExclusions(self):
    """Determine that there are no exclusion criterion in play

    :return: True if there is no real boundary specification of any kind.

    Simple method allowing parsers to short circuit the determination of
    missingness, which can be moderately compute intensive.
    """
    criterion_count = (len(self.start_bounds)
                       + len(self.target_rs)
                       + len(self.ignored_rs))
    if criterion_count:
        return False
    # No explicit criteria: defer to whether a chromosome filter is set.
    return BoundaryCheck.chrom == -1
|
def get_phrases(self, ns=None, layer='syntax', cat_key='cat', cat_val='NP'):
    """Yield all node IDs that dominate the given phrase type, e.g. all NPs.

    :param ns: namespace prefix; defaults to ``self.ns`` when falsy
    :param layer: layer name selecting the candidate nodes
    :param cat_key: node attribute holding the category
    :param cat_val: category value to match (e.g. ``'NP'``)

    Fix: the category attribute is now looked up under the resolved
    ``ns``; the original always used ``self.ns`` for it, silently ignoring
    an explicit ``ns`` override.
    """
    if not ns:
        ns = self.ns
    cat_attr = '{0}:{1}'.format(ns, cat_key)
    for node_id in select_nodes_by_layer(self, '{0}:{1}'.format(ns, layer)):
        if self.node[node_id][cat_attr] == cat_val:
            yield node_id
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.