signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def del_pipe(self, pipe, forced=False):
    """Delete a pipe (an N-``tuple`` of ``Piper`` instances).

    Deleting a pipe means deleting all the connections between the
    ``Pipers`` and then the ``Pipers`` themselves, right to left.  If
    "forced" is ``False`` only ``Pipers`` which are not used anymore
    (i.e. have no downstream ``Pipers``) are deleted; the
    ``DaggerError`` raised for still-used ``Pipers`` is swallowed.

    The direction of the edges in the ``DictGraph`` is reversed compared
    to the left-to-right data-flow in a pipe.

    Arguments:

      - pipe(sequence) N-``tuple`` of ``Piper`` instances or objects
        which can be resolved in the ``Dagger`` (see
        ``Dagger.resolve``).
      - forced(``bool``) [default: ``False``] Passed through to
        ``Dagger.del_piper``.
    """
    self.log.debug('%s removes pipe%s forced: %s' % (repr(self), repr(pipe), forced))
    ordered = list(reversed(pipe))
    # Walk consecutive pairs (right-to-left in data-flow order).
    for upstream, downstream in zip(ordered, ordered[1:]):
        edge = (self.resolve(upstream), self.resolve(downstream))
        self.del_edge(edge)
        self.log.debug('%s removed the %s>>>%s edge' % (repr(self), edge[0], edge[1]))
        try:
            self.del_piper(edge[0], forced)
            self.del_piper(edge[1], forced)
        except DaggerError:
            # Piper still has downstream users and forced is False.
            pass
|
def create_choice_attribute(creator_type, value, choice_entry):
    """Create an instance of a subclass of ChoiceAttributeMixin for the
    given value.

    Parameters
    ----------
    creator_type : type
        ``ChoiceAttributeMixin`` or a subclass, on which
        ``get_class_for_value`` is called to pick the concrete class.
    value : ?
        The value for which we want an instance of a new subclass of
        ``creator_type``.
    choice_entry : ChoiceEntry
        The ``ChoiceEntry`` instance that holds the current value, used
        to access its constant, value and display name.

    Returns
    -------
    ChoiceAttributeMixin
        An instance of a subclass of ``creator_type`` for the given
        value.
    """
    attribute_class = creator_type.get_class_for_value(value)
    return attribute_class(value, choice_entry)
|
def handle_error(self, callback):
    """Called when an error occurs in an event loop callback.

    By default, logs the error on the loop and sets the error view with
    the formatted traceback.
    """
    self.loop.log_error(callback)
    lines = ["Exception in callback %r" % callback, traceback.format_exc()]
    self.show_error("\n".join(lines).encode('utf-8'))
|
def format_datetime(dt, usegmt=False):
    """Turn a datetime into a date string as specified in RFC 2822.

    If usegmt is True, dt must be an aware datetime with an offset of
    zero.  In this case 'GMT' will be rendered instead of the normal
    +0000 required by RFC 2822.  This is to support HTTP headers
    involving date stamps.

    Raises:
        ValueError: if usegmt is True and dt is naive or its UTC offset
            is not zero.
    """
    now = dt.timetuple()
    if usegmt:
        # Accept any tzinfo whose UTC offset is zero, not only
        # datetime.timezone.utc itself: comparing tzinfo objects with
        # `!=` wrongly rejects equivalent zero-offset timezones (this is
        # also how CPython's email.utils.format_datetime checks it).
        if dt.tzinfo is None or dt.utcoffset() != datetime.timedelta(0):
            raise ValueError("usegmt option requires a UTC datetime")
        zone = 'GMT'
    elif dt.tzinfo is None:
        # Naive datetimes render the RFC 2822 "unknown zone" marker.
        zone = '-0000'
    else:
        zone = dt.strftime("%z")
    return _format_timetuple_and_zone(now, zone)
|
def takeScreenshotAndShowItOnWindow(self):
    '''Takes the current screenshot and shows it on the main window.

    It also:
       - sizes the window
       - create the canvas
       - set the focus
       - enable the events
       - create widgets
       - finds the targets (as explained in L{findTargets})
       - hides the vignette (that could have been showed before)
    '''

    if PROFILE:
        print >> sys.stderr, "PROFILING: takeScreenshotAndShowItOnWindow()"
        profileStart()
    if DEBUG:
        print >> sys.stderr, "takeScreenshotAndShowItOnWindow()"
    # Prefer the uiAutomatorHelper screenshot when available; otherwise
    # fall back to a device snapshot (reconnecting first).
    if self.vc and self.vc.uiAutomatorHelper:
        received = self.vc.uiAutomatorHelper.takeScreenshot()
        stream = StringIO.StringIO(received)
        self.unscaledScreenshot = Image.open(stream)
    else:
        self.unscaledScreenshot = self.device.takeSnapshot(reconnect=True)
    self.image = self.unscaledScreenshot
    (width, height) = self.image.size
    if self.scale != 1:
        # Resize the working image; keep the unscaled original around.
        scaledWidth = int(width * self.scale)
        scaledHeight = int(height * self.scale)
        self.image = self.image.resize((scaledWidth, scaledHeight), PIL.Image.ANTIALIAS)
        (width, height) = self.image.size
    if self.isDarwin and 14 < self.sdkVersion < 23:
        # Pre-encode the (possibly resized) image as a base64 GIF; used
        # below to work around a Tkinter transparency bug on macOS.
        stream = StringIO.StringIO()
        self.image.save(stream, 'GIF')
        import base64
        gif = base64.b64encode(stream.getvalue())
        stream.close()
    if self.canvas is None:
        # First screenshot: build the canvas and its companion widgets.
        if DEBUG:
            print >> sys.stderr, "Creating canvas", width, 'x', height
        self.placeholder.grid_forget()
        self.canvas = Tkinter.Canvas(self.mainFrame, width=width, height=height)
        self.canvas.focus_set()
        self.enableEvents()
        self.createMessageArea(width, height)
        self.createVignette(width, height)
    if self.isDarwin and self.scale != 1 and 14 < self.sdkVersion < 23:  # Extremely weird Tkinter bug, I guess
        # If the image was rotated and then resized if ImageTk.PhotoImage(self.image)
        # is used as usual then the result is a completely transparent image and only
        # the "Please wait..." is seen.
        # Converting it to GIF seems to solve the problem
        self.screenshot = Tkinter.PhotoImage(data=gif)
    else:
        self.screenshot = ImageTk.PhotoImage(self.image)
    if self.imageId is not None:
        # Drop the previous screenshot item before drawing the new one.
        self.canvas.delete(self.imageId)
    self.imageId = self.canvas.create_image(0, 0, anchor=Tkinter.NW, image=self.screenshot)
    if DEBUG:
        try:
            print >> sys.stderr, "Grid info", self.canvas.grid_info()
        except:
            print >> sys.stderr, "Exception getting grid info"
    # Ensure the canvas is placed in the grid; grid_info() raises when
    # the widget has not been gridded yet.
    gridInfo = None
    try:
        gridInfo = self.canvas.grid_info()
    except:
        if DEBUG:
            print >> sys.stderr, "Adding canvas to grid (1,1)"
        self.canvas.grid(row=1, column=1, rowspan=4)
    if not gridInfo:
        self.canvas.grid(row=1, column=1, rowspan=4)
    self.findTargets()
    self.hideVignette()
    if DEBUG:
        try:
            self.printGridInfo()
        except:
            pass
    if PROFILE:
        profileEnd()
|
def is_iterable(val):
    """Check if val is not a list, but is a collections.abc.Iterable type.

    This is used to determine when list() should be called on val.

    >>> l = [1, 2]
    >>> is_iterable(l)
    False
    >>> is_iterable(iter(l))
    True

    :param val: value to check
    :return: True if it is not a list, but is a collections.abc.Iterable
    """
    if isinstance(val, list):
        return False
    # The `collections.Iterable` alias was removed in Python 3.10; the
    # ABC lives in `collections.abc`.
    return isinstance(val, collections.abc.Iterable)
|
def run(self, **client_params):
    """Actually creates the async job on the CARTO server.

    :param client_params: To be sent to the CARTO API.  See CARTO's
                          documentation depending on the subclass you
                          are using.
    :type client_params: kwargs
    :return:
    :raise: CartoException
    """
    try:
        self.send(self.get_collection_endpoint(), http_method="POST", **client_params)
    except Exception as e:
        # Chain the original exception (`from e`) so the underlying
        # traceback is preserved for debugging.
        raise CartoException(e) from e
|
def make_model(self):
    """Assemble text from the set of collected INDRA Statements.

    Returns
    -------
    stmt_strs : str
        The assembled text as a unicode string.  By default the text is
        a single string consisting of one or more sentences with
        periods at the end.
    """
    # Ordered dispatch table: isinstance checks must run in this exact
    # order (more specific statement classes first).
    dispatch = [
        (ist.Modification, _assemble_modification),
        (ist.Autophosphorylation, _assemble_autophosphorylation),
        (ist.Association, _assemble_association),
        (ist.Complex, _assemble_complex),
        (ist.Influence, _assemble_influence),
        (ist.RegulateActivity, _assemble_regulate_activity),
        (ist.RegulateAmount, _assemble_regulate_amount),
        (ist.ActiveForm, _assemble_activeform),
        (ist.Translocation, _assemble_translocation),
        (ist.Gef, _assemble_gef),
        (ist.Gap, _assemble_gap),
        (ist.Conversion, _assemble_conversion),
    ]
    pieces = []
    for stmt in self.statements:
        for stmt_cls, assembler in dispatch:
            if isinstance(stmt, stmt_cls):
                pieces.append(assembler(stmt))
                break
        else:
            logger.warning('Unhandled statement type: %s.' % type(stmt))
    return ' '.join(pieces) if pieces else ''
|
def _decode_surrogatepass(data, codec):
    """Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work
    on Python < 3.4 + Windows.

    https://bugs.python.org/issue27971

    Raises UnicodeDecodeError, LookupError
    """
    try:
        return data.decode(codec, _surrogatepass)
    except UnicodeDecodeError:
        if not _codec_can_decode_with_surrogatepass(codec):
            # The interpreter's codec is broken: fall back to decoding
            # via ctypes.  Big-endian input is byte-swapped first so the
            # little-endian path below can handle both variants.
            if _normalize_codec(codec) == "utf-16-be":
                data = _swap_bytes(data)
                codec = "utf-16-le"
            if _normalize_codec(codec) == "utf-16-le":
                # NUL-terminate the bytes and reinterpret the buffer as
                # a wide-character string (UTF-16 code units).
                buffer_ = ctypes.create_string_buffer(data + b"\x00\x00")
                value = ctypes.wstring_at(buffer_, len(data) // 2)
                # Round-trip check: only trust the result if re-encoding
                # reproduces the input bytes exactly; otherwise re-raise
                # the original UnicodeDecodeError.
                if value.encode("utf-16-le", _surrogatepass) != data:
                    raise
                return value
            else:
                raise
        else:
            raise
|
def erase_all(self):
    """@brief Erase all the flash.

    Requires an active ERASE operation and erase-all support in the
    flash algorithm.

    @exception FlashEraseFailure raised when the algorithm returns a
        nonzero status code.
    """
    assert self._active_operation == self.Operation.ERASE
    assert self.is_erase_all_supported
    # Execute the erase_all subroutine of the flash algorithm on the
    # target and wait for it to finish.
    status = self._call_function_and_wait(self.flash_algo['pc_eraseAll'])
    # A nonzero return code indicates the erase failed.
    if status != 0:
        raise FlashEraseFailure('erase_all error: %i' % status, result_code=status)
|
def module_can_run_parallel ( test_module : unittest . TestSuite ) -> bool :
"""Checks if a given module of tests can be run in parallel or not
: param test _ module : the module to run
: return : True if the module can be run on parallel , False otherwise"""
|
for test_class in test_module : # if the test is already failed , we just don ' t filter it
# and let the test runner deal with it later .
if hasattr ( unittest . loader , '_FailedTest' ) : # import failure in python 3.4.5 +
# noinspection PyProtectedMember
if isinstance ( test_class , unittest . loader . _FailedTest ) :
continue
if not isinstance ( test_class , collections . Iterable ) : # likely an import failure in python 3.4.4-
# before python 3.4.5 , test import failures were not serializable .
# We are unable to be sure that this is a module import failure , but it very likely is
# if this is the case , we ' ll just run this locally and see
raise TestClassNotIterable ( )
for test_case in test_class :
return not getattr ( sys . modules [ test_case . __module__ ] , "__no_parallel__" , False )
|
def stoptimes(self, start_date, end_date):
    """Return all stop times in the date range.

    :param start_date: The starting date for the query.
    :param end_date: The end date for the query.

    >>> import datetime
    >>> today = datetime.date.today()
    >>> trans.stoptimes(today - datetime.timedelta(days=1), today)
    """
    query = {
        'start': self.format_date(start_date),
        'end': self.format_date(end_date),
    }
    return self._request(ENDPOINTS['STOPTIMES'], query)
|
def _get_hash(self, name, operation, create=False):
    """Get (and maybe create) a hash by name.

    Delegates to ``_get_by_type`` with the ``b'hash'`` type tag and an
    empty dict as the initial value.
    """
    return self._get_by_type(name, operation, create, b'hash', {})
|
def calculate_start_time(df):
    """Calculate the start_time per read.

    Time data is either a "time" (in seconds, derived from summary
    files) or a "timestamp" (in UTC, derived from fastq_rich format),
    converted into a datetime column "time_arr".  The zero point is the
    minimum of "time_arr", which is subtracted from all other times; if
    a "dataset" column is present the subtraction is done per dataset.
    The helper columns are dropped before returning.
    """
    if "time" in df:
        df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
    elif "timestamp" in df:
        df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
    else:
        # Nothing to convert: return the frame untouched.
        return df
    if "dataset" in df:
        # Zero each dataset independently against its own minimum.
        for group in df["dataset"].unique():
            selector = df["dataset"] == group
            origin = df.loc[selector, "time_arr"].min()
            df.loc[selector, "start_time"] = df.loc[selector, "time_arr"] - origin
    else:
        df["start_time"] = df["time_arr"] - df["time_arr"].min()
    return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
|
def vrfs_get(self, subcommand='routes', route_dist=None, route_family='all', format='json'):
    """This method returns the existing vrfs.

    ``subcommand`` specifies one of the following.

    - 'routes': shows routes present for vrf
    - 'summary': shows configuration and summary of vrf

    ``route_dist`` specifies a route distinguisher value.  If
    route_family is not 'all', this value must be specified.

    ``route_family`` specifies the route family of the VRF and must be
    one of RF_VPN_V4='ipv4', RF_VPN_V6='ipv6', RF_L2_EVPN='evpn' or
    'all' (default).

    ``format`` specifies the format of the response: 'json' (default)
    or 'cli'.
    """
    if route_family in SUPPORTED_VRF_RF:
        # A specific route family requires a route distinguisher.
        assert route_dist is not None
        params = ['vrf', subcommand, route_dist, route_family]
    else:
        params = ['vrf', subcommand, 'all']
    return call('operator.show', format=format, params=params)
|
def get_initial(self, form, name):
    """Get the initial data that got passed into the superform for this
    composite field.

    Returns ``None`` if no initial values were given.
    """
    if not hasattr(form, 'initial'):
        return None
    return form.initial.get(name, None)
|
def get_history_by_flight_number(self, flight_number, page=1, limit=100):
    """Fetch the history of a flight by its number.

    This method can be used to get the history of a flight route by its
    number.  It checks the user authentication and returns the data
    accordingly.

    Args:
        flight_number (str): The flight number, e.g. AI101
        page (int): Optional page number; users on a flightradar24 plan
            can pass higher page numbers to get more data
        limit (int): Optional limit on number of records returned

    Returns:
        A list of dicts with the data; one dict for each row of data
        from flightradar24

    Example::

        from pyflightdata import FlightData
        f = FlightData()
        # optional login
        f.login(myemail, mypassword)
        f.get_history_by_flight_number('AI101')
        f.get_history_by_flight_number('AI101', page=1, limit=10)
    """
    token = str(self.AUTH_TOKEN)
    return self._fr24.get_data(FLT_BASE.format(flight_number, token, page, limit))
|
def do_dump(self, arg):
    '''Output all bytes waiting in output queue.'''
    # Guard clause inverted: dump only when the arm is connected.
    if self.arm.is_connected():
        print(self.arm.dump())
    else:
        print(self.style.error('Error: ', 'Arm is not connected.'))
|
def final():
    """Clean up the PulseIn helper processes and message queues.

    Registered to run at exit so that, if the program is cancelled or
    quit, the helper processes and their queues do not outlive it.
    """
    if DEBUG:
        print("Cleaning up message queues", queues)
        print("Cleaning up processes", procs)
    # Remove queues first, then terminate the helper processes.
    for queue in queues:
        queue.remove()
    for process in procs:
        process.terminate()
|
def get_context(self, arr, expr, context):
    """Returns a context dictionary for use in evaluating the expression.

    :param arr: The input array.
    :param expr: The input expression.
    :param context: Evaluation context.
    :raises ValueError: if the expression does not have exactly one free
        variable after removing context names and 'i'.
    """
    # Names already bound by the context (plus the implicit index 'i')
    # are not candidates for the array variable.
    reserved = set(context.keys()).union(['i'])
    free_names = [name for name in self.get_expression_names(expr) if name not in reserved]
    if len(free_names) != 1:
        raise ValueError('The expression must have exactly one variable.')
    return {free_names[0]: arr}
|
def _get_conversion_outfile ( self , convert_to = None ) :
'''a helper function to return a conversion temporary output file
based on kind of conversion
Parameters
convert _ to : a string either docker or singularity , if a different'''
|
conversion = self . _get_conversion_type ( convert_to )
prefix = "Singularity"
if conversion == "docker" :
prefix = "Dockerfile"
suffix = next ( tempfile . _get_candidate_names ( ) )
return "%s.%s" % ( prefix , suffix )
|
def get_skyline(lrh):
    """Worst Time Complexity: O(NlogN).

    :type lrh: List[List[int]] of [left, right, height] triples
    :rtype: List[List[int]] of skyline key points [x, height]
    """
    skyline = []
    heap = []  # max-heap of (-height, -right) for buildings still "live"
    idx, total = 0, len(lrh)
    while idx < total or heap:
        if not heap or (idx < total and lrh[idx][0] <= -heap[0][1]):
            # Next critical x is a building's left edge: push every
            # building that starts there.
            x = lrh[idx][0]
            while idx < total and lrh[idx][0] == x:
                heapq.heappush(heap, (-lrh[idx][2], -lrh[idx][1]))
                idx += 1
        else:
            # Next critical x is the earliest right edge in the heap:
            # pop everything ending at or before it.
            x = -heap[0][1]
            while heap and -heap[0][1] <= x:
                heapq.heappop(heap)
        # Current skyline height is the tallest live building (0 if none).
        height = -heap[0][0] if heap else 0
        if not skyline or height != skyline[-1][1]:
            skyline.append([x, height])
    return skyline
|
def submit_all(self, poll=True, errors=True, process_files=True, halt_on_error=True):
    """Submit Batch request to ThreatConnect API.

    By default this method will submit the job request and data and, if
    the size of the data is below the value **synchronousBatchSaveLimit**
    set in System Setting, it will process the request synchronously and
    return the batch status.  If the size of the batch is greater than
    the value set, the batch job will be queued.

    Errors are not retrieved automatically and need to be enabled.

    If any of the submit, poll, or error methods fail, the entire submit
    will halt at the point of failure.  The behavior can be changed by
    setting halt_on_error to False.

    Each of these methods can also be called on their own for greater
    control of the submit process.

    Args:
        poll (bool, default:True): Poll for status.
        errors (bool, default:True): Retrieve any batch errors (only if
            poll is True).
        process_files (bool, default:True): Send any document or report
            attachments to the API.
        halt_on_error (bool, default:True): If True any exception will
            raise an error.

    Returns:
        dict: The Batch Status from the ThreatConnect API.
    """
    batch_data_array = []
    while True:
        batch_data = {}
        batch_id = None
        if self.action.lower() == 'delete':
            # while waiting of FR for delete support in createAndUpload
            # submit delete request the old way (submit job + submit
            # data), still using V2.
            if len(self) > 0:  # pylint: disable=C1801
                batch_id = self.submit_job(halt_on_error)
                if batch_id is not None:
                    batch_data = self.submit_data(batch_id, halt_on_error)
            else:
                batch_data = {}
        else:
            # V3 path: create and upload in a single request, keeping
            # the returned batch status.
            batch_data = (self.submit_create_and_upload(halt_on_error).get('data', {}).get('batchStatus', {}))
            batch_id = batch_data.get('id')
        if not batch_data:
            # Nothing left to submit: stop looping.
            break
        elif batch_id is not None:
            self.tcex.log.info('Batch ID: {}'.format(batch_id))
            # job hit queue
            if poll:  # poll for status
                batch_data = (self.poll(batch_id, halt_on_error=halt_on_error).get('data', {}).get('batchStatus'))
                if errors:  # retrieve errors
                    error_count = batch_data.get('errorCount', 0)
                    error_groups = batch_data.get('errorGroupCount', 0)
                    error_indicators = batch_data.get('errorIndicatorCount', 0)
                    if error_count > 0 or error_groups > 0 or error_indicators > 0:
                        self.tcex.log.debug('retrieving batch errors')
                        batch_data['errors'] = self.errors(batch_id)
            else:  # can't process files if status is unknown (polling must be enabled)
                process_files = False
        if process_files:  # submit file data after batch job is complete
            batch_data['uploadStatus'] = self.submit_files(halt_on_error)
        batch_data_array.append(batch_data)
    return batch_data_array
|
def page(self, status=values.unset, date_created_after=values.unset, date_created_before=values.unset, room_sid=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset):
    """Retrieve a single page of CompositionInstance records from the API.

    Request is executed immediately.

    :param CompositionInstance.Status status: Only show Compositions with the given status.
    :param datetime date_created_after: Only show Compositions created on or after this ISO8601 date-time with timezone.
    :param datetime date_created_before: Only show Compositions created before this ISO8601 date-time with timezone.
    :param unicode room_sid: Only show Compositions with the given Room SID.
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of CompositionInstance
    :rtype: twilio.rest.video.v1.composition.CompositionPage
    """
    query = values.of({
        'Status': status,
        'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
        'DateCreatedBefore': serialize.iso8601_datetime(date_created_before),
        'RoomSid': room_sid,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=query)
    return CompositionPage(self._version, response, self._solution)
|
def set(self, time, value, compact=False):
    """Set the value for the time series.

    If compact is True, only set the value if it's different from what
    it would be anyway.
    """
    # Always write when the series is empty or compaction is off;
    # otherwise only write when the stored value would change.
    must_write = (len(self) == 0) or (not compact) or (compact and self.get(time) != value)
    if must_write:
        self._d[time] = value
|
async def open_session(self, request: BaseRequestWebsocket) -> Session:
    """Open and return a Session using the request.

    Wraps the (possibly synchronous) ``session_interface.open_session``
    so it can be awaited uniformly.
    """
    return await ensure_coroutine(self.session_interface.open_session)(self, request)
|
def tmpfile(prefix, direc):
    """Return the path to a newly created temporary '.pdb' file.

    The file is created securely with ``tempfile.mkstemp``; the previous
    ``tempfile.mktemp`` only generated a name, which is vulnerable to a
    race between name generation and file creation (and is deprecated).

    :param prefix: filename prefix for the temporary file.
    :param direc: directory in which to create the file.
    :return: path of the created (empty) file.
    """
    import os
    fd, path = tempfile.mkstemp(prefix=prefix, suffix='.pdb', dir=direc)
    # Close the low-level descriptor; callers only need the path.
    os.close(fd)
    return path
|
def query_pa_no_flush(session, permission, role, obj):
    """Query for a :class:`PermissionAssignment` using `session` without any
    `flush()`.

    It works by looking in session `new`, `dirty` and `deleted`, and issuing
    a query with no autoflush.

    .. note::

       This function is used by `add_permission` and `delete_permission` to
       allow to add/remove the same assignment twice without issuing any
       flush.  Since :class:`Entity` creates its initial permissions during
       :sqlalchemy:`sqlalchemy.orm.events.SessionEvents.after_attach`, it
       might be problematic to issue a flush when entity is not yet ready to
       be flushed (missing required attributes for example).
    """
    # Pending/changed/deleted instances may hold the assignment before
    # any flush has happened.
    to_visit = [session.deleted, session.dirty, session.new]
    with session.no_autoflush:
        # no_autoflush is required to visit PERMISSIONS_ATTR without
        # emitting a flush()
        if obj:
            to_visit.append(getattr(obj, PERMISSIONS_ATTR))
        permissions = (p for p in chain(*to_visit) if isinstance(p, PermissionAssignment))
        for instance in permissions:
            if (instance.permission == permission and instance.role == role and instance.object == obj):
                return instance
        # Last chance: perform a filtered query.  If obj is not None,
        # sometimes getattr(obj, PERMISSIONS_ATTR) has objects not
        # present in session not in this query (maybe in a parent
        # session transaction `new`?).
        if obj is not None and obj.id is None:
            obj = None
        return (session.query(PermissionAssignment).filter(PermissionAssignment.permission == permission, PermissionAssignment.role == role, PermissionAssignment.object == obj,).first())
|
async def toggle(self):
    """Toggles between pause and resume command.

    Does nothing unless the player state is 'ready' and a streamer
    exists; any error raised while pausing/resuming is logged.
    """
    self.logger.debug("toggle command")
    if not self.state == 'ready':
        return
    if self.streamer is None:
        return
    try:
        if self.streamer.is_playing():
            await self.pause()
        else:
            await self.resume()
    except Exception as e:
        # Use the instance logger: the previous module-level `logger`
        # was inconsistent with the `self.logger.debug` call above and
        # could be undefined at runtime.
        self.logger.error(e)
|
def get_row_generator(self, ref, cache=None):
    """Return a row generator for a reference.

    :param ref: the reference to build a row generator for.
    :param cache: unused; kept for interface compatibility.
    :raises GenerateError: when no generator can be constructed for
        ``ref``.
    """
    # Local import kept (third-party dependency loaded lazily); the
    # previously unused `from inspect import isgenerator` was removed.
    from rowgenerators import get_generator

    g = get_generator(ref)
    if not g:
        raise GenerateError(
            "Cant figure out how to generate rows from {} ref: {}".format(type(ref), ref))
    return g
|
def urlsplit(url):
    """Split an arbitrary url into protocol, host, rest.

    The standard urlsplit does not want to provide 'netloc' for
    arbitrary protocols; this works around that.

    :param url: The url to split into component parts
    :return: (proto, host, rest) tuple of strings.
    """
    proto, rest = url.split(':', 1)
    host = ''
    if rest[:2] == '//':
        # str.partition handles a netloc with no trailing slash
        # (e.g. "http://example.com"); str.split(..., 1) produced a
        # single-element list there and failed to unpack.
        host, _, rest = rest[2:].partition('/')
        rest = '/' + rest
    return proto, host, rest
|
def _compute_inflation(value, reference_value):
    """Helper function to compute the inflation/deflation of ``value``
    relative to ``reference_value``.

    Returns an ``InflationResult`` with the raw ratio (``factor``) and
    the relative change (``value`` = factor - 1).
    """
    factor = value / float(reference_value)
    return InflationResult(factor=factor, value=factor - 1)
|
def dropout_mask(x: Tensor, sz: Collection[int], p: float):
    "Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
    # New tensor like `x`, Bernoulli keep-mask scaled by 1/(1-p)
    # (inverted dropout).
    mask = x.new(*sz)
    return mask.bernoulli_(1 - p).div_(1 - p)
|
def _discover_meta_cols(self, **kwargs):
    """Return the subset of `kwargs` values (not keys!) matching a
    `meta` column name.

    'exclude' is always included in the result.
    """
    matches = {
        value for value in kwargs.values()
        if isstr(value) and value in self.meta.columns
    }
    return list(matches | {'exclude'})
|
def prepare_function_symbol(self, symbol_name, basic_addr=None):
    """Prepare the address space with the data necessary to perform
    relocations pointing to the given symbol.

    Returns a 2-tuple: (address of the function code, address of the
    relocation target).  Here both are the same address; when none is
    given a pseudo-address is allocated from the extern object.
    """
    addr = basic_addr
    if addr is None:
        addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
    return addr, addr
|
def normalize_keys(self, value):
    """Normalize the keys of a dictionary using :func:`normalize_name()`.

    :param value: The dictionary to normalize.
    :returns: A dictionary with normalized keys.
    """
    return {self.normalize_name(key): val for key, val in value.items()}
|
def list_all_promotions(cls, **kwargs):
    """List Promotions.

    Return a list of Promotions.  This method makes a synchronous HTTP
    request by default.  To make an asynchronous HTTP request, please
    pass async=True:

    >>> thread = api.list_all_promotions(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Promotion]
             If the method is called asynchronously, returns the request
             thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches returned the result of the same
    # call (a thread when async=True, the data otherwise), so a single
    # return suffices.
    return cls._list_all_promotions_with_http_info(**kwargs)
|
def tx_tn_days_above(tasmin, tasmax, thresh_tasmin='22 degC', thresh_tasmax='30 degC', freq='YS'):
    r"""Number of days with both hot maximum and minimum daily temperatures.

    The number of days per period with tasmin above a threshold and
    tasmax above another threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature [℃] or [K]
    tasmax : xarray.DataArray
      Maximum daily temperature [℃] or [K]
    thresh_tasmin : str
      Threshold temperature for tasmin on which to base evaluation [℃] or [K]. Default: '22 degC'
    thresh_tasmax : str
      Threshold temperature for tasmax on which to base evaluation [℃] or [K]. Default: '30 degC'
    freq : str, optional
      Resampling frequency

    Returns
    -------
    xarray.DataArray
      The number of days with tasmin > thresh_tasmin and
      tasmax > thresh_tasmax per period.

    Notes
    -----
    Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of
    period :math:`j`, :math:`TN_{ij}` the daily minimum temperature at
    day :math:`i` of period :math:`j`, :math:`TX_{thresh}` the threshold
    for maximum daily temperature, and :math:`TN_{thresh}` the threshold
    for minimum daily temperature.  Then counted is the number of days
    where:

    .. math::

        TX_{ij} > TX_{thresh} [℃]

    and where:

    .. math::

        TN_{ij} > TN_{thresh} [℃]
    """
    # Convert thresholds into the units of the corresponding arrays.
    min_thresh = utils.convert_units_to(thresh_tasmin, tasmin)
    max_thresh = utils.convert_units_to(thresh_tasmax, tasmax)
    # Boolean event mask as integers, summed per resampling period.
    hot_days = ((tasmin > min_thresh) & (tasmax > max_thresh)) * 1
    return hot_days.resample(time=freq).sum(dim='time')
|
def GetCollectionNode(self, partition_key):
    """Gets the SelfLink/ID based link of the collection node that maps
    to the partition key, based on the hashing algorithm used for
    finding the node in the ring.

    :param str partition_key:
        The partition key to be used for finding the node in the ring.
    :return:
        The name of the collection mapped to that partition.
    :rtype: str
    """
    if partition_key is None:
        raise ValueError("partition_key is None or empty.")
    key_bytes = self._GetBytes(partition_key)
    partition_number = self._FindPartition(key_bytes)
    return self.partitions[partition_number].GetNode()
|
def mk_dict(results, description):
    """Given a result list and description sequence, return a list of
    dictionaries: one dict per row, keyed by the column names taken from
    ``description[i][0]`` (DB-API cursor.description style).
    """
    return [
        {description[idx][0]: cell for idx, cell in enumerate(row)}
        for row in results
    ]
|
def get_shard_id2num_examples(num_shards, total_num_examples):
    """Return the mapping shard_id => num_examples, assuming round-robin."""
    # TODO(b/130353071): This has the strong assumption that the shards
    # have been written in a round-robin fashion.  This assumption does
    # not hold, for instance, with Beam generation.  The mapping
    # shard_id => num_examples should be computed during generation.
    base, remainder = divmod(total_num_examples, num_shards)
    # The first `remainder` shards each receive one extra example.
    return [base + 1] * remainder + [base] * (num_shards - remainder)
|
def segment_length(curve, start, end, start_point, end_point, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH, depth=0):
    """Recursively approximates the length by straight lines.

    Compares the chord of the [start, end] parameter interval against
    the two half-chords through the midpoint; recurses until the
    difference is within `error` and at least `min_depth` levels deep.
    """
    mid = (start + end) / 2
    mid_point = curve.point(mid)
    chord = abs(end_point - start_point)
    halves = abs(mid_point - start_point) + abs(end_point - mid_point)
    if (halves - chord > error) or (depth < min_depth):
        # Not converged yet: split the interval and sum both halves.
        left = segment_length(curve, start, mid, start_point, mid_point, error, min_depth, depth + 1)
        right = segment_length(curve, mid, end, mid_point, end_point, error, min_depth, depth + 1)
        return left + right
    # This is accurate enough.
    return halves
|
def filter_input(keys, raw):
    """Adds fancy mouse wheel functionality and VI navigation to ListBox"""
    # Only single-key events are translated; everything else passes through.
    if len(keys) != 1:
        return keys
    key = keys[0]
    if key in UI.keys['up']:
        keys[0] = 'up'
    elif key in UI.keys['down']:
        keys[0] = 'down'
    elif len(key) == 4 and key[0] == 'mouse press':
        # Buttons 4/5 are the scroll wheel.
        if key[1] == 4:
            keys[0] = 'up'
        elif key[1] == 5:
            keys[0] = 'down'
    return keys
|
def histogram_summary(tag, values, bins):
    """Outputs a `Summary` protocol buffer with a histogram.

    Adding a histogram summary makes it possible to visualize the data's distribution in
    TensorBoard. See detailed explanation of the TensorBoard histogram dashboard at
    https://www.tensorflow.org/get_started/tensorboard_histograms

    This op reports an `InvalidArgument` error if any value is not finite.
    Adapted from the TensorFlow function `histogram()` at
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/summary/summary.py

    Parameters
    ----------
    tag : str
        A name for the summary of the histogram. Will also serve as a series name in
        TensorBoard.
    values : MXNet `NDArray` or `numpy.ndarray`
        Values for building the histogram.
    bins : object
        Bin specification forwarded unchanged to `_make_histogram`
        (presumably a bin count or edge sequence — confirm against
        `_make_histogram`'s signature).

    Returns
    -------
    A `Summary` protobuf of the histogram.
    """
    tag = _clean_tag(tag)
    values = _make_numpy_array(values)
    # The histogram is computed on a float copy of the data.
    hist = _make_histogram(values.astype(float), bins)
    return Summary(value=[Summary.Value(tag=tag, histo=hist)])
|
def add_federation(self, provider, federated_id):
    """Add a federated login to the current user.

    :param provider: identifier of the federation provider, passed through
        to ``models.AuthUserFederation.new``
    :param federated_id: the user's id at that provider
    :return: None
    """
    # Persists the association; the created record is not returned.
    models.AuthUserFederation.new(user=self, provider=provider, federated_id=federated_id)
|
def _read_dataframes_100k ( path ) :
"""reads in the movielens 100k dataset"""
|
import pandas
ratings = pandas . read_table ( os . path . join ( path , "u.data" ) , names = [ 'userId' , 'movieId' , 'rating' , 'timestamp' ] )
movies = pandas . read_csv ( os . path . join ( path , "u.item" ) , names = [ 'movieId' , 'title' ] , usecols = [ 0 , 1 ] , delimiter = '|' , encoding = 'ISO-8859-1' )
return ratings , movies
|
def load_model(self, tid, custom_objects=None):
    """Load the saved Keras model of the trial.

    If tid is None, the best model is loaded.
    Not applicable for trials ran in cross validation (i.e. not applicable
    for `CompileFN.cv_n_folds is None`).

    :param tid: trial id, or None to use the best trial
    :param custom_objects: forwarded to the Keras ``load_model`` call
        (needed for models containing custom layers/objects)
    :return: the loaded Keras model
    """
    if tid is None:
        # Fall back to the best trial recorded so far.
        tid = self.best_trial_tid()
    # The trial's result dict stores where the model file was written.
    model_path = self.get_trial(tid)["result"]["path"]["model"]
    return load_model(model_path, custom_objects=custom_objects)
|
def subroutine(self, iterator, asyncStart=True, name=None, daemon=False):
    """Start extra routines in this container.

    :param iterator: A coroutine object i.e the return value of an async method `my_routine()`

    :param asyncStart: if False, start the routine in foreground. By default, the routine
           starts in background, which means it is not executed until the current caller
           reaches the next `yield` statement or quit.

    :param name: if not None, `container.<name>` is set to the routine object. This is useful
           when you want to terminate the routine from outside.

    :param daemon: if True, this routine is set to be a daemon routine.
           A daemon routine does not stop the scheduler from quitting; if all non-daemon
           routines are quit, the scheduler stops.

    :return: the created Routine object
    """
    r = Routine(iterator, self.scheduler, asyncStart, self, True, daemon)
    if name is not None:
        # Expose the routine on the container so it can be terminated later.
        setattr(self, name, r)
    # Call subroutine may change the currentroutine, we should restore it
    currentroutine = getattr(self, 'currentroutine', None)
    try:
        # Prime the routine; StopIteration means it finished immediately.
        next(r)
    except StopIteration:
        pass
    self.currentroutine = currentroutine
    return r
|
def tune(self, divergence_threshold=1e10, verbose=0):
    """Tune the proposal scaling from the recent acceptance rate.

    The adaptive scale factor is multiplied according to the rate of
    accepted proposals since the last tuning:

        rate < 0.001  -> x 0.1
        rate < 0.05   -> x 0.5
        rate < 0.2    -> x 0.9
        rate > 0.95   -> x 10
        rate > 0.75   -> x 2
        rate > 0.5    -> x 1.1

    Called exclusively during the burn-in period of the sampling
    algorithm.  May be overridden in subclasses.

    :return: True while still tuning, False once the acceptance rate is
        in the acceptable band (or the factor would have collapsed to 0).
    """
    # An instance-level verbosity setting overrides the argument.
    if self.verbose > -1:
        verbose = self.verbose
    if verbose > 0:
        print_('\t%s tuning:' % self._id)

    tuning = True

    # Nothing proposed since the last reset: keep tuning, change nothing.
    total = self.accepted + self.rejected
    if not total:
        return tuning

    acc_rate = self.accepted / total
    previous_factor = self.adaptive_scale_factor

    # Map the acceptance rate onto a multiplicative adjustment.
    if acc_rate < 0.001:
        scale = 0.1       # reduce by 90 percent
    elif acc_rate < 0.05:
        scale = 0.5       # reduce by 50 percent
    elif acc_rate < 0.2:
        scale = 0.9       # reduce by ten percent
    elif acc_rate > 0.95:
        scale = 10.0      # increase by factor of ten
    elif acc_rate > 0.75:
        scale = 2.0       # increase by double
    elif acc_rate > 0.5:
        scale = 1.1       # increase by ten percent
    else:
        scale = None      # rate is acceptable; stop tuning
        tuning = False

    if scale is not None:
        self.adaptive_scale_factor *= scale

    # Re-initialize the acceptance counters.
    self.rejected = 0.
    self.accepted = 0.

    # Prevent the factor from tuning to zero.
    if not self.adaptive_scale_factor:
        self.adaptive_scale_factor = previous_factor
        return False

    # More verbose feedback, if requested.
    if verbose > 0:
        if hasattr(self, 'stochastic'):
            print_('\t\tvalue:', self.stochastic.value)
        print_('\t\tacceptance rate:', acc_rate)
        print_('\t\tadaptive scale factor:', self.adaptive_scale_factor)
        print_()
    return tuning
|
def fits_region_objects_to_table(regions):
    """Converts list of regions to FITS region table.

    Parameters
    ----------
    regions : list
        List of `regions.Region` objects

    Returns
    -------
    region_string : `~astropy.table.Table`
        FITS region table

    Examples
    --------
    >>> from regions import CirclePixelRegion, PixCoord
    >>> reg_pixel = CirclePixelRegion(PixCoord(1, 2), 5)
    >>> table = fits_region_objects_to_table([reg_pixel])
    >>> print(table)
     X [1]  Y [1]  SHAPE  R [4]  ROTANG COMPONENT
      pix    pix           pix    deg
     1.0    2.0   circle 5.0 ..   0.0       1
    """
    for reg in regions:
        if isinstance(reg, SkyRegion):
            # BUG FIX: the original message had no '{}' placeholder, so the
            # offending region passed to .format() was silently dropped.
            raise TypeError('Every region must be a pixel region, got {}'.format(reg))
    shape_list = to_shape_list(regions, coordinate_system='image')
    return shape_list.to_fits()
|
def _onPaint(self, evt):
    """Called when wxPaintEvt is generated.

    Renders the figure into the paint device context: a full draw on the
    first exposure, a repaint of the cached rendering afterwards.
    """
    DEBUG_MSG("_onPaint()", 1, self)
    drawDC = wx.PaintDC(self)
    if not self._isDrawn:
        # First exposure: render the figure from scratch.
        self.draw(drawDC=drawDC)
    else:
        # Subsequent exposures: re-blit the already-rendered content.
        self.gui_repaint(drawDC=drawDC)
    # Let wx continue default paint-event processing.
    evt.Skip()
|
def get_representations_of_kind(kind, start=None, end=None):
    """Return all representations of properties of kind in the specified range.

    NOTE: This function does not return unindexed properties.

    Args:
        kind: name of kind whose properties you want.
        start: only return properties >= start if start is not None.
        end: only return properties < end if end is not None.

    Returns:
        A dictionary mapping property names to its list of representations.
    """
    query = Property.query(ancestor=Property.key_for_kind(kind))
    if start is not None and start != '':
        query = query.filter(Property.key >= Property.key_for_property(kind, start))
    if end is not None:
        # An empty upper bound excludes everything.
        if end == '':
            return {}
        query = query.filter(Property.key < Property.key_for_property(kind, end))
    # Later duplicates overwrite earlier ones, as in a plain assignment loop.
    return {prop.property_name: prop.property_representation for prop in query}
|
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs):
    """Makes forecast with the estimated model

    Parameters
    ----------
    h : int (default : 5)
        How many steps ahead would you like to forecast?
    past_values : int (default : 20)
        How many past observations to show on the forecast graph?
    intervals : Boolean
        Would you like to show 95% prediction intervals for the forecast?
    oos_data : pd.DataFrame
        Data for the variables to be used out of sample (ys can be NaNs)

    Returns
    -------
    - Plot of the forecast
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    figsize = kwargs.get('figsize', (10, 7))
    nsims = kwargs.get('nsims', 200)
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        # Build the full design matrix: in-sample rows followed by the
        # out-of-sample rows derived from oos_data.
        _, X_oos = dmatrices(self.formula, oos_data)
        X_oos = np.array([X_oos])[0]
        full_X = self.X.copy()
        full_X = np.append(full_X, X_oos, axis=0)
        Z = full_X
        date_index = self.shift_dates(h)
        # Retrieve data, dates and (transformed) latent variables
        if self.latent_variables.estimation_method in ['M-H']:
            # Posterior sampling: average the forecast over draws of the
            # latent variables.
            lower_final = 0
            upper_final = 0
            plot_values_final = 0
            plot_index = date_index[-h - past_values:]
            for i in range(nsims):
                t_params = self.draw_latent_variables(nsims=1).T[0]
                a, P = self._forecast_model(t_params, Z, h)
                smoothed_series = np.zeros(self.y.shape[0] + h)
                series_variance = np.zeros(self.y.shape[0] + h)
                for t in range(self.y.shape[0] + h):
                    smoothed_series[t] = np.dot(Z[t], a[:, t])
                    series_variance[t] = np.dot(np.dot(Z[t], P[:, :, t]), Z[t].T)
                plot_values = smoothed_series[-h - past_values:]
                # 95% band: state variance plus observation noise variance.
                lower = smoothed_series[-h:] - 1.96 * np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]), 0.5)
                upper = smoothed_series[-h:] + 1.96 * np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]), 0.5)
                # Prepend the last in-sample point so the band connects to the line.
                lower_final += np.append(plot_values[-h - 1], lower)
                upper_final += np.append(plot_values[-h - 1], upper)
                plot_values_final += plot_values
            plot_values_final = plot_values_final / nsims
            lower_final = lower_final / nsims
            upper_final = upper_final / nsims
            plt.figure(figsize=figsize)
            if intervals == True:
                plt.fill_between(date_index[-h - 1:], lower_final, upper_final, alpha=0.2)
            plt.plot(plot_index, plot_values_final)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()
        else:
            # Point estimation: a single forecast from the fitted latent
            # variables.
            # BUG FIX: the design matrix Z was missing from this call; the
            # M-H branch above calls _forecast_model(t_params, Z, h), so
            # passing only (values, h) shifted h into the Z slot.
            a, P = self._forecast_model(self.latent_variables.get_z_values(), Z, h)
            plot_values = a[0][-h - past_values:]
            forecasted_values = a[0][-h:]
            smoothed_series = np.zeros(self.y.shape[0] + h)
            series_variance = np.zeros(self.y.shape[0] + h)
            for t in range(self.y.shape[0] + h):
                smoothed_series[t] = np.dot(Z[t], a[:, t])
                series_variance[t] = np.dot(np.dot(Z[t], P[:, :, t]), Z[t].T)
            lower = forecasted_values - 1.96 * np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]), 0.5)
            upper = forecasted_values + 1.96 * np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]), 0.5)
            lower = np.append(plot_values[-h - 1], lower)
            upper = np.append(plot_values[-h - 1], upper)
            plot_index = date_index[-h - past_values:]
            plt.figure(figsize=figsize)
            if intervals == True:
                plt.fill_between(date_index[-h - 1:], lower, upper, alpha=0.2)
            plt.plot(plot_index, plot_values)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()
|
def reloadFileAtIndex(self, itemIndex, rtiClass=None):
    """Reloads the item at the index by removing the repo tree item and inserting a new one.

    The new item will be of type rtiClass. If rtiClass is None (the default), the
    new rtiClass will be the same as the old one.

    :param itemIndex: model index of the repo tree item to reload
    :param rtiClass: class of the replacement item, or None to reuse the old class
    :return: whatever loadFile returns for the newly inserted item
    """
    # Remember where the old item lived so the replacement lands in the same spot.
    fileRtiParentIndex = itemIndex.parent()
    fileRti = self.getItem(itemIndex)
    position = fileRti.childNumber()
    fileName = fileRti.fileName
    if rtiClass is None:
        rtiClass = type(fileRti)
    # Delete old RTI and insert a new one instead.
    self.deleteItemAtIndex(itemIndex)
    # this will close the item's resources.
    return self.loadFile(fileName, rtiClass, position=position, parentIndex=fileRtiParentIndex)
|
def send_result(self, additional_dict):
    '''Send a result to the RPC client

    :param additional_dict: the dictionary with the response
    '''
    # Assemble the JSON-RPC envelope first, then stream status, headers
    # and body in the order the HTTP protocol requires.
    payload = {'jsonrpc': self.req_rpc_version, 'id': self.req_id}
    payload.update(additional_dict)
    body = json.dumps(payload)
    self.send_response(200)
    self.send_header("Content-type", "application/json")
    self.send_header("Content-length", len(body))
    self.end_headers()
    self.wfile.write(body.encode())
|
def replace_cancel_operation_by_id(cls, cancel_operation_id, cancel_operation, **kwargs):
    """Replace CancelOperation

    Replace all attributes of CancelOperation.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True)
    >>> result = thread.get()

    :param async bool
    :param str cancel_operation_id: ID of cancelOperation to replace (required)
    :param CancelOperation cancel_operation: Attributes of cancelOperation to replace (required)
    :return: CancelOperation
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the response body is wanted, not the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # The helper returns the request thread when kwargs['async'] is truthy
    # and the already-resolved response data otherwise; either way the
    # value is handed straight back to the caller.
    result = cls._replace_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs)
    return result
|
def select_option_by_text(self, dropdown_selector, option, dropdown_by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT):
    """Selects an HTML <select> option by option text.

    @Params
    dropdown_selector - the <select> selector
    option - the text of the option
    dropdown_by - selector strategy used for dropdown_selector
    timeout - seconds to wait before giving up
    """
    # Scale the default timeout when a global multiplier is configured.
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    # Delegate to the generic option selector, matching on visible text.
    self.__select_option(dropdown_selector, option, dropdown_by=dropdown_by, option_by="text", timeout=timeout)
|
def get(self, sid):
    """Constructs a FunctionContext

    :param sid: The sid

    :returns: twilio.rest.serverless.v1.service.function.FunctionContext
    :rtype: twilio.rest.serverless.v1.service.function.FunctionContext
    """
    # The service_sid comes from the list's solution; only sid is caller-supplied.
    return FunctionContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
|
def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-8):
    """Concordance index for right-censored data based on inverse probability of censoring weights.

    This is an alternative to the estimator in :func:`concordance_index_censored`
    that does not depend on the distribution of censoring times in the test data.
    Therefore, the estimate is unbiased and consistent for a population concordance
    measure that is free of censoring.

    It is based on inverse probability of censoring weights, thus requires
    access to survival times from the training data to estimate the censoring
    distribution. Note that this requires that survival times `survival_test`
    lie within the range of survival times `survival_train`. This can be
    achieved by specifying the truncation time `tau`.
    The resulting `cindex` tells how well the given prediction model works in
    predicting events that occur in the time range from 0 to `tau`.

    The estimator uses the Kaplan-Meier estimator to estimate the
    censoring survivor function. Therefore, it is restricted to
    situations where the random censoring assumption holds and
    censoring is independent of the features.

    Parameters
    ----------
    survival_train : structured array, shape = (n_train_samples,)
        Survival times for training data to estimate the censoring
        distribution from.
        A structured array containing the binary event indicator
        as first field, and time of event or time of censoring as
        second field.
    survival_test : structured array, shape = (n_samples,)
        Survival times of test data.
        A structured array containing the binary event indicator
        as first field, and time of event or time of censoring as
        second field.
    estimate : array-like, shape = (n_samples,)
        Estimated risk of experiencing an event of test data.
    tau : float, optional
        Truncation time. The survival function for the underlying
        censoring time distribution :math:`D` needs to be positive
        at `tau`, i.e., `tau` should be chosen such that the
        probability of being censored after time `tau` is non-zero:
        :math:`P(D > \\tau) > 0`. If `None`, no truncation is performed.
    tied_tol : float, optional, default: 1e-8
        The tolerance value for considering ties.
        If the absolute difference between risk scores is smaller
        or equal than `tied_tol`, risk scores are considered tied.

    Returns
    -------
    cindex : float
        Concordance index
    concordant : int
        Number of concordant pairs
    discordant : int
        Number of discordant pairs
    tied_risk : int
        Number of pairs having tied estimated risks
    tied_time : int
        Number of comparable pairs sharing the same time

    References
    ----------
    .. [1] Uno, H., Cai, T., Pencina, M. J., D'Agostino, R. B., & Wei, L. J. (2011).
           "On the C-statistics for evaluating overall adequacy of risk prediction
           procedures with censored survival data".
           Statistics in Medicine, 30(10), 1105-1117.
    """
    test_event, test_time = check_y_survival(survival_test)
    estimate = check_array(estimate, ensure_2d=False)
    check_consistent_length(test_event, test_time, estimate)
    cens = CensoringDistributionEstimator()
    cens.fit(survival_train)
    if tau is None:
        ipcw = cens.predict_ipcw(survival_test)
    else:
        # BUG FIX: previously `survival_test` was truncated in place, so the
        # IPCW vector was shorter than `test_event`/`test_time`/`estimate`.
        # Instead, give samples observed at or after `tau` zero weight so all
        # arrays keep their full, consistent length.
        mask = test_time < tau
        ipcw = numpy.zeros(test_time.shape[0])
        ipcw[mask] = cens.predict_ipcw(survival_test[mask])
    w = numpy.square(ipcw)
    return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol)
|
def validate_query_params(self, request):
    """Validate that query params are in the list of valid query keywords in
    :py:attr:`query_regex`

    :raises ValidationError: if not.
    """
    # TODO: For jsonapi error object conformance, must set jsonapi errors "parameter" for
    # the ValidationError. This requires extending DRF/DJA Exceptions.
    for name in request.query_params.keys():
        # Unknown keywords are rejected before duplicates are checked.
        if not self.query_regex.match(name):
            raise ValidationError('invalid query parameter: {}'.format(name))
        values = request.query_params.getlist(name)
        if len(values) > 1:
            raise ValidationError('repeated query parameter not allowed: {}'.format(name))
|
def set_integer_value(self, value=None):
    """stub"""
    # Reject missing input outright.
    if value is None:
        raise NullArgument()
    # Honor read-only metadata.
    if self.get_integer_value_metadata().is_read_only():
        raise NoAccess()
    form = self.my_osid_object_form
    if not form._is_valid_integer(value, self.get_integer_value_metadata()):
        raise InvalidArgument()
    # Coerce to int on the way into the underlying map.
    form._my_map['integerValue'] = int(value)
|
def add(self, key, val):
    """Adds a (name, value) pair, doesn't overwrite the value if it already
    exists.

    >>> headers = HTTPHeaderDict(foo='bar')
    >>> headers.add('Foo', 'baz')
    >>> headers['foo']
    'bar, baz'
    """
    # Lookups are case-insensitive; the originally-cased key is kept as the
    # first element of the stored value.
    key_lower = key.lower()
    new_vals = key, val
    # Keep the common case aka no item present as fast as possible
    vals = _dict_setdefault(self, key_lower, new_vals)
    if new_vals is not vals:
        # new_vals was not inserted, as there was a previous one
        if isinstance(vals, list):
            # If already several items got inserted, we have a list
            vals.append(val)
        else:
            # vals should be a tuple then, i.e. only one item so far
            # Need to convert the tuple to list for further extension
            _dict_setitem(self, key_lower, [vals[0], vals[1], val])
|
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """Open a connection to Amazon CloudWatch.

    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
    :return: A connection to Amazon's EC2 Monitoring service
    """
    # Imported lazily so boto.ec2 is only required when this helper is used.
    from boto.ec2.cloudwatch import CloudWatchConnection
    return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
|
async def edit_message(self, entity, message=None, text=None, *, parse_mode=(), link_preview=True, file=None, buttons=None):
    """Edits the given message ID (to change its contents or disable preview).

    Args:
        entity (`entity` | `Message <telethon.tl.custom.message.Message>`):
            From which chat to edit the message. This can also be
            the message to be edited, and the entity will be inferred
            from it, so the next parameter will be assumed to be the
            message text.

            You may also pass a :tl:`InputBotInlineMessageID`,
            which is the only way to edit messages that were sent
            after the user selects an inline query result.

        message (`int` | `Message <telethon.tl.custom.message.Message>` | `str`):
            The ID of the message (or `Message
            <telethon.tl.custom.message.Message>` itself) to be edited.
            If the `entity` was a `Message
            <telethon.tl.custom.message.Message>`, then this message
            will be treated as the new text.

        text (`str`, optional):
            The new text of the message. Does nothing if the `entity`
            was a `Message <telethon.tl.custom.message.Message>`.

        parse_mode (`object`, optional):
            See the `TelegramClient.parse_mode
            <telethon.client.messageparse.MessageParseMethods.parse_mode>`
            property for allowed values. Markdown parsing will be used by
            default.

        link_preview (`bool`, optional):
            Should the link preview be shown?

        file (`str` | `bytes` | `file` | `media`, optional):
            The file object that should replace the existing media
            in the message.

        buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`):
            The matrix (list of lists), row list or button to be shown
            after sending the message. This parameter will only work if
            you have signed in as a bot. You can also pass your own
            :tl:`ReplyMarkup` here.

    Examples:

        >>> client = ...
        >>> message = client.send_message('username', 'hello')
        >>> client.edit_message('username', message, 'hello!')
        >>> # or
        >>> client.edit_message('username', message.id, 'Hello')
        >>> # or
        >>> client.edit_message(message, 'Hello!')

    Raises:
        ``MessageAuthorRequiredError`` if you're not the author of the
        message but tried editing it anyway.

        ``MessageNotModifiedError`` if the contents of the message were
        not modified at all.

    Returns:
        The edited `telethon.tl.custom.message.Message`, unless
        `entity` was a :tl:`InputBotInlineMessageID` in which
        case this method returns a boolean.
    """
    if isinstance(entity, types.InputBotInlineMessageID):
        # An inline-message ID identifies both chat and message, so the
        # second positional argument is actually the new text.
        text = message
        message = entity
    elif isinstance(entity, types.Message):
        text = message
        # Shift the parameters to the right
        message = entity
        entity = entity.to_id
    text, msg_entities = await self._parse_message_text(text, parse_mode)
    file_handle, media, image = await self._file_to_media(file)
    if isinstance(entity, types.InputBotInlineMessageID):
        # Inline messages use a dedicated request and return a boolean.
        return await self(functions.messages.EditInlineBotMessageRequest(
            id=entity, message=text, no_webpage=not link_preview,
            entities=msg_entities, media=media,
            reply_markup=self.build_reply_markup(buttons)))
    entity = await self.get_input_entity(entity)
    request = functions.messages.EditMessageRequest(
        peer=entity, id=utils.get_message_id(message), message=text,
        no_webpage=not link_preview, entities=msg_entities, media=media,
        reply_markup=self.build_reply_markup(buttons))
    msg = self._get_response_message(request, await self(request), entity)
    # Remember the uploaded media so re-sends can reuse it.
    await self._cache_media(msg, file, file_handle, image=image)
    return msg
|
def poll(self):
    """Update internal state from polling strava.com.

    :raise stravalib.exc.ActivityUploadFailed: If the poll returns an error.
    """
    # check_for_errors=False: error handling is deferred to update_from_response.
    response = self.client.protocol.get('/uploads/{upload_id}', upload_id=self.upload_id, check_for_errors=False)
    self.update_from_response(response)
|
def delete(self):
    """Delete the object from the db if pk is set.

    :return: True when a DELETE was issued, False when there was no pk.

    Side effects: the pk attribute is cleared, and every field that still
    holds a value is marked as modified so a later save re-inserts the row.
    """
    pk = self.pk
    if not pk:
        return False
    pk_name = self.schema.pk.name
    # Issue the DELETE for this row.  (The previous version also bound
    # self.query to an unused local.)
    self.query.is_field(pk_name, pk).delete()
    setattr(self, pk_name, None)
    # mark all the fields that still exist as modified
    self.reset_modified()
    for field_name in self.schema.fields:
        # `is not None` instead of `!= None`: identity check, and safe for
        # values with non-standard __ne__ (e.g. array-like fields).
        if getattr(self, field_name, None) is not None:
            self.modified_fields.add(field_name)
    return True
|
def createEditor(self, parent, column, operator, value):
    """Creates a new editor for the system.

    :param parent: parent widget for the editor
    :param column: column whose enum provides the editor's choices
    :param operator: comparison operator; 'contains' / 'does not contain'
        put the editor into checkable (multi-select) mode
    :param value: initial value shown in the editor
    :return: the configured editor widget
    """
    editor = super(EnumPlugin, self).createEditor(parent, column, operator, value)
    editor.setEnum(column.enum())
    # Multi-value operators need a checkable editor.
    if operator in ('contains', 'does not contain'):
        editor.setCheckable(True)
    editor.setCurrentValue(value)
    return editor
|
def _read_mol ( self ) :
"""- V3000"""
|
self . system = dict ( )
if self . file_content [ 2 ] != '\n' :
self . system [ 'remarks' ] = self . file_content [ 2 ]
file_body = [ i . split ( ) for i in self . file_content ]
elements = [ ]
coordinates = [ ]
atom_data = False
for line in file_body :
if len ( line ) > 2 :
if line [ 2 ] == 'END' and line [ 3 ] == 'ATOM' :
atom_data = False
if atom_data is True :
elements . append ( line [ 3 ] )
coordinates . append ( line [ 4 : 7 ] )
if line [ 2 ] == 'BEGIN' and line [ 3 ] == 'ATOM' :
atom_data = True
self . system [ 'elements' ] = np . array ( elements )
self . system [ 'coordinates' ] = np . array ( coordinates , dtype = float )
return self . system
|
def _geometric_intersect(nodes1, degree1, nodes2, degree2, verify):
    r"""Find all intersections among edges of two surfaces.

    .. note::

        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Uses :func:`generic_intersect` with the
    :attr:`~.IntersectionStrategy.GEOMETRIC` intersection strategy.

    Args:
        nodes1 (numpy.ndarray): The nodes defining the first surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree1 (int): The degree of the surface given by ``nodes1``.
        nodes2 (numpy.ndarray): The nodes defining the second surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree2 (int): The degree of the surface given by ``nodes2``.
        verify (Optional[bool]): Indicates if duplicate intersections
            should be checked.

    Returns:
        Tuple[Optional[list], Optional[bool], tuple]: 3-tuple of

        * List of "edge info" lists. Each list represents a curved polygon
          and contains 3-tuples of edge index, start and end (see the
          output of :func:`ends_to_curve`).
        * "Contained" boolean. If not :data:`None`, indicates
          that one of the surfaces is contained in the other.
        * The nodes of three edges of the first surface being intersected
          followed by the nodes of the three edges of the second.
    """
    # Delegate to the generic driver with the geometric all-intersections routine.
    all_intersections = _geometric_intersection.all_intersections
    return generic_intersect(nodes1, degree1, nodes2, degree2, verify, all_intersections)
|
def from_dict(d):
    """Recreate a KrausModel from the dictionary representation.

    :param dict d: The dictionary representing the KrausModel. See `to_dict` for an
        example.
    :return: The deserialized KrausModel.
    :rtype: KrausModel
    """
    # Each Kraus operator is stored in packed form; unpack back into matrices.
    kraus_ops = [KrausModel.unpack_kraus_matrix(k) for k in d['kraus_ops']]
    return KrausModel(d['gate'], d['params'], d['targets'], kraus_ops, d['fidelity'])
|
def clean_tenant_url(url_string):
    """Removes the TENANT_TOKEN from a particular string"""
    # Strip a leading PUBLIC_SCHEMA_URLCONF prefix when one is configured.
    public_urlconf = getattr(settings, 'PUBLIC_SCHEMA_URLCONF', None)
    if public_urlconf and url_string.startswith(public_urlconf):
        url_string = url_string[len(public_urlconf):]
    return url_string
|
def find(cls, paths):
    """Given a list of files or directories, try to detect python interpreters amongst them.

    Returns a list of PythonInterpreter objects.
    """
    interpreters = []
    for path in paths:
        for candidate in cls.expand_path(path):
            if not cls._matches_binary_name(os.path.basename(candidate)):
                continue
            try:
                interpreters.append(cls.from_binary(candidate))
            except Exception as e:
                # Keep scanning: one unidentifiable binary must not abort discovery.
                TRACER.log('Could not identify %s: %s' % (candidate, e))
    return interpreters
|
def _auto_wait_for_status ( self , message = None , exclude_services = None , include_only = None , timeout = None ) :
"""Wait for all units to have a specific extended status , except
for any defined as excluded . Unless specified via message , any
status containing any case of ' ready ' will be considered a match .
Examples of message usage :
Wait for all unit status to CONTAIN any case of ' ready ' or ' ok ' :
message = re . compile ( ' . * ready . * | . * ok . * ' , re . IGNORECASE )
Wait for all units to reach this status ( exact match ) :
message = re . compile ( ' ^ Unit is ready and clustered $ ' )
Wait for all units to reach any one of these ( exact match ) :
message = re . compile ( ' Unit is ready | OK | Ready ' )
Wait for at least one unit to reach this status ( exact match ) :
message = { ' ready ' }
See Amulet ' s sentry . wait _ for _ messages ( ) for message usage detail .
https : / / github . com / juju / amulet / blob / master / amulet / sentry . py
: param message : Expected status match
: param exclude _ services : List of juju service names to ignore ,
not to be used in conjuction with include _ only .
: param include _ only : List of juju service names to exclusively check ,
not to be used in conjuction with exclude _ services .
: param timeout : Maximum time in seconds to wait for status match
: returns : None . Raises if timeout is hit ."""
|
if not timeout :
timeout = int ( os . environ . get ( 'AMULET_SETUP_TIMEOUT' , 1800 ) )
self . log . info ( 'Waiting for extended status on units for {}s...' '' . format ( timeout ) )
all_services = self . d . services . keys ( )
if exclude_services and include_only :
raise ValueError ( 'exclude_services can not be used ' 'with include_only' )
if message :
if isinstance ( message , re . _pattern_type ) :
match = message . pattern
else :
match = message
self . log . debug ( 'Custom extended status wait match: ' '{}' . format ( match ) )
else :
self . log . debug ( 'Default extended status wait match: contains ' 'READY (case-insensitive)' )
message = re . compile ( '.*ready.*' , re . IGNORECASE )
if exclude_services :
self . log . debug ( 'Excluding services from extended status match: ' '{}' . format ( exclude_services ) )
else :
exclude_services = [ ]
if include_only :
services = include_only
else :
services = list ( set ( all_services ) - set ( exclude_services ) )
self . log . debug ( 'Waiting up to {}s for extended status on services: ' '{}' . format ( timeout , services ) )
service_messages = { service : message for service in services }
# Check for idleness
self . d . sentry . wait ( timeout = timeout )
# Check for error states and bail early
self . d . sentry . wait_for_status ( self . d . juju_env , services , timeout = timeout )
# Check for ready messages
self . d . sentry . wait_for_messages ( service_messages , timeout = timeout )
self . log . info ( 'OK' )
|
def abup_se_plot(mod, species):
    """Plot a species from one ABUPP file and from the se file.

    You must use this function in the directory where the ABP files
    are, and an ABUPP file for model ``mod`` must exist.

    Parameters
    ----------
    mod : integer
        Model to plot; you need to have an ABUPP file for that model.
    species : string
        The species to plot.

    Notes
    -----
    The species is hard-coded to 'C-12' (see NOTE below); the
    ``species`` argument is currently ignored.
    """
    # NOTE(review): this function references ``self`` but is not defined
    # with a ``self`` parameter -- as written the ``self.se.get`` calls
    # will raise NameError unless this is re-bound as a method. TODO
    # confirm the intended enclosing class.

    # Marco, you have already implemented finding headers and columns in
    # ABUP files. You may want to transplant that into here?
    # NOTE(review): the ``species`` argument is overwritten here, so only
    # C-12 can ever be plotted.
    species = 'C-12'
    filename = 'ABUPP%07d0000.DAT' % mod
    print(filename)
    # Columns 1 and 18 are presumably mass coordinate and C-12 mass
    # fraction -- TODO confirm against the ABUPP file header.
    mass, c12 = np.loadtxt(filename, skiprows=4, usecols=[1, 18], unpack=True)
    c12_se = self.se.get(mod, 'iso_massf', 'C-12')
    mass_se = self.se.get(mod, 'mass')
    pyl.plot(mass, c12)
    pyl.plot(mass_se, c12_se, 'o', label='cycle ' + str(mod))
    pyl.legend()
|
def threshold_brier_score(observations, forecasts, threshold, issorted=False, axis=-1):
    """Calculate Brier scores of an ensemble for exceeding given thresholds.

    Per the threshold decomposition of CRPS, the resulting Brier scores
    can be summed along the last axis to recover CRPS:

    .. math::
        CRPS(F, x) = \\int_z BS(F(z), H(z - x)) dz

    where $F$ is the forecast CDF, $x$ the observation, $BS$ the Brier
    score and $H$ the Heaviside step function (1 for x >= 0, else 0).

    Parameters
    ----------
    observations : float or array_like
        Observations; missing values (NaN) are given scores of NaN.
    forecasts : float or array_like
        Ensemble forecasts; same shape as observations except for the
        extra ensemble axis. If shapes match exactly, the forecasts are
        treated as deterministic. Missing values (NaN) are ignored.
    threshold : scalar or 1d array_like
        Threshold value(s) at which to calculate exceedence Brier scores.
    issorted : bool, optional
        Set if the ensemble members are already sorted along `axis`.
    axis : int, optional
        Axis in forecasts corresponding to the ensemble members.

    Returns
    -------
    out : np.ndarray
        Brier scores at each threshold. For a scalar ``threshold`` the
        result matches the shape of observations; otherwise it gains a
        trailing threshold dimension.

    See also
    --------
    crps_ensemble, brier_score
    """
    observations = np.asarray(observations)
    threshold = np.asarray(threshold)
    forecasts = np.asarray(forecasts)

    # Normalize so the ensemble dimension is always the trailing axis.
    if axis != -1:
        forecasts = move_axis_to_end(forecasts, axis)
    # Same shape as observations means a deterministic forecast: give it
    # a singleton ensemble axis.
    if forecasts.shape == observations.shape:
        forecasts = forecasts[..., np.newaxis]
    if observations.shape != forecasts.shape[:-1]:
        raise ValueError('observations and forecasts must have matching '
                         'shapes or matching shapes except along `axis=%s`'
                         % axis)

    threshold_is_scalar = threshold.ndim == 0
    if threshold.ndim > 1:
        raise ValueError('threshold must be scalar or 1-dimensional')
    if threshold.ndim == 1 and not (np.sort(threshold) == threshold).all():
        raise ValueError('1D thresholds must be sorted')

    # Broadcast thresholds against observations: one leading singleton
    # axis per observation dimension, thresholds along a trailing axis.
    threshold = threshold.reshape((1,) * observations.ndim + (-1,))

    if not issorted:
        forecasts = np.sort(forecasts, axis=-1)

    scores = _threshold_brier_score_core(observations, forecasts, threshold)
    if threshold_is_scalar:
        scores = scores.squeeze(axis=-1)
    return scores
|
def submit_async_work_chain(self, work_chain, workunit_parent, done_hook=None):
    """Submit work to be executed in the background.

    - work_chain: An iterable of Work instances. Will be invoked serially.
      Each instance may have a different cardinality. There is no
      output-input chaining: the argument tuples must already be present
      in each work instance. If any work throws an exception no subsequent
      work in the chain will be attempted.
    - workunit_parent: Work is accounted for under this workunit.
    - done_hook: If not None, invoked with no args after all work is done,
      or on error.
    """
    def done():
        # Runs exactly once per chain -- on completion OR on error --
        # and releases the pending-chain counter under the condition.
        if done_hook:
            done_hook()
        with self._pending_workchains_cond:
            self._pending_workchains -= 1
            self._pending_workchains_cond.notify()

    def error(e):
        # Failure of any unit terminates the chain: mark done, log.
        done()
        self._run_tracker.log(Report.ERROR, '{}'.format(e))

    # We filter out Nones defensively. There shouldn't be any, but if a
    # bug causes one, Pants might hang indefinitely without this filtering.
    work_iter = (_f for _f in work_chain if _f)

    def submit_next():
        # Submit one work unit; the on_success callback recursively
        # submits the next, so units run strictly serially.
        try:
            self.submit_async_work(next(work_iter),
                                   workunit_parent=workunit_parent,
                                   on_success=lambda x: submit_next(),
                                   on_failure=error)
        except StopIteration:
            done()  # The success case.

    with self._pending_workchains_cond:
        self._pending_workchains += 1
    try:
        submit_next()
    except Exception as e:  # Handles errors in the submission code.
        done()
        self._run_tracker.log(Report.ERROR, '{}'.format(e))
        raise
|
def crop_at_zero_crossing(gen, seconds=5, error=0.1):
    '''Crop the generator, ending at a zero-crossing.

    Crop the generator to produce approximately ``seconds`` seconds
    (default 5s) of audio at the configured ``sampler.FRAME_RATE``,
    attempting to end the clip at a zero-crossing point to avoid
    clicking.

    :param gen: iterable of audio samples.
    :param seconds: approximate clip length in seconds.
    :param error: tolerance in seconds; the clip may end anywhere
        within +/- ``error`` seconds of the requested length.
    '''
    source = iter(gen)
    buffer_length = int(2 * error * sampler.FRAME_RATE)
    # Split the source into two iterators:
    # - start, which contains the bulk of the sound clip
    # - and end, which contains the final `error` seconds, plus `error`
    #   seconds past the desired clip length. We may cut the clip
    #   anywhere within this +/- `error` end buffer.
    start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE))
    end = itertools.islice(source, 0, buffer_length)
    for sample in start:
        yield sample
    # Pull the end buffer into memory so we can work with it.
    end = list(end)
    # Rank buffer samples: first by absolute amplitude (closest to zero),
    # then by distance from the optimal cut point (middle of the buffer).
    # Debug prints that leaked stdout (and used py2 print syntax) removed.
    best = sorted(enumerate(end),
                  key=lambda x: (math.fabs(x[1]), abs((buffer_length / 2) - x[0])))
    # todo: better logic when we don't have a perfect zero crossing
    # (best[0][1] != 0 means no exact crossing exists in the buffer).
    # Crop samples at index of best zero crossing.
    for sample in end[:best[0][0] + 1]:
        yield sample
|
def main():
    """Illustrate sample usage of this python module.

    Emits one log record at each severity level using the module's
    configured logger.

    :return: None
    """
    logger = logging.getLogger('{}.logify.main'.format(Logify.get_name()))
    logger.info('logger name is: %s', Logify.get_name())
    logger.debug('This is DEBUG')
    logger.info('This is INFO')
    logger.warning('This is a WARNING')
    logger.error('This is an ERROR')
|
def confirm_tell(self, data, success):
    """Confirm to a remote requestor that a control action was performed.

    Call this from your control callback (when the callback's args carry
    ``confirm``) to tell the remote requestor whether you carried out the
    requested action. If you never confirm, the requestor's call times
    out.

    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem.

    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the
    infrastructure.

    `data` (mandatory) (dictionary) The `"args"` dictionary that your
    callback was called with.

    `success` (mandatory) (boolean) Whether or not the action you have
    been asked to do has been successful.

    More details on the contents of the `data` dictionary for controls:
    [create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control)
    """
    lid = data[P_ENTITY_LID]
    pid = data[P_LID]
    logger.info("confirm_tell(success=%s) [lid=\"%s\",pid=\"%s\"]",
                success, lid, pid)
    evt = self._request_point_confirm_tell(R_CONTROL, lid, pid, success,
                                           data['requestId'])
    self._wait_and_except_if_failed(evt)
|
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to block vector
    :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T &
    \mathbf{x}_1^T & \ldots \end{array} \right)^T \;`.
    """
    # This test reflects empirical evidence that two slightly
    # different implementations are faster for single or
    # multi-channel data. This kludge is intended to be temporary.
    if self.cri.Cd > 1:
        # Multi-channel: solve each of the Nb blocks independently.
        for i in range(self.Nb):
            self.xistep(i)
    else:
        # Single-channel: solve all blocks in the DFT domain.
        self.YU[:] = self.Y[..., np.newaxis] - self.U
        # Move the K axis to the end so each block's right-hand side is
        # addressable as b[..., i].
        b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \
            + self.rho * sl.rfftn(self.YU, None, self.cri.axisN)
        for i in range(self.Nb):
            # Diagonal-block inverse via Sherman-Morrison
            # (sl.solvedbi_sm) -- presumably; confirm against sporco docs.
            self.Xf[..., i] = sl.solvedbi_sm(self.Zf[..., [i], :], self.rho,
                                             b[..., i], axis=self.cri.axisM)
        self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)

    # NOTE(review): indentation reconstructed from flattened source; the
    # LinSolveCheck block is assumed to apply to both branches -- confirm.
    if self.opt['LinSolveCheck']:
        # Optional relative-residual check of the linear solve.
        ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True)
        YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1)
        b = ZSfs + self.rho * sl.rfftn(YU, None, self.cri.axisN)
        Xf = self.swapaxes(self.Xf)
        Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM)
        ZHop = lambda x: np.conj(self.Zf) * x
        ax = np.sum(ZHop(Zop(Xf)) + self.rho * Xf,
                    axis=self.cri.axisK, keepdims=True)
        self.xrrs = sl.rrs(ax, b)
    else:
        self.xrrs = None
|
def infer_schema(self, stream, namespace=None):
    """Fetch the schema the Kronos server has inferred for ``stream``.

    When ``namespace`` is falsy, the client's default namespace is used.
    """
    payload = {'stream': stream,
               'namespace': namespace or self.namespace}
    return self._make_request(self._infer_schema_url, data=payload)
|
def _collectOffAxisPoints(self):
    """Return a list of all off-axis location keys.

    A location is off-axis when its ``isOnAxis()`` check yields ``None``
    or ``False``. (The original docstring claimed a dictionary was
    returned, but the function has always returned a list of keys; a
    throwaway dict was used purely as an ordered set.)
    """
    offAxis = []
    for l, (value, deltaName) in self.items():
        location = Location(l)
        name = location.isOnAxis()
        # isOnAxis() returns the axis name when on-axis; None/False
        # indicate an off-axis location.
        if name is None or name is False:
            offAxis.append(l)
    return offAxis
|
def find_collisions(Signal, tolerance=50):
    """Find collision events via the shift in phase of the signal.

    Parameters
    ----------
    Signal : array_like
        Values of the signal of interest containing a single frequency.
    tolerance : float
        Percentage tolerance; if the FM discriminator value deviates
        from the mean by this percentage it is counted as being during
        a collision event (or its aftermath).

    Returns
    -------
    Collisions : ndarray
        Array of booleans, true if during a collision event, false
        otherwise.
    """
    discriminator = fm_discriminator(Signal)
    baseline = _np.mean(discriminator)
    Collisions = [_is_this_a_collision([value, baseline, tolerance])
                  for value in discriminator]
    return Collisions
|
def parse(self, filename_or_file, initialize=True):
    """Load manifest from file or file object.

    :param filename_or_file: path string, or an open file-like object
        with a ``name`` attribute.
    :param initialize: when True, re-initialize this instance before
        loading the parsed DOM.
    :raises ManifestXMLParseError: if the XML cannot be parsed.

    NOTE: Python 2 only (``unicode`` and ``except E, e`` syntax).
    """
    if isinstance(filename_or_file, (str, unicode)):
        filename = filename_or_file
    else:
        filename = filename_or_file.name
    try:
        domtree = minidom.parse(filename_or_file)
    except xml.parsers.expat.ExpatError, e:
        # Prefix the parser's message with the offending file name so
        # the error is attributable.
        args = [e.args[0]]
        if isinstance(filename, unicode):
            filename = filename.encode(sys.getdefaultencoding(), "replace")
        args.insert(0, '\n File "%s"\n ' % filename)
        raise ManifestXMLParseError(" ".join([str(arg) for arg in args]))
    if initialize:
        self.__init__()
    self.filename = filename
    self.load_dom(domtree, False)
|
def _populate_domain(self):
    '''Populate TrustDomain's ``domain`` attribute.

    Inspects each device's certificate-authority devices in its Root
    trust domain and records them, yielding a mapping of who trusts
    whom in the domain (partition prefixes stripped).
    '''
    prefix = '/%s/' % self.partition
    self.domain = {}
    for device in self.devices:
        name = get_device_info(device).name
        root = device.tm.cm.trust_domains.trust_domain.load(name='Root')
        self.domain[name] = [ca.replace(prefix, '') for ca in root.caDevices]
|
def _iter_straight_packed(self, byte_blocks):
    """Iterator that undoes the effect of filtering; yields each row as
    a sequence of packed bytes.

    Assumes input is straightlaced (not interlaced).

    `byte_blocks` should be an iterable that yields the raw bytes in
    blocks of arbitrary size.
    """
    row_bytes = self.row_bytes  # length of row, in bytes
    pending = bytearray()
    # The previous (reconstructed) scanline; None for the first row.
    previous = None
    for block in byte_blocks:
        pending.extend(block)
        # Each stored row is one filter-type byte followed by
        # row_bytes of filtered data.
        while len(pending) >= row_bytes + 1:
            filter_type = pending[0]
            scanline = pending[1:row_bytes + 1]
            del pending[:row_bytes + 1]
            previous = self.undo_filter(filter_type, scanline, previous)
            yield previous
    if pending:
        # Leftover bytes: the decompressed data does not pack into
        # exact rows, which is a file format error.
        raise FormatError('Wrong size for decompressed IDAT chunk.')
|
def _tr_above(self):
    """The ``tr`` element prior in sequence to the tr this cell appears in.

    Raises |ValueError| if called on a cell in the top-most row.
    """
    rows = self._tbl.tr_lst
    idx = rows.index(self._tr)
    if idx == 0:
        raise ValueError('no tr above topmost tr')
    return rows[idx - 1]
|
def get_worker_node_ips(config_file, override_cluster_name):
    """Returns worker node IPs for given configuration file.

    :param config_file: path to the cluster YAML configuration.
    :param override_cluster_name: if not None, overrides the cluster
        name from the config.
    :return: list of internal or external IPs depending on the
        provider's ``use_internal_ips`` setting.
    """
    # Use a context manager so the config file handle is always closed
    # (previously the handle returned by open() was leaked).
    with open(config_file) as f:
        # NOTE(review): yaml.load can execute arbitrary constructors on
        # untrusted input; prefer yaml.safe_load unless the config
        # legitimately uses custom tags -- confirm before changing.
        config = yaml.load(f.read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: "worker"})
        if config.get("provider", {}).get("use_internal_ips", False) is True:
            return [provider.internal_ip(node) for node in nodes]
        else:
            return [provider.external_ip(node) for node in nodes]
    finally:
        # Always release provider resources, even if a lookup fails.
        provider.cleanup()
|
def parse_set(string):
    """Parse a set of items from a comma-separated string.

    An empty or all-whitespace string yields an empty set.
    """
    stripped = string.strip()
    return set(stripped.split(",")) if stripped else set()
|
def _load_info(self):
    """Fetch and cache this bot's id and username via ``get_me()``.

    Backs the `.username` and `.id` properties.

    :return: None
    """
    me = self.get_me()
    if not self.return_python_objects:
        # Raw API response: a dict wrapped in a "result" envelope.
        result = me["result"]
        self._id = result["id"]
        self._username = result["username"]
    else:
        self._id = me.id
        self._username = me.username
|
def is_volatile(self):
    """True if the combination of field access properties results in a
    field that should be interpreted as volatile.

    (Any hardware-writable field is inherently volatile.)
    """
    hw = self.get_property('hw')
    hw_writable = hw in (rdltypes.AccessType.rw, rdltypes.AccessType.rw1,
                         rdltypes.AccessType.w, rdltypes.AccessType.w1)
    # Short-circuit order and raw truthy values preserved from the
    # original expression.
    return (hw_writable
            or self.get_property('counter')
            or (self.get_property('next') is not None)
            or self.get_property('hwset')
            or self.get_property('hwclr'))
|
def process_filter_args(cls, kwargs):
    """Loop through properties in filter parameters, check they match the
    class definition, deflate them and convert into something easy to
    generate cypher from.

    Returns a dict mapping database property name to an
    ``(operator, deflated_value)`` tuple.
    """
    output = {}
    for key, value in kwargs.items():
        if '__' in key:
            # NOTE(review): rsplit('__') with no maxsplit splits on every
            # occurrence, so a key containing more than one '__' would
            # raise on unpacking -- confirm whether maxsplit=1 was meant.
            prop, operator = key.rsplit('__')
            operator = OPERATOR_TABLE[operator]
        else:
            prop = key
            operator = '='
        if prop not in cls.defined_properties(rels=False):
            raise ValueError("No such property {0} on {1}".format(prop, cls.__name__))
        property_obj = getattr(cls, prop)
        if isinstance(property_obj, AliasProperty):
            # Aliases are resolved to the target property before deflating.
            prop = property_obj.aliased_to()
            deflated_value = getattr(cls, prop).deflate(value)
        else:
            # handle special operators
            if operator == _SPECIAL_OPERATOR_IN:
                if not isinstance(value, tuple) and not isinstance(value, list):
                    raise ValueError('Value must be a tuple or list for IN operation {0}={1}'.format(key, value))
                deflated_value = [property_obj.deflate(v) for v in value]
            elif operator == _SPECIAL_OPERATOR_ISNULL:
                if not isinstance(value, bool):
                    raise ValueError('Value must be a bool for isnull operation on {0}'.format(key))
                operator = 'IS NULL' if value else 'IS NOT NULL'
                deflated_value = None
            elif operator in _REGEX_OPERATOR_TABLE.values():
                deflated_value = property_obj.deflate(value)
                if not isinstance(deflated_value, basestring):
                    raise ValueError('Must be a string value for {0}'.format(key))
                if operator in _STRING_REGEX_OPERATOR_TABLE.values():
                    # String-style regex operators (startswith etc.) get
                    # their value escaped, then wrapped by the operator
                    # template into a full regex.
                    deflated_value = re.escape(deflated_value)
                deflated_value = operator.format(deflated_value)
                operator = _SPECIAL_OPERATOR_REGEX
            else:
                deflated_value = property_obj.deflate(value)
        # map property to correct property name in the database
        db_property = cls.defined_properties(rels=False)[prop].db_property or prop
        output[db_property] = (operator, deflated_value)
    return output
|
def take_off(self, height=DEFAULT, velocity=DEFAULT):
    """Takes off, that is starts the motors, goes straight up and hovers.

    Do not call this function if you use the ``with`` keyword. Take off
    is done automatically when the context is created.

    :param height: the height (meters) to hover at. None uses the
        default height set when constructed.
    :param velocity: the velocity (meters/second) when taking off.
    :return: None
    """
    if self._is_flying:
        raise Exception('Already flying')
    if not self._cf.is_connected():
        raise Exception('Crazyflie is not connected')
    self._is_flying = True
    # Prepare the vehicle before commanding motion.
    self._reset_position_estimator()
    self._activate_controller()
    self._activate_high_level_commander()
    self._hl_commander = self._cf.high_level_commander
    # Resolve DEFAULT sentinels to their configured values.
    height = self._height(height)
    # Travel time = distance / speed; block until the maneuver completes.
    duration_s = height / self._velocity(velocity)
    self._hl_commander.takeoff(height, duration_s)
    time.sleep(duration_s)
    self._z = height
|
async def start_worker(self, cmd: List[str], input_source: str,
                       output: Optional[str] = None,
                       extra_cmd: Optional[str] = None,
                       pattern: Optional[str] = None,
                       reading: str = FFMPEG_STDERR) -> None:
    """Start ffmpeg and process data from the selected output stream.

    No-op (with a warning) if a worker is already running.

    :param cmd: ffmpeg command arguments.
    :param input_source: input source handed to ffmpeg.
    :param output: optional output target.
    :param extra_cmd: extra command-line options.
    :param pattern: optional pattern forwarded to the line processor.
    :param reading: which pipe to read (FFMPEG_STDERR or stdout).
    """
    if self.is_running:
        # Fixed typo in the warning message ("allready" -> "already").
        _LOGGER.warning("Can't start worker. It is already running!")
        return
    # Pipe exactly one of stdout/stderr, depending on `reading`.
    if reading == FFMPEG_STDERR:
        stdout = False
        stderr = True
    else:
        stdout = True
        stderr = False
    # Start ffmpeg and begin reading into the queue.
    await self.open(cmd=cmd, input_source=input_source, output=output,
                    extra_cmd=extra_cmd, stdout_pipe=stdout,
                    stderr_pipe=stderr)
    self._input = await self.get_reader(reading)
    # Start background processing.
    self._read_task = self._loop.create_task(self._process_lines(pattern))
    self._loop.create_task(self._worker_process())
|
def spawn(cls, options=None, dir_base=None):
    """Alternative constructor. Creates a mutator and returns section object.

    :param dict options:
    :param str|unicode dir_base:
    :rtype: SectionMutator
    """
    from uwsgiconf.utils import ConfModule

    # Falsy options (None or {}) fall back to the default option set.
    options = options or {'compile': True}
    dir_base = os.path.abspath(dir_base or find_project_dir())
    name_project = get_project_name(dir_base)
    name_module = ConfModule.default_name
    path_conf = os.path.join(dir_base, name_module)

    if os.path.exists(path_conf):
        # Read an existing config for further modification of first section.
        section = cls._get_section_existing(name_module, name_project)
    else:
        # Create section on-fly.
        section = cls._get_section_new(dir_base)

    mutator = cls(section=section, dir_base=dir_base,
                  project_name=name_project, options=options)
    mutator.mutate()
    return mutator
|
def digest(algorithm=DEFAULT_HASH_ALGORITHM, hash_library=DEFAULT_HASH_LIBRARY):
    """<Purpose>
      Provide the caller with the ability to create digest objects without
      having to worry about crypto library availability or which library
      to use. The caller may specify the hash algorithm and/or library.

      # Creation of a digest object using defaults or by specifying hash
      # algorithm and library.
      digest_object = securesystemslib.hash.digest()
      digest_object = securesystemslib.hash.digest('sha384')
      digest_object = securesystemslib.hash.digest('sha256', 'hashlib')

      # The expected interface for digest objects.
      digest_object.digest_size
      digest_object.hexdigest()
      digest_object.update('data')
      digest_object.digest()

    <Arguments>
      algorithm:
        The hash algorithm (e.g., 'md5', 'sha1', 'sha256').
      hash_library:
        The crypto library to use for the given hash algorithm
        (e.g., 'hashlib').

    <Exceptions>
      securesystemslib.exceptions.FormatError, if the arguments are
      improperly formatted.
      securesystemslib.exceptions.UnsupportedAlgorithmError, if an
      unsupported hashing algorithm is specified.
      securesystemslib.exceptions.UnsupportedLibraryError, if an
      unsupported library was requested via 'hash_library'.

    <Side Effects>
      None.

    <Returns>
      Digest object (e.g., hashlib.new(algorithm)).
    """
    # Are the arguments properly formatted? If not, raise
    # 'securesystemslib.exceptions.FormatError'.
    securesystemslib.formats.NAME_SCHEMA.check_match(algorithm)
    securesystemslib.formats.NAME_SCHEMA.check_match(hash_library)

    # Was a hashlib digest object requested and is it supported?
    # If so, return the digest object.
    if hash_library == 'hashlib' and hash_library in SUPPORTED_LIBRARIES:
        try:
            return hashlib.new(algorithm)
        except ValueError:
            raise securesystemslib.exceptions.UnsupportedAlgorithmError(algorithm)

    elif hash_library == 'pyca_crypto' and hash_library in SUPPORTED_LIBRARIES:  # pragma: no cover
        # TODO: Add support for pyca/cryptography's hashing routines.
        # Previously this branch fell through and silently returned None,
        # violating the documented contract of returning a digest object.
        # Raise explicitly until support is implemented.
        raise securesystemslib.exceptions.UnsupportedLibraryError(
            'pyca_crypto hashing is not yet implemented.')

    # The requested hash library is not supported.
    else:
        raise securesystemslib.exceptions.UnsupportedLibraryError(
            'Unsupported library requested. Supported hash libraries: '
            + repr(SUPPORTED_LIBRARIES))
|
def parser(self):
    """Creates a parser for the method based on the documentation.

    :return: <OptionParser>
    """
    usage = self.usage()
    if self.__doc__:
        usage += '\n' + nstr(self.__doc__)
    parse = PARSER_CLASS(usage=usage)
    # Invert the short-key map so we can look up a short flag per option.
    shorts = dict((v, k) for k, v in self.short_keys.items())
    for key, default in self.cmd_opts.items():
        # default key, cannot be duplicated
        if key == 'help':
            continue
        short = '-' + shorts[key] if key in shorts else ''
        # Boolean defaults become toggling flags; True defaults toggle
        # the flag off, False defaults toggle it on.
        if default is True:
            action = 'store_false'
        elif default is False:
            action = 'store_true'
        else:
            action = 'store'
        # add the option
        parse.add_option(short, '--%s' % key, action=action, default=default)
    return parse
|
def finish(self, key1set, key2set, mask=True):
    """Returns (weights, means, variances), where:

    weights
        ndarray of number of samples per key; shape (n1, n2), where n1 is
        the size of key1set and n2 is the size of key2set.
    means
        computed mean value for each key
    variances
        computed variance for each key

    The internal accumulators are released and the key maps cleared, so
    this can only be called once per accumulation.
    """
    # "us" = internal accumulator coordinates (keys actually seen);
    # "them" = the caller-requested key sets.
    n1_us = len(self._key1map)
    n2_us = len(self._key2map)
    wt_us = self._m0[:n1_us, :n2_us]
    # Cells with zero or non-finite weight cannot produce valid stats:
    # temporarily force their weight to 1 to avoid divide-by-zero, then
    # restore 0 and mark their stats as NaN below.
    badwt = (wt_us == 0) | ~np.isfinite(wt_us)
    wt_us[badwt] = 1
    mean_us = self._m1[:n1_us, :n2_us] / wt_us
    # var = E[x^2] - (E[x])^2
    var_us = self._m2[:n1_us, :n2_us] / wt_us - mean_us ** 2
    wt_us[badwt] = 0
    mean_us[badwt] = np.nan
    var_us[badwt] = np.nan
    n1_them = len(key1set)
    n2_them = len(key2set)
    wt = np.zeros((n1_them, n2_them), dtype=self._m0.dtype)
    mean = np.empty((n1_them, n2_them), dtype=self._m1.dtype)
    mean.fill(np.nan)
    var = np.empty_like(mean)
    var.fill(np.nan)
    # You can't fancy-index on two axes simultaneously, so we do a manual
    # loop on the first axis.
    # NOTE(review): self._keyNmap lookups appear to auto-assign indices
    # (>= nN_us) for unseen keys, hence the range checks -- confirm the
    # maps are defaultdict-like.
    us_idx2 = []
    them_idx2 = []
    for them_i2, key2 in enumerate(key2set):
        us_i2 = self._key2map[key2]
        if us_i2 < n2_us:
            them_idx2.append(them_i2)
            us_idx2.append(us_i2)
        # otherwise, we must not have seen that key
    for them_i1, key1 in enumerate(key1set):
        us_i1 = self._key1map[key1]
        if us_i1 >= n1_us:
            continue
        # don't have this key
        wt[them_i1, them_idx2] = wt_us[us_i1, us_idx2]
        mean[them_i1, them_idx2] = mean_us[us_i1, us_idx2]
        var[them_i1, them_idx2] = var_us[us_i1, us_idx2]
    if mask:
        # Mask out cells with no valid statistics.
        m = ~np.isfinite(mean)
        mean = np.ma.MaskedArray(mean, m)
        var = np.ma.MaskedArray(var, m)
    # Release accumulators; the object cannot be reused after finish().
    self._m0 = self._m1 = self._m2 = None
    self._key1map.clear()
    self._key2map.clear()
    return wt, mean, var
|
def activated(self, value):
    """Setter for **self.__activated** attribute.

    :param value: Attribute value.
    :type value: bool
    """
    if value is not None:
        assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("activated", value)
        # Emit the matching activation signal.
        if value:
            self.component_activated.emit()
        else:
            self.component_deactivated.emit()
    self.__activated = value
|
def add_weight(self, weight):
    """Validate ``weight`` and store it on the underlying form (stub)."""
    if weight is None:
        raise NullArgument('weight cannot be None')
    metadata = self.get_weight_metadata()
    if not self.my_osid_object_form._is_valid_decimal(weight, metadata):
        raise InvalidArgument('weight')
    self.my_osid_object_form._my_map['weight'] = weight
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.