signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _PrintProcessingTime ( self , processing_status ) :
"""Prints the processing time .
Args :
processing _ status ( ProcessingStatus ) : processing status .""" | if not processing_status :
processing_time = '00:00:00'
else :
processing_time = time . time ( ) - processing_status . start_time
time_struct = time . gmtime ( processing_time )
processing_time = time . strftime ( '%H:%M:%S' , time_struct )
self . _output_writer . Write ( 'Processing time\t\t: {0:s}\n' . format ( processing_time ) ) |
def abort(code, error=None, message=None):
    """Abort with a suitable error response.

    Args:
        code (int): status code
        error (str): error symbol or flask.Response
        message (str): error message
    """
    if error is None:
        # No error detail at all: plain HTTP abort.
        flask_abort(code)
    elif isinstance(error, Response):
        # Caller supplied a prepared response; just force the status code.
        error.status_code = code
        flask_abort(code, response=error)
    else:
        # Build a structured JSON error body.
        payload = {"status": code, "error": error, "message": message}
        flask_abort(code, response=export(payload, code))
def update_item(event, updated_attributes, calendar_item_update_operation_type):
    """Saves updates to an event in the store.

    Only requests changes for attributes that have actually changed.
    """
    root = M.UpdateItem(
        M.ItemChanges(
            T.ItemChange(
                T.ItemId(Id=event.id, ChangeKey=event.change_key),
                T.Updates())),
        ConflictResolution=u"AlwaysOverwrite",
        MessageDisposition=u"SendAndSaveCopy",
        SendMeetingInvitationsOrCancellations=calendar_item_update_operation_type)
    updates = root.xpath(
        u'/m:UpdateItem/m:ItemChanges/t:ItemChange/t:Updates',
        namespaces=NAMESPACES)[0]

    def set_field(field_uri, node):
        # Append a SetItemField entry for one changed attribute.
        updates.append(update_property_node(field_uri=field_uri, node_to_insert=node))

    if u'html_body' in updated_attributes:
        set_field("item:Body", T.Body(event.html_body, BodyType="HTML"))
    if u'text_body' in updated_attributes:
        set_field("item:Body", T.Body(event.text_body, BodyType="Text"))
    if u'subject' in updated_attributes:
        set_field("item:Subject", T.Subject(event.subject))
    if u'start' in updated_attributes:
        start = convert_datetime_to_utc(event.start)
        set_field("calendar:Start", T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
    if u'end' in updated_attributes:
        end = convert_datetime_to_utc(event.end)
        set_field("calendar:End", T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
    if u'location' in updated_attributes:
        set_field("calendar:Location", T.Location(event.location))
    if u'attendees' in updated_attributes:
        # An empty attendee list means the field must be deleted, not set empty.
        if event.required_attendees:
            set_field("calendar:RequiredAttendees",
                      resource_node(element=T.RequiredAttendees(),
                                    resources=event.required_attendees))
        else:
            updates.append(delete_field(field_uri="calendar:RequiredAttendees"))
        if event.optional_attendees:
            set_field("calendar:OptionalAttendees",
                      resource_node(element=T.OptionalAttendees(),
                                    resources=event.optional_attendees))
        else:
            updates.append(delete_field(field_uri="calendar:OptionalAttendees"))
    if u'resources' in updated_attributes:
        if event.resources:
            set_field("calendar:Resources",
                      resource_node(element=T.Resources(), resources=event.resources))
        else:
            updates.append(delete_field(field_uri="calendar:Resources"))
    if u'reminder_minutes_before_start' in updated_attributes:
        if event.reminder_minutes_before_start:
            set_field("item:ReminderIsSet", T.ReminderIsSet('true'))
            set_field("item:ReminderMinutesBeforeStart",
                      T.ReminderMinutesBeforeStart(
                          str(event.reminder_minutes_before_start)))
        else:
            set_field("item:ReminderIsSet", T.ReminderIsSet('false'))
    if u'is_all_day' in updated_attributes:
        set_field("calendar:IsAllDayEvent",
                  T.IsAllDayEvent(str(event.is_all_day).lower()))
    # Rebuild the full recurrence node if any recurrence attribute changed.
    for attr in event.RECURRENCE_ATTRIBUTES:
        if attr in updated_attributes:
            recurrence = T.Recurrence()
            if event.recurrence == 'daily':
                recurrence.append(
                    T.DailyRecurrence(T.Interval(str(event.recurrence_interval))))
            elif event.recurrence == 'weekly':
                recurrence.append(
                    T.WeeklyRecurrence(
                        T.Interval(str(event.recurrence_interval)),
                        T.DaysOfWeek(event.recurrence_days)))
            elif event.recurrence == 'monthly':
                recurrence.append(
                    T.AbsoluteMonthlyRecurrence(
                        T.Interval(str(event.recurrence_interval)),
                        T.DayOfMonth(str(event.start.day))))
            elif event.recurrence == 'yearly':
                recurrence.append(
                    T.AbsoluteYearlyRecurrence(
                        T.DayOfMonth(str(event.start.day)),
                        T.Month(event.start.strftime("%B"))))
            recurrence.append(
                T.EndDateRecurrence(
                    T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
                    T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT))))
            set_field("calendar:Recurrence", recurrence)
    return root
def cli(env, context_id, friendly_name, remote_peer, preshared_key,
        phase1_auth, phase1_crypto, phase1_dh, phase1_key_ttl,
        phase2_auth, phase2_crypto, phase2_dh, phase2_forward_secrecy,
        phase2_key_ttl):
    """Update tunnel context properties.

    Updates are made atomically, so either all are accepted or none are.
    Key life values must be in the range 120-172800.
    Phase 2 perfect forward secrecy must be in the range 0-1.
    A separate configuration request should be made to realize changes on
    network devices.
    """
    manager = SoftLayer.IPSECManager(env.client)
    updated = manager.update_tunnel_context(
        context_id,
        friendly_name=friendly_name,
        remote_peer=remote_peer,
        preshared_key=preshared_key,
        phase1_auth=phase1_auth,
        phase1_crypto=phase1_crypto,
        phase1_dh=phase1_dh,
        phase1_key_ttl=phase1_key_ttl,
        phase2_auth=phase2_auth,
        phase2_crypto=phase2_crypto,
        phase2_dh=phase2_dh,
        phase2_forward_secrecy=phase2_forward_secrecy,
        phase2_key_ttl=phase2_key_ttl)
    if not updated:
        raise CLIHalt('Failed to update context #{}'.format(context_id))
    env.out('Updated context #{}'.format(context_id))
def number_of_permutations(self):
    """Returns the number of permutations of this coordination geometry."""
    # Both the safe-override flag and an undefined permutation list fall
    # back to the full factorial count of the coordination number.
    if self.permutations_safe_override or self.permutations is None:
        return factorial(self.coordination)
    return len(self.permutations)
def category(soup):
    """Find the category from subject areas."""
    return [node_text(tag) for tag in raw_parser.category(soup)]
def unrate_url(obj):
    """Generates a link to "un-rate" the given object - this
    can be used as a form target or for POSTing via Ajax."""
    content_type = ContentType.objects.get_for_model(obj)
    return reverse('ratings_unrate_object', args=(content_type.pk, obj.pk))
def copy_assets(self, assets_path):
    """Copy this formatter's extra files (and any contributed via the
    extra-files signal) into ``assets_path``, creating directories as needed.
    """
    if not os.path.exists(assets_path):
        os.mkdir(assets_path)

    extra_files = self._get_extra_files()
    # Let signal subscribers contribute additional (src, dest) pairs.
    for contributed in Formatter.get_extra_files_signal(self):
        extra_files.extend(contributed)

    for src, dest in extra_files:
        dest = os.path.join(assets_path, dest)
        dest_dir = os.path.dirname(dest)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        if os.path.isfile(src):
            shutil.copy(src, dest)
        elif os.path.isdir(src):
            recursive_overwrite(src, dest)
def _auto_connect ( self ) :
"""Attempts to connect to the roaster every quarter of a second .""" | while not self . _teardown . value :
try :
self . _connect ( )
return True
except exceptions . RoasterLookupError :
time . sleep ( .25 )
return False |
def _FormatServiceText ( self , service ) :
"""Produces a human readable multi - line string representing the service .
Args :
service ( WindowsService ) : service to format .
Returns :
str : human readable representation of a Windows Service .""" | string_segments = [ service . name , '\tImage Path = {0:s}' . format ( service . image_path ) , '\tService Type = {0:s}' . format ( service . HumanReadableType ( ) ) , '\tStart Type = {0:s}' . format ( service . HumanReadableStartType ( ) ) , '\tService Dll = {0:s}' . format ( service . service_dll ) , '\tObject Name = {0:s}' . format ( service . object_name ) , '\tSources:' ]
for source in service . sources :
string_segments . append ( '\t\t{0:s}:{1:s}' . format ( source [ 0 ] , source [ 1 ] ) )
return '\n' . join ( string_segments ) |
def write_padding(fp, size, divisor=2):
    """Writes padding bytes given the currently written size.

    :param fp: file-like object
    :param divisor: divisor of the byte alignment
    :return: written byte size
    """
    gap = size % divisor
    if not gap:
        return 0
    # '%dx' packs N zero bytes without consuming any arguments.
    return write_bytes(fp, struct.pack('%dx' % (divisor - gap)))
def _update_event_type(self_, watcher, event, triggered):
    """Returns an updated Event object with the type field set appropriately."""
    if triggered:
        kind = 'triggered'
    elif watcher.onlychanged:
        kind = 'changed'
    else:
        kind = 'set'
    return Event(what=event.what, name=event.name, obj=event.obj,
                 cls=event.cls, old=event.old, new=event.new, type=kind)
def exception(self, timeout=None):
    """Similar to result(), except returns the exception if any."""
    status = self._poll(timeout)
    if status.HasField('error'):
        return self._operation.error
    # No error field set: the operation completed cleanly.
    return None
def get_contour(mask):
    """Compute the image contour from a mask.

    The contour is computed in a very inefficient way using scikit-image
    and a conversion of float coordinates to pixel coordinates.

    Parameters
    ----------
    mask : binary ndarray of shape (M, N) or (K, M, N)
        The mask outlining the pixel positions of the event. If a 3d
        array is given, then `K` indexes the individual contours.

    Returns
    -------
    cont : ndarray or list of K ndarrays of shape (J, 2)
        A 2D array that holds the contour of an event (in pixels). The
        first and second columns correspond to the x- and y-coordinates
        of the contour.
    """
    # A single 2d mask is wrapped so the loop below handles both cases.
    single = isinstance(mask, np.ndarray) and len(mask.shape) == 2
    masks = [mask] if single else mask
    contours = []
    for mi in masks:
        raw = find_contours(mi.transpose(), level=.9999,
                            positive_orientation="low",
                            fully_connected="high")[0]
        # Round float coordinates to integer pixel values, drop duplicates.
        pixels = np.asarray(np.round(raw), int)
        contours.append(remove_duplicates(pixels))
    return contours[0] if single else contours
def existing_analysis(using):
    """Get the existing analysis for the `using` Elasticsearch connection."""
    es = connections.get_connection(using)
    index_name = settings.ELASTICSEARCH_CONNECTIONS[using]['index_name']
    if not es.indices.exists(index=index_name):
        return DOES_NOT_EXIST
    index_settings = es.indices.get_settings(index=index_name)
    analysis = index_settings[index_name]['settings']['index'].get('analysis', {})
    return stringer(analysis)
def insert_image(filename, extnum_filename, auximage, extnum_auximage):
    """Replace image in filename by another image (same size) in auximage.

    Parameters
    ----------
    filename : str
        File name where the new image will be inserted.
    extnum_filename : int
        Extension number in filename where the new image will be
        inserted. Note that the first extension is 1 (and not zero).
    auximage : str
        File name of the new image.
    extnum_auximage : int
        Extension number where the new image is located in auximage.
        Note that the first extension is 1 (and not zero).
    """
    # Read the replacement image.
    with fits.open(auximage) as hdulist:
        newimage = hdulist[extnum_auximage].data
    # Open the destination in update mode; the context manager guarantees
    # the file is closed (and pending changes flushed) even if an error
    # occurs, where the previous explicit open/close could leak the handle.
    with fits.open(filename, mode='update') as hdulist:
        oldimage_shape = hdulist[extnum_filename].data.shape
        if oldimage_shape == newimage.shape:
            hdulist[extnum_filename].data = newimage
            hdulist.flush()
        else:
            print('filename shape:', oldimage_shape)
            print('newimage shape:', newimage.shape)
            print("ERROR: new image doesn't have the same shape")
def correlation_plot(self, data):
    """Create heatmap of Pearson's correlation coefficient.

    Parameters
    ----------
    data : pd.DataFrame
        Data to display.

    Returns
    -------
    matplotlib.figure
        Heatmap.
    """
    # CHECK: Add saved filename in result.json
    fig = plt.figure(Plot_Data.count)
    Plot_Data.count += 1
    sns.heatmap(data.corr())
    return fig
def submit_import(cls, volume, location, project=None, name=None,
                  overwrite=False, properties=None, parent=None,
                  preserve_folder_structure=True, api=None):
    """Submits new import job.

    :param volume: Volume identifier.
    :param location: Volume location.
    :param project: Project identifier.
    :param name: Optional file name.
    :param overwrite: If true it will overwrite file if exists.
    :param properties: Properties dictionary.
    :param parent: The ID of the target folder to which the item should be
        imported. Should not be used together with project.
    :param preserve_folder_structure: Whether to keep the exact source
        folder structure. The default value is true if the item being
        imported is a folder. Should not be used if you are importing
        a file.
    :param api: Api instance.
    :return: Import object.
    """
    volume = Transform.to_volume(volume)

    # Exactly one of project/parent must identify the destination.
    if project and parent:
        raise SbgError('Project and parent identifiers are mutually exclusive')
    if project:
        destination = {'project': Transform.to_project(project)}
    elif parent:
        destination = {'parent': Transform.to_file(parent)}
    else:
        raise SbgError('Project or parent identifier is required.')
    if name:
        destination['name'] = name

    data = {
        'source': {'volume': volume, 'location': location},
        'destination': destination,
        'overwrite': overwrite,
    }
    # Only sent when disabled; the server default is folder-dependent.
    if not preserve_folder_structure:
        data['preserve_folder_structure'] = preserve_folder_structure
    if properties:
        data['properties'] = properties

    api = api or cls._API
    logger.info('Submitting import',
                extra={'resource': cls.__name__, 'query': data})
    _import = api.post(cls._URL['query'], data=data).json()
    return Import(api=api, **_import)
def run(self):
    """This AI simply moves the characters towards the opposite
    edges of the grid for 3 steps or until an event halts the
    simulation."""
    dx, dy = 1, 0  # walk direction
    steps = 0
    # Step count is checked first so get_state() is called once per step,
    # exactly as many times as commands are issued.
    while steps < 3 and self.s.get_state() != 'Halted':
        self.s.command(
            {'name': 'walk', 'type': 'move', 'direction': [dx, dy]}, self.a1)
        self.s.command(
            {'name': 'walk', 'type': 'run', 'direction': [dx, dy + 1]}, self.a2)
        steps += 1
    for agent in self.s.agents:
        print(agent.name, 'finished at position ',
              agent.coords['x'], agent.coords['y'])
def get_query_string(request, new_params=None, remove=None):
    """Given the request, return the query string.

    Parameters can be added or removed as necessary. Keys in ``remove``
    are treated as prefixes; ``new_params`` values of ``None`` delete the
    corresponding key.

    Code snippet taken from Django admin app (views/main.py)
    (c) Copyright Django Software Foundation and individual contributors.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
    3. Neither the name of Django nor the names of its contributors may be used
       to endorse or promote products derived from this software without
       specific prior written permission.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
    """
    if new_params is None:
        new_params = {}
    if remove is None:
        remove = []
    p = dict(request.GET.items())
    for prefix in remove:
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live key view raises RuntimeError on Python 3.
        for key in list(p.keys()):
            if key.startswith(prefix):
                del p[key]
    for key, value in new_params.items():
        if value is None:
            # None means "drop this parameter if present".
            p.pop(key, None)
        else:
            p[key] = value
    return '?%s' % urlencode(p)
def _send(self):
    """Send all queued messages to the server."""
    data = self.output_buffer.view()
    if not data:
        # Nothing queued; avoid touching a possibly-dead socket.
        return
    if self.closed():
        raise self.Error(
            "Failed to write to closed connection {!r}".format(self.server.address))
    if self.defunct():
        raise self.Error(
            "Failed to write to defunct connection {!r}".format(self.server.address))
    self.socket.sendall(data)
    self.output_buffer.clear()
def expect(self, expect, searchwindowsize=None, maxread=None, timeout=None,
           iteration_n=1):
    """Handle child expects, with EOF and TIMEOUT handled.

    iteration_n - Number of times this expect has been called for the send.
                  If 1 (the default) then it gets added to the pane of output
                  (if applicable to this run)
    """
    if isinstance(expect, str):
        expect = [expect]
    # Temporarily override the pexpect child's search window and read-ahead
    # sizes, restoring the previous values after the expect call.
    # ``is not None`` (rather than ``!= None``) so a legitimate 0 is honored.
    if searchwindowsize is not None:
        old_searchwindowsize = self.pexpect_child.searchwindowsize
        self.pexpect_child.searchwindowsize = searchwindowsize
    if maxread is not None:
        old_maxread = self.pexpect_child.maxread
        self.pexpect_child.maxread = maxread
    res = self.pexpect_child.expect(
        expect + [pexpect.TIMEOUT, pexpect.EOF], timeout=timeout)
    if searchwindowsize is not None:
        self.pexpect_child.searchwindowsize = old_searchwindowsize
    if maxread is not None:
        self.pexpect_child.maxread = old_maxread
    # Add to session lines only if a pane manager exists and this is the
    # first expect for the send.
    if shutit_global.shutit_global_object.pane_manager and iteration_n == 1:
        time_seen = time.time()
        lines_to_add = []
        if isinstance(self.pexpect_child.before, (str, unicode)):
            lines_to_add.extend(self.pexpect_child.before.split('\n'))
        if isinstance(self.pexpect_child.after, (str, unicode)):
            lines_to_add.extend(self.pexpect_child.after.split('\n'))
        for line in lines_to_add:
            self.session_output_lines.append(
                SessionPaneLine(line_str=line, time_seen=time_seen,
                                line_type='output'))
    return res
def hil_optical_flow_encode(self, time_usec, sensor_id, integration_time_us,
                            integrated_x, integrated_y, integrated_xgyro,
                            integrated_ygyro, integrated_zgyro, temperature,
                            quality, time_delta_distance_us, distance):
    '''Simulated optical flow from a flow sensor (e.g. PX4FLOW or optical
    mouse sensor).

    time_usec              : Timestamp in microseconds, synced to UNIX time
                             or since system boot (uint64_t)
    sensor_id              : Sensor ID (uint8_t)
    integration_time_us    : Integration time in microseconds; divide
                             integrated_x/y by it for average flow (uint32_t)
    integrated_x           : Flow in radians around X axis (float)
    integrated_y           : Flow in radians around Y axis (float)
    integrated_xgyro       : RH rotation around X axis (rad) (float)
    integrated_ygyro       : RH rotation around Y axis (rad) (float)
    integrated_zgyro       : RH rotation around Z axis (rad) (float)
    temperature            : Temperature * 100 in centi-degrees Celsius (int16_t)
    quality                : Flow quality; 0: no valid flow, 255: max (uint8_t)
    time_delta_distance_us : Microseconds since the distance was sampled (uint32_t)
    distance               : Distance to center of flow field in meters;
                             negative means unknown (float)
    '''
    return MAVLink_hil_optical_flow_message(
        time_usec, sensor_id, integration_time_us, integrated_x, integrated_y,
        integrated_xgyro, integrated_ygyro, integrated_zgyro, temperature,
        quality, time_delta_distance_us, distance)
def prefix(cls, name):
    """Create a new TreeModel where class attribute
    names are prefixed with ``name``."""
    attrs = {name + attr: value for attr, value in cls.get_attrs()}
    return TreeModelMeta('_'.join([name, cls.__name__]), (TreeModel,), attrs)
def iter_forks(self, number=-1, etag=None):
    """Iterator of forks of this gist.

    .. versionchanged:: 0.9
        Added params ``number`` and ``etag``.

    :param int number: (optional), number of forks to iterate over.
        Default: -1 will iterate over all forks of this gist.
    :param str etag: (optional), ETag from a previous request to this
        endpoint.
    :returns: generator of :class:`Gist <Gist>`
    """
    forks_url = self._build_url('forks', base_url=self._api)
    return self._iter(int(number), forks_url, Gist, etag=etag)
def sequence_equal(self, second_iterable, equality_comparer=operator.eq):
    '''Determine whether two sequences are equal by elementwise comparison.

    Sequence equality is defined as the two sequences being equal length
    and corresponding elements being equal as determined by the equality
    comparer.

    Note: This method uses immediate execution.

    Args:
        second_iterable: The sequence which will be compared with the
            source sequence.
        equality_comparer: An optional binary predicate function which is
            used to compare corresponding elements. Should return True if
            the elements are equal, otherwise False. The default equality
            comparer is operator.eq which calls __eq__ on elements of the
            source sequence with the corresponding element of the second
            sequence as a parameter.

    Returns:
        True if the sequences are equal, otherwise False.

    Raises:
        ValueError: If the Queryable is closed.
        TypeError: If second_iterable is not in fact iterable.
        TypeError: If equality_comparer is not callable.
    '''
    # Error messages previously named to_tuple()/aggregate() — copy-paste
    # from sibling methods; they must reference sequence_equal().
    if self.closed():
        raise ValueError("Attempt to call sequence_equal() on a "
                         "closed Queryable.")
    if not is_iterable(second_iterable):
        raise TypeError("Cannot compute sequence_equal() with "
                        "second_iterable of non-iterable {type}".format(
                            type=str(type(second_iterable))[7:-1]))
    if not is_callable(equality_comparer):
        raise TypeError("sequence_equal() parameter "
                        "equality_comparer={equality_comparer} is not "
                        "callable".format(
                            equality_comparer=repr(equality_comparer)))
    # Try to check the lengths directly as an optimization.
    try:
        if len(self._iterable) != len(second_iterable):
            return False
    except TypeError:
        pass
    # The sentinel marks exhaustion of the shorter sequence.
    sentinel = object()
    for first, second in izip_longest(self, second_iterable,
                                      fillvalue=sentinel):
        if first is sentinel or second is sentinel:
            return False
        if not equality_comparer(first, second):
            return False
    return True
def handle_message(self, message: BaseMessage, responder: Responder,
                   create_task: bool = True):
    """Public method to handle a message. It requires:

    - A message from the platform
    - A responder from the platform

    If `create_task` is true, then the task will automatically be added to
    the loop. However, if it is not, the coroutine will be returned and it
    will be the responsibility of the caller to run/start the task.
    """
    # The original signature annotated this as ``create_task: True`` — the
    # value True is not a type, and the parameter had no default despite
    # the docstring implying one. ``bool = True`` restores the contract.
    coro = self._handle_message(message, responder)
    if create_task:
        loop = asyncio.get_event_loop()
        loop.create_task(coro)
    else:
        return coro
def start_trace(full=False, frame=None, below=0, under=None, server=None,
                port=None):
    """Start tracing program at callee level,
    breaking on exception/breakpoints."""
    debugger = Wdb.get(server=server, port=port)
    if not debugger.stepping:
        # Default to the caller's frame when none is supplied.
        target_frame = frame or sys._getframe().f_back
        debugger.start_trace(full, target_frame, below, under)
    return debugger
def prev(self):
    """Return the previous window."""
    handles = self._browser.driver.window_handles
    return Window(self._browser, handles[self.index - 1])
def parse(to_parse, ignore_whitespace_text_nodes=True, adapter=None):
    """Parse an XML document into an *xml4h*-wrapped DOM representation
    using an underlying XML library implementation.

    :param to_parse: an XML document file, document string, or the
        path to an XML file. If a string value is given that contains
        a ``<`` character it is treated as literal XML data, otherwise
        a string value is treated as a file path.
    :type to_parse: a file-like object or string
    :param bool ignore_whitespace_text_nodes: if ``True`` pure whitespace
        nodes are stripped from the parsed document, since these are
        usually noise introduced by XML docs serialized to be human-friendly.
    :param adapter: the *xml4h* implementation adapter class used to parse
        the document and to interact with the resulting nodes.
        If None, :attr:`best_adapter` will be used.
    :type adapter: adapter class or None
    :return: an :class:`xml4h.nodes.Document` node representing the
        parsed document.

    Delegates to an adapter's :meth:`~xml4h.impls.interface.parse_string` or
    :meth:`~xml4h.impls.interface.parse_file` implementation.
    """
    impl = best_adapter if adapter is None else adapter
    # A string containing '<' is literal XML; anything else is a path/file.
    if isinstance(to_parse, basestring) and '<' in to_parse:
        return impl.parse_string(to_parse, ignore_whitespace_text_nodes)
    return impl.parse_file(to_parse, ignore_whitespace_text_nodes)
def fix_local_scheme(home_dir, symlink=True):
    """Platforms that use the "posix_local" install scheme (like Ubuntu with
    Python 2.7) need to be given an additional "local" location, sigh."""
    try:
        import sysconfig
    except ImportError:
        return
    if sysconfig._get_default_scheme() != 'posix_local':
        return
    local_path = os.path.join(home_dir, 'local')
    if not os.path.exists(local_path):
        os.mkdir(local_path)
    # Mirror every top-level entry of home_dir under local/ (except local itself).
    for subdir_name in os.listdir(home_dir):
        if subdir_name == 'local':
            continue
        copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)),
                 os.path.join(local_path, subdir_name), symlink)
def sample_double_norm(mean, std_upper, std_lower, size):
    """Note that this function requires Scipy."""
    from scipy.special import erfinv
    # Draw uniform percentiles and map them through the two half-Gaussian
    # quantile functions. The peak must sit at `mean`, but if 50% of the
    # samples went to each half, the half with the smaller sigma would be
    # over-represented (1/sigma normalization of the Gaussian PDF). So a
    # fraction `cutoff` goes to the lower half; each half's percentiles are
    # then remapped onto [0, 0.5] / [0.5, 1] before inverting.
    samples = np.empty(size)
    percentiles = np.random.uniform(0., 1., size)
    cutoff = std_lower / (std_lower + std_upper)
    lower = percentiles < cutoff
    upper = ~lower
    percentiles[lower] *= 0.5 / cutoff
    samples[lower] = mean + np.sqrt(2) * std_lower * erfinv(
        2 * percentiles[lower] - 1)
    percentiles[upper] = 1 - (1 - percentiles[upper]) * 0.5 / (1 - cutoff)
    samples[upper] = mean + np.sqrt(2) * std_upper * erfinv(
        2 * percentiles[upper] - 1)
    return samples
def _validate_num_qubits ( state : np . ndarray ) -> int :
"""Validates that state ' s size is a power of 2 , returning number of qubits .""" | size = state . size
if size & ( size - 1 ) :
raise ValueError ( 'state.size ({}) is not a power of two.' . format ( size ) )
return size . bit_length ( ) - 1 |
def reload(self, hardware_id, post_uri=None, ssh_keys=None):
    """Perform an OS reload of a server with its current configuration.

    :param integer hardware_id: the instance ID to reload
    :param string post_uri: The URI of the post-install script to run
        after reload
    :param list ssh_keys: The SSH keys to add to the root user
    """
    config = {}
    if post_uri:
        config['customProvisionScriptUri'] = post_uri
    if ssh_keys:
        config['sshKeyIds'] = list(ssh_keys)
    return self.hardware.reloadOperatingSystem('FORCE', config, id=hardware_id)
def gaussian_points(loc=(0, 0), scale=(10, 10), n=100):
    """Generates and returns `n` normally distributed points centered at
    `loc` with `scale` x and y directionality."""
    coords = np.random.normal(loc, scale, (n, 2))
    points = [shapely.geometry.Point(x, y) for (x, y) in coords]
    return gpd.GeoSeries(points)
def append_form(self,
                obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]],
                headers: Optional['MultiMapping[str]'] = None) -> Payload:
    """Helper to append form urlencoded part."""
    assert isinstance(obj, (Sequence, Mapping))
    if headers is None:
        headers = CIMultiDict()
    # Mappings are flattened to key/value pairs before urlencoding.
    pairs = list(obj.items()) if isinstance(obj, Mapping) else obj
    data = urlencode(pairs, doseq=True)
    return self.append_payload(
        StringPayload(data, headers=headers,
                      content_type='application/x-www-form-urlencoded'))
def centerize(src, dst_shape, margin_color=None):
    """Centerize image for specified image size.

    @param src: image to centerize
    @param dst_shape: image shape (height, width) or (height, width, channel)
    @param margin_color: fill value for the margin around ``src``
                         (default: zeros of ``src.dtype``)
    @return: array of shape ``dst_shape`` with ``src`` centered inside
    """
    if src.shape[:2] == dst_shape[:2]:
        return src
    centerized = np.zeros(dst_shape, dtype=src.dtype)
    # Explicit None check: a margin color of 0 (black) is falsy but is still
    # a valid fill request; the original truthiness test silently skipped it
    # (harmless only because the buffer already contains zeros).
    if margin_color is not None:
        centerized[:, :] = margin_color
    pad_vertical, pad_horizontal = 0, 0
    h, w = src.shape[:2]
    dst_h, dst_w = dst_shape[:2]
    if h < dst_h:
        pad_vertical = (dst_h - h) // 2
    if w < dst_w:
        pad_horizontal = (dst_w - w) // 2
    # NOTE(review): assumes src fits inside dst_shape; a larger src would
    # raise on this assignment -- confirm callers guarantee that.
    centerized[pad_vertical:pad_vertical + h, pad_horizontal:pad_horizontal + w] = src
    return centerized
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
    """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.

    A latent function is drawn from a GP prior with an RBF kernel, Poisson
    count observations are sampled from its exponent, and a GP with a
    Poisson likelihood is fitted via the Laplace approximation.

    :param optimize: whether to optimize the model hyperparameters (SCG).
    :param plot: whether to plot the fit and the true underlying rate.
    :return: the fitted GPy model.
    """
    optimizer = 'scg'
    x_len = 100
    X = np.linspace(0, 10, x_len)[:, None]
    # Draw the "true" latent function from the GP prior with RBF covariance.
    f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
    # Poisson counts with rate exp(f): the log link keeps rates positive.
    Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:, None]
    kern = GPy.kern.RBF(1)
    poisson_lik = GPy.likelihoods.Poisson()
    # Laplace inference is needed because the likelihood is non-Gaussian.
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    # create simple GP Model
    m = GPy.core.GP(X, Y, kernel=kern, likelihood=poisson_lik, inference_method=laplace_inf)
    if optimize:
        m.optimize(optimizer)
    if plot:
        m.plot()
        # plot the real underlying rate function
        pb.plot(X, np.exp(f_true), '--k', linewidth=2)
    return m
def choice_prompt(prompt, choices=None, choice=None):
    '''Ask the user for a prompt, and only return when one of the requested
    options is provided.

    Parameters
        prompt: the prompt to ask the user
        choices: a list of choices that are valid, defaults to [Y/N/y/n]
        choice: a pre-supplied answer; returned immediately if already valid
    '''
    valid = choices if choices else ["y", "n", "Y", "N"]
    print(prompt)
    # Python 2 compatibility: prefer raw_input when it exists.
    reader = getattr(__builtins__, 'raw_input', input)
    options = '/'.join(valid)
    message = 'Please enter your choice [%s] : ' % options
    while choice not in valid:
        choice = reader(message).strip()
        # Subsequent prompts remind the user which answers are accepted.
        message = "Please enter a valid option in [%s]" % options
    return choice
def _add_thread(self, aThread):
    """Private method to add a thread object to the snapshot.

    Registers ``aThread`` in the snapshot's thread dictionary, keyed by its
    thread ID, and links it back to this process.

    @type  aThread: L{Thread}
    @param aThread: Thread object.
    """
    # Type and duplicate-ID validation below is intentionally disabled
    # (presumably for speed on this hot path) -- TODO confirm before re-enabling.
##    if not isinstance(aThread, Thread):
##        if hasattr(aThread, '__class__'):
##            typename = aThread.__class__.__name__
##        else:
##            typename = str(type(aThread))
##        msg = "Expected Thread, got %s instead" % typename
##        raise TypeError(msg)
    dwThreadId = aThread.dwThreadId
##    if dwThreadId in self.__threadDict:
##        msg = "Already have a Thread object with ID %d" % dwThreadId
##        raise KeyError(msg)
    # Link the thread to this process, then index it by thread ID.
    aThread.set_process(self)
    self.__threadDict[dwThreadId] = aThread
def http_basic_auth_get_user(request):
    """Inspect the given request to find a logged user. If not found, the header HTTP_AUTHORIZATION
    is read for 'Basic Auth' login and password, and try to authenticate against default UserModel.
    Always return a User instance (possibly anonymous, meaning authentication failed)
    """
    try:  # If standard auth middleware already authenticated a user, use it
        if user_is_authenticated(request.user):
            return request.user
    except AttributeError:
        pass
    # This was grabbed from https://www.djangosnippets.org/snippets/243/
    # Thanks to http://stackoverflow.com/a/1087736/1887976
    if 'HTTP_AUTHORIZATION' in request.META:
        auth_data = request.META['HTTP_AUTHORIZATION'].split()
        if len(auth_data) == 2 and auth_data[0].lower() == "basic":
            # RFC 7617: credentials are base64("user:pass") and only the FIRST
            # colon separates them -- passwords may themselves contain colons,
            # so split at most once (unbounded split raised ValueError here).
            uname, passwd = base64.b64decode(auth_data[1]).decode('utf-8').split(':', 1)
            django_user = authenticate(username=uname, password=passwd)
            if django_user is not None:
                login(request, django_user)
    # In all cases, return the current request's user (may be anonymous user if no login succeed)
    try:
        return request.user
    except AttributeError:
        return AnonymousUser()
def turn_off(self) -> "Signal":
    """Turn the signal off and return ``self`` for chaining.

    May be invoked from any code; the transition is logged when a module
    logger is configured.
    """
    logging_enabled = _logger is not None
    if logging_enabled:
        self._log(INFO, "turn-off")
    self._is_on = False
    return self
def transform_config(transformer, data, data_type='S3Prefix', content_type=None, compression_type=None, split_type=None, job_name=None):
    """Export Airflow transform config from a SageMaker transformer.

    Args:
        transformer (sagemaker.transformer.Transformer): The SageMaker transformer to export Airflow
            config from.
        data (str): Input data location in S3.
        data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values:
            * 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as
              inputs for the transform job.
            * 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as
              an input for the transform job.
        content_type (str): MIME type of the input data (default: None).
        compression_type (str): Compression type of the input data, if compressed (default: None).
            Valid values: 'Gzip', None.
        split_type (str): The record delimiter for the input object (default: 'None').
            Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'.
        job_name (str): job name (default: None). If not specified, one will be generated.

    Returns:
        dict: Transform config that can be directly used by SageMakerTransformOperator in Airflow.
    """
    # Pin the job name on the transformer so the exported config is stable.
    if job_name is not None:
        transformer._current_job_name = job_name
    else:
        base_name = transformer.base_transform_job_name
        transformer._current_job_name = (
            utils.name_from_base(base_name) if base_name is not None else transformer.model_name
        )
    # Default the output location to the session bucket under the job name.
    if transformer.output_path is None:
        transformer.output_path = 's3://{}/{}'.format(
            transformer.sagemaker_session.default_bucket(), transformer._current_job_name
        )
    job_config = sagemaker.transformer._TransformJob._load_config(
        data, data_type, content_type, compression_type, split_type, transformer
    )
    config = {
        'TransformJobName': transformer._current_job_name,
        'ModelName': transformer.model_name,
        'TransformInput': job_config['input_config'],
        'TransformOutput': job_config['output_config'],
        'TransformResources': job_config['resource_config'],
    }
    # Optional fields are only emitted when configured on the transformer.
    optional_fields = (
        ('BatchStrategy', transformer.strategy),
        ('MaxConcurrentTransforms', transformer.max_concurrent_transforms),
        ('MaxPayloadInMB', transformer.max_payload),
        ('Environment', transformer.env),
        ('Tags', transformer.tags),
    )
    for key, value in optional_fields:
        if value is not None:
            config[key] = value
    return config
def close(self):
    """Closes the tunnel.

    Shuts down both halves of the connection and releases the socket;
    socket errors (e.g. an already-closed peer) are deliberately ignored.
    """
    try:
        tunnel_sock = self.sock
        tunnel_sock.shutdown(socket.SHUT_RDWR)
        tunnel_sock.close()
    except socket.error:
        pass
def model_fn(features, labels, mode, params):
    """The model_fn argument for creating an Estimator.

    Builds the MNIST model as a mesh-tensorflow graph, lowers it to plain
    TensorFlow on a placement mesh, and returns the EstimatorSpec for the
    requested mode (TRAIN / PREDICT / EVAL).
    """
    tf.logging.info("features = %s labels = %s mode = %s params=%s" % (features, labels, mode, params))
    global_step = tf.train.get_global_step()
    # Build the model in mesh-tensorflow terms first ...
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    logits, loss = mnist_model(features, labels, mesh)
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
    mesh_size = mesh_shape.size
    mesh_devices = [""] * mesh_size
    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(mesh_shape, layout_rules, mesh_devices)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Gradients/updates must be defined on the mtf graph BEFORE lowering.
        var_grads = mtf.gradients([loss], [v.outputs[0] for v in graph.trainable_variables])
        optimizer = mtf.optimize.AdafactorOptimizer()
        update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
    # ... then lower the mtf graph to ordinary TensorFlow ops.
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    restore_hook = mtf.MtfRestoreHook(lowering)
    tf_logits = lowering.export_to_tf_tensor(logits)
    if mode != tf.estimator.ModeKeys.PREDICT:
        tf_loss = lowering.export_to_tf_tensor(loss)
        tf.summary.scalar("loss", tf_loss)
    if mode == tf.estimator.ModeKeys.TRAIN:
        tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
        tf_update_ops.append(tf.assign_add(global_step, 1))
        train_op = tf.group(tf_update_ops)
        saver = tf.train.Saver(tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True)
        tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
        # The listener also checkpoints the (unlowered) mtf variables.
        saver_listener = mtf.MtfCheckpointSaverListener(lowering)
        saver_hook = tf.train.CheckpointSaverHook(FLAGS.model_dir, save_steps=1000, saver=saver, listeners=[saver_listener])
        accuracy = tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1))
        # Name tensors to be logged with LoggingTensorHook.
        tf.identity(tf_loss, "cross_entropy")
        tf.identity(accuracy[1], name="train_accuracy")
        # Save accuracy scalar to Tensorboard output.
        tf.summary.scalar("train_accuracy", accuracy[1])
        # restore_hook must come before saver_hook
        return tf.estimator.EstimatorSpec(tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, training_chief_hooks=[restore_hook, saver_hook])
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {"classes": tf.argmax(tf_logits, axis=1), "probabilities": tf.nn.softmax(tf_logits), }
        return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[restore_hook], export_outputs={"classify": tf.estimator.export.PredictOutput(predictions)})
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.EVAL, loss=tf_loss, evaluation_hooks=[restore_hook], eval_metric_ops={"accuracy": tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1)), })
def zSetSurfaceData(self, surfNum, radius=None, thick=None, material=None, semidia=None, conic=None, comment=None):
    """Sets surface data.

    Assigns every provided (non-None) property to the surface at index
    ``surfNum``. Only supported in sequential mode.
    """
    if self.pMode != 0:
        raise NotImplementedError('Function not implemented for non-sequential mode')
    # Sequential mode: look the surface up once, then apply the updates.
    surf = self.pLDE.GetSurfaceAt(surfNum)
    updates = (
        ('pRadius', radius),
        ('pThickness', thick),
        ('pMaterial', material),
        ('pSemiDiameter', semidia),
        ('pConic', conic),
        ('pComment', comment),
    )
    for attr, value in updates:
        if value is not None:
            setattr(surf, attr, value)
def parse_json(self, page):
    '''Returns json feed.

    Decodes ``page`` when it is not already text, stores the parsed
    document on ``self.doc``, and returns the result list -- or None
    (after surfacing any API status error) when the result set is empty.
    '''
    if not isinstance(page, basestring):
        page = util.decode_page(page)
    self.doc = json.loads(page)
    results = self.doc.get(self.result_name, [])
    if results:
        return results
    # Empty result set: surface any API error carried in the status field.
    self.check_status(self.doc.get('status'))
    return None
def changePlanParticipation(self, plan, take_part=True):
    """Changes participation in a plan

    :param plan: Plan to take part in or not
    :param take_part: Whether to take part in the plan
    :raises: FBchatException if request failed
    """
    data = {
        "event_reminder_id": plan.uid,
        "guest_state": "GOING" if take_part else "DECLINED",
        "acontext": ACONTEXT,
    }
    # fix_request=True makes _post raise on failure; the response payload
    # itself is not needed (the original bound it to an unused variable).
    self._post(self.req_url.PLAN_PARTICIPATION, data, fix_request=True, as_json=True)
def _build_full_list ( self ) :
"""Build a full list of pages .
Examples :
> > > _ SlicedPaginator ( 1 , 7 , 5 ) . _ build _ full _ list ( )
[1 , 2 , 3 , 4 , 5]
> > > _ SlicedPaginator ( 6 , 7 , 5 ) . _ build _ full _ list ( )
[3 , 4 , 5 , 6 , 7]
> > > _ SlicedPaginator ( 6 , 7 , 5 ) . _ build _ full _ list ( )
[3 , 4 , 5 , 6 , 7]
> > > import itertools
> > > combinations = itertools . combinations ( range ( 100 ) , 2)
> > > combinations = filter ( lambda ( x , y ) : x < y , combinations )
> > > for page , maxpages in combinations :
. . . a = _ SlicedPaginator ( page + 1 , maxpages , 7)
. . . b = a . _ build _ full _ list ( )
> > > _ SlicedPaginator ( 2 , 5 , 7 ) . _ build _ full _ list ( )
[1 , 2 , 3 , 4 , 5]
> > > _ SlicedPaginator ( 5 , 5 , 7 ) . _ build _ full _ list ( )
[1 , 2 , 3 , 4 , 5]""" | if self . npages <= self . maxpages_items :
return range ( 1 , self . npages + 1 )
else :
l = range ( self . curpage - self . max_prev_items , self . curpage + self . max_next_items + 1 )
while l and l [ 0 ] < 1 :
l . append ( l [ - 1 ] + 1 )
del l [ 0 ]
while l and l [ - 1 ] > self . npages :
l . insert ( 0 , l [ 0 ] - 1 )
del l [ - 1 ]
return l |
def read(self, size=1024):
    """Read at most ``size`` bytes from the pty, return them as unicode.

    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed.

    The size argument still refers to bytes, not unicode code points.
    """
    b = super(PtyProcessUnicode, self).read(size)
    # Incremental decode: final=False keeps a partial multi-byte sequence
    # buffered in the decoder until a later read completes it.
    return self.decoder.decode(b, final=False)
def item(p_queue, queue_id, host=None):
    '''Construct a path to a queued item.

    With ``host`` given, the path is rooted in that host's queue tree;
    otherwise it is rooted at ``p_queue`` directly.
    '''
    # NOTE: the original placed the doc text mid-function, where it was a
    # no-op string expression rather than a docstring; it now documents the API.
    if host is not None:
        return os.path.join(_path(host, _c.FSQ_QUEUE, root=hosts(p_queue)), valid_name(queue_id))
    return os.path.join(_path(p_queue, _c.FSQ_QUEUE), valid_name(queue_id))
def is_valid_triangle(x, y, z):
    """Check the validity of a triangle based on its angles.

    A triangle is valid if the sum of its three angles equals 180 degrees.

    Args:
        x, y, z: The angles of the triangle.

    Returns:
        A boolean value indicating if the triangle is valid or not.

    Examples:
        >>> is_valid_triangle(60, 50, 90)
        False
        >>> is_valid_triangle(45, 75, 60)
        True
        >>> is_valid_triangle(30, 50, 100)
        True
    """
    angle_sum = x + y + z
    return angle_sum == 180
def ias53(msg):
    """Indicated airspeed, DBS 5,3 message

    Args:
        msg (String): 28 bytes hexadecimal message

    Returns:
        int: indicated airspeed in knots, or None when the status bit is off
    """
    bits = hex2bin(data(msg))
    # Bit 13 (index 12) is the status flag; zero means no valid value.
    if bits[12] == '0':
        return None
    # 10-bit airspeed field, units: knots
    return bin2int(bits[13:23])
def template(tem, queue=False, **kwargs):
    '''Execute the information stored in a template file on the minion.

    This function does not ask a master for a SLS file to render but
    instead directly processes the file at the provided path on the minion.

    CLI Example:

    .. code-block:: bash

        salt '*' state.template '<Path to template on the minion>'
    '''
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')
    # Bail out early if another state run currently holds the queue.
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    try:
        st_ = salt.state.HighState(opts, context=__context__, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts))
    except NameError:
        # __proxy__ is only injected on proxy minions; fall back without it.
        st_ = salt.state.HighState(opts, context=__context__, initial_pillar=_get_initial_pillar(opts))
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
        raise CommandExecutionError('Pillar failed to render', info=errors)
    # The renderer expects an .sls path; add the suffix when missing.
    if not tem.endswith('.sls'):
        tem = '{sls}.sls'.format(sls=tem)
    # Render locally (local=True): no master round-trip.
    high_state, errors = st_.render_state(tem, kwargs.get('saltenv', ''), '', None, local=True)
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return errors
    ret = st_.state.call_high(high_state)
    _set_retcode(ret, highstate=high_state)
    return ret
def load(self, filename):
    """Entry point to load the pest control file.

    Sniffs the first non-comment line to detect the version (if present),
    then dispatches to the matching version-specific loader.

    Parameters
    ----------
    filename : str
        pst filename

    Raises
    ------
    Exception
        if no "pcf" line is found or the version is not 1 or 2.
    """
    assert os.path.exists(filename), "couldn't find control file {0}".format(filename)
    # Scan for the "pcf" marker line. Using a context manager fixes the
    # leaked file handle; the tokens guard fixes the IndexError the
    # original raised on blank lines.
    with open(filename, 'r') as f:
        while True:
            line = f.readline()
            if line == "":
                raise Exception("Pst.load() error: EOF when trying to find first line - #sad")
            tokens = line.strip().split()
            if tokens and tokens[0].lower() == "pcf":
                break
    assert line.startswith("pcf"), "Pst.load() error: first noncomment line must start with 'pcf', not '{0}'".format(line)
    raw = line.strip().split()
    if len(raw) > 1 and "version" in raw[1].lower():
        raw = raw[1].split('=')
        if len(raw) > 1:
            try:
                self._version = int(raw[1])
            except ValueError:
                # Malformed version number: keep the current default.
                pass
    if self._version == 1:
        self._load_version1(filename)
    elif self._version == 2:
        self._load_version2(filename)
    else:
        # BUG FIX: the original referenced the undefined name `version`
        # here, raising NameError instead of the intended message.
        raise Exception("Pst.load() error: version must be 1 or 2, not '{0}'".format(self._version))
def load_iterable(self, iterable, session=None):
    '''Load an ``iterable``.

    Decodes every element via the :meth:`loads` method.

    :param iterable: an iterable over data to load.
    :param session: Optional :class:`stdnet.odm.Session` (unused here;
        kept for interface compatibility).
    :return: a list of decoded data.
    '''
    # Comprehension replaces the manual append loop (PERF401). NOTE: the
    # old docstring claimed a generator was returned, but this method has
    # always materialized a list; it is now documented accurately.
    return [self.loads(v) for v in iterable]
def __yahoo_request(query):
    """Request Yahoo Finance information.

    Request information from YQL.
    `Check <http://goo.gl/8AROUD>`_ for more information on YQL.
    """
    encoded = quote(query)
    url = ('https://query.yahooapis.com/v1/public/yql?q=' + encoded +
           '&format=json&env=store://datatables.org/alltableswithkeys')
    raw = urlopen(url).read()
    document = json.loads(raw.decode('utf-8'))
    return document['query']['results']
async def _collect_sample(self, url, url_pattern):
    """Collect a sample of responses for the given URL pattern.

    Sample collection is meant to be very tolerant to generic failures as
    failing to obtain the sample has important consequences on the results.

    - Multiple retries with longer delays
    - Larger than usual timeout

    :param url: base URL to sample.
    :param url_pattern: pattern used to generate candidate URLs.
    :return: list of non-empty signatures, one per successful fetch.
    :raises StopRequest: when no fetch produced a usable signature.
    """
    samples = []
    # One candidate URL per confirmation round.
    urls = [self.path_generator.generate_url(url, url_pattern) for _ in range(self.confirmation_factor)]
    for promise in asyncio.as_completed([self._fetch_sample(u) for u in urls]):
        try:
            signature = await promise
        except RejectRequest:
            # Rejected fetches are expected noise; skip them (the original
            # bound the exception to an unused name).
            continue
        if signature:
            samples.append(signature)
    if not samples:
        raise StopRequest("Impossible to obtain sample")
    return samples
def _check_branch(self, revision, branch):
    '''Used to find out if the revision is in the given branch.

    :param revision: Revision to check.
    :param branch: Branch to check revision on.
    :return: True/False - Found it/Didn't find it
    '''
    # Get a changelog
    clog_url = self.hg_url / branch / 'json-log' / revision
    try:
        Log.note("Searching through changelog {{url}}", url=clog_url)
        clog_obj = http.get_json(clog_url, retry=RETRY)
        # A plain-string response (rather than a parsed JSON object) is
        # treated as "revision not on this branch".
        if isinstance(clog_obj, (text_type, str)):
            Log.note("Revision {{cset}} does not exist in the {{branch}} branch", cset=revision, branch=branch)
            return False
    except Exception as e:
        # Any fetch/parse failure is reported and treated as "not found"
        # rather than propagated to the caller.
        Log.note("Unexpected error getting changset-log for {{url}}: {{error}}", url=clog_url, error=e)
        return False
    return True
def get_features(self, mapobject_type_name):
    '''Gets features for a given object type.

    Parameters
    ----------
    mapobject_type_name: str
        type of the segmented objects

    Returns
    -------
    List[Dict[str, str]]
        information about each feature

    See also
    --------
    :func:`tmserver.api.feature.get_features`
    :class:`tmlib.models.feature.Feature`
    '''
    logger.info('get features of experiment "%s", object type "%s"', self.experiment_name, mapobject_type_name)
    type_id = self._get_mapobject_type_id(mapobject_type_name)
    endpoint = '/experiments/{experiment_id}/mapobject_types/{mapobject_type_id}/features'.format(
        experiment_id=self._experiment_id, mapobject_type_id=type_id
    )
    response = self._session.get(self._build_api_url(endpoint))
    response.raise_for_status()
    return response.json()['data']
def commonancestors(Class, *args):
    """Generator over the common ancestors, of a particular type, of two or more FoLiA element instances.

    Yields every ancestor of type ``Class`` shared by all elements in
    ``args``, ordered from the closest ancestor to the most distant one.

    Parameters:
        Class: The type of ancestor to find, should be the :class:`AbstractElement` class or any subclass thereof (not an instance!)
        *args: The elements to find the common ancestors of, instances derived from :class:`AbstractElement`

    Yields:
        instance derived from :class:`AbstractElement`: A common ancestor of the arguments, an instance of the specified ``Class``.
    """
    shared = None
    for element in args:
        ancestors = list(element.ancestors(Class))
        if shared is None:
            # First element: its ancestor chain seeds the candidates and
            # fixes the closest-to-farthest ordering.
            shared = ancestors
        else:
            # Keep only candidates this element also has as ancestors.
            shared = [candidate for candidate in shared if candidate in ancestors]
    if shared:
        for ancestor in shared:
            yield ancestor
def create_project(self, project_name, desc):
    """Send POST to /projects creating a new project with the specified name and desc.

    Raises DataServiceError on error.

    :param project_name: str name of the project
    :param desc: str description of the project
    :return: requests.Response containing the successful result
    """
    payload = {
        "name": project_name,
        "description": desc,
    }
    return self._post("/projects", payload)
def resolve_python_path(path):
    """Turn a python path like ``module.name.here:ClassName.SubClass`` into an object."""
    # Left of the colon: importable module path. Right: dotted attribute chain.
    module_path, attr_path = path.split(':', 1)
    target = importlib.import_module(module_path)
    for attr in attr_path.split('.'):
        target = getattr(target, attr)
    return target
def get_submissions(student_item_dict, limit=None):
    """Retrieves the submissions for the specified student item,
    ordered by most recent submitted date.

    Returns the submissions relative to the specified student item. Exception
    thrown if no submission is found relative to this location.

    Args:
        student_item_dict (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        limit (int): Optional parameter for limiting the returned number of
            submissions associated with this student item. If not specified, all
            associated submissions are returned.

    Returns:
        List dict: A list of dicts for the associated student item. The submission
        contains five attributes: student_item, attempt_number, submitted_at,
        created_at, and answer. 'student_item' is the ID of the related student
        item for the submission. 'attempt_number' is the attempt this submission
        represents for this question. 'submitted_at' represents the time this
        submission was submitted, which can be configured, versus the
        'created_at' date, which is when the submission is first created.

    Raises:
        SubmissionRequestError: Raised when the associated student item fails
            validation.
        SubmissionNotFoundError: Raised when a submission cannot be found for
            the associated student item.

    Examples:
        >>> student_item_dict = dict(
        >>>     student_id="Tim",
        >>>     item_id="item_1",
        >>>     course_id="course_1",
        >>>     item_type="type_one"
        >>> )
        >>> get_submissions(student_item_dict, 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        submission_models = Submission.objects.filter(student_item=student_item_model)
    except DatabaseError:
        # NOTE(review): Django QuerySets are lazy, so a DatabaseError would
        # more likely surface at serialization below -- confirm this
        # guard's actual reach.
        error_message = (u"Error getting submission request for student item {}".format(student_item_dict))
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)
    if limit:
        # Slicing the queryset adds a LIMIT clause; still lazy at this point.
        submission_models = submission_models[:limit]
    return SubmissionSerializer(submission_models, many=True).data
def get_owned_filters(self, server_id):
    """Return the indication filters in a WBEM server owned by this
    subscription manager.

    This function accesses only the local list of owned filters; it does
    not contact the WBEM server. The local list of owned filters is
    discovered from the WBEM server when the server is registered with
    the subscription manager, and is maintained from then on as changes
    happen through this subscription manager.

    Parameters:

      server_id (:term:`string`):
        The server ID of the WBEM server, returned by
        :meth:`~pywbem.WBEMSubscriptionManager.add_server`.

    Returns:

        :class:`py:list` of :class:`~pywbem.CIMInstance`: The indication
        filter instances.
    """
    # Validates server_id (raises when unknown).
    self._get_server(server_id)
    # Return a shallow copy so callers cannot mutate internal state.
    owned = self._owned_filters[server_id]
    return list(owned)
def MaxPooling(inputs, pool_size, strides=None, padding='valid', data_format='channels_last'):
    """Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size."""
    effective_strides = pool_size if strides is None else strides
    pool = tf.layers.MaxPooling2D(pool_size, effective_strides, padding=padding, data_format=data_format)
    pooled = pool.apply(inputs, scope=tf.get_variable_scope())
    # Stable output name for downstream graph references.
    return tf.identity(pooled, name='output')
def use_refresh_token(self, refresh_token, scope=None):  # type (str, Optional[List[str]]) -> Tuple[se_leg_op.access_token.AccessToken, Optional[str]]
    """Creates a new access token, and refresh token, based on the supplied refresh token.

    :param refresh_token: refresh token presented by the client.
    :param scope: optional list of scope values for the new access token;
        must be a subset of the originally granted scope.
    :raises InvalidRefreshToken: if the token is unknown or expired.
    :raises InvalidScope: if a non-granted scope value is requested.
    :return: new access token and new refresh token if the old one had an expiration time
    """
    if refresh_token not in self.refresh_tokens:
        raise InvalidRefreshToken('{} unknown'.format(refresh_token))
    refresh_token_info = self.refresh_tokens[refresh_token]
    # Tokens without an 'exp' entry never expire.
    if 'exp' in refresh_token_info and refresh_token_info['exp'] < int(time.time()):
        raise InvalidRefreshToken('{} has expired'.format(refresh_token))
    authz_info = self.access_tokens[refresh_token_info['access_token']]
    if scope:
        if not requested_scope_is_allowed(scope, authz_info['granted_scope']):
            logger.debug('trying to refresh token with superset scope, requested_scope=%s, granted_scope=%s', scope, authz_info['granted_scope'])
            raise InvalidScope('Requested scope includes non-granted value')
        # Scope is stored space-separated, per OAuth 2.0 convention.
        scope = ' '.join(scope)
        logger.debug('refreshing token with new scope, old_scope=%s -> new_scope=%s', authz_info['scope'], scope)
    else:
        # OAuth 2.0: scope: "[...] if omitted is treated as equal to the scope originally granted by the resource owner"
        scope = authz_info['granted_scope']
    new_access_token = self._create_access_token(authz_info['sub'], authz_info[self.KEY_AUTHORIZATION_REQUEST], authz_info['granted_scope'], scope)
    new_refresh_token = None
    if self.refresh_token_threshold and 'exp' in refresh_token_info and refresh_token_info['exp'] - int(time.time()) < self.refresh_token_threshold:
        # refresh token is close to expiry, issue a new one
        new_refresh_token = self.create_refresh_token(new_access_token.value)
    else:
        # Keep the existing refresh token, re-pointing it at the new
        # access token.
        self.refresh_tokens[refresh_token]['access_token'] = new_access_token.value
    logger.debug('refreshed tokens, new_access_token=%s new_refresh_token=%s old_refresh_token=%s', new_access_token, new_refresh_token, refresh_token)
    return new_access_token, new_refresh_token
def get(self, measurementId):
    """Analyses the measurement with the given parameters.

    :param measurementId: id of the measurement to load.
    :return: tuple of (payload dict or None, HTTP status code).
    """
    logger.info('Loading raw data for ' + measurementId)
    measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE)
    if measurement is None or not measurement.inflate():
        return None, 404

    def _series(accessor):
        # One jsonified entry per axis for the given data accessor.
        return {axis: self._jsonify(accessor(axis)) for axis in ('x', 'y', 'z')}

    payload = {
        name: {
            'raw': _series(data.raw),
            'vibration': _series(data.vibration),
            'tilt': _series(data.tilt),
        }
        for name, data in measurement.data.items()
    }
    return payload, 200
def write(self, oprot):
    '''Write this object to the given output protocol and return self.

    :type oprot: thryft.protocol._output_protocol._OutputProtocol
    :rtype: pastpy.gen.database.impl.online.online_database_configuration.OnlineDatabaseConfiguration
    '''
    def _write_string_field(field_name, value):
        # type 11 is the Thrift wire type for strings.
        oprot.write_field_begin(name=field_name, type=11, id=None)
        oprot.write_string(value)
        oprot.write_field_end()

    oprot.write_struct_begin('OnlineDatabaseConfiguration')
    # collection_name is required and always serialized.
    _write_string_field('collection_name', self.collection_name)
    # download_dir_path is optional: only serialized when present.
    if self.download_dir_path is not None:
        _write_string_field('download_dir_path', self.download_dir_path)
    oprot.write_field_stop()
    oprot.write_struct_end()
    return self
def imdb(limit=None, shuffle=True):
    """Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data.

    Args:
        limit: get only first N items for each class
        shuffle: whether to shuffle each split after loading

    Returns:
        Tuple ``(X_train, X_test, y_train, y_test)``. (Note: the old
        docstring listed ``[X_train, y_train, X_test, y_test]``, which did
        not match the actual return order -- features first, then labels.)
    """
    movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
    # download and extract, thus remove the suffix '.tar.gz'
    path = keras.utils.get_file('aclImdb.tar.gz', movie_review_url, extract=True)[:-7]
    X_train, y_train = read_pos_neg_data(path, 'train', limit)
    X_test, y_test = read_pos_neg_data(path, 'test', limit)
    if shuffle:
        # Shuffle features and labels together so they stay aligned.
        X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
        X_test, y_test = sklearn.utils.shuffle(X_test, y_test)
    return X_train, X_test, y_train, y_test
def bitwise_xor(self, t):
    """Operation xor.

    Computes ``s ^ t`` as ``~(~s | t) | ~(s | ~t)``, using only the
    interval implementations of NOT and OR (same construction as in the
    paper this code follows).

    :param t: The other operand.
    :return: the normalized resulting interval.
    """
    s = self
    not_s_or_t = s.bitwise_not().bitwise_or(t)
    s_or_not_t = s.bitwise_or(t.bitwise_not())
    result = not_s_or_t.bitwise_not().bitwise_or(s_or_not_t.bitwise_not())
    return result.normalize()
def convert_string_to_list(input_string: str) -> list:
    """Transform a string into a list, split on each single space character.

    Examples:
        convert_string_to_list('python programming') -> ['python', 'programming']
        convert_string_to_list('lists tuples strings') -> ['lists', 'tuples', 'strings']
        convert_string_to_list('write a program') -> ['write', 'a', 'program']

    Parameters:
        input_string (str): The string to be converted to a list.

    Returns:
        list: The input string converted into a list.
    """
    separator = ' '
    return input_string.split(separator)
def images_to_matrix(image_list, mask=None, sigma=None, epsilon=0.5):
    """Read images into rows of a matrix, given a mask - much faster for
    large datasets as it is based on C++ implementations.

    ANTsR function: `imagesToMatrix`

    Arguments
    ---------
    image_list : list of ANTsImage types
        images to convert to ndarray
    mask : ANTsImage (optional)
        image containing binary mask. voxels in the mask are placed in the matrix.
        If not given, a mask is computed from the first image.
    sigma : scalar (optional)
        smoothing factor
    epsilon : scalar
        threshold for mask

    Returns
    -------
    ndarray
        array with a row for each image, shape = (N_IMAGES, N_VOXELS)

    Example
    -------
    >>> import ants
    >>> img = ants.image_read(ants.get_ants_data('r16'))
    >>> img2 = ants.image_read(ants.get_ants_data('r16'))
    >>> img3 = ants.image_read(ants.get_ants_data('r16'))
    >>> mat = ants.image_list_to_matrix([img, img2, img3])
    """
    if mask is None:
        mask = utils.get_mask(image_list[0])

    def _masked_values(img):
        # resample onto the mask grid when shapes disagree
        if np.sum(np.array(img.shape) - np.array(mask.shape)) != 0:
            img = reg.resample_image_to_target(img, mask, 2)
        return img[mask]

    n_images = len(image_list)
    n_voxels = np.sum(mask.numpy() >= epsilon)
    matrix = np.empty((n_images, n_voxels))
    smoothing = sigma is not None
    for row, image in enumerate(image_list):
        if smoothing:
            image = utils.smooth_image(image, sigma, sigma_in_physical_coordinates=True)
        matrix[row, :] = _masked_values(image)
    return matrix
def port_remove_nio_binding(self, port_number):
    """Removes a port NIO binding.

    :param port_number: port number
    :returns: NIO instance
    """
    adapter = self._ethernet_adapter
    if not adapter.port_exists(port_number):
        raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=adapter, port_number=port_number))
    # Tear down the uBridge bridge first if the VM is live.
    if self.is_running():
        bridge_name = "VPCS-{}".format(self._id)
        yield from self._ubridge_send("bridge delete {name}".format(name=bridge_name))
    nio = adapter.get_nio(port_number)
    if isinstance(nio, NIOUDP):
        # Give the UDP port back to the pool.
        self.manager.port_manager.release_udp_port(nio.lport, self._project)
    adapter.remove_nio(port_number)
    log.info('VPCS "{name}" [{id}]: {nio} removed from port {port_number}'.format(name=self._name, id=self.id, nio=nio, port_number=port_number))
    return nio
def transform(self, textual):
    """Transform an object from textual form to `PyObject`.

    ``textual`` is a sequence whose first element names the type; the call is
    dispatched to the ``<type>_to_pyobject`` method on ``self``.

    Returns None when ``textual`` is None or when no converter method exists
    for the named type.
    """
    if textual is None:
        return None
    type_name = textual[0]
    # Look up the converter with a default instead of wrapping both the
    # lookup AND the call in try/except AttributeError: the original form
    # silently swallowed AttributeErrors raised *inside* the converter.
    method = getattr(self, type_name + '_to_pyobject', None)
    if method is None:
        return None
    return method(textual)
def get_proficiency_mdata():
    """Return default mdata map for Proficiency"""
    def _display_text(text):
        # DisplayText-shaped map shared by all labels and instructions.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }

    def _id_metadata(label):
        # Shared shape of the Id-valued elements (objective/resource/level).
        return {
            'element_label': _display_text(label),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        }

    completion = {
        'element_label': _display_text('completion'),
        'instructions': _display_text('enter a decimal value.'),
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_decimal_values': [None],
        'syntax': 'DECIMAL',
        'decimal_scale': None,
        'minimum_decimal': None,
        'maximum_decimal': None,
        'decimal_set': [],
    }
    return {
        'completion': completion,
        'objective': _id_metadata('objective'),
        'resource': _id_metadata('resource'),
        'level': _id_metadata('level'),
    }
def main(data_directory: str, dataset: str = None, filter_by: str = None, verbose: bool = False) -> None:
    """Parse the text2sql datasets and report SQL-parsing coverage statistics.

    Parameters
    ----------
    data_directory : str, required.
        The path to the data directory of https://github.com/jkkummerfeld/text2sql-data
        which has been preprocessed using scripts/reformat_text2sql_data.py.
        (The annotation previously said ``int``; the docstring and usage show str.)
    dataset : str, optional.
        The dataset to parse. By default all are parsed.
    filter_by : str, optional
        Compute statistics about a particular error and only print errors
        which don't contain this string.
    verbose : bool, optional.
        Whether to print information about incorrectly parsed SQL.
    """
    directory_dict = {path: files for path, names, files in os.walk(data_directory) if files}
    for directory, data_files in directory_dict.items():
        # Only the question-split data is of interest; optionally narrow to one dataset.
        if "query_split" in directory or (dataset is not None and dataset not in directory):
            continue
        print(f"Parsing dataset at {directory}")
        parsed = 0
        total_non_aliases = 0
        total_as_count = 0
        total_queries_with_weird_as = 0
        total = 0
        for json_file in data_files:
            print(f"\tParsing split at {json_file}")
            file_path = os.path.join(directory, json_file)
            num_parsed, num_queries, filtered_errors, non_basic_as_aliases, as_count, queries_with_weird_as = parse_dataset(file_path, filter_by, verbose)
            parsed += num_parsed
            total += num_queries
            total_non_aliases += non_basic_as_aliases
            total_as_count += as_count
            total_queries_with_weird_as += queries_with_weird_as
        # Guard the ratios so an empty dataset (or one with no AS aliases)
        # doesn't raise ZeroDivisionError; the filter_by branch below already
        # guards its own denominator with +1e-13.
        coverage = parsed / total if total else 0.0
        as_ratio = total_non_aliases / total_as_count if total_as_count else 0.0
        weird_ratio = total_queries_with_weird_as / total if total else 0.0
        print(f"\tParsed {parsed} out of {total} queries, coverage {coverage}")
        print(f"\tFound {total_non_aliases} out of {total_as_count} non simple AS aliases. percentage: {as_ratio}")
        print(f"\tFound {total_queries_with_weird_as} out of {total} queries with > 1 weird AS. percentage: {weird_ratio}")
        if filter_by is not None:
            print(f"\tOf {total - parsed} errors, {filtered_errors/ (total - parsed + 1e-13)} contain {filter_by}")
def _removeBackrefs(senderkey):
    """Remove all back-references to this senderkey"""
    try:
        signals = connections[senderkey]
    except KeyError:
        # Unknown sender: nothing registered, nothing to clean up.
        return
    items = signals.items()

    def _all_receivers():
        # Lazily flatten every receiver set registered for every signal.
        for _signal, receiver_set in items:
            for receiver in receiver_set:
                yield receiver

    for receiver in _all_receivers():
        _killBackref(receiver, senderkey)
def process_calibration(self, save=False):
    """Processes the data gathered in a calibration run (does not work if
    multiple calibrations), returns resultant dB.

    NOTE(review): Python 2 code (uses print statements).
    NOTE(review): the `save` parameter is never read in this body — confirm
    whether it is still needed.
    """
    if not self.save_data:
        raise Exception("Runner must be set to save when run, to be able to process")
    # NOTE(review): np.vectorize's 2nd/3rd positional parameters are `otypes`
    # and `doc`, not extra arguments for calc_db — passing self.mphonesens and
    # self.mphonedb here looks suspicious; confirm intent against calc_db.
    vfunc = np.vectorize(calc_db, self.mphonesens, self.mphonedb)
    # Average the recorded peaks across repetitions (axis=1).
    if USE_FFT:
        peaks = np.mean(abs(self.datafile.get_data(self.current_dataset_name + '/fft_peaks')), axis=1)
    else:
        peaks = np.mean(abs(self.datafile.get_data(self.current_dataset_name + '/vamp')), axis=1)
    # print 'calibration frequencies', self.calibration_frequencies
    # cal_index = self.calibration_indexes[self.calibration_frequencies.index(self.calf)]
    # cal_peak = peaks[cal_index]
    # cal_vmax = vmaxes[cal_index]
    # print 'vfunc inputs', vmaxes, self.caldb, cal_vmax
    resultant_dB = vfunc(peaks, self.calpeak) * -1
    # db attenuation
    print 'calibration frequences', self.calibration_frequencies, 'indexes', self.calibration_indexes
    print 'attenuations', resultant_dB
    # NOTE(review): calibration_vector is computed but never used or returned.
    calibration_vector = resultant_dB[self.calibration_indexes].squeeze()
    # Not currenly saving resultant intensity
    return resultant_dB, '', self.calf
def swap(self, old_chunks, new_chunk):
    """Swaps old consecutive chunks with new chunk.

    Args:
      old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
        be removed.
      new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
    """
    positions = [self.index(chunk) for chunk in old_chunks]
    first = positions[0]
    # Drop the whole consecutive run, then place the replacement where
    # the run started.
    del self[first:positions[-1] + 1]
    self.insert(first, new_chunk)
def irfftn(a, s=None, axes=None, norm=None):
    """Compute the inverse of the N-dimensional FFT of real input.

    Inverse of `rfftn`: ``irfftn(rfftn(a), a.shape) == a`` to within numerical
    accuracy (``a.shape`` is necessary for the same reason ``len(a)`` is for
    `irfft`).  The input should be ordered in the same way as is returned by
    `rfftn`, i.e. as for `irfft` for the final transformation axis, and as for
    `ifftn` along all the other axes.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output.  `s` is also
        the number of input points used along each axis, except for the last
        axis, where ``s[-1]//2 + 1`` points of the input are used.  Input is
        cropped or zero-padded to match.  If not given, the shape of the
        input along the axes specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
        Repeated indices mean the inverse transform over that axis is
        performed multiple times.
    norm : {None, "ortho"}, optional
        Normalization mode (see `numpy.fft`).  Default is None.

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `a`.  In the final
        transformed axis the output length when `s` is not given is
        ``2*(m - 1)`` where ``m`` is the input length on that axis; to get an
        odd number of output points there, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    rfftn, fft, irfft, irfft2

    Examples
    --------
    >>> a = np.zeros((3, 2, 2))
    >>> a[0, 0, 0] = 3 * 2 * 2
    >>> np.fft.irfftn(a)
    array([[[1., 1.],
            [1., 1.]],
           [[1., 1.],
            [1., 1.]],
           [[1., 1.],
            [1., 1.]]])
    """
    result = mkl_fft.irfftn_numpy(a, s, axes)
    # "ortho" normalization rescales by the square root of the total
    # transform size.
    if _unitary(norm):
        result *= sqrt(_tot_size(result, axes))
    return result
def get_num_names_in_namespace(self, namespace_id):
    """Get the number of names in a namespace"""
    cursor = self.db.cursor()
    # Delegate to the namedb query, pinned at the last-seen block.
    return namedb_get_num_names_in_namespace(cursor, namespace_id, self.lastblock)
def import_ed25519_publickey_from_file(filepath):
    """<Purpose>
      Load the ED25519 public key object (conformant to
      'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'.  Return it
      in securesystemslib.formats.ED25519KEY_SCHEMA format.  If the key object
      in 'filepath' contains a private key, it is discarded.

    <Arguments>
      filepath:
        <filepath>.pub file, a public key file.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'filepath' is improperly
      formatted or is an unexpected key type.

    <Side Effects>
      The contents of 'filepath' is read and saved.

    <Returns>
      An ED25519 key object conformant to
      'securesystemslib.formats.ED25519KEY_SCHEMA'.
    """
    # Validate the argument type/shape; raises
    # securesystemslib.exceptions.FormatError on mismatch.
    securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
    # ED25519 key objects are saved in json/metadata format.  Convert the
    # loaded metadata to a key object (which also includes the keyid).
    key_metadata = securesystemslib.util.load_json_file(filepath)
    ed25519_key, _ = securesystemslib.keys.format_metadata_to_key(key_metadata)
    # Redundant validation of 'keytype':
    # 'securesystemslib.keys.format_metadata_to_key()' should have fully
    # validated the metadata already.
    if ed25519_key['keytype'] != 'ed25519':  # pragma: no cover
        raise securesystemslib.exceptions.FormatError(
            'Invalid key type loaded: ' + repr(ed25519_key['keytype']))
    return ed25519_key
def _format_mongodb_uri ( parsed_uri ) :
"""Painstakingly reconstruct a MongoDB URI parsed using pymongo . uri _ parser . parse _ uri .
: param parsed _ uri : Result of pymongo . uri _ parser . parse _ uri
: type parsed _ uri : dict
: return : New URI
: rtype : str | unicode""" | user_pass = ''
if parsed_uri . get ( 'username' ) and parsed_uri . get ( 'password' ) :
user_pass = '{username!s}:{password!s}@' . format ( ** parsed_uri )
_nodes = [ ]
for host , port in parsed_uri . get ( 'nodelist' ) :
if ':' in host and not host . endswith ( ']' ) : # IPv6 address without brackets
host = '[{!s}]' . format ( host )
if port == 27017 :
_nodes . append ( host )
else :
_nodes . append ( '{!s}:{!s}' . format ( host , port ) )
nodelist = ',' . join ( _nodes )
options = ''
if parsed_uri . get ( 'options' ) :
_opt_list = [ ]
for key , value in parsed_uri . get ( 'options' ) . items ( ) :
if isinstance ( value , bool ) :
value = str ( value ) . lower ( )
_opt_list . append ( '{!s}={!s}' . format ( key , value ) )
options = '?' + '&' . join ( _opt_list )
db_name = parsed_uri . get ( 'database' ) or ''
res = "mongodb://{user_pass!s}{nodelist!s}/{db_name!s}{options!s}" . format ( user_pass = user_pass , nodelist = nodelist , db_name = db_name , # collection is ignored
options = options )
return res |
def parse_tagged_reference_line(line_marker, line, identified_dois, identified_urls):
    """Convert a single tagged reference line to its MARC-XML representation.

    Scans the tagged line from left to right, extracting the contents and
    types of all recognised tags into dictionary elements.  Each dictionary is
    appended onto a list, which is given to 'build_formatted_xml_citation()'
    where the correct XML output is generated.  This method is dumb, with very
    few heuristics: it simply looks for tags, and makes dictionaries from the
    data it finds in the tagged reference line.

    @param line_marker: (string) the line marker for this single reference
        line (e.g. [19]).
    @param line: (string) the tagged reference line.
    @param identified_dois: (list) DOIs found in this line.  Their ordering
        corresponds to the ordering of tags in the line, left to right.
    @param identified_urls: (list) (url, description) pairs found in this
        line, ordered to match the tag ordering, left to right.
    @return: tuple (citation_elements, line_marker, counts) where
        citation_elements is the list of element dictionaries and counts is a
        dict with the number of each piece of info found in the line.
    """
    count_misc = count_title = count_reportnum = count_url = count_doi = count_auth_group = 0
    processed_line = line
    cur_misc_txt = u""
    tag_match = re_tagged_citation.search(processed_line)
    # contains a list of dictionary entries of previously cited items
    citation_elements = []
    # the last tag element found when working from left-to-right across the line
    identified_citation_element = None
    while tag_match is not None:
        # While there are tags inside this reference line...
        tag_match_start = tag_match.start()
        tag_match_end = tag_match.end()
        tag_type = tag_match.group(1)
        # Everything before this tag accumulates as miscellaneous text.
        cur_misc_txt += processed_line[0:tag_match_start]
        # Catches both standard titles, and ibid's
        if tag_type.find("JOURNAL") != -1:
            # This tag is an identified journal TITLE. It should be followed
            # by VOLUME, YEAR and PAGE tags.
            # See if the found title has been tagged as an ibid:
            # <cds.JOURNALibid>
            if tag_match.group('ibid'):
                is_ibid = True
                closing_tag_length = len(CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID)
                idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID, tag_match_end)
            else:
                is_ibid = False
                closing_tag_length = len(CFG_REFEXTRACT_MARKER_CLOSING_TITLE)
                # extract the title from the line:
                idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE, tag_match_end)
            if idx_closing_tag == -1:
                # no closing TITLE tag found - get rid of the solitary tag
                processed_line = processed_line[tag_match_end:]
                identified_citation_element = None
            else:
                # Closing tag was found:
                # The title text to be used in the marked-up citation:
                title_text = processed_line[tag_match_end:idx_closing_tag]
                # Now trim this matched title and its tags from the start of
                # the line:
                processed_line = processed_line[idx_closing_tag + closing_tag_length:]
                numeration_match = re_recognised_numeration_for_title_plus_series.search(processed_line)
                if numeration_match:
                    # recognised numeration immediately after the title -
                    # extract it:
                    reference_volume = numeration_match.group('vol')
                    reference_year = numeration_match.group('yr') or ''
                    reference_page = numeration_match.group('pg')
                    # This is used on two accounts:
                    # 1. To get the series char from the title, if no series was found with the numeration
                    # 2. To always remove any series character from the title match text
                    # series_from_title = re_series_from_title.search(title_text)
                    if numeration_match.group('series'):
                        reference_volume = numeration_match.group('series') + reference_volume
                    # Skip past the matched numeration in the working line:
                    processed_line = processed_line[numeration_match.end():]
                    # 'is_ibid' saves whether THIS TITLE is an ibid or not. (True or False)
                    # 'extra_ibids' are there to hold ibid's without the word 'ibid', which
                    # come directly after this title
                    # i.e., they are recognised using title numeration instead
                    # of ibid notation
                    identified_citation_element = {'type': "JOURNAL", 'misc_txt': cur_misc_txt, 'title': title_text, 'volume': reference_volume, 'year': reference_year, 'page': reference_page, 'is_ibid': is_ibid, 'extra_ibids': []}
                    count_title += 1
                    cur_misc_txt = u""
                    # Try to find IBID's after this title, on top of previously found titles that were
                    # denoted with the word 'IBID'. (i.e. look for IBID's without the word 'IBID' by
                    # looking at extra numeration after this title)
                    numeration_match = re_numeration_no_ibid_txt.match(processed_line)
                    while numeration_match is not None:
                        reference_volume = numeration_match.group('vol')
                        reference_year = numeration_match.group('yr')
                        reference_page = numeration_match.group('pg')
                        if numeration_match.group('series'):
                            reference_volume = numeration_match.group('series') + reference_volume
                        # Skip past the matched numeration in the working line:
                        processed_line = processed_line[numeration_match.end():]
                        # Takes the just found title text
                        identified_citation_element['extra_ibids'].append({'type': "JOURNAL", 'misc_txt': "", 'title': title_text, 'volume': reference_volume, 'year': reference_year, 'page': reference_page, })
                        # Increment the stats counters:
                        count_title += 1
                        title_text = ""
                        reference_volume = ""
                        reference_year = ""
                        reference_page = ""
                        numeration_match = re_numeration_no_ibid_txt.match(processed_line)
                else:
                    # No numeration was recognised after the title. Add the
                    # title into a MISC item instead:
                    cur_misc_txt += "%s" % title_text
                    identified_citation_element = None
        elif tag_type == "REPORTNUMBER":
            # This tag is an identified institutional report number:
            # extract the institutional report-number from the line:
            idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM, tag_match_end)
            # Sanity check - did we find a closing report-number tag?
            if idx_closing_tag == -1:
                # no closing </cds.REPORTNUMBER> tag found - strip the opening tag and move past this
                # recognised reportnumber as it is unreliable:
                processed_line = processed_line[tag_match_end:]
                identified_citation_element = None
            else:
                # closing tag was found
                report_num = processed_line[tag_match_end:idx_closing_tag]
                # now trim this matched institutional report-number
                # and its tags from the start of the line:
                ending_tag_pos = idx_closing_tag + len(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM)
                processed_line = processed_line[ending_tag_pos:]
                identified_citation_element = {'type': "REPORTNUMBER", 'misc_txt': cur_misc_txt, 'report_num': report_num}
                count_reportnum += 1
                cur_misc_txt = u""
        elif tag_type == "URL":
            # This tag is an identified URL:
            # From the "identified_urls" list, get this URL and its
            # description string:
            url_string = identified_urls[0][0]
            url_desc = identified_urls[0][1]
            # Now move past this "<cds.URL />" tag in the line:
            processed_line = processed_line[tag_match_end:]
            # Delete the information for this URL from the start of the list
            # of identified URLs:
            identified_urls[0:1] = []
            # Save the current misc text
            identified_citation_element = {'type': "URL", 'misc_txt': "%s" % cur_misc_txt, 'url_string': "%s" % url_string, 'url_desc': "%s" % url_desc}
            count_url += 1
            cur_misc_txt = u""
        elif tag_type == "DOI":
            # This tag is an identified DOI:
            # From the "identified_dois" list, get this DOI and its
            # description string:
            doi_string = identified_dois[0]
            # Now move past this "<cds.CDS />" tag in the line:
            processed_line = processed_line[tag_match_end:]
            # Remove DOI from the list of DOI strings
            identified_dois[0:1] = []
            # SAVE the current misc text
            identified_citation_element = {'type': "DOI", 'misc_txt': "%s" % cur_misc_txt, 'doi_string': "%s" % doi_string}
            # Increment the stats counters:
            count_doi += 1
            cur_misc_txt = u""
        elif tag_type.find("AUTH") != -1:
            # This tag is an identified Author:
            auth_type = ""
            # extract the author group from the line (closing tag depends on
            # the author sub-type: standard / et-al / inclusive):
            if tag_type.find("stnd") != -1:
                auth_type = "stnd"
                idx_closing_tag_nearest = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND, tag_match_end)
            elif tag_type.find("etal") != -1:
                auth_type = "etal"
                idx_closing_tag_nearest = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL, tag_match_end)
            elif tag_type.find("incl") != -1:
                auth_type = "incl"
                idx_closing_tag_nearest = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL, tag_match_end)
            if idx_closing_tag_nearest == -1:
                # no closing </cds.AUTH****> tag found - strip the opening tag
                # and move past it
                processed_line = processed_line[tag_match_end:]
                identified_citation_element = None
            else:
                auth_txt = processed_line[tag_match_end:idx_closing_tag_nearest]
                # Now move past the ending tag in the line:
                processed_line = processed_line[idx_closing_tag_nearest + len("</cds.AUTHxxxx>"):]
                # SAVE the current misc text
                identified_citation_element = {'type': "AUTH", 'misc_txt': "%s" % cur_misc_txt, 'auth_txt': "%s" % auth_txt, 'auth_type': "%s" % auth_type}
                # Increment the stats counters:
                count_auth_group += 1
                cur_misc_txt = u""
        # These following tags may be found separately;
        # They are usually found when a "JOURNAL" tag is hit
        # (ONLY immediately afterwards, however)
        # Sitting by themselves means they do not have
        # an associated TITLE tag, and should be MISC
        elif tag_type == "SER":
            # This tag is a SERIES tag; Since it was not preceeded by a TITLE
            # tag, it is useless - strip the tag and put it into miscellaneous:
            (cur_misc_txt, processed_line) = convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_SERIES)
            identified_citation_element = None
        elif tag_type == "VOL":
            # This tag is a VOLUME tag; Since it was not preceeded by a TITLE
            # tag, it is useless - strip the tag and put it into miscellaneous:
            (cur_misc_txt, processed_line) = convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_VOLUME)
            identified_citation_element = None
        elif tag_type == "YR":
            # This tag is a YEAR tag; Since it's not preceeded by TITLE and
            # VOLUME tags, it is useless - strip the tag and put the contents
            # into miscellaneous:
            (cur_misc_txt, processed_line) = convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_YEAR)
            identified_citation_element = None
        elif tag_type == "PG":
            # This tag is a PAGE tag; Since it's not preceeded by TITLE,
            # VOLUME and YEAR tags, it is useless - strip the tag and put the
            # contents into miscellaneous:
            (cur_misc_txt, processed_line) = convert_unusable_tag_to_misc(processed_line, cur_misc_txt, tag_match_end, CFG_REFEXTRACT_MARKER_CLOSING_PAGE)
            identified_citation_element = None
        elif tag_type == "QUOTED":
            identified_citation_element, processed_line, cur_misc_txt = map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'title')
        elif tag_type == "ISBN":
            identified_citation_element, processed_line, cur_misc_txt = map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, tag_type)
        elif tag_type == "PUBLISHER":
            identified_citation_element, processed_line, cur_misc_txt = map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'publisher')
        elif tag_type == "COLLABORATION":
            identified_citation_element, processed_line, cur_misc_txt = map_tag_to_subfield(tag_type, processed_line[tag_match_end:], cur_misc_txt, 'collaboration')
        if identified_citation_element:
            # Append the found tagged data and current misc text
            citation_elements.append(identified_citation_element)
            identified_citation_element = None
        # Look for the next tag in the processed line:
        tag_match = re_tagged_citation.search(processed_line)
    # place any remaining miscellaneous text into the
    # appropriate MARC XML fields:
    cur_misc_txt += processed_line
    # This MISC element will hold the entire citation in the event
    # that no tags were found.
    if len(cur_misc_txt.strip(" .;,")) > 0:
        # Increment the stats counters:
        count_misc += 1
        identified_citation_element = {'type': "MISC", 'misc_txt': cur_misc_txt, }
        citation_elements.append(identified_citation_element)
    return (citation_elements, line_marker, {'misc': count_misc, 'title': count_title, 'reportnum': count_reportnum, 'url': count_url, 'doi': count_doi, 'auth_group': count_auth_group})
def _plot_posterior_op ( values , var_name , selection , ax , bw , linewidth , bins , kind , point_estimate , round_to , credible_interval , ref_val , rope , ax_labelsize , xt_labelsize , ** kwargs ) : # noqa : D202
"""Artist to draw posterior .""" | def format_as_percent ( x , round_to = 0 ) :
return "{0:.{1:d}f}%" . format ( 100 * x , round_to )
def display_ref_val ( ) :
if ref_val is None :
return
elif isinstance ( ref_val , dict ) :
val = None
for sel in ref_val . get ( var_name , [ ] ) :
if all ( k in selection and selection [ k ] == v for k , v in sel . items ( ) if k != "ref_val" ) :
val = sel [ "ref_val" ]
break
if val is None :
return
elif isinstance ( ref_val , Number ) :
val = ref_val
else :
raise ValueError ( "Argument `ref_val` must be None, a constant, or a " 'dictionary like {"var_name": {"ref_val": (lo, hi)}}' )
less_than_ref_probability = ( values < val ) . mean ( )
greater_than_ref_probability = ( values >= val ) . mean ( )
ref_in_posterior = "{} <{:g}< {}" . format ( format_as_percent ( less_than_ref_probability , 1 ) , val , format_as_percent ( greater_than_ref_probability , 1 ) , )
ax . axvline ( val , ymin = 0.05 , ymax = 0.75 , color = "C1" , lw = linewidth , alpha = 0.65 )
ax . text ( values . mean ( ) , plot_height * 0.6 , ref_in_posterior , size = ax_labelsize , color = "C1" , weight = "semibold" , horizontalalignment = "center" , )
def display_rope ( ) :
if rope is None :
return
elif isinstance ( rope , dict ) :
vals = None
for sel in rope . get ( var_name , [ ] ) :
if all ( k in selection and selection [ k ] == v for k , v in sel . items ( ) if k != "rope" ) :
vals = sel [ "rope" ]
break
if vals is None :
return
elif len ( rope ) == 2 :
vals = rope
else :
raise ValueError ( "Argument `rope` must be None, a dictionary like" '{"var_name": {"rope": (lo, hi)}}, or an' "iterable of length 2" )
ax . plot ( vals , ( plot_height * 0.02 , plot_height * 0.02 ) , lw = linewidth * 5 , color = "C2" , solid_capstyle = "round" , zorder = 0 , alpha = 0.7 , )
text_props = { "size" : ax_labelsize , "horizontalalignment" : "center" , "color" : "C2" }
ax . text ( vals [ 0 ] , plot_height * 0.2 , vals [ 0 ] , weight = "semibold" , ** text_props )
ax . text ( vals [ 1 ] , plot_height * 0.2 , vals [ 1 ] , weight = "semibold" , ** text_props )
def display_point_estimate ( ) :
if not point_estimate :
return
if point_estimate not in ( "mode" , "mean" , "median" ) :
raise ValueError ( "Point Estimate should be in ('mode','mean','median')" )
if point_estimate == "mean" :
point_value = values . mean ( )
elif point_estimate == "mode" :
if isinstance ( values [ 0 ] , float ) :
density , lower , upper = _fast_kde ( values , bw = bw )
x = np . linspace ( lower , upper , len ( density ) )
point_value = x [ np . argmax ( density ) ]
else :
point_value = mode ( values . round ( round_to ) ) [ 0 ] [ 0 ]
elif point_estimate == "median" :
point_value = np . median ( values )
point_text = "{}={:.{}f}" . format ( point_estimate , point_value , round_to )
ax . text ( point_value , plot_height * 0.8 , point_text , size = ax_labelsize , horizontalalignment = "center" , )
def display_hpd():
    # Draw the HPD (highest posterior density) interval as a black bar near
    # the x-axis, label both endpoints, and print the credible mass (e.g.
    # "94% HPD") above the bar's midpoint.
    # NOTE(review): relies on closure variables: values, credible_interval,
    # ax, plot_height, linewidth, round_to, ax_labelsize.
    hpd_intervals = hpd(values, credible_interval=credible_interval)
    ax.plot(hpd_intervals, (plot_height * 0.02, plot_height * 0.02), lw=linewidth * 2, color="k", solid_capstyle="round",)
    # Numeric endpoint labels, rounded for display.
    ax.text(hpd_intervals[0], plot_height * 0.07, hpd_intervals[0].round(round_to), size=ax_labelsize, horizontalalignment="center",)
    ax.text(hpd_intervals[1], plot_height * 0.07, hpd_intervals[1].round(round_to), size=ax_labelsize, horizontalalignment="center",)
    # Credible-mass caption centered over the interval.
    ax.text((hpd_intervals[0] + hpd_intervals[1]) / 2, plot_height * 0.3, format_as_percent(credible_interval) + " HPD", size=ax_labelsize, horizontalalignment="center",)
def format_axes():
    # Reduce the axes to a minimal "number line" style: no y-axis, only the
    # bottom spine visible, with small grey outward ticks.
    # NOTE(review): relies on closure variables: ax, xt_labelsize.
    ax.yaxis.set_ticks([])
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.spines["bottom"].set_visible(True)
    ax.xaxis.set_ticks_position("bottom")
    ax.tick_params(axis="x", direction="out", width=1, length=3, color="0.5", labelsize=xt_labelsize)
    ax.spines["bottom"].set_color("0.5")
if kind == "kde" and values . dtype . kind == "f" :
plot_kde ( values , bw = bw , fill_kwargs = { "alpha" : kwargs . pop ( "fill_alpha" , 0 ) } , plot_kwargs = { "linewidth" : linewidth } , ax = ax , rug = False , )
else :
if bins is None :
if values . dtype . kind == "i" :
xmin = values . min ( )
xmax = values . max ( )
bins = range ( xmin , xmax + 2 )
ax . set_xlim ( xmin - 0.5 , xmax + 0.5 )
else :
bins = "auto"
kwargs . setdefault ( "align" , "left" )
kwargs . setdefault ( "color" , "C0" )
ax . hist ( values , bins = bins , alpha = 0.35 , ** kwargs )
plot_height = ax . get_ylim ( ) [ 1 ]
format_axes ( )
display_hpd ( )
display_point_estimate ( )
display_ref_val ( )
display_rope ( ) |
def implied_loop_expr(expr, start, end, delta):
    """Evaluate an implied loop.

    Given the parameters of an implied loop -- the start and end values
    together with the delta per iteration -- return the list of values of
    ``expr`` applied to successive loop indices.

    Args:
        expr: one-argument callable applied to each loop index.
        start (int): first loop index (inclusive).
        end (int): last loop index (inclusive).
        delta (int): step per iteration; may be negative.

    Returns:
        list: ``expr`` applied to each index from ``start`` through ``end``.
    """
    # range() excludes its stop value, so widen the bound by one step in the
    # direction of travel to make `end` inclusive.
    stop = end + 1 if delta > 0 else end - 1
    # NOTE(review): the original wrapped the result in
    # list(itertools.chain(result_list)), which iterates the single list
    # unchanged -- it does NOT flatten nested results despite the old comment
    # claiming a "flattened list". The no-op wrapper is removed here; the
    # returned value is identical.
    return [expr(index) for index in range(start, stop, delta)]
def stop_codon_spliced_offsets(self):
    """Offsets, relative to the start of the spliced mRNA transcript,
    of the nucleotides making up the stop codon.
    """
    # Map each genomic stop-codon position onto the spliced transcript.
    stop_offsets = list(map(self.spliced_offset, self.stop_codon_positions))
    return self._contiguous_offsets(stop_offsets)
def word(self):
    """Property of the DigitWord returning (or setting) the DigitWord as a
    list of integers (or string representations) of DigitModel.

    For DIGIT words the stored integers are returned unchanged; otherwise
    each digit is rendered as its bare lowercase hexadecimal string.
    """
    if self.wordtype != DigitWord.DIGIT:
        # format(d, 'x') produces the hex digits without the '0x' prefix,
        # equivalent to str(hex(d)).replace('0x', '').
        return [format(digit, 'x') for digit in self._word]
    return self._word
def _check_valid ( key , val , valid ) :
"""Helper to check valid options""" | if val not in valid :
raise ValueError ( '%s must be one of %s, not "%s"' % ( key , valid , val ) ) |
def build_absolute_uri(request, relative_url):
    """Ensure absolute_uri are relative to WEBROOT."""
    prefix = getattr(settings, 'WEBROOT', '')
    # Drop the trailing slash when both sides would contribute one,
    # avoiding a doubled "//" in the joined path.
    doubled_slash = prefix.endswith("/") and relative_url.startswith("/")
    joined = (prefix[:-1] if doubled_slash else prefix) + relative_url
    return request.build_absolute_uri(joined)
def runGetVariantAnnotationSet(self, id_):
    """Runs a getVariantSet request for the specified ID."""
    # Decode the compound ID to locate the owning dataset and variant set.
    compound_id = datamodel.VariantAnnotationSetCompoundId.parse(id_)
    repo = self.getDataRepository()
    dataset = repo.getDataset(compound_id.dataset_id)
    variant_set = dataset.getVariantSet(compound_id.variant_set_id)
    annotation_set = variant_set.getVariantAnnotationSet(id_)
    return self.runGetRequest(annotation_set)
def choice(opts, default=1, text='Please make a choice.'):
    """Prompt the user to select an option.

    @param opts: List of options; each entry is either a plain value or a
        (key, value) tuple, in which case value is displayed and key returned
    @type  opts: list of tuple
    @param text: Prompt text
    @type  text: str
    """
    items = list(opts)
    # Render a 1-based numbered menu.
    for number, entry in enumerate(items, 1):
        label = entry[1] if isinstance(entry, tuple) else entry
        click.echo('[{k}] {o}'.format(k=number, o=label))
    click.echo('-' * 12)
    picked = click.prompt(text, default, type=click.IntRange(1, len(items)))
    chosen = items[picked - 1]
    # For (key, value) tuples, return the key; otherwise the value itself.
    return chosen[0] if isinstance(chosen, tuple) else chosen
def pick_monomials_up_to_degree(monomials, degree):
    """Collect all monomials of degree at most ``degree``, ordered by degree."""
    collected = []
    if degree >= 0:
        # Degree zero contributes only the constant monomial 1.
        collected.append(S.One)
    for current_degree in range(1, degree + 1):
        collected += pick_monomials_of_degree(monomials, current_degree)
    return collected
def _name_in_services(name, services):
    '''Checks to see if the given service is in the given services.

    :param str name: Service label, file name, or full path
    :param dict services: The currently available services.
    :return: The service information for the service, otherwise
             an empty dictionary
    :rtype: dict'''
    # Fast path: exact label match.
    try:
        return services[name]
    except KeyError:
        pass
    # Fall back to matching the full plist path or its basename.
    # NOTE(review): `name` appears to be expected in lowercase already.
    for info in six.itervalues(services):
        if info['file_path'].lower() == name:
            return info
        base = os.path.splitext(info['file_name'])[0]
        if base.lower() == name:
            return info
    return {}
def create_for_receipt(self, receipt, **kwargs):
    """Creates a ReceiptPDF object for a given receipt. Does not actually
    generate the related PDF file.

    All issuing attributes are copied from the ``TaxPayerProfile`` of the
    taxpayer owning the receipt's point of sales.

    :param Receipt receipt: The receipt for the PDF which will be
        generated.
    :raises exceptions.DjangoAfipException: if the taxpayer has no profile.
    """
    try:
        taxpayer_profile = TaxPayerProfile.objects.get(
            taxpayer__points_of_sales__receipts=receipt,
        )
    except TaxPayerProfile.DoesNotExist:
        raise exceptions.DjangoAfipException(
            'Cannot generate a PDF for taxpayer with no profile',
        )
    return ReceiptPDF.objects.create(
        receipt=receipt,
        issuing_name=taxpayer_profile.issuing_name,
        issuing_address=taxpayer_profile.issuing_address,
        issuing_email=taxpayer_profile.issuing_email,
        vat_condition=taxpayer_profile.vat_condition,
        gross_income_condition=taxpayer_profile.gross_income_condition,
        sales_terms=taxpayer_profile.sales_terms,
        **kwargs
    )
def _init_w_transforms(data, features, random_states, comm=MPI.COMM_SELF):
    """Initialize the mappings (Wi) for the SRM with random orthogonal matrices.

    Parameters
    ----------
    data : list of 2D arrays, element i has shape=[voxels_i, samples]
        Each element in the list contains the fMRI data of one subject.
        ``None`` entries denote subjects held on other MPI ranks.
    features : int
        The number of features in the model.
    random_states : list of `RandomState`s
        One `RandomState` instance per subject.
    comm : mpi4py.MPI.Intracomm
        The MPI communicator containing the data.

    Returns
    -------
    w : list of array, element i has shape=[voxels_i, features]
        The initialized orthogonal transforms (mappings) :math:`W_i` for
        each subject (``None`` for subjects not on this rank).
    voxels : list of int
        A list with the number of voxels per subject.

    Note
    ----
    This function assumes that the numpy random number generator was
    initialized. Not thread safe.
    """
    w = []
    n_subjects = len(data)
    voxels = np.empty(n_subjects, dtype=int)
    for idx, subject_data in enumerate(data):
        if subject_data is None:
            # Subject lives on another rank; allreduce fills in its voxel
            # count below.
            voxels[idx] = 0
            w.append(None)
        else:
            voxels[idx] = subject_data.shape[0]
            # Orthonormalize a random voxels-by-features matrix via QR.
            rnd = random_states[idx].random_sample((voxels[idx], features))
            q, _ = np.linalg.qr(rnd)
            w.append(q)
    # Combine per-rank voxel counts so every rank sees the full list.
    voxels = comm.allreduce(voxels, op=MPI.SUM)
    return w, voxels
def get_qemu_info(path, backing_chain=False, fail_on_error=True):
    """Get info on a given qemu disk.

    Args:
        path (str): Path to the required disk
        backing_chain (bool): if true, include also info about
            the image predecessors.
        fail_on_error (bool): whether a failing qemu-img call raises.

    Return:
        object: if backing_chain == True then a list of dicts else a dict
    """
    cmd = ['qemu-img', 'info', '--output=json']
    if backing_chain:
        cmd.append('--backing-chain')
    cmd.append(path)
    result = run_command_with_validation(
        cmd,
        fail_on_error,
        msg='Failed to get info for {}'.format(path),
    )
    return json.loads(result.out)
def _connectWithContextFactory(ctxFactory, workbench):
    """Connect using the given context factory. Notifications go to the
    given workbench.
    """
    endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
    # Show a splash screen while the connection attempt is in flight.
    splash = _Splash(u"Connecting", u"Connecting...")
    workbench.display(splash)
    connecting = endpoint.connect(Factory(workbench))

    def _closeSplash(result):
        # Always remove the splash, success or failure, and pass the
        # result through unchanged.
        workbench.undisplay()
        return result

    def _notifyFailure(failure):
        # Only connection errors are handled here; anything else
        # propagates.
        failure.trap(ConnectError)
        alertDeferred = alert(workbench, u"Couldn't connect", u"Connection failed! " "Check internet connection, or try again later.\n" "Error: {!r}".format(failure.value))
        # Once the user dismisses the alert, shut the reactor down.
        return alertDeferred.addCallback(lambda _result: reactor.stop())

    connecting.addBoth(_closeSplash)
    connecting.addErrback(_notifyFailure)
    return connecting
def morph(clm1, clm2, t, lmax):
    """Interpolate linearly between two sets of spherical-harmonic
    coefficients and return the blended surface as a list of 3-D points.
    """
    # Blend the coefficient sets: t=0 gives clm1, t=1 gives clm2.
    blended = (1 - t) * clm1 + t * clm2
    # Expanding with a finite lmax cuts the "high frequency" components.
    grid_reco = blended.expand(lmax=lmax)
    reco_array = grid_reco.to_array()
    pts = []
    for i, row in enumerate(reco_array):
        # Colatitude in (approximate) radians; 57.3 ~= degrees per radian.
        th = (90 - grid_reco.lats()[i]) / 57.3
        for j, value in enumerate(row):
            ph = grid_reco.lons()[j] / 57.3
            # rbias is a module-level radial offset added to every sample.
            r = value + rbias
            direction = np.array([sin(th) * cos(ph), sin(th) * sin(ph), cos(th)])
            pts.append(direction * r)
    return pts
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.