signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def reset(self):
    """Reset the estimates of mean and variance.

    Resets the full state of this class.

    Returns:
      Operation.
    """
    # Group the three assignments into one op so callers can run a single
    # reset target: count -> 0, mean and var_sum -> zeros of the same shape.
    with tf.name_scope(self._name + '/reset'):
        return tf.group(
            self._count.assign(0),
            self._mean.assign(tf.zeros_like(self._mean)),
            self._var_sum.assign(tf.zeros_like(self._var_sum)))
def _send_offset_fetch_request(self, partitions):
    """Fetch the committed offsets for a set of partitions.

    This is a non-blocking call. The returned future can be polled to get
    the actual offsets returned from the broker.

    Arguments:
        partitions (list of TopicPartition): the partitions to fetch

    Returns:
        Future: resolves to dict of offsets: {TopicPartition: int}
    """
    assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
    assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
    if not partitions:
        # Nothing to fetch: resolve immediately with an empty mapping.
        return Future().success({})
    node_id = self.coordinator()
    if node_id is None:
        return Future().failure(Errors.GroupCoordinatorNotAvailableError)
    # Verify node is ready
    if not self._client.ready(node_id):
        log.debug("Node %s not ready -- failing offset fetch request", node_id)
        return Future().failure(Errors.NodeNotReadyError)
    log.debug("Group %s fetching committed offsets for partitions: %s", self.group_id, partitions)
    # construct the request: the wire format wants partitions grouped by topic
    topic_partitions = collections.defaultdict(set)
    for tp in partitions:
        topic_partitions[tp.topic].add(tp.partition)
    # OffsetFetchRequest v1 requires broker API >= 0.8.2; older brokers
    # only understand v0.
    if self.config['api_version'] >= (0, 8, 2):
        request = OffsetFetchRequest[1](self.group_id, list(topic_partitions.items()))
    else:
        request = OffsetFetchRequest[0](self.group_id, list(topic_partitions.items()))
    # send the request with a callback; the inner future (_f) resolves with
    # the raw response, which is translated onto the outer future.
    future = Future()
    _f = self._client.send(node_id, request)
    _f.add_callback(self._handle_offset_fetch_response, future)
    _f.add_errback(self._failed_request, node_id, request, future)
    return future
def compile(self, source, path=None):
    """Compile source to a ready to run template.

    :param source:
        The template to compile - should be a unicode string
    :param path:
        Optional path used to derive a stable module name for the
        generated code; when omitted, a counter-based name is used.
    :return:
        A template function ready to execute
    """
    container = self._generate_code(source)

    def make_module_name(name, suffix=None):
        # Synthetic module name under the pybars._templates namespace.
        output = 'pybars._templates.%s' % name
        if suffix:
            output += '_%s' % suffix
        return output

    if not path:
        path = '_template'
        generate_name = True
    else:
        # Normalise the path into a single identifier-like token.
        path = path.replace('\\', '/')
        path = path.replace('/', '_')
        mod_name = make_module_name(path)
        generate_name = mod_name in sys.modules
    if generate_name:
        # Append an increasing counter until the module name is unused.
        mod_name = make_module_name(path, self.template_counter)
        while mod_name in sys.modules:
            self.template_counter += 1
            mod_name = make_module_name(path, self.template_counter)
    mod = ModuleType(mod_name)
    filename = '%s.py' % mod_name.replace('pybars.', '').replace('.', '/')
    # Execute the generated code inside the fresh module's namespace.
    exec(compile(container.full_code, filename, 'exec', dont_inherit=True), mod.__dict__)
    sys.modules[mod_name] = mod
    # Prime linecache so tracebacks can display the generated source.
    linecache.getlines(filename, mod.__dict__)
    return mod.__dict__[container.name]
def convert(cls, content, input_format, output_format):
    """Convert transcript `content` from `input_format` to `output_format`.

    Arguments:
        content: Transcript content byte-stream.
        input_format: Input transcript format.
        output_format: Output transcript format.
    Accepted input formats: sjson, srt.
    Accepted output format: srt, sjson.
    Raises:
        TranscriptsGenerationException: On parsing the invalid srt
            content during conversion from srt to sjson.
    """
    assert input_format in ('srt', 'sjson')
    assert output_format in ('srt', 'sjson')
    # Decode the content with utf-8-sig which will also
    # skip byte order mark (BOM) character if found.
    content = content.decode('utf-8-sig')
    if input_format == output_format:
        # Same format: nothing to convert, return the decoded text.
        return content
    if input_format == 'srt':
        if output_format == 'sjson':
            try:
                # With error handling (set to 'ERROR_RAISE'), we will be getting
                # the exception if something went wrong in parsing the transcript.
                srt_subs = SubRipFile.from_string(content, error_handling=SubRipFile.ERROR_RAISE)
            except Error as ex:  # Base exception from pysrt
                raise TranscriptsGenerationException(text_type(ex))
            return json.dumps(cls.generate_sjson_from_srt(srt_subs))
    if input_format == 'sjson':
        if output_format == 'srt':
            return cls.generate_srt_from_sjson(json.loads(content))
def parse(self, channel_id, payload):
    '''Parse a header frame for a channel given a Reader payload.'''
    class_id = payload.read_short()
    weight = payload.read_short()
    size = payload.read_longlong()
    properties = {}
    # The AMQP spec is overly-complex when it comes to handling header
    # frames. The spec says that in addition to the first 16bit field,
    # additional ones can follow which /may/ then be in the property list
    # (because bit flags aren't in the list). Properly implementing custom
    # values requires the ability change the properties and their types,
    # which someone is welcome to do, but seriously, what's the point?
    # Because the complexity of parsing and writing this frame directly
    # impacts the speed at which messages can be processed, there are two
    # branches for both a fast parse which assumes no changes to the
    # properties and a slow parse. For now it's up to someone using custom
    # headers to flip the flag.
    if self.DEFAULT_PROPERTIES:
        # Fast path: a single 16-bit flag word covers all known properties.
        flag_bits = payload.read_short()
        for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
            if flag_bits & mask:
                properties[key] = rfunc(payload)
    else:
        # Slow path: flag words chain while their low bit is set; walk the
        # property table consuming 15 flag bits per word (bit 0 is the
        # continuation marker).
        flags = []
        while True:
            flag_bits = payload.read_short()
            flags.append(flag_bits)
            if flag_bits & 1 == 0:
                break
        shift = 0
        for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
            if shift == 0:
                if not flags:
                    break
                flag_bits, flags = flags[0], flags[1:]
                shift = 15
            if flag_bits & (1 << shift):
                properties[key] = rfunc(payload)
            shift -= 1
    return HeaderFrame(channel_id, class_id, weight, size, properties)
def randmatrix(m, n, random_seed=None):
    """Create an ``m x n`` matrix of random values drawn using
    the Xavier Glorot method.

    Values are uniform in ``[-val, val]`` with ``val = sqrt(6 / (m + n))``.

    :param m: number of rows
    :param n: number of columns
    :param random_seed: optional int seed for reproducibility
    :return: ``np.ndarray`` of shape ``(m, n)``
    """
    val = np.sqrt(6.0 / (m + n))
    # Use a local RandomState instead of np.random.seed(), which mutates
    # NumPy's global RNG and silently affects unrelated code. For a given
    # seed, RandomState(seed).uniform produces the exact same stream as
    # np.random.seed(seed) followed by np.random.uniform.
    rng = np.random.RandomState(random_seed)
    return rng.uniform(-val, val, size=(m, n))
def cli(ctx, transcript=None, suppress_history=False, suppress_events=False, organism="", sequence=""):
    """[UNTESTED] Add a transcript to a feature

    Output:
        A standard apollo feature dictionary ({"features": [{...}]})
    """
    # A mutable default argument ({}) is evaluated once and shared across
    # calls; any mutation would leak into later invocations. Use None as
    # the default and create a fresh dict per call instead.
    if transcript is None:
        transcript = {}
    return ctx.gi.annotations.add_transcript(
        transcript=transcript,
        suppress_history=suppress_history,
        suppress_events=suppress_events,
        organism=organism,
        sequence=sequence)
def last_updated(self):
    '''Return the last_updated time of the current request item

    :return: A DateTime object
    :rettype: datetime.datetime
    '''
    key = self.get_key_from_request()
    max_age = self.get_max_age()
    if max_age == 0:
        # No caching configured for this item; report the server start time.
        return datetime.fromtimestamp(Storage.start_time)
    ttl = self.get_storage().ttl(key)
    if ttl >= 0:
        # Remaining TTL tells how long ago the item was stored:
        # age = max_age - ttl.
        return datetime.now() - timedelta(seconds=(max_age - ttl))
    if ttl == -1:  # Per Redis docs: -1 is no expiry, -2 is does not exists.
        return datetime.fromtimestamp(Storage.start_time)
    # Should never reach here. It means the storage put failed or the item
    # somehow does not exists anymore.
    return datetime.now()
def local_run():
    """Whether we should hit GCS dev appserver stub."""
    software = os.environ.get('SERVER_SOFTWARE')
    # No SERVER_SOFTWARE at all means we are running outside any server.
    if software is None:
        return True
    # The remote_api shim talks to production, not the local stub.
    if 'remote_api' in software:
        return False
    # Dev appserver and test utilities identify themselves with these prefixes.
    return software.startswith(('Development', 'testutil'))
def team_districts(self, team):
    """Get districts a team has competed in.

    :param team: Team to get data on.
    :return: List of District objects.
    """
    raw_districts = self._get('team/%s/districts' % self.team_key(team))
    return [District(item) for item in raw_districts]
def get_bel_stmts(self, filter=False):
    """Get relevant statements from the BEL large corpus.

    Performs a series of neighborhood queries and then takes the union of
    all the statements. Because the query process can take a long time for
    large gene lists, the resulting list of statements are cached in a
    pickle file with the filename `<basename>_bel_stmts.pkl`. If the
    pickle file is present, it is used by default; if not present, the
    queries are performed and the results are cached.

    Parameters
    ----------
    filter : bool
        If True, includes only those statements that exclusively mention
        genes in :py:attr:`gene_list`. Default is False. Note that the
        full (unfiltered) set of statements are cached.
        (NOTE: the name shadows the builtin ``filter``, but renaming it
        would break keyword callers, so it is kept.)

    Returns
    -------
    list of :py:class:`indra.statements.Statement`
        List of INDRA statements extracted from the BEL large corpus.
    """
    if self.basename is not None:
        bel_stmt_path = '%s_bel_stmts.pkl' % self.basename
    # Check for cached BEL stmt file
    if self.basename is not None and os.path.isfile(bel_stmt_path):
        logger.info("Loading BEL statements from %s" % bel_stmt_path)
        with open(bel_stmt_path, 'rb') as f:
            bel_statements = pickle.load(f)
    # No cache, so perform the queries
    else:
        bel_proc = bel.process_pybel_neighborhood(self.gene_list, network_file=self.bel_corpus)
        bel_statements = bel_proc.statements
        # Save to pickle file if we're caching
        if self.basename is not None:
            with open(bel_stmt_path, 'wb') as f:
                pickle.dump(bel_statements, f)
    # Optionally filter out statements not involving only our gene set
    if filter:
        if len(self.gene_list) > 1:
            bel_statements = ac.filter_gene_list(bel_statements, self.gene_list, 'all')
    return bel_statements
def get_service(self, bundle, reference):
    # type: (Any, ServiceReference) -> Any
    """Retrieves the service corresponding to the given reference

    :param bundle: The bundle requiring the service
    :param reference: A service reference
    :return: The requested service
    :raise BundleException: The service could not be found
    """
    with self.__svc_lock:
        if reference.is_factory():
            # Factory references build/look up an instance per bundle.
            return self.__get_service_from_factory(bundle, reference)
        # Be sure to have the instance
        try:
            service = self.__svc_registry[reference]
            # Indicate the dependency: bump this bundle's usage counter
            # for the reference and record the bundle on the reference.
            imports = self.__bundle_imports.setdefault(bundle, {})
            imports.setdefault(reference, _UsageCounter()).inc()
            reference.used_by(bundle)
            return service
        except KeyError:
            # Not found
            raise BundleException("Service not found (reference: {0})".format(reference))
def hill_i(self, x, threshold=0.1, power=2):
    """Inhibiting hill function.

    Equivalent to ``1 - hill_a(self, x, power, threshold)``: returns 1 at
    ``x == 0`` and decays towards 0 as ``x`` grows past ``threshold``.
    """
    numerator = np.power(threshold, power)
    return numerator / (np.power(x, power) + numerator)
def produce_fake_hash(x):
    """Produce random, binary features, totally irrespective of the content
    of x, but in the same shape as x."""
    n_samples = x.shape[0]
    # 1024 random bits per sample, packed into 64-bit words.
    bits = np.random.binomial(1, 0.5, (n_samples, 1024))
    packed = np.packbits(bits, axis=-1).view(np.uint64)
    dims = [x.dimensions[0], zounds.IdentityDimension()]
    return zounds.ArrayWithUnits(packed, dims)
def register(self, src, trg, trg_mask=None, src_mask=None):
    """Pair-wise registration using thunder-registration.

    Takes two 2D single-channel images and estimates the 2D translation
    that best aligns the pair by maximising the correlation of the images'
    Fourier transforms (see https://github.com/thunder-project/thunder-registration).

    :param src: 2D single channel source moving image
    :param trg: 2D single channel target reference image
    :param src_mask: Mask of source image. Not used in this method.
    :param trg_mask: Mask of target image. Not used in this method.
    :return: Estimated 2D transformation matrix of shape 2x3
    """
    # Estimate the (row, col) shift between the pair of images.
    cross_corr = registration.CrossCorr()
    model = cross_corr.fit(src, reference=trg)
    # Negate the model's shift to get the src -> trg translation.
    shift = model.toarray().tolist()[0]
    dy, dx = -shift[0], -shift[1]
    # Build an affine 2x3 matrix carrying only the translation part.
    warp_matrix = np.eye(2, 3)
    warp_matrix[0, 2] = dx
    warp_matrix[1, 2] = dy
    return warp_matrix
def handleImplicitCheck(self):
    """Checkboxes are hidden when inside of a RadioGroup as a selection of
    the Radio button is an implicit selection of the Checkbox. As such, we
    have to manually "check" any checkbox as needed."""
    for radio, widget in zip(self.radioButtons, self.widgets):
        if not isinstance(widget, CheckBox):
            continue
        # Mirror the radio button's state onto its hidden checkbox.
        widget.setValue(bool(radio.GetValue()))
def get_top_albums(self, limit=None, cacheable=True):
    """Returns a list of the top albums."""
    request_params = self._get_params()
    # Only forward the limit when it is truthy, matching the API default.
    if limit:
        request_params["limit"] = limit
    return self._get_things("getTopAlbums", "album", Album, request_params, cacheable)
def loads(self, msg, encoding=None, raw=False):
    '''Run the correct loads serialization format

    :param encoding: Useful for Python 3 support. If the msgpack data
                     was encoded using "use_bin_type=True", this will
                     differentiate between the 'bytes' type and the
                     'str' type by decoding contents with 'str' type
                     to what the encoding was set as. Recommended
                     encoding is 'utf-8' when using Python 3.
                     If the msgpack data was not encoded using
                     "use_bin_type=True", it will try to decode
                     all 'bytes' and 'str' data (the distinction has
                     been lost in this case) to what the encoding is
                     set as. In this case, it will fail if any of
                     the contents cannot be converted.
    '''
    try:
        def ext_type_decoder(code, data):
            # Ext type code 78 carries a datetime serialized as
            # '%Y%m%dT%H:%M:%S.%f'; everything else passes through.
            if code == 78:
                data = salt.utils.stringutils.to_unicode(data)
                return datetime.datetime.strptime(data, '%Y%m%dT%H:%M:%S.%f')
            return data
        gc.disable()
        # performance optimization for msgpack
        if msgpack.version >= (0, 4, 0):
            # msgpack only supports 'encoding' starting in 0.4.0.
            # Due to this, if we don't need it, don't pass it at all so
            # that under Python 2 we can still work with older versions
            # of msgpack.
            try:
                ret = salt.utils.msgpack.loads(msg, use_list=True, ext_hook=ext_type_decoder, encoding=encoding, _msgpack_module=msgpack)
            except UnicodeDecodeError:
                # msg contains binary data
                ret = msgpack.loads(msg, use_list=True, ext_hook=ext_type_decoder)
        else:
            ret = salt.utils.msgpack.loads(msg, use_list=True, ext_hook=ext_type_decoder, _msgpack_module=msgpack)
        if six.PY3 and encoding is None and not raw:
            ret = salt.transport.frame.decode_embedded_strs(ret)
    except Exception as exc:
        log.critical('Could not deserialize msgpack message. This often happens ' 'when trying to read a file not in binary mode. ' 'To see message payload, enable debug logging and retry. ' 'Exception: %s', exc)
        log.debug('Msgpack deserialization failure on message: %s', msg)
        gc.collect()
        raise
    finally:
        # Always re-enable gc, including on the raise path above.
        gc.enable()
    return ret
def _get_example_values(self, route: str, annotation: ResourceAnnotation) -> Dict[str, Any]:
    """Gets example values for all properties in the annotation's schema.

    :param route: The route to get example values for.
    :type route: werkzeug.routing.Rule for a flask api.
    :param annotation: Schema annotation for the method to be requested.
    :type annotation: doctor.resource.ResourceAnnotation
    :returns: A dict containing property names as keys and example values
        as values.
    """
    defined_values = self.defined_example_values.get((annotation.http_method.lower(), str(route)))
    # An explicitly defined entry with update=False completely replaces
    # the generated examples.
    if defined_values and not defined_values['update']:
        return defined_values['values']
    # If we defined a req_obj_type for the logic, use that type's
    # example values instead of the annotated parameters.
    if annotation.logic._doctor_req_obj_type:
        values = annotation.logic._doctor_req_obj_type.get_example()
    else:
        values = {k: v.annotation.get_example() for k, v in annotation.annotated_parameters.items()}
    # With update=True, defined values overlay the generated ones.
    if defined_values:
        values.update(defined_values['values'])
    # If this is a GET route, we need to json dumps any parameters that
    # are lists or dicts. Otherwise we'll get a 400 error for those params
    if annotation.http_method == 'GET':
        for k, v in values.items():
            if isinstance(v, (list, dict)):
                values[k] = json.dumps(v)
    return values
def check_validation_level(validation_level):
    """Validate the given validation level

    :type validation_level: ``int``
    :param validation_level: validation level (see :class:`hl7apy.consts.VALIDATION_LEVEL`)
    :raises: :exc:`hl7apy.exceptions.UnknownValidationLevel` if the given validation level is unsupported
    """
    supported_levels = (
        VALIDATION_LEVEL.QUIET,
        VALIDATION_LEVEL.STRICT,
        VALIDATION_LEVEL.TOLERANT,
    )
    if validation_level not in supported_levels:
        raise UnknownValidationLevel
def result(self) -> workflow.IntervalGeneratorType:
    """Generate intervals indicating the valid sentences.

    Yields (start, end) index pairs. Works as a two-step state machine:
    (1) scan forward for the first valid sentence character (start),
    (2) scan forward for the sentence ending (end), then repeat.
    """
    config = cast(SentenceSegementationConfig, self.config)
    # index/labels hold the most recent item pulled from the generator;
    # (-1, None) means "pull a fresh item on the next check".
    index = -1
    labels = None
    while True:
        # 1. Find the start of the sentence.
        start = -1
        while True:
            # Check the ``labels`` generated from step (2).
            if labels is None:
                # https://www.python.org/dev/peps/pep-0479/
                # (generators must return, not leak StopIteration)
                try:
                    index, labels = next(self.index_labels_generator)
                except StopIteration:
                    return
            # Check if we found a valid sentence char.
            if labels[SentenceValidCharacterLabeler]:
                start = index
                break
            # Trigger next(...) action.
            labels = None
            index = -1
        # 2. Find the ending.
        end = -1
        try:
            while True:
                index, labels = next(self.index_labels_generator)
                # Detected invalid char.
                if config.enable_strict_sentence_charset and not labels[SentenceValidCharacterLabeler] and not labels[WhitespaceLabeler]:
                    end = index
                    break
                # Detected sentence ending.
                if self._labels_indicate_sentence_ending(labels):
                    # Consume the ending span (consecutive ending chars,
                    # optionally extended with delimiters).
                    while True:
                        index, labels = next(self.index_labels_generator)
                        is_ending = (self._labels_indicate_sentence_ending(labels) or (config.extend_ending_with_delimiters and labels[DelimitersLabeler]))
                        if not is_ending:
                            end = index
                            break
                    # yeah we found the ending.
                    break
        except StopIteration:
            # Input exhausted: the sentence runs to the end of the sequence.
            end = len(self.input_sequence)
        # Trigger next(...) action.
        labels = None
        index = -1
        yield start, end
async def destroy_attachment(self, a: Attachment):
    """destroy a match attachment

    |methcoro|

    Args:
        a: the attachment you want to destroy

    Raises:
        APIException
    """
    url = 'tournaments/{}/matches/{}/attachments/{}'.format(self._tournament_id, self._id, a._id)
    await self.connection('DELETE', url)
    # Drop the attachment from the local cache once the server confirms.
    if a in self.attachments:
        self.attachments.remove(a)
def Overlay_highlightNode(self, highlightConfig, **kwargs):
    """Function path: Overlay.highlightNode

    Highlights DOM node with given id or with the given JavaScript object
    wrapper. Either nodeId or objectId must be specified.

    Required arguments:
        'highlightConfig' (type: HighlightConfig) -> A descriptor for the highlight appearance.
    Optional arguments:
        'nodeId' (type: DOM.NodeId) -> Identifier of the node to highlight.
        'backendNodeId' (type: DOM.BackendNodeId) -> Identifier of the backend node to highlight.
        'objectId' (type: Runtime.RemoteObjectId) -> JavaScript object id of the node to be highlighted.
    No return value.
    """
    expected = ['nodeId', 'backendNodeId', 'objectId']
    passed_keys = list(kwargs.keys())
    # Reject any keyword argument the protocol does not know about.
    assert all(key in expected for key in passed_keys), "Allowed kwargs are ['nodeId', 'backendNodeId', 'objectId']. Passed kwargs: %s" % passed_keys
    return self.synchronous_command('Overlay.highlightNode', highlightConfig=highlightConfig, **kwargs)
def attach_run_command(cmd):
    """Run a command when attaching

    Please do not call directly, this will execvp the command.
    This is to be used in conjunction with the attach method
    of a container.
    """
    # Normalise cmd into the (program, argv) tuple _lxc expects.
    if isinstance(cmd, tuple):
        payload = cmd
    elif isinstance(cmd, list):
        payload = (cmd[0], cmd)
    else:
        payload = (cmd, [cmd])
    return _lxc.attach_run_command(payload)
def _xml_tag_filter ( s : str , strip_namespaces : bool ) -> str :
"""Returns tag name and optionally strips namespaces .
: param el : Element
: param strip _ namespaces : Strip namespace prefix
: return : str""" | if strip_namespaces :
ns_end = s . find ( '}' )
if ns_end != - 1 :
s = s [ ns_end + 1 : ]
else :
ns_end = s . find ( ':' )
if ns_end != - 1 :
s = s [ ns_end + 1 : ]
return s |
def ensure_newline(self, n):
    """Make sure there are 'n' line breaks at the end."""
    assert n >= 0
    stripped = self._output.getvalue().rstrip('\n')
    # An all-newline (or empty) buffer is left untouched.
    if not stripped:
        return
    # Rebuild the buffer: content without trailing newlines, then exactly n.
    rebuilt = StringIO()
    rebuilt.write(stripped)
    rebuilt.write('\n' * n)
    self._output = rebuilt
    result = self._output.getvalue()
    # Sanity-check the invariant: exactly n trailing newlines.
    assert result[-n - 1] != '\n'
    assert result[-n:] == '\n' * n
def fd_to_td(htilde, delta_t=None, left_window=None, right_window=None, left_beta=8, right_beta=8):
    """Converts a FD waveform to TD.

    A window can optionally be applied using ``fd_taper`` to the left or
    right side of the waveform before being converted to the time domain.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to convert.
    delta_t : float, optional
        Make the returned time series have the given ``delta_t``.
    left_window : tuple of float, optional
        (start, end) frequency of the FD taper applied on the left side.
        If None, no taper is applied on the left.
    right_window : tuple of float, optional
        (start, end) frequency of the FD taper applied on the right side.
        If None, no taper is applied on the right.
    left_beta : int, optional
        The beta parameter to use for the left taper. See ``fd_taper``
        for details. Default is 8.
    right_beta : int, optional
        The beta parameter to use for the right taper. Default is 8.

    Returns
    -------
    TimeSeries
        The time-series representation of ``htilde``.
    """
    # Apply the optional tapers, one side at a time.
    for window, side, beta in ((left_window, 'left', left_beta),
                               (right_window, 'right', right_beta)):
        if window is not None:
            start, end = window
            htilde = fd_taper(htilde, start, end, side=side, beta=beta)
    return htilde.to_timeseries(delta_t=delta_t)
def fasta_stats(self):
    """Parse the lengths of all contigs for each sample, as well as the total GC%"""
    for sample in self.metadata:
        # Initialise variables to store appropriate values parsed from contig records
        contig_lengths = list()
        fasta_sequence = str()
        for contig, record in sample[self.analysistype].record_dict.items():
            # Append the length of the contig to the list
            contig_lengths.append(len(record.seq))
            # Add the contig sequence to the string
            fasta_sequence += record.seq
        # Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
        sample[self.analysistype].contig_lengths = sorted(contig_lengths, reverse=True)
        try:
            # Calculate the GC% of the total genome sequence using GC - format to have two decimal places
            sample[self.analysistype].gc = float('{:0.2f}'.format(GC(fasta_sequence)))
        except TypeError:
            # GC() can fail on the accumulated sequence (e.g. no records);
            # record 'NA' rather than aborting the whole sample loop.
            sample[self.analysistype].gc = 'NA'
def _set_error_handler_callbacks(self, app):
    """Sets the error handler callbacks used by this extension.

    Registers a Flask error handler for every JWT-related exception type,
    each of which delegates to the corresponding configurable callback on
    this extension instance.
    """
    @app.errorhandler(NoAuthorizationError)
    def handle_auth_error(e):
        return self._unauthorized_callback(str(e))

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        return self._unauthorized_callback(str(e))

    @app.errorhandler(ExpiredSignatureError)
    def handle_expired_error(e):
        try:
            token = ctx_stack.top.expired_jwt
            return self._expired_token_callback(token)
        except TypeError:
            # Old-style callbacks take no arguments; warn and fall back.
            msg = ("jwt.expired_token_loader callback now takes the expired token " "as an additional paramter. Example: expired_callback(token)")
            warn(msg, DeprecationWarning)
            return self._expired_token_callback()

    @app.errorhandler(InvalidHeaderError)
    def handle_invalid_header_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(InvalidTokenError)
    def handle_invalid_token_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(JWTDecodeError)
    def handle_jwt_decode_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(WrongTokenError)
    def handle_wrong_token_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(InvalidAudienceError)
    def handle_invalid_audience_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(RevokedTokenError)
    def handle_revoked_token_error(e):
        return self._revoked_token_callback()

    @app.errorhandler(FreshTokenRequired)
    def handle_fresh_token_required(e):
        return self._needs_fresh_token_callback()

    @app.errorhandler(UserLoadError)
    def handler_user_load_error(e):
        # The identity is already saved before this exception was raised,
        # otherwise a different exception would be raised, which is why we
        # can safely call get_jwt_identity() here
        identity = get_jwt_identity()
        return self._user_loader_error_callback(identity)

    @app.errorhandler(UserClaimsVerificationError)
    def handle_failed_user_claims_verification(e):
        return self._verify_claims_failed_callback()
def set_defaults(self, default_config_params):
    """Set default values from specified ConfigParams and return a new ConfigParams object.

    :param default_config_params: ConfigMap with default parameter values.
    :return: a new ConfigParams object.
    """
    # Named 'merged' rather than 'map' to avoid shadowing the builtin.
    merged = StringValueMap.from_maps(default_config_params, self)
    return ConfigParams(merged)
def publish(self, topic, message):
    """Publish an MQTT message to a topic.

    :param topic: Topic string to publish to.
    :param message: Message payload passed to the MQTT client.
    """
    # Ensure we are connected before publishing.
    self.connect()
    # Lazy %-style logging: the string is only built if INFO is enabled.
    log.info('publish %s', message)
    self.client.publish(topic, message)
def show(self, rows: int = 5, dataframe: pd.DataFrame = None) -> pd.DataFrame:
    """Display info about the dataframe

    :param rows: number of rows to show, defaults to 5
    :param rows: int, optional
    :param dataframe: a pandas dataframe, defaults to None
    :param dataframe: pd.DataFrame, optional
    :return: a pandas dataframe
    :rtype: pd.DataFrame

    :example: ``ds.show()``
    """
    try:
        # Use the explicit dataframe if given, otherwise the instance's own.
        if dataframe is not None:
            df = dataframe
        else:
            df = self.df
        if df is None:
            self.warning("Dataframe is empty: nothing to show")
            return
        num = len(df.columns.values)
    except Exception as e:
        self.err(e, self.show, "Can not show dataframe")
        return
    # Build a comma-separated listing of the column names.
    f = list(df)
    fds = []
    for fi in f:
        fds.append(str(fi))
    fields = ", ".join(fds)
    num_rows = len(df.index)
    self.info("The dataframe has", colors.bold(num_rows), "rows and", colors.bold(num), "columns:")
    print(fields)
    return df.head(rows)
def bootstrap():
    '''Patches the 'site' module such that the bootstrap functions for
    registering the post import hook callback functions are called as
    the last thing done when initialising the Python interpreter. This
    function would normally be called from the special '.pth' file.
    '''
    global _patched
    # Idempotence guard: patch the 'site' module at most once per process.
    if _patched:
        return
    _patched = True
    # We want to do our real work as the very last thing in the 'site'
    # module when it is being imported so that the module search path is
    # initialised properly. What is the last thing executed depends on
    # whether the 'usercustomize' module support is enabled. Support for
    # the 'usercustomize' module will not be enabled in Python virtual
    # enviromments. We therefore wrap the functions for the loading of
    # both the 'sitecustomize' and 'usercustomize' modules but detect
    # when 'usercustomize' support is disabled and in that case do what
    # we need to after the 'sitecustomize' module is loaded.
    # In wrapping these functions though, we can't actually use wrapt to
    # do so. This is because depending on how wrapt was installed it may
    # technically be dependent on '.pth' evaluation for Python to know
    # where to import it from. The addition of the directory which
    # contains wrapt may not yet have been done. We thus use a simple
    # function wrapper instead.
    site.execsitecustomize = _execsitecustomize_wrapper(site.execsitecustomize)
    site.execusercustomize = _execusercustomize_wrapper(site.execusercustomize)
def get_authorization_session_for_vault(self, vault_id):
    """Gets an ``AuthorizationSession`` which is responsible for performing
    authorization checks for the given vault.

    arg:    vault_id (osid.id.Id): the ``Id`` of the vault
    return: (osid.authorization.AuthorizationSession) - ``an
            _authorization_session``
    raise:  NotFound - ``vault_id``
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_authorization()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_authorization()`` and ``supports_visible_federation()``
    are ``true``.*
    """
    if self.supports_authorization():
        # Also include check to see if the catalog Id is found otherwise
        # raise errors.NotFound
        # pylint: disable=no-member
        return sessions.AuthorizationSession(vault_id, runtime=self._runtime)
    raise errors.Unimplemented()
def gevent_start(self):
    """Helper method to start the node for gevent-based applications."""
    # Imported lazily so gevent is only required when this helper is used.
    import gevent
    import gevent.select
    # Run the poll loop in its own greenlet and swap in gevent's
    # cooperative select so polling does not block other greenlets.
    self._poller_greenlet = gevent.spawn(self.poll)
    self._select = gevent.select.select
    self.heartbeat()
    self.update()
def Run(self, unused_arg):
    """This kills us with no cleanups."""
    logging.debug("Disabling service")
    # Set the nanny service's start type to DISABLED so it will not be
    # started again.
    win32serviceutil.ChangeServiceConfig(None, config.CONFIG["Nanny.service_name"], startType=win32service.SERVICE_DISABLED)
    # Re-query the service config to confirm the change took effect;
    # index 1 of the returned tuple is the start type.
    svc_config = QueryService(config.CONFIG["Nanny.service_name"])
    if svc_config[1] == win32service.SERVICE_DISABLED:
        logging.info("Disabled service successfully")
        self.SendReply(rdf_protodict.DataBlob(string="Service disabled."))
    else:
        self.SendReply(rdf_protodict.DataBlob(string="Service failed to disable."))
def add_where_when(voevent, coords, obs_time, observatory_location,
                   allow_tz_naive_datetime=False):
    """Add details of an observation to the WhereWhen section.

    Args:
        voevent (:class:`Voevent`): Root node of a VOEvent etree.
        coords (:class:`.Position2D`): Sky co-ordinates of event.
        obs_time (datetime.datetime): Nominal DateTime of the observation.
            Must either be timezone-aware, or should be carefully verified
            as representing UTC and then set parameter
            ``allow_tz_naive_datetime=True``.
        observatory_location (str): Telescope locale, e.g. 'La Palma'.
            May be a generic location as listed under
            :class:`voeventparse.definitions.observatory_location`.
        allow_tz_naive_datetime (bool): (Default False). Accept
            timezone-naive datetime-timestamps. See comments for
            ``obs_time``.

    Raises:
        ValueError: if ``obs_time`` is tz-naive and
            ``allow_tz_naive_datetime`` is False.
    """
    if obs_time.tzinfo is not None:
        # Normalize to a tz-naive UTC timestamp for ISO serialization.
        utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None)
    elif not allow_tz_naive_datetime:
        # Bugfix: the message previously told callers to pass a
        # non-existent parameter 'allow_tz_naive_obstime'.
        raise ValueError(
            "Datetime passed without tzinfo, cannot be sure if it is really a "
            "UTC timestamp. Please verify function call and either add tzinfo "
            "or pass parameter 'allow_tz_naive_datetime=True', as appropriate")
    else:
        utc_naive_obs_time = obs_time
    obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation')
    etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location)
    ol = etree.SubElement(obs_data, 'ObservationLocation')
    etree.SubElement(ol, 'AstroCoordSystem', id=coords.system)
    ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system)
    time = etree.SubElement(ac, 'Time', unit='s')
    instant = etree.SubElement(time, 'TimeInstant')
    # objectify-style attribute assignment creates the child element.
    instant.ISOTime = utc_naive_obs_time.isoformat()
    pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units)
    pos2d.Name1 = 'RA'
    pos2d.Name2 = 'Dec'
    pos2d_val = etree.SubElement(pos2d, 'Value2')
    pos2d_val.C1 = coords.ra
    pos2d_val.C2 = coords.dec
    pos2d.Error2Radius = coords.err
def _normalize_abmn ( abmn ) :
"""return a normalized version of abmn""" | abmn_2d = np . atleast_2d ( abmn )
abmn_normalized = np . hstack ( ( np . sort ( abmn_2d [ : , 0 : 2 ] , axis = 1 ) , np . sort ( abmn_2d [ : , 2 : 4 ] , axis = 1 ) , ) )
return abmn_normalized |
def apply(query, replacements=None, vars=None, allow_io=False,
          libs=("stdcore", "stdmath")):
    """Run 'query' on 'vars' and return the result(s).

    Arguments:
        query: A query object or string with the query.
        replacements: Build-time parameters to the query, either as dict or
            as an array (for positional interpolation).
        vars: The variables to be supplied to the query solver.
        allow_io: (Default: False) Include 'stdio' and allow IO functions.
        libs: Iterable of library modules to include, given as strings.
            'stdcore' must always be included. Including 'stdio' must be
            done in conjunction with 'allow_io'; 'allow_io' already implies
            'stdio', so adding it to libs is not required.

    Notes on IO: if allow_io is True the EFILTER query may read files from
    disk; lazily-evaluated results keep their file descriptors open until
    deallocated, and the caller is responsible for releasing them.

    Returns:
        The result of evaluating the query; its type can be predicted with
        'infer'. A SELECT query returns an iterable of filtered data
        (an IRepeated implementing __iter__).

    Raises:
        efilter.errors.EfilterError if there are issues with the query.
        ValueError on invalid library configuration.

    Examples:
        apply("5 + 5")  # -> 10
        apply("SELECT * FROM people WHERE name = ?", replacements=["Bob"])
    """
    if vars is None:
        vars = {}
    if allow_io:
        libs = list(libs)
        libs.append("stdio")
    query = q.Query(query, params=replacements)
    stdcore_included = False
    for lib in libs:
        if lib == "stdcore":
            stdcore_included = True
            # 'solve' always includes stdcore automatically - we don't have
            # a say in the matter.
            continue
        if lib == "stdio" and not allow_io:
            raise ValueError("Attempting to include 'stdio' but IO not "
                             "enabled. Pass allow_io=True.")
        module = std_core.LibraryModule.ALL_MODULES.get(lib)
        # Bugfix: previously tested 'lib' (always truthy here) instead of
        # 'module', so unknown library names slipped through and pushed a
        # None module onto the scope stack.
        if not module:
            raise ValueError("There is no standard library module %r." % lib)
        vars = scope.ScopeStack(module, vars)
    if not stdcore_included:
        raise ValueError("EFILTER cannot work without standard lib 'stdcore'.")
    results = solve.solve(query, vars).value
    return results
def hold_sync(self):
    """Hold syncing any state until the outermost context manager exits.

    Generator intended for use as a re-entrant context manager: nested
    uses are no-ops while a hold is active; the outermost exit flushes
    all accumulated states with a single ``send_state`` call.
    """
    if self._holding_sync is True:
        # Nested use: the outermost holder performs the flush.
        yield
    else:
        try:
            self._holding_sync = True
            yield
        finally:
            # Always release the hold and flush, even if the body raised.
            self._holding_sync = False
            self.send_state(self._states_to_send)
            self._states_to_send.clear()
def _initialize ( self , * args , ** kwargs ) :
"""Initiaize the mapping matcher with constructor arguments .""" | self . items = None
self . keys = None
self . values = None
if args :
if len ( args ) != 2 :
raise TypeError ( "expected exactly two positional arguments, " "got %s" % len ( args ) )
if kwargs :
raise TypeError ( "expected positional or keyword arguments, not both" )
# got positional arguments only
self . keys , self . values = map ( self . _validate_argument , args )
elif kwargs :
has_kv = 'keys' in kwargs and 'values' in kwargs
has_of = 'of' in kwargs
if not ( has_kv or has_of ) :
raise TypeError ( "expected keys/values or items matchers, " "but got: %s" % list ( kwargs . keys ( ) ) )
if has_kv and has_of :
raise TypeError ( "expected keys & values, or items matchers, not both" )
if has_kv : # got keys = and values = matchers
self . keys = self . _validate_argument ( kwargs [ 'keys' ] )
self . values = self . _validate_argument ( kwargs [ 'values' ] )
else : # got of = matcher , which can be a tuple of matchers ,
# or a single matcher for dictionary items
of = kwargs [ 'of' ]
if isinstance ( of , tuple ) :
try : # got of = as tuple of matchers
self . keys , self . values = map ( self . _validate_argument , of )
except ValueError :
raise TypeError ( "of= tuple has to be a pair of matchers/types" % ( self . __class__ . __name__ , ) )
else : # got of = as a single matcher
self . items = self . _validate_argument ( of ) |
def time_since(self, mtype):
    """Return seconds elapsed since the last message of type ``mtype``.

    When no message of that type has been received yet, returns the
    seconds elapsed since ``self.start_time`` instead.
    """
    now = time.time()
    if mtype not in self.messages:
        return now - self.start_time
    return now - self.messages[mtype]._timestamp
def dequeue(self) -> Tuple[int, TItem]:
    """Removes and returns an item from the priority queue.

    Returns:
        A tuple whose first element is the priority of the dequeued item
        and whose second element is the dequeued item.

    Raises:
        ValueError: The queue is empty.
    """
    if self._len == 0:
        raise ValueError('BucketPriorityQueue is empty.')
    # Drop empty buckets at the front of the queue. _offset tracks the
    # priority that bucket 0 currently corresponds to.
    while self._buckets and not self._buckets[0]:
        self._buckets.pop(0)
        self._offset += 1
    # Pull item out of the front bucket (FIFO within equal priority).
    item = self._buckets[0].pop(0)
    priority = self._offset
    self._len -= 1
    if self._drop_set is not None:
        self._drop_set.remove((priority, item))
    # Note: do not eagerly clear out empty buckets after pulling the item!
    # Doing so increases the worst case complexity of "monotonic" use from
    # O(N+P) to O(N*P).
    return priority, item
def file(input_file, light=False):
    """Import a colorscheme from a JSON file and return the parsed theme.

    ``input_file`` may be a bare theme name, one of the special values
    'random'/'random_dark'/'random_light', or a direct path to a file.
    Exits the process with status 1 when no theme file can be found.
    """
    util.create_dir(os.path.join(CONF_DIR, "colorschemes/light/"))
    util.create_dir(os.path.join(CONF_DIR, "colorschemes/dark/"))
    theme_name = ".".join((input_file, "json"))
    bri = "light" if light else "dark"
    user_theme_file = os.path.join(CONF_DIR, "colorschemes", bri, theme_name)
    theme_file = os.path.join(MODULE_DIR, "colorschemes", bri, theme_name)
    # Find the theme file. Precedence: random selection, then the user's
    # config dir, then a literal path, falling back to the bundled themes.
    if input_file in ("random", "random_dark"):
        theme_file = get_random_theme()
    elif input_file == "random_light":
        theme_file = get_random_theme(light)
    elif os.path.isfile(user_theme_file):
        theme_file = user_theme_file
    elif os.path.isfile(input_file):
        theme_file = input_file
    # Parse the theme file.
    if os.path.isfile(theme_file):
        logging.info("Set theme to \033[1;37m%s\033[0m.", os.path.basename(theme_file))
        return parse(theme_file)
    logging.error("No %s colorscheme file found.", bri)
    logging.error("Try adding '-l' to set light themes.")
    logging.error("Try removing '-l' to set dark themes.")
    sys.exit(1)
def parseDoc(self, doc_str, format="xml"):
    """Parse an OAI-ORE Resource Map document into this graph.

    See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on the
    arguments.

    Returns:
        self, to allow call chaining.
    """
    self.parse(data=doc_str, format=format)
    self._ore_initialized = True
    return self
def _get_log_file ( self , handler ) :
'''Generate log file path for a given handler
Args :
handler :
The handler configuration dictionary for which a log file
path should be generated .''' | if 'file_name_pattern' not in handler :
filename = '%Y-%m-%d-%H-%M-%S-{name}.pcap'
else :
filename = handler [ 'file_name_pattern' ]
log_file = handler [ 'log_dir' ]
if 'path' in handler :
log_file = os . path . join ( log_file , handler [ 'path' ] , filename )
else :
log_file = os . path . join ( log_file , filename )
log_file = time . strftime ( log_file , time . gmtime ( ) )
log_file = log_file . format ( ** handler )
return log_file |
def requestAttribute(self, attrib: Attribute, sender):
    """Used to get a raw attribute from Sovrin.

    :param attrib: attribute to fetch (also stored locally by key)
    :param sender: identity used to build the ledger request
    :return: result of ``prepReq`` (number of pending txns), or None
        when no request could be built for this attribute
    """
    self._attributes[attrib.key()] = attrib
    req = attrib.getRequest(sender)
    if req:
        return self.prepReq(req, key=attrib.key())
def matrix_to_points(matrix, pitch, origin):
    """Convert an (n, m, p) matrix into a set of points for each voxel center.

    Parameters
    ----------
    matrix : (n, m, p) bool, voxel matrix
    pitch : float, pitch the voxel matrix was computed with
    origin : (3,) float, origin of the voxel matrix

    Returns
    -------
    points : (q, 3) float, center points of the filled voxels
    """
    filled = np.column_stack(np.nonzero(matrix))
    return indices_to_points(indices=filled, pitch=pitch, origin=origin)
def predict_and_complete(self, i, to_scan, columns, transitives):
    """The core Earley Predictor and Completer.

    At each stage of the input, we handle any completed items (things
    that matched on the last cycle) and use those to predict what should
    come next in the input stream. The completions and any predicted
    non-terminals are recursively processed until we reach a set of items
    which can be added to the scan list for the next scanner cycle.
    """
    # Held Completions (H in E. Scott's paper).
    node_cache = {}
    held_completions = {}
    column = columns[i]
    # R (items) = Ei (column.items)
    items = deque(column)
    while items:
        item = items.pop()
        # remove an element, A say, from R
        ### The Earley completer
        if item.is_complete:    ### (item.s == string)
            if item.node is None:
                label = (item.s, item.start, i)
                item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                item.node.add_family(item.s, item.rule, item.start, None, None)
            # create_leo_transitives(item.rule.origin, item.start)
            ### R Joop Leo right recursion Completer
            if item.rule.origin in transitives[item.start]:
                # NOTE(review): membership is tested with item.rule.origin
                # but the lookup uses item.s -- presumably equivalent for a
                # complete item; confirm against the Leo-completer paper.
                transitive = transitives[item.start][item.s]
                if transitive.previous in transitives[transitive.column]:
                    root_transitive = transitives[transitive.column][transitive.previous]
                else:
                    root_transitive = transitive
                new_item = Item(transitive.rule, transitive.ptr, transitive.start)
                label = (root_transitive.s, root_transitive.start, i)
                new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                new_item.node.add_path(root_transitive, item.node)
                if new_item.expect in self.TERMINALS:
                    # Add (B ::= aC.B, h, y) to Q
                    to_scan.add(new_item)
                elif new_item not in column:
                    # Add (B ::= aC.B, h, y) to Ei and R
                    column.add(new_item)
                    items.append(new_item)
            ### R Regular Earley completer
            else:
                # Empty has 0 length. If we complete an empty symbol in a particular
                # parse step, we need to be able to use that same empty symbol to complete
                # any predictions that result, that themselves require empty. Avoids
                # infinite recursion on empty symbols.
                # held_completions is 'H' in E. Scott's paper.
                is_empty_item = item.start == i
                if is_empty_item:
                    held_completions[item.rule.origin] = item.node
                originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s]
                for originator in originators:
                    new_item = originator.advance()
                    label = (new_item.s, originator.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node)
                    if new_item.expect in self.TERMINALS:
                        # Add (B ::= aC.B, h, y) to Q
                        to_scan.add(new_item)
                    elif new_item not in column:
                        # Add (B ::= aC.B, h, y) to Ei and R
                        column.add(new_item)
                        items.append(new_item)
        ### The Earley predictor
        elif item.expect in self.NON_TERMINALS:    ### (item.s == lr0)
            new_items = []
            for rule in self.predictions[item.expect]:
                new_item = Item(rule, 0, i)
                new_items.append(new_item)
            # Process any held completions (H).
            if item.expect in held_completions:
                new_item = item.advance()
                label = (new_item.s, item.start, i)
                new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect])
                new_items.append(new_item)
            for new_item in new_items:
                if new_item.expect in self.TERMINALS:
                    to_scan.add(new_item)
                elif new_item not in column:
                    column.add(new_item)
                    items.append(new_item)
def sets(self, values):
    """Set list of sets."""
    # If a cache server is configured, persist the sets list there.
    if self.cache:
        self.cache.set(self.app.config['OAISERVER_CACHE_KEY'], values)
def setData(self, data, setName=None):
    """Assign the data in the dataframe to the AMPL entities with the
    names corresponding to the column names.

    Args:
        data: The dataframe containing the data to be assigned.
        setName: The name of the set to which the indices values of the
            DataFrame are to be assigned.

    Raises:
        AMPLException: if the data assignment procedure was not successful.
    """
    if not isinstance(data, DataFrame):
        # Transparently accept pandas DataFrames when pandas is available.
        if pd is not None and isinstance(data, pd.DataFrame):
            data = DataFrame.fromPandas(data)
    # Serialize the call into the underlying AMPL interpreter under lock.
    if setName is None:
        lock_and_call(lambda: self._impl.setData(data._impl), self._lock)
    else:
        lock_and_call(lambda: self._impl.setData(data._impl, setName), self._lock)
def load(prefix, epoch, ctx=None, **kwargs):
    """Load model checkpoint from file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        Epoch number of the model to load.
    ctx : Context or list of Context, optional
        The device context of training and prediction.
    kwargs : dict
        Other parameters for the model, including `num_epoch`, optimizer
        and `numpy_batch_size`.

    Returns
    -------
    model : FeedForward
        The loaded model that can be used for prediction.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
    model = FeedForward(symbol,
                        ctx=ctx,
                        arg_params=arg_params,
                        aux_params=aux_params,
                        begin_epoch=epoch,
                        **kwargs)
    return model
def erase_code_breakpoint(self, dwProcessId, address):
    """Erases the code breakpoint at the given address.

    @see:
        L{define_code_breakpoint},
        L{has_code_breakpoint},
        L{get_code_breakpoint},
        L{enable_code_breakpoint},
        L{enable_one_shot_code_breakpoint},
        L{disable_code_breakpoint}

    @type dwProcessId: int
    @param dwProcessId: Process global ID.

    @type address: int
    @param address: Memory address of breakpoint.
    """
    bp = self.get_code_breakpoint(dwProcessId, address)
    # Disable first so the breakpoint's patched state is restored
    # before the bookkeeping entry is removed.
    if not bp.is_disabled():
        self.disable_code_breakpoint(dwProcessId, address)
    del self.__codeBP[(dwProcessId, address)]
def crop(self, min, max):
    """Crop a region by removing coordinates outside bounds.

    Follows normal slice indexing conventions: ``min`` is inclusive,
    ``max`` is exclusive.

    Parameters
    ----------
    min : tuple
        Minimum or starting bounds for each axis.
    max : tuple
        Maximum or ending bounds for each axis.
    """
    # NOTE(review): 'min'/'max' shadow the builtins, but renaming them
    # would change the keyword API for callers, so they are kept as-is.
    # Assumes each coordinate supports elementwise comparison against the
    # bound tuples (e.g. numpy arrays) -- TODO confirm.
    new = [c for c in self.coordinates if all(c >= min) and all(c < max)]
    return one(new)
def store(self, result, filename, pretty=True):
    """Write a result to the given file.

    Parameters
    ----------
    result : memote.MemoteResult
        The dictionary structure of results.
    filename : str or pathlib.Path
        Store results directly to the given filename.
    pretty : bool, optional
        Whether (default) or not to write JSON in a more legible format.
    """
    LOGGER.info("Storing result in '%s'.", filename)
    # Bugfix: accept pathlib.Path as documented -- Path objects have no
    # 'endswith' method, so coerce to str before inspecting the suffix.
    filename = str(filename)
    if filename.endswith(".gz"):
        with gzip.open(filename, "wb") as file_handle:
            file_handle.write(jsonify(result, pretty=pretty).encode("utf-8"))
    else:
        with open(filename, "w", encoding="utf-8") as file_handle:
            file_handle.write(jsonify(result, pretty=pretty))
def is_undirected(matrix):
    """Determine whether the matrix represents an undirected graph,
    i.e. whether the adjacency matrix equals its transpose (within
    floating point tolerance).

    :param matrix: The matrix to be tested (dense or scipy sparse)
    :returns: boolean
    """
    if not isspmatrix(matrix):
        return np.allclose(matrix, matrix.T)
    return sparse_allclose(matrix, matrix.transpose())
def config_dict_to_string(dictionary):
    """Convert a config dictionary into its string form.

    A dictionary ``{key_1: value_1, ..., key_n: value_n}`` becomes::

        key_1=value_1|key_2=value_2|...|key_n=value_n

    (using the configured assignment and separator symbols).

    :param dict dictionary: the config dictionary
    :rtype: string
    """
    pairs = (
        u"%s%s%s" % (key, gc.CONFIG_STRING_ASSIGNMENT_SYMBOL, value)
        for key, value in dictionary.items()
    )
    return gc.CONFIG_STRING_SEPARATOR_SYMBOL.join(pairs)
def check_info_validity(self):
    """ValueError if cache differs at all from source data layer with
    an exception for volume_size which prints a warning.
    """
    cache_info = self.get_json('info')
    if not cache_info:
        # Nothing cached yet -- nothing to validate.
        return
    fresh_info = self.vol._fetch_info()
    mismatch_error = ValueError("""
    Data layer info file differs from cache. Please check whether this
    change invalidates your cache.
    If VALID do one of:
    1) Manually delete the cache (see location below)
    2) Refresh your on-disk cache as follows:
    vol = CloudVolume(..., cache=False) # refreshes from source
    vol.cache = True
    vol.commit_info() # writes to disk
    If INVALID do one of:
    1) Delete the cache manually (see cache location below)
    2) Instantiate as follows:
    vol = CloudVolume(..., cache=False) # refreshes info from source
    vol.flush_cache() # deletes cache
    vol.cache = True
    vol.commit_info() # writes info to disk
    CACHED: {cache}
    SOURCE: {source}
    CACHE LOCATION: {path}
    """.format(cache=cache_info, source=fresh_info, path=self.path))
    try:
        fresh_sizes = [scale['size'] for scale in fresh_info['scales']]
        cache_sizes = [scale['size'] for scale in cache_info['scales']]
    except KeyError:
        raise mismatch_error
    # Compare the two info dicts with the per-scale 'size' removed:
    # a differing bounding box only warrants a warning, not an error.
    for scale in fresh_info['scales']:
        del scale['size']
    for scale in cache_info['scales']:
        del scale['size']
    if fresh_info != cache_info:
        raise mismatch_error
    if fresh_sizes != cache_sizes:
        warn("WARNING: Data layer bounding box differs in cache.\nCACHED: {}\nSOURCE: {}\nCACHE LOCATION:{}".format(cache_sizes, fresh_sizes, self.path))
def tptnfpfn_chi(*args, **kwargs):
    """Calculate Chi from True Positive (tp), True Negative (tn),
    False Positive/Negative counts.

    Assumes that the random variable being measured is continuous rather
    than discrete: Chi is MCC squared, scaled by the total count.

    Reference:
        https://en.wikipedia.org/wiki/Matthews_correlation_coefficient

    >>> round(tptnfpfn_chi(1000, 2000, 30, 40))
    2765.0
    """
    tp, tn, fp, fn = args_tptnfpfn(*args, **kwargs)
    total = tp + tn + fp + fn
    mcc = tptnfpfn_mcc(tp=tp, tn=tn, fp=fp, fn=fn)
    return mcc ** 2. * total
def set_insn(self, insn):
    """Set a new raw buffer to disassemble.

    :param insn: the buffer
    :type insn: string
    """
    self.insn = insn
    # Cache the buffer length alongside it.
    self.size = len(self.insn)
def check_new_version_available(this_version):
    """Checks if a newer version of Zappa is available.

    Returns True if updateable, else False.
    """
    import requests
    # Query PyPI's JSON API for the latest release; keep the timeout
    # short so startup is not blocked by a slow network.
    pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
    resp = requests.get(pypi_url, timeout=1.5)
    top_version = resp.json()['info']['version']
    # NOTE(review): any mismatch (even a local version newer than PyPI)
    # reports True.
    return this_version != top_version
def users_forgot_password(self, email, **kwargs):
    """Send email to reset your password.

    Extra keyword arguments are forwarded in the request body.
    """
    return self.__call_api_post('users.forgotPassword', email=email, data=kwargs)
def reply(self, reply_comment):
    """Reply to the Message.

    Notes:
        HTML can be inserted in the string and will be interpreted
        properly by Outlook.

    Args:
        reply_comment: String message to send with email.
    """
    import json
    # Bugfix: build the payload with json.dumps so quotes, newlines and
    # other special characters in the comment are escaped correctly; the
    # previous string concatenation produced invalid JSON for such input.
    payload = json.dumps({"Comment": reply_comment})
    endpoint = ('https://outlook.office.com/api/v2.0/me/messages/'
                + self.message_id + '/reply')
    self._make_api_call('post', endpoint, data=payload)
def to_int(self, number, default=0):
    """Coerce ``number`` to an integer, falling back to ``default``.

    Args:
        number: Value to convert.
        default: Value (itself coerced) used when conversion fails.

    Returns:
        int: the integer value of ``number``, or of ``default`` when
        ``number`` is not convertible.
    """
    try:
        return int(number)
    except (TypeError, ValueError):
        # Bugfix: int() raises TypeError for None/unsupported types and
        # ValueError for malformed strings -- it never raises KeyError,
        # which the original caught instead of TypeError.
        return self.to_int(default, 0)
def hv_mv_station_load(network):
    """Checks for over-loading of the HV/MV station.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Dataframe containing over-loaded HV/MV stations, their apparent
        power at maximal over-loading and the corresponding time step.
        Index of the dataframe are the over-loaded stations of type
        :class:`~.grid.components.MVStation`. Columns are 's_pfa'
        containing the apparent power at maximal over-loading as float
        and 'time_index' containing the corresponding time step the
        over-loading occurred in as :pandas:`pandas.Timestamp<timestamp>`.

    Notes
    -----
    Over-load is determined based on allowed load factors for feed-in and
    load cases that are defined in the config file 'config_grid_expansion'
    in section 'grid_expansion_load_factors'.
    """
    crit_stations = _station_load(network, network.mv_grid.station,
                                  pd.DataFrame())
    if crit_stations.empty:
        logger.debug('==> No HV/MV station load issues.')
    else:
        logger.debug('==> HV/MV station has load issues.')
    return crit_stations
def send_config_set(self, config_commands=None, exit_config_mode=True, delay_factor=1, max_loops=150, strip_prompt=False, strip_command=False, config_mode_command=None, ):
    """Send configuration commands down the SSH channel.

    config_commands is an iterable containing all of the configuration
    commands. The commands will be executed one after the other.
    Automatically exits/enters configuration mode.

    :param config_commands: Multiple configuration commands to be sent
        to the device
    :type config_commands: list or string
    :param exit_config_mode: Determines whether or not to exit config
        mode after complete
    :type exit_config_mode: bool
    :param delay_factor: Factor to adjust delays
    :type delay_factor: int
    :param max_loops: Controls wait time in conjunction with delay_factor
        (default: 150)
    :type max_loops: int
    :param strip_prompt: Determines whether or not to strip the prompt
    :type strip_prompt: bool
    :param strip_command: Determines whether or not to strip the command
    :type strip_command: bool
    :param config_mode_command: The command to enter into config mode
    :type config_mode_command: str
    """
    # NOTE(review): strip_prompt/strip_command are accepted but not used
    # in this body -- presumably for signature compatibility; confirm.
    delay_factor = self.select_delay_factor(delay_factor)
    if config_commands is None:
        return ""
    elif isinstance(config_commands, string_types):
        # Allow a single command as a plain string.
        config_commands = (config_commands, )
    if not hasattr(config_commands, "__iter__"):
        raise ValueError("Invalid argument passed into send_config_set")
    # Send config commands
    cfg_mode_args = (config_mode_command, ) if config_mode_command else tuple()
    output = self.config_mode(*cfg_mode_args)
    for cmd in config_commands:
        self.write_channel(self.normalize_cmd(cmd))
        # In fast_cli mode commands are written back-to-back; otherwise a
        # small pacing delay is inserted between commands.
        if self.fast_cli:
            pass
        else:
            time.sleep(delay_factor * 0.05)
    # Gather output
    output += self._read_channel_timing(delay_factor=delay_factor, max_loops=max_loops)
    if exit_config_mode:
        output += self.exit_config_mode()
    output = self._sanitize_output(output)
    log.debug("{}".format(output))
    return output
def CheckDependencies(verbose_output=True):
    """Checks the availability of the dependencies.

    Args:
        verbose_output (Optional[bool]): True if output should be verbose.

    Returns:
        bool: True if the dependencies are available, False otherwise.
    """
    print('Checking availability and versions of dependencies.')
    check_result = True
    for module_name, version_tuple in sorted(PYTHON_DEPENDENCIES.items()):
        module_ok = _CheckPythonModule(
            module_name, version_tuple[0], version_tuple[1],
            is_required=version_tuple[3], maximum_version=version_tuple[2],
            verbose_output=verbose_output)
        if not module_ok:
            check_result = False
    if not _CheckSQLite3(verbose_output=verbose_output):
        check_result = False
    if check_result and not verbose_output:
        print('[OK]')
    print('')
    return check_result
def main():
    """Get arguments and call the execution function.

    Reads connection parameters from sys.argv; if fewer than the required
    seven arguments are supplied, prints usage and falls back to the
    module defaults. Returns 0 on success, exits with status 1 on error.
    """
    # If less than required arguments, use the defaults.
    if len(sys.argv) < 8:
        print("Usage: %s server_url username password namespace classname "
              "max_open, max_pull" % sys.argv[0])
        server_url = SERVER_URL
        username = USERNAME
        password = PASSWORD
        namespace = TEST_NAMESPACE
        classname = TEST_CLASS
        max_open = 0
        max_pull = 100
    else:
        server_url = sys.argv[1]
        username = sys.argv[2]
        password = sys.argv[3]
        namespace = sys.argv[4]
        classname = sys.argv[5]
        # Bugfix: argv values are strings; convert the numeric limits so
        # they match the integer types of the defaults above.
        max_open = int(sys.argv[6])
        max_pull = int(sys.argv[7])
    print('Parameters: server_url=%s\n username=%s\n namespace=%s\n'
          ' classname=%s\n max_open=%s,\n max_pull=%s'
          % (server_url, username, namespace, classname, max_open, max_pull))
    # Connect to the server.
    conn = WBEMConnection(server_url, (username, password),
                          default_namespace=namespace,
                          no_verification=True)
    # Call method to execute the enumeration sequence and return instances.
    try:
        instances = execute_request(conn, classname, max_open, max_pull)
        # Print the resulting instances.
        for instance in instances:
            print('\npath=%s\n%s' % (instance.path, instance.tomof()))
    # Handle exceptions.
    except CIMError as ce:
        print('Operation Failed: CIMError: code=%s, Description=%s'
              % (ce.status_code_name, ce.status_description))
        sys.exit(1)
    except Error as err:
        print("Operation failed: %s" % err)
        sys.exit(1)
    return 0
def add_router_to_hosting_device(self, context, hosting_device_id, router_id):
    """Add a (non-hosted) router to a hosting device.

    :param context: request context (elevated internally for DB access)
    :param hosting_device_id: id of the target hosting device
    :param router_id: id of the router to schedule
    :raises RouterHostedByHostingDevice: router is already hosted on a
        different device
    :raises RouterSchedulingFailed: scheduling on the device failed
    """
    e_context = context.elevated()
    r_hd_binding_db = self._get_router_binding_info(e_context, router_id)
    if r_hd_binding_db.hosting_device_id:
        if r_hd_binding_db.hosting_device_id == hosting_device_id:
            # Already hosted on the requested device: nothing to do.
            return
        raise routertypeawarescheduler.RouterHostedByHostingDevice(router_id=router_id, hosting_device_id=hosting_device_id)
    rt_info = self.validate_hosting_device_router_combination(context, r_hd_binding_db, hosting_device_id)
    result = self.schedule_router_on_hosting_device(e_context, r_hd_binding_db, hosting_device_id, rt_info['slot_need'])
    if result:
        # Refresh so that we get latest contents from DB.
        e_context.session.expire(r_hd_binding_db)
        router = self.get_router(e_context, router_id)
        self.add_type_and_hosting_device_info(e_context, router, r_hd_binding_db, schedule=False)
        # Notify the L3 config agent, if one is registered.
        l3_cfg_notifier = self.agent_notifiers.get(AGENT_TYPE_L3_CFG)
        if l3_cfg_notifier:
            l3_cfg_notifier.router_added_to_hosting_device(context, router)
    else:
        raise routertypeawarescheduler.RouterSchedulingFailed(router_id=router_id, hosting_device_id=hosting_device_id)
def _merge_fields ( a , b ) :
"""Merge two lists of fields .
Fields in ` b ` override fields in ` a ` . Fields in ` a ` are output first .""" | a_names = set ( x [ 0 ] for x in a )
b_names = set ( x [ 0 ] for x in b )
a_keep = a_names - b_names
fields = [ ]
for name , field in a :
if name in a_keep :
fields . append ( ( name , field ) )
fields . extend ( b )
return fields |
def main():
    '''Main entry point for the mongo_backups CLI.

    Parses the docopt command line and dispatches to the matching action.
    The checks are independent ``if`` statements (not elif), mirroring
    the mutually exclusive docopt command flags.
    '''
    args = docopt(__doc__, version=__version__)
    if args.get('backup'):
        backup_database(args)
    if args.get('backup_all'):
        backup_all(args)
    if args.get('decrypt'):
        decrypt_file(args.get('<path>'))
    if args.get('configure'):
        configure(service='all')
    if args.get('configure-aws'):
        configure(service='aws')
    if args.get('configure-dropbox'):
        configure(service='dropbox')
    if args.get('configure-swift'):
        configure(service='swift')
    if args.get('download_all'):
        download_all()
def __send_command ( self , command , command_string = b'' , response_size = 8 ) :
"""Send one command packet to the terminal and parse the reply header.

Args:
    command: numeric command code (see ``const``).
    command_string (bytes): payload appended after the packet header.
    response_size (int): number of reply bytes to read (UDP), or the
        expected reply size excluding the 8-byte TCP framing (TCP).

Returns:
    dict: ``{'status': bool, 'code': <device reply code>}`` where
    status is True for ACK_OK / PREPARE_DATA / DATA replies.

Raises:
    ZKErrorConnection: if called before connecting (except for the
        CONNECT and AUTH commands themselves).
    ZKNetworkError: on any socket error or an invalid TCP packet.
""" | if command not in [ const . CMD_CONNECT , const . CMD_AUTH ] and not self . is_connect :
raise ZKErrorConnection ( "instance are not connected." )
# header carries the command code, session id and a rolling reply id
buf = self . __create_header ( command , command_string , self . __session_id , self . __reply_id )
try :
if self . tcp :
# TCP framing: prepend an 8-byte transport header ("top")
top = self . __create_tcp_top ( buf )
self . __sock . send ( top )
# reply = 8-byte TCP top + response_size bytes of payload header
self . __tcp_data_recv = self . __sock . recv ( response_size + 8 )
self . __tcp_length = self . __test_tcp_top ( self . __tcp_data_recv )
if self . __tcp_length == 0 :
raise ZKNetworkError ( "TCP packet invalid" )
# reply header: four little-endian uint16 fields after the TCP top
self . __header = unpack ( '<4H' , self . __tcp_data_recv [ 8 : 16 ] )
self . __data_recv = self . __tcp_data_recv [ 8 : ]
else :
# UDP: send the datagram and read the reply directly
self . __sock . sendto ( buf , self . __address )
self . __data_recv = self . __sock . recv ( response_size )
self . __header = unpack ( '<4H' , self . __data_recv [ : 8 ] )
except Exception as e :
# NOTE(review): broad catch; every failure is re-raised as ZKNetworkError
raise ZKNetworkError ( str ( e ) )
# header[0] = response code, header[3] = reply id echoed by the device
self . __response = self . __header [ 0 ]
self . __reply_id = self . __header [ 3 ]
self . __data = self . __data_recv [ 8 : ]
if self . __response in [ const . CMD_ACK_OK , const . CMD_PREPARE_DATA , const . CMD_DATA ] :
return { 'status' : True , 'code' : self . __response }
return { 'status' : False , 'code' : self . __response }
def _dB_dR ( self , R ) :
"""Return numpy array of dB / dR from B1 up to and including Bn .""" | return - self . _HNn / R ** 3 / self . _sin_alpha ** 2 * ( 0.8 * self . _HNn + R * self . _sin_alpha ) |
def iter_relation(self):
    """Yield every (point, element) pair in the relation."""
    for pt in iter_points(self.inputs):
        yield pt, self.restrict(pt)
def extract_pattern(pattern: str, ifiles: List[str]) -> Dict[str, any]:
    """Match ``pattern`` against each input file and collect the extracted
    filename pieces, keyed by the wildcard names defined in ``pattern``.

    A file that does not match contributes ``None`` for every key, so all
    result lists stay aligned with ``ifiles``.
    """
    result = glob_wildcards(pattern, [])
    for filename in ifiles:
        pieces = glob_wildcards(pattern, [filename])
        for key, values in pieces.items():
            if values:
                result[key].extend(values)
            else:
                result[key].append(None)
    return result
def createFolder(self, name):
    """Create a folder in which items can be placed.

    Folders are visible only to the owning user and exist solely to
    organize content within that user's content space.
    """
    params = {"f": "json", "title": name}
    # invalidate the cached folder list; it is stale after this call
    self._folders = None
    return self._post(url="%s/createFolder" % self.root,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
def prt_tsv(self, prt=sys.stdout):
    """Write the current sections in ASCII text format to ``prt``."""
    fmt = self.objprt.get_prtfmt_str(self.flds_cur)
    # header line: space-joined current field names
    prt.write("{FLDS}\n".format(FLDS=" ".join(self.flds_cur)))
    WrSectionsTxt.prt_sections(prt, self.desc2nts['sections'], fmt, secspc=True)
def __gen_pointing_file ( self , top_level_layer ) :
"""Build the etree for a PAULA XML file modeling pointing relations.

Pointing relations are ahierarchical (non-hierarchical) edges between
any two nodes (``tok``, ``mark`` or ``struct``), e.g. dependency-parse
edges or the coreference link between anaphora and antecedent.

Args:
    top_level_layer: name of the annotation layer to extract edges from.

Returns:
    str: the PAULA ID under which the generated tree was registered.
""" | paula_id = '{0}.{1}.{2}_{3}_pointing' . format ( top_level_layer , self . corpus_name , self . name , top_level_layer )
self . paulamap [ 'pointing' ] [ top_level_layer ] = paula_id
E , tree = gen_paula_etree ( paula_id )
pointing_edges = select_edges_by ( self . dg , layer = top_level_layer , edge_type = EdgeTypes . pointing_relation , data = True )
# group the edges as source -> target -> edge attributes
pointing_dict = defaultdict ( lambda : defaultdict ( str ) )
for source_id , target_id , edge_attrs in pointing_edges :
pointing_dict [ source_id ] [ target_id ] = edge_attrs
# NOTE : we don ' t add a base file here , because the nodes could be
# tokens or structural nodes
rlist = E ( 'relList' )
for source_id in pointing_dict :
for target_id in pointing_dict [ source_id ] :
source_href = self . __gen_node_href ( top_level_layer , source_id )
target_href = self . __gen_node_href ( top_level_layer , target_id )
rel = E ( 'rel' , { 'id' : 'rel_{0}_{1}' . format ( source_id , target_id ) , XLINKHREF : source_href , 'target' : target_href } )
# adds source / target node labels as a < ! - - comment - - >
if self . human_readable :
source_label = self . dg . node [ source_id ] . get ( 'label' )
target_label = self . dg . node [ target_id ] . get ( 'label' )
rel . append ( Comment ( u'{0} - {1}' . format ( source_label , target_label ) ) )
rlist . append ( rel )
tree . append ( rlist )
# register the finished tree and its DTD under the new paula_id
self . files [ paula_id ] = tree
self . file2dtd [ paula_id ] = PaulaDTDs . rel
return paula_id
def add_instruction(self, reil_instruction):
    """Translate a REIL instruction and feed every resulting expression
    to the solver for analysis."""
    expressions = self._translator.translate(reil_instruction)
    for expression in expressions:
        self._solver.add(expression)
def render_nocache(self):
    """Render the `nocache` blocks of the content and return the whole
    html."""
    # Start by loading the cache templatetag library, then wrap the
    # cached template body in "raw" tokens so only the nocache blocks
    # are evaluated on this pass.
    load_tag = (template.BLOCK_TAG_START
                + 'load %s' % self.get_templatetag_module()
                + template.BLOCK_TAG_END)
    source = load_tag + self.RAW_TOKEN_START + self.content + self.RAW_TOKEN_END
    return template.Template(source).render(self.context)
def _load_img ( handle , target_dtype = np . float32 , size = None , ** kwargs ) :
"""Load an image file as a numpy array scaled into [0, 1].

Args:
    handle: path or file-like object accepted by PIL.Image.open.
    target_dtype: dtype of the returned array (default np.float32).
    size: optional resize target; anything beyond two dims is trimmed.
    **kwargs: forwarded to PIL.Image.open.

Returns:
    HxWx3 array; grayscale (rank-2) inputs are replicated to 3 channels.

Raises:
    NotImplementedError: if the decoded array is not rank 2 or 3.
""" | image_pil = PIL . Image . open ( handle , ** kwargs )
# resize the image to the requested size , if one was specified
if size is not None :
if len ( size ) > 2 :
size = size [ : 2 ]
log . warning ( "`_load_img()` received size: {}, trimming to first two dims!" . format ( size ) )
image_pil = image_pil . resize ( size , resample = PIL . Image . LANCZOS )
image_array = np . asarray ( image_pil )
# remove alpha channel if it contains no information
# if image _ array . shape [ - 1 ] > 3 and ' A ' not in image _ pil . mode :
# image _ array = image _ array [ . . . , : - 1]
image_dtype = image_array . dtype
# NOTE(review): np.iinfo assumes an integer dtype; a float-mode image
# (e.g. PIL mode 'F') would raise here — confirm inputs are always integer.
image_max_value = np . iinfo ( image_dtype ) . max
# . . . for uint8 that ' s 255 , etc .
# using np . divide should avoid an extra copy compared to doing division first
ndimage = np . divide ( image_array , image_max_value , dtype = target_dtype )
rank = len ( ndimage . shape )
if rank == 3 :
return ndimage
elif rank == 2 :
# grayscale: replicate the single channel across 3 channels
return np . repeat ( np . expand_dims ( ndimage , axis = 2 ) , 3 , axis = 2 )
else :
# NOTE(review): also reached for rank < 2, despite the message wording
message = "Loaded image has more dimensions than expected: {}" . format ( rank )
raise NotImplementedError ( message )
def accounting_sample_replace ( total , data , accounting_column , prob_column = None , max_iterations = 50 ) :
"""Sample rows with replacement, targeting an accounting control total.

Parameters
----------
total : int
    The control total the sampled rows will attempt to match.
data : pandas.DataFrame
    Table to sample from.
accounting_column : string
    Name of column with accounting totals/quantities to apply towards
    the control.
prob_column : string, optional, default None
    Name of the column in the data to provide probabilities or weights.
max_iterations : int, optional, default 50
    Maximum number of sampling iterations that will be applied.

Returns
-------
sample_rows : pandas.DataFrame
    The sample whose accounting sum came closest to the control total.
matched : bool
    Indicates if the total was matched exactly.

NOTE(review): if ``total`` is 0 the loop exits on the first iteration
and the first return value is None, not an empty frame — confirm
callers handle that case.
""" | # check for probabilities
p = get_probs ( data , prob_column )
# determine avg number of accounting items per sample ( e . g . persons per household )
per_sample = data [ accounting_column ] . sum ( ) / ( 1.0 * len ( data . index . values ) )
curr_total = 0
remaining = total
sample_rows = pd . DataFrame ( )
# track the closest-so-far sample in case we never match exactly
closest = None
closest_remain = total
matched = False
for i in range ( 0 , max_iterations ) : # stop if we ' ve hit the control
if remaining == 0 :
matched = True
break
# if sampling with probabilities , re - caclc the # of items per sample
# after the initial sample , this way the sample size reflects the probabilities
if p is not None and i == 1 :
per_sample = sample_rows [ accounting_column ] . sum ( ) / ( 1.0 * len ( sample_rows ) )
# update the sample
num_samples = int ( math . ceil ( math . fabs ( remaining ) / per_sample ) )
if remaining > 0 : # we ' re short , add to the sample
curr_ids = np . random . choice ( data . index . values , num_samples , p = p )
sample_rows = pd . concat ( [ sample_rows , data . loc [ curr_ids ] ] )
else : # we ' ve overshot , remove from existing samples ( FIFO )
sample_rows = sample_rows . iloc [ num_samples : ] . copy ( )
# update the total and check for the closest result
curr_total = sample_rows [ accounting_column ] . sum ( )
remaining = total - curr_total
if abs ( remaining ) < closest_remain :
closest_remain = abs ( remaining )
closest = sample_rows
return closest , matched
def is_macro_name(func_name, dialect):
    """is_macro_name(func_name: str, dialect: str) -> bool

    Tests whether a word looks like a macro under the given dialect's
    naming convention, e.g. macros in Lisp usually start with 'def' and
    with 'with-' in Scheme.  This heuristic saves the effort of finding
    all the macros in Lisp/Scheme/Clojure/newLISP and storing them in a
    list.

    >>> is_macro_name('defmacro', 'lisp')
    True
    >>> is_macro_name('car', 'lisp')
    False
    """
    if not func_name:
        return False
    # Prefix conventions per dialect.  Only the 'lisp' check is
    # case-insensitive (re.I), preserving the original behavior.
    # bool() normalizes the re.Match/None result to the documented bool.
    if dialect == 'lisp':
        return bool(re.search('^(macro|def|do|with-)', func_name, re.I))
    if dialect == 'scheme':
        return bool(re.search('^(call-|def|with-)', func_name))
    if dialect == 'clojure':
        return bool(re.search('^(def|with)', func_name))
    if dialect == 'newlisp':
        return bool(re.search('^(macro|def)', func_name))
    # unknown dialect: nothing is treated as a macro
    return False
def update ( self , account_sid = values . unset , api_version = values . unset , friendly_name = values . unset , sms_application_sid = values . unset , sms_fallback_method = values . unset , sms_fallback_url = values . unset , sms_method = values . unset , sms_url = values . unset , status_callback = values . unset , status_callback_method = values . unset , voice_application_sid = values . unset , voice_caller_id_lookup = values . unset , voice_fallback_method = values . unset , voice_fallback_url = values . unset , voice_method = values . unset , voice_url = values . unset , emergency_status = values . unset , emergency_address_sid = values . unset , trunk_sid = values . unset , voice_receive_mode = values . unset , identity_sid = values . unset , address_sid = values . unset ) :
"""Update the IncomingPhoneNumberInstance.

Delegates every argument unchanged to the underlying context proxy
(``self._proxy.update``); unset values are left untouched server-side.

:param unicode account_sid: The SID of the Account that created the resource to update
:param unicode api_version: The API version to use for incoming calls made to the phone number
:param unicode friendly_name: A string to describe the resource
:param unicode sms_application_sid: Unique string that identifies the application
:param unicode sms_fallback_method: HTTP method used with sms_fallback_url
:param unicode sms_fallback_url: The URL we call when an error occurs while executing TwiML
:param unicode sms_method: The HTTP method to use with sms_url
:param unicode sms_url: The URL we should call when the phone number receives an incoming SMS message
:param unicode status_callback: The URL we should call to send status information to your application
:param unicode status_callback_method: The HTTP method we should use to call status_callback
:param unicode voice_application_sid: The SID of the application to handle the phone number
:param bool voice_caller_id_lookup: Whether to lookup the caller's name
:param unicode voice_fallback_method: The HTTP method used with fallback_url
:param unicode voice_fallback_url: The URL we will call when an error occurs in TwiML
:param unicode voice_method: The HTTP method used with the voice_url
:param unicode voice_url: The URL we should call when the phone number receives a call
:param IncomingPhoneNumberInstance.EmergencyStatus emergency_status: Whether the phone number is enabled for emergency calling
:param unicode emergency_address_sid: The emergency address configuration to use for emergency calling
:param unicode trunk_sid: SID of the trunk to handle phone calls to the phone number
:param IncomingPhoneNumberInstance.VoiceReceiveMode voice_receive_mode: Incoming call type: fax or voice
:param unicode identity_sid: Unique string that identifies the identity associated with number
:param unicode address_sid: The SID of the Address resource associated with the phone number
:returns: Updated IncomingPhoneNumberInstance
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.IncomingPhoneNumberInstance
""" | return self . _proxy . update ( account_sid = account_sid , api_version = api_version , friendly_name = friendly_name , sms_application_sid = sms_application_sid , sms_fallback_method = sms_fallback_method , sms_fallback_url = sms_fallback_url , sms_method = sms_method , sms_url = sms_url , status_callback = status_callback , status_callback_method = status_callback_method , voice_application_sid = voice_application_sid , voice_caller_id_lookup = voice_caller_id_lookup , voice_fallback_method = voice_fallback_method , voice_fallback_url = voice_fallback_url , voice_method = voice_method , voice_url = voice_url , emergency_status = emergency_status , emergency_address_sid = emergency_address_sid , trunk_sid = trunk_sid , voice_receive_mode = voice_receive_mode , identity_sid = identity_sid , address_sid = address_sid , )
def _tag ( val , tag ) :
"""Surround val with < tag > < / tag >""" | if isinstance ( val , str ) :
val = bytes ( val , 'utf-8' )
return ( bytes ( '<' + tag + '>' , 'utf-8' ) + val + bytes ( '</' + tag + '>' , 'utf-8' ) ) |
def _receive_signal ( self , progress_subscript ) :
"""this function takes care of signals emitted by the subscripts
Args :
progress _ subscript : progress of subscript""" | self . progress = self . _estimate_progress ( )
self . updateProgress . emit ( int ( self . progress ) ) |
def replace_option_by_id(cls, option_id, option, **kwargs):
    """Replace Option

    Replace all attributes of an Option.  Synchronous by default; pass
    ``async=True`` to get the request thread instead:

    >>> thread = api.replace_option_by_id(option_id, option, async=True)
    >>> result = thread.get()

    :param async bool
    :param str option_id: ID of option to replace (required)
    :param Option option: Attributes of option to replace (required)
    :return: Option, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    # In async mode the call returns the request thread; in sync mode it
    # returns the data directly — either way it is the value to return.
    return cls._replace_option_by_id_with_http_info(option_id, option, **kwargs)
def find_DQ_extension(self):
    """Return (dqfile, dq_suffix): the name of the file holding the data
    quality (DQ) array and the extension suffix it should be read from.

    Handles two layouts, primarily for WFPC2 data:
      * FITS files (e.g. ``*_c0f.fits``): the companion ``*_c1f.fits``
        file; suffix from its EXTNAME, or "SCI" if the file is missing.
      * GEIS files (``*.??h``): the companion ``*.?1h`` file; suffix
        from its FILETYPE keyword, defaulting to "SDQ".

    Raises:
        ValueError: if the input file looks like neither FITS nor GEIS.
    """
    dqfile = None
    # Look for additional file with DQ array, primarily for WFPC2 data
    indx = self._filename.find('.fits')
    if indx > 3:
        # FITS: swap the 3-char group prefix (e.g. '_c0') for '_c1'
        suffix = self._filename[indx - 4:indx]
        dqfile = self._filename.replace(suffix[:3], '_c1')
    elif (indx < 0 and len(self._filename) > 3 and
          self._filename[-4] == os.extsep and
          self._filename[-1].lower() == 'h'):
        # assume we've got a GEIS file: DQ lives in the '<name>.?1h' file
        dqfile = self._filename[:-2] + '1' + self._filename[-1]
        hdulist = readgeis.readgeis(dqfile)
        prih = hdulist[0].header
        if 'FILETYPE' in prih:
            dq_suffix = prih['FILETYPE'].strip().upper()
        else:
            # assume extension name is 'SDQ' for WFPC2 GEIS files
            dq_suffix = 'SDQ'
        hdulist.close()
        return dqfile, dq_suffix
    else:
        # fixed the double negative ("does not appear to be neither...nor")
        # in the original error message
        raise ValueError("Input file {} does not appear to be either "
                         "a FITS file or a GEIS file.".format(self._filename))
    if os.path.exists(dqfile):
        dq_suffix = fits.getval(dqfile, "EXTNAME", ext=1, memmap=False)
    else:
        # no companion DQ file on disk; fall back to the science extension
        dq_suffix = "SCI"
    return dqfile, dq_suffix
def _make_validate ( self , teststep_dict , entry_json ) :
"""Parse a HAR entry's response and append "eq" validators in place.

Validators are added for the status code, the Content-Type header
(when present), and each top-level scalar key of a JSON response body
(base64-encoded bodies are decoded first).

Args:
    teststep_dict (dict): teststep under construction; its "validate"
        list is appended to in place.
    entry_json (dict): HAR entry, e.g.
        "response": {
            "status": 200,
            "headers": [{"name": "Content-Type",
                         "value": "application/json; charset=utf-8"}],
            "content": {
                "size": 71,
                "mimeType": "application/json; charset=utf-8",
                "text": "eyJJc1N1Y2Nlc3MiOnRydWV9",
                "encoding": "base64"
            }
        }

Example of an appended validator:
    {"eq": ["status_code", 200]}
""" | teststep_dict [ "validate" ] . append ( { "eq" : [ "status_code" , entry_json [ "response" ] . get ( "status" ) ] } )
resp_content_dict = entry_json [ "response" ] . get ( "content" )
headers_mapping = utils . convert_list_to_dict ( entry_json [ "response" ] . get ( "headers" , [ ] ) )
if "Content-Type" in headers_mapping :
teststep_dict [ "validate" ] . append ( { "eq" : [ "headers.Content-Type" , headers_mapping [ "Content-Type" ] ] } )
# nothing further to validate without a response body
text = resp_content_dict . get ( "text" )
if not text :
return
mime_type = resp_content_dict . get ( "mimeType" )
if mime_type and mime_type . startswith ( "application/json" ) :
encoding = resp_content_dict . get ( "encoding" )
if encoding and encoding == "base64" :
content = base64 . b64decode ( text ) . decode ( 'utf-8' )
else :
content = text
try :
resp_content_json = json . loads ( content )
except JSONDecodeError :
logging . warning ( "response content can not be loaded as json: {}" . format ( content . encode ( "utf-8" ) ) )
return
# only flat JSON objects are validated; nested containers are skipped
if not isinstance ( resp_content_json , dict ) :
return
for key , value in resp_content_json . items ( ) :
if isinstance ( value , ( dict , list ) ) :
continue
teststep_dict [ "validate" ] . append ( { "eq" : [ "content.{}" . format ( key ) , value ] } )
def sync ( self ) :
"""Request keypad status and keypad descriptions from the ElkM1.

NOTE(review): the original docstring said "areas", but the
descriptions fetched are TextDescriptions.KEYPAD — confirm intent.
""" | self . elk . send ( ka_encode ( ) )
self . get_descriptions ( TextDescriptions . KEYPAD . value )
def _banner_default ( self ) :
"""Reimplement banner creation to let the user decide if he wants a
banner or not""" | # Don ' t change banner for external kernels
if self . external_kernel :
return ''
show_banner_o = self . additional_options [ 'show_banner' ]
if show_banner_o :
return self . long_banner ( )
else :
return self . short_banner ( ) |
def get_objective_bank(self):
    """Gets the ObjectiveBank associated with this session.

    return: (osid.learning.ObjectiveBank) - the ObjectiveBank
        associated with this session
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    # This should probably be accomplished via a handcar call instead of OSID
    url_path = construct_url('objective_banks', bank_id=self._catalog_idstr)
    payload = self._get_request(url_path)
    return objects.ObjectiveBank(payload)
def cells ( ) :
'''Demo pipeline ("# Slow"): each stage sleeps to simulate
expensive work (roughly 9 seconds total).''' | import time
def query_db ( ) :
time . sleep ( 5 )
return [ 1 , 2 , 3 , 4 , 5 ]
def clean ( data ) :
time . sleep ( 2 )
# keep even values only
return [ x for x in data if x % 2 == 0 ]
def myfilter ( data ) :
time . sleep ( 2 )
# keep values >= 3 (NOTE(review): defined but never called below)
return [ x for x in data if x >= 3 ]
rows = query_db ( )
data = clean ( rows )
# notebook-style trailing display expression; the function returns None
data , len ( data )
def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False):
    """Read SAS files stored as either XPORT or SAS7BDAT format files.

    Parameters
    ----------
    filepath_or_buffer : string or file-like object
        Path to the SAS file.
    format : string {'xport', 'sas7bdat'} or None
        If None, file format is inferred from file extension. If 'xport'
        or 'sas7bdat', uses the corresponding format.
    index : identifier of index column, defaults to None
        Identifier of column that should be used as index of the DataFrame.
    encoding : string, default is None
        Encoding for text data.  If None, text data are stored as raw bytes.
    chunksize : int
        Read file `chunksize` lines at a time, returns iterator.
    iterator : bool, defaults to False
        If True, returns an iterator for reading the file incrementally.

    Returns
    -------
    DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
    or XportReader
    """
    if format is None:
        # infer the format from the (string) file extension
        filepath_or_buffer = _stringify_path(filepath_or_buffer)
        if not isinstance(filepath_or_buffer, str):
            raise ValueError("If this is a buffer object rather "
                             "than a string name, you must specify "
                             "a format string")
        fname = filepath_or_buffer.lower()
        if fname.endswith(".xpt"):
            format = "xport"
        elif fname.endswith(".sas7bdat"):
            format = "sas7bdat"
        else:
            raise ValueError("unable to infer format of SAS file")
    fmt = format.lower()
    if fmt == 'xport':
        from pandas.io.sas.sas_xport import XportReader
        reader = XportReader(filepath_or_buffer, index=index,
                             encoding=encoding, chunksize=chunksize)
    elif fmt == 'sas7bdat':
        from pandas.io.sas.sas7bdat import SAS7BDATReader
        reader = SAS7BDATReader(filepath_or_buffer, index=index,
                                encoding=encoding, chunksize=chunksize)
    else:
        raise ValueError('unknown SAS format')
    # incremental mode hands the reader back to the caller
    if iterator or chunksize:
        return reader
    data = reader.read()
    reader.close()
    return data
def text(self):
    """Return the string associated with the current TEXT event, or an
    empty unicode string when there is none."""
    # m_name must be checked first: const is only consulted for a real
    # event (preserves the original short-circuit order)
    has_name = self.m_name != -1
    if has_name and self.m_event == const.TEXT:
        return self.sb[self.m_name]
    return u''
def type_assert_dict ( d , kcls = None , vcls = None , allow_none = False , cast_from = None , cast_to = None , dynamic = None , objcls = None , ctor = None , ) :
"""Type-assert every key/value of ``d`` against ``kcls``/``vcls``.

Dict values are also unmarshalled (JSON object -> Python object) when
``vcls`` is a class type.

Args:
    d: the dict to type assert.
    kcls: class to assert for keys (NOTE: JSON only allows str keys).
    vcls: class to assert for values.
    allow_none: allow None for values (would not make sense for keys).
    cast_from: type or tuple of types; values of this type are cast.
    cast_to: type to cast to when a value matches ``cast_from``, or
        None to cast to ``vcls``.  Use a lambda/factory if more than
        ``type(x)`` is needed.
    dynamic: a ``cls`` instance used as a dynamic default when ``d``
        is None.
    objcls: None or type that ``d`` itself must be (e.g. dict; note
        isinstance treats collections.OrderedDict as a dict).
    ctor: None or static method used as the constructor instead of
        ``__init__``.

Returns:
    ``d``, rebuilt as a new mapping of the same type — a potential
    performance concern when ``d`` has many items.

Raises:
    TypeError: if a key is not an instance of ``kcls`` or a value is
        not an instance of ``vcls``.
""" | _check_dstruct ( d , objcls )
# fall back to the dynamic default when no dict was provided
if ( d is None and dynamic is not None ) :
d = dynamic
# NOTE(review): if d is None and dynamic is also None, the .items()
# call below raises AttributeError — confirm that is intended.
t = type ( d )
# rebuild the mapping with every key/value checked (and possibly cast)
return t ( ( _check ( k , kcls ) if kcls else k , _check ( v , vcls , allow_none , cast_from , cast_to , ctor = ctor , ) if vcls else v , ) for k , v in d . items ( ) )
def _send_file_external_with_retry ( self , http_verb , host , url , http_headers , chunk ) :
"""Send ``chunk`` to host/url using ``http_verb``, retrying PUTs.

Only PUT requests are retried (up to SEND_EXTERNAL_PUT_RETRY_TIMES
attempts, sleeping SEND_EXTERNAL_RETRY_SECONDS between tries); other
verbs get a single attempt.  Re-raises the last ConnectionError once
retries are exhausted.
""" | count = 0
retry_times = 1
if http_verb == 'PUT' :
retry_times = SEND_EXTERNAL_PUT_RETRY_TIMES
while True :
try :
return self . data_service . send_external ( http_verb , host , url , http_headers , chunk )
except requests . exceptions . ConnectionError :
count += 1
if count < retry_times :
if count == 1 : # Only show a warning the first time we fail to send a chunk
self . _show_retry_warning ( host )
time . sleep ( SEND_EXTERNAL_RETRY_SECONDS )
# a fresh session avoids reusing a dead connection pool
self . data_service . recreate_requests_session ( )
else :
raise
def _create_model_matrices ( self ) :
"""Creates model matrices / vectors
Returns
None ( changes model attributes )""" | self . model_Y = self . data
self . model_scores = np . zeros ( ( self . X . shape [ 1 ] , self . model_Y . shape [ 0 ] + 1 ) ) |
def from_tri_2_sym(tri, dim):
    """Expand a 1D array of upper-triangular elements into a dim x dim
    matrix.

    NOTE(review): despite the name/original docstring, only the upper
    triangle is filled; the strictly-lower triangle stays zero, so the
    result is NOT symmetrized here — confirm whether callers mirror it.

    Parameters
    ----------
    tri : 1D array
        Elements of the upper triangular matrix, in np.triu_indices order.
    dim : int
        The dimension of the target matrix.

    Returns
    -------
    2D array of shape [dim, dim] with ``tri`` in its upper triangle.
    """
    out = np.zeros((dim, dim))
    rows, cols = np.triu_indices(dim)
    out[rows, cols] = tri
    return out
def resource_type(self, rt):
    """Set the CoRE Link Format ``rt`` attribute of the resource.

    :param rt: the rt value; coerced to ``str`` when necessary
    """
    value = rt if isinstance(rt, str) else str(rt)
    self._attributes["rt"] = value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.