signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def min_heap_sort(arr, simulation=False):
    """Heap sort using a min heap to sort ``arr`` in ascending order.

    Complexity: O(n log(n))

    :param arr: list to sort in place (also returned for convenience)
    :param simulation: when True, print the array at each iteration
    """
    step = 0
    if simulation:
        print("iteration", step, ":", *arr)
    for position in range(len(arr) - 1):
        # min_heapify performs one heapify pass and reports the iteration count.
        step = min_heapify(arr, position, simulation, step)
    return arr
def calculate_jacobsthal(num):
    """Calculate the nth Jacobsthal number with a bottom-up table.

    The recurrence is J(0) = 0, J(1) = 1, J(n) = J(n-1) + 2*J(n-2).

    Examples:
        >>> calculate_jacobsthal(5)
        11
        >>> calculate_jacobsthal(4)
        5

    :param num: The (non-negative) index n in the series to find
    :return: The Jacobsthal number value at index n
    """
    # BUG FIX: the original wrote jacobsthal[1] = 1 unconditionally, which
    # raised IndexError for num == 0 (the table then has only one slot).
    if num == 0:
        return 0
    # Table holding J(0)..J(num) for this call.
    jacobsthal = [0] * (num + 1)
    jacobsthal[0] = 0
    jacobsthal[1] = 1
    for i in range(2, num + 1):
        jacobsthal[i] = jacobsthal[i - 1] + 2 * jacobsthal[i - 2]
    return jacobsthal[num]
def Serialize(self, writer):
    """Serialize the full contract state to a binary stream.

    Field order matters: the base-class header is written first, then the
    script code, properties byte, and the variable-length string metadata.

    Args:
        writer (neo.IO.BinaryWriter): destination writer.
    """
    super(ContractState, self).Serialize(writer)
    self.Code.Serialize(writer)
    writer.WriteUInt8(self.ContractProperties)
    writer.WriteVarString(self.Name)
    writer.WriteVarString(self.CodeVersion)
    writer.WriteVarString(self.Author)
    writer.WriteVarString(self.Email)
    writer.WriteVarString(self.Description)
def _to_dict ( self ) :
"""Return a json dictionary representing this model .""" | _dict = { }
if hasattr ( self , 'fonts' ) and self . fonts is not None :
_dict [ 'fonts' ] = [ x . _to_dict ( ) for x in self . fonts ]
return _dict |
async def _response_handler(self):
    """Coroutine that listens for response data from the reader.

    Created as a task and scheduled on the event loop after ``connect``
    is called.  Reads separator-delimited frames forever, decodes each
    one and runs the status-code check on it.
    """
    # BUG FIX: the original duplicated this debug guard on two
    # consecutive lines; a single check suffices.
    if self.debug is True:
        print("listenning response!")
    while True:
        # The original wrapped this await in `try: ... except: raise`,
        # which is a no-op; any read error still propagates unchanged.
        res = await self.reader.readuntil(self.SEPARATOR)
        response = self.decoder(res)
        self._status_code_check(response)
def agents(self):
    """| Description: IDs of agents involved in the chat"""
    if not (self.api and self.agent_ids):
        # Either no API handle or no agent ids recorded yet.
        return None
    return self.api._get_agents(self.agent_ids)
def std_input(prompt="", style=None):
    """Very simple Python 2/3-compatible input helper.

    :param prompt: prompt message
    :param style: dictionary of ansi_wrap keyword-arguments
    """
    styled_prompt = ansi_wrap(prompt, **(style or {}))
    try:
        # Python 2: raw_input exists; NameError on Python 3.
        return raw_input(styled_prompt).strip()
    except NameError:
        return input(styled_prompt).strip()
def update(self):
    '''Check and update the object with a conditional HTTP request.

    Builds If-None-Match / If-Modified-Since headers from the cached
    ``etag`` / ``last_modified`` values and re-fetches the resource.

    :rtype: bool -- True if the object changed (attributes refreshed
        from the response), False on HTTP 304 (not modified).
    '''
    conditionalRequestHeader = dict()
    if self.etag is not None:
        conditionalRequestHeader[Consts.REQ_IF_NONE_MATCH] = self.etag
    if self.last_modified is not None:
        conditionalRequestHeader[Consts.REQ_IF_MODIFIED_SINCE] = self.last_modified
    status, responseHeaders, output = self._requester.requestJson(
        "GET", self._url.value, headers=conditionalRequestHeader)
    if status == 304:
        # Server says our cached representation is still current.
        return False
    else:
        # NOTE(review): reaches into the requester's name-mangled check
        # helper; raises on error statuses before storing attributes.
        headers, data = self._requester._Requester__check(
            status, responseHeaders, output)
        self._storeAndUseAttributes(headers, data)
        self.__completed = True
        return True
def get(feature, obj, **kwargs):
    '''Obtain a feature from a set of morphology objects.

    Parameters:
        feature (string): feature to extract
        obj: a neuron, population or neurite tree
        **kwargs: parameters to forward to underlying worker functions

    Returns:
        features as a 1D or 2D numpy array.
    '''
    try:
        worker = NEURITEFEATURES[feature]
    except KeyError:
        # Fall back to the neuron-level feature registry.
        worker = NEURONFEATURES[feature]
    return _np.array(list(worker(obj, **kwargs)))
def closed(self, code, reason=None):
    """Handle WebSocket closure.

    Status code 1000 denotes a normal close; any other code is recorded
    as an error before channels are drained and waiters are woken.
    """
    if code != 1000:
        self._error = errors.SignalFlowException(code, reason)
    _logger.info('Lost WebSocket connection with %s (%s: %s).',
                 self, code, reason)
    # Tell every pending channel the stream is over, then drop them all.
    for channel in self._channels.values():
        channel.offer(WebSocketComputationChannel.END_SENTINEL)
    self._channels.clear()
    with self._connection_cv:
        self._connected = False
        self._connection_cv.notify()
def postComponents(self, name, status, **kwargs):
    '''Create a new component.

    :param name: Name of the component
    :param status: Status of the component; 1-4
    :param description: (optional) Description of the component
    :param link: (optional) A hyperlink to the component
    :param order: (optional) Order of the component
    :param group_id: (optional) The group id that the component is within
    :param enabled: (optional)
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    '''
    payload = dict(kwargs, name=name, status=status)
    return self.__postRequest('/components', payload)
def path_shift(self, shift=1):
    '''Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

    :param shift: The number of path fragments to shift. May be negative
        to change the shift direction. (default: 1)
    '''
    script_name = self.environ.get('SCRIPT_NAME', '/')
    # NOTE: this calls the *module-level* path_shift() helper, which this
    # method deliberately shadows; it returns the new (script_name, path).
    self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift)
    self['PATH_INFO'] = self.path
def _indicator ( self , indicator_data ) :
"""Return previously stored indicator or new indicator .
Args :
indicator _ data ( dict | obj ) : An Indicator dict or instance of Indicator object .
Returns :
dict | obj : The new Indicator dict / object or the previously stored dict / object .""" | if isinstance ( indicator_data , dict ) : # get xid from dict
xid = indicator_data . get ( 'xid' )
else : # get xid from object
xid = indicator_data . xid
if self . indicators . get ( xid ) is not None : # return existing indicator from memory
indicator_data = self . indicators . get ( xid )
elif self . indicators_shelf . get ( xid ) is not None : # return existing indicator from shelf
indicator_data = self . indicators_shelf . get ( xid )
else : # store new indicators
self . indicators [ xid ] = indicator_data
return indicator_data |
def list_dhcp_agent_hosting_networks(self, network, **_params):
    """Fetch the list of DHCP agents hosting the given network."""
    path = (self.network_path + self.DHCP_AGENTS) % network
    return self.get(path, params=_params)
def calc_dihedral(point1, point2, point3, point4):
    """Calculate a dihedral angle.

    Two planes are defined by (point1, point2, point3) and
    (point2, point3, point4); the angle between them is returned.

    Parameters
    ----------
    point1, point2, point3, point4 : array-like, shape=(3,), dtype=float
        Four points that define the two planes.

    Returns
    -------
    float
        The dihedral angle between the two planes.
    """
    pts = np.array([point1, point2, point3, point4])
    bond1, bond2, bond3 = pts[1] - pts[0], pts[2] - pts[1], pts[3] - pts[2]
    # Each plane's normal is the cross product of its two edge vectors.
    normal_a = np.cross(bond1, bond2)
    normal_b = np.cross(bond2, bond3)
    return angle(normal_a, normal_b)
def get_field_def(schema,       # type: GraphQLSchema
                  parent_type,  # type: GraphQLObjectType
                  field_name,   # type: str
                  ):            # type: (...) -> Optional[GraphQLField]
    """Look up a field on the given type definition.

    Special-cases the introspection fields: __typename can always be
    queried (even on a Union); __schema and __type are only resolvable
    on the schema's query root, since attaching them to the query type
    would require mutating type definitions.
    """
    if field_name == "__typename":
        return TypeNameMetaFieldDef
    if schema.get_query_type() == parent_type:
        if field_name == "__schema":
            return SchemaMetaFieldDef
        if field_name == "__type":
            return TypeMetaFieldDef
    return parent_type.fields.get(field_name)
def get_address_nonce(self, address, api_token):
    """Look up the nonce (transaction count) of ``address`` via the
    Etherscan proxy API.  Necessary for transaction creation.

    :param address: hex-encoded Ethereum address
    :param api_token: optional Etherscan API key appended to the request
    :return: the nonce as an int
    :raises BroadcastError: on a non-200 API response
    """
    broadcast_url = self.base_url + '?module=proxy&action=eth_getTransactionCount'
    broadcast_url += '&address=%s' % address
    broadcast_url += '&tag=latest'
    if api_token:
        # BUG FIX: the original formatted this string but never appended
        # it, so the API key was silently dropped from the request.
        broadcast_url += '&apikey=%s' % api_token
    response = requests.get(broadcast_url, )
    if int(response.status_code) == 200:
        # int(hex_str, 0) converts the '0x...' nonce string to an int.
        nonce = int(response.json().get('result', None), 0)
        logging.info('Nonce check went correct: %s', response.json())
        return nonce
    else:
        logging.info('response error checking nonce')
        raise BroadcastError('Error checking the nonce through the Etherscan API. Error msg: %s', response.text)
def air_gap(self, volume: float = None, height: float = None) -> 'InstrumentContext':
    """Pull air into the pipette's current tip at the current location.

    :param volume: The amount in uL to aspirate air into the tube.
                   (Default will use all remaining volume in tip)
    :type volume: float
    :param height: The number of millimiters to move above the current Well
                   to air-gap aspirate. (Default: 5mm above current Well)
    :type height: float
    :raises NoTipAttachedError: If no tip is attached to the pipette
    :raises RuntimeError: If location cache is None (i.e. no prior
        location-taking method such as :py:meth:`.aspirate` was called)
    :returns: This instance
    """
    if not self.hw_pipette['has_tip']:
        raise hc.NoTipAttachedError('Pipette has no tip. Aborting air_gap')
    gap_height = 5 if height is None else height
    cached = self._ctx.location_cache
    if not cached or not isinstance(cached.labware, Well):
        raise RuntimeError('No previous Well cached to perform air gap')
    # Rise above the cached well, then aspirate air there.
    self.move_to(cached.labware.top(gap_height))
    self.aspirate(volume)
    return self
def _check_cache_minions(self, expr, delimiter, greedy, search_type,
                         regex_match=False, exact_match=False):
    '''Helper function to search for minions in master caches.

    If 'greedy', return accepted minions matched by the condition *or*
    those absent from the cache.  If not 'greedy', return only the
    minions that have cache data and match the condition.

    :return: dict with keys 'minions' (matching ids) and 'missing'
        (always an empty list here).
    '''
    cache_enabled = self.opts.get('minion_data_cache', False)

    def list_cached_minions():
        return self.cache.list('minions')

    if greedy:
        # Start from every accepted key file in pki_dir (skip dotfiles
        # and anything that is not a regular file).
        minions = []
        for fn_ in salt.utils.data.sorted_ignorecase(
                os.listdir(os.path.join(self.opts['pki_dir'], self.acc))):
            if not fn_.startswith('.') and os.path.isfile(
                    os.path.join(self.opts['pki_dir'], self.acc, fn_)):
                minions.append(fn_)
    elif cache_enabled:
        minions = list_cached_minions()
    else:
        # Non-greedy with no cache: nothing can match.
        return {'minions': [], 'missing': []}

    if cache_enabled:
        if greedy:
            cminions = list_cached_minions()
        else:
            cminions = minions
        if not cminions:
            return {'minions': minions, 'missing': []}
        # Use a set for O(1) membership and removal while filtering.
        minions = set(minions)
        for id_ in cminions:
            if greedy and id_ not in minions:
                # Cached minion that is not an accepted key: ignore.
                continue
            mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')
            if mdata is None:
                # No cached data: greedy keeps the minion, non-greedy drops it.
                if not greedy:
                    minions.remove(id_)
                continue
            search_results = mdata.get(search_type)
            if not salt.utils.data.subdict_match(search_results, expr,
                                                 delimiter=delimiter,
                                                 regex_match=regex_match,
                                                 exact_match=exact_match):
                minions.remove(id_)
        minions = list(minions)
    return {'minions': minions, 'missing': []}
def _resample(self, arrays, ji_windows):
    """Resample arrays with potentially different resolutions to the
    common destination resolution ``self.dst_res``.

    :param arrays: per-layer 2D arrays to resample
    :param ji_windows: mapping of resolution -> read window
    :return: numpy array of shape (height, width, n_layers)
    """
    # get a destination array template
    win_dst = ji_windows[self.dst_res]
    aff_dst = self._layer_meta[self._res_indices[self.dst_res][0]]["transform"]
    arrays_dst = list()
    for i, array in enumerate(arrays):
        arr_dst = np.zeros((int(win_dst.height), int(win_dst.width)))
        # Choose the resampling method by comparing the layer's native
        # resolution to the destination resolution.
        if self._layer_resolution[i] > self.dst_res:
            resampling = getattr(Resampling, self.upsampler)
        elif self._layer_resolution[i] < self.dst_res:
            resampling = getattr(Resampling, self.downsampler)
        else:
            # Already at destination resolution: pass through unchanged.
            arrays_dst.append(array.copy())
            continue
        reproject(array, arr_dst,  # arr_dst[0, :, :, i],
                  src_transform=self._layer_meta[i]["transform"],
                  dst_transform=aff_dst,
                  src_crs=self._layer_meta[0]["crs"],
                  dst_crs=self._layer_meta[0]["crs"],
                  resampling=resampling)
        arrays_dst.append(arr_dst.copy())
    # Stack bands along the last axis.
    arrays_dst = np.stack(arrays_dst, axis=2)
    # n_images x n x m x 10 would be the synergise format
    return arrays_dst
def vector_distance(v1, v2):
    """Given two nested-dict vectors of multiple dimensions, calculate the
    euclidean distance measure between them.

    NOTE: this returns the *sum of squared differences* (no square root),
    matching the original contract; values are coerced with int().
    """
    return sum(
        (int(v1[dim][key]) - int(v2[dim][key])) ** 2
        for dim in v1
        for key in v1[dim]
    )
def zinnia_statistics(template='zinnia/tags/statistics.html'):
    """Return statistics on the content of Zinnia as a context dict for
    the given template (counts plus derived per-entry averages)."""
    content_type = ContentType.objects.get_for_model(Entry)
    discussions = get_comment_model().objects.filter(content_type=content_type)
    entries = Entry.published
    categories = Category.objects
    tags = tags_published()
    authors = Author.published
    # Partition discussions: plain comments, pingbacks, trackbacks, rejects.
    replies = discussions.filter(flags=None, is_public=True)
    pingbacks = discussions.filter(flags__flag=PINGBACK, is_public=True)
    trackbacks = discussions.filter(flags__flag=TRACKBACK, is_public=True)
    rejects = discussions.filter(is_public=False)
    entries_count = entries.count()
    replies_count = replies.count()
    pingbacks_count = pingbacks.count()
    trackbacks_count = trackbacks.count()
    if entries_count:
        first_entry = entries.order_by('publication_date')[0]
        last_entry = entries.latest()
        # Approximate number of months in the publication span.
        months_count = (last_entry.publication_date -
                        first_entry.publication_date).days / 31.0
        entries_per_month = entries_count / (months_count or 1.0)
        comments_per_entry = float(replies_count) / entries_count
        linkbacks_per_entry = float(pingbacks_count + trackbacks_count) / entries_count
        total_words_entry = 0
        for e in entries.all():
            total_words_entry += e.word_count
        words_per_entry = float(total_words_entry) / entries_count
        words_per_comment = 0.0
        if replies_count:
            total_words_comment = 0
            for c in replies.all():
                total_words_comment += len(c.comment.split())
            words_per_comment = float(total_words_comment) / replies_count
    else:
        # No published entries: zero out every derived average.
        words_per_entry = words_per_comment = entries_per_month = \
            comments_per_entry = linkbacks_per_entry = 0.0
    return {'template': template,
            'entries': entries_count,
            'categories': categories.count(),
            'tags': tags.count(),
            'authors': authors.count(),
            'comments': replies_count,
            'pingbacks': pingbacks_count,
            'trackbacks': trackbacks_count,
            'rejects': rejects.count(),
            'words_per_entry': words_per_entry,
            'words_per_comment': words_per_comment,
            'entries_per_month': entries_per_month,
            'comments_per_entry': comments_per_entry,
            'linkbacks_per_entry': linkbacks_per_entry}
def get_instance(self, payload):
    """Build an instance of NotificationInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.notify.v1.service.notification.NotificationInstance
    :rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
    """
    service_sid = self._solution['service_sid']
    return NotificationInstance(self._version, payload, service_sid=service_sid)
def gff(args):
    """%prog gff *.gff

    Draw exons for genes based on gff files. Each gff file should contain
    only one gene, and only the "mRNA" and "CDS" features will be drawn
    on the canvas.
    """
    align_choices = ("left", "center", "right")
    p = OptionParser(gff.__doc__)
    p.add_option("--align", default="left", choices=align_choices,
                 help="Horizontal alignment [default: %default]")
    p.add_option("--noUTR", default=False, action="store_true",
                 help="Do not plot UTRs [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())
    fig = plt.figure(1, (8, 5))
    # One full-figure axes used as the drawing canvas in [0, 1] coords.
    root = fig.add_axes([0, 0, 1, 1])
    gffiles = args
    ngenes = len(gffiles)
    # Fraction of figure height reserved for the gene glyph rows.
    canvas = .6
    setups, ratio = get_setups(gffiles, canvas=canvas, noUTR=opts.noUTR)
    align = opts.align
    # x-anchor of the glyphs depends on the chosen alignment.
    xs = .2 if align == "left" else .8
    yinterval = canvas / ngenes
    ys = .8
    tip = .01
    for genename, mrnabed, cdsbeds in setups:
        ExonGlyph(root, xs, ys, mrnabed, cdsbeds, ratio=ratio, align=align)
        # Gene label goes on the side opposite the glyph anchor.
        if align == "left":
            root.text(xs - tip, ys, genename, ha="right", va="center")
        elif align == "right":
            root.text(xs + tip, ys, genename, ha="left", va="center")
        ys -= yinterval
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
    figname = "exons.pdf"
    savefig(figname, dpi=300)
def validate_collection(self, name_or_collection, scandata=False,
                        full=False, session=None):
    """Validate a collection.

    Returns a dict of validation info. Raises CollectionInvalid if
    validation fails.

    :Parameters:
      - `name_or_collection`: A Collection object or the name of a
        collection to validate.
      - `scandata`: Do extra checks beyond checking the overall
        structure of the collection.
      - `full`: Have the server do a more thorough scan of the
        collection. Use with `scandata` for a thorough scan of the
        structure of the collection and the individual documents.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    name = name_or_collection
    if isinstance(name, Collection):
        name = name.name
    if not isinstance(name, string_type):
        raise TypeError("name_or_collection must be an instance of "
                        "%s or Collection" % (string_type.__name__,))
    result = self.command("validate", _unicode(name),
                          scandata=scandata, full=full, session=session)
    valid = True
    # Pre 1.9 results: a single free-text "result" string.
    if "result" in result:
        info = result["result"]
        if info.find("exception") != -1 or info.find("corrupt") != -1:
            raise CollectionInvalid("%s invalid: %s" % (name, info))
    # Sharded results: one sub-document per shard under "raw".
    elif "raw" in result:
        for _, res in iteritems(result["raw"]):
            if "result" in res:
                info = res["result"]
                if (info.find("exception") != -1 or
                        info.find("corrupt") != -1):
                    raise CollectionInvalid("%s invalid: "
                                            "%s" % (name, info))
            elif not res.get("valid", False):
                valid = False
                break
    # Post 1.9 non-sharded results: a boolean "valid" flag.
    elif not result.get("valid", False):
        valid = False
    if not valid:
        raise CollectionInvalid("%s invalid: %r" % (name, result))
    return result
def Planck_wavenumber(n, T):
    '''The Planck function (flux density for blackbody radiation)
    in wavenumber space.

    n is wavenumber in 1/cm; T is temperature in Kelvin.
    Formula from Raymond Pierrehumbert, "Principles of Planetary
    Climate", page 140.
    '''
    # Convert wavenumber from 1/cm to mks units (1/m).
    n_mks = n * 100.
    return c_light * Planck_frequency(n_mks * c_light, T)
def result(self):
    """Sort every result bucket with the human-friendly key and return
    the result mapping."""
    for bucket in six.itervalues(self.__result):
        bucket.sort(key=_humanSortKey)
    return self.__result
def create_assessment(self, assessment_form):
    """Creates a new ``Assessment``.

    arg:    assessment_form (osid.assessment.AssessmentForm): the form
            for this ``Assessment``
    return: (osid.assessment.Assessment) - the new ``Assessment``
    raise:  IllegalState - ``assessment_form`` already used in a create
            transaction
    raise:  InvalidArgument - one or more of the form elements is
            invalid
    raise:  NullArgument - ``assessment_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    raise:  Unsupported - ``assessment_form`` did not originate from
            ``get_assessment_form_for_create()``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.create_resource_template
    collection = JSONClientValidated('assessment',
                                     collection='Assessment',
                                     runtime=self._runtime)
    if not isinstance(assessment_form, ABCAssessmentForm):
        raise errors.InvalidArgument('argument type is not an AssessmentForm')
    if assessment_form.is_for_update():
        raise errors.InvalidArgument('the AssessmentForm is for update only, not create')
    # A form is valid for exactly one create; check this session's ledger.
    try:
        if self._forms[assessment_form.get_id().get_identifier()] == CREATED:
            raise errors.IllegalState('assessment_form already used in a create transaction')
    except KeyError:
        # Unknown form id: it was not issued by this session.
        raise errors.Unsupported('assessment_form did not originate from this session')
    if not assessment_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    insert_result = collection.insert_one(assessment_form._my_map)
    # Mark the form consumed only after a successful insert.
    self._forms[assessment_form.get_id().get_identifier()] = CREATED
    result = objects.Assessment(
        osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
        runtime=self._runtime,
        proxy=self._proxy)
    return result
def aggr(self, group, **named_attributes):
    """Aggregation of the type U('attr1','attr2').aggr(group, computation="QueryExpression").

    Has the primary key ('attr1','attr2') and performs aggregation
    computations for all matching elements of `group`.

    :param group: The query expression to be aggregated.
    :param named_attributes: computations of the form
        new_attribute="sql expression on attributes of group"
    :return: The derived query expression
    """
    # With a primary key, aggregate via GROUP BY over self; without one,
    # project the computed attributes directly onto the group.
    return (GroupBy.create(self, group=group, keep_all_rows=False,
                           attributes=(), named_attributes=named_attributes)
            if self.primary_key
            else Projection.create(group, attributes=(),
                                   named_attributes=named_attributes,
                                   include_primary_key=False))
def _transform_coefficients(self, NN, HHw, CCw, ffparm, polycf,
                            any_pwl, npol, nw):
    """Transforms quadratic coefficients for w into coefficients for x.

    :return: tuple (HH, CC, C0) -- quadratic matrix, linear vector and
        constant term of the cost expressed in x-space.
    """
    nnw = any_pwl + npol + nw
    # Diagonal scaling matrix from ffparm column 4 (0-based index 3).
    M = csr_matrix((ffparm[:, 3], (range(nnw), range(nnw))))
    MR = M * ffparm[:, 2]
    # FIXME: Possibly column 1.
    HMR = HHw * MR
    MN = M * NN
    # Congruence transform of the quadratic term: HH = (MN)^T HHw (MN).
    HH = MN.T * HHw * MN
    CC = MN.T * (CCw - HMR)
    # Constant term of cost.
    C0 = 1. / 2. * MR.T * HMR + sum(polycf[:, 2])
    return HH, CC, C0[0]
def norm(values, min=None, max=None):
    """Unity-based normalization to scale data into the 0-1 range via
    ``(values - min) / (max - min)``.

    Args:
        values: Array of values to be normalized
        min (float, optional): Lower bound of normalization range
            (defaults to the array minimum; note the parameter name
            shadows the builtin, kept for interface compatibility)
        max (float, optional): Upper bound of normalization range
            (defaults to the array maximum)

    Returns:
        Array of normalized values
    """
    lower = np.min(values) if min is None else min
    upper = np.max(values) if max is None else max
    return (values - lower) / (upper - lower)
def sell_limit(self, quantity, price, **kwargs):
    """Shortcut for ``instrument.order("SELL", ...)``; accepts all of its
    `optional parameters <#qtpylib.instrument.Instrument.order>`_.

    :Parameters:
        quantity : int
            Order quantity
        price : float
            Limit price
    """
    kwargs.update(limit_price=price, order_type="LIMIT")
    self.parent.order("SELL", self, quantity=quantity, **kwargs)
def is_presence_handler(type_, from_, cb):
    """Return True if `cb` has been decorated with :func:`presence_handler`
    for the given `type_` and `from_`."""
    try:
        handlers = aioxmpp.service.get_magic_attr(cb)
    except AttributeError:
        # Undecorated callables carry no magic attribute at all.
        return False
    spec = aioxmpp.service.HandlerSpec(
        (_apply_presence_handler, (type_, from_)),
        require_deps=(SimplePresenceDispatcher,),
    )
    return spec in handlers
def logout(self):
    """**DEPRECATED**: Deauthorize use of this database."""
    warnings.warn("Database.logout() is deprecated",
                  DeprecationWarning, stacklevel=2)
    # Sockets will be deauthenticated lazily as they are next used.
    self.client._purge_credentials(self.name)
def _init(self, state, initiate_arg):
    '''Set initial state of the task. Called from initiate.

    @param initiate_arg: either the PartnerClass object (StartTask)
        or an agent_id (RestartTask)
    '''
    state.descriptor = None
    state.hosts = state.agent.query_partners('hosts')
    # -1 so the first host-selection step advances to index 0.
    state.current_index = -1
    self.log('%s task initiated, will be trying to start a '
             '%r on one of the hosts: %r',
             str(self.__class__.__name__), initiate_arg, state.hosts)
    if len(state.hosts) == 0:  # FIXME: Here would be a good idea to put an alert
        return self._fail('Shard Agent cannot start partner %r as it has '
                          'no Host Partners!' % (initiate_arg,))
def depend(*args):
    """Decorator to declare dependencies on other modules. Recommended usage is::

        import other_module

        @depend(other_module.ModuleClass)
        class MyModule(Module):
            ...

    :param \\*args: depended module classes.
    """
    def decorate(cls):
        # Give the class its own 'depends' list (don't mutate a base's).
        if 'depends' not in cls.__dict__:
            cls.depends = []
        cls.depends.extend(list(args))
        # Record the reverse edge on every dependency.
        for dependency in args:
            if not hasattr(dependency, 'referencedBy'):
                dependency.referencedBy = []
            dependency.referencedBy.append(cls)
        return cls
    return decorate
def setup_logger(logger, stream, filename=None, fmt=None):
    """Set up a logger (if no handlers exist yet) for console output,
    plus an optional file 'tee' output."""
    if not logger.handlers:
        console = logging.StreamHandler(stream)
        console.setLevel(logging.DEBUG)
        console.setFormatter(logging.Formatter(fmt))
        logger.addHandler(console)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
        if filename:
            # File copy gets timestamps prepended and only INFO+.
            file_fmt = "%(asctime)s " + (fmt if fmt else '%(message)s')
            tee = logging.FileHandler(filename)
            tee.setLevel(logging.INFO)
            tee.setFormatter(logging.Formatter(file_fmt))
            logger.addHandler(tee)
def populate(self, priority, address, rtr, data):
    """Decode a 6-byte payload into module/serial attributes.

    Layout of ``data``: byte 0 module type, bytes 1-2 current serial
    (big-endian u16), byte 3 module address, bytes 4-5 new serial.

    :return: None
    """
    assert isinstance(data, bytes)
    self.needs_firmware_priority(priority)
    self.needs_no_rtr(rtr)
    self.needs_data(data, 6)
    self.set_attributes(priority, address, rtr)
    self.module_type = data[0]
    # Serials are 16-bit; left-pad with two zero bytes so '>L' unpacks.
    prefix = bytes([0, 0])
    # BUG FIX: indexing bytes yields ints on Python 3, so the original
    # ``prefix + data[1] + data[2]`` raised TypeError; slice instead.
    (self.current_serial,) = struct.unpack('>L', prefix + data[1:3])
    self.module_address = data[3]
    (self.new_serial,) = struct.unpack('>L', prefix + data[4:6])
def crypto_pwhash_scryptsalsa208sha256_ll(passwd, salt, n, r, p,
                                          dklen=64, maxmem=SCRYPT_MAX_MEM):
    """Derive a cryptographic key from ``passwd`` and ``salt`` using the
    low-level scrypt function.

    The work factor can be tuned by picking different values for the
    parameters.

    :param bytes passwd:
    :param bytes salt: *must* be *exactly* :py:const:`.SALTBYTES` long
    :param int n: CPU/memory cost parameter
    :param int r: block size
    :param int p: the parallelism factor
    :param int dklen: length of the derived key in bytes
    :param int maxmem: the maximum memory available for scrypt's
        operations
    :rtype: bytes
    """
    # Validate argument types up front; the C call would not check them.
    ensure(isinstance(n, integer_types), raising=TypeError)
    ensure(isinstance(r, integer_types), raising=TypeError)
    ensure(isinstance(p, integer_types), raising=TypeError)
    ensure(isinstance(passwd, bytes), raising=TypeError)
    ensure(isinstance(salt, bytes), raising=TypeError)
    # Reject parameter combinations that would exceed maxmem.
    _check_memory_occupation(n, r, p, maxmem)
    # Output buffer for the derived key, filled by the C library.
    buf = ffi.new("uint8_t[]", dklen)
    ret = lib.crypto_pwhash_scryptsalsa208sha256_ll(
        passwd, len(passwd), salt, len(salt), n, r, p, buf, dklen)
    ensure(ret == 0, 'Unexpected failure in key derivation',
           raising=exc.RuntimeError)
    # Copy the C buffer out as Python bytes.
    return ffi.buffer(ffi.cast("char *", buf), dklen)[:]
def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
    '''Compare the current interface configuration against the desired one
    and return a dict describing only the settings that must change.'''
    changes = {}
    # Current DNS mode is inferred from which netsh section is present.
    cur_dns_proto = ('static'
                     if 'Statically Configured DNS Servers' in cur
                     else 'dhcp')
    if cur_dns_proto == 'static':
        # netsh returns a str for a single server, a list for several.
        if isinstance(cur['Statically Configured DNS Servers'], list):
            cur_dns_servers = cur['Statically Configured DNS Servers']
        else:
            cur_dns_servers = [cur['Statically Configured DNS Servers']]
        if set(dns_servers or ['None']) != set(cur_dns_servers):
            changes['dns_servers'] = dns_servers
    elif 'DNS servers configured through DHCP' in cur:
        cur_dns_servers = cur['DNS servers configured through DHCP']
        if dns_proto == 'static':
            # If we're currently set to 'dhcp' but moving to 'static',
            # specify the changes.
            if set(dns_servers or ['None']) != set(cur_dns_servers):
                changes['dns_servers'] = dns_servers
    cur_ip_proto = 'static' if cur['DHCP enabled'] == 'No' else 'dhcp'
    cur_ip_addrs = _addrdict_to_ip_addrs(cur.get('ip_addrs', []))
    cur_gateway = cur.get('Default Gateway')
    if dns_proto != cur_dns_proto:
        changes['dns_proto'] = dns_proto
    if ip_proto != cur_ip_proto:
        changes['ip_proto'] = ip_proto
    # Address/gateway changes only make sense for a static target config.
    if set(ip_addrs or []) != set(cur_ip_addrs):
        if ip_proto == 'static':
            changes['ip_addrs'] = ip_addrs
    if gateway != cur_gateway:
        if ip_proto == 'static':
            changes['gateway'] = gateway
    return changes
def conditional(self, condition, name):
    """Define a conditional element named `name` that exists when
    `condition` is true.

    `condition` can contain multiple conditions combined with logical
    expressions (&&, ||).

    Example:
    | Conditional | mycondition == 1 | foo |
    | u8          | myelement        | 42  |
    | End conditional |
    | Conditional | condition1 == 1 && condition2 != 2 | bar |
    | u8          | myelement        | 8   |
    | End condtional |
    """
    template = ConditionalTemplate(condition, name, self._current_container)
    self._message_stack.append(template)
def source(self, value):
    """Setter for **self.__source** attribute.

    :param value: Attribute value (path to an existing file), or None.
    :type value: unicode
    """
    if value is not None:
        # NOTE(review): assert-based validation is stripped under -O;
        # the 'unicode' check implies this module targets Python 2.
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("source", value)
        assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format("source", value)
    self.__source = value
def received_message(self, m):
    """Push upstream messages to downstream."""
    # TODO: No support for binary messages
    text = str(m)
    logger.debug("Incoming upstream WS: %s", text)
    uwsgi.websocket_send(text)
    logger.debug("Send ok")
def refresh(self) -> None:
    """Update the actual simulation values from the toy-value pairs.

    Usually there is no need to call |SeasonalParameter.refresh|
    explicitly; the "magic" methods __call__, __setattr__, and
    __delattr__ invoke it automatically when required.

    With no toy-value pairs all simulation values become zero; with a
    single pair its values apply to every simulation step; with several
    pairs the values are linearly interpolated for the centre of each
    simulation time step (so the original pair values only show up when
    their toy instances lie exactly at step midpoints).
    """
    if not self:
        # No toy-value pairs available: fall back to zero everywhere.
        self.values[:] = 0.
        return
    if len(self) == 1:
        # A single pair is relevant for all simulation steps.
        (onlyvalues,) = self._toy2values.values()
        self.values[:] = self.apply_timefactor(onlyvalues)
        return
    # Linear interpolation at the centre of every simulation step.
    centres = timetools.TOY.centred_timegrid(self.simulationstep)
    for jdx, moment in enumerate(centres):
        self.values[jdx] = self.apply_timefactor(self.interp(moment))
def _resource_deletion(resource):
    """Recalculate consumption details and save resource details."""
    # Only resources registered for cost tracking are processed.
    if resource.__class__ in CostTrackingRegister.registered_resources:
        # A deleted resource has an empty (new) configuration.
        estimate = models.PriceEstimate.update_resource_estimate(resource, {})
        estimate.init_details()
def generateSummary(self, extraLapse=TYPICAL_LAPSE):
    '''Generates a summary of the status of the expected scripts based on the log.

    This summary (a list of strings) is returned together with a second list.
    NOTE(review): the original docstring says the second list holds the dates of
    the most recent attempts at the failed jobs, but ``failedList`` actually
    holds the formatted per-job message strings -- confirm which is intended.

    :param extraLapse: extra tolerance added when checking whether a script
        ran recently enough (defaults to TYPICAL_LAPSE).
    :return: tuple ``(body, failedList)``.
    '''
    scriptsRun = self.scriptsRun
    body = []
    numberOfFailed = 0
    numberWithWarnings = 0
    failedList = []
    successList = []
    warningsList = []
    # Classify every script seen in the log as FAILED / WARNINGS / OK.
    for name, details in sorted(scriptsRun.iteritems()):
        status = None
        daysSinceSuccess = None
        if details["lastSuccess"] and expectedScripts.get(name):
            daysSinceSuccess = expectedScripts.get(name).getDaysSinceLastSuccess(details["lastSuccess"])
            # Too long since the last success (beyond the allowed lapse).
            if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
                status = "FAILED"
        else:
            # No recorded success, or the script is not among the expected
            # ones -- treated as a failure.
            status = "FAILED"
        if not status:
            # No verdict yet: derive it from the retrospect status bitmask.
            if details["status"] & RETROSPECT_FAIL:
                status = "FAILED"
            elif details["status"] & RETROSPECT_WARNING:
                status = "WARNINGS"
            elif status != "FAILED":
                # NOTE(review): status is falsy whenever this branch is
                # reached, so this condition always holds -- effectively a
                # plain `else`.
                status = "OK"
        if details["lastSuccess"] and daysSinceSuccess:
            lastSuccessDetails = "Last successful run on %s (%0.1f days ago)" % (details["lastSuccess"], daysSinceSuccess)
        else:
            lastSuccessDetails = "No recent successful run."
        if details["lastRun"]:
            lastRunDetails = "Last run on %s (%s)" % (details["lastRun"], status)
        else:
            lastRunDetails = "No recent run (%s)" % status
        if status == "FAILED":
            numberOfFailed += 1
            failedList.append("%s: %s. %s" % (name, lastRunDetails, lastSuccessDetails))
        elif status == "WARNINGS":
            numberWithWarnings += 1
            warningsList.append("%s: %s. %s" % (name, lastRunDetails, lastSuccessDetails))
        else:
            successList.append("%s: %s. %s" % (name, lastRunDetails, lastSuccessDetails))
    # Assemble the report: failed jobs first, then warnings, then successes.
    body = []
    if failedList:
        body.append("FAILED JOBS (%d)" % numberOfFailed)
        body.append("****************")
        for j in failedList:
            body.append(j)
        body.append("\n")
    if warningsList:
        body.append("JOBS WITH WARNINGS (%d)" % numberWithWarnings)
        body.append("***********************")
        for j in warningsList:
            body.append(j)
        body.append("\n")
    if successList:
        body.append("SUCCESSFUL JOBS")
        body.append("***************")
        for j in successList:
            body.append(j)
    return body, failedList
def show_rich_text(self, text, collapse=False, img_path=''):
    """Show text in rich mode."""
    # Make sure the plugin and its rich-text widget are visible first.
    self.switch_to_plugin()
    self.switch_to_rich_text()
    ctx = generate_context(collapse=collapse, img_path=img_path, css_path=self.css_path)
    self.render_sphinx_doc(text, ctx)
def _handle_call ( self , actual_call , stubbed_call ) :
"""Extends Stub call handling behavior to be callable by default .""" | self . _actual_calls . append ( actual_call )
use_call = stubbed_call or actual_call
return use_call . return_value |
def load(self, filename, params=None, force=False, depthrange=None, timerange=None, output_is_dict=True, **kwargs):
    """NetCDF data loader (Python 2 code: note ``except Exception, e``).

    :parameter filename: file name
    :parameter params: a list of variables to load (default: load ALL variables).
    :parameter force: with ``params``, take the upper-cased names as-is instead of
        intersecting them with the variables recorded in the file.
    :parameter depthrange: if a depth dimension is found, subset along this dimension.
    :parameter timerange: if a time dimension is found, subset along this dimension.

    .. note:: using :attr:`altimetry.tools.nctools.limit` allows subsetting to a given region.

    :parameter kwargs: additional arguments for subsetting along given dimensions.

    .. note:: You can index along any dimension by providing the name of the dimensions
        to subsample along. Values associated to the provided keywords should be a
        length 2 or 3 tuple (min,max,<step>) (cf. :func:`altimetry.data.nctools.load_ncVar`).

    :keyword output_is_dict: data structures are dictionaries (eg. my_hydro_data.variable['data']).
        If false uses an object with attributes (eg. my_hydro_data.variable.data).
        NOTE(review): this flag is not referenced anywhere in the visible body --
        the conversion code appears to have been commented out upstream.
    :return {type:dict} outStr: Output data structure containing all recorded
        parameters as specified by NetCDF file PARAMETER list.
    :author: Renaud Dussurget
    """
    # Accept a single variable name as well as a list of names.
    if (params is not None) & isinstance(params, str):
        params = [params]
    # Open file
    self._filename = filename
    try:
        ncf = ncfile(self._filename, "r")
    except Exception, e:
        # Opening errors are reported as a warning; an empty structure is returned.
        warn(repr(e), stacklevel=2)
        return {}
    # Load global attributes
    akeys = ncf.ncattrs()
    attrStr = OrderedDict()
    for A in akeys:
        attrStr.update({A: ncf.getncattr(A)})
    # Get list of recorded parameters:
    dum = ncf.variables.keys()
    nparam = np.shape(dum)[0]
    par_list = np.array(['{0}'.format(v) for v in ncf.variables.keys()])
    # remove empty items and update nparam
    par_list = par_list.compress([len(par) != 0 for par in par_list])
    nparam = par_list.size
    if nparam == 0:
        self.Error('File has no data ({0})'.format(self._filename))
    # Get dimensions
    ncdimlist = np.array(['{0}'.format(d) for d in ncf.dimensions.keys()])
    ndims = len(ncdimlist)
    dimStr = OrderedDict()
    dimStr.update({'_ndims': ndims})
    if ndims == 0:
        self.Error('File has no dimensions ({0})'.format(self._filename))
    # Check for the presence of strategic dimensions (lon/lat/time/depth),
    # matched by name heuristics; existDim holds the index of each one in
    # ncdimlist, or -1 when absent.
    checkedDims = np.array(['lon', 'lat', 'time', 'depth'])
    existDim = -np.ones(4, dtype=int)
    if not self.use_local_dims:
        for i, d in enumerate(ncdimlist):
            if ((d.lower().startswith('lon')) | (d.lower().find('longitude') != -1)) & (d.find('LatLon') == -1):
                existDim[0] = i
            if ((d.lower().startswith('lat')) | (d.lower().find('latitude') != -1)) & (d.find('LatLon') == -1):
                existDim[1] = i
            if (d.lower().startswith('time')) | (d.lower().startswith('date')):
                existDim[2] = i
            if (d.lower().startswith('lev')) | (d.lower().startswith('dep')):
                existDim[3] = i
    identified = existDim > -1
    # Load dimensional variables for the identified strategic dimensions.
    # TODO: Add scaling here in case...
    # NOTE(review): assigning through locals() does not reliably create
    # local variables in CPython functions -- the later references to
    # ``lon``/``lat``/``time``/``depth`` depend on this working; TODO confirm.
    for i, d in enumerate(existDim):
        if identified[i]:
            dimStr.update({ncdimlist[d]: len(ncf.dimensions[ncdimlist[d]])})
            # Append dimension
            cmd = 'load_ncVar(\'' + ncdimlist[d] + '\',nc=ncf)'
            self.message(4, 'loading : {0}={1}'.format(checkedDims[i], cmd))
            locals()[checkedDims[i]] = load_ncVar(ncdimlist[d], nc=ncf, **kwargs)
    # Dimensions not identified as strategic ones.
    missdims = set(ncdimlist)
    missdims.difference_update(ncdimlist[existDim[identified]])
    missdims = list(missdims)
    for i, d in enumerate(missdims):
        dimStr.update({d: len(ncf.dimensions[d])})
        if ncf.variables.has_key(d):
            cmd = 'load_ncVar(\'' + d + '\',nc=ncf)'
            self.message(4, 'loading : {0}={1}'.format(d, cmd))
            locals()[d] = load_ncVar(d, nc=ncf, **kwargs)
        # If the variable associated to the dimension does not exist, generate it
        else:
            self.message(1, '[WARNING] Netcdf file not standard - creating data for {0} dimnsion'.format(d))
            ndim = len(ncf.dimensions[d])
            cmd = '=var'
            self.message(4, 'loading : {0}={1}'.format(d, cmd))
            locals()[d] = {'_dimensions': {'_ndims': 1, d: ndim}, 'data': np.arange(ndim)}
    # Update dimlist with dimensions present in the object: strategic slots
    # are renamed to their canonical names (lon/lat/time/depth).
    dimlist = ncdimlist.copy()
    if identified.sum() > 0:
        dimlist[existDim[identified]] = checkedDims[identified]
    else:
        dimlist = dimlist[[]]
    if params is not None:
        if force:
            par_list = [i.upper() for i in params]
        else:
            # Keep only requested variables that actually exist in the file.
            par_list = list(set(params).intersection(par_list))
    else:
        par_list = par_list.tolist()
        # remove dimensional variable
        # NOTE(review): presumably this runs only when loading ALL variables
        # (dimension variables are returned separately); ``index`` would raise
        # for a dimension variable absent from par_list -- TODO confirm the
        # intended indentation against upstream.
        for d in ncdimlist[existDim[identified]]:
            par_list.pop(par_list.index(d))
    self.message(2, 'Recorded parameters : ' + str(nparam) + ' -> ' + str(par_list))
    # Extract within geographical limits (requires both lon and lat).
    if (existDim[0] > -1) & (existDim[1] > -1):
        llind, flag = in_limits(lon['data'], lat['data'], limit=self.limit)
        if isinstance(flag, tuple):
            # Separate flags for each axis.
            lon['data'] = recale(lon['data'].compress(flag[0]), degrees=True)
            lon['_dimensions'][lon['_dimensions'].keys()[1]] = flag[0].sum()
            lat['data'] = lat['data'].compress(flag[1])
            lat['_dimensions'][lat['_dimensions'].keys()[1]] = flag[1].sum()
        else:
            # One shared flag for both axes.
            lon['data'] = recale(lon['data'].compress(flag), degrees=True)
            lon['_dimensions'][lon['_dimensions'].keys()[1]] = flag.sum()
            lat['data'] = lat['data'].compress(flag)
            lat['_dimensions'][lat['_dimensions'].keys()[1]] = flag.sum()
        locals()[ncdimlist[existDim[0]]] = lon.copy()
        locals()[ncdimlist[existDim[1]]] = lat.copy()
        dimStr.update({ncdimlist[existDim[0]]: len(lon['data'])})
        dimStr.update({ncdimlist[existDim[1]]: len(lat['data'])})
    # Extract within time range.
    if (existDim[2] > -1):
        if (timerange is not None):
            timeflag = (time['data'] >= np.min(timerange)) & (time['data'] <= np.max(timerange))
        else:
            timeflag = np.ones(len(time['data']), dtype=bool)
        if timeflag.sum() == 0:
            # NOTE(review): message says "depth range" but this is the TIME subset.
            self.Error('No data within specified depth range (min/max = {0}/{1})'.format(np.min(time), np.max(time)))
        time['data'] = time['data'].compress(timeflag)
        time['_dimensions'][time['_dimensions'].keys()[1]] = timeflag.sum()
        locals()[ncdimlist[existDim[2]]] = time.copy()
        dimStr.update({ncdimlist[existDim[2]]: len(time['data'])})
    # Extract within depth range.
    if (existDim[3] > -1):
        if (depthrange is not None):
            depthflag = (depth['data'] >= np.min(depthrange)) & (depth['data'] <= np.max(depthrange))
        else:
            depthflag = np.ones(len(depth['data']), dtype=bool)
        if depthflag.sum() == 0:
            self.Error('No data within specified depth range (min/max = {0}/{1})'.format(np.min(depth), np.max(depth)))
        depth['data'] = depth['data'].compress(depthflag)
        depth['_dimensions'][depth['_dimensions'].keys()[1]] = depthflag.sum()
        locals()[ncdimlist[existDim[3]]] = depth.copy()
        dimStr.update({ncdimlist[existDim[3]]: len(depth['data'])})
    # Create output data structure
    outStr = OrderedDict()
    outStr.update({'_dimensions': dimStr})
    outStr.update({'_attributes': attrStr})
    if (existDim[0] > -1):
        outStr.update({ncdimlist[existDim[0]]: lon})
    if (existDim[1] > -1):
        outStr.update({ncdimlist[existDim[1]]: lat})
    if (existDim[2] > -1):
        outStr.update({ncdimlist[existDim[2]]: time})
    if (existDim[3] > -1):
        outStr.update({ncdimlist[existDim[3]]: depth})
    # Update object with remaining (non-strategic) dimension variables.
    for d in dimlist.compress([not outStr.has_key(f) for f in dimlist]):
        cmd = 'outStr.update({\'' + d + '\':' + d + '})'
        self.message(4, 'exec : ' + cmd)
        exec(cmd)
    ncdimStr = outStr.copy()
    # Get dimension lengths
    shape = ()
    for d in dimlist:
        shape += np.shape(locals()[d]['data'])
    ndims = np.size(shape)
    # Setup kwargs with current dimensional properties: subsetting keywords
    # given under canonical names are translated to the file's dimension
    # names; missing ones default to the (min, max) of the dimension data.
    for d, ncd in zip(*(dimlist, ncdimlist)):
        if not kwargs.has_key(ncd):
            if kwargs.has_key(d):
                kwargs.update({ncd: kwargs[d]})
                del kwargs[d]
            else:
                dvar = ncdimStr[d]['data']
                if isinstance(dvar, np.ma.masked_array):
                    kwargs.update({ncd: (np.nanmin(dvar.data), np.nanmax(dvar.data))})
                else:
                    kwargs.update({ncd: (np.nanmin(dvar), np.nanmax(dvar))})
    for param in par_list:
        # Load variables
        dumVar = load_ncVar(param, nc=ncf, **kwargs)
        cmd = 'dumStr = {\'' + param + '\':dumVar}'
        self.message(4, 'exec : ' + cmd)
        exec(cmd)
        outStr.update(dumStr)
        # Update output dimensions with extracted dimensions
        for ddum in dumStr[param]['_dimensions'].keys()[1:]:
            if outStr['_dimensions'].get(ddum) != dumStr[param]['_dimensions'][ddum]:
                outStr['_dimensions'][ddum] = dumStr[param]['_dimensions'][ddum]
    ncf.close()
    return outStr
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
    """Remove availability zones from an existing Load Balancer.

    All zones must be in the same region as the Load Balancer. Removing
    zones that are not registered with the Load Balancer has no effect,
    and you cannot remove all zones from a Load Balancer.

    :type load_balancer_name: string
    :param load_balancer_name: The name of the Load Balancer
    :type zones_to_remove: List of strings
    :param zones_to_remove: The name of the zone(s) to remove.
    :rtype: List of strings
    :return: An updated list of zones for this Load Balancer.
    """
    query = {'LoadBalancerName': load_balancer_name}
    self.build_list_params(query, zones_to_remove, 'AvailabilityZones.member.%d')
    return self.get_list('DisableAvailabilityZonesForLoadBalancer', query, None)
def _add_decision_criteria ( self , criteria_dict ) :
"""Adds Decision Criteria to the ProbModelXML .
Parameters
criteria _ dict : dict
Dictionary containing Deecision Criteria data .
For example : { ' effectiveness ' : { } , ' cost ' : { } }
Examples
> > > writer = ProbModelXMLWriter ( model )
> > > writer . _ add _ decision _ criteria ( criteria _ dict )""" | decision_tag = etree . SubElement ( self . xml , 'DecisionCriteria' , attrib = { } )
for criteria in sorted ( criteria_dict ) :
criteria_tag = etree . SubElement ( decision_tag , 'Criterion' , attrib = { 'name' : criteria } )
self . _add_additional_properties ( criteria_tag , criteria_dict [ criteria ] ) |
def create_bundle(self, bundleId, data=None):
    """Creates a bundle using Globalization Pipeline service.

    When no ``data`` is supplied, a default bundle payload (English
    source, no targets) is sent.
    """
    headers = {'content-type': 'application/json'}
    url = self.__get_base_bundle_url() + "/" + bundleId
    if data is None:
        data = {
            'sourceLanguage': 'en',
            'targetLanguages': [],
            'notes': [],
            'metadata': {},
            'partner': '',
            'segmentSeparatorPattern': '',
            'noTranslationPattern': '',
        }
    payload = json.dumps(data)
    return self.__perform_rest_call(requestURL=url, restType='PUT', body=payload, headers=headers)
def _extract_docs_return ( self ) :
"""Extract return description and type""" | if self . dst . style [ 'in' ] == 'numpydoc' :
data = '\n' . join ( [ d . rstrip ( ) . replace ( self . docs [ 'out' ] [ 'spaces' ] , '' , 1 ) for d in self . docs [ 'in' ] [ 'raw' ] . splitlines ( ) ] )
self . docs [ 'in' ] [ 'return' ] = self . dst . numpydoc . get_return_list ( data )
self . docs [ 'in' ] [ 'rtype' ] = None
# TODO : fix this
elif self . dst . style [ 'in' ] == 'google' :
data = '\n' . join ( [ d . rstrip ( ) . replace ( self . docs [ 'out' ] [ 'spaces' ] , '' , 1 ) for d in self . docs [ 'in' ] [ 'raw' ] . splitlines ( ) ] )
self . docs [ 'in' ] [ 'return' ] = self . dst . googledoc . get_return_list ( data )
self . docs [ 'in' ] [ 'rtype' ] = None
elif self . dst . style [ 'in' ] == 'groups' :
self . _extract_groupstyle_docs_return ( )
elif self . dst . style [ 'in' ] in [ 'javadoc' , 'reST' ] :
self . _extract_tagstyle_docs_return ( ) |
def get(self, key):
    """Gets the value of the property of the given key.

    Args:
        key (str): Key of the property to look-up.
    """
    match = self._get_match(key=key)
    if match:
        return self._get_value_from_match(key=key, match=match)
    # No such property recorded.
    return None
def _format_multirow ( self , row , ilevels , i , rows ) :
r"""Check following rows , whether row should be a multirow
e . g . : becomes :
a & 0 & \ multirow { 2 } { * } { a } & 0 &
& 1 & & 1 &
b & 0 & \ cline { 1-2}
b & 0 &""" | for j in range ( ilevels ) :
if row [ j ] . strip ( ) :
nrow = 1
for r in rows [ i + 1 : ] :
if not r [ j ] . strip ( ) :
nrow += 1
else :
break
if nrow > 1 : # overwrite non - multirow entry
row [ j ] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}' . format ( nrow = nrow , row = row [ j ] . strip ( ) )
# save when to end the current block with \ cline
self . clinebuf . append ( [ i + nrow - 1 , j + 1 ] )
return row |
def exps(self, opttype, strike):
    """Prices for given strike on all available dates.

    Parameters
    ----------
    opttype : str ('call' or 'put')
    strike : numeric

    Returns
    -------
    df : :class:`pandas.DataFrame`
    eq : float
        Price of underlying.
    qt : :class:`datetime.datetime`
        Time of quote.

    See Also
    --------
    :meth:`strikes`
    """
    relevant = _relevant_rows(self.data, (strike, slice(None), opttype,), "No key for {} {}".format(strike, opttype))
    columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
    frame = pd.DataFrame(index=relevant.index.get_level_values('Expiry'), columns=columns)
    underlying = relevant.loc[:, 'Underlying_Price'].values[0]
    quotetime = pd.to_datetime(relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
    # Copy the raw quote columns, then derive the mid price.
    for column in columns[2:]:
        frame.loc[:, column] = relevant.loc[:, column].values
    frame.loc[:, 'Price'] = (frame.loc[:, 'Bid'] + frame.loc[:, 'Ask']) / 2.
    _set_tv_other_ix(frame, opttype, 'Price', 'Time_Val', underlying, strike)
    return frame, underlying, quotetime
def _allocate_segment(self, session, net_id, source):
    """Allocate segment from pool.

    Return allocated db object or None.

    :param session: database session; the whole allocation runs in a
        (sub)transaction opened here.
    :param net_id: network id the segment is allocated for.
    :param source: origin recorded on the allocation row.
    """
    with session.begin(subtransactions=True):
        # Reclaim rows whose soft-delete timeout has expired: clearing
        # delete_time makes them allocatable again.
        hour_lapse = utils.utc_time_lapse(self.seg_timeout)
        count = (session.query(self.model).filter(self.model.delete_time < hour_lapse).update({"delete_time": None}))
        select = (session.query(self.model).filter_by(allocated=False, delete_time=None))
        # Selected segment can be allocated before update by someone else,
        # We retry until update success or DB_MAX_RETRIES retries
        for attempt in range(DB_MAX_RETRIES + 1):
            alloc = select.first()
            if not alloc:
                LOG.info("No segment resource available")
                # No resource available
                return
            # Compare-and-set: filtering on allocated=False makes the UPDATE
            # a no-op (count == 0) when another caller won the race.
            count = (session.query(self.model).filter_by(segmentation_id=alloc.segmentation_id, allocated=False).update({"allocated": True, "network_id": net_id, "source": source}))
            if count:
                return alloc
        # All retries exhausted without winning the race.
        LOG.error("ERROR: Failed to allocate segment for net %(net)s" " source %(src)s", {'net': net_id, 'src': source})
def _setup_output_file(self, output_filename, args, write_header=True):
    """Open and prepare output file."""
    # The command line is recorded at the top of the log; environment
    # variables are documented by benchexec elsewhere.
    try:
        output_file = open(output_filename, 'w')  # truncates any existing file
    except IOError as e:
        sys.exit(e)
    if write_header:
        cmdline = ' '.join(map(util.escape_string_shell, self._build_cmdline(args)))
        output_file.write(cmdline + '\n\n\n' + '-' * 80 + '\n\n\n')
        output_file.flush()
    return output_file
def delete_service_network(self, tenant_name, network):
    """Delete service network on the DCNM.

    :param tenant_name: name of tenant the network belongs to
    :param network: object that contains network parameters
    """
    partition = network.part_name or self._part_name
    seg_id = str(network.segmentation_id)
    if network.vlan:
        vlan_id = str(network.vlan)
        if network.mob_domain_name is not None:
            mob_domain_name = network.mob_domain_name
        else:
            # The current way will not work since _default_md is obtained
            # during create_service_network. It's preferrable to get it
            # during init TODO(padkrish)
            if self._default_md is None:
                self._set_default_mobility_domain()
            mob_domain_name = self._default_md
        network_info = {'organizationName': tenant_name,
                        'partitionName': partition,
                        'mobDomainName': mob_domain_name,
                        'vlanId': vlan_id,
                        'segmentId': seg_id, }
    else:
        network_info = {'organizationName': tenant_name,
                        'partitionName': partition,
                        'segmentId': seg_id, }
    LOG.debug("Deleting %s network in DCNM.", network_info)
    res = self._delete_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s network in DCNM.", network_info)
        return
    LOG.error("Failed to delete %s network in DCNM.", network_info)
    raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
def destroy(self):
    """A reimplemented destructor.

    This destructor will remove itself from the superview.
    """
    view = self.widget
    if view is not None:
        view.removeFromSuperview()
    super(UiKitView, self).destroy()
def discard_all(self, filterfunc=None):
    """Discard all waiting messages.

    :param filterfunc: A filter function to only discard the messages this
        filter returns.
    :returns: the number of messages discarded.

    *WARNING*: All incoming messages will be ignored and not processed.

    Example using filter:

        >>> def waiting_feeds_only(message):
        ...     try:
        ...         message_data = message.decode()
        ...     except:  # Should probably be more specific.
        ...         pass
        ...     if message_data.get("type") == "feed":
        ...         return True
        ...     else:
        ...         return False
    """
    if not filterfunc:
        # Without a filter the broker can purge the queue wholesale.
        return self.backend.queue_purge(self.queue)
    if self.no_ack or self.auto_ack:
        raise Exception("discard_all: Can't use filter with auto/no-ack.")
    discarded = 0
    message = self.fetch()
    while message is not None:
        if filterfunc(message):
            # Acking without processing drops the message.
            message.ack()
            discarded += 1
        message = self.fetch()
    return discarded
def handle_closed_task(self, task_name, record):
    """Do everything needed when a task is closed.

    Params:
        task_name (str): name of the task that is finishing
        record (logging.LogRecord): log record with all the info

    Returns:
        None
    """
    if task_name not in self.tasks:
        return
    if self.main_failed:
        self.mark_parent_tasks_as_failed(self.cur_task)
    task = self.tasks[task_name]
    # Colorize the closing message according to the task outcome.
    if task.failed:
        record.msg = ColorFormatter.colored('red', END_TASK_ON_ERROR_MSG)
    else:
        record.msg = ColorFormatter.colored('green', END_TASK_MSG)
    record.msg += ' (in %s)' % task.elapsed_time()
    if self.should_show_by_depth() or task.force_show:
        if task.force_show:
            # A force-shown task also surfaces any pending error state.
            self.handle_error()
        self.pretty_emit(record, is_header=True)
    self.close_children_tasks(task_name)
    self.tasks.pop(task_name)
def load_conll(f, features, n_features=(2 ** 16), split=False):
    """Load CoNLL file, extract features on the tokens and vectorize them.

    The CoNLL file format is a line-oriented text format describing
    sequences in a space-separated format, with blank lines separating
    sequences; the last part of each line is typically a label. The
    supplied ``features`` callable performs the actual feature
    extraction: it receives a token sequence and an index into it and
    returns an iterator of feature strings for that token (so it can use
    context). A ``sklearn.feature_extraction.FeatureHasher`` (the
    "hashing trick") maps feature names to columns, so the original
    feature names are not remembered.

    Parameters
    ----------
    f : {string, file-like}
        Input file.
    features : callable
        Feature extraction function (see above).
    n_features : integer, optional
        Number of columns in the output.
    split : boolean, default=False
        Whether to split lines on whitespace beyond what is needed to
        parse out the labels (useful for files with extra columns such
        as part-of-speech tags).

    Returns
    -------
    X : scipy.sparse matrix, shape (n_samples, n_features)
        Samples (feature vectors), as a single sparse matrix.
    y : np.ndarray, dtype np.string, shape n_samples
        Per-sample labels.
    lengths : np.ndarray, dtype np.int32, shape n_sequences
        Lengths of sequences within (X, y); their sum equals n_samples.
    """
    hasher = FeatureHasher(n_features=n_features, input_type="string")
    labels = []
    lengths = []
    with _open(f) as stream:
        # The generator lazily reads the file, so hashing must happen
        # while the file is still open.
        raw_X = _conll_sequences(stream, features, labels, lengths, split)
        X = hasher.transform(raw_X)
    return X, np.asarray(labels), np.asarray(lengths, dtype=np.int32)
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
    """Hook to dispatch a single job.

    This base implementation is abstract; concrete system interfaces must
    override it to actually submit the job.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError("SysInterface.dispatch_job_hook")
def page_for_in(self, leaderboard_name, member, page_size=DEFAULT_PAGE_SIZE):
    '''Determine the page where a member falls in the named leaderboard.

    @param leaderboard [String] Name of the leaderboard.
    @param member [String] Member name.
    @param page_size [int] Page size to be used in determining page location.
    @return the page where a member falls in the leaderboard.
    '''
    # Rank lookup direction depends on the leaderboard sort order.
    if self.order == self.ASC:
        member_rank = self.redis_connection.zrank(leaderboard_name, member)
    else:
        member_rank = self.redis_connection.zrevrank(leaderboard_name, member)
    # Unknown members are treated as rank 0; known ranks are 1-based.
    member_rank = 0 if member_rank is None else member_rank + 1
    return int(math.ceil(float(member_rank) / float(page_size)))
def open(self):
    """Retrieve this file's attributes from the server.

    Returns a Future.

    .. versionchanged:: 2.0
       No longer accepts a callback argument.

    .. versionchanged:: 0.2
       :class:`~motor.MotorGridOut` now opens itself on demand; calling
       ``open`` explicitly is rarely needed.
    """
    # Preserve evaluation order: fetch the file first, then the loop.
    ensure_future = self._ensure_file()
    io_loop = self.get_io_loop()
    return self._framework.chain_return_value(ensure_future, io_loop, self)
def read_annotation_file(annotation_file, annotation_type):
    """read_annotation_file(annotation_file, annotation_type) -> annotations

    Reads annotations from the given ``annotation_file``.

    The way annotations are read depends on the given ``annotation_type``.
    Depending on the type, one or several annotations might be present in the
    annotation file. Currently, these variants are implemented:

    - ``'lr-eyes'``: Only the eye positions are stored, in a single row, like:
      ``le_x le_y re_x re_y``; comment lines starting with ``'#'`` and blank
      lines are ignored.
    - ``'named'``: Each line of the file contains a name and two floats, like
      ``reye x y``; empty lines separate between sets of annotations.
    - ``'idiap'``: A special 22 point format, where each line contains the
      index and the locations, like ``1 x y``.
    - ``'fddb'``: a special format for the FDDB database; empty lines separate
      between sets of annotations.

    Finally, a list of ``annotations`` is returned in the format:
    ``[{name: (y, x)}]``.

    **Parameters:**

    ``annotation_file`` : str
      The file name of the annotation file to read.

    ``annotation_type`` : str (see above)
      The style of annotation file, in which the given ``annotation_file`` is.

    **Returns:**

    ``annotations`` : [dict]
      A list of annotations read from the given file, grouped by annotated
      objects (faces). Each annotation is generally specified as the two eye
      coordinates, i.e., ``{'reye': (rey, rex), 'leye': (ley, lex)}``, but
      other types of annotations might occur as well.

    **Raises:**

    ``ValueError``
      If ``annotation_type`` is unknown, or if an 'fddb' line cannot be
      interpreted.
    """
    annotations = [{}]
    with open(annotation_file) as f:
        if annotation_type == 'idiap':
            # Enumerated point annotations ("<index> <x> <y>") plus free-form
            # fields such as 'gender'; empty lines separate annotated objects.
            for line in f:
                positions = line.rstrip().split()
                if positions:
                    if positions[0].isdigit():
                        # position field; stored as (y, x)
                        assert len(positions) == 3
                        id = int(positions[0])
                        annotations[-1]['key%d' % id] = (float(positions[2]), float(positions[1]))
                    else:
                        # another field: first entry is the key, the rest are values
                        annotations[-1][positions[0]] = positions[1:]
                elif len(annotations[-1]) > 0:
                    # empty line; split between annotations
                    annotations.append({})
            # Add eye-center coordinates as the midpoint between the eye
            # corners (keys 1/5 and 6/10); keys 3 and 8 seem to be the pupils.
            for annotation in annotations:
                if 'key1' in annotation and 'key5' in annotation:
                    annotation['reye'] = ((annotation['key1'][0] + annotation['key5'][0]) / 2., (annotation['key1'][1] + annotation['key5'][1]) / 2.)
                if 'key6' in annotation and 'key10' in annotation:
                    annotation['leye'] = ((annotation['key6'][0] + annotation['key10'][0]) / 2., (annotation['key6'][1] + annotation['key10'][1]) / 2.)
        elif annotation_type == 'lr-eyes':
            # Eyes are given in a single row "le_x le_y re_x re_y", possibly
            # preceded by comment lines; only a single annotation per image.
            for line in f:
                if len(line) and line[0] != '#':
                    positions = line.rstrip().split()
                    if not positions:
                        # FIX: skip blank lines, which previously raised an
                        # IndexError when indexing into an empty split.
                        continue
                    annotations[0]['leye'] = (float(positions[1]), float(positions[0]))
                    annotations[0]['reye'] = (float(positions[3]), float(positions[2]))
        elif annotation_type == 'named':
            # Each line contains three entries: "keyword x y", stored as (y, x).
            for line in f:
                positions = line.rstrip().split()
                if positions:
                    annotations[-1][positions[0]] = (float(positions[2]), float(positions[1]))
                elif len(annotations[-1]) > 0:
                    # empty line; split between annotations
                    annotations.append({})
        elif annotation_type == 'fddb':
            # FDDB format: lines may carry a scalar value or a (y, x) pair.
            for line in f:
                positions = line.rstrip().split()
                if not len(positions):
                    if len(annotations[-1]) > 0:
                        # empty line; split between annotations
                        annotations.append({})
                elif len(positions) == 2:
                    annotations[-1][positions[0]] = float(positions[1])
                elif len(positions) == 3:
                    annotations[-1][positions[0]] = (float(positions[2]), float(positions[1]))
                else:
                    raise ValueError("Could not interpret line %s of the annotation file" % line)
        else:
            raise ValueError("The given annotation type %s is not known" % annotation_type)
    # drop a trailing empty annotation set
    if not annotations[-1]:
        del annotations[-1]
    return annotations
def zone_update(cls, zone_id, records):
    """Update records for a zone.

    Creates a fresh zone version, loads ``records`` into it and activates it.
    Returns the id of the newly activated version.
    """
    cls.echo('Creating new zone file')
    version = Zone.new(zone_id)
    cls.echo('Updating zone records')
    cls.call('domain.zone.record.set', zone_id, version, records)
    cls.echo('Activation of new zone version')
    Zone.set(zone_id, version)
    return version
def apply_RGB_matrix(var1, var2, var3, rgb_type, convtype="xyz_to_rgb"):
    """Applies an RGB working matrix to convert between XYZ and RGB.

    The arguments are tersely named var1, var2, and var3 to allow for the
    passing of XYZ _or_ RGB values: var1 is X for XYZ, and R for RGB; var2
    and var3 follow suit.

    :param var1: First component (X or R).
    :param var2: Second component (Y or G).
    :param var3: Third component (Z or B).
    :param rgb_type: RGB color-space object providing ``conversion_matrices``.
    :param str convtype: Key of the conversion matrix to apply (lower-cased).
    :return: Tuple of the three converted components, clamped to >= 0.0.
    """
    convtype = convtype.lower()
    # Retrieve the appropriate transformation matrix from the constants.
    rgb_matrix = rgb_type.conversion_matrices[convtype]
    # FIX: "\*" was an invalid escape sequence (SyntaxWarning on modern
    # Python); "\\*" produces the same runtime string explicitly.
    logger.debug(" \\* Applying RGB conversion matrix: %s->%s", rgb_type.__class__.__name__, convtype)
    # Stuff the RGB/XYZ values into a NumPy array for conversion.
    var_matrix = numpy.array((var1, var2, var3))
    # Perform the adaptation via matrix multiplication.
    rgb_r, rgb_g, rgb_b = numpy.dot(rgb_matrix, var_matrix)
    # Clamp the values to a valid (non-negative) range.
    return max(rgb_r, 0.0), max(rgb_g, 0.0), max(rgb_b, 0.0)
def mark_streamer(self, index):
    """Manually mark a streamer that should trigger.

    The next time check_streamers is called, the given streamer will be
    manually marked that it should trigger, which will cause it to trigger
    unless it has no data.

    Args:
        index (int): The index of the streamer that we should mark as
            manually triggered.

    Raises:
        ArgumentError: If the streamer index is invalid.
    """
    self._logger.debug("Marking streamer %d manually", index)
    total = len(self.streamers)
    if index >= total:
        raise ArgumentError("Invalid streamer index", index=index, num_streamers=total)
    self._manually_triggered_streamers.add(index)
def get_api_root_view(self, api_urls=None):
    """Return a basic root view listing the registered list endpoints."""
    list_name = self.routes[0].name
    # Map each registered prefix to the name of its list route.
    api_root_dict = OrderedDict(
        (prefix, list_name.format(basename=basename))
        for prefix, viewset, basename in self.registry
    )

    class APIRootView(views.APIView):
        _ignore_model_permissions = True
        exclude_from_schema = True

        def get(self, request, *args, **kwargs):
            # Return a plain {"name": "hyperlink"} response.
            ret = OrderedDict()
            namespace = request.resolver_match.namespace
            for key, url_name in sorted(api_root_dict.items(), key=itemgetter(0)):
                if namespace:
                    url_name = ':'.join((namespace, url_name))
                try:
                    ret[key] = reverse(url_name, args=args, kwargs=kwargs, request=request, format=kwargs.get('format', None))
                except NoReverseMatch:
                    # Don't bail out if e.g. no list routes exist, only detail routes.
                    continue
            return Response(ret)

    return APIRootView.as_view()
def add_link_type_vlan(enode, portlbl, name, vlan_id, shell=None):
    """Add a new virtual link with the type set to VLAN.

    Creates a new vlan device {name} on device {port}.
    Will raise an exception if value is already assigned.

    :param enode: Engine node to communicate with.
    :type enode: topology.platforms.base.BaseNode
    :param str portlbl: Port label to configure. Port label will be mapped
     automatically.
    :param str name: specifies the name of the new virtual device.
    :param str vlan_id: specifies the VLAN identifier.
    :param str shell: Shell name to execute commands. If ``None``, use the
     Engine Node default shell.
    """
    assert name
    if name in enode.ports:
        raise ValueError('Port {name} already exists'.format(name=name))
    assert portlbl
    assert vlan_id
    device = enode.ports[portlbl]
    command = 'ip link add link {dev} name {name} type vlan id {vlan_id}'.format(dev=device, name=name, vlan_id=vlan_id)
    output = enode(command, shell=shell)
    # An empty response indicates success.
    assert not output, 'Cannot add virtual link {name}'.format(name=name)
    enode.ports[name] = name
def close(self, clear=False):
    """Do a final refresh (or wipe) and remove this bar from the manager.

    If ``leave`` is True, the default, the effect is the same as
    :py:meth:`refresh`.
    """
    wipe = clear and not self.leave
    if wipe:
        self.clear()
    else:
        self.refresh()
    self.manager.remove(self)
def correlation(P, obs1, obs2=None, times=[1], k=None):
    r"""Time-correlation for equilibrium experiment.

    Parameters
    ----------
    P : (M, M) ndarray
        Transition matrix
    obs1 : (M,) ndarray
        Observable, represented as vector on state space
    obs2 : (M,) ndarray (optional)
        Second observable, for cross-correlations
    times : list of int (optional)
        List of times (in tau) at which to compute correlation
    k : int (optional)
        Number of eigenvalues and eigenvectors to use for computation

    Returns
    -------
    correlations : ndarray
        Correlation values at given times
    """
    n_states = P.shape[0]
    max_time = np.asarray(times).max()
    # For short lag times repeated matrix-vector products are cheaper than
    # a spectral decomposition.
    if max_time < n_states:
        return correlation_matvec(P, obs1, obs2=obs2, times=times)
    return correlation_decomp(P, obs1, obs2=obs2, times=times, k=k)
def unsubscribe(self, peer_jid):
    """Unsubscribe from the presence of the given `peer_jid`."""
    presence = stanza.Presence(
        type_=structs.PresenceType.UNSUBSCRIBE,
        to=peer_jid,
    )
    self.client.enqueue(presence)
def authorize(context, action, target, do_raise=True):
    """Verify that the action is valid on the target in this context.

    :param context: monasca project context
    :param action: String representing the action to be checked. This
        should be colon separated for clarity.
    :param target: Dictionary representing the object of the action for
        object creation. This should be a dictionary representing the
        location of the object, e.g. ``{'project_id': 'context.project_id'}``
    :param do_raise: if True (the default), raises PolicyNotAuthorized;
        if False returns False
    :type context: object
    :type action: str
    :type target: dict
    :type do_raise: bool
    :return: returns a non-False value (not necessarily True) if authorized,
        and False if not authorized and do_raise is False
    :raises oslo_policy.policy.PolicyNotAuthorized: if verification fails
    """
    init()
    credentials = context.to_policy_values()
    try:
        # The extra ``action`` kwarg is forwarded by oslo.policy for
        # exception message formatting.
        return _ENFORCER.authorize(action, target, credentials, do_raise=do_raise, action=action)
    except policy.PolicyNotRegistered:
        LOG.exception('Policy not registered')
        raise
    except Exception:
        LOG.debug('Policy check for %(action)s failed with credentials ' '%(credentials)s', {'action': action, 'credentials': credentials})
        raise
def asyncPipeLoop(context=None, _INPUT=None, conf=None, embed=None, **kwargs):
    """An operator that asynchronously loops over the input and performs the
    embedded submodule. Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : asyncPipe-like object (twisted Deferred iterable of items)
    embed : the submodule, i.e., asyncPipe*(context, _INPUT, conf).
        Most modules, with the exception of User inputs and Operators, can
        be sub-modules.
    conf : {
        'assign_part': {'value': <all or first>},
        'assign_to': {'value': <assigned field name>},
        'emit_part': {'value': <all or first>},
        'mode': {'value': <assign or EMIT>},
        'with': {'value': <looped field name or blank>},
        'embed': {'value': {'conf': <module conf>}}
    }

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items
    """
    # Build the per-item function that will run the embedded submodule.
    cust_func = get_cust_func(context, conf, embed, parse_embed, **kwargs)
    # NOTE(review): `opts` is not defined in this function body; it is
    # presumably a module-level dict of default options that gets mutated
    # here -- confirm before refactoring.
    opts.update({'cust_func': cust_func})
    # Split the input per the conf, then map the parser over the splits.
    splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
    gathered = yield asyncStarMap(asyncParseResult, splits)
    _OUTPUT = utils.multiplex(gathered)
    returnValue(_OUTPUT)
def wait_for_host(self, host):
    """Throttle requests to one host.

    Sleeps until the host's previously scheduled due time (if any), then
    records a new randomized due time for the next request.
    """
    now = time.time()
    due = self.times.get(host)
    if due is not None and due > now:
        time.sleep(due - now)
        now = time.time()
    delay = random.uniform(self.wait_time_min, self.wait_time_max)
    self.times[host] = now + delay
def get_metadata(filename, scan, paramfile='', **kwargs):
    """Parses an SDM file to define metadata for an observation, including
    scan info, image grid parameters, pipeline memory usage, etc.

    Mirrors parsems.get_metadata().
    If paramfile is defined, it will use it (filename or RT.Params instance ok).

    :param filename: path to the SDM data set to parse
    :param scan: scan number to select within the SDM
    :param paramfile: optional pipeline parameter file (or Params instance)
    :param kwargs: per-key overrides of pipeline parameters
    :return: dict ``d`` holding the full pipeline state
    """
    # create primary state dictionary
    d = {}
    # set workdir from the (absolute) file location
    d['filename'] = os.path.abspath(filename)
    d['workdir'] = os.path.dirname(d['filename'])
    # define parameters of pipeline via Params object
    params = pp.Params(paramfile)
    for k in params.defined:  # fill in default params
        d[k] = params[k]
    # overload with provided kwargs; flag keys that are standard params
    for key in kwargs.keys():
        if key in params.defined:
            stdname = '(standard)'
        else:
            stdname = ''
        logger.info('Setting %s key %s to %s' % (stdname, key, kwargs[key]))
        d[key] = kwargs[key]
    # option of not writing log file (need to improve later)
    if d['logfile']:
        fh = logging.FileHandler(os.path.join(d['workdir'], 'rtpipe_%d.log' % int(round(time()))))
        fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        logger.parent.addHandler(fh)
    # set log level by name if it is a valid attribute of logging
    if hasattr(logging, d['loglevel']):
        logger.parent.setLevel(getattr(logging, d['loglevel']))
    else:
        logger.warn('loglevel of {0} not attribute of logging'.format(d['loglevel']))
    # define scan list and source table from the SDM
    scans = read_scans(d['filename'], bdfdir=d['bdfdir'])
    sources = read_sources(d['filename'])
    # define source props; radec of the first source matching the scan's source
    # NOTE(review): iteritems() is Python-2-only -- confirm intended runtime.
    d['source'] = scans[scan]['source']
    d['radec'] = [(prop['ra'], prop['dec']) for (sr, prop) in sources.iteritems() if prop['source'] == d['source']][0]
    # define spectral info
    sdm = getsdm(d['filename'], bdfdir=d['bdfdir'])
    d['spw_orig'] = [int(str(row.spectralWindowId).split('_')[1]) for row in sdm['SpectralWindow']]
    d['spw_nchan'] = [int(row.numChan) for row in sdm['SpectralWindow']]
    try:
        # nominal reference frequency per spw
        d['spw_reffreq'] = [float(row.chanFreqStart) for row in sdm['SpectralWindow']]
    except:  # GMRT uses array of all channel starts
        d['spw_reffreq'] = [float(row.chanFreqArray.strip().split(' ')[2]) for row in sdm['SpectralWindow']]
    try:
        # nominal channel width per spw
        d['spw_chansize'] = [float(row.chanFreqStep) for row in sdm['SpectralWindow']]
    except:  # GMRT uses array of all channel starts
        d['spw_chansize'] = [float(row.chanWidthArray.strip().split(' ')[2]) for row in sdm['SpectralWindow']]
    # select spw. note that spw selection not fully supported yet.
    if not len(d['spw']):
        d['spw'] = d['spw_orig']
    # build channel center frequencies for selected spws, ordered by reffreq
    spwch = []
    reffreq = d['spw_reffreq']
    spectralwindow = d['spw_orig']
    numchan = d['spw_nchan']
    chansize = d['spw_chansize']
    for freq in sorted(d['spw_reffreq']):
        ii = reffreq.index(freq)
        if spectralwindow[ii] in d['spw']:
            # spacing of channel *centers*
            spwch.extend(list(np.linspace(reffreq[ii], reffreq[ii] + (numchan[ii] - 1) * chansize[ii], numchan[ii])))
    # frequency axis in GHz, averaged over read_fdownsample-sized groups
    d['freq_orig'] = np.array([np.mean(spwch[i:i + d['read_fdownsample']]) for i in range(0, len(spwch), d['read_fdownsample'])], dtype='float32') / 1e9
    # select subset of channels (default: all)
    if not len(d['chans']):
        d['chans'] = range(len(d['freq_orig']))
    d['nspw'] = len(d['spw'])
    d['freq'] = d['freq_orig'][d['chans']]
    d['nchan'] = len(d['chans'])
    # define chan ranges per spw (before selecting subset)
    spw_chanr = []
    i0 = 0
    for nch in d['spw_nchan']:
        spw_chanr.append((i0, i0 + nch))
        # NOTE(review): this assigns i0 = nch rather than i0 += nch (compare
        # the select loop below); correct only when all spws have equal
        # channel counts -- TODO confirm.
        i0 = nch
    d['spw_chanr'] = spw_chanr
    # define nchan per spw after selecting subset
    d['spw_nchan_select'] = [len([ch for ch in range(d['spw_chanr'][i][0], d['spw_chanr'][i][1]) if ch in d['chans']]) for i in range(len(d['spw_chanr']))]
    spw_chanr_select = []
    i0 = 0
    for nch in d['spw_nchan_select']:
        spw_chanr_select.append((i0, i0 + nch))
        i0 += nch
    d['spw_chanr_select'] = spw_chanr_select
    # define image params
    d['urange'] = {}
    d['vrange'] = {}
    d['scan'] = scan
    # uvw at scan start; converted to wavelengths at the first channel freq
    (u, v, w) = calc_uvw(d['filename'], d['scan'], bdfdir=d['bdfdir'])
    u = u * d['freq_orig'][0] * (1e9 / 3e8) * (-1)
    v = v * d['freq_orig'][0] * (1e9 / 3e8) * (-1)
    d['urange'][d['scan']] = u.max() - u.min()
    d['vrange'][d['scan']] = v.max() - v.min()
    # dish diameter, presumably in meters -- TODO confirm units
    d['dishdiameter'] = float(str(sdm['Antenna'][0].dishDiameter).strip())
    # uv resolution chosen so the delay beam is larger than the field of view
    d['uvres_full'] = np.round(d['dishdiameter'] / (3e-1 / d['freq'].min()) / 2).astype('int')
    if not all(('npixx_full' in d, 'npixy_full' in d)):
        # uvw from get_uvw already in lambda at ch0; rescale to max freq
        urange = d['urange'][d['scan']] * (d['freq'].max() / d['freq_orig'][0])
        vrange = d['vrange'][d['scan']] * (d['freq'].max() / d['freq_orig'][0])
        # power array for 2**i * 3**j; FFT-friendly image sizes
        powers = np.fromfunction(lambda i, j: 2 ** i * 3 ** j, (14, 10), dtype='int')
        rangex = np.round(d['uvoversample'] * urange).astype('int')
        rangey = np.round(d['uvoversample'] * vrange).astype('int')
        # smallest 2**i * 3**j size that covers the uv range
        largerx = np.where(powers - rangex / d['uvres_full'] > 0, powers, powers[-1, -1])
        p2x, p3x = np.where(largerx == largerx.min())
        largery = np.where(powers - rangey / d['uvres_full'] > 0, powers, powers[-1, -1])
        p2y, p3y = np.where(largery == largery.min())
        d['npixx_full'] = (2 ** p2x * 3 ** p3x)[0]
        d['npixy_full'] = (2 ** p2y * 3 ** p3y)[0]
    # define ants/bls
    # hacking here to fit observatory-specific use of antenna names
    if 'VLA' in str(sdm['ExecBlock'][0]['telescopeName']):
        # find config first, then antids, then ant names
        configid = [str(row.configDescriptionId) for row in sdm['Main'] if d['scan'] == int(row.scanNumber)][0]
        antids = [str(row.antennaId) for row in sdm['ConfigDescription'] if configid == row.configDescriptionId][0].split(' ')[2:]
        d['ants'] = [int(str(row.name).lstrip('ea')) for antid in antids for row in sdm['Antenna'] if antid == str(row.antennaId)]
        # Not complete. Execblock defines ants per scan, which can change.
    elif 'GMRT' in str(sdm['ExecBlock'][0]['telescopeName']):
        d['ants'] = [int(str(ant.antennaId).split('_')[1]) for ant in sdm['Antenna']]
    # remove unwanted ants
    for ant in d['excludeants']:
        d['ants'].remove(ant)
    # required to assure that added antennas don't confuse cal antenna parsing
    d['ants'].sort()
    d['nants'] = len(d['ants'])
    # NOTE(review): integer division intended here (Python 2 semantics);
    # under Python 3 this produces a float -- TODO confirm target runtime.
    d['nbl'] = d['nants'] * (d['nants'] - 1) / 2
    # define times
    d['starttime_mjd'] = scans[d['scan']]['startmjd']
    # assume inttime same for all scans; note: rebinds the `scan` parameter
    scan = sdm.scan(d['scan'])
    d['inttime'] = scan.bdf.get_integration(0).interval
    d['nints'] = int(scan.bdf.numIntegration)
    # define pols, keeping only recognized correlation products
    d['pols_orig'] = [pol for pol in (str(sdm['Polarization'][0].corrType).strip().split(' ')) if pol in ['XX', 'YY', 'XY', 'YX', 'RR', 'LL', 'RL', 'LR']]
    d['npol_orig'] = int(sdm['Polarization'][0].numCorr)
    # summarize metadata
    logger.info('\n')
    logger.info('Metadata summary:')
    logger.info('\t Working directory and data at %s, %s' % (d['workdir'], os.path.basename(d['filename'])))
    logger.info('\t Using scan %d, source %s' % (int(d['scan']), d['source']))
    logger.info('\t nants, nbl: %d, %d' % (d['nants'], d['nbl']))
    logger.info('\t Freq range (%.3f -- %.3f). %d spw with %d chans.' % (d['freq'].min(), d['freq'].max(), d['nspw'], d['nchan']))
    logger.info('\t Scan has %d ints (%.1f s) and inttime %.3f s' % (d['nints'], d['nints'] * d['inttime'], d['inttime']))
    logger.info('\t %d polarizations: %s' % (d['npol_orig'], d['pols_orig']))
    logger.info('\t Ideal uvgrid npix=(%d,%d) and res=%d (oversample %.1f)' % (d['npixx_full'], d['npixy_full'], d['uvres_full'], d['uvoversample']))
    return d
def remote_close(self):
    """Called by the remote worker to state that no more data will be transferred."""
    # Finish writing the temporary file before moving it into place.
    self.fp.close()
    self.fp = None
    dest = self.destfile
    # On Windows, os.rename does not automatically unlink an existing
    # target, so remove it manually first.
    if os.path.exists(dest):
        os.unlink(dest)
    os.rename(self.tmpname, dest)
    self.tmpname = None
    if self.mode is not None:
        os.chmod(dest, self.mode)
def _synthesize_single_subprocess_helper(self, text, voice_code, output_file_path=None, return_audio_data=True):
    """This is a helper function to synthesize a single text fragment via ``subprocess``.

    If ``output_file_path`` is ``None``,
    the audio data will not persist to file at the end of the method.

    If ``return_audio_data`` is ``True``,
    return the audio data at the end of the function call;
    if ``False``, just return ``(True, None)`` in case of success.

    :rtype: tuple (result, (duration, sample_rate, codec, data)) or (result, None)
    """
    # return zero if text is the empty string
    if len(text) == 0:
        # NOTE sample_rate, codec, data do not matter
        # if the duration is 0.000 => set them to None
        self.log(u"len(text) is zero: returning 0.000")
        return (True, (TimeValue("0.000"), None, None, None))
    # create a temporary output file if needed
    synt_tmp_file = (output_file_path is None)
    if synt_tmp_file:
        self.log(u"Synthesizer helper called with output_file_path=None => creating temporary output file")
        output_file_handler, output_file_path = gf.tmp_file(suffix=u".wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
        self.log([u"Temporary output file path is '%s'", output_file_path])
    try:
        # if the TTS engine reads text from file,
        # write the text into a temporary file
        if self.CLI_PARAMETER_TEXT_PATH in self.subprocess_arguments:
            self.log(u"TTS engine reads text from file")
            tmp_text_file_handler, tmp_text_file_path = gf.tmp_file(suffix=u".txt", root=self.rconf[RuntimeConfiguration.TMP_PATH])
            self.log([u"Creating temporary text file '%s'...", tmp_text_file_path])
            with io.open(tmp_text_file_path, "w", encoding="utf-8") as tmp_text_file:
                tmp_text_file.write(text)
            self.log([u"Creating temporary text file '%s'... done", tmp_text_file_path])
        else:
            self.log(u"TTS engine reads text from stdin")
            tmp_text_file_handler = None
            tmp_text_file_path = None
        # copy all relevant arguments, substituting placeholders with the
        # actual voice code / text path / wave path values
        self.log(u"Creating arguments list...")
        arguments = []
        for arg in self.subprocess_arguments:
            if arg == self.CLI_PARAMETER_VOICE_CODE_FUNCTION:
                arguments.extend(self._voice_code_to_subprocess(voice_code))
            elif arg == self.CLI_PARAMETER_VOICE_CODE_STRING:
                arguments.append(voice_code)
            elif arg == self.CLI_PARAMETER_TEXT_PATH:
                arguments.append(tmp_text_file_path)
            elif arg == self.CLI_PARAMETER_WAVE_PATH:
                arguments.append(output_file_path)
            elif arg == self.CLI_PARAMETER_TEXT_STDIN:
                # placeholder, do not append
                pass
            elif arg == self.CLI_PARAMETER_WAVE_STDOUT:
                # placeholder, do not append
                pass
            else:
                arguments.append(arg)
        self.log(u"Creating arguments list... done")
        # actual call via subprocess
        self.log(u"Calling TTS engine...")
        self.log([u"Calling with arguments '%s'", arguments])
        self.log([u"Calling with text '%s'", text])
        proc = subprocess.Popen(arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        if self.CLI_PARAMETER_TEXT_STDIN in self.subprocess_arguments:
            self.log(u"Passing text via stdin...")
            if gf.PY2:
                # Python 2 requires bytes on stdin
                (stdoutdata, stderrdata) = proc.communicate(input=gf.safe_bytes(text))
            else:
                (stdoutdata, stderrdata) = proc.communicate(input=text)
            self.log(u"Passing text via stdin... done")
        else:
            self.log(u"Passing text via file...")
            (stdoutdata, stderrdata) = proc.communicate()
            self.log(u"Passing text via file... done")
        proc.stdout.close()
        proc.stdin.close()
        proc.stderr.close()
        if self.CLI_PARAMETER_WAVE_STDOUT in self.subprocess_arguments:
            # engine emitted the wave on stdout: persist it ourselves
            self.log(u"TTS engine wrote audio data to stdout")
            self.log([u"Writing audio data to file '%s'...", output_file_path])
            with io.open(output_file_path, "wb") as output_file:
                output_file.write(stdoutdata)
            self.log([u"Writing audio data to file '%s'... done", output_file_path])
        else:
            self.log(u"TTS engine wrote audio data to file")
        if tmp_text_file_path is not None:
            self.log([u"Delete temporary text file '%s'", tmp_text_file_path])
            gf.delete_file(tmp_text_file_handler, tmp_text_file_path)
        self.log(u"Calling TTS ... done")
    except Exception as exc:
        # best-effort: log the failure and report it to the caller
        self.log_exc(u"An unexpected error occurred while calling TTS engine via subprocess", exc, False, None)
        return (False, None)
    # check the file can be read
    if not gf.file_can_be_read(output_file_path):
        self.log_exc(u"Output file '%s' cannot be read" % (output_file_path), None, True, None)
        return (False, None)
    # read audio data
    ret = self._read_audio_data(output_file_path) if return_audio_data else (True, None)
    # if the output file was temporary, remove it
    if synt_tmp_file:
        self.log([u"Removing temporary output file path '%s'", output_file_path])
        gf.delete_file(output_file_handler, output_file_path)
    # return audio data or (True, None)
    return ret
def distance_two(GPS_RAW1, GPS_RAW2, horizontal=True):
    '''distance between two points'''
    # Extract latitude/longitude (radians) and altitude (metres) from
    # whichever message flavour we were given, detected by attribute names.
    if hasattr(GPS_RAW1, 'Lat'):
        lat1, lon1, alt1 = radians(GPS_RAW1.Lat), radians(GPS_RAW1.Lng), GPS_RAW1.Alt
        lat2, lon2, alt2 = radians(GPS_RAW2.Lat), radians(GPS_RAW2.Lng), GPS_RAW2.Alt
    elif hasattr(GPS_RAW1, 'cog'):
        # presumably lat/lon scaled by 1e7 and alt in millimetres -- the
        # scaling matches that convention
        lat1, lon1, alt1 = radians(GPS_RAW1.lat) * 1.0e-7, radians(GPS_RAW1.lon) * 1.0e-7, GPS_RAW1.alt * 0.001
        lat2, lon2, alt2 = radians(GPS_RAW2.lat) * 1.0e-7, radians(GPS_RAW2.lon) * 1.0e-7, GPS_RAW2.alt * 0.001
    else:
        lat1, lon1, alt1 = radians(GPS_RAW1.lat), radians(GPS_RAW1.lon), GPS_RAW1.alt * 0.001
        lat2, lon2, alt2 = radians(GPS_RAW2.lat), radians(GPS_RAW2.lon), GPS_RAW2.alt * 0.001
    # Haversine formula for the great-circle (ground) distance in metres.
    dLat = lat2 - lat1
    dLon = lon2 - lon1
    a = sin(0.5 * dLat) ** 2 + sin(0.5 * dLon) ** 2 * cos(lat1) * cos(lat2)
    c = 2.0 * atan2(sqrt(a), sqrt(1.0 - a))
    ground_dist = 6371 * 1000 * c
    if horizontal:
        return ground_dist
    # Include the altitude difference for the slant distance.
    return sqrt(ground_dist ** 2 + (alt2 - alt1) ** 2)
def send_produce_request(self, payloads=None, acks=1, timeout=DEFAULT_REPLICAS_ACK_MSECS, fail_on_error=True, callback=None):
    """Encode and send some ProduceRequests.

    ProduceRequests will be grouped by (topic, partition) and then
    sent to a specific broker. Output is a list of responses in the
    same order as the list of payloads specified.

    Parameters
    ----------
    payloads :
        list of ProduceRequest
    acks :
        How many Kafka broker replicas need to write before
        the leader replies with a response
    timeout :
        How long the server has to receive the acks from the
        replicas before returning an error.
    fail_on_error :
        boolean, should we raise an Exception if we encounter an API error?
    callback :
        function, instead of returning the ProduceResponse,
        first pass it through this function

    Returns
    -------
    a deferred which callbacks with a list of ProduceResponse

    Raises
    ------
    FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
    """
    encoder = partial(KafkaCodec.encode_produce_request, acks=acks, timeout=timeout)
    # With acks == 0 the broker sends no reply, so there is nothing to decode.
    decoder = None if acks == 0 else KafkaCodec.decode_produce_response
    resps = yield self._send_broker_aware_request(payloads, encoder, decoder)
    returnValue(self._handle_responses(resps, fail_on_error, callback))
def from_stream(cls, stream):
    """Extract data from stream. Returns None if some error occurred."""
    cycles = []
    # Read SCF cycles until the parser signals exhaustion with None.
    cycle = GroundStateScfCycle.from_stream(stream)
    while cycle is not None:
        cycles.append(cycle)
        cycle = GroundStateScfCycle.from_stream(stream)
    return cls(cycles) if cycles else None
from typing import List


def has_zero_sum_pair(nums: List[int]) -> bool:
    """Identifies if a list contains a pair of distinct elements whose sum equals zero.

    Uses a single pass with a set of previously seen values, so it runs in
    O(n) time instead of the naive O(n^2) pairwise scan, with identical
    results (two distinct positions i < j with nums[i] + nums[j] == 0).

    Args:
        nums: The list of numbers which is to be checked for pairs summing to zero.

    Returns:
        A boolean indicating whether there are two distinct elements in the list that total zero.

    Examples:
        >>> has_zero_sum_pair([1, 3, 5, 0])
        False
        >>> has_zero_sum_pair([1, 3, -2, 1])
        False
        >>> has_zero_sum_pair([1, 2, 3, 7])
        False
        >>> has_zero_sum_pair([2, 4, -5, 3, 5, 7])
        True
        >>> has_zero_sum_pair([1])
        False
    """
    seen = set()
    for num in nums:
        # A previously seen value equal to -num completes a zero-sum pair.
        if -num in seen:
            return True
        seen.add(num)
    return False
def write_file(self, molecule, inpfile):
    """Write an ADF input file.

    Parameters
    ----------
    molecule : Molecule
        The molecule for this task.
    inpfile : str
        The name where the input file will be saved.
    """
    blocks = []
    # Atom coordinates, one subkey per site, in cartesian form.
    atoms = AdfKey("Atoms", options=["cartesian"])
    for site in molecule:
        atoms.add_subkey(AdfKey(str(site.specie), list(site.coords)))
    blocks.append(atoms)
    if molecule.charge != 0:
        # spin_multiplicity - 1 is the number of unpaired electrons.
        unpaired = molecule.spin_multiplicity - 1
        blocks.append(AdfKey("Charge", [molecule.charge, unpaired]))
        if unpaired != 0:
            blocks.append(AdfKey("Unrestricted"))
    with open(inpfile, "w+") as handle:
        for block in blocks:
            handle.write(str(block) + "\n")
        handle.write(str(self.task) + "\n")
        handle.write("END INPUT")
def get_export_configuration(self, config_id):
    """Retrieve the ExportConfiguration with the given ID.

    :param string config_id:
        ID for which to search
    :return:
        a :class:`meteorpi_model.ExportConfiguration`, or None if no match
        was found.
    """
    query = ('SELECT uid, exportConfigId, exportType, searchString, targetURL, ' 'targetUser, targetPassword, exportName, description, active ' 'FROM archive_exportConfig WHERE exportConfigId = %s')
    matches = self.generators.export_configuration_generator(sql=query, sql_args=(config_id,))
    # Only the first match (if any) is of interest.
    return first_from_generator(matches)
def reduce_terms(term_doc_matrix, scores, num_term_to_keep=None):
    '''Strip a TermDocMatrix of its non-important terms.

    Parameters
    ----------
    term_doc_matrix : TermDocMatrix or descendant
    scores : array-like
        Same length as the number of terms in the TermDocMatrix.
    num_term_to_keep : int, default=4000
        Should be > 0. Will keep between num_term_to_keep/2 and
        num_term_to_keep terms.

    Returns
    -------
    The result of removing the unselected terms from term_doc_matrix.
    '''
    keep = set(AutoTermSelector.get_selected_terms(term_doc_matrix, scores, num_term_to_keep))
    drop = set(term_doc_matrix.get_terms()) - keep
    return term_doc_matrix.remove_terms(drop)
def get_sample_values(self):
    """Read the rows specifying Samples and return a dictionary with
    related data.

    keys are:
        headers - row with "Samples" in column 0. These headers are
            used as dictionary keys in the rows below.
        prices - row with "Analysis Price" in column 0.
        total_analyses - row with "Total analyses" in column 0.
        price_totals - row with "Total price excl Tax" in column 0.
        samples - all other sample rows.
    """
    result = {'samples': []}
    rows = csv.reader(self.getOriginalFile().data.splitlines())
    in_sample_rows = False
    for row in rows:
        # Ignore rows that contain no truthy cell at all.
        if not any(row):
            continue
        if in_sample_rows:
            cells = [cell.strip() for cell in row]
            if any(cells):
                result['samples'].append(zip(result['headers'], cells))
            continue
        marker = row[0].strip().lower()
        if marker == 'samples':
            result['headers'] = [cell.strip() for cell in row]
        elif marker == 'analysis price':
            result['prices'] = zip(result['headers'], [cell.strip() for cell in row])
        elif marker == 'total analyses':
            result['total_analyses'] = zip(result['headers'], [cell.strip() for cell in row])
        elif marker == 'total price excl tax':
            result['price_totals'] = zip(result['headers'], [cell.strip() for cell in row])
            # Every row after the price-totals row is a sample row.
            in_sample_rows = True
    return result
def _converged(self, X):
    """Convergence test: ||responsibilities - last responsibilities|| <= tolerance."""
    history = self.responsibilities
    # Need at least two iterations before a difference can be measured.
    if len(history) < 2:
        return False
    delta = np.linalg.norm(history[-1] - history[-2])
    return delta <= self.tolerance
def run_step(self, context):
    """Run a single pipeline step.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
            mutate.
    """
    logger.debug("starting")
    # Add the step's 'in' parameters to context before execution.
    self.set_step_input_context(context)
    loop = self.while_decorator
    if loop:
        # The while decorator drives the (possibly repeated) invocation.
        loop.while_loop(context, self.run_foreach_or_conditional)
    else:
        self.run_foreach_or_conditional(context)
    logger.debug("done")
def _generate_args(pipeline, future, queue_name, base_path):
    """Generate the params used to describe a Pipeline's dependencies.

    The arguments passed to this method may be normal values, Slot instances
    (for named outputs), or PipelineFuture instances (for referring to the
    default output slot).

    Args:
        pipeline: The Pipeline instance to generate args for.
        future: The PipelineFuture for the Pipeline these arguments correspond to.
        queue_name: The queue to run the pipeline on.
        base_path: Relative URL for pipeline URL handlers.

    Returns:
        Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where:
            dependent_slots: Set of db.Key instances of _SlotRecords on which
                this pipeline will need to block before execution (passed to
                create a _BarrierRecord for running the pipeline).
            output_slot_keys: Set of db.Key instances of _SlotRecords that will
                be filled by this pipeline during its execution (passed to
                create a _BarrierRecord for finalizing the pipeline).
            params_text: JSON dictionary of pipeline parameters to be serialized
                and saved in a corresponding _PipelineRecord. Will be None if
                the params are too big and must be saved in a blob instead.
            params_blob: JSON dictionary of pipeline parameters to be serialized
                and saved in a Blob file, and then attached to a _PipelineRecord.
                Will be None if the params data size was small enough to fit in
                the entity.
    """
    params = {
        'args': [],
        'kwargs': {},
        'after_all': [],
        'output_slots': {},
        'class_path': pipeline._class_path,
        'queue_name': queue_name,
        'base_path': base_path,
        'backoff_seconds': pipeline.backoff_seconds,
        'backoff_factor': pipeline.backoff_factor,
        'max_attempts': pipeline.max_attempts,
        'task_retry': pipeline.task_retry,
        'target': pipeline.target,
    }
    dependent_slots = set()

    def encode_arg(value):
        """Encode one argument; slots are passed by key, everything else by value."""
        if isinstance(value, PipelineFuture):
            # A future stands in for its default output slot.
            value = value.default
        if isinstance(value, Slot):
            # Record the dependency so the run barrier can block on this slot.
            dependent_slots.add(value.key)
            return {'type': 'slot', 'slot_key': str(value.key)}
        return {'type': 'value', 'value': value}

    for positional in pipeline.args:
        params['args'].append(encode_arg(positional))
    for keyword, value in pipeline.kwargs.iteritems():
        params['kwargs'][keyword] = encode_arg(value)

    # after_all dependencies block on the *default* output slot of each
    # pipeline this one was ordered after.
    for other_future in future._after_all_pipelines:
        slot_key = other_future._output_dict['default'].key
        params['after_all'].append(str(slot_key))
        dependent_slots.add(slot_key)

    output_slot_keys = set()
    for slot_name, slot in future._output_dict.iteritems():
        output_slot_keys.add(slot.key)
        params['output_slots'][slot_name] = str(slot.key)

    # Oversized params go to a blob file; small ones are stored inline.
    params_encoded = json.dumps(params, cls=mr_util.JsonEncoder)
    if len(params_encoded) > _MAX_JSON_SIZE:
        params_blob = _write_json_blob(params_encoded, pipeline.pipeline_id)
        return dependent_slots, output_slot_keys, None, params_blob
    return dependent_slots, output_slot_keys, params_encoded, None
def _load_calib(self):
    """Load and compute intrinsic and extrinsic calibration parameters."""
    # Collect the calibration parameters in a plain dict first, then freeze
    # them into a namedtuple to prevent modification later.
    calib = {}
    # Rigid transformation from IMU to velodyne.
    calib['T_velo_imu'] = self._load_calib_rigid('calib_imu_to_velo.txt')
    # Camera intrinsics and extrinsics.
    calib.update(self._load_calib_cam_to_cam('calib_velo_to_cam.txt', 'calib_cam_to_cam.txt'))
    # Pre-compute the IMU to rectified camera coordinate transforms for all
    # four cameras by chaining velo->cam with imu->velo.
    for cam in range(4):
        velo_to_cam = calib['T_cam%d_velo' % cam]
        calib['T_cam%d_imu' % cam] = velo_to_cam.dot(calib['T_velo_imu'])
    self.calib = namedtuple('CalibData', calib.keys())(*calib.values())
def get_vault_hierarchy_design_session(self):
    """Gets the session designing vault hierarchies.

    return: (osid.authorization.VaultHierarchyDesignSession) - a
        ``VaultHierarchyDesignSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_vault_hierarchy_design() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_vault_hierarchy_design()`` is true.*
    """
    if self.supports_vault_hierarchy_design():
        # pylint: disable=no-member
        return sessions.VaultHierarchyDesignSession(runtime=self._runtime)
    raise errors.Unimplemented()
def build_markdown(toc_headlines, body, spacer=0, placeholder=None):
    """Returns a string with the Markdown output contents incl.
    the table of contents.

    Keyword arguments:
        toc_headlines: lines for the table of contents
            as created by the create_toc function.
        body: contents of the Markdown file including
            ID-anchor tags as returned by the
            tag_and_collect function.
        spacer: Adds vertical space after the table
            of contents. Height in pixels.
        placeholder: If a placeholder string is provided, the placeholder
            will be replaced by the TOC instead of inserting the TOC at
            the top of the document
    """
    toc_lines = list(toc_headlines)
    if spacer:
        # An empty div gives the requested vertical gap below the TOC.
        toc_lines.append('\n<div style="height:%spx;"></div>\n' % (spacer))
    toc_markdown = "\n".join(toc_lines)
    body_markdown = "\n".join(body).strip()
    if placeholder:
        return body_markdown.replace(placeholder, toc_markdown)
    return toc_markdown + body_markdown
def do_exit(self, arg_list: List[str]) -> bool:
    """Exit the application with an optional exit code.

    Usage: exit [exit_code]
    Where:
        * exit_code - integer exit code to return to the shell
    """
    if arg_list:
        code_text = arg_list[0]
        try:
            self.exit_code = int(code_text)
        except ValueError:
            # Report the bad argument and fall back to a failure exit code.
            self.perror("{} isn't a valid integer exit code".format(code_text))
            self.exit_code = -1
    self._should_quit = True
    return self._STOP_AND_EXIT
def push(self, buf):
    """Push a buffer into the source."""
    # Wrap the raw bytes in a GStreamer buffer before emitting.
    gst_buffer = Gst.Buffer.new_wrapped(buf)
    self._src.emit('push-buffer', gst_buffer)
def delete(args):
    """Delete NApps from server."""
    manager = NAppsManager()
    for napp in args['<napp>']:
        manager.set_napp(*napp)
        LOG.info('Deleting NApp %s from server...', manager.napp_id)
        try:
            manager.delete()
            LOG.info(' Deleted.')
        except requests.HTTPError as exception:
            response = exception.response
            # 405 means the server does not (yet) allow deletion at all.
            if response.status_code == 405:
                LOG.error('Delete Napp is not allowed yet.')
            else:
                msg = json.loads(response.content)
                LOG.error(' Server error: %s - ', msg['error'])
def import_qutip():
    """Try importing the qutip module, log an error if unsuccessful.

    :return: The qutip module if successful or None
    :rtype: Optional[module]
    """
    global _QUTIP_ERROR_LOGGED
    try:
        import qutip
        return qutip
    except ImportError:  # pragma no coverage
        # Only log the missing dependency once per process.
        if not _QUTIP_ERROR_LOGGED:
            _log.error("Could not import qutip. Tomography tools will not function.")
            _QUTIP_ERROR_LOGGED = True
        return None
def get_alignak_configuration(self, section=SECTION_CONFIGURATION, legacy_cfg=False, macros=False):
    """Get the Alignak configuration parameters.

    By default, returns all the variables included in the SECTION_CONFIGURATION
    section except the variables starting with 'cfg' and the macros.

    If `legacy_cfg` is True, this function only returns the variables included
    in the SECTION_CONFIGURATION section that start with 'cfg'.
    If `macros` is True, this function only returns the variables included in
    the SECTION_CONFIGURATION section that are considered as macros.

    :param section: name of the section to search for
    :type section: str
    :param legacy_cfg: only get the legacy cfg declarations
    :type legacy_cfg: bool
    :param macros: only get the macros declarations
    :type macros: bool
    :return: a dict containing the Alignak configuration parameters
    """
    found = self._search_sections(section)
    if section not in found:
        return []
    parameters = found[section]
    # Iterate a snapshot of the keys so entries can be removed in place.
    for name in list(parameters):
        if legacy_cfg:
            # Keep only the legacy configuration items ('cfg...').
            if not name.startswith('cfg'):
                parameters.pop(name)
        elif macros:
            # Keep only the macro definitions ('_...' or '$...').
            if not name.startswith('_') and not name.startswith('$'):
                parameters.pop(name)
        elif name.startswith(('cfg', '_', '$')):
            # Default: drop legacy configuration items and macros.
            parameters.pop(name)
    return parameters
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.