| signature | implementation |
|---|---|
def zadd(self, key, score, member, *pairs, exist=None):
    """Add one or more members to a sorted set, or update their scores.

    :param key: sorted set key
    :param score: score of the first member (int or float)
    :param member: the first member
    :param pairs: optional additional (score, member, score, member, ...) values
    :param exist: ZSET_IF_EXIST / ZSET_IF_NOT_EXIST to emit XX / NX
    :raises TypeError: score not int or float
    :raises TypeError: length of pairs is not even number
    """
    if not isinstance(score, (int, float)):
        raise TypeError("score argument must be int or float")
    if len(pairs) % 2 != 0:
        raise TypeError("length of pairs must be even number")
    # Every even-indexed element of ``pairs`` is a score; validate them all.
    extra_scores = pairs[::2]
    if any(not isinstance(extra, (int, float)) for extra in extra_scores):
        raise TypeError("all scores must be int or float")
    args = []
    if exist is self.ZSET_IF_EXIST:
        args.append(b'XX')
    elif exist is self.ZSET_IF_NOT_EXIST:
        args.append(b'NX')
    args.extend([score, member])
    if pairs:
        args.extend(pairs)
    return self.execute(b'ZADD', key, *args)
def persist_checksum(self, requirement, cache_file):
    """Persist the checksum of the input used to generate a binary distribution.

    :param requirement: A :class:`.Requirement` object.
    :param cache_file: The pathname of a cached binary distribution (a string).

    .. note:: The checksum is only calculated and persisted when
              :attr:`~.Config.trust_mod_times` is :data:`False`.
    """
    if self.config.trust_mod_times:
        # Modification times are trusted, so no checksum is needed.
        return
    checksum_file = '%s.txt' % cache_file
    # Write atomically so readers never observe a partial checksum file.
    with AtomicReplace(checksum_file) as temporary_file:
        with open(temporary_file, 'w') as handle:
            handle.write('%s\n' % requirement.checksum)
def where(self, key, operator, value):
    """Record a where clause for the pending query.

    :@param key: field name (string)
    :@param operator: comparison operator (string)
    :@param value: value to compare against (string)
    :@return self (for chaining)
    """
    clause = {"key": key, "operator": operator, "value": value}
    self.__store_query(clause)
    return self
def ColumnTypeParser(description):
    """Parses a single column description. Internal helper method.

    Args:
      description: a column description in one of the possible formats:
        'id'
        ('id',)
        ('id', 'type')
        ('id', 'type', 'label')
        ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})

    Returns:
      Dictionary with the keys id, label, type, and custom_properties where:
        - If label not given, it equals the id.
        - If type not given, string is used by default.
        - If custom properties are not given, an empty dictionary is used by
          default.

    Raises:
      DataTableException: The column description did not match the RE, or an
        unsupported type was passed.
    """
    if not description:
        raise DataTableException("Description error: empty description given")
    if not isinstance(description, (six.string_types, tuple)):
        raise DataTableException("Description error: expected either string or "
                                 "tuple, got %s." % type(description))
    # Normalize the bare-string form to the 1-tuple form.
    if isinstance(description, six.string_types):
        description = (description,)
    # The first three positional entries must all be strings.
    for field in description[:3]:
        if not isinstance(field, six.string_types):
            raise DataTableException("Description error: expected tuple of "
                                     "strings, current element of type %s." % type(field))
    parsed = {
        "id": description[0],
        "label": description[0],
        "type": "string",
        "custom_properties": {},
    }
    if len(description) > 1:
        parsed["type"] = description[1].lower()
        if len(description) > 2:
            parsed["label"] = description[2]
            if len(description) > 3:
                if not isinstance(description[3], dict):
                    raise DataTableException("Description error: expected custom "
                                             "properties of type dict, current element "
                                             "of type %s." % type(description[3]))
                parsed["custom_properties"] = description[3]
                if len(description) > 4:
                    raise DataTableException("Description error: tuple of length > 4")
    if parsed["type"] not in ["string", "number", "boolean",
                              "date", "datetime", "timeofday"]:
        raise DataTableException("Description error: unsupported type '%s'" % parsed["type"])
    return parsed
def fetch_tuples(self, max_tuples=20, timeout=None):
    """Fetch a number of tuples from this view.

    Fetching of data must have been started with
    :py:meth:`start_data_fetch` before calling this method.

    If ``timeout`` is ``None`` then the returned list will contain
    ``max_tuples`` tuples. Otherwise, if the timeout is reached, the list
    may contain fewer than ``max_tuples`` tuples.

    Args:
        max_tuples(int): Maximum number of tuples to fetch.
        timeout(float): Maximum time to wait for ``max_tuples`` tuples.

    Returns:
        list: List of fetched tuples.

    .. versionadded:: 1.12
    """
    fetched = []
    if timeout is None:
        # No deadline: block until enough tuples arrive or the fetcher
        # goes away.
        while len(fetched) < max_tuples:
            fetcher = self._data_fetcher
            if not fetcher:
                break
            fetched.append(fetcher.items.get())
        return fetched
    deadline = time.time() + float(timeout)
    while len(fetched) < max_tuples:
        remaining = deadline - time.time()
        if remaining <= 0:
            break
        fetcher = self._data_fetcher
        if not fetcher:
            break
        try:
            fetched.append(fetcher.items.get(timeout=remaining))
        except queue.Empty:
            break
    return fetched
def _add_attribute_values ( self , value , att_mappings , indices ) :
"""Add an attribute value to the given vertices .
: param int value : Attribute value .
: param dict att _ mappings : Dictionary of mappings between vertices and enumerated attributes .
: param list indices : Indices of the vertices .""" | for i in indices :
att_mappings [ i ] . append ( value ) |
def to_representation(self, failure_line):
    """Manually add matches our wrapper of the TLEMetadata -> TLE relation.

    I could not work out how to do this multiple relation jump with DRF (or
    even if it was possible) so using this manual method instead.
    """
    try:
        matches = failure_line.error.matches.all()
    except AttributeError:  # failure_line.error can return None
        matches = []
    # Serialize the raw error matches.
    tle_serializer = TextLogErrorMatchSerializer(matches, many=True)
    # Pull the classified failures linked to those matches.
    classified_failures = models.ClassifiedFailure.objects.filter(error_matches__in=matches)
    cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
    # Start from the default DRF representation and bolt the extra
    # relations on top of it.
    response = super().to_representation(failure_line)
    response['matches'] = tle_serializer.data
    response['classified_failures'] = cf_serializer.data
    return response
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
    """Get pip dependency tree by executing pipdeptree tool."""
    cmd = "{} -m pipdeptree --json".format(python_bin)
    _LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
    output = run_command(cmd, is_json=True).stdout
    if not package_name:
        return output
    # In some versions pipdeptree does not work with --packages flag, do the
    # logic on our own.
    # TODO: we should probably do difference of reference this output and
    # original environment.
    wanted = package_name.lower()
    for entry in output:
        if entry["package"]["key"].lower() == wanted:
            return entry
    # The given package was not found.
    if warn:
        _LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
    return None
def tplot_rename(old_name, new_name):
    """Rename a tplot variable that is already stored in memory.

    Parameters:
        old_name : str
            Old name of the Tplot Variable
        new_name : str
            New name of the Tplot Variable

    Returns:
        None

    Examples:
        >>> # Rename Variable 1 to Variable 2
        >>> import pytplot
        >>> pytplot.tplot_rename("Variable1", "Variable2")
    """
    # Bail out when the variable to rename is unknown.
    if old_name not in pytplot.data_quants.keys():
        print("That name is currently not in pytplot")
        return
    # A numeric old_name is an index entry; resolve it to the real name.
    if isinstance(old_name, int):
        old_name = pytplot.data_quants[old_name].name
    # Rebuild the dictionary so the renamed entry keeps its original slot.
    renamed = OrderedDict(
        (new_name, v) if k == old_name else (k, v)
        for k, v in pytplot.data_quants.items()
    )
    # Keep each variable's .name attribute in sync with its dictionary key.
    for key in renamed:
        renamed[key].name = key
    pytplot.data_quants = renamed
    return
def _compute_term2 ( self , C , mag , r ) :
"""This computes the term f2 equation 8 Drouet & Cotton ( 2015)""" | return ( C [ 'c4' ] + C [ 'c5' ] * mag ) * np . log ( np . sqrt ( r ** 2 + C [ 'c6' ] ** 2 ) ) + C [ 'c7' ] * r |
def get(self, sid):
    """Constructs a MessageContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.chat.v2.service.channel.message.MessageContext
    :rtype: twilio.rest.chat.v2.service.channel.message.MessageContext
    """
    solution = self._solution
    return MessageContext(
        self._version,
        service_sid=solution['service_sid'],
        channel_sid=solution['channel_sid'],
        sid=sid,
    )
def train(net, train_data, test_data):
    """Train textCNN model for sentiment analysis.

    NOTE(review): relies on module-level globals (text_cnn, vocab, args,
    context, loss, max_len, evaluate, DataLoader, mx, autograd, random,
    time) being initialised before this is called — confirm at call site.

    :param net: the textCNN network to train (re-bound by text_cnn.init)
    :param train_data: full training set; split 90/10 into train/validation
    :param test_data: held-out test set, evaluated once after training
    :return: final test accuracy
    """
    start_pipeline_time = time.time()
    net, trainer = text_cnn.init(net, vocab, args.model_mode, context, args.lr)
    # Shuffle before splitting so the 90/10 split is random.
    random.shuffle(train_data)
    sp = int(len(train_data) * 0.9)
    train_dataloader = DataLoader(dataset=train_data[:sp], batch_size=args.batch_size, shuffle=True)
    val_dataloader = DataLoader(dataset=train_data[sp:], batch_size=args.batch_size, shuffle=False)
    test_dataloader = DataLoader(dataset=test_data, batch_size=args.batch_size, shuffle=False)
    # Training/Testing.
    best_val_acc = 0
    for epoch in range(args.epochs):
        # Epoch training stats.
        start_epoch_time = time.time()
        epoch_L = 0.0
        epoch_sent_num = 0
        epoch_wc = 0
        # Log interval training stats.
        start_log_interval_time = time.time()
        log_interval_wc = 0
        log_interval_sent_num = 0
        log_interval_L = 0.0
        for i, (data, label) in enumerate(train_dataloader):
            # Transpose (batch, seq) -> (seq, batch); presumably the model
            # expects time-major input — TODO confirm against net definition.
            data = mx.nd.transpose(data.as_in_context(context))
            label = label.as_in_context(context)
            # Word count per batch uses the padded length max_len.
            wc = max_len
            log_interval_wc += wc
            epoch_wc += wc
            # After the transpose, shape[1] is the batch (sentence) count.
            log_interval_sent_num += data.shape[1]
            epoch_sent_num += data.shape[1]
            with autograd.record():
                output = net(data)
                L = loss(output, label).mean()
            L.backward()
            # Update parameter.
            trainer.step(1)
            log_interval_L += L.asscalar()
            epoch_L += L.asscalar()
            if (i + 1) % args.log_interval == 0:
                print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (epoch, i + 1, len(train_dataloader), log_interval_L / log_interval_sent_num, log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
                # Clear log interval training stats.
                start_log_interval_time = time.time()
                log_interval_wc = 0
                log_interval_sent_num = 0
                log_interval_L = 0
        end_epoch_time = time.time()
        # Validate on the held-out 10% after each epoch.
        val_avg_L, val_acc = evaluate(net, val_dataloader)
        print('[Epoch %d] train avg loss %g, ' 'test acc %.4f, test avg loss %g, throughput %gK wps' % (epoch, epoch_L / epoch_sent_num, val_acc, val_avg_L, epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))
        # Track the best validation accuracy seen so far.
        if val_acc >= best_val_acc:
            print('Observed Improvement.')
            best_val_acc = val_acc
    # Final evaluation on the test set after all epochs.
    test_avg_L, test_acc = evaluate(net, test_dataloader)
    print('Test loss %g, test acc %.4f' % (test_avg_L, test_acc))
    print('Total time cost %.2fs' % (time.time() - start_pipeline_time))
    return test_acc
def read_data(self, sheet, begin=None):
    """Simple template matching over a worksheet (single-row templates only).

    :param sheet: worksheet; should be opened in read_only mode
    :param begin: first data row (1-based); when None, ``self.begin`` is used
        (previously this parameter was silently ignored)
    :return: generator yielding one dict per data row, mapping template
        fields to (stripped) cell values
    :raises ValueError: when the template row has no 'subs' definition
    """
    # Honour the documented ``begin`` argument, falling back to self.begin.
    line = begin if begin is not None else self.begin
    rows = sheet.rows
    # Skip header rows.  next(rows) replaces the Python-2-only rows.next().
    for _ in range(line - 1):
        next(rows)
    template_line = self.template[self.begin - 1]
    if not template_line['subs']:
        raise ValueError("Template definition is not right")
    for row in rows:
        record = {}
        for col_def in template_line['subs'][0]['cols']:
            if col_def['field']:
                cell = row[col_def['col'] - 1]
                value = cell.value
                # Strip surrounding whitespace from textual cells only
                # (str replaces the Python-2-only (str, unicode) pair).
                if isinstance(value, str):
                    value = value.strip()
                record[col_def['field']] = value
        yield record
def ipv4_range_to_list(start_packed, end_packed):
    """Return a list of IPv4 entries from start_packed to end_packed.

    Both arguments are packed (network byte order) 4-byte addresses; the
    returned list contains dotted-quad strings, endpoints included.
    """
    first = struct.unpack('!L', start_packed)[0]
    last = struct.unpack('!L', end_packed)[0]
    return [socket.inet_ntoa(struct.pack('!L', address))
            for address in range(first, last + 1)]
def GET_AUTH(self, courseid):  # pylint: disable=arguments-differ
    """GET request"""
    # Only the course is needed here; rights checking happens in the helper.
    course = self.get_course_and_check_rights(courseid)[0]
    return self.page(course)
def _skip_whitespace(self):
    """Increment over whitespace, counting characters.

    :return: number of whitespace tokens consumed
    """
    skipped = 0
    while self._cur_token['type'] is TT.ws and not self._finished:
        self._increment()
        skipped += 1
    return skipped
def setDocument(self, filename, empty=""):
    """Sets the HTML text to be displayed.

    Falls back to the ``empty`` placeholder text when the file is missing.
    """
    self._source = QUrl.fromLocalFile(filename)
    if not os.path.exists(filename):
        self.viewer.setText(empty)
    else:
        self.viewer.setSource(self._source)
def create_snapshot_chart(self, filename=''):
    """Create chart that depicts the memory allocation over time apportioned to
    the tracked classes.

    :param filename: output path for the saved figure
    :return: HTML chart tag on success, or the no-pylab message when
        matplotlib is unavailable
    """
    try:
        from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
        import matplotlib.mlab as mlab
    except ImportError:
        # Charting is optional; degrade to an explanatory message.
        return self.nopylab_msg % ("memory allocation")
    classlist = self.tracked_classes
    # Snapshot timestamps form the x-axis.
    times = [snapshot.timestamp for snapshot in self.snapshots]
    base = [0] * len(self.snapshots)
    poly_labels = []
    polys = []
    for cn in classlist:
        pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
        # Only chart classes that at some point exceed 3% of tracked memory.
        if max(pct) > 3.0:
            # Cumulative (stacked) sizes in MiB; each class stacks on `base`.
            sz = [float(fp.classes[cn]['sum']) / (1024 * 1024) for fp in self.snapshots]
            sz = [sx + sy for sx, sy in zip(base, sz)]
            # NOTE(review): mlab.poly_between was removed in matplotlib 3.1;
            # this requires an old matplotlib — confirm the pinned version.
            xp, yp = mlab.poly_between(times, base, sz)
            polys.append(((xp, yp), {'label': cn}))
            poly_labels.append(cn)
            base = sz
    figure()
    title("Snapshot Memory")
    xlabel("Execution Time [s]")
    ylabel("Virtual Memory [MiB]")
    # Total process size and total tracked size as dashed reference lines.
    sizes = [float(fp.asizeof_total) / (1024 * 1024) for fp in self.snapshots]
    plot(times, sizes, 'r--', label='Total')
    sizes = [float(fp.tracked_total) / (1024 * 1024) for fp in self.snapshots]
    plot(times, sizes, 'b--', label='Tracked total')
    # Draw the per-class stacked polygons on top.
    for (args, kwds) in polys:
        fill(*args, **kwds)
    legend(loc=2)
    savefig(filename)
    return self.chart_tag % (self.relative_path(filename))
def reset(self):
    """Reset Union Pooler, clear active cell history."""
    self._unionSDR = numpy.zeros((self._numInputs,))
    self._activeCellsHistory = []
def stream(**kwargs):
    """Provide an :py:func:`open`-like interface to the streaming
    encryptor/decryptor classes.

    .. warning::
        Take care when decrypting framed messages with large frame length and
        large non-framed messages: no plaintext is returned until it has been
        authenticated, so potentially large amounts of data may be read into
        memory (an entire frame for framed messages, the entire message for
        non-framed ones). Keep this in mind when encrypting messages as well,
        so that issues are not encountered when decrypting them.

    :param str mode: Type of streaming client to return
        (e/encrypt: encryptor, d/decrypt: decryptor)
    :param **kwargs: All other parameters provided are passed to the
        appropriate Streaming client
    :returns: Streaming Encryptor or Decryptor, as requested
    :rtype: :class:`aws_encryption_sdk.streaming_client.StreamEncryptor`
        or :class:`aws_encryption_sdk.streaming_client.StreamDecryptor`
    :raises ValueError: if supplied with an unsupported mode value
    """
    mode = kwargs.pop("mode")
    normalized = mode.lower()
    if normalized in ("e", "encrypt"):
        client_class = StreamEncryptor
    elif normalized in ("d", "decrypt"):
        client_class = StreamDecryptor
    else:
        raise ValueError("Unsupported mode: {}".format(mode))
    return client_class(**kwargs)
def add_oauth_header(self):
    """Validate token and add the proper header for further requests.

    :return: (None)
    """
    oauth_token = self._get_token()
    if not oauth_token:
        # No token available: nothing to authenticate with.
        return
    # Add the oauth header and probe the API.
    self.headers["Authorization"] = "token " + oauth_token
    user_url = self._api_url("user")
    response = self.requests.get(user_url).json()
    if response.get("login", None) != self.user:
        # Token is invalid: report it and remove the header again.
        self.oops("Invalid token for user " + self.user)
        self.headers.pop("Authorization")
        return
    self.is_authenticated = True
    self.yeah("User {} authenticated".format(self.user))
def formatPoint(point, affine):
    """Retrieves a string representation of @point"""
    if affine:
        # Affine coordinates: (x, y)
        components = [point.x, point.y]
        template = "\tx:{}\n\ty:{}"
    else:
        # Projected coordinates: (x, y, z)
        components = [point.x, point.y, point.z]
        template = "\tx:{}\n\ty:{}\n\tz:{}"
    return template.format(*map(hexString, components))
def _set_mac_move_limit(self, v, load=False):
    """Setter method for mac_move_limit, mapped from YANG variable
    /mac_address_table/mac_move/mac_move_limit (uint32)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_mac_move_limit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mac_move_limit() directly.

    YANG Description: MAC move detect limit (default = 20)
    """
    # Unwrap union-typed values to their concrete underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate and coerce against the YANG restriction (range 5..500,
        # 32-bit unsigned, default 20); machine-generated pyangbind code.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'5..500']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name="mac-move-limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC move detect limit (default = 20)', u'cli-full-command': None, u'alt-name': u'limit'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """mac_move_limit must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'5..500']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(20), is_leaf=True, yang_name="mac-move-limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC move detect limit (default = 20)', u'cli-full-command': None, u'alt-name': u'limit'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='uint32', is_config=True)""", })
    self.__mac_move_limit = t
    # Trigger the registered change hook, when present.
    if hasattr(self, '_set'):
        self._set()
def get_col_rgba(color, transparency=None, opacity=None):
    """Convert a Gdk.Color into its r, g, b parts and add an alpha.

    If both transparency and opacity are None, alpha is set to 1 => opaque.

    :param Gdk.Color color: Color to extract r, g and b from
    :param float | None transparency: Value between 0 (opaque) and 1
        (transparent) or None if opacity is to be used
    :param float | None opacity: Value between 0 (transparent) and 1 (opaque)
        or None if transparency is to be used
    :return: Red, Green, Blue and Alpha value (all between 0.0 - 1.0)
    """
    # Gdk colour channels are 16-bit (0-65535); scale down to 0.0-1.0.
    red = color.red / 65535.
    green = color.green / 65535.
    blue = color.blue / 65535.
    if transparency is not None or opacity is None:
        # Transparency mode (also the default when neither is given).
        if transparency is None:
            transparency = 0
        if not 0 <= transparency <= 1:
            raise ValueError("Transparency must be between 0 and 1")
        alpha = 1 - transparency
    else:
        if not 0 <= opacity <= 1:
            raise ValueError("Opacity must be between 0 and 1")
        alpha = opacity
    return red, green, blue, alpha
def start(self) -> None:
    """Create the remote Spark session and wait for it to be ready."""
    session = self.client.create_session(
        self.kind, self.proxy_user, self.jars, self.py_files, self.files,
        self.driver_memory, self.driver_cores, self.executor_memory,
        self.executor_cores, self.num_executors, self.archives, self.queue,
        self.name, self.spark_conf,
    )
    self.session_id = session.session_id
    # Poll with a gentle back-off until the session leaves the
    # not-started/starting states.
    pending_states = {SessionState.NOT_STARTED, SessionState.STARTING}
    intervals = polling_intervals([0.1, 0.2, 0.3, 0.5], 1.0)
    while self.state in pending_states:
        time.sleep(next(intervals))
def collect(self):
    """Collect metrics from all registered metric sets

    :return:
    """
    logger.debug("Collecting metrics")
    for _, metricset in compat.iteritems(self._metricsets):
        collected = metricset.collect()
        # Skip metric sets that produced nothing this cycle.
        if collected:
            self._queue_func(constants.METRICSET, collected)
def _set_load_interval(self, v, load=False):
    """Setter method for load_interval, mapped from YANG variable
    /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_load_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_load_interval() directly.
    """
    # Unwrap union-typed values to their concrete underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate and coerce against the YANG restriction (range 30..300,
        # 32-bit unsigned, default 300); machine-generated pyangbind code.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """load_interval must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""", })
    self.__load_interval = t
    # Trigger the registered change hook, when present.
    if hasattr(self, '_set'):
        self._set()
def removeSubscriber(self, email):
    """Remove a subscriber from this workitem

    If the subscriber has not been added, no more actions will be
    performed.

    :param email: the subscriber's email
    """
    headers, raw_data = self._perform_subscribe()
    missing_flag, raw_data = self._remove_subscriber(email, raw_data)
    if not missing_flag:
        self._update_subscribe(headers, raw_data)
        self.log.info("Successfully remove a subscriber: %s for <Workitem %s>", email, self)
def format_result(input):
    """Return *input* as an :class:`OrderedDict` sorted by key.

    From: http://stackoverflow.com/questions/13062300/convert-a-dict-to-sorted-dict-in-python

    :param input: mapping to sort (name kept for backward compatibility with
        keyword callers even though it shadows the builtin)
    :return: OrderedDict with the items of *input* in ascending key order
    """
    # dict.items() works on every supported Python, so the project's
    # iteritems() helper is unnecessary here.
    return OrderedDict(sorted(input.items(), key=lambda item: item[0]))
def rollback(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
    """Rollback a lambda function to a given version.

    :param awsclient: AWS client wrapper used for the Lambda API calls
    :param function_name: name of the lambda function
    :param alias_name: alias to repoint (defaults to ALIAS_NAME)
    :param version: explicit version to roll back to; when falsy, the
        previous version is looked up
    :return: exit_code (0 on success, 1 when no previous version exists)
    """
    if version:
        log.info('rolling back to version {}'.format(version))
    else:
        # No explicit version: discover the one before the current alias.
        log.info('rolling back to previous version')
        version = _get_previous_version(awsclient, function_name, alias_name)
        # NOTE(review): '0' is the sentinel returned by
        # _get_previous_version when no earlier version exists — confirm.
        if version == '0':
            log.error('unable to find previous version of lambda function')
            return 1
        log.info('new version is %s' % str(version))
    # Repoint the alias at the chosen version.
    _update_alias(awsclient, function_name, version, alias_name)
    return 0
def set_db_row(db, start, size, _bytearray):
    """Here we replace a piece of data in a db block with new data

    Args:
        db (int): The db to use
        start (int): The start within the db
        size (int): The size of the data in bytes
        _bytearray (enumerable): The data to put in the db
    """
    # NOTE(review): relies on a module-level `client` being initialised
    # elsewhere before this is called — confirm at call site.
    client.db_write(db, start, size, _bytearray)
def write_fasta(path, sequences, names, mode='w', width=80):
    """Write nucleotide sequences stored as numpy arrays to a FASTA file.

    Parameters
    ----------
    path : string
        File path.
    sequences : sequence of arrays
        One or more ndarrays of dtype 'S1' containing the sequences.
    names : sequence of strings
        Names of the sequences.
    mode : string, optional
        Use 'a' to append to an existing file.
    width : int, optional
        Maximum line width.

    Raises
    ------
    ValueError
        If sequences/names lengths differ or a sequence is not dtype 'S1'.
    """
    # check inputs: a single ndarray means one sequence with one name
    if isinstance(sequences, np.ndarray):
        sequences = [sequences]
        names = [names]
    if len(sequences) != len(names):
        raise ValueError('must provide the same number of sequences and names')
    for sequence in sequences:
        if sequence.dtype != np.dtype('S1'):
            raise ValueError('expected S1 dtype, found %r' % sequence.dtype)
    # force binary mode
    mode = 'ab' if 'a' in mode else 'wb'
    # write to file
    with open(path, mode=mode) as fasta:
        for name, sequence in zip(names, sequences):
            # force bytes for the header; text names are encoded as ASCII
            if not isinstance(name, bytes):
                name = name.encode('ascii')
            fasta.write(b'>' + name + b'\n')
            for i in range(0, sequence.size, width):
                # tobytes() replaces ndarray.tostring(), which was
                # deprecated and has been removed from NumPy
                line = sequence[i:i + width].tobytes() + b'\n'
                fasta.write(line)
def _netsh_file(content):
    '''helper function to get the results of ``netsh -f content.txt``

    Running ``netsh`` will drop you into a ``netsh`` prompt where you can
    issue ``netsh`` commands. You can put a series of commands in an external
    file and run them as if from a ``netsh`` prompt using the ``-f`` switch.
    That's what this function does.

    Args:
        content (str):
            The contents of the file that will be run by the ``netsh -f``
            command

    Returns:
        str: The text returned by the netsh command
    '''
    # Write the commands to a temp script; delete=False so netsh can open it.
    with tempfile.NamedTemporaryFile(mode='w', prefix='salt-', suffix='.netsh', delete=False) as script:
        script.write(content)
    script_path = script.name
    try:
        log.debug('%s:\n%s', script_path, content)
        cmd = 'netsh -f {0}'.format(script_path)
        return salt.modules.cmdmod.run(cmd, python_shell=True)
    finally:
        # Always clean up the temporary script.
        os.remove(script_path)
def query(self, query, time_precision='s', chunked=False):
    """Query data from the influxdb v0.8 database.

    Thin delegation wrapper around :meth:`_query`.

    :param query: the query string to execute
    :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
        or 'u'.
    :param chunked: [Optional, default=False] True if the data shall be
        retrieved in chunks, False otherwise.
    :return: whatever :meth:`_query` returns for this query
    """
    return self._query(query, time_precision=time_precision, chunked=chunked)
def long_poll_notifications(self, **kwargs):  # noqa: E501
    """Get notifications using Long Poll.  # noqa: E501

    Notifications are delivered through HTTP long poll requests: the
    request is held open until an event notification (or a batch) arrives
    or the request times out (204). Only one long polling connection per
    API key may be ongoing at any time, and the pull channel expires after
    10 minutes without polling. Long polling is deprecated; prefer a
    notification callback. Example:
    ``curl -X GET https://api.us-east-1.mbedcloud.com/v2/notification/pull
    -H 'authorization: Bearer {api-key}'``

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.long_poll_notifications(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :return: NotificationMessage
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the asynchronous and synchronous paths delegate to the same
    # *_with_http_info call: it returns the request thread when
    # asynchronous=True and the data otherwise.
    return self.long_poll_notifications_with_http_info(**kwargs)  # noqa: E501
def add_code_verifier(request_args, service, **kwargs):
    """PKCE RFC 7636 support.

    To be added as a post_construct method to an
    :py:class:`oidcservice.oidc.service.AccessToken` instance.

    :param request_args: Set of request arguments
    :param service: The service that uses this function
    :return: updated set of request arguments
    """
    # fetch the previously stored PKCE state item for this flow
    pkce_state = service.get_item(Message, 'pkce', kwargs['state'])
    request_args['code_verifier'] = pkce_state['code_verifier']
    return request_args
def _gcs_get_keys ( bucket , pattern ) :
"""Get names of all Google Cloud Storage keys in a specified bucket that match a pattern .""" | return [ obj for obj in list ( bucket . objects ( ) ) if fnmatch . fnmatch ( obj . metadata . name , pattern ) ] |
def _central_slopes_directions(self, data, dX, dY):
    """Calculate magnitude/direction of slopes using central differences.

    :param data: 2-D elevation array
    :param dX: per-row x grid spacings (passed through to _get_d1_d2)
    :param dY: per-row y grid spacings (passed through to _get_d1_d2)
    :returns: (mag, direction) arrays of data.shape; border cells keep
        the FLAT_ID_INT fill value because central differences are only
        defined for interior cells
    """
    shp = np.array(data.shape) - 1
    direction = np.full(data.shape, FLAT_ID_INT, 'float64')
    # FIX: np.full() expects a *shape* as its first argument; the previous
    # code passed the `direction` array itself, which raises a TypeError.
    mag = np.full_like(direction, FLAT_ID_INT)
    ind = 0
    d1, d2, theta = _get_d1_d2(dX, dY, ind, [0, 1], [1, 1], shp)
    # central differences along each axis (interior cells only)
    s2 = (data[0:-2, 1:-1] - data[2:, 1:-1]) / d2
    s1 = -(data[1:-1, 0:-2] - data[1:-1, 2:]) / d1
    direction[1:-1, 1:-1] = np.arctan2(s2, s1) + np.pi
    # FIX: fill the interior in place to mirror `direction`; previously the
    # whole array was replaced by the smaller interior-shaped result.
    mag[1:-1, 1:-1] = np.sqrt(s1 ** 2 + s2 ** 2)
    return mag, direction
def get_run_configuration(fname):
    """Return script *fname* run configuration, or None when no stored
    configuration matches."""
    for stored_name, options in _get_run_configurations():
        if stored_name == fname:
            configuration = RunConfiguration()
            configuration.set(options)
            return configuration
    return None
def _getNewXref ( self ) :
"""_ getNewXref ( self ) - > PyObject *""" | if self . isClosed or self . isEncrypted :
raise ValueError ( "operation illegal for closed / encrypted doc" )
return _fitz . Document__getNewXref ( self ) |
def add_listener(self, callback, event_type=None):
    """Register a listener that is called back when the client receives
    an event.

    Args:
        callback (func(roomchunk)): Callback called when an event arrives.
        event_type (str): The event_type to filter for.

    Returns:
        uuid.UUID: Unique id of the listener, can be used to identify the
            listener.
    """
    # TODO: listeners should be stored in dict and accessed/deleted directly. Add
    # convenience method such that MatrixClient.listeners.new(Listener(...)) performs
    # MatrixClient.listeners[uuid4()] = Listener(...)
    listener_id = uuid4()
    self.listeners.append({
        'uid': listener_id,
        'callback': callback,
        'event_type': event_type,
    })
    return listener_id
def censor(self, input_text):
    """Return *input_text* with any profane words censored."""
    censored = input_text
    # Apply word boundaries to the bad word unless boundaries are disabled
    template = r'{0}' if self._no_word_boundaries else r'\b{0}\b'
    for word in self.get_profane_words():
        pattern = re.compile(template.format(word), re.IGNORECASE)
        censored = pattern.sub(self._censor_char * len(word), censored)
    return censored
def _coarsen_reshape(self, windows, boundary, side):
    """Construct a reshaped array for coarsen.

    Each coarsened dimension of size ``s`` is split into two axes of
    sizes ``(s // window, window)``; the returned axes index the window
    axes so the caller can reduce over them.

    :param windows: mapping {dimension name: window size}
    :param boundary: 'exact' | 'trim' | 'pad', or a per-dimension mapping
    :param side: 'left' | 'right', or a per-dimension mapping
    :returns: tuple of (reshaped ndarray, tuple of window-axis indices)
    """
    # Normalize scalar boundary/side arguments into per-dimension mappings.
    if not utils.is_dict_like(boundary):
        boundary = {d: boundary for d in windows.keys()}
    if not utils.is_dict_like(side):
        side = {d: side for d in windows.keys()}
    # remove unrelated dimensions
    boundary = {k: v for k, v in boundary.items() if k in windows}
    side = {k: v for k, v in side.items() if k in windows}
    for d, window in windows.items():
        if window <= 0:
            raise ValueError('window must be > 0. Given {}'.format(window))
    variable = self
    for d, window in windows.items():
        # trim or pad the object so each coarsened size divides evenly
        size = variable.shape[self._get_axis_num(d)]
        n = int(size / window)
        if boundary[d] == 'exact':
            # 'exact' requires the size to already be a multiple of window
            if n * window != size:
                raise ValueError('Could not coarsen a dimension of size {} with '
                                 'window {}'.format(size, window))
        elif boundary[d] == 'trim':
            # drop the excess samples from the chosen side
            if side[d] == 'left':
                variable = variable.isel({d: slice(0, window * n)})
            else:
                excess = size - window * n
                variable = variable.isel({d: slice(excess, None)})
        elif boundary[d] == 'pad':  # pad
            # pad up to the next multiple of window on the chosen side
            pad = window * n - size
            if pad < 0:
                pad += window
            if side[d] == 'left':
                pad_widths = {d: (0, pad)}
            else:
                pad_widths = {d: (pad, 0)}
            variable = variable.pad_with_fill_value(pad_widths)
        else:
            raise TypeError("{} is invalid for boundary. Valid option is 'exact', "
                            "'trim' and 'pad'".format(boundary[d]))
    shape = []
    axes = []
    axis_count = 0
    # Build the target shape: coarsened dims contribute two entries
    # (n_blocks, window); axis_count tracks the offset the extra entries
    # introduce so `axes` points at the window axes of the new shape.
    for i, d in enumerate(variable.dims):
        if d in windows:
            size = variable.shape[i]
            shape.append(int(size / windows[d]))
            shape.append(windows[d])
            axis_count += 1
            axes.append(i + axis_count)
        else:
            shape.append(variable.shape[i])
    return variable.data.reshape(shape), tuple(axes)
def connect(self, maximize=True):
    """Set up the selenium driver and connect to the server.

    :param maximize: True if the driver should be maximized
    :returns: selenium driver, or None when the configured driver type
        needs no browser ('api' / 'no_driver' / unset)
    """
    if not self.config.get('Driver', 'type') or self.config.get('Driver', 'type') in ['api', 'no_driver']:
        return None
    self.driver = ConfigDriver(self.config, self.utils).create_driver()
    # Save session id and remote node to download video after the test execution
    self.session_id = self.driver.session_id
    self.server_type, self.remote_node = self.utils.get_remote_node()
    if self.server_type == 'grid':
        self.remote_node_video_enabled = self.utils.is_remote_video_enabled(self.remote_node)
    else:
        # ggr/selenoid always record video; other server types never do
        self.remote_node_video_enabled = True if self.server_type in ['ggr', 'selenoid'] else False
    # Save app_strings in mobile tests
    if self.is_mobile_test() and not self.is_web_test() and self.config.getboolean_optional('Driver', 'appium_app_strings'):
        self.app_strings = self.driver.app_strings()
    if self.is_maximizable():
        # Bounds and screen
        bounds_x, bounds_y = self.get_config_window_bounds()
        self.driver.set_window_position(bounds_x, bounds_y)
        self.logger.debug('Window bounds: %s x %s', bounds_x, bounds_y)
        # Maximize browser
        if maximize:
            # Set window size or maximize: an explicit width/height in the
            # config takes precedence over maximize_window()
            window_width = self.config.get_optional('Driver', 'window_width')
            window_height = self.config.get_optional('Driver', 'window_height')
            if window_width and window_height:
                self.driver.set_window_size(window_width, window_height)
            else:
                self.driver.maximize_window()
    # Log window size
    window_size = self.utils.get_window_size()
    self.logger.debug('Window size: %s x %s', window_size['width'], window_size['height'])
    # Update baseline
    self.update_visual_baseline()
    # Discard previous logcat logs
    self.utils.discard_logcat_logs()
    # Set implicitly wait timeout
    self.utils.set_implicitly_wait()
    return self.driver
def ts_merge(series):
    '''Merge timeseries into a new :class:`~.TimeSeries` instance.

    :parameter series: an iterable over :class:`~.TimeSeries`.
    '''
    remaining = iter(series)
    first = next(remaining)
    # the first series absorbs all the remaining ones
    return first.merge(remaining)
def partition(f, xs):
    """Works similar to filter, except it returns a two-item tuple where the
    first item is the sequence of items that passed the filter and the
    second is a sequence of items that didn't pass the filter.

    :param f: predicate applied to each item
    :param xs: input sequence; both returned containers have type(xs)
    :returns: (passed, failed) tuple
    """
    # FIX: under Python 3, filter() is lazy -- the old `x not in true`
    # membership pass exhausted the iterator, so the "passed" container
    # ended up empty. It also misplaced duplicates equal to a passing
    # item and was O(n^2). A single explicit pass fixes all of that.
    t = type(xs)
    passed = []
    failed = []
    for x in xs:
        (passed if f(x) else failed).append(x)
    return t(passed), t(failed)
def add_function(self, function):
    """Adds the function to the list of registered functions.

    :raises FunctionAlreadyRegistered: if a function with the same name
        is already registered
    """
    built = self.build_function(function)
    name = built.name
    if name in self.functions:
        raise FunctionAlreadyRegistered(name)
    self.functions[name] = built
def convert2hdf5(platform_name):
    """Retrieve original AHI RSR data and convert to internal hdf5 format.

    :param platform_name: platform name string, e.g. 'Himawari-8'
    """
    import h5py
    ahi = AhiRSR(platform_name)
    filename = os.path.join(ahi.output_dir,
                            "rsr_ahi_{platform}.h5".format(platform=platform_name))
    with h5py.File(filename, "w") as h5f:
        h5f.attrs['description'] = 'Relative Spectral Responses for AHI'
        h5f.attrs['platform_name'] = platform_name
        h5f.attrs['sensor'] = 'ahi'
        # FIX: h5py cannot store a dict view (Python 3) as an attribute;
        # materialize the band names as a list.
        h5f.attrs['band_names'] = list(AHI_BAND_NAMES.values())
        for chname in AHI_BAND_NAMES.values():
            grp = h5f.create_group(chname)
            # compute the NaN mask once and reuse it for both columns
            valid = ~np.isnan(ahi.rsr[chname]['wavelength'])
            wvl = ahi.rsr[chname]['wavelength'][valid]
            rsp = ahi.rsr[chname]['response'][valid]
            grp.attrs['central_wavelength'] = get_central_wave(wvl, rsp)
            arr = ahi.rsr[chname]['wavelength']
            dset = grp.create_dataset('wavelength', arr.shape, dtype='f')
            # wavelengths are stored scaled: value * 1e-06 gives metres
            dset.attrs['unit'] = 'm'
            dset.attrs['scale'] = 1e-06
            dset[...] = arr
            arr = ahi.rsr[chname]['response']
            dset = grp.create_dataset('response', arr.shape, dtype='f')
            dset[...] = arr
def context_processor(self, func):
    """Decorate a given function to use as a context processor.

    @app.ps.jinja2.context_processor
    def my_context():
        return {...}
    """
    coroutine = to_coroutine(func)
    self.providers.append(coroutine)
    return coroutine
def do_create_subject(self, subject_context):
    """Create a Subject from a fully-resolved ``SubjectContext``.

    By the time this method is invoked, all possible
    ``SubjectContext`` data (session, identifiers, et. al.) has been made
    accessible using all known heuristics.

    :param subject_context: the SubjectContext data map
    :returns: a Subject instance reflecting the data in the specified
        SubjectContext data map
    """
    # non-web contexts are delegated to the parent implementation
    if not isinstance(subject_context, web_subject_abcs.WebSubjectContext):
        return super().do_create_subject(subject_context=subject_context)
    security_manager = subject_context.resolve_security_manager()
    session = subject_context.resolve_session()
    session_creation_enabled = subject_context.session_creation_enabled
    # passing the session arg is new to yosai, eliminating redundant
    # get_session calls:
    identifiers = subject_context.resolve_identifiers(session)
    authenticated = subject_context.resolve_authenticated(session)
    host = subject_context.resolve_host(session)
    # must run after resolve_identifiers:
    remembered = getattr(subject_context, 'remembered', None)
    return WebDelegatingSubject(identifiers=identifiers,
                                remembered=remembered,
                                authenticated=authenticated,
                                host=host,
                                session=session,
                                session_creation_enabled=session_creation_enabled,
                                security_manager=security_manager,
                                web_registry=subject_context.web_registry)
def _stage(self, accepted, count=0):
    """One repeated step of the state removal algorithm (Python 2 code).

    Combines transitions, drops states without outgoing transitions,
    rebuilds the quick-lookup tables, then recurses until self._check()
    reports a result.

    :param accepted: acceptance condition forwarded to self._check()
    :param count: recursion depth counter (debugging aid)
    :returns: the value found by self._check(), or None if the PDA
        became empty
    """
    new5 = self._combine_rest_push()
    new1 = self._combine_push_pop()
    new2 = self._combine_push_rest()
    new3 = self._combine_pop_rest()
    new4 = self._combine_rest_rest()
    new = new1 + new2 + new3 + new4 + new5
    # release the intermediate lists early
    del new1
    del new2
    del new3
    del new4
    del new5
    if len(new) == 0:
        # self.printer()
        # print 'PDA is empty'
        # logging.debug('PDA is empty')
        return None
    self.statediag = self.statediag + new
    del new
    # print 'cleaning...'
    # It is cheaper to create a new array than to use the old one and
    # delete a key
    newstates = []
    for key in self.statediag:
        # keep only states that still have outgoing transitions
        if len(key.trans) == 0 or key.trans == {}:
            # rint 'delete ' + `key.id`
            # self.statediag.remove(key)
            pass
        else:
            newstates.append(key)
    del self.statediag
    self.statediag = newstates
    # rebuild the per-id and per-type quick-lookup tables from scratch
    self.quickresponse = {}
    self.quickresponse_types = {}
    self.quickresponse_types[0] = []
    self.quickresponse_types[1] = []
    self.quickresponse_types[2] = []
    self.quickresponse_types[3] = []
    self.quickresponse_types[4] = []
    for state in self.statediag:
        if state.id not in self.quickresponse:
            self.quickresponse[state.id] = [state]
        else:
            self.quickresponse[state.id].append(state)
        self.quickresponse_types[state.type].append(state)
    # else:
    #     print `key.id` + ' (type: ' + `key.type` + ' and sym: ' + `key.sym` + ')'
    #     print key.trans
    # print 'checking...'
    exists = self._check(accepted)
    if exists == -1:
        # -1 means "not found yet": combine/prune again one level deeper
        # DEBUG self.printer()
        # raw_input('next step?')
        return self._stage(accepted, count + 1)
    else:
        # DEBUG self.printer()
        # print 'Found '
        print exists
        # return self._stage(accepted, count + 1)
        return exists
def register_view(self):
    """Display registration form and create new User.

    GET renders the registration (and login) forms; POST validates the
    form, creates the User and its primary email, optionally sends a
    confirmation/registered email, and finally logs the user in or
    redirects to the login page depending on configuration.
    """
    safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_LOGIN_ENDPOINT)
    safe_reg_next_url = self._get_safe_next_url('reg_next', self.USER_AFTER_REGISTER_ENDPOINT)
    # Initialize form
    login_form = self.LoginFormClass()  # for login_or_register.html
    register_form = self.RegisterFormClass(request.form)  # for register.html
    # invite token used to determine validity of registeree
    invite_token = request.values.get("token")
    # require invite without a token should disallow the user from registering
    if self.USER_REQUIRE_INVITATION and not invite_token:
        flash("Registration is invite only", "error")
        return redirect(url_for('user.login'))
    user_invitation = None
    if invite_token and self.db_manager.UserInvitationClass:
        # resolve the invitation record from the signed token
        data_items = self.token_manager.verify_token(invite_token, self.USER_INVITE_EXPIRATION)
        if data_items:
            user_invitation_id = data_items[0]
            user_invitation = self.db_manager.get_user_invitation_by_id(user_invitation_id)
        if not user_invitation:
            flash("Invalid invitation token", "error")
            return redirect(url_for('user.login'))
        register_form.invite_token.data = invite_token
    if request.method != 'POST':
        # GET: pre-fill the redirect targets (and invited email, if any)
        login_form.next.data = register_form.next.data = safe_next_url
        login_form.reg_next.data = register_form.reg_next.data = safe_reg_next_url
        if user_invitation:
            register_form.email.data = user_invitation.email
    # Process valid POST
    if request.method == 'POST' and register_form.validate():
        user = self.db_manager.add_user()
        register_form.populate_obj(user)
        user_email = self.db_manager.add_user_email(user=user, is_primary=True)
        register_form.populate_obj(user_email)
        # Store password hash instead of password
        user.password = self.hash_password(user.password)
        # Email confirmation depends on the USER_ENABLE_CONFIRM_EMAIL setting
        request_email_confirmation = self.USER_ENABLE_CONFIRM_EMAIL
        # Users that register through an invitation, can skip this process
        # but only when they register with an email that matches their invitation.
        if user_invitation:
            if user_invitation.email.lower() == register_form.email.data.lower():
                user_email.email_confirmed_at = datetime.utcnow()
                request_email_confirmation = False
        self.db_manager.save_user_and_user_email(user, user_email)
        self.db_manager.commit()
        # Send 'registered' email and delete new User object if send fails
        if self.USER_SEND_REGISTERED_EMAIL:
            try:
                # Send 'confirm email' or 'registered' email
                self._send_registered_email(user, user_email, request_email_confirmation)
            except Exception as e:
                # delete new User object if send fails
                self.db_manager.delete_object(user)
                self.db_manager.commit()
                raise
        # Send user_registered signal
        signals.user_registered.send(current_app._get_current_object(), user=user, user_invitation=user_invitation)
        # Redirect if USER_ENABLE_CONFIRM_EMAIL is set
        if self.USER_ENABLE_CONFIRM_EMAIL and request_email_confirmation:
            safe_reg_next_url = self.make_safe_url(register_form.reg_next.data)
            return redirect(safe_reg_next_url)
        # Auto-login after register or redirect to login page
        if 'reg_next' in request.args:
            safe_reg_next_url = self.make_safe_url(register_form.reg_next.data)
        else:
            safe_reg_next_url = self._endpoint_url(self.USER_AFTER_CONFIRM_ENDPOINT)
        if self.USER_AUTO_LOGIN_AFTER_REGISTER:
            return self._do_login_user(user, safe_reg_next_url)  # auto-login
        else:
            return redirect(url_for('user.login') + '?next=' + quote(safe_reg_next_url))  # redirect to login page
    # Render form
    self.prepare_domain_translations()
    return render_template(self.USER_REGISTER_TEMPLATE, form=register_form, login_form=login_form, register_form=register_form)
def handle_request(self, environ, start_response):
    """Handle an HTTP request from the client.

    This is the entry point of the Engine.IO application, using the same
    interface as a WSGI application. For the typical usage, this function
    is invoked by the :class:`Middleware` instance, but it can be invoked
    directly when the middleware is not used.

    :param environ: The WSGI environment.
    :param start_response: The WSGI ``start_response`` function.

    This function returns the HTTP response body to deliver to the client
    as a byte sequence.
    """
    method = environ['REQUEST_METHOD']
    query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
    if 'j' in query:
        self.logger.warning('JSONP requests are not supported')
        r = self._bad_request()
    else:
        sid = query['sid'][0] if 'sid' in query else None
        # b64=1/true asks for base64-encoded (no binary) payloads
        b64 = False
        if 'b64' in query:
            if query['b64'][0] == "1" or query['b64'][0].lower() == "true":
                b64 = True
        if method == 'GET':
            if sid is None:
                # no session yet: this GET opens a new connection
                transport = query.get('transport', ['polling'])[0]
                if transport != 'polling' and transport != 'websocket':
                    self.logger.warning('Invalid transport %s', transport)
                    r = self._bad_request()
                else:
                    r = self._handle_connect(environ, start_response, transport, b64)
            else:
                # existing session: poll (or upgrade) it
                if sid not in self.sockets:
                    self.logger.warning('Invalid session %s', sid)
                    r = self._bad_request()
                else:
                    socket = self._get_socket(sid)
                    try:
                        packets = socket.handle_get_request(environ, start_response)
                        if isinstance(packets, list):
                            r = self._ok(packets, b64=b64)
                        else:
                            # websocket handlers return the raw response
                            r = packets
                    except exceptions.EngineIOError:
                        if sid in self.sockets:  # pragma: no cover
                            self.disconnect(sid)
                        r = self._bad_request()
                    if sid in self.sockets and self.sockets[sid].closed:
                        del self.sockets[sid]
        elif method == 'POST':
            # POST carries client packets for an existing session
            if sid is None or sid not in self.sockets:
                self.logger.warning('Invalid session %s', sid)
                r = self._bad_request()
            else:
                socket = self._get_socket(sid)
                try:
                    socket.handle_post_request(environ)
                    r = self._ok()
                except exceptions.EngineIOError:
                    if sid in self.sockets:  # pragma: no cover
                        self.disconnect(sid)
                    r = self._bad_request()
                except:  # pragma: no cover
                    # for any other unexpected errors, we log the error
                    # and keep going
                    self.logger.exception('post request handler error')
                    r = self._ok()
        elif method == 'OPTIONS':
            r = self._ok()
        else:
            self.logger.warning('Method %s not supported', method)
            r = self._method_not_found()
    if not isinstance(r, dict):
        # non-dict results (e.g. websocket responses) pass straight through
        return r or []
    if self.http_compression and len(r['response']) >= self.compression_threshold:
        # honor the first client-accepted encoding we can produce
        encodings = [e.split(';')[0].strip() for e in environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
        for encoding in encodings:
            if encoding in self.compression_methods:
                r['response'] = getattr(self, '_' + encoding)(r['response'])
                r['headers'] += [('Content-Encoding', encoding)]
                break
    cors_headers = self._cors_headers(environ)
    start_response(r['status'], r['headers'] + cors_headers)
    return [r['response']]
def name(self):
    """Return the string associated with the tag name, or '' when there
    is no name or the current event is not a START_TAG/END_TAG."""
    if self.m_name == -1:
        return ''
    if self.m_event != START_TAG and self.m_event != END_TAG:
        return ''
    return self.sb[self.m_name]
def pull_log(self, project_name, logstore_name, shard_id, from_time, to_time, batch_size=None, compress=None):
    """Batch pull log data from log service using a time range (generator).

    An unsuccessful operation will cause a LogException. The time
    parameters refer to the time when the server received the logs.

    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shard_id: int
    :param shard_id: the shard id
    :type from_time: string/int
    :param from_time: cursor value: "begin", a timestamp, or a readable
        time like "2018-01-02 12:12:10+8:00"; human readable strings such
        as "1 hour ago", "now", "yesterday 0:0:0" are also supported
    :type to_time: string/int
    :param to_time: same formats as from_time
    :type batch_size: int
    :param batch_size: batch size fetched in each iteration, default 1000
    :type compress: bool
    :param compress: if use compression, by default it's True
    :return: yields PullLogResponse objects
    :raise: LogException
    """
    cursor = self.get_cursor(project_name, logstore_name, shard_id, from_time).get_cursor()
    end_cursor = self.get_cursor(project_name, logstore_name, shard_id, to_time).get_cursor()
    while True:
        response = self.pull_logs(project_name, logstore_name, shard_id, cursor,
                                  count=batch_size, end_cursor=end_cursor, compress=compress)
        yield response
        # an empty batch signals that the range has been exhausted
        if response.get_log_count() <= 0:
            break
        cursor = response.get_next_cursor()
def set_guest_access(self, room_id, guest_access):
    """Set the guest access policy of the room.

    Args:
        room_id (str): The room to set the rules for.
        guest_access (str): Whether guests can join. One of:
            ["can_join", "forbidden"]
    """
    return self.send_state_event(room_id, "m.room.guest_access",
                                 {"guest_access": guest_access})
def _build_arguments(self):
    """Build and register command line arguments on the parser.

    Note: argparse's ``type=bool`` is a well-known footgun --
    ``bool('False')`` is True because any non-empty string is truthy, so
    ``--clean False`` used to *enable* cleanup. Boolean-valued options
    therefore parse their value explicitly with ``_parse_bool``.
    """
    def _parse_bool(value):
        # Accept common boolean spellings; raising ValueError makes
        # argparse report a proper "invalid value" error to the user.
        lowered = str(value).strip().lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0', ''):
            return False
        raise ValueError('expected a boolean value, got {0!r}'.format(value))

    self._parser.add_argument('--clean', type=_parse_bool, required=False, default=False,
                              help="clean up everything that was created by freight forwarder at the end.")
    self._parser.add_argument('--configs', type=_parse_bool, required=False, default=False,
                              help="Would you like to inject configuration files?")
    self._parser.add_argument('--test', type=_parse_bool, required=False, default=False,
                              help="Run tests.")
    self._parser.add_argument('-t', '--tag', required=False, type=six.text_type, action='append',
                              help='list of tags applied to the image being exported. example: sh1hash')
    self._parser.add_argument('--use-cache', required=False, action='store_true', default=False,
                              help='Allow build to use cached image layers.')
    self._parser.add_argument('--no-tagging-scheme', required=False, action='store_true', default=False,
                              help='Turn off freight forwarders tagging scheme.')
    self._parser.add_argument('--no-validation', action="store_true", required=False, default=False,
                              help='**UNSAFE**. The image will be built, NOT started, and pushed to the registry')
    self._parser.add_argument('-y', required=False, action='store_true', default=False,
                              help='**UNSAFE**. Turn off `--no-validation` interaction during export')
def _convert_epytext(line):
    """Convert an epytext markup line to reST.

    >>> _convert_epytext("L{A}")
    :class:`A`
    """
    converted = line.replace('@', ':')
    for pattern, replacement in RULES:
        converted = re.sub(pattern, replacement, converted)
    return converted
def compound_object(element_name, attrnames, warn=False):
    """Return a class which delegates bracket access to an internal dict.

    Missing attributes are delegated to the child dict for convenience.
    @note: Care must be taken when child nodes and attributes have the same names

    :param element_name: tag name the generated class represents
    :param attrnames: iterable of attribute names exposed as fields
    :param warn: forwarded to _prefix_keyword when sanitizing field names
    """
    class CompoundObject():
        # attribute names as found in the source, in stable sorted order
        _original_fields = sorted(attrnames)
        # the same names made safe as Python identifiers
        _fields = [_prefix_keyword(a, warn) for a in _original_fields]

        def __init__(self, values, child_dict):
            # bind each sanitized field name to its positional value
            for name, val in zip(self._fields, values):
                self.__dict__[name] = val
            self._child_dict = child_dict
            self.name = element_name

        def getAttributes(self):
            return [(k, getattr(self, k)) for k in self._fields]

        def hasAttribute(self, name):
            return name in self._fields

        def setAttribute(self, name, value):
            # unknown names are registered as new fields on the fly
            if name not in self._fields:
                self._original_fields.append(name)
                self._fields.append(_prefix_keyword(name, warn))
            self.__dict__[name] = value

        def hasChild(self, name):
            return name in self._child_dict

        def getChild(self, name):
            return self._child_dict[name]

        def addChild(self, name, attrs=None):
            if attrs is None:
                attrs = {}
            # children get their own generated class, keyed by tag name
            clazz = compound_object(name, attrs.keys())
            child = clazz([attrs.get(a) for a in sorted(attrs.keys())], _NO_CHILDREN)
            if len(self._child_dict) == 0:
                self._child_dict = OrderedDict()
            self._child_dict.setdefault(name, []).append(child)
            return child

        def __getattr__(self, name):
            # unresolved non-dunder attributes fall through to the children
            if name[:2] != "__":
                return self._child_dict.get(name, None)
            raise AttributeError

        def __setattr__(self, name, value):
            # assignments to existing child names update the child dict
            if name != "_child_dict" and name in self._child_dict:
                self._child_dict[name] = value
            else:
                self.__dict__[name] = value

        def __delattr__(self, name):
            if name in self._child_dict:
                del self._child_dict[name]
            else:
                if name in self.__dict__:
                    del self.__dict__[name]
                self._original_fields.remove(name)
                self._fields.remove(_prefix_keyword(name, False))

        def __getitem__(self, name):
            return self._child_dict[name]

        def __str__(self):
            return "<%s,child_dict=%s>" % (self.getAttributes(), dict(self._child_dict))

        def toXML(self, initialIndent="", indent=" "):
            # skip unset attributes and namespaced ('{...}') names; see #3454
            fields = ['%s="%s"' % (self._original_fields[i], str(getattr(self, k)))
                      for i, k in enumerate(self._fields)
                      if getattr(self, k) is not None  # see #3454
                      and not '{' in self._original_fields[i]]
            if not self._child_dict:
                return "%s<%s %s/>\n" % (initialIndent, element_name, " ".join(fields))
            else:
                s = "%s<%s %s>\n" % (initialIndent, element_name, " ".join(fields))
                for l in self._child_dict.values():
                    for c in l:
                        s += c.toXML(initialIndent + indent)
                return s + "%s</%s>\n" % (initialIndent, element_name)

        def __repr__(self):
            return str(self)
    return CompoundObject
def tzname(self, dt):
    """Return the UTC offset formatted as ``±HH:MM`` (or ``±HH:MM:SS``
    when the offset has a seconds component).

    http://docs.python.org/library/datetime.html#datetime.tzinfo.tzname
    """
    offset = self.__offset
    if offset < datetime.timedelta():
        sign = '-'
        # FIX: format the magnitude; the old code fed the *negative*
        # seconds into '%02d', producing strings like '--5:00'.
        offset = -offset
    else:
        sign = '+'
    # total_seconds was introduced in Python 2.7
    if hasattr(offset, 'total_seconds'):
        total_seconds = offset.total_seconds()
    else:
        total_seconds = (offset.days * 24 * 60 * 60) + (offset.seconds) + (offset.microseconds / 1000000.0)
    total_seconds = int(total_seconds)
    hours, remainder = divmod(total_seconds, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    if seconds:
        return '%s%02d:%02d:%02d' % (sign, hours, minutes, seconds)
    return '%s%02d:%02d' % (sign, hours, minutes)
def compamp_to_spectrogram(compamp):
    '''Return a spectrogram: each row holds the measured power spectrum
    for one time sample.

    Using this function is shorthand for:

        aca = ibmseti.compamp.Compamp(raw_data)
        power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
        spectrogram = ibmseti.dsp.reshape_to_2d(power)

    Example Usage:

        import ibmseti
        import matplotlib.pyplot as plt
        plt.ion()
        aca = ibmseti.compamp.Compamp(raw_data)
        spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
        time_bins = ibmseti.dsp.time_bins(aca.header())
        freq_bins = ibmseti.dsp.frequency_bins(aca.header())
        fig, ax = plt.subplots()
        ax.pcolormesh(freq_bins, time_bins, spectrogram)

    Time is on the horizontal axis and frequency is along the vertical.
    '''
    header = compamp.header()
    power = complex_to_power(compamp.complex_data(), header['over_sampling'])
    return reshape_to_2d(power)
def GetSectionByIndex(self, section_index):
    """Retrieves a specific section based on the index.

    Args:
        section_index (int): index of the section.

    Returns:
        VolumeExtent: a volume extent or None if not available.
    """
    # lazily parse the underlying data on first access
    if not self._is_parsed:
        self._Parse()
        self._is_parsed = True
    if 0 <= section_index < len(self._sections):
        return self._sections[section_index]
    return None
def _parse_interval(self, tokens):
    """Parses a range

    Range ::= <num> | <num> ('..' | '^') <num>
    """
    # Input positions are 1-based; store 0-based internally.
    start = int(tokens.pop(0)) - 1
    if len(tokens) > 1 and tokens[0] in ['..', '^']:
        tokens.pop(0)  # discard the '..' or '^' separator
        stop = int(tokens.pop(0))
        return GenomicInterval(start, stop,
                               chromosome=self.hdr['ACCESSION']['value'])
    # A single position becomes a length-1 interval.
    return GenomicInterval(start, start + 1,
                           chromosome=self.hdr['ACCESSION']['value'])
def collect_metrics(local_evaluator=None, remote_evaluators=None, timeout_seconds=180):
    """Gathers episode metrics from PolicyEvaluator instances.

    Args:
        local_evaluator: the local PolicyEvaluator to query, if any.
        remote_evaluators: list of remote (actor handle) PolicyEvaluators;
            defaults to no remote evaluators.
        timeout_seconds: how long to wait for remote episode data.

    Returns:
        Metrics dict produced by ``summarize_episodes``.
    """
    # Fix: the previous signature used a mutable default argument
    # (remote_evaluators=[]), which is shared across calls; use a None
    # sentinel instead.  Behavior for all existing callers is unchanged.
    if remote_evaluators is None:
        remote_evaluators = []
    episodes, num_dropped = collect_episodes(
        local_evaluator, remote_evaluators, timeout_seconds=timeout_seconds)
    metrics = summarize_episodes(episodes, episodes, num_dropped)
    return metrics
def visitAdditionOrSubtractionExpression(self, ctx):
    """expression: expression (PLUS | MINUS) expression

    Evaluates a binary +/- node: first as decimal arithmetic, and if that
    fails, as date arithmetic (date/datetime plus-minus a time-of-day or a
    number of days).

    :param ctx: parse-tree context for the binary expression.
    :return: the decimal or date/datetime result.
    :raises EvaluationError: if neither interpretation succeeds.
    """
    # A PLUS token means addition; otherwise this is subtraction.
    is_add = ctx.PLUS() is not None
    arg1 = self.visit(ctx.expression(0))
    arg2 = self.visit(ctx.expression(1))
    # first try as decimals
    try:
        _arg1 = conversions.to_decimal(arg1, self._eval_context)
        _arg2 = conversions.to_decimal(arg2, self._eval_context)
        return _arg1 + _arg2 if is_add else _arg1 - _arg2
    except EvaluationError:
        # Fall through to the date interpretation.
        pass
    # then as date + something
    try:
        _arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
        if isinstance(arg2, datetime.time):
            # upgrade our date to datetime
            _arg1 = conversions.to_datetime(_arg1, self._eval_context)
            # convert time value to a duration
            _arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond)
        else:
            # Otherwise the second operand is interpreted as a day count.
            _arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
        return _arg1 + _arg2 if is_add else _arg1 - _arg2
    except EvaluationError as ex:
        raise EvaluationError("Expression could not be evaluated as decimal or date arithmetic", ex)
def set_widgets(self):
    """Set widgets on the Hazard Category tab.

    Clears any later wizard steps, repopulates the hazard category list
    for the current layer, and restores the previous selection from the
    layer's existing keywords when present.
    """
    self.clear_further_steps()
    # Set widgets
    self.lstHazardCategories.clear()
    self.lblDescribeHazardCategory.setText('')
    self.lblSelectHazardCategory.setText(hazard_category_question)
    hazard_categories = self.hazard_categories_for_layer()
    for hazard_category in hazard_categories:
        # Categories may come back as bare keys; resolve them to full
        # definition dicts.
        if not isinstance(hazard_category, dict):  # noinspection PyTypeChecker
            hazard_category = definition(hazard_category)
        # noinspection PyTypeChecker
        item = QListWidgetItem(hazard_category['name'], self.lstHazardCategories)
        # Stash the category key on the item for later retrieval.
        # noinspection PyTypeChecker
        item.setData(QtCore.Qt.UserRole, hazard_category['key'])
        self.lstHazardCategories.addItem(item)
    # Set values based on existing keywords (if already assigned)
    category_keyword = self.parent.get_existing_keyword('hazard_category')
    if category_keyword:
        categories = []
        for index in range(self.lstHazardCategories.count()):
            item = self.lstHazardCategories.item(index)
            categories.append(item.data(QtCore.Qt.UserRole))
        if category_keyword in categories:
            self.lstHazardCategories.setCurrentRow(categories.index(category_keyword))
    # When only one category is available, select it automatically.
    self.auto_select_one_item(self.lstHazardCategories)
def memoize(func):
    """Per-instance memoization decorator for methods.

    Results are cached per positional-argument tuple in a dictionary
    stored on the instance itself (named after the decorated function to
    avoid clashes), so cached results are confined to that instance.

    See also: http://en.wikipedia.org/wiki/Memoization

    Example:
        class MyClass(object):
            @memoize
            def foo(self, a, b):
                return self._do_calculation(a, b)

    HINT: - The decorator does not work with keyword arguments.
    """
    cache_name = '__CACHED_{}'.format(func.__name__)

    def wrapper(self, *args):
        cache = getattr(self, cache_name, None)
        if cache is None:
            # First use on this instance: attach a fresh cache dict.
            cache = {}
            setattr(self, cache_name, cache)
        try:
            return cache[args]
        except KeyError:
            result = func(self, *args)
            cache[args] = result
            return result

    return wrapper
def fetch_followers(account_file, outfile, limit, do_loop):
    """Fetch up to limit followers for each Twitter account in
    account_file. Write results to outfile file in format:
    screen_name user_id follower_id_1 follower_id_2 ...

    :param account_file: path to a file with one screen name per line.
    :param outfile: gzip output path; later passes write to '<base>.1',
        '<base>.2', ... instead.
    :param limit: maximum number of followers to collect per account.
    :param do_loop: when True, loop forever, one output file per pass.
    """
    print('Fetching followers for accounts in %s' % account_file)
    niters = 1
    while True:
        outf = gzip.open(outfile, 'wt')
        for screen_name in iter_lines(account_file):
            # Timestamp each record so repeated passes can be told apart.
            timestamp = datetime.datetime.now().isoformat()
            print('collecting followers for', screen_name)
            followers = twutil.collect.followers_for_screen_name(screen_name, limit)
            if len(followers) > 0:
                outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
                outf.flush()
            else:
                print('unknown user', screen_name)
        outf.close()
        if not do_loop:
            return
        else:
            # Compute the next pass's file name: base.1, then base.2, ...
            # (strip the previous numeric suffix before appending).
            if niters == 1:
                outfile = '%s.%d' % (outfile, niters)
            else:
                outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
            niters += 1
def set_dtreat_order(self, order=None):
    """Set the order in which the data treatment should be performed

    Provide an ordered list of keywords indicating the order in which
    you wish the data treatment steps to be performed.
    Each keyword corresponds to a step.

    Available steps are (in default order):
        - 'mask'
        - 'interp_indt'
        - 'interp_indch'
        - 'data0'
        - 'dfit'
        - 'indt'
        - 'indch'
        - 'interp_t'

    All steps are performed on the stored reference self.dataRef['data']
    Thus, the time and channels restriction must be the last 2 steps before
    interpolating on an external time vector
    """
    # No explicit order: fall back to the class-level default.
    if order is None:
        order = list(self._ddef['dtreat']['order'])
    assert type(order) is list and all([type(ss) is str for ss in order])
    # NOTE(review): this slices the per-element boolean list at [-4:-1],
    # i.e. it checks positions -4..-2 — presumably enforcing that the
    # index-restriction steps sit just before the final interpolation
    # step; confirm against the default order.
    if not all([ss in ['indt', 'indch', 'indlamb'] for ss in order][-4:-1]):
        msg = "indt and indch must be the treatment steps -2 and -3 !"
        raise Exception(msg)
    if not order[-1] == 'interp-t':
        msg = "interp-t must be the last treatment step !"
        raise Exception(msg)
    self._dtreat['order'] = order
    # Invalidate cached treated data so it is recomputed with the new order.
    self._ddata['uptodate'] = False
def sodium_pad(s, blocksize):
    """Pad the input bytearray ``s`` to a multiple of ``blocksize``
    using the ISO/IEC 7816-4 algorithm

    :param s: input bytes string
    :type s: bytes
    :param blocksize: target block size; must be positive
    :type blocksize: int
    :return: padded string
    :rtype: bytes
    :raises exc.TypeError: if ``s`` is not bytes or ``blocksize`` not an int
    :raises exc.ValueError: if ``blocksize`` is not positive
    :raises exc.CryptoError: if libsodium reports a padding failure
    """
    ensure(isinstance(s, bytes), raising=exc.TypeError)
    ensure(isinstance(blocksize, integer_types), raising=exc.TypeError)
    if blocksize <= 0:
        raise exc.ValueError
    s_len = len(s)
    # Worst case the algorithm appends one full extra block of padding.
    m_len = s_len + blocksize
    buf = ffi.new("unsigned char []", m_len)
    # Out-parameter: receives the actual padded length.
    p_len = ffi.new("size_t []", 1)
    ffi.memmove(buf, s, s_len)
    rc = lib.sodium_pad(p_len, buf, s_len, blocksize, m_len)
    ensure(rc == 0, "Padding failure", raising=exc.CryptoError)
    # Copy only the padded prefix out of the C buffer.
    return ffi.buffer(buf, p_len[0])[:]
def get_multi_async(cls, blob_keys, **ctx_options):
    """Async version of get_multi().

    Validates each blob key, then issues a single asynchronous datastore
    multi-get for the corresponding BlobInfo entities.
    """
    for candidate in blob_keys:
        if not isinstance(candidate, (BlobKey, basestring)):
            raise TypeError('Expected blob key, got %r' % (candidate,))
    if 'parent' in ctx_options:
        raise TypeError('Parent is not supported')
    # Build datastore keys directly from the stringified blob keys.
    keys = [model.Key(BLOB_INFO_KIND, str(blob_key)) for blob_key in blob_keys]
    return model.get_multi_async(keys, **ctx_options)
def pull(options):
    """pull all remote programs to a local directory

    Downloads every program visible to the selected account (or all
    accounts with --all) into ``options.directory`` as .juttle files,
    optionally grouped per user, prompting before overwriting files that
    changed locally since the last pull.
    """
    configuration = config.get_default()
    app_url = configuration['app_url']
    # Command-line deployment overrides the configured default.
    if options.deployment != None:
        deployment_name = options.deployment
    else:
        deployment_name = configuration['deployment_name']
    client_id = configuration['client_id']
    client_secret = configuration['client_secret']
    token_manager = auth.TokenManager(client_id=client_id, client_secret=client_secret, app_url=app_url)
    # --all pulls every account's programs; otherwise only the current user's.
    if options.all == True:
        account_id = None
    else:
        account_id = accounts.get_logged_in_account_id(token_manager=token_manager, app_url=app_url)
    programs_details = programs.get_programs(deployment_name, token_manager=token_manager, created_by=account_id, app_url=app_url)
    if not os.path.exists(options.directory):
        os.mkdir(options.directory)
    # Resolve the set of program owners to usernames in one batch call.
    account_ids = set()
    for program in programs_details:
        account_ids.add(program['createdBy'])
    accounts_details = accounts.get_accounts(account_ids, token_manager=token_manager, app_url=app_url)
    account_lookup = {}
    for account in accounts_details['accounts']:
        account_lookup[account['id']] = account
    # decision == 'A' means "override All": skip further prompting.
    decision = None
    for program in programs_details:
        program_name = program['name']
        juttle_filename = '%s.juttle' % escape_filename(program_name)
        if options.per_user_directory:
            username = account_lookup[program['createdBy']]['username']
            userdir = os.path.join(options.directory, username)
            if not os.path.exists(userdir):
                os.mkdir(userdir)
            juttle_filepath = os.path.join(userdir, juttle_filename)
        else:
            juttle_filepath = os.path.join(options.directory, juttle_filename)
        # Existing local file: compare mtimes and ask what to do.
        if os.path.exists(juttle_filepath) and decision != 'A':
            program_code = None
            with codecs.open(juttle_filepath, 'r', encoding='UTF-8') as program_file:
                program_code = program_file.read()
            local_last_edited = int(os.stat(juttle_filepath).st_mtime)
            remote_last_edited = dates.iso8601_to_epoch(program['lastEdited'])
            if local_last_edited != remote_last_edited:
                info('Juttle changed since last pull for "%s"' % program_name)
                decision = console.prompt('Would you like to ' '(O - Override,' ' S - Skip,' ' R - Review Changes,' ' A - override All)?')
                if decision == 'R':
                    # Show a line diff of local vs remote before re-asking.
                    info('Following is what would change if we overrode using your copy:')
                    info('*' * 80)
                    for line in difflib.ndiff(program['code'].split('\n'), program_code.split('\n')):
                        info(line)
                    info('*' * 80)
                    decision = console.prompt('Would you like to ' '(O - Override,' ' S - Skip)?')
                if decision == 'S':  # jump to the next file
                    continue
                elif decision == 'O':
                    pass
                elif decision == 'A':
                    pass
                else:
                    raise JutException('Unexpected option "%s"' % decision)
        info('importing program "%s" to %s' % (program['name'], juttle_filepath))
        with codecs.open(juttle_filepath, 'w', encoding='UTF-8') as program_file:
            program_file.write(program['code'])
        # update creation time to match the lastEdited field
        epoch = dates.iso8601_to_epoch(program['lastEdited'])
        os.utime(juttle_filepath, (epoch, epoch))
def _set_vcs_rbridge_config(self, v, load=False):
    """Setter method for vcs_rbridge_config, mapped from YANG variable /brocade_vcs_rpc/vcs_rbridge_config (rpc)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_vcs_rbridge_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vcs_rbridge_config() directly.

    :param v: the new value; coerced through its union type when present.
    :param load: unused here; kept for generated-code signature parity.
    :raises ValueError: if the value is not compatible with the rpc type.
    """
    # Values that carry a union-type constructor normalize themselves first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=vcs_rbridge_config.vcs_rbridge_config, is_leaf=True, yang_name="vcs-rbridge-config", rest_name="vcs-rbridge-config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vcsenable-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """vcs_rbridge_config must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=vcs_rbridge_config.vcs_rbridge_config, is_leaf=True, yang_name="vcs-rbridge-config", rest_name="vcs-rbridge-config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vcsenable-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='rpc', is_config=True)""", })
    self.__vcs_rbridge_config = t
    # Notify the object of the change when a change hook is installed.
    if hasattr(self, '_set'):
        self._set()
def hardware_custom_profile_kap_custom_profile_rpvst_rpvst_hello_interval(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config XML for the rpvst hello-interval of a
    kap custom profile and hands it to the callback.
    """
    root = ET.Element("config")
    hw = ET.SubElement(root, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
    profile = ET.SubElement(hw, "custom-profile")
    kap = ET.SubElement(profile, "kap-custom-profile")
    # 'name' is the profile's key leaf.
    ET.SubElement(kap, "name").text = kwargs.pop('name')
    rpvst_node = ET.SubElement(kap, "rpvst")
    ET.SubElement(rpvst_node, "rpvst_hello_interval").text = kwargs.pop('rpvst_hello_interval')
    # An explicit callback kwarg overrides the instance default.
    return kwargs.pop('callback', self._callback)(root)
def append(self, key, _item):  # type: (Union[Key, str], Any) -> Table
    """Appends a (key, item) to the table.

    The raw value is wrapped into an ``Item`` when necessary, stored in
    the underlying container, mirrored into the dict view, and its
    indentation is adjusted to match this table's own indentation.

    Returns this Table to allow chaining.
    """
    if not isinstance(_item, Item):
        _item = item(_item)
    self._value.append(key, _item)
    if isinstance(key, Key):
        key = key.key
    if key is not None:
        # Mirror the entry into the dict base class for mapping access.
        super(Table, self).__setitem__(key, _item)
    # Extract this table's own indentation (first run of spaces).
    m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
    if not m:
        # Table is not indented; leave the item as-is.
        return self
    indent = m.group(1)
    if not isinstance(_item, Whitespace):
        # Splice the table's indent into the item's existing indent trivia.
        m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent)
        if not m:
            _item.trivia.indent = indent
        else:
            _item.trivia.indent = m.group(1) + indent + m.group(2)
    return self
def _wrapper(func, *args, **kwargs):
    'Decorator for the methods that follow'
    # NOTE(review): 'self', 'log' and 'fuse_exit' are captured from the
    # enclosing scope.  This wrapper translates Python exceptions raised
    # by FUSE operation handlers into negative errno return values.
    try:
        if func.__name__ == "init":
            # init may not fail, as its return code is just stored as
            # private_data field of struct fuse_context
            return func(*args, **kwargs) or 0
        else:
            try:
                return func(*args, **kwargs) or 0
            except OSError as e:
                if e.errno > 0:
                    log.debug("FUSE operation %s raised a %s, returning errno %s.", func.__name__, type(e), e.errno, exc_info=True)
                    return -e.errno
                else:
                    log.error("FUSE operation %s raised an OSError with negative " "errno %s, returning errno.EINVAL.", func.__name__, e.errno, exc_info=True)
                    return -errno.EINVAL
            except Exception:
                log.error("Uncaught exception from FUSE operation %s, " "returning errno.EINVAL.", func.__name__, exc_info=True)
                return -errno.EINVAL
    except BaseException as e:
        # Remember the fatal error so the caller can re-raise it later.
        self.__critical_exception = e
        log.critical("Uncaught critical exception from FUSE operation %s, aborting.", func.__name__, exc_info=True)
        # the raised exception (even SystemExit) will be caught by FUSE
        # potentially causing SIGSEGV, so tell system to stop/interrupt FUSE
        fuse_exit()
        return -errno.EFAULT
def convertTimestamps(column):
    """Convert a dtype of a given column to a datetime.

    This method tries to do this by brute force.

    Args:
        column (pandas.Series): A Series object with all rows.

    Returns:
        column: Converted to datetime if no errors occured, else the
            original column will be returned.
    """
    converted = column
    try:
        # Probe a single random row first instead of the whole column;
        # if it is not datetime-like this raises and we keep the original.
        np.datetime64(column[randint(0, len(column.index) - 1)])
        converted = column.apply(to_datetime)
    except Exception:
        # Any failure means "leave the column untouched".
        pass
    return converted
def add_tracks(self, subtracks):
    """Add one or more tracks to this view.

    subtracks : Track or iterable of Tracks
        A single Track instance or an iterable of them.
    """
    # Normalize a lone Track into a one-element list.
    if isinstance(subtracks, Track):
        subtracks = [subtracks]
    for track in subtracks:
        # Tag each track with this view before registering it.
        track.subgroups['view'] = self.view
        self.add_child(track)
        self.subtracks.append(track)
def on_windows():
    """Returns true if running on windows, whether in cygwin or not."""
    if bjam.variable("NT"):
        return True
    if bjam.variable("UNIX"):
        # Cygwin reports as UNIX but with a CYGWIN uname.
        uname = bjam.variable("JAMUNAME")
        if uname and uname[0].startswith("CYGWIN"):
            return True
    return False
def expand_exc(excs, search, replace):
    """Find string in tokenizer exceptions, duplicate entry and replace string.

    For example, to add additional versions with typographic apostrophes.

    excs (dict): Tokenizer exceptions.
    search (unicode): String to find and replace.
    replace (unicode): Replacement.
    RETURNS (dict): Combined tokenizer exceptions.
    """
    def _fix_token(token, search, replace):
        # Copy the token dict and rewrite its surface form.
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed

    combined = dict(excs)
    combined.update({
        token_string.replace(search, replace):
            [_fix_token(t, search, replace) for t in tokens]
        for token_string, tokens in excs.items()
        if search in token_string
    })
    return combined
def build_expression_from_tree(self, runnable, regime, tree_node):
    """Recursively builds a Python expression from a parsed expression tree.

    @param runnable: Runnable object to which this expression would be added.
    @type runnable: lems.sim.runnable.Runnable

    @param regime: Dynamics regime being built.
    @type regime: lems.model.dynamics.Regime

    @param tree_node: Root node for the tree from which the expression
    is to be built.
    @type tree_node: lems.parser.expr.ExprNode

    @return: Generated Python expression.
    @rtype: string
    """
    component_type = self.model.component_types[runnable.component.type]
    dynamics = component_type.dynamics
    if tree_node.type == ExprNode.VALUE:
        # Leaf node: a symbol if it starts with a letter, else a literal.
        if tree_node.value[0].isalpha():
            if tree_node.value == 't':
                # 't' always refers to the simulation clock.
                return 'self.time_completed'
            elif tree_node.value in component_type.requirements:
                # Walk up the runnable hierarchy until an ancestor
                # provides the required variable.
                var_prefix = 'self'
                v = tree_node.value
                r = runnable
                while (v not in r.instance_variables and v not in r.derived_variables):
                    var_prefix = '{0}.{1}'.format(var_prefix, 'parent')
                    r = r.parent
                    if r == None:
                        raise SimBuildError("Unable to resolve required " "variable '{0}'".format(v))
                return '{0}.{1}'.format(var_prefix, v)
            elif (tree_node.value in dynamics.derived_variables or (regime is not None and tree_node.value in regime.derived_variables)):
                return 'self.{0}'.format(tree_node.value)
            else:
                # Plain state variable: read the shadow copy so in-step
                # updates see consistent values.
                return 'self.{0}_shadow'.format(tree_node.value)
        else:
            return tree_node.value
    elif tree_node.type == ExprNode.FUNC1:
        # Unary function application.
        pattern = '({0}({1}))'
        func = self.convert_func(tree_node.func)
        if 'random.uniform' in func:
            # random.uniform requires an explicit lower bound of 0 here.
            pattern = '({0}(0,{1}))'
        return pattern.format(func, self.build_expression_from_tree(runnable, regime, tree_node.param))
    else:
        # Binary operator node: recurse into both operands.
        return '({0}) {1} ({2})'.format(self.build_expression_from_tree(runnable, regime, tree_node.left), self.convert_op(tree_node.op), self.build_expression_from_tree(runnable, regime, tree_node.right))
def handle_m2m(self, sender, instance, **kwargs):
    """Handle many to many relationships by delegating to the save handler."""
    # The save handler expects (model_class, instance).
    model_class = instance.__class__
    self.handle_save(model_class, instance)
def connect_pull(self, timeout=1):
    '''Establish a connection with the event pull socket
    Default timeout is 1 s

    Returns True when the pusher is connected (or, in the asynchronous
    case, assumed connected), otherwise the current cpush flag.
    '''
    # Already connected: nothing to do.
    if self.cpush:
        return True
    if self._run_io_loop_sync:
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if self.pusher is None:
                self.pusher = salt.transport.ipc.IPCMessageClient(self.pulluri, io_loop=self.io_loop)
            try:
                self.io_loop.run_sync(lambda: self.pusher.connect(timeout=timeout))
                self.cpush = True
            except Exception:
                # Best-effort connect: leave cpush False on failure.
                pass
    else:
        if self.pusher is None:
            self.pusher = salt.transport.ipc.IPCMessageClient(self.pulluri, io_loop=self.io_loop)
        # For the asynchronous case, the connect will be deferred to when
        # fire_event() is invoked.
        self.cpush = True
    return self.cpush
def divisors(n):
    """Generate the divisors of n.

    Yields every divisor of ``n`` exactly once, in no particular order
    (each small divisor i is followed by its cofactor n // i).

    :param n: positive integer.
    """
    for i in range(1, int(math.sqrt(n) + 1)):
        if n % i == 0:
            yield i
            if i * i != n:
                # Fix: use floor division so the cofactor stays an int;
                # 'n / i' is true division under Python 3 and yields floats.
                yield n // i
def receivestealth(scanpriv, spendpriv, ephempub):
    '''Derive the private key for a stealth payment, using the scan and
    spend private keys, and the ephemeral public key.

    Input private keys should be 64-char hex strings, and ephemeral
    public key should be a 66-char hex compressed public key.

    >>> receivestealth('af4afaeb40810e5f8abdbb177c31a2d310913f91cf556f5350bca10cbfe8b9ec', 'd39758028e201e8edf6d6eec6910ae4038f9b1db3f2d4e2d109ed833be94a026', '03b8a715c9432b2b52af9d58aaaf0ccbdefe36d45e158589ecc21ba2f064ebb315')
    '6134396c3bc9a56ccaf80cd38728e6d3a7751524246e7924b21b08b0bfcc3cc4'
    '''
    # ECDH: shared point = ephemeral pubkey * scan privkey (compressed).
    shared_point = multiplypub(ephempub, scanpriv, True)
    # Stealth privkey = H(shared) + spend privkey (scalar addition).
    return addprivkeys(sha256(shared_point), spendpriv)
def get(self, *args, **kwargs):
    """Get the sub interfaces for this VlanInterface

    >>> itf = engine.interface.get(3)
    >>> list(itf.vlan_interface)
    [Layer3PhysicalInterfaceVlan(name=VLAN 3.3), Layer3PhysicalInterfaceVlan(name=VLAN 3.5),
     Layer3PhysicalInterfaceVlan(name=VLAN 3.4)]

    :param int args: args are translated to vlan_id=args[0]
    :param kwargs: key value for sub interface
    :rtype: VlanInterface or None
    """
    # A positional argument is shorthand for vlan_id=<arg>.
    if args:
        kwargs = {'vlan_id': str(args[0])}
    key, value = kwargs.popitem()
    for candidate in self:
        # Match on the interface itself when querying by vlan_id.
        if 'vlan_id' in key and getattr(candidate, key, None) == value:
            return candidate
        # Otherwise (or additionally) match any of its sub-interfaces.
        for vlan in candidate.interfaces:
            if getattr(vlan, key, None) == value:
                return candidate
def _start(self):
    """Starts the underlying send and receive threads.

    Creates zero-count semaphores gating the send/receive paths, spawns
    the two worker greenlets, and links them to the error handler so a
    worker exit is reported.
    """
    # Initialize the locks
    self._recv_lock = coros.Semaphore(0)
    self._send_lock = coros.Semaphore(0)
    # Boot the threads
    self._recv_thread = gevent.spawn(self._recv)
    self._send_thread = gevent.spawn(self._send)
    # Link the threads such that we get notified if one or the
    # other exits
    self._recv_thread.link(self._thread_error)
    self._send_thread.link(self._thread_error)
def transform(self, path):
    """Transform a path into an actual Python object.

    The path can be arbitrary long. You can pass the path to a package,
    a module, a class, a function or a global variable, as deep as you
    want, as long as the deepest module is importable through
    ``importlib.import_module`` and each object is obtainable through
    the ``getattr`` method. Local objects will not work.

    Args:
        path (str): the dot-separated path of the object.

    Returns:
        object: the imported module or obtained object, or None when
        ``path`` is None or empty.

    Raises:
        ImportError: if no prefix of ``path`` is an importable module.
        AttributeError: if an attribute lookup along the path fails.
    """
    # Fix: the previous guard "path is None or not path" was redundant;
    # "not path" already covers both None and the empty string.
    if not path:
        return None
    obj_parent_modules = path.split(".")
    objects = [obj_parent_modules.pop(-1)]
    # Peel trailing segments off the path until some prefix imports.
    while True:
        try:
            parent_module_path = ".".join(obj_parent_modules)
            parent_module = importlib.import_module(parent_module_path)
            break
        except ImportError:
            if len(obj_parent_modules) == 1:
                raise ImportError("No module named '%s'" % obj_parent_modules[0])
            objects.insert(0, obj_parent_modules.pop(-1))
    # Resolve the remaining segments as attributes of the module.
    current_object = parent_module
    for obj in objects:
        current_object = getattr(current_object, obj)
    return current_object
def send_invitation(self, user, sender=None, **kwargs):
    """Send an invitation email to ``user``.

    Intermediary that selects the invitation templates, generates the
    token, and skips users who have already joined the site.

    Returns True when the invitation was sent, False otherwise.
    """
    # Active users have already joined; nothing to send.
    if user.is_active:
        return False
    kwargs.update({"token": self.get_token(user)})
    message = self.email_message(user, self.invitation_subject, self.invitation_body, sender, **kwargs)
    message.send()
    return True
def info(self, id):
    """Return the ``Package`` or ``Collection`` record for the
    given item.

    Checks the warm caches first; on a miss, refreshes the index once
    and retries before giving up.
    """
    # self._update_index()  # This is commented because it leads to
    # excessive network load
    if id in self._packages:
        return self._packages[id]
    if id in self._collections:
        return self._collections[id]
    # Cache miss: most probably we did not warm up the cache yet.
    self._update_index()
    if id in self._packages:
        return self._packages[id]
    if id in self._collections:
        return self._collections[id]
    raise ValueError('Package %r not found in index' % id)
def run(coro, loop=None):
    """Convenient shortcut alias to ``loop.run_until_complete``.

    Arguments:
        coro (coroutine): coroutine object to schedule.
        loop (asyncio.BaseEventLoop): optional event loop to use.
            Defaults to: ``asyncio.get_event_loop()``.

    Returns:
        mixed: returned value by coroutine.

    Usage::

        async def mul_2(num):
            return num * 2

        paco.run(mul_2(4))
    """
    # Fall back to the current event loop when none was supplied.
    active_loop = loop or asyncio.get_event_loop()
    return active_loop.run_until_complete(coro)
def com_google_fonts_check_metadata_valid_full_name_values(style, font_metadata, font_familynames, typographic_familynames):
    """METADATA.pb font.full_name field contains font name in right format?"""
    from fontbakery.constants import RIBBI_STYLE_NAMES
    # RIBBI styles validate against the plain family names, everything
    # else against the typographic family names.
    if style in RIBBI_STYLE_NAMES:
        familynames = font_familynames
        skip_message = "No FONT_FAMILYNAME"
    else:
        familynames = typographic_familynames
        skip_message = "No TYPOGRAPHIC_FAMILYNAME"
    if familynames == []:
        yield SKIP, skip_message
    for font_familyname in familynames:
        if font_familyname in font_metadata.full_name:
            yield PASS, ("METADATA.pb font.full_name field contains" " font name in right format." " ('{}' in '{}')").format(font_familyname, font_metadata.full_name)
        else:
            yield FAIL, ("METADATA.pb font.full_name field (\"{}\")" " does not match correct font name format (\"{}\")." "").format(font_metadata.full_name, font_familyname)
def _add_endpoints_to_config ( self , config , co_name , backend_name ) :
"""Use the request path from the context to determine the target backend ,
then construct mappings from bindings to endpoints for the virtual
IdP for the CO .
The endpoint URLs have the form
{ base } / { backend } / { co _ name } / { path }
: type config : satosa . satosa _ config . SATOSAConfig
: type co _ name : str
: type backend _ name : str
: rtype : satosa . satosa _ config . SATOSAConfig
: param config : satosa proxy config
: param co _ name : CO name
: param backend _ name : The target backend name
: return : config with mappings for CO IdP""" | for service , endpoint in self . endpoints . items ( ) :
idp_endpoints = [ ]
for binding , path in endpoint . items ( ) :
url = "{base}/{backend}/{co_name}/{path}" . format ( base = self . base_url , backend = backend_name , co_name = quote_plus ( co_name ) , path = path )
mapping = ( url , binding )
idp_endpoints . append ( mapping )
# Overwrite the IdP config with the CO specific mappings between
# SAML binding and URL endpoints .
config [ "service" ] [ "idp" ] [ "endpoints" ] [ service ] = idp_endpoints
return config |
def advance(parser):  # type: (Parser) -> None
    """Moves the internal parser object to the next lexed token."""
    # Remember where the current token ended, then lex from that point.
    end_of_previous = parser.token.end
    parser.prev_end = end_of_previous
    parser.token = parser.lexer.next_token(end_of_previous)
def Rsync(url, tgt_name, tgt_root=None):
    """RSync a folder.

    Keeps a cached copy under ``tgt_root/tgt_name``; only re-syncs from
    the remote when the cache is invalid, then copies the result into the
    current directory.

    Args:
        url (str): The url of the SOURCE location.
        tgt_name (str): The name of the TARGET.
        tgt_root (str): Path of the target location.
            Defaults to ``CFG["tmp_dir"]``.
    """
    if tgt_root is None:
        tgt_root = str(CFG["tmp_dir"])
    from benchbuild.utils.cmd import rsync
    tgt_dir = local.path(tgt_root) / tgt_name
    # Reuse the cached copy when it is still valid.
    if not source_required(tgt_dir):
        Copy(tgt_dir, ".")
        return
    rsync("-a", url, tgt_dir)
    # Record the new content hash so the next call can skip the sync.
    update_hash(tgt_dir)
    Copy(tgt_dir, ".")
def init_app(self, app, config_prefix=None):
    """Actual method to read redis settings from app configuration, initialize
    Redis connection and copy all public connection methods to current
    instance.

    :param app: :class:`flask.Flask` application instance.
    :param config_prefix: Config prefix to use. By default: ``REDIS``
    :raises ValueError: if the same config prefix is registered twice.
    """
    # Put redis to application extensions
    if 'redis' not in app.extensions:
        app.extensions['redis'] = {}
    # Which config prefix to use, custom or default one?
    self.config_prefix = config_prefix = config_prefix or 'REDIS'
    # No way to do registration two times
    if config_prefix in app.extensions['redis']:
        raise ValueError('Already registered config prefix {0!r}.'.format(config_prefix))
    # Start reading configuration, define converters to use and key func
    # to prepend config prefix to key value
    converters = {'port': int}
    convert = lambda arg, value: (converters[arg](value) if arg in converters else value)
    key = lambda param: '{0}_{1}'.format(config_prefix, param)
    # Which redis connection class to use?
    klass = app.config.get(key('CLASS'), RedisClass)
    # Import connection class if it is still in dotted-path notation
    if isinstance(klass, string_types):
        klass = import_string(klass)
    # Should we use URL configuration
    url = app.config.get(key('URL'))
    # If should, parse URL and store values to application config to later
    # reuse if necessary
    if url:
        urlparse.uses_netloc.append('redis')
        url = urlparse.urlparse(url)
        # URL could contains host, port, user, password and db values
        app.config[key('HOST')] = url.hostname
        app.config[key('PORT')] = url.port or 6379
        app.config[key('USER')] = url.username
        app.config[key('PASSWORD')] = url.password
        db = url.path.replace('/', '')
        app.config[key('DB')] = db if db.isdigit() else None
    # Host is not a mandatory key if you want to use connection pool. But
    # when present and starts with file:// or / use it as unix socket path
    host = app.config.get(key('HOST'))
    if host and (host.startswith('file://') or host.startswith('/')):
        app.config.pop(key('HOST'))
        app.config[key('UNIX_SOCKET_PATH')] = host
    # Collect only the constructor kwargs the connection class accepts.
    args = self._build_connection_args(klass)
    kwargs = dict([(arg, convert(arg, app.config[key(arg.upper())])) for arg in args if key(arg.upper()) in app.config])
    # Initialize connection and store it to extensions
    connection = klass(**kwargs)
    app.extensions['redis'][config_prefix] = connection
    # Include public methods to current instance
    self._include_public_methods(connection)
def _load_secedit_data():
    '''Helper function that loads secedit data. It runs `secedit /export /cfg
    <file_name>` which creates a file that contains the secedit data.

    Returns:
        str: The contents of the file generated by the secedit command
    '''
    try:
        # Export into a per-run temp file in the minion cache dir.
        f_exp = os.path.join(__opts__['cachedir'], 'secedit-{0}.txt'.format(UUID))
        __salt__['cmd.run'](['secedit', '/export', '/cfg', f_exp])
        # secedit writes its export as UTF-16; decode accordingly.
        with io.open(f_exp, encoding='utf-16') as fp:
            secedit_data = fp.readlines()
        return secedit_data
    finally:
        # Always clean up the temporary export file.
        if __salt__['file.file_exists'](f_exp):
            __salt__['file.remove'](f_exp)
def _bsd_brdel(br):
    '''Internal, deletes the bridge'''
    tool = _tool_path('ifconfig')
    # Without a bridge name there is nothing to delete.
    if not br:
        return False
    return __salt__['cmd.run']('{0} {1} destroy'.format(tool, br), python_shell=False)
def add_cnt_64bit(self, oid, value, label=None):
    """Short helper to add a 64 bit counter value to the MIB subtree.

    :param oid: OID string for the new entry.
    :param value: counter value; truncated to the unsigned 64-bit range.
    :param label: optional label for the entry.
    """
    # Truncate integer to 64 bits.  Fix: the modulus must be 2**64
    # (18446744073709551616); the previous value of 2**64 - 1 wrongly
    # mapped the maximum 64-bit counter value to 0.
    self.add_oid_entry(oid, 'Counter64', int(value) % 18446744073709551616, label=label)
def InsertFloatArg(self, string='', **unused_kwargs):
    """Inserts a Float argument."""
    try:
        value = float(string)
    except (TypeError, ValueError):
        # Surface a parser-level error instead of the raw conversion error.
        raise errors.ParseError('{0:s} is not a valid float.'.format(string))
    return self.InsertArg(value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.