signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def setup(self, app):
    """Parse and prepare the plugin's configuration.

    Enables the plugin when at least one backend is configured, selects a
    default backend (falling back to the first configured one), and parses
    each backend location string into a urlparse result.

    :param app: application instance forwarded to the base plugin setup.
    :raises PluginException: if the configured default backend name is not
        among the configured backends.
    """
    super().setup(app)
    # Truthy count: the plugin is enabled when any backend is configured.
    self.enabled = len(self.cfg.backends)
    self.default = self.cfg.default
    if not self.default and self.enabled:
        # No explicit default: use the name of the first configured backend.
        self.default = self.cfg.backends[0][0]
    # Map backend name -> parsed location (urlparse result).
    self.backends_hash = {name: parse.urlparse(loc) for (name, loc) in self.cfg.backends}
    if self.default and self.default not in self.backends_hash:
        raise PluginException('Backend not found: %s' % self.default)
def get_size(self):
    """Return the size of this VideoFile in bytes.

    The size is read lazily from the file's stat information on first
    access and cached on the instance afterwards.

    :return: size as integer
    """
    cached = self._size
    if cached is None:
        cached = self._filepath.stat().st_size
        self._size = cached
    return cached
def get_perturbed_indices(self, tol=1e-8):
    """Return indices of perturbed elements of the deformation gradient.

    An element is considered perturbed when it differs from the
    corresponding entry of the 3x3 identity matrix by more than ``tol``.

    :param tol: absolute tolerance used to detect a perturbation.
    :return: list of (row, col) index tuples, in row-major order.
    """
    deviation = np.abs(self - np.eye(3)) > tol
    return [tuple(idx) for idx in np.argwhere(deviation)]
def distance_to(self, other_catchment):
    """Returns the distance between the centroids of two catchments in kilometers.

    :param other_catchment: Catchment to calculate distance to
    :type other_catchment: :class:`.Catchment`
    :return: Distance between the catchments in km.
    :rtype: float
    :raises InsufficientDataError: if the ``descriptors`` attribute is not set.
    """
    try:
        if self.country == other_catchment.country:
            try:
                # Centroid national-grid coordinates are in metres; 0.001 converts to km.
                return 0.001 * hypot(self.descriptors.centroid_ngr.x - other_catchment.descriptors.centroid_ngr.x,
                                     self.descriptors.centroid_ngr.y - other_catchment.descriptors.centroid_ngr.y)
            except TypeError:
                # In case no centroid available, just return infinity which is helpful in most cases
                return float('+inf')
        else:
            # If the catchments are in a different country (e.g. `ni` versus `gb`) then set distance to infinity.
            return float('+inf')
    except (TypeError, KeyError):
        raise InsufficientDataError("Catchment `descriptors` attribute must be set first.")
def A_cylinder(D, L):
    r'''Returns the surface area of a cylinder.

    .. math::
        A = \pi D L + 2 \cdot \frac{\pi D^2}{4}

    Parameters
    ----------
    D : float
        Diameter of the cylinder, [m]
    L : float
        Length of the cylinder, [m]

    Returns
    -------
    A : float
        Surface area, [m^2]

    Examples
    --------
    >>> A_cylinder(0.01, .1)
    0.0032986722862692833
    '''
    # Two circular end caps plus the lateral (side) surface.
    cap_area = pi * D ** 2 / 4 * 2
    side_area = pi * D * L
    return cap_area + side_area
def create_app(config_name):
    """Application factory function.

    Builds a Flask application configured from the ``CONFIG`` mapping,
    initialises Bootstrap, and registers the main blueprint.

    :param config_name: key into the ``CONFIG`` mapping selecting the
        configuration object to load.
    :return: the configured :class:`Flask` application.
    """
    app = Flask(__name__)
    app.config.from_object(CONFIG[config_name])
    BOOTSTRAP.init_app(app)
    # call controllers (imported here to avoid circular imports at module load)
    from flask_seguro.controllers.main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app
def pivot(self, columns, rows, values=None, collect=None, zero=None):
    """Generate a table with a column for each unique value in ``columns``,
    with rows for each unique value in ``rows``. Each row counts/aggregates
    the values that match both row and column based on ``collect``.

    Args:
        ``columns`` -- a single column label or index, (``str`` or ``int``),
            used to create new columns, based on its unique values.
        ``rows`` -- row labels or indices, (``str`` or ``int`` or list),
            used to create new rows based on its unique values.
        ``values`` -- column label in table for use in aggregation.
            Default None.
        ``collect`` -- aggregation function, used to group ``values``
            over row-column combinations. Default None.
        ``zero`` -- zero value for non-existent row-column combinations.

    Raises:
        TypeError -- if ``collect`` is passed in and ``values`` is not,
            vice versa.

    Returns:
        New pivot table, with row-column combinations, as specified, with
        aggregated ``values`` by ``collect`` across the intersection of
        ``columns`` and ``rows``. Simple counts provided if values and
        collect are None, as default.

    >>> titanic = Table().with_columns('age', make_array(21, 44, 56, 89, 95,
    ...     40, 80, 45), 'survival', make_array(0, 0, 0, 1, 1, 1, 0, 1),
    ...     'gender', make_array('M', 'M', 'M', 'M', 'F', 'F', 'F', 'F'),
    ...     'prediction', make_array(0, 0, 1, 1, 0, 1, 0, 1))
    >>> titanic.pivot('survival', 'gender')
    gender | 0    | 1
    F      | 1    | 3
    M      | 3    | 1
    >>> titanic.pivot('survival', 'gender', values='age', collect=np.mean)
    gender | 0       | 1
    F      | 80      | 60
    M      | 40.3333 | 89
    >>> titanic.pivot('survival', make_array('prediction', 'gender'))
    prediction | gender | 0    | 1
    0          | F      | 1    | 1
    0          | M      | 2    | 0
    1          | F      | 0    | 2
    1          | M      | 1    | 1
    >>> titanic.pivot('survival', 'gender', values='age')
    Traceback (most recent call last):
        ...
    TypeError: values requires collect to be specified
    >>> titanic.pivot('survival', 'gender', collect=np.mean)
    Traceback (most recent call last):
        ...
    TypeError: collect requires values to be specified
    """
    # Guard: ``values`` and ``collect`` must be supplied together.
    if collect is not None and values is None:
        raise TypeError('collect requires values to be specified')
    if values is not None and collect is None:
        raise TypeError('values requires collect to be specified')
    columns = self._as_label(columns)
    rows = self._as_labels(rows)
    if values is None:
        selected = self.select([columns] + rows)
    else:
        selected = self.select([columns, values] + rows)
    grouped = selected.groups([columns] + rows, collect)
    # Generate existing combinations of values from columns in rows
    rows_values = sorted(list(set(self.select(rows).rows)))
    pivoted = type(self)(rows).with_rows(rows_values)
    # Generate other columns and add them to pivoted
    by_columns = grouped.index_by(columns)
    for label in sorted(by_columns):
        # Discard column value (the first element of each grouped tuple).
        tuples = [t[1:] for t in by_columns[label]]
        column = _fill_with_zeros(rows_values, tuples, zero)
        pivot = self._unused_label(str(label))
        pivoted[pivot] = column
    return pivoted
def pad_img(im, pad):
    """Pad positively with 0 or negatively (cut).

    Parameters
    ----------
    im : 2d array
        The image.
    pad : 4 numbers
        (ytop, ybottom, xleft, xright) or (imin, imax, jmin, jmax).
        Positive entries pad the corresponding side; negative entries
        crop it.

    Returns
    -------
    im : 2d array
        The padded (or cropped) image.
    offset : 2 numbers
        The (y, x) offset related to the input image.

    Notes
    -----
    This changes the size of the image.
    """
    im = np.asarray(im)
    pad = np.asarray(pad)
    # get shape
    shape = im.shape
    # extract offset from padding: the negation of the (top, left) entries
    offset = -pad[::2]
    # if the padding is negative, cut the matrix
    cut = pad < 0
    if cut.any():
        # Keep only the negative entries as an int array.  The original
        # in-place `cut *= pad` multiplied a bool array by ints, which
        # modern NumPy rejects (unsafe cast), so build a fresh array.
        cut = cut * pad
        # the left/top components should be positive (slice starts)
        cut[::2] *= -1
        # The right/bottom components can't be 0, replace by the axis size
        cut[1::2] += (cut[1::2] == 0) * shape
        # cut the image
        im = im[cut[0]:cut[1], cut[2]:cut[3]]
    # extract positive padding
    ppad = pad > 0
    if ppad.any():
        pad = pad * ppad
        # separate pad for application on matrix
        ypad = (pad[0], pad[1])
        xpad = (pad[2], pad[3])
        # np.lib.pad was removed in NumPy 2.0; np.pad is the public API.
        im = np.pad(im, (ypad, xpad), mode='mean')
    return im, offset
def div_safe(numerator, denominator):
    """Ufunc-extension that returns 0 instead of nan when dividing numpy arrays.

    Parameters
    ----------
    numerator : array-like
        Values to be divided; must be array-like, not a scalar.
    denominator : scalar or array-like
        Divisor(s), broadcastable against ``numerator``.

    Returns
    -------
    numpy.ndarray
        ``numerator / denominator`` with every non-finite entry
        (-inf, inf, NaN) replaced by 0.

    Raises
    ------
    ValueError
        If ``numerator`` is a scalar.

    Example
    -------
    div_safe([-1, 0, 1], 0) == [0, 0, 0]
    """
    # First handle scalars: the element-wise fix-up below needs an array.
    if np.isscalar(numerator):
        raise ValueError("div_safe should only be used with an array-like numerator")
    # Suppress divide-by-zero / invalid-value warnings; the offending
    # entries are zeroed out immediately below.  (The original wrapped
    # this in `try/except ValueError: raise e`, a no-op that is removed.)
    with np.errstate(divide='ignore', invalid='ignore'):
        result = np.true_divide(numerator, denominator)
        result[~np.isfinite(result)] = 0  # -inf inf NaN
    return result
def download_manylinux_wheels(self, abi, packages, directory):
    # type: (str, List[str], str) -> None
    """Download wheel files for manylinux for all the given packages.

    :param abi: CPython ABI tag to request (e.g. ``cp37m``).
    :param packages: package requirement strings to download wheels for.
    :param directory: destination directory for the downloaded wheels.
    """
    # If any one of these dependencies fails pip will bail out. Since we
    # are only interested in all the ones we can download, we need to feed
    # each package to pip individually. The return code of pip doesn't
    # matter here since we will inspect the working directory to see which
    # wheels were downloaded. We are only interested in wheel files
    # compatible with lambda, which means manylinux1_x86_64 platform and
    # cpython implementation. The compatible abi depends on the python
    # version and is checked later.
    for package in packages:
        arguments = ['--only-binary=:all:', '--no-deps', '--platform',
                     'manylinux1_x86_64', '--implementation', 'cp',
                     '--abi', abi, '--dest', directory, package]
        self._execute('download', arguments)
def get_hotp(secret, intervals_no, as_string=False, casefold=True,
             digest_method=hashlib.sha1, token_length=6):
    """Get HMAC-based one-time password (RFC 4226) for a secret and interval.

    :param secret: the base32-encoded string acting as secret key
    :type secret: str or bytes
    :param intervals_no: interval number used for getting different tokens, it
        is incremented with each use
    :type intervals_no: int
    :param as_string: True if result should be padded string, False otherwise
    :type as_string: bool
    :param casefold: True (default), if should accept also lowercase alphabet
    :type casefold: bool
    :param digest_method: method of generating digest (hashlib.sha1 by default)
    :type digest_method: callable
    :param token_length: length of the token (6 by default)
    :type token_length: int
    :return: generated HOTP token
    :rtype: int or bytes

    >>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=1)
    765705
    >>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2)
    816065
    >>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2, as_string=True)
    b'816065'
    """
    if isinstance(secret, str):
        # Unicode input: work with bytes from here on.
        secret = secret.encode('utf-8')
    # Get rid of all the spacing:
    secret = secret.replace(b' ', b'')
    try:
        key = base64.b32decode(secret, casefold=casefold)
    except TypeError:
        raise TypeError('Incorrect secret')
    # The moving factor is packed as an 8-byte big-endian counter.
    msg = struct.pack('>Q', intervals_no)
    hmac_digest = hmac.new(key, msg, digest_method).digest()
    # Dynamic truncation (RFC 4226 section 5.3): the offset is the low
    # nibble of the *last* digest byte.  The previous hard-coded index 19
    # only worked for SHA-1's 20-byte digest and broke other digest_methods.
    o = hmac_digest[-1] & 15
    token_base = struct.unpack('>I', hmac_digest[o:o + 4])[0] & 0x7fffffff
    token = token_base % (10 ** token_length)
    if as_string:
        # Zero-padded bytes representation, e.g. b'816065'.
        # TODO: should as_string=True return str, not bytes?
        return '{{:0{}d}}'.format(token_length).format(token).encode('ascii')
    return token
def traverse_nodes(self, qids, up=True, down=False, **args):
    """Traverse (optionally) up and (optionally) down from an input set of nodes.

    Arguments
    ---------
    qids : list[str]
        list of seed node IDs to start from
    up : bool
        if True, include ancestors
    down : bool
        if True, include descendants
    **args
        forwarded to ``get_filtered_graph`` (e.g. ``relations``, a list of
        relations used to filter the graph).

    Return
    ------
    set[str]
        nodes reachable from qids (a set, not a list, is returned)
    """
    g = self.get_filtered_graph(**args)
    nodes = set()
    for id in qids:
        # reflexive - always add self
        nodes.add(id)
        if down:
            nodes.update(nx.descendants(g, id))
        if up:
            nodes.update(nx.ancestors(g, id))
    return nodes
def _adjust_scrollbar(self, f):
    """Adjust the scrollbar position to take into account the zooming of
    the figure.

    :param f: zoom factor; scrollbar values are scaled by ``f`` and
        shifted by half a page step so the visible centre stays
        (approximately) fixed while zooming.
    """
    # Adjust horizontal scrollbar:
    hb = self.horizontalScrollBar()
    hb.setValue(int(f * hb.value() + ((f - 1) * hb.pageStep() / 2)))
    # Adjust the vertical scrollbar:
    vb = self.verticalScrollBar()
    vb.setValue(int(f * vb.value() + ((f - 1) * vb.pageStep() / 2)))
def _calc_starts ( dims ) :
"""Calculate starting indexes
Parameters
dims : list of list of int
from ( via cython conversion ) vector [ vector [ uint ] ] dims
Examples
> > > _ calc _ starts ( [ [ 8 , 2 ] , [ 5 ] , [ 6 , 2 ] ] )
[0 , 16 , 21]""" | # NB : Python uses 0 - indexing ; R uses 1 - indexing .
l = len ( dims )
s = [ np . prod ( d ) for d in dims ]
starts = np . cumsum ( [ 0 ] + s ) [ 0 : l ] . tolist ( )
# coerce things into ints before returning
return [ int ( i ) for i in starts ] |
def roleCreated(self, *args, **kwargs):
    """Role Created Messages

    Message that a new role has been created.

    This exchange outputs: ``v1/role-message.json#``

    This exchange takes the following keys:

    * reserved: Space reserved for future routing-key entries, you should
      always match this entry with `#`. As automatically done by our
      tooling, if not specified.
    """
    # Static exchange description; the base class turns it into a topic
    # exchange binding with the caller-supplied routing arguments.
    ref = {
        'exchange': 'role-created',
        'name': 'roleCreated',
        'routingKey': [
            {
                'multipleWords': True,
                'name': 'reserved',
            },
        ],
        'schema': 'v1/role-message.json#',
    }
    return self._makeTopicExchange(ref, *args, **kwargs)
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
    """Creates readers and queues for reading example protos.

    :param input_files: iterable of file patterns; each entry may itself be a
        comma-separated list of glob patterns.
    :param batch_size: number of examples per returned batch.
    :param shuffle: if True, draw examples from a shuffle queue.
    :param num_epochs: number of passes over the data; None or 0 means
        loop indefinitely.
    :return: a (example_id, encoded_example) batch tensor pair.
    """
    files = []
    for e in input_files:
        for path in e.split(','):
            files.extend(file_io.get_matching_files(path))
    thread_count = multiprocessing.cpu_count()
    # The minimum number of instances in a queue from which examples are drawn
    # randomly. The larger this number, the more randomness at the expense of
    # higher memory requirements.
    min_after_dequeue = 1000
    # When batching data, the queue's capacity will be larger than the batch_size
    # by some factor. The recommended formula is (num_threads + a small safety
    # margin). For now, we use a single thread for reading, so this can be small.
    queue_size_multiplier = thread_count + 3
    # Convert num_epochs == 0 -> num_epochs is None, if necessary
    num_epochs = num_epochs or None
    # Build a queue of the filenames to be read.
    filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
    example_id, encoded_example = tf.TextLineReader().read_up_to(filename_queue, batch_size)
    if shuffle:
        capacity = min_after_dequeue + queue_size_multiplier * batch_size
        return tf.train.shuffle_batch(
            [example_id, encoded_example], batch_size, capacity,
            min_after_dequeue, enqueue_many=True, num_threads=thread_count)
    else:
        capacity = queue_size_multiplier * batch_size
        return tf.train.batch(
            [example_id, encoded_example], batch_size, capacity=capacity,
            enqueue_many=True, num_threads=thread_count)
def _process_state_change_events():
    """Process events relating to the overall state of SDP.

    This function starts an event loop which continually checks for
    and responds to SDP state change events.  It never returns.
    """
    sdp_state = SDPState()
    service_states = get_service_state_list()
    state_events = sdp_state.get_event_queue(subscriber=__service_name__)
    state_is_off = sdp_state.current_state == 'off'
    counter = 0
    while True:
        time.sleep(0.1)
        if not state_is_off:
            # *Hack* to avoid problems with historical events not being
            # correctly handled by EventQueue.get(), replay old events every
            # 10s - see issue #54
            # NOTE(review): while not 'off', events are only processed on the
            # ~10s replay tick below — confirm this matches the intent.
            if counter % 1000 == 0:
                LOG.debug('Checking published events ... %d', counter / 1000)
                _published_events = state_events.get_published_events(process=True)
                for _state_event in _published_events:
                    _process_event(_state_event, sdp_state, service_states)
        else:
            # While 'off', poll the queue directly for a state change event.
            _state_event = state_events.get()
            if _state_event:
                _process_event(_state_event, sdp_state, service_states)
        # Re-evaluate the state each iteration (it may be changed externally).
        state_is_off = sdp_state.current_state == 'off'
        counter += 1
def double_typos(self):
    """Return the set of letter combinations two typos away from this word.

    Generates every single-typo variant via ``self.typos()`` and then
    applies the typo generator again to each variant.
    """
    return {e2 for e1 in self.typos() for e2 in Word(e1).typos()}
def x509_name(name):
    """Parses a subject into a :py:class:`x509.Name <cg:cryptography.x509.Name>`.

    If ``name`` is a string, :py:func:`parse_name` is used to parse it.

    >>> x509_name('/C=AT/CN=example.com')
    <Name(C=AT,CN=example.com)>
    >>> x509_name([('C', 'AT'), ('CN', 'example.com')])
    <Name(C=AT,CN=example.com)>
    """
    if isinstance(name, six.string_types):
        name = parse_name(name)
    # Map each (type, value) pair to a NameAttribute via the OID lookup table.
    return x509.Name([x509.NameAttribute(NAME_OID_MAPPINGS[typ], force_text(value))
                      for typ, value in name])
def _GetStat(self):
    """Retrieves information about the file entry.

    Returns:
      VFSStat: a stat object.
    """
    stat_object = super(NTFSFileEntry, self)._GetStat()
    # File data stat information.
    if self._fsntfs_file_entry.has_default_data_stream():
        stat_object.size = self._fsntfs_file_entry.get_size()
    # Ownership and permissions stat information.
    # TODO: stat_object.mode
    # TODO: stat_object.uid
    # TODO: stat_object.gid
    # File entry type stat information.
    if self._IsLink(self._fsntfs_file_entry.file_attribute_flags):
        stat_object.type = stat_object.TYPE_LINK
    elif self._fsntfs_file_entry.has_directory_entries_index():
        stat_object.type = stat_object.TYPE_DIRECTORY
    else:
        stat_object.type = stat_object.TYPE_FILE
    # Other stat information.
    file_reference = self._fsntfs_file_entry.file_reference
    # The MFT entry number is the masked low bits of the file reference.
    stat_object.ino = file_reference & _FILE_REFERENCE_MFT_ENTRY_BITMASK
    stat_object.fs_type = 'NTFS'
    stat_object.is_allocated = self._fsntfs_file_entry.is_allocated()
    return stat_object
def _data_augmentation(self, data, label):
    """Perform data augmentations: crop, mirror, resize, sub mean, swap channels...

    :param data: image tensor in HWC layout — assumed uint8 mx.nd.array,
        TODO confirm against the loader.
    :param label: per-object label array; column 0 is the class id and
        columns 1-4 are presumably normalized box coordinates — verify.
    :return: (data, label) with data in CHW float32 layout, mean-subtracted.
    """
    if self.is_train and self._rand_samplers:
        rand_crops = []
        for rs in self._rand_samplers:
            rand_crops += rs.sample(label)
        num_rand_crops = len(rand_crops)
        # randomly pick up one as input data
        if num_rand_crops > 0:
            index = int(np.random.uniform(0, 1) * num_rand_crops)
            width = data.shape[1]
            height = data.shape[0]
            crop = rand_crops[index][0]
            # Crop coordinates are fractions of the image size.
            xmin = int(crop[0] * width)
            ymin = int(crop[1] * height)
            xmax = int(crop[2] * width)
            ymax = int(crop[3] * height)
            if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height:
                data = mx.img.fixed_crop(data, xmin, ymin, xmax - xmin, ymax - ymin)
            else:
                # padding mode: crop extends beyond the image, paste the
                # original into a grey (128) canvas of the crop size.
                new_width = xmax - xmin
                new_height = ymax - ymin
                offset_x = 0 - xmin
                offset_y = 0 - ymin
                data_bak = data
                data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8')
                data[offset_y:offset_y + height, offset_x:offset_x + width, :] = data_bak
            label = rand_crops[index][1]
    # Randomize the interpolation method during training only.
    if self.is_train:
        interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA,
                          cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
    else:
        interp_methods = [cv2.INTER_LINEAR]
    interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))]
    data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method)
    if self.is_train and self._rand_mirror:
        if np.random.uniform(0, 1) > 0.5:
            data = mx.nd.flip(data, axis=1)
            # Mirror the box x-coordinates (columns 1 and 3) for valid labels.
            valid_mask = np.where(label[:, 0] > -1)[0]
            tmp = 1.0 - label[valid_mask, 1]
            label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
            label[valid_mask, 3] = tmp
    # HWC -> CHW, float32, mean subtraction.
    data = mx.nd.transpose(data, (2, 0, 1))
    data = data.astype('float32')
    data = data - self._mean_pixels
    return data, label
def create_client(self, only_db=False):
    """Return a connected MongoDB client.

    :param only_db: when True, connect with the full URI (including the
        trailing database segment); when False (default), strip the last
        path segment and connect to the server only.
    :return: an ``AsyncIOMotorClient``, bound to ``self.ioloop`` when one
        is configured.
    """
    # database = parse_uri(self.uri).get("database")
    # Collapse the original four duplicated branches: pick the URI first,
    # then attach the io_loop only when one is configured.
    uri = self.uri if only_db else "/".join(self.uri.split("/")[:-1])
    if self.ioloop:
        return AsyncIOMotorClient(uri, io_loop=self.ioloop)
    return AsyncIOMotorClient(uri)
def calculate_sum_to_n(n: int) -> int:
    """Sum all integers from 1 to n using the closed-form formula.

    Parameters:
        n (int): The range's upper limit for the summation.

    Returns:
        int: The result of the summation from 1 to n.

    Examples:
        >>> calculate_sum_to_n(30)
        465
        >>> calculate_sum_to_n(100)
        5050
        >>> calculate_sum_to_n(1)
        1
    """
    # Gauss's formula: n * (n + 1) / 2, kept in integer arithmetic.
    return n * (n + 1) // 2
def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
    """Build the graph required to fetch the attention weights.

    Args:
      hparams_set: HParams set to build the model with.
      model_name: Name of model.
      data_dir: Path to directory containing training data.
      problem_name: Name of problem.
      beam_size: (Optional) Number of beams to use when decoding a translation.
        If set to 1 (default) then greedy decoding is used.

    Returns:
      Tuple of (
        inputs: Input placeholder to feed in ids to be translated.
        targets: Targets placeholder to feed to translation when fetching
          attention weights.
        samples: Tensor representing the ids of the translation.
        att_mats: Tensors representing the attention weights.
      )
    """
    hparams = trainer_lib.create_hparams(hparams_set, data_dir=data_dir, problem_name=problem_name)
    translate_model = registry.model(model_name)(hparams, tf.estimator.ModeKeys.EVAL)
    inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs")
    targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets")
    # Run the model once to build the training graph and populate its
    # internal attention dict.
    translate_model({"inputs": inputs, "targets": targets, })
    # Must be called after building the training graph, so that the dict will
    # have been filled with the attention tensors. BUT before creating the
    # inference graph otherwise the dict will be filled with tensors from
    # inside a tf.while_loop from decoding and are marked unfetchable.
    att_mats = get_att_mats(translate_model)
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        samples = translate_model.infer({"inputs": inputs, }, beam_size=beam_size)["outputs"]
    return inputs, targets, samples, att_mats
def decode_conjure_union_type(cls, obj, conjure_type):
    """Decodes json into a conjure union type.

    Args:
        obj: the json object to decode
        conjure_type: a class object which is the union type
            we're decoding into
    Returns:
        An instance of type conjure_type.
    Raises:
        ValueError: if the discriminator does not match any union option.
    """
    type_of_union = obj["type"]  # type: str
    # Locate the union option whose identifier matches the discriminator.
    for attr, conjure_field in conjure_type._options().items():
        if conjure_field.identifier == type_of_union:
            attribute = attr
            conjure_field_definition = conjure_field
            break
    else:
        raise ValueError("unknown union type {0} for {1}".format(type_of_union, conjure_type))
    deserialized = {}  # type: Dict[str, Any]
    if type_of_union not in obj or obj[type_of_union] is None:
        # Missing/None payload: delegate null handling to the class hook.
        cls.check_null_field(obj, deserialized, conjure_field_definition)
    else:
        value = obj[type_of_union]
        field_type = conjure_field_definition.field_type
        deserialized[attribute] = cls.do_decode(value, field_type)
    return conjure_type(**deserialized)
def list_actions(self):
    """Returns the registered actions.

    :return: Actions list, sorted, each entry being the namespace path
        joined with the action name.
    :rtype: list
    """
    actions = []
    # Each registry entry yields (namespace path, action name, action object).
    for path, actionName, action in self:
        actions.append(self.__namespace_splitter.join(itertools.chain(path, (actionName,))))
    return sorted(actions)
def PopAttributeContainer(self):
    """Pops a serialized attribute container from the list.

    Returns:
      bytes: serialized attribute container data, or None when the list
          is empty.
    """
    if not self._list:
        return None
    serialized_data = self._list.pop(0)
    # Keep the running byte total in sync with the list contents.
    self.data_size -= len(serialized_data)
    return serialized_data
def request_get_next(request, default_next):
    """Resolve the "next" URL for a request.

    Lookup order: POST['next'], then GET['next'], then the HTTP Referer
    header, finally ``default_next``.
    """
    for candidate in (request.POST.get('next'),
                      request.GET.get('next'),
                      request.META.get('HTTP_REFERER')):
        if candidate:
            return candidate
    return default_next
def _process_tz(self, dt, naive, tz):
    """Process timezone casting and conversion.

    :param dt: the datetime being processed.
    :param naive: timezone to assume for naive datetimes; may be a tzinfo,
        a timezone-name string, 'local', 'naive', or None.
    :param tz: target timezone to convert the result to; same forms as
        ``naive``; 'naive' strips tzinfo from the result.
    :return: the localized and/or converted datetime.
    """
    def _tz(t):
        # Normalize a timezone spec into a tzinfo; None/'naive' pass through.
        if t in (None, 'naive'):
            return t
        if t == 'local':
            if __debug__ and not localtz:
                raise ValueError("Requested conversion to local timezone, but `localtz` not installed.")
            t = localtz
        if not isinstance(t, tzinfo):
            if __debug__ and not localtz:
                raise ValueError("The `pytz` package must be installed to look up timezone: " + repr(t))
            t = get_tz(t)
        if not hasattr(t, 'normalize') and get_tz:
            # Attempt to handle non-pytz tzinfo.
            t = get_tz(t.tzname(dt))
        return t

    naive = _tz(naive)
    tz = _tz(tz)
    if not dt.tzinfo and naive:
        # Attach the assumed zone to the naive datetime (pytz-style
        # localize when available, plain replace otherwise).
        if hasattr(naive, 'localize'):
            dt = naive.localize(dt)
        else:
            dt = dt.replace(tzinfo=naive)
    if not tz:
        return dt
    if hasattr(tz, 'normalize'):
        # pytz zone: normalize handles DST transitions after astimezone.
        dt = tz.normalize(dt.astimezone(tz))
    elif tz == 'naive':
        dt = dt.replace(tzinfo=None)
    else:
        dt = dt.astimezone(tz)
        # Warning: this might not always be entirely correct!
    return dt
def rel_humid_from_db_wb(db_temp, wet_bulb, b_press=101325):
    """Relative Humidity (%) at db_temp (C), wet_bulb (C), and Pressure b_press (Pa).

    :param db_temp: dry-bulb temperature in degrees C.
    :param wet_bulb: wet-bulb temperature in degrees C.
    :param b_press: barometric pressure in Pa (standard atmosphere default).
    :return: relative humidity as a percentage.
    """
    # Saturation pressures at the dry-bulb and wet-bulb temperatures
    # (helper takes Kelvin, hence the +273.15 conversion).
    sat_press_db = saturated_vapor_pressure(db_temp + 273.15)
    sat_press_wb = saturated_vapor_pressure(wet_bulb + 273.15)
    # Partial vapor pressure via the psychrometric relation.
    partial_press = sat_press_wb - (b_press * 0.000662 * (db_temp - wet_bulb))
    # Relative humidity is the ratio of partial to saturation pressure.
    return (partial_press / sat_press_db) * 100
def format_sympy_expr(sympy_expr, functions=None):
    """Convert sympy expression into a string which can be encoded.

    Args:
      sympy_expr: Any sympy expression tree or string.
      functions: Defines special functions. A dict mapping human readable
        string names, like "log", "exp", "sin", "cos", etc., to single
        chars. Each function gets a unique token, like "L" for "log".

    Returns:
      A string representation of the expression suitable for encoding as a
      sequence input: whitespace removed and function names replaced by
      their single-char tokens.
    """
    if functions is None:
        functions = {}
    # str() works for both sympy expression trees and plain strings.
    str_expr = str(sympy_expr)
    result = str_expr.replace(" ", "")
    # dict.items() replaces the Python 2 six.iteritems shim.
    for fn_name, char in functions.items():
        result = result.replace(fn_name, char)
    return result
def format_pair(input_pair):
    """Format a currency pair into kraken's 8-character pair format.

    The API expects ``XXBTXLTC``-style pairs where crypto currencies are
    prefixed with ``X`` and fiat currencies with ``Z``, and bitcoin is
    spelled ``XBT``.  Even-length inputs are split in the middle; inputs
    of uneven length are split on their middle character (so 'BTC-LTC'
    becomes BTC, LTC).  A handful of pairs (``BCHEUR``, ``DASHUSD``,
    ``USDTZUSD``, ...) are served by kraken verbatim and are returned
    uppercased without reformatting.

    :param input_pair: str
    :return: str
    """
    # Pairs kraken serves as-is; see https://api.kraken.com/0/public/AssetPairs
    format_exceptions = ['BCHEUR', 'BCHUSD', 'BCHXBT', 'DASHEUR', 'DASHUSD',
                         'DASHXBT', 'EOSETH', 'EOSXBT', 'GNOETH', 'GNOXBT',
                         'USDTZUSD']
    upper_pair = input_pair.upper()
    if upper_pair in format_exceptions:
        return upper_pair

    midpoint = len(input_pair) // 2
    if len(input_pair) % 2:
        # Odd length: split on the middle (separator) character.
        base_cur, quote_cur = input_pair.split(input_pair[midpoint])
    else:
        base_cur, quote_cur = input_pair[:midpoint], input_pair[midpoint:]

    def add_prefix(symbol):
        symbol = symbol.lower()
        if any(marker in symbol for marker in ['usd', 'eur', 'jpy', 'gbp', 'cad']):
            # Appears to be a fiat currency: 'z' prefix.
            if not symbol.startswith('z'):
                symbol = 'z' + symbol
        else:
            # Appears to be a crypto currency: kraken calls bitcoin XBT.
            if 'btc' in symbol:
                symbol = symbol.replace('btc', 'xbt')
            if not symbol.startswith('x') or len(symbol) == 3:
                symbol = 'x' + symbol
        return symbol

    return (add_prefix(base_cur) + add_prefix(quote_cur)).upper()
def getTickTock(self, vals):
    '''Get a tick, tock time pair.

    Args:
        vals (list): A pair of values to norm.

    Returns:
        (int, int): A ordered pair of integers.

    Raises:
        s_exc.BadTypeValu: if the first value cannot be normalized.
    '''
    val0, val1 = vals
    try:
        _tick = self._getLiftValu(val0)
    except ValueError as e:
        raise s_exc.BadTypeValu(name=self.name, valu=val0,
                                mesg='Unable to process the value for val0 in getTickTock.')
    # sortval marks cases where the pair may come out unordered and must
    # be sorted before returning.
    sortval = False
    if isinstance(val1, str):
        if val1.startswith(('+-', '-+')):
            # Symmetric window: tick/tock are the delta on either side.
            sortval = True
            delt = s_time.delta(val1[2:])
            # order matters
            _tock = _tick + delt
            _tick = _tick - delt
        elif val1.startswith('-'):
            # Negative relative offset: tock may land before tick.
            sortval = True
            _tock = self._getLiftValu(val1, relto=_tick)
        else:
            _tock = self._getLiftValu(val1, relto=_tick)
    else:
        _tock = self._getLiftValu(val1, relto=_tick)
    if sortval and _tick >= _tock:
        tick = min(_tick, _tock)
        tock = max(_tick, _tock)
        return tick, tock
    return _tick, _tock
def port_provisioned(port_id):
    """Returns true if port still exists.

    :param port_id: Neutron port UUID to look up.
    :return: True when a Port row with this id exists in the database.
    """
    session = db.get_reader_session()
    with session.begin():
        port_model = models_v2.Port
        # count() coerced to bool: existence check without loading rows.
        res = bool(session.query(port_model).filter(port_model.id == port_id).count())
    return res
def display_context(doc):
    """Create a Jinja context for display.

    Builds a plain-dict context from the sections of a metatab document
    (every section except 'schema'), then post-processes it:

    * strips '@value' entries and deletes empty/scalar sections;
    * guarantees the 'documentation' and 'contacts' sections exist;
    * loads inline README-style markdown referenced from the
      documentation section into ``context['inline_doc']``;
    * splits documentation terms into ``doc_links``, ``images`` and
      ``notes`` and removes the raw 'documentation' section;
    * normalizes contacts via ``process_contact`` and wraps the scalar
      'origin' contact into a one-element list;
    * normalizes 'references' and 'resources' entries into lists of dicts;
    * collects Root.Distribution / Root.Giturl into ``distributions``.

    :param doc: a metatab document (iterable of sections) -- assumed to
        expose ``package_url``, ``find`` and ``get_value``; TODO confirm
        exact type against callers.
    :return: dict suitable for rendering with a Jinja template.
    """
    from rowgenerators.exceptions import DownloadError
    # One entry per section, keyed by lower-cased section name.
    context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'}
    # import json
    # print(json.dumps(context, indent=4))
    mandatory_sections = ['documentation', 'contacts']
    # Remove section names
    deletes = []
    for k, v in context.items():
        try:
            del v['@value']
        except KeyError:
            pass  # Doesn't have the value
        except TypeError:
            # Is actually completely empty, and has a scalar value. Delete and re-create
            deletes.append(k)
        if isinstance(v, str):  # Shouldn't ever happen, but who knows?
            deletes.append(k)
    # Deletions are deferred so the dict is not mutated while iterating.
    for d in deletes:
        try:
            del context[d]
        except KeyError:
            # Fails in TravisCI, no idea why.
            pass
    # Ensure downstream code can index these sections unconditionally.
    for ms in mandatory_sections:
        if not ms in context:
            context[ms] = {}
    # Load inline documentation
    inline = ''
    for d in context.get('documentation', {}).get('documentation', []):
        u = parse_app_url(d['url'])
        if u.target_format == 'md':  # The README.md file
            # NOTE: reset on every md entry, so only the last one's content
            # (plus its own reads) survives -- presumably intentional.
            inline = ''
            if u.proto == 'file':
                # File really ought to be relative
                t = doc.package_url.join_target(u).get_resource().get_target()
            else:
                try:
                    t = u.get_resource().get_target()
                except DownloadError as e:
                    raise e
            try:
                with open(t.fspath) as f:
                    inline += f.read()
            except FileNotFoundError:
                pass
            # Will cause it to be ignored in next section
            del d['title']
    # Strip off the leading title, if it exists, because it will be re-applied
    # by the templates
    import re
    lines = inline.strip().splitlines()
    if lines and lines[0].startswith('# '):
        lines = lines[1:]
    context['inline_doc'] = '\n'.join(lines)
    # Convert doc section
    doc_links = {}
    images = {}
    for term_name, terms in context['documentation'].items():
        if term_name == 'note':
            context['notes'] = terms
        else:
            for i, term in enumerate(terms):
                try:
                    if term_name == 'image':
                        images[term['title']] = term
                    else:
                        doc_links[term['title']] = term
                except AttributeError:
                    # A scalar.
                    # There should not be any scalars in the documentation section
                    pass
                except KeyError:
                    # ignore entries without titles
                    pass
                except TypeError:
                    # Also probably a scalar
                    pass
    context['doc_links'] = doc_links
    context['images'] = images
    del context['documentation']
    # Update contacts
    origin = None
    for term_name, terms in context['contacts'].items():
        if isinstance(terms, dict):
            # Origin is a scalar in root, must be converted to sequence here
            origin = terms
        else:
            for t in terms:
                try:
                    t.update(process_contact(t))
                except AttributeError:
                    pass  # Probably got a scalar
    if origin:
        origin.update(process_contact(origin))
        context['contacts']['origin'] = [origin]
    # For resources and references, convert scalars into lists of dicts, which are the
    # default for Datafiles and References.
    for section in ('references', 'resources'):
        for term_key, term_vals in context.get(section, {}).items():
            if isinstance(term_vals, dict):
                if '@value' in term_vals:
                    term_vals['url'] = term_vals['@value']
                    del term_vals['@value']
                new_term_vals = [term_vals]
            elif isinstance(term_vals, list):
                # Already in list form; leave unchanged.
                new_term_vals = None
            else:
                # Bare scalar: treat it as both the url and the name.
                new_term_vals = [{'url': term_vals, 'name': term_vals}]
            if new_term_vals:
                context[section][term_key] = new_term_vals
    context['distributions'] = {}
    for dist in doc.find('Root.Distribution'):
        context['distributions'][dist.type] = dist.value
    if doc.find('Root.Giturl'):
        context['distributions']['source'] = doc.get_value('Root.Giturl')
    return context
def evaluateRforces(Pot, R, z, phi=None, t=0., v=None):
    """
    NAME:

       evaluateRforces

    PURPOSE:

       convenience function to evaluate a possible sum of potentials

    INPUT:

       Pot - a potential or list of potentials

       R - cylindrical Galactocentric distance (can be Quantity)

       z - distance above the plane (can be Quantity)

       phi - azimuth (optional; can be Quantity)

       t - time (optional; can be Quantity)

       v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)

    OUTPUT:

       F_R(R, z, phi, t)

    HISTORY:

       2010-04-16 - Written - Bovy (NYU)

       2018-03-16 - Added velocity input for dissipative forces - Bovy (UofT)
    """
    # Thin public wrapper: all evaluation logic lives in _evaluateRforces.
    return _evaluateRforces(Pot, R, z, phi=phi, t=t, v=v)
def _get_scalar_names ( self , limit = None ) :
"""Only give scalar options that have a varying range""" | names = [ ]
if limit == 'point' :
inpnames = list ( self . input_dataset . point_arrays . keys ( ) )
elif limit == 'cell' :
inpnames = list ( self . input_dataset . cell_arrays . keys ( ) )
else :
inpnames = self . input_dataset . scalar_names
for name in inpnames :
arr = self . input_dataset . get_scalar ( name )
rng = self . input_dataset . get_data_range ( name )
if arr is not None and arr . size > 0 and ( rng [ 1 ] - rng [ 0 ] > 0.0 ) :
names . append ( name )
try :
self . _last_scalars = names [ 0 ]
except IndexError :
pass
return names |
def download_data(request_list, redownload=False, max_threads=None):
    """Download all requested data or read data from disk, if already downloaded and available and redownload is
    not required.

    :param request_list: list of DownloadRequests
    :type request_list: list of DownloadRequests
    :param redownload: if ``True``, download again the data, although it was already downloaded and is available
        on the disk. Default is ``False``.
    :type redownload: bool
    :param max_threads: number of threads to use when downloading data; default is ``max_threads=None`` which
        by default uses the number of processors on the system
    :type max_threads: int
    :return: list of Futures holding downloaded data, where each element in the list corresponds to an element
        in the download request list.
    :rtype: list[concurrent.futures.Future]
    """
    # Marks which requests actually need a network download vs. a disk read.
    _check_if_must_download(request_list, redownload)
    LOGGER.debug("Using max_threads=%s for %s requests", max_threads, len(request_list))
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor:
        futures = []
        for request in request_list:
            futures.append(executor.submit(execute_download_request, request))
        return futures
def loadfile(method=True, writable=False, create=False):
    """A decorator for functions taking a `filething` as a first argument.

    Passes a FileThing instance as the first argument to the wrapped function.

    Args:
        method (bool): If the wrapped function is a method
        writable (bool): If a filename is passed opens the file readwrite, if
            passed a file object verifies that it is writable.
        create (bool): If passed a filename that does not exist will create
            a new empty file.
    """
    def _split_filething(args, kwargs):
        # The filething may arrive positionally or as filename=/fileobj= kwargs;
        # pop the kwargs so they are not forwarded to the wrapped function.
        filething = args[0] if args else None
        filename = kwargs.pop("filename", None)
        fileobj = kwargs.pop("fileobj", None)
        return filething, filename, fileobj, args[1:], kwargs

    def wrap(func):
        if method:
            @wraps(func)
            def method_wrapper(self, *args, **kwargs):
                filething, filename, fileobj, rest, kw = _split_filething(args, kwargs)
                with _openfile(self, filething, filename, fileobj, writable, create) as h:
                    return func(self, h, *rest, **kw)
            return method_wrapper

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            filething, filename, fileobj, rest, kw = _split_filething(args, kwargs)
            with _openfile(None, filething, filename, fileobj, writable, create) as h:
                return func(h, *rest, **kw)
        return func_wrapper

    return wrap
def loads(text):
    """Read CCSDS from a string, and provide the beyond class corresponding;
    Orbit or list of Orbit if it's an OPM, Ephem if it's an OEM.

    Args:
        text (str):
    Return:
        Orbit or Ephem
    Raise:
        ValueError: when the text is not a recognizable CCSDS format
    """
    # Dispatch on the version header that starts every CCSDS file.
    if text.startswith("CCSDS_OEM_VERS"):
        return _read_oem(text)
    if text.startswith("CCSDS_OPM_VERS"):
        return _read_opm(text)
    raise ValueError("Unknown CCSDS type")
async def runCmdLine(self, line):
    '''
    Run a single command line.

    Args:
        line (str): Line to execute.

    Examples:
        Execute the 'woot' command with the 'help' switch:

            await cli.runCmdLine('woot --help')

    Returns:
        object: Arbitrary data from the cmd class, or None when the command
        is unknown or raised.
    '''
    if self.echoline:
        # Echo the prompt + line (used when input is not interactive).
        self.outp.printf(f'{self.cmdprompt}{line}')
    ret = None
    # First whitespace-separated token names the command.
    name = line.split(None, 1)[0]
    cmdo = self.getCmdByName(name)
    if cmdo is None:
        self.printf('cmd not found: %s' % (name,))
        return
    try:
        ret = await cmdo.runCmdLine(line)
    except s_exc.CliFini:
        # The command requested CLI shutdown.
        await self.fini()
    except asyncio.CancelledError:
        self.printf('Cmd cancelled')
    except Exception as e:
        # Show the full traceback followed by a one-line error summary;
        # command failures never propagate out of the CLI loop.
        exctxt = traceback.format_exc()
        self.printf(exctxt)
        self.printf('error: %s' % e)
    return ret
def get_doc(logger=None, plugin=None, reporthook=None):
    """Return URL to documentation. Attempt download if does not exist.

    Parameters
    ----------
    logger : obj or `None`
        Ginga logger.

    plugin : obj or `None`
        Plugin object. If given, URL points to plugin doc directly.
        If this function is called from within plugin class,
        pass ``self`` here.

    reporthook : callable or `None`
        Report hook for ``urlretrieve()``.

    Returns
    -------
    url : str or `None`
        URL to local documentation, if available; an online URL when the
        download fails; or None when a Qt toolkit displays the plugin
        docstring instead.
    """
    from ginga.GingaPlugin import GlobalPlugin, LocalPlugin
    # Map the plugin instance to the RTD page family it is documented under.
    if isinstance(plugin, GlobalPlugin):
        plugin_page = 'plugins_global'
        plugin_name = str(plugin)
    elif isinstance(plugin, LocalPlugin):
        plugin_page = 'plugins_local'
        plugin_name = str(plugin)
    else:
        plugin_page = None
        plugin_name = None
    try:
        index_html = _download_rtd_zip(reporthook=reporthook)
    except Exception as e:
        # Download failed, use online resource
        url = 'https://ginga.readthedocs.io/en/latest/'
        if plugin_name is not None:
            # NOTE(review): `toolkit` is a module-level global -- presumably
            # the active GUI toolkit; confirm where it is imported.
            if toolkit.family.startswith('qt'):
                # This displays plugin docstring.
                url = None
            else:
                # This redirects to online doc.
                url += 'manual/{}/{}.html'.format(plugin_page, plugin_name)
        if logger is not None:
            logger.error(str(e))
    else:
        # Use local resource
        pfx = 'file:'
        url = '{}{}'.format(pfx, index_html)
        # https://github.com/rtfd/readthedocs.org/issues/2803
        if plugin_name is not None:
            # Anchor straight to the plugin's section of the local page.
            url += '#{}'.format(plugin_name)
    return url
def generate_map_from_dataset(self, l_dataset):
    """Create a map file (in the standard CSV format) based on
    columns of a dataset.

    1. read column names, lookup names in list
    2. read column content, get highest match of distinct values
       from ontology lists (eg, Years, countries, cities, ages)

    :param l_dataset: dataset object exposing ``get_header``,
        ``force_to_string``, ``get_distinct_values_from_cols`` and
        ``count_unique_values``.
    :return: list of ``key=value`` map entry strings.
    """
    # Fix: removed a leftover debug print(headers) that polluted stdout.
    l_map = []
    headers = l_dataset.get_header()
    # Empty header cells denote unused columns and are skipped throughout.
    for col_num, col in enumerate(headers):
        if col != '':
            l_map.append('column:name:' + str(col_num) + '=' + l_dataset.force_to_string(col))
    for col_num, col in enumerate(headers):
        if col != '':
            vals = l_dataset.get_distinct_values_from_cols([col])
            l_map.append('column:count:distinct:' + col + '=' + str(len(vals[0])))
    for col_num, col in enumerate(headers):
        if col != '':
            # Top 10 most frequent values for each column.
            col_vals = l_dataset.count_unique_values(col_num, col, 10)
            for val_num, v in enumerate(col_vals):
                l_map.append('column:topvalues:' + col + ':' + str(val_num) + '=' + v)
    return l_map
async def unban_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer) -> base.Boolean:
    """Use this method to unban a previously kicked user in a supergroup or channel.
    The user will not return to the group or channel automatically, but will be able to join via link, etc.
    The bot must be an administrator for this to work.

    Source: https://core.telegram.org/bots/api#unbanchatmember

    :param chat_id: Unique identifier for the target group or username of the target supergroup or channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param user_id: Unique identifier of the target user
    :type user_id: :obj:`base.Integer`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    # locals() here captures self, chat_id and user_id for the payload.
    payload = generate_payload(**locals())
    return await self.request(api.Methods.UNBAN_CHAT_MEMBER, payload)
def DisplayAccountTree(account, accounts, links, depth=0):
    """Displays an account tree.

    Args:
      account: dict The account to display.
      accounts: dict Map from customerId to account.
      links: dict Map from customerId to child links.
      depth: int Depth of the current account in the tree.
    """
    # Two dashes of indentation per tree level.
    prefix = '-' * depth * 2
    # Fix: use the print() function (the statement form was Python 2 only,
    # while the rest of this file uses Python 3 syntax). The call form is
    # also valid, with identical output, under Python 2.
    print('%s%s, %s' % (prefix, account['customerId'], account['name']))
    if account['customerId'] in links:
        # Recurse into each child account, one level deeper.
        for child_link in links[account['customerId']]:
            child_account = accounts[child_link['clientCustomerId']]
            DisplayAccountTree(child_account, accounts, links, depth + 1)
def iter_all_volumes(self):
    '''Yield every stored volume, normalized and stripped of its score.'''
    for stored in self._db.iterate_all():
        volume = self.normalize_volume(stored)
        # The internal ranking score is not part of the public volume dict.
        volume.pop('score')
        yield volume
def NotificationsDelete(self, notification_id):
    """Delete a notification from CommonSense.

    @param notification_id (int) - Notification id of the notification to delete.

    @return (bool) - Boolean indicating whether NotificationsDelete was successful.
    """
    # Guard-clause form: record the error and bail out on API failure.
    if not self.__SenseApiCall__('/notifications/{0}.json'.format(notification_id), 'DELETE'):
        self.__error__ = "api call unsuccessful"
        return False
    return True
def info(name):
    '''
    Return information about a group

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    '''
    if salt.utils.stringutils.contains_whitespace(name):
        raise SaltInvocationError('Group name cannot contain whitespace')
    # getgrnam seems to cache weirdly, so don't use it -- scan getgrall()
    # for the matching entry instead.
    for group_entry in grp.getgrall():
        if group_entry.gr_name == name:
            return _format_info(group_entry)
    # No such group: empty dict rather than an error.
    return {}
def partition_query(
    self,
    sql,
    params=None,
    param_types=None,
    partition_size_bytes=None,
    max_partitions=None,
):
    """Perform a ``PartitionQuery`` API request.

    :type sql: str
    :param sql: SQL query statement

    :type params: dict, {str -> column value}
    :param params: values for parameter replacement. Keys must match
        the names used in ``sql``.

    :type param_types: dict[str -> Union[dict, .types.Type]]
    :param param_types:
        (Optional) maps explicit types for one or more param values;
        required if parameters are passed.

    :type partition_size_bytes: int
    :param partition_size_bytes:
        (Optional) desired size for each partition generated. The service
        uses this as a hint, the actual partition size may differ.

    :type max_partitions: int
    :param max_partitions:
        (Optional) desired maximum number of partitions generated. The
        service uses this as a hint, the actual number of partitions may
        differ.

    :rtype: iterable of bytes
    :returns: a sequence of partition tokens

    :raises ValueError:
        for single-use snapshots, or if a transaction ID is
        already associated with the snapshot.
    """
    # Partitioned operations only make sense on multi-use snapshots with
    # an already-begun transaction.
    if not self._multi_use:
        raise ValueError("Cannot use single-use snapshot.")
    if self._transaction_id is None:
        raise ValueError("Transaction not started.")
    if params is not None:
        if param_types is None:
            raise ValueError("Specify 'param_types' when passing 'params'.")
        # Convert the plain dict into a protobuf Struct of typed values.
        params_pb = Struct(fields={key: _make_value_pb(value) for key, value in params.items()})
    else:
        params_pb = None
    database = self._session._database
    api = database.spanner_api
    metadata = _metadata_with_prefix(database.name)
    transaction = self._make_txn_selector()
    partition_options = PartitionOptions(
        partition_size_bytes=partition_size_bytes, max_partitions=max_partitions)
    response = api.partition_query(
        session=self._session.name,
        sql=sql,
        transaction=transaction,
        params=params_pb,
        param_types=param_types,
        partition_options=partition_options,
        metadata=metadata,
    )
    # Callers iterate tokens and execute each partition separately.
    return [partition.partition_token for partition in response.partitions]
def store(self, addr, data, size=None, condition=None, add_constraints=None, endness=None, action=None, inspect=True, priv=None, disable_actions=False):
    """
    Stores content into memory.

    :param addr:        A claripy expression representing the address to store at.
    :param data:        The data to store (claripy expression or something convertable to a claripy expression).
    :param size:        A claripy expression representing the size of the data to store.

    The following parameters are optional.

    :param condition:       A claripy expression representing a condition if the store is conditional.
    :param add_constraints: Add constraints resulting from the merge (default: True).
    :param endness:         The endianness for the data.
    :param action:          A SimActionData to fill out with the final written value and constraints.
    :param bool inspect:    Whether this store should trigger SimInspect breakpoints or not.
    :param priv:            If set, pushed onto the scratch privilege stack for the duration of the store.
    :param bool disable_actions: Whether this store should avoid creating SimActions or not. When set to False,
                            state options are respected.
    """
    # Only fire inspect breakpoints if the state supports them.
    _inspect = inspect and self.state.supports_inspect
    if priv is not None:
        self.state.scratch.push_priv(priv)
    # Unwrap SimActionObjects (if any) down to raw claripy ASTs.
    addr_e = _raw_ast(addr)
    data_e = _raw_ast(data)
    size_e = _raw_ast(size)
    condition_e = _raw_ast(condition)
    add_constraints = True if add_constraints is None else add_constraints
    if isinstance(addr, str):
        # A string address is a named location (e.g. a register name);
        # resolve it to a concrete offset and default size.
        named_addr, named_size = self._resolve_location_name(addr, is_write=True)
        addr = named_addr
        addr_e = addr
        if size is None:
            size = named_size
            size_e = size
    if isinstance(data_e, str):
        data_e = data_e.encode()
        l.warning("Storing unicode string encoded as utf-8. Did you mean to use a bytestring?")
    # store everything as a BV
    data_e = self._convert_to_ast(data_e, size_e if isinstance(size_e, int) else None)
    # zero extend if size is greater than len(data_e)
    stored_size = size_e * self.state.arch.byte_width if isinstance(size_e, int) else self.state.arch.bits
    if size_e is not None and self.category == 'reg' and len(data_e) < stored_size:
        data_e = data_e.zero_extend(stored_size - len(data_e))
    # Normalize size to a bitvector of the architecture's width.
    if type(size_e) is int:
        size_e = self.state.solver.BVV(size_e, self.state.arch.bits)
    elif size_e is None:
        size_e = self.state.solver.BVV(data_e.size() // self.state.arch.byte_width, self.state.arch.bits)
    if len(data_e) % self.state.arch.byte_width != 0:
        raise SimMemoryError("Attempting to store non-byte data to memory")
    if not size_e.symbolic and (len(data_e) < size_e * self.state.arch.byte_width).is_true():
        raise SimMemoryError("Provided data is too short for this memory store")
    if _inspect:
        # BP_BEFORE breakpoints may rewrite any of the store parameters.
        if self.category == 'reg':
            self.state._inspect(
                'reg_write', BP_BEFORE,
                reg_write_offset=addr_e, reg_write_length=size_e,
                reg_write_expr=data_e, reg_write_condition=condition_e,
            )
            addr_e = self.state._inspect_getattr('reg_write_offset', addr_e)
            size_e = self.state._inspect_getattr('reg_write_length', size_e)
            data_e = self.state._inspect_getattr('reg_write_expr', data_e)
            condition_e = self.state._inspect_getattr('reg_write_condition', condition_e)
        elif self.category == 'mem':
            self.state._inspect(
                'mem_write', BP_BEFORE,
                mem_write_address=addr_e, mem_write_length=size_e,
                mem_write_expr=data_e, mem_write_condition=condition_e,
            )
            addr_e = self.state._inspect_getattr('mem_write_address', addr_e)
            size_e = self.state._inspect_getattr('mem_write_length', size_e)
            data_e = self.state._inspect_getattr('mem_write_expr', data_e)
            condition_e = self.state._inspect_getattr('mem_write_condition', condition_e)
    # if the condition is false, bail
    if condition_e is not None and self.state.solver.is_false(condition_e):
        if priv is not None:
            self.state.scratch.pop_priv()
        return
    if (o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and isinstance(addr_e, claripy.ast.Base) and addr_e.uninitialized):
        self._constrain_underconstrained_index(addr_e)
    request = MemoryStoreRequest(addr_e, data=data_e, size=size_e, condition=condition_e, endness=endness)
    try:
        # will use state_plugins/symbolic_memory.py
        self._store(request)
    except SimSegfaultError as e:
        # Record the faulting address before propagating.
        e.original_addr = addr_e
        raise
    if _inspect:
        if self.category == 'reg':
            self.state._inspect('reg_write', BP_AFTER)
        elif self.category == 'mem':
            self.state._inspect('mem_write', BP_AFTER)
        # tracer uses address_concretization_add_constraints
        add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
    if add_constraints and len(request.constraints) > 0:
        self.state.add_constraints(*request.constraints)
    if not disable_actions:
        # Create a SimAction automatically when AUTO_REFS is on and none was given.
        if request.completed and o.AUTO_REFS in self.state.options and action is None and not self._abstract_backer:
            ref_size = size * self.state.arch.byte_width if size is not None else data_e.size()
            region_type = self.category
            if region_type == 'file':
                # Special handling for files to keep compatibility
                # We may use some refactoring later
                region_type = self.id
            action = SimActionData(self.state, region_type, 'write', addr=addr_e, data=data_e, size=ref_size, condition=condition)
            self.state.history.add_action(action)
        if request.completed and action is not None:
            # Fill the action with the results of the completed store.
            action.actual_addrs = request.actual_addresses
            action.actual_value = action._make_object(request.stored_values[0])  # TODO
            if len(request.constraints) > 0:
                action.added_constraints = action._make_object(self.state.solver.And(*request.constraints))
            else:
                action.added_constraints = action._make_object(self.state.solver.true)
    if priv is not None:
        self.state.scratch.pop_priv()
def merge(self, cluster_ids=None, to=None):
    """Merge the selected clusters.

    :param cluster_ids: clusters to merge; defaults to the current selection.
    :param to: optional target cluster id for the merge.
    """
    ids = self.selected if cluster_ids is None else cluster_ids
    # Nothing to do with zero or one cluster.
    if not ids or len(ids) <= 1:
        return
    self.clustering.merge(ids, to=to)
    self._global_history.action(self.clustering)
def get_box_comments(self, box_key):
    '''Gets comments in a box with the provided attributes.

    Args:
        box_key: key for box
    return (status code, list of comment dicts)
    '''
    # Build <api_uri>/<boxes_suffix>/<box_key>/<comments_suffix>.
    uri = '{0}/{1}/{2}/{3}'.format(
        self.api_uri, self.boxes_suffix, box_key, self.comments_suffix)
    return self._req('get', uri)
async def try_trigger_before_first_request_functions ( self ) -> None :
"""Trigger the before first request methods .""" | if self . _got_first_request :
return
# Reverse the teardown functions , so as to match the expected usage
self . teardown_appcontext_funcs = list ( reversed ( self . teardown_appcontext_funcs ) )
for key , value in self . teardown_request_funcs . items ( ) :
self . teardown_request_funcs [ key ] = list ( reversed ( value ) )
for key , value in self . teardown_websocket_funcs . items ( ) :
self . teardown_websocket_funcs [ key ] = list ( reversed ( value ) )
async with self . _first_request_lock :
if self . _got_first_request :
return
for function in self . before_first_request_funcs :
await function ( )
self . _got_first_request = True |
def roll(self, count=0):
    '''Roll some dice!

    :param count: [0] Return list of sums
    :return: A single sum or list of ``count`` sums
    '''
    # Delegate to the base class roll, supplying this subclass's transform
    # callable (self._func) to be applied to the results.
    return super(FuncRoll, self).roll(count, self._func)
def parse_soap_enveloped_saml_thingy(text, expected_tags):
    """Parses a SOAP enveloped SAML thing and returns the thing as
    a string.

    :param text: The SOAP object as XML string
    :param expected_tags: What the tag of the SAML thingy is expected to be.
    :return: SAML thingy as a string
    """
    envelope = defusedxml.ElementTree.fromstring(text)
    # Make sure it's a SOAP message
    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
    assert len(envelope) >= 1
    body_tag = '{%s}Body' % soapenv.NAMESPACE
    body = next((part for part in envelope if part.tag == body_tag), None)
    if body is None:
        return ""
    # The Body must contain exactly one child: the SAML payload.
    assert len(body) == 1
    saml_part = body[0]
    if saml_part.tag not in expected_tags:
        raise WrongMessageType("Was '%s' expected one of %s" % (saml_part.tag, expected_tags))
    return ElementTree.tostring(saml_part, encoding="UTF-8")
def getresource(self, schemacls, name):
    """Get a resource from a builder name.

    :param type schemacls: waited schema class.
    :param str name: builder name to use.

    :return: resource returned by the right builder.getresource(schema).
    """
    # Delegate to the module-level schema factory singleton.
    return _SCHEMAFACTORY.getresource(schemacls=schemacls, name=name)
def cmd_reboot(self, args):
    '''reboot autopilot'''
    # 'reboot bootloader' holds the autopilot in its bootloader.
    hold_in_bootloader = bool(args) and args[0] == 'bootloader'
    if hold_in_bootloader:
        self.master.reboot_autopilot(True)
    else:
        self.master.reboot_autopilot()
def push(**kwargs):
    '''Force synchronization of directory.'''
    output, err = cli_syncthing_adapter.refresh(**kwargs)
    if output:
        click.echo("%s" % output, err=err)
    # In verbose mode, poll the adapter and render a progress bar until
    # every device has finished synchronizing.
    if kwargs['verbose'] and not err:
        with click.progressbar(iterable=None, length=100, label='Synchronizing') as bar:
            device_num = 0
            max_devices = 1
            prev_percent = 0
            while True:
                kwargs['progress'] = True
                kwargs['device_num'] = device_num
                data, err = cli_syncthing_adapter.refresh(**kwargs)
                device_num = data['device_num']
                max_devices = data['max_devices']
                # Only advance the bar by the delta since the last update.
                cur_percent = math.floor(data['percent']) - prev_percent
                if cur_percent > 0:
                    bar.update(cur_percent)
                    prev_percent = math.floor(data['percent'])
                if device_num < max_devices:
                    # More devices pending; poll again shortly.
                    time.sleep(0.5)
                else:
                    break
def guard_submit(obj):
    """Returns if 'submit' transition can be applied to the worksheet passed in.
    By default, the target state for the 'submit' transition for a worksheet is
    'to_be_verified', so this guard returns true if all the analyses assigned
    to the worksheet have already been submitted. Those analyses that are in a
    non-valid state (cancelled, inactive) are dismissed in the evaluation, but
    at least one analysis must be in an active state (and submitted) for this
    guard to return True. Otherwise, always returns False.

    Note this guard depends entirely on the current status of the children.
    """
    analyses = obj.getAnalyses()
    if not analyses:
        # An empty worksheet cannot be submitted
        return False
    can_submit = False
    # Fix: reuse the analyses already fetched above instead of issuing a
    # second, redundant obj.getAnalyses() catalog query.
    for analysis in analyses:
        # Dismiss analyses that are not active
        if not api.is_active(analysis):
            continue
        # Dismiss analyses that have been rejected or retracted
        if api.get_workflow_status_of(analysis) in ["rejected", "retracted"]:
            continue
        # Worksheet cannot be submitted if there is one analysis not submitted
        can_submit = ISubmitted.providedBy(analysis)
        if not can_submit:
            # No need to look further
            return False
    # This prevents the submission of the worksheet if all its analyses are in
    # a detached status (rejected, retracted or cancelled)
    return can_submit
def construct_sm_match_dict(self):
    """Construct the sm_match_dict.

    Reverse the key/value structure of ``self._sound_mode_dict``: each raw
    mode string (upper-cased) maps back to its matched mode name. The
    resulting dict is larger but allows direct matching via a single
    dictionary key access.
    """
    return {
        raw_mode.upper(): matched_mode
        for matched_mode, raw_modes in self._sound_mode_dict.items()
        for raw_mode in raw_modes
    }
def load_corpus(*data_file_paths):
    """Return the data contained within a specified corpus.

    Yields a ``(conversations, categories, file_path)`` tuple for each
    given corpus file path.
    """
    for file_path in data_file_paths:
        corpus_data = read_corpus(file_path)
        # Copy the conversations into a fresh list for each file.
        conversations = list(corpus_data.get('conversations', []))
        categories = corpus_data.get('categories', [])
        yield conversations, categories, file_path
def p_IfBlock(p):
    '''IfBlock : IF Expression COLON Terminator Block
               | IfBlock ELIF Expression COLON Terminator Block'''
    # NOTE: the docstring above IS the PLY/yacc grammar specification for
    # this rule -- it must not be edited as ordinary documentation.
    if isinstance(p[1], str):
        # First alternative: p[1] is the literal 'IF' token, so this is the
        # initial if-clause (no preceding IfBlock).
        p[0] = IfBlock(None, p[2], p[4], p[5])
    else:
        # Second alternative: p[1] is the IfBlock built so far, extended
        # with an ELIF clause.
        p[0] = IfBlock(p[1], p[3], p[5], p[6])
def make_systeminfoitem_hostname(hostname, condition='contains', negate=False, preserve_case=False):
    """Create a node for SystemInfoItem/hostname.

    :param hostname: hostname string used as the indicator content.
    :param condition: match condition (default 'contains').
    :param negate: whether the indicator is negated.
    :param preserve_case: whether matching is case sensitive.
    :return: A IndicatorItem represented as an Element node
    """
    # Document/search/content-type are fixed for this indicator kind.
    return ioc_api.make_indicatoritem_node(
        condition,
        'SystemInfoItem',
        'SystemInfoItem/hostname',
        'string',
        hostname,
        negate=negate,
        preserve_case=preserve_case,
    )
def get_html_desc(self, markdown_inst=None):
    """Translates the enum's 'desc' property into HTML.

    Any RDLFormatCode tags used in the description are converted to HTML,
    and the text is fed through a Markdown processor, letting designers use
    a lightweight markup language alongside SystemRDL's "RDLFormatCode".

    Parameters
    ----------
    markdown_inst: ``markdown.Markdown``
        Override the class instance of the Markdown processor.
        See the `Markdown module <https://python-markdown.github.io/reference/#Markdown>`_
        for more details.

    Returns
    -------
    str or None
        HTML formatted string, or ``None`` when there is no description.
    """
    if self._rdl_desc_ is None:
        # No description attached to this enum member.
        return None
    return rdlformatcode.rdlfc_to_html(self._rdl_desc_, md=markdown_inst)
def _validate_request_action ( self , action ) :
"""Validate action from the request json ,
according to APPLY _ CONNECTIVITY _ CHANGES _ ACTION _ REQUIRED _ ATTRIBUTE _ LIST""" | is_fail = False
fail_attribute = ""
for class_attribute in self . APPLY_CONNECTIVITY_CHANGES_ACTION_REQUIRED_ATTRIBUTE_LIST :
if type ( class_attribute ) is tuple :
if not hasattr ( action , class_attribute [ 0 ] ) :
is_fail = True
fail_attribute = class_attribute [ 0 ]
if not hasattr ( getattr ( action , class_attribute [ 0 ] ) , class_attribute [ 1 ] ) :
is_fail = True
fail_attribute = class_attribute [ 1 ]
else :
if not hasattr ( action , class_attribute ) :
is_fail = True
fail_attribute = class_attribute
if is_fail :
raise Exception ( self . __class__ . __name__ , "Mandatory field {0} is missing in ApplyConnectivityChanges request json" . format ( fail_attribute ) ) |
def assign_comment_to_book(self, comment_id, book_id):
    """Adds an existing ``Comment`` to a ``Book``.

    arg:    comment_id (osid.id.Id): the ``Id`` of the ``Comment``
    arg:    book_id (osid.id.Id): the ``Id`` of the ``Book``
    raise:  AlreadyExists - ``comment_id`` is already assigned to
            ``book_id``
    raise:  NotFound - ``comment_id`` or ``book_id`` not found
    raise:  NullArgument - ``comment_id`` or ``book_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
    mgr = self._get_provider_manager('COMMENTING', local=True)
    lookup_session = mgr.get_book_lookup_session(proxy=self._proxy)
    # Looking the book up first raises NotFound for an unknown book_id
    # before any assignment is attempted.
    lookup_session.get_book(book_id)
    self._assign_object_to_catalog(comment_id, book_id)
def validate_request(self, iface_name, func_name, params):
    """Validates that the given params match the expected length and types for this
    interface and function.

    Returns two element tuple: (bool, string)

    - `bool` - True if valid, False if not
    - `string` - Description of validation error, or None if valid

    :Parameters:
      iface_name
        Name of interface
      func_name
        Name of function
      params
        List of params to validate against this function

    NOTE(review): despite the documented tuple return above, this body
    returns None and presumably relies on validate_params to raise on
    invalid input -- confirm which contract callers actually expect.
    """
    self.interface(iface_name).function(func_name).validate_params(params)
def get_path_str(self, sep=os.path.sep, type_str=None):
    """Get path from root to this node.

    Args:
        sep: str
            One or more characters to insert between each element in the path.
            Defaults to "/" on Unix and "\\" on Windows.
        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
            information from nodes of that type.

    Returns:
        str: String describing the path from the root to this node.
    """
    # parent_gen yields from this node towards the root, so collect the
    # matching labels and reverse to get root-first order.
    labels = [
        node.label_str
        for node in self.parent_gen
        if type_str in (None, node.type_str)
    ]
    labels.reverse()
    return sep.join(labels)
def mapkeys(function, dict_):
    """Return a new dictionary where the keys come from applying ``function``
    to the keys of given dictionary.

    .. warning::

        If ``function`` returns the same value for more than one key,
        it is undefined which key will be chosen for the resulting dictionary.

    :param function: Function taking a dictionary key,
                     or None (corresponding to identity function)

    .. versionadded:: 0.0.2
    """
    ensure_mapping(dict_)
    key_func = ensure_callable(function) if function is not None else identity()
    # Preserve the input's mapping type (dict subclass, OrderedDict, ...).
    return dict_.__class__((key_func(key), value) for key, value in iteritems(dict_))
def prep_jid(nocache=False, passed_jid=None, recurse_count=0):
    '''
    Return a job id and prepare the job id directory.

    This is the function responsible for making sure jids don't collide
    (unless it is passed a jid). So do what you have to do to make sure
    that stays the case.

    :param nocache: if True, drop a ``nocache`` marker file into the jid dir
    :param passed_jid: reuse this jid instead of generating a fresh one
        (may be None or an empty string)
    :param recurse_count: internal retry counter; callers should not set it
    :raises salt.exceptions.SaltCacheError: after 5 failed attempts
    '''
    # Hard stop after 5 retries so transient filesystem trouble cannot
    # recurse forever.
    if recurse_count >= 5:
        err = 'prep_jid could not store a jid after {0} tries.'.format(recurse_count)
        log.error(err)
        raise salt.exceptions.SaltCacheError(err)
    if passed_jid is None:  # this can be a None or an empty string.
        jid = salt.utils.jid.gen_jid(__opts__)
    else:
        jid = passed_jid
    jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
    # Make sure we create the jid dir, otherwise someone else is using it,
    # meaning we need a new jid.
    if not os.path.isdir(jid_dir):
        try:
            os.makedirs(jid_dir)
        except OSError:
            # makedirs lost the race: back off briefly, then retry with a
            # newly generated jid (only when we generated it ourselves)
            time.sleep(0.1)
            if passed_jid is None:
                return prep_jid(nocache=nocache, recurse_count=recurse_count + 1)
    try:
        # record the jid inside its own directory
        with salt.utils.files.fopen(os.path.join(jid_dir, 'jid'), 'wb+') as fn_:
            fn_.write(salt.utils.stringutils.to_bytes(jid))
        if nocache:
            # empty marker file signalling "do not cache returns for this job"
            with salt.utils.files.fopen(os.path.join(jid_dir, 'nocache'), 'wb+'):
                pass
    except IOError:
        log.warning('Could not write out jid file for job %s. Retrying.', jid)
        time.sleep(0.1)
        # keep the same jid on a write failure -- the directory already exists
        return prep_jid(passed_jid=jid, nocache=nocache, recurse_count=recurse_count + 1)
    return jid
def close(self):
    """Close the serial port or the TCP-socket connection, whichever is open."""
    if (self.__ser is not None):
        self.__ser.close()
    if (self.__tcpClientSocket is not None):
        # signal the listener loop to stop before tearing down the socket
        self.__stoplistening = True
        # shut down both directions so the peer observes an orderly close,
        # then release the file descriptor
        self.__tcpClientSocket.shutdown(socket.SHUT_RDWR)
        self.__tcpClientSocket.close()
    # mark the connection unusable regardless of which transport was open
    self.__connected = False
def main():
    """This example demonstrates how to generate pretty equations from the
    analytic expressions found in ``chempy.kinetics.integrated``.

    For each function in the module-level ``funcs`` sequence, two PNG files
    are written next to the script: ``<name>.png`` with the rendered
    expression, and ``<name>_diff.png`` with its time derivative at t0 = 0.
    """
    # symbols: time, forward rate constant, initial time, the two reactant
    # concentrations, the product concentration and the beta parameter
    t, kf, t0, major, minor, prod, beta = sympy.symbols('t k_f t0 Y Z X beta', negative=False)
    for f in funcs:
        args = [t, kf, prod, major, minor]
        if f in (pseudo_rev, binary_rev):
            # the reversible variants additionally take kf/beta as the
            # third positional argument
            args.insert(2, kf / beta)
        expr = f(*args, backend='sympy')
        # render the expression itself
        with open(f.__name__ + '.png', 'wb') as ofh:
            sympy.printing.preview(expr, output='png', filename='out.png', viewer='BytesIO', outputbuffer=ofh)
        # render d(expr)/dt evaluated with t0 = 0, simplified
        with open(f.__name__ + '_diff.png', 'wb') as ofh:
            sympy.printing.preview(expr.diff(t).subs({t0: 0}).simplify(), output='png', filename='out.png', viewer='BytesIO', outputbuffer=ofh)
def scene(self, value):
    """Set the reference to the scene that this camera is in.

    Parameters
    ------------
    scene : None, or trimesh.Scene
        Scene where this camera is attached
    """
    # remember the owning scene
    self._scene = value
    # if a transform was stashed locally before the camera had a scene,
    # and the new value looks like a real scene (it has a graph), move
    # the saved transform into the scene graph under this camera's name
    pending = getattr(self, '_transform', None)
    if pending is not None and hasattr(value, 'graph'):
        self._scene.graph[self.name] = pending
        # the transform now lives in the scene graph, not locally
        self._transform = None
async def load_uint(reader, width):
    """Deserialize a little-endian constant-width unsigned integer.

    :param reader: async reader exposing ``areadinto(buffer)``
    :param width: number of bytes to consume
    :return: the decoded non-negative integer
    """
    # Bug fix: the previous implementation reused a shared module-level
    # _UINT_BUFFER, which is a data race when two coroutines deserialize
    # concurrently.  A one-byte local buffer is cheap and safe.
    buffer = bytearray(1)
    result = 0
    shift = 0
    for _ in range(width):
        await reader.areadinto(buffer)
        # bytes arrive least-significant first
        result += buffer[0] << shift
        shift += 8
    return result
def root2hdf5(rfile, hfile, rpath='', entries=-1, userfunc=None, show_progress=False, ignore_exception=False, **kwargs):
    """Convert all trees in a ROOT file into tables in an HDF5 file.

    Parameters
    ----------
    rfile : string or asrootpy'd ROOT File
        A ROOT File handle or string path to an existing ROOT file.
    hfile : string or PyTables HDF5 File
        A PyTables HDF5 File handle or string path to an existing HDF5 file.
    rpath : string, optional (default='')
        Top level path to begin traversal through the ROOT file. By default
        convert everything in and below the root directory.
    entries : int, optional (default=-1)
        The number of entries to read at once while converting a ROOT TTree
        into an HDF5 table. By default read the entire TTree into memory
        (this may not be desired if your TTrees are large).
    userfunc : callable, optional (default=None)
        A function that will be called on every tree and that must return a
        tree or list of trees that will be converted instead of the original
        tree.
    show_progress : bool, optional (default=False)
        If True, then display and update a progress bar on stdout as each
        tree is converted.
    ignore_exception : bool, optional (default=False)
        If True, then ignore exceptions raised in converting trees and
        instead skip such trees.
    kwargs : dict, optional
        Additional keyword arguments for the tree2array function.
    """
    # Track which handles we opened ourselves so we only close those;
    # caller-supplied handles are left open for the caller to manage.
    own_rootfile = False
    if isinstance(rfile, string_types):
        rfile = root_open(rfile)
        own_rootfile = True
    own_h5file = False
    if isinstance(hfile, string_types):
        hfile = tables_open(filename=hfile, mode="w", title="Data")
        own_h5file = True
    for dirpath, dirnames, treenames in rfile.walk(rpath, class_ref=QROOT.TTree):
        # skip directories w/o trees
        if not treenames:
            continue
        # deterministic conversion order
        treenames.sort()
        # mirror the ROOT directory structure as HDF5 groups
        group_where = '/' + os.path.dirname(dirpath)
        group_name = os.path.basename(dirpath)
        if not group_name:
            group = hfile.root
        elif TABLES_NEW_API:
            # PyTables >= 3 renamed createGroup -> create_group
            group = hfile.create_group(group_where, group_name, createparents=True)
        else:
            group = hfile.createGroup(group_where, group_name)
        ntrees = len(treenames)
        log.info("Will convert {0:d} tree{1} in {2}".format(ntrees, 's' if ntrees != 1 else '', os.path.join(group_where, group_name)))
        for treename in treenames:
            input_tree = rfile.Get(os.path.join(dirpath, treename))
            if userfunc is not None:
                # temporary file receives any trees the user function writes
                tmp_file = TemporaryFile()
                # call user-defined function on tree and get output trees
                log.info("Calling user function on tree '{0}'".format(input_tree.GetName()))
                trees = userfunc(input_tree)
                if not isinstance(trees, list):
                    trees = [trees]
            else:
                trees = [input_tree]
                tmp_file = None
            for tree in trees:
                try:
                    tree2hdf5(tree, hfile, group=group, entries=entries, show_progress=show_progress, **kwargs)
                except Exception as e:
                    if ignore_exception:
                        # best-effort mode: log and continue with next tree
                        log.error("Failed to convert tree '{0}': {1}".format(tree.GetName(), str(e)))
                    else:
                        raise
            # release the original tree's memory
            input_tree.Delete()
            if userfunc is not None:
                # also release the user-produced trees and their temp file
                for tree in trees:
                    tree.Delete()
                tmp_file.Close()
    if own_h5file:
        hfile.close()
    if own_rootfile:
        rfile.Close()
def Artifacts(self, os_name=None, cpe=None, label=None):
    """Whether the condition applies, modulo host data.

    Args:
        os_name: An OS string.
        cpe: A CPE string.
        label: A label string.

    Returns:
        True if os_name, cpe and label all match; an empty/unset value on
        this condition matches anything.
    """
    pairs = ((self.os_name, os_name), (self.cpe, cpe), (self.label, label))
    # a falsy condition value is a wildcard; otherwise require equality
    return all(not mine or mine == theirs for mine, theirs in pairs)
def cmd(parent):
    """Determine the subshell command for subprocess.call.

    Arguments:
        parent (str): Absolute path to parent shell executable

    Raises:
        SystemError: for powershell (not yet supported) or any
            unrecognized shell.
    """
    # strip directory and extension, e.g. "C:\...\bash.exe" -> "bash"
    shell_name = os.path.basename(parent).rsplit(".", 1)[0]
    here = os.path.dirname(__file__)
    if shell_name in ("bash", "sh"):
        # Bash wants forward-slash paths even on Windows
        helper = os.path.join(here, "_shell.sh").replace("\\", "/")
        return [parent.replace("\\", "/"), helper]
    if shell_name in ("cmd",):
        # /K keeps cmd open after running the helper batch file
        helper = os.path.join(here, "_shell.bat").replace("\\", "/")
        return [parent, "/K", helper]
    if shell_name in ("powershell",):
        raise SystemError("Powershell not yet supported")
    raise SystemError("Unsupported shell: %s" % shell_name)
def convert_data_array(arr, filter_func=None, converter_func=None):
    '''Filter and convert any given data array of any dtype.

    Parameters
    ----------
    arr : numpy.array
        Data array of any dtype.
    filter_func : function
        Function that takes the array and returns a boolean mask (True/False
        for each item); only items where the mask is True are kept.
    converter_func : function
        Function that takes the (filtered) array and returns an array or
        tuple of arrays.

    Returns
    -------
    Array filtered by `filter_func` and then transformed by
    `converter_func`; the input array unchanged if neither is given.
    '''
    # Bug fix: the original assigned the filtered result to a separate
    # variable but returned it even when only `converter_func` was given
    # (NameError), and applied the converter to the *unfiltered* input
    # while discarding its output.  Chain both steps on `arr` instead.
    if filter_func:
        # boolean-mask indexing keeps only the selected items
        arr = arr[filter_func(arr)]
    if converter_func:
        arr = converter_func(arr)
    return arr
def get_gaussian_weight(self, anchor):
    """Build a float32 Gaussian weight map over ``self.shape``.

    Args:
        anchor: (row, col) center of the Gaussian, in coordinates
            normalized by the map's height and width.

    Returns:
        float32 array of shape ``self.shape`` with exp(-(dx^2+dy^2)/sigma)
        at each pixel.
    """
    height, width = self.shape[0], self.shape[1]
    grid_y, grid_x = np.mgrid[:height, :width]
    # normalized offsets of every pixel from the anchor
    dy = grid_y.astype('float32') / height - anchor[0]
    dx = grid_x.astype('float32') / width - anchor[1]
    return np.exp(-(dx ** 2 + dy ** 2) / self.sigma)
def is_transactional(self, state):
    '''Decide if a request should be wrapped in a transaction, based
    upon the state of the request. By default, wraps all but ``GET``
    and ``HEAD`` requests in a transaction, along with respecting
    the ``transactional`` decorator from :mod:pecan.decorators.

    :param state: The Pecan state object for the current request.
    '''
    controller = getattr(state, 'controller', None)
    # the @transactional decorator can force a transaction even for
    # GET/HEAD; without a controller there is nothing to consult
    forced = _cfg(controller).get('transactional', False) if controller else False
    return state.request.method not in ('GET', 'HEAD') or bool(forced)
def readinto(self, byte_array):
    """Read data into a byte array, up to the size of the byte array.

    :param byte_array: A byte array/memory view to pour bytes into.
    :type byte_array: ``bytearray`` or ``memoryview``
    :returns: number of bytes actually read (may be less than requested)
    """
    limit = len(byte_array)
    chunk = self.read(limit)
    count = len(chunk)
    # copy only what was actually read; the tail stays untouched
    byte_array[:count] = chunk
    return count
def GTax(x, ax):
    """Compute the transpose of the gradient of `x` along axis `ax`.

    Parameters
    ----------
    x : array_like
        Input array
    ax : int
        Axis on which the gradient transpose is to be computed

    Returns
    -------
    xg : ndarray
        Output array
    """
    # slices selecting everything on the axes before `ax`
    lead = (slice(None),) * ax
    # interior: x[i-1] - x[i] via a circular shift
    grad_t = np.roll(x, 1, axis=ax) - x
    # boundary rows replace the wrapped-around values
    grad_t[lead + (slice(0, 1),)] = -x[lead + (slice(0, 1),)]
    grad_t[lead + (slice(-1, None),)] = x[lead + (slice(-2, -1),)]
    return grad_t
def to_one_str(cls, value, *args, **kwargs):
    """Convert a single record's values to str.

    Uses the class's wrapper conversion when ``wrapper`` is passed and
    truthy; otherwise delegates to ``_es.to_dict_str``.
    """
    use_wrapper = kwargs.get('wrapper')
    return cls._wrapper_to_one_str(value) if use_wrapper else _es.to_dict_str(value)
def json(self):
    """Return a JSON-serializable representation of this result.

    The output of this function can be converted to a serialized string
    with :any:`json.dumps`.
    """
    # every reported metric passes through _json_safe_float; the tuple
    # order fixes the key insertion order of the resulting dict
    metric_names = (
        "observed_length", "predicted_length", "merged_length",
        "num_parameters", "observed_mean", "predicted_mean",
        "observed_variance", "predicted_variance", "observed_skew",
        "predicted_skew", "observed_kurtosis", "predicted_kurtosis",
        "observed_cvstd", "predicted_cvstd", "r_squared", "r_squared_adj",
        "rmse", "rmse_adj", "cvrmse", "cvrmse_adj", "mape",
        "mape_no_zeros", "num_meter_zeros", "nmae", "nmbe",
        "autocorr_resid",
    )
    return {name: _json_safe_float(getattr(self, name)) for name in metric_names}
def on_finish(self):
    """Records the time taken to process the request.

    This method records the amount of time taken to process the request
    (as reported by
    :meth:`~tornado.httputil.HTTPServerRequest.request_time`) under the
    path defined by the class's name, the request method, and the status
    code. The :meth:`.record_timing` method is used to send the metric,
    so the configured namespace is used as well.
    """
    super().on_finish()
    # metric path: handler class name + HTTP method + response status
    self.record_timing(self.request.request_time(), self.__class__.__name__, self.request.method, self.get_status())
def centroid(coo):
    """Calculate the centroid of a 3D point cloud.

    :param coo: iterable of coordinate triples (x, y, z)
    :returns: centroid coordinates as a list [x, y, z]
    """
    # average each of the first three coordinate components independently
    return [np.mean([point[axis] for point in coo]) for axis in range(3)]
def format_help(self):
    """Override help doc to add cell args.

    Returns the standard argparse help text, followed by a "Cell args"
    section describing each registered cell argument, followed by the
    epilog (if any).
    """
    if not self._cell_args:
        return super(CommandParser, self).format_help()
    else:
        # Print the standard argparse info, the cell arg block, and then
        # the epilog.  If we don't remove epilog before calling the super,
        # then epilog will be printed before the 'Cell args' block.
        epilog = self.epilog
        self.epilog = None
        orig_help = super(CommandParser, self).format_help()
        cell_args_help = '\nCell args:\n\n'
        for cell_arg, v in six.iteritems(self._cell_args):
            required = 'Required' if v['required'] else 'Optional'
            cell_args_help += '%s: %s. %s.\n\n' % (cell_arg, required, v['help'])
        orig_help += cell_args_help
        # re-append the epilog we held back, after the cell args block
        if epilog:
            orig_help += epilog + '\n\n'
        return orig_help
def unique_bincount(values, minlength, return_inverse=True):
    """For arrays of integers find unique values using bin counting.

    Roughly 10x faster for correct input than np.unique.

    Parameters
    ------------
    values : (n,) int
        Values to find unique members of
    minlength : int
        Maximum value that will occur in values (values.max())
    return_inverse : bool
        If True, return an inverse such that unique[inverse] == values

    Returns
    ------------
    unique : (m,) int
        Unique values in original array
    inverse : (n,) int
        An array such that unique[inverse] == values;
        only returned if return_inverse is True

    Raises
    ------------
    ValueError
        If values is not a 1D integer array.
    """
    values = np.asanyarray(values)
    if len(values.shape) != 1 or values.dtype.kind != 'i':
        raise ValueError('input must be 1D integers!')
    try:
        # count the number of occurrences of each value
        counts = np.bincount(values, minlength=minlength)
    except TypeError:
        # casting failed on 32 bit windows
        log.error('casting failed!', exc_info=True)
        # fall back to numpy unique
        return np.unique(values, return_inverse=return_inverse)
    # which bins are occupied at all; counts are integers so this works.
    # Bug fix: `np.bool` was an alias removed in NumPy 1.24 -- the builtin
    # `bool` is the documented replacement and behaves identically here.
    unique_bin = counts.astype(bool)
    # which values are unique; indexes correspond to original values
    unique = np.where(unique_bin)[0]
    if return_inverse:
        # find the inverse to reconstruct original
        inverse = (np.cumsum(unique_bin) - 1)[values]
        return unique, inverse
    return unique
def set_position_target_local_ned_encode(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate):
    '''Sets a desired vehicle position in a local north-east-down coordinate
    frame. Used by an external controller to command the vehicle (manual
    controller or other system).

    time_boot_ms      : Timestamp in milliseconds since system boot (uint32_t)
    target_system     : System ID (uint8_t)
    target_component  : Component ID (uint8_t)
    coordinate_frame  : Valid options are: MAV_FRAME_LOCAL_NED = 1,
                        MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8,
                        MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
    type_mask         : Bitmask of setpoint dimensions the vehicle should
                        ignore; 0b00000 or 0b00000100000 means ignore none.
                        Bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy,
                        bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az,
                        bit 10: interpret afx/afy/afz as force instead of
                        acceleration, bit 11: yaw, bit 12: yaw rate (uint16_t)
    x, y, z           : Position in NED frame in meters; altitude is negative
                        in NED (float)
    vx, vy, vz        : Velocity in NED frame in meter/s (float)
    afx, afy, afz     : Acceleration, or force if bit 10 of type_mask is set,
                        in NED frame in meter/s^2 or N (float)
    yaw               : yaw setpoint in rad (float)
    yaw_rate          : yaw rate setpoint in rad/s (float)
    '''
    # Pure encoder: packs the arguments into the typed MAVLink message
    # object in the same order; no state on `self` is read or modified.
    return MAVLink_set_position_target_local_ned_message(time_boot_ms, target_system, target_component, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
def decompose_covariance(c):
    """Decompose a covariance matrix into an error vector and a
    correlation matrix, returned as [errors, correlation]."""
    # work on a copy so the caller's matrix is untouched
    cov = _n.array(c)
    # the errors are the square roots of the diagonal entries
    errors = _n.sqrt(_n.diag(cov))
    # correlation[i, j] = cov[i, j] / (errors[i] * errors[j])
    corr = cov / _n.outer(errors, errors)
    return [_n.array(errors), _n.array(corr)]
def calc_x_from_L(L, y):
    """Calculate the industry output x from L and a y vector.

    Parameters
    ----------
    L : pandas.DataFrame or numpy.array
        Symmetric input output Leontief table
    y : pandas.DataFrame or numpy.array
        a column vector of the total final demand

    Returns
    -------
    pandas.DataFrame or numpy.array
        Industry output x as column vector.
        The type is determined by the type of L. If DataFrame, index as L.
    """
    x = L.dot(y)
    # Idiom fix: use isinstance instead of `type(x) is ...` so pandas
    # subclasses are handled as well; behavior for plain Series/DataFrame
    # and numpy inputs is unchanged.
    if isinstance(x, pd.Series):
        x = pd.DataFrame(x)
    if isinstance(x, pd.DataFrame):
        # normalize the single column name used throughout pymrio
        x.columns = ['indout']
    return x
def key(self, direction, mechanism, purviews=False, _prefix=None):
    """Cache key. This is the call signature of |Subsystem.find_mice()|."""
    # the prefix leads so differently-prefixed entries never collide
    cache_key = (_prefix, direction, mechanism, purviews)
    return cache_key
def set_func(self, func, pnames, args=()):
    """Set the model function to use an efficient but tedious calling
    convention.

    The function should obey the following convention::

        def func(param_vec, *args):
            modeled_data = {do something using param_vec}
            return modeled_data

    This method creates the :class:`pwkit.lmmin.Problem` up front so the
    caller can futz with it before calling :meth:`solve`, if so desired.

    Returns *self*.
    """
    from .lmmin import Problem

    self.pnames = list(pnames)
    self.func = func
    self._args = args
    # build the Problem now, sized to the parameter count, so callers
    # may configure it before solve()
    self.lm_prob = Problem(len(self.pnames))
    return self
def match_any_learning_objective(self, match):
    """Matches an item with any objective.

    arg: match (boolean): ``true`` to match items with any
        learning objective, ``false`` to match items with no
        learning objectives

    *compliance: mandatory -- This method must be implemented.*
    """
    terms = self._my_osid_query._query_terms
    flag = 'true' if match else 'false'
    # create or update the query term for this key in one step
    entry = terms.setdefault('learningObjectiveIds', {})
    entry['$exists'] = flag
    # empty-list placeholders never count as "having" an objective
    entry['$nin'] = [[], ['']]
def _gather_config_parms ( self , is_provider_vlan , vlan_id ) :
"""Collect auto _ create , auto _ trunk from config .""" | if is_provider_vlan :
auto_create = cfg . CONF . ml2_cisco . provider_vlan_auto_create
auto_trunk = cfg . CONF . ml2_cisco . provider_vlan_auto_trunk
else :
auto_create = True
auto_trunk = True
return auto_create , auto_trunk |
def _is_child ( self , parent , child ) : # type : ( str , str ) - > bool
"""Returns whether a key is strictly a child of another key .
AoT siblings are not considered children of one another .""" | parent_parts = tuple ( self . _split_table_name ( parent ) )
child_parts = tuple ( self . _split_table_name ( child ) )
if parent_parts == child_parts :
return False
return parent_parts == child_parts [ : len ( parent_parts ) ] |
def AuthenticatedOrRedirect(invocation):
    """Middleware class factory that redirects if the user is not logged in.
    Otherwise, nothing is effected.
    """
    def _pass_or_redirect(request):
        # logged-in users pass straight through; everyone else is redirected
        return request if request.user else Redirection(invocation)

    class AuthenticatedOrRedirect(GiottoInputMiddleware):
        def http(self, request):
            return _pass_or_redirect(request)

        def cmd(self, request):
            return _pass_or_redirect(request)

    return AuthenticatedOrRedirect
def kpl_on(self, address, group):
    """Turn on a KPL button identified by device address and button group."""
    # resolve the device from the PLM registry by its normalized address
    device = self.plm.devices[Address(address).id]
    # trigger the ON state for the requested button group
    device.states[group].on()
def contraction_conical(Di1, Di2, fd=None, l=None, angle=None, Re=None, roughness=0.0, method='Rennels'):
    r'''Returns the loss coefficient for any conical pipe contraction.

    Five methods are available: 'Rennels' [1]_, 'Crane' [4]_ and 'Swamee'
    [5]_ use closed-form formulas; 'Idelchik' [2]_ and 'Blevins' [3]_
    interpolate tabulated data; 'Miller' [6]_ uses a 2d spline
    representation of a graph.

    The 'Rennels' formulas are:

    .. math::
        K_2 = K_{fr,2} + K_{conv,2}

    .. math::
        K_{fr,2} = \frac{f_d (1 - \beta^4)}{8\sin(\theta/2)}

    .. math::
        K_{conv,2} = 0.0696\sin(\alpha/2)(1-\beta^5)\lambda^2 + (\lambda-1)^2

    .. math::
        \lambda = 1 + 0.622(\alpha/180)^{0.8}(1-0.215\beta^2-0.785\beta^5)

    .. math::
        \beta = d_2/d_1

    The 'Swamee' formula is :math:`K = 0.315 \theta^{1/3}`.

    Parameters
    ----------
    Di1 : float
        Inside pipe diameter of the larger, upstream, pipe, [m]
    Di2 : float
        Inside pipe diameter of the smaller, downstream, pipe, [m]
    fd : float, optional
        Darcy friction factor; used only in the Rennels method and will be
        calculated from `Re` and `roughness` if not given, [-]
    l : float, optional
        Length of the contraction, optional [m]
    angle : float, optional
        Angle of contraction (180 = sharp, 0 = infinitely long contraction),
        optional [degrees]
    Re : float, optional
        Reynolds number of the pipe (used in Rennels method only if no
        friction factor given), [m]
    roughness : float, optional
        Roughness of bend wall (used in Rennels method if no friction
        factor given), [m]
    method : str, optional
        One of 'Rennels', 'Idelchik', 'Crane', 'Swamee', 'Blevins' or
        'Miller'; None is treated as 'Rennels', [-]

    Returns
    -------
    K : float
        Loss coefficient in terms of the following pipe [-]

    Raises
    ------
    ValueError
        If neither `l` nor `angle` is given; if the Rennels method has
        neither `fd` nor `Re`; or if `method` is not recognized.

    Notes
    -----
    Cheap and has substantial impact on pressure drop. The 'Idelchik'
    friction term is limited to angles 2-20 degrees and area ratios
    0.05-0.6; 'Blevins' data is limited to area ratios 0.1-0.83 and
    length/diameter ratios 0-0.6. Inputs outside the tabulated domains
    are clamped to the domain edges.

    Examples
    --------
    >>> contraction_conical(Di1=0.1, Di2=0.04, l=0.04, Re=1E6)
    0.15639885880609544

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel'chik, I. E. Handbook of Hydraulic Resistance. National
       Technical Information Service, 1966.
    .. [3] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York,
       N.Y.: Van Nostrand Reinhold Co., 1984.
    .. [4] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    .. [5] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
       Pipe Networks. John Wiley & Sons, 2008.
    .. [6] Miller, Donald S. Internal Flow Systems: Design and Performance
       Prediction. Gulf Publishing Company, 1990.
    '''
    beta = Di2/Di1
    # Derive whichever of (l, angle) was not provided from the geometry.
    if angle is not None:
        angle_rad = radians(angle)
        l = (Di1 - Di2)/(2.0*tan(0.5*angle_rad))
    elif l is not None:
        try:
            angle_rad = 2.0*atan((Di1 - Di2)/(2.0*l))
            angle = degrees(angle_rad)
        except ZeroDivisionError:
            # l == 0 corresponds to a sharp (180 degree) contraction
            angle_rad = pi
            angle = 180.0
    else:
        # ValueError (was a bare Exception) -- still caught by callers
        # handling Exception, but now the conventional type for bad input
        raise ValueError('Either l or angle is required')
    if method is None:
        # Bug fix: this previously read `method == 'Rennels'`, a no-op
        # comparison, so method=None fell through to the "not recognized"
        # error instead of using the documented default.
        method = 'Rennels'
    if method == 'Rennels':
        if fd is None:
            if Re is None:
                raise ValueError("The `Rennels` method requires either a "
                                 "specified friction factor or `Re`")
            fd = Colebrook(Re=Re, eD=roughness/Di2, tol=-1)
        beta2 = beta*beta
        beta4 = beta2*beta2
        beta5 = beta4*beta
        lbd = 1.0 + 0.622*(angle_rad/pi)**0.8*(1.0 - 0.215*beta2 - 0.785*beta5)
        sin_half_angle = sin(0.5*angle_rad)
        K_fr2 = fd*(1.0 - beta4)/(8.0*sin_half_angle)
        K_conv2 = 0.0696*sin_half_angle*(1.0 - beta5)*lbd*lbd + (lbd - 1.0)**2
        return K_fr2 + K_conv2
    elif method == 'Crane':
        return contraction_conical_Crane(Di1=Di1, Di2=Di2, l=l, angle=angle)
    elif method == 'Swamee':
        return 0.315*angle_rad**(1.0/3.0)
    elif method == 'Idelchik':
        # Diagram 3-6; already digitized for beveled entrance
        K0 = float(entrance_beveled_Idelchik_obj(angle, l/Di2))
        # friction term tabulated for angles 2-20, area ratios 0.05-0.6
        if angle > 20.0:
            angle_fric = 20.0
        elif angle < 2.0:
            angle_fric = 2.0
        else:
            angle_fric = angle
        A_ratio = A_ratio_fric = Di2*Di2/(Di1*Di1)
        if A_ratio_fric < 0.05:
            A_ratio_fric = 0.05
        elif A_ratio_fric > 0.6:
            A_ratio_fric = 0.6
        K_fr = float(contraction_conical_frction_Idelchik_obj(angle_fric, A_ratio_fric))
        return K0*(1.0 - A_ratio) + K_fr
    elif method == 'Blevins':
        A_ratio = Di1*Di1/(Di2*Di2)
        # clamp to the tabulated domain
        if A_ratio < 1.2:
            A_ratio = 1.2
        elif A_ratio > 10.0:
            A_ratio = 10.0
        l_ratio = l/Di2
        if l_ratio > 0.6:
            l_ratio = 0.6
        return float(contraction_conical_Blevins_obj(l_ratio, A_ratio))
    elif method == 'Miller':
        A_ratio = Di1*Di1/(Di2*Di2)
        if A_ratio > 4.0:
            A_ratio = 4.0
        elif A_ratio < 1.1:
            A_ratio = 1.1
        l_ratio = l/(Di2*0.5)
        if l_ratio < 0.1:
            l_ratio = 0.1
        elif l_ratio > 10.0:
            l_ratio = 10.0
        # Turning on or off the limits - little difference in plot
        return contraction_conical_Miller_obj(l_ratio, A_ratio)
    else:
        raise ValueError('Specified method not recognized; methods are %s' % (contraction_conical_methods))
def load_inventory_from_cache(self):
    '''Read the inventory from the cache file and store it on
    ``self.inventory`` as a parsed JSON object.'''
    # Bug fix: the file handle was opened but never closed; use a context
    # manager so the descriptor is released even if parsing raises.
    with open(self.cache_path_cache, 'r') as cache:
        self.inventory = json.loads(cache.read())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.