signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def check_vector_template(self):
    """Determine whether features come from a vector source, based on the
    MapViz arguments, and prefix the template name accordingly."""
    has_vector_args = (self.vector_url is not None
                       and self.vector_layer_name is not None)
    self.vector_source = has_vector_args
    if has_vector_args:
        self.template = 'vector_' + self.template
def payments(self):
    """Return the list of payments made for this subscription."""
    return self.client.subscription_payments.on(self).list()
def verify_module(self, filename, module, verify_signature):
    """Verify a downloaded kernel module's checksum and (optionally) signature.

    :type filename: str
    :param filename: downloaded kernel module path
    :type module: dict
    :param module: kernel module metadata (must contain 'checksum',
        'location' and, when GPG verification is enabled, 'signature')
    :type verify_signature: bool
    :param verify_signature: enable/disable signature verification
    """
    # Read the whole module into memory so the checksum covers the full file.
    with open(filename, 'rb') as f:
        module_data = f.read()
    self.verify_checksum(module_data, module['checksum'], module['location'])
    # NOTE(review): the gate here is self.gpg_verify, not the
    # verify_signature argument -- the parameter appears unused; confirm.
    if self.gpg_verify:
        signature_url = "{0}/{1}".format(self.url, module['signature'])
        file_url = "{0}/{1}".format(self.url, module['location'])
        self.verify_file_signature(signature_url, file_url, filename)
def get_perceval_params_from_url(cls, urls):
    """Return the perceval params for the given data-source URLs."""
    arthur_params = cls.get_arthur_params_from_url(urls)
    return [arthur_params["url"]]
def get_sport(sport):
    """Get live scores for all matches in a particular sport.

    :param sport: the sport being played
    :type sport: string
    :return: List containing Match objects
    :rtype: list
    """
    sport = sport.lower()
    results = []
    for entry in _request_xml(sport):
        if sport == constants.SOCCER:
            # Soccer entries carry the parsable summary in <description>.
            info = _parse_match_info(entry.find('description').text, soccer=True)
        else:
            info = _parse_match_info(entry.find('title').text)
            info['match_time'] = entry.find('description').text
        info['match_date'] = entry.find('pubDate').text
        info['match_link'] = entry.find('guid').text
        results.append(Match(sport, info))
    return results
def _set_config(c):
    """Set GL configuration hints for GLFW from config dict *c*.

    Accumulation buffers are always disabled; everything else comes from
    the config.  Raises if double buffering is not requested, since GLFW
    cannot run single-buffered here.
    """
    hints = (
        (glfw.GLFW_RED_BITS, c['red_size']),
        (glfw.GLFW_GREEN_BITS, c['green_size']),
        (glfw.GLFW_BLUE_BITS, c['blue_size']),
        (glfw.GLFW_ALPHA_BITS, c['alpha_size']),
        (glfw.GLFW_ACCUM_RED_BITS, 0),
        (glfw.GLFW_ACCUM_GREEN_BITS, 0),
        (glfw.GLFW_ACCUM_BLUE_BITS, 0),
        (glfw.GLFW_ACCUM_ALPHA_BITS, 0),
        (glfw.GLFW_DEPTH_BITS, c['depth_size']),
        (glfw.GLFW_STENCIL_BITS, c['stencil_size']),
        (glfw.GLFW_SAMPLES, c['samples']),
        (glfw.GLFW_STEREO, c['stereo']),
    )
    for target, value in hints:
        glfw.glfwWindowHint(target, value)
    if not c['double_buffer']:
        raise RuntimeError('GLFW must double buffer, consider using a '
                           'different backend, or using double buffering')
def set_default_locale(code: str) -> None:
    """Set the default locale.

    The default locale is assumed to be the language used for all strings
    in the system.  Translations loaded from disk map from the default
    locale to a destination locale, so no translation file is needed for
    the default locale itself.
    """
    global _default_locale, _supported_locales
    _default_locale = code
    # Supported locales are every locale we have translations for, plus
    # the default locale itself.
    _supported_locales = frozenset(_translations) | {code}
def _find_usage_elbv2(self):
    """Find usage for ELBv2 / Application LB and update the appropriate limits.

    :returns: number of Application LBs in use
    :rtype: int
    """
    logger.debug('Checking usage for ELBv2')
    # Dedicated client with a raised retry ceiling for the ELBv2 API.
    conn2 = client('elbv2', config=Config(retries={'max_attempts': ELBV2_MAX_RETRY_ATTEMPTS}), **self._boto3_connection_kwargs)
    logger.debug("Connected to %s in region %s (with max retry attempts " "overridden to %d)", 'elbv2', conn2._client_config.region_name, ELBV2_MAX_RETRY_ATTEMPTS)
    # Target groups
    tgroups = paginate_dict(conn2.describe_target_groups, alc_marker_path=['NextMarker'], alc_data_path=['TargetGroups'], alc_marker_param='Marker')
    self.limits['Target groups']._add_current_usage(len(tgroups['TargetGroups']), aws_type='AWS::ElasticLoadBalancingV2::TargetGroup')
    # ALBs
    lbs = paginate_dict(conn2.describe_load_balancers, alc_marker_path=['NextMarker'], alc_data_path=['LoadBalancers'], alc_marker_param='Marker')
    logger.debug('Checking usage for each of %d ALBs', len(lbs['LoadBalancers']))
    alb_count = 0
    nlb_count = 0
    for lb in lbs['LoadBalancers']:
        if lb.get('Type') == 'network':
            nlb_count += 1
        else:
            # Anything not explicitly a network LB counts as an application
            # LB, and its per-ALB usage (listeners etc.) is recorded too.
            alb_count += 1
            self._update_usage_for_alb(conn2, lb['LoadBalancerArn'], lb['LoadBalancerName'])
    self.limits['Network load balancers']._add_current_usage(nlb_count, aws_type='AWS::ElasticLoadBalancing::NetworkLoadBalancer')
    logger.debug('Done with ELBv2 usage')
    return alb_count
def access_token(self):
    """Create an OAuth access token for an authorized client.

    Defaults to /access_token.  Invoked by client applications.
    """
    token = generate_token(length=self.access_token_length[1])
    secret = generate_token(self.secret_length)
    oauth = request.oauth
    self.save_access_token(oauth.client_key, token, oauth.resource_owner_key, secret=secret)
    # Response body is form-urlencoded per the OAuth 1.0 token exchange.
    return urlencode([(u'oauth_token', token), (u'oauth_token_secret', secret)])
def _validate_number_sequence ( self , seq , n ) :
"""Validate a sequence to be of a certain length and ensure it ' s a numpy array of floats .
Raises :
ValueError : Invalid length or non - numeric value""" | if seq is None :
return np . zeros ( n )
if len ( seq ) is n :
try :
l = [ float ( e ) for e in seq ]
except ValueError :
raise ValueError ( "One or more elements in sequence <" + repr ( seq ) + "> cannot be interpreted as a real number" )
else :
return np . asarray ( l )
elif len ( seq ) is 0 :
return np . zeros ( n )
else :
raise ValueError ( "Unexpected number of elements in sequence. Got: " + str ( len ( seq ) ) + ", Expected: " + str ( n ) + "." ) |
def result_consumed(self, task_id):
    """Report the result as successfully consumed."""
    logger.debug('Sending result consumed message.')
    payload = {'task_ids': task_id}
    return self._perform_post_request(
        self.results_consumed_endpoint, payload, self.token_header)
def define_cyclic_can_msg(self, channel, can_msg=None):
    """Define a list of CAN messages for automatic transmission.

    :param int channel: CAN channel to be used (:data:`Channel.CHANNEL_CH0`
        or :data:`Channel.CHANNEL_CH1`).
    :param list(CanMsg) can_msg: list of CAN messages (up to 16, see
        structure :class:`CanMsg`), or None to delete an older list.
    """
    if can_msg is None:
        # An empty message with a zero count clears the cyclic list.
        c_can_msg = CanMsg()
        c_count = 0
    else:
        # Pack the Python list into a ctypes array of CanMsg structures.
        c_can_msg = (CanMsg * len(can_msg))(*can_msg)
        c_count = DWORD(len(can_msg))
    UcanDefineCyclicCanMsg(self._handle, channel, c_can_msg, c_count)
def latexify_u3col(obj, **kwargs):
    """Convert an object to special LaTeX for uncertainty tables.

    The returned LaTeX is meant for an uncertain value in a table and
    should span three columns: the digits before the decimal point, the
    decimal point up to the "plus-or-minus" indicator, and the indicator
    through the end.  Items that do not fit this schema can be wrapped in
    something like '\\multicolumn{3}{c}{...}'.
    """
    hook = getattr(obj, '__pk_latex_u3col__', None)
    if hook is not None:
        return hook(**kwargs)
    # TODO: there are reasonable ways to format many basic types, but I'm
    # not going to implement them until I need to.
    raise ValueError('can\'t LaTeXify %r in 3-column uncertain format' % obj)
def from_surface(surf_faces, mesh_in):
    """Create a mesh given a set of surface faces and the original mesh."""
    # Collect the unique vertex indices referenced by any surface face.
    aux = nm.concatenate([faces.ravel() for faces in surf_faces])
    inod = nm.unique(aux)
    n_nod = len(inod)
    n_nod_m, dim = mesh_in.coors.shape
    # Build an old-index -> new-index map; only entries at `inod` are valid.
    aux = nm.arange(n_nod, dtype=nm.int32)
    remap = nm.zeros((n_nod_m,), nm.int32)
    remap[inod] = aux
    mesh = Mesh(mesh_in.name + "_surf")
    # Restrict coordinates and node groups to the surface nodes.
    mesh.coors = mesh_in.coors[inod]
    mesh.ngroups = mesh_in.ngroups[inod]
    # Face-size -> element descriptor (3 nodes -> "2_3", 4 nodes -> "2_4").
    sfm = {3: "2_3", 4: "2_4"}
    mesh.conns = []
    mesh.descs = []
    mesh.mat_ids = []
    for ii, sf in enumerate(surf_faces):
        n_el, n_fp = sf.shape
        # Renumber face connectivity into the reduced node numbering.
        conn = remap[sf]
        # Each face group gets its own material id (its position in the list).
        mat_id = nm.empty((conn.shape[0],), dtype=nm.int32)
        mat_id.fill(ii)
        mesh.descs.append(sfm[n_fp])
        mesh.conns.append(conn)
        mesh.mat_ids.append(mat_id)
    mesh._set_shape_info()
    return mesh
def _file_prompt_quiet(f):
    """Decorator to toggle 'file prompt quiet' for methods that perform file
    operations.

    On first use it either configures 'file prompt quiet' on the device
    (when ``auto_file_prompt`` is set) or verifies the command is already in
    the running-config; otherwise it raises ``CommandErrorException``.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not self.prompt_quiet_configured:
            if self.auto_file_prompt:
                # disable file operation prompts
                self.device.send_config_set(["file prompt quiet"])
                # Remember that we changed device config so it can be
                # reverted later, and don't reconfigure on later calls.
                self.prompt_quiet_changed = True
                self.prompt_quiet_configured = True
            else:
                # check if the command is already in the running-config
                cmd = "file prompt quiet"
                show_cmd = "show running-config | inc {}".format(cmd)
                output = self.device.send_command_expect(show_cmd)
                if cmd in output:
                    self.prompt_quiet_configured = True
                else:
                    msg = ("on-device file operations require prompts to be disabled. "
                           "Configure 'file prompt quiet' or set 'auto_file_prompt=True'")
                    raise CommandErrorException(msg)
        # call wrapped function
        return f(self, *args, **kwargs)
    return wrapper
def OnCellBorderColor(self, event):
    """Cell border color event handler."""
    # Group the attribute change into a single undoable step.
    # NOTE(review): the dump's indentation is ambiguous -- the refresh and
    # toolbar update are assumed to be outside the undo group; confirm.
    with undo.group(_("Border color")):
        self.grid.actions.set_border_attr("bordercolor", event.color, event.borders)
    self.grid.ForceRefresh()
    self.grid.update_attribute_toolbar()
    event.Skip()
def format(self, action):
    """Return a formatted dictionary for the given action."""
    item = {}
    item['id'] = self.get_uri(action)
    item['url'] = self.get_url(action)
    item['verb'] = action.verb
    item['published'] = rfc3339_date(action.timestamp)
    item['actor'] = self.format_actor(action)
    item['title'] = text_type(action)
    # Optional components are included only when present on the action.
    if action.description:
        item['content'] = action.description
    if action.target:
        item['target'] = self.format_target(action)
    if action.action_object:
        item['object'] = self.format_action_object(action)
    return item
def _update_method(self, view):
    """Decide which method to use for *view* and configure it accordingly."""
    method = self._method
    if method == 'auto':
        # A linear transform allows the subdivided-quad approach;
        # otherwise fall back to the impostor method.
        if view.transforms.get_transform().Linear:
            method = 'subdivide'
        else:
            method = 'impostor'
    view._method_used = method
    # Upload the method selector and the matching vertex attributes.
    if method == 'subdivide':
        view.view_program['method'] = 0
        view.view_program['a_position'] = self._subdiv_position
        view.view_program['a_texcoord'] = self._subdiv_texcoord
    elif method == 'impostor':
        view.view_program['method'] = 1
        view.view_program['a_position'] = self._impostor_coords
        view.view_program['a_texcoord'] = self._impostor_coords
    else:
        raise ValueError("Unknown image draw method '%s'" % method)
    self.shared_program['image_size'] = self.size
    view._need_method_update = False
    self._prepare_transforms(view)
def cpprint(object, stream=_UNSET_SENTINEL, indent=_UNSET_SENTINEL, width=_UNSET_SENTINEL, depth=_UNSET_SENTINEL, *, compact=False, ribbon_width=_UNSET_SENTINEL, max_seq_len=_UNSET_SENTINEL, sort_dict_keys=_UNSET_SENTINEL, style=None, end='\n'):
    """Pretty print a Python value ``object`` to ``stream``, which defaults
    to sys.stdout.  The output will be colored and syntax highlighted.

    :param indent: number of spaces to add for each level of nesting.
    :param stream: the output stream, defaults to sys.stdout
    :param width: a soft maximum allowed number of columns in the output,
        which the layout algorithm attempts to stay under.
    :param depth: maximum depth to print nested structures
    :param ribbon_width: a soft maximum allowed number of columns in the
        output, after indenting the line
    :param max_seq_len: a maximum sequence length that applies to subclasses
        of lists, sets, frozensets, tuples and dicts.  A trailing comment
        indicates the number of truncated elements.  Setting max_seq_len to
        ``None`` disables truncation.
    :param sort_dict_keys: a ``bool`` value indicating if dict keys should
        be sorted in the output.  Defaults to ``False``, in which case the
        default order is used, which is the insertion order in CPython 3.6+.
    :param style: one of ``'light'``, ``'dark'`` or a subclass of
        ``pygments.styles.Style``.  If omitted, will use the default style.
        If the default style is not changed by the user with
        :func:`~prettyprinter.set_default_style`, the default is ``'dark'``.
    """
    # _UNSET_SENTINEL arguments fall back to library defaults inside
    # _merge_defaults.  Note: `compact` is accepted but not forwarded here.
    sdocs = python_to_sdocs(object, **_merge_defaults(indent=indent, width=width, depth=depth, ribbon_width=ribbon_width, max_seq_len=max_seq_len, sort_dict_keys=sort_dict_keys,))
    stream = (
        # This is not in _default_config in case sys.stdout changes.
        sys.stdout if stream is _UNSET_SENTINEL else stream)
    colored_render_to_stream(stream, sdocs, style=style)
    if end:
        stream.write(end)
def bin(self, command):
    """Run the omero command-line client with an array of arguments using
    the old environment."""
    # Accept a whitespace-separated command string as a convenience.
    args = command.split() if isinstance(command, basestring) else command
    self.external.omero_bin(args)
async def locate_tip_probe_center(self, mount, tip_length=None) -> top_types.Point:
    """Use the specified mount (which should have a tip) to find the
    position of the tip probe target center relative to its definition.

    :param mount: The mount to use for the probe
    :param tip_length: If specified (it should usually be specified),
        the length of the tip assumed to be attached.

    The tip length specification is for the use case during protocol
    calibration, when the machine cannot yet pick up a tip on its own.
    For that reason, it is not universally necessary.  Instead, there are
    several cases:

    1. A tip has previously been picked up with :py:meth:`pick_up_tip`.
       ``tip_length`` should not be specified since the tip length is
       known.
    2. A tip has not previously been picked up, and ``tip_length`` is
       specified.  The pipette will internally have a tip added of the
       specified length.
    3. A tip has not previously been picked up, and ``tip_length`` is not
       specified.  The pipette will use the tip length from its config.

    The return value is the updated position, in deck coordinates, of the
    tip probe center.
    """
    opt_pip = self._attached_instruments[mount]
    assert opt_pip, '{} has no pipette'.format(mount.name.lower())
    pip = opt_pip
    # NOTE(review): when a tip is already attached AND tip_length is given,
    # the existing tip is silently removed rather than asserted on --
    # confirm this is the intended reconciliation of case 1.
    if pip.has_tip and tip_length:
        pip.remove_tip()
    if not tip_length:
        assert pip.has_tip, 'If pipette has no tip a tip length must be specified'
        tip_length = pip._current_tip_length
    # _assure_tip lets us make sure we don't pollute the pipette
    # state even if there's an exception in tip probe
    @contextlib.contextmanager
    def _assure_tip():
        if pip.has_tip:
            old_tip = pip._current_tip_length
            pip.remove_tip()
        else:
            old_tip = None
        pip.add_tip(tip_length)
        try:
            yield
        finally:
            # Restore whatever tip state existed before the probe.
            pip.remove_tip()
            if old_tip:
                pip.add_tip(old_tip)
    with _assure_tip():
        return await self._do_tp(pip, mount)
def compute_K_analytical(self, spacing):
    """Compute geometrical factors over the homogeneous half-space with a
    constant electrode spacing, apply them to the data in place."""
    K = redaK.compute_K_analytical(self.data, spacing=spacing)
    self.data = redaK.apply_K(self.data, K)
    # presumably corrects measurement signs using K -- see redafixK; confirm.
    redafixK.fix_sign_with_K(self.data)
def hash_tags(text, hashes):
    """Hash any non-block tags in *text*.

    Only the tags themselves are hashed -- the content surrounded by tags
    is not touched; there is no notion of "contained" text for non-block
    tags.  Inline tags that are to be hashed are not white-listed, which
    allows users to define their own tags.  User-defined tags are preserved
    in their original form until the controller (see link.py) is applied
    to them.
    """
    def _replace(match):
        tag = match.group(0)
        key = hash_text(tag, 'tag')
        # Record the mapping so the original tag can be restored later.
        hashes[key] = tag
        return key
    return re_tag.sub(_replace, text)
def put(self, artifact):
    """Add the given coordinate to the set, using its version to pin it.

    If this set already contains an artifact with the same coordinates
    other than the version, it is replaced by the new artifact.

    :param M2Coordinate artifact: the artifact coordinate.
    """
    coord = M2Coordinate.create(artifact)
    if coord.rev is None:
        raise self.MissingVersion('Cannot pin an artifact to version "None"! {}'.format(coord))
    key = self._key(coord)
    old = self._artifacts_to_versions.get(key)
    self._artifacts_to_versions[key] = coord
    # Any actual change invalidates the cached id.
    if old != coord:
        self._id = None
def touch(fname):
    """Mimic the `touch` command.

    Busy-loops until the mtime has actually been changed; use for tests only.
    """
    before = get_mtime(fname)
    path = pathlib.Path(fname)
    # Filesystem timestamp granularity may swallow a single touch, so
    # repeat until the mtime visibly changes.
    while True:
        path.touch()
        if get_mtime(fname) != before:
            break
def placeOrder(self, id, contract, order):
    """placeOrder(EClientSocketBase self, OrderId id, Contract contract, Order order)

    Thin SWIG wrapper delegating to the native implementation.
    """
    return _swigibpy.EClientSocketBase_placeOrder(self, id, contract, order)
def get_type(self, idx):
    """Return the resolved type name based on the index.

    :param int idx: index of the type to resolve
    :return: the type name
    :rtype: str
    """
    type_ref = self.get_type_ref(idx)
    # -1 marks an unresolvable reference.
    return "AG:ITI: invalid type" if type_ref == -1 else self.get_string(type_ref)
def set_context(self, expr, ctx):
    """Set the context of an expression to Store or Del if possible.

    :param expr: the AST expression whose ``ctx`` is to be set
    :param ctx: a context instance (e.g. ``ast.Store`` / ``ast.Del``)
    """
    t = type(expr)
    try:
        # TODO: check if Starred is ok
        if t in (ast.Attribute, ast.Name):
            # BUG FIX: the original compared ``type(ctx) == ast.Store()``
            # (a type against a fresh instance), which is always False, so
            # the forbidden-name check never ran.
            if isinstance(ctx, ast.Store):
                # NOTE(review): original called ``mis.check_forbidden_name``
                # but caught ``misc.ForbiddenNameAssignment``; unified on
                # ``misc`` -- confirm the module name.
                misc.check_forbidden_name(getattr(expr, expression_name_map[t]), expr)
        elif t in (ast.Subscript, ast.Starred):
            pass
        elif t in (ast.List, ast.Tuple):
            # Containers propagate the context to each element.
            for elt in expr.elts:
                self.set_context(elt, ctx)
        expr.ctx = ctx
    except misc.ForbiddenNameAssignment as e:
        self.error_ast("cannot assign to %s" % (e.name,), e.node)
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`."""
    self.YU[:] = self.Y - self.U
    # Right-hand side of the linear system, in the DFT domain.
    b = self.DSf + self.rho * sl.rfftn(self.YU, None, self.cri.axisN)
    if self.cri.Cd == 1:
        # Single-channel dictionary: direct diagonal-block solver.
        self.Xf[:] = sl.solvedbi_sm(self.Df, self.mu + self.rho, b, self.c, self.cri.axisM)
    else:
        # Multi-channel dictionary: iterated multi-diagonal-block solver.
        self.Xf[:] = sl.solvemdbi_ism(self.Df, self.mu + self.rho, b, self.cri.axisM, self.cri.axisC)
    self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
    if self.opt['LinSolveCheck']:
        # Compute the relative residual of the linear solve for diagnostics.
        Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)
        if self.cri.Cd == 1:
            DHop = lambda x: np.conj(self.Df) * x
        else:
            DHop = lambda x: sl.inner(np.conj(self.Df), x, axis=self.cri.axisC)
        ax = DHop(Dop(self.Xf)) + (self.mu + self.rho) * self.Xf
        self.xrrs = sl.rrs(ax, b)
    else:
        self.xrrs = None
def create_engine(database, minsize=1, maxsize=10, loop=None, dialect=_dialect, paramstyle=None, **kwargs):
    """A coroutine for Engine creation.

    Returns an Engine instance with an embedded connection pool.  The pool
    has *minsize* opened connections to sqlite3.
    """
    return _EngineContextManager(_create_engine(
        database=database,
        minsize=minsize,
        maxsize=maxsize,
        loop=loop,
        dialect=dialect,
        paramstyle=paramstyle,
        **kwargs))
def parse_text(document, container, element):
    """Parse text element.

    Appends parsed child elements (breaks, text, symbols, footnote/endnote
    references, drawings, comment references) to ``container.elements``.
    """
    txt = None
    # Alternate content is delegated to its own parser.
    alternate = element.find(_name('{{{mc}}}AlternateContent'))
    if alternate is not None:
        parse_alternate(document, container, alternate)
    # Breaks, optionally typed (e.g. page breaks).
    br = element.find(_name('{{{w}}}br'))
    if br is not None:
        if _name('{{{w}}}type') in br.attrib:
            _type = br.attrib[_name('{{{w}}}type')]
            brk = doc.Break(_type)
        else:
            brk = doc.Break()
        container.elements.append(brk)
    # Literal text content.
    t = element.find(_name('{{{w}}}t'))
    if t is not None:
        txt = doc.Text(t.text)
        txt.parent = container
        container.elements.append(txt)
    rpr = element.find(_name('{{{w}}}rPr'))
    if rpr is not None:
        # Notice it is using txt as container
        parse_previous_properties(document, txt, rpr)
    # Nested runs are parsed recursively into the same container.
    for r in element.findall(_name('{{{w}}}r')):
        parse_text(document, container, r)
    foot = element.find(_name('{{{w}}}footnoteReference'))
    if foot is not None:
        parse_footnote(document, container, foot)
    end = element.find(_name('{{{w}}}endnoteReference'))
    if end is not None:
        parse_endnote(document, container, end)
    # Symbols carry an explicit font and character code.
    sym = element.find(_name('{{{w}}}sym'))
    if sym is not None:
        _font = sym.attrib[_name('{{{w}}}font')]
        _char = sym.attrib[_name('{{{w}}}char')]
        container.elements.append(doc.Symbol(font=_font, character=_char))
    image = element.find(_name('{{{w}}}drawing'))
    if image is not None:
        parse_drawing(document, container, image)
    refe = element.find(_name('{{{w}}}commentReference'))
    if refe is not None:
        _m = doc.Comment(refe.attrib[_name('{{{w}}}id')], 'reference')
        container.elements.append(_m)
    return
def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0):
    """Open (or create) a hierarchy object via the COM process.

    CreateFileType values:
      0 - Creates no new object.
      1 - Creates a notebook with the specified name at the specified location.
      2 - Creates a section group with the specified name at the specified location.
      3 - Creates a section with the specified name at the specified location.
    """
    # NOTE(review): `object_id` is accepted but not forwarded -- an empty
    # string is passed instead; confirm whether that is intentional.
    try:
        return (self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type))
    except Exception as e:
        # Failures are printed and swallowed; callers receive None.
        print(e)
        print("Could not Open Hierarchy")
def _histogram_binsize_weighted(a, w, start, width, n):
    """histogram_even_weighted(a, start, width, n) -> histogram

    Return a histogram where the first bin counts the number of lower
    outliers and the last bin the number of upper outliers.  Works only
    with fixed width bins.

    :Stochastics:
      a : array
        Array of samples.
      w : array
        Weights of samples.
      start : float
        Left-most bin edge.
      width : float
        Width of the bins.  All bins are considered to have the same width.
      n : int
        Number of bins.

    :Return:
      H : array
        Array containing the number of elements in each bin.  H[0] is the
        number of samples smaller than start and H[-1] the number of
        samples greater than start + n * width.
    """
    # Delegates entirely to the compiled helper.
    return flib.weighted_fixed_binsize(a, w, start, width, n)
def prepare_request(self, method, url, body=''):
    """Prepare the request body and headers.

    :returns: dict with the signed request headers and serialized data
    """
    headers = {'Content-type': 'application/json'}
    # Note: we don't pass body to sign() since it's only for bodies that
    # are form-urlencoded.  Similarly, we don't care about the body that
    # sign() returns.
    uri, signed_headers, signed_body = self.oauth_client.sign(
        url, http_method=method, headers=headers)
    if body:
        body = urllib.urlencode(body) if method == 'GET' else json.dumps(body)
    headers.update(signed_headers)
    return {"headers": headers, "data": body}
def getall(self):
    """Get the vrrp configurations for all interfaces on a node.

    Returns:
        A dictionary containing the vrrp configurations on the node,
        keyed by interface.
    """
    result = dict()
    # Scan the running config for interface names, keeping only those
    # interfaces that actually have vrrps defined.
    for name in re.findall(r'^interface\s(\S+)', self.config, re.M):
        vrrp_cfg = self.get(name)
        if vrrp_cfg:
            result[name] = vrrp_cfg
    return result
def standard_backtrack(self):
    """Estimate step size L by computing a linesearch that guarantees that
    F <= Q according to the standard FISTA backtracking strategy in
    :cite:`beck-2009-fast`.  This also updates variable Y.
    """
    gradY = self.eval_grad()
    # Given Y(f), this updates computes gradY(f)
    maxiter = self.L_maxiter
    iterBTrack = 0
    linesearch = 1
    while linesearch and iterBTrack < maxiter:
        self.proximal_step(gradY)
        # Given gradY(f), L, this updates X(f)
        f = self.obfn_f(self.var_x())
        Dxy = self.eval_Dxy()
        # Quadratic upper bound Q_L from the FISTA backtracking rule.
        Q = self.obfn_f(self.var_y()) + self.eval_linear_approx(Dxy, gradY) + (self.L / 2.) * np.linalg.norm(Dxy.flatten(), 2) ** 2
        if f <= Q:
            # The bound holds: accept the current L and stop searching.
            linesearch = 0
        else:
            # Increase L and retry the proximal step.
            self.L *= self.L_gamma_u
            iterBTrack += 1
    self.F = f
    self.Q = Q
    self.iterBTrack = iterBTrack
    # Update auxiliary sequence
    self.combination_step()
def add_info_to_uncommon_items(filtered_items, uncommon_items):
    """Add extra info to the uncommon items.

    Mutates and returns *uncommon_items*: each entry keyed by item id is
    replaced with a dict carrying the item name, its salic URL and the
    receipt flag.
    """
    url_prefix = '/prestacao-contas/analisar/comprovante'
    result = uncommon_items
    for _, row in filtered_items.iterrows():
        item_id = row['idPlanilhaItens']
        # NOTE: 'has_recepit' (sic) is kept as-is -- downstream consumers
        # read this exact key.
        result[item_id] = {
            'name': uncommon_items[item_id],
            'salic_url': get_salic_url(row, url_prefix),
            'has_recepit': has_receipt(row),
        }
    return result
def _write_SOCKS5_address(self, addr, file):
    """Write the (host, port) destination to *file* in SOCKS5 wire format.

    Returns the resolved address as a (host, port) tuple.
    """
    host, port = addr
    proxy_type, _, _, rdns, username, password = self.proxy
    # SOCKS5 address-type bytes: 0x01 = IPv4, 0x04 = IPv6 (0x03 = domain).
    family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
    # If the given destination address is an IP address, we'll
    # use the IP address request even if remote resolving was specified.
    # Detect whether the address is IPv4/6 directly.
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            addr_bytes = socket.inet_pton(family, host)
            file.write(family_to_byte[family] + addr_bytes)
            host = socket.inet_ntop(family, addr_bytes)
            file.write(struct.pack(">H", port))
            return host, port
        except socket.error:
            continue
    # Well it's not an IP number, so it's probably a DNS name.
    if rdns:
        # Resolve remotely: send the domain name and let the proxy resolve.
        host_bytes = host.encode('idna')
        file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
    else:
        # Resolve locally
        addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
        # We can't really work out what IP is reachable, so just pick the
        # first.
        target_addr = addresses[0]
        family = target_addr[0]
        host = target_addr[4][0]
        addr_bytes = socket.inet_pton(family, host)
        file.write(family_to_byte[family] + addr_bytes)
        host = socket.inet_ntop(family, addr_bytes)
        file.write(struct.pack(">H", port))
    return host, port
def tx_schema(self, **kwargs):
    """Transact every statement of the schema into the db.

    Iterates ``self.schema.schema`` and submits each entry via ``self.tx``,
    forwarding any keyword arguments.
    """
    # The original bound each result to an unused local `tx`; the return
    # value of tx() is intentionally discarded.
    for statement in self.schema.schema:
        self.tx(statement, **kwargs)
def fetch(self, refspec=None, progress=None, **kwargs):
    """Fetch the latest changes for this remote.

    :param refspec:
        A "refspec" is used by fetch and push to describe the mapping
        between remote ref and local ref.  They are combined with a colon
        in the format <src>:<dst>, preceded by an optional plus sign, +.
        For example: git fetch $URL refs/heads/master:refs/heads/origin
        means "grab the master branch head from the $URL and store it as
        my origin branch head", and git push $URL
        refs/heads/master:refs/heads/to-upstream means "publish my master
        branch head as to-upstream branch at $URL".  See also git-push(1).
        Fetch supports multiple refspecs (as the underlying git-fetch
        does) - supplying a list rather than a string for 'refspec' will
        make use of this facility.
    :param progress: See 'push' method
    :param kwargs: Additional arguments to be passed to git-fetch
    :return:
        IterableList(FetchInfo, ...) list of FetchInfo instances providing
        detailed information about the fetch results
    :note:
        As fetch does not provide progress information to non-ttys, we
        cannot make it available here unfortunately as in the 'push' method.
    """
    if refspec is None:
        # No argument refspec, then ensure the repo's config has a fetch refspec.
        self._assert_refspec()
    kwargs = add_progress(kwargs, self.repo.git, progress)
    if isinstance(refspec, list):
        args = refspec
    else:
        args = [refspec]
    # Run git-fetch verbosely as a process so stderr can be parsed for
    # per-ref fetch information.
    proc = self.repo.git.fetch(self, *args, as_process=True, with_stdout=False, universal_newlines=True, v=True, **kwargs)
    res = self._get_fetch_info_from_stderr(proc, progress)
    if hasattr(self.repo.odb, 'update_cache'):
        # Keep the object database cache in sync with newly fetched objects.
        self.repo.odb.update_cache()
    return res
def cluster_setup(nodes, pcsclustername='pcscluster', extra_args=None):
    '''Setup pacemaker cluster via pcs command

    nodes
        a list of nodes which should be set up
    pcsclustername
        Name of the Pacemaker cluster (default: pcscluster)
    extra_args
        list of extra options for the 'pcs cluster setup' command

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.cluster_setup nodes='[node1.example.org node2.example.org]' pcsclustername=pcscluster
    '''
    cmd = ['pcs', 'cluster', 'setup', '--name', pcsclustername] + nodes
    # Extra arguments are appended only when given as a proper sequence.
    if isinstance(extra_args, (list, tuple)):
        cmd.extend(extra_args)
    return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
def astra_cpu_forward_projector(vol_data, geometry, proj_space, out=None):
    """Run an ASTRA forward projection on the given data using the CPU.

    Parameters
    ----------
    vol_data : `DiscreteLpElement`
        Volume data to which the forward projector is applied.
    geometry : `Geometry`
        Geometry defining the tomographic setup.
    proj_space : `DiscreteLp`
        Space to which the calling operator maps.
    out : ``proj_space`` element, optional
        Element of the projection space to which the result is written. If
        ``None``, an element in ``proj_space`` is created.

    Returns
    -------
    out : ``proj_space`` element
        Projection data resulting from the application of the projector.
        If ``out`` was provided, the returned object is a reference to it.
    """
    # --- Validate types and mutual consistency before touching ASTRA ---
    if not isinstance(vol_data, DiscreteLpElement):
        raise TypeError('volume data {!r} is not a `DiscreteLpElement` ' 'instance.'.format(vol_data))
    if vol_data.space.impl != 'numpy':
        raise TypeError("`vol_data.space.impl` must be 'numpy', got {!r}" "".format(vol_data.space.impl))
    if not isinstance(geometry, Geometry):
        raise TypeError('geometry {!r} is not a Geometry instance' ''.format(geometry))
    if not isinstance(proj_space, DiscreteLp):
        raise TypeError('`proj_space` {!r} is not a DiscreteLp ' 'instance.'.format(proj_space))
    if proj_space.impl != 'numpy':
        raise TypeError("`proj_space.impl` must be 'numpy', got {!r}" "".format(proj_space.impl))
    if vol_data.ndim != geometry.ndim:
        raise ValueError('dimensions {} of volume data and {} of geometry ' 'do not match' ''.format(vol_data.ndim, geometry.ndim))
    if out is None:
        out = proj_space.element()
    else:
        if out not in proj_space:
            raise TypeError('`out` {} is neither None nor a ' 'DiscreteLpElement instance'.format(out))
    ndim = vol_data.ndim
    # Create astra geometries
    vol_geom = astra_volume_geometry(vol_data.space)
    proj_geom = astra_projection_geometry(geometry)
    # Create projector; ASTRA supports only one interpolation scheme, so the
    # volume interpolation must be identical along every axis.
    if not all(s == vol_data.space.interp_byaxis[0] for s in vol_data.space.interp_byaxis):
        raise ValueError('volume interpolation must be the same in each ' 'dimension, got {}'.format(vol_data.space.interp))
    vol_interp = vol_data.space.interp
    proj_id = astra_projector(vol_interp, vol_geom, proj_geom, ndim, impl='cpu')
    # Create ASTRA data structures
    vol_data_arr = np.asarray(vol_data)
    vol_id = astra_data(vol_geom, datatype='volume', data=vol_data_arr, allow_copy=True)
    # Write the projection result directly into `out` via a zero-copy view.
    with writable_array(out, dtype='float32', order='C') as out_arr:
        sino_id = astra_data(proj_geom, datatype='projection', data=out_arr, ndim=proj_space.ndim)
        # Create algorithm
        algo_id = astra_algorithm('forward', ndim, vol_id, sino_id, proj_id, impl='cpu')
        # Run algorithm
        astra.algorithm.run(algo_id)
        # Delete ASTRA objects
        astra.algorithm.delete(algo_id)
        # NOTE(review): this frees the handles through the 2d module even when
        # ndim == 3 — confirm whether 3-D data should go through astra.data3d.
        astra.data2d.delete((vol_id, sino_id))
        astra.projector.delete(proj_id)
    return out
def get_charge_calibration(calibation_file, max_tdc):
    '''Open the hit or calibration file and return the calibration per pixel'''
    with tb.open_file(calibation_file, mode="r") as calibration_h5:
        # Index 1 of the last axis selects the TDC plane of the calibration table.
        calibration_data = calibration_h5.root.HitOrCalibration[:, :, :, 1]
        scan_parameters = calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
    return get_charge(max_tdc, scan_parameters, calibration_data)
def get(self) -> Union[Event, None]:
    """Get the latest event from the queue.

    Call this method to query the queue for the latest event.
    If no event has been published None is returned.

    Returns:
        Event or None
    """
    message = self._queue.get_message()
    # Anything other than an actual pub/sub message means "no new event".
    if not message or message['type'] != 'message':
        return None
    event_id = DB.get_event(self._pub_key, self._processed_key)
    event_str = DB.get_hash_value(self._data_key, event_id)
    event_config = ast.literal_eval(event_str)
    event_config['id'] = event_id
    event_config['subscriber'] = self._subscriber
    return Event.from_config(event_config)
def creator_type(self, creator_type):
    """Sets the creator_type of this Event.

    :param creator_type: The creator_type of this Event.  # noqa: E501
    :type: list[str]
    """
    allowed_values = ["USER", "ALERT", "SYSTEM"]  # noqa: E501
    # Reject the assignment if any supplied value falls outside the allowed set.
    invalid_values = set(creator_type) - set(allowed_values)
    if invalid_values:
        raise ValueError(
            "Invalid values for `creator_type` [{0}], must be a subset of [{1}]"  # noqa: E501
            .format(", ".join(map(str, invalid_values)),
                    ", ".join(map(str, allowed_values))))
    self._creator_type = creator_type
def flip_one(self, tour):
    """Test flipping every single contig sequentially to see if score
    improves."""
    n_accepts = n_rejects = 0
    any_tag_ACCEPT = False
    for i, t in enumerate(tour):
        # The baseline score is computed once up front; afterwards it is
        # updated in place whenever a flip is accepted, so re-evaluating the
        # unflipped tour is never necessary.
        if i == 0:
            score, = self.evaluate_tour_Q(tour)
        # Tentatively flip the orientation of contig t and re-score.
        self.signs[t] = -self.signs[t]
        score_flipped, = self.evaluate_tour_Q(tour)
        if score_flipped > score:
            n_accepts += 1
            tag = ACCEPT
        else:
            # No improvement: undo the flip.
            self.signs[t] = -self.signs[t]
            n_rejects += 1
            tag = REJECT
        self.flip_log("FLIPONE ({}/{})".format(i + 1, len(self.signs)), score, score_flipped, tag)
        if tag == ACCEPT:
            any_tag_ACCEPT = True
            # Accepted flip becomes the new baseline.
            score = score_flipped
    logging.debug("FLIPONE: N_accepts={} N_rejects={}".format(n_accepts, n_rejects))
    # Report ACCEPT if at least one flip improved the tour.
    return ACCEPT if any_tag_ACCEPT else REJECT
def custom_triplet_bytes(custom_triplet):
    """Convert triplet of [label_store, symbol, description] into bytes
    for defining custom labels in the annotation file
    """
    # Structure: 0, NOTE, len(aux_note), aux_note, where aux_note is
    # '<label_store> <symbol> <description>', followed by a null pad byte
    # if needed to keep the total length even.
    label_store_str = str(custom_triplet[0])
    description = custom_triplet[2]
    aux_note_len = len(description) + 3 + len(label_store_str)
    annbytes = [0, 88, aux_note_len, 252]
    annbytes.extend(ord(c) for c in label_store_str)
    annbytes.append(32)
    annbytes.append(ord(custom_triplet[1]))
    annbytes.append(32)
    annbytes.extend(ord(c) for c in description)
    # Pad to an even number of bytes as required by the annotation format.
    if len(annbytes) % 2:
        annbytes.append(0)
    return annbytes
def activate(admin=True, browser=True, name='admin', reflect_all=False):
    """Activate each pre-registered model or generate the model classes and
    (possibly) register them for the admin.

    :param bool admin: should we generate the admin interface?
    :param bool browser: should we open the browser for the user?
    :param name: name to use for blueprint created by the admin interface. Set
        this to avoid naming conflicts with other blueprints (if
        trying to use sandman to connect to multiple databases
        simultaneously)
    :param bool reflect_all: force re-reflection of the database even when
        model classes are already registered
    """
    with app.app_context():
        generate_pks = app.config.get('SANDMAN_GENERATE_PKS', None) or False
        # Nothing registered yet (or full re-reflection requested):
        # reflect the database and generate endpoint classes from scratch.
        if getattr(app, 'class_references', None) is None or reflect_all:
            app.class_references = collections.OrderedDict()
            generate_endpoint_classes(db, generate_pks)
        else:
            # Pre-registered models: just bind them to the engine.
            Model.prepare(db.engine)
        # NOTE(review): mixes `app` and `current_app` — inside this
        # app_context they should refer to the same application; confirm.
        prepare_relationships(db, current_app.class_references)
        if admin:
            try:
                show_pks = current_app.config['SANDMAN_SHOW_PKS']
            except KeyError:
                show_pks = False
            register_classes_for_admin(db.session, show_pks, name)
        if browser:
            port = app.config.get('SERVER_PORT', None) or 5000
            webbrowser.open('http://localhost:{}/admin'.format(port))
def calculate_minimum_swaps(binary_string1, binary_string2):
    """Calculate the minimum number of character swaps between the two binary
    strings required to make them identical.

    A position where string1 has '0' and string2 has '1' is one kind of
    mismatch, the opposite is the other kind.  Two mismatches of the same
    kind are fixed by a single swap; one leftover mismatch of each kind
    costs two extra swaps.  If the mismatches cannot be paired up, the
    strings can never be made identical and -1 is returned.

    >>> calculate_minimum_swaps('0011', '1111')
    1
    >>> calculate_minimum_swaps('00011', '01001')
    2
    >>> calculate_minimum_swaps('111', '111')
    0

    :param binary_string1: first binary string
    :param binary_string2: second binary string (same length as the first)
    :return: minimum number of swaps, or -1 if impossible
    :raises ValueError: if the strings differ in length
    """
    if len(binary_string1) != len(binary_string2):
        raise ValueError("binary strings must have equal length")
    count_0, count_1 = 0, 0
    for a, b in zip(binary_string1, binary_string2):
        if a == '0' and b == '1':
            count_0 += 1
        elif a == '1' and b == '0':
            count_1 += 1
    # Same-kind mismatch pairs cost one swap each.
    swaps = count_0 // 2 + count_1 // 2
    if count_0 % 2 == 0 and count_1 % 2 == 0:
        return swaps
    elif (count_0 + count_1) % 2 == 0:
        # One leftover mismatch of each kind: fixing that pair takes 2 swaps.
        return swaps + 2
    else:
        # Odd total number of mismatches: the strings can never match.
        return -1
def bind(self, model, template="{}"):
    """Bind the ``model`` to the reference. This uses the model's
    ``id`` attribute and the given ``template`` to
    dynamically produce a uri when accessed.

    :param model: object whose ``id`` attribute feeds the uri template.
    :param template: format string applied to the bound model (default "{}").
    """
    self._bound_model = model
    self._uri_template = template
    # Recompute the derived uri now that the binding has changed.
    self._set_uri_from_bound_model()
def save_as(self, target_dir=None):
    """Save the kecpkg service script to an (optional) target dir.

    Retains the filename of the service as known in KE-chain.

    .. versionadded:: 1.13

    :param target_dir: (optional) target dir. If not provided will save to current working directory.
    :type target_dir: basestring or None
    :raises APIError: if unable to download the service.
    :raises OSError: if unable to save the service kecpkg file to disk.
    """
    # Keep the server-side filename; fall back to the cwd when no dir given.
    full_path = os.path.join(target_dir or os.getcwd(), self.filename)
    url = self._client._build_url('service_download', service_id=self.id)
    response = self._client._request('GET', url)
    if response.status_code != requests.codes.ok:  # pragma: no cover
        raise APIError("Could not download service script file ({})".format(response))
    # Stream the payload to disk chunk by chunk.
    with open(full_path, 'w+b') as f:
        for chunk in response:
            f.write(chunk)
def shlex_quotes(value):
    '''see http://stackoverflow.com/questions/6868382/python-shlex-split-ignore-single-quotes'''
    lexer = shlex.shlex(value)
    # Only double quotes act as quoting characters, so apostrophes/single
    # quotes pass through as ordinary characters.
    lexer.quotes = '"'
    lexer.whitespace_split = True
    lexer.commenters = ''
    return list(lexer)
def remove_udp_port(self, port):
    """Removes an associated UDP port number from this project.

    :param port: UDP port number
    """
    # EAFP: attempt the removal and ignore ports that were never allocated
    # (KeyError for set-like containers, ValueError for list-like ones).
    try:
        self._used_udp_ports.remove(port)
    except (KeyError, ValueError):
        pass
def zscore(bars, window=20, stds=1, col='close'):
    """get zscore of price"""
    series = bars[col]
    rolling_mean = numpy_rolling_mean(series, window)
    rolling_std = numpy_rolling_std(series, window)
    # Deviation from the rolling mean, scaled by `stds` rolling deviations.
    return (series - rolling_mean) / (rolling_std * stds)
def r_sa_check(template, tag_type, is_standalone):
    """Do a final check to see if a tag could be a standalone"""
    # Variable-style tags can never be standalones, nor can tags that are
    # already known not to be.
    if not is_standalone or tag_type in ('variable', 'no escape'):
        return False
    # Only the remainder of the current line matters: if it is empty or all
    # whitespace the tag stands alone on its line.
    rest_of_line = template.split('\n', 1)[0]
    return not rest_of_line or rest_of_line.isspace()
def check_suspension(user_twitter_id_list):
    """Looks up a list of user ids and checks whether they are currently suspended.

    Input:  - user_twitter_id_list: A python list of Twitter user ids in integer format to be looked-up.

    Outputs: - suspended_user_twitter_id_list: A python list of suspended Twitter user ids in integer format.
             - non_suspended_user_twitter_id_list: A python list of non suspended Twitter user ids in integer format.
             - unknown_status_user_twitter_id_list: A python list of unknown status Twitter user ids in integer format.
    """
    # Log into my application.
    twitter = login()
    # Lookup users
    # Initialize look-up lists
    suspended_user_twitter_id_list = list()
    non_suspended_user_twitter_id_list = list()
    unknown_status_user_twitter_id_list = list()
    # Bind the mutator methods to locals: they are called inside the loop.
    append_suspended_twitter_user = suspended_user_twitter_id_list.append
    append_non_suspended_twitter_user = non_suspended_user_twitter_id_list.append
    extend_unknown_status_twitter_user = unknown_status_user_twitter_id_list.extend
    # Split twitter user id list into sub-lists of length 100 (This is the Twitter API function limit).
    user_lookup_counter = 0
    user_lookup_time_window_start = time.perf_counter()
    for hundred_length_sub_list in chunks(list(user_twitter_id_list), 100):
        # Make safe twitter request.
        try:
            api_result, user_lookup_counter, user_lookup_time_window_start = safe_twitter_request_handler(twitter_api_func=twitter.lookup_user, call_rate_limit=60, call_counter=user_lookup_counter, time_window_start=user_lookup_time_window_start, max_retries=10, wait_period=2, parameters=hundred_length_sub_list)
            # If the call is succesful, turn hundred sub-list to a set for faster search.
            hundred_length_sub_list = set(hundred_length_sub_list)
            # Users returned ("hydrated") by the API are reachable accounts.
            for hydrated_user_object in api_result:
                hydrated_twitter_user_id = hydrated_user_object["id"]
                if hydrated_twitter_user_id in hundred_length_sub_list:
                    append_non_suspended_twitter_user(hydrated_twitter_user_id)
                else:
                    append_suspended_twitter_user(hydrated_twitter_user_id)
            # NOTE(review): ids *absent* from api_result are never classified
            # here — the "suspended" branch only fires for returned ids not in
            # the request batch; confirm this matches the intended semantics.
        except twython.TwythonError:
            # If the call is unsuccesful, we do not know about the status of the users.
            extend_unknown_status_twitter_user(hundred_length_sub_list)
        except URLError:
            # If the call is unsuccesful, we do not know about the status of the users.
            extend_unknown_status_twitter_user(hundred_length_sub_list)
        except BadStatusLine:
            # If the call is unsuccesful, we do not know about the status of the users.
            extend_unknown_status_twitter_user(hundred_length_sub_list)
    return suspended_user_twitter_id_list, non_suspended_user_twitter_id_list, unknown_status_user_twitter_id_list
def loads(ion_str, catalog=None, single_value=True, encoding='utf-8', cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, use_decimal=None, **kw):
    """Deserialize ``ion_str``, which is a string representation of an Ion
    object, to a Python object using the conversion table used by load (above).

    Args:
        ion_str (str): A string representation of Ion data.
        catalog (Optional[SymbolTableCatalog]): The catalog to use for resolving symbol table imports.
        single_value (Optional[True|False]): When True, the data in ``ion_str`` is interpreted as a single Ion value,
            and will be returned without an enclosing container. If True and there are multiple top-level values in
            the Ion stream, IonException will be raised. NOTE: this means that when data is dumped using
            ``sequence_as_stream=True``, it must be loaded using ``single_value=False``. Default: True.
        encoding: NOT IMPLEMENTED
        cls: NOT IMPLEMENTED
        object_hook: NOT IMPLEMENTED
        parse_float: NOT IMPLEMENTED
        parse_int: NOT IMPLEMENTED
        parse_constant: NOT IMPLEMENTED
        object_pairs_hook: NOT IMPLEMENTED
        use_decimal: NOT IMPLEMENTED
        **kw: NOT IMPLEMENTED

    Returns (Any):
        if single_value is True:
            A Python object representing a single Ion value.
        else:
            A sequence of Python objects representing a stream of Ion values.
    """
    # Wrap the input in the matching in-memory buffer, then delegate to load().
    if isinstance(ion_str, six.binary_type):
        ion_buf = BytesIO(ion_str)
    elif isinstance(ion_str, six.text_type):
        ion_buf = six.StringIO(ion_str)
    else:
        raise TypeError('Unsupported text: %r' % ion_str)
    return load(ion_buf, catalog=catalog, single_value=single_value,
                encoding=encoding, cls=cls, object_hook=object_hook,
                parse_float=parse_float, parse_int=parse_int,
                parse_constant=parse_constant,
                object_pairs_hook=object_pairs_hook, use_decimal=use_decimal)
def _start_primary(self):
    """Start as the primary"""
    # Start the election manager, mark the secondary side as running, then
    # expose the shared instances. Order follows the original sequence.
    self.em.start()
    self.em.set_secondary_state(_STATE_RUNNING)
    self._set_shared_instances()
def getPositionAndSize(self):
    '''Gets the position and size (X, Y, W, H)

    @return: A tuple containing the View's coordinates (X, Y, W, H)
    '''
    # Query position first, then the dimensions, matching accessor order.
    x, y = self.getXY()
    return (x, y, self.getWidth(), self.getHeight())
def jwt_optional(fn):
    """A decorator to optionally protect a Flask endpoint.

    If an access token is present in the request, the endpoint is called with
    :func:`~flask_jwt_extended.get_jwt_identity` returning the identity of the
    access token. If no access token is present, the endpoint is still called,
    but :func:`~flask_jwt_extended.get_jwt_identity` returns `None` instead.

    If there is an invalid access token in the request (expired, tampered
    with, etc), the appropriate error handler is invoked instead of the
    endpoint being called as if there were no access token in the request.
    """
    @wraps(fn)
    def optionally_protected(*args, **kwargs):
        # Validates a present token; a missing token is tolerated.
        verify_jwt_in_request_optional()
        return fn(*args, **kwargs)
    return optionally_protected
def calcFashionEvoFunc(pNow):
    '''Calculates a new approximate dynamic rule for the evolution of the proportion
    of punks as a linear function and a "shock width".

    Parameters
    ----------
    pNow : [float]
        List describing the history of the proportion of punks in the population.

    Returns
    -------
    (unnamed) : FashionEvoFunc
        A new rule for the evolution of the population punk proportion, based on
        the history in input pNow.
    '''
    history = np.array(pNow)
    T = history.size
    # Discard the first 100 burn-in observations and regress p_{t+1} on p_t.
    p_t = history[100:(T - 1)]
    p_tp1 = history[101:T]
    pNextSlope, pNextIntercept, trash1, trash2, trash3 = stats.linregress(p_t, p_tp1)
    # Width of the shock: RMS error of the one-step-ahead prediction.
    predicted = pNextIntercept + pNextSlope * p_t
    pNextStd = np.sqrt(np.mean((predicted - p_tp1) ** 2))
    print(str(pNextIntercept) + ', ' + str(pNextSlope) + ', ' + str(pNextStd))
    return FashionEvoFunc(pNextIntercept, pNextSlope, 2 * pNextStd)
def shanum(filename, dmax=None, noprint=True):
    """Execute the shanum program to determine the optimum qmax
    according to an estimation of the optimum number of Shannon
    channels.

    Inputs:
        filename: either a name of an ascii file, or an instance
            of Curve
        dmax: the cut-off of the P(r) function, if known. If None,
            this will be determined by the shanum program
        noprint: if the printout of the program is to be suppressed.

    Outputs: dmax, nsh, nopt, qmaxopt
        dmax: the cut-off of the P(r) function.
        nsh: the estimated number of Shannon channels
        nopt: the optimum number of Shannon channels
        qmaxopt: the optimum value of the high-q cutoff
    """
    if isinstance(filename, Curve):
        # A Curve was passed: dump it to a temporary ASCII file that the
        # external program can read (kept on disk via delete=False).
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['shanum', filename]
    if dmax is not None:
        cmdline.append(str(float(dmax)))
    result = execute_command(cmdline, noprint=noprint)
    # Parse the key=value lines printed by the program.
    for l in result:
        l = l.strip()
        if l.startswith('Dmax='):
            dmax = float(l.split('=')[1])
        elif l.startswith('Smax='):
            qmax = float(l.split('=')[1])
        elif l.startswith('Nsh='):
            nsh = float(l.split('=')[1])
        elif l.startswith('Nopt='):
            nopt = float(l.split('=')[1])
        elif l.startswith('Sopt='):
            qmaxopt = float(l.split('=')[1])
    # NOTE(review): if any expected line is missing from the output the
    # corresponding name (nsh/nopt/qmaxopt) is unbound here and the return
    # raises UnboundLocalError; `qmax` is parsed but never used — confirm.
    return dmax, nsh, nopt, qmaxopt
def wulff_gform_and_r(self, wulffshape, bulk_entry, r, from_sphere_area=False, r_units="nanometers", e_units="keV", normalize=False, scale_per_atom=False):
    """Calculates the formation energy of the particle with arbitrary radius r.

    Args:
        wulffshape (WulffShape): Initial, unscaled WulffShape
        bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.
        r (float (Ang)): Arbitrary effective radius of the WulffShape
        from_sphere_area (bool): There are two ways to calculate the bulk
            formation energy. Either by treating the volume and thus surface
            area of the particle as a perfect sphere, or as a Wulff shape.
        r_units (str): Can be nanometers or Angstrom
        e_units (str): Can be keV or eV
        normalize (bool): Whether or not to normalize energy by volume
        scale_per_atom (True): Whether or not to normalize by number of
            atoms in the particle

    Returns:
        particle formation energy (float in keV), effective radius
    """
    # Set up
    miller_se_dict = wulffshape.miller_energy_dict
    new_wulff = self.scaled_wulff(wulffshape, r)
    new_wulff_area = new_wulff.miller_area_dict
    # calculate surface energy of the particle
    if not from_sphere_area:
        # By approximating the particle as a Wulff shape:
        # total surface energy = sum over facets of (energy * area).
        w_vol = new_wulff.volume
        tot_wulff_se = 0
        for hkl in new_wulff_area.keys():
            tot_wulff_se += miller_se_dict[hkl] * new_wulff_area[hkl]
        Ebulk = self.bulk_gform(bulk_entry) * w_vol
        new_r = new_wulff.effective_radius
    else:
        # By approximating the particle as a perfect sphere of radius r.
        w_vol = (4 / 3) * np.pi * r ** 3
        sphere_sa = 4 * np.pi * r ** 2
        tot_wulff_se = wulffshape.weighted_surface_energy * sphere_sa
        Ebulk = self.bulk_gform(bulk_entry) * w_vol
        new_r = r
    # Unit conversions: Angstrom -> nm and eV -> keV when requested.
    new_r = new_r / 10 if r_units == "nanometers" else new_r
    e = (Ebulk + tot_wulff_se)
    e = e / 1000 if e_units == "keV" else e
    # Optional normalization by particle volume...
    e = e / ((4 / 3) * np.pi * new_r ** 3) if normalize else e
    # ...or by the number of atoms in the particle (sites per volume * volume).
    bulk_struct = bulk_entry.structure
    density = len(bulk_struct) / bulk_struct.lattice.volume
    e = e / (density * w_vol) if scale_per_atom else e
    return e, new_r
def do_session_info(self, params):
    """\x1b[1mNAME\x1b[0m
            session_info - Shows information about the current session

    \x1b[1mSYNOPSIS\x1b[0m
            session_info [match]

    \x1b[1mOPTIONS\x1b[0m
            * match: only include lines that match (default: '')

    \x1b[1mEXAMPLES\x1b[0m
            > session_info
            state=CONNECTED
            xid=4
            last_zxid=0x00000505f8be5b3
            timeout=10000
            client=('127.0.0.1', 60348)
            server=('127.0.0.1', 2181)
    """
    # Report template; filled below from the underlying ZooKeeper client.
    # The continuation lines are deliberately unindented: the indentation
    # would otherwise become part of the output.
    fmt_str = """state=%s
sessionid=%s
auth_info=%s
protocol_version=%d
xid=%d
last_zxid=0x%.16x
timeout=%d
client=%s
server=%s
data_watches=%s
child_watches=%s"""
    content = fmt_str % (self._zk.client_state, self._zk.sessionid, list(self._zk.auth_data), self._zk.protocol_version, self._zk.xid, self._zk.last_zxid, self._zk.session_timeout, self._zk.client, self._zk.server, ",".join(self._zk.data_watches), ",".join(self._zk.child_watches))
    # Keep only the lines matching the optional filter, then print.
    output = get_matching(content, params.match)
    self.show_output(output)
def translate(self, dct):
    """Translate leaf names using a dictionary of names

    :param dct: Dictionary of current names -> updated names
    :return: Copy of tree with names changed
    """
    renamed_tree = self.copy()
    for leaf_node in renamed_tree._tree.leaf_node_iter():
        old_label = leaf_node.taxon.label
        # Names absent from the mapping are left untouched.
        leaf_node.taxon.label = dct.get(old_label, old_label)
    return renamed_tree
def halfmax_points(self):
    """Get the bandpass' half-maximum wavelengths. These can be used to
    compute a representative bandwidth, or for display purposes.

    Unlike calc_halfmax_points(), this function will use a cached value if
    available.
    """
    cache_key = (self.telescope, self.band)
    cached = self.registry._halfmaxes.get(cache_key)
    if cached is not None:
        return cached
    # Not cached yet: compute, then remember for subsequent calls.
    halfmax = self.calc_halfmax_points()
    self.registry.register_halfmaxes(self.telescope, self.band, halfmax[0], halfmax[1])
    return halfmax
def disconnect(self):
    """Disconnects from the MQTT server"""
    # Stop the timer first so no new activity is scheduled.
    self.__stop_timer()
    # Unlock all publishers waiting on in-flight messages.
    for event in self.__in_flight.values():
        event.set()
    # Disconnect from the server.
    self.__mqtt.disconnect()
    # Stop the MQTT loop thread.
    # Use a separate thread to avoid a dead lock in Paho.
    thread = threading.Thread(target=self.__mqtt.loop_stop)
    thread.daemon = True
    thread.start()
    # Give it some time, but do not block forever (4 s timeout).
    thread.join(4)
def get_clients_per_page(self, per_page=1000, page=1, params=None):
    """Get clients per page

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    # Thin wrapper delegating to the generic paginated-resource fetcher.
    return self._get_resource_per_page(resource=CLIENTS, per_page=per_page, page=page, params=params)
def to_reasonable_unit(value, units, round_digits=2):
    """Convert a value to the most reasonable unit.

    The most reasonable unit is roughly the one with the smallest exponent
    absolute value when written in scientific notation. For example
    `1.5` is more reasonable than `.0015` and `22` is more reasonable
    than `22000`. There is a bias towards numbers > 1, so `3.2` is
    considered more reasonable than `.32`.
    """
    # Each unit is a (label, multiplier) pair; scale the value by every one.
    scaled = [float(value) / unit[1] for unit in units]
    # Decimal.adjusted() gives the base-10 exponent; subtracting 1 biases
    # the selection toward numbers greater than 1.
    exponents = [abs(Decimal(s).adjusted() - 1) for s in scaled]
    best = min(range(len(units)), key=lambda i: exponents[i])
    best_unit = units[best]
    return dict(val=round(scaled[best], round_digits), label=best_unit[0], multiplier=best_unit[1])
def previousElementSibling(self):
    '''previousElementSibling - Returns the previous sibling that is an element.
    This is the previous tag node in the parent's list of children

    @return <None/AdvancedTag> - None if there are no children (tag) in the parent before this node,
        Otherwise the previous element (tag)
    '''
    parent = self.parentNode
    # No parent means no siblings at all.
    if not parent:
        return None
    position = parent.children.index(self)
    # First child: there is nothing before us.
    if position == 0:
        return None
    return parent.children[position - 1]
def blame_diff_hunk(self, dependent, parent, path, hunk):
    """Run git blame on the parts of the hunk which exist in the
    older commit in the diff. The commits generated by git blame
    are the commits which the newer commit in the diff depends on,
    because without the lines from those commits, the hunk would
    not apply correctly.
    """
    # Human-readable diff ranges, e.g. "-10,5" / "+12,7".
    line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines)
    line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines)
    self.logger.info(" Blaming hunk %s @ %s (listed below)" % (line_range_before, parent.hex[:8]))
    if not self.tree_lookup(path, parent):
        # This is probably because dependent added a new directory
        # which was not previously in the parent.
        return
    blame = self.run_blame(hunk, parent, path)
    dependent_sha1 = dependent.hex
    self.register_new_dependent(dependent, dependent_sha1)
    # Map of hunk line -> commit that last touched it, filled per blame line.
    line_to_culprit = {}
    for line in blame.split('\n'):
        self.process_hunk_line(dependent, dependent_sha1, parent, path, line, line_to_culprit)
    self.debug_hunk(line_range_before, line_range_after, hunk, line_to_culprit)
def render_template(self, name, **kwargs):
    """Search for a setting named ``template_<name>`` and render it.

    If one is not defined it uses the default template of the library
    at ``authcode/templates/<name>.html``.

    To render the template this uses the ``render`` function, a property that
    has probably been overwritten in a ``auth.setup_for_something``
    function (eg. ``setup_for_flask``).

    :param name: base name of the template (without the ``template_`` prefix).
    :param kwargs: extra context passed through to the renderer.
    """
    # Bug fix: ``getattr`` needs an explicit ``None`` default — without it a
    # missing ``template_<name>`` attribute raises AttributeError instead of
    # triggering the documented fallback to the library default template.
    custom_template = getattr(self, 'template_' + name, None)
    if custom_template:
        return self.render(custom_template, **kwargs)
    template = TEMPLATES.get(name)
    return self.default_render(template, **kwargs)
def _TTA(learn: Learner, beta: float = 0.4, scale: float = 1.35, ds_type: DatasetType = DatasetType.Valid, with_loss: bool = False) -> Tensors:
    "Applies TTA to predict on `ds_type` dataset."
    # Plain predictions plus the mean of the augmented predictions.
    preds, y = learn.get_preds(ds_type)
    all_preds = list(learn.tta_only(scale=scale, ds_type=ds_type))
    avg_preds = torch.stack(all_preds).mean(0)
    if beta is None:
        # Caller gets both prediction sets to combine as they wish.
        return preds, avg_preds, y
    else:
        # Blend: `beta` weights the un-augmented predictions.
        final_preds = preds * beta + avg_preds * (1 - beta)
        if with_loss:
            with NoneReduceOnCPU(learn.loss_func) as lf:
                loss = lf(final_preds, y)
            return final_preds, y, loss
        return final_preds, y
def dump_simple_db(path):
    """Dumps a SimpleDb as string in the following format:
    <key>: <json-encoded string>
    """
    simpledb = SimpleDb(path, mode="r", sync=False)
    lines = []
    with simpledb as db:
        for key in db:
            lines.append("{0}: {1}".format(key, db.dumpvalue(key)))
    return "\n".join(lines)
def translate_index_to_position(self, index):
    """Given an index for the text, return the corresponding (row, col) tuple.
    (0-based. Returns (0, 0) for index=0.)
    """
    row, line_start = self._find_line_start_index(index)
    # The column is simply the distance from the start of that line.
    return row, index - line_start
def select(sockets, remain=conf.recv_poll_rate):
    """This function is called during sendrecv() routine to select
    the available sockets.

    params:
        - sockets: an array of sockets that need to be selected
    returns:
        - an array of sockets that were selected
        - the function to be called next to get the packets (i.g. recv)
    """
    try:
        # NOTE(review): the inner `select` is assumed to resolve to the
        # module-level select.select import; that only works if this def
        # lives inside a class namespace (otherwise it would shadow itself
        # and recurse) — confirm against the enclosing class.
        inp, _, _ = select(sockets, [], [], remain)
    except (IOError, select_error) as exc:
        # select.error has no .errno attribute
        if exc.args[0] != errno.EINTR:
            raise
        # NOTE(review): on EINTR `inp` is left unbound and the return below
        # raises UnboundLocalError — confirm whether callers rely on retrying.
    return inp, None
def async_steps(self, n):
    """Progress simulation by running all agents *n* times asynchronously."""
    # A new round may only start once every agent has acted.
    assert len(self._agents_to_act) == 0
    for _round in range(n):
        self.async_step()
def wcs_update(self, wcs_text, fb=None):
    """parses the wcs_text and populates the fields
    of a coord_tran instance.
    we start from the coord_tran of the input
    frame buffer, if any

    NOTE: relies on the Python-2-era ``string`` module functions
    (string.split/string.strip); not runnable on Python 3 as-is.
    """
    # Start from the frame buffer's existing transform when one is supplied.
    if (fb):
        ct = fb.ct
    else:
        ct = coord_tran()
    if (not ct.valid):
        ct.zt = W_UNITARY
    # read wcs_text
    data = string.split(wcs_text, '\n')
    ct.imtitle = data[0]
    # we are expecting 8 floats and 1 int
    try:
        (ct.a, ct.b, ct.c, ct.d, ct.tx, ct.ty, ct.z1, ct.z2, ct.zt) = string.split(data[1])
        ct.a = float(ct.a)
        ct.b = float(ct.b)
        ct.c = float(ct.c)
        ct.d = float(ct.d)
        ct.tx = float(ct.tx)
        ct.ty = float(ct.ty)
        ct.z1 = float(ct.z1)
        ct.z2 = float(ct.z2)
        ct.zt = int(ct.zt)
    except Exception:
        # Malformed WCS line: fall back to the identity transform.
        ct.imtitle = "[NO WCS]"
        ct.a = 1
        ct.d = 1
        ct.b = 0
        ct.c = 0
        ct.tx = 0
        ct.ty = 0
        ct.zt = W_UNITARY
    # NOTE(review): valid is incremented even when parsing failed — confirm.
    ct.valid += 1
    # determine the best format for WCS output
    if (ct.valid and ct.zt == W_LINEAR):
        z1 = ct.z1
        z2 = ct.z2
        zrange = abs(z1 - z2)
        zavg = (abs(z1) + abs(z2)) / 2.0
        if (zrange < 100.0 and zavg < 200.0):
            ct.format = " %7.2f %7.2f %7.3f%c"
        elif (zrange > 99999.0 or zavg > 99999.0):
            ct.format = " %7.2f %7.2f %7.3g%c"
        else:
            ct.format = W_DEFFORMAT
    else:
        ct.format = " %7.2f %7.2f %7.0f%c"
    # add_mapping, if we can (requires a third data line)
    if (len(data) < 4):
        return (ct)
    # we are expecting 1 string, 2 floats, and 6 int
    try:
        print("updating WCS: %s" % str(data[2]))
        (ct.region, ct.sx, ct.sy, ct.snx, ct.sny, ct.dx, ct.dy, ct.dnx, ct.dny) = string.split(data[2])
        ct.sx = float(ct.sx)
        ct.sy = float(ct.sy)
        ct.snx = int(ct.snx)
        ct.sny = int(ct.sny)
        # dx,dy: offset into frame where actual data starts
        ct.dx = int(ct.dx)
        ct.dy = int(ct.dy)
        # dnx,dny: length of actual data in frame from offsets
        ct.dnx = int(ct.dnx)
        ct.dny = int(ct.dny)
        ct.ref = string.strip(data[3])
        # if this works, we also have the real size of the image
        fb.img_width = ct.dnx + 1
        # for some reason, the width is always 1 pixel smaller...
        fb.img_height = ct.dny
    except Exception:
        # Mapping line unusable: default to the full frame-buffer extent.
        ct.region = 'none'
        ct.sx = 1.0
        ct.sy = 1.0
        ct.snx = fb.width
        ct.sny = fb.height
        ct.dx = 1
        ct.dy = 1
        ct.dnx = fb.width
        ct.dny = fb.height
        ct.ref = 'none'
    return (ct)
def _add_new_spawn_method(cls):
    """Attach a ``spawn`` method to *cls*.

    The generated ``spawn(self, dependency_mapping)`` ignores both of its
    arguments and simply returns a fresh instance created via ``cls()``.
    """
    def new_spawn_method(self, dependency_mapping):
        # TODO/FIXME: Check that this does the right thing:
        #   (i) the spawned generator is independent of the original one
        #       (i.e. they can be reset independently without altering the
        #       other's behaviour)
        #   (ii) ensure that it also works if this custom generator's
        #        __init__ requires additional arguments
        # new_instance = self.__class__()
        # FIXME: It would be good to explicitly spawn the field generators
        # of `self` here because this would ensure that the internal random
        # generators of the spawned versions are in the same state as the
        # ones in `self`. This would guarantee that the spawned custom
        # generator produces the same elements as `self` even before
        # reset() is called explicitly.
        new_instance = cls()
        return new_instance
    cls.spawn = new_spawn_method
def sample_hgd(in_range, out_range, nsample, seed_coins):
    """Get a sample from the hypergeometric distribution, using the provided
    bit list as a source of randomness."""
    domain_size = in_range.size()
    codomain_size = out_range.size()
    assert domain_size > 0 and codomain_size > 0
    assert domain_size <= codomain_size
    assert out_range.contains(nsample)
    # 1-based position of nsample within out_range
    position = nsample - out_range.start + 1
    if domain_size == codomain_size:
        # Input and output domains have equal size: identity shift.
        return in_range.start + position - 1
    drawn = HGD.rhyper(position, domain_size, codomain_size - domain_size, seed_coins)
    if drawn == 0:
        return in_range.start
    result = in_range.start + drawn - 1
    assert in_range.contains(result)
    return result
def me(cls):
    """Returns information about the currently authenticated user.

    :return: the authenticated user
    :rtype: User
    """
    response = cls._get_request(endpoint=cls.ENDPOINT + '/me')
    element = cls.element_from_string(response.text)
    decoder = fields.ObjectField(name=cls.ENDPOINT, init_class=cls)
    return decoder.decode(element)
def clean(self):
    """Validate that the submitted :attr:`username` and :attr:`password` are valid using
    the :class:`CASFederateAuth <cas_server.auth.CASFederateAuth>` auth class.

    :raises django.forms.ValidationError: if the :attr:`username` and :attr:`password`
        do not correspond to a :class:`FederatedUser <cas_server.models.FederatedUser>`.
    :return: The cleaned POST data
    :rtype: dict
    """
    cleaned_data = super(FederateUserCredential, self).clean()
    try:
        # Invalidate the one-time ticket of the matching federated user.
        matched_user = models.FederatedUser.get_from_federated_username(cleaned_data["username"])
        matched_user.ticket = ""
        matched_user.save()
    except models.FederatedUser.DoesNotExist:  # pragma: no cover (should not happen)
        # Should not happen: if the FederatedUser does not exist, super()
        # should already have raised a ValidationError ("bad user").
        raise forms.ValidationError(
            _(u"User not found in the temporary database, please try to reconnect")
        )
    return cleaned_data
def _find_folders(self, folder_name):
    """Return a list of sub-directories.

    Builds ``<app_config.path>/<folder_name>`` for every installed app and
    keeps only the paths that exist as directories.
    """
    candidates = (
        os.path.join(app_config.path, folder_name)
        for app_config in apps.get_app_configs()
    )
    return [path for path in candidates if os.path.isdir(path)]
def request_motion_detection_enable(blink, network, camera_id):
    """Enable motion detection for a camera.

    :param blink: Blink instance.
    :param network: Sync module network id.
    :param camera_id: Camera ID of camera to enable.
    """
    target = "{}/network/{}/camera/{}/enable".format(
        blink.urls.base_url, network, camera_id)
    return http_post(blink, target)
def _original_vocab(tmp_dir):
    """Returns a set containing the original vocabulary.

    This is important for comparing with published results.

    Args:
      tmp_dir: directory containing dataset.

    Returns:
      a set of strings
    """
    vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/"
                 "vocab-2016-09-10.txt")
    vocab_filename = os.path.basename(vocab_url + ".en")
    vocab_filepath = os.path.join(tmp_dir, vocab_filename)
    # Download only when the vocabulary file is not already cached locally.
    if not os.path.exists(vocab_filepath):
        generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url)
    words = set()
    for line in tf.gfile.Open(vocab_filepath):
        words.add(text_encoder.native_to_unicode(line.strip()))
    return words
def _ubridge_apply_filters ( self , adapter_number , port_number , filters ) :
"""Apply filter like rate limiting
: param adapter _ number : adapter number
: param port _ number : port number
: param filters : Array of filter dictionnary""" | bridge_name = "IOL-BRIDGE-{}" . format ( self . application_id + 512 )
location = '{bridge_name} {bay} {unit}' . format ( bridge_name = bridge_name , bay = adapter_number , unit = port_number )
yield from self . _ubridge_send ( 'iol_bridge reset_packet_filters ' + location )
for filter in self . _build_filter_list ( filters ) :
cmd = 'iol_bridge add_packet_filter {} {}' . format ( location , filter )
yield from self . _ubridge_send ( cmd ) |
def from_url(cls, url, **kwargs):
    """Downloads a zipped app source code from an url.

    :param url: url to download the app source from
    :param username: optional basic-auth user name (kwarg)
    :param password: optional basic-auth password (kwarg)
    :param headers: optional extra HTTP headers dict (kwarg)
    :param path: local file the zip archive is written to
        (kwarg, default: ``/tmp/app.zip``)
    :param dest: directory the archive is extracted to (kwarg, default: ``/app``)
    :raises errors.DownloadError: if the server does not answer with HTTP 200.

    Returns
        A project instance.
    """
    username = kwargs.get('username')
    password = kwargs.get('password')
    headers = kwargs.get('headers', {})
    path = kwargs.get('path', '/tmp/app.zip')
    dest = kwargs.get('dest', '/app')
    if username and password:
        # BUG FIX: ``b'%s:%s' % (str, str)`` raises TypeError on Python 3;
        # build the credential string first, then encode it.
        auth = base64.b64encode(('%s:%s' % (username, password)).encode('utf8'))
        headers['Authorization'] = 'Basic %s' % auth.decode('utf8')
    r = request.get(url, headers=headers, stream=True)
    if r.status_code != 200:
        err_msg = 'Could not download resource from url (%s): %s'
        err_args = (r.status_code, url)
        raise errors.DownloadError(err_msg % err_args)
    # BUG FIX: the archive was always written to the hard-coded
    # '/tmp/app.zip' even when a custom ``path`` was supplied, so
    # from_zip() could then read a stale or missing file.
    with open(path, 'wb+') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return cls.from_zip(path, dest)
def versionok_for_gui():
    '''Return True if running Python is suitable for GUI Event Integration and deeper IPython integration'''
    # Python 2 must be at least 2.6; Python 3 must be at least 3.2.
    py2_too_old = sys.hexversion < 0x02060000
    py3_too_old = 0x03000000 <= sys.hexversion < 0x03020000
    # Jython ("java...") and IronPython ("cli...") are not supported.
    unsupported_runtime = (sys.platform.startswith("java")
                           or sys.platform.startswith('cli'))
    return not (py2_too_old or py3_too_old or unsupported_runtime)
def loadtitlefont(self):
    """Auxiliary method to load font if not yet done.

    Lazily loads the PIL font ``courR18.pil`` from the module-level
    ``fontsdir`` into ``self.titlefont`` on first use; subsequent calls
    are no-ops.
    """
    # Idiom fix: compare to None with ``is`` rather than ``==``; stale
    # commented-out debug prints removed.
    if self.titlefont is None:
        self.titlefont = imft.load_path(os.path.join(fontsdir, "courR18.pil"))
def with_zero_or_more(cls, converter, pattern=None, listsep=","):
    """Creates a type converter function for a list<T> with 0..N items
    by using the type converter for one item of T.

    :param converter: Type converter (function) for data type T.
    :param pattern: Regexp pattern for an item (=converter.pattern).
    :param listsep: Optional list separator between items (default: ',')
    :return: type-converter for list<T>
    """
    cardinality = Cardinality.zero_or_more
    # Fall back to the converter's own pattern, then the class default.
    item_pattern = pattern or getattr(converter, "pattern", cls.default_pattern)

    def convert_list0(text, m=None):
        stripped = text.strip() if text else text
        if not stripped:
            # Empty or whitespace-only input yields an empty list.
            return []
        return [converter(item.strip()) for item in stripped.split(listsep)]

    convert_list0.pattern = cardinality.make_pattern(item_pattern, listsep)
    # OLD: convert_list0.group_count = ...
    convert_list0.regex_group_count = cardinality.compute_group_count(item_pattern)
    return convert_list0
def parsexml(self, node, ParentClass=None):
    """Internal method.

    This is the main XML parser, will invoke class-specific XML parsers.

    Accepts an ElementTree tree, an element, or a string; dispatches on the
    FoLiA or D-Coi namespace of the root tag. For a top-level FoLiA/DCOI
    document it populates ``self`` (id, version, metadata, text bodies) and
    returns None; for any other recognised tag it delegates to the matching
    element class and returns its parse result.
    """
    # Normalise the input: unwrap a whole tree, or parse a raw XML string.
    if (LXE and isinstance(node, ElementTree._ElementTree)) or (not LXE and isinstance(node, ElementTree.ElementTree)):  # pylint: disable=protected-access
        node = node.getroot()
    elif isstring(node):
        node = xmltreefromstring(node).getroot()
    if node.tag.startswith('{' + NSFOLIA + '}'):
        foliatag = node.tag[nslen:]
        if foliatag == "FoLiA":
            if self.debug >= 1:
                print("[PyNLPl FoLiA DEBUG] Found FoLiA document", file=stderr)
            # The document ID may appear under several attribute names,
            # tried in order of preference: xml:id, XMLid, id.
            try:
                self.id = node.attrib['{http://www.w3.org/XML/1998/namespace}id']
            except KeyError:
                try:
                    self.id = node.attrib['XMLid']
                except KeyError:
                    try:
                        self.id = node.attrib['id']
                    except KeyError:
                        raise Exception("FoLiA Document has no ID!")
            if 'version' in node.attrib:
                self.version = node.attrib['version']
                # Warn (but continue) when the document's FoLiA version is
                # newer than what this library supports.
                if checkversion(self.version) > 0:
                    print("WARNING!!! Document uses a newer version of FoLiA than this library! (" + self.version + " vs " + FOLIAVERSION + "). Any possible subsequent failures in parsing or processing may probably be attributed to this. Upgrade to foliapy (https://github.com/proycon/foliapy) to remedy this.", file=sys.stderr)
            else:
                self.version = None
            if 'external' in node.attrib:
                self.external = (node.attrib['external'] == 'yes')
                if self.external and not self.parentdoc:
                    raise DeepValidationError("Document is marked as external and should not be loaded independently. However, no parentdoc= has been specified!")
            for subnode in node:
                if subnode.tag == '{' + NSFOLIA + '}metadata':
                    self.parsemetadata(subnode)
                elif (subnode.tag == '{' + NSFOLIA + '}text' or subnode.tag == '{' + NSFOLIA + '}speech') and self.mode == Mode.MEMORY:
                    # Text/speech bodies are only materialised in MEMORY mode.
                    if self.debug >= 1:
                        print("[PyNLPl FoLiA DEBUG] Found Text", file=stderr)
                    e = self.parsexml(subnode)
                    if e is not None:
                        self.data.append(e)
        else:  # generic handling (FoLiA): delegate to the element class
            if not foliatag in XML2CLASS:
                raise Exception("Unknown FoLiA XML tag: " + foliatag)
            Class = XML2CLASS[foliatag]
            return Class.parsexml(node, self)
    elif node.tag == '{' + NSDCOI + '}DCOI':
        if self.debug >= 1:
            print("[PyNLPl FoLiA DEBUG] Found DCOI document", file=stderr)
        self.autodeclare = True
        # Same ID lookup as above, but D-Coi prefers 'id' over 'XMLid'.
        try:
            self.id = node.attrib['{http://www.w3.org/XML/1998/namespace}id']
        except KeyError:
            try:
                self.id = node.attrib['id']
            except KeyError:
                try:
                    self.id = node.attrib['XMLid']
                except KeyError:
                    raise Exception("D-Coi Document has no ID!")
        for subnode in node:
            if subnode.tag == '{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT':
                self.metadatatype = MetaDataType.IMDI
                self.setimdi(subnode)
            elif subnode.tag == '{' + NSDCOI + '}text':
                if self.debug >= 1:
                    print("[PyNLPl FoLiA DEBUG] Found Text", file=stderr)
                e = self.parsexml(subnode)
                if e is not None:
                    self.data.append(e)
    elif node.tag.startswith('{' + NSDCOI + '}'):  # generic handling (D-Coi)
        if node.tag[nslendcoi:] in XML2CLASS:
            Class = XML2CLASS[node.tag[nslendcoi:]]
            return Class.parsexml(node, self)
        elif node.tag[nslendcoi:][0:3] == 'div':  # support for div0, div1, etc:
            Class = Division
            return Class.parsexml(node, self)
        elif node.tag[nslendcoi:] == 'item':  # support for listitem
            Class = ListItem
            return Class.parsexml(node, self)
        elif node.tag[nslendcoi:] == 'figDesc':  # support for description in figures
            Class = Description
            return Class.parsexml(node, self)
        else:
            raise Exception("Unknown DCOI XML tag: " + node.tag)
    else:
        raise Exception("Unknown FoLiA XML tag: " + node.tag)
    # NOTE(review): runs any deferred validation after a full document parse;
    # semantics of pendingvalidation() are defined elsewhere in this class.
    self.pendingvalidation()
def text(self) -> str:
    """Returns text of the current page.

    :return: text of the current page
    """
    body = self.summary
    if body:
        # Separate the summary from the section bodies with a blank line.
        body += "\n\n"
    for section in self.sections:
        body += section.full_text(level=2)
    return body.strip()
def _set_filter_change_update_delay(self, v, load=False):
    """Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_filter_change_update_delay is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_filter_change_update_delay() directly.

    YANG Description: Change filter change update delay timer
    """
    # Normalise the incoming value through its declared union type, if any.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG list type; construction
        # validates it against the model (keys, extensions, namespace).
        t = YANGDynClass(v, base=YANGListType("filter_delay_value", filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the standard pyangbind error payload describing the
        # expected generated type.
        raise ValueError({'error-string': """filter_change_update_delay must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""", })
    self.__filter_change_update_delay = t
    # Give subclasses a post-assignment hook, if they define one.
    if hasattr(self, '_set'):
        self._set()
def process(self, flightmode_selections, _flightmodes, block=True):
    '''process and display graph'''
    # Reset all per-run state before re-scanning the logs.
    self.msg_types = set()
    self.multiplier = []
    self.field_types = []
    self.xlim = None
    self.flightmode_list = _flightmodes
    self.x = []
    self.y = []
    self.modes = []
    self.axes = []
    self.first_only = []
    # Message type names are the ALL-CAPS identifiers inside each field
    # expression (e.g. "GPS" in "GPS.Lat").
    caps_pattern = re.compile('[A-Z_][A-Z0-9_]+')
    for field_expr in self.fields:
        caps = set(caps_pattern.findall(field_expr))
        self.msg_types |= caps
        self.field_types.append(caps)
        self.y.append([])
        self.x.append([])
        self.axes.append(1)
        self.first_only.append(False)
    timeshift = self.timeshift  # read kept for parity; unused in this method
    for mlog in self.mav_list:
        self.process_mav(mlog, flightmode_selections)
def duty_cycle_sp(self):
    """Writing sets the duty cycle setpoint. Reading returns the current value.
    Units are in percent. Valid values are -100 to 100. A negative value causes
    the motor to rotate in reverse."""
    # get_attr_int returns (refreshed attribute handle, integer value);
    # cache the handle for the next read.
    attr_handle, value = self.get_attr_int(self._duty_cycle_sp, 'duty_cycle_sp')
    self._duty_cycle_sp = attr_handle
    return value
def write(self, tid, data, offset, fh):
    """Write operation. Applicable only for control files - updateResults is called.

    Parameters
    ----------
    tid : str
        Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
    data : bytes
        Ignored.
    offset : int
        Ignored.
    fh : int
        File descriptor.

    Returns
    -------
    int
        Length of data written.
    """
    # Map the control-file name to a paging direction; anything else is
    # not writable.
    directions = {" next": True, " prev": False}
    try:
        direction = directions[tid[1]]
    except KeyError:
        raise FuseOSError(errno.EPERM)
    try:
        self.searches[tid[0]].updateResults(direction)
    except KeyError:
        # Unknown search identifier.
        raise FuseOSError(errno.EINVAL)
    except ConnectionError:
        # sth went wrong with the backend connection...
        raise FuseOSError(errno.ENETDOWN)
    return len(data)
def _shape_text(self, text, colsep=u"\t", rowsep=u"\n", transpose=False, skiprows=0, comments='#'):
    """Decode the shape of the given text"""
    assert colsep != rowsep
    rows = []
    for raw_row in text.split(rowsep)[skiprows:]:
        stripped = to_text_string(raw_row).strip()
        # Skip blank lines and comment lines.
        if not stripped or stripped.startswith(comments):
            continue
        cells = to_text_string(raw_row).split(colsep)
        rows.append([try_to_parse(to_text_string(cell)) for cell in cells])
    # Pad short rows: zip_longest transposes rows into columns, filling
    # missing elements with np.nan (or None when numpy is unavailable).
    if programs.is_module_installed('numpy'):
        from numpy import nan
        columns = list(zip_longest(*rows, fillvalue=nan))
    else:
        columns = list(zip_longest(*rows, fillvalue=None))
    # Transpose back to the expected row-major layout.
    out = [[column[i] for column in columns] for i in range(len(columns[0]))]
    if transpose:
        return [[row[i] for row in out] for i in range(len(out[0]))]
    return out
def DeleteContainer(self, collection_link, options=None):
    """Deletes a collection.

    :param str collection_link:
        The link to the document collection.
    :param dict options:
        The request options for the request.

    :return:
        The deleted Collection.
    :rtype:
        dict
    """
    request_options = {} if options is None else options
    path = base.GetPathFromLink(collection_link)
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
    return self.DeleteResource(path, 'colls', collection_id, None, request_options)
def _find_playlist ( self ) :
"""Internal method to populate the object given the ` ` id ` ` or
` ` reference _ id ` ` that has been set in the constructor .""" | data = None
if self . id :
data = self . connection . get_item ( 'find_playlist_by_id' , playlist_id = self . id )
elif self . reference_id :
data = self . connection . get_item ( 'find_playlist_by_reference_id' , reference_id = self . reference_id )
if data :
self . _load ( data ) |
def Uniform(low, high, tag=None):
    """A Uniform random variate

    Parameters
    ----------
    low : scalar
        Lower bound of the distribution support.
    high : scalar
        Upper bound of the distribution support.
    tag : optional
        Label forwarded unchanged to ``uv``.

    Raises
    ------
    ValueError
        If ``low`` is not strictly less than ``high``.
    """
    # BUG FIX: input validation used ``assert``, which is silently stripped
    # when Python runs with -O; raise a real exception instead.
    if not low < high:
        raise ValueError('Uniform "low" must be less than "high"')
    return uv(ss.uniform(loc=low, scale=high - low), tag=tag)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.