signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _make_bright_pixel_mask(intensity_mean, mask_factor=5.0):
    """Build a boolean mask flagging the brightest pixels.

    A pixel is flagged when its energy-summed intensity exceeds
    ``mask_factor`` times the mean summed intensity.  The same pixel
    mask is replicated across every energy bin of the map.

    :param intensity_mean: map object exposing ``.data`` (n_ebins x n_pix
        array) and ``.hpx`` (HEALPix geometry).
    :param mask_factor: brightness threshold as a multiple of the mean.
    :return: an ``HpxMap`` holding the boolean mask.
    """
    summed = intensity_mean.data.sum(0)
    bright = summed > (mask_factor * summed.mean())
    mask = np.zeros(intensity_mean.data.shape, bool)
    # Broadcasting copies the single per-pixel mask into every energy bin,
    # replacing the original per-bin assignment loop.
    mask[:] = bright
    return HpxMap(mask, intensity_mean.hpx)
def dinf_downslope_direction(a):
    """Get the downslope directions of a D-inf direction value.

    Args:
        a: Dinf value
    Returns:
        downslope directions (a list of one or two D8 direction codes)
    """
    taud, d = DinfUtil.check_orthogonal(a)
    if d != -1:
        # Angle coincides exactly with a cardinal/diagonal direction.
        return [d]
    # Otherwise flow splits between the two D8 cells bracketing the angle;
    # walk the sector upper bounds in order and return the matching pair.
    sectors = (
        (FlowModelConst.ne, [1, 2]),  # 129 = 1 + 128
        (FlowModelConst.n, [2, 3]),   # 192 = 128 + 64
        (FlowModelConst.nw, [3, 4]),  # 96 = 64 + 32
        (FlowModelConst.w, [4, 5]),   # 48 = 32 + 16
        (FlowModelConst.sw, [5, 6]),  # 24 = 16 + 8
        (FlowModelConst.s, [6, 7]),   # 12 = 8 + 4
        (FlowModelConst.se, [7, 8]),  # 6 = 4 + 2
    )
    for upper_bound, pair in sectors:
        if a < upper_bound:
            return pair
    return [8, 1]  # 3 = 2 + 1
def dump_json_file(json_data, pwd_dir_path, dump_file_name):
    """Dump ``json_data`` as JSON to ``<pwd_dir_path>/logs/<dump_file_name>``.

    Objects the standard encoder cannot serialize are stringified with
    ``str``; a failed dump is logged rather than raised.

    :param json_data: data structure to serialize.
    :param pwd_dir_path: directory under which a ``logs`` folder is created.
    :param dump_file_name: file name inside the ``logs`` folder.
    """
    class PythonObjectEncoder(json.JSONEncoder):
        def default(self, obj):
            try:
                # BUG FIX: the base-class method takes only ``obj``; the
                # previous ``super().default(self, obj)`` passed self twice,
                # raising TypeError unconditionally (masked by the except).
                return super().default(obj)
            except TypeError:
                # Fall back to the string representation for anything the
                # base encoder cannot handle.
                return str(obj)

    logs_dir_path = os.path.join(pwd_dir_path, "logs")
    if not os.path.isdir(logs_dir_path):
        os.makedirs(logs_dir_path)
    dump_file_path = os.path.join(logs_dir_path, dump_file_name)
    try:
        with io.open(dump_file_path, 'w', encoding='utf-8') as outfile:
            if is_py2:
                # On Python 2, json.dumps may return ``str``; wrap in
                # ``unicode`` so io.open's text stream accepts it.
                outfile.write(unicode(json.dumps(
                    json_data, indent=4, separators=(',', ':'),
                    ensure_ascii=False, cls=PythonObjectEncoder)))
            else:
                json.dump(json_data, outfile, indent=4,
                          separators=(',', ':'), ensure_ascii=False,
                          cls=PythonObjectEncoder)
        msg = "dump file: {}".format(dump_file_path)
        logger.color_print(msg, "BLUE")
    except TypeError as ex:
        msg = "Failed to dump json file: {}\nReason: {}".format(
            dump_file_path, ex)
        logger.color_print(msg, "RED")
def fetchnumpybatches(self):
    """Returns an iterator over all rows in the active result set generated
    with ``execute()`` or ``executemany()``.

    :return: An iterator you can use to iterate over batches of rows of the
        result set.  Each batch consists of an ``OrderedDict`` of NumPy
        ``MaskedArray`` instances.  See ``fetchallnumpy()`` for details.
    """
    batch_iter = self._numpy_batch_generator()
    # First element of each DB-API description tuple is the column name.
    names = [col_desc[0] for col_desc in self.description]
    for batch in batch_iter:
        yield OrderedDict(zip(names, batch))
def _parse_and_verify(self, candidate, offset):
    """Parses a phone number from the candidate using phonenumberutil.parse
    and verifies it matches the requested leniency.  If parsing and
    verification succeed, a corresponding PhoneNumberMatch is returned,
    otherwise this method returns None.

    Arguments:
    candidate -- The candidate match.
    offset -- The offset of candidate within self.text.

    Returns the parsed and validated phone number match, or None.
    """
    try:
        # Check the candidate doesn't contain any formatting which would
        # indicate that it really isn't a phone number.
        if (not fullmatch(_MATCHING_BRACKETS, candidate) or
                _PUB_PAGES.search(candidate)):
            return None
        # If leniency is set to VALID or stricter, we also want to skip
        # numbers that are surrounded by Latin alphabetic characters, to
        # skip cases like abc8005001234 or 8005001234def.
        if self.leniency >= Leniency.VALID:
            # If the candidate is not at the start of the text, and does
            # not start with phone-number punctuation, check the previous
            # character.
            if (offset > 0 and not _LEAD_PATTERN.match(candidate)):
                previous_char = self.text[offset - 1]
                # We return None if it is a latin letter or an invalid
                # punctuation symbol.
                if (self._is_invalid_punctuation_symbol(previous_char) or
                        self._is_latin_letter(previous_char)):
                    return None
            # Symmetric check for the character immediately after the match.
            last_char_index = offset + len(candidate)
            if last_char_index < len(self.text):
                next_char = self.text[last_char_index]
                if (self._is_invalid_punctuation_symbol(next_char) or
                        self._is_latin_letter(next_char)):
                    return None
        numobj = parse(candidate, self.preferred_region, keep_raw_input=True)
        if _verify(self.leniency, numobj, candidate, self):
            # We used parse(keep_raw_input=True) to create this number,
            # but for now we don't return the extra values parsed.
            # TODO: stop clearing all values here and switch all users
            # over to using raw_input rather than the raw_string of
            # PhoneNumberMatch.
            numobj.country_code_source = CountryCodeSource.UNSPECIFIED
            numobj.raw_input = None
            numobj.preferred_domestic_carrier_code = None
            return PhoneNumberMatch(offset, candidate, numobj)
    except NumberParseException:
        # Candidate failed to parse as a number: ignore and continue.
        pass
    return None
def _is_balanced ( root ) :
"""Return the height if the binary tree is balanced , - 1 otherwise .
: param root : Root node of the binary tree .
: type root : binarytree . Node | None
: return : Height if the binary tree is balanced , - 1 otherwise .
: rtype : int""" | if root is None :
return 0
left = _is_balanced ( root . left )
if left < 0 :
return - 1
right = _is_balanced ( root . right )
if right < 0 :
return - 1
return - 1 if abs ( left - right ) > 1 else max ( left , right ) + 1 |
def argmax_with_score(logits, axis=None):
    """Argmax along with the value at the argmax position (the score).

    :param logits: a Tensor of logits.
    :param axis: axis to take the argmax over; defaults to the last axis.
    :return: ``(predictions, scores)`` — argmax indices and the logit value
        at each argmax position.
    """
    if axis is None:
        # BUG FIX: the previous ``axis = axis or last`` treated an explicit
        # axis=0 as falsy and silently replaced it with the last axis.
        axis = len(logits.get_shape()) - 1
    # NOTE(review): the flatten/gather below assumes ``axis`` is the last
    # axis; for any other axis the gathered scores would be wrong — confirm
    # callers only use the default.
    predictions = tf.argmax(logits, axis=axis)
    logits_shape = shape_list(logits)
    prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
    prefix_size = 1
    for d in prefix_shape:
        prefix_size *= d
    # Flatten to extract scores
    flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
    flat_predictions = tf.reshape(predictions, [prefix_size])
    flat_indices = tf.stack(
        [tf.range(tf.to_int64(prefix_size)), tf.to_int64(flat_predictions)],
        axis=1)
    flat_scores = tf.gather_nd(flat_logits, flat_indices)
    # Unflatten
    scores = tf.reshape(flat_scores, prefix_shape)
    return predictions, scores
def _onMotion(self, evt):
    """Forward a wx mouse-move event to the matplotlib base canvas."""
    x_pos = evt.GetX()
    # wx measures y from the top of the window; matplotlib measures from
    # the bottom of the figure bbox, so flip the coordinate.
    y_pos = self.figure.bbox.height - evt.GetY()
    evt.Skip()
    FigureCanvasBase.motion_notify_event(self, x_pos, y_pos, guiEvent=evt)
def get_value(self, label, takeable=False):
    """Retrieve single value at passed index label.

    .. deprecated:: 0.21.0
        Please use .at[] or .iat[] accessors.

    Parameters
    ----------
    index : label
    takeable : interpret the index as indexers, default False

    Returns
    -------
    value : scalar value
    """
    # Emit the deprecation warning at the caller's frame, then delegate to
    # the private implementation.
    warnings.warn(
        "get_value is deprecated and will be removed "
        "in a future release. Please use "
        ".at[] or .iat[] accessors instead",
        FutureWarning,
        stacklevel=2,
    )
    return self._get_value(label, takeable=takeable)
def with_metaclass(meta, *bases):
    """Create a base class with metaclass ``meta`` (Python 2/3 compatible).

    copied from
    https://github.com/Byron/bcore/blob/master/src/python/butility/future.py#L15
    """
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, nbases, d):
            if nbases is None:
                # Creating the temporary helper class itself.
                return type.__new__(cls, name, (), d)
            # There may be clients who rely on this attribute to be set to a
            # reasonable value, which is why we set the __metaclass__
            # attribute explicitly.
            # BUG FIX: the key was misspelled '___metaclass__' (three leading
            # underscores), so the guard never matched the real attribute and
            # an existing __metaclass__ would have been clobbered.
            if not PY3 and '__metaclass__' not in d:
                d['__metaclass__'] = meta
            return meta(name, bases, d)
    return metaclass(meta.__name__ + 'Helper', None, {})
def get_umbrella_sampling_data(ntherm=11, us_fc=20.0, us_length=500,
                               md_length=1000, nmd=20):
    """Continuous MCMC process in an asymmetric double well potential using
    umbrella sampling.

    Parameters
    ----------
    ntherm : int, optional, default=11
        Number of umbrella states.
    us_fc : double, optional, default=20.0
        Force constant in kT/length^2 for each umbrella.
    us_length : int, optional, default=500
        Length in steps of each umbrella trajectory.
    md_length : int, optional, default=1000
        Length in steps of each unbiased trajectory.
    nmd : int, optional, default=20
        Number of unbiased trajectories.

    Returns
    -------
    dict - keys shown below in brackets
        Trajectory data from umbrella sampling (us_trajs) and unbiased
        (md_trajs) MCMC runs and their discretised counterparts
        (us_dtrajs + md_dtrajs + centers).  The umbrella sampling parameters
        (us_centers + us_force_constants) are in the same order as the
        umbrella sampling trajectories.  Energies are given in kT, lengths
        in arbitrary units.
    """
    sampler = _DWS()
    data = sampler.us_sample(ntherm=ntherm, us_fc=us_fc,
                             us_length=us_length, md_length=md_length,
                             nmd=nmd)
    # Attach the umbrella centers alongside the sampled trajectories.
    data.update(centers=sampler.centers)
    return data
def expand_ranges(value):
    """Expand an eventual ``[a-b]`` or ``[a-b/step]`` range inside *value*.

    :param str value: The value to be "expanded".
    :return: A generator yielding the different resulting values from
        expanding the eventual ranges present in the input value.  Nested
        ranges are expanded recursively (left-most first).
    """
    groups = RANGE_REGEX.match(value).groupdict()
    # the regex is supposed to always match..
    lower = groups['from']
    if lower is None:
        # No range present: the value is yielded unchanged.
        yield value
        return
    prefix = groups['before']
    suffix = groups['after']
    # The upper bound 'to' is inclusive, hence the +1.
    upper = int(groups['to']) + 1
    step = int(groups['step'] or 1)
    for idx in range(int(lower), upper, step):
        # Recurse so that any further ranges in the rebuilt string are
        # expanded as well.
        for expanded in expand_ranges("%s%s%s" % (prefix, idx, suffix)):
            yield expanded
def _remove_bound_conditions ( agent , keep_criterion ) :
"""Removes bound conditions of agent such that keep _ criterion is False .
Parameters
agent : Agent
The agent whose bound conditions we evaluate
keep _ criterion : function
Evaluates removal _ criterion ( a ) for each agent a in a bound condition
and if it evaluates to False , removes a from agent ' s bound _ conditions""" | new_bc = [ ]
for ind in range ( len ( agent . bound_conditions ) ) :
if keep_criterion ( agent . bound_conditions [ ind ] . agent ) :
new_bc . append ( agent . bound_conditions [ ind ] )
agent . bound_conditions = new_bc |
def set_measurements(test):
    """Test phase that sets a measurement."""
    # Record the three levels in order, pausing between each one.
    for name, level in (('level_none', 0), ('level_some', 8),
                        ('level_all', 9)):
        setattr(test.measurements, name, level)
        time.sleep(1)
    # Read the last measurement back through the API and sanity-check it.
    recorded = test.get_measurement('level_all')
    assert recorded.value == 9
def tag(self, tokens):
    """Return a list of ((token, tag), label) tuples for a given list of
    (token, tag) tuples."""
    if not self._loaded_model:
        # Lazily load the model the first time we tag.
        self.load(self.model)
    feature_vectors = [self._get_features(tokens, idx)
                       for idx in range(len(tokens))]
    predicted_labels = self._tagger.tag(feature_vectors)
    return list(zip(tokens, predicted_labels))
def replace_lines_in_files(search_string, replacement_line):
    """Finds lines containing the search string and replaces the whole line
    with the specified replacement string.

    A file dialog asks the user which files to process; each selected file
    is copied to ``<path>.backup`` before being modified in place.
    """
    # have the user select some files
    paths = _s.dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []:
        return
    for path in paths:
        # Keep an untouched copy next to the original before editing.
        _shutil.copy(path, path + ".backup")
        lines = read_lines(path)
        for n in range(0, len(lines)):
            if lines[n].find(search_string) >= 0:
                # Echo the line being replaced so the user can audit changes.
                print(lines[n])
                lines[n] = replacement_line.strip() + "\n"
        # NOTE(review): ``join`` here takes (iterable, separator) — looks
        # like a project helper, not the builtin str.join; confirm.
        write_to_file(path, join(lines, ''))
    return
def plot_mag(fignum, datablock, s, num, units, norm):
    """plots magnetization against (de)magnetizing temperature or field

    Parameters
    _________
    fignum : matplotlib figure number for plotting
    datablock : nested list of [step, 0, 0, magnetization, 1, quality]
    s : string for title
    num : matplotlib figure number, can set to 1
    units : [T, K, U] for tesla, kelvin or arbitrary
    norm : [True, False] if True, normalize

    Effects
    _______
    plots figure
    """
    global globals, graphmenu
    # Collect all magnetizations; after sorting, Ints[-1] (the maximum) is
    # used as the normalization constant below.
    Ints = []
    for plotrec in datablock:
        Ints.append(plotrec[3])
    Ints.sort()
    plt.figure(num=fignum)
    # T: step values; M: magnetizations; Tv: step values duplicated for the
    # vector-difference curve; recnum: running record counter.
    T, M, Tv, recnum = [], [], [], 0
    # Mex/Tex: measurements flagged bad ('excluded'); Vdif: vector
    # differences between consecutive good measurements.
    Mex, Tex, Vdif = [], [], []
    recbak = []
    for rec in datablock:
        if rec[5] == 'g':
            # 'g' = good measurement; convert the step value to plot units.
            if units == "T":
                T.append(rec[0] * 1e3)
                Tv.append(rec[0] * 1e3)
                if recnum > 0:
                    Tv.append(rec[0] * 1e3)
            elif units == "U":
                T.append(rec[0])
                Tv.append(rec[0])
                if recnum > 0:
                    Tv.append(rec[0])
            elif units == "K":
                T.append(rec[0] - 273)
                Tv.append(rec[0] - 273)
                if recnum > 0:
                    Tv.append(rec[0] - 273)
            elif "T" in units and "K" in units:
                # Mixed units: small steps (<1) are fields in T, larger ones
                # are temperatures in K.
                if rec[0] < 1.:
                    T.append(rec[0] * 1e3)
                    Tv.append(rec[0] * 1e3)
                else:
                    T.append(rec[0] - 273)
                    Tv.append(rec[0] - 273)
                # NOTE(review): indentation lost in source — this duplicate
                # append always uses the Kelvin conversion; confirm intent.
                if recnum > 0:
                    Tv.append(rec[0] - 273)
            else:
                T.append(rec[0])
                Tv.append(rec[0])
                if recnum > 0:
                    Tv.append(rec[0])
            if norm:
                M.append(old_div(rec[3], Ints[-1]))
            else:
                M.append(rec[3])
            if recnum > 0 and len(rec) > 0 and len(recbak) > 0:
                v = []
                if recbak[0] != rec[0]:
                    # Vector difference between consecutive steps.
                    V0 = pmag.dir2cart([recbak[1], recbak[2], recbak[3]])
                    V1 = pmag.dir2cart([rec[1], rec[2], rec[3]])
                    for el in range(3):
                        v.append(abs(V1[el] - V0[el]))
                    vdir = pmag.cart2dir(v)
                    # append vector difference
                    Vdif.append(old_div(vdir[2], Ints[-1]))
                Vdif.append(old_div(vdir[2], Ints[-1]))
            recbak = []
            for el in rec:
                recbak.append(el)
            delta = .005 * M[0]
            if num == 1:
                # Label every other point with its record number.
                if recnum % 2 == 0:
                    plt.text(T[-1] + delta, M[-1], (' ' + str(recnum)),
                             fontsize=9)
            recnum += 1
        else:
            # Measurement flagged bad: collect for the 'excluded' scatter.
            if rec[0] < 200:
                Tex.append(rec[0] * 1e3)
            if rec[0] >= 200:
                Tex.append(rec[0] - 273)
            Mex.append(old_div(rec[3], Ints[-1]))
            recnum += 1
    if globals != 0:
        # Export the plotted series through the module-level globals object.
        globals.MTlist = T
        globals.MTlisty = M
    if len(Mex) > 0 and len(Tex) > 0:
        plt.scatter(Tex, Mex, marker='d', color='k')
    if len(Vdif) > 0:
        # Close off the vector-difference curve.
        Vdif.append(old_div(vdir[2], Ints[-1]))
        Vdif.append(0)
    if Tv:
        Tv.append(Tv[-1])
    plt.plot(T, M)
    plt.plot(T, M, 'ro')
    if len(Tv) == len(Vdif) and norm:
        plt.plot(Tv, Vdif, 'g-')
    if units == "T":
        plt.xlabel("Step (mT)")
    elif units == "K":
        plt.xlabel("Step (C)")
    elif units == "J":
        plt.xlabel("Step (J)")
    else:
        plt.xlabel("Step [mT,C]")
    if norm == 1:
        plt.ylabel("Fractional Magnetization")
    if norm == 0:
        plt.ylabel("Magnetization")
    plt.axvline(0, color='k')
    plt.axhline(0, color='k')
    tstring = s
    plt.title(tstring)
    plt.draw()
def can_create(self, locator):
    """Checks if this factory is able to create component by given locator.

    This method searches for all registered components and returns
    a locator for component it is able to create that matches the given
    locator.  If the factory is not able to create a requested component
    it returns None.

    :param locator: a locator to identify component to be created.
    :return: a locator for a component that the factory is able to create.
    """
    if locator is None:
        raise Exception("Locator cannot be null")
    # Iterate from the latest factories
    for factory in reversed(self._factories):
        # BUG FIX: previously the loop reassigned ``locator`` with each
        # factory's result, so once a factory returned None every later
        # factory received None instead of the original locator.
        resolved = factory.can_create(locator)
        if resolved is not None:
            return resolved
    return None
def mouseMoveEvent(self, event):
    """Handle the mouse move event for a drag operation."""
    # NOTE: a drag-start implementation (distance check against a stored
    # drag origin, QApplication.startDragDistance) was sketched here but is
    # intentionally disabled; the event is simply forwarded to the wrapped
    # widget's default handler.
    target = self.widget
    type(target).mouseMoveEvent(target, event)
def get_storage(clear=False):
    """Helper function to get annotation storage on the portal.

    :param clear: If True is passed in, annotations will be cleared.
    :returns: portal annotations
    :rtype: IAnnotations
    """
    portal = getUtility(ISiteRoot)
    annotations = IAnnotations(portal)
    # (Re-)initialize the bucket when missing or when a reset is requested.
    needs_init = ANNOTATION_KEY not in annotations or clear
    if needs_init:
        annotations[ANNOTATION_KEY] = OOBTree()
    return annotations[ANNOTATION_KEY]
def CreateClass(self, *args, **kwargs):
    """Override the CreateClass method in MOFWBEMConnection.

    For a description of the parameters, see
    :meth:`pywbem.WBEMConnection.CreateClass`.
    """
    # NewClass may arrive positionally or by keyword.
    cc = args[0] if args else kwargs['NewClass']
    namespace = self.getns()
    try:
        self.compile_ordered_classnames.append(cc.classname)
        # The following generates an exception for each new ns
        self.classes[self.default_namespace][cc.classname] = cc
    except KeyError:
        self.classes[namespace] = NocaseDict({cc.classname: cc})
    # Validate that references and embedded instance properties, methods,
    # etc. have classes that exist in the repo.  This also initiates the
    # mechanism that insures that prerequisite classes are inserted into
    # the repo.
    objects = list(cc.properties.values())
    for meth in cc.methods.values():
        objects += list(meth.parameters.values())
    for obj in objects:
        # Validate that reference_class exists in repo
        if obj.type == 'reference':
            try:
                self.GetClass(obj.reference_class, LocalOnly=True,
                              IncludeQualifiers=True)
            except CIMError as ce:
                if ce.status_code == CIM_ERR_NOT_FOUND:
                    raise CIMError(
                        CIM_ERR_INVALID_PARAMETER,
                        _format("Class {0!A} referenced by element {1!A} "
                                "of class {2!A} in namespace {3!A} does "
                                "not exist",
                                obj.reference_class, obj.name,
                                cc.classname, self.getns()),
                        conn_id=self.conn_id)
                raise
        elif obj.type == 'string':
            if 'EmbeddedInstance' in obj.qualifiers:
                eiqualifier = obj.qualifiers['EmbeddedInstance']
                try:
                    self.GetClass(eiqualifier.value, LocalOnly=True,
                                  IncludeQualifiers=False)
                except CIMError as ce:
                    if ce.status_code == CIM_ERR_NOT_FOUND:
                        raise CIMError(
                            CIM_ERR_INVALID_PARAMETER,
                            _format("Class {0!A} specified by "
                                    "EmbeddInstance qualifier on element "
                                    "{1!A} of class {2!A} in namespace "
                                    "{3!A} does not exist",
                                    eiqualifier.value, obj.name,
                                    cc.classname, self.getns()),
                            conn_id=self.conn_id)
                    raise
    # Resolve the class against the qualifier repo and store the resolved
    # version under its namespace.
    ccr = self.conn._resolve_class(  # pylint: disable=protected-access
        cc, namespace, self.qualifiers[namespace])
    if namespace not in self.classes:
        self.classes[namespace] = NocaseDict()
    self.classes[namespace][ccr.classname] = ccr
    try:
        self.class_names[namespace].append(ccr.classname)
    except KeyError:
        self.class_names[namespace] = [ccr.classname]
def add_constraint(self, *args, **kwargs):
    """Add a constraint to the bundle.

    args can be string representation (length 1)
    func and strings to pass to function
    """
    # TODO: be smart enough to take kwargs (especially for undoing a
    # remove_constraint) for kind, value (expression),
    redo_kwargs = deepcopy(kwargs)
    if (len(args) == 1 and isinstance(args[0], str) and
            not _get_add_func(_constraint, args[0],
                              return_none_if_not_found=True)):
        # then only the expression has been passed,
        # we just need to pass it on to constraints.custom
        func = constraint.custom
        func_args = args
    elif (len(args) == 2 and
            all([isinstance(arg, Parameter) or
                 isinstance(arg, ConstraintParameter) for arg in args])):
        # then we have 2 constraint expressions
        func = constraint.custom
        func_args = args
    elif len(args) == 0:
        # then everything is passed through kwargs
        if 'kind' in kwargs.keys():
            func = _get_add_func(_constraint, kwargs['kind'])
        elif 'func' in kwargs.keys():
            func = _get_add_func(_constraint, kwargs['func'])
        elif 'constraint_func' in kwargs.keys():
            func = _get_add_func(_constraint, kwargs['constraint_func'])
        else:
            func = constraint.custom
        func_args = []
        # constraint_param = ConstraintParameter(self, **kwargs)
    else:
        # then we've been passed the function in constraints and its
        # arguments
        func = _get_add_func(_constraint, args[0])
        func_args = args[1:]
    if 'solve_for' in kwargs.keys():
        # solve_for is a twig, we need to pass the parameter
        kwargs['solve_for'] = self.get_parameter(kwargs['solve_for'])
    lhs, rhs, constraint_kwargs = func(self, *func_args, **kwargs)
    # NOTE that any component parameters required have already been
    # created by this point
    constraint_param = ConstraintParameter(
        self, qualifier=lhs.qualifier, component=lhs.component,
        dataset=lhs.dataset, feature=lhs.feature, kind=lhs.kind,
        model=lhs.model, constraint_func=func.__name__,
        constraint_kwargs=constraint_kwargs,
        in_solar_units=func.__name__ not in
        constraint.list_of_constraints_requiring_si,
        value=rhs, default_unit=lhs.default_unit,
        description='expression that determines the constraint')
    newly_constrained_param = constraint_param.get_constrained_parameter()
    # Refuse to double-constrain the same parameter: look for an existing
    # constraint with the same metadata (minus context/twig identifiers).
    check_kwargs = {k: v for k, v in newly_constrained_param.meta.items()
                    if k not in ['context', 'twig', 'uniquetwig']}
    check_kwargs['context'] = 'constraint'
    if len(self._bundle.filter(**check_kwargs)):
        raise ValueError("'{}' is already constrained".format(
            newly_constrained_param.twig))
    # NOTE(review): ``func.func_name`` is the Python-2-only spelling of
    # ``func.__name__`` (used above) — confirm this module still targets
    # Python 2.
    metawargs = {'context': 'constraint', 'kind': func.func_name}
    params = ParameterSet([constraint_param])
    constraint_param._update_bookkeeping()
    self._attach_params(params, **metawargs)
    redo_kwargs['func'] = func.func_name
    self._add_history(redo_func='add_constraint', redo_kwargs=redo_kwargs,
                      undo_func='remove_constraint',
                      undo_kwargs={'uniqueid': constraint_param.uniqueid})
    # we should run it now to make sure everything is in-sync
    if conf.interactive_constraints:
        self.run_constraint(uniqueid=constraint_param.uniqueid,
                            skip_kwargs_checks=True)
    else:
        self._delayed_constraints.append(constraint_param.uniqueid)
    return params
def timeout(timeout_time, default):
    '''Decorate a method so it is required to execute in a given time period,
    or return a default value.'''
    def decorator(func):
        def wrapper(*args):
            def _on_alarm(signum, frame):
                raise MethodTimer.DecoratorTimeout()
            # Install our handler, remembering the previous one so it can
            # be restored afterwards.
            previous_handler = signal.signal(signal.SIGALRM, _on_alarm)
            # Trigger the alarm in timeout_time seconds.
            signal.alarm(timeout_time)
            try:
                result = func(*args)
            except MethodTimer.DecoratorTimeout:
                return default
            finally:
                # Always restore the old handler and cancel the alarm.
                signal.signal(signal.SIGALRM, previous_handler)
                signal.alarm(0)
            return result
        return wrapper
    return decorator
def locate_intersection_ranges(self, starts, stops):
    """Locate the intersection with a set of ranges.

    Parameters
    ----------
    starts : array_like, int
        Range start values.
    stops : array_like, int
        Range stop values.

    Returns
    -------
    loc : ndarray, bool
        Boolean array with location of entries found.
    loc_ranges : ndarray, bool
        Boolean array with location of ranges containing one or more
        entries.

    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
    >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35],
    ...                    [100, 120]])
    >>> starts = ranges[:, 0]
    >>> stops = ranges[:, 1]
    >>> loc, loc_ranges = idx.locate_intersection_ranges(starts, stops)
    >>> idx[loc]
    <SortedIndex shape=(3,) dtype=int64>
    [6, 11, 35]
    """
    # check inputs
    starts = asarray_ndim(starts, 1)
    stops = asarray_ndim(stops, 1)
    check_dim0_aligned(starts, stops)
    # find indices of start and stop values in idx
    start_indices = np.searchsorted(self, starts)
    stop_indices = np.searchsorted(self, stops, side='right')
    # find intervals overlapping at least one value
    loc_ranges = start_indices < stop_indices
    # find values within at least one interval
    # BUG FIX: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool``
    # is the documented replacement and is equivalent here.
    loc = np.zeros(self.shape, dtype=bool)
    for i, j in zip(start_indices[loc_ranges], stop_indices[loc_ranges]):
        loc[i:j] = True
    return loc, loc_ranges
def match(line, keyword):
    """If the first part of line (modulo blanks) matches keyword,
    returns the end of that line.  Otherwise checks if keyword is
    anywhere in the line and returns that section, else returns None."""
    stripped = line.lstrip()
    if stripped.startswith(keyword):
        # Prefix match: return everything after the keyword.
        return stripped[len(keyword):]
    position = stripped.find(keyword)
    if position >= 0:
        # Keyword found mid-line: return from the keyword onwards.
        return stripped[position:]
    return None
def answer(self, c, details):
    """Answer will provide all necessary feedback for the caller.

    Args:
        c (int): HTTP Code
        details (dict): Response payload
    Returns:
        dict: Response payload
    Raises:
        ErrAtlasBadRequest
        ErrAtlasUnauthorized
        ErrAtlasForbidden
        ErrAtlasNotFound
        ErrAtlasMethodNotAllowed
        ErrAtlasConflict
        ErrAtlasServerErrors
    """
    if c in (Settings.SUCCESS, Settings.CREATED, Settings.ACCEPTED):
        return details
    # Map each known error code to its exception type; anything else is
    # treated as a server error (Settings.SERVER_ERRORS).
    error_by_code = {
        Settings.BAD_REQUEST: ErrAtlasBadRequest,
        Settings.UNAUTHORIZED: ErrAtlasUnauthorized,
        Settings.FORBIDDEN: ErrAtlasForbidden,
        Settings.NOTFOUND: ErrAtlasNotFound,
        Settings.METHOD_NOT_ALLOWED: ErrAtlasMethodNotAllowed,
        Settings.CONFLICT: ErrAtlasConflict,
    }
    raise error_by_code.get(c, ErrAtlasServerErrors)(c, details)
def element_to_dict(elem_to_parse, element_path=None, recurse=True):
    """:return: an element losslessly as a dictionary.  If recurse is True,
    the element's children are included, otherwise they are omitted.

    The resulting Dictionary will have the following attributes:
    - name: the name of the element tag
    - text: the text contained by element
    - tail: text immediately following the element
    - attributes: a Dictionary containing element attributes
    - children: a List of converted child elements
    """
    element = get_element(elem_to_parse, element_path)
    if element is None:
        return {}
    children = []
    converted = {
        _ELEM_NAME: element.tag,
        _ELEM_TEXT: element.text,
        _ELEM_TAIL: element.tail,
        _ELEM_ATTRIBS: element.attrib,
        _ELEM_CHILDREN: children,
    }
    if recurse is True:
        for child in element:
            children.append(element_to_dict(child, recurse=recurse))
    return converted
def getConfigDirectory():
    """Determines the platform-specific config directory location for
    ue4cli."""
    if platform.system() == 'Windows':
        # Windows keeps per-user config under %APPDATA%.
        return os.path.join(os.environ['APPDATA'], 'ue4cli')
    # Everything else follows the ~/.config convention.
    return os.path.join(os.environ['HOME'], '.config', 'ue4cli')
def _change_secure_boot_settings(self, property, value):
    """Change secure boot settings on the server.

    :param property: secure-boot property name to patch.
    :param value: new value for the property.
    :raises: IloCommandNotSupportedError if the SecureBoot resource is not
        present, IloError if the PATCH request fails.
    """
    system = self._get_host_details()
    # find the BIOS URI
    if ('links' not in system['Oem']['Hp'] or
            'SecureBoot' not in system['Oem']['Hp']['links']):
        msg = (' "SecureBoot" resource or feature is not '
               'supported on this system')
        raise exception.IloCommandNotSupportedError(msg)
    secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']
    # Change the property required
    new_secure_boot_settings = {}
    new_secure_boot_settings[property] = value
    # perform the patch
    status, headers, response = self._rest_patch(
        secure_boot_uri, None, new_secure_boot_settings)
    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
    # Change the bios setting as a workaround to enable secure boot
    # Can be removed when fixed for Gen9 snap2
    val = self._get_bios_setting('CustomPostMessage')
    # Toggle a trailing space so the written value always differs from the
    # current one, forcing the BIOS change to register.
    val = val.rstrip() if val.endswith(" ") else val + " "
    self._change_bios_setting({'CustomPostMessage': val})
def underscore(name):
    '''Transform CamelCase -> snake_case.'''
    # First split an uppercase-run boundary before a capitalized word,
    # then split any remaining lower/digit-to-upper transitions.
    partly_split = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partly_split).lower()
def update_list_widget(self):
    """Update list widget when radio button is clicked."""
    # Get selected radio button; -1 means no button is checked.
    checked_id = self.input_button_group.checkedId()
    if checked_id <= -1:
        return
    selected_option = list(self._parameter.options.values())[checked_id]
    if selected_option.get('type') != MULTIPLE_DYNAMIC:
        return
    # Populate the list widget with one draggable item per field.
    for field in selected_option.get('value'):
        field_item = QListWidgetItem(self.list_widget)
        field_item.setFlags(
            Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled)
        field_item.setData(Qt.UserRole, field)
        field_item.setText(field)
        self.list_widget.addItem(field_item)
def process_docstring(app, what, name, obj, options, lines):
    """Process the docstring for a given python object.

    Called when autodoc has read and processed a docstring.  ``lines`` is a
    list of docstring lines that this function modifies *in place* to change
    what Sphinx outputs.

    The following settings in conf.py control what styles of docstrings will
    be parsed:

    * ``napoleon_google_docstring`` -- parse Google style docstrings
    * ``napoleon_numpy_docstring`` -- parse NumPy style docstrings

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process.
    what : str
        A string specifying the type of the object to which the docstring
        belongs.  Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : str
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : sphinx.ext.autodoc.Options
        The options given to the directive.
    lines : list of str
        The lines of the docstring; modified in place.

    Notes
    -----
    This function is (to most parts) taken from the
    :mod:`sphinx.ext.napoleon` module, sphinx version 1.3.1, and adapted to
    the classes defined here.
    """
    processed = lines
    # Run each enabled parser in turn; each consumes the previous output.
    if app.config.napoleon_numpy_docstring:
        numpy_doc = ExtendedNumpyDocstring(
            processed, app.config, app, what, name, obj, options)
        processed = numpy_doc.lines()
    if app.config.napoleon_google_docstring:
        google_doc = ExtendedGoogleDocstring(
            processed, app.config, app, what, name, obj, options)
        processed = google_doc.lines()
    # In-place update so autodoc sees the transformed docstring.
    lines[:] = processed[:]
def on_timer(self, evt):
    """Keep watching the mouse displacement via timer.

    Needed since EVT_MOVE doesn't happen once the mouse gets outside the
    frame.  While the left button is held, the frame is either resized
    (Ctrl held) or moved (no Ctrl), and the synth control values are
    updated from the resulting geometry.
    """
    ctrl_is_down = wx.GetKeyState(wx.WXK_CONTROL)
    ms = wx.GetMouseState()
    # New initialization when keys pressed change
    if self._key_state != ctrl_is_down:
        self._key_state = ctrl_is_down
        # Keep state at click
        self._click_ms_x, self._click_ms_y = ms.x, ms.y
        self._click_frame_x, self._click_frame_y = self.Position
        self._click_frame_width, self._click_frame_height = self.ClientSize
        # Avoids refresh when there's no move (stores last mouse state)
        self._last_ms = ms.x, ms.y
        # Quadrant at click (need to know how to resize): +1 when the click
        # is in the right/bottom half, -1 for the left/top half.
        width, height = self.ClientSize
        self._quad_signal_x = 1 if (
            self._click_ms_x - self._click_frame_x) / width > .5 else -1
        self._quad_signal_y = 1 if (
            self._click_ms_y - self._click_frame_y) / height > .5 else -1
    # "Polling watcher" for mouse left button while it's kept down
    if ms.leftDown:
        if self._last_ms != (ms.x, ms.y):  # Moved?
            self._last_ms = (ms.x, ms.y)
            delta_x = ms.x - self._click_ms_x
            delta_y = ms.y - self._click_ms_y
            # Resize
            if ctrl_is_down:
                # New size; factor 2 because the frame stays centered, so
                # each border moves by delta (see Position update below).
                new_w = max(MIN_WIDTH,
                            self._click_frame_width
                            + 2 * delta_x * self._quad_signal_x)
                new_h = max(MIN_HEIGHT,
                            self._click_frame_height
                            + 2 * delta_y * self._quad_signal_y)
                self.ClientSize = new_w, new_h
                self.SendSizeEvent()  # Needed for wxGTK
                # Center should be kept
                center_x = self._click_frame_x + self._click_frame_width / 2
                center_y = self._click_frame_y + self._click_frame_height / 2
                self.Position = (center_x - new_w / 2,
                                 center_y - new_h / 2)
                self.Refresh()
                # Window area drives the volume control.
                self.volume_ctrl.value = (new_h * new_w) / 3e5
            # Move the window
            else:
                self.Position = (self._click_frame_x + delta_x,
                                 self._click_frame_y + delta_y)
                # Find the new center position
                x, y = self.Position
                w, h = self.ClientSize
                cx, cy = x + w / 2, y + h / 2
                # Screen position of the center drives these controls.
                self.mod_ctrl.value = 2.5 * cx
                self.carrier_ctrl.value = 2.5 * cy
                self.angstep.value = (cx + cy) * pi * 2e-4
        # Since left button is kept down, there should be another one shot
        # timer event again, without creating many timers like wx.CallLater
        self._timer.Start(MOUSE_TIMER_WATCH, True)
def update_value_map(layer, exposure_key=None):
    """Assign inasafe values according to definitions for a vector layer.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :return: The classified vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = assign_inasafe_values_steps['output_layer_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']
    keywords = layer.keywords
    inasafe_fields = keywords['inasafe_fields']
    classification = None
    # Pick the source/destination fields based on the layer purpose.
    if keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not inasafe_fields.get(hazard_value_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = hazard_value_field
        new_field = hazard_class_field
        classification = active_classification(layer.keywords, exposure_key)
    elif keywords['layer_purpose'] == layer_purpose_exposure['key']:
        if not inasafe_fields.get(exposure_type_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = exposure_type_field
        new_field = exposure_class_field
    else:
        raise InvalidKeywordsForProcessingAlgorithm
    # It's a hazard layer
    if exposure_key:
        if not active_thresholds_value_maps(keywords, exposure_key):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = active_thresholds_value_maps(keywords, exposure_key)
    # It's exposure layer
    else:
        if not keywords.get('value_map'):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = keywords.get('value_map')
    unclassified_column = inasafe_fields[old_field['key']]
    unclassified_index = layer.fields().lookupField(unclassified_column)
    # Invert {class: [values]} into {value: class} for O(1) lookups below.
    reversed_value_map = {}
    for inasafe_class, values in list(value_map.items()):
        for val in values:
            reversed_value_map[val] = inasafe_class
    # Create the new classified field from its definition.
    classified_field = QgsField()
    classified_field.setType(new_field['type'])
    classified_field.setName(new_field['field_name'])
    classified_field.setLength(new_field['length'])
    classified_field.setPrecision(new_field['precision'])
    layer.startEditing()
    layer.addAttribute(classified_field)
    classified_field_index = layer.fields().lookupField(
        classified_field.name())
    # Classify every feature; unknown source values become ''.
    for feature in layer.getFeatures():
        attributes = feature.attributes()
        source_value = attributes[unclassified_index]
        classified_value = reversed_value_map.get(source_value)
        if not classified_value:
            classified_value = ''
        layer.changeAttributeValue(
            feature.id(), classified_field_index, classified_value)
    layer.commitChanges()
    remove_fields(layer, [unclassified_column])
    # We transfer keywords to the output.
    # We add new class field
    inasafe_fields[new_field['key']] = new_field['field_name']
    # and we remove hazard value field
    inasafe_fields.pop(old_field['key'])
    layer.keywords = keywords
    layer.keywords['inasafe_fields'] = inasafe_fields
    # The raw mapping is consumed now; drop it from the keywords.
    if exposure_key:
        value_map_key = 'value_maps'
    else:
        value_map_key = 'value_map'
    if value_map_key in list(layer.keywords.keys()):
        layer.keywords.pop(value_map_key)
    layer.keywords['title'] = output_layer_name
    if classification:
        layer.keywords['classification'] = classification
    check_layer(layer)
    return layer
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
    """Helper method that removes anomalies which don't meet
    a minimum separation from the next anomaly.

    An anomalous sequence is kept only when its maximum smoothed error is
    sufficiently larger (relative decrease >= MIN_PERCENT_DECREASE) than
    the next-largest sequence maximum.  NOTE: mutates *e_seq* in place.
    """
    # min accepted perc decrease btwn max errors in anomalous sequences
    MIN_PERCENT_DECREASE = 0.05
    e_seq_max, smoothed_errors_max = [], []
    # Collect the max smoothed error of each non-empty anomalous sequence.
    for error_seq in e_seq:
        if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
            sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
            e_seq_max.append(max(sliced_errors))
            smoothed_errors_max.append(max(sliced_errors))
    smoothed_errors_max.sort(reverse=True)
    # Append the largest non-anomalous error as a comparison baseline.
    if max_error_below_e > 0:
        smoothed_errors_max.append(max_error_below_e)
    indices_remove = []
    # Compare each (sorted, descending) max with its successor; if the
    # relative drop is too small, the corresponding sequence is pruned.
    # NOTE(review): `e_seq_max.index(...)` assumes sequence maxima are
    # unique -- duplicated maxima would always resolve to the first match.
    for i in range(len(smoothed_errors_max)):
        if i < len(smoothed_errors_max) - 1:
            delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
            perc_change = delta / smoothed_errors_max[i]
            if perc_change < MIN_PERCENT_DECREASE:
                indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))
    # Delete from the end so earlier indices stay valid.
    for index in sorted(indices_remove, reverse=True):
        del e_seq[index]
    # Keep only the anomaly indices that still fall inside a surviving
    # (inclusive) sequence range.
    pruned_indices = []
    for i in anomaly_indices:
        for error_seq in e_seq:
            if i >= error_seq[0] and i <= error_seq[1]:
                pruned_indices.append(i)
    return pruned_indices
def t_name(self, s):
    r'[A-Za-z_][A-Za-z_0-9]*'
    # NOTE: the docstring above is the token's regular expression; the
    # scanner framework reads it to build the lexer -- do not edit it as
    # prose documentation.
    if s in RESERVED_WORDS:
        # Reserved words get their own upper-cased token type.
        self.add_token(s.upper(), s)
    else:
        # Anything else is a plain identifier.
        self.add_token('NAME', s)
def range_to_numeric(ranges):
    """Convert a sequence of string ranges to a sequence of floats.

    Each entry is a "<value> <prefix><unit>" string; the common unit is
    detected and the SI prefix is applied to the numeric value.

    E.g.::

        >>> range_to_numeric(['1 uV', '2 mV', '1 V'])
        [1e-06, 0.002, 1.0]
    """
    values, units = zip(*(r.split() for r in ranges))
    # Detect the common unit: the longest common *suffix* of the unit
    # strings, computed as the common prefix of the reversed strings.
    unit = os.path.commonprefix([u[::-1] for u in units])
    # Strip the unit to get just the SI prefix.  BUG FIX: the previous
    # `u[:-len(unit)]` returned '' for every unit when no common unit
    # exists (slicing with -0); computing the stop index explicitly keeps
    # the full string in that case.
    prefixes = (u[:len(u) - len(unit)] for u in units)
    # Convert string value and scale with the prefix factor.
    values = [float(v) * SI_PREFIX[p] for v, p in zip(values, prefixes)]
    return values
def enumerate_local_modules(tokens, path):
    """Returns a list of modules inside *tokens* that are local to *path*.

    **Note:** Will recursively look inside *path* for said modules.
    """
    # Have to get a list of all imported modules before anything else.
    modules = enumerate_imports(tokens)
    local_modules = []
    parent = ""
    # Now check the local dir for matching modules
    for root, dirs, files in os.walk(path):
        if not parent:
            # First iteration: remember the top-level package directory.
            parent = os.path.split(root)[1]
        for f in files:
            if not f.endswith('.py'):
                continue
            f = f[:-3]  # Strip .py
            # Convert the path below *parent* into a dotted module path.
            # BUG FIX: use os.sep instead of a hard-coded '/' so the
            # conversion also works on Windows paths.
            module_tree = root.split(parent)[1].replace(os.sep, '.')
            module_tree = module_tree.lstrip('.')
            if module_tree:
                module = "%s.%s" % (module_tree, f)
            else:
                module = f
            # Idiom fix: `module not in` instead of `not module in`.
            if module not in modules:
                local_modules.append(module)
    return local_modules
def target_to_ipv6_cidr(target):
    """Attempt to return a IPv6 CIDR list from a target string.

    Returns None unless *target* is an "<address>/<prefix>" string with a
    prefix length in (0, 126].
    """
    splitted = target.split('/')
    if len(splitted) != 2:
        return None
    try:
        start_packed = inet_pton(socket.AF_INET6, splitted[0])
        block = int(splitted[1])
    except (socket.error, ValueError):
        return None
    if block <= 0 or block > 126:
        return None
    # Mask off the host bits to get the network address, then add 1 so the
    # range starts just above the network address itself.
    start_value = int(binascii.hexlify(start_packed), 16) >> (128 - block)
    start_value = (start_value << (128 - block)) + 1
    # OR-ing in the host mask gives the top address of the block;
    # subtracting 1 excludes it from the range.
    end_value = (start_value | (int('ff' * 16, 16) >> block)) - 1
    # Repack each 128-bit integer as two big-endian 64-bit words.
    high = start_value >> 64
    low = start_value & ((1 << 64) - 1)
    start_packed = struct.pack('!QQ', high, low)
    high = end_value >> 64
    low = end_value & ((1 << 64) - 1)
    end_packed = struct.pack('!QQ', high, low)
    return ipv6_range_to_list(start_packed, end_packed)
def stop_ppp_link(self):
    '''Stop the ppp link, closing its fd and reaping the child process.'''
    if self.ppp_fd == -1:
        # No active link to stop.
        return
    try:
        # Stop watching the fd, close it, then wait for the child so it
        # does not linger as a zombie.
        self.mpself.select_extra.pop(self.ppp_fd)
        os.close(self.ppp_fd)
        os.waitpid(self.pid, 0)
    except Exception:
        # Best-effort cleanup: the fd/process may already be gone.
        pass
    self.pid = -1
    self.ppp_fd = -1
    print("stopped ppp link")
def build_static(self):
    """Copy the static source directory into the build output.

    Creates the destination directory on first run, then triggers a
    webassets build when one is configured.
    """
    destination = self.build_static_dir
    if not os.path.isdir(destination):
        os.makedirs(destination)
    copy_tree(self.static_dir, destination)
    if self.webassets_cmd:
        self.webassets_cmd.build()
def init_generic_serial_dut(contextlist, conf, index, args):
    """Initializes a local hardware dut attached to a serial port.

    :param contextlist: object collecting duts/dutinformations; provides
        ``logger``, ``duts`` and ``dutinformations``.
    :param conf: dut configuration dict; must contain 'serial_port'.
    :param index: dut index, used for the "D<index>" name.
    :param args: parsed command-line arguments carrying serial settings.
    """
    port = conf['serial_port']
    # Command-line baudrate takes precedence over the configured one.
    baudrate = (args.baudrate if args.baudrate
                else conf.get("application", {}).get("baudrate", 115200))
    serial_config = {}
    if args.serial_rtscts:
        serial_config["serial_rtscts"] = args.serial_rtscts
    elif args.serial_xonxoff:
        serial_config["serial_xonxoff"] = args.serial_xonxoff
    if args.serial_timeout:
        serial_config["serial_timeout"] = args.serial_timeout
    ch_mode_config = {}
    if args.serial_ch_size > 0:
        ch_mode_config["ch_mode"] = True
        ch_mode_config["ch_mode_chunk_size"] = args.serial_ch_size
    elif args.serial_ch_size == 0:
        # BUG FIX: was `args.serial_ch_size is 0`, an identity comparison
        # that only works through CPython small-int interning; equality is
        # the correct check here.
        ch_mode_config["ch_mode"] = False
    if args.ch_mode_ch_delay:
        ch_mode_config["ch_mode_ch_delay"] = args.ch_mode_ch_delay
    dut = DutSerial(name="D%d" % index, port=port, baudrate=baudrate,
                    config=conf, ch_mode_config=ch_mode_config,
                    serial_config=serial_config, params=args)
    dut.index = index
    dut.platform = conf.get("platform_name", "serial")
    msg = 'Use device in serial port {} as D{}'
    contextlist.logger.info(msg.format(port, index))
    contextlist.duts.append(dut)
    contextlist.dutinformations.append(dut.get_info())
def check(self, val):
    """Make sure the given value is consistent with this `Key` specification.

    NOTE: if `type` is None, then `listable` also is *not* checked.
    """
    # If there is no `type` requirement, everything is allowed.
    if self.type is None:
        return True
    is_list = isinstance(val, list)
    # If lists are not allowed, and this is a list --> false
    if not self.listable and is_list:
        return False
    # `is_number` already checks for either list or single value
    if self.type == KEY_TYPES.NUMERIC and not is_number(val):
        return False
    elif (self.type == KEY_TYPES.TIME and not is_number(val) and
          '-' not in val and '/' not in val):
        return False
    elif self.type == KEY_TYPES.STRING:
        # If it's a list, check the first element only.
        if is_list:
            if not isinstance(val[0], basestring):
                return False
        # Otherwise, check the value itself.
        elif not isinstance(val, basestring):
            return False
    elif self.type == KEY_TYPES.BOOL:
        # BUG FIX: previously a *list* of bools always failed, because when
        # the first element was a bool the follow-up `elif` still ran the
        # non-list isinstance check on the list itself.  Mirror the STRING
        # branch structure instead.
        if is_list:
            if not isinstance(val[0], bool):
                return False
        elif not isinstance(val, bool):
            return False
    return True
def p_static_scalar_namespace_name(p):
    '''static_scalar : namespace_name
                     | NS_SEPARATOR namespace_name
                     | NAMESPACE NS_SEPARATOR namespace_name'''
    # NOTE: the docstring above is the yacc grammar rule; ply parses it to
    # build the parser tables -- do not edit it as documentation.
    if len(p) == 2:
        # Plain constant name, e.g. FOO
        p[0] = ast.Constant(p[1], lineno=p.lineno(1))
    elif len(p) == 3:
        # Name qualified from the global namespace, e.g. \FOO
        p[0] = ast.Constant(p[1] + p[2], lineno=p.lineno(1))
    else:
        # Explicit "namespace\FOO" form.
        p[0] = ast.Constant(p[1] + p[2] + p[3], lineno=p.lineno(1))
def reqHeadTimeStamp(
        self, contract: Contract, whatToShow: str,
        useRTH: bool, formatDate: int = 1) -> datetime.datetime:
    """Get the datetime of earliest available historical data
    for the contract.

    Args:
        contract: Contract of interest.
        whatToShow: Type of data to query; forwarded unchanged to the
            async request.
        useRTH: If True then only show data from within Regular
            Trading Hours, if False then show all data.
        formatDate: If set to 2 then the result is returned as a
            timezone-aware datetime.datetime with UTC timezone.
    """
    # Synchronous wrapper: delegate to the async variant and block on it.
    return self._run(self.reqHeadTimeStampAsync(
        contract, whatToShow, useRTH, formatDate))
def fn_std(self, a, axis=None):
    """Compute the standard deviation of an array, ignoring NaNs.

    :param a: The array (anything `_to_ndarray` accepts).
    :param axis: Optional axis along which to compute.
    :return: The standard deviation of the array.
    """
    data = self._to_ndarray(a)
    return numpy.nanstd(data, axis=axis)
def outputZip(self, figtype='png'):
    """Outputs the report in a zip container.

    Figures and tables are stored inside as images and excel files.

    Args:
        figtype (str): Figure type of images in the zip folder.
    """
    from zipfile import ZipFile
    with ZipFile(self.outfile + '.zip', 'w') as zipcontainer:
        # Summary text: title, intro paragraph and optional conclusion.
        conclusion_part = (
            ('\n## Conclusion\n' if self.conclusion else '') + self.conclusion)
        summary = '# {}\n\n{}\n{}'.format(self.title, self.p, conclusion_part)
        zipcontainer.writestr('summary.txt', summary.encode())
        # One numbered folder per section.
        for number, section in enumerate(self.sections, start=1):
            folder = 's{}_{}/'.format(
                number, section.title.replace(' ', '_'))
            section.sectionOutZip(zipcontainer, folder, figtype=figtype)
def _find_header_flat(self):
    """Find header elements in a table, if possible. This case handles
    situations where '<th>' elements are not within a row ('<tr>').

    Returns the matching nodes, or None when there are none.
    """
    # Only direct children count: a nested <th> would belong to a row.
    nodes = self._node.contents.filter_tags(
        matches=ftag('th'), recursive=False)
    if not nodes:
        return
    self._log('found header outside rows (%d <th> elements)' % len(nodes))
    return nodes
def resolution(file_, resolution_string):
    """A filter to return the URL for the provided resolution of the
    thumbnail."""
    if sorl_settings.THUMBNAIL_DUMMY:
        # Turn the dummy-source template into a regex that captures the
        # width/height embedded in the dummy URL.
        dummy_source = sorl_settings.THUMBNAIL_DUMMY_SOURCE
        pattern = re.compile(
            dummy_source
            .replace('%(width)s', '(?P<width>[0-9]+)')
            .replace('%(height)s', '(?P<height>[0-9]+)'))
        try:
            factor = decimal.Decimal(resolution_string.strip('x'))
            dimensions = pattern.match(file_).groupdict()
            scaled = {dimension: int(int(size) * factor)
                      for (dimension, size) in dimensions.items()}
            return dummy_source % scaled
        except (AttributeError, TypeError, KeyError):
            # If we can't manipulate the dummy we shouldn't change it at all
            return file_
    # Regular file: insert "@<resolution>" before the extension.
    filename, extension = os.path.splitext(file_)
    return '%s@%s%s' % (filename, resolution_string, extension)
def vote_count(self):
    """Returns the total number of votes cast across all the
    poll's options."""
    # Sum the `vote` field over every Vote row pointing (via the generic
    # content-type relation) at one of this poll's options; an empty
    # queryset aggregates to None, hence the `or 0` fallback.
    return Vote.objects.filter(
        content_type=ContentType.objects.get(
            app_label='poll', model='polloption'),
        object_id__in=[o.id for o in self.polloption_set.all()]
    ).aggregate(Sum('vote'))['vote__sum'] or 0
async def FindTools(self, arch, major, minor, number, series):
    '''Issue a "FindTools" rpc request.

    arch : str
    major : int
    minor : int
    number : Number
    series : str
    Returns -> typing.Union[_ForwardRef('Error'), typing.Sequence[~Tools]]
    '''
    # Map the input arguments into the rpc message payload.
    _params = {
        'arch': arch,
        'major': major,
        'minor': minor,
        'number': number,
        'series': series,
    }
    msg = {
        'type': 'Client',
        'request': 'FindTools',
        'version': 1,
        'params': _params,
    }
    reply = await self.rpc(msg)
    return reply
def model(x_train, y_train, x_test, y_test):
    """Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
    - loss: Specify a numeric evaluation metric to be minimized
    - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
    - model: specify the model just created so that we can later use it again.
    """
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop
    # NOTE: the {{...}} expressions below are hyperas template
    # placeholders; hyperas textually replaces them with sampled
    # hyperparameter values before this function is actually run.
    keras_model = Sequential()
    keras_model.add(Dense(512, input_shape=(784,)))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense({{choice([256, 512, 1024])}}))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense(10))
    keras_model.add(Activation('softmax'))
    rms = RMSprop()
    keras_model.compile(loss='categorical_crossentropy', optimizer=rms,
                        metrics=['acc'])
    keras_model.fit(x_train, y_train, batch_size={{choice([64, 128])}},
                    epochs=1, verbose=2, validation_data=(x_test, y_test))
    score, acc = keras_model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    # Loss is the negated accuracy because hyperopt minimizes the loss.
    return {'loss': -acc, 'status': STATUS_OK,
            'model': keras_model.to_yaml(),
            'weights': pickle.dumps(keras_model.get_weights())}
def gossip_connect_curve(self, public_key, format, *args):
    """Set-up gossip discovery with CURVE enabled.

    Thin wrapper: *public_key* and the printf-style *format*/*args* are
    forwarded verbatim to the underlying zyre C function.
    """
    return lib.zyre_gossip_connect_curve(
        self._as_parameter_, public_key, format, *args)
def run(self):
    """Append version number to gvar/__init__.py, then run the normal
    build_py step."""
    # Open in append mode so the source file is only extended with the
    # __version__ attribute, never rewritten.
    with open('src/gvar/__init__.py', 'a') as gvfile:
        gvfile.write("\n__version__ = '%s'\n" % GVAR_VERSION)
    _build_py.run(self)
def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True,
                      **kwargs):
    """Return processor with Statements extracted by reading an NXML file.

    Parameters
    ----------
    fname : str
        The path to the NXML file to be read.
    output_fmt : Optional[str]
        The output format to obtain from Sparser, with the two options
        being 'json' and 'xml'. Default: 'json'
    outbuf : Optional[file]
        A file-like object that the Sparser output is written to.
    cleanup : Optional[bool]
        If True, the output file created by Sparser is removed.
        Default: True

    Returns
    -------
    sp : SparserXMLProcessor or SparserJSONProcessor depending on what
        output format was chosen.  None when Sparser fails.
    """
    processor = None
    sparser_out_path = None
    try:
        sparser_out_path = run_sparser(fname, output_fmt, outbuf, **kwargs)
        processor = process_sparser_output(sparser_out_path, output_fmt)
    except Exception as err:
        # A failed run is logged and simply yields a None processor.
        logger.error("Sparser failed to run on %s." % fname)
        logger.exception(err)
    finally:
        # Remove Sparser's output file unless the caller wants to keep it.
        if (sparser_out_path is not None
                and os.path.exists(sparser_out_path) and cleanup):
            os.remove(sparser_out_path)
    return processor
def new(cls, shapes, start_x, start_y, x_scale, y_scale):
    """Return a new |FreeformBuilder| object.

    The initial pen location is specified (in local coordinates) by
    (*start_x*, *start_y*); both coordinates are rounded to the nearest
    integer before the builder is constructed.
    """
    pen_x = int(round(start_x))
    pen_y = int(round(start_y))
    return cls(shapes, pen_x, pen_y, x_scale, y_scale)
def setAllowedTypes(self, allowed_types):
    """Set the allowed association types, checking to make sure each
    (assoc_type, session_type) combination is valid."""
    # Validate every pair before storing, so an invalid entry leaves the
    # previous configuration untouched.
    for assoc_type, session_type in allowed_types:
        checkSessionType(assoc_type, session_type)
    self.allowed_types = allowed_types
def _clone_file_everywhere(self, finfo):
    """Clone file (*src_editor* widget) in all editorstacks.

    Cloning from the first editorstack, in which every single new editor
    is created (when loading or creating a new file).
    """
    # Skip editorstacks[0]: it already holds the original editor.
    for editorstack in self.editorstacks[1:]:
        editor = editorstack.clone_editor_from(finfo, set_current=False)
        self.register_widget_shortcuts(editor)
def kill_filler_process(self):
    """Terminate the process that fills this buffer's
    :class:`~alot.walker.PipeWalker`."""
    filler = self.proc
    # Only terminate a process that exists and is still running.
    if filler and filler.is_alive():
        filler.terminate()
def translate(self, exc):
    """Translate a boto 404 StorageResponseError into this backend's
    error class; return None for any other exception (meaning the
    original should propagate unchanged)."""
    # Imported lazily so boto is only required when this path is hit.
    from boto.exception import StorageResponseError
    if isinstance(exc, StorageResponseError):
        if exc.status == 404:
            return self.error_cls(str(exc))
    return None
def V_vertical_spherical_concave(D, a, h):
    r'''Calculates volume of a vertical tank with a concave spherical
    bottom, according to [1]_. No provision for the top of the tank is
    made here.

    .. math::
        V = \frac{\pi}{12}\left[3D^2h + \frac{a}{2}(3D^2 + 4a^2)
        + (a+h)^3\left(4 - \frac{3D^2 + 12a^2}{2a(a+h)}\right)\right],
        \;\; 0 \le h < |a|

    .. math::
        V = \frac{\pi}{12}\left[3D^2h + \frac{a}{2}(3D^2 + 4a^2)\right],
        \;\; h \ge |a|

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    a : float
        Negative distance the spherical head extends inside the main
        cylinder, [m]
    h : float
        Height, as measured up to where the fluid ends, [m]

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_vertical_spherical_concave(D=113., a=-33, h=15)/231
    112.81405437348528

    References
    ----------
    .. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
       Processing. December 18, 2003.
       http://www.chemicalprocessing.com/articles/2003/193/
    '''
    # The cylinder/head base term is shared by both regimes.
    base = 3. * D * D * h + 0.5 * a * (3. * D * D + 4. * a * a)
    if h < abs(a):
        # Liquid level still inside the concave head: add the correction
        # term for the partially wetted spherical cap.
        cap = (a + h) ** 3 * (
            4. - (3. * D * D + 12. * a * a) / (2. * a * (a + h)))
        return pi / 12. * (base + cap)
    # Level above the head: only the cylindrical term grows with h.
    return pi / 12. * base
def get_c_extension(support_legacy=False, system_zstd=False, name='zstd',
                    warnings_as_errors=False, root=None):
    """Obtain a distutils.extension.Extension for the C extension.

    ``support_legacy`` controls whether to compile in legacy zstd format
    support.

    ``system_zstd`` controls whether to compile against the system zstd
    library. For this to work, the system zstd library and headers must
    match what python-zstandard is coded against exactly.

    ``name`` is the module name of the C extension to produce.

    ``warnings_as_errors`` controls whether compiler warnings are turned
    into compiler errors.

    ``root`` defines a root path that source should be computed as
    relative to. This should be the directory with the main ``setup.py``
    that is being invoked. If not defined, paths will be relative to
    this file.
    """
    actual_root = os.path.abspath(os.path.dirname(__file__))
    root = root or actual_root
    # When bundling zstd, add its sources/headers on top of the extension's
    # own; sets de-duplicate any overlap.
    sources = set([os.path.join(actual_root, p) for p in ext_sources])
    if not system_zstd:
        sources.update([os.path.join(actual_root, p) for p in zstd_sources])
        if support_legacy:
            sources.update([os.path.join(actual_root, p)
                            for p in zstd_sources_legacy])
    sources = list(sources)
    include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
    if not system_zstd:
        include_dirs.update([os.path.join(actual_root, d)
                             for d in zstd_includes])
        if support_legacy:
            include_dirs.update([os.path.join(actual_root, d)
                                 for d in zstd_includes_legacy])
    include_dirs = list(include_dirs)
    depends = [os.path.join(actual_root, p) for p in zstd_depends]
    # Probe the compiler type so flags can be chosen per toolchain.
    compiler = distutils.ccompiler.new_compiler()
    # Needed for MSVC.
    if hasattr(compiler, 'initialize'):
        compiler.initialize()
    if compiler.compiler_type == 'unix':
        compiler_type = 'unix'
    elif compiler.compiler_type == 'msvc':
        compiler_type = 'msvc'
    elif compiler.compiler_type == 'mingw32':
        compiler_type = 'mingw32'
    else:
        raise Exception('unhandled compiler type: %s' %
                        compiler.compiler_type)
    extra_args = ['-DZSTD_MULTITHREAD']
    if not system_zstd:
        # Neutralize the library's export macros when building it into the
        # extension, and hide symbols on unix toolchains.
        extra_args.append('-DZSTDLIB_VISIBILITY=')
        extra_args.append('-DZDICTLIB_VISIBILITY=')
        extra_args.append('-DZSTDERRORLIB_VISIBILITY=')
        if compiler_type == 'unix':
            extra_args.append('-fvisibility=hidden')
    if not system_zstd and support_legacy:
        extra_args.append('-DZSTD_LEGACY_SUPPORT=1')
    if warnings_as_errors:
        if compiler_type in ('unix', 'mingw32'):
            extra_args.append('-Werror')
        elif compiler_type == 'msvc':
            extra_args.append('/WX')
        else:
            assert False
    # Only link against libzstd when using the system copy.
    libraries = ['zstd'] if system_zstd else []
    # Python 3.7 doesn't like absolute paths. So normalize to relative.
    sources = [os.path.relpath(p, root) for p in sources]
    include_dirs = [os.path.relpath(p, root) for p in include_dirs]
    depends = [os.path.relpath(p, root) for p in depends]
    # TODO compile with optimizations.
    return Extension(name, sources, include_dirs=include_dirs,
                     depends=depends, extra_compile_args=extra_args,
                     libraries=libraries)
def getConf(self, conftype):
    '''Return the zeromq configuration dict for *conftype*.

    *conftype* must be a Zooborg constant (ZooConst.CLIENT,
    ZooConst.WORKER or ZooConst.BROKER).
    '''
    if conftype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]:
        raise Exception('Zooborg.getConf: invalid type')
    # TODO: specialconf entries for the mock
    if conftype == ZooConst.BROKER:
        # The broker binds locally on all interfaces.
        return {'bindstr': b"tcp://*:5555"}
    if conftype in (ZooConst.CLIENT, ZooConst.WORKER):
        # Clients and workers both connect out to the local broker.
        return {'broker': {'connectionstr': b"tcp://localhost:5555"}}
    # Unreachable after the validation above; kept for parity.
    raise Exception("ZooBorgconftype unknown")
def add_shadow(img, vertices_list):
    """Add shadows to the image.

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (np.array): RGB image, uint8 or float32 (float32 is converted
            to uint8 for processing and back on return).
        vertices_list (list): polygon vertex arrays accepted by
            cv2.fillPoly; each polygon is darkened.

    Returns:
        np.array: the image with shadow polygons darkened.
    """
    non_rgb_warning(img)
    input_dtype = img.dtype
    needs_float = False
    if input_dtype == np.float32:
        img = from_float(img, dtype=np.dtype('uint8'))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        # BUG FIX: the message previously named 'RandomSnow'; this helper
        # implements the shadow augmentation, so report it correctly.
        raise ValueError(
            'Unexpected dtype {} for add_shadow augmentation'.format(
                input_dtype))
    image_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    mask = np.zeros_like(img)
    # adding all shadow polygons on empty mask, single 255 denotes only red channel
    for vertices in vertices_list:
        cv2.fillPoly(mask, vertices, 255)
    # if red channel is hot, image's "Lightness" channel's brightness is lowered
    red_max_value_ind = mask[:, :, 0] == 255
    image_hls[:, :, 1][red_max_value_ind] = (
        image_hls[:, :, 1][red_max_value_ind] * 0.5)
    image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)
    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)
    return image_rgb
def validate_confusables_email(value):
    """Validator which disallows 'dangerous' email addresses likely to
    represent homograph attacks.

    An email address is 'dangerous' if either the local-part or the
    domain, considered on their own, are mixed-script and contain one
    or more characters appearing in the Unicode Visually Confusable
    Characters file.

    :raises ValidationError: when the address is dangerous.
    """
    # Not email-shaped at all; leave that to other validators.
    if '@' not in value:
        return
    # BUG FIX: use rsplit with maxsplit=1 so values containing more than
    # one '@' (legal in a quoted local part) no longer raise ValueError
    # during unpacking; the domain is everything after the last '@'.
    local_part, domain = value.rsplit('@', 1)
    if confusables.is_dangerous(local_part) or confusables.is_dangerous(domain):
        raise ValidationError(CONFUSABLE_EMAIL, code='invalid')
def stable_cho_factor(x, tiny=_TINY):
    """NAME:
       stable_cho_factor
    PURPOSE:
       Stable version of the cholesky decomposition
    INPUT:
       x - (sc.array) positive definite matrix
       tiny - (double) tiny number to add to the covariance matrix to
              make the decomposition stable (has a default)
    OUTPUT:
       (L, lowerFlag) - output from scipy.linalg.cho_factor for lower=True
    REVISION HISTORY:
       2009-09-25 - Written - Bovy (NYU)
    """
    # Add trace-scaled jitter to the diagonal so near-singular matrices
    # still factor cleanly.
    jitter = numpy.sum(numpy.diag(x)) * tiny
    stabilized = x + jitter * numpy.eye(x.shape[0])
    return linalg.cho_factor(stabilized, lower=True)
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces. To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed.

    NOTE: this is Python 2 code (uses ``unicode`` and the ``string``
    module's function forms).
    """
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    if not isinstance(doc, (str, unicode)):
        return None
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find the smallest indentation shared by all non-blank lines
        # after the first one.
        margin = None
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if not content:
                # Blank lines don't constrain the margin.
                continue
            indent = len(line) - content
            if margin is None:
                margin = indent
            else:
                margin = min(margin, indent)
        # Strip that common margin from every line but the first.
        if margin is not None:
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        return string.join(lines, '\n')
def field_cache_to_index_pattern(self, field_cache):
    """Return a .kibana index-pattern doc_type as a compact JSON string."""
    compact = (',', ':')
    mapping = {
        'customFormats': "{}",
        'title': self.index_pattern,
        # Kibana stores the field list as a JSON *string*, not as a
        # nested object, so it is serialized separately first.
        'fields': json.dumps(field_cache, separators=compact),
    }
    # The whole document is what gets posted into .kibana.
    return json.dumps(mapping, separators=compact)
def set_resolution(self, resolution=1090):
    """Set the resolution of the sensor.

    The resolution value is the amount of steps recorded in a single
    Gauss. The possible options are:

        Recommended Gauss range    Resolution    Gauss per bit
        0.88 Ga                    1370          0.73 mGa
        1.3 Ga                     1090          0.92 mGa
        1.9 Ga                     820           1.22 mGa
        2.5 Ga                     660           1.52 mGa
        4.0 Ga                     440           2.27 mGa
        4.7 Ga                     390           2.56 mGa
        5.6 Ga                     330           3.03 mGa
        8.1 Ga                     230           4.35 mGa

    :param resolution: The resolution of the sensor.
    :raises Exception: if *resolution* is not one of the supported steps.
    """
    # Map each supported resolution to its 3-bit gain code.
    options = {1370: 0, 1090: 1, 820: 2, 660: 3, 440: 4, 390: 5, 330: 6, 230: 7}
    if resolution not in options.keys():
        raise Exception(
            'Resolution of {} steps is not supported'.format(resolution))
    self.resolution = resolution
    # BUG FIX: the gain code was previously combined with `&=` on a zero
    # initial value, which always wrote 0 to the register regardless of
    # the requested resolution.  The code belongs in the top 3 bits of
    # configuration register B, so shift it into place directly.
    config_b = options[resolution] << 5
    self.i2c_write_register(0x01, config_b)
def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
    r"""Compute the covariance recursively.

    Return the new covariance and the new mean.

    .. math::
        C_k &= \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k \bar{x_k} \bar{x_k}^T)
        C_n &= \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n \bar{x_n} \bar{x_n}^T)
            &= \frac{1}{n-1} ((k-1) C_k + k \bar{x_k} \bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n \bar{x_n} \bar{x_n}^T)

    :Parameters:
        - cov : matrix
            Previous covariance matrix.
        - length : int
            Length of chain used to compute the previous covariance.
        - mean : array
            Previous mean.
        - chain : array
            Sample used to update covariance.
        - scaling : float
            Scaling parameter.
        - epsilon : float
            Set to a small value to avoid singular matrices.
    """
    k = length
    n = k + len(chain)
    new_mean = self.recursive_mean(mean, length, chain)
    # Terms of the recursion: old outer product, new cross products,
    # new outer product, and a diagonal jitter for stability.
    outer_old = k * np.outer(mean, mean)
    cross = np.dot(chain.T, chain)
    outer_new = n * np.outer(new_mean, new_mean)
    jitter = epsilon * np.eye(cov.shape[0])
    new_cov = ((k - 1) / (n - 1.) * cov
               + scaling / (n - 1.) * (outer_old + cross - outer_new + jitter))
    return new_cov, new_mean
def create_account_api_key(self, account_id, body, **kwargs):  # noqa: E501
    """Create a new API key.  # noqa: E501

    An endpoint for creating a new API key. There is no default value for
    the owner ID and it must be from the same account where the new API key
    is created. **Example usage:** `curl -X POST
    https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys
    -d '{"name": "MyKey1"}' -H 'content-type: application/json'
    -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.create_account_api_key(account_id, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param ApiKeyInfoReq body: Details of the API key to be created. (required)
    :return: ApiKeyInfoResp
        If the method is called asynchronously, returns the request thread.
    """
    # Always unwrap to the payload only; headers/status are not exposed here.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('asynchronous'):
        (data) = self.create_account_api_key_with_http_info(account_id, body, **kwargs)  # noqa: E501
        return data
    # Asynchronous call: hand the request thread back to the caller.
    return self.create_account_api_key_with_http_info(account_id, body, **kwargs)  # noqa: E501
def T_i(v_vars: List[fl.Var], mass: np.ndarray, i: int):
    """Build a Fluxion for the kinetic energy T = (1/2) m v^2 of body i."""
    # Three velocity components per body, so the lengths must line up.
    assert len(v_vars) == 3 * len(mass)
    body_mass = mass[i]
    # Kinetic energy = 1/2 * mass * speed^2 (flux_v2 is the squared speed).
    return (0.5 * body_mass) * flux_v2(v_vars, i)
def add_slice_db(self, fid, slice_end, md5):
    """Record one slice of an upload task in the database.

    (fid, slice_end, md5) maps an uploaded chunk to its checksum.
    """
    self.cursor.execute('INSERT INTO slice VALUES(?, ?, ?)',
                        (fid, slice_end, md5))
    self.check_commit()
def plot(self, figure_list):
    """Plot the data contained in self.data.

    self.data should be a dictionary or a deque of dictionaries; for the
    latter the last entry is used.

    Args:
        figure_list: list of figure objects that are passed to
            self.get_axes_layout to obtain axis objects for plotting.
    """
    # Nothing recorded yet: nothing to draw.
    if not self.data:
        return
    # Plot called while the script is idle: request a full redraw.
    if not self.is_running:
        self._plot_refresh = True
    axes_list = self.get_axes_layout(figure_list)
    if self._plot_refresh is not True:
        # Fast path: just refresh the existing artists.
        self._update_plot(axes_list)
        return
    # Full redraw, then tighten layouts of figures that actually have axes.
    self._plot(axes_list)
    self._plot_refresh = False
    for fig in figure_list:
        if fig.axes:
            fig.set_tight_layout(True)
def resolved_args(self):
    """Parse args on first access and return the resolved Namespace.

    .. Note:: Accessing args should only be done directly in the App.

    Returns:
        (namespace): ArgParser parsed arguments with Playbook variables
        automatically resolved.
    """
    if self._parsed_resolved:
        # Already resolved on a previous call; reuse the cached Namespace.
        return self._default_args_resolved
    self.args()
    # Fresh Namespace to hold the resolved values.
    self._default_args_resolved = Namespace()
    for name in vars(self._default_args):
        value = getattr(self._default_args, name)
        # Reserved args pass through untouched; string values of ordinary
        # args may be playbook variables and get resolved.
        if name not in self.tc_reserved_args and isinstance(value, str):
            value = self.tcex.playbook.read(value)
        setattr(self._default_args_resolved, name, value)
    # Ensure args are only parsed/resolved once.
    self._parsed_resolved = True
    return self._default_args_resolved
def call():
    """Execute command line helper.

    Parses CLI arguments, configures logging, logs into Abode (via cache
    and/or username+password), performs every requested action (alarm mode,
    settings, device switching/locking, automations, image capture, device
    listing, event listening) and always logs out on exit.
    """
    args = get_arguments()
    # Set up logging
    if args.debug:
        log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.WARN
    else:
        log_level = logging.INFO
    setup_logging(log_level)
    abode = None
    # Without a cache file we need full credentials up front.
    if not args.cache:
        if not args.username or not args.password:
            raise Exception("Please supply a cache or username and password.")
    try:
        # Create abodepy instance.
        if args.cache and args.username and args.password:
            abode = abodepy.Abode(username=args.username,
                                  password=args.password,
                                  get_devices=True,
                                  cache_path=args.cache)
        elif args.cache:
            # BUGFIX: this branch was guarded by
            # `args.cache and not (not args.username or not args.password)`,
            # which is equivalent to the first condition and therefore
            # unreachable. A cache file alone is a valid login (the guard
            # above only requires credentials when no cache is given).
            abode = abodepy.Abode(get_devices=True, cache_path=args.cache)
        else:
            abode = abodepy.Abode(username=args.username,
                                  password=args.password,
                                  get_devices=True)
        # Output current mode.
        if args.mode:
            _LOGGER.info("Current alarm mode: %s", abode.get_alarm().mode)
        # Change system mode.
        if args.arm:
            if abode.get_alarm().set_mode(args.arm):
                _LOGGER.info("Alarm mode changed to: %s", args.arm)
            else:
                _LOGGER.warning("Failed to change alarm mode to: %s", args.arm)
        # Set setting
        for setting in args.set or []:
            keyval = setting.split("=")
            if abode.set_setting(keyval[0], keyval[1]):
                _LOGGER.info("Setting %s changed to %s", keyval[0], keyval[1])
        # Switch on
        for device_id in args.on or []:
            device = abode.get_device(device_id)
            if device:
                if device.switch_on():
                    _LOGGER.info("Switched on device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)
        # Switch off
        for device_id in args.off or []:
            device = abode.get_device(device_id)
            if device:
                if device.switch_off():
                    _LOGGER.info("Switched off device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)
        # Lock
        for device_id in args.lock or []:
            device = abode.get_device(device_id)
            if device:
                if device.lock():
                    _LOGGER.info("Locked device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)
        # Unlock
        for device_id in args.unlock or []:
            device = abode.get_device(device_id)
            if device:
                if device.unlock():
                    _LOGGER.info("Unlocked device with id: %s", device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)
        # Output Json
        for device_id in args.json or []:
            device = abode.get_device(device_id)
            if device:
                # pylint: disable=protected-access
                _LOGGER.info(device_id + " JSON:\n" +
                             json.dumps(device._json_state, sort_keys=True,
                                        indent=4, separators=(',', ': ')))
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)

        # Print helper shared by device/automation listings below.
        def _device_print(dev, append=''):
            _LOGGER.info("%s%s", dev.desc, append)

        # Print out all automations
        if args.automations:
            for automation in abode.get_automations():
                _device_print(automation)
        # Enable automation
        for automation_id in args.activate or []:
            automation = abode.get_automation(automation_id)
            if automation:
                if automation.set_active(True):
                    _LOGGER.info("Activated automation with id: %s",
                                 automation_id)
            else:
                _LOGGER.warning("Could not find automation with id: %s",
                                automation_id)
        # Disable automation
        for automation_id in args.deactivate or []:
            automation = abode.get_automation(automation_id)
            if automation:
                if automation.set_active(False):
                    _LOGGER.info("Deactivated automation with id: %s",
                                 automation_id)
            else:
                _LOGGER.warning("Could not find automation with id: %s",
                                automation_id)
        # Trigger automation
        for automation_id in args.trigger or []:
            automation = abode.get_automation(automation_id)
            if automation:
                if automation.trigger():
                    _LOGGER.info("Triggered automation with id: %s",
                                 automation_id)
            else:
                _LOGGER.warning("Could not find automation with id: %s",
                                automation_id)
        # Trigger image capture
        for device_id in args.capture or []:
            device = abode.get_device(device_id)
            if device:
                if device.capture():
                    _LOGGER.info("Image requested from device with id: %s",
                                 device_id)
                else:
                    _LOGGER.warning(
                        "Failed to request image from device with id: %s",
                        device_id)
            else:
                _LOGGER.warning("Could not find device with id: %s", device_id)
        # Save camera image
        for keyval in args.image or []:
            devloc = keyval.split("=")
            device = abode.get_device(devloc[0])
            if device:
                try:
                    if (device.refresh_image()
                            and device.image_to_file(devloc[1])):
                        _LOGGER.info("Saved image to %s for device id: %s",
                                     devloc[1], devloc[0])
                except AbodeException as exc:
                    _LOGGER.warning("Unable to save image: %s", exc)
            else:
                _LOGGER.warning("Could not find device with id: %s", devloc[0])
        # Print out all devices.
        if args.devices:
            for device in abode.get_devices():
                _device_print(device)

        def _device_callback(dev):
            _device_print(dev, ", At: " + time.strftime("%Y-%m-%d %H:%M:%S"))

        def _timeline_callback(tl_json):
            event_code = int(tl_json['event_code'])
            if 5100 <= event_code <= 5199:
                # Ignore device changes
                return
            _LOGGER.info("%s - %s at %s %s",
                         tl_json['event_name'], tl_json['event_type'],
                         tl_json['date'], tl_json['time'])

        # Print out specific devices by device id.
        if args.device:
            for device_id in args.device:
                device = abode.get_device(device_id)
                if device:
                    _device_print(device)
                    # Register the specific devices if we decide to listen.
                    abode.events.add_device_callback(device_id,
                                                     _device_callback)
                else:
                    _LOGGER.warning("Could not find device with id: %s",
                                    device_id)
        # Start device change listener.
        if args.listen:
            # If no devices were specified then we listen to all devices.
            if args.device is None:
                _LOGGER.info("Adding all devices to listener...")
                for device in abode.get_devices():
                    abode.events.add_device_callback(device.device_id,
                                                     _device_callback)
            abode.events.add_timeline_callback(TIMELINE.ALL,
                                               _timeline_callback)
            _LOGGER.info("Listening for device and timeline updates...")
            abode.events.start()
            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                abode.events.stop()
                _LOGGER.info("Device update listening stopped.")
    except abodepy.AbodeException as exc:
        _LOGGER.error(exc)
    finally:
        # Always log out, even on error or Ctrl-C.
        if abode:
            abode.logout()
def fd_solve(self):
    """w = fd_solve()

    Sparse solver for one-dimensional flexure of an elastic plate.
    self.coeff_matrix is the sparse coefficient matrix output from the
    coeff_matrix function and self.qs is the array of loads (stresses).
    Stores the deflection in self.w.
    """
    if self.Debug:
        print("qs", self.qs.shape)
        print("Te", self.Te.shape)
        self.calc_max_flexural_wavelength()
        # BUGFIX: the variable was accidentally quoted inside the string,
        # so the literal text was printed instead of the value.
        print("maxFlexuralWavelength_ncells", self.maxFlexuralWavelength_ncells)
    if self.Solver == "iterative" or self.Solver == "Iterative":
        if self.Debug:
            print("Using generalized minimal residual method for iterative solution")
        if self.Verbose:
            print("Converging to a tolerance of",
                  self.iterative_ConvergenceTolerance, "m between iterations")
        # qs negative so bends down with positive load, bends up with
        # negative load (i.e. material removed)
        w = isolve.lgmres(self.coeff_matrix, -self.qs,
                          tol=self.iterative_ConvergenceTolerance)
        # Reach into the (solution, info) tuple to get the array back.
        self.w = w[0]
    else:
        if self.Solver == 'direct' or self.Solver == 'Direct':
            if self.Debug:
                print("Using direct solution with UMFpack")
        else:
            print("Solution type not understood:")
            print("Defaulting to direct solution with UMFpack")
        # UMFpack is now the default, but setting it explicitly just to be
        # sure in case anything changes.
        # qs negative so bends down with positive load, bends up with
        # negative load (i.e. material removed)
        self.w = spsolve(self.coeff_matrix, -self.qs, use_umfpack=True)
    if self.Debug:
        print("w.shape:")
        print(self.w.shape)
        print("w:")
        print(self.w)
def insert(self, xmltext):
    """Insert the LIGO_LW metadata in the xmltext string into the database.

    @return: message received (may be empty) from LDBD Server as a string
    @raise LDBDClientException: if the server reports a non-zero status
    """
    # NUL-delimited wire protocol: command, payload, terminator.
    msg = "INSERT\0" + xmltext + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error executing insert on server %d:%s" % (ret, reply)
        # BUGFIX: replaced Python-2-only `raise Exc, msg` syntax with the
        # call form, which is valid on both Python 2 and 3.
        raise LDBDClientException(msg)
    return reply
def copyError(self, to):
    """Save the original error to the new place."""
    # Unwrap the underlying C object of the destination, if any.
    dest = None if to is None else to._o
    return libxml2mod.xmlCopyError(self._o, dest)
def set_widgets(self):
    """Set widgets on the layer purpose tab.

    Clears the later wizard steps, repopulates the purpose list for the
    current layer, and pre-selects the purpose already stored in the
    layer's keywords (or dictated by the parent IFCW mode).
    """
    self.clear_further_steps()
    # Set widgets: reset list, description, icon and the question label.
    self.lstCategories.clear()
    self.lblDescribeCategory.setText('')
    self.lblIconCategory.setPixmap(QPixmap())
    self.lblSelectCategory.setText(
        category_question % self.parent.layer.name())
    purposes = self.purposes_for_layer()
    for purpose in purposes:
        # Purposes may arrive as keys; expand them to definition dicts.
        if not isinstance(purpose, dict):
            purpose = definition(purpose)
        # The purpose key is stashed in UserRole for later lookup.
        item = QListWidgetItem(purpose['name'], self.lstCategories)
        item.setData(QtCore.Qt.UserRole, purpose['key'])
        self.lstCategories.addItem(item)
    # Check if layer keywords are already assigned
    purpose_keyword = self.parent.get_existing_keyword('layer_purpose')
    # Overwrite the purpose_keyword if it's KW mode embedded in IFCW mode
    if self.parent.parent_step:
        purpose_keyword = self.parent.get_parent_mode_constraints()[0]['key']
    # Set values based on existing keywords or parent mode
    if purpose_keyword:
        purposes = []
        for index in range(self.lstCategories.count()):
            item = self.lstCategories.item(index)
            purposes.append(item.data(QtCore.Qt.UserRole))
        if purpose_keyword in purposes:
            self.lstCategories.setCurrentRow(
                purposes.index(purpose_keyword))
    # Auto-select when there is only a single purpose available.
    self.auto_select_one_item(self.lstCategories)
def sky2px(wcs, ra, dec, dra, ddec, cell, beam):
    """Convert a sky region to pixel positions.

    :param wcs: WCS object providing a ``wcs2pix(ra, dec)`` method
    :param ra, dec: centre of the region in sky coordinates
    :param dra, ddec: extents of the region
    :param cell: pixel (cell) size
    :param beam: beam (psf) size; every source is assumed to be at least
        this large
    :return: np.array([ra_min, ra_max, dec_min, dec_max]) pixel bounds
    """
    # Assume every source is at least as large as the psf.
    dra = max(dra, beam)
    ddec = max(ddec, beam)
    # Half-widths in pixels, bumped up to an even number of pixels.
    half_dec = int((ddec / 2.) / cell)
    half_ra = int((dra / 2.) / cell)
    if half_dec % 2 == 1:
        half_dec += 1
    if half_ra % 2 == 1:
        half_ra += 1
    ra_pix, dec_pix = (int(v) for v in wcs.wcs2pix(ra, dec))
    return np.array([ra_pix - half_ra, ra_pix + half_ra,
                     dec_pix - half_dec, dec_pix + half_dec])
def load_dat(self, delimiter=','):
    """Load the dat file into internal data structures, ``self._data``.

    :param delimiter: column delimiter tried first; if parsing with it
        fails, fall back to numpy's default whitespace splitting.
    """
    try:
        # BUGFIX: honour the `delimiter` argument (it was previously
        # ignored and ',' was hard-coded).
        data = np.loadtxt(self._dat_file, delimiter=delimiter)
    except ValueError:
        # Fall back to whitespace-delimited parsing.
        data = np.loadtxt(self._dat_file)
    self._data = data
def get_annotations(df, annotations, kind='lines', theme=None, **kwargs):
    """Generates an annotations object

    Parameters:
        df : DataFrame
            Original DataFrame of values
        annotations : dict or list
            Dictionary of annotations
            {x_point : text}
            or
            List of Plotly annotations
        kind : str
            Chart kind; for ohlc-style kinds the y value is taken from the
            'high' column and the secondary y axis is used.
        theme : str
            Theme name used to resolve default font/arrow colors.
        kwargs :
            Extra annotation options (validated against __ANN_KWARGS).
    """
    # Reject unknown keyword options early.
    for key in list(kwargs.keys()):
        if key not in __ANN_KWARGS:
            raise Exception("Invalid keyword : '{0}'".format(key))
    # Fill color/size defaults from the theme.
    theme_data = getTheme(theme)
    kwargs['fontcolor'] = kwargs.pop('fontcolor',
                                     theme_data['annotations']['fontcolor'])
    kwargs['arrowcolor'] = kwargs.pop('arrowcolor',
                                      theme_data['annotations']['arrowcolor'])
    kwargs['fontsize'] = kwargs.pop('fontsize', 12)

    def check_ann(annotation):
        # Normalise one annotation spec into a list of plotly annotation
        # dicts.
        local_list = []
        if 'title' in annotation:
            # Title annotations are pinned to the top-left of the paper.
            # NOTE(review): 'fontsize' is always set in kwargs by this
            # point, so the 24 default below is never used -- confirm
            # against upstream.
            local_list.append(
                dict(text=annotation['title'], showarrow=False, x=0, y=1,
                     xref='paper', yref='paper',
                     font={'size': 24 if not 'fontsize' in kwargs
                           else kwargs['fontsize']}))
            del annotation['title']
            # NOTE(review): 'ann' here resolves to the enclosing loop
            # variable below, i.e. the annotation currently being
            # processed (minus its 'title' key). This looks unintentional
            # -- verify against the upstream implementation.
            local_list.append(ann)
        elif 'x' in annotation:
            # Fully specified plotly-style annotation dict.
            ann = dict(x=annotation['x'], y=annotation.get('y', .5),
                       xref=annotation.get('xref', 'x'),
                       yref=annotation.get('yref',
                                           kwargs.get('yref', 'y1')),
                       text=annotation.get('text'),
                       showarrow=annotation.get('showarrow', True),
                       arrowhead=annotation.get('arrowhead', 7),
                       arrowcolor=annotation.get('arrowcolor',
                                                 kwargs.get('arrowcolor')),
                       ax=annotation.get('ax', 0),
                       ay=annotation.get('ay', -100),
                       textangle=annotation.get('textangle', -90),
                       font=dict(color=annotation.get(
                                     'fontcolor',
                                     annotation.get('color',
                                                    kwargs.get('fontcolor'))),
                                 size=annotation.get(
                                     'fontsize',
                                     annotation.get('size',
                                                    kwargs.get('fontsize')))))
            local_list.append(ann)
        else:
            # {x_point: text} shorthand: anchor each label at the data value.
            for k, v in list(annotation.items()):
                if kind in ('candlestick', 'ohlc', 'candle'):
                    # ohlc charts: anchor at the 'high' column, second axis.
                    d = ta._ohlc_dict(df)
                    maxv = df[d['high']].ix[k]
                    yref = 'y2'
                else:
                    maxv = df.ix[k].sum() if k in df.index else 0
                    yref = 'y1'
                ann = dict(x=k, y=maxv, xref='x', yref=yref, text=v,
                           showarrow=kwargs.get('showarrow', True),
                           arrowhead=kwargs.get('arrowhead', 7),
                           arrowcolor=kwargs['arrowcolor'],
                           ax=kwargs.get('ax', 0),
                           ay=kwargs.get('ay', -100),
                           textangle=kwargs.get('textangle', -90),
                           font=dict(color=kwargs['fontcolor'],
                                     size=kwargs['fontsize']))
                local_list.append(ann)
        return local_list

    annotations = make_list(annotations)
    _list_ann = []
    for ann in annotations:
        _list_ann.extend(check_ann(ann))
    return _list_ann
def _model(self, beta):
    """Creates the structure of the model (model matrices, etc)

    Parameters
        beta : np.array
            Contains untransformed starting values for the latent variables

    Returns
        lambda : np.array
            Contains the values for the conditional volatility series
        Y : np.array
            Contains the length-adjusted time series (accounting for lags)
        scores : np.array
            Contains the score terms for the time series
    """
    # Drop the first max_lag observations so lagged terms are defined.
    Y = np.array(self.data[self.max_lag:self.data.shape[0]])
    # NOTE(review): X is assigned but never used below -- kept as-is.
    X = np.ones(Y.shape[0])
    scores = np.zeros(Y.shape[0])
    # Transform latent variables from their unconstrained space.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k])
                     for k in range(beta.shape[0])])
    # Initialise log-volatility at the constant term.
    lmda = np.ones(Y.shape[0]) * parm[0]
    # Loop over time series
    for t in range(0, Y.shape[0]):
        if t < self.max_lag:
            # Warm-up: use the unconditional level implied by the
            # persistence parameters.
            lmda[t] = parm[0] / (1 - np.sum(parm[1:(self.p + 1)]))
        else:
            # Loop over GARCH terms (lagged volatilities).
            for p_term in range(0, self.p):
                lmda[t] += parm[1 + p_term] * lmda[t - p_term - 1]
            # Loop over Score terms (lagged scores).
            for q_term in range(0, self.q):
                lmda[t] += parm[1 + self.p + q_term] * scores[t - q_term - 1]
            # Optional leverage effect: sign of the previous residual
            # scales the previous score contribution.
            if self.leverage is True:
                lmda[t] += (parm[-3] * np.sign(-(Y[t - 1] - parm[-1]))
                            * (scores[t - 1] + 1))
        # Score of the t-distributed observation given current volatility.
        # NOTE(review): indentation reconstructed -- the score is computed
        # every period (warm-up included); confirm against upstream.
        scores[t] = (((parm[-2] + 1.0) * np.power(Y[t] - parm[-1], 2))
                     / float(parm[-2] * np.exp(lmda[t])
                             + np.power(Y[t] - parm[-1], 2))) - 1.0
    return lmda, Y, scores
def _covert_to_hashable(data):
    r"""Convert ``data`` into a (prefix, bytes) pair suitable for hashing.

    Args:
        data (?): bytes, str, ndarray, uuid.UUID, int, np.int64 or
            np.float64

    Returns:
        tuple: (prefix, hashable) where ``hashable`` is a bytes
        representation of ``data``

    CommandLine:
        python -m utool.util_hash _covert_to_hashable

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> from utool.util_hash import _covert_to_hashable  # NOQA
        >>> import utool as ut
        >>> data = np.array([1], dtype=np.int64)
        >>> result = _covert_to_hashable(data)
        >>> print(result)
    """
    if isinstance(data, six.binary_type):
        hashable = data
        prefix = b'TXT'
    elif util_type.HAVE_NUMPY and isinstance(data, np.ndarray):
        if data.dtype.kind == 'O':
            # Object arrays serialize via pickle, which is not stable.
            msg = '[ut] hashing ndarrays with dtype=object is unstable'
            warnings.warn(msg, RuntimeWarning)
            hashable = data.dumps()
        else:
            hashable = data.tobytes()
        prefix = b'NDARR'
    elif isinstance(data, six.text_type):
        # convert unicode into bytes
        hashable = data.encode('utf-8')
        prefix = b'TXT'
    elif isinstance(data, uuid.UUID):
        hashable = data.bytes
        prefix = b'UUID'
    elif isinstance(data, int):
        # warnings.warn('[util_hash] Hashing ints is slow, numpy is prefered')
        hashable = _int_to_bytes(data)
        # hashable = data.to_bytes(8, byteorder='big')
        prefix = b'INT'
    # elif isinstance(data, float):
    #     hashable = repr(data).encode('utf8')
    #     prefix = b'FLT'
    elif util_type.HAVE_NUMPY and isinstance(data, np.int64):
        # Recurse through the plain-int branch.
        return _covert_to_hashable(int(data))
    elif util_type.HAVE_NUMPY and isinstance(data, np.float64):
        # Exact rational representation avoids float-repr instability.
        a, b = float(data).as_integer_ratio()
        hashable = (a.to_bytes(8, byteorder='big')
                    + b.to_bytes(8, byteorder='big'))
        prefix = b'FLOAT'
    else:
        raise TypeError('unknown hashable type=%r' % (type(data)))
        # import bencode
        # hashable = bencode.Bencoder.encode(data).encode('utf-8')
        # prefix = b'BEN'
    # NOTE(review): this unconditionally discards the type prefixes
    # computed above. It appears deliberate (prefixes disabled upstream),
    # but confirm before relying on the prefix values.
    prefix = b''
    return prefix, hashable
def __load_config(self):
    """Find and load .scuba.yml."""
    # top_path is where .scuba.yml is found and becomes the top of our
    # bind mount. top_rel is the relative path from top_path to the
    # current working directory, and is where we'll set the working
    # directory in the container (relative to the bind mount point).
    try:
        top_path, top_rel = find_config()
        self.config = load_config(os.path.join(top_path, SCUBA_YML))
    except ConfigNotFoundError as err:
        # SCUBA_YML can be missing if --image was given; in that case we
        # assume a default config rooted at the current directory.
        if not self.image_override:
            raise ScubaError(str(err))
        top_path, top_rel = os.getcwd(), ''
        self.config = ScubaConfig(image=None)
    except ConfigError as err:
        raise ScubaError(str(err))
    # Mount the scuba root directory at the same path in the container,
    # set the working dir relative to it, and expose the root via env.
    self.add_volume(top_path, top_path)
    self.set_workdir(os.path.join(top_path, top_rel))
    self.add_env('SCUBA_ROOT', top_path)
def tag_add(package, tag, pkghash):
    """Add a new tag for a given package hash.

    Unlike versions, tags can have an arbitrary format, and can be
    modified and deleted. When a package is pushed, it gets the
    "latest" tag.
    """
    team, owner, pkg = parse_package(package)
    session = _get_session(team)
    endpoint = "{url}/api/tag/{owner}/{pkg}/{tag}".format(
        url=get_registry_url(team), owner=owner, pkg=pkg, tag=tag)
    payload = json.dumps(dict(hash=_match_hash(package, pkghash)))
    session.put(endpoint, data=payload)
def sphofs(lat1, lon1, r, pa, tol=1e-2, rmax=None):
    """Offset from one location on the sphere to another.

    Given a start location (*lat1*, *lon1*), an offset distance and a
    direction (expressed as a bearing, AKA position angle), compute the
    final location. Mirrors :func:`sphdist` and :func:`sphbear` such that::

        # If:
        r = sphdist(lat1, lon1, lat2a, lon2a)
        pa = sphbear(lat1, lon1, lat2a, lon2a)
        lat2b, lon2b = sphofs(lat1, lon1, r, pa)
        # Then lat2b = lat2a and lon2b = lon2a

    Arguments are:

    lat1
      The latitude of the start location.
    lon1
      The longitude of the start location.
    r
      The distance to offset by.
    pa
      The position angle ("PA" or bearing) to offset towards.
    tol
      The tolerance for the accuracy of the calculation.
    rmax
      The maximum allowed offset distance.

    Returns a pair ``(lat2, lon2)``. All arguments and return values are
    measured in radians; the arguments may be vectors. The PA sign
    convention is astronomical, measuring orientation east from north.
    Note that the ordering maps to the nonstandard ``(Dec, RA)`` in
    equatorial coordinates, and to ``(Y, X)`` in a spherical projection.

    The offset is computed naively as::

        lat2 = lat1 + r * cos(pa)
        lon2 = lon1 + r * sin(pa) / cos(lat2)

    which fails for large offsets. Two error checks are available: if
    *tol* is not None, :func:`sphdist` recomputes the actual distance and
    :exc:`ValueError` is raised when its fractional difference from *r*
    exceeds *tol* (this adds per-call overhead). If *rmax* is not None,
    magnitudes of *r* greater than *rmax* are rejected; for reference, an
    *r* of 0.2 (~11 deg) gives a maximum fractional distance error of ~3%.
    """
    if rmax is not None and np.abs(r) > rmax:
        raise ValueError('sphofs radius value %f is too big for '
                         'our approximation' % r)
    end_lat = lat1 + r * np.cos(pa)
    end_lon = lon1 + r * np.sin(pa) / np.cos(end_lat)
    if tol is not None:
        # Verify the naive offset by measuring the actual distance back.
        actual = sphdist(lat1, lon1, end_lat, end_lon)
        if np.any(np.abs((actual - r) / actual) > tol):
            raise ValueError('sphofs approximation broke down '
                             '(%s %s %s %s %s %s %s)'
                             % (lat1, lon1, end_lat, end_lon, r, actual, pa))
    return end_lat, end_lon
def file_filter(ifn: str, indir: str, opts: Namespace) -> bool:
    """Determine whether to process ifn. We don't process:

    1) Anything in a directory having a path element that begins with "_"
       or matching one of ``opts.skipdirs``
    2) Non-JSON files (unless ifn is a URL)
    3) Temporary lists of known errors (``opts.skipfns``)
    4) Really, really big files (over ``opts.maxsize`` KB)

    :param ifn: input file name
    :param indir: input directory
    :param opts: argparse options
    :return: True if to be processed, False if to be skipped
    """
    # If it looks like we're processing a URL as an input file, skip the
    # suffix check.
    if '://' in ifn:
        return True
    if not ifn.endswith('.json'):
        return False
    skip_dir = indir and (indir.startswith("_") or "/_" in indir
                          or any(dn in indir for dn in opts.skipdirs))
    if skip_dir:
        return False
    if opts.skipfns and any(sfn in ifn for sfn in opts.skipfns):
        return False
    # Size check only applies when reading a directory (not a single file).
    if not opts.infile and opts.maxsize:
        if os.path.getsize(os.path.join(indir, ifn)) > (opts.maxsize * 1000):
            return False
    return True
def _union_copy ( dict1 , dict2 ) :
"""Internal wrapper to keep one level of copying out of play , for efficiency .
Only copies data on dict2 , but will alter dict1.""" | for key , value in dict2 . items ( ) :
if key in dict1 and isinstance ( value , dict ) :
dict1 [ key ] = _union_copy ( dict1 [ key ] , value )
else :
dict1 [ key ] = copy . deepcopy ( value )
return dict1 |
def delete_episode(db, aid, episode):
    """Delete an episode."""
    # Match on anime id plus the episode's type and number.
    params = {'aid': aid, 'type': episode.type, 'number': episode.number}
    db.cursor().execute(
        'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number',
        params)
def keystroke_output(self, settings, key, user_data):
    """Settings callback for scroll_output.

    Called when the gconf var scroll_output changes; applies the new
    scroll-on-output value to every open terminal.
    """
    for term in self.guake.notebook_manager.iter_terminals():
        term.set_scroll_on_output(settings.get_boolean(key))
def image_placeholder(width: Union[int, str] = 1920,
                      height: Union[int, str] = 1080) -> str:
    """Generate a link to the image placeholder.

    :param width: Width of image.
    :param height: Height of image.
    :return: URL to image placeholder.
    """
    return 'http://placehold.it/{width}x{height}'.format(width=width,
                                                         height=height)
def compress(self, setup):
    """Returns the compressed graph according to the given experimental setup

    Parameters
        setup : :class:`caspo.core.setup.Setup`
            Experimental setup used to compress the graph

    Returns
        caspo.core.graph.Graph
            Compressed graph
    """
    # Nodes named in the setup must never be merged away.
    designated = set(setup.nodes)
    zipped = self.copy()
    # Candidates for compression: nodes neither designated nor already
    # marked compressed.
    marked = [(n, d) for n, d in self.nodes(data=True)
              if n not in designated and not d.get('compressed', False)]
    # Iterate to a fixpoint: merging a node can expose new candidates.
    while marked:
        for node, _ in sorted(marked):
            # NOTE(review): predecessors/successors are indexed below, so
            # this assumes the networkx 1.x list-returning API -- confirm.
            backward = zipped.predecessors(node)
            forward = zipped.successors(node)
            if not backward or (len(backward) == 1
                                and not backward[0] in forward):
                # No predecessors (or one that isn't also a successor):
                # fold this node into its targets.
                self.__merge_source_targets(node, zipped)
            elif not forward or (len(forward) == 1
                                 and not forward[0] in backward):
                # Symmetric case: fold this node into its sources.
                self.__merge_target_sources(node, zipped)
            else:
                # Cannot compress without losing structure; pin it.
                designated.add(node)
        marked = [(n, d) for n, d in self.nodes(data=True)
                  if n not in designated and not d.get('compressed', False)]
    # Keep only the nodes that were never marked compressed.
    not_compressed = [(n, d) for n, d in zipped.nodes(data=True)
                      if not d.get('compressed', False)]
    return zipped.subgraph([n for n, _ in not_compressed])
def show_schemas(schemaname):
    """Show anchore document schemas.

    Loads every JSON schema file from the package schema directory and
    prints those matching *schemaname* (all of them when *schemaname* is
    empty). Exits the process with status 0 on success, 1 on failure.
    """
    ecode = 0
    try:
        schemas = {}
        schema_dir = os.path.join(contexts['anchore_config']['pkg_dir'],
                                  'schemas')
        for f in os.listdir(schema_dir):
            sdata = {}
            try:
                with open(os.path.join(schema_dir, f), 'r') as FH:
                    sdata = json.loads(FH.read())
            except Exception:
                # BUGFIX: was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt.
                anchore_print_err('found schema file but failed to parse: '
                                  + os.path.join(schema_dir, f))
            if sdata and (not schemaname or f in schemaname):
                schemas[f] = sdata
        if not schemas:
            anchore_print_err("no specified schemas were found to show")
        else:
            anchore_print(json.dumps(schemas, indent=4))
    except Exception as err:
        # BUGFIX: report the actual failure instead of a generic message
        # (the caught exception was previously discarded).
        anchore_print_err('operation failed: ' + str(err))
        ecode = 1
    sys.exit(ecode)
def _find_by_id(self, resource, _id, parent=None):
    """Find the document by Id. If parent is not provided then on
    routing exception try to find using search.

    Returns the first matching document, or None when not found.
    """
    def is_found(hit):
        # Older ES versions report 'exists' instead of 'found'; normalise.
        if 'exists' in hit:
            hit['found'] = hit['exists']
        return hit.get('found', False)

    args = self._es_args(resource)
    try:
        # set the parent if available
        if parent:
            args['parent'] = parent
        hit = self.elastic(resource).get(id=_id, **args)
        if not is_found(hit):
            return
        # Wrap the single hit in a search-style envelope for parsing.
        docs = self._parse_hits({'hits': {'hits': [hit]}}, resource)
        return docs.first()
    except elasticsearch.NotFoundError:
        return
    except elasticsearch.TransportError as tex:
        # A parent/child mapping without the routing key raises a routing
        # error on direct GET; fall back to a search by _id.
        # NOTE(review): any other TransportError falls through and returns
        # None implicitly rather than re-raising -- confirm this is
        # intentional.
        if (tex.error == 'routing_missing_exception'
                or 'RoutingMissingException' in tex.error):
            # search for the item
            args = self._es_args(resource)
            query = {'query': {'bool': {'must': [{'term': {'_id': _id}}]}}}
            try:
                args['size'] = 1
                hits = self.elastic(resource).search(body=query, **args)
                docs = self._parse_hits(hits, resource)
                return docs.first()
            except elasticsearch.NotFoundError:
                return
def simxSetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    # Thin wrapper: forwards directly to the remote-API C binding and
    # returns its result code unchanged.
    return c_SetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode)
def page_data_frame(df, pager_argv=['less'], **kwargs):
    """Render a DataFrame as text and send it to a terminal pager program
    (e.g. `less`), so that one can browse a full table conveniently.

    df
      The DataFrame to view.
    pager_argv: default ``['less']``
      A list of strings passed to :class:`subprocess.Popen` that launches
      the pager program.
    kwargs
      Additional keywords are passed to :meth:`pandas.DataFrame.to_string`.

    Returns ``None``. Execution blocks until the pager subprocess exits.
    """
    import codecs
    import subprocess
    import sys
    pager = subprocess.Popen(pager_argv, shell=False,
                             stdin=subprocess.PIPE, close_fds=True)
    try:
        enc = codecs.getwriter(sys.stdout.encoding or 'utf8')(pager.stdin)
        # BUGFIX: `enc.close()` used to live in a finally that ran even
        # when codecs.getwriter failed before `enc` was assigned, raising
        # a NameError that masked the original exception. The nested
        # try/finally closes it only once it exists.
        try:
            df.to_string(enc, **kwargs)
        finally:
            enc.close()
    finally:
        if not pager.stdin.closed:
            pager.stdin.close()
        pager.wait()
def update_gen_report(self, role, file, original):
    """Update the role state and adjust the gen totals.

    Classifies the role's file as "ok" (did not exist yet), "changed"
    (content differs) or "skipped" (content identical), bumps the matching
    counter in self.report["state"], and writes the content out except in
    the skipped case.
    """
    state = self.report["state"]
    if not os.path.exists(self.paths[file]):
        # File does not exist yet: generating it fresh counts as "ok".
        state["ok_role"] += 1
        self.report["roles"][role]["state"] = "ok"
    elif (self.report["roles"][role][file] != original
          and self.report["roles"][role]["state"] != "ok"):
        # Content differs and the role was not already marked ok.
        state["changed_role"] += 1
        self.report["roles"][role]["state"] = "changed"
    elif self.report["roles"][role][file] == original:
        state["skipped_role"] += 1
        self.report["roles"][role]["state"] = "skipped"
        # NOTE(review): content unchanged, so skip rewriting the file.
        # Indentation was reconstructed here -- confirm the early return
        # belongs inside the skipped branch (otherwise the write below
        # would be unreachable).
        return
    utils.string_to_file(self.paths[file], original)
def token_list_width(tokenlist):
    """Return the character width of this token list.

    (Takes double-width characters into account.)

    :param tokenlist: List of (token, text) or (token, text, mouse_handler)
        tuples.
    """
    ZeroWidthEscape = Token.ZeroWidthEscape
    total = 0
    for item in tokenlist:
        # Zero-width escape sequences contribute nothing to the width.
        if item[0] == ZeroWidthEscape:
            continue
        for ch in item[1]:
            total += get_cwidth(ch)
    return total
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.