signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _validate_information(self):
    """Validate that all required MIB variables are present and well formed.

    Raises DataError for a missing variable or an over-long ModuleName and
    ValueError for non-string version fields, then normalizes the version
    strings and pads ModuleName before marking the instance valid.
    """
    for required in ("ModuleName", "ModuleVersion", "APIVersion"):
        if required not in self.variables:
            raise DataError("Needed variable was not defined in mib file.", variable=required)
    module_name = self.variables["ModuleName"]
    # ModuleName must fit in 6 characters; it is padded to exactly 6 below.
    if len(module_name) > 6:
        raise DataError("ModuleName too long, must be 6 or fewer characters.", module_name=module_name)
    module_version = self.variables["ModuleVersion"]
    if not isinstance(module_version, str):
        raise ValueError("ModuleVersion ('%s') must be a string of the form X.Y.Z" % str(module_version))
    api_version = self.variables["APIVersion"]
    if not isinstance(api_version, str):
        raise ValueError("APIVersion ('%s') must be a string of the form X.Y" % str(api_version))
    # Normalize everything in place once validation has passed.
    self.variables["ModuleVersion"] = self._convert_module_version(module_version)
    self.variables["APIVersion"] = self._convert_api_version(api_version)
    self.variables["ModuleName"] = module_name.ljust(6)
    self.valid = True
def visit_Call(self, node):
    """Call visitor - used for finding a setup() call."""
    self.generic_visit(node)
    # setup() is a keywords-only function; positional args rule this call out.
    if node.args:
        return
    seen = set()
    for keyword in node.keywords:
        if keyword.arg is not None:
            seen.add(keyword.arg)
        elif isinstance(keyword.value, ast.Dict):
            # Simple case for dictionary expansion for Python >= 3.5.
            seen.update(key.s for key in keyword.value.keys)
    # Simple case for dictionary expansion for Python <= 3.4.
    legacy_kwargs = getattr(node, 'kwargs', ())
    if legacy_kwargs and isinstance(legacy_kwargs, ast.Dict):
        seen.update(key.s for key in legacy_kwargs.keys)
    # The bare minimum number of arguments seems to be around five, which
    # includes author, name, version, module/package and something extra.
    if len(seen) < 5:
        return
    score = sum(self.attributes.get(name, 0) for name in seen) / len(seen)
    if score < 0.5:
        LOG.debug("Scoring for setup%r below 0.5: %.2f", tuple(seen), score)
        return
    # Redirect call to our setup() tap function.
    node.func = ast.Name(id='__f8r_setup', ctx=node.func.ctx)
    self.redirected = True
def settings(**kwargs):
    """Build the ParameterSet holding bundle-level settings.

    Generally, this will automatically be added to a newly initialized
    :class:`phoebe.frontend.bundle.Bundle`.

    :parameter **kwargs: defaults for the values of any of the parameters
    :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
        created :class:`phoebe.parameters.parameters.Parameter` objects
    """
    params = [
        StringParameter(qualifier='phoebe_version',
                        value=kwargs.get('phoebe_version', __version__),
                        description='Version of PHOEBE - change with caution'),
        BoolParameter(qualifier='log_history',
                      value=kwargs.get('log_history', False),
                      description='Whether to log history (undo/redo)'),
        DictParameter(qualifier='dict_filter',
                      value=kwargs.get('dict_filter', {}),
                      description='Filters to use when using dictionary access'),
        BoolParameter(qualifier='dict_set_all',
                      value=kwargs.get('dict_set_all', False),
                      description='Whether to set all values for dictionary access that returns more than 1 result'),
    ]
    return ParameterSet(params)
def show(config_file=False):
    '''Return a list of sysctl parameters for this minion.

    If ``config_file`` is given, parse key=value pairs from that file
    instead of querying the live kernel via ``sysctl -ae``.

    CLI Example:

    .. code-block:: bash

        salt '*' sysctl.show
    '''
    # Known top-level sysctl namespaces (FreeBSD-style); lines from the live
    # output are only accepted when they start with one of these roots.
    roots = ('compat', 'debug', 'dev', 'hptmv', 'hw', 'kern', 'machdep', 'net', 'p1003_1b', 'security', 'user', 'vfs', 'vm')
    cmd = 'sysctl -ae'
    ret = {}
    # comps holds the last seen key=value split; the empty-string seed makes
    # the continuation-line branch (`elif comps[0]`) a no-op until a real
    # key has been parsed.
    comps = ['']
    if config_file:
        # If the file doesn't exist, return an empty list
        if not os.path.exists(config_file):
            return []
        try:
            with salt.utils.files.fopen(config_file, 'r') as f:
                for line in f.readlines():
                    l = line.strip()
                    if l != "" and not l.startswith("#"):
                        # NOTE(review): the unstripped `line` is split here, so
                        # stored values keep their trailing newline -- confirm
                        # whether that is intended.
                        comps = line.split('=', 1)
                        ret[comps[0]] = comps[1]
            return ret
        except (OSError, IOError):
            log.error('Could not open sysctl config file')
            return None
    else:
        out = __salt__['cmd.run'](cmd, output_loglevel='trace')
        for line in out.splitlines():
            if any([line.startswith('{0}.'.format(root)) for root in roots]):
                # New parameter line: record the value under its key.
                comps = line.split('=', 1)
                ret[comps[0]] = comps[1]
            elif comps[0]:
                # Line does not start a new parameter: treat it as a
                # continuation of the previous parameter's (multi-line) value.
                ret[comps[0]] += '{0}\n'.format(line)
            else:
                continue
        return ret
def _get_device_names_in_group(self):
    '''Collect the names of all devices belonging to the device group.

    :returns: list -- list of device names in group
    '''
    group = pollster(self._get_device_group)(self.devices[0])
    members = group.devices_s.get_collection()
    # Member names come back prefixed with the partition (e.g. '/Common/');
    # strip that prefix so only the bare device name remains.
    return [member.name.replace('/%s/' % self.partition, '') for member in members]
def _open_archive ( path ) :
""": param path :
A unicode string of the filesystem path to the archive
: return :
An archive object""" | if path . endswith ( '.zip' ) :
return zipfile . ZipFile ( path , 'r' )
return tarfile . open ( path , 'r' ) |
def _refresh_url ( self ) :
"""刷新获取 url , 失败的时候返回空而不是 None""" | songs = self . _api . weapi_songs_url ( [ int ( self . identifier ) ] )
if songs and songs [ 0 ] [ 'url' ] :
self . url = songs [ 0 ] [ 'url' ]
else :
self . url = '' |
def tospark(self, engine=None):
    """Convert to spark mode.

    :param engine: a SparkContext used to distribute the data; required.
    :return: the data re-wrapped in spark mode via ``fromarray``.
    :raises ValueError: when no engine is provided.
    """
    from thunder.series.readers import fromarray
    if self.mode == 'spark':
        # BUG FIX: the warning previously said 'local' mode, a copy-paste
        # from the tolocal() counterpart; this method converts TO spark.
        logging.getLogger('thunder').warn('images already in spark mode')
    if engine is None:
        raise ValueError('Must provide SparkContext')
    return fromarray(self.toarray(), index=self.index, labels=self.labels, engine=engine)
def get_tag(self, el):
    """Return the element's tag name, lowercased unless this is XML."""
    name = self.get_tag_name(el)
    if name is None or self.is_xml:
        return name
    return util.lower(name)
async def listHooks(self, *args, **kwargs):
    """List hooks in a given group.

    This endpoint will return a list of all the hook definitions within a
    given hook group.

    This method gives output: ``v1/list-hooks-response.json#``

    This method is ``stable``
    """
    # Thin generated wrapper: delegate to the shared API-call machinery
    # using the static endpoint metadata registered under "listHooks".
    return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
def smart_initialize(self):
    '''Use k-means++ to initialize a good set of centroids.

    Stores the resulting (k, n_features) array on ``self.centroids`` and
    also returns it.
    '''
    if self.seed is not None:
        # Useful for obtaining consistent results.
        np.random.seed(self.seed)
    centroids = np.zeros((self.k, self.data.shape[1]))
    # Randomly choose the first centroid.
    # Since we have no prior knowledge, choose uniformly at random.
    idx = np.random.randint(self.data.shape[0])
    # .toarray() implies self.data is a sparse matrix -- TODO confirm.
    centroids[0] = self.data[idx, :].toarray()
    # Compute distances from the first centroid chosen to all the other data points.
    squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten() ** 2
    for i in range(1, self.k):
        # Choose the next centroid randomly, so that the probability for each
        # data point to be chosen is directly proportional to its squared
        # distance from the nearest centroid.
        # Roughly speaking, a new centroid should be as far from the other
        # centroids as possible (the k-means++ rule).
        idx = np.random.choice(self.data.shape[0], 1, p=squared_distances / sum(squared_distances))
        centroids[i] = self.data[idx, :].toarray()
        # Now compute distances from the centroids chosen so far to all data
        # points (min over centroids = distance to the nearest one).
        squared_distances = np.min(pairwise_distances(self.data, centroids[0:i + 1], metric='euclidean') ** 2, axis=1)
    self.centroids = centroids
    return centroids
def _mirror_groups(self):
    """Mirror the user's LDAP groups into the Django database and update
    the user's membership accordingly."""
    desired = frozenset(self._get_groups().get_group_names())
    current = frozenset(self._user.groups.values_list("name", flat=True).iterator())
    excluded = self.settings.MIRROR_GROUPS_EXCEPT
    included = self.settings.MIRROR_GROUPS
    # When a white- or black-list is configured, restrict the change set so
    # that membership of groups beyond our purview is left untouched.
    if isinstance(excluded, (set, frozenset)):
        desired = (desired - excluded) | (current & excluded)
    elif isinstance(included, (set, frozenset)):
        desired = (desired & included) | (current - included)
    if desired == current:
        return
    existing = list(Group.objects.filter(name__in=desired).iterator())
    existing_names = frozenset(group.name for group in existing)
    created = [Group.objects.get_or_create(name=name)[0]
               for name in desired if name not in existing_names]
    self._user.groups.set(existing + created)
def _is_vis ( channel ) :
"""Determine whether the given channel is a visible channel""" | if isinstance ( channel , str ) :
return channel == '00_7'
elif isinstance ( channel , int ) :
return channel == 1
else :
raise ValueError ( 'Invalid channel' ) |
def insert(key, value):
    """Store a value with a key.

    If the key is already present in the database, this does nothing.
    """
    # Serialize the value so it can be stored as binary data in a document.
    payload = pickle.dumps(value, protocol=constants.PICKLE_PROTOCOL)
    document = {KEY_FIELD: key, VALUE_FIELD: Binary(payload)}
    # The key is a unique index, so a duplicate insert is rejected by the
    # server; we treat that as "already stored" and quietly ignore it.
    try:
        return collection.insert(document)
    except pymongo.errors.DuplicateKeyError:
        return None
def cli_verify_jar_signature(argument_list):
    """Command-line wrapper around verify().

    TODO: use trusted keystore;
    """
    usage_message = "jarutil v file.jar trusted_certificate.pem [SF_NAME.SF]"
    if not 2 <= len(argument_list) <= 3:
        print(usage_message)
        return 1
    # Pad with None so the optional SF name unpacks cleanly.
    jar_file, certificate, sf_name = (argument_list + [None])[:3]
    try:
        verify(certificate, jar_file, sf_name)
    except VerificationError as error_message:
        print(error_message)
        return 1
    print("Jar verified.")
    return 0
def _set_description(self, schema):
    """Populate ``self.description`` from a schema.

    :type schema: Sequence[google.cloud.bigquery.schema.SchemaField]
    :param schema: A description of fields in the schema, or None.
    """
    if schema is None:
        self.description = None
        return
    columns = []
    for field in schema:
        columns.append(Column(
            name=field.name,
            type_code=field.field_type,
            display_size=None,
            internal_size=None,
            precision=None,
            scale=None,
            null_ok=field.is_nullable,
        ))
    self.description = tuple(columns)
def append_line(self, new_line):
    """Appends the new_line to the LS340 program.

    new_line is sent verbatim as the String payload of a PGM command,
    addressed by this program's index (self.idx).
    """
    # TODO: The user still has to write the raw line, this is error prone.
    self._write(('PGM', [Integer, String]), self.idx, new_line)
def resolve_reference(target_reference, project):
    """Given a target_reference, made in context of 'project', return the
    AbstractTarget instance that is referred to, as well as properties
    explicitly specified for this reference.

    :param target_reference: textual reference, optionally followed by a
        properties override section.
    :param project: the ProjectTarget the reference is made from.
    :return: tuple of (target, property_set).
    :raises ValueError: when the reference does not parse.
    """
    # Separate target name from properties override.
    assert isinstance(target_reference, basestring)
    assert isinstance(project, ProjectTarget)
    split = _re_separate_target_from_properties.match(target_reference)
    if not split:
        # BUG FIX: previously raised BaseException, which escapes ordinary
        # 'except Exception' handlers; ValueError is the right type for a
        # malformed reference (and is still caught by broader handlers).
        raise ValueError("Invalid reference: '%s'" % target_reference)
    # Renamed from 'id' to avoid shadowing the builtin.
    target_id = split.group(1)
    sproperties = []
    if split.group(3):
        sproperties = property.create_from_strings(feature.split(split.group(3)))
        sproperties = feature.expand_composites(sproperties)
    # Find the target.
    target = project.find(target_id)
    return (target, property_set.create(sproperties))
def add_comment(self, app_id, record_id, field_id, message):
    """Directly add a comment to a record without retrieving the app or record first.

    Warnings:
        Does not perform any app, record, or field ID validation

    Args:
        app_id (str): Full App ID string
        record_id (str): Full parent Record ID string
        field_id (str): Full field ID to target reference field on parent Record string
        message (str): New comment message body
    """
    # Fire-and-forget POST; createdDate is stamped client-side in RFC 3339.
    self._swimlane.request(
        'post',
        'app/{0}/record/{1}/{2}/comment'.format(app_id, record_id, field_id),
        json={'message': message, 'createdDate': pendulum.now().to_rfc3339_string()}
    )
def _set_people ( self , people ) :
"""Sets who the object is sent to""" | if hasattr ( people , "object_type" ) :
people = [ people ]
elif hasattr ( people , "__iter__" ) :
people = list ( people )
return people |
def convert_time_string(date_str):
    """Parse a timestamp like '2018-08-15T23:55:17' into a datetime object.

    Any fractional-seconds suffix (after a '.') is discarded before parsing.
    """
    timestamp, _, _fraction = date_str.partition(".")
    return datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
def phi4(gold_clustering, predicted_clustering):
    """Subroutine for ceafe.  Computes the mention F measure between gold
    and predicted mentions in a cluster."""
    overlap = sum(1 for mention in gold_clustering if mention in predicted_clustering)
    total = len(gold_clustering) + len(predicted_clustering)
    return 2 * overlap / float(total)
def _build_likelihood(self):
    """Construct a tensorflow function to compute the bound on the marginal
    likelihood (variational lower bound with inducing features and an
    uncertain-input Gaussian q(X))."""
    pX = DiagonalGaussian(self.X_mean, self.X_var)
    num_inducing = len(self.feature)
    # Kernel expectations under q(X) (the "psi statistics").
    psi0 = tf.reduce_sum(expectation(pX, self.kern))
    psi1 = expectation(pX, (self.kern, self.feature))
    psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0)
    Kuu = features.Kuu(self.feature, self.kern, jitter=settings.jitter)
    L = tf.cholesky(Kuu)
    sigma2 = self.likelihood.variance
    sigma = tf.sqrt(sigma2)
    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
    tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
    AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
    B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    # log|B| computed from the Cholesky factor's diagonal.
    log_det_B = 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
    c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma
    # KL[q(x) || p(x)]
    dX_var = self.X_var if len(self.X_var.get_shape()) == 2 else tf.matrix_diag_part(self.X_var)
    NQ = tf.cast(tf.size(self.X_mean), settings.float_type)
    D = tf.cast(tf.shape(self.Y)[1], settings.float_type)
    KL = -0.5 * tf.reduce_sum(tf.log(dX_var)) \
        + 0.5 * tf.reduce_sum(tf.log(self.X_prior_var)) \
        - 0.5 * NQ \
        + 0.5 * tf.reduce_sum((tf.square(self.X_mean - self.X_prior_mean) + dX_var) / self.X_prior_var)
    # compute log marginal bound
    ND = tf.cast(tf.size(self.Y), settings.float_type)
    bound = -0.5 * ND * tf.log(2 * np.pi * sigma2)
    bound += -0.5 * D * log_det_B
    bound += -0.5 * tf.reduce_sum(tf.square(self.Y)) / sigma2
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 - tf.reduce_sum(tf.matrix_diag_part(AAT)))
    bound -= KL
    return bound
def add_sparql_line_nums(sparql):
    """Returns a sparql query with 1-based line numbers prepended."""
    numbered = []
    for number, line in enumerate(sparql.split("\n"), start=1):
        numbered.append("%s %s" % (number, line))
    return "\n".join(numbered)
def _augmenting_row_reduction(self):
    """Augmenting row reduction step from the LAPJV algorithm.

    Works on the instance's assignment state: self._x / self._y hold the
    current (partial) assignments with -1 meaning unassigned, self.c is
    the cost data and self._v the dual values -- presumably row->column,
    column->row, cost matrix and column potentials respectively; TODO
    confirm against the rest of the class.
    """
    unassigned = np.where(self._x == -1)[0]
    for i in unassigned:
        # Time in this loop can be proportional to 1/epsilon.
        # This step is not strictly necessary, so cutoff early
        # to avoid near-infinite loops.
        for _ in range(self.c.size):
            # Find smallest 2 values and indices of the reduced costs.
            temp = self.c[i] - self._v
            j1 = np.argmin(temp)
            u1 = temp[j1]
            temp[j1] = np.inf
            j2 = np.argmin(temp)
            u2 = temp[j2]
            if u1 < u2:
                # Unique minimum: lower the dual of j1 by the gap.
                self._v[j1] -= u2 - u1
            elif self._y[j1] != -1:
                # Tie and j1 already taken: try the second-best column.
                j1 = j2
            k = self._y[j1]
            if k != -1:
                # Evict the previous owner of column j1.
                self._x[k] = -1
            self._x[i] = j1
            self._y[j1] = i
            # Continue augmenting from the evicted row (if any).
            i = k
            if k == -1 or abs(u1 - u2) < self.epsilon:
                break
def new(self, time_flags):
    # type: (int) -> None
    '''Create a new Rock Ridge Time Stamp record.

    Parameters:
     time_flags - The flags to use for this time stamp record.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('TF record already initialized!')
    self.time_flags = time_flags
    # Bit 7 selects the long (17-byte, volume-descriptor style) timestamp
    # form; otherwise the short 7-byte directory-record form is used.
    long_form = bool(self.time_flags & (1 << 7))
    date_factory = dates.VolumeDescriptorDate if long_form else dates.DirectoryRecordDate
    for bit, fieldname in enumerate(self.FIELDNAMES):
        if not (self.time_flags & (1 << bit)):
            continue
        setattr(self, fieldname, date_factory())
        getattr(self, fieldname).new()
    self._initialized = True
def show_download_links(self):
    """Query PyPI for pkg download URI for a package.

    @returns: 0
    """
    # In case they specify version as 'dev' instead of using -T svn,
    # don't show three svn URI's.
    if self.options.file_type == "all" and self.version == "dev":
        self.options.file_type = "svn"
    if self.options.file_type == "svn":
        version = "dev"
    elif self.version:
        version = self.version
    else:
        version = self.all_versions[0]
    if self.options.file_type == "all":
        # Search for source, egg, and svn.
        self.print_download_uri(version, True)
        self.print_download_uri(version, False)
        self.print_download_uri("dev", True)
    else:
        self.print_download_uri(version, self.options.file_type == "source")
    return 0
def is_none(string_, default='raise'):
    """Check if a string is equivalent to None.

    Parameters
    ----------
    string_ : str
    default : {'raise', False}
        Default behaviour if none of the "None" strings is detected.

    Returns
    -------
    is_none : bool

    Examples
    --------
    >>> is_none('2', default=False)
    False
    >>> is_none('undefined', default=False)
    True
    """
    none_strings = ('none', 'undefined', 'unknown', 'null', '')
    if string_.lower() in none_strings:
        return True
    if not default:
        return False
    raise ValueError('The value \'{}\' cannot be mapped to none.'.format(string_))
def _setSpeed(self, speed, motor, device):
    """Set motor speed.  This method takes into consideration the PWM
    frequency that the hardware is currently running at and limits the
    values passed to the hardware accordingly.

    :Parameters:
      speed : `int`
        Motor speed as an integer. Negative numbers indicate reverse
        speeds.
      motor : `str`
        A string value indicating the motor to set the speed on.
      device : `int`
        The device is the integer number of the hardware devices ID and
        is only used with the Pololu Protocol.

    :raises ValueError: when no command exists for the given motor name.
    """
    reverse = False
    if speed < 0:
        # Direction is encoded in the command, so work with the magnitude.
        speed = -speed
        reverse = True
    # 0 and 2 for Qik 2s9v1; 0, 2, and 4 for 2s12v10: these PWM settings
    # only accept 7-bit speeds, so clamp to 127.
    if self._deviceConfig[device]['pwm'] in (0, 2, 4,) and speed > 127:
        speed = 127
    if speed > 127:
        # 8-bit command variant: clamp to 255; the value sent on the wire
        # is offset by 128 -- presumably the protocol's 8-bit encoding,
        # TODO confirm against the Qik command reference.
        if speed > 255:
            speed = 255
        if reverse:
            cmd = self._COMMAND.get('{}-reverse-8bit'.format(motor))
        else:
            cmd = self._COMMAND.get('{}-forward-8bit'.format(motor))
        speed -= 128
    else:
        if reverse:
            cmd = self._COMMAND.get('{}-reverse-7bit'.format(motor))
        else:
            cmd = self._COMMAND.get('{}-forward-7bit'.format(motor))
    if not cmd:
        msg = "Invalid motor specified: {}".format(motor)
        self._log and self._log.error(msg)
        raise ValueError(msg)
    self._writeData(cmd, device, params=(speed,))
def local_to_global(self, index):
    """Translate *index* between this object's local and global index spaces.

    NOTE(review): the function name says local->global but the original
    summary and the returned variable name say the opposite direction --
    confirm the intended direction against the callers.

    :param index: input index (int, slice, or tuple of int/slice entries)
    :return: translated index for data
    :raises IndexError: for unsupported index types or lengths.
    """
    if (type(index) is int) or (type(index) is slice):
        if len(self.__mask) > 1:
            # A bare int/slice can only address 1-D data.
            raise IndexError('check length of parameter index')
        # 1D array
        if type(index) is int:
            return self.int_local_to_global(index)
        elif type(index) is slice:
            return self.slice_local_to_global(index)
        else:
            raise IndexError('check data type of index to be integer or slice')
    elif type(index) is tuple:
        local_index = []
        for k, item in enumerate(index):
            if k < len(self.__mask):
                if type(item) is slice:
                    temp_index = self.slice_local_to_global(item, k)
                elif type(item) in [int, np.int64, np.int32]:
                    temp_index = self.int_local_to_global(item, k)
                # NOTE(review): if item is neither a slice nor one of the
                # int types here, temp_index keeps its value from a previous
                # iteration (or is unbound on the first) -- looks like a
                # latent bug; confirm expected index element types.
                if temp_index is None:
                    return temp_index
            else:
                # Dimensions beyond the mask pass through untranslated.
                temp_index = item
            local_index.append(temp_index)
        return tuple(local_index)
    else:
        raise IndexError('check index for correct length and type')
def plot_gos(fout_img, goids, go2obj, **kws):
    """Given GO ids and the obo_dag, create a plot of paths from GO ids.

    fout_img is passed to the plotter's plt_dag (presumably the output
    image filename); kws are forwarded to GoSubDagPlot.
    """
    # rcntobj=True enables descendant-count annotation on the sub-DAG.
    gosubdag = GoSubDag(goids, go2obj, rcntobj=True)
    godagplot = GoSubDagPlot(gosubdag, **kws)
    godagplot.plt_dag(fout_img)
def run(self):
    '''Spin up the multiprocess event returner.'''
    salt.utils.process.appendproctitle(self.__class__.__name__)
    self.event = get_event('master', opts=self.opts, listen=True)
    events = self.event.iter_events(full=True)
    self.event.fire_event({}, 'salt/event_listen/start')
    try:
        for event in events:
            # A special exit tag asks the loop to finish after handling
            # this event.
            if event['tag'] == 'salt/event/exit':
                self.stop = True
            if self._filter(event):
                self.event_queue.append(event)
            # Flush in batches once the queue reaches the configured size.
            if len(self.event_queue) >= self.event_return_queue:
                self.flush_events()
            if self.stop:
                break
    finally:
        # Flush all we have at this moment.
        if self.event_queue:
            self.flush_events()
def make_ica_funs(observed_dimension, latent_dimension):
    """Build the functions implementing independent component analysis.

    The model is:
    latents are drawn i.i.d. for each data point from a product of student-t's.
    weights are the same across all datapoints.
    each data = latents * weights + noise.

    Returns (num_weights, sample, logprob, unpack_weights).
    """
    num_weights = observed_dimension * latent_dimension

    def predict(weights, latents):
        # (observed, latent) @ (latent, n) -> transposed to (n, observed).
        return np.dot(weights, latents).T

    def sample(weights, n_samples, noise_std, rs):
        latents = rs.randn(latent_dimension, n_samples)
        # Order the samples by their first latent coordinate.
        latents = np.array(sorted(latents.T, key=lambda row: row[0])).T
        noise = rs.randn(n_samples, observed_dimension) * noise_std
        return latents, predict(weights, latents) + noise

    def logprob(weights, latents, noise_std, observed):
        preds = predict(weights, latents)
        return np.sum(t.logpdf(preds, 2.4, observed, noise_std))

    def unpack_weights(weights):
        return np.reshape(weights, (observed_dimension, latent_dimension))

    return num_weights, sample, logprob, unpack_weights
def create(self, name, backend_router_id, flavor, instances, test=False):
    """Orders a Virtual_ReservedCapacityGroup.

    :param string name: Name for the new reserved capacity
    :param int backend_router_id: This selects the pod. See create_options for a list
    :param string flavor: Capacity KeyName, see create_options for a list
    :param int instances: Number of guest this capacity can support
    :param bool test: If True, don't actually order, just test.
    :return: the order receipt from verify_order/place_order.
    """
    # Since orderManager needs a DC id, just send in 0, the API will ignore it.
    args = (self.capacity_package, 0, [flavor])
    extras = {"backendRouterId": backend_router_id, "name": name}
    kwargs = {
        'extras': extras,
        'quantity': instances,
        'complex_type': 'SoftLayer_Container_Product_Order_Virtual_ReservedCapacity',
        'hourly': True
    }
    if test:
        receipt = self.ordering_manager.verify_order(*args, **kwargs)
    else:
        receipt = self.ordering_manager.place_order(*args, **kwargs)
    return receipt
def change_tunnel_ad_url(self):
    '''Change tunnel ad url.

    Closes any open tunnel, then issues a DELETE against the tunnel API.
    Returns False on a 401/403/500 response; otherwise returns None.

    NOTE(review): despite the name this only DELETEs the tunnel resource --
    presumably the next open() then picks up a fresh ad url; confirm.
    '''
    if self.is_open:
        self.close()
    req = requests.delete('https://api.psiturk.org/api/tunnel/',
                          auth=(self.access_key, self.secret_key))
    # the request content here actually will include the tunnel_hostname
    # if needed or wanted.
    if req.status_code in [401, 403, 500]:
        print(req.content)
        return False
def analyze(self, filename):
    """Reimplement analyze method: surface the dock widget (when present
    and not maximized) and run pylint on *filename*."""
    dock = self.dockwidget
    if dock and not self.ismaximized:
        dock.setVisible(True)
        dock.setFocus()
        dock.raise_()
    self.pylint.analyze(filename)
def command(self, function=None, prefix=None, unobserved=False):
    """Decorator to define a new command for this Ingredient or Experiment.

    The name of the command will be the name of the function. It can be
    called from the command-line or by using the run_command function.

    Commands are automatically also captured functions.

    The command can be given a prefix, to restrict its configuration space
    to a subtree. (see ``capture`` for more information)

    A command can be made unobserved (i.e. ignoring all observers) by
    passing the unobserved=True keyword argument.
    """
    # NOTE(review): `function` defaults to None but is passed straight to
    # self.capture and `function.__name__` is read below, so calling this
    # parenthesized (``@command(prefix=...)``) would fail unless
    # self.capture tolerates None -- confirm against capture().
    captured_f = self.capture(function, prefix=prefix)
    captured_f.unobserved = unobserved
    self.commands[function.__name__] = captured_f
    return captured_f
def serialize_wrapped_key(key_provider, wrapping_algorithm, wrapping_key_id, encrypted_wrapped_key):
    """Serializes EncryptedData into a Wrapped EncryptedDataKey.

    :param key_provider: Info for Wrapping MasterKey
    :type key_provider: aws_encryption_sdk.structures.MasterKeyInfo
    :param wrapping_algorithm: Wrapping Algorithm with which to wrap plaintext_data_key
    :type wrapping_algorithm: aws_encryption_sdk.identifiers.WrappingAlgorithm
    :param bytes wrapping_key_id: Key ID of wrapping MasterKey
    :param encrypted_wrapped_key: Encrypted data key
    :type encrypted_wrapped_key: aws_encryption_sdk.internal.structures.EncryptedData
    :returns: Wrapped EncryptedDataKey
    :rtype: aws_encryption_sdk.structures.EncryptedDataKey
    """
    if encrypted_wrapped_key.iv is None:
        # No IV: key info is the raw wrapping key id; ciphertext stored as-is.
        key_info = wrapping_key_id
        key_ciphertext = encrypted_wrapped_key.ciphertext
    else:
        # With an IV: pack key id, tag length (bits) and IV length/value into
        # a big-endian key-info header, and append the auth tag to the
        # ciphertext.
        key_info = struct.pack(
            ">{key_id_len}sII{iv_len}s".format(
                key_id_len=len(wrapping_key_id),
                iv_len=wrapping_algorithm.algorithm.iv_len
            ),
            to_bytes(wrapping_key_id),
            len(encrypted_wrapped_key.tag) * 8,  # Tag Length is stored in bits, not bytes
            wrapping_algorithm.algorithm.iv_len,
            encrypted_wrapped_key.iv,
        )
        key_ciphertext = encrypted_wrapped_key.ciphertext + encrypted_wrapped_key.tag
    return EncryptedDataKey(
        key_provider=MasterKeyInfo(provider_id=key_provider.provider_id, key_info=key_info),
        encrypted_data_key=key_ciphertext,
    )
def valid_config_exists(config_path=CONFIG_PATH):
    """Verify that a valid config file exists.

    Args:
        config_path (str): Path to the config file.

    Returns:
        boolean: True if there is a valid config file, false if not.
    """
    if not os.path.isfile(config_path):
        return False
    try:
        # Reading and validating may each fail; both count as "invalid".
        check_config(read_config(config_path))
    except (ConfigurationError, IOError):
        return False
    return True
def _delete(self, tree):
    """Run a DELETE statement.

    Builds delete_item keyword arguments from the parsed statement *tree*
    and dispatches them through the shared query/operation helper.
    """
    tablename = tree.table
    table = self.describe(tablename, require=True)
    kwargs = {}
    visitor = Visitor(self.reserved_words)
    # Only build a condition expression when the statement has a WHERE clause.
    if tree.where:
        constraints = ConstraintExpression.from_where(tree.where)
        kwargs["condition"] = constraints.build(visitor)
        kwargs["expr_values"] = visitor.expression_values
        kwargs["alias"] = visitor.attribute_names
    return self._query_and_op(tree, table, "delete_item", kwargs)
def _get_end_time(self, start_time: datetime) -> datetime:
    """Generates the end time to be used for the store range query.

    :param start_time: Start time to use as an offset to calculate the end time
        based on the window type in the schema.
    :return: start_time plus the schema's window (in days or hours).
        NOTE(review): any other window type falls through and returns None
        implicitly -- confirm callers only ever pass DAY/HOUR schemas.
    """
    if Type.is_type_equal(self._schema.window_type, Type.DAY):
        return start_time + timedelta(days=self._schema.window_value)
    elif Type.is_type_equal(self._schema.window_type, Type.HOUR):
        return start_time + timedelta(hours=self._schema.window_value)
def tarbell_switch(command, args):
    """Switch to a project: chdir into it and start the dev server."""
    with ensure_settings(command, args) as settings:
        projects_path = settings.config.get("projects_path")
        if not projects_path:
            # NOTE(review): projects_path is falsy here, so this prints
            # e.g. "None does not exist" -- the message could be clearer.
            show_error("{0} does not exist".format(projects_path))
            sys.exit()
        # First positional argument is the project name; the remaining args
        # are forwarded to tarbell_serve below.
        project = args.get(0)
        args.remove(project)
        project_path = os.path.join(projects_path, project)
        if os.path.isdir(project_path):
            os.chdir(project_path)
            puts("\nSwitching to {0}".format(colored.red(project)))
            tarbell_serve(command, args)
        else:
            show_error("{0} isn't a tarbell project".format(project_path))
def init(opts):
    '''Open the connection to the Junos device, login, and bind to the
    Resource class.
    '''
    # Force single-process operation for this proxy's opts.
    opts['multiprocessing'] = False
    log.debug('Opening connection to junos')
    args = {"host": opts['proxy']['host']}
    # Connection options that are forwarded verbatim when present in the
    # proxy config.
    optional_args = ['user', 'username', 'password', 'passwd', 'port', 'gather_facts', 'mode', 'baud', 'attempts', 'auto_probe', 'ssh_private_key_file', 'ssh_config', 'normalize']
    # Accept 'username' as an alias for the 'user' key expected below.
    if 'username' in opts['proxy'].keys():
        opts['proxy']['user'] = opts['proxy'].pop('username')
    proxy_keys = opts['proxy'].keys()
    for arg in optional_args:
        if arg in proxy_keys:
            args[arg] = opts['proxy'][arg]
    thisproxy['conn'] = jnpr.junos.Device(**args)
    try:
        thisproxy['conn'].open()
    except (ProbeError, ConnectAuthError, ConnectRefusedError, ConnectTimeoutError, ConnectError) as ex:
        log.error("{} : not able to initiate connection to the device".format(str(ex)))
        # Mark the proxy unusable and bail out early.
        thisproxy['initialized'] = False
        return
    if 'timeout' in proxy_keys:
        timeout = int(opts['proxy']['timeout'])
        try:
            thisproxy['conn'].timeout = timeout
        except Exception as ex:
            log.error('Not able to set timeout due to: %s', str(ex))
        else:
            log.debug('RPC timeout set to %d seconds', timeout)
    # Bind the config-management and software-install utility classes onto
    # the connection; failures are logged but not fatal.
    try:
        thisproxy['conn'].bind(cu=jnpr.junos.utils.config.Config)
    except Exception as ex:
        log.error('Bind failed with Config class due to: {}'.format(str(ex)))
    try:
        thisproxy['conn'].bind(sw=jnpr.junos.utils.sw.SW)
    except Exception as ex:
        log.error('Bind failed with SW class due to: {}'.format(str(ex)))
    thisproxy['initialized'] = True
def _check_error ( response ) :
"""Checks for JSON error messages and raises Python exception""" | if 'error' in response :
raise InfluxDBError ( response [ 'error' ] )
elif 'results' in response :
for statement in response [ 'results' ] :
if 'error' in statement :
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError ( msg . format ( d = statement ) ) |
def mb_neg_logposterior(self, beta, mini_batch):
    """Returns negative log posterior

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables
    mini_batch : int
        Batch size for the data

    Returns
    ----------
    Negative log posterior
    """
    # Scale the mini-batch likelihood up to the full data set size.
    scale = self.data.shape[0] / mini_batch
    post = scale * self.mb_neg_loglik(beta, mini_batch)
    # Subtract each latent variable's log prior density.
    for idx in range(self.z_no):
        post -= self.latent_variables.z_list[idx].prior.logpdf(beta[idx])
    return post
def remove_quotes(self, value):
    """Strip one pair of matching surrounding quotes from *value* and
    unescape any embedded quotes of the same kind."""
    # Beware the empty string (and other falsy inputs): return unchanged.
    if not value:
        return value
    first, last = value[0], value[-1]
    if first == last and first in ('"', "'"):
        inner = value[1:-1]
        return inner.replace('\\' + first, first)
    return value
def remove(local_file_name):
    """Function attempts to remove file, if failure occures -> print exception

    :param local_file_name: name of file to remove
    """
    try:
        os.remove(local_file_name)
    except Exception as err:
        # Best-effort removal: report the problem instead of raising.
        print("Cannot remove file '" + local_file_name + "'. Please remove it manually.")
        print(err)
def opponent_rank(self):
    """Return the opponent's rank (as an ``int``) parsed from the opponent
    name at the time the game was played, or None if the team was unranked."""
    digits = re.findall(r'\d+', self._opponent_name)
    if digits:
        return int(digits[0])
    return None
def set_line_width(self, width):
    """Sets the current line width within the cairo context.

    The line width value specifies the diameter of a pen that is circular
    in user space (though the device-space pen may be an ellipse in
    general, due to scaling/shear/rotation of the CTM).

    .. note::
        When the description above refers to user space and CTM it refers
        to the user space and CTM in effect at the time of the stroking
        operation, not at the time of the call to :meth:`set_line_width`.
        The simplest usage makes both of these spaces identical: if there
        is no change to the CTM between a call to :meth:`set_line_width`
        and the stroking operation, then one can just pass user-space
        values to :meth:`set_line_width` and ignore this note.

    As with the other stroke parameters, the current line width is
    examined by :meth:`stroke`, :meth:`stroke_extents`, and
    :meth:`stroke_to_path`, but does not have any effect during path
    construction.

    The default line width value is 2.0.

    :type width: float
    :param width: The new line width.
    """
    cairo.cairo_set_line_width(self._pointer, width)
    # Raise if the underlying cairo context entered an error state.
    self._check_status()
def get_appointment_groups(self, **kwargs):
    """List appointment groups.

    :calls: `GET /api/v1/appointment_groups \
    <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.appointment_group.AppointmentGroup`
    """
    # Imported locally -- presumably to avoid a circular import at module
    # load time; confirm before hoisting to the top of the file.
    from canvasapi.appointment_group import AppointmentGroup
    return PaginatedList(AppointmentGroup, self.__requester, 'GET', 'appointment_groups', _kwargs=combine_kwargs(**kwargs))
def get_next_departures(stop, filter_line=None, num_line_groups=1, verbose=False):
    """Get all real-time departure times for given stop and return as filtered table.

    Terminate if we can assume there is no connection to the internet.

    :param stop: stop/station identifier substituted into ``BVG_URL_PAT``
    :param filter_line: optional substring; keep only rows whose ``Line``
        column contains it (case-insensitive)
    :param num_line_groups: departures to keep per (Line, Destination) pair
    :param verbose: if True, print progress messages
    :return: a ``pandas.DataFrame`` with Wait/Departure/Line/Destination
        columns, or an empty list when the page contains no table
    """
    # Get departures table from online service
    # (great: we don't have to iterate over multiple pages).
    url = BVG_URL_PAT % stop
    if verbose:
        print('- Fetching table for URL "%s".' % url)
    try:
        tables = pd.read_html(url.encode('utf-8'))
    except urllib.error.URLError:
        msg = 'Not connected to the internet?'
        termcolor.cprint(msg, 'red', attrs=['bold'])
        sys.exit(1)
    except ValueError:
        # pandas raises ValueError when the page holds no <table> element.
        return []
    table = tables[0]
    table.columns = ['Departure', 'Line', 'Destination']
    if verbose:
        print('- Got table with %d entries for "%s".' % (len(table), stop))
    # Cleanup: remove '*' markers in the Departure column (dropping the
    # whole rows instead can result in an empty table).
    table.is_copy = False
    # prevents SettingWithCopyWarning
    # FIX: use a raw string for the regex pattern ('\s'/'\*' escapes).
    table.Departure = table.apply(lambda row: re.sub(r'\s*\*\s*', '', row.Departure), axis=1)
    # Collapse runs of spaces in the Line column.
    table.Line = table.apply(lambda row: re.sub(' +', ' ', row.Line), axis=1)
    # Keep the first `num_line_groups` departures of each unique
    # (Line, Destination) combination.
    indices = []
    for i in range(num_line_groups):
        try:
            indices += sorted([tab.index.values[i] for line_dest, tab in table.groupby(['Line', 'Destination'])])
        except IndexError:
            break
    table = table[table.index.map(lambda x: x in indices)]
    # Insert a left-most column with minutes and seconds from now
    # until the departure time.
    table.insert(0, "Wait", table.Departure.apply(lambda dep: wait_time(dep)))
    # Filter on desired lines only.
    # FIX: compare str to str -- the previous `.encode('utf-8')` produced
    # bytes, and `bytes in str` raises TypeError on Python 3.
    if filter_line:
        table = table[table.Line.apply(lambda cell: filter_line.lower() in cell.lower())]
    return table
def extract_fields(lines, delim, searches, match_lineno=1, **kwargs):
    """Return generator of fields matching `searches`.

    Parameters
    ----------
    lines : iterable
        Provides line number (1-based) and line (str)
    delim : str
        Delimiter to split line by to produce fields
    searches : iterable
        Returns search (str) to match against line fields.
    match_lineno : int
        Line number of line to split and search fields

    Remaining keyword arguments are passed to `match_fields`.

    Raises
    ------
    WcutError
        If `delim` does not occur in the line numbered `match_lineno`.
    """
    keep_idx = []
    for lineno, line in lines:
        # Lines before the match line, and lines lacking the delimiter,
        # are passed through whole (as a one-element field list).
        if lineno < match_lineno or delim not in line:
            if lineno == match_lineno:
                # FIX: corrected 'Delimter' typo in the error message.
                raise WcutError('Delimiter not found in line {}'.format(match_lineno))
            yield [line]
            continue
        fields = line.split(delim)
        if lineno == match_lineno:
            # Determine which field indices to keep from the match line.
            keep_idx = list(match_fields(fields, searches, **kwargs))
        keep_fields = [fields[i] for i in keep_idx]
        if keep_fields:
            yield keep_fields
def sanitize_op(self, op_data):
    """Remove unnecessary fields for an operation, i.e. prior to committing it.
    This includes any invariant tags we've added with our invariant decorators
    (such as @state_create or @state_transition).

    TODO: less ad-hoc way to do this
    """
    op_data = super(BlockstackDB, self).sanitize_op(op_data)
    # remove invariant tags (i.e. added by our invariant state_* decorators)
    to_remove = get_state_invariant_tags()
    for tag in to_remove:
        if tag in op_data:
            del op_data[tag]
    # NOTE: this is called the opcode family, because
    # different operation names can have the same operation code
    # (such as NAME_RENEWAL and NAME_REGISTRATION).  They must
    # have the same mutation fields.
    opcode_family = op_get_opcode_name(op_data['op'])
    # for each column in the appropriate state table,
    # if the column is not identified in the operation's
    # MUTATE_FIELDS list, then set it to None here.
    mutate_fields = op_get_mutate_fields(opcode_family)
    for mf in mutate_fields:
        # FIX: dict.has_key() was removed in Python 3; use the `in` operator.
        if mf not in op_data:
            log.debug("Adding NULL mutate field '%s.%s'" % (opcode_family, mf))
            op_data[mf] = None
    # TODO: less ad-hoc
    for extra_field in ['opcode']:
        if extra_field in op_data:
            del op_data[extra_field]
    return op_data
def _expire_data ( self ) :
"""Remove all expired entries .""" | expire_time_stamp = time . time ( ) - self . expire_time
self . timed_data = { d : t for d , t in self . timed_data . items ( ) if t > expire_time_stamp } |
def format(self, message_format):
    """Set the message format

    :param message_format: The format to set
    :type message_format: str
    """
    # Only accept formats registered in self.formats; otherwise log and
    # leave the current format untouched.
    if message_format in self.formats:
        self._log.debug('Setting message format to {format}'.format(format=message_format))
        self._format = message_format
    else:
        self._log.error('Invalid Message format specified: {format}'.format(format=message_format))
def _collect_placeholders_required ( self ) :
"""* collect placeholders required from filename etc *""" | self . log . info ( 'starting the ``_collect_placeholders_required`` method' )
phs = self . settings [ "frankenstein" ] [ "placeholder delimiters" ]
phsString = "|" . join ( phs )
matchObject = re . finditer ( r'(%(phsString)s)([^\s]*?)\1' % locals ( ) , string = self . contentString , flags = re . S # re . S
)
phDict = { }
for match in matchObject :
phDict [ match . group ( 2 ) ] = None
self . phDict = phDict
self . log . info ( 'completed the ``_collect_placeholders_required`` method' )
return None |
def set_current_operation_progress(self, percent):
    """Internal method, not to be called externally.

    in percent of type int
    """
    # `baseinteger` is the project's int/long compatibility alias.
    if not isinstance(percent, baseinteger):
        raise TypeError("percent can only be an instance of type baseinteger")
    # Forward to the underlying managed-object RPC call.
    self._call("setCurrentOperationProgress", in_p=[percent])
def inserir(self, name):
    """Inserts a new Group L3 and returns its identifier.

    :param name: Group L3 name. String with a minimum 2 and maximum of 80 characters
    :return: Dictionary with the following structure:
        {'group_l3': {'id': <id_group_l3>}}
    :raise InvalidParameterError: Name is null and invalid.
    :raise NomeGrupoL3DuplicadoError: There is already a registered Group L3 with the value of name.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    payload = {'name': name}
    code, xml = self.submit({'group_l3': payload}, 'POST', 'groupl3/')
    return self.response(code, xml)
def write(self, data, **keys):
    """Write data into this HDU

    parameters
    ----------
    data: ndarray or list of ndarray
        A numerical python array.  Should be an ordinary array for image
        HDUs, should have fields for tables.  To write an ordinary array to
        a column in a table HDU, use write_column.  If data already exists
        in this HDU, it will be overwritten.  See the append() method to
        append new rows to a table HDU.
    firstrow: integer, optional
        At which row you should begin writing to tables.  Be sure you know
        what you are doing!  For appending see the append() method.
        Default 0.
    columns: list, optional
        If data is a list of arrays, you must send columns as a list
        of names or column numbers.  You can also send names=
    names: list, optional
        same as columns=
    """
    slow = keys.get('slow', False)
    isrec = False
    if isinstance(data, (list, dict)):
        if isinstance(data, list):
            data_list = data
            columns_all = keys.get('columns', None)
            if columns_all is None:
                columns_all = keys.get('names', None)
            if columns_all is None:
                raise ValueError("you must send columns with a list of arrays")
        else:
            columns_all = list(data.keys())
            data_list = [data[n] for n in columns_all]
        colnums_all = [self._extract_colnum(c) for c in columns_all]
        names = [self.get_colname(c) for c in colnums_all]
        # FIX: `numpy.bool` alias was removed in NumPy 1.24; use builtin bool.
        isobj = numpy.zeros(len(data_list), dtype=bool)
        for i in xrange(len(data_list)):
            isobj[i] = is_object(data_list[i])
    else:
        if data.dtype.fields is None:
            raise ValueError("You are writing to a table, so I expected " "an array with fields as input. If you want " "to write a simple array, you should use " "write_column to write to a single column, " "or instead write to an image hdu")
        # FIX: compare tuples with `==`, not `is` (identity of the empty
        # tuple is an implementation detail; `is ()` warns on 3.8+).
        if data.shape == ():
            raise ValueError("cannot write data with shape ()")
        isrec = True
        names = data.dtype.names
        # only write object types (variable-length columns) after
        # writing the main table
        isobj = fields_are_object(data)
        data_list = []
        colnums_all = []
        for i, name in enumerate(names):
            colnum = self._extract_colnum(name)
            data_list.append(data[name])
            colnums_all.append(colnum)
    if slow:
        # slow path: one write_column call per non-object column
        for i, name in enumerate(names):
            if not isobj[i]:
                self.write_column(name, data_list[i], **keys)
    else:
        nonobj_colnums = []
        nonobj_arrays = []
        for i in xrange(len(data_list)):
            if not isobj[i]:
                nonobj_colnums.append(colnums_all[i])
                if isrec:
                    # this still leaves possibility of f-order sub-arrays..
                    colref = array_to_native(data_list[i], inplace=False)
                else:
                    colref = array_to_native_c(data_list[i], inplace=False)
                if IS_PY3 and colref.dtype.char == 'U':
                    # for python3, we convert unicode to ascii
                    # this will error if the character is not in ascii
                    colref = colref.astype('S', copy=False)
                nonobj_arrays.append(colref)
        for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
            self._verify_column_data(tcolnum, tdata)
        if len(nonobj_arrays) > 0:
            firstrow = keys.get('firstrow', 0)
            # cfitsio rows are 1-based, hence firstrow + 1
            self._FITS.write_columns(self._ext + 1, nonobj_colnums, nonobj_arrays, firstrow=firstrow + 1, write_bitcols=self.write_bitcols)
    # writing the object arrays always occurs the same way
    # need to make sure this works for array fields
    for i, name in enumerate(names):
        if isobj[i]:
            self.write_var_column(name, data_list[i], **keys)
    self._update_info()
def generate_mfa_token(self, user_id, expires_in=259200, reusable=False):
    """Use to generate a temporary MFA token that can be used in place of other
    MFA tokens for a set time period.  For example, use this token for account
    recovery.

    :param user_id: Id of the user
    :type user_id: int
    :param expires_in: Set the duration of the token in seconds.
        (default: 259200 seconds = 72h) 72 hours is the max value.
    :type expires_in: int
    :param reusable: Defines if the token reusable. (default: false)
        If set to true, token can be used for multiple apps, until it expires.
    :type reusable: bool

    Returns a mfa token
    :return: return the object if success (None otherwise, with self.error
        and self.error_description populated)
    :rtype: MFAToken

    See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/generate-mfa-token
    Generate MFA Token documentation
    """
    # Clear any error state left over from a previous API call.
    self.clean_error()
    try:
        url = self.get_url(Constants.GENERATE_MFA_TOKEN_URL, user_id)
        data = {'expires_in': expires_in, 'reusable': reusable}
        response = self.execute_call('post', url, json=data)
        if response.status_code == 201:
            json_data = response.json()
            if json_data:
                return MFAToken(json_data)
        else:
            # Record the API failure for the caller to inspect.
            self.error = self.extract_status_code_from_response(response)
            self.error_description = self.extract_error_message_from_response(response)
    except Exception as e:
        self.error = 500
        self.error_description = e.args[0]
async def status(request: web.Request) -> web.Response:
    """Get request will return the status of the machine's connection to the
    internet as well as the status of its network interfaces.

    The body of the response is a json dict containing

    'status': internet connectivity status, where the options are:
      "none" - no connection to router or network
      "portal" - device behind a captive portal and cannot reach full internet
      "limited" - connection to router but not internet
      "full" - connection to router and internet
      "unknown" - an exception occured while trying to determine status

    'interfaces': JSON object of networking interfaces, keyed by device name,
    where the value of each entry is another object with the keys:
      - 'type': "ethernet" or "wifi"
      - 'state': state string, e.g. "disconnected", "connecting", "connected"
      - 'ipAddress': the ip address, if it exists (null otherwise); this also
        contains the subnet mask in CIDR notation, e.g. 10.2.12.120/16
      - 'macAddress': the MAC address of the interface device
      - 'gatewayAddress': the address of the current gateway, if it exists
        (null otherwise)

    Example request::
        GET /networking/status

    Example response::
        200 OK
        {
          "status": "full",
          "interfaces": {
            "wlan0": {
              "ipAddress": "192.168.43.97/24",
              "macAddress": "B8:27:EB:6C:95:CF",
              "gatewayAddress": "192.168.43.161",
              "state": "connected",
              "type": "wifi"
            },
            "eth0": {
              "ipAddress": "169.254.229.173/16",
              "macAddress": "B8:27:EB:39:C0:9A",
              "gatewayAddress": null,
              "state": "connected",
              "type": "ethernet"
            }
          }
        }
    """
    # Defaults reported if the nmcli queries below fail.
    connectivity = {'status': 'none', 'interfaces': {}}
    try:
        connectivity['status'] = await nmcli.is_connected()
        # One info dict per known network interface, keyed by device name.
        connectivity['interfaces'] = {i.value: await nmcli.iface_info(i) for i in nmcli.NETWORK_IFACES}
        log.debug("Connectivity: {}".format(connectivity['status']))
        log.debug("Interfaces: {}".format(connectivity['interfaces']))
        status = 200
    except subprocess.CalledProcessError as e:
        # nmcli invocation failed
        log.error("CalledProcessError: {}".format(e.stdout))
        status = 500
    except FileNotFoundError as e:
        # nmcli binary not present on this system
        log.error("FileNotFoundError: {}".format(e))
        status = 500
    return web.json_response(connectivity, status=status)
def exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override):
    """exerciseOptions(EClientSocketBase self, TickerId tickerId, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
    # SWIG-generated passthrough to the native EClientSocketBase method.
    return _swigibpy.EClientSocketBase_exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override)
def rdb_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, db_string=None, metadata_factory=None):
    """Directive for registering a RDBM based repository.

    Collects the RDB-specific options (connection string and metadata
    factory) and delegates registration to the generic ``_repository``
    helper with the RDB repository type.
    """
    cnf = {}
    # FIX: idiomatic identity tests (`is not None`, not `not ... is None`).
    if db_string is not None:
        cnf['db_string'] = db_string
    if metadata_factory is not None:
        cnf['metadata_factory'] = metadata_factory
    _repository(_context, name, make_default, aggregate_class, repository_class, REPOSITORY_TYPES.RDB, 'add_rdb_repository', cnf)
async def AddMetricBatches(self, batches):
    '''batches : typing.Sequence[~MetricBatchParam]

    Returns -> typing.Sequence[~ErrorResult]
    '''
    # map input types to rpc msg
    params = {'batches': batches}
    request = dict(type='MetricsAdder', request='AddMetricBatches', version=2, params=params)
    reply = await self.rpc(request)
    return reply
def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
    """roomsize Reverb room size value (0.0-1.2)
    damping Reverb damping value (0.0-1.0)
    width Reverb width value (0.0-100.0)
    level Reverb level value (0.0-1.0)
    """
    # Build the bitmask of parameters that were actually supplied
    # (negative values mean "leave unchanged").
    flags = 0
    for bit, value in ((0b0001, roomsize), (0b0010, damping), (0b0100, width), (0b1000, level)):
        if value >= 0:
            flags |= bit
    return fluid_synth_set_reverb_full(self.synth, flags, roomsize, damping, width, level)
def load_from_sens_file(self, filename):
    """Load real and imaginary parts from a sens.dat file generated by
    CRMod

    Parameters
    ----------
    filename : string
        filename of sensitivity file

    Returns
    -------
    nid_re : int
        ID of real part of sensitivities
    nid_im : int
        ID of imaginary part of sensitivities
    """
    # Skip the header line; columns 2 and 3 hold the real/imaginary parts.
    raw = np.loadtxt(filename, skiprows=1)
    real_id = self.add_data(raw[:, 2])
    imag_id = self.add_data(raw[:, 3])
    return real_id, imag_id
def make_format(self, fmt, width):
    """Make subreport text in a specified format

    Renders every data item that has results; `fmt` selects text
    (the default when None), html, or csv output.
    """
    if not self.report_data:
        return
    for item in self.report_data:
        if not item.results:
            continue
        if fmt is None or fmt == 'text':
            item.make_text(width)
        elif fmt == 'html':
            item.make_html()
        elif fmt == 'csv':
            item.make_csv()
def download_subtitle(self, subtitle):
    """Download `subtitle`'s :attr:`~subliminal.subtitle.Subtitle.content`.

    :param subtitle: subtitle to download.
    :type subtitle: :class:`~subliminal.subtitle.Subtitle`
    :return: `True` if the subtitle has been successfully downloaded, `False` otherwise.
    :rtype: bool
    """
    # check discarded providers
    if subtitle.provider_name in self.discarded_providers:
        logger.warning('Provider %r is discarded', subtitle.provider_name)
        return False
    logger.info('Downloading subtitle %r', subtitle)
    try:
        self[subtitle.provider_name].download_subtitle(subtitle)
    except (requests.Timeout, socket.timeout):
        logger.error('Provider %r timed out, discarding it', subtitle.provider_name)
        self.discarded_providers.add(subtitle.provider_name)
        return False
    # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception instead (still logged and the provider discarded).
    except Exception:
        logger.exception('Unexpected error in provider %r, discarding it', subtitle.provider_name)
        self.discarded_providers.add(subtitle.provider_name)
        return False
    # check subtitle validity
    if not subtitle.is_valid():
        logger.error('Invalid subtitle')
        return False
    return True
def get_svc_stats(self, svcs):
    """Get statistics for Services, resp. Service entities

    :param svcs: iterable of service entities; non-dict entries are skipped
    :return: dict of ``services.*`` counters
    """
    # Counters all start at zero; the per-state buckets (ok/warning/...)
    # are selected via _trans_svc_state below.
    stats = {"services.total": 0, "services.ok": 0, "services.warning": 0, "services.critical": 0, "services.unknown": 0, "services.flapping": 0, "services.in_downtime": 0, "services.checked": 0, "services.scheduled": 0, "services.active_checks": 0, "services.passive_checks": 0, }
    for svc in svcs:
        if type(svc) is not dict:
            # Skip malformed entries silently.
            continue
        sane = self._sanitize_entity(svc)
        stats["services.total"] += 1
        # _trans_binary/_trans_dtime presumably map flag-ish values to
        # 0/1 counters -- confirm against their definitions.
        stats["services.flapping"] += self._trans_binary(sane["flapping"])
        stats["services.in_downtime"] += self._trans_dtime(sane["in_downtime"])
        stats["services.checked"] += self._trans_binary(sane["checked"])
        stats["services.scheduled"] += self._trans_binary(sane["scheduled"])
        stats["services.active_checks"] += sane["active_checks"]
        stats["services.passive_checks"] += sane["passive_checks"]
        # Map the raw state onto one of ok/warning/critical/unknown keys.
        state_key = self._trans_svc_state(sane["state"])
        stats["services.%s" % (state_key)] += 1
    return stats
def plot_contours(self, grid, filled=True, ax=None, labels=None, subplots_kw=dict(), **kwargs):
    """Plot equipotentials contours.  Computes the potential energy on a grid
    (specified by the array `grid`).

    .. warning:: Right now the grid input must be arrays and must already
        be in the unit system of the potential.  Quantity support is coming...

    Parameters
    ----------
    grid : tuple
        Coordinate grids or slice value for each dimension.  Should be a
        tuple of 1D arrays or numbers.
    filled : bool (optional)
        Use :func:`~matplotlib.pyplot.contourf` instead of
        :func:`~matplotlib.pyplot.contour`.  Default is ``True``.
    ax : matplotlib.Axes (optional)
    labels : iterable (optional)
        List of axis labels.
    subplots_kw : dict
        kwargs passed to matplotlib's subplots() function if an axes object
        is not specified.
    kwargs : dict
        kwargs passed to either contourf() or plot().

    Returns
    -------
    fig : `~matplotlib.Figure`
    """
    import matplotlib.pyplot as plt
    from matplotlib import cm
    # figure out which elements are iterable, which are numeric
    _grids = []
    _slices = []
    for ii, g in enumerate(grid):
        if isiterable(g):
            _grids.append((ii, g))
        else:
            _slices.append((ii, g))
    # figure out the dimensionality
    ndim = len(_grids)
    # if ndim > 2, don't know how to handle this!
    if ndim > 2:
        raise ValueError("ndim > 2: you can only make contours on a 2D grid. For other " "dimensions, you have to specify values to slice.")
    if ax is None:
        # default figsize
        fig, ax = plt.subplots(1, 1, **subplots_kw)
    else:
        fig = ax.figure
    if ndim == 1:
        # 1D curve: plot potential along the single iterable dimension,
        # holding the sliced dimensions fixed.
        x1 = _grids[0][1]
        r = np.zeros((len(_grids) + len(_slices), len(x1)))
        r[_grids[0][0]] = x1
        for ii, slc in _slices:
            r[ii] = slc
        Z = self.energy(r * self.units['length']).value
        ax.plot(x1, Z, **kwargs)
        if labels is not None:
            ax.set_xlabel(labels[0])
            ax.set_ylabel("potential")
    else:
        # 2D contours: evaluate on the meshgrid of the two iterable
        # dimensions, with the remaining dimensions fixed at their slices.
        x1, x2 = np.meshgrid(_grids[0][1], _grids[1][1])
        shp = x1.shape
        x1, x2 = x1.ravel(), x2.ravel()
        r = np.zeros((len(_grids) + len(_slices), len(x1)))
        r[_grids[0][0]] = x1
        r[_grids[1][0]] = x2
        for ii, slc in _slices:
            r[ii] = slc
        Z = self.energy(r * self.units['length']).value
        # make default colormap not suck
        cmap = kwargs.pop('cmap', cm.Blues)
        if filled:
            cs = ax.contourf(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp), cmap=cmap, **kwargs)
        else:
            cs = ax.contour(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp), cmap=cmap, **kwargs)
        if labels is not None:
            ax.set_xlabel(labels[0])
            ax.set_ylabel(labels[1])
    return fig
def summary(app):
    """Print a summary of a deployed app's status.

    :param app: Heroku app name; queried at https://<app>.herokuapp.com/summary
    """
    r = requests.get('https://{}.herokuapp.com/summary'.format(app))
    # Expected payload: {'summary': [[status_code, count], ...]}
    summary = r.json()['summary']
    click.echo("\nstatus \t| count")
    click.echo("----------------")
    for s in summary:
        click.echo("{}\t| {}".format(s[0], s[1]))
    # Yield = share of status-101 results among all results >= 100.
    # NOTE(review): 101/10x appear to be app-specific result codes, not
    # HTTP statuses -- confirm against the deployed app's API.
    num_101s = sum([s[1] for s in summary if s[0] == 101])
    num_10xs = sum([s[1] for s in summary if s[0] >= 100])
    if num_10xs > 0:
        click.echo("\nYield: {:.2%}".format(1.0 * num_101s / num_10xs))
def _request_callback(self, request_id):
    """Construct a request callback for the given request ID.

    The returned callback is intended to be attached to the future that
    resolves the client request; it serializes the outcome (result, error,
    or cancellation) into a JSON-RPC response and hands it to the consumer.
    """
    def callback(future):
        # Remove the future from the client requests map
        self._client_request_futures.pop(request_id, None)
        if future.cancelled():
            # NOTE(review): set_exception() on an already-cancelled
            # concurrent.futures.Future raises InvalidStateError --
            # presumably the futures implementation used here permits it
            # so that result() below raises JsonRpcRequestCancelled;
            # confirm against the executor in use.
            future.set_exception(JsonRpcRequestCancelled())
        message = {'jsonrpc': JSONRPC_VERSION, 'id': request_id, }
        try:
            message['result'] = future.result()
        except JsonRpcException as e:
            # Known JSON-RPC errors carry their own wire representation.
            log.exception("Failed to handle request %s", request_id)
            message['error'] = e.to_dict()
        except Exception:  # pylint: disable=broad-except
            # Anything else becomes a generic internal error response.
            log.exception("Failed to handle request %s", request_id)
            message['error'] = JsonRpcInternalError.of(sys.exc_info()).to_dict()
        self._consumer(message)
    return callback
def curve_spline(x, y=None, weights=None, order=1, even_out=True, smoothing=None, periodic=False, meta_data=None):
    '''curve_spline(coords) yields a bicubic spline function through
    the points in the given coordinate matrix.
    curve_spline(x, y) uses the coordinate matrix [x, y].

    The function returned by curve_spline() is f(t), defined on the
    interval from 0 to n-1 where n is the number of points in the
    coordinate matrix provided.

    The following options are accepted:
      * weights (None) the weights to use in smoothing.
      * smoothing (None) the amount to smooth the points.
      * order (3) the order of the polynomial used in the splines.
      * periodic (False) whether the points are periodic or not.
      * even_out (True) whether to even out the distances along
        the curve.
      * meta_data (None) an optional map of meta-data to give the
        spline representation.

    NOTE(review): the docstring advertises a default order of 3 but the
    signature defaults to order=1 -- confirm which is intended.
    '''
    curv = CurveSpline(x, y, weights=weights, order=order, smoothing=smoothing, periodic=periodic, meta_data=meta_data)
    if even_out:
        # Reparameterize so samples are evenly spaced along the curve.
        curv = curv.even_out()
    return curv
def now(self):
    """Return a :py:class:`datetime.datetime` instance representing the current time.

    Uses UTC when ``self.use_utc`` is truthy, local time otherwise.

    :rtype: :py:class:`datetime.datetime`
    """
    return datetime.datetime.utcnow() if self.use_utc else datetime.datetime.now()
def unlink(self, node, hyperedge):
    """Unlink given node and hyperedge.

    @type node: node
    @param node: Node.
    @type hyperedge: hyperedge
    @param hyperedge: Hyperedge.
    """
    # Drop each side's reference to the other...
    self.node_links[node].remove(hyperedge)
    self.edge_links[hyperedge].remove(node)
    # ...and delete the corresponding edge from the underlying bipartite
    # graph ('n'/'h' tag node vs. hyperedge vertices).
    self.graph.del_edge(((node, 'n'), (hyperedge, 'h')))
def disable_token(platform, user_id, token, on_error=None, on_success=None):
    """Disable a device token for a user.

    :param str platform: The platform which to disable token on. One of either
        Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
        (outbound.APNS).
    :param str|number user_id: the id you use to identify a user. this should
        be static for the lifetime of a user.
    :param str token: the token to disable.
    :param func on_error: An optional function to call in the event of an error.
        on_error callback should take 2 parameters: `code` and `error`. `code` will be
        one of outbound.ERROR_XXXXX. `error` will be the corresponding message.
    :param func on_success: An optional function to call if/when the API call succeeds.
        on_success callback takes no parameters.
    """
    # Delegate to the shared token helper; False selects the "disable" action.
    __device_token(platform, False, user_id, token=token, on_error=on_error, on_success=on_success)
def stored_bind(self, instance):
    """Bind an instance to this Pangler, using the bound Pangler store.

    This method functions identically to `bind`, except that it might
    return a Pangler which was previously bound to the provided instance.
    """
    # Panglers without an id cannot be cached; always bind fresh.
    if self.id is None:
        return self.bind(instance)
    per_instance = self._bound_pangler_store.setdefault(instance, {})
    bound = per_instance.get(self.id)
    if bound is None:
        bound = self.bind(instance)
        per_instance[self.id] = bound
    return bound
def _notify_remove(self, slice_):
    """Notify about a RemoveChange.

    Wraps the removed slice in a RemoveChange event and broadcasts it to
    all registered observers.
    """
    change = RemoveChange(self, slice_)
    self.notify_observers(change)
def clear_screen():
    """Clear the terminal screen.

    http://stackoverflow.com/questions/18937058/python-clear-screen-in-shell
    """
    import os, platform
    # 'cls' is the Windows shell command; 'clear' works everywhere else.
    command = 'cls' if platform.system() == "Windows" else 'clear'
    os.system(command)
    return True
def get(self, _create=False, **ctx_options):
    """NOTE: `ctx_options` are ignored

    Build the entity for this key via the bound kind factory; with
    `_create=True` the factory's create() path is used instead of plain
    construction.
    """
    if _create:
        return self._kind_factory.create(key=self)
    else:
        return self._kind_factory(key=self)
def tempo_account_delete_account_by_id(self, account_id):
    """Delete an Account by id. Caller must have the Manage Account Permission for the Account.
    The Account can not be deleted if it has an AccountLinkBean.

    :param account_id: the id of the Account to be deleted.
    :return:
    """
    endpoint = 'rest/tempo-accounts/1/account/{id}/'.format(id=account_id)
    return self.delete(endpoint)
def load(self):
    """Get load time series (only active power)

    Returns
    -------
    dict or :pandas:`pandas.DataFrame<dataframe>`
        See class definition for details.
    """
    # Label-list indexing returns a DataFrame (not a Series); fall back to
    # plain label indexing when that fails.
    try:
        return self._load.loc[[self.timeindex], :]
    # FIX: narrowed the bare `except:` (which also caught SystemExit and
    # KeyboardInterrupt) to Exception; fallback behavior is unchanged.
    except Exception:
        return self._load.loc[self.timeindex, :]
def _sampleRange ( rng , start , end , step , k ) :
"""Equivalent to :
random . sample ( xrange ( start , end , step ) , k )
except it uses our random number generator .
This wouldn ' t need to create the arange if it were implemented in C .""" | array = numpy . empty ( k , dtype = "uint32" )
rng . sample ( numpy . arange ( start , end , step , dtype = "uint32" ) , array )
return array |
def read_config(file_name):
    """Read YAML file with configuration and pointers to example data.

    Args:
        file_name (str): Name of the file, where the configuration is stored.

    Returns:
        list: Parsed and processed documents (see :func:`_process_config_item`).

    Example YAML file::
        html: simple_xml.xml
        first:
            data: i wan't this
            required: true
            notfoundmsg: Can't find variable $name.
        second:
            data: and this
        ---
        html: simple_xml2.xml
        first:
            data: something wanted
            required: true
            notfoundmsg: Can't find variable $name.
        second:
            data: another wanted thing
    """
    # Paths inside the config are resolved relative to the config file.
    dirname = os.path.dirname(os.path.abspath(file_name))
    dirname = os.path.relpath(dirname)

    # Force encoded (utf-8) strings instead of unicode objects.
    # NOTE(review): this is a Python-2 idiom; under Python 3 it turns every
    # YAML string into `bytes` -- confirm this is still intended.
    def custom_str_constructor(loader, node):
        return loader.construct_scalar(node).encode('utf-8')
    yaml.add_constructor(u'tag:yaml.org,2002:str', custom_str_constructor)

    # SECURITY: yaml.load_all without an explicit Loader can construct
    # arbitrary Python objects; prefer yaml.safe_load_all for untrusted input.
    with open(file_name) as f:
        return [
            _process_config_item(item, dirname)
            for item in yaml.load_all(f.read())
        ]
def gen_front_term(self, x, dmp_num):
    """Generate the front term on the forcing term.

    For rhythmic DMPs the front term is non-diminishing, so this is simply a
    placeholder that returns 1 (broadcast to x's shape for array input).

    x float: the current value of the canonical system
    dmp_num int: the index of the current dmp (unused here)
    """
    return np.ones(x.shape) if isinstance(x, np.ndarray) else 1
def find_all_mappings(
    self,
    other_lattice: "Lattice",
    ltol: float = 1e-5,
    atol: float = 1,
    skip_rotation_matrix: bool = False,
) -> Iterator[Tuple["Lattice", Optional[np.ndarray], np.ndarray]]:
    """Finds all mappings between current lattice and another lattice.

    Args:
        other_lattice (Lattice): Another lattice that is equivalent to
            this one.
        ltol (float): Tolerance for matching lengths. Defaults to 1e-5.
        atol (float): Tolerance for matching angles. Defaults to 1.
        skip_rotation_matrix (bool): Whether to skip calculation of the
            rotation matrix.

    Yields:
        (aligned_lattice, rotation_matrix, scale_matrix) if a mapping is
        found. aligned_lattice is a rotated version of other_lattice that
        has the same lattice parameters, but which is aligned in the
        coordinate system of this lattice so that translational points
        match up in 3D. rotation_matrix is the rotation that has to be
        applied to other_lattice to obtain aligned_lattice, i.e.,
        aligned_matrix = np.inner(other_lattice, rotation_matrix) and
        op = SymmOp.from_rotation_and_translation(rotation_matrix)
        aligned_matrix = op.operate_multi(latt.matrix)
        Finally, scale_matrix is the integer matrix that expresses
        aligned_matrix as a linear combination of this
        lattice, i.e., aligned_matrix = np.dot(scale_matrix, self.matrix)

        None is returned if no matches are found.
    """
    (lengths, angles) = other_lattice.lengths_and_angles
    (alpha, beta, gamma) = angles

    # Candidate lattice points: everything within a sphere of radius
    # max(lengths) * (1 + ltol) around the origin.
    frac, dist, _, _ = self.get_points_in_sphere(
        [[0, 0, 0]], [0, 0, 0], max(lengths) * (1 + ltol), zip_results=False
    )
    cart = self.get_cartesian_coords(frac)
    # This can't be broadcast because they're different lengths.
    inds = [
        np.logical_and(dist / length < 1 + ltol, dist / length > 1 / (1 + ltol))
        for length in lengths
    ]
    c_a, c_b, c_c = (cart[i] for i in inds)
    f_a, f_b, f_c = (frac[i] for i in inds)
    l_a, l_b, l_c = (np.sum(c ** 2, axis=-1) ** 0.5 for c in (c_a, c_b, c_c))

    def get_angles(v1, v2, l1, l2):
        # Pairwise angles (degrees) between candidate vectors, with the
        # cosine clipped to [-1, 1] to guard against round-off.
        x = np.inner(v1, v2) / l1[:, None] / l2
        x[x > 1] = 1
        x[x < -1] = -1
        return np.arccos(x) * 180.0 / pi

    alphab = np.abs(get_angles(c_b, c_c, l_b, l_c) - alpha) < atol
    betab = np.abs(get_angles(c_a, c_c, l_a, l_c) - beta) < atol
    gammab = np.abs(get_angles(c_a, c_b, l_a, l_b) - gamma) < atol

    for i, all_j in enumerate(gammab):
        inds = np.logical_and(
            all_j[:, None], np.logical_and(alphab, betab[i][None, :])
        )
        for j, k in np.argwhere(inds):
            # `np.int` was removed in NumPy 1.24; the builtin `int` is the
            # (originally identical) replacement.
            scale_m = np.array((f_a[i], f_b[j], f_c[k]), dtype=int)
            if abs(np.linalg.det(scale_m)) < 1e-8:
                # Degenerate (non-invertible) combination: not a lattice.
                continue
            aligned_m = np.array((c_a[i], c_b[j], c_c[k]))
            if skip_rotation_matrix:
                rotation_m = None
            else:
                rotation_m = np.linalg.solve(aligned_m, other_lattice.matrix)
            yield Lattice(aligned_m), rotation_m, scale_m
def main(args, stop=False):
    """Build the AMQP daemon and run it (foreground or daemonized)."""
    daemon = AMQPDaemon(
        con_param=getConParams(settings.RABBITMQ_MX2MODS_VIRTUALHOST),
        queue=settings.RABBITMQ_MX2MODS_INPUT_QUEUE,
        out_exch=settings.RABBITMQ_MX2MODS_EXCHANGE,
        out_key=settings.RABBITMQ_MX2MODS_OUTPUT_KEY,
        react_fn=reactToAMQPMessage,
        glob=globals(),  # used in deserializer
    )
    # Run at foreground only when not stopping and explicitly requested.
    foreground = not stop and args and args.foreground
    if foreground:
        daemon.run()
    else:
        daemon.run_daemon()
def update_lipd_v1_1(d):
    """Update LiPD v1.0 to v1.1

    - chronData entry is a list that allows multiple tables
    - paleoData entry is a list that allows multiple tables
    - chronData now allows measurement, model, summary, modelTable,
      ensemble, calibratedAges tables
    - Added 'lipdVersion' key

    :param dict d: Metadata v1.0
    :return dict d: Metadata v1.1
    """
    logger_versions.info("enter update_lipd_v1_1")
    tmp_all = []
    try:
        # ChronData is the only structural update: as of v1.1 each chronData
        # entry wraps its table(s) in a "chronMeasurementTable" list.
        if "chronData" in d:
            for table in d["chronData"]:
                if "chronMeasurementTable" not in table:
                    # Bare table: wrap it as a one-entry measurement-table list.
                    tmp_all.append({"chronMeasurementTable": [table]})
                elif isinstance(table["chronMeasurementTable"], dict):
                    # Single dict: turn it into a list with one entry.
                    tmp_all.append(
                        {"chronMeasurementTable": [table["chronMeasurementTable"]]}
                    )
                else:
                    # Already list-form: keep the entry unchanged.  (These
                    # entries were previously dropped from the result.)
                    tmp_all.append(table)
            if tmp_all:
                d["chronData"] = tmp_all
        # Log that this is now a v1.1 structured file.
        d["lipdVersion"] = 1.1
    except Exception as e:
        logger_versions.error("update_lipd_v1_1: Exception: {}".format(e))
    logger_versions.info("exit update_lipd_v1_1")
    return d
def update_steadystate(x, z, K, H=None):
    """Add a new measurement ``z`` to a steady-state Kalman filter.

    If ``z`` is None, the state estimate is returned unchanged.

    Parameters
    ----------
    x : numpy.array(dim_x, 1), or float
        State estimate vector
    z : (dim_z, 1): array_like
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.
    K : numpy.array, or float
        Kalman gain matrix
    H : numpy.array(dim_x, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    Returns
    -------
    x : numpy.array
        Posterior state estimate vector

    Examples
    --------
    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    >>> update_steadystate(1, 2, 1)  # univariate
    """
    if z is None:
        return x

    # Normalise H to a 1-d array so dot() behaves uniformly.
    if H is None:
        H = np.array([1])
    if np.isscalar(H):
        H = np.array([H])

    predicted = np.atleast_1d(dot(H, x))
    z = reshape_z(z, predicted.shape[0], x.ndim)

    # Residual between measurement and prediction, scaled by the Kalman gain.
    innovation = z - predicted
    return x + dot(K, innovation)
def _group_detect(templates, stream, threshold, threshold_type, trig_int,
                  plotvar, group_size=None, pre_processed=False,
                  daylong=False, parallel_process=True, xcorr_func=None,
                  concurrency=None, cores=None, ignore_length=False,
                  overlap="calculate", debug=0, full_peaks=False,
                  process_cores=None, **kwargs):
    """Pre-process and compute detections for a group of templates.

    Will process the stream object, so if running in a loop, you will want
    to copy the stream before passing it to this function.

    :type templates: list
    :param templates: List of :class:`eqcorrscan.core.match_filter.Template`
    :type stream: `obspy.core.stream.Stream`
    :param stream: Continuous data to detect within using the Template.
    :type threshold: float
    :param threshold:
        Threshold level, if using `threshold_type='MAD'` then this will be
        the multiple of the median absolute deviation.
    :type threshold_type: str
    :param threshold_type:
        The type of threshold to be used, can be MAD, absolute or
        av_chan_corr.
    :type trig_int: float
    :param trig_int:
        Minimum gap between detections in seconds. If multiple detections
        occur within trig_int of one-another, the one with the highest
        cross-correlation sum will be selected.
    :type plotvar: bool
    :param plotvar: Turn plotting on or off.
    :type group_size: int
    :param group_size:
        Maximum number of templates to run at once, use to reduce memory
        consumption, if unset will use all templates.
    :type pre_processed: bool
    :param pre_processed:
        Set to True if `stream` has already undergone processing; otherwise
        the :mod:`eqcorrscan.utils.pre_processing` routines are used to
        resample and filter the continuous data.
    :type daylong: bool
    :param daylong:
        Set to True to use :func:`eqcorrscan.utils.pre_processing.dayproc`,
        which performs additional checks for day-long data.
    :type parallel_process: bool
    :param parallel_process: Whether to pre-process in parallel.
    :type xcorr_func: str or callable
    :param xcorr_func:
        A str of a registered xcorr function or a callable implementing a
        custom xcorr function.
    :type concurrency: str
    :param concurrency:
        Concurrency for the xcorr function: 'multithread', 'multiprocess'
        or 'concurrent'.
    :type cores: int
    :param cores: Number of workers for processing and correlation.
    :type ignore_length: bool
    :param ignore_length:
        If daylong=True, skip the check that at least 80% of the day has
        data (not recommended).
    :type overlap: float
    :param overlap:
        Either None, "calculate" or a float of number of seconds to overlap
        detection streams by, countering the delay-and-stack effect in
        cross-correlation sums. "calculate" uses the maximum lag within the
        templates.
    :type debug: int
    :param debug: Debug level from 0-5 where five is more output.
    :type full_peaks: bool
    :param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`
    :type process_cores: int
    :param process_cores:
        Number of processes for pre-processing (defaults to `cores`).

    :return:
        :class:`eqcorrscan.core.match_filter.Party` of families of
        detections.
    """
    master = templates[0]
    # Check that they are all processed the same.
    lap = 0.0
    for template in templates:
        starts = [t.stats.starttime for t in template.st.sort(['starttime'])]
        if starts[-1] - starts[0] > lap:
            # Track the largest intra-template time spread; this becomes the
            # default stream overlap when overlap="calculate".
            lap = starts[-1] - starts[0]
        if not template.same_processing(master):
            raise MatchFilterError('Templates must be processed the same.')
    # Normalise `overlap` to a number of seconds.
    if overlap is None:
        overlap = 0.0
    elif not isinstance(overlap, float) and str(overlap) == str("calculate"):
        overlap = lap
    elif not isinstance(overlap, float):
        raise NotImplementedError(
            "%s is not a recognised overlap type" % str(overlap))
    if not pre_processed:
        if process_cores is None:
            process_cores = cores
        # Filter/resample the continuous data; may split it into chunks.
        streams = _group_process(
            template_group=templates, parallel=parallel_process,
            debug=debug, cores=process_cores, stream=stream,
            daylong=daylong, ignore_length=ignore_length, overlap=overlap)
    else:
        warnings.warn('Not performing any processing on the continuous data.')
        streams = [stream]
    detections = []
    party = Party()
    # Work out how many template groups are needed for the memory cap.
    if group_size is not None:
        n_groups = int(len(templates) / group_size)
        if n_groups * group_size < len(templates):
            n_groups += 1
    else:
        n_groups = 1
    for st_chunk in streams:
        debug_print(
            'Computing detections between %s and %s' %
            (st_chunk[0].stats.starttime, st_chunk[0].stats.endtime),
            0, debug)
        # Trim every trace to the span of the first trace in the chunk, then
        # clip any trace that is still longer (sample-count mismatch).
        st_chunk.trim(starttime=st_chunk[0].stats.starttime,
                      endtime=st_chunk[0].stats.endtime)
        for tr in st_chunk:
            if len(tr) > len(st_chunk[0]):
                tr.data = tr.data[0:len(st_chunk[0])]
        for i in range(n_groups):
            if group_size is not None:
                end_group = (i + 1) * group_size
                start_group = i * group_size
                # NOTE(review): `i` ranges over 0..n_groups-1, so this
                # condition never holds; the slice end is clamped by Python
                # slicing anyway, so this looks like dead code -- confirm.
                if i == n_groups:
                    end_group = len(templates)
            else:
                end_group = len(templates)
                start_group = 0
            template_group = [t for t in templates[start_group:end_group]]
            detections += match_filter(
                template_names=[t.name for t in template_group],
                template_list=[t.st for t in template_group], st=st_chunk,
                xcorr_func=xcorr_func, concurrency=concurrency,
                threshold=threshold, threshold_type=threshold_type,
                trig_int=trig_int, plotvar=plotvar, debug=debug, cores=cores,
                full_peaks=full_peaks, peak_cores=process_cores, **kwargs)
            # Group detections into one Family per template.
            # NOTE(review): `detections` accumulates across chunks/groups and
            # is re-scanned in full here, so detections from earlier chunks
            # appear to be added to a Family again for each later chunk --
            # confirm whether duplication is handled downstream.
            for template in template_group:
                family = Family(template=template, detections=[])
                for detection in detections:
                    if detection.template_name == template.name:
                        family.detections.append(detection)
                party += family
    return party
def perms_check ( perms , prefix , ambiguous = False ) :
"""Return the user ' s perms for the specified prefix
perms < dict > permissions dict
prefix < string > namespace to check for perms
ambiguous < bool = False > if True reverse wildcard matching is active and a perm check for a . b . * will
be matched by the user having perms to a . b . c or a . b . d - only use this if you know what
you are doing .""" | try :
token = prefix . split ( "." )
i = 1
l = len ( token )
r = 0
# collect permission rules with a wildcard in them , so we dont do unecessary
# regex searches later on
perms_wc = { }
for ns , p in perms . items ( ) :
if ns . find ( "*" ) > - 1 :
perms_wc [ re . escape ( ns ) . replace ( "\*" , "[^\.]+" ) ] = p
while i <= l :
k = "." . join ( token [ : i ] )
matched = False
# check for exact match
if perms . has_key ( k ) :
r = perms . get ( k )
# check for wildcard matches ( if any wildcard rules exist )
elif perms_wc :
for ns , p in perms_wc . items ( ) :
a = "^%s$" % ns
b = "^%s\." % ns
j = len ( a )
u = len ( b )
if j > matched and re . match ( a , k ) :
r = p
matched = j
elif u > matched and re . match ( b , k ) :
r = p
matched = u
# if not matched at all and ambiguous flag is true , do ambiguous matching
if not matched and ambiguous :
m = "^%s" % re . escape ( k ) . replace ( "\*" , "[^\.]+" )
for ns , p in perms . items ( ) :
if re . match ( m , ns ) and p > r :
r = p
break
i += 1
return r
except :
raise |
def _contains_cftime_datetimes ( array ) -> bool :
"""Check if an array contains cftime . datetime objects""" | try :
from cftime import datetime as cftime_datetime
except ImportError :
return False
else :
if array . dtype == np . dtype ( 'O' ) and array . size > 0 :
sample = array . ravel ( ) [ 0 ]
if isinstance ( sample , dask_array_type ) :
sample = sample . compute ( )
if isinstance ( sample , np . ndarray ) :
sample = sample . item ( )
return isinstance ( sample , cftime_datetime )
else :
return False |
def _create_variables(self, n_features):
    """Create the TensorFlow variables for the model.

    Attaches ``W`` (weights), ``bh_`` (hidden bias) and ``bv_`` (visible
    bias) to ``self`` and registers a summary histogram for each.

    :param n_features: number of features (visible units)
    """
    # Weight matrix (visible x hidden), small random initialisation.
    self.W = tf.Variable(
        tf.truncated_normal(shape=[n_features, self.num_hidden], stddev=0.1),
        name='weights')
    tf.summary.histogram('weights', self.W)
    # Hidden-unit bias vector.
    self.bh_ = tf.Variable(
        tf.constant(0.1, shape=[self.num_hidden]), name='hidden-bias')
    tf.summary.histogram('hidden-bias', self.bh_)
    # Visible-unit bias vector.
    self.bv_ = tf.Variable(
        tf.constant(0.1, shape=[n_features]), name='visible-bias')
    tf.summary.histogram('visible-bias', self.bv_)
def _extract_service_catalog(self, body):
    """Set the client's service catalog from the response data."""
    auth_ref = access.create(body=body)
    self.auth_ref = auth_ref
    self.service_catalog = auth_ref.service_catalog
    self.auth_token = auth_ref.auth_token
    self.auth_tenant_id = auth_ref.tenant_id
    self.auth_user_id = auth_ref.user_id
    if not self.endpoint_url:
        # Resolve the endpoint from the freshly-parsed catalog only when
        # one was not configured explicitly.
        self.endpoint_url = self.service_catalog.url_for(
            region_name=self.region_name,
            service_type=self.service_type,
            interface=self.endpoint_type)
def on_linkType_changed(self, evt):
    """User changed link kind, so prepare available fields."""
    # No link currently selected -- just let the event propagate.
    if self.current_idx < 0:
        evt.Skip()
        return
    # Translate the combo-box selection into a PyMuPDF link-kind code.
    n = self.linkType.GetSelection()
    lt_str = self.linkType.GetString(n)
    lt = self.link_code[lt_str]
    self.prep_link_details(lt)
    # Mark the currently selected link dict as modified with the new kind.
    lnk = self.page_links[self.current_idx]
    lnk["update"] = True
    lnk["kind"] = lt
    self.enable_update()
    if lt == fitz.LINK_GOTO:
        # Internal jump: needs a target page and a point on that page.
        # Default any non-numeric fields, then enable them for editing.
        if not self.toPage.Value.isdecimal():
            self.toPage.ChangeValue("1")
        self.toPage.Enable()
        if not self.toLeft.Value.isdecimal():
            self.toLeft.ChangeValue("0")
        self.toLeft.Enable()
        if not self.toHeight.Value.isdecimal():
            self.toHeight.ChangeValue("0")
        self.toHeight.Enable()
        # Stored page numbers are 0-based; the UI shows 1-based.
        lnk["page"] = int(self.toPage.Value) - 1
        lnk["to"] = fitz.Point(int(self.toLeft.Value), int(self.toHeight.Value))
    elif lt == fitz.LINK_GOTOR:
        # Remote goto: like GOTO but with a target file as well.
        if not self.toFile.Value:
            # Default the file name from the text under the link rectangle.
            self.toFile.SetValue(self.text_in_rect())
            self.toFile.MarkDirty()
        if not self.toPage.Value.isdecimal():
            self.toPage.ChangeValue("1")
        if not self.toLeft.Value.isdecimal():
            self.toLeft.ChangeValue("0")
        if not self.toHeight.Value.isdecimal():
            self.toHeight.ChangeValue("0")
        self.toLeft.Enable()
        self.toPage.Enable()
        self.toFile.Enable()
        self.toHeight.Enable()
        lnk["file"] = self.toFile.Value
        lnk["page"] = int(self.toPage.Value) - 1
        lnk["to"] = fitz.Point(int(self.toLeft.Value), int(self.toHeight.Value))
    elif lt == fitz.LINK_URI:
        # External URI: default it from the text under the link rectangle.
        if not self.toURI.Value:
            self.toURI.SetValue(self.text_in_rect())
            self.toURI.MarkDirty()
        lnk["uri"] = self.toURI.Value
        self.toURI.Enable()
    elif lt == fitz.LINK_LAUNCH:
        # Launch link: opens a file; default name from text under rect.
        if not self.toFile.Value:
            self.toFile.SetValue(self.text_in_rect())
            self.toFile.MarkDirty()
        lnk["file"] = self.toFile.Value
        self.toFile.Enable()
    elif lt == fitz.LINK_NAMED:
        # Named destination: offer the first available name.
        self.toName.SetSelection(0)
        self.toName.Enable()
    # Write the modified link back and let the event propagate.
    self.page_links[self.current_idx] = lnk
    evt.Skip()
    return
def _handle_location ( self , location ) :
"""Return an element located at location with flexible args .
Args :
location : String xpath to use in an Element . find search OR
an Element ( which is simply returned ) .
Returns :
The found Element .
Raises :
ValueError if the location is a string that results in a
find of None .""" | if not isinstance ( location , ElementTree . Element ) :
element = self . find ( location )
if element is None :
raise ValueError ( "Invalid path!" )
else :
element = location
return element |
def load_obj_from_path(import_path, prefix=None, ld=None):
    """import a python object from an import path

    `import_path` - a python import path.  For instance:
        mypackage.module.func
    or
        mypackage.module.class
    `prefix` (str) - a value to prepend to the import path if it isn't
        already there.  For instance:
            load_obj_from_path('module.func', prefix='mypackage')
        is the same as
            load_obj_from_path('mypackage.module.func')
    `ld` (dict) key: value data to pass to the logger if an error occurs
    """
    if ld is None:
        # Avoid a shared mutable default argument (was `ld=dict()`).
        ld = {}
    if prefix and not import_path.startswith(prefix):
        import_path = '.'.join([prefix, import_path])
    log.debug('attempting to load a python object from an import path',
              extra=dict(import_path=import_path, **ld))
    # First, try to import the whole path as a module.
    try:
        return importlib.import_module(import_path)
    except ImportError:
        # Not importable as a module -- fall through and treat the last
        # component as an attribute of its parent module.  (Was a bare
        # `except: pass`, which also hid unrelated errors raised while
        # executing the target module's body.)
        pass
    # Try to extract an object from a module.
    try:
        path, obj_name = import_path.rsplit('.', 1)
    except ValueError:
        log_raise(("import path needs at least 1 period in your import path."
                   " An example import path is something like: module.obj"),
                  dict(import_path=import_path, **ld), InvalidImportPath)
    try:
        mod = importlib.import_module(path)
    except ImportError:
        if not prefix:
            # No prefix to strip -- retrying is pointless.  (Previously this
            # crashed with TypeError on `path.replace(None, ...)`.)
            raise
        newpath = path.replace(prefix, '', 1).lstrip('.')
        log.debug("Could not load import path. Trying a different one",
                  extra=dict(oldpath=path, newpath=newpath))
        path = newpath
        mod = importlib.import_module(path)
    try:
        obj = getattr(mod, obj_name)
    except AttributeError:
        log_raise(("object does not exist in given module."
                   " Your import path is not"
                   " properly defined because the given `obj_name` does not exist"),
                  dict(import_path=path, obj_name=obj_name, **ld),
                  InvalidImportPath)
    return obj
def read_firmware_file(file_path):
    """Reads a firmware file into a deque for processing.

    Keeps only lines that start with ':' (Intel-HEX style records),
    appending "\r" to each.

    :param file_path: Path to the firmware file
    :type file_path: string
    :returns: deque
    """
    records = deque()
    with open(file_path) as firmware_handle:
        for raw_line in firmware_handle:
            record = raw_line.rstrip()
            # startswith() on an empty string is False, so blank lines
            # are skipped just like non-record lines.
            if record.startswith(':'):
                records.append(record + "\r")
    return records
def get_bool(self, name, default=None):
    """Retrieve an environment variable value as ``bool``.

    The conversion here is ``bool(self.get_int(name))``, so zero evaluates
    to ``False`` and non-zero to ``True``.  String forms such as
    ``'true'``/``'false'`` are presumably handled by ``get_int`` --
    TODO confirm against its implementation.

    Args:
        name (str): The case-insensitive, unprefixed variable name.
        default: If provided, a default value will be returned
            instead of throwing ``EnvironmentError``.

    Returns:
        bool: The environment variable's value as a ``bool``.

    Raises:
        EnvironmentError: If the environment variable does not
            exist, and ``default`` was not provided.
        ValueError: If the environment variable value could not be
            interpreted as a ``bool``.
    """
    if name in self:
        return bool(self.get_int(name))
    if default is not None:
        return default
    raise EnvironmentError.not_found(self._prefix, name)
def _end_of_decade(self):
    """Reset the date to the last day of the decade.

    :rtype: Date
    """
    # Floor the year to the start of its decade, then jump to its last year.
    decade_start = self.year - self.year % YEARS_PER_DECADE
    return self.set(decade_start + YEARS_PER_DECADE - 1, 12, 31)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.