Dataset columns:

    idx           int64    0 - 251k
    question      string   lengths 53 - 3.53k
    target        string   lengths 5 - 1.23k
    len_question  int64    20 - 893
    len_target    int64    3 - 238
19,500
def exit_on_error(self, message, exit_code=1):  # pylint: disable=no-self-use
    log = "I got an unrecoverable error. I have to exit."
    if message:
        log += "\n-----\nError message: %s" % message
        print("Error message: %s" % message)
    log += "-----\n"
    log += "You can get help at https://github.com/Alignak-monitoring/alignak\n"
    log += "If you think this is a bug, create a new issue including as many " \
           "details as possible (version, configuration,...)"
    if exit_code is not None:
        exit(exit_code)
Log generic message when getting an error and exit
152
9
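A side note on the string handling in exit_on_error above: the long help message relies on Python's implicit concatenation of adjacent string literals. A minimal, self-contained sketch of that behavior (the strings here are illustrative, not from the Alignak source):

    # Adjacent string literals are concatenated at compile time: handy for
    # long messages, but a missing '+' or ',' silently merges strings that
    # were meant to stay separate.
    message = ("create a new issue including as many "
               "details as possible (version, configuration, ...)")
    assert message == ("create a new issue including as many details "
                       "as possible (version, configuration, ...)")

    # The classic pitfall: a forgotten comma in a list of strings.
    items = [
        "first entry",
        "second entry"   # <- missing comma
        "third entry",   # these two literals merge into one element
    ]
    assert len(items) == 2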
19,501
def exit_on_exception(self, raised_exception, message='', exit_code=99):
    self.exit_on_error(message=message, exit_code=None)
    logger.critical("-----\nException: %s\nBack trace of the error:\n%s",
                    str(raised_exception), traceback.format_exc())
    exit(exit_code)
Log generic message when getting an unrecoverable error
92
10
19,502
def get_objects_from_from_queues(self):
    _t0 = time.time()
    had_some_objects = False
    for module in self.modules_manager.get_external_instances():
        queue = module.from_q
        if not queue:
            continue
        while True:
            queue_size = queue.qsize()
            if queue_size:
                statsmgr.gauge('queues.from.%s.count' % module.get_name(), queue_size)
            try:
                obj = queue.get_nowait()
            except Full:
                logger.warning("Module %s from queue is full", module.get_name())
            except Empty:
                break
            except (IOError, EOFError) as exp:
                logger.warning("Module %s from queue is no more available: %s",
                               module.get_name(), str(exp))
            except Exception as exp:  # pylint: disable=broad-except
                logger.error("An external module queue got a problem '%s'", str(exp))
            else:
                had_some_objects = True
                self.add(obj)
    statsmgr.timer('queues.time', time.time() - _t0)
    return had_some_objects
Get objects from the modules' from queues and add them.
275
9
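The function above drains each module queue with get_nowait() until Empty is raised. A minimal stand-alone sketch of the same drain pattern, using only the standard library (the queue contents below are made up for illustration):

    import queue

    def drain(q):
        """Drain a queue without blocking, returning everything it held."""
        items = []
        while True:
            try:
                items.append(q.get_nowait())
            except queue.Empty:
                # Nothing left to read: stop draining.
                break
        return items

    q = queue.Queue()
    for obj in ("brok-1", "brok-2", "brok-3"):
        q.put(obj)
    print(drain(q))  # ['brok-1', 'brok-2', 'brok-3']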
19,503
def add_automatic_comment(self, ref):
    if self.fixed is True:
        text = (DOWNTIME_FIXED_MESSAGE % (
            ref.my_type,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
            ref.my_type))
    else:
        hours, remainder = divmod(self.duration, 3600)
        minutes, _ = divmod(remainder, 60)
        text = (DOWNTIME_FLEXIBLE_MESSAGE % (
            ref.my_type,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)),
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.end_time)),
            hours, minutes, ref.my_type))
    data = {
        'comment': text,
        'comment_type': 1 if ref.my_type == 'host' else 2,
        'entry_type': 2,
        'source': 0,
        'expires': False,
        'ref': ref.uuid
    }
    comment = Comment(data)
    self.comment_id = comment.uuid
    ref.comments[comment.uuid] = comment
    return comment
Add comment on ref for downtime
344
6
19,504
def get_raise_brok(self, host_name, service_name=''):
    data = self.serialize()
    data['host'] = host_name
    if service_name != '':
        data['service'] = service_name
    return Brok({'type': 'downtime_raise', 'data': data})
Get a start downtime brok
80
6
19,505
def get_expire_brok(self, host_name, service_name=''):
    data = self.serialize()
    data['host'] = host_name
    if service_name != '':
        data['service'] = service_name
    return Brok({'type': 'downtime_expire', 'data': data})
Get an expire downtime brok
82
6
19,506
def fill_data_brok_from(self, data, brok_type):
    cls = self.__class__
    # Now config properties
    for prop, entry in list(cls.properties.items()):
        # Is this property intended for broking?
        # if 'fill_brok' in entry[prop]:
        if brok_type in entry.fill_brok:
            if hasattr(self, prop):
                data[prop] = getattr(self, prop)
Add properties to data when the fill_brok entry of the class property matches brok_type
105
20
19,507
def get_name(self):
    return getattr(self, 'dependent_host_name', '') + '/' + \
        getattr(self, 'dependent_service_description', '') + '..' + \
        getattr(self, 'host_name', '') + '/' + \
        getattr(self, 'service_description', '')
Get a name based on four class attributes; each attribute is replaced by an empty string if it does not exist
78
17
19,508
def explode_hostgroup(self, svc_dep, hostgroups):  # pylint: disable=too-many-locals
    # We will create a service dependency for each host part of the host group
    # First get the services
    snames = [d.strip() for d in svc_dep.service_description.split(',')]
    # And the dependent services
    dep_snames = [d.strip() for d in svc_dep.dependent_service_description.split(',')]
    # Now, for each host in the hostgroup, we will create a service dependency object
    hg_names = [n.strip() for n in svc_dep.hostgroup_name.split(',')]
    for hg_name in hg_names:
        hostgroup = hostgroups.find_by_name(hg_name)
        if hostgroup is None:
            err = "ERROR: the servicedependency got an unknown hostgroup_name '%s'" % hg_name
            self.add_error(err)
            continue
        hnames = []
        hnames.extend([m.strip() for m in hostgroup.get_hosts()])
        for hname in hnames:
            for dep_sname in dep_snames:
                for sname in snames:
                    new_sd = svc_dep.copy()
                    new_sd.host_name = hname
                    new_sd.service_description = sname
                    new_sd.dependent_host_name = hname
                    new_sd.dependent_service_description = dep_sname
                    self.add_item(new_sd)
Explode a service dependency for each member of the hostgroup
349
11
19,509
def linkify_sd_by_s(self, hosts, services):
    to_del = []
    errors = self.configuration_errors
    warns = self.configuration_warnings
    for servicedep in self:
        try:
            s_name = servicedep.dependent_service_description
            hst_name = servicedep.dependent_host_name

            # The new member list, in id
            serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if serv is None:
                host = hosts.find_by_name(hst_name)
                if not (host and host.is_excluded_for_sdesc(s_name)):
                    errors.append("Service %s not found for host %s" % (s_name, hst_name))
                elif host:
                    warns.append("Service %s is excluded from host %s ; "
                                 "removing this servicedependency as it's unusable."
                                 % (s_name, hst_name))
                to_del.append(servicedep)
                continue
            servicedep.dependent_service_description = serv.uuid

            s_name = servicedep.service_description
            hst_name = servicedep.host_name

            # The new member list, in id
            serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if serv is None:
                host = hosts.find_by_name(hst_name)
                if not (host and host.is_excluded_for_sdesc(s_name)):
                    errors.append("Service %s not found for host %s" % (s_name, hst_name))
                elif host:
                    warns.append("Service %s is excluded from host %s ; "
                                 "removing this servicedependency as it's unusable."
                                 % (s_name, hst_name))
                to_del.append(servicedep)
                continue
            servicedep.service_description = serv.uuid
        except AttributeError as err:
            logger.error("[servicedependency] fail to linkify by service %s: %s",
                         servicedep, err)
            to_del.append(servicedep)

    for servicedep in to_del:
        self.remove_item(servicedep)
Replace dependent_service_description and service_description in service dependency by the real object
512
18
19,510
def linkify_sd_by_tp(self, timeperiods):
    for servicedep in self:
        try:
            tp_name = servicedep.dependency_period
            timeperiod = timeperiods.find_by_name(tp_name)
            if timeperiod:
                servicedep.dependency_period = timeperiod.uuid
            else:
                servicedep.dependency_period = ''
        except AttributeError as exp:
            logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp)
Replace dependency_period by a real object in service dependency
116
12
19,511
def linkify_s_by_sd(self, services):
    for servicedep in self:
        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string", "undefined")
        setattr(servicedep, "dependent_service_description_string", "undefined")

        if getattr(servicedep, 'service_description', None) is None or \
                getattr(servicedep, 'dependent_service_description', None) is None:
            continue

        services.add_act_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.notification_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)
        services.add_chk_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.execution_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)

        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string",
                services[servicedep.service_description].get_name())
        setattr(servicedep, "dependent_service_description_string",
                services[servicedep.dependent_service_description].get_name())
Add dependency in service objects
323
5
19,512
def init(self):  # pylint: disable=too-many-branches
    if not self.enabled:
        logger.info(" the module is disabled.")
        return True

    # Default result if test_connection() raises before assigning
    connections = False
    try:
        connections = self.test_connection()
    except Exception as exp:  # pylint: disable=broad-except
        logger.error("initialization, test connection failed. Error: %s", str(exp))

    if self.influxdb_enabled:
        try:
            # Check that the configured TSDB exists, else create it...
            dbs = self.influx.get_list_database()
            for db in dbs:
                if db.get('name') == self.influxdb_database:
                    logger.info("the database %s already exists.", self.influxdb_database)
                    break
            else:
                # Create the database
                logger.info("creating database %s...", self.influxdb_database)
                self.influx.create_database(self.influxdb_database)

            # Check that the configured TSDB retention policy exists, else create it...
            if self.influxdb_retention_name:
                rps = self.influx.get_list_retention_policies()
                for rp in rps:
                    if rp.get('name') == self.influxdb_retention_name:
                        logger.info("the retention policy %s already exists.",
                                    self.influxdb_retention_name)
                        break
                else:
                    # Create a retention policy for this database
                    logger.info("creating database retention policy: %s - %s - %s...",
                                self.influxdb_retention_name,
                                self.influxdb_retention_duration,
                                self.influxdb_retention_replication)
                    self.influx.create_retention_policy(
                        self.influxdb_retention_name, self.influxdb_retention_duration,
                        self.influxdb_retention_replication,
                        database=self.influxdb_database)

            # Check that the configured TSDB user exists, else create it...
            if self.influxdb_username:
                users = self.influx.get_list_users()
                for user in users:
                    if user.get('user') == self.influxdb_username:
                        logger.info("the user %s already exists.", self.influxdb_username)
                        break
                else:
                    # Create the user for this database
                    logger.info("creating user: %s...", self.influxdb_username)
                    self.influx.create_user(self.influxdb_username,
                                            self.influxdb_password, admin=False)

            connections = connections or True
        except Exception as exp:  # pylint: disable=broad-except
            logger.error("InfluxDB, DB initialization failed. Error: %s", str(exp))

    return connections
Called by the daemon broker to initialize the module
598
10
19,513
def get_metrics_from_perfdata(self, service, perf_data):
    result = []
    metrics = PerfDatas(perf_data)
    for metric in metrics:
        logger.debug("service: %s, metric: %s (%s)", service, metric, metric.__dict__)
        if metric.name in ['time']:
            metric.name = "duration"

        name = sanitize_name(metric.name)
        name = self.multiple_values.sub(r'.\1', name)
        if not name:
            continue

        # Get the metric value and its thresholds values if they exist
        name_value = {name: metric.value, 'uom_' + name: metric.uom}

        # Get or ignore extra values depending upon module configuration
        if metric.warning and self.send_warning:
            name_value[name + '_warn'] = metric.warning
        if metric.critical and self.send_critical:
            name_value[name + '_crit'] = metric.critical
        if metric.min and self.send_min:
            name_value[name + '_min'] = metric.min
        if metric.max and self.send_max:
            name_value[name + '_max'] = metric.max

        for key, value in name_value.items():
            result.append((key, value, metric.uom))

    logger.debug("Metrics: %s - %s", service, result)
    return result
Decode the performance data to build a metrics list
320
10
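PerfDatas is Alignak's own parser for Nagios-style performance data of the form 'label'=value[UOM];[warn];[crit];[min];[max]. The rough, stand-alone sketch below only illustrates the input format that get_metrics_from_perfdata consumes; it is not the actual PerfDatas implementation (quoted labels and other corner cases are ignored):

    import re

    METRIC_RE = re.compile(r"([^=\s]+)=([\d.]+)([a-zA-Z%]*)")

    def parse_perfdata(perf_data):
        """Very rough parse of 'name=value[uom];warn;crit;min;max' chunks."""
        metrics = []
        for chunk in perf_data.split():
            fields = chunk.split(';')
            match = METRIC_RE.match(fields[0])
            if not match:
                continue
            name, value, uom = match.groups()
            # Pad the optional threshold fields to a fixed length of four
            thresholds = fields[1:] + [''] * (4 - len(fields[1:]))
            metrics.append({'name': name, 'value': float(value), 'uom': uom,
                            'warning': thresholds[0], 'critical': thresholds[1],
                            'min': thresholds[2], 'max': thresholds[3]})
        return metrics

    print(parse_perfdata("rta=0.80ms;100;200;0;300 pl=0%;40;80;;"))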
19,514
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
    if ts is None:
        ts = int(time.time())

    data = {
        "measurement": service,
        "tags": {
            "host": host,
            "service": service,
            "realm": '.'.join(realm) if isinstance(realm, list) else realm,
            "path": path
        },
        "time": ts,
        "fields": {}
    }
    if path is not None:
        data['tags'].update({"path": path})

    for metric, value, _ in metrics:
        data['fields'].update({metric: value})

    # Flush if necessary
    logger.debug("Data: %s", data)
    self.my_metrics.append(data)
    if self.metrics_count >= self.metrics_flush_count:
        # self.carbon.add_data_list(self.my_metrics)
        self.flush()
Send performance data to time series database
223
7
19,515
def manage_initial_service_status_brok(self, b):
    host_name = b.data['host_name']
    service_description = b.data['service_description']
    service_id = host_name + "/" + service_description
    logger.debug("got initial service status: %s", service_id)

    if host_name not in self.hosts_cache:
        logger.error("initial service status, host is unknown: %s.", service_id)
        return

    self.services_cache[service_id] = {}
    if 'customs' in b.data:
        self.services_cache[service_id]['_GRAPHITE_POST'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_POST', None))

    logger.debug("initial service status received: %s", service_id)
Prepare the known services cache
203
6
19,516
def manage_initial_host_status_brok(self, b):
    host_name = b.data['host_name']
    logger.debug("got initial host status: %s", host_name)

    self.hosts_cache[host_name] = {
        'realm_name': sanitize_name(b.data.get('realm_name', b.data.get('realm', 'All'))),
    }
    if 'customs' in b.data:
        self.hosts_cache[host_name]['_GRAPHITE_PRE'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_PRE', None))
        self.hosts_cache[host_name]['_GRAPHITE_GROUP'] = \
            sanitize_name(b.data['customs'].get('_GRAPHITE_GROUP', None))

    logger.debug("initial host status received: %s", host_name)
Prepare the known hosts cache
236
6
19,517
def manage_service_check_result_brok(self, b):  # pylint: disable=too-many-branches
    host_name = b.data.get('host_name', None)
    service_description = b.data.get('service_description', None)
    if not host_name or not service_description:
        return
    service_id = host_name + "/" + service_description
    logger.debug("service check result: %s", service_id)

    # If host and service initial status broks have not been received, ignore ...
    if not self.ignore_unknown and host_name not in self.hosts_cache:
        logger.warning("received service check result for an unknown host: %s", service_id)
        return
    if service_id not in self.services_cache and not self.ignore_unknown:
        logger.warning("received service check result for an unknown service: %s",
                       service_id)
        return

    # Decode received metrics
    metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return

    # If checks latency is ignored
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])

    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))

    # Custom services variables
    desc = sanitize_name(service_description)
    if service_id in self.services_cache:
        if self.services_cache[service_id].get('_GRAPHITE_POST', None):
            desc = ".".join((desc,
                             self.services_cache[service_id].get('_GRAPHITE_POST', None)))

    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source, desc))
    else:
        path = '.'.join((hname, desc))

    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))

    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)

    # Send metrics
    self.send_to_tsdb(realm_name, host_name, service_description, metrics,
                      check_time, path)
A service check result brok has just arrived ...
776
10
19,518
def manage_host_check_result_brok(self, b):  # pylint: disable=too-many-branches
    host_name = b.data.get('host_name', None)
    if not host_name:
        return
    logger.debug("host check result: %s", host_name)

    # If host initial status brok has not been received, ignore ...
    if host_name not in self.hosts_cache and not self.ignore_unknown:
        logger.warning("received host check result for an unknown host: %s", host_name)
        return

    # Decode received metrics
    metrics = self.get_metrics_from_perfdata('host_check', b.data['perf_data'])
    if not metrics:
        logger.debug("no metrics to send ...")
        return

    # If checks latency is ignored
    if self.ignore_latency_limit >= b.data['latency'] > 0:
        check_time = int(b.data['last_chk']) - int(b.data['latency'])
    else:
        check_time = int(b.data['last_chk'])

    # Custom hosts variables
    hname = sanitize_name(host_name)
    if host_name in self.hosts_cache:
        if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname))
        if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None):
            hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname))

    # Graphite data source
    if self.graphite_data_source:
        path = '.'.join((hname, self.graphite_data_source))
        if self.hostcheck:
            path = '.'.join((hname, self.graphite_data_source, self.hostcheck))
    else:
        path = '.'.join((hname, self.hostcheck))

    # Realm as a prefix
    if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None):
        path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path))

    realm_name = None
    if host_name in self.hosts_cache:
        realm_name = self.hosts_cache[host_name].get('realm_name', None)

    # Send metrics
    self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)
A host check result brok has just arrived ...
642
10
19,519
def get_comment_brok(self, host_name, service_name=''):
    data = self.serialize()
    data['host'] = host_name
    if service_name:
        data['service'] = service_name
    return Brok({'type': 'comment', 'data': data})
Get a comment brok
74
5
19,520
def new_inner_member(self, name, params):
    params['notificationway_name'] = name
    self.add_item(NotificationWay(params))
Create a new instance of NotificationWay with the given name and parameters and add it to the items list
38
18
19,521
def serialize(obj, no_dump=False):
    # print("Serialize (%s): %s" % (no_dump, obj))
    if hasattr(obj, "serialize") and isinstance(obj.serialize, collections.Callable):
        o_dict = {
            '__sys_python_module__': "%s.%s" % (obj.__class__.__module__,
                                                obj.__class__.__name__),
            'content': obj.serialize()
        }
    elif isinstance(obj, dict):
        o_dict = {}
        for key, value in list(obj.items()):
            o_dict[key] = serialize(value, True)
    elif isinstance(obj, (list, set)):
        o_dict = [serialize(item, True) for item in obj]
    else:
        o_dict = obj

    if no_dump:
        return o_dict

    result = None
    try:
        result = json.dumps(o_dict, ensure_ascii=False)
    except MemoryError:
        return {'_error': 'Not enough memory on this computer to correctly manage Alignak '
                          'objects serialization! '
                          'Sorry for this, please log an issue in the project repository.'}
    return result
Serialize an object.
275
5
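The serialize helper above wraps any object exposing a callable serialize() in an envelope that records its module and class path, so a matching reader can rebuild it. A minimal sketch of that round-trip idea with a toy class (the unserialize below is a simplified illustration, not the Alignak implementation):

    import importlib
    import json

    class Downtime:
        """Toy object following the serialize() convention above."""
        def __init__(self, start_time=0):
            self.start_time = start_time

        def serialize(self):
            return {'start_time': self.start_time}

    def serialize(obj):
        # Envelope: remember the class path so the reader can rebuild the object.
        return json.dumps({
            '__sys_python_module__': "%s.%s" % (obj.__class__.__module__,
                                                obj.__class__.__name__),
            'content': obj.serialize(),
        })

    def unserialize(data):
        # Simplified reader: import the module, look the class up, re-instantiate.
        envelope = json.loads(data)
        module_name, _, class_name = envelope['__sys_python_module__'].rpartition('.')
        cls = getattr(importlib.import_module(module_name), class_name)
        return cls(**envelope['content'])

    dump = serialize(Downtime(start_time=1234))
    restored = unserialize(dump)
    assert restored.start_time == 1234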
19,522
def get_event(self):
    self.prepare()
    return (self.creation_time, self.data['level'], self.data['message'])
This function returns an Event from a Brok
39
9
19,523
def prepare(self):
    # Maybe the Brok is an old daemon one, or it was already prepared;
    # if so, the data is already ok
    if hasattr(self, 'prepared') and not self.prepared:
        self.data = unserialize(self.data)
        if self.instance_id:
            self.data['instance_id'] = self.instance_id
    self.prepared = True
Un-serialize data from the data attribute and add the instance_id key if necessary
86
16
19,524
def resolve_elements(self):
    # If it's a leaf, we just need to dump a set with the content of the node
    if self.leaf:
        if not self.content:
            return set()
        return set(self.content)

    # First put the NOT nodes in a list, and the others in another list
    not_nodes = [s for s in self.sons if s.not_value]
    positiv_nodes = [s for s in self.sons if not s.not_value]
    # ok a not not is hard to read..

    # By default we are using an OR rule
    if not self.operand:
        self.operand = '|'

    res = set()

    # The operand will change the positive loop only
    i = 0
    for node in positiv_nodes:
        node_members = node.resolve_elements()
        if self.operand == '|':
            res = res.union(node_members)
        elif self.operand == '&':
            # The first element of an AND rule should be used as-is
            if i == 0:
                res = node_members
            else:
                res = res.intersection(node_members)
        i += 1

    # And we finally remove all NOT elements from the result
    for node in not_nodes:
        node_members = node.resolve_elements()
        res = res.difference(node_members)

    return res
Get the elements of this node recursively: compute OR/AND rules first, then NOT rules.
294
20
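resolve_elements reduces a node tree to plain set algebra: OR is union, AND is intersection, and NOT children are subtracted last. That equivalence can be checked directly with Python sets (the hostgroup contents below are made up):

    linux = {"srv1", "srv2", "srv3"}
    windows = {"srv4", "srv5"}
    production = {"srv2", "srv4"}

    # (linux | windows) & !production, evaluated the way resolve_elements does:
    # positive nodes first with the node's operand, NOT nodes subtracted last.
    or_node = linux.union(windows)            # operand '|'
    result = or_node.difference(production)   # not_value child subtracted last
    assert result == {"srv1", "srv3", "srv5"}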
19,525
def eval_cor_pattern(self, pattern):  # pylint:disable=too-many-branches
    pattern = pattern.strip()
    complex_node = False

    # Look if it's a complex pattern (with rule) or
    # if it's a leaf of it, like a host/service
    for char in '()+&|,':
        if char in pattern:
            complex_node = True

    node = ComplexExpressionNode()

    # if it's a single expression like !linux or production
    # (where "linux" and "production" are hostgroup names)
    # we will get the objects from it and return a leaf node
    if not complex_node:
        # If it's a not value, tag the node and find
        # the name without this ! operator
        if pattern.startswith('!'):
            node.not_value = True
            pattern = pattern[1:]

        node.operand = self.ctx
        node.leaf = True
        obj, error = self.find_object(pattern)
        if obj is not None:
            node.content = obj
        else:
            node.configuration_errors.append(error)
        return node

    in_par = False
    tmp = ''
    stacked_par = 0
    for char in pattern:
        if char in (',', '|'):
            # Maybe we are in a par, if so, just stack it
            if in_par:
                tmp += char
            else:
                # Oh we got a real cut in an expression, if so, cut it
                tmp = tmp.strip()
                node.operand = '|'
                if tmp != '':
                    son = self.eval_cor_pattern(tmp)
                    node.sons.append(son)
                tmp = ''
        elif char in ('&', '+'):
            # Maybe we are in a par, if so, just stack it
            if in_par:
                tmp += char
            else:
                # Oh we got a real cut in an expression, if so, cut it
                tmp = tmp.strip()
                node.operand = '&'
                if tmp != '':
                    son = self.eval_cor_pattern(tmp)
                    node.sons.append(son)
                tmp = ''
        elif char == '(':
            stacked_par += 1
            in_par = True
            tmp = tmp.strip()
            # Maybe we just start a par, but we got some things in tmp
            # that should not be good in fact !
            if stacked_par == 1 and tmp != '':
                # TODO: real error
                print("ERROR : bad expression near", tmp)
                continue
            # If we are already in a par, add this (
            # but not if it's the first one
            if stacked_par > 1:
                tmp += char
        elif char == ')':
            stacked_par -= 1
            if stacked_par < 0:
                # TODO: real error
                print("Error : bad expression near", tmp, "too much ')'")
                continue
            if stacked_par == 0:
                tmp = tmp.strip()
                son = self.eval_cor_pattern(tmp)
                node.sons.append(son)
                in_par = False
                # OK now clean the tmp so we start clean
                tmp = ''
                continue
            # ok here we are still in a huge par, we just close one sub one
            tmp += char
        # Maybe it's a classic character, if so, continue
        else:
            tmp += char

    # Be sure to manage the trailing part when the line is done
    tmp = tmp.strip()
    if tmp != '':
        son = self.eval_cor_pattern(tmp)
        node.sons.append(son)

    return node
Recursively parse pattern and build a tree of ComplexExpressionNode from it
737
16
19,526
def find_object(self, pattern):
    obj = None
    error = None
    pattern = pattern.strip()

    if pattern == '*':
        obj = [h.host_name for h in list(self.all_elements.items.values())
               if getattr(h, 'host_name', '') != '' and not h.is_tpl()]
        return obj, error

    # Ok a more classic way
    if self.ctx == 'hostgroups':
        # Ok try to find this hostgroup
        hgr = self.grps.find_by_name(pattern)
        # Maybe it's a known one?
        if not hgr:
            error = "Error : cannot find the %s of the expression '%s'" % (self.ctx, pattern)
            return hgr, error

        # Ok the group is found, get the elements!
        elts = hgr.get_hosts()
        elts = strip_and_uniq(elts)

        # Maybe the hostgroup members is '*', if so expand with all hosts
        if '*' in elts:
            elts.extend([h.host_name for h in list(self.all_elements.items.values())
                         if getattr(h, 'host_name', '') != '' and not h.is_tpl()])
            # And remove this strange hostname too :)
            elts.remove('*')
        return elts, error

    obj = self.grps.find_hosts_that_use_template(pattern)
    return obj, error
Get a list of hosts corresponding to the pattern, depending on the context
338
12
19,527
def all_my_hosts_and_services(self):
    for what in (self.hosts, self.services):
        for item in what:
            yield item
Create an iterator for all my known hosts and services
35
10
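The generator above is the classic chain-two-collections idiom; the standard library equivalent is itertools.chain, sketched here with plain lists standing in for the hosts and services containers:

    from itertools import chain

    hosts = ["host-a", "host-b"]
    services = ["host-a/ping", "host-b/disk"]

    # Equivalent to: for what in (hosts, services): for item in what: yield item
    for item in chain(hosts, services):
        print(item)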
19,528
def load_conf(self, instance_id, instance_name, conf):
    self.pushed_conf = conf

    logger.info("loading my configuration (%s / %s):",
                instance_id, self.pushed_conf.instance_id)
    logger.debug("Properties:")
    for key in sorted(self.pushed_conf.properties):
        logger.debug("- %s: %s", key, getattr(self.pushed_conf, key, []))
    logger.debug("Macros:")
    for key in sorted(self.pushed_conf.macros):
        logger.debug("- %s: %s", key, getattr(self.pushed_conf.macros, key, []))

    logger.debug("Objects types:")
    for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
        if strclss in ['arbiters', 'schedulers', 'brokers',
                       'pollers', 'reactionners', 'receivers']:
            continue
        setattr(self, strclss, getattr(self.pushed_conf, strclss, []))
        # Internal statistics
        logger.debug("- %d %s", len(getattr(self, strclss)), strclss)
        statsmgr.gauge('configuration.%s' % strclss, len(getattr(self, strclss)))

    # We need a reversed list for searching in the retention file read
    # todo: check what it is about...
    self.services.optimize_service_search(self.hosts)

    # Just deprecated
    # # Compile the triggers
    # if getattr(self, 'triggers', None):
    #     logger.info("compiling the triggers...")
    #     self.triggers.compile()
    #     self.triggers.load_objects(self)
    # else:
    #     logger.info("No triggers")

    # From the Arbiter configuration. Used for satellites to differentiate the schedulers
    self.alignak_name = self.pushed_conf.alignak_name
    self.instance_id = instance_id
    self.instance_name = instance_name
    self.push_flavor = getattr(self.pushed_conf, 'push_flavor', 'None')
    logger.info("Set my scheduler instance: %s - %s - %s",
                self.instance_id, self.instance_name, self.push_flavor)

    # Tag our monitored hosts/services with our instance_id
    for item in self.all_my_hosts_and_services():
        item.instance_id = self.instance_id
Load configuration received from Arbiter and pushed by our Scheduler daemon
596
13
19,529
def update_recurrent_works_tick(self, conf):
    for key in self.recurrent_works:
        (name, fun, _) = self.recurrent_works[key]
        if isinstance(conf, dict):
            new_tick = conf.get('tick_%s' % name, None)
        else:
            new_tick = getattr(conf, 'tick_%s' % name, None)
        if new_tick is not None:
            logger.debug("Requesting to change the default tick to %d for the action %s",
                         int(new_tick), name)
        else:
            continue

        # Update the default scheduler tick for this function
        try:
            new_tick = int(new_tick)
            logger.info("Changing the default tick to %d for the action %s",
                        new_tick, name)
            self.recurrent_works[key] = (name, fun, new_tick)
        except ValueError:
            logger.warning("Changing the default tick for '%s' to '%s' failed!",
                           new_tick, name)
Modify the tick value for the scheduler recurrent work
229
11
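update_recurrent_works_tick accepts either a dict or an object carrying tick_<name> entries. A self-contained sketch of the same lookup-and-replace over a recurrent_works table (the action names and tick values here are illustrative, not Alignak's actual table):

    recurrent_works = {
        0: ('update_downtimes_and_comments', None, 1),
        1: ('check_freshness', None, 10),
    }

    def update_ticks(conf):
        for key, (name, fun, tick) in list(recurrent_works.items()):
            # Accept both a dict and an object with tick_<name> attributes
            new_tick = conf.get('tick_%s' % name) if isinstance(conf, dict) \
                else getattr(conf, 'tick_%s' % name, None)
            if new_tick is None:
                continue
            try:
                recurrent_works[key] = (name, fun, int(new_tick))
            except ValueError:
                pass  # keep the default tick on bad input

    update_ticks({'tick_check_freshness': 30})
    assert recurrent_works[1][2] == 30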
19,530
def dump_config(self):
    path = os.path.join(tempfile.gettempdir(),
                        'dump-cfg-scheduler-%s-%d.json' % (self.name, int(time.time())))
    try:
        self.pushed_conf.dump(path)
    except (OSError, IndexError) as exp:  # pragma: no cover, should never happen...
        logger.critical("Error when writing the configuration dump file %s: %s",
                        path, str(exp))
Dump scheduler configuration into a temporary file
120
9
19,531
def add_notification(self, notification):
    if notification.uuid in self.actions:
        logger.warning("Already existing notification: %s", notification)
        return

    logger.debug("Adding a notification: %s", notification)
    self.actions[notification.uuid] = notification
    self.nb_notifications += 1

    # A notification which is not a master one asks for a brok
    if notification.contact is not None:
        self.add(notification.get_initial_status_brok())
Add a notification into actions list
111
6
19,532
def add_check(self, check):
    if check is None:
        return
    if check.uuid in self.checks:
        logger.debug("Already existing check: %s", check)
        return
    logger.debug("Adding a check: %s", check)

    # Add a new check to the scheduler checks list
    self.checks[check.uuid] = check
    self.nb_checks += 1

    # Raise a brok to inform that a next check is to come ...
    # but only for items that are actively checked
    item = self.find_item_by_id(check.ref)
    if item.active_checks_enabled:
        self.add(item.get_next_schedule_brok())
Add a check into the scheduler checks list
154
9
19,533
def add_event_handler(self, action):
    if action.uuid in self.actions:
        logger.info("Already existing event handler: %s", action)
        return

    self.actions[action.uuid] = action
    self.nb_event_handlers += 1
Add an event handler into the actions list
61
7
19,534
def hook_point(self, hook_name):
    self.my_daemon.hook_point(hook_name=hook_name, handle=self)
Generic function to call module methods if such a method is available
35
13
19,535
def update_business_values(self):
    for elt in self.all_my_hosts_and_services():
        if not elt.is_problem:
            was = elt.business_impact
            elt.update_business_impact_value(self.hosts, self.services,
                                             self.timeperiods,
                                             self.businessimpactmodulations)
            new = elt.business_impact
            # Ok, the business_impact changed, we can update the broks
            if new != was:
                self.get_and_register_status_brok(elt)

    # When all impacts and classic elements are updated,
    # we can update the problems (their value depends on impacts, so
    # they must be done after)
    for elt in self.all_my_hosts_and_services():
        # We first update impacts and classic elements
        if elt.is_problem:
            was = elt.business_impact
            elt.update_business_impact_value(self.hosts, self.services,
                                             self.timeperiods,
                                             self.businessimpactmodulations)
            new = elt.business_impact
            # Maybe one of the impacts changed its business_impact to a high value
            # and so asks for the problem to raise too
            if new != was:
                self.get_and_register_status_brok(elt)
Iterate over hosts and services and update their business_impact
282
10
19,536
def scatter_master_notifications(self):
    now = time.time()

    # We only want the master scheduled notifications that are immediately launchable
    notifications = [a for a in self.actions.values()
                     if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED
                     and not a.contact and a.is_launchable(now)]
    if notifications:
        logger.debug("Scatter master notification: %d notifications", len(notifications))
    for notification in notifications:
        logger.debug("Scheduler got a master notification: %s", notification)

        # This is a "master" notification created by a host/service.
        # We use it to create children notifications (for the contacts and
        # notification_commands) which are executed in the reactionner.
        item = self.find_item_by_id(notification.ref)
        children = []
        notification_period = None
        if getattr(item, 'notification_period', None) is not None:
            notification_period = self.timeperiods[item.notification_period]
        if not item.is_blocking_notifications(notification_period,
                                              self.hosts, self.services,
                                              notification.type, now):
            # If it is possible to send notifications
            # of this type at the current time, then create
            # a single notification for each contact of this item.
            children = item.scatter_notification(
                notification, self.contacts, self.notificationways, self.timeperiods,
                self.macromodulations, self.escalations,
                self.find_item_by_id(getattr(item, "host", None)))
            for notif in children:
                logger.debug(" - child notification: %s", notif)
                notif.status = ACT_STATUS_SCHEDULED
                # Add the notification to the scheduler objects
                self.add(notif)

        # If we have notification_interval then schedule
        # the next notification (problems only)
        if notification.type == u'PROBLEM':
            # Update the ref notif number after raising the notification
            if children:
                # notif_nb of the master notification
                # was already current_notification_number+1.
                # If notifications were sent,
                # then the host/service counter will also be incremented
                item.current_notification_number = notification.notif_nb

            if item.notification_interval and notification.t_to_go is not None:
                # We must continue to send notifications.
                # Just leave it in the actions list and set it to "scheduled"
                # and it will be found again later.
                # Ask the service/host to compute the next notif time. It can be just
                # a.t_to_go + item.notification_interval * item.__class__.interval_length
                # or maybe before, because an escalation may need to raise up earlier
                notification.t_to_go = item.get_next_notification_time(notification,
                                                                       self.escalations,
                                                                       self.timeperiods)
                notification.notif_nb = item.current_notification_number + 1
                logger.debug("Repeat master notification: %s", notification)
            else:
                # Wipe out this master notification. It is a master one
                item.remove_in_progress_notification(notification)
                logger.debug("Remove master notification (no repeat): %s", notification)
        else:
            # Wipe out this master notification.
            logger.debug("Remove master notification (no more a problem): %s",
                         notification)
            # We don't repeat recover/downtime/flap/etc...
            item.remove_in_progress_notification(notification)
Generate child notifications from a master notification and update the notification number
796
12
19,537
def manage_internal_checks(self):
    if os.getenv('ALIGNAK_MANAGE_INTERNAL', '1') != '1':
        return
    now = time.time()
    for chk in list(self.checks.values()):
        if not chk.internal:
            # Exclude checks that are not internal ones
            continue
        # Exclude checks that are not yet ready to launch
        if not chk.is_launchable(now) or chk.status not in [ACT_STATUS_SCHEDULED]:
            continue
        item = self.find_item_by_id(chk.ref)
        # Only if active checks are enabled
        if not item or not item.active_checks_enabled:
            # Ask to remove the check
            chk.status = ACT_STATUS_ZOMBIE
            continue

        logger.debug("Run internal check for %s", item)
        self.nb_internal_checks += 1

        # Execute internal check
        item.manage_internal_check(self.hosts, self.services, chk, self.hostgroups,
                                   self.servicegroups, self.macromodulations,
                                   self.timeperiods)
        # Ask to consume the check result
        chk.status = ACT_STATUS_WAIT_CONSUME
Run internal checks
276
3
19,538
def reset_topology_change_flag(self):
    for i in self.hosts:
        i.topology_change = False
    for i in self.services:
        i.topology_change = False
Set topology_change attribute to False in all hosts and services
43
13
19,539
def log_initial_states(self):
    # Raise hosts initial status broks
    for elt in self.hosts:
        elt.raise_initial_state()
    # And then services initial status broks
    for elt in self.services:
        elt.raise_initial_state()
Raise hosts and services initial status logs
61
8
19,540
def get_retention_data(self):
    # pylint: disable=too-many-branches, too-many-statements, too-many-locals
    retention_data = {'hosts': {}, 'services': {}}
    for host in self.hosts:
        h_dict = {}
        # Get the hosts properties and running properties
        properties = host.__class__.properties
        properties.update(host.__class__.running_properties)
        for prop, entry in list(properties.items()):
            if not entry.retention:
                continue
            val = getattr(host, prop)
            # If a preparation function exists...
            prepare_retention = entry.retention_preparation
            if prepare_retention:
                val = prepare_retention(host, val)
            h_dict[prop] = val
        retention_data['hosts'][host.host_name] = h_dict
    logger.info('%d hosts sent to retention', len(retention_data['hosts']))

    # Same for services
    for service in self.services:
        s_dict = {}
        # Get the services properties and running properties
        properties = service.__class__.properties
        properties.update(service.__class__.running_properties)
        for prop, entry in list(properties.items()):
            if not entry.retention:
                continue
            val = getattr(service, prop)
            # If a preparation function exists...
            prepare_retention = entry.retention_preparation
            if prepare_retention:
                val = prepare_retention(service, val)
            s_dict[prop] = val
        retention_data['services'][(service.host_name,
                                    service.service_description)] = s_dict
    logger.info('%d services sent to retention', len(retention_data['services']))

    return retention_data
Get all hosts and services data to be sent to the retention storage .
406
14
19,541
def restore_retention_data(self, data):
    if 'hosts' not in data:
        logger.warning("Retention data are not correct, no 'hosts' property!")
        return

    for host_name in data['hosts']:
        # We take the dict of our value to load
        host = self.hosts.find_by_name(host_name)
        if host is not None:
            self.restore_retention_data_item(data['hosts'][host_name], host)
    statsmgr.gauge('retention.hosts', len(data['hosts']))
    logger.info('%d hosts restored from retention', len(data['hosts']))

    # Same for services
    for (host_name, service_description) in data['services']:
        # We take our dict to load
        service = self.services.find_srv_by_name_and_hostname(host_name,
                                                              service_description)
        if service is not None:
            self.restore_retention_data_item(
                data['services'][(host_name, service_description)], service)
    statsmgr.gauge('retention.services', len(data['services']))
    logger.info('%d services restored from retention', len(data['services']))
Restore retention data
303
4
19,542
def restore_retention_data_item(self, data, item):
    # pylint: disable=too-many-branches, too-many-locals
    # Manage the properties and running properties
    properties = item.__class__.properties
    properties.update(item.__class__.running_properties)
    for prop, entry in list(properties.items()):
        if not entry.retention:
            continue
        if prop not in data:
            continue
        # If a restoration function exists...
        restore_retention = entry.retention_restoration
        if restore_retention:
            setattr(item, prop, restore_retention(item, data[prop]))
        else:
            setattr(item, prop, data[prop])

    # Now manage all linked objects loaded from the previous run
    for notification_uuid in item.notifications_in_progress:
        notification = item.notifications_in_progress[notification_uuid]
        # Update the notification referenced object
        notification['ref'] = item.uuid
        my_notification = Notification(params=notification)
        item.notifications_in_progress[notification_uuid] = my_notification
        # Add a notification in the scheduler actions
        self.add(my_notification)

    # todo: is it useful? We do not save/restore checks in the retention data...
    item.update_in_checking()

    # And also add downtimes and comments
    # Downtimes are in a list..
    for downtime_uuid in data['downtimes']:
        downtime = data['downtimes'][downtime_uuid]
        # Update the downtime referenced object
        downtime['ref'] = item.uuid
        my_downtime = Downtime(params=downtime)
        if downtime['comment_id']:
            if downtime['comment_id'] not in data['comments']:
                downtime['comment_id'] = ''
        # Case where comment_id contains a comment dict instead of an uuid
        # todo: This should never happen! Why this code?
        if 'uuid' in downtime['comment_id']:
            data['comments'].append(downtime['comment_id'])
            downtime['comment_id'] = downtime['comment_id']['uuid']
        item.add_downtime(my_downtime)

    # Comments are in a list..
    for comment_uuid in data['comments']:
        comment = data['comments'][comment_uuid]
        # Update the comment referenced object
        comment['ref'] = item.uuid
        item.add_comment(Comment(comment))

    if item.acknowledgement is not None:
        # Update the acknowledgement referenced object
        item.acknowledgement['ref'] = item.uuid
        item.acknowledgement = Acknowledge(item.acknowledgement)

    # Relink the notified_contacts as a set() of true contact objects;
    # if it was loaded from the retention, it's now a list of contact names
    new_notified_contacts = set()
    new_notified_contacts_ids = set()
    for contact_name in item.notified_contacts:
        contact = self.contacts.find_by_name(contact_name)
        if contact is not None:
            new_notified_contacts.add(contact_name)
            new_notified_contacts_ids.add(contact.uuid)
    item.notified_contacts = new_notified_contacts
    item.notified_contacts_ids = new_notified_contacts_ids
Restore data in item
755
5
19,543
def fill_initial_broks(self, broker_name):  # pylint: disable=too-many-branches
    broker_uuid = None
    logger.debug("My brokers: %s", self.my_daemon.brokers)
    for broker_link in list(self.my_daemon.brokers.values()):
        logger.debug("Searching broker: %s", broker_link)
        if broker_name == broker_link.name:
            broker_uuid = broker_link.uuid
            logger.info("Filling initial broks for: %s (%s)", broker_name, broker_uuid)
            break
    else:
        if self.pushed_conf:
            # I am configured but I do not know this broker! Something went wrong!
            logger.error("Requested initial broks for an unknown broker: %s", broker_name)
        else:
            logger.info("Requested initial broks for an unknown broker: %s", broker_name)
        return 0

    if self.my_daemon.brokers[broker_uuid].initialized:
        logger.warning("The broker %s already got its initial broks...", broker_name)
        return 0

    initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)

    # First the program status
    brok = self.get_program_status_brok()
    self.add_brok(brok, broker_uuid)

    # We can't call initial_status for all these types;
    # the order is important, services need hosts...
    initial_status_types = (self.timeperiods, self.commands, self.contacts,
                            self.contactgroups, self.hosts, self.hostgroups,
                            self.services, self.servicegroups)

    self.pushed_conf.skip_initial_broks = getattr(self.pushed_conf,
                                                  'skip_initial_broks', False)
    logger.debug("Skipping initial broks? %s", str(self.pushed_conf.skip_initial_broks))
    if not self.pushed_conf.skip_initial_broks:
        # We call initial_status for all these types;
        # the order is important, services need hosts...
        initial_status_types = (self.realms, self.timeperiods, self.commands,
                                self.notificationways, self.contacts,
                                self.contactgroups, self.hosts, self.hostgroups,
                                self.hostdependencies, self.services,
                                self.servicegroups, self.servicedependencies,
                                self.escalations)

    for tab in initial_status_types:
        for item in tab:
            # Awful! simply to get the group members property name... :(
            # todo: replace this!
            member_items = None
            if hasattr(item, 'members'):
                member_items = getattr(self, item.my_type.replace("group", "s"))
            brok = item.get_initial_status_brok(member_items)
            self.add_brok(brok, broker_uuid)

    # Add a brok to say that we finished all initial_pass
    brok = Brok({'type': 'initial_broks_done',
                 'data': {'instance_id': self.instance_id}})
    self.add_brok(brok, broker_uuid)

    final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
    self.my_daemon.brokers[broker_uuid].initialized = True

    # Send the initial broks to our modules
    self.send_broks_to_modules()

    # We now have raised all the initial broks
    self.raised_initial_broks = True

    logger.info("Created %d initial broks for %s",
                final_broks_count - initial_broks_count, broker_name)
    return final_broks_count - initial_broks_count
Create initial broks for a specific broker
871
8
19,544
def get_program_status_brok(self, brok_type='program_status'):
    # Get the running statistics
    data = {
        "is_running": True,
        "instance_id": self.instance_id,
        # "alignak_name": self.alignak_name,
        "instance_name": self.name,
        "last_alive": time.time(),
        "pid": os.getpid(),
        '_running': self.get_scheduler_stats(details=True),
        '_config': {},
        '_macros': {}
    }

    # Get configuration data from the pushed configuration
    cls = self.pushed_conf.__class__
    for prop, entry in list(cls.properties.items()):
        # Is this property intended for broking?
        if 'full_status' not in entry.fill_brok:
            continue
        data['_config'][prop] = self.pushed_conf.get_property_value_for_brok(
            prop, cls.properties)
        # data['_config'][prop] = getattr(self.pushed_conf, prop, entry.default)

    # Get the macros from the pushed configuration and try to resolve
    # the macros to provide the result in the status brok
    macro_resolver = MacroResolver()
    macro_resolver.init(self.pushed_conf)
    for macro_name in sorted(self.pushed_conf.macros):
        data['_macros'][macro_name] = \
            macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name,
                                                           [], None, None)

    logger.debug("Program status brok %s data: %s", brok_type, data)
    return Brok({'type': brok_type, 'data': data})
Create a program status brok
415
6
19,545
def consume_results(self):  # pylint: disable=too-many-branches
    # All results are in self.waiting_results
    # We need to get them first
    queue_size = self.waiting_results.qsize()
    for _ in range(queue_size):
        self.manage_results(self.waiting_results.get())

    # Then we consume them
    for chk in list(self.checks.values()):
        if chk.status == ACT_STATUS_WAIT_CONSUME:
            logger.debug("Consuming: %s", chk)
            item = self.find_item_by_id(chk.ref)

            notification_period = None
            if getattr(item, 'notification_period', None) is not None:
                notification_period = self.timeperiods[item.notification_period]

            dep_checks = item.consume_result(chk, notification_period, self.hosts,
                                             self.services, self.timeperiods,
                                             self.macromodulations,
                                             self.checkmodulations,
                                             self.businessimpactmodulations,
                                             self.resultmodulations, self.checks,
                                             self.pushed_conf.log_active_checks
                                             and not chk.passive_check)

            # # Raise the log only when the check got consumed!
            # # Else the item information are not up-to-date :/
            # if self.pushed_conf.log_active_checks and not chk.passive_check:
            #     item.raise_check_result()

            for check in dep_checks:
                logger.debug("-> raised a dependency check: %s", chk)
                self.add(check)

    # Loop to resolve dependencies
    have_resolved_checks = True
    while have_resolved_checks:
        have_resolved_checks = False
        # All 'finished' checks (no more dep) raise checks they depend on
        for chk in list(self.checks.values()):
            if chk.status == ACT_STATUS_WAITING_ME:
                for dependent_checks in chk.depend_on_me:
                    # Ok, now dependent will no more wait
                    dependent_checks.depend_on.remove(chk.uuid)
                    have_resolved_checks = True
                # REMOVE OLD DEP CHECK -> zombie
                chk.status = ACT_STATUS_ZOMBIE

        # Now reintegrate dep checks
        for chk in list(self.checks.values()):
            if chk.status == ACT_STATUS_WAIT_DEPEND and not chk.depend_on:
                item = self.find_item_by_id(chk.ref)
                notification_period = None
                if getattr(item, 'notification_period', None) is not None:
                    notification_period = self.timeperiods[item.notification_period]
                dep_checks = item.consume_result(chk, notification_period, self.hosts,
                                                 self.services, self.timeperiods,
                                                 self.macromodulations,
                                                 self.checkmodulations,
                                                 self.businessimpactmodulations,
                                                 self.resultmodulations, self.checks,
                                                 self.pushed_conf.log_active_checks
                                                 and not chk.passive_check)
                for check in dep_checks:
                    self.add(check)
Handle results waiting in waiting_results list . Check ref will call consume result and update their status
710
19
19,546
def get_new_actions(self):
    _t0 = time.time()
    self.hook_point('get_new_actions')
    statsmgr.timer('hook.get-new-actions', time.time() - _t0)

    # Ask the services and hosts for their next check
    for elt in self.all_my_hosts_and_services():
        for action in elt.actions:
            logger.debug("Got a new action for %s: %s", elt, action)
            self.add(action)
        # We took them all, we can clear the list
        elt.actions = []
Call get_new_actions hook point Iter over all hosts and services to add new actions in internal lists
139
21
19,547
def get_new_broks(self):
    # Ask the services and hosts for the broks waiting to be eaten
    for elt in self.all_my_hosts_and_services():
        for brok in elt.broks:
            self.add(brok)
        # We got them all, clear the item broks list
        elt.broks = []

    # Also fetch broks from contacts (like contactdowntime)
    for contact in self.contacts:
        for brok in contact.broks:
            self.add(brok)
        # We got them all, clear the contact broks list
        contact.broks = []
Iter over all hosts and services to add new broks in internal lists
132
14
19,548
def send_broks_to_modules(self):
    t00 = time.time()
    nb_sent = 0
    broks = []
    for broker_link in list(self.my_daemon.brokers.values()):
        for brok in broker_link.broks:
            if not getattr(brok, 'sent_to_externals', False):
                brok.to_send = True
                broks.append(brok)
    if not broks:
        return
    logger.debug("sending %d broks to modules...", len(broks))

    for mod in self.my_daemon.modules_manager.get_external_instances():
        logger.debug("Look for sending to module %s", mod.get_name())
        module_queue = mod.to_q
        if module_queue:
            to_send = [b for b in broks if mod.want_brok(b)]
            module_queue.put(to_send)
            nb_sent += len(to_send)

    # No more need to send them
    for broker_link in list(self.my_daemon.brokers.values()):
        for brok in broker_link.broks:
            if not getattr(brok, 'sent_to_externals', False):
                brok.to_send = False
                brok.sent_to_externals = True

    logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00)
Put broks into module queues Only broks without sent_to_externals to True are sent Only modules that ask for broks will get some
344
31
19,549
def find_item_by_id(self, object_id):
    # Item id may be an item
    if isinstance(object_id, Item):
        return object_id

    # Item id should be a uuid string
    if not isinstance(object_id, string_types):
        logger.debug("Find an item by id, object_id is not int nor string: %s",
                     object_id)
        return object_id

    for items in [self.hosts, self.services, self.actions, self.checks,
                  self.hostgroups, self.servicegroups, self.contacts,
                  self.contactgroups]:
        if object_id in items:
            return items[object_id]

    # raise AttributeError("Item with id %s not found" % object_id)  # pragma: no cover
    logger.error("Item with id %s not found", str(object_id))  # pragma: no cover
    return None
Get item based on its id or uuid
203
9
19,550
def before_run(self):
    # Actions and checks counters
    self.nb_checks = 0
    self.nb_internal_checks = 0
    self.nb_checks_launched = 0
    self.nb_actions_launched = 0

    self.nb_checks_results = 0
    self.nb_checks_results_timeout = 0
    self.nb_checks_results_passive = 0
    self.nb_checks_results_active = 0

    self.nb_actions_results = 0
    self.nb_actions_results_timeout = 0
    self.nb_actions_results_passive = 0

    self.nb_broks_dropped = 0
    self.nb_checks_dropped = 0
    self.nb_actions_dropped = 0

    # Broks, notifications, ... counters
    self.nb_broks = 0
    self.nb_notifications = 0
    self.nb_event_handlers = 0
    self.nb_external_commands = 0

    self.ticks = 0
Initialize the scheduling process
222
5
19,551
def setup_new_conf(self):
    # Execute the base class treatment...
    super(Receiver, self).setup_new_conf()

    # ...then our own specific treatment!
    with self.conf_lock:
        # self_conf is our own configuration from the alignak environment
        # self_conf = self.cur_conf['self_conf']
        logger.debug("Got config: %s", self.cur_conf)

        # Configure and start our modules
        if not self.have_modules:
            try:
                self.modules = unserialize(self.cur_conf['modules'], no_load=True)
            except AlignakClassLookupException as exp:  # pragma: no cover
                logger.error('Cannot un-serialize modules configuration '
                             'received from arbiter: %s', exp)
            if self.modules:
                logger.info("I received some modules configuration: %s", self.modules)
                self.have_modules = True

                self.do_load_modules(self.modules)
                # and start external modules too
                self.modules_manager.start_external_instances()
            else:
                logger.info("I do not have modules")

        # Now create the external commands manager
        # We are a receiver: our role is to get and dispatch commands to the schedulers
        global_conf = self.cur_conf.get('global_conf', None)
        if not global_conf:
            logger.error("Received a configuration without any global_conf! "
                         "This may hide a configuration problem with the "
                         "realms and the manage_sub_realms of the satellites!")
            global_conf = {
                'accept_passive_unknown_check_results': False,
                'log_external_commands': True
            }
        self.external_commands_manager = ExternalCommandManager(
            None, 'receiver', self,
            global_conf.get('accept_passive_unknown_check_results', False),
            global_conf.get('log_external_commands', False))

        # Initialize connection with all our satellites
        logger.info("Initializing connection with my satellites:")
        my_satellites = self.get_links_of_type(s_type='')
        for satellite in list(my_satellites.values()):
            logger.info("- : %s/%s", satellite.type, satellite.name)
            if not self.daemon_connection_init(satellite):
                logger.error("Satellite connection failed: %s", satellite)

    # Now I have a configuration!
    self.have_conf = True
Receiver custom setup_new_conf method
568
9
19,552
def get_external_commands_from_arbiters(self):
    for arbiter_link_uuid in self.arbiters:
        link = self.arbiters[arbiter_link_uuid]

        if not link.active:
            logger.debug("The arbiter '%s' is not active, it is not possible to get "
                         "its external commands!", link.name)
            continue

        try:
            logger.debug("Getting external commands from: %s", link.name)
            external_commands = link.get_external_commands()
            if external_commands:
                logger.debug("Got %d commands from: %s",
                             len(external_commands), link.name)
            else:
                # Simple protection against None value
                external_commands = []
            for external_command in external_commands:
                self.add(external_command)
        except LinkError:
            logger.warning("Arbiter connection failed, I could not get "
                           "external commands!")
        except Exception as exp:  # pylint: disable=broad-except
            logger.error("Arbiter connection failed, I could not get external commands!")
            logger.exception("Exception: %s", exp)
Get external commands from our arbiters
257
7
19,553
def push_external_commands_to_schedulers(self):
    if not self.unprocessed_external_commands:
        return

    # Those are the global external commands
    commands_to_process = self.unprocessed_external_commands
    self.unprocessed_external_commands = []
    logger.debug("Commands: %s", commands_to_process)

    # Now get all external commands and put them into the good schedulers
    logger.debug("Commands to process: %d commands", len(commands_to_process))
    for ext_cmd in commands_to_process:
        cmd = self.external_commands_manager.resolve_command(ext_cmd)
        logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
        if cmd and cmd['global']:
            # Send global command to all our schedulers
            for scheduler_link_uuid in self.schedulers:
                self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)

    # Now for all active schedulers, send the commands
    count_pushed_commands = 0
    count_failed_commands = 0
    for scheduler_link_uuid in self.schedulers:
        link = self.schedulers[scheduler_link_uuid]

        if not link.active:
            logger.debug("The scheduler '%s' is not active, it is not possible to push "
                         "external commands to its connection!", link.name)
            continue

        # If there are some commands for this scheduler...
        commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
        if not commands:
            logger.debug("The scheduler '%s' has no commands.", link.name)
            continue

        logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
        sent = []
        try:
            sent = link.push_external_commands(commands)
        except LinkError:
            logger.warning("Scheduler connection failed, I could not push "
                           "external commands!")

        # Whether we sent the commands or not, clean the scheduler list
        link.pushed_commands = []

        # If we didn't send them, add the commands back to the arbiter list
        if sent:
            statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
            count_pushed_commands = count_pushed_commands + len(commands)
        else:
            count_failed_commands = count_failed_commands + len(commands)
            statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
            # Keep the not sent commands... for a next try
            self.external_commands.extend(commands)

    statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
    statsmgr.gauge('external-commands.failed.all', count_failed_commands)
Push received external commands to the schedulers
683
9
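A minimal sketch of the routing logic in push_external_commands_to_schedulers above may help: a command resolved as 'global' is queued on every scheduler link, and each active link's queue is then flushed in a single push. The FakeSchedulerLink class below is a hypothetical stand-in for the Alignak SchedulerLink, not the real API.

    class FakeSchedulerLink:
        def __init__(self, name, active=True):
            self.name = name
            self.active = active
            self.pushed_commands = []

        def push_external_commands(self, commands):
            # In Alignak this would be an HTTP POST to the scheduler daemon
            print("%s <- %s" % (self.name, commands))
            return True

    schedulers = [FakeSchedulerLink("scheduler-1"), FakeSchedulerLink("scheduler-2")]

    # A command resolved as 'global' is queued on every scheduler link
    for link in schedulers:
        link.pushed_commands.append("[1700000000] RESTART_PROGRAM")

    # ... then each active link's queue is flushed in one push
    for link in schedulers:
        if not link.active or not link.pushed_commands:
            continue
        sent = link.push_external_commands(link.pushed_commands)
        # the queue is cleared whether the push succeeded or not
        link.pushed_commands = []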
19,554
def do_loop_turn ( self ) : # Begin to clean modules self . check_and_del_zombie_modules ( ) # Maybe the arbiter pushed a new configuration... if self . watch_for_new_conf ( timeout = 0.05 ) : logger . info ( "I got a new configuration..." ) # Manage the new configuration self . setup_new_conf ( ) # Maybe external modules raised 'objects' # we should get them _t0 = time . time ( ) self . get_objects_from_from_queues ( ) statsmgr . timer ( 'core.get-objects-from-queues' , time . time ( ) - _t0 ) # Get external commands from the arbiters... _t0 = time . time ( ) self . get_external_commands_from_arbiters ( ) statsmgr . timer ( 'external-commands.got.time' , time . time ( ) - _t0 ) statsmgr . gauge ( 'external-commands.got.count' , len ( self . unprocessed_external_commands ) ) _t0 = time . time ( ) self . push_external_commands_to_schedulers ( ) statsmgr . timer ( 'external-commands.pushed.time' , time . time ( ) - _t0 ) # Say to modules it's a new tick :) _t0 = time . time ( ) self . hook_point ( 'tick' ) statsmgr . timer ( 'hook.tick' , time . time ( ) - _t0 )
Receiver daemon main loop
346
5
19,555
def serialize ( self ) : res = super ( Check , self ) . serialize ( ) if 'depend_on' in res : del res [ 'depend_on' ] if 'depend_on_me' in res : del res [ 'depend_on_me' ] return res
This function serializes into a simple dict object.
62
10
19,556
def serialize ( self ) : # uuid is not in *_properties res = { 'uuid' : self . uuid } for prop in self . __class__ . properties : if not hasattr ( self , prop ) : continue res [ prop ] = getattr ( self , prop ) if isinstance ( self . __class__ . properties [ prop ] , SetProp ) : res [ prop ] = list ( getattr ( self , prop ) ) return res
This function serializes into a simple dictionary object.
99
10
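A minimal, self-contained sketch of the properties-driven serialization above, assuming simplified SetProp/StringProp stand-ins for the Alignak property classes: the method walks the class-level property table and converts set-typed values to lists so the result stays JSON-friendly.

    import uuid

    class SetProp:
        pass

    class StringProp:
        pass

    class Item:
        # one class-level table drives the whole serialization
        properties = {'name': StringProp(), 'tags': SetProp()}

        def __init__(self, name, tags):
            self.uuid = str(uuid.uuid4())
            self.name = name
            self.tags = set(tags)

        def serialize(self):
            res = {'uuid': self.uuid}  # uuid is not in the property table
            for prop in self.__class__.properties:
                if not hasattr(self, prop):
                    continue
                value = getattr(self, prop)
                if isinstance(self.__class__.properties[prop], SetProp):
                    value = list(value)  # sets are not JSON-serializable
                res[prop] = value
            return res

    print(Item('host-1', {'linux', 'prod'}).serialize())
    # {'uuid': '...', 'name': 'host-1', 'tags': ['prod', 'linux']}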
19,557
def fill_default ( self ) : for prop , entry in self . __class__ . properties . items ( ) : if hasattr ( self , prop ) : continue if not hasattr ( entry , 'default' ) or entry . default is NONE_OBJECT : continue if hasattr ( entry . default , '__iter__' ) : setattr ( self , prop , copy ( entry . default ) ) else : setattr ( self , prop , entry . default )
Define the object properties with a default value when the property is not yet defined
100
16
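The copy() call in fill_default above matters for mutable defaults. A short illustration (ListProp is a hypothetical stand-in for the Alignak property classes): without a per-object copy, every object would share one default list and one object's append would leak into all the others.

    from copy import copy

    class ListProp:
        def __init__(self, default):
            self.default = default

    class Item:
        properties = {'contacts': ListProp(default=[])}

        def fill_default(self):
            for prop, entry in self.__class__.properties.items():
                if hasattr(self, prop):
                    continue  # never overwrite an explicitly set property
                if hasattr(entry.default, '__iter__'):
                    setattr(self, prop, copy(entry.default))  # fresh copy per object
                else:
                    setattr(self, prop, entry.default)

    a, b = Item(), Item()
    a.fill_default()
    b.fill_default()
    a.contacts.append('admin')
    print(b.contacts)  # [] -- b is unaffected thanks to the copy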
19,558
def fill_predictive_missing_parameters ( self ) : if hasattr ( self , 'host_name' ) and not hasattr ( self , 'address' ) : self . address = self . host_name if hasattr ( self , 'host_name' ) and not hasattr ( self , 'alias' ) : self . alias = self . host_name if self . initial_state == 'd' : self . state = 'DOWN' elif self . initial_state == 'x' : self . state = 'UNREACHABLE'
Fill address with host_name if not already set and define state with initial_state
121
17
19,559
def get_groupnames ( self , hostgroups ) : group_names = [ ] for hostgroup_id in self . hostgroups : hostgroup = hostgroups [ hostgroup_id ] group_names . append ( hostgroup . get_name ( ) ) return ',' . join ( sorted ( group_names ) )
Get the names of the host's hostgroups
68
8
19,560
def get_groupaliases ( self , hostgroups ) : group_aliases = [ ] for hostgroup_id in self . hostgroups : hostgroup = hostgroups [ hostgroup_id ] group_aliases . append ( hostgroup . alias ) return ',' . join ( sorted ( group_aliases ) )
Get the aliases of the host's hostgroups
68
8
19,561
def set_state_from_exit_status ( self , status , notif_period , hosts , services ) : now = time . time ( ) # we should put in last_state the good last state: # if not just change the state by a problem/impact # we can take current state. But if it's the case, the # real old state is self.state_before_impact (it's the TRUE # state in fact) # And only if we enable the impact state change cls = self . __class__ if ( cls . enable_problem_impacts_states_change and self . is_impact and not self . state_changed_since_impact ) : self . last_state = self . state_before_impact else : self . last_state = self . state # There is no 1 case because it should have been managed by the caller for a host # like the schedulingitem::consume method. if status == 0 : self . state = u'UP' self . state_id = 0 self . last_time_up = int ( self . last_state_update ) # self.last_time_up = self.last_state_update state_code = 'u' elif status in ( 2 , 3 ) : self . state = u'DOWN' self . state_id = 1 self . last_time_down = int ( self . last_state_update ) # self.last_time_down = self.last_state_update state_code = 'd' elif status == 4 : self . state = u'UNREACHABLE' self . state_id = 4 self . last_time_unreachable = int ( self . last_state_update ) # self.last_time_unreachable = self.last_state_update state_code = 'x' else : self . state = u'DOWN' # exit code UNDETERMINED self . state_id = 1 # self.last_time_down = int(self.last_state_update) self . last_time_down = self . last_state_update state_code = 'd' if state_code in self . flap_detection_options : self . add_flapping_change ( self . state != self . last_state ) # Now we add a value, we update the is_flapping prop self . update_flapping ( notif_period , hosts , services ) if self . state != self . last_state and not ( self . state == "DOWN" and self . last_state == "UNREACHABLE" ) : self . last_state_change = self . last_state_update self . duration_sec = now - self . last_state_change
Set the state to UP, DOWN or UNREACHABLE according to the status of a check result.
590
20
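Stripped of the flapping and impact handling, the exit-status to host-state mapping in set_state_from_exit_status above reduces to the following stand-alone sketch (exit status 1 is absent because, as the comment in the source notes, it is handled by the caller):

    def host_state_from_exit_status(status):
        # returns (state, state_id, state_code)
        if status == 0:
            return 'UP', 0, 'u'
        if status in (2, 3):
            return 'DOWN', 1, 'd'
        if status == 4:
            return 'UNREACHABLE', 4, 'x'
        return 'DOWN', 1, 'd'  # undetermined exit codes degrade to DOWN

    for code in (0, 2, 3, 4, 99):
        print(code, '->', host_state_from_exit_status(code))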
19,562
def is_state ( self , status ) : if status == self . state : return True # Now low status if status == 'o' and self . state == u'UP' : return True if status == 'd' and self . state == u'DOWN' : return True if status in [ 'u' , 'x' ] and self . state == u'UNREACHABLE' : return True return False
Return whether the given status matches the current host state
88
8
19,563
def last_time_non_ok_or_up ( self ) : non_ok_times = [ x for x in [ self . last_time_down ] if x > self . last_time_up ] if not non_ok_times : last_time_non_ok = 0 # todo: program_start would be better? else : last_time_non_ok = min ( non_ok_times ) return last_time_non_ok
Get the last time the host was in a non-OK state
101
13
19,564
def notification_is_blocked_by_contact ( self , notifways , timeperiods , notif , contact ) : return not contact . want_host_notification ( notifways , timeperiods , self . last_chk , self . state , notif . type , self . business_impact , notif . command_call )
Check if the notification is blocked by this contact.
76
10
19,565
def _tot_services_by_state ( self , services , state ) : return str ( sum ( 1 for s in self . services if services [ s ] . state_id == state ) )
Get the number of services in the specified state
43
9
19,566
def get_overall_state ( self , services ) : overall_state = 0 if not self . monitored : overall_state = 5 elif self . acknowledged : overall_state = 1 elif self . downtimed : overall_state = 2 elif self . state_type == 'HARD' : if self . state == 'UNREACHABLE' : overall_state = 3 elif self . state == 'DOWN' : overall_state = 4 # Only consider the hosts services state if all is ok (or almost...) if overall_state <= 2 : for service in self . services : if service in services : service = services [ service ] # Only for monitored services if service . overall_state_id < 5 : overall_state = max ( overall_state , service . overall_state_id ) return overall_state
Get the host's overall state, including the host's own status and the status of its services
176
16
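The precedence ladder in get_overall_state above can be restated as a stand-alone function; this is a sketch under the assumption that the services' overall states are already computed, using the encoding visible in the source (0=ok, 1=acknowledged, 2=downtimed, 3=unreachable, 4=down, 5=not monitored):

    def host_overall_state(monitored, acknowledged, downtimed,
                           state_type, state, service_overall_states):
        overall = 0
        if not monitored:
            overall = 5
        elif acknowledged:
            overall = 1
        elif downtimed:
            overall = 2
        elif state_type == 'HARD':
            if state == 'UNREACHABLE':
                overall = 3
            elif state == 'DOWN':
                overall = 4
        # services are only consulted when the host itself is (almost) ok
        if overall <= 2:
            monitored_services = [s for s in service_overall_states if s < 5]
            overall = max([overall] + monitored_services)
        return overall

    # An UP host with one ok and one down service takes the worst service state:
    print(host_overall_state(True, False, False, 'HARD', 'UP', [0, 4]))  # 4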
19,567
def linkify_h_by_h ( self ) : for host in self : # The new member list new_parents = [ ] for parent in getattr ( host , 'parents' , [ ] ) : parent = parent . strip ( ) o_parent = self . find_by_name ( parent ) if o_parent is not None : new_parents . append ( o_parent . uuid ) else : err = "the parent '%s' for the host '%s' is unknown!" % ( parent , host . get_name ( ) ) self . add_error ( err ) # We find the id, we replace the names host . parents = new_parents
Link hosts with their parents
145
5
19,568
def linkify_h_by_hg ( self , hostgroups ) : # Register host in the hostgroups for host in self : new_hostgroups = [ ] if hasattr ( host , 'hostgroups' ) and host . hostgroups != [ ] : hgs = [ n . strip ( ) for n in host . hostgroups if n . strip ( ) ] for hg_name in hgs : # TODO: should an unknown hostgroup raise an error ? hostgroup = hostgroups . find_by_name ( hg_name ) if hostgroup is not None : new_hostgroups . append ( hostgroup . uuid ) else : err = ( "the hostgroup '%s' of the host '%s' is " "unknown" % ( hg_name , host . host_name ) ) host . add_error ( err ) host . hostgroups = new_hostgroups
Link hosts with hostgroups
194
5
19,569
def apply_dependencies ( self ) : for host in self : for parent_id in getattr ( host , 'parents' , [ ] ) : if parent_id is None : continue parent = self [ parent_id ] if parent . active_checks_enabled : # Add parent in the list host . act_depend_of . append ( ( parent_id , [ 'd' , 'x' , 's' , 'f' ] , '' , True ) ) # Add child in the parent parent . act_depend_of_me . append ( ( host . uuid , [ 'd' , 'x' , 's' , 'f' ] , '' , True ) ) # And add the parent/child dep filling too, for broking parent . child_dependencies . add ( host . uuid ) host . parent_dependencies . add ( parent_id )
Loop over the hosts and register the dependencies between parents and children
189
10
19,570
def find_hosts_that_use_template ( self , tpl_name ) : return [ h . host_name for h in self if tpl_name in h . tags if hasattr ( h , "host_name" ) ]
Find hosts that use the template defined in argument tpl_name
53
13
19,571
def is_me ( self ) : # pragma: no cover, seems not to be used anywhere logger . info ( "And arbiter is launched with the hostname:%s " "from an arbiter point of view of addr:%s" , self . host_name , socket . getfqdn ( ) ) return self . host_name == socket . getfqdn ( ) or self . host_name == socket . gethostname ( )
Check if the parameter name is the same as the name of this object
98
11
19,572
def do_not_run ( self ) : logger . debug ( "[%s] do_not_run" , self . name ) try : self . con . get ( '_do_not_run' ) return True except HTTPClientConnectionException as exp : # pragma: no cover, simple protection self . add_failed_check_attempt ( "Connection error when " "sending do not run: %s" % str ( exp ) ) self . set_dead ( ) except HTTPClientTimeoutException as exp : # pragma: no cover, simple protection self . add_failed_check_attempt ( "Connection timeout when " "sending do not run: %s" % str ( exp ) ) except HTTPClientException as exp : self . add_failed_check_attempt ( "Error when " "sending do not run: %s" % str ( exp ) ) return False
Check whether the satellite is running or not; if not, try to run it
193
11
19,573
def get_broks ( self , broker_name ) : logger . debug ( "Broker %s requests my broks list" , broker_name ) res = [ ] if not broker_name : return res for broker_link in list ( self . brokers . values ( ) ) : if broker_name == broker_link . name : for brok in sorted ( broker_link . broks , key = lambda x : x . creation_time ) : # Only provide broks that were not yet sent to our external modules if getattr ( brok , 'sent_to_externals' , False ) : res . append ( brok ) brok . got = True broker_link . broks = [ b for b in broker_link . broks if not getattr ( b , 'got' , False ) ] logger . debug ( "Providing %d broks to %s" , len ( res ) , broker_name ) break else : logger . warning ( "Got a brok request from an unknown broker: %s" , broker_name ) return res
Send broks to a specific broker
229
7
19,574
def do_loop_turn ( self ) : if not self . first_scheduling : # Ok, now all is initialized, we can make the initial broks logger . info ( "First scheduling launched" ) _t0 = time . time ( ) # Program start brok self . sched . initial_program_status ( ) # First scheduling self . sched . schedule ( ) statsmgr . timer ( 'first_scheduling' , time . time ( ) - _t0 ) logger . info ( "First scheduling done" ) # Connect to our passive satellites if needed for satellite in [ s for s in list ( self . pollers . values ( ) ) if s . passive ] : if not self . daemon_connection_init ( satellite ) : logger . error ( "Passive satellite connection failed: %s" , satellite ) for satellite in [ s for s in list ( self . reactionners . values ( ) ) if s . passive ] : if not self . daemon_connection_init ( satellite ) : logger . error ( "Passive satellite connection failed: %s" , satellite ) # Ticks are for recurrent function call like consume, del zombies etc self . sched . ticks = 0 self . first_scheduling = True # Each loop turn, execute the daemon specific treatment... # only if the daemon has a configuration to manage if self . sched . pushed_conf : # If scheduling is not yet enabled, enable scheduling if not self . sched . must_schedule : self . sched . start_scheduling ( ) self . sched . before_run ( ) self . sched . run ( ) else : logger . warning ( "#%d - No monitoring configuration to scheduler..." , self . loop_count )
Scheduler loop turn
364
5
19,575
def get_managed_configurations ( self ) : # for scheduler_link in list(self.schedulers.values()): # res[scheduler_link.instance_id] = { # 'hash': scheduler_link.hash, # 'push_flavor': scheduler_link.push_flavor, # 'managed_conf_id': scheduler_link.managed_conf_id # } res = { } if self . sched . pushed_conf and self . cur_conf and 'instance_id' in self . cur_conf : res [ self . cur_conf [ 'instance_id' ] ] = { 'hash' : self . cur_conf [ 'hash' ] , 'push_flavor' : self . cur_conf [ 'push_flavor' ] , 'managed_conf_id' : self . cur_conf [ 'managed_conf_id' ] } logger . debug ( "Get managed configuration: %s" , res ) return res
Get the configurations managed by this scheduler
217
8
19,576
def clean_previous_run ( self ) : # Execute the base class treatment... super ( Alignak , self ) . clean_previous_run ( ) # Clean all lists self . pollers . clear ( ) self . reactionners . clear ( ) self . brokers . clear ( )
Clean variables from previous configuration
63
5
19,577
def get_monitoring_problems ( self ) : res = { } if not self . sched : return res # Get statistics from the scheduler scheduler_stats = self . sched . get_scheduler_stats ( details = True ) if 'livesynthesis' in scheduler_stats : res [ 'livesynthesis' ] = scheduler_stats [ 'livesynthesis' ] if 'problems' in scheduler_stats : res [ 'problems' ] = scheduler_stats [ 'problems' ] return res
Get the current scheduler livesynthesis
115
7
19,578
def merge_extinfo ( service , extinfo ) : properties = [ 'notes' , 'notes_url' , 'icon_image' , 'icon_image_alt' ] # service properties have precedence over serviceextinfo properties for prop in properties : if getattr ( service , prop ) == '' and getattr ( extinfo , prop ) != '' : setattr ( service , prop , getattr ( extinfo , prop ) )
Merge extended host information into a service
92
8
19,579
def get_command_and_args ( self ) : # First protect p_call = self . call . replace ( r'\!' , '___PROTECT_EXCLAMATION___' ) tab = p_call . split ( '!' ) return tab [ 0 ] . strip ( ) , [ s . replace ( '___PROTECT_EXCLAMATION___' , '!' ) for s in tab [ 1 : ] ]
We want to get the command and the args by splitting on '!', but don't forget to protect the escaped '\!' to avoid splitting on it
94
29
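The protect-split-restore trick in get_command_and_args above is easy to exercise in isolation. The following is the same three-step sequence as a stand-alone function, not the Alignak CommandCall class itself: the escaped '\!' is first replaced by a sentinel, the remainder is split on '!', and the literal '!' is restored in each argument.

    def get_command_and_args(call):
        protected = call.replace(r'\!', '___PROTECT_EXCLAMATION___')
        parts = protected.split('!')
        command = parts[0].strip()
        args = [p.replace('___PROTECT_EXCLAMATION___', '!') for p in parts[1:]]
        return command, args

    print(get_command_and_args(r'check_http!-u /login\!now!-t 5'))
    # ('check_http', ['-u /login!now', '-t 5'])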
19,580
def get_a_satellite_link ( sat_type , sat_dict ) : cls = get_alignak_class ( 'alignak.objects.%slink.%sLink' % ( sat_type , sat_type . capitalize ( ) ) ) return cls ( params = sat_dict , parsing = False )
Get a SatelliteLink object for a given satellite type and a dictionary
72
13
19,581
def get_livestate ( self ) : livestate = 0 if self . active : if not self . reachable : livestate = 1 elif not self . alive : livestate = 2 else : livestate = 3 livestate_output = "%s/%s is %s" % ( self . type , self . name , [ "up and running." , "warning because not reachable." , "critical because not responding." , "not active by configuration." ] [ livestate ] ) return ( livestate , livestate_output )
Get the SatelliteLink live state.
114
7
19,582
def get_and_clear_context ( self ) : res = ( self . broks , self . actions , self . wait_homerun , self . pushed_commands ) self . broks = [ ] self . actions = { } self . wait_homerun = { } self . pushed_commands = [ ] return res
Get and clear all of our broks, actions, external commands and homerun results
73
14
19,583
def prepare_for_conf ( self ) : logger . debug ( "- preparing: %s" , self ) self . cfg = { 'self_conf' : self . give_satellite_cfg ( ) , 'schedulers' : { } , 'arbiters' : { } } logger . debug ( "- prepared: %s" , self . cfg )
Initialize the pushed configuration dictionary with the inner properties that are to be propagated to the satellite link.
81
21
19,584
def give_satellite_cfg ( self ) : # All the satellite link class properties that are 'to_send' are stored in a # dictionary to be pushed to the satellite when the configuration is dispatched res = { } properties = self . __class__ . properties for prop , entry in list ( properties . items ( ) ) : if hasattr ( self , prop ) and entry . to_send : res [ prop ] = getattr ( self , prop ) return res
Get the default information for a satellite.
98
8
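A minimal sketch of the 'to_send' filtering in give_satellite_cfg above, with a hypothetical Prop stand-in for the Alignak property classes: only properties flagged to_send=True end up in the dictionary pushed to the satellite.

    class Prop:
        def __init__(self, to_send=False):
            self.to_send = to_send

    class Link:
        properties = {
            'name': Prop(to_send=True),
            'port': Prop(to_send=True),
            'broks': Prop(to_send=False),  # internal state, never pushed
        }

        def __init__(self):
            self.name = 'scheduler-master'
            self.port = 7768
            self.broks = []

        def give_satellite_cfg(self):
            return {prop: getattr(self, prop)
                    for prop, entry in self.__class__.properties.items()
                    if entry.to_send and hasattr(self, prop)}

    print(Link().give_satellite_cfg())  # {'name': 'scheduler-master', 'port': 7768}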
19,585
def give_satellite_json ( self ) : daemon_properties = [ 'type' , 'name' , 'uri' , 'spare' , 'configuration_sent' , 'realm_name' , 'manage_sub_realms' , 'active' , 'reachable' , 'alive' , 'passive' , 'last_check' , 'polling_interval' , 'max_check_attempts' ] ( livestate , livestate_output ) = self . get_livestate ( ) res = { "livestate" : livestate , "livestate_output" : livestate_output } for sat_prop in daemon_properties : res [ sat_prop ] = getattr ( self , sat_prop , 'not_yet_defined' ) return res
Get the JSON information for a satellite.
176
8
19,586
def manages ( self , cfg_part ) : logger . debug ( "Do I (%s/%s) manage: %s, my managed configuration(s): %s" , self . type , self . name , cfg_part , self . cfg_managed ) # If we do not yet manage a configuration if not self . cfg_managed : logger . info ( "I (%s/%s) do not manage (yet) any configuration!" , self . type , self . name ) return False # Check in the schedulers list configurations for managed_cfg in list ( self . cfg_managed . values ( ) ) : # If not even the cfg_id in the managed_conf, bail out if managed_cfg [ 'managed_conf_id' ] == cfg_part . instance_id and managed_cfg [ 'push_flavor' ] == cfg_part . push_flavor : logger . debug ( "I do manage this configuration: %s" , cfg_part ) break else : logger . warning ( "I (%s/%s) do not manage this configuration: %s" , self . type , self . name , cfg_part ) return False return True
Tell if the satellite is managing this configuration part
261
9
19,587
def set_alive ( self ) : was_alive = self . alive self . alive = True self . reachable = True self . attempt = 0 # We came from dead to alive! We must propagate the good news if not was_alive : logger . info ( "Setting %s satellite as alive :)" , self . name ) self . broks . append ( self . get_update_status_brok ( ) )
Set alive and reachable, and reset the attempts counter. If the state changed, raise a status brok update
92
18
19,588
def add_failed_check_attempt ( self , reason = '' ) : self . reachable = False self . attempt = self . attempt + 1 logger . debug ( "Failed attempt for %s (%d/%d), reason: %s" , self . name , self . attempt , self . max_check_attempts , reason ) # Don't need to warn again and again if the satellite is already dead # Only warn when it is alive if self . alive : if not self . stopping : logger . warning ( "Add failed attempt for %s (%d/%d) - %s" , self . name , self . attempt , self . max_check_attempts , reason ) else : logger . info ( "Stopping... failed attempt for %s (%d/%d) - also probably stopping" , self . name , self . attempt , self . max_check_attempts ) # If we reached the maximum attempts, set the daemon as dead if self . attempt >= self . max_check_attempts : if not self . stopping : logger . warning ( "Set %s as dead, too much failed attempts (%d), last problem is: %s" , self . name , self . max_check_attempts , reason ) else : logger . info ( "Stopping... set %s as dead, too much failed attempts (%d)" , self . name , self . max_check_attempts ) self . set_dead ( )
Set the daemon as unreachable and add a failed attempt; if we reach the maximum number of attempts, set the daemon as dead
316
22
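The attempt bookkeeping in add_failed_check_attempt above boils down to a counter that flips the link to dead at the threshold. A trimmed, runnable sketch (a hypothetical Link class, not the Alignak SatelliteLink):

    class Link:
        def __init__(self, name, max_check_attempts=3):
            self.name = name
            self.alive = True
            self.reachable = True
            self.attempt = 0
            self.max_check_attempts = max_check_attempts

        def set_dead(self):
            self.alive = False
            print("%s is dead" % self.name)

        def add_failed_check_attempt(self, reason=''):
            self.reachable = False
            self.attempt += 1
            print("failed attempt for %s (%d/%d): %s"
                  % (self.name, self.attempt, self.max_check_attempts, reason))
            # once the threshold is reached, the daemon is declared dead
            if self.attempt >= self.max_check_attempts:
                self.set_dead()

    link = Link("broker-master")
    for _ in range(3):
        link.add_failed_check_attempt("connection timeout")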
19,589
def valid_connection ( * outer_args , * * outer_kwargs ) : # pylint: disable=unused-argument, no-method-argument def decorator ( func ) : # pylint: disable=missing-docstring def decorated ( * args , * * kwargs ) : # pylint: disable=missing-docstring # outer_args and outer_kwargs are the decorator arguments # args and kwargs are the decorated function arguments link = args [ 0 ] if not link . con : raise LinkError ( "The connection is not created for %s" % link . name ) if not link . running_id : raise LinkError ( "The connection is not initialized for %s" % link . name ) return func ( * args , * * kwargs ) return decorated return decorator
Check if the daemon connection is established and valid
177
9
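Note that valid_connection above is a decorator factory: calling it returns the real decorator, which is why it is applied as @valid_connection(). A trimmed, runnable version of the same pattern, with a hypothetical FakeLink object standing in for a satellite link:

    class LinkError(Exception):
        pass

    def valid_connection(*outer_args, **outer_kwargs):
        def decorator(func):
            def decorated(*args, **kwargs):
                link = args[0]  # the decorated functions are link methods
                if not getattr(link, 'con', None):
                    raise LinkError("The connection is not created for %s" % link.name)
                if not getattr(link, 'running_id', None):
                    raise LinkError("The connection is not initialized for %s" % link.name)
                return func(*args, **kwargs)
            return decorated
        return decorator

    class FakeLink:
        name, con, running_id = 'poller-master', object(), '123.456'

        @valid_connection()
        def stop_request(self):
            return 'stopped'

    print(FakeLink().stop_request())  # 'stopped'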
19,590
def communicate ( * outer_args , * * outer_kwargs ) : # pylint: disable=unused-argument, no-method-argument def decorator ( func ) : # pylint: disable=missing-docstring def decorated ( * args , * * kwargs ) : # pylint: disable=missing-docstring # outer_args and outer_kwargs are the decorator arguments # args and kwargs are the decorated function arguments fn_name = func . __name__ link = args [ 0 ] if not link . alive : logger . warning ( "%s is not alive for %s" , link . name , fn_name ) return None try : if not link . reachable : raise LinkError ( "The %s %s is not reachable" % ( link . type , link . name ) ) logger . debug ( "[%s] Calling: %s, %s, %s" , link . name , fn_name , args , kwargs ) return func ( * args , * * kwargs ) except HTTPClientConnectionException as exp : # A Connection error is raised when the daemon connection cannot be established # No way with the configuration parameters! if not link . stopping : logger . warning ( "A daemon (%s/%s) that we must be related with " "cannot be connected: %s" , link . type , link . name , exp ) else : logger . info ( "Stopping... daemon (%s/%s) cannot be connected. " "It is also probably stopping or yet stopped." , link . type , link . name ) link . set_dead ( ) except ( LinkError , HTTPClientTimeoutException ) as exp : link . add_failed_check_attempt ( "Connection timeout " "with '%s': %s" % ( fn_name , str ( exp ) ) ) return False except HTTPClientDataException as exp : # A Data error is raised when the daemon HTTP response is not 200! # No way with the communication if some problems exist in the daemon interface! # Abort all err = "Some daemons that we must be related with " "have some interface problems. Sorry, I bail out" logger . error ( err ) os . sys . exit ( err ) except HTTPClientException as exp : link . add_failed_check_attempt ( "Error with '%s': %s" % ( fn_name , str ( exp ) ) ) return None return decorated return decorator
Check if the daemon connection is authorized and valid
530
9
19,591
def stop_request ( self , stop_now = False ) : logger . debug ( "Sending stop request to %s, stop now: %s" , self . name , stop_now ) res = self . con . get ( 'stop_request' , { 'stop_now' : '1' if stop_now else '0' } ) return res
Send a stop request to the daemon
78
7
19,592
def update_infos ( self , forced = False , test = False ) : logger . debug ( "Update informations, forced: %s" , forced ) # First look if it's not too early to ping now = time . time ( ) if not forced and self . last_check and self . last_check + self . polling_interval > now : logger . debug ( "Too early to ping %s, ping period is %ds!, last check: %d, now: %d" , self . name , self . polling_interval , self . last_check , now ) return None self . get_conf ( test = test ) # Update the daemon last check timestamp self . last_check = time . time ( ) # Update the state of this element self . broks . append ( self . get_update_status_brok ( ) ) return self . cfg_managed
Update the satellite information every self.polling_interval seconds, so that arbiter actions are smoothed and only the useful ones are performed.
190
22
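The throttling guard at the top of update_infos above can be isolated as a small predicate. A sketch with an explicit now parameter so the logic is testable without waiting:

    import time

    def should_ping(last_check, polling_interval, forced=False, now=None):
        now = time.time() if now is None else now
        if not forced and last_check and last_check + polling_interval > now:
            return False  # too early to ping again
        return True

    print(should_ping(last_check=1000.0, polling_interval=30, now=1020.0))  # False
    print(should_ping(last_check=1000.0, polling_interval=30, now=1031.0))  # True
    print(should_ping(last_check=1000.0, polling_interval=30, now=1010.0,
                      forced=True))  # True: forced updates bypass the throttle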
19,593
def push_actions ( self , actions , scheduler_instance_id ) : logger . debug ( "Pushing %d actions from %s" , len ( actions ) , scheduler_instance_id ) return self . con . post ( '_push_actions' , { 'actions' : actions , 'scheduler_instance_id' : scheduler_instance_id } , wait = True )
Post the actions to execute to the satellite. Indeed, a scheduler posts its checks to a poller and its actions to a reactionner.
87
28
19,594
def linkify ( self , modules ) : logger . debug ( "Linkify %s with %s" , self , modules ) self . linkify_s_by_module ( modules )
Link modules and Satellite links
40
5
19,595
def get_return_from ( self , notif ) : self . exit_status = notif . exit_status self . execution_time = notif . execution_time
Setter of exit_status and execution_time attributes
37
11
19,596
def get_initial_status_brok ( self ) : data = { 'uuid' : self . uuid } self . fill_data_brok_from ( data , 'full_status' ) return Brok ( { 'type' : 'notification_raise' , 'data' : data } )
Get an initial status brok
68
6
19,597
def manage_brok ( self , brok ) : # Unserialize the brok before consuming it brok . prepare ( ) for module in self . modules_manager . get_internal_instances ( ) : try : _t0 = time . time ( ) module . manage_brok ( brok ) statsmgr . timer ( 'manage-broks.internal.%s' % module . get_name ( ) , time . time ( ) - _t0 ) except Exception as exp : # pylint: disable=broad-except logger . warning ( "The module %s raised an exception: %s, " "I'm tagging it to restart later" , module . get_name ( ) , str ( exp ) ) logger . exception ( exp ) self . modules_manager . set_to_restart ( module )
Get a brok and pass its data to the internal modules
180
13
19,598
def get_internal_broks ( self ) : statsmgr . gauge ( 'get-new-broks-count.broker' , len ( self . internal_broks ) ) # Add the broks to our global list self . external_broks . extend ( self . internal_broks ) self . internal_broks = [ ]
Get all broks from self.broks_internal_raised and append them to our broks to manage
75
22
19,599
def get_arbiter_broks ( self ) : with self . arbiter_broks_lock : statsmgr . gauge ( 'get-new-broks-count.arbiter' , len ( self . arbiter_broks ) ) # Add the broks to our global list self . external_broks . extend ( self . arbiter_broks ) self . arbiter_broks = [ ]
Get the broks from the arbiters; but as the arbiter_broks list can be pushed by the arbiter without the global lock, we must protect this with a lock
92
33
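The lock usage in get_arbiter_broks above is a classic producer/consumer list swap. A minimal sketch assuming the arbiter pushes from another thread: the producer appends under the lock, and the consumer drains the whole list under the same lock, so no brok is lost or read twice.

    import threading

    arbiter_broks = []
    arbiter_broks_lock = threading.Lock()

    def push_brok(brok):           # producer side (arbiter HTTP thread)
        with arbiter_broks_lock:
            arbiter_broks.append(brok)

    def get_arbiter_broks():       # consumer side (broker main loop)
        global arbiter_broks
        with arbiter_broks_lock:
            drained, arbiter_broks = arbiter_broks, []
        return drained

    push_brok({'type': 'monitoring_log'})
    print(get_arbiter_broks())  # [{'type': 'monitoring_log'}]
    print(get_arbiter_broks())  # []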