idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
19,600 | def linkify_sd_by_tp ( self , timeperiods ) : for servicedep in self : try : tp_name = servicedep . dependency_period timeperiod = timeperiods . find_by_name ( tp_name ) if timeperiod : servicedep . dependency_period = timeperiod . uuid else : servicedep . dependency_period = '' except AttributeError as exp : logger . error ( "[servicedependency] fail to linkify by timeperiods: %s" , exp ) | Replace dependency_period by a real object in service dependency |
19,601 | def linkify_s_by_sd ( self , services ) : for servicedep in self : setattr ( servicedep , "service_description_string" , "undefined" ) setattr ( servicedep , "dependent_service_description_string" , "undefined" ) if getattr ( servicedep , 'service_description' , None ) is None or getattr ( servicedep , 'dependent_service_description' , None ) is None : continue services . add_act_dependency ( servicedep . dependent_service_description , servicedep . service_description , servicedep . notification_failure_criteria , getattr ( servicedep , 'dependency_period' , '' ) , servicedep . inherits_parent ) services . add_chk_dependency ( servicedep . dependent_service_description , servicedep . service_description , servicedep . execution_failure_criteria , getattr ( servicedep , 'dependency_period' , '' ) , servicedep . inherits_parent ) setattr ( servicedep , "service_description_string" , services [ servicedep . service_description ] . get_name ( ) ) setattr ( servicedep , "dependent_service_description_string" , services [ servicedep . dependent_service_description ] . get_name ( ) ) | Add dependency in service objects |
19,602 | def init ( self ) : if not self . enabled : logger . info ( " the module is disabled." ) return True try : connections = self . test_connection ( ) except Exception as exp : logger . error ( "initialization, test connection failed. Error: %s" , str ( exp ) ) if self . influxdb_enabled : try : dbs = self . influx . get_list_database ( ) for db in dbs : if db . get ( 'name' ) == self . influxdb_database : logger . info ( "the database %s is existing." , self . influxdb_database ) break else : logger . info ( "creating database %s..." , self . influxdb_database ) self . influx . create_database ( self . influxdb_database ) if self . influxdb_retention_name : rps = self . influx . get_list_retention_policies ( ) for rp in rps : if rp . get ( 'name' ) == self . influxdb_retention_name : logger . info ( "the retention policy %s is existing." , self . influxdb_retention_name ) break else : logger . info ( "creating database retention policy: %s - %s - %s..." , self . influxdb_retention_name , self . influxdb_retention_duration , self . influxdb_retention_replication ) self . influx . create_retention_policy ( self . influxdb_retention_name , self . influxdb_retention_duration , self . influxdb_retention_replication , database = self . influxdb_database ) if self . influxdb_username : users = self . influx . get_list_users ( ) for user in users : if user . get ( 'user' ) == self . influxdb_username : logger . info ( "the user %s is existing." , self . influxdb_username ) break else : logger . info ( "creating user: %s..." , self . influxdb_username ) self . influx . create_user ( self . influxdb_username , self . influxdb_password , admin = False ) connections = connections or True except Exception as exp : logger . error ( "InfluxDB, DB initialization failed. Error: %s" , str ( exp ) ) return connections | Called by the daemon broker to initialize the module |
19,603 | def get_metrics_from_perfdata ( self , service , perf_data ) : result = [ ] metrics = PerfDatas ( perf_data ) for metric in metrics : logger . debug ( "service: %s, metric: %s (%s)" , service , metric , metric . __dict__ ) if metric . name in [ 'time' ] : metric . name = "duration" name = sanitize_name ( metric . name ) name = self . multiple_values . sub ( r'.\1' , name ) if not name : continue name_value = { name : metric . value , 'uom_' + name : metric . uom } if metric . warning and self . send_warning : name_value [ name + '_warn' ] = metric . warning if metric . critical and self . send_critical : name_value [ name + '_crit' ] = metric . critical if metric . min and self . send_min : name_value [ name + '_min' ] = metric . min if metric . max and self . send_max : name_value [ name + '_max' ] = metric . max for key , value in name_value . items ( ) : result . append ( ( key , value , metric . uom ) ) logger . debug ( "Metrics: %s - %s" , service , result ) return result | Decode the performance data to build a metrics list |
19,604 | def send_to_tsdb ( self , realm , host , service , metrics , ts , path ) : if ts is None : ts = int ( time . time ( ) ) data = { "measurement" : service , "tags" : { "host" : host , "service" : service , "realm" : '.' . join ( realm ) if isinstance ( realm , list ) else realm , "path" : path } , "time" : ts , "fields" : { } } if path is not None : data [ 'tags' ] . update ( { "path" : path } ) for metric , value , _ in metrics : data [ 'fields' ] . update ( { metric : value } ) logger . debug ( "Data: %s" , data ) self . my_metrics . append ( data ) if self . metrics_count >= self . metrics_flush_count : self . flush ( ) | Send performance data to time series database |
19,605 | def manage_initial_service_status_brok ( self , b ) : host_name = b . data [ 'host_name' ] service_description = b . data [ 'service_description' ] service_id = host_name + "/" + service_description logger . debug ( "got initial service status: %s" , service_id ) if host_name not in self . hosts_cache : logger . error ( "initial service status, host is unknown: %s." , service_id ) return self . services_cache [ service_id ] = { } if 'customs' in b . data : self . services_cache [ service_id ] [ '_GRAPHITE_POST' ] = sanitize_name ( b . data [ 'customs' ] . get ( '_GRAPHITE_POST' , None ) ) logger . debug ( "initial service status received: %s" , service_id ) | Prepare the known services cache |
19,606 | def manage_initial_host_status_brok ( self , b ) : host_name = b . data [ 'host_name' ] logger . debug ( "got initial host status: %s" , host_name ) self . hosts_cache [ host_name ] = { 'realm_name' : sanitize_name ( b . data . get ( 'realm_name' , b . data . get ( 'realm' , 'All' ) ) ) , } if 'customs' in b . data : self . hosts_cache [ host_name ] [ '_GRAPHITE_PRE' ] = sanitize_name ( b . data [ 'customs' ] . get ( '_GRAPHITE_PRE' , None ) ) self . hosts_cache [ host_name ] [ '_GRAPHITE_GROUP' ] = sanitize_name ( b . data [ 'customs' ] . get ( '_GRAPHITE_GROUP' , None ) ) logger . debug ( "initial host status received: %s" , host_name ) | Prepare the known hosts cache |
19,607 | def manage_service_check_result_brok ( self , b ) : host_name = b . data . get ( 'host_name' , None ) service_description = b . data . get ( 'service_description' , None ) if not host_name or not service_description : return service_id = host_name + "/" + service_description logger . debug ( "service check result: %s" , service_id ) if not self . ignore_unknown and host_name not in self . hosts_cache : logger . warning ( "received service check result for an unknown host: %s" , service_id ) return if service_id not in self . services_cache and not self . ignore_unknown : logger . warning ( "received service check result for an unknown service: %s" , service_id ) return metrics = self . get_metrics_from_perfdata ( service_description , b . data [ 'perf_data' ] ) if not metrics : logger . debug ( "no metrics to send ..." ) return if self . ignore_latency_limit >= b . data [ 'latency' ] > 0 : check_time = int ( b . data [ 'last_chk' ] ) - int ( b . data [ 'latency' ] ) else : check_time = int ( b . data [ 'last_chk' ] ) hname = sanitize_name ( host_name ) if host_name in self . hosts_cache : if self . hosts_cache [ host_name ] . get ( '_GRAPHITE_GROUP' , None ) : hname = "." . join ( ( self . hosts_cache [ host_name ] . get ( '_GRAPHITE_GROUP' ) , hname ) ) if self . hosts_cache [ host_name ] . get ( '_GRAPHITE_PRE' , None ) : hname = "." . join ( ( self . hosts_cache [ host_name ] . get ( '_GRAPHITE_PRE' ) , hname ) ) desc = sanitize_name ( service_description ) if service_id in self . services_cache : if self . services_cache [ service_id ] . get ( '_GRAPHITE_POST' , None ) : desc = "." . join ( ( desc , self . services_cache [ service_id ] . get ( '_GRAPHITE_POST' , None ) ) ) if self . graphite_data_source : path = '.' . join ( ( hname , self . graphite_data_source , desc ) ) else : path = '.' . join ( ( hname , desc ) ) if self . realms_prefix and self . hosts_cache [ host_name ] . get ( 'realm_name' , None ) : path = '.' . join ( ( self . 
hosts_cache [ host_name ] . get ( 'realm_name' ) , path ) ) realm_name = None if host_name in self . hosts_cache : realm_name = self . hosts_cache [ host_name ] . get ( 'realm_name' , None ) self . send_to_tsdb ( realm_name , host_name , service_description , metrics , check_time , path ) | A service check result brok has just arrived ... |
19,608 | def manage_host_check_result_brok ( self , b ) : host_name = b . data . get ( 'host_name' , None ) if not host_name : return logger . debug ( "host check result: %s" , host_name ) if host_name not in self . hosts_cache and not self . ignore_unknown : logger . warning ( "received host check result for an unknown host: %s" , host_name ) return metrics = self . get_metrics_from_perfdata ( 'host_check' , b . data [ 'perf_data' ] ) if not metrics : logger . debug ( "no metrics to send ..." ) return if self . ignore_latency_limit >= b . data [ 'latency' ] > 0 : check_time = int ( b . data [ 'last_chk' ] ) - int ( b . data [ 'latency' ] ) else : check_time = int ( b . data [ 'last_chk' ] ) hname = sanitize_name ( host_name ) if host_name in self . hosts_cache : if self . hosts_cache [ host_name ] . get ( '_GRAPHITE_GROUP' , None ) : hname = "." . join ( ( self . hosts_cache [ host_name ] . get ( '_GRAPHITE_GROUP' ) , hname ) ) if self . hosts_cache [ host_name ] . get ( '_GRAPHITE_PRE' , None ) : hname = "." . join ( ( self . hosts_cache [ host_name ] . get ( '_GRAPHITE_PRE' ) , hname ) ) if self . graphite_data_source : path = '.' . join ( ( hname , self . graphite_data_source ) ) if self . hostcheck : path = '.' . join ( ( hname , self . graphite_data_source , self . hostcheck ) ) else : path = '.' . join ( ( hname , self . hostcheck ) ) if self . realms_prefix and self . hosts_cache [ host_name ] . get ( 'realm_name' , None ) : path = '.' . join ( ( self . hosts_cache [ host_name ] . get ( 'realm_name' ) , path ) ) realm_name = None if host_name in self . hosts_cache : realm_name = self . hosts_cache [ host_name ] . get ( 'realm_name' , None ) self . send_to_tsdb ( realm_name , host_name , self . hostcheck , metrics , check_time , path ) | An host check result brok has just arrived ... |
19,609 | def get_comment_brok ( self , host_name , service_name = '' ) : data = self . serialize ( ) data [ 'host' ] = host_name if service_name : data [ 'service' ] = service_name return Brok ( { 'type' : 'comment' , 'data' : data } ) | Get a comment brok |
19,610 | def new_inner_member ( self , name , params ) : params [ 'notificationway_name' ] = name self . add_item ( NotificationWay ( params ) ) | Create new instance of NotificationWay with given name and parameters and add it to the item list |
19,611 | def serialize ( obj , no_dump = False ) : if hasattr ( obj , "serialize" ) and isinstance ( obj . serialize , collections . Callable ) : o_dict = { '__sys_python_module__' : "%s.%s" % ( obj . __class__ . __module__ , obj . __class__ . __name__ ) , 'content' : obj . serialize ( ) } elif isinstance ( obj , dict ) : o_dict = { } for key , value in list ( obj . items ( ) ) : o_dict [ key ] = serialize ( value , True ) elif isinstance ( obj , ( list , set ) ) : o_dict = [ serialize ( item , True ) for item in obj ] else : o_dict = obj if no_dump : return o_dict result = None try : result = json . dumps ( o_dict , ensure_ascii = False ) except MemoryError : return { '_error' : 'Not enough memory on this computer to correctly manage Alignak ' 'objects serialization! ' 'Sorry for this, please log an issue in the project repository.' } return result | Serialize an object . |
19,612 | def get_event ( self ) : self . prepare ( ) return ( self . creation_time , self . data [ 'level' ] , self . data [ 'message' ] ) | This function returns an Event from a Brok |
19,613 | def prepare ( self ) : if hasattr ( self , 'prepared' ) and not self . prepared : self . data = unserialize ( self . data ) if self . instance_id : self . data [ 'instance_id' ] = self . instance_id self . prepared = True | Un - serialize data from data attribute and add instance_id key if necessary |
19,614 | def resolve_elements ( self ) : if self . leaf : if not self . content : return set ( ) return set ( self . content ) not_nodes = [ s for s in self . sons if s . not_value ] positiv_nodes = [ s for s in self . sons if not s . not_value ] if not self . operand : self . operand = '|' res = set ( ) i = 0 for node in positiv_nodes : node_members = node . resolve_elements ( ) if self . operand == '|' : res = res . union ( node_members ) elif self . operand == '&' : if i == 0 : res = node_members else : res = res . intersection ( node_members ) i += 1 for node in not_nodes : node_members = node . resolve_elements ( ) res = res . difference ( node_members ) return res | Get element of this node recursively Compute rules with OR or AND rule then NOT rules . |
19,615 | def eval_cor_pattern ( self , pattern ) : pattern = pattern . strip ( ) complex_node = False for char in '()+&|,' : if char in pattern : complex_node = True node = ComplexExpressionNode ( ) if not complex_node : if pattern . startswith ( '!' ) : node . not_value = True pattern = pattern [ 1 : ] node . operand = self . ctx node . leaf = True obj , error = self . find_object ( pattern ) if obj is not None : node . content = obj else : node . configuration_errors . append ( error ) return node in_par = False tmp = '' stacked_par = 0 for char in pattern : if char in ( ',' , '|' ) : if in_par : tmp += char else : tmp = tmp . strip ( ) node . operand = '|' if tmp != '' : son = self . eval_cor_pattern ( tmp ) node . sons . append ( son ) tmp = '' elif char in ( '&' , '+' ) : if in_par : tmp += char else : tmp = tmp . strip ( ) node . operand = '&' if tmp != '' : son = self . eval_cor_pattern ( tmp ) node . sons . append ( son ) tmp = '' elif char == '(' : stacked_par += 1 in_par = True tmp = tmp . strip ( ) if stacked_par == 1 and tmp != '' : print ( "ERROR : bad expression near" , tmp ) continue if stacked_par > 1 : tmp += char elif char == ')' : stacked_par -= 1 if stacked_par < 0 : print ( "Error : bad expression near" , tmp , "too much ')'" ) continue if stacked_par == 0 : tmp = tmp . strip ( ) son = self . eval_cor_pattern ( tmp ) node . sons . append ( son ) in_par = False tmp = '' continue tmp += char else : tmp += char tmp = tmp . strip ( ) if tmp != '' : son = self . eval_cor_pattern ( tmp ) node . sons . append ( son ) return node | Parse and build recursively a tree of ComplexExpressionNode from pattern |
19,616 | def find_object ( self , pattern ) : obj = None error = None pattern = pattern . strip ( ) if pattern == '*' : obj = [ h . host_name for h in list ( self . all_elements . items . values ( ) ) if getattr ( h , 'host_name' , '' ) != '' and not h . is_tpl ( ) ] return obj , error if self . ctx == 'hostgroups' : hgr = self . grps . find_by_name ( pattern ) if not hgr : error = "Error : cannot find the %s of the expression '%s'" % ( self . ctx , pattern ) return hgr , error elts = hgr . get_hosts ( ) elts = strip_and_uniq ( elts ) if '*' in elts : elts . extend ( [ h . host_name for h in list ( self . all_elements . items . values ( ) ) if getattr ( h , 'host_name' , '' ) != '' and not h . is_tpl ( ) ] ) elts . remove ( '*' ) return elts , error obj = self . grps . find_hosts_that_use_template ( pattern ) return obj , error | Get a list of host corresponding to the pattern regarding the context |
19,617 | def all_my_hosts_and_services ( self ) : for what in ( self . hosts , self . services ) : for item in what : yield item | Create an iterator for all my known hosts and services |
19,618 | def load_conf ( self , instance_id , instance_name , conf ) : self . pushed_conf = conf logger . info ( "loading my configuration (%s / %s):" , instance_id , self . pushed_conf . instance_id ) logger . debug ( "Properties:" ) for key in sorted ( self . pushed_conf . properties ) : logger . debug ( "- %s: %s" , key , getattr ( self . pushed_conf , key , [ ] ) ) logger . debug ( "Macros:" ) for key in sorted ( self . pushed_conf . macros ) : logger . debug ( "- %s: %s" , key , getattr ( self . pushed_conf . macros , key , [ ] ) ) logger . debug ( "Objects types:" ) for _ , _ , strclss , _ , _ in list ( self . pushed_conf . types_creations . values ( ) ) : if strclss in [ 'arbiters' , 'schedulers' , 'brokers' , 'pollers' , 'reactionners' , 'receivers' ] : continue setattr ( self , strclss , getattr ( self . pushed_conf , strclss , [ ] ) ) logger . debug ( "- %d %s" , len ( getattr ( self , strclss ) ) , strclss ) statsmgr . gauge ( 'configuration.%s' % strclss , len ( getattr ( self , strclss ) ) ) self . services . optimize_service_search ( self . hosts ) self . alignak_name = self . pushed_conf . alignak_name self . instance_id = instance_id self . instance_name = instance_name self . push_flavor = getattr ( self . pushed_conf , 'push_flavor' , 'None' ) logger . info ( "Set my scheduler instance: %s - %s - %s" , self . instance_id , self . instance_name , self . push_flavor ) for item in self . all_my_hosts_and_services ( ) : item . instance_id = self . instance_id | Load configuration received from Arbiter and pushed by our Scheduler daemon |
19,619 | def update_recurrent_works_tick ( self , conf ) : for key in self . recurrent_works : ( name , fun , _ ) = self . recurrent_works [ key ] if isinstance ( conf , dict ) : new_tick = conf . get ( 'tick_%s' % name , None ) else : new_tick = getattr ( conf , 'tick_%s' % name , None ) if new_tick is not None : logger . debug ( "Requesting to change the default tick to %d for the action %s" , int ( new_tick ) , name ) else : continue try : new_tick = int ( new_tick ) logger . info ( "Changing the default tick to %d for the action %s" , new_tick , name ) self . recurrent_works [ key ] = ( name , fun , new_tick ) except ValueError : logger . warning ( "Changing the default tick for '%s' to '%s' failed!" , new_tick , name ) | Modify the tick value for the scheduler recurrent work |
19,620 | def dump_config ( self ) : path = os . path . join ( tempfile . gettempdir ( ) , 'dump-cfg-scheduler-%s-%d.json' % ( self . name , int ( time . time ( ) ) ) ) try : self . pushed_conf . dump ( path ) except ( OSError , IndexError ) as exp : logger . critical ( "Error when writing the configuration dump file %s: %s" , path , str ( exp ) ) | Dump scheduler configuration into a temporary file |
19,621 | def add_notification ( self , notification ) : if notification . uuid in self . actions : logger . warning ( "Already existing notification: %s" , notification ) return logger . debug ( "Adding a notification: %s" , notification ) self . actions [ notification . uuid ] = notification self . nb_notifications += 1 if notification . contact is not None : self . add ( notification . get_initial_status_brok ( ) ) | Add a notification into actions list |
19,622 | def add_check ( self , check ) : if check is None : return if check . uuid in self . checks : logger . debug ( "Already existing check: %s" , check ) return logger . debug ( "Adding a check: %s" , check ) self . checks [ check . uuid ] = check self . nb_checks += 1 item = self . find_item_by_id ( check . ref ) if item . active_checks_enabled : self . add ( item . get_next_schedule_brok ( ) ) | Add a check into the scheduler checks list |
19,623 | def add_event_handler ( self , action ) : if action . uuid in self . actions : logger . info ( "Already existing event handler: %s" , action ) return self . actions [ action . uuid ] = action self . nb_event_handlers += 1 | Add an event handler into actions list |
19,624 | def hook_point ( self , hook_name ) : self . my_daemon . hook_point ( hook_name = hook_name , handle = self ) | Generic function to call module methods if such method is available |
19,625 | def clean_queues ( self ) : if getattr ( self . pushed_conf , 'tick_clean_queues' , 0 ) == 0 : logger . debug ( "No queues cleaning..." ) return max_checks = MULTIPLIER_MAX_CHECKS * ( len ( self . hosts ) + len ( self . services ) ) max_broks = MULTIPLIER_MAX_BROKS * ( len ( self . hosts ) + len ( self . services ) ) max_actions = MULTIPLIER_MAX_ACTIONS * len ( self . contacts ) * ( len ( self . hosts ) + len ( self . services ) ) self . nb_checks_dropped = 0 if max_checks and len ( self . checks ) > max_checks : to_del_checks = [ c for c in list ( self . checks . values ( ) ) ] to_del_checks . sort ( key = lambda x : x . creation_time ) to_del_checks = to_del_checks [ : - max_checks ] self . nb_checks_dropped = len ( to_del_checks ) if to_del_checks : logger . warning ( "I have to drop some checks (%d)..., sorry :(" , self . nb_checks_dropped ) for chk in to_del_checks : c_id = chk . uuid items = getattr ( self , chk . ref_type + 's' ) elt = items [ chk . ref ] elt . remove_in_progress_check ( chk ) for dependent_checks in chk . depend_on_me : dependent_checks . depend_on . remove ( chk . uuid ) for c_temp in chk . depend_on : c_temp . depend_on_me . remove ( chk ) del self . checks [ c_id ] self . nb_broks_dropped = 0 for broker_link in list ( self . my_daemon . brokers . values ( ) ) : if max_broks and len ( broker_link . broks ) > max_broks : logger . warning ( "I have to drop some broks (%d > %d) for the broker %s " "..., sorry :(" , len ( broker_link . broks ) , max_broks , broker_link ) kept_broks = sorted ( broker_link . broks , key = lambda x : x . creation_time ) broker_link . broks = kept_broks [ 0 : max_broks ] self . nb_actions_dropped = 0 if max_actions and len ( self . actions ) > max_actions : logger . warning ( "I have to del some actions (currently: %d, max: %d)..., sorry :(" , len ( self . actions ) , max_actions ) to_del_actions = [ c for c in list ( self . actions . values ( ) ) ] to_del_actions . sort ( key = lambda x : x . 
creation_time ) to_del_actions = to_del_actions [ : - max_actions ] self . nb_actions_dropped = len ( to_del_actions ) for act in to_del_actions : if act . is_a == 'notification' : self . find_item_by_id ( act . ref ) . remove_in_progress_notification ( act ) del self . actions [ act . uuid ] | Reduces internal list size to max allowed |
19,626 | def update_business_values ( self ) : for elt in self . all_my_hosts_and_services ( ) : if not elt . is_problem : was = elt . business_impact elt . update_business_impact_value ( self . hosts , self . services , self . timeperiods , self . businessimpactmodulations ) new = elt . business_impact if new != was : self . get_and_register_status_brok ( elt ) for elt in self . all_my_hosts_and_services ( ) : if elt . is_problem : was = elt . business_impact elt . update_business_impact_value ( self . hosts , self . services , self . timeperiods , self . businessimpactmodulations ) new = elt . business_impact if new != was : self . get_and_register_status_brok ( elt ) | Iter over host and service and update business_impact |
19,627 | def scatter_master_notifications ( self ) : now = time . time ( ) notifications = [ a for a in self . actions . values ( ) if a . is_a == u'notification' and a . status == ACT_STATUS_SCHEDULED and not a . contact and a . is_launchable ( now ) ] if notifications : logger . debug ( "Scatter master notification: %d notifications" , len ( notifications ) ) for notification in notifications : logger . debug ( "Scheduler got a master notification: %s" , notification ) item = self . find_item_by_id ( notification . ref ) children = [ ] notification_period = None if getattr ( item , 'notification_period' , None ) is not None : notification_period = self . timeperiods [ item . notification_period ] if not item . is_blocking_notifications ( notification_period , self . hosts , self . services , notification . type , now ) : children = item . scatter_notification ( notification , self . contacts , self . notificationways , self . timeperiods , self . macromodulations , self . escalations , self . find_item_by_id ( getattr ( item , "host" , None ) ) ) for notif in children : logger . debug ( " - child notification: %s" , notif ) notif . status = ACT_STATUS_SCHEDULED self . add ( notif ) if notification . type == u'PROBLEM' : if children : item . current_notification_number = notification . notif_nb if item . notification_interval and notification . t_to_go is not None : notification . t_to_go = item . get_next_notification_time ( notification , self . escalations , self . timeperiods ) notification . notif_nb = item . current_notification_number + 1 logger . debug ( "Repeat master notification: %s" , notification ) else : item . remove_in_progress_notification ( notification ) logger . debug ( "Remove master notification (no repeat): %s" , notification ) else : logger . debug ( "Remove master notification (no more a problem): %s" , notification ) item . 
remove_in_progress_notification ( notification ) | Generate children notifications from a master notification Also update notification number |
19,628 | def manage_internal_checks ( self ) : if os . getenv ( 'ALIGNAK_MANAGE_INTERNAL' , '1' ) != '1' : return now = time . time ( ) for chk in list ( self . checks . values ( ) ) : if not chk . internal : continue if not chk . is_launchable ( now ) or chk . status not in [ ACT_STATUS_SCHEDULED ] : continue item = self . find_item_by_id ( chk . ref ) if not item or not item . active_checks_enabled : chk . status = ACT_STATUS_ZOMBIE continue logger . debug ( "Run internal check for %s" , item ) self . nb_internal_checks += 1 item . manage_internal_check ( self . hosts , self . services , chk , self . hostgroups , self . servicegroups , self . macromodulations , self . timeperiods ) chk . status = ACT_STATUS_WAIT_CONSUME | Run internal checks |
19,629 | def reset_topology_change_flag ( self ) : for i in self . hosts : i . topology_change = False for i in self . services : i . topology_change = False | Set topology_change attribute to False in all hosts and services |
19,630 | def log_initial_states ( self ) : for elt in self . hosts : elt . raise_initial_state ( ) for elt in self . services : elt . raise_initial_state ( ) | Raise hosts and services initial status logs |
19,631 | def get_retention_data ( self ) : retention_data = { 'hosts' : { } , 'services' : { } } for host in self . hosts : h_dict = { } properties = host . __class__ . properties properties . update ( host . __class__ . running_properties ) for prop , entry in list ( properties . items ( ) ) : if not entry . retention : continue val = getattr ( host , prop ) prepare_retention = entry . retention_preparation if prepare_retention : val = prepare_retention ( host , val ) h_dict [ prop ] = val retention_data [ 'hosts' ] [ host . host_name ] = h_dict logger . info ( '%d hosts sent to retention' , len ( retention_data [ 'hosts' ] ) ) for service in self . services : s_dict = { } properties = service . __class__ . properties properties . update ( service . __class__ . running_properties ) for prop , entry in list ( properties . items ( ) ) : if not entry . retention : continue val = getattr ( service , prop ) prepare_retention = entry . retention_preparation if prepare_retention : val = prepare_retention ( service , val ) s_dict [ prop ] = val retention_data [ 'services' ] [ ( service . host_name , service . service_description ) ] = s_dict logger . info ( '%d services sent to retention' , len ( retention_data [ 'services' ] ) ) return retention_data | Get all hosts and services data to be sent to the retention storage . |
19,632 | def restore_retention_data ( self , data ) : if 'hosts' not in data : logger . warning ( "Retention data are not correct, no 'hosts' property!" ) return for host_name in data [ 'hosts' ] : host = self . hosts . find_by_name ( host_name ) if host is not None : self . restore_retention_data_item ( data [ 'hosts' ] [ host_name ] , host ) statsmgr . gauge ( 'retention.hosts' , len ( data [ 'hosts' ] ) ) logger . info ( '%d hosts restored from retention' , len ( data [ 'hosts' ] ) ) for ( host_name , service_description ) in data [ 'services' ] : service = self . services . find_srv_by_name_and_hostname ( host_name , service_description ) if service is not None : self . restore_retention_data_item ( data [ 'services' ] [ ( host_name , service_description ) ] , service ) statsmgr . gauge ( 'retention.services' , len ( data [ 'services' ] ) ) logger . info ( '%d services restored from retention' , len ( data [ 'services' ] ) ) | Restore retention data |
19,633 | def restore_retention_data_item ( self , data , item ) : properties = item . __class__ . properties properties . update ( item . __class__ . running_properties ) for prop , entry in list ( properties . items ( ) ) : if not entry . retention : continue if prop not in data : continue restore_retention = entry . retention_restoration if restore_retention : setattr ( item , prop , restore_retention ( item , data [ prop ] ) ) else : setattr ( item , prop , data [ prop ] ) for notification_uuid in item . notifications_in_progress : notification = item . notifications_in_progress [ notification_uuid ] notification [ 'ref' ] = item . uuid my_notification = Notification ( params = notification ) item . notifications_in_progress [ notification_uuid ] = my_notification self . add ( my_notification ) item . update_in_checking ( ) for downtime_uuid in data [ 'downtimes' ] : downtime = data [ 'downtimes' ] [ downtime_uuid ] downtime [ 'ref' ] = item . uuid my_downtime = Downtime ( params = downtime ) if downtime [ 'comment_id' ] : if downtime [ 'comment_id' ] not in data [ 'comments' ] : downtime [ 'comment_id' ] = '' if 'uuid' in downtime [ 'comment_id' ] : data [ 'comments' ] . append ( downtime [ 'comment_id' ] ) downtime [ 'comment_id' ] = downtime [ 'comment_id' ] [ 'uuid' ] item . add_downtime ( my_downtime ) for comment_uuid in data [ 'comments' ] : comment = data [ 'comments' ] [ comment_uuid ] comment [ 'ref' ] = item . uuid item . add_comment ( Comment ( comment ) ) if item . acknowledgement is not None : item . acknowledgement [ 'ref' ] = item . uuid item . acknowledgement = Acknowledge ( item . acknowledgement ) new_notified_contacts = set ( ) new_notified_contacts_ids = set ( ) for contact_name in item . notified_contacts : contact = self . contacts . find_by_name ( contact_name ) if contact is not None : new_notified_contacts . add ( contact_name ) new_notified_contacts_ids . add ( contact . uuid ) item . notified_contacts = new_notified_contacts item . 
notified_contacts_ids = new_notified_contacts_ids | Restore data in item |
19,634 | def fill_initial_broks ( self , broker_name ) : broker_uuid = None logger . debug ( "My brokers: %s" , self . my_daemon . brokers ) for broker_link in list ( self . my_daemon . brokers . values ( ) ) : logger . debug ( "Searching broker: %s" , broker_link ) if broker_name == broker_link . name : broker_uuid = broker_link . uuid logger . info ( "Filling initial broks for: %s (%s)" , broker_name , broker_uuid ) break else : if self . pushed_conf : logger . error ( "Requested initial broks for an unknown broker: %s" , broker_name ) else : logger . info ( "Requested initial broks for an unknown broker: %s" , broker_name ) return 0 if self . my_daemon . brokers [ broker_uuid ] . initialized : logger . warning ( "The broker %s still got its initial broks..." , broker_name ) return 0 initial_broks_count = len ( self . my_daemon . brokers [ broker_uuid ] . broks ) brok = self . get_program_status_brok ( ) self . add_brok ( brok , broker_uuid ) initial_status_types = ( self . timeperiods , self . commands , self . contacts , self . contactgroups , self . hosts , self . hostgroups , self . services , self . servicegroups ) self . pushed_conf . skip_initial_broks = getattr ( self . pushed_conf , 'skip_initial_broks' , False ) logger . debug ( "Skipping initial broks? %s" , str ( self . pushed_conf . skip_initial_broks ) ) if not self . pushed_conf . skip_initial_broks : initial_status_types = ( self . realms , self . timeperiods , self . commands , self . notificationways , self . contacts , self . contactgroups , self . hosts , self . hostgroups , self . hostdependencies , self . services , self . servicegroups , self . servicedependencies , self . escalations ) for tab in initial_status_types : for item in tab : member_items = None if hasattr ( item , 'members' ) : member_items = getattr ( self , item . my_type . replace ( "group" , "s" ) ) brok = item . get_initial_status_brok ( member_items ) self . 
add_brok ( brok , broker_uuid ) brok = Brok ( { 'type' : 'initial_broks_done' , 'data' : { 'instance_id' : self . instance_id } } ) self . add_brok ( brok , broker_uuid ) final_broks_count = len ( self . my_daemon . brokers [ broker_uuid ] . broks ) self . my_daemon . brokers [ broker_uuid ] . initialized = True self . send_broks_to_modules ( ) self . raised_initial_broks = True logger . info ( "Created %d initial broks for %s" , final_broks_count - initial_broks_count , broker_name ) return final_broks_count - initial_broks_count | Create initial broks for a specific broker |
19,635 | def get_program_status_brok ( self , brok_type = 'program_status' ) : data = { "is_running" : True , "instance_id" : self . instance_id , "instance_name" : self . name , "last_alive" : time . time ( ) , "pid" : os . getpid ( ) , '_running' : self . get_scheduler_stats ( details = True ) , '_config' : { } , '_macros' : { } } cls = self . pushed_conf . __class__ for prop , entry in list ( cls . properties . items ( ) ) : if 'full_status' not in entry . fill_brok : continue data [ '_config' ] [ prop ] = self . pushed_conf . get_property_value_for_brok ( prop , cls . properties ) macro_resolver = MacroResolver ( ) macro_resolver . init ( self . pushed_conf ) for macro_name in sorted ( self . pushed_conf . macros ) : data [ '_macros' ] [ macro_name ] = macro_resolver . resolve_simple_macros_in_string ( "$%s$" % macro_name , [ ] , None , None ) logger . debug ( "Program status brok %s data: %s" , brok_type , data ) return Brok ( { 'type' : brok_type , 'data' : data } ) | Create a program status brok |
19,636 | def consume_results ( self ) : queue_size = self . waiting_results . qsize ( ) for _ in range ( queue_size ) : self . manage_results ( self . waiting_results . get ( ) ) for chk in list ( self . checks . values ( ) ) : if chk . status == ACT_STATUS_WAIT_CONSUME : logger . debug ( "Consuming: %s" , chk ) item = self . find_item_by_id ( chk . ref ) notification_period = None if getattr ( item , 'notification_period' , None ) is not None : notification_period = self . timeperiods [ item . notification_period ] dep_checks = item . consume_result ( chk , notification_period , self . hosts , self . services , self . timeperiods , self . macromodulations , self . checkmodulations , self . businessimpactmodulations , self . resultmodulations , self . checks , self . pushed_conf . log_active_checks and not chk . passive_check ) for check in dep_checks : logger . debug ( "-> raised a dependency check: %s" , chk ) self . add ( check ) have_resolved_checks = True while have_resolved_checks : have_resolved_checks = False for chk in list ( self . checks . values ( ) ) : if chk . status == ACT_STATUS_WAITING_ME : for dependent_checks in chk . depend_on_me : dependent_checks . depend_on . remove ( chk . uuid ) have_resolved_checks = True chk . status = ACT_STATUS_ZOMBIE for chk in list ( self . checks . values ( ) ) : if chk . status == ACT_STATUS_WAIT_DEPEND and not chk . depend_on : item = self . find_item_by_id ( chk . ref ) notification_period = None if getattr ( item , 'notification_period' , None ) is not None : notification_period = self . timeperiods [ item . notification_period ] dep_checks = item . consume_result ( chk , notification_period , self . hosts , self . services , self . timeperiods , self . macromodulations , self . checkmodulations , self . businessimpactmodulations , self . resultmodulations , self . checks , self . pushed_conf . log_active_checks and not chk . passive_check ) for check in dep_checks : self . 
add ( check ) | Handle results waiting in waiting_results list . Check ref will call consume result and update their status |
19,637 | def get_new_actions ( self ) : _t0 = time . time ( ) self . hook_point ( 'get_new_actions' ) statsmgr . timer ( 'hook.get-new-actions' , time . time ( ) - _t0 ) for elt in self . all_my_hosts_and_services ( ) : for action in elt . actions : logger . debug ( "Got a new action for %s: %s" , elt , action ) self . add ( action ) elt . actions = [ ] | Call get_new_actions hook point Iter over all hosts and services to add new actions in internal lists |
19,638 | def get_new_broks ( self ) : for elt in self . all_my_hosts_and_services ( ) : for brok in elt . broks : self . add ( brok ) elt . broks = [ ] for contact in self . contacts : for brok in contact . broks : self . add ( brok ) contact . broks = [ ] | Iter over all hosts and services to add new broks in internal lists |
19,639 | def send_broks_to_modules ( self ) : t00 = time . time ( ) nb_sent = 0 broks = [ ] for broker_link in list ( self . my_daemon . brokers . values ( ) ) : for brok in broker_link . broks : if not getattr ( brok , 'sent_to_externals' , False ) : brok . to_send = True broks . append ( brok ) if not broks : return logger . debug ( "sending %d broks to modules..." , len ( broks ) ) for mod in self . my_daemon . modules_manager . get_external_instances ( ) : logger . debug ( "Look for sending to module %s" , mod . get_name ( ) ) module_queue = mod . to_q if module_queue : to_send = [ b for b in broks if mod . want_brok ( b ) ] module_queue . put ( to_send ) nb_sent += len ( to_send ) for broker_link in list ( self . my_daemon . brokers . values ( ) ) : for brok in broker_link . broks : if not getattr ( brok , 'sent_to_externals' , False ) : brok . to_send = False brok . sent_to_externals = True logger . debug ( "Time to send %d broks (after %d secs)" , nb_sent , time . time ( ) - t00 ) | Put broks into module queues Only broks without sent_to_externals to True are sent Only modules that ask for broks will get some |
19,640 | def find_item_by_id ( self , object_id ) : if isinstance ( object_id , Item ) : return object_id if not isinstance ( object_id , string_types ) : logger . debug ( "Find an item by id, object_id is not int nor string: %s" , object_id ) return object_id for items in [ self . hosts , self . services , self . actions , self . checks , self . hostgroups , self . servicegroups , self . contacts , self . contactgroups ] : if object_id in items : return items [ object_id ] logger . error ( "Item with id %s not found" , str ( object_id ) ) return None | Get item based on its id or uuid |
19,641 | def before_run ( self ) : self . nb_checks = 0 self . nb_internal_checks = 0 self . nb_checks_launched = 0 self . nb_actions_launched = 0 self . nb_checks_results = 0 self . nb_checks_results_timeout = 0 self . nb_checks_results_passive = 0 self . nb_checks_results_active = 0 self . nb_actions_results = 0 self . nb_actions_results_timeout = 0 self . nb_actions_results_passive = 0 self . nb_broks_dropped = 0 self . nb_checks_dropped = 0 self . nb_actions_dropped = 0 self . nb_broks = 0 self . nb_notifications = 0 self . nb_event_handlers = 0 self . nb_external_commands = 0 self . ticks = 0 | Initialize the scheduling process |
19,642 | def setup_new_conf ( self ) : super ( Receiver , self ) . setup_new_conf ( ) with self . conf_lock : logger . debug ( "Got config: %s" , self . cur_conf ) if not self . have_modules : try : self . modules = unserialize ( self . cur_conf [ 'modules' ] , no_load = True ) except AlignakClassLookupException as exp : logger . error ( 'Cannot un-serialize modules configuration ' 'received from arbiter: %s' , exp ) if self . modules : logger . info ( "I received some modules configuration: %s" , self . modules ) self . have_modules = True self . do_load_modules ( self . modules ) self . modules_manager . start_external_instances ( ) else : logger . info ( "I do not have modules" ) global_conf = self . cur_conf . get ( 'global_conf' , None ) if not global_conf : logger . error ( "Received a configuration without any global_conf! " "This may hide a configuration problem with the " "realms and the manage_sub_realms of the satellites!" ) global_conf = { 'accept_passive_unknown_check_results' : False , 'log_external_commands' : True } self . external_commands_manager = ExternalCommandManager ( None , 'receiver' , self , global_conf . get ( 'accept_passive_unknown_check_results' , False ) , global_conf . get ( 'log_external_commands' , False ) ) logger . info ( "Initializing connection with my satellites:" ) my_satellites = self . get_links_of_type ( s_type = '' ) for satellite in list ( my_satellites . values ( ) ) : logger . info ( "- : %s/%s" , satellite . type , satellite . name ) if not self . daemon_connection_init ( satellite ) : logger . error ( "Satellite connection failed: %s" , satellite ) self . have_conf = True | Receiver custom setup_new_conf method |
19,643 | def get_external_commands_from_arbiters ( self ) : for arbiter_link_uuid in self . arbiters : link = self . arbiters [ arbiter_link_uuid ] if not link . active : logger . debug ( "The arbiter '%s' is not active, it is not possible to get " "its external commands!" , link . name ) continue try : logger . debug ( "Getting external commands from: %s" , link . name ) external_commands = link . get_external_commands ( ) if external_commands : logger . debug ( "Got %d commands from: %s" , len ( external_commands ) , link . name ) else : external_commands = [ ] for external_command in external_commands : self . add ( external_command ) except LinkError : logger . warning ( "Arbiter connection failed, I could not get external commands!" ) except Exception as exp : logger . error ( "Arbiter connection failed, I could not get external commands!" ) logger . exception ( "Exception: %s" , exp ) | Get external commands from our arbiters |
19,644 | def push_external_commands_to_schedulers ( self ) : if not self . unprocessed_external_commands : return commands_to_process = self . unprocessed_external_commands self . unprocessed_external_commands = [ ] logger . debug ( "Commands: %s" , commands_to_process ) logger . debug ( "Commands to process: %d commands" , len ( commands_to_process ) ) for ext_cmd in commands_to_process : cmd = self . external_commands_manager . resolve_command ( ext_cmd ) logger . debug ( "Resolved command: %s, result: %s" , ext_cmd . cmd_line , cmd ) if cmd and cmd [ 'global' ] : for scheduler_link_uuid in self . schedulers : self . schedulers [ scheduler_link_uuid ] . pushed_commands . append ( ext_cmd ) count_pushed_commands = 0 count_failed_commands = 0 for scheduler_link_uuid in self . schedulers : link = self . schedulers [ scheduler_link_uuid ] if not link . active : logger . debug ( "The scheduler '%s' is not active, it is not possible to push " "external commands to its connection!" , link . name ) continue commands = [ ext_cmd . cmd_line for ext_cmd in link . pushed_commands ] if not commands : logger . debug ( "The scheduler '%s' has no commands." , link . name ) continue logger . debug ( "Sending %d commands to scheduler %s" , len ( commands ) , link . name ) sent = [ ] try : sent = link . push_external_commands ( commands ) except LinkError : logger . warning ( "Scheduler connection failed, I could not push external commands!" ) link . pushed_commands = [ ] if sent : statsmgr . gauge ( 'external-commands.pushed.%s' % link . name , len ( commands ) ) count_pushed_commands = count_pushed_commands + len ( commands ) else : count_failed_commands = count_failed_commands + len ( commands ) statsmgr . gauge ( 'external-commands.failed.%s' % link . name , len ( commands ) ) self . external_commands . extend ( commands ) statsmgr . gauge ( 'external-commands.pushed.all' , count_pushed_commands ) statsmgr . 
gauge ( 'external-commands.failed.all' , count_failed_commands ) | Push received external commands to the schedulers |
19,645 | def do_loop_turn ( self ) : self . check_and_del_zombie_modules ( ) if self . watch_for_new_conf ( timeout = 0.05 ) : logger . info ( "I got a new configuration..." ) self . setup_new_conf ( ) _t0 = time . time ( ) self . get_objects_from_from_queues ( ) statsmgr . timer ( 'core.get-objects-from-queues' , time . time ( ) - _t0 ) _t0 = time . time ( ) self . get_external_commands_from_arbiters ( ) statsmgr . timer ( 'external-commands.got.time' , time . time ( ) - _t0 ) statsmgr . gauge ( 'external-commands.got.count' , len ( self . unprocessed_external_commands ) ) _t0 = time . time ( ) self . push_external_commands_to_schedulers ( ) statsmgr . timer ( 'external-commands.pushed.time' , time . time ( ) - _t0 ) _t0 = time . time ( ) self . hook_point ( 'tick' ) statsmgr . timer ( 'hook.tick' , time . time ( ) - _t0 ) | Receiver daemon main loop |
19,646 | def serialize ( self ) : res = super ( Check , self ) . serialize ( ) if 'depend_on' in res : del res [ 'depend_on' ] if 'depend_on_me' in res : del res [ 'depend_on_me' ] return res | This function serializes into a simple dict object . |
19,647 | def serialize ( self ) : res = { 'uuid' : self . uuid } for prop in self . __class__ . properties : if not hasattr ( self , prop ) : continue res [ prop ] = getattr ( self , prop ) if isinstance ( self . __class__ . properties [ prop ] , SetProp ) : res [ prop ] = list ( getattr ( self , prop ) ) return res | This function serializes into a simple dictionary object . |
19,648 | def fill_default ( self ) : for prop , entry in self . __class__ . properties . items ( ) : if hasattr ( self , prop ) : continue if not hasattr ( entry , 'default' ) or entry . default is NONE_OBJECT : continue if hasattr ( entry . default , '__iter__' ) : setattr ( self , prop , copy ( entry . default ) ) else : setattr ( self , prop , entry . default ) | Define the object properties with a default value when the property is not yet defined |
19,649 | def fill_predictive_missing_parameters ( self ) : if hasattr ( self , 'host_name' ) and not hasattr ( self , 'address' ) : self . address = self . host_name if hasattr ( self , 'host_name' ) and not hasattr ( self , 'alias' ) : self . alias = self . host_name if self . initial_state == 'd' : self . state = 'DOWN' elif self . initial_state == 'x' : self . state = 'UNREACHABLE' | Fill address with host_name if not already set and define state with initial_state |
19,650 | def get_groupnames ( self , hostgroups ) : group_names = [ ] for hostgroup_id in self . hostgroups : hostgroup = hostgroups [ hostgroup_id ] group_names . append ( hostgroup . get_name ( ) ) return ',' . join ( sorted ( group_names ) ) | Get names of the host s hostgroups |
19,651 | def get_groupaliases ( self , hostgroups ) : group_aliases = [ ] for hostgroup_id in self . hostgroups : hostgroup = hostgroups [ hostgroup_id ] group_aliases . append ( hostgroup . alias ) return ',' . join ( sorted ( group_aliases ) ) | Get aliases of the host s hostgroups |
19,652 | def set_state_from_exit_status ( self , status , notif_period , hosts , services ) : now = time . time ( ) cls = self . __class__ if ( cls . enable_problem_impacts_states_change and self . is_impact and not self . state_changed_since_impact ) : self . last_state = self . state_before_impact else : self . last_state = self . state if status == 0 : self . state = u'UP' self . state_id = 0 self . last_time_up = int ( self . last_state_update ) state_code = 'u' elif status in ( 2 , 3 ) : self . state = u'DOWN' self . state_id = 1 self . last_time_down = int ( self . last_state_update ) state_code = 'd' elif status == 4 : self . state = u'UNREACHABLE' self . state_id = 4 self . last_time_unreachable = int ( self . last_state_update ) state_code = 'x' else : self . state = u'DOWN' self . state_id = 1 self . last_time_down = self . last_state_update state_code = 'd' if state_code in self . flap_detection_options : self . add_flapping_change ( self . state != self . last_state ) self . update_flapping ( notif_period , hosts , services ) if self . state != self . last_state and not ( self . state == "DOWN" and self . last_state == "UNREACHABLE" ) : self . last_state_change = self . last_state_update self . duration_sec = now - self . last_state_change | Set the state in UP DOWN or UNREACHABLE according to the status of a check result . |
19,653 | def is_state ( self , status ) : if status == self . state : return True if status == 'o' and self . state == u'UP' : return True if status == 'd' and self . state == u'DOWN' : return True if status in [ 'u' , 'x' ] and self . state == u'UNREACHABLE' : return True return False | Return if status match the current host status |
19,654 | def last_time_non_ok_or_up ( self ) : non_ok_times = [ x for x in [ self . last_time_down ] if x > self . last_time_up ] if not non_ok_times : last_time_non_ok = 0 else : last_time_non_ok = min ( non_ok_times ) return last_time_non_ok | Get the last time the host was in a non - OK state |
19,655 | def notification_is_blocked_by_contact ( self , notifways , timeperiods , notif , contact ) : return not contact . want_host_notification ( notifways , timeperiods , self . last_chk , self . state , notif . type , self . business_impact , notif . command_call ) | Check if the notification is blocked by this contact . |
19,656 | def _tot_services_by_state ( self , services , state ) : return str ( sum ( 1 for s in self . services if services [ s ] . state_id == state ) ) | Get the number of service in the specified state |
19,657 | def get_overall_state ( self , services ) : overall_state = 0 if not self . monitored : overall_state = 5 elif self . acknowledged : overall_state = 1 elif self . downtimed : overall_state = 2 elif self . state_type == 'HARD' : if self . state == 'UNREACHABLE' : overall_state = 3 elif self . state == 'DOWN' : overall_state = 4 if overall_state <= 2 : for service in self . services : if service in services : service = services [ service ] if service . overall_state_id < 5 : overall_state = max ( overall_state , service . overall_state_id ) return overall_state | Get the host overall state including the host self status and the status of its services |
19,658 | def linkify_h_by_h ( self ) : for host in self : new_parents = [ ] for parent in getattr ( host , 'parents' , [ ] ) : parent = parent . strip ( ) o_parent = self . find_by_name ( parent ) if o_parent is not None : new_parents . append ( o_parent . uuid ) else : err = "the parent '%s' for the host '%s' is unknown!" % ( parent , host . get_name ( ) ) self . add_error ( err ) host . parents = new_parents | Link hosts with their parents |
19,659 | def linkify_h_by_hg ( self , hostgroups ) : for host in self : new_hostgroups = [ ] if hasattr ( host , 'hostgroups' ) and host . hostgroups != [ ] : hgs = [ n . strip ( ) for n in host . hostgroups if n . strip ( ) ] for hg_name in hgs : hostgroup = hostgroups . find_by_name ( hg_name ) if hostgroup is not None : new_hostgroups . append ( hostgroup . uuid ) else : err = ( "the hostgroup '%s' of the host '%s' is " "unknown" % ( hg_name , host . host_name ) ) host . add_error ( err ) host . hostgroups = new_hostgroups | Link hosts with hostgroups |
19,660 | def apply_dependencies ( self ) : for host in self : for parent_id in getattr ( host , 'parents' , [ ] ) : if parent_id is None : continue parent = self [ parent_id ] if parent . active_checks_enabled : host . act_depend_of . append ( ( parent_id , [ 'd' , 'x' , 's' , 'f' ] , '' , True ) ) parent . act_depend_of_me . append ( ( host . uuid , [ 'd' , 'x' , 's' , 'f' ] , '' , True ) ) parent . child_dependencies . add ( host . uuid ) host . parent_dependencies . add ( parent_id ) | Loop on hosts and register dependency between parent and son |
19,661 | def find_hosts_that_use_template ( self , tpl_name ) : return [ h . host_name for h in self if tpl_name in h . tags if hasattr ( h , "host_name" ) ] | Find hosts that use the template defined in argument tpl_name |
19,662 | def is_me ( self ) : logger . info ( "And arbiter is launched with the hostname:%s " "from an arbiter point of view of addr:%s" , self . host_name , socket . getfqdn ( ) ) return self . host_name == socket . getfqdn ( ) or self . host_name == socket . gethostname ( ) | Check if parameter name if same than name of this object |
19,663 | def do_not_run ( self ) : logger . debug ( "[%s] do_not_run" , self . name ) try : self . con . get ( '_do_not_run' ) return True except HTTPClientConnectionException as exp : self . add_failed_check_attempt ( "Connection error when " "sending do not run: %s" % str ( exp ) ) self . set_dead ( ) except HTTPClientTimeoutException as exp : self . add_failed_check_attempt ( "Connection timeout when " "sending do not run: %s" % str ( exp ) ) except HTTPClientException as exp : self . add_failed_check_attempt ( "Error when " "sending do not run: %s" % str ( exp ) ) return False | Check if satellite running or not If not try to run |
19,664 | def get_broks ( self , broker_name ) : logger . debug ( "Broker %s requests my broks list" , broker_name ) res = [ ] if not broker_name : return res for broker_link in list ( self . brokers . values ( ) ) : if broker_name == broker_link . name : for brok in sorted ( broker_link . broks , key = lambda x : x . creation_time ) : if getattr ( brok , 'sent_to_externals' , False ) : res . append ( brok ) brok . got = True broker_link . broks = [ b for b in broker_link . broks if not getattr ( b , 'got' , False ) ] logger . debug ( "Providing %d broks to %s" , len ( res ) , broker_name ) break else : logger . warning ( "Got a brok request from an unknown broker: %s" , broker_name ) return res | Send broks to a specific broker |
19,665 | def do_loop_turn ( self ) : if not self . first_scheduling : logger . info ( "First scheduling launched" ) _t0 = time . time ( ) self . sched . initial_program_status ( ) self . sched . schedule ( ) statsmgr . timer ( 'first_scheduling' , time . time ( ) - _t0 ) logger . info ( "First scheduling done" ) for satellite in [ s for s in list ( self . pollers . values ( ) ) if s . passive ] : if not self . daemon_connection_init ( satellite ) : logger . error ( "Passive satellite connection failed: %s" , satellite ) for satellite in [ s for s in list ( self . reactionners . values ( ) ) if s . passive ] : if not self . daemon_connection_init ( satellite ) : logger . error ( "Passive satellite connection failed: %s" , satellite ) self . sched . ticks = 0 self . first_scheduling = True if self . sched . pushed_conf : if not self . sched . must_schedule : self . sched . start_scheduling ( ) self . sched . before_run ( ) self . sched . run ( ) else : logger . warning ( "#%d - No monitoring configuration to scheduler..." , self . loop_count ) | Scheduler loop turn |
19,666 | def get_managed_configurations ( self ) : res = { } if self . sched . pushed_conf and self . cur_conf and 'instance_id' in self . cur_conf : res [ self . cur_conf [ 'instance_id' ] ] = { 'hash' : self . cur_conf [ 'hash' ] , 'push_flavor' : self . cur_conf [ 'push_flavor' ] , 'managed_conf_id' : self . cur_conf [ 'managed_conf_id' ] } logger . debug ( "Get managed configuration: %s" , res ) return res | Get the configurations managed by this scheduler |
19,667 | def clean_previous_run ( self ) : super ( Alignak , self ) . clean_previous_run ( ) self . pollers . clear ( ) self . reactionners . clear ( ) self . brokers . clear ( ) | Clean variables from previous configuration |
19,668 | def get_monitoring_problems ( self ) : res = { } if not self . sched : return res scheduler_stats = self . sched . get_scheduler_stats ( details = True ) if 'livesynthesis' in scheduler_stats : res [ 'livesynthesis' ] = scheduler_stats [ 'livesynthesis' ] if 'problems' in scheduler_stats : res [ 'problems' ] = scheduler_stats [ 'problems' ] return res | Get the current scheduler livesynthesis |
19,669 | def merge_extinfo ( service , extinfo ) : properties = [ 'notes' , 'notes_url' , 'icon_image' , 'icon_image_alt' ] for prop in properties : if getattr ( service , prop ) == '' and getattr ( extinfo , prop ) != '' : setattr ( service , prop , getattr ( extinfo , prop ) ) | Merge extended host information into a service |
19,670 | def get_command_and_args ( self ) : r p_call = self . call . replace ( r'\!' , ' PROTECT_EXCLAMATION ' ) tab = p_call . split ( '!' ) return tab [ 0 ] . strip ( ) , [ s . replace ( ' PROTECT_EXCLAMATION ' , '!' ) for s in tab [ 1 : ] ] | r We want to get the command and the args with ! splitting . but don t forget to protect against the \ ! to avoid splitting on them |
19,671 | def get_a_satellite_link ( sat_type , sat_dict ) : cls = get_alignak_class ( 'alignak.objects.%slink.%sLink' % ( sat_type , sat_type . capitalize ( ) ) ) return cls ( params = sat_dict , parsing = False ) | Get a SatelliteLink object for a given satellite type and a dictionary |
19,672 | def get_livestate ( self ) : livestate = 0 if self . active : if not self . reachable : livestate = 1 elif not self . alive : livestate = 2 else : livestate = 3 livestate_output = "%s/%s is %s" % ( self . type , self . name , [ "up and running." , "warning because not reachable." , "critical because not responding." , "not active by configuration." ] [ livestate ] ) return ( livestate , livestate_output ) | Get the SatelliteLink live state . |
19,673 | def get_and_clear_context ( self ) : res = ( self . broks , self . actions , self . wait_homerun , self . pushed_commands ) self . broks = [ ] self . actions = { } self . wait_homerun = { } self . pushed_commands = [ ] return res | Get and clean all of our broks actions external commands and homerun |
19,674 | def prepare_for_conf ( self ) : logger . debug ( "- preparing: %s" , self ) self . cfg = { 'self_conf' : self . give_satellite_cfg ( ) , 'schedulers' : { } , 'arbiters' : { } } logger . debug ( "- prepared: %s" , self . cfg ) | Initialize the pushed configuration dictionary with the inner properties that are to be propagated to the satellite link . |
19,675 | def give_satellite_cfg ( self ) : res = { } properties = self . __class__ . properties for prop , entry in list ( properties . items ( ) ) : if hasattr ( self , prop ) and entry . to_send : res [ prop ] = getattr ( self , prop ) return res | Get the default information for a satellite . |
19,676 | def give_satellite_json ( self ) : daemon_properties = [ 'type' , 'name' , 'uri' , 'spare' , 'configuration_sent' , 'realm_name' , 'manage_sub_realms' , 'active' , 'reachable' , 'alive' , 'passive' , 'last_check' , 'polling_interval' , 'max_check_attempts' ] ( livestate , livestate_output ) = self . get_livestate ( ) res = { "livestate" : livestate , "livestate_output" : livestate_output } for sat_prop in daemon_properties : res [ sat_prop ] = getattr ( self , sat_prop , 'not_yet_defined' ) return res | Get the json information for a satellite . |
19,677 | def manages ( self , cfg_part ) : logger . debug ( "Do I (%s/%s) manage: %s, my managed configuration(s): %s" , self . type , self . name , cfg_part , self . cfg_managed ) if not self . cfg_managed : logger . info ( "I (%s/%s) do not manage (yet) any configuration!" , self . type , self . name ) return False for managed_cfg in list ( self . cfg_managed . values ( ) ) : if managed_cfg [ 'managed_conf_id' ] == cfg_part . instance_id and managed_cfg [ 'push_flavor' ] == cfg_part . push_flavor : logger . debug ( "I do manage this configuration: %s" , cfg_part ) break else : logger . warning ( "I (%s/%s) do not manage this configuration: %s" , self . type , self . name , cfg_part ) return False return True | Tell if the satellite is managing this configuration part |
19,678 | def set_alive ( self ) : was_alive = self . alive self . alive = True self . reachable = True self . attempt = 0 if not was_alive : logger . info ( "Setting %s satellite as alive :)" , self . name ) self . broks . append ( self . get_update_status_brok ( ) ) | Set alive reachable and reset attempts . If we change state raise a status brok update |
19,679 | def add_failed_check_attempt ( self , reason = '' ) : self . reachable = False self . attempt = self . attempt + 1 logger . debug ( "Failed attempt for %s (%d/%d), reason: %s" , self . name , self . attempt , self . max_check_attempts , reason ) if self . alive : if not self . stopping : logger . warning ( "Add failed attempt for %s (%d/%d) - %s" , self . name , self . attempt , self . max_check_attempts , reason ) else : logger . info ( "Stopping... failed attempt for %s (%d/%d) - also probably stopping" , self . name , self . attempt , self . max_check_attempts ) if self . attempt >= self . max_check_attempts : if not self . stopping : logger . warning ( "Set %s as dead, too much failed attempts (%d), last problem is: %s" , self . name , self . max_check_attempts , reason ) else : logger . info ( "Stopping... set %s as dead, too much failed attempts (%d)" , self . name , self . max_check_attempts ) self . set_dead ( ) | Set the daemon as unreachable and add a failed attempt if we reach the maximum attempts set the daemon as dead |
19,680 | def valid_connection ( * outer_args , ** outer_kwargs ) : def decorator ( func ) : def decorated ( * args , ** kwargs ) : link = args [ 0 ] if not link . con : raise LinkError ( "The connection is not created for %s" % link . name ) if not link . running_id : raise LinkError ( "The connection is not initialized for %s" % link . name ) return func ( * args , ** kwargs ) return decorated return decorator | Check if the daemon connection is established and valid |
19,681 | def communicate ( * outer_args , ** outer_kwargs ) : def decorator ( func ) : def decorated ( * args , ** kwargs ) : fn_name = func . __name__ link = args [ 0 ] if not link . alive : logger . warning ( "%s is not alive for %s" , link . name , fn_name ) return None try : if not link . reachable : raise LinkError ( "The %s %s is not reachable" % ( link . type , link . name ) ) logger . debug ( "[%s] Calling: %s, %s, %s" , link . name , fn_name , args , kwargs ) return func ( * args , ** kwargs ) except HTTPClientConnectionException as exp : if not link . stopping : logger . warning ( "A daemon (%s/%s) that we must be related with " "cannot be connected: %s" , link . type , link . name , exp ) else : logger . info ( "Stopping... daemon (%s/%s) cannot be connected. " "It is also probably stopping or yet stopped." , link . type , link . name ) link . set_dead ( ) except ( LinkError , HTTPClientTimeoutException ) as exp : link . add_failed_check_attempt ( "Connection timeout " "with '%s': %s" % ( fn_name , str ( exp ) ) ) return False except HTTPClientDataException as exp : err = "Some daemons that we must be related with " "have some interface problems. Sorry, I bail out" logger . error ( err ) os . sys . exit ( err ) except HTTPClientException as exp : link . add_failed_check_attempt ( "Error with '%s': %s" % ( fn_name , str ( exp ) ) ) return None return decorated return decorator | Check if the daemon connection is authorized and valid |
19,682 | def stop_request ( self , stop_now = False ) : logger . debug ( "Sending stop request to %s, stop now: %s" , self . name , stop_now ) res = self . con . get ( 'stop_request' , { 'stop_now' : '1' if stop_now else '0' } ) return res | Send a stop request to the daemon |
19,683 | def update_infos ( self , forced = False , test = False ) : logger . debug ( "Update informations, forced: %s" , forced ) now = time . time ( ) if not forced and self . last_check and self . last_check + self . polling_interval > now : logger . debug ( "Too early to ping %s, ping period is %ds!, last check: %d, now: %d" , self . name , self . polling_interval , self . last_check , now ) return None self . get_conf ( test = test ) self . last_check = time . time ( ) self . broks . append ( self . get_update_status_brok ( ) ) return self . cfg_managed | Update satellite info each self . polling_interval seconds so we smooth arbiter actions for just useful actions . |
19,684 | def push_actions ( self , actions , scheduler_instance_id ) : logger . debug ( "Pushing %d actions from %s" , len ( actions ) , scheduler_instance_id ) return self . con . post ( '_push_actions' , { 'actions' : actions , 'scheduler_instance_id' : scheduler_instance_id } , wait = True ) | Post the actions to execute to the satellite . Indeed a scheduler post its checks to a poller and its actions to a reactionner . |
19,685 | def linkify ( self , modules ) : logger . debug ( "Linkify %s with %s" , self , modules ) self . linkify_s_by_module ( modules ) | Link modules and Satellite links |
def get_return_from(self, notif):
    """Copy the execution result of another notification onto this one.

    Setter of the exit_status and execution_time attributes.

    :param notif: the notification to take the result from
    :return: None
    """
    for result_attr in ('exit_status', 'execution_time'):
        setattr(self, result_attr, getattr(notif, result_attr))
def get_initial_status_brok(self):
    """Build an initial status brok for this notification.

    The brok carries the object's uuid plus its 'full_status' data.

    :return: a 'notification_raise' Brok
    """
    brok_data = {'uuid': self.uuid}
    self.fill_data_brok_from(brok_data, 'full_status')
    return Brok({'type': 'notification_raise', 'data': brok_data})
def manage_brok(self, brok):
    """Prepare a brok and hand it to every internal broker module.

    A module that raises is tagged for a later restart instead of
    breaking the brok distribution.

    :param brok: the brok to dispatch
    :return: None
    """
    brok.prepare()
    for mod in self.modules_manager.get_internal_instances():
        try:
            start = time.time()
            mod.manage_brok(brok)
            # Per-module timing statistic for the brok management.
            statsmgr.timer('manage-broks.internal.%s' % mod.get_name(), time.time() - start)
        except Exception as exp:
            logger.warning("The module %s raised an exception: %s, "
                           "I'm tagging it to restart later", mod.get_name(), str(exp))
            logger.exception(exp)
            self.modules_manager.set_to_restart(mod)
def get_internal_broks(self):
    """Move all internally raised broks to the external broks queue.

    The internal queue is emptied once its content has been appended.

    :return: None
    """
    pending = self.internal_broks
    statsmgr.gauge('get-new-broks-count.broker', len(pending))
    self.external_broks.extend(pending)
    self.internal_broks = []
def get_arbiter_broks(self):
    """Move the broks received from the arbiters to the external queue.

    The arbiter_broks list can be pushed by the arbiter without the global
    lock, so access is protected with arbiter_broks_lock.

    :return: None
    """
    with self.arbiter_broks_lock:
        incoming = self.arbiter_broks
        statsmgr.gauge('get-new-broks-count.arbiter', len(incoming))
        self.external_broks.extend(incoming)
        self.arbiter_broks = []
def get_new_broks(self):
    """Collect new broks from all our satellites.

    Iterates over schedulers, pollers, reactionners and receivers, tags
    each received brok with the emitting satellite instance_id and appends
    everything to the external broks queue.

    :return: None
    """
    satellite_maps = (self.schedulers, self.pollers, self.reactionners, self.receivers)
    for satellite_map in satellite_maps:
        # list() copy: the map may change while we iterate.
        for link in list(satellite_map.values()):
            logger.debug("Getting broks from %s", link)
            start = time.time()
            try:
                new_broks = link.get_broks(self.name)
            except LinkError:
                logger.warning("Daemon %s connection failed, I could not get the broks!",
                               link)
                continue
            if not new_broks:
                continue
            logger.debug("Got %d Broks from %s in %s",
                         len(new_broks), link.name, time.time() - start)
            statsmgr.gauge('get-new-broks-count.%s' % (link.name), len(new_broks))
            statsmgr.timer('get-new-broks-time.%s' % (link.name), time.time() - start)
            # Tag each brok with its emitter before queuing it.
            for brok in new_broks:
                brok.instance_id = link.instance_id
            self.external_broks.extend(new_broks)
def add_group_members(self, members):
    """Add one or more members to the group members list.

    A single (non-list) member is accepted and wrapped into a list.

    Bug fixed: when the group had no members yet, the caller's list object
    was stored by reference, so a later call would silently extend the
    caller's own list.  A copy is now stored instead.

    :param members: member name or list of member names to add
    :type members: str | list
    :return: None
    """
    if not isinstance(members, list):
        members = [members]
    if not getattr(self, 'group_members', None):
        # Store a copy so later extend() calls never mutate the caller's list.
        self.group_members = list(members)
    else:
        self.group_members.extend(members)
def get_realms_by_explosion(self, realms):
    """Get all members of this realm, including sub-realm members, recursively.

    Walks the realm_members tree.  Returns None when a loop is detected in
    the realm definitions (and records a configuration error); unknown
    member names are recorded via add_unknown_members and skipped.

    :param realms: the realms collection, used to resolve member names
    :return: self.all_sub_members on success, None on loop detection
    """
    # rec_tag is set on every realm already visited in the current
    # exploration (the caller resets it before starting): seeing it
    # already True means the realm hierarchy contains a cycle.
    if getattr(self, 'rec_tag', False):
        self.add_error("Error: there is a loop in the realm definition %s" % self.get_name())
        return None
    self.rec_tag = True
    self.realm_members = sorted(self.realm_members)
    for member in self.realm_members:
        realm = realms.find_by_name(member)
        if not realm:
            # Unknown member name: record it and go on with the others.
            self.add_unknown_members(member)
            continue
        children = realm.get_realms_by_explosion(realms)
        if children is None:
            # A loop was detected deeper in the tree: wipe our own members
            # and propagate the failure upwards.
            self.all_sub_members = []
            self.realm_members = []
            return None
    # NOTE(review): children are not accumulated here on success —
    # all_sub_members is presumably filled elsewhere (see set_level);
    # this call only validates members and detects loops.
    return self.all_sub_members
def set_level(self, level, realms):
    """Set this realm's level in the realms hierarchy, recursively.

    Rebuilds all_sub_members (uuids) and all_sub_members_names for this
    realm from its sorted realm_members, descending into each child with
    level + 1 and merging the grand-children without duplicates.

    :param level: this realm's depth in the hierarchy (0 for a top realm)
    :type level: int
    :param realms: the realms collection, used to resolve member names
    :return: self.all_sub_members_names
    """
    self.level = level
    if self.level:
        logger.info(" %s %s", '+' * self.level, self.get_name())
    else:
        logger.info("- %s", self.get_name())
    self.all_sub_members = []
    self.all_sub_members_names = []
    for member_name in sorted(self.realm_members):
        member = realms.find_by_name(member_name)
        if not member:
            continue
        self.all_sub_members.append(member.uuid)
        self.all_sub_members_names.append(member.get_name())
        # Recurse first, then merge the descendants we do not know yet.
        for descendant_name in member.set_level(self.level + 1, realms):
            if descendant_name in self.all_sub_members_names:
                continue
            descendant = realms.find_by_name(descendant_name)
            if descendant:
                self.all_sub_members_names.append(descendant.get_name())
                self.all_sub_members.append(descendant.uuid)
    return self.all_sub_members_names
def get_all_subs_satellites_by_type(self, sat_type, realms):
    """Get all satellites of the wanted type in this realm, recursively.

    Starts from this realm's own satellites of the given type and appends
    the ones of every sub-realm (and theirs, transitively).

    :param sat_type: satellite attribute name (e.g. 'pollers')
    :type sat_type: str
    :param realms: mapping of realm uuid to realm object
    :return: list of satellites of the requested type
    """
    # Shallow copy: never mutate the realm's own satellite list.
    satellites = copy.copy(getattr(self, sat_type))
    for sub_realm_uuid in self.all_sub_members:
        sub_realm = realms[sub_realm_uuid]
        satellites.extend(sub_realm.get_all_subs_satellites_by_type(sat_type, realms))
    return satellites
def get_links_for_a_broker(self, pollers, reactionners, receivers, realms,
                           manage_sub_realms=False):
    """Get a configuration dictionary with pollers, reactionners and
    receivers links for a broker.

    :param pollers: mapping of poller id to poller link
    :param reactionners: mapping of reactionner id to reactionner link
    :param receivers: mapping of receiver id to receiver link
    :param realms: the realms collection (used for sub-realms only)
    :param manage_sub_realms: True to also include the satellites of all
        sub-realms, recursively
    :return: dict with 'pollers', 'reactionners' and 'receivers' sections,
        each mapping satellite uuid to its satellite configuration
    """
    cfg = {'pollers': {}, 'reactionners': {}, 'receivers': {}}
    sections = (('pollers', pollers), ('reactionners', reactionners),
                ('receivers', receivers))
    # Our own satellites first...
    for sat_type, satellites in sections:
        for sat_id in getattr(self, sat_type):
            satellite = satellites[sat_id]
            cfg[sat_type][satellite.uuid] = satellite.give_satellite_cfg()
    # ... then, on demand, every satellite of our sub-realms.
    if manage_sub_realms:
        for sat_type, satellites in sections:
            for sat_id in self.get_all_subs_satellites_by_type(sat_type, realms):
                satellite = satellites[sat_id]
                cfg[sat_type][satellite.uuid] = satellite.give_satellite_cfg()
    return cfg
def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
    """Get a configuration dictionary with pollers, reactionners and
    brokers links for a scheduler.

    Candidates are the realm's own satellite ids plus the potential
    satellites of each type; ids found in the provided mappings are
    resolved to their link object, other candidates are used as-is.

    :param pollers: mapping of poller id to poller link
    :param reactionners: mapping of reactionner id to reactionner link
    :param brokers: mapping of broker id to broker link
    :return: dict with 'pollers', 'reactionners' and 'brokers' sections,
        each mapping satellite uuid to its satellite configuration
    """
    cfg = {'pollers': {}, 'reactionners': {}, 'brokers': {}}
    sections = (('pollers', pollers, "poller"),
                ('reactionners', reactionners, "reactionner"),
                ('brokers', brokers, "broker"))
    try:
        for section, satellites, sat_type in sections:
            candidates = (getattr(self, section)
                          + self.get_potential_satellites_by_type(satellites, sat_type))
            for candidate in candidates:
                if candidate in satellites:
                    candidate = satellites[candidate]
                cfg[section][candidate.uuid] = candidate.give_satellite_cfg()
    except Exception as exp:
        logger.exception("realm.get_links_for_a_scheduler: %s", exp)
    return cfg
def explode(self):
    """Explode realms with each realm_members and higher_realms to get all
    the sub-realms of every realm.

    First registers each realm as a member of its declared higher realms,
    then explodes every realm; the temporary rec_tag attribute is used to
    detect loops in the realm definitions and is removed afterwards.

    :return: None
    """
    # Promote higher_realms declarations into the parents' realm_members.
    with_parents = [realm for realm in self if realm.higher_realms]
    for realm in with_parents:
        for parent_name in realm.higher_realms:
            parent = self.find_by_name(parent_name)
            if parent:
                parent.realm_members.append(realm.get_name())
    # Explode each realm; rec_tag is reset before every exploration so
    # that loop detection starts from a clean state.
    for realm in self:
        for other in self:
            other.rec_tag = False
        realm.get_realms_by_explosion(self)
    # Drop the temporary loop-detection attribute.
    for other in self:
        del other.rec_tag
def get_default(self, check=False):
    """Get the default realm.

    Resolution rules:
    - if no realm is flagged as default, the alphabetically-first realm is
      promoted to default (an error is recorded when check is True);
    - if several realms are flagged as default, only the alphabetically
      first of them keeps the flag (a warning is recorded when check is
      True);
    - if the collection is empty, an error is recorded and None returned.

    The chosen realm is also stored as self.default.

    :param check: True to record configuration errors/warnings while fixing
    :type check: bool
    :return: the default realm object, or None when no realm exists
    """
    # Collect realms flagged as default, lowest hierarchy level first.
    found = []
    for realm in sorted(self, key=lambda r: r.level):
        if getattr(realm, 'default', False):
            found.append(realm)
    if not found:
        # No realm is flagged: promote the alphabetically-first one.
        found_names = sorted([r.get_name() for r in self])
        if not found_names:
            self.add_error("No realm is defined in this configuration! "
                           "This should not be possible!")
            return None
        default_realm_name = found_names[0]
        default_realm = self.find_by_name(default_realm_name)
        default_realm.default = True
        found.append(default_realm)
        if check:
            self.add_error("No realm is defined as the default one! "
                           "I set %s as the default realm" % default_realm_name)
    default_realm = found[0]
    if len(found) > 1:
        # Several realms are flagged: keep the alphabetically-first only
        # and demote the others.
        found_names = sorted([r.get_name() for r in found])
        default_realm_name = found_names[0]
        default_realm = self.find_by_name(default_realm_name)
        for realm in found:
            if realm.get_name() != default_realm_name:
                realm.default = False
        if check:
            self.add_warning("More than one realm is defined as the default one: %s. "
                             "I set %s as the default realm."
                             % (','.join(found_names), default_realm_name))
    self.default = default_realm
    return default_realm
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.