idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
def reload_configuration(self):
    """Ask the arbiter to reload the monitored configuration.

    Only the Master arbiter may trigger a reload; any other arbiter
    refuses the request and keeps running.

    :return: dict with a '_status' ('OK'/'ERR') and a '_message'
    """
    if not self.app.is_master:
        message = u"I received a request to reload the monitored configuration. " \
                  u"I am not the Master arbiter, I ignore and continue to run."
        logger.warning(message)
        return {'_status': u'ERR', '_message': message}

    message = "I received a request to reload the monitored configuration"
    if self.app.loading_configuration:
        # Fix: the original concatenation lacked a separating space and
        # produced "...configurationand I am still reloading...".
        message = message + " and I am still reloading the monitored configuration ;)"
    else:
        self.app.need_config_reload = True

    logger.warning(message)
    return {'_status': u'OK', '_message': message}
def command(self, command=None, timestamp=None, element=None, host=None,
            service=None, user=None, parameters=None):
    """Request the execution of an external command.

    Parameters may come from the query string or, for a POST, from the
    JSON body. The command line is built from the provided pieces,
    optionally prefixed with a '[timestamp] ' marker, then queued on
    the arbiter as an ExternalCommand.

    :return: dict with '_status', '_message' and, on success, 'command'
    """
    if cherrypy.request.method in ["POST"]:
        if not cherrypy.request.json:
            return {'_status': u'ERR',
                    '_message': u'You must POST parameters on this endpoint.'}

        if command is None:
            # Take every parameter from the POSTed JSON body
            try:
                command = cherrypy.request.json.get('command', None)
                timestamp = cherrypy.request.json.get('timestamp', None)
                element = cherrypy.request.json.get('element', None)
                host = cherrypy.request.json.get('host', None)
                service = cherrypy.request.json.get('service', None)
                user = cherrypy.request.json.get('user', None)
                parameters = cherrypy.request.json.get('parameters', None)
            except AttributeError:
                return {'_status': u'ERR', '_message': u'Missing command parameters'}

    if not command:
        return {'_status': u'ERR', '_message': u'Missing command parameter'}

    # Upper-case the command name (the first semicolon-separated field)
    fields = split_semicolon(command)
    cmd_line = command.replace(fields[0], fields[0].upper())

    if timestamp:
        try:
            timestamp = int(timestamp)
        except ValueError:
            return {'_status': u'ERR', '_message': u'Timestamp must be an integer value'}
        cmd_line = '[%d] %s' % (timestamp, cmd_line)

    if host or service or user:
        if host:
            cmd_line = '%s;%s' % (cmd_line, host)
        if service:
            cmd_line = '%s;%s' % (cmd_line, service)
        if user:
            cmd_line = '%s;%s' % (cmd_line, user)
    elif element:
        if '/' in element:
            # 'host/service' shorthand -> 'host;service'
            element = element.replace('/', ';', 1)
        cmd_line = '%s;%s' % (cmd_line, element)

    if parameters:
        cmd_line = '%s;%s' % (cmd_line, parameters)

    logger.warning("Got an external command: %s", cmd_line)
    self.app.add(ExternalCommand(cmd_line))
    return {'_status': u'OK',
            '_message': u"Got command: %s" % cmd_line,
            'command': cmd_line}
def monitoring_problems(self):
    """Get the Alignak detailed monitoring status.

    Polls each known scheduler for its current problems and gathers the
    answers, tagged per scheduler name, into the identity payload.
    """
    res = self.identity()
    res['problems'] = {}
    for sched_link in self.app.conf.schedulers:
        answer = sched_link.con.get('monitoring_problems', wait=True)
        my_part = {}
        if '_freshness' in answer:
            my_part['_freshness'] = answer['_freshness']
        if 'problems' in answer:
            my_part['problems'] = answer['problems']
        res['problems'][sched_link.name] = my_part

    res['_freshness'] = int(time.time())
    return res
def livesynthesis(self):
    """Get the Alignak live synthesis merged into the daemon identity."""
    synthesis = self.identity()
    synthesis.update(self.app.get_livesynthesis())
    return synthesis
def object(self, o_type, o_name=None):
    """Get a monitored object from the arbiter.

    Each scheduler is queried in turn; the first answer carrying a
    'content' key is returned as-is.

    NOTE: the name shadows the `object` builtin, but it is part of the
    exposed HTTP interface and must not be renamed.
    """
    for sched_link in self.app.conf.schedulers:
        answer = sched_link.con.get('object',
                                    {'o_type': o_type, 'o_name': o_name},
                                    wait=True)
        if isinstance(answer, dict) and 'content' in answer:
            return answer
    return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type}
def status(self, details=False):
    """Get the overall alignak status.

    :param details: when truthy, ask for a detailed status

    NOTE(review): any non-empty string (even 'false') becomes True
    here — confirm this is the intended query-parameter handling.
    """
    if details is not False:
        details = bool(details)
    return self.app.get_alignak_status(details=details)
def events_log(self, details=False, count=0, timestamp=0):
    """Get the most recent Alignak events.

    :param details: when truthy, return the raw event dicts instead of
        formatted strings
    :param count: maximum number of events to return (defaults to the
        configured/environment events log count + 1)
    :param timestamp: only return events newer than this timestamp
    :return: list of events, most recent first
    """
    if not count:
        count = 1 + int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT',
                                       self.app.conf.events_log_count))
    count = int(count)
    timestamp = float(timestamp)
    logger.debug('Get max %d events, newer than %s out of %d',
                 count, timestamp, len(self.app.recent_events))

    res = []
    for log in reversed(self.app.recent_events):
        if timestamp and timestamp > log['timestamp']:
            break
        if not count:
            break
        # Fix: count was never decremented, so the count limit was
        # dead code and every matching event was returned.
        count -= 1
        if details:
            res.append(log)
        else:
            res.append("%s - %s - %s"
                       % (log['date'], log['level'][0].upper(), log['message']))
    logger.debug('Got %d events', len(res))
    return res
def satellites_list(self, daemon_type=''):
    """Get the arbiter satellite names sorted by type.

    :param daemon_type: restrict the answer to one daemon type
    :return: dict mapping each daemon type to its daemon names
    """
    with self.app.conf_lock:
        res = {}
        for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner',
                       'receiver', 'broker']:
            if daemon_type and daemon_type != s_type:
                continue
            res[s_type] = [link.name
                           for link in getattr(self.app.conf, s_type + 's', [])]
        return res
def satellites_configuration(self):
    """Return all the configuration data of satellites.

    For every satellite of every type, dump each (running) property
    that is JSON-serializable; internal properties (realms, conf,
    con, ...) are skipped.
    """
    res = {}
    for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner',
                   'receiver', 'broker']:
        satellites = []
        res[s_type] = satellites
        for daemon in getattr(self.app.conf, s_type + 's'):
            cls = daemon.__class__
            env = {}
            for props in (cls.properties, cls.running_properties):
                for prop in props:
                    if not hasattr(daemon, prop):
                        continue
                    if prop in ["realms", "conf", "con", "tags", "modules",
                                "cfg", "broks", "cfg_to_manage"]:
                        continue
                    val = getattr(daemon, prop)
                    # Only keep what can be serialized to JSON
                    try:
                        json.dumps(val)
                    except TypeError as exp:
                        logger.warning('satellites_configuration, %s: %s',
                                       prop, str(exp))
                    else:
                        env[prop] = val
            satellites.append(env)
    return res
def external_commands(self):
    """Get the pending external commands from the daemon.

    :return: list of serialized external commands
    """
    with self.app.external_commands_lock:
        return [cmd.serialize() for cmd in self.app.get_external_commands()]
def search(self):
    """Request available queries (Grafana search endpoint).

    Handles the CORS pre-flight OPTIONS request, then returns the
    static list of available Grafana targets.
    """
    logger.debug("Grafana search... %s", cherrypy.request.method)
    if cherrypy.request.method == 'OPTIONS':
        # CORS pre-flight: advertise what we accept and stop there
        headers = cherrypy.response.headers
        headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE'
        headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
        headers['Access-Control-Allow-Origin'] = '*'
        cherrypy.request.handler = None
        return {}

    if getattr(cherrypy.request, 'json', None):
        logger.debug("Posted data: %s", cherrypy.request.json)

    logger.debug("Grafana search returns: %s", GRAFANA_TARGETS)
    return GRAFANA_TARGETS
19,711 | def _build_host_livestate ( self , host_name , livestate ) : state = livestate . get ( 'state' , 'UP' ) . upper ( ) output = livestate . get ( 'output' , '' ) long_output = livestate . get ( 'long_output' , '' ) perf_data = livestate . get ( 'perf_data' , '' ) try : timestamp = int ( livestate . get ( 'timestamp' , 'ABC' ) ) except ValueError : timestamp = None host_state_to_id = { "UP" : 0 , "DOWN" : 1 , "UNREACHABLE" : 2 } parameters = '%s;%s' % ( host_state_to_id . get ( state , 3 ) , output ) if long_output and perf_data : parameters = '%s|%s\n%s' % ( parameters , perf_data , long_output ) elif long_output : parameters = '%s\n%s' % ( parameters , long_output ) elif perf_data : parameters = '%s|%s' % ( parameters , perf_data ) command_line = 'PROCESS_HOST_CHECK_RESULT;%s;%s' % ( host_name , parameters ) if timestamp is not None : command_line = '[%d] %s' % ( timestamp , command_line ) else : command_line = '[%d] %s' % ( int ( time . time ( ) ) , command_line ) return command_line | Build and notify the external command for an host livestate |
19,712 | def _build_service_livestate ( self , host_name , service_name , livestate ) : state = livestate . get ( 'state' , 'OK' ) . upper ( ) output = livestate . get ( 'output' , '' ) long_output = livestate . get ( 'long_output' , '' ) perf_data = livestate . get ( 'perf_data' , '' ) try : timestamp = int ( livestate . get ( 'timestamp' , 'ABC' ) ) except ValueError : timestamp = None service_state_to_id = { "OK" : 0 , "WARNING" : 1 , "CRITICAL" : 2 , "UNKNOWN" : 3 , "UNREACHABLE" : 4 } parameters = '%s;%s' % ( service_state_to_id . get ( state , 3 ) , output ) if long_output and perf_data : parameters = '%s|%s\n%s' % ( parameters , perf_data , long_output ) elif long_output : parameters = '%s\n%s' % ( parameters , long_output ) elif perf_data : parameters = '%s|%s' % ( parameters , perf_data ) command_line = 'PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s' % ( host_name , service_name , parameters ) if timestamp is not None : command_line = '[%d] %s' % ( timestamp , command_line ) else : command_line = '[%d] %s' % ( int ( time . time ( ) ) , command_line ) return command_line | Build and notify the external command for a service livestate |
19,713 | def _do_not_run ( self ) : if self . app . is_master : message = "Received message to not run. " "I am the Master arbiter, ignore and continue to run." logger . warning ( message ) return { '_status' : u'ERR' , '_message' : message } logger . debug ( "Received message to not run. I am the spare, stopping." ) self . app . last_master_speak = time . time ( ) self . app . must_run = False return { '_status' : u'OK' , '_message' : message } | The master arbiter tells to its spare arbiters to not run . |
def create_commandcall(prop, commands, command):
    """Create a CommandCall object from a command line.

    Optional attributes of *prop* (enable_environment_macros and one of
    poller_tag / reactionner_tag) are forwarded to the CommandCall.
    """
    args = {'commands': commands, 'call': command}
    if hasattr(prop, 'enable_environment_macros'):
        args['enable_environment_macros'] = prop.enable_environment_macros
    if hasattr(prop, 'poller_tag'):
        args['poller_tag'] = prop.poller_tag
    elif hasattr(prop, 'reactionner_tag'):
        args['reactionner_tag'] = prop.reactionner_tag
    return CommandCall(args)
def _push_broks(self):
    """Push the broks received in the POSTed JSON to the arbiter broks list."""
    data = cherrypy.request.json
    with self.app.arbiter_broks_lock:
        logger.debug("Pushing %d broks", len(data['broks']))
        self.app.arbiter_broks.extend(
            unserialize(brok, True) for brok in data['broks'])
def load_params(self, params):
    """Load parameters from the main configuration file.

    Each parameter either declares a macro ($KEY$), matches a known
    (running) property, or gets its type guessed; the resulting value
    is set as an attribute on this configuration object.
    """
    logger.debug("Alignak parameters:")
    for key, value in sorted(self.clean_params(params).items()):
        update_attribute = None

        if key[0] == '$' and key[-1] == '$':
            # Macro declaration
            key = key[1:-1]
            if key not in self.__class__.macros:
                logger.debug("New macro %s: %s - %s", self, key, value)
            self.__class__.macros[key] = '$%s$' % key
            key = '$%s$' % key
            logger.debug("- macro %s", key)
            update_attribute = value
            # Register a matching property so the macro survives defaults
            if isinstance(value, list):
                self.__class__.properties[key] = ListProp(default=value)
            else:
                self.__class__.properties[key] = StringProp(default=value)
        elif key in self.properties:
            update_attribute = self.properties[key].pythonize(value)
        elif key in self.running_properties:
            logger.warning("using a the running property %s in a config file", key)
            update_attribute = self.running_properties[key].pythonize(value)
        elif key.startswith('$') or key in ['cfg_file', 'cfg_dir']:
            # Specific parameters, kept as-is
            update_attribute = value
        else:
            logger.debug("Guessing the property '%s' type because it "
                         "is not in %s object properties", key,
                         self.__class__.__name__)
            update_attribute = ToGuessProp().pythonize(value)

        if update_attribute is not None:
            setattr(self, key, update_attribute)
            logger.debug("- update %s = %s", key, update_attribute)

    self.old_properties_names_to_new()
    self.fill_default()
19,717 | def _cut_line ( line ) : if re . search ( "([\t\n\r]+|[\x0b\x0c ]{3,})+" , line ) : tmp = re . split ( "([\t\n\r]+|[\x0b\x0c ]{3,})+" , line , 1 ) else : tmp = re . split ( "[" + string . whitespace + "]+" , line , 1 ) res = [ elt . strip ( ) for elt in tmp if elt . strip ( ) != '' ] return res | Split the line on whitespaces and remove empty chunks |
def add_self_defined_objects(raw_objects):
    """Add the internally defined command objects to the raw objects.

    Declares bp_rule, _internal_host_up, _internal_host_check, _echo
    and _internal_service_check so they are usable in configurations.
    """
    logger.info("- creating internally defined commands...")
    internal_commands = [
        {'command_name': 'bp_rule',
         'command_line': 'bp_rule'},
        {'command_name': '_internal_host_up',
         'command_line': '_internal_host_up'},
        {'command_name': '_internal_host_check',
         'command_line': '_internal_host_check;$ARG1$;$ARG2$'},
        {'command_name': '_echo',
         'command_line': '_echo'},
        {'command_name': '_internal_service_check',
         'command_line': '_internal_service_check;$ARG1$;$ARG2$'},
    ]
    if 'command' not in raw_objects:
        raw_objects['command'] = []
    for command in internal_commands:
        command['imported_from'] = 'alignak-self'
        raw_objects['command'].append(command)
def early_create_objects(self, raw_objects):
    """Create the objects needed early, right after reading the files.

    Only the types listed in early_created_types are built here; the
    rest are handled later by create_objects().
    """
    types_creations = self.__class__.types_creations
    early_types = self.__class__.early_created_types
    logger.info("Creating objects...")
    for o_type in sorted(types_creations):
        if o_type in early_types:
            self.create_objects_for_type(raw_objects, o_type)
    logger.info("Done")
def create_objects(self, raw_objects):
    """Create all the remaining objects after the early initialization.

    Internally defined commands are injected first, then every type
    that was not already built by early_create_objects().
    """
    types_creations = self.__class__.types_creations
    early_types = self.__class__.early_created_types
    logger.info("Creating objects...")

    # Inject the self-defined commands (bp_rule, _echo, ...)
    self.add_self_defined_objects(raw_objects)

    for o_type in sorted(types_creations):
        if o_type not in early_types:
            self.create_objects_for_type(raw_objects, o_type)
    logger.info("Done")
def create_objects_for_type(self, raw_objects, o_type):
    """Create and register all the objects of a given type.

    Instantiates one object per raw definition and stores the resulting
    collection on self under the type's configured property name.
    """
    (cls, clss, prop, initial_index, _) = self.__class__.types_creations[o_type]
    objects = []
    try:
        logger.info("- creating '%s' objects", o_type)
        for obj_cfg in raw_objects[o_type]:
            objects.append(cls(obj_cfg))
        if not objects:
            logger.info(" none.")
    except KeyError:
        # No definition at all for this type in the configuration
        logger.info(" no %s objects in the configuration", o_type)
    setattr(self, prop, clss(objects, initial_index))
def early_arbiter_linking(self, arbiter_name, params):
    """Prepare the arbiter links for early operations.

    When no arbiter is defined in the configuration, declare myself as
    the only one, reachable on the localhost loopback.
    """
    if not self.arbiters:
        params.update({
            'name': arbiter_name,
            'arbiter_name': arbiter_name,
            'host_name': socket.gethostname(),
            'address': '127.0.0.1',
            'port': 7770,
            'spare': '0'
        })
        logger.warning("There is no arbiter, I add myself (%s) reachable on %s:%d",
                       arbiter_name, params['address'], params['port'])
        arb = ArbiterLink(params, parsing=True)
        self.arbiters = ArbiterLinks([arb])

    # Fill default values and link the modules
    self.arbiters.fill_default()
    self.modules.fill_default()
    self.arbiters.linkify(modules=self.modules)
    self.modules.linkify()
def linkify_one_command_with_commands(self, commands, prop):
    """Link the command named in self.<prop> to a CommandCall object.

    An empty command string resets the property to None; otherwise a
    CommandCall is built, forwarding poller_tag / reactionner_tag when
    present on self.
    """
    if not hasattr(self, prop):
        return

    command = getattr(self, prop).strip()
    if not command:
        setattr(self, prop, None)
        return

    data = {"commands": commands, "call": command}
    if hasattr(self, 'poller_tag'):
        data["poller_tag"] = self.poller_tag
    if hasattr(self, 'reactionner_tag'):
        data["reactionner_tag"] = self.reactionner_tag
    setattr(self, prop, CommandCall(data))
def linkify(self):
    """Make links between elements, e.g. a host gets its services list
    with all its real service objects in it."""
    self.services.optimize_service_search(self.hosts)

    # Global performance-data / event-handler commands
    self.linkify_one_command_with_commands(self.commands, 'host_perfdata_command')
    self.linkify_one_command_with_commands(self.commands, 'service_perfdata_command')
    self.linkify_one_command_with_commands(self.commands, 'global_host_event_handler')
    self.linkify_one_command_with_commands(self.commands, 'global_service_event_handler')

    # Monitored objects
    self.hosts.linkify(self.timeperiods, self.commands, self.contacts,
                       self.realms, self.resultmodulations,
                       self.businessimpactmodulations, self.escalations,
                       self.hostgroups, self.checkmodulations,
                       self.macromodulations)
    self.hostsextinfo.merge(self.hosts)
    self.hostgroups.linkify(self.hosts, self.realms, self.forced_realms_hostgroups)
    self.services.linkify(self.hosts, self.commands, self.timeperiods,
                          self.contacts, self.resultmodulations,
                          self.businessimpactmodulations, self.escalations,
                          self.servicegroups, self.checkmodulations,
                          self.macromodulations)
    self.servicesextinfo.merge(self.services)
    self.servicegroups.linkify(self.hosts, self.services)
    self.notificationways.linkify(self.timeperiods, self.commands)
    self.checkmodulations.linkify(self.timeperiods, self.commands)
    self.macromodulations.linkify(self.timeperiods)
    self.contactgroups.linkify(self.contacts)
    self.contacts.linkify(self.commands, self.notificationways)
    self.timeperiods.linkify()
    self.servicedependencies.linkify(self.hosts, self.services, self.timeperiods)
    self.hostdependencies.linkify(self.hosts, self.timeperiods)
    self.resultmodulations.linkify(self.timeperiods)
    self.businessimpactmodulations.linkify(self.timeperiods)
    self.escalations.linkify(self.timeperiods, self.contacts,
                             self.services, self.hosts)

    # Daemon links and their modules
    self.schedulers.linkify(self.modules)
    self.brokers.linkify(self.modules)
    self.receivers.linkify(self.modules)
    self.reactionners.linkify(self.modules)
    self.pollers.linkify(self.modules)

    # Index every satellite by uuid so the realms can dispatch to them
    satellites = {}
    for sat_list in (self.schedulers, self.pollers, self.reactionners,
                     self.receivers, self.brokers):
        for sat in sat_list:
            satellites[sat.uuid] = sat
    self.realms.prepare_satellites(satellites)
def clean(self):
    """Clean every inner objects collection before configuration sending."""
    logger.debug("Cleaning configuration objects before configuration sending:")
    types_creations = self.__class__.types_creations
    for o_type in types_creations:
        (_, _, inner_property, _, _) = types_creations[o_type]
        logger.debug(" . for %s", inner_property)
        getattr(self, inner_property).clean()
def warn_about_unmanaged_parameters(self):
    """Log a warning for each Nagios legacy parameter that Alignak does
    not manage but that is present in the configuration."""
    unmanaged = []
    for prop, entry in list(self.__class__.properties.items()):
        if not entry.managed and hasattr(self, prop):
            if entry.help:
                unmanaged.append("%s: %s" % (prop, entry.help))
            else:
                unmanaged.append(prop)

    if not unmanaged:
        return

    logger.warning("The following Nagios legacy parameter(s) are not currently "
                   "managed by Alignak:")
    for line in unmanaged:
        logger.warning('- %s', line)
    logger.warning("Those are unmanaged configuration statements, do you really need it? "
                   "Create an issue on the Alignak repository or submit a pull "
                   "request: http://www.github.com/Alignak-monitoring/alignak")
def apply_dependencies(self):
    """Create the dependency links between hosts and between services."""
    self.hosts.apply_dependencies()
    self.services.apply_dependencies(self.hosts)
def fill_default_configuration(self):
    """Fill objects properties with their default value if necessary.

    Monitored-object collections are filled first, then the realms and
    the satellite daemons (which may need to be created on the fly),
    and finally the predictive host/service parameters.
    """
    logger.debug("Filling the unset properties with their default value:")

    types_creations = self.__class__.types_creations
    daemon_properties = ['schedulers', 'reactionners', 'pollers',
                         'brokers', 'receivers']

    # 1- every collection except realms/arbiters/daemon links
    for o_type in types_creations:
        (_, _, inner_property, _, _) = types_creations[o_type]
        if inner_property in ['realms', 'arbiters'] + daemon_properties:
            continue
        logger.debug(" . for %s", inner_property)
        inner_object = getattr(self, inner_property, None)
        if inner_object is None:
            logger.debug("No %s to fill with default values", inner_property)
            continue
        inner_object.fill_default()

    # 2- realms, then the satellites (create the missing ones if required)
    # NOTE(review): fill_default_satellites assumed inside the realms
    # guard, as the token order suggests — confirm against upstream.
    if getattr(self, 'realms', None) is not None:
        self.fill_default_realm()
        self.realms.fill_default()
        self.fill_default_satellites(self.launch_missing_daemons)

    # 3- the daemon link collections
    for o_type in types_creations:
        (_, _, inner_property, _, _) = types_creations[o_type]
        if getattr(self, inner_property, None) is None:
            logger.debug("No %s to fill with default values", inner_property)
            continue
        if inner_property in daemon_properties:
            logger.debug(" . for %s", inner_property)
            getattr(self, inner_property).fill_default()

    # 4- predictive parameters
    self.hosts.fill_predictive_missing_parameters()
    self.services.fill_predictive_missing_parameters()
def log_daemons_list(self):
    """Log the Alignak daemons list, one debug line per daemon type."""
    for daemons_list in [self.arbiters, self.schedulers, self.pollers,
                         self.brokers, self.reactionners, self.receivers]:
        if not daemons_list:
            logger.debug("- %ss: None", daemons_list.inner_class.my_type)
        else:
            logger.debug("- %ss: %s", daemons_list.inner_class.my_type,
                         ','.join([daemon.get_name() for daemon in daemons_list]))
def got_broker_module_type_defined(self, module_type):
    """Check if a module of the given type is defined on one of the brokers.

    :return: True if any broker declares a module of this type
    """
    return any(module.is_a_module(module_type)
               for broker_link in self.brokers
               for module in broker_link.modules)
def got_scheduler_module_type_defined(self, module_type):
    """Check if a module of the given type is defined on one of the schedulers.

    :return: True if any scheduler declares a module of this type
    """
    for sched_link in self.schedulers:
        for declared in sched_link.modules:
            if declared.is_a_module(module_type):
                return True
    return False
def got_arbiter_module_type_defined(self, module_type):
    """Check if a module of the given type is defined on one of the arbiters.

    The module attached to the arbiter is matched by name against the
    globally known modules list, whose python_name must equal the
    requested type.
    """
    wanted = module_type.strip()
    for arbiter in self.arbiters:
        for module in getattr(arbiter, 'modules', []):
            module_name = module.get_name()
            for known in self.modules:
                if getattr(known, 'python_name', '').strip() != wanted:
                    continue
                if getattr(known, 'name', '').strip() == module_name:
                    return True
    return False
def create_business_rules(self):
    """Create the business rules for the hosts and for the services."""
    for collection in (self.hosts, self.services):
        collection.create_business_rules(self.hosts, self.services,
                                         self.hostgroups, self.servicegroups,
                                         self.macromodulations, self.timeperiods)
def create_business_rules_dependencies(self):
    """Wire the dependency links implied by the business rules.

    Every element used inside a business rule becomes a parent of the
    rule's item: it notifies the rule item on state change and may have
    its notification options overridden by the rule item's
    configuration.
    """
    for item in itertools.chain(self.hosts, self.services):
        if not item.got_business_rule:
            continue

        for bp_item_uuid in item.business_rule.list_all_elements():
            # The rule element is either a host or a service
            if bp_item_uuid in self.hosts:
                bp_item = self.hosts[bp_item_uuid]
                notif_options = item.business_rule_host_notification_options
            else:
                bp_item = self.services[bp_item_uuid]
                notif_options = item.business_rule_service_notification_options

            if notif_options:
                bp_item.notification_options = notif_options

            bp_item.act_depend_of_me.append(
                (item.uuid, ['d', 'u', 's', 'f', 'c', 'w', 'x'], '', True))
            item.parent_dependencies.add(bp_item.uuid)
            bp_item.child_dependencies.add(item.uuid)
def propagate_timezone_option(self):
    """Set our timezone value and give it to the satellites that did
    not set one ('NOTSET')."""
    if not self.use_timezone:
        return

    # Set our own process timezone first
    os.environ['TZ'] = self.use_timezone
    time.tzset()

    for sat_list in [self.schedulers, self.pollers, self.brokers,
                     self.receivers, self.reactionners]:
        for sat in sat_list:
            if sat.use_timezone == 'NOTSET':
                setattr(sat, 'use_timezone', self.use_timezone)
def linkify_templates(self):
    """Link the templates of every templatable collection together."""
    for collection in (self.hosts, self.contacts, self.services,
                       self.servicedependencies, self.hostdependencies,
                       self.timeperiods, self.hostsextinfo,
                       self.servicesextinfo, self.escalations,
                       self.serviceescalations, self.hostescalations):
        collection.linkify_templates()
def remove_templates(self):
    """Drop the templates from the collections: once the real objects
    are built the templates are not needed anymore."""
    for collection in (self.hosts, self.contacts, self.services,
                       self.servicedependencies, self.hostdependencies,
                       self.timeperiods):
        collection.remove_templates()
def show_errors(self):
    """Log the configuration warnings and errors, if any."""
    for title, messages in (("Configuration warnings:", self.configuration_warnings),
                            ("Configuration errors:", self.configuration_errors)):
        if messages:
            logger.warning(title)
            for msg in messages:
                logger.warning(msg)
def prepare_for_sending(self):
    """Serialize the configuration for a spare arbiter, if one is defined."""
    spares = [link for link in self.arbiters if link.spare]
    if spares:
        logger.info('Serializing the configuration for my spare arbiter...')
        self.spare_arbiter_conf = serialize(self)
def dump(self, dump_file_name=None):
    """Dump the configuration to a file in JSON format.

    :param dump_file_name: target file; defaults to a timestamped file
        in the system temporary directory
    """
    config_dump = {}
    for _, _, category, _, _ in list(self.types_creations.values()):
        try:
            objs = [jsonify_r(i) for i in getattr(self, category)]
        except (TypeError, AttributeError):
            logger.warning("Dumping configuration, '%s' not present in the configuration",
                           category)
            continue

        container = getattr(self, category)
        if category == "services":
            objs = sorted(objs, key=lambda o: "%s/%s" % (o["host_name"],
                                                         o["service_description"]))
        elif hasattr(container, "name_property"):
            # Fix: objs are plain dicts (jsonify_r output), so the
            # former getattr(o, prop, '') always returned the default
            # and the sort was a no-op; use dict.get() instead.
            name_prop = container.name_property
            objs = sorted(objs, key=lambda o, prop=name_prop: o.get(prop, ''))
        config_dump[category] = objs

    if not dump_file_name:
        dump_file_name = os.path.join(tempfile.gettempdir(),
                                      'alignak-%s-cfg-dump-%d.json'
                                      % (self.name, int(time.time())))
    try:
        logger.info('Dumping configuration to: %s', dump_file_name)
        # The with-statement guarantees the file is closed even when a
        # write fails (the original leaked the handle on error)
        with open(dump_file_name, "w") as fd:
            fd.write(json.dumps(config_dump, indent=4,
                                separators=(',', ': '), sort_keys=True))
        logger.info('Dumped')
    except (OSError, IndexError) as exp:
        logger.critical("Error when dumping configuration to %s: %s",
                        dump_file_name, str(exp))
def push_broks_to_broker(self):
    """Send the arbiter internal broks to the brokers that manage arbiters.

    The local broks list is cleared when at least one broker accepted
    the broks, or when no broker is concerned at all.
    """
    someone_is_concerned = False
    sent = False
    for broker_link in self.conf.brokers:
        if not broker_link.manage_arbiters:
            continue
        someone_is_concerned = True
        if broker_link.reachable:
            logger.debug("Sending %d broks to the broker %s",
                         len(self.broks), broker_link.name)
            if broker_link.push_broks(self.broks):
                statsmgr.counter('broks.pushed.count', len(self.broks))
                sent = True

    if not someone_is_concerned or sent:
        # No concerned broker, or successfully sent: empty the list
        del self.broks[:]
def push_external_commands_to_schedulers(self):
    """Resolve the pending external commands and push them to the schedulers."""
    for external_command in self.external_commands:
        self.external_commands_manager.resolve_command(external_command)

    sent = False
    for scheduler_link in self.conf.schedulers:
        ext_cmds = scheduler_link.external_commands
        if ext_cmds and scheduler_link.reachable:
            logger.debug("Sending %d commands to the scheduler %s",
                         len(ext_cmds), scheduler_link.name)
            if scheduler_link.push_external_commands(ext_cmds):
                statsmgr.counter('external-commands.pushed.count', len(ext_cmds))
                sent = True
        # NOTE(review): 'sent' accumulates across schedulers, so a later
        # scheduler's queue may be cleared after an earlier successful
        # push — confirm this is intended.
        if sent:
            scheduler_link.external_commands.clear()
def get_broks_from_satellites(self):
    """Collect the broks from all my reachable internal satellite links."""
    for satellites in [self.conf.brokers, self.conf.schedulers,
                       self.conf.pollers, self.conf.reactionners,
                       self.conf.receivers]:
        for satellite in satellites:
            if not satellite.reachable:
                continue
            logger.debug("Getting broks from: %s", satellite.name)
            new_broks = satellite.get_and_clear_broks()
            if new_broks:
                logger.debug("Got %d broks from: %s", len(new_broks), satellite.name)
            for brok in new_broks:
                self.add(brok)
def get_initial_broks_from_satellites(self):
    """Get the initial status brok from each reachable satellite link."""
    for satellites in [self.conf.brokers, self.conf.schedulers,
                       self.conf.pollers, self.conf.reactionners,
                       self.conf.receivers]:
        for satellite in satellites:
            if not satellite.reachable:
                continue
            logger.debug("Getting initial brok from: %s", satellite.name)
            brok = satellite.get_initial_status_brok()
            logger.debug("Satellite '%s' initial brok: %s", satellite.name, brok)
            self.add(brok)
def load_modules_configuration_objects(self, raw_objects):
    """Load configuration objects from the arbiter modules.

    Each module implementing get_objects() is asked for its objects,
    which are appended to the raw objects (tagged with their origin).
    """
    for instance in self.modules_manager.instances:
        logger.debug("Getting objects from the module: %s", instance.name)
        if not hasattr(instance, 'get_objects'):
            logger.debug("The module '%s' do not provide any objects.", instance.name)
            # Fix: was 'return', which silently skipped all the
            # remaining module instances.
            continue

        try:
            logger.info("Getting Alignak monitored configuration objects from module '%s'",
                        instance.name)
            got_objects = instance.get_objects()
        except Exception as exp:  # pylint: disable=broad-except
            logger.exception("Module %s get_objects raised an exception %s. "
                             "Log and continue to run.", instance.name, exp)
            continue

        if not got_objects:
            logger.warning("The module '%s' did not provided any objects.", instance.name)
            # Fix: was 'return' — see above.
            continue

        types_creations = self.conf.types_creations
        for o_type in types_creations:
            (_, _, prop, _, _) = types_creations[o_type]
            # Daemon links and modules are never provided by modules
            if prop in ['arbiters', 'brokers', 'schedulers', 'pollers',
                        'reactionners', 'receivers', 'modules']:
                continue
            if prop not in got_objects:
                logger.warning("Did not get any '%s' objects from %s", prop, instance.name)
                continue
            for obj in got_objects[prop]:
                if o_type not in raw_objects:
                    raw_objects[o_type] = []
                # Tag the object with its origin module
                if 'imported_from' not in obj:
                    obj['imported_from'] = 'module:%s' % instance.name
                raw_objects[o_type].append(obj)
            logger.debug("Added %i %s objects from %s",
                         len(got_objects[prop]), o_type, instance.name)
def load_modules_alignak_configuration(self):
    """Load global Alignak configuration parameters from the arbiter modules.

    Each module providing a ``get_alignak_configuration`` method is called and
    the merged parameters are pushed to the configuration. Keys starting with
    '_' become $UPPERCASE$ macros; None/'None'/empty values are ignored.
    """
    alignak_cfg = {}
    # Ask module instances to provide configuration parameters
    for instance in self.modules_manager.instances:
        if not hasattr(instance, 'get_alignak_configuration'):
            # Bug fix: was 'return', which skipped all the remaining modules
            continue
        try:
            logger.info("Getting Alignak global configuration from module '%s'",
                        instance.name)
            cfg = instance.get_alignak_configuration()
            alignak_cfg.update(cfg)
        except Exception as exp:  # pylint: disable=broad-except
            logger.error("Module %s get_alignak_configuration raised an exception %s. "
                         "Log and continue to run", instance.name, str(exp))
            output = io.StringIO()
            traceback.print_exc(file=output)
            logger.error("Back trace of this remove: %s", output.getvalue())
            output.close()
            continue
    params = []
    if alignak_cfg:
        logger.info("Got Alignak global configuration:")
        for key, value in sorted(alignak_cfg.items()):
            logger.info("- %s = %s", key, value)
            # Underscore-prefixed keys are turned into Alignak macros
            if key.startswith('_'):
                key = '$' + key[1:].upper() + '$'
            # Ignore empty or un-set values
            if value is None:
                continue
            if value == 'None':
                continue
            if value == '':
                continue
            params.append("%s=%s" % (key, value))
        self.conf.load_params(params)
def request_stop(self, message='', exit_code=0):
    """Stop the Arbiter daemon.

    A master arbiter first stops its self-launched daemons, then delegates
    to the base daemon stop procedure.
    """
    if self.is_master:
        self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
    super(Arbiter, self).request_stop(message, exit_code)
def start_daemon(self, satellite):
    """Launch a local daemon process for the given satellite link.

    Builds the daemon script path (from the configured `daemons_script_location`
    or the arbiter bin dir), spawns the process with psutil.Popen and records it
    in `self.my_daemons`.

    :param satellite: satellite link to launch (provides type, name, host, port)
    :return: True when the process was spawned, False on error
    """
    logger.info("  launching a daemon for: %s/%s...", satellite.type, satellite.name)
    # The daemon startup script location may be defined in the configuration;
    # otherwise the plain 'alignak-<type>' command is expected on the PATH
    daemon_script_location = getattr(self.conf, 'daemons_script_location', self.bindir)
    if not daemon_script_location:
        daemon_script_location = "alignak-%s" % satellite.type
    else:
        daemon_script_location = "%s/alignak-%s" % (daemon_script_location, satellite.type)
    daemon_arguments = getattr(self.conf, 'daemons_arguments', '')
    args = [daemon_script_location,
            "--name", satellite.name,
            "--environment", self.env_filename,
            "--host", str(satellite.host),
            "--port", str(satellite.port)]
    if daemon_arguments:
        args.append(daemon_arguments)
    logger.info("  ... with some arguments: %s", args)
    try:
        process = psutil.Popen(args, stdin=None, stdout=None, stderr=None)
        # A brief pause to let the process start
        time.sleep(0.1)
    except Exception as exp:  # pylint: disable=broad-except
        logger.error("Error when launching %s: %s", satellite.name, exp)
        logger.error("Command: %s", args)
        return False
    logger.info("  %s launched (pid=%d, gids=%s)",
                satellite.name, process.pid, process.gids())
    # Keep the link and the psutil process handle for later supervision
    self.my_daemons[satellite.name] = {'satellite': satellite, 'process': process}
    return True
def daemons_start(self, run_daemons=True):
    """Check (and optionally launch) the daemons declared in the configuration.

    :param run_daemons: when True, also start the daemons flagged `alignak_launched`
    :return: True when every check/launch succeeded, else False
    """
    result = True
    if run_daemons:
        logger.info("Alignak configured daemons start:")
    else:
        logger.info("Alignak configured daemons check:")
    for satellites_list in [self.conf.arbiters, self.conf.receivers, self.conf.reactionners,
                            self.conf.pollers, self.conf.brokers, self.conf.schedulers]:
        for satellite in satellites_list:
            logger.info("- found %s, to be launched: %s, address: %s",
                        satellite.name, satellite.alignak_launched, satellite.uri)
            if satellite == self.link_to_myself:
                # Do not launch myself!
                continue
            # A daemon to launch must be a local one
            if satellite.alignak_launched and satellite.address not in ['127.0.0.1', 'localhost']:
                logger.error("Alignak is required to launch a daemon for %s %s "
                             "but the satelitte is defined on an external address: %s",
                             satellite.type, satellite.name, satellite.address)
                result = False
                continue
            if not run_daemons:
                # When only checking, stop here
                continue
            if not satellite.alignak_launched:
                # Bug fix: the '%s' format placeholder had no argument
                logger.debug("Alignak will not launch '%s'", satellite.name)
                continue
            if not satellite.active:
                logger.warning("- daemon '%s' is declared but not set as active, "
                               "do not start...", satellite.name)
                continue
            if satellite.name in self.my_daemons:
                logger.warning("- daemon '%s' is already running", satellite.name)
                continue
            started = self.start_daemon(satellite)
            result = result and started
    return result
def daemons_stop(self, timeout=30, kill_children=False):
    """Stop the Alignak daemons that this arbiter launched itself.

    Phase 1 sends SIGTERM to each managed process (and optionally its
    children); phase 2 waits up to `timeout` seconds, SIGKILLs the survivors,
    then waits again.

    :param timeout: seconds to wait after terminate, and again after kill
    :param kill_children: also terminate/kill the daemons' child processes
    :return: True if every process stopped, False if some survived SIGKILL
    """
    def on_terminate(proc):
        # psutil.wait_procs callback, used only for debug logging
        logger.debug("process %s terminated with exit code %s", proc.pid, proc.returncode)
    result = True
    if self.my_daemons:
        logger.info("Alignak self-launched daemons stop:")
        start = time.time()
        # Phase 1: polite SIGTERM for every managed process
        for daemon in list(self.my_daemons.values()):
            procs = []
            if kill_children:
                procs = daemon['process'].children()
            procs.append(daemon['process'])
            for process in procs:
                try:
                    logger.info("- terminating process %s", process.name())
                    process.terminate()
                except psutil.AccessDenied:
                    # The process may already be a zombie or owned by another user
                    logger.warning("Process %s is %s", process.name(), process.status())
        # Phase 2: wait for termination, then SIGKILL the remaining ones
        # NOTE(review): procs is not reset per daemon when kill_children is
        # False, so the wait list accumulates across daemons — confirm intended
        procs = []
        for daemon in list(self.my_daemons.values()):
            if kill_children:
                procs = daemon['process'].children()
            procs.append(daemon['process'])
            _, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
            if alive:
                # Some processes are still alive: try harder with SIGKILL
                for process in alive:
                    logger.warning("Process %s did not stopped, trying to kill", process.name())
                    process.kill()
                _, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
                if alive:
                    for process in alive:
                        logger.warning("process %s survived SIGKILL; giving up", process.name())
                        result = False
        logger.debug("Stopping daemons duration: %.2f seconds", time.time() - start)
    return result
def setup_new_conf(self):
    """Apply a new configuration received from a Master arbiter (spare side).

    Un-serializes the whole configuration pushed by the master, then rebuilds
    the arbiter satellite links, preserving the context (broks, actions,
    external commands, running id) of the links that already existed.
    """
    # Execute the base class behavior...
    super(Arbiter, self).setup_new_conf()
    with self.conf_lock:
        logger.info("I received a new configuration from my master")
        self.cur_conf = self.new_conf
        # Our own configuration part, or the full conf when absent
        self_conf = self.cur_conf.get('self_conf', None)
        if not self_conf:
            self_conf = self.conf
        whole_conf = self.cur_conf['whole_conf']
        logger.debug("Received a new configuration, containing:")
        for key in self.cur_conf:
            logger.debug("- %s: %s", key, self.cur_conf[key])
        logger.debug("satellite self configuration part: %s", self_conf)
        self.alignak_name = self.cur_conf['alignak_name']
        logger.info("My Alignak instance: %s", self.alignak_name)
        # Mark the new configuration as consumed
        self.new_conf = {}
        t00 = time.time()
        try:
            received_conf_part = unserialize(whole_conf)
        except AlignakClassLookupException as exp:
            # Unknown classes in the serialized payload: report and bail out
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter"}
            logger.error(self.new_conf['_status'])
            logger.error("Back trace of the error:\n%s", traceback.format_exc())
            return
        except Exception as exp:  # pylint: disable=broad-except
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter"}
            logger.error(self.new_conf['_status'])
            logger.error(self.new_conf)
            self.exit_on_exception(exp, self.new_conf)
        logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
                    received_conf_part, t00, time.time() - t00)
        # Rebuild the arbiter links, keeping context from already-known links
        my_satellites = getattr(self, 'arbiters', {})
        received_satellites = self.cur_conf['arbiters']
        for link_uuid in received_satellites:
            rs_conf = received_satellites[link_uuid]
            logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                         rs_conf['type'], rs_conf['name'])
            already_got = rs_conf['instance_id'] in my_satellites
            broks = []
            actions = {}
            wait_homerun = {}
            external_commands = {}
            running_id = 0
            if already_got:
                logger.warning("I already got: %s", rs_conf['instance_id'])
                # Save the existing link context before replacing the link
                running_id = my_satellites[link_uuid].running_id
                (broks, actions, wait_homerun,
                 external_commands) = my_satellites[link_uuid].get_and_clear_context()
                del my_satellites[link_uuid]
            new_link = SatelliteLink.get_a_satellite_link('arbiter', rs_conf)
            my_satellites[new_link.uuid] = new_link
            logger.info("I got a new arbiter satellite: %s", new_link)
            # Restore the saved context onto the fresh link
            new_link.running_id = running_id
            new_link.external_commands = external_commands
            new_link.broks = broks
            new_link.wait_homerun = wait_homerun
            new_link.actions = actions
        self.have_conf = True
def wait_for_master_death(self):
    """Wait for the master arbiter to time out, then take the lead."""
    logger.info("Waiting for master death")
    timeout = 1.0
    self.last_master_ping = time.time()
    # Deduce the master timeout from the master link spare-check parameters
    master_timeout = 300
    for arbiter_link in self.conf.arbiters:
        if not arbiter_link.spare:
            master_timeout = \
                arbiter_link.spare_check_interval * arbiter_link.spare_max_check_attempts
    logger.info("I'll wait master death for %d seconds", master_timeout)
    while not self.interrupted:
        _, tcdiff = self.make_a_pause(timeout)
        if tcdiff:
            # The clock jumped: shift the last ping accordingly
            self.last_master_ping += tcdiff
        if self.new_conf:
            self.setup_new_conf()
        sys.stdout.write(".")
        sys.stdout.flush()
        # Master still silent after the allowed delay? Take over.
        if time.time() - self.last_master_ping > master_timeout:
            logger.info("Arbiter Master is dead. The arbiter %s takes the lead!",
                        self.link_to_myself.name)
            for arbiter_link in self.conf.arbiters:
                if not arbiter_link.spare:
                    arbiter_link.alive = False
            self.must_run = True
            break
def manage_signal(self, sig, frame):
    """Handle SIGINT/SIGTERM with a graceful stop request; delegate the rest."""
    if sig not in (signal.SIGINT, signal.SIGTERM):
        Daemon.manage_signal(self, sig, frame)
        return
    logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
    # Flag the stop request; the main loop honors it
    self.kill_request = True
    self.kill_timestamp = time.time()
    logger.info("request to stop in progress")
def configuration_dispatch(self, not_configured=None):
    """Prepare and dispatch the monitored configuration to the satellites.

    Satellite connection and configuration dispatch are both retried up to 3
    times; after 3 failed attempts the arbiter requests its own stop (exit 4).

    :param not_configured: when set, reuse the existing dispatcher instead of
        creating a new one
    """
    if not not_configured:
        self.dispatcher = Dispatcher(self.conf, self.link_to_myself)
        self.cur_conf = self.conf
    # Phase 1: connect to every active satellite
    first_connection_try_count = 0
    logger.info("Connecting to my satellites...")
    while True:
        first_connection_try_count += 1
        self.all_connected = True
        for satellite in self.dispatcher.all_daemons_links:
            if satellite == self.link_to_myself:
                continue
            if not satellite.active:
                continue
            connected = self.daemon_connection_init(satellite, set_wait_new_conf=True)
            logger.debug("  %s is %s", satellite, connected)
            self.all_connected = self.all_connected and connected
        if self.all_connected:
            logger.info("- satellites connection #%s is ok", first_connection_try_count)
            break
        logger.warning("- satellites connection #%s is not correct; "
                       "let's give another chance after %d seconds...",
                       first_connection_try_count, self.link_to_myself.polling_interval)
        if first_connection_try_count >= 3:
            self.request_stop("All the daemons connections could not be established "
                              "despite %d tries! "
                              "Sorry, I bail out!" % first_connection_try_count,
                              exit_code=4)
        time.sleep(self.link_to_myself.polling_interval)
    # Phase 2: prepare the configuration parts
    _t0 = time.time()
    self.all_connected = self.dispatcher.check_reachable()
    statsmgr.timer('dispatcher.check-alive', time.time() - _t0)
    _t0 = time.time()
    logger.info("Preparing the configuration for dispatching...")
    self.dispatcher.prepare_dispatch()
    statsmgr.timer('dispatcher.prepare-dispatch', time.time() - _t0)
    logger.info("- configuration is ready to dispatch")
    # Phase 3: dispatch and verify
    first_dispatch_try_count = 0
    logger.info("Dispatching the configuration to my satellites...")
    while True:
        first_dispatch_try_count += 1
        _t0 = time.time()
        logger.info("- configuration dispatching #%s...", first_dispatch_try_count)
        self.dispatcher.check_reachable(forced=True)
        statsmgr.timer('dispatcher.dispatch', time.time() - _t0)
        # Give the daemons some time to manage their configuration
        pause = max(1, max(self.conf.daemons_dispatch_timeout, len(self.my_daemons) * 0.5))
        logger.info("- pausing %d seconds...", pause)
        time.sleep(pause)
        _t0 = time.time()
        logger.info("- checking configuration dispatch...")
        self.dispatcher.check_dispatch()
        statsmgr.timer('dispatcher.check-dispatch', time.time() - _t0)
        if self.dispatcher.dispatch_ok:
            logger.info("- configuration dispatching #%s is ok", first_dispatch_try_count)
            break
        logger.warning("- configuration dispatching #%s is not correct; "
                       "let's give another chance...", first_dispatch_try_count)
        if first_dispatch_try_count >= 3:
            # Bug fix: report the dispatch try count, not the connection one
            self.request_stop("The configuration could not be dispatched despite %d tries! "
                              "Sorry, I bail out!" % first_dispatch_try_count,
                              exit_code=4)
def do_before_loop(self):
    """Called before the main daemon loop.

    A master arbiter starts/checks the configured daemons, dispatches the
    monitored configuration, collects the initial broks from the satellites
    and creates the external commands manager. A spare arbiter only waits
    for the master's death.
    """
    logger.info("I am the arbiter: %s", self.link_to_myself.name)
    # A spare arbiter has nothing to prepare here
    if not self.is_master:
        logger.debug("Waiting for my master death...")
        return
    if not self.daemons_start(run_daemons=True):
        self.request_stop(message="Some Alignak daemons did not started correctly.",
                          exit_code=4)
    if not self.daemons_check():
        self.request_stop(message="Some Alignak daemons cannot be checked.",
                          exit_code=4)
    # Let the daemons start: at least 1 second, or 0.5 second per daemon
    pause = max(1, max(self.conf.daemons_start_timeout, len(self.my_daemons) * 0.5))
    if pause:
        logger.info("Pausing %.2f seconds...", pause)
        time.sleep(pause)
    # Prepare and dispatch the monitored configuration
    self.configuration_dispatch()
    # Collect the initial broks from the satellites
    _t0 = time.time()
    self.get_initial_broks_from_satellites()
    statsmgr.timer('broks.get-initial', time.time() - _t0)
    # The manager handling the external commands received by this arbiter
    self.external_commands_manager = ExternalCommandManager(
        self.conf, 'dispatcher', self,
        self.conf.accept_passive_unknown_check_results,
        self.conf.log_external_commands)
def get_monitoring_problems(self):
    """Collect the monitoring problems reported by the scheduler satellites.

    :return: identity dict extended with a 'problems' dict, one entry per scheduler
    """
    res = self.get_id()
    res['problems'] = {}
    dispatcher = getattr(self, 'dispatcher', None)
    if dispatcher is None:
        # No dispatcher yet: nothing to report
        return res
    for sat_link in dispatcher.all_daemons_links:
        if sat_link.type not in ['scheduler'] or not sat_link.active:
            continue
        if sat_link.statistics and 'problems' in sat_link.statistics:
            res['problems'][sat_link.name] = {
                '_freshness': sat_link.statistics['_freshness'],
                'problems': sat_link.statistics['problems'],
            }
    return res
def get_livesynthesis(self):
    """Aggregate the live synthesis counters of all the scheduler satellites.

    :return: identity dict extended with a 'livesynthesis' dict: one entry per
        scheduler plus an '_overall' entry summing all the counters
    """
    res = self.get_id()
    # Start with zeroed overall counters
    res['livesynthesis'] = {
        '_overall': {
            '_freshness': int(time.time()),
            'livesynthesis': {
                # Hosts counters
                'hosts_total': 0,
                'hosts_not_monitored': 0,
                'hosts_up_hard': 0,
                'hosts_up_soft': 0,
                'hosts_down_hard': 0,
                'hosts_down_soft': 0,
                'hosts_unreachable_hard': 0,
                'hosts_unreachable_soft': 0,
                'hosts_problems': 0,
                'hosts_acknowledged': 0,
                'hosts_in_downtime': 0,
                'hosts_flapping': 0,
                # Services counters
                'services_total': 0,
                'services_not_monitored': 0,
                'services_ok_hard': 0,
                'services_ok_soft': 0,
                'services_warning_hard': 0,
                'services_warning_soft': 0,
                'services_critical_hard': 0,
                'services_critical_soft': 0,
                'services_unknown_hard': 0,
                'services_unknown_soft': 0,
                'services_unreachable_hard': 0,
                'services_unreachable_soft': 0,
                'services_problems': 0,
                'services_acknowledged': 0,
                'services_in_downtime': 0,
                'services_flapping': 0,
            }
        }
    }
    if getattr(self, 'dispatcher', None) is None:
        # No dispatcher yet: return the empty synthesis
        return res
    for satellite in self.dispatcher.all_daemons_links:
        # Only active schedulers provide a live synthesis
        if satellite.type not in ['scheduler']:
            continue
        if not satellite.active:
            continue
        if 'livesynthesis' in satellite.statistics:
            res['livesynthesis'][satellite.name] = {
                '_freshness': satellite.statistics['_freshness'],
                'livesynthesis': satellite.statistics['livesynthesis']
            }
            # Sum this scheduler's counters into the overall ones
            for prop in res['livesynthesis']['_overall']['livesynthesis']:
                if prop in satellite.statistics['livesynthesis']:
                    res['livesynthesis']['_overall']['livesynthesis'][prop] += \
                        satellite.statistics['livesynthesis'][prop]
    return res
def overall_state_id(self):
    """Compute the service overall state identifier.

    5: not monitored, 1: acknowledged, 2: downtimed,
    3: HARD WARNING/UNKNOWN, 4: HARD CRITICAL/UNREACHABLE, 0: otherwise.
    """
    if not self.monitored:
        return 5
    if self.acknowledged:
        return 1
    if self.downtimed:
        return 2
    if self.state_type == 'HARD':
        if self.state in ('WARNING', 'UNKNOWN'):
            return 3
        if self.state in ('CRITICAL', 'UNREACHABLE'):
            return 4
    return 0
def fill_predictive_missing_parameters(self):
    """Derive the textual state from the one-letter initial_state."""
    state_for_letter = {
        'w': u'WARNING',
        'u': u'UNKNOWN',
        'c': u'CRITICAL',
        'x': u'UNREACHABLE',
    }
    if self.initial_state in state_for_letter:
        self.state = state_for_letter[self.initial_state]
def get_name(self):
    """Return service_description, else name, else a placeholder."""
    for attribute in ('service_description', 'name'):
        if hasattr(self, attribute):
            return getattr(self, attribute)
    return 'SERVICE-DESCRIPTION-MISSING'
def get_groupnames(self, sgs):
    """Return the comma separated names of this service's servicegroups."""
    return ','.join(sgs[group_id].get_name() for group_id in self.servicegroups)
def duplicate(self, host):
    """Create the service copies implied by this service's duplicate_foreach.

    Reads the host custom macro named by `duplicate_foreach`, expands its
    key/value sequences, and returns one service copy per key. Keys listed in
    the companion '_!<PROP>' custom macro are excluded.

    :param host: host whose custom macros drive the duplication
    :return: list of duplicated services (possibly empty)
    """
    duplicates = []
    # The host custom macro to expand (upper-cased, e.g. '_DISKS')
    prop = self.duplicate_foreach.strip().upper()
    if prop not in host.customs:
        return duplicates
    entry = host.customs[prop]
    # Excluded keys come from the '_!<PROP>' custom (comma separated)
    not_entry = host.customs.get('_' + '!' + prop[1:], '').split(',')
    not_keys = strip_and_uniq(not_entry)
    default_value = getattr(self, 'default_value', '')
    try:
        # Expand the macro value into KEY/VALUEx dictionaries
        key_values = tuple(generate_key_value_sequences(entry, default_value))
    except KeyValueSyntaxError as exc:
        fmt_dict = {
            'prop': self.duplicate_foreach,
            'host': host.get_name(),
            'svc': self.service_description,
            'entry': entry,
            'exc': exc,
        }
        err = ("The custom property %(prop)r of the "
               "host %(host)r is not a valid entry for a service generator: %(exc)s, "
               "with entry=%(entry)r") % fmt_dict
        logger.warning(err)
        host.add_error(err)
        return duplicates
    for key_value in key_values:
        key = key_value['KEY']
        if key in not_keys:
            # This key is explicitly excluded
            continue
        new_s = self.copy()
        new_s.host_name = host.get_name()
        if self.is_tpl():
            # Duplicates of a template become real (registered) services
            new_s.register = 1
        for key in key_value:
            if key == 'KEY':
                if hasattr(self, 'service_description'):
                    # Strip characters unsafe in a service description
                    safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_',
                                            key_value[key])
                    new_s.service_description = self.service_description.replace(
                        '$' + key + '$', safe_key_value)
            # Expand the macro in the expandable properties
            _the_expandables = ['check_command', 'aggregation', 'event_handler']
            for prop in _the_expandables:
                if hasattr(self, prop):
                    setattr(new_s, prop,
                            getattr(new_s, prop).replace('$' + key + '$', key_value[key]))
            if hasattr(self, 'service_dependencies'):
                for i, servicedep in enumerate(new_s.service_dependencies):
                    new_s.service_dependencies[i] = servicedep.replace(
                        '$' + key + '$', key_value[key])
        duplicates.append(new_s)
    return duplicates
def set_state_from_exit_status(self, status, notif_period, hosts, services):
    """Set the service state from a check exit status.

    Maps 0/1/2/3/4 to OK/WARNING/CRITICAL/UNKNOWN/UNREACHABLE (any other
    status is treated as CRITICAL), updates the matching last_time_*
    timestamp, feeds the flapping history and records state-change times.

    :param status: plugin exit status (int)
    :param notif_period: notification period, used by the flapping update
    :param hosts: hosts collection, used by the flapping update
    :param services: services collection, used by the flapping update
    """
    now = time.time()
    cls = self.__class__
    # When impacted by a problem, the reference state is the one before impact
    if cls.enable_problem_impacts_states_change and \
            self.is_impact and not self.state_changed_since_impact:
        self.last_state = self.state_before_impact
    else:
        self.last_state = self.state
    if status == 0:
        self.state = u'OK'
        self.state_id = 0
        self.last_time_ok = int(self.last_state_update)
        state_code = 'o'
    elif status == 1:
        self.state = u'WARNING'
        self.state_id = 1
        self.last_time_warning = int(self.last_state_update)
        state_code = 'w'
    elif status == 2:
        self.state = u'CRITICAL'
        self.state_id = 2
        self.last_time_critical = int(self.last_state_update)
        state_code = 'c'
    elif status == 3:
        self.state = u'UNKNOWN'
        self.state_id = 3
        self.last_time_unknown = int(self.last_state_update)
        state_code = 'u'
    elif status == 4:
        self.state = u'UNREACHABLE'
        self.state_id = 4
        self.last_time_unreachable = int(self.last_state_update)
        state_code = 'x'
    else:
        # Any other status is considered CRITICAL
        self.state = u'CRITICAL'
        self.state_id = 2
        self.last_time_critical = int(self.last_state_update)
        state_code = 'c'
    # Feed the flapping history when this state takes part in flap detection
    if state_code in self.flap_detection_options:
        self.add_flapping_change(self.state != self.last_state)
        # Now we add a value, we update the is_flapping prop
        self.update_flapping(notif_period, hosts, services)
    if self.state != self.last_state:
        self.last_state_change = self.last_state_update
    self.duration_sec = now - self.last_state_change
def is_state(self, status):
    """Return True when status (full or one-letter form) matches the current state."""
    if status == self.state:
        return True
    letter_to_state = {
        'o': u'OK',
        'c': u'CRITICAL',
        'w': u'WARNING',
        'u': u'UNKNOWN',
        'x': u'UNREACHABLE',
    }
    return letter_to_state.get(status) == self.state
def last_time_non_ok_or_up(self):
    """Return the earliest non-OK timestamp more recent than the last OK, or 0."""
    candidates = [stamp for stamp in (self.last_time_warning,
                                      self.last_time_critical,
                                      self.last_time_unknown)
                  if stamp > self.last_time_ok]
    return min(candidates) if candidates else 0
def get_data_for_notifications(self, contact, notif, host_ref):
    """Return the notification macro-resolution data, host first when available."""
    if host_ref:
        return [host_ref, self, contact, notif]
    return [self, contact, notif]
def get_short_status(self, hosts, services):
    """Return a one-letter status; business-rule state wins when defined."""
    codes = {0: "O", 1: "W", 2: "C", 3: "U", 4: "N"}
    if self.got_business_rule:
        return codes.get(self.business_rule.get_state(hosts, services), "n/a")
    return codes.get(self.state_id, "n/a")
def add_template(self, tpl):
    """Add and index a service template; forge a name from host_name and
    service_description when the template has none."""
    objcls = self.inner_class.my_type
    name = getattr(tpl, 'name', '')
    sdesc = getattr(tpl, 'service_description', '')
    hname = getattr(tpl, 'host_name', '')
    logger.debug("Adding a %s template: host_name: %s, name: %s, service_description: %s",
                 objcls, hname, name, sdesc)
    if not name and not hname:
        msg = "a %s template has been defined without name nor host_name. from: %s" \
              % (objcls, tpl.imported_from)
        tpl.add_error(msg)
    elif not name and not sdesc:
        msg = "a %s template has been defined without name nor service_description. from: %s" \
              % (objcls, tpl.imported_from)
        tpl.add_error(msg)
    else:
        if not name:
            # Forge a name from host_name and service_description
            setattr(tpl, 'name', "%s_%s" % (hname, sdesc))
        tpl = self.index_template(tpl)
    self.templates[tpl.uuid] = tpl
    logger.debug('\tAdded service template #%d %s', len(self.templates), tpl)
def find_srvs_by_hostname(self, host_name):
    """Return the services of the named host, or None when unresolvable."""
    if not hasattr(self, 'hosts'):
        return None
    host = self.hosts.find_by_name(host_name)
    if host is None:
        return None
    return host.get_services()
def find_srv_by_name_and_hostname(self, host_name, sdescr):
    """Look up a service by its (host_name, service_description) pair."""
    return self.name_to_item.get((host_name, sdescr))
def linkify_s_by_hst(self, hosts):
    """Link each service with its parent host (resolved from host_name)."""
    for serv in self:
        if not hasattr(serv, 'host_name'):
            serv.host = None
            continue
        try:
            hst_name = serv.host_name
            hst = hosts.find_by_name(hst_name)
            if hst is None:
                # Unknown host: record a configuration warning on the service
                err = "Warning: the service '%s' got an invalid host_name '%s'" % \
                      (serv.get_name(), hst_name)
                serv.configuration_warnings.append(err)
                continue
            serv.host = hst.uuid
            hst.add_service_link(serv.uuid)
        except AttributeError:
            pass
def linkify_s_by_sg(self, servicegroups):
    """Replace servicegroup names with servicegroup uuids on each service."""
    for serv in self:
        resolved = []
        if hasattr(serv, 'servicegroups') and serv.servicegroups != '':
            for sg_name in serv.servicegroups:
                group = servicegroups.find_by_name(sg_name.strip())
                if group is None:
                    # Unknown group: record a configuration error
                    err = "Error: the servicegroup '%s' of the service '%s' is unknown" % \
                          (sg_name.strip(), serv.get_dbg_name())
                    serv.add_error(err)
                else:
                    resolved.append(group.uuid)
        serv.servicegroups = resolved
def clean(self):
    """Drop the services that have no linked host object."""
    orphan_uuids = [serv.uuid for serv in self if not serv.host]
    for service_uuid in orphan_uuids:
        del self.items[service_uuid]
def explode_services_from_hosts(self, hosts, service, hnames):
    """Duplicate a multi-host service: one copy per host name in hnames.

    Host names prefixed with '!' are exclusions removed from the duplication.

    :param hosts: hosts collection used to resolve names
    :param service: the service to duplicate
    :param hnames: list of host names (possibly '!'-prefixed exclusions)
    """
    duplicate_for_hosts = []
    not_hosts = []
    # Split the requested names from the '!' exclusions
    for hname in hnames:
        hname = hname.strip()
        if hname.startswith('!'):
            not_hosts.append(hname[1:])
        else:
            duplicate_for_hosts.append(hname)
    duplicate_for_hosts = list(set(duplicate_for_hosts))
    # Remove the excluded hosts
    for hname in not_hosts:
        try:
            duplicate_for_hosts.remove(hname)
        except ValueError:
            # Bug fix: list.remove raises ValueError (not IndexError) when
            # the excluded host is not in the duplication list
            pass
    # One service copy per remaining host
    for hname in duplicate_for_hosts:
        host = hosts.find_by_name(hname)
        if host is None:
            service.add_error("Error: The hostname %s is unknown for the service %s!"
                              % (hname, service.get_name()))
            continue
        if host.is_excluded_for(service):
            continue
        new_s = service.copy()
        new_s.host_name = hname
        self.add_item(new_s)
19,775 | def _local_create_service ( self , hosts , host_name , service ) : host = hosts . find_by_name ( host_name . strip ( ) ) if host . is_excluded_for ( service ) : return None new_s = service . copy ( ) new_s . host_name = host_name new_s . register = 1 self . add_item ( new_s ) return new_s | Create a new service based on a host_name and service instance . |
def explode_services_from_templates(self, hosts, service_template):
    """Create services on every host bound to the template's host_name expression."""
    hname = getattr(service_template, "host_name", None)
    if not hname:
        logger.debug("Service template %s is declared without an host_name",
                     service_template.get_name())
        return
    logger.debug("Explode services %s for the host: %s",
                 service_template.get_name(), hname)
    if is_complex_expr(hname):
        # Complex expression: evaluate it against the host templates
        for name in self.evaluate_hostgroup_expression(hname.strip(), hosts,
                                                       hosts.templates,
                                                       look_in='templates'):
            self._local_create_service(hosts, name, service_template)
    else:
        # Simple comma separated list of host template names
        for tpl_name in (n.strip() for n in hname.split(',') if n.strip()):
            for name in hosts.find_hosts_that_use_template(tpl_name):
                self._local_create_service(hosts, name, service_template)
def explode_services_duplicates(self, hosts, service):
    """Expand a service holding a duplicate_foreach clause into real services."""
    hname = getattr(service, "host_name", None)
    if hname is None:
        return
    # The generator needs a real host to read custom macros from
    host = hosts.find_by_name(hname.strip())
    if host is None:
        service.add_error('Error: The hostname %s is unknown for the service %s!'
                          % (hname, service.get_name()))
        return
    for new_s in service.duplicate(host):
        if host.is_excluded_for(new_s):
            continue
        self.add_item(new_s)
def register_service_into_servicegroups(service, servicegroups):
    """Register a service into the groups declared in its servicegroups attribute."""
    if not hasattr(service, 'service_description'):
        return
    sname = service.service_description
    shname = getattr(service, 'host_name', '')
    if not hasattr(service, 'servicegroups'):
        return
    # The attribute may already be a list, or still a comma separated string
    if isinstance(service.servicegroups, list):
        group_names = service.servicegroups
    else:
        group_names = service.servicegroups.split(',')
    for group_name in group_names:
        servicegroups.add_member([shname, sname], group_name.strip())
def register_service_dependencies(service, servicedependencies):
    """Register the dependencies declared as host1,desc1,host2,desc2,... pairs.

    An empty host name in a pair defaults to the service's own host.
    """
    sdeps = [token.strip() for token in getattr(service, "service_dependencies", [])]
    hname = ''
    for position, token in enumerate(sdeps):
        if position % 2 == 0:
            # Even positions carry the dependent host name
            hname = token
            continue
        desc = token
        if hasattr(service, 'service_description') and hasattr(service, 'host_name'):
            if hname == '':
                hname = service.host_name
            servicedependencies.add_service_dependency(
                service.host_name, service.service_description, hname, desc)
def explode(self, hosts, hostgroups, contactgroups, servicegroups, servicedependencies):
    """Explode the services against hosts/groups and register the related links.

    Successively: expand hostgroups/contactgroups references, duplicate the
    services declared on several hosts, create services from templates, apply
    the duplicate_foreach generators, remove the services excluded by their
    host, then register servicegroups memberships and service dependencies.
    """
    # Items can be modified during the loop: iterate over a key snapshot
    itemkeys = list(self.items.keys())
    for s_id in itemkeys:
        serv = self.items[s_id]
        self.explode_host_groups_into_hosts(serv, hosts, hostgroups)
        self.explode_contact_groups_into_contacts(serv, contactgroups)
        hnames = getattr(serv, "host_name", '')
        hnames = list(set([n.strip() for n in hnames.split(',') if n.strip()]))
        if len(hnames) == 1:
            self.index_item(serv)
        else:
            if len(hnames) >= 2:
                # Service declared on several hosts: one copy per host
                self.explode_services_from_hosts(hosts, serv, hnames)
            # The original multi-host (or host-less) service is dropped
            self.remove_item(serv)
    # Services generated from templates
    for s_id in self.templates:
        template = self.templates[s_id]
        self.explode_contact_groups_into_contacts(template, contactgroups)
        self.explode_services_from_templates(hosts, template)
    # duplicate_foreach generators
    duplicates = [serv.uuid for serv in self if getattr(serv, 'duplicate_foreach', '')]
    for s_id in duplicates:
        serv = self.items[s_id]
        self.explode_services_duplicates(hosts, serv)
        if not serv.configuration_errors:
            # The generator itself is removed once successfully expanded
            self.remove_item(serv)
    # Remove the services excluded by their host
    to_remove = []
    for service in self:
        host = hosts.find_by_name(service.host_name)
        if host and host.is_excluded_for(service):
            to_remove.append(service)
    for service in to_remove:
        self.remove_item(service)
    # Group memberships and dependencies
    for serv in self:
        self.register_service_into_servicegroups(serv, servicegroups)
        self.register_service_dependencies(serv, servicedependencies)
def get_next_notif_time(self, t_wished, status, creation_time, interval, escal_period):
    """Return the next notification time for a time-based escalation, or None.

    None is returned when the escalation is not time based, the status is not
    covered by its options, the start is past t_wished, or the escalation
    period invalidates the start time.
    """
    if not self.time_based:
        return None
    state_letters = {u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c',
                     u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's',
                     u'DOWN': 'd', u'UNREACHABLE': 'u', u'OK': 'o', u'UP': 'o'}
    letter = state_letters.get(status)
    if letter is not None and letter not in self.escalation_options:
        return None
    planned = self.first_notification_time * interval + creation_time
    if planned > t_wished:
        return None
    if escal_period is not None and not escal_period.is_time_valid(planned):
        return None
    return planned
def linkify_es_by_s(self, services):
    """Attach each escalation uuid to the services it applies to."""
    for escalation in self:
        if not hasattr(escalation, 'host_name'):
            continue
        es_hname, sdesc = escalation.host_name, escalation.service_description
        if not es_hname.strip() or not sdesc.strip():
            continue
        for hname in strip_and_uniq(es_hname.split(',')):
            if sdesc.strip() == '*':
                # Wildcard: every service of the host is escalated
                slist = services.find_srvs_by_hostname(hname)
                if slist is not None:
                    for serv_id in slist:
                        services[serv_id].escalations.append(escalation.uuid)
            else:
                for sname in strip_and_uniq(sdesc.split(',')):
                    serv = services.find_srv_by_name_and_hostname(hname, sname)
                    if serv is not None:
                        serv.escalations.append(escalation.uuid)
def linkify_es_by_h(self, hosts):
    """Attach each host escalation uuid to the ``escalations`` list of the
    hosts it applies to.

    :param hosts: known hosts collection
    :return: None
    """
    for escal in self:
        # Host escalations only: a host_name must be set...
        if not hasattr(escal, 'host_name') or escal.host_name.strip() == '':
            continue
        # ...and no service_description (that would be a service escalation).
        if hasattr(escal, 'service_description') and escal.service_description.strip() != '':
            continue
        for host_name in strip_and_uniq(escal.host_name.split(',')):
            found = hosts.find_by_name(host_name)
            if found is not None:
                found.escalations.append(escal.uuid)
def explode(self, hosts, hostgroups, contactgroups):
    """For every escalation, expand hostgroups into hosts and contactgroups
    into contacts.

    :param hosts: known hosts
    :param hostgroups: hostgroups to expand
    :param contactgroups: contactgroups to expand
    :return: None
    """
    for escalation in self:
        self.explode_host_groups_into_hosts(escalation, hosts, hostgroups)
        self.explode_contact_groups_into_contacts(escalation, contactgroups)
def get_hosts_by_explosion(self, hostgroups):
    """Recursively collect this group's hosts, expanding hostgroup_members.

    :param hostgroups: the hostgroups collection, used to resolve members
    :return: this group's hosts after expansion
    """
    self.already_exploded = True
    # Loop detection: finding our own tag set means the configuration
    # contains a hostgroup membership cycle.
    if self.rec_tag:
        logger.error("[hostgroup::%s] got a loop in hostgroup definition",
                     self.get_name())
    else:
        self.rec_tag = True
        for member_name in self.get_hostgroup_members():
            sub_group = hostgroups.find_by_name(member_name.strip())
            if sub_group is None:
                continue
            # Recurse into the sub-group and absorb its hosts.
            sub_hosts = sub_group.get_hosts_by_explosion(hostgroups)
            if sub_hosts is not None:
                self.add_members(sub_hosts)
    return self.get_hosts()
def add_member(self, host_name, hostgroup_name):
    """Add a host name to a hostgroup's members; create the hostgroup
    when it does not exist yet.

    :param host_name: host name (string) to add
    :param hostgroup_name: name of the target hostgroup
    :return: None
    """
    group = self.find_by_name(hostgroup_name)
    if group:
        group.add_members(host_name)
    else:
        # Unknown group: build it on the fly with the host as first member.
        group = Hostgroup({'hostgroup_name': hostgroup_name,
                           'alias': hostgroup_name,
                           'members': host_name})
        self.add(group)
def linkify(self, hosts=None, realms=None, forced_realms_hostgroups=True):
    """Link hostgroups with their hosts and realms.

    :param hosts: known hosts collection
    :param realms: known realms collection
    :param forced_realms_hostgroups: whether hostgroup realms are forced
        onto their hosts
    :return: None
    """
    # Resolve member names into host objects first, then handle realms.
    self.linkify_hostgroups_hosts(hosts)
    self.linkify_hostgroups_realms_hosts(realms, hosts, forced_realms_hostgroups)
def linkify_hostgroups_hosts(self, hosts):
    """Replace each hostgroup's member host names with host uuids and
    back-link the group uuid on every member host.

    Unknown names are recorded as unknown members; '*' expands to all hosts.

    :param hosts: known hosts collection
    :return: None
    """
    for group in self:
        resolved = []
        for name in group.get_hosts():
            name = name.strip()
            if not name:
                continue
            if name == '*':
                # Wildcard member: every known host belongs to the group.
                resolved.extend(list(hosts.items.keys()))
                continue
            host = hosts.find_by_name(name)
            if host is None:
                # Keep track of names we could not resolve.
                group.add_unknown_members(name)
            else:
                resolved.append(host.uuid)
                # Back-link the group on the host, once.
                if group.uuid not in host.hostgroups:
                    host.hostgroups.append(group.uuid)
        # De-duplicate before storing the resolved member uuids.
        group.replace_members(list(set(resolved)))
def explode(self):
    """Expand hostgroup_members recursively into each group's members.

    Temporary ``already_exploded`` / ``rec_tag`` flags are set on the items
    during the expansion (for work tracking and loop detection) and removed
    afterwards.

    :return: None
    """
    for group in list(self.items.values()):
        group.already_exploded = False
    for group in list(self.items.values()):
        if group.already_exploded:
            continue
        # Reset loop-detection tags before each top-level expansion.
        for other in list(self.items.values()):
            other.rec_tag = False
        group.get_hosts_by_explosion(self)
    # Drop the temporary flags.
    for group in list(self.items.values()):
        if hasattr(group, 'rec_tag'):
            del group.rec_tag
        del group.already_exploded
def run(self):
    """Start the CherryPy HTTP server engine and block until it exits.

    :raises PortNotFree: when the server socket cannot be bound
    :return: None
    """
    def _started_callback():
        """Callback executed once the CherryPy engine is listening."""
        cherrypy.log("CherryPy engine started and listening...")

    self.cherrypy_thread = None
    try:
        cherrypy.log("Starting CherryPy engine on %s" % (self.uri))
        self.cherrypy_thread = cherrypy.engine.start_with_callback(_started_callback)
        # Block the calling thread until the engine stops.
        cherrypy.engine.block()
        cherrypy.log("Exited from the engine block")
    except socket.error as exp:
        # Fixed error message grammar ("did not started" -> "did not start").
        raise PortNotFree("Error: Sorry, the HTTP server did not start "
                          "correctly: error: %s" % (str(exp)))
def stop(self):
    """Stop the CherryPy server engine, tolerating expected shutdown noise.

    :return: None
    """
    state = cherrypy.engine.state
    cherrypy.log("Stopping CherryPy engine (current state: %s)..." % state)
    try:
        cherrypy.engine.exit()
    except SystemExit:
        cherrypy.log('SystemExit raised: shutting down bus')
    except RuntimeWarning:
        # The engine was not running; nothing to do.
        pass
    cherrypy.log("Stopped")
def create_queues(self, manager=None):
    """(Re)create the two queues shared between the Alignak daemon process
    and this module process, clearing any previously created pair first.

    :param manager: multiprocessing Manager providing Queue(), or None to
        use plain multiprocessing queues
    :return: None
    """
    # Always start from a clean state.
    self.clear_queues(manager)
    # A manager-backed queue is needed when the queues cross a Manager
    # boundary; otherwise plain multiprocessing queues are used.
    factory = Queue if not manager else manager.Queue
    self.from_q = factory()
    self.to_q = factory()
def clear_queues(self, manager):
    """Release the resources held by this instance's queues and reset them.

    :param manager: the Manager owning the queues, or None/falsy when the
        queues were created locally (only those need explicit close())
    :return: None
    """
    for queue in (self.to_q, self.from_q):
        # Manager-owned queues are cleaned up by the manager itself.
        if queue is not None and not manager:
            try:
                queue.close()
                queue.join_thread()
            except AttributeError:
                # Queue implementation without close/join_thread: ignore.
                pass
    self.to_q = self.from_q = None
def start_module(self):
    """Run the module main function, logging and re-raising any exception.

    :raises Exception: whatever ``_main`` raised, wrapped for the caller
    :return: None
    """
    try:
        self._main()
    except Exception as err:
        # Keep the full traceback in the log before propagating.
        logger.exception('%s', traceback.format_exc())
        raise Exception(err)
def start(self, http_daemon=None):
    """(Re)start the module as an external process.

    Stops any running process first, then spawns a new Process whose
    target is ``start_module``.

    :param http_daemon: unused, kept for interface compatibility
    :return: None
    """
    if not self.is_external:
        # Internal modules run inside the daemon; nothing to spawn.
        return
    if self.process:
        self.stop_process()
    logger.info("Starting external process for module %s...", self.name)
    child = Process(target=self.start_module, args=(), group=None)
    # The live process object must not sit in the properties dict while
    # the child is being created.
    try:
        del self.properties['process']
    except KeyError:
        pass
    child.start()
    self.process = child
    self.properties['process'] = child
    logger.info("%s is now started (pid=%d)", self.name, child.pid)
def stop_process(self):
    """Request the external module process to stop and forget it.

    :return: None
    """
    if self.process:
        logger.info("I'm stopping module %r (pid=%d)", self.name, self.process.pid)
        self.kill()
        self.process = None
def manage_brok(self, brok):
    """Dispatch a brok to the matching ``manage_<type>_brok`` handler.

    :param brok: the brok to handle
    :return: the handler result, or False when the brok type is unhandled
    """
    handler = getattr(self, 'manage_%s_brok' % brok.type, None)
    if handler is None:
        return False
    # Deserialize/prepare the brok data only when it will be handled.
    brok.prepare()
    return handler(brok)
def manage_signal(self, sig, frame):
    """Generic signal handler: log the signal and flag the module to stop.

    :param sig: received signal number
    :param frame: current stack frame (unused)
    :return: None
    """
    logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
    if sig == signal.SIGHUP:
        # Modules cannot hot-reload configuration; a SIGHUP becomes a stop.
        logger.info("Modules are not able to reload their configuration. "
                    "Stopping the module...")
    logger.info("Request to stop the module")
    self.interrupted = True
def _main(self):
    """External module process entry point.

    Sets the process title and signal handlers, runs ``main()``, then
    stops and exits the process.

    :return: never returns (calls exit())
    """
    self.set_proctitle(self.name)
    self.set_signal_handler()
    logger.info("process for module %s is now running (pid=%d)",
                self.name, os.getpid())
    try:
        self.main()
    except (IOError, EOFError):
        # Broken pipe with the daemon: exit silently.
        pass
    except Exception as err:
        logger.exception('main function exception: %s', err)
    self.do_stop()
    logger.info("process for module %s is now exiting (pid=%d)",
                self.name, os.getpid())
    exit()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.