idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
19,500 | def set_log_level ( self , log_level = None ) : if log_level is None : log_level = cherrypy . request . json [ 'log_level' ] if log_level not in [ 'DEBUG' , 'INFO' , 'WARNING' , 'ERROR' , 'CRITICAL' ] : return { '_status' : u'ERR' , '_message' : u"Required log level is not allowed: %s" % log_level } alignak_logger = logging . getLogger ( ALIGNAK_LOGGER_NAME ) alignak_logger . setLevel ( log_level ) return self . get_log_level ( ) | Set the current log level for the daemon |
19,501 | def stats ( self , details = False ) : if details is not False : details = bool ( details ) res = self . identity ( ) res . update ( self . app . get_daemon_stats ( details = details ) ) return res | Get statistics and information from the daemon |
19,502 | def _have_conf ( self , magic_hash = None ) : self . app . have_conf = getattr ( self . app , 'cur_conf' , None ) not in [ None , { } ] if magic_hash is not None : magic_hash = int ( magic_hash ) return self . app . have_conf and self . app . cur_conf . magic_hash == magic_hash return self . app . have_conf | Get the daemon current configuration state |
19,503 | def _results ( self , scheduler_instance_id ) : with self . app . lock : res = self . app . get_results_from_passive ( scheduler_instance_id ) return serialize ( res , True ) | Get the results of the executed actions for the scheduler which instance id is provided |
19,504 | def _broks ( self , broker_name ) : with self . app . broks_lock : res = self . app . get_broks ( ) return serialize ( res , True ) | Get the broks from the daemon |
19,505 | def _events ( self ) : with self . app . events_lock : res = self . app . get_events ( ) return serialize ( res , True ) | Get the monitoring events from the daemon |
19,506 | def get_state ( self , hosts , services ) : if self . operand == 'host' : host = hosts [ self . sons [ 0 ] ] return self . get_host_node_state ( host . last_hard_state_id , host . problem_has_been_acknowledged , host . in_scheduled_downtime ) if self . operand == 'service' : service = services [ self . sons [ 0 ] ] return self . get_service_node_state ( service . last_hard_state_id , service . problem_has_been_acknowledged , service . in_scheduled_downtime ) if self . operand == '|' : return self . get_complex_or_node_state ( hosts , services ) if self . operand == '&' : return self . get_complex_and_node_state ( hosts , services ) if self . operand == 'of:' : return self . get_complex_xof_node_state ( hosts , services ) return 4 | Get node state by looking recursively over sons and applying operand |
19,507 | def eval_cor_pattern ( self , pattern , hosts , services , hostgroups , servicegroups , running = False ) : pattern = pattern . strip ( ) complex_node = False for char in '()&|' : if char in pattern : complex_node = True if complex_node is False : return self . eval_simple_cor_pattern ( pattern , hosts , services , hostgroups , servicegroups , running ) return self . eval_complex_cor_pattern ( pattern , hosts , services , hostgroups , servicegroups , running ) | Parse and build recursively a tree of DependencyNode from pattern |
19,508 | def eval_complex_cor_pattern ( self , pattern , hosts , services , hostgroups , servicegroups , running = False ) : node = DependencyNode ( ) pattern = self . eval_xof_pattern ( node , pattern ) in_par = False tmp = '' son_is_not = False stacked_parenthesis = 0 for char in pattern : if char == '(' : stacked_parenthesis += 1 in_par = True tmp = tmp . strip ( ) if stacked_parenthesis == 1 and tmp != '' : print ( "ERROR : bad expression near" , tmp ) continue if stacked_parenthesis > 1 : tmp += char elif char == ')' : stacked_parenthesis -= 1 if stacked_parenthesis < 0 : print ( "Error : bad expression near" , tmp , "too much ')'" ) continue if stacked_parenthesis == 0 : tmp = tmp . strip ( ) son = self . eval_cor_pattern ( tmp , hosts , services , hostgroups , servicegroups , running ) if son_is_not : son . not_value = True son_is_not = False node . sons . append ( son ) in_par = False tmp = '' continue tmp += char elif in_par : tmp += char elif char == '!' : tmp = tmp . strip ( ) if tmp and tmp [ 0 ] != '!' : print ( "Error : bad expression near" , tmp , "wrong position for '!'" ) continue son_is_not = True elif char in [ '&' , '|' ] : tmp = tmp . strip ( ) if node . operand is not None and node . operand != 'of:' and char != node . operand : return None if node . operand != 'of:' : node . operand = char if tmp != '' : son = self . eval_cor_pattern ( tmp , hosts , services , hostgroups , servicegroups , running ) if son_is_not : son . not_value = True son_is_not = False node . sons . append ( son ) tmp = '' else : tmp += char tmp = tmp . strip ( ) if tmp != '' : son = self . eval_cor_pattern ( tmp , hosts , services , hostgroups , servicegroups , running ) if son_is_not : son . not_value = True son_is_not = False node . sons . append ( son ) node . switch_zeros_of_values ( ) return node | Parse and build recursively a tree of DependencyNode from a complex pattern |
19,509 | def eval_simple_cor_pattern ( self , pattern , hosts , services , hostgroups , servicegroups , running = False ) : node = DependencyNode ( ) pattern = self . eval_xof_pattern ( node , pattern ) if pattern . startswith ( '!' ) : node . not_value = True pattern = pattern [ 1 : ] if re . search ( r"^([%s]+|\*):" % self . host_flags , pattern ) or re . search ( r",\s*([%s]+:.*|\*)$" % self . service_flags , pattern ) : son = self . expand_expression ( pattern , hosts , services , hostgroups , servicegroups , running ) if node . operand != 'of:' : node . operand = '&' node . sons . extend ( son . sons ) node . configuration_errors . extend ( son . configuration_errors ) node . switch_zeros_of_values ( ) else : node . operand = 'object' obj , error = self . find_object ( pattern , hosts , services ) if obj is not None : node . operand = obj . __class__ . my_type node . sons . append ( obj . uuid ) else : if running is False : node . configuration_errors . append ( error ) else : raise Exception ( error ) return node | Parse and build recursively a tree of DependencyNode from a simple pattern |
19,510 | def find_object ( self , pattern , hosts , services ) : obj = None error = None is_service = False elts = pattern . split ( ',' ) host_name = elts [ 0 ] . strip ( ) if not host_name : host_name = self . bound_item . host_name if len ( elts ) > 1 : is_service = True service_description = elts [ 1 ] . strip ( ) if is_service : obj = services . find_srv_by_name_and_hostname ( host_name , service_description ) if not obj : error = "Business rule uses unknown service %s/%s" % ( host_name , service_description ) else : obj = hosts . find_by_name ( host_name ) if not obj : error = "Business rule uses unknown host %s" % ( host_name , ) return obj , error | Find object from pattern |
19,511 | def is_time_valid ( self , timestamp ) : if hasattr ( self , 'exclude' ) : for daterange in self . exclude : if daterange . is_time_valid ( timestamp ) : return False for daterange in self . dateranges : if daterange . is_time_valid ( timestamp ) : return True return False | Check if a time is valid or not |
19,512 | def get_min_from_t ( self , timestamp ) : mins_incl = [ ] for daterange in self . dateranges : mins_incl . append ( daterange . get_min_from_t ( timestamp ) ) return min ( mins_incl ) | Get the first time > timestamp which is valid |
19,513 | def clean_cache ( self ) : now = int ( time . time ( ) ) t_to_del = [ ] for timestamp in self . cache : if timestamp < now : t_to_del . append ( timestamp ) for timestamp in t_to_del : del self . cache [ timestamp ] t_to_del = [ ] for timestamp in self . invalid_cache : if timestamp < now : t_to_del . append ( timestamp ) for timestamp in t_to_del : del self . invalid_cache [ timestamp ] | Clean cache with entries older than now because not used in future ; ) |
19,514 | def get_next_valid_time_from_t ( self , timestamp ) : timestamp = int ( timestamp ) original_t = timestamp res_from_cache = self . find_next_valid_time_from_cache ( timestamp ) if res_from_cache is not None : return res_from_cache still_loop = True while still_loop : local_min = None dr_mins = [ ] for daterange in self . dateranges : dr_mins . append ( daterange . get_next_valid_time_from_t ( timestamp ) ) s_dr_mins = sorted ( [ d for d in dr_mins if d is not None ] ) for t01 in s_dr_mins : if not self . exclude and still_loop : local_min = t01 still_loop = False else : for timeperiod in self . exclude : if not timeperiod . is_time_valid ( t01 ) and still_loop : local_min = t01 still_loop = False if local_min is None : exc_mins = [ ] if s_dr_mins != [ ] : for timeperiod in self . exclude : exc_mins . append ( timeperiod . get_next_invalid_time_from_t ( s_dr_mins [ 0 ] ) ) s_exc_mins = sorted ( [ d for d in exc_mins if d is not None ] ) if s_exc_mins != [ ] : local_min = s_exc_mins [ 0 ] if local_min is None : still_loop = False else : timestamp = local_min if timestamp > original_t + 3600 * 24 * 366 + 1 : still_loop = False local_min = None self . cache [ original_t ] = local_min return local_min | Get next valid time . If it s in cache get it otherwise define it . The limit to find it is 1 year . |
19,515 | def get_next_invalid_time_from_t ( self , timestamp ) : timestamp = int ( timestamp ) original_t = timestamp dr_mins = [ ] for daterange in self . dateranges : timestamp = original_t cont = True while cont : start = daterange . get_next_valid_time_from_t ( timestamp ) if start is not None : end = daterange . get_next_invalid_time_from_t ( start ) dr_mins . append ( ( start , end ) ) timestamp = end else : cont = False if timestamp > original_t + ( 3600 * 24 * 365 ) : cont = False periods = merge_periods ( dr_mins ) dr_mins = [ ] for exclude in self . exclude : for daterange in exclude . dateranges : timestamp = original_t cont = True while cont : start = daterange . get_next_valid_time_from_t ( timestamp ) if start is not None : end = daterange . get_next_invalid_time_from_t ( start ) dr_mins . append ( ( start , end ) ) timestamp = end else : cont = False if timestamp > original_t + ( 3600 * 24 * 365 ) : cont = False if not dr_mins : periods_exclude = [ ] else : periods_exclude = merge_periods ( dr_mins ) if len ( periods ) >= 1 : if periods [ 0 ] [ 0 ] > original_t : return original_t if len ( periods_exclude ) >= 1 : if periods_exclude [ 0 ] [ 0 ] < periods [ 0 ] [ 1 ] : return periods_exclude [ 0 ] [ 0 ] return periods [ 0 ] [ 1 ] return original_t | Get the next invalid time |
19,516 | def explode ( self ) : for entry in self . unresolved : self . resolve_daterange ( self . dateranges , entry ) self . unresolved = [ ] | Try to resolve all unresolved elements |
19,517 | def linkify ( self , timeperiods ) : new_exclude = [ ] if hasattr ( self , 'exclude' ) and self . exclude != [ ] : logger . debug ( "[timeentry::%s] have excluded %s" , self . get_name ( ) , self . exclude ) excluded_tps = self . exclude for tp_name in excluded_tps : timepriod = timeperiods . find_by_name ( tp_name . strip ( ) ) if timepriod is not None : new_exclude . append ( timepriod . uuid ) else : msg = "[timeentry::%s] unknown %s timeperiod" % ( self . get_name ( ) , tp_name ) self . add_error ( msg ) self . exclude = new_exclude | Will make timeperiod in exclude with id of the timeperiods |
19,518 | def check_exclude_rec ( self ) : if self . rec_tag : msg = "[timeentry::%s] is in a loop in exclude parameter" % ( self . get_name ( ) ) self . add_error ( msg ) return False self . rec_tag = True for timeperiod in self . exclude : timeperiod . check_exclude_rec ( ) return True | Check if this timeperiod is tagged |
19,519 | def explode ( self ) : for t_id in self . items : timeperiod = self . items [ t_id ] timeperiod . explode ( ) | Try to resolve each timeperiod |
19,520 | def linkify ( self ) : for t_id in self . items : timeperiod = self . items [ t_id ] timeperiod . linkify ( self ) | Check exclusion for each timeperiod |
19,521 | def apply_inheritance ( self ) : self . apply_partial_inheritance ( 'exclude' ) for i in self : self . get_customs_properties_by_inheritance ( i ) for timeperiod in self : self . get_unresolved_properties_by_inheritance ( timeperiod ) | The only interesting property to inherit is exclude |
19,522 | def is_correct ( self ) : valid = True for timeperiod in list ( self . items . values ( ) ) : timeperiod . rec_tag = False for timeperiod in list ( self . items . values ( ) ) : for tmp_tp in list ( self . items . values ( ) ) : tmp_tp . rec_tag = False valid = timeperiod . check_exclude_rec ( ) and valid for timeperiod in list ( self . items . values ( ) ) : del timeperiod . rec_tag if not timeperiod . is_correct ( ) : valid = False source = getattr ( timeperiod , 'imported_from' , "unknown source" ) msg = "Configuration in %s::%s is incorrect; from: %s" % ( timeperiod . my_type , timeperiod . get_name ( ) , source ) self . add_error ( msg ) self . configuration_errors += timeperiod . configuration_errors self . configuration_warnings += timeperiod . configuration_warnings for timeperiod in self : valid = timeperiod . is_correct ( ) and valid return valid | check if each properties of timeperiods are valid |
19,523 | def check_status_and_get_events ( self ) : statistics = { } events = [ ] for daemon_link in self . all_daemons_links : if daemon_link == self . arbiter_link : continue if not daemon_link . active : continue try : daemon_link . statistics = daemon_link . get_daemon_stats ( details = False ) if daemon_link . statistics : daemon_link . statistics [ '_freshness' ] = int ( time . time ( ) ) statistics [ daemon_link . name ] = daemon_link . statistics logger . debug ( "Daemon %s statistics: %s" , daemon_link . name , daemon_link . statistics ) except LinkError : logger . warning ( "Daemon connection failed, I could not get statistics." ) try : got = daemon_link . get_events ( ) if got : events . extend ( got ) logger . debug ( "Daemon %s has %d events: %s" , daemon_link . name , len ( got ) , got ) except LinkError : logger . warning ( "Daemon connection failed, I could not get events." ) return events | Get all the daemons status |
19,524 | def get_scheduler_ordered_list ( self , realm ) : scheduler_links = [ ] for scheduler_link_uuid in realm . schedulers : scheduler_links . append ( self . schedulers [ scheduler_link_uuid ] ) alive = [ ] spare = [ ] deads = [ ] for sdata in scheduler_links : if sdata . alive and not sdata . spare : alive . append ( sdata ) elif sdata . alive and sdata . spare : spare . append ( sdata ) else : deads . append ( sdata ) scheduler_links = [ ] scheduler_links . extend ( alive ) scheduler_links . extend ( spare ) scheduler_links . extend ( deads ) scheduler_links . reverse ( ) return scheduler_links | Get sorted scheduler list for a specific realm |
19,525 | def dispatch ( self , test = False ) : if not self . new_to_dispatch : raise DispatcherError ( "Dispatcher cannot dispatch, " "because no configuration is prepared!" ) if self . first_dispatch_done : raise DispatcherError ( "Dispatcher cannot dispatch, " "because the configuration is still dispatched!" ) if self . dispatch_ok : logger . info ( "Dispatching is already done and ok..." ) return logger . info ( "Trying to send configuration to the satellites..." ) self . dispatch_ok = True for link in self . arbiters : if link == self . arbiter_link : continue if not link . active : continue if not link . spare : continue if link . configuration_sent : logger . debug ( "Arbiter %s already sent!" , link . name ) continue if not link . reachable : logger . debug ( "Arbiter %s is not reachable to receive its configuration" , link . name ) continue logger . info ( "Sending configuration to the arbiter %s" , link . name ) logger . debug ( "- %s" , link . cfg ) link . put_conf ( link . cfg , test = test ) link . configuration_sent = True logger . info ( "- sent" ) link . do_not_run ( ) for link in self . schedulers : if link . configuration_sent : logger . debug ( "Scheduler %s already sent!" , link . name ) continue if not link . active : continue if not link . reachable : logger . debug ( "Scheduler %s is not reachable to receive its configuration" , link . name ) continue logger . info ( "Sending configuration to the scheduler %s" , link . name ) logger . debug ( "- %s" , link . cfg ) link . put_conf ( link . cfg , test = test ) link . configuration_sent = True logger . info ( "- sent" ) for link in self . satellites : if link . configuration_sent : logger . debug ( "%s %s already sent!" , link . type , link . name ) continue if not link . active : continue if not link . reachable : logger . warning ( "%s %s is not reachable to receive its configuration" , link . type , link . name ) continue logger . info ( "Sending configuration to the %s %s" , link . type , link . name ) logger . debug ( "- %s" , link . cfg ) link . put_conf ( link . cfg , test = test ) link . configuration_sent = True logger . info ( "- sent" ) if self . dispatch_ok : self . new_to_dispatch = False self . first_dispatch_done = True | Send configuration to satellites |
19,526 | def stop_request ( self , stop_now = False ) : all_ok = True for daemon_link in self . all_daemons_links : logger . debug ( "Stopping: %s (%s)" , daemon_link , stop_now ) if daemon_link == self . arbiter_link : continue if not daemon_link . active : continue try : stop_ok = daemon_link . stop_request ( stop_now = stop_now ) except LinkError : stop_ok = True logger . warning ( "Daemon stop request failed, %s probably stopped!" , daemon_link ) all_ok = all_ok and stop_ok daemon_link . stopping = True self . stop_request_sent = all_ok return self . stop_request_sent | Send a stop request to all the daemons |
19,527 | def pythonize ( self , val ) : __boolean_states__ = { '1' : True , 'yes' : True , 'true' : True , 'on' : True , '0' : False , 'no' : False , 'false' : False , 'off' : False } if isinstance ( val , bool ) : return val val = unique_value ( val ) . lower ( ) if val in list ( __boolean_states__ . keys ( ) ) : return __boolean_states__ [ val ] raise PythonizeError ( "Cannot convert '%s' to a boolean value" % val ) | Convert value into a boolean |
19,528 | def pythonize ( self , val ) : if isinstance ( val , list ) and len ( set ( val ) ) == 1 : return val [ 0 ] return val | If value is a single list element just return the element does nothing otherwise |
19,529 | def login ( self , username , password ) : logger . debug ( "login for: %s" , username ) if not username and not password : self . set_token ( token = None ) return False if not username or not password : logger . error ( "Username or password cannot be None!" ) self . set_token ( token = None ) return False endpoint = 'login' json = { 'username' : username , 'password' : password } response = self . get_response ( method = 'POST' , endpoint = endpoint , json = json ) if response . status_code == 401 : logger . error ( "Access denied to %s" , self . url_endpoint_root ) self . set_token ( token = None ) return False resp = self . decode ( response = response ) if 'token' in resp : self . set_token ( token = resp [ 'token' ] ) return True return False | Log into the WS interface and get the authentication token |
19,530 | def logout ( self ) : logger . debug ( "request backend logout" ) if not self . authenticated : logger . warning ( "Unnecessary logout ..." ) return True endpoint = 'logout' _ = self . get_response ( method = 'POST' , endpoint = endpoint ) self . session . close ( ) self . set_token ( token = None ) return True | Logout from the backend |
19,531 | def get ( self , endpoint , params = None ) : response = self . get_response ( method = 'GET' , endpoint = endpoint , params = params ) resp = self . decode ( response = response ) if '_status' not in resp : resp [ '_status' ] = u'OK' return resp | Get items or item in alignak backend |
19,532 | def post ( self , endpoint , data , files = None , headers = None ) : response = self . get_response ( method = 'POST' , endpoint = endpoint , json = data , headers = headers ) resp = self . decode ( response = response ) return resp | Create a new item |
19,533 | def patch ( self , endpoint , data ) : response = self . get_response ( method = 'PATCH' , endpoint = endpoint , json = data , headers = { 'Content-Type' : 'application/json' } ) if response . status_code == 200 : return self . decode ( response = response ) return response | Method to update an item |
19,534 | def init ( self , conf ) : self . my_conf = conf self . lists_on_demand = [ ] self . hosts = self . my_conf . hosts self . host_class = self . hosts . inner_class self . lists_on_demand . append ( self . hosts ) self . services = self . my_conf . services self . contacts = self . my_conf . contacts self . lists_on_demand . append ( self . contacts ) self . hostgroups = self . my_conf . hostgroups self . lists_on_demand . append ( self . hostgroups ) self . commands = self . my_conf . commands self . servicegroups = self . my_conf . servicegroups self . lists_on_demand . append ( self . servicegroups ) self . contactgroups = self . my_conf . contactgroups self . lists_on_demand . append ( self . contactgroups ) self . illegal_macro_output_chars = self . my_conf . illegal_macro_output_chars self . env_prefix = self . my_conf . env_variables_prefix | Initialize MacroResolver instance with conf . Must be called at least once . |
19,535 | def _get_value_from_element ( self , elt , prop ) : args = None if isinstance ( prop , tuple ) : prop , args = prop value = getattr ( elt , prop , None ) if value is None : return 'n/a' try : if isinstance ( value , list ) : return "[%s]" % ',' . join ( value ) if not isinstance ( value , collections . Callable ) : return value if not args : return value ( ) real_args = [ ] for arg in args : real_args . append ( getattr ( self , arg , None ) ) return value ( * real_args ) except AttributeError : return 'n/a' except UnicodeError : if isinstance ( value , string_types ) : return str ( value , 'utf8' , errors = 'ignore' ) return 'n/a' | Get value from an element s property . |
19,536 | def _delete_unwanted_caracters ( self , chain ) : try : chain = chain . decode ( 'utf8' , 'replace' ) except UnicodeEncodeError : pass except AttributeError : pass for char in self . illegal_macro_output_chars : chain = chain . replace ( char , '' ) return chain | Remove not wanted char from chain unwanted char are illegal_macro_output_chars attribute |
19,537 | def resolve_command ( self , com , data , macromodulations , timeperiods ) : logger . debug ( "Resolving: macros in: %s, arguments: %s" , com . command . command_line , com . args ) return self . resolve_simple_macros_in_string ( com . command . command_line , data , macromodulations , timeperiods , args = com . args ) | Resolve command macros with data |
19,538 | def _get_type_of_macro ( macros , objs ) : r for macro in macros : if re . match ( r'ARG\d' , macro ) : macros [ macro ] [ 'type' ] = 'ARGN' continue elif re . match ( r'_HOST\w' , macro ) : macros [ macro ] [ 'type' ] = 'CUSTOM' macros [ macro ] [ 'class' ] = 'HOST' continue elif re . match ( r'_SERVICE\w' , macro ) : macros [ macro ] [ 'type' ] = 'CUSTOM' macros [ macro ] [ 'class' ] = 'SERVICE' continue elif re . match ( r'_CONTACT\w' , macro ) : macros [ macro ] [ 'type' ] = 'CUSTOM' macros [ macro ] [ 'class' ] = 'CONTACT' continue elif len ( macro . split ( ':' ) ) > 1 : macros [ macro ] [ 'type' ] = 'ONDEMAND' continue for obj in objs : if macro in obj . macros : macros [ macro ] [ 'type' ] = 'object' macros [ macro ] [ 'object' ] = obj continue | r Set macros types |
19,539 | def _resolve_ondemand ( self , macro , data ) : elts = macro . split ( ':' ) nb_parts = len ( elts ) macro_name = elts [ 0 ] if nb_parts == 3 : val = '' ( host_name , service_description ) = ( elts [ 1 ] , elts [ 2 ] ) if host_name == '' : for elt in data : if elt is not None and elt . __class__ == self . host_class : host_name = elt . host_name serv = self . services . find_srv_by_name_and_hostname ( host_name , service_description ) if serv is not None : cls = serv . __class__ prop = cls . macros [ macro_name ] val = self . _get_value_from_element ( serv , prop ) return val else : val = '' elt_name = elts [ 1 ] if elt_name == '' : for elt in data : if elt is not None and elt . __class__ == self . host_class : elt_name = elt . host_name for od_list in self . lists_on_demand : cls = od_list . inner_class if macro_name in cls . macros : prop = cls . macros [ macro_name ] i = od_list . find_by_name ( elt_name ) if i is not None : val = self . _get_value_from_element ( i , prop ) break return val return 'n/a' | Get on demand macro value |
19,540 | def _tot_hosts_by_state ( self , state = None , state_type = None ) : if state is None and state_type is None : return len ( self . hosts ) if state_type : return sum ( 1 for h in self . hosts if h . state == state and h . state_type == state_type ) return sum ( 1 for h in self . hosts if h . state == state ) | Generic function to get the number of host in the specified state |
19,541 | def _tot_unhandled_hosts_by_state ( self , state ) : return sum ( 1 for h in self . hosts if h . state == state and h . state_type == u'HARD' and h . is_problem and not h . problem_has_been_acknowledged ) | Generic function to get the number of unhandled problem hosts in the specified state |
19,542 | def _tot_services_by_state ( self , state = None , state_type = None ) : if state is None and state_type is None : return len ( self . services ) if state_type : return sum ( 1 for s in self . services if s . state == state and s . state_type == state_type ) return sum ( 1 for s in self . services if s . state == state ) | Generic function to get the number of services in the specified state |
19,543 | def _tot_unhandled_services_by_state ( self , state ) : return sum ( 1 for s in self . services if s . state == state and s . is_problem and not s . problem_has_been_acknowledged ) | Generic function to get the number of unhandled problem services in the specified state |
19,544 | def _get_total_services_problems_unhandled ( self ) : return sum ( 1 for s in self . services if s . is_problem and not s . problem_has_been_acknowledged ) | Get the number of services that are a problem and that are not acknowledged |
19,545 | def _get_total_services_problems_handled ( self ) : return sum ( 1 for s in self . services if s . is_problem and s . problem_has_been_acknowledged ) | Get the number of service problems not handled |
19,546 | def add_data ( self , metric , value , ts = None ) : if not ts : ts = time . time ( ) if self . __data_lock . acquire ( ) : self . __data . append ( ( metric , ( ts , value ) ) ) self . __data_lock . release ( ) return True return False | Add data to queue |
19,547 | def set_daemon_name ( self , daemon_name ) : self . daemon_name = daemon_name for instance in self . instances : instance . set_loaded_into ( daemon_name ) | Set the daemon name of the daemon which this manager is attached to and propagate this daemon name to our managed modules |
19,548 | def load_and_init ( self , modules ) : self . load ( modules ) self . get_instances ( ) return len ( self . configuration_errors ) == 0 | Import instantiate & init the modules we manage |
19,549 | def load ( self , modules ) : self . modules_assoc = [ ] for module in modules : if not module . enabled : logger . info ( "Module %s is declared but not enabled" , module . name ) self . modules [ module . uuid ] = module continue logger . info ( "Importing Python module '%s' for %s..." , module . python_name , module . name ) try : python_module = importlib . import_module ( module . python_name ) if not hasattr ( python_module , 'properties' ) : self . configuration_errors . append ( "Module %s is missing a 'properties' " "dictionary" % module . python_name ) raise AttributeError logger . info ( "Module properties: %s" , getattr ( python_module , 'properties' ) ) if not hasattr ( python_module , 'get_instance' ) or not isinstance ( getattr ( python_module , 'get_instance' ) , collections . Callable ) : self . configuration_errors . append ( "Module %s is missing a 'get_instance' " "function" % module . python_name ) raise AttributeError self . modules_assoc . append ( ( module , python_module ) ) logger . info ( "Imported '%s' for %s" , module . python_name , module . name ) except ImportError as exp : self . configuration_errors . append ( "Module %s (%s) can't be loaded, Python " "importation error: %s" % ( module . python_name , module . name , str ( exp ) ) ) except AttributeError : self . configuration_errors . append ( "Module %s (%s) can't be loaded, " "module configuration" % ( module . python_name , module . name ) ) else : logger . info ( "Loaded Python module '%s' (%s)" , module . python_name , module . name ) | Load Python modules and check their usability |
19,550 | def try_instance_init ( self , instance , late_start = False ) : try : instance . init_try += 1 if not late_start and instance . init_try > 1 : if instance . last_init_try > time . time ( ) - MODULE_INIT_PERIOD : logger . info ( "Too early to retry initialization, retry period is %d seconds" , MODULE_INIT_PERIOD ) return False instance . last_init_try = time . time ( ) logger . info ( "Trying to initialize module: %s" , instance . name ) if instance . is_external : instance . create_queues ( self . daemon . sync_manager ) if not instance . init ( ) : logger . warning ( "Module %s initialisation failed." , instance . name ) return False logger . info ( "Module %s is initialized." , instance . name ) except Exception as exp : msg = "The module instance %s raised an exception " "on initialization: %s, I remove it!" % ( instance . name , str ( exp ) ) self . configuration_errors . append ( msg ) logger . error ( msg ) logger . exception ( exp ) return False return True | Try to initialize the given module instance . |
19,551 | def clear_instances ( self , instances = None ) : if instances is None : instances = self . instances [ : ] for instance in instances : self . remove_instance ( instance ) | Request to remove the given instances list or all if not provided |
19,552 | def set_to_restart ( self , instance ) : self . to_restart . append ( instance ) if instance . is_external : instance . proc = None | Put an instance to the restart queue |
19,553 | def get_instances ( self ) : self . clear_instances ( ) for ( alignak_module , python_module ) in self . modules_assoc : alignak_module . properties = python_module . properties . copy ( ) alignak_module . my_daemon = self . daemon logger . info ( "Alignak starting module '%s'" , alignak_module . get_name ( ) ) if getattr ( alignak_module , 'modules' , None ) : modules = [ ] for module_uuid in alignak_module . modules : if module_uuid in self . modules : modules . append ( self . modules [ module_uuid ] ) alignak_module . modules = modules logger . debug ( "Module '%s', parameters: %s" , alignak_module . get_name ( ) , alignak_module . __dict__ ) try : instance = python_module . get_instance ( alignak_module ) if not isinstance ( instance , BaseModule ) : self . configuration_errors . append ( "Module %s instance is not a " "BaseModule instance: %s" % ( alignak_module . get_name ( ) , type ( instance ) ) ) raise AttributeError except Exception as exp : logger . error ( "The module %s raised an exception on loading, I remove it!" , alignak_module . get_name ( ) ) logger . exception ( "Exception: %s" , exp ) self . configuration_errors . append ( "The module %s raised an exception on " "loading: %s, I remove it!" % ( alignak_module . get_name ( ) , str ( exp ) ) ) else : instance . set_loaded_into ( self . daemon . name ) self . instances . append ( instance ) for instance in self . instances : if not instance . is_external and not self . try_instance_init ( instance ) : logger . warning ( "The module '%s' failed to initialize, " "I will try to restart it later" , instance . name ) self . set_to_restart ( instance ) return self . instances | Create init and then returns the list of module instances that the caller needs . |
19,554 | def start_external_instances ( self , late_start = False ) : for instance in [ i for i in self . instances if i . is_external ] : if not self . try_instance_init ( instance , late_start = late_start ) : logger . warning ( "The module '%s' failed to init, I will try to restart it later" , instance . name ) self . set_to_restart ( instance ) continue logger . info ( "Starting external module %s" , instance . name ) instance . start ( ) | Launch external instances that are load correctly |
19,555 | def remove_instance ( self , instance ) : if instance . is_external : logger . info ( "Request external process to stop for %s" , instance . name ) instance . stop_process ( ) logger . info ( "External process stopped." ) instance . clear_queues ( self . daemon . sync_manager ) self . instances . remove ( instance ) | Request to cleanly remove the given instance . If instance is external also shutdown it cleanly |
19,556 | def check_alive_instances ( self ) : for instance in self . instances : if instance in self . to_restart : continue if instance . is_external and instance . process and not instance . process . is_alive ( ) : logger . error ( "The external module %s died unexpectedly!" , instance . name ) logger . info ( "Setting the module %s to restart" , instance . name ) instance . clear_queues ( self . daemon . sync_manager ) self . set_to_restart ( instance ) continue if self . daemon . max_queue_size == 0 : continue queue_size = 0 try : queue_size = instance . to_q . qsize ( ) except Exception : pass if queue_size > self . daemon . max_queue_size : logger . error ( "The module %s has a too important queue size (%s > %s max)!" , instance . name , queue_size , self . daemon . max_queue_size ) logger . info ( "Setting the module %s to restart" , instance . name ) instance . clear_queues ( self . daemon . sync_manager ) self . set_to_restart ( instance ) | Check alive instances . If not log error and try to restart it |
19,557 | def try_to_restart_deads ( self ) : to_restart = self . to_restart [ : ] del self . to_restart [ : ] for instance in to_restart : logger . warning ( "Trying to restart module: %s" , instance . name ) if self . try_instance_init ( instance ) : logger . warning ( "Restarting %s..." , instance . name ) instance . process = None instance . start ( ) else : self . to_restart . append ( instance ) | Try to reinit and restart dead instances |
19,558 | def stop_all ( self ) : logger . info ( 'Shutting down modules...' ) for instance in self . get_internal_instances ( ) : if hasattr ( instance , 'quit' ) and isinstance ( instance . quit , collections . Callable ) : instance . quit ( ) self . clear_instances ( [ instance for instance in self . instances if instance . is_external ] ) | Stop all module instances |
19,559 | def parse ( self ) : sub_directory = 'alignak.d' dir_name = os . path . dirname ( self . configuration_file ) dir_name = os . path . join ( dir_name , sub_directory ) self . cfg_files = [ self . configuration_file ] if os . path . exists ( dir_name ) : for root , _ , walk_files in os . walk ( dir_name , followlinks = True ) : for found_file in walk_files : if not re . search ( r"\.ini$" , found_file ) : continue self . cfg_files . append ( os . path . join ( root , found_file ) ) print ( "Loading configuration files: %s " % self . cfg_files ) self . config = configparser . ConfigParser ( ) try : self . config . read ( self . cfg_files ) if self . config . _sections == { } : print ( "* bad formatted configuration file: %s " % self . configuration_file ) if self . embedded : raise ValueError sys . exit ( 2 ) for section in self . config . sections ( ) : if self . verbose : print ( "- section: %s" % section ) for ( key , value ) in self . config . items ( section ) : inner_property = "%s.%s" % ( section , key ) setattr ( self , inner_property , value ) os . environ [ inner_property ] = value if self . verbose : print ( " %s = %s" % ( inner_property , value ) ) if self . export : inner_property = re . sub ( '[^0-9a-zA-Z]+' , '_' , inner_property ) inner_property = inner_property . upper ( ) print ( "export %s=%s" % ( inner_property , cmd_quote ( value ) ) ) except configparser . ParsingError as exp : print ( "* parsing error in config file : %s\n%s" % ( self . configuration_file , exp . message ) ) if self . embedded : return False sys . exit ( 3 ) except configparser . InterpolationMissingOptionError as exp : print ( "* incorrect or missing variable: %s" % str ( exp ) ) if self . embedded : return False sys . exit ( 3 ) if self . verbose : print ( "Configuration file parsed correctly" ) return True | Check if some extra configuration files are existing in an alignak . d sub directory near the found configuration file . |
19,560 | def write ( self , env_file ) : try : with open ( env_file , "w" ) as out_file : self . config . write ( out_file ) except Exception as exp : print ( "Dumping environment file raised an error: %s. " % exp ) | Write the Alignak configuration to a file |
19,561 | def get_alignak_macros ( self ) : macros = self . get_alignak_configuration ( macros = True ) sections = self . _search_sections ( 'pack.' ) for name , _ in list ( sections . items ( ) ) : section_macros = self . get_alignak_configuration ( section = name , macros = True ) macros . update ( section_macros ) return macros | Get the Alignak macros . |
19,562 | def get_alignak_configuration ( self , section = SECTION_CONFIGURATION , legacy_cfg = False , macros = False ) : configuration = self . _search_sections ( section ) if section not in configuration : return [ ] for prop , _ in list ( configuration [ section ] . items ( ) ) : if legacy_cfg : if not prop . startswith ( 'cfg' ) : configuration [ section ] . pop ( prop ) continue if macros : if not prop . startswith ( '_' ) and not prop . startswith ( '$' ) : configuration [ section ] . pop ( prop ) continue if prop . startswith ( 'cfg' ) or prop . startswith ( '_' ) or prop . startswith ( '$' ) : configuration [ section ] . pop ( prop ) return configuration [ section ] | Get the Alignak configuration parameters . All the variables included in the SECTION_CONFIGURATION section except the variables starting with cfg and the macros . |
19,563 | def get_daemons ( self , daemon_name = None , daemon_type = None ) : if daemon_name is not None : sections = self . _search_sections ( 'daemon.%s' % daemon_name ) if 'daemon.%s' % daemon_name in sections : return sections [ 'daemon.' + daemon_name ] return { } if daemon_type is not None : sections = self . _search_sections ( 'daemon.' ) for name , daemon in list ( sections . items ( ) ) : if 'type' not in daemon or not daemon [ 'type' ] == daemon_type : sections . pop ( name ) return sections return self . _search_sections ( 'daemon.' ) | Get the daemons configuration parameters |
19,564 | def get_modules ( self , name = None , daemon_name = None , names_only = True ) : if name is not None : sections = self . _search_sections ( 'module.' + name ) if 'module.' + name in sections : return sections [ 'module.' + name ] return { } if daemon_name is not None : section = self . get_daemons ( daemon_name ) if 'modules' in section and section [ 'modules' ] : modules = [ ] for module_name in section [ 'modules' ] . split ( ',' ) : if names_only : modules . append ( module_name ) else : modules . append ( self . get_modules ( name = module_name ) ) return modules return [ ] return self . _search_sections ( 'module.' ) | Get the modules configuration parameters |
19,565 | def copy_shell ( self ) : cls = self . __class__ new_i = cls ( ) new_i . uuid = self . uuid for prop in cls . properties : if hasattr ( self , prop ) : if prop in [ 'members' , 'unknown_members' ] : setattr ( new_i , prop , [ ] ) else : setattr ( new_i , prop , getattr ( self , prop ) ) return new_i | Copy the group properties EXCEPT the members . Members need to be filled after manually |
19,566 | def add_members ( self , members ) : if not isinstance ( members , list ) : members = [ members ] if not getattr ( self , 'members' , None ) : self . members = members else : self . members . extend ( members ) | Add a new member to the members list |
19,567 | def add_unknown_members ( self , members ) : if not isinstance ( members , list ) : members = [ members ] if not hasattr ( self , 'unknown_members' ) : self . unknown_members = members else : self . unknown_members . extend ( members ) | Add a new member to the unknown members list |
19,568 | def is_correct ( self ) : state = True if self . members : self . members = list ( set ( self . members ) ) if self . unknown_members : for member in self . unknown_members : msg = "[%s::%s] as %s, got unknown member '%s'" % ( self . my_type , self . get_name ( ) , self . __class__ . my_type , member ) self . add_error ( msg ) state = False return super ( Itemgroup , self ) . is_correct ( ) and state | Check if a group is valid . Valid mean all members exists so list of unknown_members is empty |
19,569 | def get_initial_status_brok ( self , extra = None ) : if extra and isinstance ( extra , Items ) : members = [ ] for member_id in self . members : member = extra [ member_id ] members . append ( ( member . uuid , member . get_name ( ) ) ) extra = { 'members' : members } return super ( Itemgroup , self ) . get_initial_status_brok ( extra = extra ) | Get a brok with the group properties |
19,570 | def check_dir ( self , dirname ) : try : os . makedirs ( dirname ) dir_stat = os . stat ( dirname ) print ( "Created the directory: %s, stat: %s" % ( dirname , dir_stat ) ) if not dir_stat . st_uid == self . uid : os . chown ( dirname , self . uid , self . gid ) os . chmod ( dirname , 0o775 ) dir_stat = os . stat ( dirname ) print ( "Changed directory ownership and permissions: %s, stat: %s" % ( dirname , dir_stat ) ) self . pre_log . append ( ( "DEBUG" , "Daemon '%s' directory %s checking... " "User uid: %s, directory stat: %s." % ( self . name , dirname , os . getuid ( ) , dir_stat ) ) ) self . pre_log . append ( ( "INFO" , "Daemon '%s' directory %s did not exist, I created it. " "I set ownership for this directory to %s:%s." % ( self . name , dirname , self . user , self . group ) ) ) except OSError as exp : if exp . errno == errno . EEXIST and os . path . isdir ( dirname ) : pass else : self . pre_log . append ( ( "ERROR" , "Daemon directory '%s' did not exist, " "and I could not create. Exception: %s" % ( dirname , exp ) ) ) self . exit_on_error ( "Daemon directory '%s' did not exist, " "and I could not create.'. Exception: %s" % ( dirname , exp ) , exit_code = 3 ) | Check and create directory |
19,571 | def request_stop ( self , message = '' , exit_code = 0 ) : if exit_code : if message : logger . error ( message ) try : sys . stderr . write ( message ) except Exception : pass logger . error ( "Sorry, I bail out, exit code: %d" , exit_code ) try : sys . stderr . write ( "Sorry, I bail out, exit code: %d" % exit_code ) except Exception : pass else : if message : logger . info ( message ) self . unlink ( ) self . do_stop ( ) logger . info ( "Stopped %s." , self . name ) sys . exit ( exit_code ) | Remove pid and stop daemon |
19,572 | def daemon_connection_init ( self , s_link , set_wait_new_conf = False ) : logger . debug ( "Daemon connection initialization: %s %s" , s_link . type , s_link . name ) if not s_link . active : logger . warning ( "%s '%s' is not active, do not initialize its connection!" , s_link . type , s_link . name ) return False s_link . create_connection ( ) logger . debug ( "[%s] Getting running identifier for '%s'" , self . name , s_link . name ) s_link . alive = True s_link . reachable = True got_a_running_id = None for _ in range ( 0 , s_link . max_check_attempts ) : got_a_running_id = s_link . get_running_id ( ) if got_a_running_id : s_link . last_connection = time . time ( ) if set_wait_new_conf : s_link . wait_new_conf ( ) break time . sleep ( 0.3 ) return got_a_running_id | Initialize a connection with the daemon for the provided satellite link |
19,573 | def do_load_modules ( self , modules ) : _ts = time . time ( ) logger . info ( "Loading modules..." ) if self . modules_manager . load_and_init ( modules ) : if self . modules_manager . instances : logger . info ( "I correctly loaded my modules: [%s]" , ',' . join ( [ inst . name for inst in self . modules_manager . instances ] ) ) else : logger . info ( "I do not have any module" ) else : logger . error ( "Errors were encountered when checking and loading modules:" ) for msg in self . modules_manager . configuration_errors : logger . error ( msg ) if self . modules_manager . configuration_warnings : for msg in self . modules_manager . configuration_warnings : logger . warning ( msg ) statsmgr . gauge ( 'modules.count' , len ( modules ) ) statsmgr . timer ( 'modules.load-time' , time . time ( ) - _ts ) | Wrapper for calling load_and_init method of modules_manager attribute |
19,574 | def dump_environment ( self ) : path = os . path . join ( tempfile . gettempdir ( ) , 'dump-env-%s-%s-%d.ini' % ( self . type , self . name , int ( time . time ( ) ) ) ) try : with open ( path , "w" ) as out_file : self . alignak_env . write ( out_file ) except Exception as exp : logger . error ( "Dumping daemon environment raised an error: %s. " , exp ) | Try to dump memory |
19,575 | def change_to_workdir ( self ) : logger . info ( "Changing working directory to: %s" , self . workdir ) self . check_dir ( self . workdir ) try : os . chdir ( self . workdir ) except OSError as exp : self . exit_on_error ( "Error changing to working directory: %s. Error: %s. " "Check the existence of %s and the %s/%s account " "permissions on this directory." % ( self . workdir , str ( exp ) , self . workdir , self . user , self . group ) , exit_code = 3 ) self . pre_log . append ( ( "INFO" , "Using working directory: %s" % os . path . abspath ( self . workdir ) ) ) | Change working directory to working attribute |
19,576 | def unlink ( self ) : logger . debug ( "Unlinking %s" , self . pid_filename ) try : os . unlink ( self . pid_filename ) except OSError as exp : logger . debug ( "Got an error unlinking our pid file: %s" , exp ) | Remove the daemon s pid file |
19,577 | def __open_pidfile ( self , write = False ) : try : self . pre_log . append ( ( "DEBUG" , "Opening %s pid file: %s" % ( 'existing' if os . path . exists ( self . pid_filename ) else 'missing' , self . pid_filename ) ) ) if not write and os . path . exists ( self . pid_filename ) : self . fpid = open ( self . pid_filename , 'r+' ) else : self . fpid = open ( self . pid_filename , 'w+' ) except Exception as exp : self . exit_on_error ( "Error opening pid file: %s. Error: %s. " "Check the %s:%s account permissions to write this file." % ( self . pid_filename , str ( exp ) , self . user , self . group ) , exit_code = 3 ) | Open pid file in read or write mod |
19,578 | def write_pid ( self , pid ) : self . fpid . seek ( 0 ) self . fpid . truncate ( ) self . fpid . write ( "%d" % pid ) self . fpid . close ( ) del self . fpid | Write pid to the pid file |
19,579 | def close_fds ( self , skip_close_fds ) : max_fds = resource . getrlimit ( resource . RLIMIT_NOFILE ) [ 1 ] if max_fds == resource . RLIM_INFINITY : max_fds = 1024 self . pre_log . append ( ( "DEBUG" , "Maximum file descriptors: %d" % max_fds ) ) for file_d in range ( 0 , max_fds ) : if file_d in skip_close_fds : self . pre_log . append ( ( "INFO" , "Do not close fd: %s" % file_d ) ) continue try : os . close ( file_d ) except OSError : pass | Close all the process file descriptors . Skip the descriptors present in the skip_close_fds list |
19,580 | def do_daemon_init_and_start ( self , set_proc_title = True ) : if set_proc_title : self . set_proctitle ( self . name ) self . change_to_user_group ( ) self . change_to_workdir ( ) self . check_parallel_run ( ) if self . is_daemon : if not self . daemonize ( ) : logger . error ( "I could not daemonize myself :(" ) return False else : self . write_pid ( os . getpid ( ) ) self . setup_alignak_logger ( ) if not self . setup_communication_daemon ( ) : logger . error ( "I could not setup my communication daemon :(" ) return False self . sync_manager = self . _create_manager ( ) logger . info ( "Starting http_daemon thread" ) self . http_thread = threading . Thread ( target = self . http_daemon_thread , name = '%s-http_thread' % self . name ) self . http_thread . daemon = True self . http_thread . start ( ) logger . info ( "HTTP daemon thread started" ) return True | Main daemon function . Clean allocates initializes and starts all necessary resources to go in daemon mode . |
19,581 | def setup_communication_daemon ( self ) : ca_cert = ssl_cert = ssl_key = server_dh = None if self . use_ssl : ssl_cert = os . path . abspath ( self . server_cert ) if not os . path . exists ( ssl_cert ) : self . exit_on_error ( "The configured SSL server certificate file '%s' " "does not exist." % ssl_cert , exit_code = 2 ) logger . info ( "Using SSL server certificate: %s" , ssl_cert ) ssl_key = os . path . abspath ( self . server_key ) if not os . path . exists ( ssl_key ) : self . exit_on_error ( "The configured SSL server key file '%s' " "does not exist." % ssl_key , exit_code = 2 ) logger . info ( "Using SSL server key: %s" , ssl_key ) if self . server_dh : server_dh = os . path . abspath ( self . server_dh ) logger . info ( "Using ssl dh cert file: %s" , server_dh ) self . exit_on_error ( "Sorry, but using a DH configuration " "is not currently supported!" , exit_code = 2 ) if self . ca_cert : ca_cert = os . path . abspath ( self . ca_cert ) logger . info ( "Using ssl ca cert file: %s" , ca_cert ) if self . hard_ssl_name_check : logger . info ( "Enabling hard SSL server name verification" ) try : logger . info ( 'Setting up HTTP daemon (%s:%d), %d threads' , self . host , self . port , self . thread_pool_size ) self . http_daemon = HTTPDaemon ( self . host , self . port , self . http_interface , self . use_ssl , ca_cert , ssl_key , ssl_cert , server_dh , self . thread_pool_size , self . log_cherrypy , self . favicon ) except PortNotFree : logger . error ( 'The HTTP daemon port (%s:%d) is not free...' , self . host , self . port ) return False except Exception as exp : print ( 'Setting up HTTP daemon, exception: %s' , str ( exp ) ) logger . exception ( 'Setting up HTTP daemon, exception: %s' , str ( exp ) ) return False return True | Setup HTTP server daemon to listen for incoming HTTP requests from other Alignak daemons |
19,582 | def set_proctitle ( self , daemon_name = None ) : logger . debug ( "Setting my process name: %s" , daemon_name ) if daemon_name : setproctitle ( "alignak-%s %s" % ( self . type , daemon_name ) ) if self . modules_manager : self . modules_manager . set_daemon_name ( daemon_name ) else : setproctitle ( "alignak-%s" % self . type ) | Set the proctitle of the daemon |
19,583 | def http_daemon_thread ( self ) : logger . debug ( "HTTP thread running" ) try : self . http_daemon . run ( ) except PortNotFree as exp : logger . exception ( 'The HTTP daemon port is not free: %s' , exp ) raise except Exception as exp : self . exit_on_exception ( exp ) logger . debug ( "HTTP thread exiting" ) | Main function of the http daemon thread will loop forever unless we stop the root daemon |
19,584 | def make_a_pause ( self , timeout = 0.0001 , check_time_change = True ) : if timeout == 0 : timeout = 0.0001 if not check_time_change : time . sleep ( timeout ) self . sleep_time += timeout return 0 , 0 before = time . time ( ) time_changed = self . check_for_system_time_change ( ) after = time . time ( ) elapsed = after - before if elapsed > timeout : return elapsed , time_changed time . sleep ( timeout - elapsed ) before += time_changed self . sleep_time += time . time ( ) - before return elapsed , time_changed | Wait up to timeout and check for system time change . |
19,585 | def wait_for_initial_conf ( self , timeout = 1.0 ) : logger . info ( "Waiting for initial configuration" ) _ts = time . time ( ) while not self . new_conf and not self . interrupted : _ , _ = self . make_a_pause ( timeout , check_time_change = True ) if not self . interrupted : logger . info ( "Got initial configuration, waited for: %.2f seconds" , time . time ( ) - _ts ) statsmgr . timer ( 'configuration.initial' , time . time ( ) - _ts ) else : logger . info ( "Interrupted before getting the initial configuration" ) | Wait initial configuration from the arbiter . Basically sleep 1 . 0 and check if new_conf is here |
19,586 | def watch_for_new_conf ( self , timeout = 0 ) : logger . debug ( "Watching for a new configuration, timeout: %s" , timeout ) self . make_a_pause ( timeout = timeout , check_time_change = False ) return any ( self . new_conf ) | Check if a new configuration was sent to the daemon |
19,587 | def hook_point ( self , hook_name , handle = None ) : full_hook_name = 'hook_' + hook_name for module in self . modules_manager . instances : _ts = time . time ( ) if not hasattr ( module , full_hook_name ) : continue fun = getattr ( module , full_hook_name ) try : fun ( handle if handle is not None else self ) except Exception as exp : logger . warning ( 'The instance %s raised an exception %s. I disabled it,' ' and set it to restart later' , module . name , str ( exp ) ) logger . exception ( 'Exception %s' , exp ) self . modules_manager . set_to_restart ( module ) else : statsmgr . timer ( 'hook.%s.%s' % ( hook_name , module . name ) , time . time ( ) - _ts ) | Used to call module function that may define a hook function for hook_name |
19,588 | def get_id ( self , details = False ) : res = { "alignak" : getattr ( self , 'alignak_name' , 'unknown' ) , "type" : getattr ( self , 'type' , 'unknown' ) , "name" : getattr ( self , 'name' , 'unknown' ) , "version" : VERSION } return res | Get daemon identification information |
19,589 | def exit_ok ( self , message , exit_code = None ) : logger . info ( "Exiting..." ) if message : logger . info ( "-----" ) logger . error ( "Exit message: %s" , message ) logger . info ( "-----" ) self . request_stop ( ) if exit_code is not None : exit ( exit_code ) | Log a message and exit |
19,590 | def exit_on_error ( self , message , exit_code = 1 ) : log = "I got an unrecoverable error. I have to exit." if message : log += "\n-----\nError message: %s" % message print ( "Error message: %s" % message ) log += "-----\n" log += "You can get help at https://github.com/Alignak-monitoring/alignak\n" log += "If you think this is a bug, create a new issue including as much " "details as possible (version, configuration,...)" if exit_code is not None : exit ( exit_code ) | Log generic message when getting an error and exit |
19,591 | def exit_on_exception ( self , raised_exception , message = '' , exit_code = 99 ) : self . exit_on_error ( message = message , exit_code = None ) logger . critical ( "-----\nException: %s\nBack trace of the error:\n%s" , str ( raised_exception ) , traceback . format_exc ( ) ) exit ( exit_code ) | Log generic message when getting an unrecoverable error |
19,592 | def get_objects_from_from_queues ( self ) : _t0 = time . time ( ) had_some_objects = False for module in self . modules_manager . get_external_instances ( ) : queue = module . from_q if not queue : continue while True : queue_size = queue . qsize ( ) if queue_size : statsmgr . gauge ( 'queues.from.%s.count' % module . get_name ( ) , queue_size ) try : obj = queue . get_nowait ( ) except Full : logger . warning ( "Module %s from queue is full" , module . get_name ( ) ) except Empty : break except ( IOError , EOFError ) as exp : logger . warning ( "Module %s from queue is no more available: %s" , module . get_name ( ) , str ( exp ) ) except Exception as exp : logger . error ( "An external module queue got a problem '%s'" , str ( exp ) ) else : had_some_objects = True self . add ( obj ) statsmgr . timer ( 'queues.time' , time . time ( ) - _t0 ) return had_some_objects | Get objects from from queues and add them . |
19,593 | def add_automatic_comment ( self , ref ) : if self . fixed is True : text = ( DOWNTIME_FIXED_MESSAGE % ( ref . my_type , time . strftime ( "%Y-%m-%d %H:%M:%S" , time . localtime ( self . start_time ) ) , time . strftime ( "%Y-%m-%d %H:%M:%S" , time . localtime ( self . end_time ) ) , ref . my_type ) ) else : hours , remainder = divmod ( self . duration , 3600 ) minutes , _ = divmod ( remainder , 60 ) text = ( DOWNTIME_FLEXIBLE_MESSAGE % ( ref . my_type , time . strftime ( "%Y-%m-%d %H:%M:%S" , time . localtime ( self . start_time ) ) , time . strftime ( "%Y-%m-%d %H:%M:%S" , time . localtime ( self . end_time ) ) , hours , minutes , ref . my_type ) ) data = { 'comment' : text , 'comment_type' : 1 if ref . my_type == 'host' else 2 , 'entry_type' : 2 , 'source' : 0 , 'expires' : False , 'ref' : ref . uuid } comment = Comment ( data ) self . comment_id = comment . uuid ref . comments [ comment . uuid ] = comment return comment | Add comment on ref for downtime |
19,594 | def get_raise_brok ( self , host_name , service_name = '' ) : data = self . serialize ( ) data [ 'host' ] = host_name if service_name != '' : data [ 'service' ] = service_name return Brok ( { 'type' : 'downtime_raise' , 'data' : data } ) | Get a start downtime brok |
19,595 | def get_expire_brok ( self , host_name , service_name = '' ) : data = self . serialize ( ) data [ 'host' ] = host_name if service_name != '' : data [ 'service' ] = service_name return Brok ( { 'type' : 'downtime_expire' , 'data' : data } ) | Get an expire downtime brok |
19,596 | def fill_data_brok_from ( self , data , brok_type ) : cls = self . __class__ for prop , entry in list ( cls . properties . items ( ) ) : if brok_type in entry . fill_brok : if hasattr ( self , prop ) : data [ prop ] = getattr ( self , prop ) | Add properties to data if fill_brok of these class properties is same as brok_type |
19,597 | def get_name ( self ) : return getattr ( self , 'dependent_host_name' , '' ) + '/' + getattr ( self , 'dependent_service_description' , '' ) + '..' + getattr ( self , 'host_name' , '' ) + '/' + getattr ( self , 'service_description' , '' ) | Get name based on 4 class attributes Each attribute is substituted by if attribute does not exist |
19,598 | def explode_hostgroup ( self , svc_dep , hostgroups ) : snames = [ d . strip ( ) for d in svc_dep . service_description . split ( ',' ) ] dep_snames = [ d . strip ( ) for d in svc_dep . dependent_service_description . split ( ',' ) ] hg_names = [ n . strip ( ) for n in svc_dep . hostgroup_name . split ( ',' ) ] for hg_name in hg_names : hostgroup = hostgroups . find_by_name ( hg_name ) if hostgroup is None : err = "ERROR: the servicedependecy got an unknown hostgroup_name '%s'" % hg_name self . add_error ( err ) continue hnames = [ ] hnames . extend ( [ m . strip ( ) for m in hostgroup . get_hosts ( ) ] ) for hname in hnames : for dep_sname in dep_snames : for sname in snames : new_sd = svc_dep . copy ( ) new_sd . host_name = hname new_sd . service_description = sname new_sd . dependent_host_name = hname new_sd . dependent_service_description = dep_sname self . add_item ( new_sd ) | Explode a service dependency for each member of hostgroup |
19,599 | def linkify_sd_by_s ( self , hosts , services ) : to_del = [ ] errors = self . configuration_errors warns = self . configuration_warnings for servicedep in self : try : s_name = servicedep . dependent_service_description hst_name = servicedep . dependent_host_name serv = services . find_srv_by_name_and_hostname ( hst_name , s_name ) if serv is None : host = hosts . find_by_name ( hst_name ) if not ( host and host . is_excluded_for_sdesc ( s_name ) ) : errors . append ( "Service %s not found for host %s" % ( s_name , hst_name ) ) elif host : warns . append ( "Service %s is excluded from host %s ; " "removing this servicedependency as it's unusuable." % ( s_name , hst_name ) ) to_del . append ( servicedep ) continue servicedep . dependent_service_description = serv . uuid s_name = servicedep . service_description hst_name = servicedep . host_name serv = services . find_srv_by_name_and_hostname ( hst_name , s_name ) if serv is None : host = hosts . find_by_name ( hst_name ) if not ( host and host . is_excluded_for_sdesc ( s_name ) ) : errors . append ( "Service %s not found for host %s" % ( s_name , hst_name ) ) elif host : warns . append ( "Service %s is excluded from host %s ; " "removing this servicedependency as it's unusuable." % ( s_name , hst_name ) ) to_del . append ( servicedep ) continue servicedep . service_description = serv . uuid except AttributeError as err : logger . error ( "[servicedependency] fail to linkify by service %s: %s" , servicedep , err ) to_del . append ( servicedep ) for servicedep in to_del : self . remove_item ( servicedep ) | Replace dependent_service_description and service_description in service dependency by the real object |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.