idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
19,400
def explode_host_groups_into_hosts ( self , item , hosts , hostgroups ) : hnames_list = [ ] # Gets item's hostgroup_name hgnames = getattr ( item , "hostgroup_name" , '' ) or '' # Defines if hostgroup is a complex expression # Expands hostgroups if is_complex_expr ( hgnames ) : hnames_list . extend ( self . evaluate_hostgroup_expression ( item . hostgroup_name , hosts , hostgroups ) ) elif hgnames : try : hnames_list . extend ( self . get_hosts_from_hostgroups ( hgnames , hostgroups ) ) except ValueError as err : # pragma: no cover, simple protection item . add_error ( str ( err ) ) # Expands host names hname = getattr ( item , "host_name" , '' ) hnames_list . extend ( [ n . strip ( ) for n in hname . split ( ',' ) if n . strip ( ) ] ) hnames = set ( ) for host in hnames_list : # If the host start with a !, it's to be removed from # the hostgroup get list if host . startswith ( '!' ) : hst_to_remove = host [ 1 : ] . strip ( ) try : hnames . remove ( hst_to_remove ) except KeyError : pass elif host == '*' : hnames . update ( [ host . host_name for host in hosts . items . values ( ) if getattr ( host , 'host_name' , '' ) ] ) # Else it's a host to add, but maybe it's ALL else : hnames . add ( host ) item . host_name = ',' . join ( hnames )
Get all hosts of hostgroups and add all in host_name container
390
14
19,401
def get_customs_properties_by_inheritance(self, obj):
    """Get custom properties from the templates defined for this object.

    Recursively merges the custom (``_``-prefixed) properties of the
    object's templates into ``obj.customs``; '+'-flagged values are
    prepended to the inherited value. Mutates ``obj.customs`` in place
    and returns it.
    """
    for t_id in obj.templates:
        template = self.templates[t_id]
        # Depth-first: the template's own templates are resolved first.
        tpl_cv = self.get_customs_properties_by_inheritance(template)
        if tpl_cv:
            for prop in tpl_cv:
                if prop not in obj.customs:
                    value = tpl_cv[prop]
                else:
                    value = obj.customs[prop]
                if obj.has_plus(prop):
                    # '+' means: keep our own value in front of the inherited one.
                    value.insert(0, obj.get_plus_and_delete(prop))
                    # value = self.get_plus_and_delete(prop) + ',' + value
                obj.customs[prop] = value
    for prop in obj.customs:
        value = obj.customs[prop]
        if obj.has_plus(prop):
            value.insert(0, obj.get_plus_and_delete(prop))
            obj.customs[prop] = value
    # We can get custom properties in plus, we need to get all
    # entries and put them into customs
    cust_in_plus = obj.get_all_plus_and_delete()
    for prop in cust_in_plus:
        obj.customs[prop] = cust_in_plus[prop]
    return obj.customs
Get custom properties from the templates defined in this object
280
10
19,402
def add_edge(self, from_node, to_node):
    """Add an oriented edge from *from_node* to *to_node*.

    Unknown endpoints are registered on the fly.
    """
    # Register the destination first if the graph never saw it.
    if to_node not in self.nodes:
        self.add_node(to_node)
    if from_node in self.nodes:
        self.nodes[from_node]["sons"].append(to_node)
    else:
        # Unknown source node: create it directly with its first son.
        self.nodes[from_node] = {"dfs_loop_status": "", "sons": [to_node]}
Add edge between two node The edge is oriented
116
9
19,403
def loop_check(self):
    """Return the list of node identifiers that are part of a cycle.

    Every node is tagged 'DFS_UNCHECKED', a depth-first search is run
    from each still-unchecked node, then the temporary tags are removed.
    """
    looping_ids = []
    # Seed every node with the "not visited yet" tag.
    for data in list(self.nodes.values()):
        data['dfs_loop_status'] = 'DFS_UNCHECKED'
    for identifier, data in self.nodes.items():
        # Only start a DFS from nodes not reached by a previous run.
        if data['dfs_loop_status'] == 'DFS_UNCHECKED':
            self.dfs_loop_search(identifier)
        if data['dfs_loop_status'] == 'DFS_LOOP_INSIDE':
            looping_ids.append(identifier)
    # Drop the working tags so the graph is left clean.
    for data in list(self.nodes.values()):
        del data['dfs_loop_status']
    return looping_ids
Check if we have a loop in the graph
205
9
19,404
def dfs_loop_search(self, root):
    """Depth-first search used by loop_check to tag nodes stuck in a loop.

    Tags used: DFS_TEMPORARY_CHECKED while a node is on the current DFS
    path, DFS_LOOP_INSIDE when it belongs to a cycle, DFS_NEAR_LOOP when
    it can reach a cycle, DFS_OK when all of its children are clean.
    """
    # Make the root temporary checked
    self.nodes[root]['dfs_loop_status'] = 'DFS_TEMPORARY_CHECKED'

    # We are scanning the sons
    for child in self.nodes[root]["sons"]:
        child_status = self.nodes[child]['dfs_loop_status']
        # If a child is not checked, check it
        if child_status == 'DFS_UNCHECKED':
            self.dfs_loop_search(child)
            # Re-read: the recursion may have re-tagged the child.
            child_status = self.nodes[child]['dfs_loop_status']
        # If a child is still temporary checked it is on the current DFS
        # path: we closed a cycle, so both ends are inside the loop.
        if child_status == 'DFS_TEMPORARY_CHECKED':
            self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
            self.nodes[root]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
        # If a child is (near) a loop, the root can reach a loop too.
        if child_status in ('DFS_NEAR_LOOP', 'DFS_LOOP_INSIDE'):
            # if a node is known to be part of a loop, do not let it be less
            if self.nodes[root]['dfs_loop_status'] != 'DFS_LOOP_INSIDE':
                self.nodes[root]['dfs_loop_status'] = 'DFS_NEAR_LOOP'
            # We've already seen this child, it's a problem
            self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'

    # If root have been modified, do not set it OK
    # A node is OK if and only if all of its children are OK
    # if it does not have a child, goes ok
    if self.nodes[root]['dfs_loop_status'] == 'DFS_TEMPORARY_CHECKED':
        self.nodes[root]['dfs_loop_status'] = 'DFS_OK'
Main algorithm to look for loops. It tags the nodes and finds the ones stuck in a loop.
493
17
19,405
def dfs_get_all_childs(self, root):
    """Return *root* and, recursively, all of its descendants.

    Nodes are marked 'DFS_CHECKED' while walking so each one is
    explored at most once, even in a cyclic graph.
    """
    node = self.nodes[root]
    node['dfs_loop_status'] = 'DFS_CHECKED'
    collected = {root}
    collected.update(node['sons'])
    for son in node['sons']:
        # Skip sons already walked by a previous recursion.
        if self.nodes[son]['dfs_loop_status'] != 'DFS_UNCHECKED':
            continue
        collected.update(self.dfs_get_all_childs(son))
    return list(collected)
Recursively get all sons of this node
151
9
19,406
def identity(self):
    """Return the daemon identity dict, enriched with start time and running id."""
    info = self.app.get_id()
    info["start_time"] = self.start_time
    info["running_id"] = self.running_id
    return info
Get the daemon identity
55
4
19,407
def api(self):
    """List the methods available on the daemon Web service interface.

    :return: a dict with a global 'doc' key and an 'api' list; each entry
        describes one public method: owning daemon type, name, docstring,
        URI and arguments with their default values
    """
    functions = [x[0] for x in inspect.getmembers(self, predicate=inspect.ismethod)
                 if not x[0].startswith('_')]

    full_api = {
        'doc': u"When posting data you have to use the JSON format.",
        'api': []
    }
    my_daemon_type = "%s" % getattr(self.app, 'type', 'unknown')
    my_address = getattr(self.app, 'host_name', getattr(self.app, 'name', 'unknown'))
    if getattr(self.app, 'address', '127.0.0.1') not in ['127.0.0.1']:
        # If an address is explicitely specified, I must use it!
        my_address = self.app.address
    for fun in functions:
        endpoint = {
            'daemon': my_daemon_type,
            'name': fun,
            'doc': getattr(self, fun).__doc__,
            'uri': '%s://%s:%s/%s' % (getattr(self.app, 'scheme', 'http'),
                                      my_address, self.app.port, fun),
            'args': {}
        }

        try:
            spec = inspect.getfullargspec(getattr(self, fun))
        except Exception:  # pylint: disable=broad-except
            # pylint: disable=deprecated-method
            spec = inspect.getargspec(getattr(self, fun))
        args = [a for a in spec.args if a not in ('self', 'cls')]
        # Defaults apply to the LAST arguments of the signature: start
        # with "no default" for everything, then overlay the tail.
        # (The former zip(args, defaults) paired defaults with the FIRST
        # args and silently dropped the remaining arguments.)
        a_dict = dict(zip(args, ("No default value",) * len(args)))
        if spec.defaults:
            a_dict.update(zip(args[-len(spec.defaults):], spec.defaults))
        endpoint["args"] = a_dict
        full_api['api'].append(endpoint)

    return full_api
List the methods available on the daemon Web service interface
457
10
19,408
def stop_request(self, stop_now='0'):
    """Ask the daemon to stop.

    :param stop_now: '1' to interrupt immediately, anything else for a
        graceful stop
    :return: True
    """
    immediate = (stop_now == '1')
    self.app.interrupted = immediate
    self.app.will_stop = True
    return True
Request the daemon to stop
42
5
19,409
def get_log_level(self):
    """Return the daemon identity enriched with the current log level.

    Adds a numeric ``log_level`` and a textual ``log_level_name`` key.
    """
    names_by_level = {
        logging.DEBUG: 'DEBUG',
        logging.INFO: 'INFO',
        logging.WARNING: 'WARNING',
        logging.ERROR: 'ERROR',
        logging.CRITICAL: 'CRITICAL',
    }
    alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME)
    current_level = alignak_logger.getEffectiveLevel()
    res = self.identity()
    res["log_level"] = current_level
    res["log_level_name"] = names_by_level[current_level]
    return res
Get the current daemon log level
139
6
19,410
def set_log_level(self, log_level=None):
    """Set the current log level for the daemon.

    :param log_level: one of DEBUG/INFO/WARNING/ERROR/CRITICAL; when
        None, the value is read from the posted JSON body
    :return: an error dict when the level is not allowed, else the new
        log level information (see get_log_level)
    """
    if log_level is None:
        # Not given as an argument: fetch it from the JSON request body.
        log_level = cherrypy.request.json['log_level']

    allowed_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
    if log_level not in allowed_levels:
        return {'_status': u'ERR',
                '_message': u"Required log level is not allowed: %s" % log_level}

    alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME)
    alignak_logger.setLevel(log_level)
    return self.get_log_level()
Set the current log level for the daemon
150
8
19,411
def stats(self, details=False):
    """Get statistics and information from the daemon.

    :param details: whether to include detailed statistics; usually
        received as an HTTP query parameter, so it may be a string
        ('1', 'true', 'false', ...) rather than a boolean
    :return: identity dict updated with the daemon statistics
    """
    if isinstance(details, str):
        # bool('false') is True, so a textual parameter must be parsed
        # explicitly: common "falsy" spellings disable the details.
        details = details.strip().lower() not in ('', '0', 'false', 'no', 'off')
    elif details is not False:
        details = bool(details)
    res = self.identity()
    res.update(self.app.get_daemon_stats(details=details))
    return res
Get statistics and information from the daemon
51
7
19,412
def _have_conf ( self , magic_hash = None ) : self . app . have_conf = getattr ( self . app , 'cur_conf' , None ) not in [ None , { } ] if magic_hash is not None : # Beware, we got an str in entry, not an int magic_hash = int ( magic_hash ) # I've got a conf and a good one return self . app . have_conf and self . app . cur_conf . magic_hash == magic_hash return self . app . have_conf
Get the daemon current configuration state
119
6
19,413
def _results(self, scheduler_instance_id):
    """Return the serialized results of the actions executed for a scheduler.

    :param scheduler_instance_id: instance id of the requesting scheduler
    :return: serialized list of results
    """
    # The results list is shared with other threads, hence the lock.
    with self.app.lock:
        pending_results = self.app.get_results_from_passive(scheduler_instance_id)
    return serialize(pending_results, True)
Get the results of the executed actions for the scheduler which instance id is provided
51
16
19,414
def _broks(self, broker_name):  # pylint: disable=unused-argument
    """Return the serialized broks of the daemon.

    :param broker_name: name of the requesting broker (unused here)
    :return: serialized list of broks
    """
    # Broks are produced concurrently, hence the dedicated lock.
    with self.app.broks_lock:
        pending_broks = self.app.get_broks()
    return serialize(pending_broks, True)
Get the broks from the daemon
53
7
19,415
def _events(self):
    """Return the serialized monitoring events of the daemon."""
    # Events are appended concurrently, hence the lock.
    with self.app.events_lock:
        pending_events = self.app.get_events()
    return serialize(pending_events, True)
Get the monitoring events from the daemon
35
7
19,416
def get_state(self, hosts, services):
    """Compute this node's state by recursing over its sons.

    Leaf nodes ('host' / 'service') read the hard state of the pointed
    object; composite nodes ('|', '&', 'of:') combine their sons' states.

    :return: integer state; 4 for an unknown operand (normally unreachable
        because operands are validated at parsing time)
    """
    operand = self.operand
    if operand == 'host':
        leaf = hosts[self.sons[0]]
        return self.get_host_node_state(leaf.last_hard_state_id,
                                        leaf.problem_has_been_acknowledged,
                                        leaf.in_scheduled_downtime)
    if operand == 'service':
        leaf = services[self.sons[0]]
        return self.get_service_node_state(leaf.last_hard_state_id,
                                           leaf.problem_has_been_acknowledged,
                                           leaf.in_scheduled_downtime)
    composite_handlers = {
        '|': self.get_complex_or_node_state,
        '&': self.get_complex_and_node_state,
        'of:': self.get_complex_xof_node_state,
    }
    handler = composite_handlers.get(operand)
    if handler is not None:
        return handler(hosts, services)
    # Unknown operand: should never happen, operands are validated upstream.
    return 4
Get node state by looking recursively over sons and applying operand
272
14
19,417
def eval_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups,
                     running=False):
    """Parse *pattern* and build the matching DependencyNode tree.

    A pattern containing any of '(', ')', '&' or '|' is a composite
    expression; anything else is a plain host/service leaf.
    """
    pattern = pattern.strip()
    # The mere presence of an operator or parenthesis makes it complex.
    is_composite = any(token in pattern for token in '()&|')
    if is_composite:
        return self.eval_complex_cor_pattern(pattern, hosts, services,
                                             hostgroups, servicegroups, running)
    return self.eval_simple_cor_pattern(pattern, hosts, services,
                                        hostgroups, servicegroups, running)
Parse and build recursively a tree of DependencyNode from pattern
152
15
19,418
def eval_complex_cor_pattern(self, pattern, hosts, services,
                             hostgroups, servicegroups, running=False):
    # pylint: disable=too-many-branches
    """Parse a complex expression and build a DependencyNode tree.

    The pattern is scanned character by character: parenthesized groups
    are accumulated and recursively parsed, '!' negates the next son,
    '&'/'|' define the node operand. Returns None when mixed operands
    make the expression invalid.
    """
    node = DependencyNode()
    pattern = self.eval_xof_pattern(node, pattern)

    in_par = False
    tmp = ''
    son_is_not = False  # We keep is the next son will be not or not
    stacked_parenthesis = 0
    for char in pattern:
        if char == '(':
            stacked_parenthesis += 1
            in_par = True
            tmp = tmp.strip()
            # Maybe we just start a par, but we got some things in tmp
            # that should not be good in fact !
            if stacked_parenthesis == 1 and tmp != '':
                # TODO : real error
                print("ERROR : bad expression near", tmp)
                continue
            # If we are already in a par, add this (
            # but not if it's the first one so
            if stacked_parenthesis > 1:
                tmp += char
        elif char == ')':
            stacked_parenthesis -= 1
            if stacked_parenthesis < 0:
                # TODO : real error
                print("Error : bad expression near", tmp, "too much ')'")
                continue
            if stacked_parenthesis == 0:
                # Outermost group closed: recursively parse its content.
                tmp = tmp.strip()
                son = self.eval_cor_pattern(tmp, hosts, services,
                                            hostgroups, servicegroups, running)
                # Maybe our son was notted
                if son_is_not:
                    son.not_value = True
                    son_is_not = False
                node.sons.append(son)
                in_par = False
                # OK now clean the tmp so we start clean
                tmp = ''
                continue
            # ok here we are still in a huge par, we just close one sub one
            tmp += char
        # Expressions in par will be parsed in a sub node after. So just
        # stack pattern
        elif in_par:
            tmp += char
        # Until here, we're not in par
        # Manage the NOT for an expression. Only allow ! at the beginning
        # of a host or a host,service expression.
        elif char == '!':
            tmp = tmp.strip()
            if tmp and tmp[0] != '!':
                print("Error : bad expression near", tmp, "wrong position for '!'")
                continue
            # Flags next node not state
            son_is_not = True
            # DO NOT keep the c in tmp, we consumed it
        elif char in ['&', '|']:
            # Oh we got a real cut in an expression, if so, cut it
            tmp = tmp.strip()
            # Look at the rule viability: mixing '&' and '|' at the same
            # level is not allowed.
            if node.operand is not None and node.operand != 'of:' and char != node.operand:
                # Should be logged as a warning / info? :)
                return None
            if node.operand != 'of:':
                node.operand = char
            if tmp != '':
                son = self.eval_cor_pattern(tmp, hosts, services,
                                            hostgroups, servicegroups, running)
                # Maybe our son was notted
                if son_is_not:
                    son.not_value = True
                    son_is_not = False
                node.sons.append(son)
            tmp = ''
        # Maybe it's a classic character or we're in par, if so, continue
        else:
            tmp += char

    # Be sure to manage the trailing part when the line is done
    tmp = tmp.strip()
    if tmp != '':
        son = self.eval_cor_pattern(tmp, hosts, services,
                                    hostgroups, servicegroups, running)
        # Maybe our son was notted
        if son_is_not:
            son.not_value = True
            son_is_not = False
        node.sons.append(son)

    # We got our nodes, so we can update 0 values of of_values
    # with the number of sons
    node.switch_zeros_of_values()
    return node
Parse and build recursively a tree of DependencyNode from a complex pattern
816
17
19,419
def eval_simple_cor_pattern(self, pattern, hosts, services,
                            hostgroups, servicegroups, running=False):
    """Parse a simple (leaf) pattern and build a DependencyNode.

    Handles the leading '!' (negation), group/wildcard expressions that
    must be expanded, and plain host / host,service references. When
    *running* is True an unresolvable reference raises instead of being
    recorded as a configuration error.
    """
    node = DependencyNode()
    pattern = self.eval_xof_pattern(node, pattern)

    # If it's a not value, tag the node and find
    # the name without this ! operator
    if pattern.startswith('!'):
        node.not_value = True
        pattern = pattern[1:]
    # Is the pattern an expression to be expanded?
    if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \
            re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern):
        # o is just extracted its attributes, then trashed.
        son = self.expand_expression(pattern, hosts, services,
                                     hostgroups, servicegroups, running)
        if node.operand != 'of:':
            node.operand = '&'
        node.sons.extend(son.sons)
        node.configuration_errors.extend(son.configuration_errors)
        node.switch_zeros_of_values()
    else:
        node.operand = 'object'
        obj, error = self.find_object(pattern, hosts, services)
        # here we have Alignak SchedulingItem object (Host/Service)
        if obj is not None:
            # Set host or service
            # pylint: disable=E1101
            node.operand = obj.__class__.my_type
            node.sons.append(obj.uuid)  # Only store the uuid, not the full object.
        else:
            if running is False:
                node.configuration_errors.append(error)
            else:
                # As business rules are re-evaluated at run time on
                # each scheduling loop, if the rule becomes invalid
                # because of a badly written macro modulation, it
                # should be notified upper for the error to be
                # displayed in the check output.
                raise Exception(error)
    return node
Parse and build recursively a tree of DependencyNode from a simple pattern
426
17
19,420
def find_object(self, pattern, hosts, services):
    """Look up the host or service an expression leaf refers to.

    *pattern* is "host_name" or "host_name,service_description"; an
    empty host name falls back to the host the business rule is bound to.

    :return: tuple (object or None, error message or None)
    """
    error = None
    parts = pattern.split(',')
    host_name = parts[0].strip()
    if not host_name:
        # Empty host part: use the host owning this business rule.
        host_name = self.bound_item.host_name
    if len(parts) > 1:
        # A comma means a service is targeted.
        service_description = parts[1].strip()
        obj = services.find_srv_by_name_and_hostname(host_name, service_description)
        if not obj:
            error = "Business rule uses unknown service %s/%s" % (host_name,
                                                                  service_description)
    else:
        obj = hosts.find_by_name(host_name)
        if not obj:
            error = "Business rule uses unknown host %s" % (host_name,)
    return obj, error
Find object from pattern
232
4
19,421
def is_time_valid(self, timestamp):
    """Tell whether *timestamp* falls inside this timeperiod.

    A time matched by any exclusion daterange is invalid; otherwise it
    is valid as soon as one of the dateranges contains it.
    """
    excluded = getattr(self, 'exclude', [])
    if any(daterange.is_time_valid(timestamp) for daterange in excluded):
        return False
    return any(daterange.is_time_valid(timestamp) for daterange in self.dateranges)
Check if a time is valid or not
78
8
19,422
def get_min_from_t(self, timestamp):
    """Get the first valid time after *timestamp* across all dateranges.

    :param timestamp: reference time (epoch seconds)
    :return: the smallest candidate time, or None when no daterange
        provides one (empty timeperiod or only None candidates)
    """
    candidates = [daterange.get_min_from_t(timestamp)
                  for daterange in self.dateranges]
    # Dateranges may yield None ("no valid time"): comparing None with
    # integers raises TypeError in Python 3, and an empty sequence makes
    # min() raise ValueError - filter first and guard the empty case.
    candidates = [mins for mins in candidates if mins is not None]
    if not candidates:
        return None
    return min(candidates)
Get the first time > timestamp which is valid
62
9
19,423
def clean_cache(self):
    """Drop cache entries keyed by a timestamp already in the past.

    Both the valid-times cache and the invalid-times cache are pruned:
    past entries can never be requested again.
    """
    now = int(time.time())
    # Collect the stale keys first: a dict must not change size while
    # being iterated.
    stale = [entry_time for entry_time in self.cache if entry_time < now]
    for entry_time in stale:
        del self.cache[entry_time]
    # same for the invalid cache
    stale = [entry_time for entry_time in self.invalid_cache if entry_time < now]
    for entry_time in stale:
        del self.invalid_cache[entry_time]
Clean cache with entries older than now because not used in future ; )
120
14
19,424
def get_next_valid_time_from_t(self, timestamp):
    # pylint: disable=too-many-branches
    """Get the next valid time after *timestamp*.

    The result is served from the cache when available, otherwise
    computed from the dateranges (honouring the exclusion timeperiods)
    and stored in the cache. The search is bounded to one year; None is
    returned when no valid time can be found within that limit.
    """
    timestamp = int(timestamp)
    original_t = timestamp

    res_from_cache = self.find_next_valid_time_from_cache(timestamp)
    if res_from_cache is not None:
        return res_from_cache

    still_loop = True
    # Loop for all minutes...
    while still_loop:
        local_min = None
        # Ok, not in cache...
        dr_mins = []
        for daterange in self.dateranges:
            dr_mins.append(daterange.get_next_valid_time_from_t(timestamp))
        s_dr_mins = sorted([d for d in dr_mins if d is not None])

        for t01 in s_dr_mins:
            if not self.exclude and still_loop:
                # No Exclude so we are good
                local_min = t01
                still_loop = False
            else:
                for timeperiod in self.exclude:
                    if not timeperiod.is_time_valid(t01) and still_loop:
                        # OK we found a date that is not valid in any exclude timeperiod
                        local_min = t01
                        still_loop = False

        if local_min is None:
            # Every candidate is excluded: looking for the next invalid
            # date of the exclusion periods to restart the search from.
            exc_mins = []
            if s_dr_mins != []:
                for timeperiod in self.exclude:
                    exc_mins.append(timeperiod.get_next_invalid_time_from_t(s_dr_mins[0]))
            s_exc_mins = sorted([d for d in exc_mins if d is not None])
            if s_exc_mins != []:
                local_min = s_exc_mins[0]

        if local_min is None:
            still_loop = False
        else:
            timestamp = local_min
            # No loop more than one year
            if timestamp > original_t + 3600 * 24 * 366 + 1:
                still_loop = False
                local_min = None

    # Ok, we update the cache...
    self.cache[original_t] = local_min
    return local_min
Get the next valid time. If it is in the cache, return it; otherwise compute it. The search is limited to one year.
457
25
19,425
def get_next_invalid_time_from_t(self, timestamp):
    # pylint: disable=too-many-branches
    """Get the next invalid time after *timestamp*.

    Builds the list of (start, end) valid periods over the coming year
    from the dateranges, merges them, does the same for the exclusion
    timeperiods, then derives the first instant not covered by a valid
    period (or covered by an exclusion).
    """
    timestamp = int(timestamp)
    original_t = timestamp

    dr_mins = []
    for daterange in self.dateranges:
        timestamp = original_t
        cont = True
        while cont:
            start = daterange.get_next_valid_time_from_t(timestamp)
            if start is not None:
                end = daterange.get_next_invalid_time_from_t(start)
                dr_mins.append((start, end))
                timestamp = end
            else:
                cont = False
            # Do not scan past one year from the original timestamp.
            if timestamp > original_t + (3600 * 24 * 365):
                cont = False
    periods = merge_periods(dr_mins)

    # manage exclude periods
    dr_mins = []
    for exclude in self.exclude:
        for daterange in exclude.dateranges:
            timestamp = original_t
            cont = True
            while cont:
                start = daterange.get_next_valid_time_from_t(timestamp)
                if start is not None:
                    end = daterange.get_next_invalid_time_from_t(start)
                    dr_mins.append((start, end))
                    timestamp = end
                else:
                    cont = False
                if timestamp > original_t + (3600 * 24 * 365):
                    cont = False
    if not dr_mins:
        periods_exclude = []
    else:
        periods_exclude = merge_periods(dr_mins)

    if len(periods) >= 1:
        # if first valid period is after original timestamp, the first invalid time
        # is the original timestamp
        if periods[0][0] > original_t:
            return original_t
        # check the first period + first period of exclude
        if len(periods_exclude) >= 1:
            if periods_exclude[0][0] < periods[0][1]:
                return periods_exclude[0][0]
        return periods[0][1]
    return original_t
Get the next invalid time
430
5
19,426
def explode(self):
    """Resolve all the still-unresolved timeperiod entries.

    Every raw entry is turned into a daterange appended to
    ``self.dateranges``; the pending list is then cleared.
    """
    for raw_entry in list(self.unresolved):
        self.resolve_daterange(self.dateranges, raw_entry)
    self.unresolved = []
Try to resolve all unresolved elements
35
6
19,427
def linkify(self, timeperiods):
    """Replace excluded timeperiod names with the uuid of the periods.

    Unknown names are reported through add_error and dropped from the
    exclusion list.
    """
    resolved_uuids = []
    if getattr(self, 'exclude', []):
        logger.debug("[timeentry::%s] have excluded %s",
                     self.get_name(), self.exclude)
        for tp_name in self.exclude:
            excluded_tp = timeperiods.find_by_name(tp_name.strip())
            if excluded_tp is None:
                self.add_error("[timeentry::%s] unknown %s timeperiod"
                               % (self.get_name(), tp_name))
            else:
                resolved_uuids.append(excluded_tp.uuid)
    self.exclude = resolved_uuids
Replace the timeperiod names listed in exclude with the ids of the matching timeperiods.
178
13
19,428
def check_exclude_rec(self):
    """Detect cycles in the ``exclude`` chain of timeperiods.

    Walks the excluded timeperiods recursively, tagging each visited
    period with ``rec_tag``; reaching an already-tagged period means the
    exclusion graph loops.

    :return: False when a loop is detected (an error is recorded through
        add_error), True otherwise
    """
    # pylint: disable=access-member-before-definition
    if self.rec_tag:
        # Already visited during this walk: we looped back on ourselves.
        msg = "[timeentry::%s] is in a loop in exclude parameter" % (self.get_name())
        self.add_error(msg)
        return False
    self.rec_tag = True
    valid = True
    for timeperiod in self.exclude:
        # Propagate the recursion result: a loop detected deeper down
        # makes this period invalid too (it used to be silently dropped).
        valid = timeperiod.check_exclude_rec() and valid
    return valid
Check if this timeperiod is tagged
97
7
19,429
def explode(self):
    """Ask every timeperiod of the collection to resolve its entries."""
    for timeperiod in self.items.values():
        timeperiod.explode()
Try to resolve each timeperiod
32
6
19,430
def linkify(self):
    """Resolve the exclusion links of every timeperiod in the collection."""
    for timeperiod in self.items.values():
        timeperiod.linkify(self)
Check exclusion for each timeperiod
35
6
19,431
def apply_inheritance(self):
    """Apply template inheritance to all timeperiods.

    'exclude' is the only regular property worth inheriting; custom
    properties and the still-unresolved entries (the dateranges) are
    then propagated for every item.
    """
    self.apply_partial_inheritance('exclude')
    for timeperiod in self:
        self.get_customs_properties_by_inheritance(timeperiod)
    # Dateranges are still raw strings at this stage, hence "unresolved".
    for timeperiod in self:
        self.get_unresolved_properties_by_inheritance(timeperiod)
The only interesting property to inherit is exclude
88
8
19,432
def is_correct ( self ) : valid = True # We do not want a same hg to be explode again and again # so we tag it for timeperiod in list ( self . items . values ( ) ) : timeperiod . rec_tag = False for timeperiod in list ( self . items . values ( ) ) : for tmp_tp in list ( self . items . values ( ) ) : tmp_tp . rec_tag = False valid = timeperiod . check_exclude_rec ( ) and valid # We clean the tags and collect the warning/erro messages for timeperiod in list ( self . items . values ( ) ) : del timeperiod . rec_tag # Now other checks if not timeperiod . is_correct ( ) : valid = False source = getattr ( timeperiod , 'imported_from' , "unknown source" ) msg = "Configuration in %s::%s is incorrect; from: %s" % ( timeperiod . my_type , timeperiod . get_name ( ) , source ) self . add_error ( msg ) self . configuration_errors += timeperiod . configuration_errors self . configuration_warnings += timeperiod . configuration_warnings # And check all timeperiods for correct (sunday is false) for timeperiod in self : valid = timeperiod . is_correct ( ) and valid return valid
check if each properties of timeperiods are valid
290
10
19,433
def check_status_and_get_events(self):
    # pylint: disable=too-many-branches
    """Poll all the known daemons for their status and pending events.

    For each active daemon (myself excepted), fetch its statistics
    (stored on the daemon link itself) and collect its pending events.
    Connection failures are logged and skipped.

    :return: the list of events gathered from all reachable daemons
    """
    events = []
    for daemon_link in self.all_daemons_links:
        if daemon_link == self.arbiter_link:
            # I exclude myself from the polling, sure I am reachable ;)
            continue
        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue
        try:
            # Do not get the details to avoid overloading the communication
            daemon_link.statistics = daemon_link.get_daemon_stats(details=False)
            if daemon_link.statistics:
                daemon_link.statistics['_freshness'] = int(time.time())
                logger.debug("Daemon %s statistics: %s",
                             daemon_link.name, daemon_link.statistics)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get statistics.")

        try:
            got = daemon_link.get_events()
            if got:
                events.extend(got)
                logger.debug("Daemon %s has %d events: %s",
                             daemon_link.name, len(got), got)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get events.")

    return events
Get all the daemons status
298
7
19,434
def get_scheduler_ordered_list(self, realm):
    """Return the realm's scheduler links sorted by usability.

    Order after the final reversal: dead first, then alive spares, then
    alive masters - the caller pops from the end of the list, so the
    best candidates come out first.
    """
    masters, spares, deads = [], [], []
    for link_uuid in realm.schedulers:
        link = self.schedulers[link_uuid]
        if not link.alive:
            deads.append(link)
        elif link.spare:
            spares.append(link)
        else:
            masters.append(link)
    # Best-first, then reversed so that list.pop() yields the best first.
    ordered = masters + spares + deads
    ordered.reverse()
    return ordered
Get sorted scheduler list for a specific realm
220
9
19,435
def dispatch(self, test=False):
    # pylint: disable=too-many-branches
    """Send the prepared configuration to the satellite daemons.

    Pushes the configuration to the spare arbiters, the schedulers and
    the other satellites (skipping inactive, unreachable or already
    served links), then marks the dispatching as done.

    :param test: forwarded to link.put_conf
    :raises DispatcherError: when no configuration is prepared, or when
        the previous configuration is still being dispatched
    """
    if not self.new_to_dispatch:
        raise DispatcherError("Dispatcher cannot dispatch, "
                              "because no configuration is prepared!")
    if self.first_dispatch_done:
        raise DispatcherError("Dispatcher cannot dispatch, "
                              "because the configuration is still dispatched!")

    if self.dispatch_ok:
        logger.info("Dispatching is already done and ok...")
        return

    logger.info("Trying to send configuration to the satellites...")
    self.dispatch_ok = True

    # todo: the 3 loops hereunder may be factorized
    for link in self.arbiters:
        # If not me and a spare arbiter...
        if link == self.arbiter_link:
            # I exclude myself from the dispatching, I have my configuration ;)
            continue
        if not link.active:
            # I exclude the daemons that are not active
            continue
        if not link.spare:
            # Do not dispatch to a master arbiter!
            continue
        if link.configuration_sent:
            logger.debug("Arbiter %s already sent!", link.name)
            continue
        if not link.reachable:
            logger.debug("Arbiter %s is not reachable to receive its configuration", link.name)
            continue
        logger.info("Sending configuration to the arbiter %s", link.name)
        logger.debug("- %s", link.cfg)
        link.put_conf(link.cfg, test=test)
        link.configuration_sent = True
        logger.info("- sent")
        # Now that the spare arbiter has a configuration, tell him it must not run,
        # because I'm not dead ;)
        link.do_not_run()

    for link in self.schedulers:
        if link.configuration_sent:
            logger.debug("Scheduler %s already sent!", link.name)
            continue
        if not link.active:
            # I exclude the daemons that are not active
            continue
        if not link.reachable:
            logger.debug("Scheduler %s is not reachable to receive its configuration", link.name)
            continue
        logger.info("Sending configuration to the scheduler %s", link.name)
        logger.debug("- %s", link.cfg)
        link.put_conf(link.cfg, test=test)
        link.configuration_sent = True
        logger.info("- sent")

    for link in self.satellites:
        if link.configuration_sent:
            logger.debug("%s %s already sent!", link.type, link.name)
            continue
        if not link.active:
            # I exclude the daemons that are not active
            continue
        if not link.reachable:
            logger.warning("%s %s is not reachable to receive its configuration",
                           link.type, link.name)
            continue
        logger.info("Sending configuration to the %s %s", link.type, link.name)
        logger.debug("- %s", link.cfg)
        link.put_conf(link.cfg, test=test)
        link.configuration_sent = True
        logger.info("- sent")

    if self.dispatch_ok:
        # Newly prepared configuration got dispatched correctly
        self.new_to_dispatch = False
        self.first_dispatch_done = True
Send configuration to satellites
718
4
19,436
def stop_request(self, stop_now=False):
    """Send a stop request to every active daemon (myself excepted).

    :param stop_now: True to request an immediate stop
    :return: True when every contacted daemon acknowledged the request
    """
    every_daemon_ok = True
    for daemon_link in self.all_daemons_links:
        logger.debug("Stopping: %s (%s)", daemon_link, stop_now)
        if daemon_link == self.arbiter_link:
            # I exclude myself from the process, I know we are going to stop ;)
            continue
        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue

        try:
            acknowledged = daemon_link.stop_request(stop_now=stop_now)
        except LinkError:
            # An unreachable daemon is assumed to be already stopped.
            acknowledged = True
            logger.warning("Daemon stop request failed, %s probably stopped!", daemon_link)

        every_daemon_ok = every_daemon_ok and acknowledged
        daemon_link.stopping = True

    self.stop_request_sent = every_daemon_ok
    return self.stop_request_sent
Send a stop request to all the daemons
202
10
19,437
def pythonize(self, val):
    """Convert a configuration value into a boolean.

    Accepted truthy strings: '1', 'yes', 'true', 'on'; falsy strings:
    '0', 'no', 'false', 'off'. An actual bool is returned unchanged.

    :raises PythonizeError: when the value maps to neither state
    """
    boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                      '0': False, 'no': False, 'false': False, 'off': False}
    if isinstance(val, bool):
        return val
    val = unique_value(val).lower()
    # Plain dict membership: no need to materialize the keys list.
    if val in boolean_states:
        return boolean_states[val]
    raise PythonizeError("Cannot convert '%s' to a boolean value" % val)
Convert value into a boolean
136
6
19,438
def pythonize(self, val):
    """Collapse a list whose elements are all identical to that single
    element; any other value is returned untouched.
    """
    if not isinstance(val, list):
        return val
    # A one-element value set means every item of the list is the same.
    if len(set(val)) == 1:
        return val[0]
    # Well, can't choose to remove something.
    return val
If the value is a list whose elements are all identical, return that single element; otherwise do nothing.
58
14
19,439
def login(self, username, password):
    """Log into the WS interface and remember the authentication token.

    :return: True on success, False otherwise
    """
    logger.debug("login for: %s", username)

    # Configured as not authenticated WS
    if not username and not password:
        self.set_token(token=None)
        return False

    if not username or not password:
        logger.error("Username or password cannot be None!")
        self.set_token(token=None)
        return False

    credentials = {'username': username, 'password': password}
    response = self.get_response(method='POST', endpoint='login', json=credentials)
    if response.status_code == 401:
        logger.error("Access denied to %s", self.url_endpoint_root)
        self.set_token(token=None)
        return False

    payload = self.decode(response=response)
    if 'token' not in payload:
        return False
    self.set_token(token=payload['token'])
    return True
Log into the WS interface and get the authentication token
203
10
19,440
def logout(self):
    """Log out from the backend and forget the token.

    :return: True
    """
    logger.debug("request backend logout")
    if not self.authenticated:
        logger.warning("Unnecessary logout ...")
        return True

    _ = self.get_response(method='POST', endpoint='logout')
    self.session.close()
    self.set_token(token=None)
    return True
Logout from the backend
80
5
19,441
def get(self, endpoint, params=None):
    """GET items (or one item) from the alignak backend.

    :return: the decoded response; a '_status' key is added when the
        backend did not provide one
    """
    response = self.get_response(method='GET', endpoint=endpoint, params=params)
    decoded = self.decode(response=response)
    if '_status' not in decoded:  # pragma: no cover - need specific backend tests
        decoded['_status'] = u'OK'  # TODO: Sure??
    return decoded
Get items or item in alignak backend
84
8
19,442
def post ( self , endpoint , data , files = None , headers = None ) : # pylint: disable=unused-argument # We let Requests encode data to json response = self . get_response ( method = 'POST' , endpoint = endpoint , json = data , headers = headers ) resp = self . decode ( response = response ) return resp
Create a new item
76
4
19,443
def patch ( self , endpoint , data ) : response = self . get_response ( method = 'PATCH' , endpoint = endpoint , json = data , headers = { 'Content-Type' : 'application/json' } ) if response . status_code == 200 : return self . decode ( response = response ) return response
Method to update an item
69
5
19,444
def init ( self , conf ) : # For searching class and elements for on-demand # we need link to types self . my_conf = conf self . lists_on_demand = [ ] self . hosts = self . my_conf . hosts # For special void host_name handling... self . host_class = self . hosts . inner_class self . lists_on_demand . append ( self . hosts ) self . services = self . my_conf . services self . contacts = self . my_conf . contacts self . lists_on_demand . append ( self . contacts ) self . hostgroups = self . my_conf . hostgroups self . lists_on_demand . append ( self . hostgroups ) self . commands = self . my_conf . commands self . servicegroups = self . my_conf . servicegroups self . lists_on_demand . append ( self . servicegroups ) self . contactgroups = self . my_conf . contactgroups self . lists_on_demand . append ( self . contactgroups ) self . illegal_macro_output_chars = self . my_conf . illegal_macro_output_chars self . env_prefix = self . my_conf . env_variables_prefix
Initialize MacroResolver instance with conf . Must be called at least once .
264
16
19,445
def _get_value_from_element ( self , elt , prop ) : # pylint: disable=too-many-return-statements args = None # We have args to provide to the function if isinstance ( prop , tuple ) : prop , args = prop value = getattr ( elt , prop , None ) if value is None : return 'n/a' try : # If the macro is set to a list property if isinstance ( value , list ) : # Return the list items, comma separated and bracketed return "[%s]" % ',' . join ( value ) # If the macro is not set as a function to call if not isinstance ( value , collections . Callable ) : return value # Case of a function call with no arguments if not args : return value ( ) # Case where we need args to the function # ex : HOSTGROUPNAME (we need hostgroups) # ex : SHORTSTATUS (we need hosts and services if bp_rule) real_args = [ ] for arg in args : real_args . append ( getattr ( self , arg , None ) ) return value ( * real_args ) except AttributeError : # Commented because there are many unresolved macros and this log is spamming :/ # # Raise a warning and return a strange value when macro cannot be resolved # warnings.warn( # 'Error when getting the property value for a macro: %s', # MacroWarning, stacklevel=2) # Return a strange value when macro cannot be resolved return 'n/a' except UnicodeError : if isinstance ( value , string_types ) : return str ( value , 'utf8' , errors = 'ignore' ) return 'n/a'
Get value from an element s property .
368
8
19,446
def _delete_unwanted_caracters ( self , chain ) : try : chain = chain . decode ( 'utf8' , 'replace' ) except UnicodeEncodeError : # If it is still encoded correctly, ignore... pass except AttributeError : # Python 3 will raise an exception because the line is still unicode pass for char in self . illegal_macro_output_chars : chain = chain . replace ( char , '' ) return chain
Remove unwanted chars from chain; the unwanted chars are those listed in the illegal_macro_output_chars attribute
97
19
19,447
def resolve_command ( self , com , data , macromodulations , timeperiods ) : logger . debug ( "Resolving: macros in: %s, arguments: %s" , com . command . command_line , com . args ) return self . resolve_simple_macros_in_string ( com . command . command_line , data , macromodulations , timeperiods , args = com . args )
Resolve command macros with data
92
6
19,448
def _get_type_of_macro ( macros , objs ) : for macro in macros : # ARGN Macros if re . match ( r'ARG\d' , macro ) : macros [ macro ] [ 'type' ] = 'ARGN' continue # USERN macros # are managed in the Config class, so no # need to look that here elif re . match ( r'_HOST\w' , macro ) : macros [ macro ] [ 'type' ] = 'CUSTOM' macros [ macro ] [ 'class' ] = 'HOST' continue elif re . match ( r'_SERVICE\w' , macro ) : macros [ macro ] [ 'type' ] = 'CUSTOM' macros [ macro ] [ 'class' ] = 'SERVICE' # value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1] continue elif re . match ( r'_CONTACT\w' , macro ) : macros [ macro ] [ 'type' ] = 'CUSTOM' macros [ macro ] [ 'class' ] = 'CONTACT' continue # On demand macro elif len ( macro . split ( ':' ) ) > 1 : macros [ macro ] [ 'type' ] = 'ONDEMAND' continue # OK, classical macro... for obj in objs : if macro in obj . macros : macros [ macro ] [ 'type' ] = 'object' macros [ macro ] [ 'object' ] = obj continue
Set macros types
327
4
19,449
def _resolve_ondemand ( self , macro , data ) : # pylint: disable=too-many-locals elts = macro . split ( ':' ) nb_parts = len ( elts ) macro_name = elts [ 0 ] # 3 parts for a service, 2 for all others types... if nb_parts == 3 : val = '' ( host_name , service_description ) = ( elts [ 1 ] , elts [ 2 ] ) # host_name can be void, so it's the host in data # that is important. We use our self.host_class to # find the host in the data :) if host_name == '' : for elt in data : if elt is not None and elt . __class__ == self . host_class : host_name = elt . host_name # Ok now we get service serv = self . services . find_srv_by_name_and_hostname ( host_name , service_description ) if serv is not None : cls = serv . __class__ prop = cls . macros [ macro_name ] val = self . _get_value_from_element ( serv , prop ) return val # Ok, service was easy, now hard part else : val = '' elt_name = elts [ 1 ] # Special case: elt_name can be void # so it's the host where it apply if elt_name == '' : for elt in data : if elt is not None and elt . __class__ == self . host_class : elt_name = elt . host_name for od_list in self . lists_on_demand : cls = od_list . inner_class # We search our type by looking at the macro if macro_name in cls . macros : prop = cls . macros [ macro_name ] i = od_list . find_by_name ( elt_name ) if i is not None : val = self . _get_value_from_element ( i , prop ) # Ok we got our value :) break return val # Return a strange value in this case rather than an empty string return 'n/a'
Get on demand macro value
476
5
19,450
def _tot_hosts_by_state ( self , state = None , state_type = None ) : if state is None and state_type is None : return len ( self . hosts ) if state_type : return sum ( 1 for h in self . hosts if h . state == state and h . state_type == state_type ) return sum ( 1 for h in self . hosts if h . state == state )
Generic function to get the number of host in the specified state
92
12
19,451
def _tot_unhandled_hosts_by_state ( self , state ) : return sum ( 1 for h in self . hosts if h . state == state and h . state_type == u'HARD' and h . is_problem and not h . problem_has_been_acknowledged )
Generic function to get the number of unhandled problem hosts in the specified state
68
15
19,452
def _tot_services_by_state ( self , state = None , state_type = None ) : if state is None and state_type is None : return len ( self . services ) if state_type : return sum ( 1 for s in self . services if s . state == state and s . state_type == state_type ) return sum ( 1 for s in self . services if s . state == state )
Generic function to get the number of services in the specified state
91
12
19,453
def _tot_unhandled_services_by_state ( self , state ) : return sum ( 1 for s in self . services if s . state == state and s . is_problem and not s . problem_has_been_acknowledged )
Generic function to get the number of unhandled problem services in the specified state
55
15
19,454
def _get_total_services_problems_unhandled ( self ) : return sum ( 1 for s in self . services if s . is_problem and not s . problem_has_been_acknowledged )
Get the number of services that are a problem and that are not acknowledged
47
14
19,455
def _get_total_services_problems_handled ( self ) : return sum ( 1 for s in self . services if s . is_problem and s . problem_has_been_acknowledged )
Get the number of services that are a problem and that have been acknowledged (handled)
45
8
19,456
def add_data ( self , metric , value , ts = None ) : if not ts : ts = time . time ( ) if self . __data_lock . acquire ( ) : self . __data . append ( ( metric , ( ts , value ) ) ) self . __data_lock . release ( ) return True return False
Add data to queue
70
4
19,457
def set_daemon_name ( self , daemon_name ) : self . daemon_name = daemon_name for instance in self . instances : instance . set_loaded_into ( daemon_name )
Set the daemon name of the daemon which this manager is attached to and propagate this daemon name to our managed modules
43
22
19,458
def load_and_init ( self , modules ) : self . load ( modules ) self . get_instances ( ) return len ( self . configuration_errors ) == 0
Import instantiate & init the modules we manage
37
9
19,459
def load ( self , modules ) : self . modules_assoc = [ ] for module in modules : if not module . enabled : logger . info ( "Module %s is declared but not enabled" , module . name ) # Store in our modules list but do not try to load # Probably someone else will load this module later... self . modules [ module . uuid ] = module continue logger . info ( "Importing Python module '%s' for %s..." , module . python_name , module . name ) try : python_module = importlib . import_module ( module . python_name ) # Check existing module properties # Todo: check all mandatory properties if not hasattr ( python_module , 'properties' ) : # pragma: no cover self . configuration_errors . append ( "Module %s is missing a 'properties' " "dictionary" % module . python_name ) raise AttributeError logger . info ( "Module properties: %s" , getattr ( python_module , 'properties' ) ) # Check existing module get_instance method if not hasattr ( python_module , 'get_instance' ) or not isinstance ( getattr ( python_module , 'get_instance' ) , collections . Callable ) : # pragma: no cover self . configuration_errors . append ( "Module %s is missing a 'get_instance' " "function" % module . python_name ) raise AttributeError self . modules_assoc . append ( ( module , python_module ) ) logger . info ( "Imported '%s' for %s" , module . python_name , module . name ) except ImportError as exp : # pragma: no cover, simple protection self . configuration_errors . append ( "Module %s (%s) can't be loaded, Python " "importation error: %s" % ( module . python_name , module . name , str ( exp ) ) ) except AttributeError : # pragma: no cover, simple protection self . configuration_errors . append ( "Module %s (%s) can't be loaded, " "module configuration" % ( module . python_name , module . name ) ) else : logger . info ( "Loaded Python module '%s' (%s)" , module . python_name , module . name )
Load Python modules and check their usability
495
7
19,460
def try_instance_init ( self , instance , late_start = False ) : try : instance . init_try += 1 # Maybe it's a retry if not late_start and instance . init_try > 1 : # Do not try until too frequently, or it's too loopy if instance . last_init_try > time . time ( ) - MODULE_INIT_PERIOD : logger . info ( "Too early to retry initialization, retry period is %d seconds" , MODULE_INIT_PERIOD ) # logger.info("%s / %s", instance.last_init_try, time.time()) return False instance . last_init_try = time . time ( ) logger . info ( "Trying to initialize module: %s" , instance . name ) # If it's an external module, create/update Queues() if instance . is_external : instance . create_queues ( self . daemon . sync_manager ) # The module instance init function says if initialization is ok if not instance . init ( ) : logger . warning ( "Module %s initialisation failed." , instance . name ) return False logger . info ( "Module %s is initialized." , instance . name ) except Exception as exp : # pylint: disable=broad-except # pragma: no cover, simple protection msg = "The module instance %s raised an exception " "on initialization: %s, I remove it!" % ( instance . name , str ( exp ) ) self . configuration_errors . append ( msg ) logger . error ( msg ) logger . exception ( exp ) return False return True
Try to initialize the given module instance .
347
8
19,461
def clear_instances ( self , instances = None ) : if instances is None : instances = self . instances [ : ] # have to make a copy of the list for instance in instances : self . remove_instance ( instance )
Request to remove the given instances list or all if not provided
48
12
19,462
def set_to_restart ( self , instance ) : self . to_restart . append ( instance ) if instance . is_external : instance . proc = None
Put an instance to the restart queue
36
7
19,463
def get_instances ( self ) : self . clear_instances ( ) for ( alignak_module , python_module ) in self . modules_assoc : alignak_module . properties = python_module . properties . copy ( ) alignak_module . my_daemon = self . daemon logger . info ( "Alignak starting module '%s'" , alignak_module . get_name ( ) ) if getattr ( alignak_module , 'modules' , None ) : modules = [ ] for module_uuid in alignak_module . modules : if module_uuid in self . modules : modules . append ( self . modules [ module_uuid ] ) alignak_module . modules = modules logger . debug ( "Module '%s', parameters: %s" , alignak_module . get_name ( ) , alignak_module . __dict__ ) try : instance = python_module . get_instance ( alignak_module ) if not isinstance ( instance , BaseModule ) : # pragma: no cover, simple protection self . configuration_errors . append ( "Module %s instance is not a " "BaseModule instance: %s" % ( alignak_module . get_name ( ) , type ( instance ) ) ) raise AttributeError # pragma: no cover, simple protection except Exception as exp : # pylint: disable=broad-except logger . error ( "The module %s raised an exception on loading, I remove it!" , alignak_module . get_name ( ) ) logger . exception ( "Exception: %s" , exp ) self . configuration_errors . append ( "The module %s raised an exception on " "loading: %s, I remove it!" % ( alignak_module . get_name ( ) , str ( exp ) ) ) else : # Give the module the data to which daemon/module it is loaded into instance . set_loaded_into ( self . daemon . name ) self . instances . append ( instance ) for instance in self . instances : # External instances are not initialized now, but only when they are started if not instance . is_external and not self . try_instance_init ( instance ) : # If the init failed, we put in in the restart queue logger . warning ( "The module '%s' failed to initialize, " "I will try to restart it later" , instance . name ) self . set_to_restart ( instance ) return self . instances
Create init and then returns the list of module instances that the caller needs .
530
15
19,464
def start_external_instances ( self , late_start = False ) : for instance in [ i for i in self . instances if i . is_external ] : # But maybe the init failed a bit, so bypass this ones from now if not self . try_instance_init ( instance , late_start = late_start ) : logger . warning ( "The module '%s' failed to init, I will try to restart it later" , instance . name ) self . set_to_restart ( instance ) continue # ok, init succeed logger . info ( "Starting external module %s" , instance . name ) instance . start ( )
Launch external instances that are load correctly
138
7
19,465
def remove_instance ( self , instance ) : # External instances need to be close before (process + queues) if instance . is_external : logger . info ( "Request external process to stop for %s" , instance . name ) instance . stop_process ( ) logger . info ( "External process stopped." ) instance . clear_queues ( self . daemon . sync_manager ) # Then do not listen anymore about it self . instances . remove ( instance )
Request to cleanly remove the given instance . If instance is external also shutdown it cleanly
97
18
19,466
def check_alive_instances ( self ) : # Only for external for instance in self . instances : if instance in self . to_restart : continue if instance . is_external and instance . process and not instance . process . is_alive ( ) : logger . error ( "The external module %s died unexpectedly!" , instance . name ) logger . info ( "Setting the module %s to restart" , instance . name ) # We clean its queues, they are no more useful instance . clear_queues ( self . daemon . sync_manager ) self . set_to_restart ( instance ) # Ok, no need to look at queue size now continue # Now look for maximum queue size. If above the defined value, the module may have # a huge problem and so bailout. It's not a perfect solution, more a watchdog # If max_queue_size is 0, don't check this if self . daemon . max_queue_size == 0 : continue # Check for module queue size queue_size = 0 try : queue_size = instance . to_q . qsize ( ) except Exception : # pylint: disable=broad-except pass if queue_size > self . daemon . max_queue_size : logger . error ( "The module %s has a too important queue size (%s > %s max)!" , instance . name , queue_size , self . daemon . max_queue_size ) logger . info ( "Setting the module %s to restart" , instance . name ) # We clean its queues, they are no more useful instance . clear_queues ( self . daemon . sync_manager ) self . set_to_restart ( instance )
Check alive instances . If not log error and try to restart it
359
13
19,467
def try_to_restart_deads ( self ) : to_restart = self . to_restart [ : ] del self . to_restart [ : ] for instance in to_restart : logger . warning ( "Trying to restart module: %s" , instance . name ) if self . try_instance_init ( instance ) : logger . warning ( "Restarting %s..." , instance . name ) # Because it is a restart, clean the module inner process reference instance . process = None # If it's an external module, it will start the process instance . start ( ) # Ok it's good now :) else : # Will retry later... self . to_restart . append ( instance )
Try to reinit and restart dead instances
155
8
19,468
def stop_all ( self ) : logger . info ( 'Shutting down modules...' ) # Ask internal to quit if they can for instance in self . get_internal_instances ( ) : if hasattr ( instance , 'quit' ) and isinstance ( instance . quit , collections . Callable ) : instance . quit ( ) self . clear_instances ( [ instance for instance in self . instances if instance . is_external ] )
Stop all module instances
94
4
19,469
def parse ( self ) : # pylint: disable=too-many-branches # Search if some ini files existe in an alignak.d sub-directory sub_directory = 'alignak.d' dir_name = os . path . dirname ( self . configuration_file ) dir_name = os . path . join ( dir_name , sub_directory ) self . cfg_files = [ self . configuration_file ] if os . path . exists ( dir_name ) : for root , _ , walk_files in os . walk ( dir_name , followlinks = True ) : for found_file in walk_files : if not re . search ( r"\.ini$" , found_file ) : continue self . cfg_files . append ( os . path . join ( root , found_file ) ) print ( "Loading configuration files: %s " % self . cfg_files ) # Read and parse the found configuration files self . config = configparser . ConfigParser ( ) try : self . config . read ( self . cfg_files ) if self . config . _sections == { } : print ( "* bad formatted configuration file: %s " % self . configuration_file ) if self . embedded : raise ValueError sys . exit ( 2 ) for section in self . config . sections ( ) : if self . verbose : print ( "- section: %s" % section ) for ( key , value ) in self . config . items ( section ) : inner_property = "%s.%s" % ( section , key ) # Set object property setattr ( self , inner_property , value ) # Set environment variable os . environ [ inner_property ] = value if self . verbose : print ( " %s = %s" % ( inner_property , value ) ) if self . export : # Allowed shell variables may only contain: [a-zA-z0-9_] inner_property = re . sub ( '[^0-9a-zA-Z]+' , '_' , inner_property ) inner_property = inner_property . upper ( ) print ( "export %s=%s" % ( inner_property , cmd_quote ( value ) ) ) except configparser . ParsingError as exp : print ( "* parsing error in config file : %s\n%s" % ( self . configuration_file , exp . message ) ) if self . embedded : return False sys . exit ( 3 ) except configparser . InterpolationMissingOptionError as exp : print ( "* incorrect or missing variable: %s" % str ( exp ) ) if self . embedded : return False sys . 
exit ( 3 ) if self . verbose : print ( "Configuration file parsed correctly" ) return True
Check if some extra configuration files are existing in an alignak . d sub directory near the found configuration file .
596
22
19,470
def write ( self , env_file ) : try : with open ( env_file , "w" ) as out_file : self . config . write ( out_file ) except Exception as exp : # pylint: disable=broad-except print ( "Dumping environment file raised an error: %s. " % exp )
Write the Alignak configuration to a file
71
9
19,471
def get_alignak_macros ( self ) : macros = self . get_alignak_configuration ( macros = True ) sections = self . _search_sections ( 'pack.' ) for name , _ in list ( sections . items ( ) ) : section_macros = self . get_alignak_configuration ( section = name , macros = True ) macros . update ( section_macros ) return macros
Get the Alignak macros .
89
7
19,472
def get_alignak_configuration ( self , section = SECTION_CONFIGURATION , legacy_cfg = False , macros = False ) : configuration = self . _search_sections ( section ) if section not in configuration : return [ ] for prop , _ in list ( configuration [ section ] . items ( ) ) : # Only legacy configuration items if legacy_cfg : if not prop . startswith ( 'cfg' ) : configuration [ section ] . pop ( prop ) continue # Only macro definitions if macros : if not prop . startswith ( '_' ) and not prop . startswith ( '$' ) : configuration [ section ] . pop ( prop ) continue # All values except legacy configuration and macros if prop . startswith ( 'cfg' ) or prop . startswith ( '_' ) or prop . startswith ( '$' ) : configuration [ section ] . pop ( prop ) return configuration [ section ]
Get the Alignak configuration parameters . All the variables included in the SECTION_CONFIGURATION section except the variables starting with cfg and the macros .
199
32
19,473
def get_daemons ( self , daemon_name = None , daemon_type = None ) : if daemon_name is not None : sections = self . _search_sections ( 'daemon.%s' % daemon_name ) if 'daemon.%s' % daemon_name in sections : return sections [ 'daemon.' + daemon_name ] return { } if daemon_type is not None : sections = self . _search_sections ( 'daemon.' ) for name , daemon in list ( sections . items ( ) ) : if 'type' not in daemon or not daemon [ 'type' ] == daemon_type : sections . pop ( name ) return sections return self . _search_sections ( 'daemon.' )
Get the daemons configuration parameters
158
7
19,474
def get_modules ( self , name = None , daemon_name = None , names_only = True ) : if name is not None : sections = self . _search_sections ( 'module.' + name ) if 'module.' + name in sections : return sections [ 'module.' + name ] return { } if daemon_name is not None : section = self . get_daemons ( daemon_name ) if 'modules' in section and section [ 'modules' ] : modules = [ ] for module_name in section [ 'modules' ] . split ( ',' ) : if names_only : modules . append ( module_name ) else : modules . append ( self . get_modules ( name = module_name ) ) return modules return [ ] return self . _search_sections ( 'module.' )
Get the modules configuration parameters
173
5
19,475
def copy_shell ( self ) : cls = self . __class__ new_i = cls ( ) # create a new group new_i . uuid = self . uuid # with the same id # Copy all properties for prop in cls . properties : if hasattr ( self , prop ) : if prop in [ 'members' , 'unknown_members' ] : setattr ( new_i , prop , [ ] ) else : setattr ( new_i , prop , getattr ( self , prop ) ) return new_i
Copy the group properties EXCEPT the members . Members need to be filled after manually
116
16
19,476
def add_members ( self , members ) : if not isinstance ( members , list ) : members = [ members ] if not getattr ( self , 'members' , None ) : self . members = members else : self . members . extend ( members )
Add a new member to the members list
54
8
19,477
def add_unknown_members ( self , members ) : if not isinstance ( members , list ) : members = [ members ] if not hasattr ( self , 'unknown_members' ) : self . unknown_members = members else : self . unknown_members . extend ( members )
Add a new member to the unknown members list
60
9
19,478
def is_correct ( self ) : state = True # Make members unique, remove duplicates if self . members : self . members = list ( set ( self . members ) ) if self . unknown_members : for member in self . unknown_members : msg = "[%s::%s] as %s, got unknown member '%s'" % ( self . my_type , self . get_name ( ) , self . __class__ . my_type , member ) self . add_error ( msg ) state = False return super ( Itemgroup , self ) . is_correct ( ) and state
Check if a group is valid . Valid mean all members exists so list of unknown_members is empty
128
20
19,479
def get_initial_status_brok ( self , extra = None ) : # Here members is a list of identifiers and we need their names if extra and isinstance ( extra , Items ) : members = [ ] for member_id in self . members : member = extra [ member_id ] members . append ( ( member . uuid , member . get_name ( ) ) ) extra = { 'members' : members } return super ( Itemgroup , self ) . get_initial_status_brok ( extra = extra )
Get a brok with the group properties
112
8
19,480
def check_dir ( self , dirname ) : try : os . makedirs ( dirname ) dir_stat = os . stat ( dirname ) print ( "Created the directory: %s, stat: %s" % ( dirname , dir_stat ) ) if not dir_stat . st_uid == self . uid : os . chown ( dirname , self . uid , self . gid ) os . chmod ( dirname , 0o775 ) dir_stat = os . stat ( dirname ) print ( "Changed directory ownership and permissions: %s, stat: %s" % ( dirname , dir_stat ) ) self . pre_log . append ( ( "DEBUG" , "Daemon '%s' directory %s checking... " "User uid: %s, directory stat: %s." % ( self . name , dirname , os . getuid ( ) , dir_stat ) ) ) self . pre_log . append ( ( "INFO" , "Daemon '%s' directory %s did not exist, I created it. " "I set ownership for this directory to %s:%s." % ( self . name , dirname , self . user , self . group ) ) ) except OSError as exp : if exp . errno == errno . EEXIST and os . path . isdir ( dirname ) : # Directory still exists... pass else : self . pre_log . append ( ( "ERROR" , "Daemon directory '%s' did not exist, " "and I could not create. Exception: %s" % ( dirname , exp ) ) ) self . exit_on_error ( "Daemon directory '%s' did not exist, " "and I could not create.'. Exception: %s" % ( dirname , exp ) , exit_code = 3 )
Check and create directory
403
4
19,481
def request_stop ( self , message = '' , exit_code = 0 ) : # Log an error message if exit code is not 0 # Force output to stderr if exit_code : if message : logger . error ( message ) try : sys . stderr . write ( message ) except Exception : # pylint: disable=broad-except pass logger . error ( "Sorry, I bail out, exit code: %d" , exit_code ) try : sys . stderr . write ( "Sorry, I bail out, exit code: %d" % exit_code ) except Exception : # pylint: disable=broad-except pass else : if message : logger . info ( message ) self . unlink ( ) self . do_stop ( ) logger . info ( "Stopped %s." , self . name ) sys . exit ( exit_code )
Remove pid and stop daemon
187
5
19,482
def daemon_connection_init ( self , s_link , set_wait_new_conf = False ) : logger . debug ( "Daemon connection initialization: %s %s" , s_link . type , s_link . name ) # If the link is not not active, I do not try to initialize the connection, just useless ;) if not s_link . active : logger . warning ( "%s '%s' is not active, do not initialize its connection!" , s_link . type , s_link . name ) return False # Create the daemon connection s_link . create_connection ( ) # Get the connection running identifier - first client / server communication logger . debug ( "[%s] Getting running identifier for '%s'" , self . name , s_link . name ) # Assume the daemon should be alive and reachable # because we are initializing the connection... s_link . alive = True s_link . reachable = True got_a_running_id = None for _ in range ( 0 , s_link . max_check_attempts ) : got_a_running_id = s_link . get_running_id ( ) if got_a_running_id : s_link . last_connection = time . time ( ) if set_wait_new_conf : s_link . wait_new_conf ( ) break time . sleep ( 0.3 ) return got_a_running_id
Initialize a connection with the daemon for the provided satellite link
311
12
19,483
def do_load_modules ( self , modules ) : _ts = time . time ( ) logger . info ( "Loading modules..." ) if self . modules_manager . load_and_init ( modules ) : if self . modules_manager . instances : logger . info ( "I correctly loaded my modules: [%s]" , ',' . join ( [ inst . name for inst in self . modules_manager . instances ] ) ) else : logger . info ( "I do not have any module" ) else : # pragma: no cover, not with unit tests... logger . error ( "Errors were encountered when checking and loading modules:" ) for msg in self . modules_manager . configuration_errors : logger . error ( msg ) if self . modules_manager . configuration_warnings : # pragma: no cover, not tested for msg in self . modules_manager . configuration_warnings : logger . warning ( msg ) statsmgr . gauge ( 'modules.count' , len ( modules ) ) statsmgr . timer ( 'modules.load-time' , time . time ( ) - _ts )
Wrapper for calling load_and_init method of modules_manager attribute
237
15
19,484
def dump_environment ( self ) : # Dump the Alignak configuration to a temporary ini file path = os . path . join ( tempfile . gettempdir ( ) , 'dump-env-%s-%s-%d.ini' % ( self . type , self . name , int ( time . time ( ) ) ) ) try : with open ( path , "w" ) as out_file : self . alignak_env . write ( out_file ) except Exception as exp : # pylint: disable=broad-except logger . error ( "Dumping daemon environment raised an error: %s. " , exp )
Try to dump memory
139
4
19,485
def change_to_workdir ( self ) : logger . info ( "Changing working directory to: %s" , self . workdir ) self . check_dir ( self . workdir ) try : os . chdir ( self . workdir ) except OSError as exp : self . exit_on_error ( "Error changing to working directory: %s. Error: %s. " "Check the existence of %s and the %s/%s account " "permissions on this directory." % ( self . workdir , str ( exp ) , self . workdir , self . user , self . group ) , exit_code = 3 ) self . pre_log . append ( ( "INFO" , "Using working directory: %s" % os . path . abspath ( self . workdir ) ) )
Change working directory to working attribute
176
6
19,486
def unlink ( self ) : logger . debug ( "Unlinking %s" , self . pid_filename ) try : os . unlink ( self . pid_filename ) except OSError as exp : logger . debug ( "Got an error unlinking our pid file: %s" , exp )
Remove the daemon s pid file
66
6
19,487
def __open_pidfile(self, write=False):
    """Open the pid file and keep the handle in ``self.fpid``.

    An existing file is opened in 'r+' (unless *write* is True); a missing
    file is created with 'w+'. Any failure terminates the daemon with
    exit code 3.

    :param write: force opening in write ('w+') mode
    """
    # Any problem opening or creating the file is fatal for the daemon
    try:
        state = 'existing' if os.path.exists(self.pid_filename) else 'missing'
        self.pre_log.append(("DEBUG",
                             "Opening %s pid file: %s" % (state, self.pid_filename)))
        # Windows do not manage the rw+ mode, so open an existing file in
        # read/update mode first, and only create it as void when missing
        mode = 'w+'
        if not write and os.path.exists(self.pid_filename):
            mode = 'r+'
        self.fpid = open(self.pid_filename, mode)
    except Exception as exp:  # pylint: disable=broad-except
        self.exit_on_error("Error opening pid file: %s. Error: %s. "
                           "Check the %s:%s account permissions to write this file."
                           % (self.pid_filename, str(exp), self.user, self.group),
                           exit_code=3)
Open the pid file in read or write mode
262
8
19,488
def write_pid(self, pid):
    """Overwrite the already-opened pid file (``self.fpid``) with *pid*,
    then close and drop the file handle.

    :param pid: process identifier to store
    """
    pid_file = self.fpid
    pid_file.seek(0)
    pid_file.truncate()
    pid_file.write("%d" % pid)
    pid_file.close()
    del self.fpid
Write pid to the pid file
54
6
19,489
def close_fds(self, skip_close_fds):  # pragma: no cover, not with unit tests...
    """Close every file descriptor of the process, except the ones listed
    in *skip_close_fds*.

    Must run first because a debug file may be opened relative to the
    current working directory.

    :param skip_close_fds: iterable of file descriptor numbers to keep open
    """
    # The hard RLIMIT_NOFILE limit bounds the fds the process may hold;
    # fall back to a sane default when the limit is unbounded.
    fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if fd_limit == resource.RLIM_INFINITY:
        fd_limit = 1024
    self.pre_log.append(("DEBUG", "Maximum file descriptors: %d" % fd_limit))

    # Iterate through and close all file descriptors
    for fd in range(0, fd_limit):
        if fd in skip_close_fds:
            self.pre_log.append(("INFO", "Do not close fd: %s" % fd))
            continue
        try:
            os.close(fd)
        except OSError:
            # fd wasn't open to begin with - ignore
            pass
Close all the process file descriptors . Skip the descriptors present in the skip_close_fds list
219
22
19,490
def do_daemon_init_and_start(self, set_proc_title=True):
    """Main daemon function: cleanly allocates, initialises and starts all
    the resources needed to go in daemon mode.

    Order of operations: process title, user/group switch, workdir change,
    parallel-run check, daemonization (or pid registration), logger setup,
    HTTP communication daemon setup, synchronisation manager creation, and
    finally the HTTP serving thread start.

    Improvement over the previous revision: a large block of commented-out
    dead code (log file ownership TODO) was removed; behavior is unchanged.

    :param set_proc_title: if True (default), set the process title from the daemon name
    :return: False when daemonizing or setting up the HTTP daemon failed, True otherwise
    """
    if set_proc_title:
        self.set_proctitle(self.name)

    # Change to configured user/group account
    self.change_to_user_group()
    # Change the working directory
    self.change_to_workdir()
    # Check if I am still running
    self.check_parallel_run()

    # If we must daemonize, let's do it!
    if self.is_daemon:
        if not self.daemonize():
            logger.error("I could not daemonize myself :(")
            return False
    else:
        # Else, I set my own pid as the reference one
        self.write_pid(os.getpid())

    # Configure the daemon logger
    self.setup_alignak_logger()

    # Setup the Web Services daemon
    if not self.setup_communication_daemon():
        logger.error("I could not setup my communication daemon :(")
        return False

    # Creating synchonisation manager (inter-daemon queues...)
    self.sync_manager = self._create_manager()

    # Start the CherryPy server through a detached thread
    logger.info("Starting http_daemon thread")
    # pylint: disable=bad-thread-instantiation
    self.http_thread = threading.Thread(target=self.http_daemon_thread,
                                        name='%s-http_thread' % self.name)
    # Setting the thread as a daemon allows Ctrl+C to kill the main daemon
    self.http_thread.daemon = True
    self.http_thread.start()
    logger.info("HTTP daemon thread started")
    return True
Main daemon function. Cleanly allocates, initializes and starts all necessary resources to go in daemon mode.
632
20
19,491
def setup_communication_daemon(self):
    # pylint: disable=no-member
    """Setup the HTTP server daemon to listen for incoming HTTP requests
    from the other Alignak daemons.

    When SSL is enabled, the server certificate/key (and optional CA cert)
    files are checked for existence; a DH configuration is explicitly
    rejected as unsupported. The HTTPDaemon instance is only created here,
    it is started later.

    :return: True on success, False when the port is not free or the
        HTTPDaemon creation raised an exception
    """
    ca_cert = ssl_cert = ssl_key = server_dh = None

    # The SSL part
    if self.use_ssl:
        ssl_cert = os.path.abspath(self.server_cert)
        if not os.path.exists(ssl_cert):
            self.exit_on_error("The configured SSL server certificate file '%s' "
                               "does not exist." % ssl_cert, exit_code=2)
        logger.info("Using SSL server certificate: %s", ssl_cert)

        ssl_key = os.path.abspath(self.server_key)
        if not os.path.exists(ssl_key):
            self.exit_on_error("The configured SSL server key file '%s' "
                               "does not exist." % ssl_key, exit_code=2)
        logger.info("Using SSL server key: %s", ssl_key)

        if self.server_dh:
            server_dh = os.path.abspath(self.server_dh)
            logger.info("Using ssl dh cert file: %s", server_dh)
            # DH configuration is deliberately rejected - not supported yet
            self.exit_on_error("Sorry, but using a DH configuration "
                               "is not currently supported!", exit_code=2)

        if self.ca_cert:
            ca_cert = os.path.abspath(self.ca_cert)
            logger.info("Using ssl ca cert file: %s", ca_cert)

        if self.hard_ssl_name_check:
            logger.info("Enabling hard SSL server name verification")

    # Let's create the HTTPDaemon, it will be started later
    # pylint: disable=E1101
    try:
        logger.info('Setting up HTTP daemon (%s:%d), %d threads',
                    self.host, self.port, self.thread_pool_size)
        self.http_daemon = HTTPDaemon(self.host, self.port, self.http_interface,
                                      self.use_ssl, ca_cert, ssl_key, ssl_cert,
                                      server_dh, self.thread_pool_size,
                                      self.log_cherrypy, self.favicon)
    except PortNotFree:
        logger.error('The HTTP daemon port (%s:%d) is not free...', self.host, self.port)
        return False
    except Exception as exp:  # pylint: disable=broad-except
        # Bug fix: the former print() used logger-style lazy arguments
        # ('...: %s', exp), which printed the raw format string and the
        # exception as two items instead of an interpolated message.
        print('Setting up HTTP daemon, exception: %s' % str(exp))
        logger.exception('Setting up HTTP daemon, exception: %s', str(exp))
        return False
    return True
Setup HTTP server daemon to listen for incoming HTTP requests from other Alignak daemons
601
18
19,492
def set_proctitle(self, daemon_name=None):
    """Set the OS process title of the daemon, and propagate the daemon
    name to the modules manager when one is provided.

    :param daemon_name: optional daemon name appended to the process title
    """
    logger.debug("Setting my process name: %s", daemon_name)
    if not daemon_name:
        setproctitle("alignak-%s" % self.type)
        return
    setproctitle("alignak-%s %s" % (self.type, daemon_name))
    if self.modules_manager:
        self.modules_manager.set_daemon_name(daemon_name)
Set the proctitle of the daemon
107
8
19,493
def http_daemon_thread(self):
    """Main function of the HTTP daemon thread.

    Blocks serving the HTTP protocol until the server stops. A busy port
    (PortNotFree) is logged and re-raised; any other exception makes the
    daemon exit through exit_on_exception.
    """
    logger.debug("HTTP thread running")
    try:
        # Blocking call serving the HTTP protocol
        self.http_daemon.run()
    except PortNotFree as err:
        logger.exception('The HTTP daemon port is not free: %s', err)
        raise
    except Exception as err:  # pylint: disable=broad-except
        self.exit_on_exception(err)
    logger.debug("HTTP thread exiting")
Main function of the HTTP daemon thread: will loop forever unless we stop the root daemon
106
16
19,494
def make_a_pause(self, timeout=0.0001, check_time_change=True):
    """Wait up to *timeout* seconds, optionally checking whether the system
    time changed meanwhile.

    :param timeout: maximum pause duration in seconds (0 is coerced to 0.0001)
    :param check_time_change: when True, run check_for_system_time_change()
        before sleeping and shorten the sleep by the time that check took
    :return: tuple (elapsed, time_changed) - both 0 when the check is
        skipped; otherwise elapsed is the duration of the time-change check
        and time_changed is whatever check_for_system_time_change() returned
    """
    if timeout == 0:
        timeout = 0.0001
    if not check_time_change:
        # Simple case: just sleep the whole timeout and account for it
        time.sleep(timeout)
        self.sleep_time += timeout
        return 0, 0
    # Check is system time changed
    before = time.time()
    time_changed = self.check_for_system_time_change()
    after = time.time()
    elapsed = after - before
    # The check alone consumed the whole timeout: no extra sleep needed
    if elapsed > timeout:
        return elapsed, time_changed
    # Time to sleep (only the remainder of the timeout)
    time.sleep(timeout - elapsed)
    # Increase our sleep time for the time we slept
    # NOTE(review): time_changed is presumably the detected clock shift in
    # seconds; adding it to 'before' keeps the accumulated sleep_time
    # consistent across a clock jump - confirm in check_for_system_time_change
    before += time_changed
    self.sleep_time += time.time() - before
    return elapsed, time_changed
Wait up to timeout and check for system time change .
162
11
19,495
def wait_for_initial_conf(self, timeout=1.0):
    """Block until the arbiter sends the initial configuration
    (self.new_conf becomes truthy) or the daemon gets interrupted.

    :param timeout: pause duration between two checks, in seconds
    """
    logger.info("Waiting for initial configuration")
    start = time.time()
    # Arbiter do not already set our have_conf param
    while not self.new_conf and not self.interrupted:
        # Make a pause and check if the system time changed
        self.make_a_pause(timeout, check_time_change=True)

    if self.interrupted:
        logger.info("Interrupted before getting the initial configuration")
        return

    logger.info("Got initial configuration, waited for: %.2f seconds",
                time.time() - start)
    statsmgr.timer('configuration.initial', time.time() - start)
Wait for the initial configuration from the arbiter. Basically sleep 1.0 seconds and check if new_conf is here
168
21
19,496
def watch_for_new_conf(self, timeout=0):
    """Pause briefly, then report whether a new configuration was sent to
    the daemon.

    :param timeout: pause duration, in seconds
    :return: True when self.new_conf holds at least one truthy element
    """
    logger.debug("Watching for a new configuration, timeout: %s", timeout)
    # Sleep without checking for a system time change
    self.make_a_pause(timeout=timeout, check_time_change=False)
    return any(self.new_conf)
Check if a new configuration was sent to the daemon
65
10
19,497
def hook_point(self, hook_name, handle=None):
    """Call the ``hook_<hook_name>`` function of every module instance
    that defines one. A module whose hook raises is disabled and set to
    restart later. Successful hook calls are timed via statsmgr.

    :param hook_name: hook name, without the 'hook_' prefix
    :param handle: object passed to each hook; defaults to the daemon itself
    """
    full_hook_name = 'hook_' + hook_name
    target = self if handle is None else handle
    for module in self.modules_manager.instances:
        start = time.time()
        if not hasattr(module, full_hook_name):
            continue
        hook = getattr(module, full_hook_name)
        try:
            hook(target)
        # pylint: disable=broad-except
        except Exception as exp:  # pragma: no cover, never happen during unit tests...
            logger.warning('The instance %s raised an exception %s. I disabled it,'
                           ' and set it to restart later', module.name, str(exp))
            logger.exception('Exception %s', exp)
            self.modules_manager.set_to_restart(module)
        else:
            statsmgr.timer('hook.%s.%s' % (hook_name, module.name), time.time() - start)
Used to call module function that may define a hook function for hook_name
219
15
19,498
def get_id(self, details=False):  # pylint: disable=unused-argument
    """Return the daemon identification information.

    :param details: unused, kept for interface compatibility
    :return: dict with 'alignak', 'type', 'name' and 'version' keys;
        missing attributes default to 'unknown'
    """
    return {
        "alignak": getattr(self, 'alignak_name', 'unknown'),
        "type": getattr(self, 'type', 'unknown'),
        "name": getattr(self, 'name', 'unknown'),
        "version": VERSION,
    }
Get daemon identification information
97
4
19,499
def exit_ok(self, message, exit_code=None):
    """Log an exit message and stop the daemon properly.

    :param message: message logged on exit (skipped when empty)
    :param exit_code: if not None, terminate the process with this exit code
    :return: None (unless exit_code is given, in which case SystemExit is raised)
    """
    logger.info("Exiting...")
    if message:
        logger.info("-----")
        logger.error("Exit message: %s", message)
        logger.info("-----")

    self.request_stop()

    if exit_code is not None:
        # Bug fix: use sys.exit instead of the bare exit() builtin - exit()
        # is injected by the site module and may be absent (python -S)
        import sys
        sys.exit(exit_code)
Log a message and exit
79
5