idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
19,300
def get_next_invalid_time_from_t(self, timestamp):
    """Get the next invalid time for the time range, starting from *timestamp*.

    :param timestamp: epoch time to search from
    :return: epoch of the next invalid second, or None when none can be found
    """
    if not self.is_time_valid(timestamp):
        # timestamp is already invalid: nothing to search for
        return timestamp
    # First we search for the day of time range
    t_day = self.get_next_invalid_day(timestamp)
    # We search for the min of all tr.start > sec_from_morning
    # if it's the next day, use a start of the day search for timerange
    if timestamp < t_day:
        sec_from_morning = self.get_next_future_timerange_invalid(t_day)
    else:  # it is in this day, so look from t (can be in the evening or so)
        sec_from_morning = self.get_next_future_timerange_invalid(timestamp)
    # tr can't be valid, or it will be return at the beginning
    # sec_from_morning = self.get_next_future_timerange_invalid(t)
    # Ok we've got a next invalid day and a invalid possibility in
    # timerange, so the next invalid is this day+sec_from_morning
    if t_day is not None and sec_from_morning is not None:
        return t_day + sec_from_morning + 1
    # We've got a day but no sec_from_morning: the timerange is full (0->24h)
    # so the next invalid is this day at the day_start
    if t_day is not None and sec_from_morning is None:
        return t_day
    # Then we search for the next day of t
    # The sec will be the min of the day
    timestamp = get_day(timestamp) + 86400
    t_day2 = self.get_next_invalid_day(timestamp)
    sec_from_morning = self.get_next_future_timerange_invalid(t_day2)
    if t_day2 is not None and sec_from_morning is not None:
        return t_day2 + sec_from_morning + 1
    if t_day2 is not None and sec_from_morning is None:
        return t_day2
    # I did not found any valid time
    return None
Get next invalid time for time range
465
7
19,301
def get_start_and_end_time(self, ref=None):
    """Return the (start, end) epoch pair for this calendar date range.

    :param ref: reference time, unused here but kept for interface compatibility
    :return: tuple (start_time, end_time)
    """
    start = get_start_of_day(self.syear, int(self.smon), self.smday)
    end = get_end_of_day(self.eyear, int(self.emon), self.emday)
    return (start, end)
Specific function to get start time and end time for CalendarDaterange
74
14
19,302
def get_start_and_end_time(self, ref=None):
    """Specific function to get start time and end time for StandardDaterange.

    Finds the next occurrence (from *ref*) of the configured weekday and
    returns the epoch bounds of that day.

    :param ref: epoch reference time (None means "now" via time.localtime)
    :return: tuple (start_time, end_time) as integer epochs
    """
    now = time.localtime(ref)
    self.syear = now.tm_year
    self.month = now.tm_mon
    self.wday = now.tm_wday
    day_id = Daterange.get_weekday_id(self.day)
    today_morning = get_start_of_day(now.tm_year, now.tm_mon, now.tm_mday)
    tonight = get_end_of_day(now.tm_year, now.tm_mon, now.tm_mday)
    # days until the next occurrence of the configured weekday (0 if today)
    day_diff = (day_id - now.tm_wday) % 7
    morning = datetime.fromtimestamp(today_morning) + timedelta(days=day_diff)
    night = datetime.fromtimestamp(tonight) + timedelta(days=day_diff)
    # Bug fix: strftime("%s") is a non-portable glibc extension (it fails on
    # Windows and ignores the object's fields on some platforms);
    # datetime.timestamp() is the documented way to get the local-time epoch
    # of a naive datetime and returns the same value where "%s" worked.
    return (int(morning.timestamp()), int(night.timestamp()))
Specific function to get start time and end time for StandardDaterange
236
14
19,303
def get_start_and_end_time(self, ref=None):
    """Specific function to get start time and end time for MonthWeekDayDaterange.

    :param ref: epoch reference time (None means "now" via time.localtime)
    :return: tuple (start_time, end_time) as epoch values
    """
    now = time.localtime(ref)
    # If no start year is set, assume the current year (note: mutates self)
    if self.syear == 0:
        self.syear = now.tm_year
    day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
    start_time = get_start_of_day(self.syear, self.smon, day_start)
    # Same default for the end year
    if self.eyear == 0:
        self.eyear = now.tm_year
    day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
    end_time = get_end_of_day(self.eyear, self.emon, day_end)
    now_epoch = time.mktime(now)
    if start_time > end_time:  # the period is between years
        if now_epoch > end_time:
            # check for next year
            day_end = find_day_by_weekday_offset(self.eyear + 1, self.emon,
                                                 self.ewday, self.ewday_offset)
            end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
        else:
            # it s just that the start was the last year
            day_start = find_day_by_weekday_offset(self.syear - 1, self.smon,
                                                   self.swday, self.swday_offset)
            start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
    else:
        if now_epoch > end_time:
            # just have to check for next year if necessary
            day_start = find_day_by_weekday_offset(self.syear + 1, self.smon,
                                                   self.swday, self.swday_offset)
            start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
            day_end = find_day_by_weekday_offset(self.eyear + 1, self.emon,
                                                 self.ewday, self.ewday_offset)
            end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
    return (start_time, end_time)
Specific function to get start time and end time for MonthWeekDayDaterange
552
16
19,304
def get_start_and_end_time(self, ref=None):
    """Specific function to get start time and end time for MonthDateDaterange.

    :param ref: epoch reference time (None means "now" via time.localtime)
    :return: tuple (start_time, end_time) as epoch values
    """
    now = time.localtime(ref)
    # If no start year is set, assume the current year (note: mutates self)
    if self.syear == 0:
        self.syear = now.tm_year
    day_start = find_day_by_offset(self.syear, self.smon, self.smday)
    start_time = get_start_of_day(self.syear, self.smon, day_start)
    if self.eyear == 0:
        self.eyear = now.tm_year
    day_end = find_day_by_offset(self.eyear, self.emon, self.emday)
    end_time = get_end_of_day(self.eyear, self.emon, day_end)
    now_epoch = time.mktime(now)
    if start_time > end_time:  # the period is between years
        if now_epoch > end_time:
            # check for next year
            day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
            end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
        else:
            # it s just that start was the last year
            # Bug fix: the original used self.emday here, computing the
            # previous-year start day from the END month-day instead of the
            # start one (self.smday) — compare the symmetric branch below.
            day_start = find_day_by_offset(self.syear - 1, self.smon, self.smday)
            start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
    else:
        if now_epoch > end_time:
            # just have to check for next year if necessary
            day_start = find_day_by_offset(self.syear + 1, self.smon, self.smday)
            start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
            day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
            end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
    return (start_time, end_time)
Specific function to get start time and end time for MonthDateDaterange
485
15
19,305
def get_start_and_end_time ( self , ref = None ) : now = time . localtime ( ref ) # If no year, it's our year if self . syear == 0 : self . syear = now . tm_year month_start_id = now . tm_mon day_start = find_day_by_weekday_offset ( self . syear , month_start_id , self . swday , self . swday_offset ) start_time = get_start_of_day ( self . syear , month_start_id , day_start ) # Same for end year if self . eyear == 0 : self . eyear = now . tm_year month_end_id = now . tm_mon day_end = find_day_by_weekday_offset ( self . eyear , month_end_id , self . ewday , self . ewday_offset ) end_time = get_end_of_day ( self . eyear , month_end_id , day_end ) # Maybe end_time is before start. So look for the # next month if start_time > end_time : month_end_id += 1 if month_end_id > 12 : month_end_id = 1 self . eyear += 1 day_end = find_day_by_weekday_offset ( self . eyear , month_end_id , self . ewday , self . ewday_offset ) end_time = get_end_of_day ( self . eyear , month_end_id , day_end ) now_epoch = time . mktime ( now ) # But maybe we look not enought far. We should add a month if end_time < now_epoch : month_end_id += 1 month_start_id += 1 if month_end_id > 12 : month_end_id = 1 self . eyear += 1 if month_start_id > 12 : month_start_id = 1 self . syear += 1 # First start day_start = find_day_by_weekday_offset ( self . syear , month_start_id , self . swday , self . swday_offset ) start_time = get_start_of_day ( self . syear , month_start_id , day_start ) # Then end day_end = find_day_by_weekday_offset ( self . eyear , month_end_id , self . ewday , self . ewday_offset ) end_time = get_end_of_day ( self . eyear , month_end_id , day_end ) return ( start_time , end_time )
Specific function to get start time and end time for WeekDayDaterange
599
15
19,306
def get_start_and_end_time(self, ref=None):
    """Specific function to get start time and end time for MonthDayDaterange.

    :param ref: epoch reference time (None means "now" via time.localtime)
    :return: tuple (start_time, end_time) as epoch values
    """
    now = time.localtime(ref)
    if self.syear == 0:
        self.syear = now.tm_year
    month_start_id = now.tm_mon
    day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
    start_time = get_start_of_day(self.syear, month_start_id, day_start)
    if self.eyear == 0:
        self.eyear = now.tm_year
    month_end_id = now.tm_mon
    day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
    end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    now_epoch = time.mktime(now)
    if start_time > end_time:
        # start after end in the same month: the period actually started
        # the previous month
        month_start_id -= 1
        if month_start_id < 1:
            # wrap January -> December of previous year (mutates self.syear)
            month_start_id = 12
            self.syear -= 1
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
    if end_time < now_epoch:
        # the period is already over for this month: shift to next month
        month_end_id += 1
        month_start_id += 1
        if month_end_id > 12:
            month_end_id = 1
            self.eyear += 1
        if month_start_id > 12:
            month_start_id = 1
            self.syear += 1
        # For the start
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
        # For the end
        day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
        end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    return (start_time, end_time)
Specific function to get start time and end time for MonthDayDaterange
500
15
19,307
def get_unknown_check_result_brok(cmd_line):
    """Create an unknown check result brok and fill it with command data.

    :param cmd_line: external command line (PROCESS_*_CHECK_RESULT)
    :return: a Brok carrying the parsed fields, or None when the line
        matches neither the service nor the host command format
    """
    match = re.match(r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
                     r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
    if not match:
        match = re.match(r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'
                         r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line)
    if not match:
        return None
    data = {'time_stamp': int(match.group(1)), 'host_name': match.group(3)}
    if match.group(2) == 'SERVICE':
        data['service_description'] = match.group(4)
        first = 5  # return_code starts at group 5 for service results
    else:
        first = 4  # host results have no service description group
    data['return_code'] = match.group(first)
    data['output'] = match.group(first + 1)
    data['perf_data'] = match.group(first + 2)
    return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(),
                 'data': data})
Create unknown check result brok and fill it with command data
360
12
19,308
def get_name(self):
    """Get name based on dependent_host_name and host_name attributes.

    Each part is substituted by 'unknown' when the attribute is missing,
    empty, or does not itself carry a host_name.

    :return: '<dependent_host_name>/<host_name>' string
    """
    def _label(attr):
        # the attribute holds an object whose own host_name is the label
        holder = getattr(self, attr, None)
        if not holder:
            return 'unknown'
        return getattr(holder, 'host_name', 'unknown')

    return _label('dependent_host_name') + '/' + _label('host_name')
Get name based on dependent_host_name and host_name attributes Each attribute is substituted by unknown if attribute does not exist
131
25
19,309
def linkify_hd_by_h(self, hosts):
    """Replace dependent_host_name and host_name in host dependency by the real object.

    On success the names are replaced by the hosts' uuids; unknown names
    raise a configuration error on the dependency item.

    :param hosts: Hosts collection, looked up with find_by_name
    :return: None
    """
    for hostdep in self:
        try:
            host = hosts.find_by_name(hostdep.host_name)
            if host is None:
                hostdep.add_error("Error: the host dependency got a bad host_name definition '%s'"
                                  % hostdep.host_name)
            dephost = hosts.find_by_name(hostdep.dependent_host_name)
            if dephost is None:
                hostdep.add_error("Error: the host dependency got "
                                  "a bad dependent_host_name definition '%s'"
                                  % hostdep.dependent_host_name)
            if host:
                hostdep.host_name = host.uuid
            if dephost:
                hostdep.dependent_host_name = dephost.uuid
        except AttributeError as exp:
            hostdep.add_error("Error: the host dependency miss a property '%s'" % exp)
Replace dependent_host_name and host_name in host dependency by the real object
222
18
19,310
def linkify_hd_by_tp(self, timeperiods):
    """Replace dependency_period by a real object in host dependency.

    Unknown period names are replaced by an empty string.

    :param timeperiods: Timeperiods collection, looked up with find_by_name
    :return: None
    """
    for hostdep in self:
        try:
            period = timeperiods.find_by_name(hostdep.dependency_period)
            hostdep.dependency_period = period.uuid if period else ''
        except AttributeError as exp:  # pragma: no cover, simple protectionn
            logger.error("[hostdependency] fail to linkify by timeperiod: %s", exp)
Replace dependency_period by a real object in host dependency
120
12
19,311
def merge_extinfo(host, extinfo):
    """Merge extended host information into a host.

    Host properties have precedence over hostextinfo properties: only empty
    host fields are filled from extinfo.
    Note that 2d_coords and 3d_coords are never merged, so not usable!

    :param host: host object to update in place
    :param extinfo: extended-info object providing default display values
    :return: None
    """
    for prop in ('notes', 'notes_url', 'icon_image', 'icon_image_alt',
                 'vrml_image', 'statusmap_image'):
        if getattr(host, prop) == '' and getattr(extinfo, prop) != '':
            setattr(host, prop, getattr(extinfo, prop))
Merge extended host information into a host
128
8
19,312
def set_proxy(self, proxy):  # pragma: no cover, not with unit tests
    """Set HTTP proxy on the underlying requests connection.

    No-op when *proxy* is empty/None; otherwise the same proxy is used for
    both http and https.

    :param proxy: proxy URL string (or falsy to leave the connection alone)
    :return: None
    """
    if not proxy:
        return
    logger.debug('PROXY SETTING PROXY %s', proxy)
    self._requests_con.proxies = {'http': proxy, 'https': proxy}
Set HTTP proxy
65
3
19,313
def post(self, path, args, wait=False):
    """POST an HTTP request to a daemon.

    :param path: URI path on the remote daemon
    :param args: dict of parameters; each value is serialized in place before sending
    :param wait: passed to make_timeout to compute the request timeout
    :return: raw response content (bytes)
    :raises HTTPClientTimeoutException: on request timeout
    :raises HTTPClientConnectionException: on connection failure
    :raises HTTPClientDataException: on non-200 response status
    :raises HTTPClientException: on any other request error
    """
    uri = self.make_uri(path)
    timeout = self.make_timeout(wait)
    # serialize every value in place so complex objects survive the JSON body
    for (key, value) in list(args.items()):
        args[key] = serialize(value, True)
    try:
        logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args)
        rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl)
        logger.debug("got: %d - %s", rsp.status_code, rsp.text)
        if rsp.status_code != 200:
            raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
        return rsp.content
    except (requests.Timeout, requests.ConnectTimeout):
        raise HTTPClientTimeoutException(timeout, uri)
    except requests.ConnectionError as exp:
        raise HTTPClientConnectionException(uri, exp.args[0])
    except Exception as exp:
        raise HTTPClientException('Request error to %s: %s' % (uri, exp))
POST an HTTP request to a daemon
263
7
19,314
def put(self, path, args, wait=False):  # pragma: no cover, looks never used!
    # todo: remove this because it looks never used anywhere...
    """PUT an HTTP request to a daemon.

    Unlike post(), *args* is sent as-is (no per-value serialization).

    :param path: URI path on the remote daemon
    :param args: request body data
    :param wait: passed to make_timeout to compute the request timeout
    :return: raw response content (bytes)
    :raises HTTPClientTimeoutException: on request timeout
    :raises HTTPClientConnectionException: on connection failure
    :raises HTTPClientDataException: on non-200 response status
    :raises HTTPClientException: on any other request error
    """
    uri = self.make_uri(path)
    timeout = self.make_timeout(wait)
    try:
        logger.debug("put: %s, timeout: %s, params: %s", uri, timeout, args)
        rsp = self._requests_con.put(uri, args, timeout=timeout, verify=self.strong_ssl)
        logger.debug("got: %d - %s", rsp.status_code, rsp.text)
        if rsp.status_code != 200:
            raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
        return rsp.content
    except (requests.Timeout, requests.ConnectTimeout):
        raise HTTPClientTimeoutException(timeout, uri)
    except requests.ConnectionError as exp:
        raise HTTPClientConnectionException(uri, exp.args[0])
    except Exception as exp:
        raise HTTPClientException('Request error to %s: %s' % (uri, exp))
PUT and HTTP request to a daemon
257
7
19,315
def explode(self, escalations):
    """Create an instance of Escalation for each HostEscalation object.

    :param escalations: Escalations collection where new objects are registered
    :return: None
    """
    # Now we explode all escalations (host_name, hostgroup_name) to escalations
    for escalation in self:
        # an escalation is keyed either on a host name or a hostgroup name
        name = getattr(escalation, 'host_name',
                       getattr(escalation, 'hostgroup_name', ''))
        creation_dict = {
            'escalation_name': 'Generated-HE-%s-%s' % (name, escalation.uuid)
        }
        for prop in escalation.__class__.properties:
            if hasattr(escalation, prop):
                creation_dict[prop] = getattr(escalation, prop)
        escalations.add_escalation(Escalation(creation_dict))
Create instance of Escalation for each HostEscalation object
150
13
19,316
def register ( self , name , _type , statsd_host = 'localhost' , statsd_port = 8125 , statsd_prefix = 'alignak' , statsd_enabled = False , broks_enabled = False ) : self . name = name # This attribute is not used, but I keep ascending compatibility with former interface! self . _type = _type # local statsd part self . statsd_host = statsd_host self . statsd_port = int ( statsd_port ) self . statsd_prefix = statsd_prefix self . statsd_enabled = statsd_enabled # local broks part self . broks_enabled = broks_enabled logger . debug ( "StatsD configuration for %s - %s:%s, prefix: %s, " "enabled: %s, broks: %s, file: %s" , self . name , self . statsd_host , self . statsd_port , self . statsd_prefix , self . statsd_enabled , self . broks_enabled , self . stats_file ) if self . statsd_enabled and self . statsd_host is not None and self . statsd_host != 'None' : logger . info ( "Sending %s statistics to: %s:%s, prefix: %s" , self . name , self . statsd_host , self . statsd_port , self . statsd_prefix ) if self . load_statsd ( ) : logger . info ( 'Alignak internal statistics are sent to StatsD.' ) else : logger . info ( 'StatsD server is not available.' ) if self . stats_file : try : self . file_d = open ( self . stats_file , 'a' ) logger . info ( "Alignak internal statistics are written in the file %s" , self . stats_file ) except OSError as exp : # pragma: no cover, should never happen... logger . exception ( "Error when opening the file '%s' : %s" , self . stats_file , exp ) self . file_d = None return self . statsd_enabled
Init instance with real values
467
5
19,317
def load_statsd(self):
    """Create socket connection to statsd host.

    When a carbon connection exists, a connection-test metric is pushed and
    flushed instead of opening a UDP socket.

    :return: True when the connection could be prepared, else False
    """
    if not self.statsd_enabled:
        logger.info('Stats reporting is not enabled, connection is not allowed')
        return False
    if self.statsd_enabled and self.carbon:
        # Graphite/carbon mode: queue a connection-test metric and flush it
        self.my_metrics.append(('.'.join([self.statsd_prefix, self.name,
                                          'connection-test']),
                                (int(time.time()), int(time.time()))))
        self.carbon.add_data_list(self.my_metrics)
        self.flush(log=True)
    else:
        try:
            logger.info('Trying to contact StatsD server...')
            # NOTE(review): gethostbyname is given bytes here
            # (encode('utf-8')) — the documented argument type is str;
            # confirm this works on all supported Python versions.
            self.statsd_addr = (socket.gethostbyname(self.statsd_host.encode('utf-8')),
                                self.statsd_port)
            self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except (socket.error, socket.gaierror) as exp:
            logger.warning('Cannot create StatsD socket: %s', exp)
            return False
        except Exception as exp:  # pylint: disable=broad-except
            logger.exception('Cannot create StatsD socket (other): %s', exp)
            return False
        logger.info('StatsD server contacted')
    return True
Create socket connection to statsd host
301
7
19,318
def flush(self, log=False):
    """Send inner stored metrics to the defined Graphite.

    Sending is paused for metrics_flush_pause seconds after a failure.

    :param log: when True, log an extra warning on a send failure
    :return: True on success, empty queue or paused state; False on failure
    """
    if not self.my_metrics:
        logger.debug("Flushing - no metrics to send")
        return True
    now = int(time.time())
    if self.last_failure and self.last_failure + self.metrics_flush_pause > now:
        # still inside the pause window opened by the last failure
        if not self.log_metrics_flush_pause:
            date = datetime.datetime.fromtimestamp(self.last_failure).strftime(self.date_fmt)
            logger.warning("Metrics flush paused on connection error "
                           "(last failed: %s). "
                           "Inner stored metric: %d. Trying to send...",
                           date, self.metrics_count)
            # warn only once per pause window
            self.log_metrics_flush_pause = True
        return True
    try:
        logger.debug("Flushing %d metrics to Graphite/carbon", self.metrics_count)
        if self.carbon.send_data():
            # metrics were sent: drop the inner queue
            self.my_metrics = []
        else:
            logger.warning("Failed sending metrics to Graphite/carbon. "
                           "Inner stored metric: %d", self.metrics_count)
            if log:
                logger.warning("Failed sending metrics to Graphite/carbon. "
                               "Inner stored metric: %d", self.metrics_count)
            return False
        if self.log_metrics_flush_pause:
            logger.warning("Metrics flush restored. "
                           "Remaining stored metric: %d", self.metrics_count)
        # clear the failure / pause state after a successful send
        self.last_failure = 0
        self.log_metrics_flush_pause = False
    except Exception as exp:  # pylint: disable=broad-except
        if not self.log_metrics_flush_pause:
            logger.warning("Failed sending metrics to Graphite/carbon. "
                           "Inner stored metric: %d", self.metrics_count)
        else:
            date = datetime.datetime.fromtimestamp(self.last_failure).strftime(self.date_fmt)
            logger.warning("Metrics flush paused on connection error "
                           "(last failed: %s). "
                           "Inner stored metric: %d. Trying to send...",
                           date, self.metrics_count)
        logger.warning("Exception: %s", str(exp))
        self.last_failure = now
        return False
    return True
Send inner stored metrics to the defined Graphite
508
9
19,319
def send_to_graphite(self, metric, value, timestamp=None):
    """Inner store a new metric and flush to Graphite if the flush
    threshold is reached.

    :param metric: metric name (appended to prefix and daemon name)
    :param value: metric value
    :param timestamp: epoch for the data point (None means "now")
    :return: None
    """
    # Manage Graphite part
    if not self.statsd_enabled or not self.carbon:
        return
    when = int(time.time()) if timestamp is None else timestamp
    full_name = '.'.join([self.statsd_prefix, self.name, metric])
    self.my_metrics.append((full_name, (when, value)))
    if self.metrics_count >= self.metrics_flush_count:
        self.carbon.add_data_list(self.my_metrics)
        self.flush()
Inner store a new metric and flush to Graphite if the flush threshold is reached .
130
18
19,320
def counter(self, key, value, timestamp=None):
    """Set a counter value.

    Updates the inner (min, max, count, sum) statistics for *key*, then
    reports the value to every enabled backend: the StatsD socket, the
    Graphite/carbon queue and the stats file. Optionally builds a brok.

    :param key: counter name
    :param value: counter value
    :param timestamp: epoch used for Graphite/file/brok (None means "now")
    :return: an alignak_stat Brok when broks are enabled, else None
    """
    _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0))
    count += 1
    _sum += value
    if _min is None or value < _min:
        _min = value
    if _max is None or value > _max:
        _max = value
    self.stats[key] = (_min, _max, count, _sum)
    # Manage local statsd part
    if self.statsd_enabled and self.statsd_sock:
        # counter value is sent as-is, tagged with the 'c' (counter) unit
        packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key, value)
        packet = packet.encode('utf-8')
        try:
            self.statsd_sock.sendto(packet, self.statsd_addr)
        except (socket.error, socket.gaierror):
            pass
            # cannot send? ok not a huge problem here and we cannot
            # log because it will be far too verbose :p
    # Manage Graphite part
    if self.statsd_enabled and self.carbon:
        self.send_to_graphite(key, value, timestamp=timestamp)
    # Manage file part
    if self.statsd_enabled and self.file_d:
        if timestamp is None:
            timestamp = int(time.time())
        # build the line from the configured template
        packet = self.line_fmt
        if not self.date_fmt:
            date = "%s" % timestamp
        else:
            date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt)
        packet = packet.replace("#date#", date)
        packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix,
                                                           self.name, key))
        packet = packet.replace("#value#", '%d' % value)
        packet = packet.replace("#uom#", 'c')
        try:
            self.file_d.write(packet)
        except IOError:
            logger.warning("Could not write to the file: %s", packet)
    if self.broks_enabled:
        logger.debug("alignak stat brok: %s = %s", key, value)
        if timestamp is None:
            timestamp = int(time.time())
        return Brok({'type': 'alignak_stat',
                     'data': {'ts': timestamp,
                              'type': 'counter',
                              'metric': '%s.%s.%s' % (self.statsd_prefix,
                                                      self.name, key),
                              'value': value,
                              'uom': 'c'}})
    return None
Set a counter value
610
4
19,321
def get_managed_configurations(self):
    """Get the configurations managed by this satellite.

    :return: dict keyed by scheduler instance_id, each value carrying the
        hash, push_flavor and managed_conf_id of the managed part
    """
    res = {
        link.instance_id: {
            'hash': link.hash,
            'push_flavor': link.push_flavor,
            'managed_conf_id': link.managed_conf_id,
        }
        for link in list(self.schedulers.values())
    }
    logger.debug("Get managed configuration: %s", res)
    return res
Get the configurations managed by this satellite
111
7
19,322
def get_scheduler_from_hostname(self, host_name):
    """Get scheduler linked to the given host_name.

    :param host_name: host name to look up
    :return: the scheduler link managing the host, or None when unknown
    """
    scheduler_uuid = self.hosts_schedulers.get(host_name)
    return self.schedulers.get(scheduler_uuid)
Get scheduler linked to the given host_name
59
10
19,323
def get_external_commands(self):
    """Get the pending external commands and clear the inner list.

    :return: list of external commands accumulated since the last call
    """
    pending = self.external_commands
    logger.debug("Get and clear external commands list: %s", pending)
    self.external_commands = []
    return pending
Get the external commands
48
4
19,324
def get_results_from_passive(self, scheduler_instance_id):
    """Get executed actions results from a passive satellite for a specific scheduler.

    :param scheduler_instance_id: instance id of the scheduler asking for results
    :return: list of results (empty when unconfigured or scheduler unknown)
    """
    # Do I know this scheduler?
    # logger.info("My schedulers: %s %s", self.schedulers, type(self.schedulers))
    if not self.schedulers:
        # Probably not yet configured ...
        logger.debug("I do not have any scheduler: %s", self.schedulers)
        return []
    scheduler_link = None
    for link in list(self.schedulers.values()):
        if scheduler_instance_id == link.instance_id:
            scheduler_link = link
            break
    else:
        # for/else: reached only when the loop did not break
        logger.warning("I do not know this scheduler: %s", scheduler_instance_id)
        return []
    logger.debug("Get results for the scheduler: %s", scheduler_instance_id)
    # take the homerun dict and reset it on the link in one assignment
    ret, scheduler_link.wait_homerun = scheduler_link.wait_homerun, {}
    # NOTE: the conditional applies to the whole "%"-formatted string, so the
    # single debug argument is either the formatted results or the fallback text
    logger.debug("Results: %s" % (list(ret.values())) if ret else "No results available")
    return list(ret.values())
Get executed actions results from a passive satellite for a specific scheduler
259
13
19,325
def get_events(self):
    """Get event list from satellite: return a shallow copy of the pending
    events and clear the inner list.

    :return: the events accumulated since the last call
    """
    pending = copy.copy(self.events)
    del self.events[:]
    return pending
Get event list from satellite
27
5
19,326
def manage_action_return(self, action):
    """Manage action return from Workers.

    We just put the result into the wait_homerun queue of the corresponding
    scheduler and we clean unused properties like my_scheduler / my_worker.

    :param action: the returned action (check / notification / eventhandler)
    :return: None
    """
    # Maybe our workers send us something else than an action
    # if so, just add this in other queues and return
    # todo: test a class instance
    if action.__class__.my_type not in ['check', 'notification', 'eventhandler']:
        self.add(action)
        return
    # Ok, it's a result. Get the concerned scheduler uuid
    scheduler_uuid = action.my_scheduler
    logger.debug("Got action return: %s / %s", scheduler_uuid, action.uuid)
    try:
        # Now that we know where to put the action result, we do not need any
        # reference to the scheduler nor the worker
        del action.my_scheduler
        del action.my_worker
    except AttributeError:  # pragma: no cover, simple protection
        logger.error("AttributeError Got action return: %s / %s", scheduler_uuid, action)
    # And we remove it from the actions queue of the scheduler too
    try:
        del self.schedulers[scheduler_uuid].actions[action.uuid]
    except KeyError as exp:
        logger.error("KeyError del scheduler action: %s / %s - %s",
                     scheduler_uuid, action.uuid, str(exp))
    # We tag it as "return wanted", and move it in the wait return queue
    try:
        self.schedulers[scheduler_uuid].wait_homerun[action.uuid] = action
    except KeyError as exp:  # pragma: no cover, simple protection
        # Bug fix: the original used a bare "except KeyError:" here, so
        # 'exp' was unbound and logging it raised a NameError.
        logger.error("KeyError Add home run action: %s / %s - %s",
                     scheduler_uuid, action.uuid, str(exp))
Manage action return from Workers We just put them into the corresponding sched and we clean unused properties like my_scheduler
392
25
19,327
def create_and_launch_worker ( self , module_name = 'fork' ) : logger . info ( "Allocating new '%s' worker..." , module_name ) # If we are in the fork module, we do not specify a target target = None __warned = [ ] if module_name == 'fork' : target = None else : for module in self . modules_manager . instances : # First, see if the module name matches... if module . get_name ( ) == module_name : # ... and then if is a 'worker' module one or not if not module . properties . get ( 'worker_capable' , False ) : raise NotWorkerMod target = module . work if target is None : if module_name not in __warned : logger . warning ( "No target found for %s, NOT creating a worker for it..." , module_name ) __warned . append ( module_name ) return # We give to the Worker the instance name of the daemon (eg. poller-master) # and not the daemon type (poller) queue = Queue ( ) worker = Worker ( module_name , queue , self . returns_queue , self . processes_by_worker , max_plugins_output_length = self . max_plugins_output_length , target = target , loaded_into = self . name ) # worker.module_name = module_name # save this worker self . workers [ worker . get_id ( ) ] = worker # And save the Queue of this worker, with key = worker id # self.q_by_mod[module_name][worker.uuid] = queue self . q_by_mod [ module_name ] [ worker . get_id ( ) ] = queue # Ok, all is good. Start it! worker . start ( ) logger . info ( "Started '%s' worker: %s (pid=%d)" , module_name , worker . get_id ( ) , worker . get_pid ( ) )
Create and launch a new worker and put it into self . workers It can be mortal or not
436
19
19,328
def do_stop_workers(self):
    """Stop all workers: terminate then join each worker process.

    :return: None
    """
    logger.info("Stopping all workers (%d)", len(self.workers))
    for worker in list(self.workers.values()):
        try:
            logger.info(" - stopping '%s'", worker.get_id())
            worker.terminate()
            worker.join(timeout=1)
        except (AttributeError, AssertionError):
            # an already dead worker, or we are running inside a worker
            continue
        except Exception as exp:  # pylint: disable=broad-except
            logger.error("exception: %s", str(exp))
            continue
        logger.info(" - stopped")
Stop all workers
142
3
19,329
def get_broks(self):
    """Get brok list from satellite: return a shallow copy of the pending
    broks and clear the inner list.

    :return: the broks accumulated since the last call
    """
    pending = copy.copy(self.broks)
    del self.broks[:]
    return pending
Get brok list from satellite
30
6
19,330
def check_and_del_zombie_workers(self):  # pragma: no cover, not with unit tests...
    # pylint: disable= not-callable
    """Check if workers are fine and forget the dead ones.

    The actions a dead worker was managing are re-queued to another worker.

    :return: None
    """
    # Active children make a join with everyone, useful :)
    # active_children()
    for p in active_children():
        logger.debug("got child: %s", p)
    w_to_del = []
    for worker in list(self.workers.values()):
        # If a worker goes down and we did not ask him, it's not
        # good: we can think that we have a worker and it's not True
        # So we del it
        logger.debug("checking if worker %s (pid=%d) is alive",
                     worker.get_id(), worker.get_pid())
        if not self.interrupted and not worker.is_alive():
            logger.warning("The worker %s (pid=%d) went down unexpectedly!",
                           worker.get_id(), worker.get_pid())
            # Terminate immediately
            worker.terminate()
            worker.join(timeout=1)
            w_to_del.append(worker.get_id())
    # OK, now really del workers from queues
    # And requeue the actions it was managed
    for worker_id in w_to_del:
        worker = self.workers[worker_id]
        # Del the queue of the module queue
        del self.q_by_mod[worker.module_name][worker.get_id()]
        for scheduler_uuid in self.schedulers:
            sched = self.schedulers[scheduler_uuid]
            for act in list(sched.actions.values()):
                if act.status == ACT_STATUS_QUEUED and act.my_worker == worker_id:
                    # Got a check that will NEVER return if we do not restart it
                    self.assign_to_a_queue(act)
        # So now we can really forgot it
        del self.workers[worker_id]
Check if worker are fine and kill them if not . Dispatch the actions in the worker to another one
438
20
19,331
def adjust_worker_number_by_load(self):
    """Try to create the minimum workers specified in the configuration.

    Modules raising NotWorkerMod are removed from the worker queues dict.

    :return: None
    """
    if self.interrupted:
        logger.debug("Trying to adjust worker number. Ignoring because we are stopping.")
        return
    to_del = []
    logger.debug("checking worker count."
                 " Currently: %d workers, min per module : %d, max per module : %d",
                 len(self.workers), self.min_workers, self.max_workers)
    # I want at least min_workers by module then if I can, I add worker for load balancing
    for mod in self.q_by_mod:
        # At least min_workers
        todo = max(0, self.min_workers - len(self.q_by_mod[mod]))
        for _ in range(todo):
            try:
                self.create_and_launch_worker(module_name=mod)
            # Maybe this modules is not a true worker one.
            # if so, just delete if from q_by_mod
            except NotWorkerMod:
                to_del.append(mod)
                break
    for mod in to_del:
        logger.warning("The module %s is not a worker one, "
                       "I remove it from the worker list.", mod)
        del self.q_by_mod[mod]
Try to create the minimum workers specified in the configuration
271
10
19,332
def _get_queue_for_the_action ( self , action ) : # get the module name, if not, take fork mod = getattr ( action , 'module_type' , 'fork' ) queues = list ( self . q_by_mod [ mod ] . items ( ) ) # Maybe there is no more queue, it's very bad! if not queues : return ( 0 , None ) # if not get action round robin index to get action queue based # on the action id self . rr_qid = ( self . rr_qid + 1 ) % len ( queues ) ( worker_id , queue ) = queues [ self . rr_qid ] # return the id of the worker (i), and its queue return ( worker_id , queue )
Find action queue for the action depending on the module . The id is found with action modulo on action id
169
22
19,333
def add_actions ( self , actions_list , scheduler_instance_id ) : # We check for new check in each schedulers and put the result in new_checks scheduler_link = None for scheduler_id in self . schedulers : logger . debug ( "Trying to add an action, scheduler: %s" , self . schedulers [ scheduler_id ] ) if scheduler_instance_id == self . schedulers [ scheduler_id ] . instance_id : scheduler_link = self . schedulers [ scheduler_id ] break else : logger . error ( "Trying to add actions from an unknwown scheduler: %s" , scheduler_instance_id ) return if not scheduler_link : logger . error ( "Trying to add actions, but scheduler link is not found for: %s, " "actions: %s" , scheduler_instance_id , actions_list ) return logger . debug ( "Found scheduler link: %s" , scheduler_link ) for action in actions_list : # First we look if the action is identified uuid = getattr ( action , 'uuid' , None ) if uuid is None : try : action = unserialize ( action , no_load = True ) uuid = action . uuid except AlignakClassLookupException : logger . error ( 'Cannot un-serialize action: %s' , action ) continue # If we already have this action, we are already working for it! if uuid in scheduler_link . actions : continue # Action is attached to a scheduler action . my_scheduler = scheduler_link . uuid scheduler_link . actions [ action . uuid ] = action self . assign_to_a_queue ( action )
Add a list of actions to the satellite queues
395
9
19,334
def assign_to_a_queue ( self , action ) : ( worker_id , queue ) = self . _get_queue_for_the_action ( action ) if not worker_id : return # Tag the action as "in the worker i" action . my_worker = worker_id action . status = ACT_STATUS_QUEUED msg = Message ( _type = 'Do' , data = action , source = self . name ) logger . debug ( "Queuing message: %s" , msg ) queue . put_nowait ( msg ) logger . debug ( "Queued" )
Take an action and put it to a worker actions queue
131
11
19,335
def get_new_actions ( self ) : try : _t0 = time . time ( ) self . do_get_new_actions ( ) statsmgr . timer ( 'actions.got.time' , time . time ( ) - _t0 ) except RuntimeError : logger . error ( "Exception like issue #1007" )
Wrapper function for do_get_new_actions For stats purpose
72
14
19,336
def main ( self ) : try : # Start the daemon mode if not self . do_daemon_init_and_start ( ) : self . exit_on_error ( message = "Daemon initialization error" , exit_code = 3 ) self . do_post_daemon_init ( ) # We wait for initial conf self . wait_for_initial_conf ( ) if self . new_conf : # Setup the received configuration self . setup_new_conf ( ) # Allocate Mortal Threads self . adjust_worker_number_by_load ( ) # Now main loop self . do_main_loop ( ) logger . info ( "Exited from the main loop." ) self . request_stop ( ) except Exception : # pragma: no cover, this should never happen indeed ;) self . exit_on_exception ( traceback . format_exc ( ) ) raise
Main satellite function . Do init and then mainloop
190
10
19,337
def check_activation ( self , contacts ) : now = time . time ( ) was_is_in_effect = self . is_in_effect self . is_in_effect = ( self . start_time <= now <= self . end_time ) # Raise a log entry when we get in the downtime if not was_is_in_effect and self . is_in_effect : self . enter ( contacts ) # Same for exit purpose if was_is_in_effect and not self . is_in_effect : self . exit ( contacts )
Enter or exit downtime if necessary
119
6
19,338
def split_semicolon ( line , maxsplit = None ) : # Split on ';' character split_line = line . split ( ';' ) split_line_size = len ( split_line ) # if maxsplit is not specified, we set it to the number of part if maxsplit is None or maxsplit < 0 : maxsplit = split_line_size # Join parts to the next one, if ends with a '\' # because we mustn't split if the semicolon is escaped i = 0 while i < split_line_size - 1 : # for each part, check if its ends with a '\' ends = split_line [ i ] . endswith ( '\\' ) if ends : # remove the last character '\' split_line [ i ] = split_line [ i ] [ : - 1 ] # append the next part to the current if it is not the last and the current # ends with '\' or if there is more than maxsplit parts if ( ends or i >= maxsplit ) and i < split_line_size - 1 : split_line [ i ] = ";" . join ( [ split_line [ i ] , split_line [ i + 1 ] ] ) # delete the next part del split_line [ i + 1 ] split_line_size -= 1 # increase i only if we don't have append because after append the new # string can end with '\' else : i += 1 return split_line
r Split a line on semicolons characters but not on the escaped semicolons
314
17
19,339
def format_t_into_dhms_format ( timestamp ) : mins , timestamp = divmod ( timestamp , 60 ) hour , mins = divmod ( mins , 60 ) day , hour = divmod ( hour , 24 ) return '%sd %sh %sm %ss' % ( day , hour , mins , timestamp )
Convert an amount of second into day hour min and sec
69
12
19,340
def merge_periods ( data ) : # sort by start date newdata = sorted ( data , key = lambda drange : drange [ 0 ] ) end = 0 for period in newdata : if period [ 0 ] != end and period [ 0 ] != ( end - 1 ) : end = period [ 1 ] # dat = np.array(newdata) dat = newdata new_intervals = [ ] cur_start = None cur_end = None for ( dt_start , dt_end ) in dat : if cur_end is None : cur_start = dt_start cur_end = dt_end continue else : if cur_end >= dt_start : # merge, keep existing cur_start, extend cur_end cur_end = dt_end else : # new interval, save previous and reset current to this new_intervals . append ( ( cur_start , cur_end ) ) cur_start = dt_start cur_end = dt_end # make sure final interval is saved new_intervals . append ( ( cur_start , cur_end ) ) return new_intervals
Merge periods to have better continous periods . Like 350 - 450 400 - 600 = > 350 - 600
245
22
19,341
def list_split ( val , split_on_comma = True ) : if not split_on_comma : return val new_val = [ ] for subval in val : # This may happen when re-serializing if isinstance ( subval , list ) : continue new_val . extend ( subval . split ( ',' ) ) return new_val
Try to split each member of a list with comma separator . If we don t have to split just return val
79
23
19,342
def to_best_int_float ( val ) : integer = int ( float ( val ) ) flt = float ( val ) # If the f is a .0 value, # best match is int if integer == flt : return integer return flt
Get best type for value between int and float
54
9
19,343
def dict_to_serialized_dict ( ref , the_dict ) : result = { } for elt in list ( the_dict . values ( ) ) : if not getattr ( elt , 'serialize' , None ) : continue result [ elt . uuid ] = elt . serialize ( ) return result
Serialize the list of elements to a dictionary
71
9
19,344
def list_to_serialized ( ref , the_list ) : result = [ ] for elt in the_list : if not getattr ( elt , 'serialize' , None ) : continue result . append ( elt . serialize ( ) ) return result
Serialize the list of elements
58
6
19,345
def to_hostnames_list ( ref , tab ) : # pragma: no cover, to be deprecated? res = [ ] for host in tab : if hasattr ( host , 'host_name' ) : res . append ( host . host_name ) return res
Convert Host list into a list of host_name
58
11
19,346
def sort_by_number_values ( x00 , y00 ) : # pragma: no cover, looks like not used! if len ( x00 ) < len ( y00 ) : return 1 if len ( x00 ) > len ( y00 ) : return - 1 # So is equal return 0
Compare x00 y00 base on number of values
65
10
19,347
def strip_and_uniq ( tab ) : _list = [ ] for elt in tab : val = elt . strip ( ) if val and val not in _list : _list . append ( val ) return _list
Strip every element of a list and keep a list of ordered unique values
49
15
19,348
def filter_host_by_name ( name ) : def inner_filter ( items ) : """Inner filter for host. Accept if host_name == name""" host = items [ "host" ] if host is None : return False return host . host_name == name return inner_filter
Filter for host Filter on name
62
6
19,349
def filter_host_by_regex ( regex ) : host_re = re . compile ( regex ) def inner_filter ( items ) : """Inner filter for host. Accept if regex match host_name""" host = items [ "host" ] if host is None : return False return host_re . match ( host . host_name ) is not None return inner_filter
Filter for host Filter on regex
81
6
19,350
def filter_host_by_group ( group ) : def inner_filter ( items ) : """Inner filter for host. Accept if group in host.hostgroups""" host = items [ "host" ] if host is None : return False return group in [ items [ "hostgroups" ] [ g ] . hostgroup_name for g in host . hostgroups ] return inner_filter
Filter for host Filter on group
82
6
19,351
def filter_host_by_tag ( tpl ) : def inner_filter ( items ) : """Inner filter for host. Accept if tag in host.tags""" host = items [ "host" ] if host is None : return False return tpl in [ t . strip ( ) for t in host . tags ] return inner_filter
Filter for host Filter on tag
72
6
19,352
def filter_service_by_name ( name ) : def inner_filter ( items ) : """Inner filter for service. Accept if service_description == name""" service = items [ "service" ] if service is None : return False return service . service_description == name return inner_filter
Filter for service Filter on name
62
6
19,353
def filter_service_by_regex_name ( regex ) : host_re = re . compile ( regex ) def inner_filter ( items ) : """Inner filter for service. Accept if regex match service_description""" service = items [ "service" ] if service is None : return False return host_re . match ( service . service_description ) is not None return inner_filter
Filter for service Filter on regex
83
6
19,354
def filter_service_by_host_name ( host_name ) : def inner_filter ( items ) : """Inner filter for service. Accept if service.host.host_name == host_name""" service = items [ "service" ] host = items [ "hosts" ] [ service . host ] if service is None or host is None : return False return host . host_name == host_name return inner_filter
Filter for service Filter on host_name
92
8
19,355
def filter_service_by_regex_host_name ( regex ) : host_re = re . compile ( regex ) def inner_filter ( items ) : """Inner filter for service. Accept if regex match service.host.host_name""" service = items [ "service" ] host = items [ "hosts" ] [ service . host ] if service is None or host is None : return False return host_re . match ( host . host_name ) is not None return inner_filter
Filter for service Filter on regex host_name
107
9
19,356
def filter_service_by_hostgroup_name ( group ) : def inner_filter ( items ) : """Inner filter for service. Accept if hostgroup in service.host.hostgroups""" service = items [ "service" ] host = items [ "hosts" ] [ service . host ] if service is None or host is None : return False return group in [ items [ "hostgroups" ] [ g ] . hostgroup_name for g in host . hostgroups ] return inner_filter
Filter for service Filter on hostgroup
106
7
19,357
def filter_service_by_host_tag_name ( tpl ) : def inner_filter ( items ) : """Inner filter for service. Accept if tpl in service.host.tags""" service = items [ "service" ] host = items [ "hosts" ] [ service . host ] if service is None or host is None : return False return tpl in [ t . strip ( ) for t in host . tags ] return inner_filter
Filter for service Filter on tag
97
6
19,358
def filter_service_by_servicegroup_name ( group ) : def inner_filter ( items ) : """Inner filter for service. Accept if group in service.servicegroups""" service = items [ "service" ] if service is None : return False return group in [ items [ "servicegroups" ] [ g ] . servicegroup_name for g in service . servicegroups ] return inner_filter
Filter for service Filter on group
85
6
19,359
def filter_host_by_bp_rule_label ( label ) : def inner_filter ( items ) : """Inner filter for host. Accept if label in host.labels""" host = items [ "host" ] if host is None : return False return label in host . labels return inner_filter
Filter for host Filter on label
65
6
19,360
def manage_signal ( self , sig , frame ) : # pylint: disable=unused-argument logger . info ( "worker '%s' (pid=%d) received a signal: %s" , self . _id , os . getpid ( ) , SIGNALS_TO_NAMES_DICT [ sig ] ) # Do not do anything... our master daemon is managing our termination. self . interrupted = True
Manage signals caught by the process but I do not do anything ... our master daemon is managing our termination .
93
22
19,361
def check_for_system_time_change ( self ) : # pragma: no cover, hardly testable with unit tests... now = time . time ( ) difference = now - self . t_each_loop # Now set the new value for the tick loop self . t_each_loop = now # If we have more than 15 min time change, we need to compensate it # todo: confirm that 15 minutes is a good choice... if abs ( difference ) > 900 : # pragma: no cover, not with unit tests... return difference return 0
Check if our system time change . If so change our
118
11
19,362
def work ( self , actions_queue , returns_queue , control_queue = None ) : # pragma: no cover try : logger . info ( "[%s] (pid=%d) starting my job..." , self . _id , os . getpid ( ) ) self . do_work ( actions_queue , returns_queue , control_queue ) logger . info ( "[%s] (pid=%d) stopped" , self . _id , os . getpid ( ) ) except ActionError as exp : logger . error ( "[%s] exited with an ActionError exception : %s" , self . _id , str ( exp ) ) logger . exception ( exp ) raise # Catch any exception, log the exception and exit anyway except Exception as exp : # pragma: no cover, this should never happen indeed ;) logger . error ( "[%s] exited with an unmanaged exception : %s" , self . _id , str ( exp ) ) logger . exception ( exp ) raise
Wrapper function for do_work in order to catch the exception to see the real work look at do_work
213
23
19,363
def read_requirements ( filename = 'requirements.txt' ) : # allow for some leeway with the argument if not filename . startswith ( 'requirements' ) : filename = 'requirements-' + filename if not os . path . splitext ( filename ) [ 1 ] : filename += '.txt' # no extension, add default def valid_line ( line ) : line = line . strip ( ) return line and not any ( line . startswith ( p ) for p in ( '#' , '-' ) ) def extract_requirement ( line ) : egg_eq = '#egg=' if egg_eq in line : _ , requirement = line . split ( egg_eq , 1 ) return requirement return line with open ( filename ) as f : lines = f . readlines ( ) return list ( map ( extract_requirement , filter ( valid_line , lines ) ) )
Reads the list of requirements from given file .
193
10
19,364
def init_running_properties ( self ) : for prop , entry in list ( self . __class__ . running_properties . items ( ) ) : val = entry . default # Make a copy of the value for complex iterable types # As such, each instance has its own copy and not a simple reference setattr ( self , prop , copy ( val ) if isinstance ( val , ( set , list , dict ) ) else val )
Initialize the running_properties . Each instance have own property .
92
13
19,365
def copy ( self ) : # New dummy item with it's own running properties copied_item = self . __class__ ( { } ) # Now, copy the properties for prop in self . __class__ . properties : if prop in [ 'uuid' ] : continue val = getattr ( self , prop , None ) if val is not None : setattr ( copied_item , prop , val ) # Also copy some running properties # The custom variables if hasattr ( self , "customs" ) : copied_item . customs = copy ( self . customs ) # And tags/templates if hasattr ( self , "tags" ) : copied_item . tags = copy ( self . tags ) if hasattr ( self , "templates" ) : copied_item . templates = copy ( self . templates ) return copied_item
Get a copy of this item but with a new id
176
11
19,366
def clean ( self ) : for prop in ( 'imported_from' , 'use' , 'plus' , 'templates' , 'register' ) : try : delattr ( self , prop ) except AttributeError : pass for prop in ( 'configuration_warnings' , 'configuration_errors' ) : try : if getattr ( self , prop , None ) is not None and not getattr ( self , prop ) : delattr ( self , prop ) except AttributeError : pass
Clean properties only needed for initialization and configuration
108
8
19,367
def load_global_conf ( cls , global_configuration ) : logger . debug ( "Propagate global parameter for %s:" , cls ) for prop , entry in global_configuration . properties . items ( ) : # If some global managed configuration properties have a class_inherit clause, if not entry . managed or not getattr ( entry , 'class_inherit' ) : continue for ( cls_dest , change_name ) in entry . class_inherit : if cls_dest == cls : # ok, we've got something to get value = getattr ( global_configuration , prop ) logger . debug ( "- global parameter %s=%s -> %s=%s" , prop , getattr ( global_configuration , prop ) , change_name , value ) if change_name is None : setattr ( cls , prop , value ) else : setattr ( cls , change_name , value )
Apply global Alignak configuration .
207
7
19,368
def get_templates ( self ) : use = getattr ( self , 'use' , '' ) if isinstance ( use , list ) : return [ n . strip ( ) for n in use if n . strip ( ) ] return [ n . strip ( ) for n in use . split ( ',' ) if n . strip ( ) ]
Get list of templates this object use
73
7
19,369
def get_all_plus_and_delete ( self ) : res = { } props = list ( self . plus . keys ( ) ) # we delete entries, so no for ... in ... for prop in props : res [ prop ] = self . get_plus_and_delete ( prop ) return res
Get all self . plus items of list . We copy it delete the original and return the copy list
65
20
19,370
def add_error ( self , txt ) : self . configuration_errors . append ( txt ) self . conf_is_correct = False
Add a message in the configuration errors list so we can print them all in one place
31
17
19,371
def is_correct ( self ) : state = self . conf_is_correct properties = self . __class__ . properties for prop , entry in list ( properties . items ( ) ) : if hasattr ( self , 'special_properties' ) and prop in getattr ( self , 'special_properties' ) : continue if not hasattr ( self , prop ) and entry . required : msg = "[%s::%s] %s property is missing" % ( self . my_type , self . get_name ( ) , prop ) self . add_error ( msg ) state = state & self . conf_is_correct return state
Check if this object is correct
136
6
19,372
def old_properties_names_to_new ( self ) : old_properties = getattr ( self . __class__ , "old_properties" , { } ) for old_name , new_name in list ( old_properties . items ( ) ) : # Ok, if we got old_name and NO new name, # we switch the name if hasattr ( self , old_name ) and not hasattr ( self , new_name ) : value = getattr ( self , old_name ) setattr ( self , new_name , value ) delattr ( self , old_name )
This function is used by service and hosts to transform Nagios2 parameters to Nagios3 ones like normal_check_interval to check_interval . There is a old_parameters tab in Classes that give such modifications to do .
128
49
19,373
def get_raw_import_values ( self ) : # pragma: no cover, never used res = { } properties = list ( self . __class__ . properties . keys ( ) ) # Register is not by default in the properties if 'register' not in properties : properties . append ( 'register' ) for prop in properties : if hasattr ( self , prop ) : val = getattr ( self , prop ) res [ prop ] = val return res
Get properties = > values of this object
97
8
19,374
def del_downtime ( self , downtime_id ) : if downtime_id in self . downtimes : self . downtimes [ downtime_id ] . can_be_deleted = True del self . downtimes [ downtime_id ]
Delete a downtime in this object
52
6
19,375
def get_property_value_for_brok ( self , prop , tab ) : entry = tab [ prop ] # Get the current value, or the default if need value = getattr ( self , prop , entry . default ) # Apply brok_transformation if need # Look if we must preprocess the value first pre_op = entry . brok_transformation if pre_op is not None : value = pre_op ( self , value ) return value
Get the property of an object and brok_transformation if needed and return the value
99
18
19,376
def fill_data_brok_from ( self , data , brok_type ) : cls = self . __class__ # Configuration properties for prop , entry in list ( cls . properties . items ( ) ) : # Is this property intended for broking? if brok_type in entry . fill_brok : data [ prop ] = self . get_property_value_for_brok ( prop , cls . properties ) # And the running properties if hasattr ( cls , 'running_properties' ) : # We've got prop in running_properties too for prop , entry in list ( cls . running_properties . items ( ) ) : # if 'fill_brok' in cls.running_properties[prop]: if brok_type in entry . fill_brok : data [ prop ] = self . get_property_value_for_brok ( prop , cls . running_properties )
Add properties to data parameter with properties of this object when brok_type parameter is defined in fill_brok of these properties
201
26
19,377
def get_initial_status_brok ( self , extra = None ) : data = { 'uuid' : self . uuid } self . fill_data_brok_from ( data , 'full_status' ) if extra : data . update ( extra ) return Brok ( { 'type' : 'initial_' + self . my_type + '_status' , 'data' : data } )
Create an initial status brok
90
6
19,378
def get_update_status_brok ( self ) : data = { 'uuid' : self . uuid } self . fill_data_brok_from ( data , 'full_status' ) return Brok ( { 'type' : 'update_' + self . my_type + '_status' , 'data' : data } )
Create an update item brok
77
6
19,379
def get_check_result_brok ( self ) : data = { 'uuid' : self . uuid } self . fill_data_brok_from ( data , 'check_result' ) return Brok ( { 'type' : self . my_type + '_check_result' , 'data' : data } )
Create check_result brok
74
6
19,380
def dump ( self , dump_file_name = None ) : # pragma: no cover, never called # pylint: disable=unused-argument dump = { } for prop in self . properties : if not hasattr ( self , prop ) : continue attr = getattr ( self , prop ) if isinstance ( attr , list ) and attr and isinstance ( attr [ 0 ] , Item ) : dump [ prop ] = [ i . dump ( ) for i in attr ] elif isinstance ( attr , Item ) : dump [ prop ] = attr . dump ( ) elif attr : dump [ prop ] = getattr ( self , prop ) return dump
Dump Item object properties
149
5
19,381
def add_items ( self , items , index_items ) : count_templates = 0 count_items = 0 generated_items = [ ] for item in items : if item . is_tpl ( ) : self . add_template ( item ) count_templates = count_templates + 1 else : new_items = self . add_item ( item , index_items ) count_items = count_items + max ( 1 , len ( new_items ) ) if new_items : generated_items . extend ( new_items ) if count_templates : logger . info ( ' indexed %d template(s)' , count_templates ) if count_items : logger . info ( ' created %d %s(s).' , count_items , self . inner_class . my_type )
Add items to template if is template else add in item list
175
12
19,382
def manage_conflict ( self , item , name ) : if item . is_tpl ( ) : existing = self . name_to_template [ name ] else : existing = self . name_to_item [ name ] if existing == item : return item existing_prio = getattr ( existing , "definition_order" , existing . properties [ "definition_order" ] . default ) item_prio = getattr ( item , "definition_order" , item . properties [ "definition_order" ] . default ) if existing_prio < item_prio : # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio : # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else : # Don't know which one to keep, lastly defined has precedence objcls = getattr ( self . inner_class , "my_type" , "[unknown]" ) mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " "You may manually set the definition_order parameter to avoid this message." % ( objcls , name , item . imported_from , existing . imported_from ) item . configuration_warnings . append ( mesg ) if item . is_tpl ( ) : self . remove_template ( existing ) else : self . remove_item ( existing ) return item
Checks if an object holding the same name already exists in the index .
318
15
19,383
def add_template ( self , tpl ) : tpl = self . index_template ( tpl ) self . templates [ tpl . uuid ] = tpl
Add and index a template into the templates container .
36
10
19,384
def index_template ( self , tpl ) : objcls = self . inner_class . my_type name = getattr ( tpl , 'name' , '' ) if not name : mesg = "a %s template has been defined without name, from: %s" % ( objcls , tpl . imported_from ) tpl . add_error ( mesg ) elif name in self . name_to_template : tpl = self . manage_conflict ( tpl , name ) self . name_to_template [ name ] = tpl logger . debug ( "Indexed a %s template: %s, uses: %s" , tpl . my_type , name , getattr ( tpl , 'use' , 'Nothing' ) ) return tpl
Indexes a template by name into the name_to_template dictionary .
172
15
19,385
def remove_template ( self , tpl ) : try : del self . templates [ tpl . uuid ] except KeyError : # pragma: no cover, simple protection pass self . unindex_template ( tpl )
Removes and un - index a template from the templates container .
48
13
19,386
def unindex_template ( self , tpl ) : name = getattr ( tpl , 'name' , '' ) try : del self . name_to_template [ name ] except KeyError : # pragma: no cover, simple protection pass
Unindex a template from the templates container .
53
9
19,387
def add_item ( self , item , index = True ) : # pylint: disable=too-many-branches, too-many-locals, too-many-nested-blocks name_property = getattr ( self . __class__ , "name_property" , None ) # Check if some hosts are to be self-generated... generated_hosts = [ ] if name_property : name = getattr ( item , name_property , None ) if name and '[' in name and ']' in name : # We can create several objects from the same configuration! pattern = name [ name . find ( "[" ) + 1 : name . find ( "]" ) ] if '-' in pattern : logger . debug ( "Found an host with a patterned name: %s" , pattern ) # pattern is format-min-max # format is optional limits = pattern . split ( '-' ) fmt = "%d" min_v = 1 max_v = 1 if len ( limits ) == 3 : fmt = limits [ 2 ] new_name = name . replace ( '[%s-%s-%s]' % ( limits [ 0 ] , limits [ 1 ] , fmt ) , '***' ) else : new_name = name . replace ( '[%s-%s]' % ( limits [ 0 ] , limits [ 1 ] ) , '***' ) try : min_v = int ( limits [ 0 ] ) except ValueError : pass try : max_v = int ( limits [ 1 ] ) except ValueError : pass for idx in range ( min_v , max_v + 1 ) : logger . debug ( "- cloning host: %s" , new_name . replace ( '***' , fmt % idx ) ) new_host = deepcopy ( item ) new_host . uuid = get_a_new_object_id ( ) new_host . host_name = new_name . replace ( '***' , fmt % idx ) # Update some fields with the newly generated host name for prop in [ 'display_name' , 'alias' , 'notes' , 'notes_url' , 'action_url' ] : if getattr ( new_host , prop , None ) is None : continue value = getattr ( new_host , prop ) if '$HOSTNAME$' in value : setattr ( new_host , prop , value . replace ( '$HOSTNAME$' , new_host . host_name ) ) generated_hosts . append ( new_host ) if generated_hosts : for new_host in generated_hosts : if index is True : new_host = self . index_item ( new_host ) self . items [ new_host . uuid ] = new_host logger . info ( " cloned %d hosts from %s" , len ( generated_hosts ) , item . get_name ( ) ) else : if index is True and name_property : item = self . 
index_item ( item ) self . items [ item . uuid ] = item return generated_hosts
Add an item into our containers and index it depending on the index flag .
663
15
19,388
def old_properties_names_to_new ( self ) : # pragma: no cover, never called for i in itertools . chain ( iter ( list ( self . items . values ( ) ) ) , iter ( list ( self . templates . values ( ) ) ) ) : i . old_properties_names_to_new ( )
Convert old Nagios2 names to Nagios3 new names
74
13
19,389
def get_all_tags ( self , item ) : all_tags = item . get_templates ( ) for template_id in item . templates : template = self . templates [ template_id ] all_tags . append ( template . name ) all_tags . extend ( self . get_all_tags ( template ) ) return list ( set ( all_tags ) )
Get all tags of an item
80
6
19,390
def linkify_templates ( self ) : # First we create a list of all templates for i in itertools . chain ( iter ( list ( self . items . values ( ) ) ) , iter ( list ( self . templates . values ( ) ) ) ) : self . linkify_item_templates ( i ) for i in self : i . tags = self . get_all_tags ( i )
Link all templates and create the template graph too
88
9
19,391
def apply_partial_inheritance ( self , prop ) : for i in itertools . chain ( iter ( list ( self . items . values ( ) ) ) , iter ( list ( self . templates . values ( ) ) ) ) : self . get_property_by_inheritance ( i , prop ) # If a "null" attribute was inherited, delete it try : if getattr ( i , prop ) == 'null' : delattr ( i , prop ) except AttributeError : # pragma: no cover, simple protection pass
Define property with inheritance value of the property
117
9
19,392
def linkify_with_contacts ( self , contacts ) : for i in self : if not hasattr ( i , 'contacts' ) : continue links_list = strip_and_uniq ( i . contacts ) new = [ ] for name in [ e for e in links_list if e ] : contact = contacts . find_by_name ( name ) if contact is not None and contact . uuid not in new : new . append ( contact . uuid ) else : i . add_error ( "the contact '%s' defined for '%s' is unknown" % ( name , i . get_name ( ) ) ) i . contacts = new
Link items with contacts items
144
5
19,393
def linkify_with_escalations ( self , escalations ) : for i in self : if not hasattr ( i , 'escalations' ) : continue links_list = strip_and_uniq ( i . escalations ) new = [ ] for name in [ e for e in links_list if e ] : escalation = escalations . find_by_name ( name ) if escalation is not None and escalation . uuid not in new : new . append ( escalation . uuid ) else : i . add_error ( "the escalation '%s' defined for '%s' is unknown" % ( name , i . get_name ( ) ) ) i . escalations = new
Link with escalations
148
4
19,394
def explode_contact_groups_into_contacts ( item , contactgroups ) : if not hasattr ( item , 'contact_groups' ) : return # TODO : See if we can remove this if cgnames = '' if item . contact_groups : if isinstance ( item . contact_groups , list ) : cgnames = item . contact_groups else : cgnames = item . contact_groups . split ( ',' ) cgnames = strip_and_uniq ( cgnames ) for cgname in cgnames : contactgroup = contactgroups . find_by_name ( cgname ) if not contactgroup : item . add_error ( "The contact group '%s' defined on the %s '%s' do not exist" % ( cgname , item . __class__ . my_type , item . get_name ( ) ) ) continue cnames = contactgroups . get_members_of_group ( cgname ) # We add contacts into our contacts if cnames : if hasattr ( item , 'contacts' ) : # Fix #1054 - bad contact explosion # item.contacts.extend(cnames) item . contacts = item . contacts + cnames else : item . contacts = cnames
Get all contacts of contact_groups and put them in contacts container
273
13
19,395
def linkify_with_timeperiods ( self , timeperiods , prop ) : for i in self : if not hasattr ( i , prop ) : continue tpname = getattr ( i , prop ) . strip ( ) # some default values are '', so set None if not tpname : setattr ( i , prop , '' ) continue # Ok, get a real name, search for it timeperiod = timeperiods . find_by_name ( tpname ) if timeperiod is None : i . add_error ( "The %s of the %s '%s' named '%s' is unknown!" % ( prop , i . __class__ . my_type , i . get_name ( ) , tpname ) ) continue setattr ( i , prop , timeperiod . uuid )
Link items with timeperiods items
177
7
19,396
def linkify_with_checkmodulations ( self , checkmodulations ) : for i in self : if not hasattr ( i , 'checkmodulations' ) : continue links_list = strip_and_uniq ( i . checkmodulations ) new = [ ] for name in [ e for e in links_list if e ] : modulation = checkmodulations . find_by_name ( name ) if modulation is not None and modulation . uuid not in new : new . append ( modulation . uuid ) else : i . add_error ( "The checkmodulations of the %s '%s' named " "'%s' is unknown!" % ( i . __class__ . my_type , i . get_name ( ) , name ) ) i . checkmodulations = new
Link checkmodulation object
170
5
19,397
def linkify_s_by_module ( self , modules ) : for i in self : links_list = strip_and_uniq ( i . modules ) new = [ ] for name in [ e for e in links_list if e ] : module = modules . find_by_name ( name ) if module is not None and module . uuid not in new : new . append ( module ) else : i . add_error ( "Error: the module %s is unknown for %s" % ( name , i . get_name ( ) ) ) i . modules = new
Link modules to items
125
4
19,398
def evaluate_hostgroup_expression ( expr , hosts , hostgroups , look_in = 'hostgroups' ) : # Maybe exp is a list, like numerous hostgroups entries in a service, link them if isinstance ( expr , list ) : expr = '|' . join ( expr ) if look_in == 'hostgroups' : node = ComplexExpressionFactory ( look_in , hostgroups , hosts ) else : # templates node = ComplexExpressionFactory ( look_in , hosts , hosts ) expr_tree = node . eval_cor_pattern ( expr ) set_res = expr_tree . resolve_elements ( ) # HOOK DBG return list ( set_res )
Evaluate hostgroup expression
147
6
19,399
def get_hosts_from_hostgroups ( hgname , hostgroups ) : if not isinstance ( hgname , list ) : hgname = [ e . strip ( ) for e in hgname . split ( ',' ) if e . strip ( ) ] host_names = [ ] for name in hgname : hostgroup = hostgroups . find_by_name ( name ) if hostgroup is None : raise ValueError ( "the hostgroup '%s' is unknown" % hgname ) mbrs = [ h . strip ( ) for h in hostgroup . get_hosts ( ) if h . strip ( ) ] host_names . extend ( mbrs ) return host_names
Get hosts of hostgroups
156
5