idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
19,400
def linkify_hd_by_tp(self, timeperiods):
    """Replace each host dependency's dependency_period name by the uuid of
    the matching timeperiod (empty string when the name is unknown)."""
    for dependency in self:
        try:
            period = timeperiods.find_by_name(dependency.dependency_period)
            dependency.dependency_period = period.uuid if period else ''
        except AttributeError as exp:
            logger.error("[hostdependency] fail to linkify by timeperiod: %s", exp)
Replace dependency_period by a real object in host dependency
19,401
def merge_extinfo(host, extinfo):
    """Copy informational properties from an extinfo object onto a host,
    only where the host value is empty and the extinfo value is not."""
    candidates = ('notes', 'notes_url', 'icon_image', 'icon_image_alt',
                  'vrml_image', 'statusmap_image')
    for name in candidates:
        if getattr(host, name) == '' and getattr(extinfo, name) != '':
            setattr(host, name, getattr(extinfo, name))
Merge extended host information into a host
19,402
def set_proxy(self, proxy):
    """Configure the HTTP/HTTPS proxy of the underlying requests session.
    A falsy value leaves the current configuration untouched."""
    if not proxy:
        return
    logger.debug('PROXY SETTING PROXY %s', proxy)
    self._requests_con.proxies = {'http': proxy, 'https': proxy, }
Set HTTP proxy
19,403
def post(self, path, args, wait=False):
    """POST an HTTP request to a daemon.

    :param path: URI path appended to the daemon base URI
    :param args: dict of parameters; each value is serialized in-place before sending
    :param wait: forwarded to make_timeout to choose the request timeout
    :return: raw response content
    :raises HTTPClientTimeoutException: on requests timeout
    :raises HTTPClientConnectionException: on connection error
    :raises HTTPClientDataException: on non-200 HTTP status
    :raises HTTPClientException: on any other request error
    """
    uri = self.make_uri(path)
    timeout = self.make_timeout(wait)
    # Serialize every parameter value before posting (mutates args in place)
    for (key, value) in list(args.items()):
        args[key] = serialize(value, True)
    try:
        logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args)
        rsp = self._requests_con.post(uri, json=args, timeout=timeout,
                                      verify=self.strong_ssl)
        logger.debug("got: %d - %s", rsp.status_code, rsp.text)
        if rsp.status_code != 200:
            raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
        return rsp.content
    except (requests.Timeout, requests.ConnectTimeout):
        raise HTTPClientTimeoutException(timeout, uri)
    except requests.ConnectionError as exp:
        raise HTTPClientConnectionException(uri, exp.args[0])
    except Exception as exp:
        raise HTTPClientException('Request error to %s: %s' % (uri, exp))
POST an HTTP request to a daemon
19,404
def put(self, path, args, wait=False):
    """PUT an HTTP request to a daemon.

    :param path: URI path appended to the daemon base URI
    :param args: request body data (sent as form data, not JSON)
    :param wait: forwarded to make_timeout to choose the request timeout
    :return: raw response content
    :raises HTTPClientTimeoutException: on requests timeout
    :raises HTTPClientConnectionException: on connection error
    :raises HTTPClientDataException: on non-200 HTTP status
    :raises HTTPClientException: on any other request error
    """
    uri = self.make_uri(path)
    timeout = self.make_timeout(wait)
    try:
        logger.debug("put: %s, timeout: %s, params: %s", uri, timeout, args)
        rsp = self._requests_con.put(uri, args, timeout=timeout,
                                     verify=self.strong_ssl)
        logger.debug("got: %d - %s", rsp.status_code, rsp.text)
        if rsp.status_code != 200:
            raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
        return rsp.content
    except (requests.Timeout, requests.ConnectTimeout):
        raise HTTPClientTimeoutException(timeout, uri)
    except requests.ConnectionError as exp:
        raise HTTPClientConnectionException(uri, exp.args[0])
    except Exception as exp:
        raise HTTPClientException('Request error to %s: %s' % (uri, exp))
PUT an HTTP request to a daemon
19,405
def explode(self, escalations):
    """Create an Escalation instance for each HostEscalation object.

    :param escalations: container the generated Escalation objects are added to
    :return: None
    """
    for escalation in self:
        properties = escalation.__class__.properties
        # Name the generated escalation after the host (or hostgroup) it targets
        name = getattr(escalation, 'host_name', getattr(escalation, 'hostgroup_name', ''))
        creation_dict = {
            'escalation_name': 'Generated-HE-%s-%s' % (name, escalation.uuid)}
        # Carry over every defined property to the new escalation
        for prop in properties:
            if hasattr(escalation, prop):
                creation_dict[prop] = getattr(escalation, prop)
        escalations.add_escalation(Escalation(creation_dict))
Create instance of Escalation for each HostEscalation object
19,406
def register(self, name, _type, statsd_host='localhost', statsd_port=8125,
             statsd_prefix='alignak', statsd_enabled=False, broks_enabled=False):
    """Initialize the stats manager with real values for daemon `name`.

    :param name: daemon name used as a metrics prefix component
    :param _type: daemon type
    :param statsd_host: StatsD server host
    :param statsd_port: StatsD server port
    :param statsd_prefix: prefix for all sent metrics
    :param statsd_enabled: whether statistics reporting is enabled
    :param broks_enabled: whether alignak_stat broks must be produced
    :return: the (possibly unchanged) statsd_enabled flag
    """
    self.name = name
    self._type = _type
    self.statsd_host = statsd_host
    self.statsd_port = int(statsd_port)
    self.statsd_prefix = statsd_prefix
    self.statsd_enabled = statsd_enabled
    self.broks_enabled = broks_enabled
    logger.debug("StatsD configuration for %s - %s:%s, prefix: %s, "
                 "enabled: %s, broks: %s, file: %s",
                 self.name, self.statsd_host, self.statsd_port,
                 self.statsd_prefix, self.statsd_enabled, self.broks_enabled,
                 self.stats_file)
    # 'None' (string) may come from the configuration file: treat as disabled
    if self.statsd_enabled and self.statsd_host is not None and self.statsd_host != 'None':
        logger.info("Sending %s statistics to: %s:%s, prefix: %s",
                    self.name, self.statsd_host, self.statsd_port, self.statsd_prefix)
        if self.load_statsd():
            logger.info('Alignak internal statistics are sent to StatsD.')
        else:
            logger.info('StatsD server is not available.')
    if self.stats_file:
        try:
            # Append mode: keep the statistics of the former runs
            self.file_d = open(self.stats_file, 'a')
            logger.info("Alignak internal statistics are written in the file %s",
                        self.stats_file)
        except OSError as exp:
            logger.exception("Error when opening the file '%s' : %s", self.stats_file, exp)
            self.file_d = None
    return self.statsd_enabled
Init instance with real values
19,407
def load_statsd(self):
    """Create the socket connection to the StatsD host (or push a
    connection-test metric when a carbon connection is configured).

    Fix: socket.gethostbyname expects a str hostname under Python 3; the
    former ``self.statsd_host.encode('utf-8')`` handed it bytes, which made
    the resolution fail and the failure was swallowed by the broad except.

    :return: True when the connection is ready, False otherwise
    """
    if not self.statsd_enabled:
        logger.info('Stats reporting is not enabled, connection is not allowed')
        return False
    if self.statsd_enabled and self.carbon:
        # Graphite/carbon mode: queue a connection-test metric and flush it
        self.my_metrics.append(('.'.join([self.statsd_prefix, self.name,
                                          'connection-test']),
                                (int(time.time()), int(time.time()))))
        self.carbon.add_data_list(self.my_metrics)
        self.flush(log=True)
    else:
        try:
            logger.info('Trying to contact StatsD server...')
            # gethostbyname takes a str hostname (not bytes)
            self.statsd_addr = (socket.gethostbyname(self.statsd_host),
                                self.statsd_port)
            self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except (socket.error, socket.gaierror) as exp:
            logger.warning('Cannot create StatsD socket: %s', exp)
            return False
        except Exception as exp:
            logger.exception('Cannot create StatsD socket (other): %s', exp)
            return False
    logger.info('StatsD server contacted')
    return True
Create socket connection to statsd host
19,408
def flush(self, log=False):
    """Send inner stored metrics to the defined Graphite/carbon.

    After a send failure, flushing is paused during metrics_flush_pause
    seconds; during the pause the call is a no-op returning True.

    :param log: when True, log an extra warning on send failure
    :return: True on success / nothing to do / paused, False on failure
    """
    if not self.my_metrics:
        logger.debug("Flushing - no metrics to send")
        return True
    now = int(time.time())
    # Still within the pause window after the last failure: do not retry yet
    if self.last_failure and self.last_failure + self.metrics_flush_pause > now:
        if not self.log_metrics_flush_pause:
            date = datetime.datetime.fromtimestamp(
                self.last_failure).strftime(self.date_fmt)
            logger.warning("Metrics flush paused on connection error "
                           "(last failed: %s). "
                           "Inner stored metric: %d. Trying to send...",
                           date, self.metrics_count)
            self.log_metrics_flush_pause = True
        return True
    try:
        logger.debug("Flushing %d metrics to Graphite/carbon", self.metrics_count)
        if self.carbon.send_data():
            # Flushed: drop the inner stored metrics
            self.my_metrics = []
        else:
            logger.warning("Failed sending metrics to Graphite/carbon. "
                           "Inner stored metric: %d", self.metrics_count)
            if log:
                logger.warning("Failed sending metrics to Graphite/carbon. "
                               "Inner stored metric: %d", self.metrics_count)
            return False
        if self.log_metrics_flush_pause:
            logger.warning("Metrics flush restored. "
                           "Remaining stored metric: %d", self.metrics_count)
        # Success: reset the failure / pause state
        self.last_failure = 0
        self.log_metrics_flush_pause = False
    except Exception as exp:
        if not self.log_metrics_flush_pause:
            logger.warning("Failed sending metrics to Graphite/carbon. "
                           "Inner stored metric: %d", self.metrics_count)
        else:
            date = datetime.datetime.fromtimestamp(
                self.last_failure).strftime(self.date_fmt)
            logger.warning("Metrics flush paused on connection error "
                           "(last failed: %s). "
                           "Inner stored metric: %d. Trying to send...",
                           date, self.metrics_count)
        logger.warning("Exception: %s", str(exp))
        # Remember the failure time to start the pause window
        self.last_failure = now
        return False
    return True
Send inner stored metrics to the defined Graphite
19,409
def send_to_graphite(self, metric, value, timestamp=None):
    """Store a new metric in the inner list and flush to carbon/Graphite
    once the flush threshold is reached.

    :param metric: metric name (appended to the prefix and daemon name)
    :param value: metric value
    :param timestamp: metric timestamp; defaults to the current time
    """
    # Sending is only possible when enabled and a carbon connection exists
    if not self.statsd_enabled or not self.carbon:
        return
    when = timestamp if timestamp is not None else int(time.time())
    full_name = '.'.join([self.statsd_prefix, self.name, metric])
    self.my_metrics.append((full_name, (when, value)))
    if self.metrics_count >= self.metrics_flush_count:
        self.carbon.add_data_list(self.my_metrics)
        self.flush()
Inner store a new metric and flush to Graphite if the flush threshold is reached .
19,410
def counter(self, key, value, timestamp=None):
    """Set a counter value.

    Updates the inner (min, max, count, sum) stats for `key`, then forwards
    the value to StatsD / carbon / the metrics file when enabled.

    :param key: counter name
    :param value: counter value
    :param timestamp: optional metric timestamp (defaults to now)
    :return: an alignak_stat Brok when broks are enabled, else None
    """
    _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0))
    count += 1
    _sum += value
    if _min is None or value < _min:
        _min = value
    if _max is None or value > _max:
        _max = value
    self.stats[key] = (_min, _max, count, _sum)
    # StatsD UDP packet, counter type ('|c')
    if self.statsd_enabled and self.statsd_sock:
        packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key, value)
        packet = packet.encode('utf-8')
        try:
            self.statsd_sock.sendto(packet, self.statsd_addr)
        except (socket.error, socket.gaierror):
            # Send errors are deliberately ignored (best-effort reporting)
            pass
    # Graphite/carbon reporting
    if self.statsd_enabled and self.carbon:
        self.send_to_graphite(key, value, timestamp=timestamp)
    # Metrics file reporting, using the configured line/date formats
    if self.statsd_enabled and self.file_d:
        if timestamp is None:
            timestamp = int(time.time())
        packet = self.line_fmt
        if not self.date_fmt:
            date = "%s" % timestamp
        else:
            date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt)
        packet = packet.replace("#date#", date)
        packet = packet.replace("#counter#", '%s.%s.%s'
                                % (self.statsd_prefix, self.name, key))
        packet = packet.replace("#value#", '%d' % value)
        packet = packet.replace("#uom#", 'c')
        try:
            self.file_d.write(packet)
        except IOError:
            logger.warning("Could not write to the file: %s", packet)
    if self.broks_enabled:
        logger.debug("alignak stat brok: %s = %s", key, value)
        if timestamp is None:
            timestamp = int(time.time())
        return Brok({'type': 'alignak_stat',
                     'data': {'ts': timestamp,
                              'type': 'counter',
                              'metric': '%s.%s.%s'
                                        % (self.statsd_prefix, self.name, key),
                              'value': value,
                              'uom': 'c'}})
    return None
Set a counter value
19,411
def get_managed_configurations(self):
    """Return the configurations managed by this satellite as a dict
    {scheduler instance_id: {hash, push_flavor, managed_conf_id}}."""
    res = {
        link.instance_id: {
            'hash': link.hash,
            'push_flavor': link.push_flavor,
            'managed_conf_id': link.managed_conf_id,
        }
        for link in list(self.schedulers.values())
    }
    logger.debug("Get managed configuration: %s", res)
    return res
Get the configurations managed by this satellite
19,412
def get_scheduler_from_hostname(self, host_name):
    """Return the scheduler link in charge of the given host, or None."""
    scheduler_uuid = self.hosts_schedulers.get(host_name)
    return self.schedulers.get(scheduler_uuid)
Get scheduler linked to the given host_name
19,413
def get_external_commands(self):
    """Return the pending external commands and reset the inner list."""
    res, self.external_commands = self.external_commands, []
    logger.debug("Get and clear external commands list: %s", res)
    return res
Get the external commands
19,414
def get_results_from_passive(self, scheduler_instance_id):
    """Get executed actions results from a passive satellite for a scheduler.

    :param scheduler_instance_id: instance id of the requesting scheduler
    :return: list of results (the scheduler wait_homerun is emptied)
    """
    if not self.schedulers:
        logger.debug("I do not have any scheduler: %s", self.schedulers)
        return []
    scheduler_link = None
    for link in list(self.schedulers.values()):
        if scheduler_instance_id == link.instance_id:
            scheduler_link = link
            break
    else:
        # for/else: no break happened, the scheduler is unknown
        logger.warning("I do not know this scheduler: %s", scheduler_instance_id)
        return []
    logger.debug("Get results for the scheduler: %s", scheduler_instance_id)
    # Swap the homerun dict with a fresh one in a single statement
    ret, scheduler_link.wait_homerun = scheduler_link.wait_homerun, {}
    logger.debug("Results: %s" % (list(ret.values())) if ret else "No results available")
    return list(ret.values())
Get executed actions results from a passive satellite for a specific scheduler
19,415
def get_events(self):
    """Return the collected events and empty the inner list."""
    collected = copy.copy(self.events)
    del self.events[:]
    return collected
Get event list from satellite
19,416
def manage_action_return(self, action):
    """Manage an action return from a worker: store the result into the
    corresponding scheduler wait_homerun and drop the worker-only
    properties (my_scheduler, my_worker).

    Fix: the last handler was ``except KeyError:`` but logged ``exp``;
    the name was unbound there, so a missing scheduler raised a NameError
    instead of being logged. It now binds the exception properly.

    :param action: the returned action (or any other object to forward)
    """
    # Anything that is not an action result is simply forwarded
    if action.__class__.my_type not in ['check', 'notification', 'eventhandler']:
        self.add(action)
        return
    scheduler_uuid = action.my_scheduler
    logger.debug("Got action return: %s / %s", scheduler_uuid, action.uuid)
    try:
        # Clean the temporary properties used for worker dispatching
        del action.my_scheduler
        del action.my_worker
    except AttributeError:
        logger.error("AttributeError Got action return: %s / %s",
                     scheduler_uuid, action)
    try:
        del self.schedulers[scheduler_uuid].actions[action.uuid]
    except KeyError as exp:
        logger.error("KeyError del scheduler action: %s / %s - %s",
                     scheduler_uuid, action.uuid, str(exp))
    try:
        self.schedulers[scheduler_uuid].wait_homerun[action.uuid] = action
    except KeyError as exp:
        logger.error("KeyError Add home run action: %s / %s - %s",
                     scheduler_uuid, action.uuid, str(exp))
Manage action return from Workers We just put them into the corresponding sched and we clean unused properties like my_scheduler
19,417
def create_and_launch_worker(self, module_name='fork'):
    """Create, start and register a new worker for the given module.

    :param module_name: module to launch a worker for; 'fork' means a plain
        forked worker with no module target
    :raises NotWorkerMod: when the module is not worker capable
    """
    logger.info("Allocating new '%s' worker...", module_name)
    target = None
    # NOTE(review): __warned is a fresh local on every call, so the
    # "module_name not in __warned" test below is always True and the
    # warning is emitted on each call -- the dedup looks unintentional.
    __warned = []
    if module_name == 'fork':
        target = None
    else:
        for module in self.modules_manager.instances:
            if module.get_name() == module_name:
                if not module.properties.get('worker_capable', False):
                    raise NotWorkerMod
                target = module.work
        if target is None:
            if module_name not in __warned:
                logger.warning("No target found for %s, NOT creating a worker for it...",
                               module_name)
                __warned.append(module_name)
            return
    queue = Queue()
    worker = Worker(module_name, queue, self.returns_queue, self.processes_by_worker,
                    max_plugins_output_length=self.max_plugins_output_length,
                    target=target, loaded_into=self.name)
    self.workers[worker.get_id()] = worker
    self.q_by_mod[module_name][worker.get_id()] = queue
    worker.start()
    logger.info("Started '%s' worker: %s (pid=%d)",
                module_name, worker.get_id(), worker.get_pid())
Create and launch a new worker and put it into self . workers It can be mortal or not
19,418
def do_stop_workers(self):
    """Terminate and join every worker process."""
    logger.info("Stopping all workers (%d)", len(self.workers))
    for worker in list(self.workers.values()):
        try:
            logger.info(" - stopping '%s'", worker.get_id())
            worker.terminate()
            worker.join(timeout=1)
            logger.info(" - stopped")
        except (AttributeError, AssertionError):
            # worker may already be gone or never fully started
            pass
        except Exception as exp:
            logger.error("exception: %s", str(exp))
Stop all workers
19,419
def get_broks(self):
    """Return the collected broks and empty the inner list."""
    collected = copy.copy(self.broks)
    del self.broks[:]
    return collected
Get brok list from satellite
19,420
def check_and_del_zombie_workers(self):
    """Detect dead workers, remove them, and reassign their still-queued
    actions to another worker queue."""
    # active_children() also joins any finished child processes
    for p in active_children():
        logger.debug("got child: %s", p)
    w_to_del = []
    for worker in list(self.workers.values()):
        logger.debug("checking if worker %s (pid=%d) is alive",
                     worker.get_id(), worker.get_pid())
        if not self.interrupted and not worker.is_alive():
            logger.warning("The worker %s (pid=%d) went down unexpectedly!",
                           worker.get_id(), worker.get_pid())
            worker.terminate()
            worker.join(timeout=1)
            w_to_del.append(worker.get_id())
    for worker_id in w_to_del:
        worker = self.workers[worker_id]
        # Drop the dead worker's queue
        del self.q_by_mod[worker.module_name][worker.get_id()]
        # Requeue the actions that were waiting in this worker
        for scheduler_uuid in self.schedulers:
            sched = self.schedulers[scheduler_uuid]
            for act in list(sched.actions.values()):
                if act.status == ACT_STATUS_QUEUED and act.my_worker == worker_id:
                    self.assign_to_a_queue(act)
        del self.workers[worker_id]
Check if workers are fine and kill them if not. Dispatch the actions of a killed worker to another one.
19,421
def adjust_worker_number_by_load(self):
    """Ensure each module has at least min_workers workers running;
    modules that cannot host a worker are removed from q_by_mod."""
    if self.interrupted:
        logger.debug("Trying to adjust worker number. Ignoring because we are stopping.")
        return
    to_del = []
    logger.debug("checking worker count."
                 " Currently: %d workers, min per module : %d, max per module : %d",
                 len(self.workers), self.min_workers, self.max_workers)
    for mod in self.q_by_mod:
        # Spawn enough workers to reach the per-module minimum
        todo = max(0, self.min_workers - len(self.q_by_mod[mod]))
        for _ in range(todo):
            try:
                self.create_and_launch_worker(module_name=mod)
            except NotWorkerMod:
                to_del.append(mod)
                break
    for mod in to_del:
        logger.warning("The module %s is not a worker one, I remove it from the worker list.", mod)
        del self.q_by_mod[mod]
Try to create the minimum workers specified in the configuration
19,422
def _get_queue_for_the_action ( self , action ) : mod = getattr ( action , 'module_type' , 'fork' ) queues = list ( self . q_by_mod [ mod ] . items ( ) ) if not queues : return ( 0 , None ) self . rr_qid = ( self . rr_qid + 1 ) % len ( queues ) ( worker_id , queue ) = queues [ self . rr_qid ] return ( worker_id , queue )
Find action queue for the action depending on the module . The id is found with action modulo on action id
19,423
def add_actions(self, actions_list, scheduler_instance_id):
    """Add a list of actions to the satellite queues.

    :param actions_list: list of actions (possibly still serialized)
    :param scheduler_instance_id: instance id of the sending scheduler
    """
    scheduler_link = None
    for scheduler_id in self.schedulers:
        logger.debug("Trying to add an action, scheduler: %s",
                     self.schedulers[scheduler_id])
        if scheduler_instance_id == self.schedulers[scheduler_id].instance_id:
            scheduler_link = self.schedulers[scheduler_id]
            break
    else:
        # for/else: no break happened, the scheduler is unknown
        logger.error("Trying to add actions from an unknwown scheduler: %s",
                     scheduler_instance_id)
        return
    if not scheduler_link:
        logger.error("Trying to add actions, but scheduler link is not found for: %s, "
                     "actions: %s", scheduler_instance_id, actions_list)
        return
    logger.debug("Found scheduler link: %s", scheduler_link)
    for action in actions_list:
        # Actions sent over HTTP may still be serialized
        uuid = getattr(action, 'uuid', None)
        if uuid is None:
            try:
                action = unserialize(action, no_load=True)
                uuid = action.uuid
            except AlignakClassLookupException:
                logger.error('Cannot un-serialize action: %s', action)
                continue
        # Already known action: do not queue it twice
        if uuid in scheduler_link.actions:
            continue
        # Tag the action with its scheduler for the result return trip
        action.my_scheduler = scheduler_link.uuid
        scheduler_link.actions[action.uuid] = action
        self.assign_to_a_queue(action)
Add a list of actions to the satellite queues
19,424
def assign_to_a_queue(self, action):
    """Put an action into a worker actions queue and mark it as queued."""
    worker_id, queue = self._get_queue_for_the_action(action)
    if not worker_id:
        return
    # Remember which worker handles the action, for the return trip
    action.my_worker = worker_id
    action.status = ACT_STATUS_QUEUED
    message = Message(_type='Do', data=action, source=self.name)
    logger.debug("Queuing message: %s", message)
    queue.put_nowait(message)
    logger.debug("Queued")
Take an action and put it to a worker actions queue
19,425
def get_new_actions(self):
    """Timed wrapper around do_get_new_actions, for statistics purpose."""
    try:
        started = time.time()
        self.do_get_new_actions()
        statsmgr.timer('actions.got.time', time.time() - started)
    except RuntimeError:
        logger.error("Exception like issue #1007")
Wrapper function for do_get_new_actions For stats purpose
19,426
def main(self):
    """Main satellite function: initialize the daemon, wait for the initial
    configuration, start the workers, then run the main loop."""
    try:
        if not self.do_daemon_init_and_start():
            self.exit_on_error(message="Daemon initialization error", exit_code=3)
        self.do_post_daemon_init()
        # Wait for the arbiter to push the initial configuration
        self.wait_for_initial_conf()
        if self.new_conf:
            self.setup_new_conf()
        # Allocate the minimum worker pool before entering the loop
        self.adjust_worker_number_by_load()
        self.do_main_loop()
        logger.info("Exited from the main loop.")
        self.request_stop()
    except Exception:
        self.exit_on_exception(traceback.format_exc())
        raise
Main satellite function . Do init and then mainloop
19,427
def check_activation(self, contacts):
    """Enter or exit the downtime when its activity window has been
    crossed since the last check."""
    now = time.time()
    previously_active = self.is_in_effect
    self.is_in_effect = self.start_time <= now <= self.end_time
    if self.is_in_effect and not previously_active:
        self.enter(contacts)
    elif previously_active and not self.is_in_effect:
        self.exit(contacts)
Enter or exit downtime if necessary
19,428
def split_semicolon(line, maxsplit=None):
    r"""Split a line on semicolons, but not on escaped ones (``\;``).

    The stray ``r`` token in the collapsed source was the prefix of this
    raw docstring, restored here.

    :param line: line to split
    :param maxsplit: maximum number of splits; None or a negative value
        means no limit
    :return: list of parts, with the escaping backslashes removed
    """
    split_line = line.split(';')
    split_line_size = len(split_line)
    if maxsplit is None or maxsplit < 0:
        maxsplit = split_line_size
    i = 0
    while i < split_line_size - 1:
        # A part ending with a backslash means the ';' was escaped:
        # drop the backslash and glue the next part back on
        ends = split_line[i].endswith('\\')
        if ends:
            split_line[i] = split_line[i][:-1]
        if (ends or i >= maxsplit) and i < split_line_size - 1:
            split_line[i] = ";".join([split_line[i], split_line[i + 1]])
            del split_line[i + 1]
            split_line_size -= 1
        else:
            i += 1
    return split_line
Split a line on semicolon characters, but not on escaped semicolons
19,429
def format_t_into_dhms_format(timestamp):
    """Convert an amount of seconds into a 'Xd Xh Xm Xs' string."""
    minutes, seconds = divmod(timestamp, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%sd %sh %sm %ss' % (days, hours, minutes, seconds)
Convert an amount of second into day hour min and sec
19,430
def merge_periods(data):
    """Merge overlapping (start, end) periods into continuous periods.
    E.g. [(350, 450), (400, 600)] => [(350, 600)].

    Fixes over the original: removed a first loop that computed an unused
    ``end`` value (dead code); an empty input now returns [] instead of
    [(None, None)]; a period fully contained in the previous one no longer
    shrinks the merged period (the end is now max(cur_end, dt_end)).

    :param data: iterable of (start, end) pairs
    :return: list of merged (start, end) pairs, sorted by start
    """
    intervals = sorted(data, key=lambda drange: drange[0])
    merged = []
    cur_start = cur_end = None
    for (dt_start, dt_end) in intervals:
        if cur_end is None:
            # First interval: open the current period
            cur_start, cur_end = dt_start, dt_end
        elif dt_start <= cur_end:
            # Overlapping: extend the current period
            cur_end = max(cur_end, dt_end)
        else:
            # Disjoint: close the current period and open a new one
            merged.append((cur_start, cur_end))
            cur_start, cur_end = dt_start, dt_end
    if cur_end is not None:
        merged.append((cur_start, cur_end))
    return merged
Merge periods to build continuous periods. E.g. 350-450 and 400-600 => 350-600
19,431
def list_split(val, split_on_comma=True):
    """Split each string member of a list on commas.

    When split_on_comma is False, val is returned untouched.
    Members that are themselves lists are skipped.
    """
    if not split_on_comma:
        return val
    result = []
    for member in val:
        if isinstance(member, list):
            # nested lists are silently ignored
            continue
        result.extend(member.split(','))
    return result
Try to split each member of a list with a comma separator. If we don't have to split, just return val
19,432
def to_best_int_float(val):
    """Return val as an int when it represents a whole number, else float."""
    as_float = float(val)
    as_int = int(as_float)
    return as_int if as_int == as_float else as_float
Get best type for value between int and float
19,433
def dict_to_serialized_dict(ref, the_dict):
    """Serialize every value of the_dict that exposes a serialize method,
    keyed by uuid. The ref parameter is unused (kept for the interface)."""
    return {elt.uuid: elt.serialize()
            for elt in list(the_dict.values())
            if getattr(elt, 'serialize', None)}
Serialize the list of elements to a dictionary
19,434
def list_to_serialized(ref, the_list):
    """Serialize every element of the_list that exposes a serialize method.
    The ref parameter is unused (kept for the interface)."""
    return [elt.serialize() for elt in the_list
            if getattr(elt, 'serialize', None)]
Serialize the list of elements
19,435
def to_hostnames_list(ref, tab):
    """Return the host_name of every element of tab that has one.
    The ref parameter is unused (kept for the interface)."""
    return [host.host_name for host in tab if hasattr(host, 'host_name')]
Convert Host list into a list of host_name
19,436
def sort_by_number_values(x00, y00):
    """cmp-style comparator: the collection with MORE values sorts first."""
    len_x, len_y = len(x00), len(y00)
    if len_x == len_y:
        return 0
    return 1 if len_x < len_y else -1
Compare x00 y00 base on number of values
19,437
def strip_and_uniq(tab):
    """Strip every element and return the ordered unique non-empty values."""
    seen = set()
    result = []
    for elt in tab:
        val = elt.strip()
        if val and val not in seen:
            seen.add(val)
            result.append(val)
    return result
Strip every element of a list and keep a list of ordered unique values
19,438
def filter_host_by_name(name):
    """Build a filter accepting items whose host has the given host_name."""
    def inner_filter(items):
        host = items["host"]
        return host is not None and host.host_name == name
    return inner_filter
Filter for host Filter on name
19,439
def filter_host_by_regex(regex):
    """Build a filter accepting items whose host_name matches the regex."""
    pattern = re.compile(regex)
    def inner_filter(items):
        host = items["host"]
        return host is not None and pattern.match(host.host_name) is not None
    return inner_filter
Filter for host Filter on regex
19,440
def filter_host_by_group(group):
    """Build a filter accepting items whose host belongs to the group."""
    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        names = [items["hostgroups"][g].hostgroup_name for g in host.hostgroups]
        return group in names
    return inner_filter
Filter for host Filter on group
19,441
def filter_host_by_tag(tpl):
    """Build a filter accepting items whose host carries the given tag."""
    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return any(tag.strip() == tpl for tag in host.tags)
    return inner_filter
Filter for host Filter on tag
19,442
def filter_service_by_name(name):
    """Build a filter accepting items whose service has this description."""
    def inner_filter(items):
        service = items["service"]
        return service is not None and service.service_description == name
    return inner_filter
Filter for service Filter on name
19,443
def filter_service_by_regex_name(regex):
    """Build a filter accepting items whose service description matches."""
    pattern = re.compile(regex)
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        return pattern.match(service.service_description) is not None
    return inner_filter
Filter for service Filter on regex
19,444
def filter_service_by_host_name(host_name):
    """Build a filter accepting items whose service runs on host_name.

    Fix: the original resolved items["hosts"][service.host] BEFORE checking
    service for None, so a None service crashed (AttributeError) instead of
    being filtered out; a missing host now also yields False (KeyError-safe).
    """
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"].get(service.host)
        if host is None:
            return False
        return host.host_name == host_name
    return inner_filter
Filter for service Filter on host_name
19,445
def filter_service_by_regex_host_name(regex):
    """Build a filter accepting items whose service host name matches regex.

    Fix: check service for None BEFORE resolving its host (the original
    dereferenced service.host first and crashed on a None service); a
    missing host now also yields False (KeyError-safe).
    """
    host_re = re.compile(regex)
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"].get(service.host)
        if host is None:
            return False
        return host_re.match(host.host_name) is not None
    return inner_filter
Filter for service Filter on regex host_name
19,446
def filter_service_by_hostgroup_name(group):
    """Build a filter accepting items whose service host is in the group.

    Fix: check service for None BEFORE resolving its host (the original
    dereferenced service.host first and crashed on a None service); a
    missing host now also yields False (KeyError-safe).
    """
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"].get(service.host)
        if host is None:
            return False
        return group in [items["hostgroups"][g].hostgroup_name
                         for g in host.hostgroups]
    return inner_filter
Filter for service Filter on hostgroup
19,447
def filter_service_by_host_tag_name(tpl):
    """Build a filter accepting items whose service host carries the tag.

    Fix: check service for None BEFORE resolving its host (the original
    dereferenced service.host first and crashed on a None service); a
    missing host now also yields False (KeyError-safe).
    """
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        host = items["hosts"].get(service.host)
        if host is None:
            return False
        return tpl in [t.strip() for t in host.tags]
    return inner_filter
Filter for service Filter on tag
19,448
def filter_service_by_servicegroup_name(group):
    """Build a filter accepting items whose service is in the servicegroup."""
    def inner_filter(items):
        service = items["service"]
        if service is None:
            return False
        names = [items["servicegroups"][g].servicegroup_name
                 for g in service.servicegroups]
        return group in names
    return inner_filter
Filter for service Filter on group
19,449
def filter_host_by_bp_rule_label(label):
    """Build a filter accepting items whose host carries the given label."""
    def inner_filter(items):
        host = items["host"]
        return host is not None and label in host.labels
    return inner_filter
Filter for host Filter on label
19,450
def manage_signal(self, sig, frame):
    """Record that a signal was received and flag the worker as interrupted;
    the actual termination is handled by the master daemon.

    :param sig: signal number
    :param frame: current stack frame (unused)
    """
    logger.info("worker '%s' (pid=%d) received a signal: %s",
                self._id, os.getpid(), SIGNALS_TO_NAMES_DICT[sig])
    self.interrupted = True
Manage signals caught by the process but I do not do anything ... our master daemon is managing our termination .
19,451
def check_for_system_time_change(self):
    """Return the time drift since the previous loop when it exceeds 15
    minutes (900s) in either direction, else 0. Refreshes the reference
    timestamp in all cases."""
    current = time.time()
    drift = current - self.t_each_loop
    self.t_each_loop = current
    return drift if abs(drift) > 900 else 0
Check if the system time changed. If so, return the time difference
19,452
def work(self, actions_queue, returns_queue, control_queue=None):
    """Wrapper around do_work that logs any exception before re-raising,
    so the real failure is visible in the daemon log.

    :param actions_queue: queue the worker reads actions from
    :param returns_queue: queue the worker pushes results to
    :param control_queue: optional control queue
    """
    try:
        logger.info("[%s] (pid=%d) starting my job...", self._id, os.getpid())
        self.do_work(actions_queue, returns_queue, control_queue)
        logger.info("[%s] (pid=%d) stopped", self._id, os.getpid())
    except ActionError as exp:
        logger.error("[%s] exited with an ActionError exception : %s",
                     self._id, str(exp))
        logger.exception(exp)
        raise
    except Exception as exp:
        logger.error("[%s] exited with an unmanaged exception : %s",
                     self._id, str(exp))
        logger.exception(exp)
        raise
Wrapper function for do_work in order to catch the exception to see the real work look at do_work
19,453
def read_requirements(filename='requirements.txt'):
    """Read a pip requirements file and return the list of requirements.

    A bare name like 'tests' is expanded to 'requirements-tests.txt'.
    Comment and option lines (starting with '#' or '-') are skipped;
    '#egg=' fragments are reduced to the egg name. Note: returned entries
    keep their trailing newline, as the original lines are not stripped.
    """
    if not filename.startswith('requirements'):
        filename = 'requirements-' + filename
    if not os.path.splitext(filename)[1]:
        filename += '.txt'

    def valid_line(line):
        # keep non-empty lines that are neither comments nor pip options
        stripped = line.strip()
        return stripped and not any(stripped.startswith(p) for p in ('#', '-'))

    def extract_requirement(line):
        marker = '#egg='
        if marker in line:
            return line.split(marker, 1)[1]
        return line

    with open(filename) as handle:
        return [extract_requirement(line) for line in handle if valid_line(line)]
Reads the list of requirements from given file .
19,454
def init_running_properties(self):
    """Give this instance its own value for every running property.
    Mutable defaults (set/list/dict) are copied so instances never share."""
    for prop, entry in list(self.__class__.running_properties.items()):
        default = entry.default
        if isinstance(default, (set, list, dict)):
            default = copy(default)
        setattr(self, prop, default)
Initialize the running_properties. Each instance has its own properties.
19,455
def copy(self):
    """Return a copy of this item with a new id: a fresh instance is built
    and every defined property (except uuid) is copied onto it."""
    duplicate = self.__class__({})
    for prop in self.__class__.properties:
        # never copy the uuid: the clone must get its own identity
        if prop in ['uuid']:
            continue
        val = getattr(self, prop, None)
        if val is not None:
            setattr(duplicate, prop, val)
    # shallow-copy the containers so original and clone do not share them
    for container in ("customs", "tags", "templates"):
        if hasattr(self, container):
            setattr(duplicate, container, copy(getattr(self, container)))
    return duplicate
Get a copy of this item but with a new id
19,456
def clean(self):
    """Drop the properties only needed during initialization and
    configuration parsing, plus empty warning/error collections."""
    for name in ('imported_from', 'use', 'plus', 'templates', 'register'):
        try:
            delattr(self, name)
        except AttributeError:
            pass
    for name in ('configuration_warnings', 'configuration_errors'):
        try:
            value = getattr(self, name, None)
            # only delete when present AND empty
            if value is not None and not value:
                delattr(self, name)
        except AttributeError:
            pass
Clean properties only needed for initialization and configuration
19,457
def load_global_conf(cls, global_configuration):
    """Propagate global Alignak configuration parameters onto the class,
    following each property's class_inherit directives."""
    logger.debug("Propagate global parameter for %s:", cls)
    for prop, entry in global_configuration.properties.items():
        # only managed properties that declare a class_inherit are spread
        if not entry.managed or not getattr(entry, 'class_inherit'):
            continue
        for (cls_dest, change_name) in entry.class_inherit:
            if cls_dest != cls:
                continue
            value = getattr(global_configuration, prop)
            logger.debug("- global parameter %s=%s -> %s=%s", prop,
                         getattr(global_configuration, prop), change_name, value)
            # a None change_name means "keep the same property name"
            setattr(cls, prop if change_name is None else change_name, value)
Apply global Alignak configuration .
19,458
def get_templates(self):
    """Return the list of template names this object uses (its 'use'
    property, either a list or a comma-separated string)."""
    use = getattr(self, 'use', '')
    names = use if isinstance(use, list) else use.split(',')
    return [name.strip() for name in names if name.strip()]
Get list of templates this object use
19,459
def get_all_plus_and_delete(self):
    """Pop every 'plus' entry and return them as a {prop: value} dict."""
    return {prop: self.get_plus_and_delete(prop)
            for prop in list(self.plus.keys())}
Get all self . plus items of list . We copy it delete the original and return the copy list
19,460
def add_error(self, txt):
    """Record a configuration error message and flag the whole
    configuration as incorrect."""
    self.configuration_errors.append(txt)
    self.conf_is_correct = False
Add a message in the configuration errors list so we can print them all in one place
19,461
def is_correct(self):
    """Check this object: every required property (except the declared
    special ones) must be defined. Missing properties are reported
    through add_error."""
    state = self.conf_is_correct
    special = getattr(self, 'special_properties', [])
    for prop, entry in list(self.__class__.properties.items()):
        if prop in special:
            continue
        if entry.required and not hasattr(self, prop):
            self.add_error("[%s::%s] %s property is missing"
                           % (self.my_type, self.get_name(), prop))
            state = state & self.conf_is_correct
    return state
Check if this object is correct
19,462
def old_properties_names_to_new(self):
    """Rename Nagios2-era properties to their Nagios3 names, as declared
    in the class old_properties mapping (e.g. normal_check_interval ->
    check_interval). An existing new-name value is never overwritten."""
    mapping = getattr(self.__class__, "old_properties", {})
    for old_name, new_name in list(mapping.items()):
        if hasattr(self, old_name) and not hasattr(self, new_name):
            setattr(self, new_name, getattr(self, old_name))
            delattr(self, old_name)
This function is used by services and hosts to transform Nagios2 parameters to Nagios3 ones, like normal_check_interval to check_interval. There is an old_properties tab in the classes that describes such renamings to do.
19,463
def get_raw_import_values(self):
    """Return a dict of the properties actually set on this object.

    Only properties declared in the class ``properties`` dict (plus
    ``register``) that exist on the instance are included.
    """
    wanted = list(self.__class__.properties.keys())
    if 'register' not in wanted:
        wanted.append('register')
    return {prop: getattr(self, prop) for prop in wanted if hasattr(self, prop)}
Get properties = > values of this object
19,464
def del_downtime(self, downtime_id):
    """Remove a downtime from this object, flagging it as deletable.

    Unknown downtime ids are silently ignored.
    """
    downtime = self.downtimes.pop(downtime_id, None)
    if downtime is not None:
        downtime.can_be_deleted = True
Delete a downtime in this object
19,465
def get_property_value_for_brok(self, prop, tab):
    """Return the value of *prop*, ready for brok serialization.

    Falls back to the property's default when unset on the instance,
    and applies the entry's brok_transformation callable when defined.
    """
    entry = tab[prop]
    value = getattr(self, prop, entry.default)
    transform = entry.brok_transformation
    return value if transform is None else transform(self, value)
Get the property of an object and brok_transformation if needed and return the value
19,466
def fill_data_brok_from(self, data, brok_type):
    """Fill *data* with every property whose fill_brok list contains
    *brok_type*, from the class properties and, when present, its
    running_properties."""
    cls = self.__class__
    tables = [cls.properties]
    if hasattr(cls, 'running_properties'):
        tables.append(cls.running_properties)
    for tab in tables:
        for prop, entry in list(tab.items()):
            if brok_type in entry.fill_brok:
                data[prop] = self.get_property_value_for_brok(prop, tab)
Add properties to data parameter with properties of this object when brok_type parameter is defined in fill_brok of these properties
19,467
def get_initial_status_brok(self, extra=None):
    """Build the initial status brok for this object.

    The brok carries the object's full status; the optional *extra*
    dict is merged into the data when provided.
    """
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'full_status')
    if extra:
        data.update(extra)
    return Brok({'data': data, 'type': 'initial_%s_status' % self.my_type})
Create an initial status brok
19,468
def get_update_status_brok(self):
    """Build an update-status brok carrying this item's full status."""
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'full_status')
    return Brok({'data': data, 'type': 'update_%s_status' % self.my_type})
Create an update item brok
19,469
def get_check_result_brok(self):
    """Build a check-result brok for this object."""
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'check_result')
    return Brok({'data': data, 'type': '%s_check_result' % self.my_type})
Create check_result brok
19,470
def dump(self, dump_file_name=None):
    """Return a dict of this Item's truthy properties.

    Nested Item values, and lists of Items, are dumped recursively.
    The *dump_file_name* argument is kept for interface compatibility;
    it is not used here.
    """
    result = {}
    for prop in self.properties:
        if not hasattr(self, prop):
            continue
        value = getattr(self, prop)
        if isinstance(value, list) and value and isinstance(value[0], Item):
            result[prop] = [element.dump() for element in value]
        elif isinstance(value, Item):
            result[prop] = value.dump()
        elif value:
            # Falsy values ('', 0, [], None) are deliberately skipped.
            result[prop] = value
    return result
Dump Item object properties
19,471
def add_items(self, items, index_items):
    """Dispatch *items* between the template and the item containers.

    Templates go through add_template(); regular items go through
    add_item(), which may expand one item into several generated ones
    (patterned host names). *index_items* tells add_item() whether the
    items must be indexed by name.
    """
    n_templates = 0
    n_items = 0
    generated_items = []
    for item in items:
        if item.is_tpl():
            self.add_template(item)
            n_templates += 1
        else:
            created = self.add_item(item, index_items)
            # An item may expand into several clones; count at least itself.
            n_items += max(1, len(created))
            if created:
                generated_items.extend(created)
    if n_templates:
        logger.info(' indexed %d template(s)', n_templates)
    if n_items:
        logger.info(' created %d %s(s).', n_items, self.inner_class.my_type)
Add items to template if is template else add in item list
19,472
def manage_conflict(self, item, name):
    """Arbitrate between *item* and the already indexed object of the same name.

    The object with the lowest definition_order wins; on a tie, the
    most recently defined one (*item*) replaces the existing object and
    a duplicate warning is recorded on it.
    Returns the object that must be kept in the index.
    """
    if item.is_tpl():
        existing = self.name_to_template[name]
    else:
        existing = self.name_to_item[name]
    if existing == item:
        # Same object indexed twice: nothing to arbitrate.
        return item
    # definition_order may be unset on either side; fall back to the
    # declared property default in that case.
    existing_prio = getattr(existing, "definition_order",
                            existing.properties["definition_order"].default)
    item_prio = getattr(item, "definition_order",
                        item.properties["definition_order"].default)
    if existing_prio < item_prio:
        # Existing object has a stronger (lower) definition order: keep it.
        return existing
    if existing_prio > item_prio:
        # New item wins silently.
        pass
    else:
        # Same priority: warn about the duplicate; the last definition wins.
        objcls = getattr(self.inner_class, "my_type", "[unknown]")
        mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \
               "You may manually set the definition_order parameter to avoid this message." % \
               (objcls, name, item.imported_from, existing.imported_from)
        item.configuration_warnings.append(mesg)
    # Drop the loser from the container before returning the winner.
    if item.is_tpl():
        self.remove_template(existing)
    else:
        self.remove_item(existing)
    return item
Checks if an object holding the same name already exists in the index .
19,473
def add_template(self, tpl):
    """Index *tpl* by name, then store it in the templates container."""
    indexed = self.index_template(tpl)
    self.templates[indexed.uuid] = indexed
Add and index a template into the templates container .
19,474
def index_template(self, tpl):
    """Index a template by its name into name_to_template.

    A nameless template gets a configuration error recorded; a name
    collision is arbitrated by manage_conflict(). Returns the template
    actually kept in the index.
    """
    objcls = self.inner_class.my_type
    name = getattr(tpl, 'name', '')
    if not name:
        tpl.add_error("a %s template has been defined without name, from: %s"
                      % (objcls, tpl.imported_from))
    elif name in self.name_to_template:
        tpl = self.manage_conflict(tpl, name)
    # NOTE(review): a nameless template still gets indexed under '' — confirm intended.
    self.name_to_template[name] = tpl
    logger.debug("Indexed a %s template: %s, uses: %s",
                 tpl.my_type, name, getattr(tpl, 'use', 'Nothing'))
    return tpl
Indexes a template by name into the name_to_template dictionary .
19,475
def remove_template(self, tpl):
    """Remove *tpl* from the templates container and un-index it.

    A template that is not in the container is simply un-indexed.
    """
    self.templates.pop(tpl.uuid, None)
    self.unindex_template(tpl)
Removes and un - index a template from the templates container .
19,476
def unindex_template(self, tpl):
    """Drop *tpl* from the name index; unknown names are ignored."""
    self.name_to_template.pop(getattr(tpl, 'name', ''), None)
Unindex a template from the templates container .
19,477
def add_item(self, item, index=True):
    """Add an item into the container, expanding patterned host names.

    A name like ``host[1-3]`` (or ``host[01-09-%02d]`` with an explicit
    format) is expanded into one cloned host per value of the inclusive
    range; the $HOSTNAME$ macro in a few display properties is
    substituted in each clone.

    :param item: item to add
    :param index: whether to index the item(s) by their name property
    :return: the list of generated clones (empty when the item was
             added as-is)
    """
    name_property = getattr(self.__class__, "name_property", None)
    generated_hosts = []
    if name_property:
        name = getattr(item, name_property, None)
        # Detect a pattern: [min-max] or [min-max-format]
        if name and '[' in name and ']' in name:
            pattern = name[name.find("[") + 1:name.find("]")]
            if '-' in pattern:
                logger.debug("Found an host with a patterned name: %s", pattern)
                limits = pattern.split('-')
                fmt = "%d"
                min_v = 1
                max_v = 1
                if len(limits) == 3:
                    # Three parts: explicit printf-style index format.
                    fmt = limits[2]
                    new_name = name.replace('[%s-%s-%s]' % (limits[0], limits[1], fmt), '***')
                else:
                    new_name = name.replace('[%s-%s]' % (limits[0], limits[1]), '***')
                # Non-numeric bounds silently fall back to 1.
                try:
                    min_v = int(limits[0])
                except ValueError:
                    pass
                try:
                    max_v = int(limits[1])
                except ValueError:
                    pass
                # One clone per value of the inclusive range.
                for idx in range(min_v, max_v + 1):
                    logger.debug("- cloning host: %s", new_name.replace('***', fmt % idx))
                    new_host = deepcopy(item)
                    new_host.uuid = get_a_new_object_id()
                    new_host.host_name = new_name.replace('***', fmt % idx)
                    # Substitute the $HOSTNAME$ macro in display-oriented fields.
                    for prop in ['display_name', 'alias', 'notes', 'notes_url', 'action_url']:
                        if getattr(new_host, prop, None) is None:
                            continue
                        value = getattr(new_host, prop)
                        if '$HOSTNAME$' in value:
                            setattr(new_host, prop, value.replace('$HOSTNAME$', new_host.host_name))
                    generated_hosts.append(new_host)
    if generated_hosts:
        for new_host in generated_hosts:
            if index is True:
                new_host = self.index_item(new_host)
            self.items[new_host.uuid] = new_host
        logger.info(" cloned %d hosts from %s", len(generated_hosts), item.get_name())
    else:
        # No pattern: store (and optionally index) the item itself.
        if index is True and name_property:
            item = self.index_item(item)
        self.items[item.uuid] = item
    return generated_hosts
Add an item into our containers and index it depending on the index flag .
19,478
def old_properties_names_to_new(self):
    """Apply the legacy (Nagios 2) property-name conversion to every
    item and every template of this container."""
    everything = list(self.items.values()) + list(self.templates.values())
    for obj in everything:
        obj.old_properties_names_to_new()
Convert old Nagios2 names to Nagios3 new names
19,479
def get_all_tags(self, item):
    """Return the de-duplicated list of template names *item* inherits
    from, directly or transitively."""
    tags = set(item.get_templates())
    for template_id in item.templates:
        template = self.templates[template_id]
        tags.add(template.name)
        tags.update(self.get_all_tags(template))
    return list(tags)
Get all tags of an item
19,480
def linkify_templates(self):
    """Resolve template links for every item and template, then compute
    the full tag list of each real item."""
    all_objects = list(self.items.values()) + list(self.templates.values())
    for obj in all_objects:
        self.linkify_item_templates(obj)
    # Tags are only computed for the real items (iteration over self).
    for item in self:
        item.tags = self.get_all_tags(item)
Link all templates and create the template graph too
19,481
def apply_partial_inheritance(self, prop):
    """Resolve *prop* through template inheritance for every item and
    template; a literal 'null' value means the property must be unset."""
    for obj in list(self.items.values()) + list(self.templates.values()):
        self.get_property_by_inheritance(obj, prop)
        # 'null' is the explicit "no value" marker: drop the attribute.
        try:
            if getattr(obj, prop) == 'null':
                delattr(obj, prop)
        except AttributeError:
            pass
Define property with inheritance value of the property
19,482
def linkify_with_contacts(self, contacts):
    """Replace contact names with contact uuids on every item.

    Unknown contact names are reported as configuration errors.
    """
    for item in self:
        if not hasattr(item, 'contacts'):
            continue
        resolved = []
        for name in [e for e in strip_and_uniq(item.contacts) if e]:
            contact = contacts.find_by_name(name)
            # NOTE(review): a duplicate uuid also falls into the error
            # branch below — confirm this is intended.
            if contact is not None and contact.uuid not in resolved:
                resolved.append(contact.uuid)
            else:
                item.add_error("the contact '%s' defined for '%s' is unknown"
                               % (name, item.get_name()))
        item.contacts = resolved
Link items with contacts items
19,483
def linkify_with_escalations(self, escalations):
    """Replace escalation names with escalation uuids on every item.

    Unknown escalation names are reported as configuration errors.
    """
    for item in self:
        if not hasattr(item, 'escalations'):
            continue
        resolved = []
        for name in [e for e in strip_and_uniq(item.escalations) if e]:
            escalation = escalations.find_by_name(name)
            if escalation is not None and escalation.uuid not in resolved:
                resolved.append(escalation.uuid)
            else:
                item.add_error("the escalation '%s' defined for '%s' is unknown"
                               % (name, item.get_name()))
        item.escalations = resolved
Link with escalations
19,484
def explode_contact_groups_into_contacts(item, contactgroups):
    """Expand *item*'s contact_groups into its contacts list.

    Unknown contact groups are reported as configuration errors on the
    item; the members of known groups are appended to item.contacts.
    """
    if not hasattr(item, 'contact_groups'):
        return
    cgnames = ''
    if item.contact_groups:
        # contact_groups may be a list or a comma-separated string.
        if isinstance(item.contact_groups, list):
            cgnames = item.contact_groups
        else:
            cgnames = item.contact_groups.split(',')
    for cgname in strip_and_uniq(cgnames):
        group = contactgroups.find_by_name(cgname)
        if not group:
            item.add_error("The contact group '%s' defined on the %s '%s' do not exist"
                           % (cgname, item.__class__.my_type, item.get_name()))
            continue
        members = contactgroups.get_members_of_group(cgname)
        if members:
            if hasattr(item, 'contacts'):
                item.contacts = item.contacts + members
            else:
                item.contacts = members
Get all contacts of contact_groups and put them in contacts container
19,485
def linkify_with_timeperiods(self, timeperiods, prop):
    """Replace, on every item, the timeperiod name stored in *prop*
    with the matching timeperiod uuid.

    Empty names are normalized to ''; unknown names are reported as
    configuration errors and left unchanged.
    """
    for item in self:
        if not hasattr(item, prop):
            continue
        tp_name = getattr(item, prop).strip()
        if not tp_name:
            setattr(item, prop, '')
            continue
        timeperiod = timeperiods.find_by_name(tp_name)
        if timeperiod is None:
            item.add_error("The %s of the %s '%s' named '%s' is unknown!"
                           % (prop, item.__class__.my_type, item.get_name(), tp_name))
            continue
        setattr(item, prop, timeperiod.uuid)
Link items with timeperiods items
19,486
def linkify_with_checkmodulations(self, checkmodulations):
    """Replace checkmodulation names with their uuids on every item.

    Unknown names are reported as configuration errors.
    """
    for item in self:
        if not hasattr(item, 'checkmodulations'):
            continue
        resolved = []
        for name in [e for e in strip_and_uniq(item.checkmodulations) if e]:
            modulation = checkmodulations.find_by_name(name)
            if modulation is not None and modulation.uuid not in resolved:
                resolved.append(modulation.uuid)
            else:
                item.add_error("The checkmodulations of the %s '%s' named "
                               "'%s' is unknown!" % (item.__class__.my_type,
                                                     item.get_name(), name))
        item.checkmodulations = resolved
Link checkmodulation object
19,487
def linkify_s_by_module(self, modules):
    """Replace module names with module objects on every item.

    Note: unlike most linkify helpers, this one stores the module
    objects themselves, not their uuids.
    """
    for item in self:
        resolved = []
        for name in [e for e in strip_and_uniq(item.modules) if e]:
            module = modules.find_by_name(name)
            # NOTE(review): `module.uuid not in resolved` compares a uuid
            # against module objects, so it is always true — confirm the
            # duplicate check is intended to be a no-op.
            if module is not None and module.uuid not in resolved:
                resolved.append(module)
            else:
                item.add_error("Error: the module %s is unknown for %s"
                               % (name, item.get_name()))
        item.modules = resolved
Link modules to items
19,488
def evaluate_hostgroup_expression(expr, hosts, hostgroups, look_in='hostgroups'):
    """Evaluate a host/hostgroup expression and return the matching names.

    *expr* may be a list (joined with '|', i.e. OR) or a string complex
    expression. *look_in* selects whether names resolve against the
    hostgroups container or against the hosts container.
    """
    if isinstance(expr, list):
        expr = '|'.join(expr)
    if look_in == 'hostgroups':
        factory = ComplexExpressionFactory(look_in, hostgroups, hosts)
    else:
        factory = ComplexExpressionFactory(look_in, hosts, hosts)
    expression_tree = factory.eval_cor_pattern(expr)
    return list(expression_tree.resolve_elements())
Evaluate hostgroup expression
19,489
def get_hosts_from_hostgroups(hgname, hostgroups):
    """Return the host names belonging to the given hostgroup(s).

    :param hgname: hostgroup name(s): a list, or a comma-separated string
    :param hostgroups: hostgroups container providing find_by_name()
    :return: list of stripped host names (duplicates across groups kept)
    :raises ValueError: when one of the hostgroups is unknown
    """
    if not isinstance(hgname, list):
        hgname = [e.strip() for e in hgname.split(',') if e.strip()]
    host_names = []
    for name in hgname:
        hostgroup = hostgroups.find_by_name(name)
        if hostgroup is None:
            # Bug fix: report the specific unknown group name instead of
            # formatting the whole requested list into the message.
            raise ValueError("the hostgroup '%s' is unknown" % name)
        host_names.extend(h.strip() for h in hostgroup.get_hosts() if h.strip())
    return host_names
Get hosts of hostgroups
19,490
def explode_host_groups_into_hosts(self, item, hosts, hostgroups):
    """Expand *item*'s hostgroup_name and host_name into a flat,
    comma-separated host_name string on the item.

    Supports complex group expressions, '!name' exclusions and the '*'
    wildcard (all hosts that have a host_name).
    """
    hnames_list = []
    # hostgroup_name may be missing or empty.
    hgnames = getattr(item, "hostgroup_name", '') or ''
    if is_complex_expr(hgnames):
        # Expression with operators (&, |, !, ...): evaluate it.
        hnames_list.extend(self.evaluate_hostgroup_expression(
            item.hostgroup_name, hosts, hostgroups))
    elif hgnames:
        try:
            hnames_list.extend(self.get_hosts_from_hostgroups(hgnames, hostgroups))
        except ValueError as err:
            item.add_error(str(err))
    # Explicit host names are added on top of the group expansion.
    hname = getattr(item, "host_name", '')
    hnames_list.extend([n.strip() for n in hname.split(',') if n.strip()])
    hnames = set()
    for host in hnames_list:
        if host.startswith('!'):
            # '!name' removes a previously collected host, if present.
            hst_to_remove = host[1:].strip()
            try:
                hnames.remove(hst_to_remove)
            except KeyError:
                pass
        elif host == '*':
            # Wildcard: every host that actually has a host_name set.
            hnames.update([host.host_name for host in hosts.items.values()
                           if getattr(host, 'host_name', '')])
        else:
            hnames.add(host)
    item.host_name = ','.join(hnames)
Get all hosts of the hostgroups and add them all to the item's host_name attribute.
19,491
def get_customs_properties_by_inheritance(self, obj):
    """Resolve *obj*'s custom (macro) properties through its templates.

    Recursively merges the templates' customs into obj.customs. A value
    already set on the object shadows the template value, and additive
    ('+'-prefixed) values are prepended via get_plus_and_delete().
    Returns the mutated obj.customs dict.
    """
    for t_id in obj.templates:
        template = self.templates[t_id]
        # Depth-first: resolve the template's own customs first.
        tpl_cv = self.get_customs_properties_by_inheritance(template)
        if tpl_cv:
            for prop in tpl_cv:
                if prop not in obj.customs:
                    value = tpl_cv[prop]
                else:
                    # The object's own value shadows the template's.
                    value = obj.customs[prop]
                if obj.has_plus(prop):
                    # Additive value: the object's '+' part goes first.
                    value.insert(0, obj.get_plus_and_delete(prop))
                obj.customs[prop] = value
    # Apply any remaining '+' parts to the object's own customs.
    for prop in obj.customs:
        value = obj.customs[prop]
        if obj.has_plus(prop):
            value.insert(0, obj.get_plus_and_delete(prop))
            obj.customs[prop] = value
    # Leftover '+' values with no base value become plain customs.
    cust_in_plus = obj.get_all_plus_and_delete()
    for prop in cust_in_plus:
        obj.customs[prop] = cust_in_plus[prop]
    return obj.customs
Get custom properties from the templates defined in this object
19,492
def add_edge(self, from_node, to_node):
    """Add a directed edge (from_node -> to_node) to the graph.

    An unknown destination node is registered through add_node(); an
    unknown source node is created inline with a fresh adjacency list.
    """
    if to_node not in self.nodes:
        self.add_node(to_node)
    try:
        self.nodes[from_node]["sons"].append(to_node)
    except KeyError:
        # Source node missing (or lacking a "sons" list): (re)create it.
        self.nodes[from_node] = {"dfs_loop_status": "", "sons": [to_node]}
Add edge between two node The edge is oriented
19,493
def loop_check(self):
    """Return the list of node ids that are part of a dependency loop.

    Every node is reset to DFS_UNCHECKED, then a DFS runs from each
    still-unchecked node; nodes flagged DFS_LOOP_INSIDE at their turn
    are collected. Temporary markers are removed before returning.
    """
    in_loop = []
    # Reset the DFS status of every node.
    for node in self.nodes.values():
        node['dfs_loop_status'] = 'DFS_UNCHECKED'
    for node_id, node in self.nodes.items():
        if node['dfs_loop_status'] == 'DFS_UNCHECKED':
            self.dfs_loop_search(node_id)
        if node['dfs_loop_status'] == 'DFS_LOOP_INSIDE':
            in_loop.append(node_id)
    # Drop the temporary markers.
    for node in self.nodes.values():
        del node['dfs_loop_status']
    return in_loop
Check if we have a loop in the graph
19,494
def dfs_loop_search(self, root):
    """Depth-first search tagging nodes with a loop status.

    Statuses: DFS_UNCHECKED (not visited yet), DFS_TEMPORARY_CHECKED
    (on the current DFS stack), DFS_OK (checked, no loop found),
    DFS_NEAR_LOOP (points to a loop), DFS_LOOP_INSIDE (part of a loop).
    """
    # Mark the root as being on the current DFS stack.
    self.nodes[root]['dfs_loop_status'] = 'DFS_TEMPORARY_CHECKED'
    for child in self.nodes[root]["sons"]:
        child_status = self.nodes[child]['dfs_loop_status']
        if child_status == 'DFS_UNCHECKED':
            # Recurse first, then re-read the child's resulting status.
            self.dfs_loop_search(child)
            child_status = self.nodes[child]['dfs_loop_status']
        if child_status == 'DFS_TEMPORARY_CHECKED':
            # Back-edge to a node on the stack: a loop through root.
            self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
            self.nodes[root]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
        if child_status in ('DFS_NEAR_LOOP', 'DFS_LOOP_INSIDE'):
            # The child reaches a loop: root is near (or inside) it.
            if self.nodes[root]['dfs_loop_status'] != 'DFS_LOOP_INSIDE':
                self.nodes[root]['dfs_loop_status'] = 'DFS_NEAR_LOOP'
            self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE'
    # Still only "temporary": no loop found through root, mark it clean.
    if self.nodes[root]['dfs_loop_status'] == 'DFS_TEMPORARY_CHECKED':
        self.nodes[root]['dfs_loop_status'] = 'DFS_OK'
Main algorithm to look for loop . It tags nodes and find ones stuck in loop .
19,495
def dfs_get_all_childs(self, root):
    """Return *root* plus every node reachable from it (recursive DFS).

    Visited nodes are marked DFS_CHECKED so each is expanded only once.
    """
    self.nodes[root]['dfs_loop_status'] = 'DFS_CHECKED'
    reached = {root}
    reached.update(self.nodes[root]['sons'])
    for child in self.nodes[root]['sons']:
        if self.nodes[child]['dfs_loop_status'] == 'DFS_UNCHECKED':
            reached.update(self.dfs_get_all_childs(child))
    return list(reached)
Recursively get all sons of this node
19,496
def identity(self):
    """Return the daemon identity: the application id augmented with
    the start time and the running id."""
    res = self.app.get_id()
    res.update({"start_time": self.start_time, "running_id": self.running_id})
    return res
Get the daemon identity
19,497
def api(self):
    """List the methods exposed by the daemon's web service interface.

    Returns a dict with a 'doc' string and an 'api' list; each entry
    describes one public method: owning daemon type, name, docstring,
    full URI and arguments with their default values.
    """
    # Every public (non underscore-prefixed) bound method is an endpoint.
    functions = [x[0] for x in inspect.getmembers(self, predicate=inspect.ismethod)
                 if not x[0].startswith('_')]
    full_api = {
        'doc': u"When posting data you have to use the JSON format.",
        'api': []
    }
    my_daemon_type = "%s" % getattr(self.app, 'type', 'unknown')
    my_address = getattr(self.app, 'host_name', getattr(self.app, 'name', 'unknown'))
    # Prefer the configured address unless it is only the loopback.
    if getattr(self.app, 'address', '127.0.0.1') not in ['127.0.0.1']:
        my_address = self.app.address
    for fun in functions:
        endpoint = {
            'daemon': my_daemon_type,
            'name': fun,
            'doc': getattr(self, fun).__doc__,
            'uri': '%s://%s:%s/%s' % (getattr(self.app, 'scheme', 'http'),
                                      my_address, self.app.port, fun),
            'args': {}
        }
        try:
            # Python 3 introspection...
            spec = inspect.getfullargspec(getattr(self, fun))
        except Exception:
            # ... with a Python 2 fallback.
            spec = inspect.getargspec(getattr(self, fun))
        args = [a for a in spec.args if a not in ('self', 'cls')]
        # NOTE(review): when only some args have defaults, zip pairs the
        # defaults with the FIRST args, not the last ones — confirm intended.
        if spec.defaults:
            a_dict = dict(list(zip(args, spec.defaults)))
        else:
            a_dict = dict(list(zip(args, ("No default value",) * len(args))))
        endpoint["args"] = a_dict
        full_api['api'].append(endpoint)
    return full_api
List the methods available on the daemon Web service interface
19,498
def stop_request(self, stop_now='0'):
    """Ask the daemon to stop; stop_now='1' requests an immediate stop.

    Always returns True.
    """
    self.app.will_stop = True
    self.app.interrupted = (stop_now == '1')
    return True
Request the daemon to stop
19,499
def get_log_level(self):
    """Return the daemon identity augmented with the current log level,
    both numeric and by name."""
    level_names = {
        logging.DEBUG: 'DEBUG',
        logging.INFO: 'INFO',
        logging.WARNING: 'WARNING',
        logging.ERROR: 'ERROR',
        logging.CRITICAL: 'CRITICAL',
    }
    alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME)
    current_level = alignak_logger.getEffectiveLevel()
    res = self.identity()
    res.update({
        "log_level": current_level,
        "log_level_name": level_names[current_level],
    })
    return res
Get the current daemon log level