idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
19,700
def clear_queues(self, manager):
    """Release the resources associated with the queues of this instance.

    Without a multiprocessing manager, the queues are closed directly;
    a manager is assumed to own their cleanup otherwise.
    """
    for queue in (self.to_q, self.from_q):
        if queue is None:
            continue
        # If we got no manager, we directly call the clean
        if not manager:
            try:
                queue.close()
                queue.join_thread()
            except AttributeError:
                pass
        # else:
        #     q._callmethod('close')
        #     q._callmethod('join_thread')
    self.to_q = self.from_q = None
Release the resources associated to the queues of this instance
104
10
19,701
def start_module(self):
    """Wrapper for the _main function.

    Catch and re-raise any exception occurring in the main function.
    """
    try:
        self._main()
    except Exception as err:
        logger.exception('%s', traceback.format_exc())
        raise Exception(err)
Wrapper for _main function . Catch and raise any exception occurring in the main function
44
17
19,702
def start(self, http_daemon=None):  # pylint: disable=unused-argument
    """Actually (re)start the process if the module is external.

    Stops any currently running process first, then creates a new
    Process instance with target start_module and starts it.
    """
    if not self.is_external:
        return
    if self.process:
        self.stop_process()
    logger.info("Starting external process for module %s...", self.name)
    proc = Process(target=self.start_module, args=(), group=None)

    # Under windows we should not call start() on an object that got its process
    # as an object, so we remove it and we set it in a earlier start
    try:
        del self.properties['process']
    except KeyError:
        pass

    proc.start()
    # We save the process data AFTER the fork()
    self.process = proc
    self.properties['process'] = proc
    logger.info("%s is now started (pid=%d)", self.name, proc.pid)
Actually restart the process if the module is external Try first to stop the process and create a new Process instance with target start_module . Finally start process .
187
31
19,703
def stop_process(self):
    """Request the module process to stop and release it."""
    if not self.process:
        return
    logger.info("I'm stopping module %r (pid=%d)", self.name, self.process.pid)
    self.kill()
    # Clean inner process reference
    self.process = None
Request the module process to stop and release it
58
9
19,704
def manage_brok(self, brok):
    """Request the module to manage the given brok.

    There are a lot of different possible broks to manage; the list is
    defined in the Brok class. Returns False when no handler exists for
    the brok type, else the handler's return value.
    """
    handler = getattr(self, 'manage_' + brok.type + '_brok', None)
    if not handler:
        return False
    # Be sure the brok is prepared before calling the handler
    brok.prepare()
    return handler(brok)
Request the module to manage the given brok . There are a lot of different possible broks to manage . The list is defined in the Brok class .
68
32
19,705
def manage_signal(self, sig, frame):  # pylint: disable=unused-argument
    """Generic function to handle signals."""
    logger.info("received a signal: %s", SIGNALS_TO_NAMES_DICT[sig])
    if sig == signal.SIGHUP:
        # if SIGHUP, reload configuration in arbiter
        logger.info("Modules are not able to reload their configuration. "
                    "Stopping the module...")
    logger.info("Request to stop the module")
    self.interrupted = True
Generic function to handle signals
111
5
19,706
def _main(self):
    """Module main method. Only used by external modules."""
    self.set_proctitle(self.name)
    self.set_signal_handler()
    logger.info("process for module %s is now running (pid=%d)", self.name, os.getpid())

    # Will block here!
    try:
        self.main()
    except (IOError, EOFError):
        pass
        # logger.warning('[%s] EOF exception: %s', self.name, traceback.format_exc())
    except Exception as exp:  # pylint: disable=broad-except
        logger.exception('main function exception: %s', exp)

    self.do_stop()
    logger.info("process for module %s is now exiting (pid=%d)", self.name, os.getpid())
    exit()
module main method . Only used by external modules .
181
10
19,707
def no_block_read(output):
    """Try to read a file descriptor in a non-blocking mode.

    Returns an empty string when fcntl is unavailable (e.g. on Windows)
    or when nothing could be read.
    """
    _buffer = ""
    if not fcntl:
        return _buffer

    fd = output.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    try:
        _buffer = output.read()
    except Exception:  # pylint: disable=broad-except
        pass
    return _buffer
Try to read a file descriptor in a non blocking mode
126
11
19,708
def get_local_environnement(self):
    """Mix the OS environment and our own variables into a new local
    environment dictionary.

    Do not use copy.copy() here, as the resulting copy would still
    change the real environment (it is still an os._Environ instance).
    """
    local_env = os.environ.copy()
    for var_name in self.env:
        local_env[var_name] = self.env[var_name]
    return local_env
Mix the environment and the environment variables into a new local environment dictionary
86
13
19,709
def execute(self):
    """Start this action command in a subprocess."""
    self.status = ACT_STATUS_LAUNCHED
    self.check_time = time.time()
    self.wait_time = 0.0001
    self.last_poll = self.check_time

    # Get a local env variables with our additional values
    self.local_env = self.get_local_environnement()

    # Initialize stdout and stderr
    self.stdoutdata = ''
    self.stderrdata = ''

    logger.debug("Launch command: '%s', ref: %s, timeout: %s",
                 self.command, self.ref, self.timeout)
    if self.log_actions:
        if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
            logger.warning("Launch command: '%s'", self.command)
        else:
            logger.info("Launch command: '%s'", self.command)

    return self._execute()
Start this action command in a subprocess .
210
9
19,710
def copy_shell__(self, new_i):
    """Copy all attributes listed in ONLY_COPY_PROP from self to new_i
    and return new_i."""
    for attr_name in ONLY_COPY_PROP:
        setattr(new_i, attr_name, getattr(self, attr_name))
    return new_i
Create all attributes listed in ONLY_COPY_PROP and return self with these attributes .
46
20
19,711
def get_contacts_by_explosion(self, contactgroups):
    # pylint: disable=access-member-before-definition
    """Get contacts of this group, recursively exploding sub-groups."""
    # First we tag the group so it will not be exploded again
    # if a son of it already called it
    self.already_exploded = True

    # rec_tag is set to False for every group we explode, so a True here
    # means a loop in the contactgroup definitions... not GOOD!
    if self.rec_tag:
        logger.error("[contactgroup::%s] got a loop in contactgroup definition",
                     self.get_name())
        if hasattr(self, 'members'):
            return self.members
        return ''

    # Ok, not a loop: we tag it and continue
    self.rec_tag = True
    for member_name in self.get_contactgroup_members():
        contactgroup = contactgroups.find_by_name(member_name.strip())
        if contactgroup is not None:
            value = contactgroup.get_contacts_by_explosion(contactgroups)
            if value is not None:
                self.add_members(value)

    if hasattr(self, 'members'):
        return self.members
    return ''
Get contacts of this group
277
5
19,712
def add_member(self, contact_name, contactgroup_name):
    """Add a contact string to a contactgroup's members; if the
    contactgroup does not exist, create it."""
    contactgroup = self.find_by_name(contactgroup_name)
    if contactgroup:
        contactgroup.add_members(contact_name)
        return
    contactgroup = Contactgroup({'contactgroup_name': contactgroup_name,
                                 'alias': contactgroup_name,
                                 'members': contact_name})
    self.add_contactgroup(contactgroup)
Add a contact string to a contact group member list ; if the contact group does not exist , create it
98
17
19,713
def linkify_contactgroups_contacts(self, contacts):
    """Link the contacts with contactgroups, replacing member names
    with contact uuids; unknown names go to unknown_members."""
    for contactgroup in self:
        # The new member list, in id
        new_members = []
        for member_name in contactgroup.get_contacts():
            # protect with strip at the beginning so don't care about spaces
            member_name = member_name.strip()
            if not member_name:
                # void entry, skip this
                continue
            member = contacts.find_by_name(member_name)
            # Maybe the contact is missing, if so, must be put in unknown_members
            if member is not None:
                new_members.append(member.uuid)
            else:
                contactgroup.add_unknown_members(member_name)

        # Make members uniq, then replace the names with the ids
        contactgroup.replace_members(list(set(new_members)))
Link the contacts with contactgroups
201
6
19,714
def explode(self):
    """Fill members with contactgroup_members.

    Tags every group during the process so a group is not exploded
    twice and definition loops can be detected.
    """
    # We do not want a same group to be exploded again and again, so we tag it
    for group in list(self.items.values()):
        group.already_exploded = False

    for contactgroup in list(self.items.values()):
        if contactgroup.already_exploded:
            continue
        # get_contacts_by_explosion is a recursive function, so we must
        # reset the loop tag on every group before exploding this one
        for group in list(self.items.values()):
            group.rec_tag = False
        contactgroup.get_contacts_by_explosion(self)

    # We clean the tags
    for group in list(self.items.values()):
        if hasattr(group, 'rec_tag'):
            del group.rec_tag
        del group.already_exploded
Fill members with contactgroup_members
205
7
19,715
def add_flapping_change(self, sample):
    """Add a flapping sample and keep only cls.flap_history samples."""
    cls = self.__class__

    # Bail out if this element is not under flapping check, or if
    # flapping detection is globally disabled
    if not self.flap_detection_enabled or not cls.enable_flap_detection:
        return

    self.flapping_changes.append(sample)
    # Keep just the last flap_history changes (global value)
    if len(self.flapping_changes) > cls.flap_history:
        self.flapping_changes.pop(0)
Add a flapping sample and keep cls . flap_history samples
125
14
19,716
def add_attempt(self):
    """Add an attempt when an object is in a non-ok state, capped at
    max_check_attempts."""
    self.attempt = min(self.attempt + 1, self.max_check_attempts)
Add an attempt when an object is in a non - ok state
34
12
19,717
def do_check_freshness(self, hosts, services, timeperiods, macromodulations,
                       checkmodulations, checks, when):
    # pylint: disable=too-many-nested-blocks, too-many-branches
    """Check freshness and schedule a check now if necessary.

    Returns the raised check, or None when no check is needed/possible.
    """
    now = when
    # Before, check if class (host or service) have check_freshness OK
    # Then check if item want freshness, then check freshness
    cls = self.__class__
    if not self.in_checking and self.freshness_threshold and not self.freshness_expired:
        # logger.debug("Checking freshness for %s, last state update: %s, now: %s.",
        #              self.get_full_name(), self.last_state_update, now)
        if os.getenv('ALIGNAK_LOG_CHECKS', None):
            logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name())

        # If we never checked this item, we begin the freshness period
        if not self.last_state_update:
            self.last_state_update = int(now)

        if self.last_state_update < now - \
                (self.freshness_threshold + cls.additional_freshness_latency):
            timeperiod = timeperiods[self.check_period]
            if timeperiod is None or timeperiod.is_time_valid(now):
                # Create a new check for the scheduler
                chk = self.launch_check(now, hosts, services, timeperiods,
                                        macromodulations, checkmodulations, checks)
                if not chk:
                    logger.warning("No raised freshness check for: %s", self)
                    return None

                chk.freshness_expiry_check = True
                chk.check_time = time.time()
                chk.output = "Freshness period expired: %s" % (
                    datetime.utcfromtimestamp(int(chk.check_time)).strftime(
                        "%Y-%m-%d %H:%M:%S %Z"))

                # Map the configured freshness_state to an exit status
                if self.my_type == 'host':
                    if self.freshness_state == 'o':
                        chk.exit_status = 0
                    elif self.freshness_state == 'd':
                        chk.exit_status = 2
                    elif self.freshness_state in ['u', 'x']:
                        chk.exit_status = 4
                    else:
                        chk.exit_status = 3
                else:
                    if self.freshness_state == 'o':
                        chk.exit_status = 0
                    elif self.freshness_state == 'w':
                        chk.exit_status = 1
                    elif self.freshness_state == 'c':
                        chk.exit_status = 2
                    elif self.freshness_state == 'u':
                        chk.exit_status = 3
                    elif self.freshness_state == 'x':
                        chk.exit_status = 4
                    else:
                        chk.exit_status = 3

                return chk
            else:
                logger.debug("Ignored freshness check for %s, because "
                             "we are not in the check period.", self.get_full_name())
    return None
Check freshness and schedule a check now if necessary .
710
11
19,718
def update_business_impact_value(self, hosts, services, timeperiods, bi_modulations):
    """Update our business_impact value with the max of the impacts'
    business_impact if we got impacts, saving our configured value the
    first time. Without impacts, revert to our own value unless a
    modulation period applies."""
    # First save our business_impact if not already done
    if self.my_own_business_impact == -1:
        self.my_own_business_impact = self.business_impact

    # We look at our modulations; the first that applies wins
    in_modulation = False
    for bi_modulation_id in self.business_impact_modulations:
        bi_modulation = bi_modulations[bi_modulation_id]
        now = time.time()
        period = timeperiods[bi_modulation.modulation_period]
        if period is None or period.is_time_valid(now):
            self.business_impact = bi_modulation.business_impact
            in_modulation = True
            break

    # If we truly have impacts, we get the max business_impact
    # if it is bigger than ours
    if self.impacts:
        bp_impacts = [hosts[elem].business_impact
                      for elem in self.impacts if elem in hosts]
        bp_impacts.extend(services[elem].business_impact
                          for elem in self.impacts if elem in services)
        self.business_impact = max(self.business_impact, max(bp_impacts))
        return

    # If we are not a problem, restore our own value when no modulation applies
    if self.my_own_business_impact != -1 and not in_modulation:
        self.business_impact = self.my_own_business_impact
We update our business_impact value with the max of the impacts business_impact if we got impacts . And save our configuration business_impact if we do not have do it before If we do not have impacts we revert our value
365
46
19,719
def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
    """Remove this object as an impact source for other scheduling items."""
    was_pb = self.is_problem
    if self.is_problem:
        self.is_problem = False

        # Warn our impacts that we are no more a problem
        for impact_id in self.impacts:
            if impact_id in hosts:
                impact = hosts[impact_id]
            else:
                impact = services[impact_id]
            impact.unregister_a_problem(self)

        # We can just drop our impacts list
        self.impacts = []

        # We update our business_impact value, it's not a huge thing :)
        self.update_business_impact_value(hosts, services, timeperiods, bi_modulations)

    # If we were a problem, we say to everyone our new status,
    # with good business_impact value
    if was_pb:
        # And we register a new broks for update status
        self.broks.append(self.get_update_status_brok())
Remove this objects as an impact for other schedulingitem .
215
11
19,720
def register_a_problem(self, prob, hosts, services, timeperiods, bi_modulations):
    # pylint: disable=too-many-locals
    """Called recursively by potential impacts so they update their
    source_problems list. Does not go below if the problem is already
    registered (e.g. multiple parents)."""
    # Maybe we already have this problem? If so, bailout too
    if prob.uuid in self.source_problems:
        return []

    now = time.time()
    was_an_impact = self.is_impact
    # Our father already looked if he impacts us. So if we are here,
    # it's that we really are impacted
    self.is_impact = True

    impacts = []
    # Ok, if we are impacted, we can add it in our problem list
    # TODO: remove this unused check
    if self.is_impact:
        logger.debug("I am impacted: %s", self)
        # Maybe I was a problem myself, now I can say: not my fault!
        if self.is_problem:
            self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)

        # We should take the impacted state, but only when we just
        # entered the impacted state
        if not was_an_impact:
            self.set_impact_state()

        # Ok now we can be a simple impact
        impacts.append(self.uuid)
        if prob.uuid not in self.source_problems:
            self.source_problems.append(prob.uuid)

        # We should send this problem to all potential impacted that depend on us
        for (impacted_item_id, status, timeperiod_id, _) in self.act_depend_of_me:
            # Check if the status is ok for impact
            if impacted_item_id in hosts:
                impact = hosts[impacted_item_id]
            else:
                impact = services[impacted_item_id]
            timeperiod = timeperiods[timeperiod_id]
            for stat in status:
                if self.is_state(stat):
                    # now check if we should bailout because of a
                    # not good timeperiod for dep
                    if timeperiod is None or timeperiod.is_time_valid(now):
                        new_impacts = impact.register_a_problem(
                            prob, hosts, services, timeperiods, bi_modulations)
                        impacts.extend(new_impacts)

        # And we register a new broks for update status
        self.broks.append(self.get_update_status_brok())

    # now we return all impacts (can be void of course)
    return impacts
Call recursively by potentials impacts so they update their source_problems list . But do not go below if the problem is not a real one for me like If I ve got multiple parents for examples
530
42
19,721
def unregister_a_problem(self, prob):
    """Remove the problem from our problems list and check whether
    we are still impacted."""
    self.source_problems.remove(prob.uuid)

    # Our dependencies may not be aware of the removal of the impact
    # state because it is not ordered, so just look at whether we still
    # have some problem in our list
    if not self.source_problems:
        self.is_impact = False
        # No more an impact, we can unset the impact state
        self.unset_impact_state()

    # And we register a new broks for update status
    self.broks.append(self.get_update_status_brok())
Remove the problem from our problems list and check if we are still impacted
142
14
19,722
def is_enable_action_dependent(self, hosts, services):
    """Check if dependencies states match dependencies statuses.

    This basically means that a dependency is in a bad state and
    it can explain this object's state (used to know whether a
    notification is raised or not).
    """
    enable_action = False
    for (dep_id, status, _, _) in self.act_depend_of:
        if 'n' in status:
            enable_action = True
            continue
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        # Check if the parent matches a case, meaning it is down
        dep_match = [dep.is_state(stat) for stat in status]
        if True not in dep_match:
            enable_action = True
    return enable_action
Check if dependencies states match dependencies statuses This basically means that a dependency is in a bad state and it can explain this object state .
160
27
19,723
def check_and_set_unreachability(self, hosts, services):
    """Set this object as unreachable if all its parent dependencies
    are down."""
    down_states = ['d', 'DOWN', 'c', 'CRITICAL',
                   'u', 'UNKNOWN', 'x', 'UNREACHABLE']
    for (dep_id, _, _, _) in self.act_depend_of:
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        if dep.state not in down_states:
            # At least one parent is alive: nothing to do
            return

    # all parents down
    self.set_unreachable()
Check if all dependencies are down if yes set this object as unreachable .
162
15
19,724
def compensate_system_time_change(self, difference):  # pragma: no cover,
    # not with unit tests
    """Shift the time-related properties by `difference` after a
    system clock change."""
    for prop in ('last_notification', 'last_state_change', 'last_hard_state_change'):
        current = getattr(self, prop)
        # difference may be negative; do not go below 1970 :)
        setattr(self, prop, max(0, current + difference))
If a system time change occurs we have to update properties time related to reflect change
107
16
19,725
def remove_in_progress_check(self, check):
    """Remove a consumed check from the checks in progress and refresh
    the in_checking property."""
    if check in self.checks_in_progress:
        self.checks_in_progress.remove(check)
    self.update_in_checking()
Remove check from check in progress
58
6
19,726
def remove_in_progress_notification(self, notification):
    """Remove a notification from the in-progress ones and mark it
    as a zombie."""
    if notification.uuid in self.notifications_in_progress:
        notification.status = ACT_STATUS_ZOMBIE
        del self.notifications_in_progress[notification.uuid]
Remove a notification and mark them as zombie
56
8
19,727
def remove_in_progress_notifications(self, master=True):
    """Remove all notifications from notifications_in_progress.

    When master is True, contact (child) notifications are kept.
    Downtime/custom/acknowledgement notifications are never removed.
    """
    kept_types = [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
                  u'CUSTOM', u'ACKNOWLEDGEMENT']
    for notification in list(self.notifications_in_progress.values()):
        if master and notification.contact:
            continue
        # Do not remove some specific notifications
        if notification.type in kept_types:
            continue
        self.remove_in_progress_notification(notification)
Remove all notifications from notifications_in_progress
118
9
19,728
def check_for_flexible_downtime(self, timeperiods, hosts, services):
    """Enter a flexible downtime if necessary and raise the start
    notification. When a non-OK state occurs we try to activate
    flexible downtimes."""
    status_updated = False
    for downtime_id in self.downtimes:
        downtime = self.downtimes[downtime_id]
        # Activate flexible downtimes (do not activate triggered downtimes)
        # Note: only activate if we are between downtime start and end time!
        if downtime.fixed or downtime.is_in_effect:
            continue
        if (downtime.start_time <= self.last_chk
                and downtime.end_time >= self.last_chk
                and self.state_id != 0
                and downtime.trigger_id in ['', '0']):
            # returns downtimestart notifications
            self.broks.extend(downtime.enter(timeperiods, hosts, services))
            status_updated = True
    if status_updated is True:
        self.broks.append(self.get_update_status_brok())
Enter in a downtime if necessary and raise start notification When a non Ok state occurs we try to raise a flexible downtime .
199
24
19,729
def update_hard_unknown_phase_state(self):
    """Update in_hard_unknown_reach_phase and
    was_in_hard_unknown_reach_phase attributes.

    UNKNOWN/UNREACHABLE during a HARD state are not so important and
    should not raise a notification about it.
    """
    self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase

    # We do not care about SOFT state at all, so we are sure we are
    # no more in such a phase
    if self.state_type != 'HARD' or self.last_state_type != 'HARD':
        self.in_hard_unknown_reach_phase = False

    if not self.in_hard_unknown_reach_phase:
        # Not already in such a phase: check for a start
        # (here we are sure to be in a HARD/HARD following state)
        if ((self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN')
                or (self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE')):
            self.in_hard_unknown_reach_phase = True
            # Backup the state we had before entering this phase
            self.state_before_hard_unknown_reach_phase = self.last_state
            return
    else:
        # We were already in such a phase: look for its end
        if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE':
            self.in_hard_unknown_reach_phase = False

    # If we just exited the phase with a different state than we entered,
    # lie and say we were not in such a phase, because we need to raise
    # a new notification
    if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase:
        if self.state != self.state_before_hard_unknown_reach_phase:
            self.was_in_hard_unknown_reach_phase = False
Update in_hard_unknown_reach_phase attribute and was_in_hard_unknown_reach_phase UNKNOWN during a HARD state are not so important and they should not raise notif about it
403
43
19,730
def update_notification_command(self, notif, contact, macromodulations, timeperiods,
                                host_ref=None):
    """Update the notification command by resolving its macros.

    Because we are just launching the notification, we can say this
    contact has been notified.
    """
    cls = self.__class__
    macrosolver = MacroResolver()
    data = self.get_data_for_notifications(contact, notif, host_ref)
    notif.command = macrosolver.resolve_command(notif.command_call, data,
                                                macromodulations, timeperiods)
    if cls.enable_environment_macros or notif.enable_environment_macros:
        notif.env = macrosolver.get_env_macros(data)
Update the notification command by resolving Macros And because we are just launching the notification we can say that this contact has been notified
137
25
19,731
def is_escalable(self, notification, escalations, timeperiods):
    """Check if a notification can be escalated: basically call
    is_eligible for each of our escalations."""
    cls = self.__class__

    # Time since the notification started, for time-based escalations
    in_notif_time = time.time() - notification.creation_time

    # Check whether an escalation matches the current notification number
    for escalation_id in self.escalations:
        escalation = escalations[escalation_id]
        escalation_period = timeperiods[escalation.escalation_period]
        if escalation.is_eligible(notification.t_to_go, self.state,
                                  notification.notif_nb, in_notif_time,
                                  cls.interval_length, escalation_period):
            return True
    return False
Check if a notification can be escalated . Basically call is_eligible for each escalation
157
16
19,732
def get_next_notification_time(self, notif, escalations, timeperiods):
    # pylint: disable=too-many-locals
    """Get the next notification time for a notification.

    Take the standard notification_interval, unless an eligible
    escalation asks for a smaller one.
    """
    res = None
    now = time.time()
    cls = self.__class__

    # Start with the minimum notification interval; an eligible
    # escalation may define a smaller one
    notification_interval = self.notification_interval
    in_notif_time = time.time() - notif.creation_time
    for escalation_id in self.escalations:
        escalation = escalations[escalation_id]
        escalation_period = timeperiods[escalation.escalation_period]
        if escalation.is_eligible(notif.t_to_go, self.state, notif.notif_nb,
                                  in_notif_time, cls.interval_length, escalation_period):
            if escalation.notification_interval != -1 and \
                    escalation.notification_interval < notification_interval:
                notification_interval = escalation.notification_interval

    # So take the by default time
    std_time = notif.t_to_go + notification_interval * cls.interval_length

    # Maybe the notification comes from retention data and the next
    # notification alert is in the past; if so use now instead
    if std_time < now:
        std_time = now + notification_interval * cls.interval_length

    # standard time is a good one
    res = std_time

    creation_time = notif.creation_time
    in_notif_time = now - notif.creation_time
    for escalation_id in self.escalations:
        escalation = escalations[escalation_id]
        # If the escalation was already raised, do not look for a new "early start"
        if escalation.get_name() not in notif.already_start_escalations:
            escalation_period = timeperiods[escalation.escalation_period]
            next_t = escalation.get_next_notif_time(std_time, self.state,
                                                    creation_time,
                                                    cls.interval_length,
                                                    escalation_period)
            # If we got a real result (time based escalation), keep the minimum
            if next_t is not None and now < next_t < res:
                res = next_t

    # Can be the standard time or an escalation asked one
    return res
Get the next notification time for a notification Take the standard notification_interval or ask for our escalation if one of them need a smaller value to escalade
511
31
19,733
def get_business_rule_output(self, hosts, services, macromodulations, timeperiods):
    # pylint: disable=too-many-locals, too-many-branches
    """Return a status string for business-rule-based items, formatted
    using the business_rule_output_template attribute as template."""
    got_business_rule = getattr(self, 'got_business_rule', False)
    # Checks that the service is a business rule
    if got_business_rule is False or self.business_rule is None:
        return ""
    # Checks that the business rule has a format specified
    output_template = self.business_rule_output_template
    if not output_template:
        return ""
    macroresolver = MacroResolver()

    # Extracts children template strings
    elts = re.findall(r"\$\((.*)\)\$", output_template)
    child_template_string = elts[0] if elts else ""

    # Processes child services output
    children_output = ""
    ok_count = 0
    # Expands child items format string macros
    items = self.business_rule.list_all_elements()
    for item_uuid in items:
        if item_uuid in hosts:
            item = hosts[item_uuid]
        elif item_uuid in services:
            item = services[item_uuid]

        # Do not display children in OK state
        # todo: last_hard_state ? why not current state if state type is hard ?
        if item.last_hard_state_id == 0:
            ok_count += 1
            continue
        data = item.get_data_for_checks(hosts)
        children_output += macroresolver.resolve_simple_macros_in_string(
            child_template_string, data, macromodulations, timeperiods)

    if ok_count == len(items):
        children_output = "all checks were successful."

    # Replaces children output string
    template_string = re.sub(r"\$\(.*\)\$", children_output, output_template)
    data = self.get_data_for_checks(hosts)
    output = macroresolver.resolve_simple_macros_in_string(
        template_string, data, macromodulations, timeperiods)
    return output.strip()
Returns a status string for business rules based items formatted using business_rule_output_template attribute as template .
478
22
19,734
def business_rule_notification_is_blocked(self, hosts, services):
    # pylint: disable=too-many-locals
    """Process business rule notifications behaviour: return True when
    all non-OK problems are acknowledged (or, when
    business_rule_downtime_as_ack is set, under downtime)."""
    # Walks through problems to check if all items in non ok are
    # acknowledged or in downtime period
    acknowledged = 0
    for src_prob_id in self.source_problems:
        if src_prob_id in hosts:
            src_prob = hosts[src_prob_id]
        else:
            src_prob = services[src_prob_id]
        if src_prob.last_hard_state_id != 0:
            if src_prob.problem_has_been_acknowledged:
                # Problem has been acknowledged
                acknowledged += 1
            # Only check problems under downtime if we are
            # explicitly told to do so
            elif self.business_rule_downtime_as_ack is True:
                if src_prob.scheduled_downtime_depth > 0:
                    # Problem is under downtime, and downtimes should be
                    # treated as acknowledgements
                    acknowledged += 1
                elif hasattr(src_prob, "host") and \
                        hosts[src_prob.host].scheduled_downtime_depth > 0:
                    # Host is under downtime, and downtimes should be
                    # treated as acknowledgements
                    acknowledged += 1

    return acknowledged == len(self.source_problems)
Process business rule notifications behaviour . If all problems have been acknowledged no notifications should be sent if state is not OK . By default downtimes are ignored unless explicitly told to be treated as acknowledgements through with the business_rule_downtime_as_ack set .
287
54
19,735
def fill_data_brok_from(self, data, brok_type):
    """Fill the data brok depending on the brok_type."""
    super(SchedulingItem, self).fill_data_brok_from(data, brok_type)
    # workaround/easy trick to have the command_name of this
    # SchedulingItem in its check_result brok
    if brok_type == 'check_result':
        data['command_name'] = ''
        if self.check_command:
            data['command_name'] = self.check_command.command.command_name
Fill data brok dependent on the brok_type
118
11
19,736
def acknowledge_problem(self, notification_period, hosts, services, sticky, notify,
                        author, comment, end_time=0):
    # pylint: disable=too-many-arguments
    """Add an acknowledgement on this item; for an host, also
    acknowledge all its services that are problems.

    Returns the created Comment, or None when the item is OK/UP.
    """
    comm = None
    logger.debug("Acknowledge requested for %s %s.", self.my_type, self.get_name())
    if self.state != self.ok_up:
        # case have yet an acknowledge
        if self.problem_has_been_acknowledged and self.acknowledgement:
            self.del_comment(getattr(self.acknowledgement, 'comment_id', None))

        if notify:
            self.create_notifications('ACKNOWLEDGEMENT',
                                      notification_period, hosts, services)

        self.problem_has_been_acknowledged = True
        sticky = sticky == 2

        data = {'ref': self.uuid, 'sticky': sticky, 'author': author,
                'comment': comment, 'end_time': end_time, 'notify': notify}
        self.acknowledgement = Acknowledge(data)
        if self.my_type == 'host':
            comment_type = 1
            self.broks.append(self.acknowledgement.get_raise_brok(self.get_name()))
        else:
            comment_type = 2
            self.broks.append(self.acknowledgement.get_raise_brok(self.host_name,
                                                                  self.get_name()))
        data = {'author': author, 'comment': comment, 'comment_type': comment_type,
                'entry_type': 4, 'source': 0, 'expires': False, 'ref': self.uuid}
        comm = Comment(data)
        self.acknowledgement.comment_id = comm.uuid
        self.comments[comm.uuid] = comm
        self.broks.append(self.get_update_status_brok())
        self.raise_acknowledge_log_entry()
    else:
        logger.debug("Acknowledge requested for %s %s but element state is OK/UP.",
                     self.my_type, self.get_name())

    # For an host, acknowledge all its services that are problems
    if self.my_type == 'host':
        for service_uuid in self.services:
            if service_uuid not in services:
                continue
            services[service_uuid].acknowledge_problem(notification_period,
                                                       hosts, services, sticky,
                                                       notify, author, comment,
                                                       end_time)
    return comm
Add an acknowledge
550
3
19,737
def check_for_expire_acknowledge(self):
    """Delete the acknowledgement if it has expired."""
    ack = self.acknowledgement
    if ack and ack.end_time != 0 and ack.end_time < time.time():
        self.unacknowledge_problem()
If have acknowledge and is expired delete it
56
8
19,738
def unacknowledge_problem(self):
    """Remove the acknowledgement, reset the flag and delete the
    related comment."""
    if self.problem_has_been_acknowledged:
        logger.debug("[item::%s] deleting acknowledge of %s",
                     self.get_name(), self.get_full_name())
        self.problem_has_been_acknowledged = False
        if self.my_type == 'host':
            self.broks.append(self.acknowledgement.get_expire_brok(self.get_name()))
        else:
            self.broks.append(self.acknowledgement.get_expire_brok(self.host_name,
                                                                   self.get_name()))

        # delete the comment of the item related with the acknowledge
        if hasattr(self.acknowledgement, 'comment_id') and \
                self.acknowledgement.comment_id in self.comments:
            del self.comments[self.acknowledgement.comment_id]

        # Should not be deleted, a None is Good
        self.acknowledgement = None
        self.broks.append(self.get_update_status_brok())
        self.raise_unacknowledge_log_entry()
Remove the acknowledge reset the flag . The comment is deleted
247
11
19,739
def unacknowledge_problem_if_not_sticky ( self ) : if hasattr ( self , 'acknowledgement' ) and self . acknowledgement is not None : if not self . acknowledgement . sticky : self . unacknowledge_problem ( )
Remove the acknowledge if it is not sticky
57
8
19,740
def set_impact_state ( self ) : cls = self . __class__ if cls . enable_problem_impacts_states_change : logger . debug ( "%s is impacted and goes UNREACHABLE" , self ) # Track the old state (problem occured before a new check) self . state_before_impact = self . state self . state_id_before_impact = self . state_id # This flag will know if we override the impact state self . state_changed_since_impact = False # Set unreachable self . set_unreachable ( )
We just go an impact so we go unreachable But only if we enable this state change in the conf
126
21
19,741
def unset_impact_state ( self ) : cls = self . __class__ if cls . enable_problem_impacts_states_change and not self . state_changed_since_impact : self . state = self . state_before_impact self . state_id = self . state_id_before_impact
Unset impact only if impact state change is set in configuration
71
12
19,742
def find_by_filter ( self , filters , all_items ) : items = [ ] for i in self : failed = False if hasattr ( i , "host" ) : all_items [ "service" ] = i else : all_items [ "host" ] = i for filt in filters : if not filt ( all_items ) : failed = True break if failed is False : items . append ( i ) return items
Find items by filters
94
4
19,743
def add_act_dependency ( self , son_id , parent_id , notif_failure_criteria , dep_period , inherits_parents ) : if son_id in self : son = self [ son_id ] else : msg = "Dependency son (%s) unknown, configuration error" % son_id self . add_error ( msg ) parent = self [ parent_id ] son . act_depend_of . append ( ( parent_id , notif_failure_criteria , dep_period , inherits_parents ) ) parent . act_depend_of_me . append ( ( son_id , notif_failure_criteria , dep_period , inherits_parents ) ) # TODO: Is it necessary? We already have this info in act_depend_* attributes son . parent_dependencies . add ( parent_id ) parent . child_dependencies . add ( son_id )
Add a logical dependency for actions between two hosts or services .
205
12
19,744
def del_act_dependency ( self , son_id , parent_id ) : # pragma: no cover, not yet tested son = self [ son_id ] parent = self [ parent_id ] to_del = [ ] # First we remove in my list for ( host , status , timeperiod , inherits_parent ) in son . act_depend_of : if host == parent_id : to_del . append ( ( host , status , timeperiod , inherits_parent ) ) for tup in to_del : son . act_depend_of . remove ( tup ) # And now in the father part to_del = [ ] for ( host , status , timeperiod , inherits_parent ) in parent . act_depend_of_me : if host == son_id : to_del . append ( ( host , status , timeperiod , inherits_parent ) ) for tup in to_del : parent . act_depend_of_me . remove ( tup ) # Remove in child/parents dependencies too # Me in father list parent . child_dependencies . remove ( son_id ) # and father list in mine son . parent_dependencies . remove ( parent_id )
Remove act_dependency between two hosts or services .
262
11
19,745
def add_chk_dependency ( self , son_id , parent_id , notif_failure_criteria , dep_period , inherits_parents ) : son = self [ son_id ] parent = self [ parent_id ] son . chk_depend_of . append ( ( parent_id , notif_failure_criteria , 'logic_dep' , dep_period , inherits_parents ) ) parent . chk_depend_of_me . append ( ( son_id , notif_failure_criteria , 'logic_dep' , dep_period , inherits_parents ) ) # TODO: Is it necessary? We already have this info in act_depend_* attributes son . parent_dependencies . add ( parent_id ) parent . child_dependencies . add ( son_id )
Add a logical dependency for checks between two hosts or services .
186
12
19,746
def create_business_rules ( self , hosts , services , hostgroups , servicegroups , macromodulations , timeperiods ) : for item in self : item . create_business_rules ( hosts , services , hostgroups , servicegroups , macromodulations , timeperiods )
Loop on hosts or services and call SchedulingItem . create_business_rules
61
16
19,747
def get_services_by_explosion ( self , servicegroups ) : # pylint: disable=access-member-before-definition # First we tag the hg so it will not be explode # if a son of it already call it self . already_exploded = True # Now the recursive part # rec_tag is set to False every HG we explode # so if True here, it must be a loop in HG # calls... not GOOD! if self . rec_tag : logger . error ( "[servicegroup::%s] got a loop in servicegroup definition" , self . get_name ( ) ) if hasattr ( self , 'members' ) : return self . members return '' # Ok, not a loop, we tag it and continue self . rec_tag = True sg_mbrs = self . get_servicegroup_members ( ) for sg_mbr in sg_mbrs : servicegroup = servicegroups . find_by_name ( sg_mbr . strip ( ) ) if servicegroup is not None : value = servicegroup . get_services_by_explosion ( servicegroups ) if value is not None : self . add_members ( value ) if hasattr ( self , 'members' ) : return self . members return ''
Get all services of this servicegroup and add it in members container
275
13
19,748
def explode ( self ) : # We do not want a same service group to be exploded again and again # so we tag it for tmp_sg in list ( self . items . values ( ) ) : tmp_sg . already_exploded = False for servicegroup in list ( self . items . values ( ) ) : if servicegroup . already_exploded : continue # get_services_by_explosion is a recursive # function, so we must tag hg so we do not loop for tmp_sg in list ( self . items . values ( ) ) : tmp_sg . rec_tag = False servicegroup . get_services_by_explosion ( self ) # We clean the tags for tmp_sg in list ( self . items . values ( ) ) : if hasattr ( tmp_sg , 'rec_tag' ) : del tmp_sg . rec_tag del tmp_sg . already_exploded
Get services and put them in members container
195
8
19,749
def setup_logger ( logger_configuration_file , log_dir = None , process_name = '' , log_file = '' ) : # pylint: disable=too-many-branches logger_ = logging . getLogger ( ALIGNAK_LOGGER_NAME ) for handler in logger_ . handlers : if not process_name : break # Logger is already configured? if getattr ( handler , '_name' , None ) == 'daemons' : # Update the declared formats and file names with the process name # This is for unit tests purpose only: alignak_tests will be replaced # with the provided process name for hdlr in logger_ . handlers : # print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt)) if 'alignak_tests' in hdlr . formatter . _fmt : formatter = logging . Formatter ( hdlr . formatter . _fmt . replace ( "alignak_tests" , process_name ) ) hdlr . setFormatter ( formatter ) if getattr ( hdlr , 'filename' , None ) and 'alignak_tests' in hdlr . filename : hdlr . filename = hdlr . filename . _fmt . replace ( "alignak_tests" , process_name ) # print("- handler : %s (%s) -> %s" % (hdlr, hdlr.formatter._fmt, # hdlr.filename)) # else: # print("- handler : %s (%s)" % (hdlr, hdlr.formatter._fmt)) break else : if not logger_configuration_file or not os . path . exists ( logger_configuration_file ) : print ( "The logger configuration file does not exist: %s" % logger_configuration_file ) return with open ( logger_configuration_file , 'rt' ) as _file : config = json . load ( _file ) truncate = False if not process_name and not log_dir : truncate = True if not process_name : process_name = 'alignak_tests' if not log_dir : log_dir = '/tmp' # Update the declared formats with the process name for formatter in config [ 'formatters' ] : if 'format' not in config [ 'formatters' ] [ formatter ] : continue config [ 'formatters' ] [ formatter ] [ 'format' ] = config [ 'formatters' ] [ formatter ] [ 'format' ] . 
replace ( "%(daemon)s" , process_name ) # Update the declared log file names with the log directory for hdlr in config [ 'handlers' ] : if 'filename' not in config [ 'handlers' ] [ hdlr ] : continue if log_file and hdlr == 'daemons' : config [ 'handlers' ] [ hdlr ] [ 'filename' ] = log_file else : config [ 'handlers' ] [ hdlr ] [ 'filename' ] = config [ 'handlers' ] [ hdlr ] [ 'filename' ] . replace ( "%(logdir)s" , log_dir ) config [ 'handlers' ] [ hdlr ] [ 'filename' ] = config [ 'handlers' ] [ hdlr ] [ 'filename' ] . replace ( "%(daemon)s" , process_name ) if truncate and os . path . exists ( config [ 'handlers' ] [ hdlr ] [ 'filename' ] ) : with open ( config [ 'handlers' ] [ hdlr ] [ 'filename' ] , "w" ) as file_log_file : file_log_file . truncate ( ) # Configure the logger, any error will raise an exception logger_dictConfig ( config )
Configure the provided logger - get and update the content of the Json configuration file - configure the logger with this file
847
24
19,750
def set_log_console ( log_level = logging . INFO ) : # Change the logger and all its handlers log level logger_ = logging . getLogger ( ALIGNAK_LOGGER_NAME ) logger_ . setLevel ( log_level ) # Adding a console logger... csh = ColorStreamHandler ( sys . stdout ) csh . setFormatter ( Formatter ( '[%(asctime)s] %(levelname)s: [%(name)s] %(message)s' , "%Y-%m-%d %H:%M:%S" ) ) logger_ . addHandler ( csh )
Set the Alignak daemons logger have a console log handler .
141
15
19,751
def set_log_level ( log_level = logging . INFO , handlers = None ) : # print("Setting log level: %s" % (log_level)) # Change the logger and all its handlers log level logger_ = logging . getLogger ( ALIGNAK_LOGGER_NAME ) logger_ . setLevel ( log_level ) if handlers is not None : for handler in logger_ . handlers : if getattr ( handler , '_name' , None ) in handlers : handler . setLevel ( log_level )
Set the Alignak logger log level . This is mainly used for the arbiter verify code to set the log level at INFO level whatever the configured log level is set .
114
35
19,752
def make_monitoring_log ( level , message , timestamp = None , to_logger = False ) : level = level . lower ( ) if level not in [ 'debug' , 'info' , 'warning' , 'error' , 'critical' ] : return False if to_logger : logging . getLogger ( ALIGNAK_LOGGER_NAME ) . debug ( "Monitoring log: %s / %s" , level , message ) # Emit to our monitoring log logger message = message . replace ( '\r' , '\\r' ) message = message . replace ( '\n' , '\\n' ) logger_ = logging . getLogger ( MONITORING_LOGGER_NAME ) logging_function = getattr ( logger_ , level ) try : message = message . decode ( 'utf8' , 'ignore' ) except UnicodeEncodeError : pass except AttributeError : # Python 3 raises an exception! pass if timestamp : st = datetime . datetime . fromtimestamp ( timestamp ) . strftime ( '%Y-%m-%d %H:%M:%S' ) logging_function ( message , extra = { 'my_date' : st } ) else : logging_function ( message ) return True # ... and returns a brok return Brok ( { 'type' : 'monitoring_log' , 'data' : { 'level' : level , 'message' : message } } )
Function used to build the monitoring log .
318
8
19,753
def want_service_notification ( self , notifways , timeperiods , timestamp , state , n_type , business_impact , cmd = None ) : if not self . service_notifications_enabled : return False # If we are in downtime, we do not want notification for downtime_id in self . downtimes : downtime = self . downtimes [ downtime_id ] if downtime . is_in_effect : self . in_scheduled_downtime = True return False self . in_scheduled_downtime = False # Now the rest is for sub notificationways. If one is OK, we are ok # We will filter in another phase for notifway_id in self . notificationways : notifway = notifways [ notifway_id ] nw_b = notifway . want_service_notification ( timeperiods , timestamp , state , n_type , business_impact , cmd ) if nw_b : return True # Oh... no one is ok for it? so no, sorry return False
Check if notification options match the state of the service
227
10
19,754
def want_host_notification ( self , notifways , timeperiods , timestamp , state , n_type , business_impact , cmd = None ) : if not self . host_notifications_enabled : return False # If we are in downtime, we do not want notification for downtime in self . downtimes : if downtime . is_in_effect : self . in_scheduled_downtime = True return False self . in_scheduled_downtime = False # Now it's all for sub notificationways. If one is OK, we are OK # We will filter in another phase for notifway_id in self . notificationways : notifway = notifways [ notifway_id ] nw_b = notifway . want_host_notification ( timeperiods , timestamp , state , n_type , business_impact , cmd ) if nw_b : return True # Oh, nobody..so NO :) return False
Check if notification options match the state of the host
208
10
19,755
def explode ( self , contactgroups , notificationways ) : # Contactgroups property need to be fulfill for got the information self . apply_partial_inheritance ( 'contactgroups' ) # _special properties maybe came from a template, so # import them before grok ourselves for prop in Contact . special_properties : if prop == 'contact_name' : continue self . apply_partial_inheritance ( prop ) # Register ourselves into the contactsgroups we are in for contact in self : if not ( hasattr ( contact , 'contact_name' ) and hasattr ( contact , 'contactgroups' ) ) : continue for contactgroup in contact . contactgroups : contactgroups . add_member ( contact . contact_name , contactgroup . strip ( ) ) # Now create a notification way with the simple parameter of the # contacts for contact in self : need_notificationway = False params = { } for param in Contact . simple_way_parameters : if hasattr ( contact , param ) : need_notificationway = True params [ param ] = getattr ( contact , param ) elif contact . properties [ param ] . has_default : # put a default text value # Remove the value and put a default value setattr ( contact , param , contact . properties [ param ] . default ) if need_notificationway : cname = getattr ( contact , 'contact_name' , getattr ( contact , 'alias' , '' ) ) nw_name = cname + '_inner_nw' notificationways . new_inner_member ( nw_name , params ) if not hasattr ( contact , 'notificationways' ) : contact . notificationways = [ nw_name ] else : contact . notificationways = list ( contact . notificationways ) contact . notificationways . append ( nw_name )
Explode all contact for each contactsgroup
388
8
19,756
def hook_save_retention ( self , scheduler ) : if not self . enabled : logger . warning ( "Alignak retention module is not enabled." "Saving objects state is not possible." ) return None try : start_time = time . time ( ) # Get retention data from the scheduler data_to_save = scheduler . get_retention_data ( ) if not data_to_save : logger . warning ( "Alignak retention data to save are not containing any information." ) return None # Move services data to their respective hosts dictionary # Alignak scheduler do not merge the services into the host dictionary! for host_name in data_to_save [ 'hosts' ] : data_to_save [ 'hosts' ] [ host_name ] [ 'services' ] = { } data_to_save [ 'hosts' ] [ host_name ] [ 'name' ] = host_name for host_name , service_description in data_to_save [ 'services' ] : data_to_save [ 'hosts' ] [ host_name ] [ 'services' ] [ service_description ] = data_to_save [ 'services' ] [ ( host_name , service_description ) ] try : if not self . retention_file : logger . info ( 'Saving retention data to: %s' , self . retention_dir ) for host_name in data_to_save [ 'hosts' ] : file_name = os . path . join ( self . retention_dir , self . retention_file , "%s.json" % host_name ) with open ( file_name , "w" ) as fd : fd . write ( json . dumps ( data_to_save [ 'hosts' ] [ host_name ] , indent = 2 , separators = ( ',' , ': ' ) , sort_keys = True ) ) logger . debug ( '- saved: %s' , file_name ) logger . info ( 'Saved' ) else : logger . info ( 'Saving retention data to: %s' , self . retention_file ) with open ( self . retention_file , "w" ) as fd : fd . write ( json . dumps ( data_to_save [ 'hosts' ] , indent = 2 , separators = ( ',' , ': ' ) , sort_keys = True ) ) logger . info ( 'Saved' ) except Exception as exp : # pylint: disable=broad-except # pragma: no cover, should never happen... logger . warning ( "Error when saving retention data to %s" , self . retention_file ) logger . exception ( exp ) logger . 
info ( '%d hosts saved in retention' , len ( data_to_save [ 'hosts' ] ) ) self . statsmgr . counter ( 'retention-save.hosts' , len ( data_to_save [ 'hosts' ] ) ) logger . info ( '%d services saved in retention' , len ( data_to_save [ 'services' ] ) ) self . statsmgr . counter ( 'retention-save.services' , len ( data_to_save [ 'services' ] ) ) self . statsmgr . timer ( 'retention-save.time' , time . time ( ) - start_time ) logger . info ( "Retention data saved in %s seconds" , ( time . time ( ) - start_time ) ) except Exception as exp : # pylint: disable=broad-except self . enabled = False logger . warning ( "Retention save failed: %s" , exp ) logger . exception ( exp ) return False return True
Save retention data to a Json formated file
804
10
19,757
def get_check_command ( self , timeperiods , t_to_go ) : if not self . check_period or timeperiods [ self . check_period ] . is_time_valid ( t_to_go ) : return self . check_command return None
Get the check_command if we are in the check period modulation
60
13
19,758
def linkify ( self , timeperiods , commands ) : self . linkify_with_timeperiods ( timeperiods , 'check_period' ) self . linkify_one_command_with_commands ( commands , 'check_command' )
Replace check_period by real Timeperiod object into each CheckModulation Replace check_command by real Command object into each CheckModulation
56
28
19,759
def new_inner_member ( self , name = None , params = None ) : if name is None : name = 'Generated_checkmodulation_%s' % uuid . uuid4 ( ) if params is None : params = { } params [ 'checkmodulation_name' ] = name checkmodulation = CheckModulation ( params ) self . add_item ( checkmodulation )
Create a CheckModulation object and add it to items
86
11
19,760
def open ( self ) : if not self . _is_connected : print ( "Connecting to arduino on {}... " . format ( self . device ) , end = "" ) self . comm = serial . Serial ( ) self . comm . port = self . device self . comm . baudrate = self . baud_rate self . comm . timeout = self . timeout self . dtr = self . enable_dtr self . comm . open ( ) time . sleep ( self . settle_time ) self . _is_connected = True print ( "done." )
Open the serial connection .
122
5
19,761
def close ( self ) : if self . _is_connected : self . comm . close ( ) self . _is_connected = False
Close serial connection .
29
4
19,762
def receive ( self , arg_formats = None ) : # Read serial input until a command separator or empty character is # reached msg = [ [ ] ] raw_msg = [ ] escaped = False command_sep_found = False while True : tmp = self . board . read ( ) raw_msg . append ( tmp ) if escaped : # Either drop the escape character or, if this wasn't really # an escape, keep previous escape character and new character if tmp in self . _escaped_characters : msg [ - 1 ] . append ( tmp ) escaped = False else : msg [ - 1 ] . append ( self . _byte_escape_sep ) msg [ - 1 ] . append ( tmp ) escaped = False else : # look for escape character if tmp == self . _byte_escape_sep : escaped = True # or field separator elif tmp == self . _byte_field_sep : msg . append ( [ ] ) # or command separator elif tmp == self . _byte_command_sep : command_sep_found = True break # or any empty characater elif tmp == b'' : break # okay, must be something else : msg [ - 1 ] . append ( tmp ) # No message received given timeouts if len ( msg ) == 1 and len ( msg [ 0 ] ) == 0 : return None # Make sure the message terminated properly if not command_sep_found : # empty message (likely from line endings being included) joined_raw = b'' . join ( raw_msg ) if joined_raw . strip ( ) == b'' : return None err = "Incomplete message ({})" . format ( joined_raw . decode ( ) ) raise EOFError ( err ) # Turn message into fields fields = [ b'' . join ( m ) for m in msg ] # Get the command name. cmd = fields [ 0 ] . strip ( ) . decode ( ) try : cmd_name = self . _int_to_cmd_name [ int ( cmd ) ] except ( ValueError , IndexError ) : if self . give_warnings : cmd_name = "unknown" w = "Recieved unrecognized command ({})." . format ( cmd ) warnings . warn ( w , Warning ) # Figure out what formats to use for each argument. 
arg_format_list = [ ] if arg_formats != None : # The user specified formats arg_format_list = list ( arg_formats ) else : try : # See if class was initialized with a format for arguments to this # command arg_format_list = self . _cmd_name_to_format [ cmd_name ] except KeyError : # if not, guess for all arguments arg_format_list = [ "g" for i in range ( len ( fields [ 1 : ] ) ) ] # Deal with "*" format arg_format_list = self . _treat_star_format ( arg_format_list , fields [ 1 : ] ) if len ( fields [ 1 : ] ) > 0 : if len ( arg_format_list ) != len ( fields [ 1 : ] ) : err = "Number of argument formats must match the number of recieved arguments." raise ValueError ( err ) received = [ ] for i , f in enumerate ( fields [ 1 : ] ) : received . append ( self . _recv_methods [ arg_format_list [ i ] ] ( f ) ) # Record the time the message arrived message_time = time . time ( ) return cmd_name , received , message_time
Recieve commands coming off the serial port .
765
9
19,763
def _send_char ( self , value ) : if type ( value ) != str and type ( value ) != bytes : err = "char requires a string or bytes array of length 1" raise ValueError ( err ) if len ( value ) != 1 : err = "char must be a single character, not \"{}\"" . format ( value ) raise ValueError ( err ) if type ( value ) != bytes : value = value . encode ( "ascii" ) if value in self . _escaped_characters : err = "Cannot send a control character as a single char to arduino. Send as string instead." raise OverflowError ( err ) return struct . pack ( 'c' , value )
Convert a single char to a bytes object .
153
10
19,764
def _send_byte ( self , value ) : # Coerce to int. This will throw a ValueError if the value can't # actually be converted. if type ( value ) != int : new_value = int ( value ) if self . give_warnings : w = "Coercing {} into int ({})" . format ( value , new_value ) warnings . warn ( w , Warning ) value = new_value # Range check if value > 255 or value < 0 : err = "Value {} exceeds the size of the board's byte." . format ( value ) raise OverflowError ( err ) return struct . pack ( "B" , value )
Convert a numerical value into an integer then to a byte object . Check bounds for byte .
141
19
19,765
def _send_int ( self , value ) : # Coerce to int. This will throw a ValueError if the value can't # actually be converted. if type ( value ) != int : new_value = int ( value ) if self . give_warnings : w = "Coercing {} into int ({})" . format ( value , new_value ) warnings . warn ( w , Warning ) value = new_value # Range check if value > self . board . int_max or value < self . board . int_min : err = "Value {} exceeds the size of the board's int." . format ( value ) raise OverflowError ( err ) return struct . pack ( self . board . int_type , value )
Convert a numerical value into an integer then to a bytes object Check bounds for signed int .
157
19
19,766
def _send_unsigned_int ( self , value ) : # Coerce to int. This will throw a ValueError if the value can't # actually be converted. if type ( value ) != int : new_value = int ( value ) if self . give_warnings : w = "Coercing {} into int ({})" . format ( value , new_value ) warnings . warn ( w , Warning ) value = new_value # Range check if value > self . board . unsigned_int_max or value < self . board . unsigned_int_min : err = "Value {} exceeds the size of the board's unsigned int." . format ( value ) raise OverflowError ( err ) return struct . pack ( self . board . unsigned_int_type , value )
Convert a numerical value into an integer then to a bytes object . Check bounds for unsigned int .
166
20
19,767
def _send_long ( self , value ) : # Coerce to int. This will throw a ValueError if the value can't # actually be converted. if type ( value ) != int : new_value = int ( value ) if self . give_warnings : w = "Coercing {} into int ({})" . format ( value , new_value ) warnings . warn ( w , Warning ) value = new_value # Range check if value > self . board . long_max or value < self . board . long_min : err = "Value {} exceeds the size of the board's long." . format ( value ) raise OverflowError ( err ) return struct . pack ( self . board . long_type , value )
Convert a numerical value into an integer then to a bytes object . Check bounds for signed long .
157
20
19,768
def _send_unsigned_long ( self , value ) : # Coerce to int. This will throw a ValueError if the value can't # actually be converted. if type ( value ) != int : new_value = int ( value ) if self . give_warnings : w = "Coercing {} into int ({})" . format ( value , new_value ) warnings . warn ( w , Warning ) value = new_value # Range check if value > self . board . unsigned_long_max or value < self . board . unsigned_long_min : err = "Value {} exceeds the size of the board's unsigned long." . format ( value ) raise OverflowError ( err ) return struct . pack ( self . board . unsigned_long_type , value )
Convert a numerical value into an integer then to a bytes object . Check bounds for unsigned long .
166
20
19,769
def _send_string ( self , value ) : if type ( value ) != bytes : value = "{}" . format ( value ) . encode ( "ascii" ) return value
Convert a string to a bytes object . If value is not a string it is be converted to one with a standard string . format call .
39
29
19,770
def _send_bool ( self , value ) : # Sanity check. if type ( value ) != bool and value not in [ 0 , 1 ] : err = "{} is not boolean." . format ( value ) raise ValueError ( err ) return struct . pack ( "?" , value )
Convert a boolean value into a bytes object . Uses 0 and 1 as output .
62
17
19,771
def _recv_guess ( self , value ) : if self . give_warnings : w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data." . format ( value ) warnings . warn ( w , Warning ) tmp_value = value . decode ( ) try : float ( tmp_value ) if len ( tmp_value . split ( "." ) ) == 1 : # integer return int ( tmp_value ) else : # float return float ( tmp_value ) except ValueError : pass # Return as string return self . _recv_string ( value )
Take the binary spew and try to make it into a float or integer . If that can t be done return a string .
135
25
19,772
def _add_full_message ( gelf_dict , record ) : # if a traceback exists add it to the log as the full_message field full_message = None # format exception information if present if record . exc_info : full_message = '\n' . join ( traceback . format_exception ( * record . exc_info ) ) # use pre-formatted exception information in cases where the primary # exception information was removed, eg. for LogRecord serialization if record . exc_text : full_message = record . exc_text if full_message : gelf_dict [ "full_message" ] = full_message
Add the full_message field to the gelf_dict if any traceback information exists within the logging record
140
22
19,773
def _resolve_host ( fqdn , localname ) : if fqdn : return socket . getfqdn ( ) elif localname is not None : return localname return socket . gethostname ( )
Resolve the host GELF field
48
7
19,774
def _add_debugging_fields ( gelf_dict , record ) : gelf_dict . update ( { 'file' : record . pathname , 'line' : record . lineno , '_function' : record . funcName , '_pid' : record . process , '_thread_name' : record . threadName , } ) # record.processName was added in Python 2.6.2 pn = getattr ( record , 'processName' , None ) if pn is not None : gelf_dict [ '_process_name' ] = pn
Add debugging fields to the given gelf_dict
127
10
19,775
def _add_extra_fields ( gelf_dict , record ) : # skip_list is used to filter additional fields in a log message. skip_list = ( 'args' , 'asctime' , 'created' , 'exc_info' , 'exc_text' , 'filename' , 'funcName' , 'id' , 'levelname' , 'levelno' , 'lineno' , 'module' , 'msecs' , 'message' , 'msg' , 'name' , 'pathname' , 'process' , 'processName' , 'relativeCreated' , 'thread' , 'threadName' ) for key , value in record . __dict__ . items ( ) : if key not in skip_list and not key . startswith ( '_' ) : gelf_dict [ '_%s' % key ] = value
Add extra fields to the given gelf_dict
190
10
19,776
def _pack_gelf_dict ( gelf_dict ) : gelf_dict = BaseGELFHandler . _sanitize_to_unicode ( gelf_dict ) packed = json . dumps ( gelf_dict , separators = ',:' , default = BaseGELFHandler . _object_to_json ) return packed . encode ( 'utf-8' )
Convert a given gelf_dict to a JSON - encoded string thus creating an uncompressed GELF log ready for consumption by Graylog .
82
29
19,777
def _sanitize_to_unicode ( obj ) : if isinstance ( obj , dict ) : return dict ( ( BaseGELFHandler . _sanitize_to_unicode ( k ) , BaseGELFHandler . _sanitize_to_unicode ( v ) ) for k , v in obj . items ( ) ) if isinstance ( obj , ( list , tuple ) ) : return obj . __class__ ( [ BaseGELFHandler . _sanitize_to_unicode ( i ) for i in obj ] ) if isinstance ( obj , data ) : obj = obj . decode ( 'utf-8' , errors = 'replace' ) return obj
Convert all strings records of the object to unicode
147
11
19,778
def _object_to_json ( obj ) : if isinstance ( obj , datetime . datetime ) : return obj . isoformat ( ) return repr ( obj )
Convert objects that cannot be natively serialized into JSON into their string representation
36
16
19,779
def makeSocket ( self , timeout = 1 ) : plain_socket = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) if hasattr ( plain_socket , 'settimeout' ) : plain_socket . settimeout ( timeout ) wrapped_socket = ssl . wrap_socket ( plain_socket , ca_certs = self . ca_certs , cert_reqs = self . reqs , keyfile = self . keyfile , certfile = self . certfile ) wrapped_socket . connect ( ( self . host , self . port ) ) return wrapped_socket
Override SocketHandler . makeSocket to allow creating wrapped TLS sockets
130
12
19,780
def to_unicode ( string ) : if isinstance ( string , six . binary_type ) : return string . decode ( 'utf8' ) if isinstance ( string , six . text_type ) : return string if six . PY2 : return unicode ( string ) return str ( string )
Ensure a passed string is unicode
65
8
19,781
def to_utf8 ( string ) : if isinstance ( string , six . text_type ) : return string . encode ( 'utf8' ) if isinstance ( string , six . binary_type ) : return string return str ( string )
Encode a string as a UTF8 bytestring . This function could be passed a bytestring or unicode string so must distinguish between the two .
52
32
19,782
def dict_to_unicode ( raw_dict ) : decoded = { } for key , value in raw_dict . items ( ) : decoded [ to_unicode ( key ) ] = map ( to_unicode , value ) return decoded
Ensure all keys and values in a dict are unicode .
55
13
19,783
def unicode_urlencode ( query , doseq = True ) : pairs = [ ] for key , value in query . items ( ) : if isinstance ( value , list ) : value = list ( map ( to_utf8 , value ) ) else : value = to_utf8 ( value ) pairs . append ( ( to_utf8 ( key ) , value ) ) encoded_query = dict ( pairs ) xx = urlencode ( encoded_query , doseq ) return xx
Custom wrapper around urlencode to support unicode
104
10
19,784
def parse(url_str):
    """Split a URL string into its components and return them as a dict.

    Keys: host, username, password, scheme, port, path, query, fragment.
    Missing credentials come back as None, and any ``:port`` suffix is
    stripped from the host value.

    Fix: username/password are initialized to None up front — the
    original left them unbound (NameError) when the userinfo part
    contained more than one ':' (e.g. ``a:b:c@host``).
    """
    url_str = to_unicode(url_str)
    result = urlparse(url_str)
    username = password = None
    netloc_parts = result.netloc.rsplit('@', 1)
    if len(netloc_parts) == 1:
        host = netloc_parts[0]
    else:
        user_and_pass = netloc_parts[0].split(':')
        if len(user_and_pass) == 2:
            username, password = user_and_pass
        elif len(user_and_pass) == 1:
            username = user_and_pass[0]
        host = netloc_parts[1]
    if host and ':' in host:
        host = host.split(':')[0]
    return {'host': host,
            'username': username,
            'password': password,
            'scheme': result.scheme,
            'port': result.port,
            'path': result.path,
            'query': result.query,
            'fragment': result.fragment}
Extract all parts from a URL string and return them as a dictionary
234
14
19,785
def netloc(self):
    """Assemble and return the network-location string.

    Format is ``user:pass@host:port``; the credential and port pieces
    are included only when present (a password without a username is
    ignored, matching urlparse semantics).
    """
    parts = self._tuple
    if parts.username and parts.password:
        result = '%s:%s@%s' % (parts.username, parts.password, parts.host)
    elif parts.username:
        result = '%s@%s' % (parts.username, parts.host)
    else:
        result = parts.host
    if parts.port:
        result = '%s:%s' % (result, parts.port)
    return result
Return the netloc
116
4
19,786
def host(self, value=None):
    """Getter/setter for the host: with *value* return a mutated copy, else the current host."""
    if value is None:
        return self._tuple.host
    return URL._mutate(self, host=value)
Return the host
37
3
19,787
def username(self, value=None):
    """Getter/setter for the username; the getter returns it percent-decoded."""
    if value is None:
        return unicode_unquote(self._tuple.username)
    return URL._mutate(self, username=value)
Return or set the username
44
5
19,788
def password(self, value=None):
    """Getter/setter for the password; the getter returns it percent-decoded."""
    if value is None:
        return unicode_unquote(self._tuple.password)
    return URL._mutate(self, password=value)
Return or set the password
44
5
19,789
def scheme(self, value=None):
    """Getter/setter for the URL scheme (e.g. 'http', 'https')."""
    if value is None:
        return self._tuple.scheme
    return URL._mutate(self, scheme=value)
Return or set the scheme .
37
6
19,790
def path(self, value=None):
    """Getter/setter for the path.

    A set value is percent-quoted and given a leading slash when it
    lacks one; the getter returns the stored (encoded) path unchanged.
    """
    if value is None:
        return self._tuple.path
    if not value.startswith('/'):
        value = '/' + value
    return URL._mutate(self, path=unicode_quote(value))
Return or set the path
68
5
19,791
def query(self, value=None):
    """Getter/setter for the raw query string."""
    if value is None:
        return self._tuple.query
    return URL._mutate(self, query=value)
Return or set the query string
37
6
19,792
def port(self, value=None):
    """Getter/setter for the port number."""
    if value is None:
        return self._tuple.port
    return URL._mutate(self, port=value)
Return or set the port
37
5
19,793
def path_segment(self, index, value=None, default=None):
    """Return the decoded path segment at *index*, or *default* when out of range.

    With *value*, return a mutated URL whose segment at *index* is
    replaced (percent-quoted), preserving any trailing slash.
    """
    if value is not None:
        updated = list(self.path_segments())
        updated[index] = unicode_quote_path_segment(value)
        new_path = '/' + '/'.join(updated)
        if self._tuple.path.endswith('/'):
            new_path += '/'
        return URL._mutate(self, path=new_path)
    segments = self.path_segments()
    try:
        return segments[index]
    except IndexError:
        return default
Return the path segment at the given index
127
8
19,794
def path_segments(self, value=None):
    """Return the decoded path segments as a tuple, or set them from *value*.

    A trailing slash in the stored path yields no empty final segment;
    set values are percent-quoted segment by segment.
    """
    if value is not None:
        quoted = [unicode_quote_path_segment(segment) for segment in value]
        return URL._mutate(self, path='/' + '/'.join(quoted))
    raw_segments = self._tuple.path.split('/')[1:]
    if self._tuple.path.endswith('/'):
        raw_segments.pop()
    return tuple(unicode_unquote(segment) for segment in raw_segments)
Return the path segments
129
4
19,795
def add_path_segment(self, value):
    """Append *value* as a new final path segment and return the mutated URL."""
    return self.path_segments(self.path_segments() + (to_unicode(value),))
Add a new path segment to the end of the current string
44
12
19,796
def query_param(self, key, value=None, default=None, as_list=False):
    """Read or replace the query parameter *key*.

    Setter (value given): returns a mutated URL with *key* set; list and
    tuple values are coerced element-wise to unicode, scalars as one value.
    Getter: returns the value (unwrapped when there is exactly one,
    unless *as_list* is true), or *default* when the key is missing.
    """
    params = self.query_params()
    if value is not None:
        # All strings must be unicode before re-encoding the query.
        if isinstance(value, (list, tuple)):
            coerced = [to_unicode(item) for item in value]
        else:
            coerced = to_unicode(value)
        params[to_unicode(key)] = coerced
        return URL._mutate(self, query=unicode_urlencode(params, doseq=True))
    try:
        values = params[key]
    except KeyError:
        return default
    if as_list:
        return values
    return values[0] if len(values) == 1 else values
Return or set a query parameter for the given key
167
10
19,797
def append_query_param(self, key, value):
    """Add *value* to the values of query parameter *key* and return the mutated URL."""
    current = self.query_param(key, as_list=True, default=[])
    current.append(value)
    return self.query_param(key, current)
Append a query parameter
52
5
19,798
def query_params(self, value=None):
    """Return the query string parsed into a dict of value lists, or set it from *value*."""
    if value is not None:
        return URL._mutate(self, query=unicode_urlencode(value, doseq=True))
    query = '' if self._tuple.query is None else self._tuple.query
    if six.PY3:
        return parse_qs(query, True)
    # Python 2.6's parse_qs needs a bytestring, so encode first and
    # decode the parsed result back to unicode.
    return dict_to_unicode(parse_qs(to_utf8(query), True))
Return or set a dictionary of query params
132
8
19,799
def remove_query_param(self, key, value=None):
    """Remove query parameter *key* entirely, or just its first occurrence of *value*.

    Returns the mutated URL; raises KeyError for a missing key and
    ValueError for a missing value, as the underlying lookups do.
    """
    params = self.query_params()
    if value is None:
        del params[key]
    else:
        params[key].remove(value)
    return URL._mutate(self, query=unicode_urlencode(params, doseq=True))
Remove a query param from a URL
93
7