idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
20,300
def _update_bird_conf_file(self, operation):
    """Update the Bird configuration file for the IP version of *operation*.

    Reads the current prefixes out of the Bird configuration, repairs the
    dummy prefix if missing, drops prefixes we don't health-check, applies
    the operation's add/remove, and atomically replaces the config file.

    Arguments:
        operation: An operation object exposing ``ip_version`` and an
            ``update(prefixes)`` method that mutates the prefix list.

    Returns:
        True if the Bird configuration file was rewritten, False otherwise.

    Exits the program (status 1) on unreadable/empty Bird configuration or
    on failure to replace the configuration file.
    """
    conf_updated = False
    prefixes = []
    ip_version = operation.ip_version
    config_file = self.bird_configuration[ip_version]['config_file']
    variable_name = self.bird_configuration[ip_version]['variable_name']
    changes_counter = self.bird_configuration[ip_version]['changes_counter']
    dummy_ip_prefix = self.bird_configuration[ip_version]['dummy_ip_prefix']
    try:
        prefixes = get_ip_prefixes_from_bird(config_file)
    except OSError as error:
        self.log.error("failed to open Bird configuration %s, this is a "
                       "FATAL error, thus exiting main program", error)
        sys.exit(1)

    # An empty Bird configuration means something is badly broken upstream.
    if not prefixes:
        self.log.error("found empty bird configuration %s, this is a FATAL"
                       " error, thus exiting main program", config_file)
        sys.exit(1)

    if dummy_ip_prefix not in prefixes:
        self.log.warning("dummy IP prefix %s wasn't found in bird "
                         "configuration, adding it. This shouldn't have "
                         "happened!", dummy_ip_prefix)
        prefixes.insert(0, dummy_ip_prefix)
        conf_updated = True

    # Prefixes present in Bird but absent from our configured checks are
    # stripped out: nobody is monitoring them, so we must not announce them.
    ip_prefixes_without_check = set(prefixes).difference(
        self.ip_prefixes[ip_version])
    if ip_prefixes_without_check:
        self.log.warning("found %s IP prefixes in Bird configuration but "
                         "we aren't configured to run health checks on "
                         "them. Either someone modified the configuration "
                         "manually or something went horrible wrong. We "
                         "remove them from Bird configuration",
                         ','.join(ip_prefixes_without_check))
        # This is faster than using lambda and filter.
        # NOTE: We don't use remove method as we want to remove more than
        # occurrences of the IP prefixes without check.
        prefixes[:] = (ip for ip in prefixes
                       if ip not in ip_prefixes_without_check)
        conf_updated = True

    # Update the list of IP prefixes based on the status of health check.
    if operation.update(prefixes):
        conf_updated = True

    if not conf_updated:
        self.log.info('no updates for bird configuration')
        return conf_updated

    if self.bird_configuration[ip_version]['keep_changes']:
        archive_bird_conf(config_file, changes_counter)

    # some IP prefixes are either removed or added, create
    # configuration with new data.
    tempname = write_temp_bird_conf(
        dummy_ip_prefix,
        config_file,
        variable_name,
        prefixes
    )
    try:
        # os.rename is atomic on the same filesystem, so readers never see
        # a partially written configuration.
        os.rename(tempname, config_file)
    except OSError as error:
        self.log.critical("failed to create Bird configuration %s, this "
                          "is a FATAL error, thus exiting main program",
                          error)
        sys.exit(1)
    else:
        self.log.info("Bird configuration for IPv%s is updated",
                      ip_version)

    # dummy_ip_prefix is always there
    if len(prefixes) == 1:
        self.log.warning("Bird configuration doesn't have IP prefixes for "
                         "any of the services we monitor! It means local "
                         "node doesn't receive any traffic")

    return conf_updated
Update BIRD configuration .
764
5
20,301
def run ( self ) : # Lunch a thread for each configuration if not self . services : self . log . warning ( "no service checks are configured" ) else : self . log . info ( "going to lunch %s threads" , len ( self . services ) ) if self . config . has_option ( 'daemon' , 'splay_startup' ) : splay_startup = self . config . getfloat ( 'daemon' , 'splay_startup' ) else : splay_startup = None for service in self . services : self . log . debug ( "lunching thread for %s" , service ) _config = { } for option , getter in SERVICE_OPTIONS_TYPE . items ( ) : try : _config [ option ] = getattr ( self . config , getter ) ( service , option ) except NoOptionError : pass # for optional settings _thread = ServiceCheck ( service , _config , self . action , splay_startup ) _thread . start ( ) # Stay running until we are stopped while True : # Fetch items from action queue operation = self . action . get ( block = True ) if isinstance ( operation , ServiceCheckDiedError ) : self . log . critical ( operation ) self . log . critical ( "This is a fatal error and the only way to " "recover is to restart, thus exiting with a " "non-zero code and let systemd act by " "triggering a restart" ) sys . exit ( 1 ) self . log . info ( "returned an item from the queue for %s with IP " "prefix %s and action to %s Bird configuration" , operation . name , operation . ip_prefix , operation ) bird_updated = self . _update_bird_conf_file ( operation ) self . action . task_done ( ) if bird_updated : ip_version = operation . ip_version if operation . bird_reconfigure_cmd is None : reconfigure_bird ( self . bird_configuration [ ip_version ] [ 'reconfigure_cmd' ] ) else : run_custom_bird_reconfigure ( operation )
Launch checks and trigger updates on BIRD configuration.
465
11
20,302
def valid_ip_prefix ( ip_prefix ) : try : ip_prefix = ipaddress . ip_network ( ip_prefix ) except ValueError : return False else : if ip_prefix . version == 4 and ip_prefix . max_prefixlen != 32 : return False if ip_prefix . version == 6 and ip_prefix . max_prefixlen != 128 : return False return True
Perform a sanity check on ip_prefix .
83
10
20,303
def get_ip_prefixes_from_config ( config , services , ip_version ) : ip_prefixes = set ( ) for service in services : ip_prefix = ipaddress . ip_network ( config . get ( service , 'ip_prefix' ) ) if ip_prefix . version == ip_version : ip_prefixes . add ( ip_prefix . with_prefixlen ) return ip_prefixes
Build a set of IP prefixes found in service configuration files .
90
13
20,304
def ip_prefixes_sanity_check ( config , bird_configuration ) : for ip_version in bird_configuration : modify_ip_prefixes ( config , bird_configuration [ ip_version ] [ 'config_file' ] , bird_configuration [ ip_version ] [ 'variable_name' ] , bird_configuration [ ip_version ] [ 'dummy_ip_prefix' ] , bird_configuration [ ip_version ] [ 'reconfigure_cmd' ] , bird_configuration [ ip_version ] [ 'keep_changes' ] , bird_configuration [ ip_version ] [ 'changes_counter' ] , ip_version )
Sanity check on IP prefixes .
149
8
20,305
def modify_ip_prefixes(config, config_file, variable_name, dummy_ip_prefix,
                       reconfigure_cmd, keep_changes, changes_counter,
                       ip_version):
    """Modify IP prefixes in Bird configuration.

    Reconciles the prefixes found in *config_file* against the service
    checks configured in *config*: repairs the variable name and the dummy
    prefix, optionally purges prefixes without a check, and — if anything
    changed — rewrites the file atomically and reconfigures Bird.

    Exits the program on unreadable Bird configuration or on failure to
    replace the configuration file.
    """
    log = logging.getLogger(PROGRAM_NAME)
    services = config.sections()
    services.remove('daemon')  # not needed during sanity check for IP-Prefixes
    update_bird_conf = False
    try:
        ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file)
    except OSError as error:
        log.error("failed to open Bird configuration %s, this is a FATAL "
                  "error, thus exiting main program", error)
        sys.exit(1)

    # Make sure the 'define <name> =' statement matches what we expect.
    _name = get_variable_name_from_bird(config_file)
    if _name is None:
        log.warning("failed to find variable name in %s, going to add it",
                    config_file)
        update_bird_conf = True
    elif _name != variable_name:
        log.warning("found incorrect variable name in %s, going to add the "
                    "correct one %s", _name, variable_name)
        update_bird_conf = True

    if dummy_ip_prefix not in ip_prefixes_in_bird:
        log.warning("dummy IP prefix %s is missing from bird configuration "
                    "%s, adding it", dummy_ip_prefix, config_file)
        ip_prefixes_in_bird.insert(0, dummy_ip_prefix)
        update_bird_conf = True

    # Find IP prefixes in Bird configuration without a check.
    ip_prefixes_with_check = get_ip_prefixes_from_config(config,
                                                         services,
                                                         ip_version)
    # dummy_ip_prefix doesn't have a config by design
    ip_prefixes_with_check.add(dummy_ip_prefix)
    ip_prefixes_without_check = set(ip_prefixes_in_bird).difference(
        ip_prefixes_with_check)
    if ip_prefixes_without_check:
        if config.getboolean('daemon', 'purge_ip_prefixes'):
            log.warning("removing IP prefix(es) %s from %s because they don't "
                        "have a service check configured",
                        ','.join(ip_prefixes_without_check),
                        config_file)
            # Rebuild in place so existing references see the filtered list.
            ip_prefixes_in_bird[:] = (ip for ip in ip_prefixes_in_bird
                                      if ip not in ip_prefixes_without_check)
            update_bird_conf = True
        else:
            log.warning("found IP prefixes %s in %s without a service "
                        "check configured",
                        ','.join(ip_prefixes_without_check),
                        config_file)

    if update_bird_conf:
        if keep_changes:
            archive_bird_conf(config_file, changes_counter)
        tempname = write_temp_bird_conf(dummy_ip_prefix,
                                        config_file,
                                        variable_name,
                                        ip_prefixes_in_bird)
        try:
            # Atomic replacement; readers never see a partial file.
            os.rename(tempname, config_file)
        except OSError as error:
            msg = ("CRITICAL: failed to create Bird configuration {e}, "
                   "this is FATAL error, thus exiting main program"
                   .format(e=error))
            sys.exit("{m}".format(m=msg))
        else:
            log.info("Bird configuration for IPv%s is updated", ip_version)
        reconfigure_bird(reconfigure_cmd)
Modify IP prefixes in Bird configuration .
775
9
20,306
def load_configuration ( config_file , config_dir , service_file ) : config_files = [ config_file ] config = configparser . ConfigParser ( ) config . read_dict ( DEFAULT_OPTIONS ) if not os . path . isfile ( config_file ) : raise ValueError ( "{f} configuration file either isn't readable or " "doesn't exist" . format ( f = config_file ) ) if service_file is not None : if not os . path . isfile ( service_file ) : raise ValueError ( "{f} configuration file for a service check " "doesn't exist" . format ( f = service_file ) ) else : config_files . append ( service_file ) elif config_dir is not None : if not os . path . isdir ( config_dir ) : raise ValueError ( "{d} directory with configuration files for " "service checks doesn't exist" . format ( d = config_dir ) ) else : config_files . extend ( glob . glob ( os . path . join ( config_dir , '*.conf' ) ) ) try : config . read ( config_files ) except configparser . Error as exc : raise ValueError ( exc ) configuration_check ( config ) bird_configuration = build_bird_configuration ( config ) create_bird_config_files ( bird_configuration ) return config , bird_configuration
Build configuration objects .
303
4
20,307
def configuration_check ( config ) : log_level = config . get ( 'daemon' , 'loglevel' ) num_level = getattr ( logging , log_level . upper ( ) , None ) pidfile = config . get ( 'daemon' , 'pidfile' ) # Catch the case where the directory, under which we store the pid file, is # missing. if not os . path . isdir ( os . path . dirname ( pidfile ) ) : raise ValueError ( "{d} doesn't exit" . format ( d = os . path . dirname ( pidfile ) ) ) if not isinstance ( num_level , int ) : raise ValueError ( 'Invalid log level: {}' . format ( log_level ) ) for _file in 'log_file' , 'stderr_file' : if config . has_option ( 'daemon' , _file ) : try : touch ( config . get ( 'daemon' , _file ) ) except OSError as exc : raise ValueError ( exc ) for option , getter in DAEMON_OPTIONS_TYPE . items ( ) : try : getattr ( config , getter ) ( 'daemon' , option ) except configparser . NoOptionError as error : if option not in DAEMON_OPTIONAL_OPTIONS : raise ValueError ( error ) except configparser . Error as error : raise ValueError ( error ) except ValueError as exc : msg = ( "invalid data for '{opt}' option in daemon section: {err}" . format ( opt = option , err = exc ) ) raise ValueError ( msg ) service_configuration_check ( config )
Perform a sanity check on configuration .
366
8
20,308
def build_bird_configuration(config):
    """Build bird configuration structure.

    Builds a dictionary, keyed by IP version (4 or 6), holding the Bird
    settings for each enabled IP version.

    Arguments:
        config (configparser.ConfigParser): A configuration object.

    Returns:
        A dict mapping IP version to its Bird settings dict.

    Raises:
        ValueError: If a dummy IP prefix is invalid.
    """
    # IMPROVEMENT: the IPv4 and IPv6 branches were near-exact duplicates;
    # the per-version option names are now parametrized in a helper.
    bird_configuration = {}
    if config.getboolean('daemon', 'ipv4'):
        bird_configuration[4] = _bird_settings_for_ip_version(config, 4)
    if config.getboolean('daemon', 'ipv6'):
        bird_configuration[6] = _bird_settings_for_ip_version(config, 6)

    return bird_configuration


def _bird_settings_for_ip_version(config, ip_version):
    """Collect Bird settings for one IP version (helper).

    Option names differ between versions only by a '6' infix and the
    dummy-prefix option name.
    """
    if ip_version == 4:
        conf_option = 'bird_conf'
        dummy_option = 'dummy_ip_prefix'
        variable_option = 'bird_variable'
        reconfigure_option = 'bird_reconfigure_cmd'
        keep_option = 'bird_keep_changes'
        counter_option = 'bird_changes_counter'
    else:
        conf_option = 'bird6_conf'
        dummy_option = 'dummy_ip6_prefix'
        variable_option = 'bird6_variable'
        reconfigure_option = 'bird6_reconfigure_cmd'
        keep_option = 'bird6_keep_changes'
        counter_option = 'bird6_changes_counter'

    if os.path.islink(config.get('daemon', conf_option)):
        # Resolve symbolic links so the atomic rename later targets the
        # real file rather than replacing the link itself.
        config_file = os.path.realpath(config.get('daemon', conf_option))
        print("'{o}' is set to a symbolic link ({s} -> {d}, but we "
              "will use the canonical path of that link"
              .format(o=conf_option,
                      s=config.get('daemon', conf_option),
                      d=config_file))
    else:
        config_file = config.get('daemon', conf_option)

    dummy_ip_prefix = config.get('daemon', dummy_option)
    if not valid_ip_prefix(dummy_ip_prefix):
        raise ValueError("invalid dummy IPv{v} prefix: {i}"
                         .format(v=ip_version, i=dummy_ip_prefix))

    return {
        'config_file': config_file,
        'variable_name': config.get('daemon', variable_option),
        'dummy_ip_prefix': dummy_ip_prefix,
        'reconfigure_cmd': config.get('daemon', reconfigure_option),
        'keep_changes': config.getboolean('daemon', keep_option),
        'changes_counter': config.getint('daemon', counter_option),
    }
Build bird configuration structure .
741
5
20,309
def get_variable_name_from_bird ( bird_conf ) : bird_variable_pattern = re . compile ( r''' ^\s* define\s+ (?P<name>\S+\b) \s+ = ''' , re . VERBOSE ) with open ( bird_conf , 'r' ) as content : for line in content . readlines ( ) : variable_match = bird_variable_pattern . search ( line ) if variable_match : return variable_match . group ( 'name' ) return None
Return the variable name set in Bird configuration .
118
9
20,310
def create_bird_config_files ( bird_configuration ) : for ip_version in bird_configuration : # This creates the file if it doesn't exist. config_file = bird_configuration [ ip_version ] [ 'config_file' ] try : touch ( config_file ) except OSError as exc : raise ValueError ( "failed to create {f}:{e}" . format ( f = config_file , e = exc ) ) if bird_configuration [ ip_version ] [ 'keep_changes' ] : history_dir = os . path . join ( os . path . dirname ( config_file ) , 'history' ) try : os . mkdir ( history_dir ) except FileExistsError : pass except OSError as exc : raise ValueError ( "failed to make directory {d} for keeping a " "history of changes for {b}:{e}" . format ( d = history_dir , b = config_file , e = exc ) ) else : print ( "{d} is created" . format ( d = history_dir ) )
Create bird configuration files per IP version .
237
8
20,311
def running ( processid ) : try : # From kill(2) # If sig is 0 (the null signal), error checking is performed but no # signal is actually sent. The null signal can be used to check the # validity of pid os . kill ( processid , 0 ) except OverflowError as exc : print ( "checking validity of pid ({p}) failed with: {e}" . format ( p = processid , e = exc ) ) sys . exit ( 1 ) except OSError : return False else : return True
Check the validity of a process ID .
113
8
20,312
def get_ip_prefixes_from_bird ( filename ) : prefixes = [ ] with open ( filename , 'r' ) as bird_conf : lines = bird_conf . read ( ) for line in lines . splitlines ( ) : line = line . strip ( ', ' ) if valid_ip_prefix ( line ) : prefixes . append ( line ) return prefixes
Build a list of IP prefixes found in Bird configuration .
82
12
20,313
def reconfigure_bird ( cmd ) : log = logging . getLogger ( PROGRAM_NAME ) cmd = shlex . split ( cmd ) log . info ( "reconfiguring BIRD by running %s" , ' ' . join ( cmd ) ) try : output = subprocess . check_output ( cmd , timeout = 2 , stderr = subprocess . STDOUT , universal_newlines = True , ) except subprocess . TimeoutExpired : log . error ( "reconfiguring bird timed out" ) return except subprocess . CalledProcessError as error : # birdc returns 0 even when it fails due to invalid config, # but it returns 1 when BIRD is down. log . error ( "reconfiguring BIRD failed, either BIRD daemon is down or " "we don't have privileges to reconfigure it (sudo problems?)" ":%s" , error . output . strip ( ) ) return except FileNotFoundError as error : log . error ( "reconfiguring BIRD failed with: %s" , error ) return # 'Reconfigured' string will be in the output if and only if conf is valid. pattern = re . compile ( '^Reconfigured$' , re . MULTILINE ) if pattern . search ( str ( output ) ) : log . info ( 'reconfigured BIRD daemon' ) else : # We will end up here only if we generated an invalid conf # or someone broke bird.conf. log . error ( "reconfiguring BIRD returned error, most likely we generated" " an invalid configuration file or Bird configuration in is " "broken:%s" , output )
Reconfigure BIRD daemon .
351
7
20,314
def write_temp_bird_conf ( dummy_ip_prefix , config_file , variable_name , prefixes ) : log = logging . getLogger ( PROGRAM_NAME ) comment = ( "# {i} is a dummy IP Prefix. It should NOT be used and " "REMOVED from the constant." . format ( i = dummy_ip_prefix ) ) # the temporary file must be on the same filesystem as the bird config # as we use os.rename to perform an atomic update on the bird config. # Thus, we create it in the same directory that bird config is stored. tm_file = os . path . join ( os . path . dirname ( config_file ) , str ( time . time ( ) ) ) log . debug ( "going to write to %s" , tm_file ) try : with open ( tm_file , 'w' ) as tmpf : tmpf . write ( "# Generated {t} by {n} (pid={p})\n" . format ( t = datetime . datetime . now ( ) , n = PROGRAM_NAME , p = os . getpid ( ) ) ) tmpf . write ( "{c}\n" . format ( c = comment ) ) tmpf . write ( "define {n} =\n" . format ( n = variable_name ) ) tmpf . write ( "{s}[\n" . format ( s = 4 * ' ' ) ) # all entries of the array need a trailing comma except the last # one. A single element array doesn't need a trailing comma. tmpf . write ( ',\n' . join ( [ ' ' * 8 + n for n in prefixes ] ) ) tmpf . write ( "\n{s}];\n" . format ( s = 4 * ' ' ) ) except OSError as error : log . critical ( "failed to write temporary file %s: %s. This is a FATAL " "error, this exiting main program" , tm_file , error ) sys . exit ( 1 ) else : return tm_file
Write the list of IP prefixes to a temporary file.
453
13
20,315
def archive_bird_conf ( config_file , changes_counter ) : log = logging . getLogger ( PROGRAM_NAME ) history_dir = os . path . join ( os . path . dirname ( config_file ) , 'history' ) dst = os . path . join ( history_dir , str ( time . time ( ) ) ) log . debug ( "coping %s to %s" , config_file , dst ) history = [ x for x in os . listdir ( history_dir ) if os . path . isfile ( os . path . join ( history_dir , x ) ) ] if len ( history ) > changes_counter : log . info ( "threshold of %s is reached, removing old files" , changes_counter ) for _file in sorted ( history , reverse = True ) [ changes_counter - 1 : ] : _path = os . path . join ( history_dir , _file ) try : os . remove ( _path ) except OSError as exc : log . warning ( "failed to remove %s: %s" , _file , exc ) else : log . info ( "removed %s" , _path ) try : shutil . copy2 ( config_file , dst ) except OSError as exc : log . warning ( "failed to copy %s to %s: %s" , config_file , dst , exc )
Keep a history of Bird configuration files .
303
8
20,316
def update_pidfile ( pidfile ) : try : with open ( pidfile , mode = 'r' ) as _file : pid = _file . read ( 1024 ) . rstrip ( ) try : pid = int ( pid ) except ValueError : print ( "cleaning stale pidfile with invalid data:'{}'" . format ( pid ) ) write_pid ( pidfile ) else : if running ( pid ) : # This is to catch migration issues from 0.7.x to 0.8.x # version, where old process is still around as it failed to # be stopped. Since newer version has a different locking # mechanism, we can end up with both versions running. # In order to avoid this situation we refuse to startup. sys . exit ( "process {} is already running" . format ( pid ) ) else : # pidfile exists with a PID for a process that is not running. # Let's update PID. print ( "updating stale processID({}) in pidfile" . format ( pid ) ) write_pid ( pidfile ) except FileNotFoundError : # Either it's 1st time we run or previous run was terminated # successfully. print ( "creating pidfile {f}" . format ( f = pidfile ) ) write_pid ( pidfile ) except OSError as exc : sys . exit ( "failed to update pidfile:{e}" . format ( e = exc ) )
Update pidfile .
302
4
20,317
def write_pid ( pidfile ) : pid = str ( os . getpid ( ) ) try : with open ( pidfile , mode = 'w' ) as _file : print ( "writing processID {p} to pidfile" . format ( p = pid ) ) _file . write ( pid ) except OSError as exc : sys . exit ( "failed to write pidfile:{e}" . format ( e = exc ) )
Write processID to the pidfile .
95
8
20,318
def shutdown ( pidfile , signalnb = None , frame = None ) : log = logging . getLogger ( PROGRAM_NAME ) log . info ( "received %s at %s" , signalnb , frame ) log . info ( "going to remove pidfile %s" , pidfile ) # no point to catch possible errors when we delete the pid file os . unlink ( pidfile ) log . info ( 'shutdown is complete' ) sys . exit ( 0 )
Clean up pidfile upon shutdown .
101
7
20,319
def run_custom_bird_reconfigure ( operation ) : log = logging . getLogger ( PROGRAM_NAME ) if isinstance ( operation , AddOperation ) : status = 'up' else : status = 'down' cmd = shlex . split ( operation . bird_reconfigure_cmd + " " + status ) log . info ( "reconfiguring BIRD by running custom command %s" , ' ' . join ( cmd ) ) try : proc = subprocess . Popen ( cmd , start_new_session = True , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) _ , errs = proc . communicate ( timeout = operation . bird_reconfigure_timeout ) except OSError as exc : log . error ( "reconfiguring BIRD failed with: %s" , exc ) except subprocess . TimeoutExpired as exc : log . error ( "reconfiguring bird timed out" ) if proc . poll ( ) is None : # if process is still alive try : os . killpg ( os . getpgid ( proc . pid ) , signal . SIGTERM ) except PermissionError as exc : log . error ( "failed to terminate custom bird command: %s" , exc ) else : if proc . returncode != 0 : log . error ( "reconfiguring BIRD failed with return code: %s and " "stderr: %s" , proc . returncode , errs ) else : log . info ( "custom command successfully reconfigured Bird" )
Reconfigure BIRD daemon by running a custom command .
334
12
20,320
def update ( self , prefixes ) : if self . ip_prefix not in prefixes : prefixes . append ( self . ip_prefix ) self . log . info ( "announcing %s for %s" , self . ip_prefix , self . name ) return True return False
Add a value to the list .
61
7
20,321
def write ( self , string ) : string = string . rstrip ( ) if string : # Don't log empty lines self . logger . critical ( string )
Erase newline from a string and write to the logger .
33
13
20,322
def process_log_record ( self , log_record ) : log_record [ "version" ] = __version__ log_record [ "program" ] = PROGRAM_NAME log_record [ "service_name" ] = log_record . pop ( 'threadName' , None ) # return jsonlogger.JsonFormatter.process_log_record(self, log_record) return log_record
Add custom record keys and rename the threadName key.
89
10
20,323
def get_vexrc ( options , environ ) : # Complain if user specified nonexistent file with --config. # But we don't want to complain just because ~/.vexrc doesn't exist. if options . config and not os . path . exists ( options . config ) : raise exceptions . InvalidVexrc ( "nonexistent config: {0!r}" . format ( options . config ) ) filename = options . config or os . path . expanduser ( '~/.vexrc' ) vexrc = config . Vexrc . from_file ( filename , environ ) return vexrc
Get a representation of the contents of the config file .
129
11
20,324
def get_cwd ( options ) : if not options . cwd : return None if not os . path . exists ( options . cwd ) : raise exceptions . InvalidCwd ( "can't --cwd to invalid path {0!r}" . format ( options . cwd ) ) return options . cwd
Discover what directory the command should run in .
67
9
20,325
def get_virtualenv_path ( ve_base , ve_name ) : if not ve_base : raise exceptions . NoVirtualenvsDirectory ( "could not figure out a virtualenvs directory. " "make sure $HOME is set, or $WORKON_HOME," " or set virtualenvs=something in your .vexrc" ) # Using this requires get_ve_base to pass through nonexistent dirs if not os . path . exists ( ve_base ) : message = ( "virtualenvs directory {0!r} not found. " "Create it or use vex --make to get started." ) . format ( ve_base ) raise exceptions . NoVirtualenvsDirectory ( message ) if not ve_name : raise exceptions . InvalidVirtualenv ( "no virtualenv name" ) # n.b.: if ve_name is absolute, ve_base is discarded by os.path.join, # and an absolute path will be accepted as first arg. # So we check if they gave an absolute path as ve_name. # But we don't want this error if $PWD == $WORKON_HOME, # in which case 'foo' is a valid relative path to virtualenv foo. ve_path = os . path . join ( ve_base , ve_name ) if ve_path == ve_name and os . path . basename ( ve_name ) != ve_name : raise exceptions . InvalidVirtualenv ( 'To run in a virtualenv by its path, ' 'use "vex --path {0}"' . format ( ve_path ) ) ve_path = os . path . abspath ( ve_path ) if not os . path . exists ( ve_path ) : raise exceptions . InvalidVirtualenv ( "no virtualenv found at {0!r}." . format ( ve_path ) ) return ve_path
Check a virtualenv path raising exceptions to explain problems .
398
11
20,326
def get_command ( options , vexrc , environ ) : command = options . rest if not command : command = vexrc . get_shell ( environ ) if command and command [ 0 ] . startswith ( '--' ) : raise exceptions . InvalidCommand ( "don't put flags like '%s' after the virtualenv name." % command [ 0 ] ) if not command : raise exceptions . InvalidCommand ( "no command given" ) return command
Get a command to run .
98
6
20,327
def main ( ) : argv = sys . argv [ 1 : ] returncode = 1 try : returncode = _main ( os . environ , argv ) except exceptions . InvalidArgument as error : if error . message : sys . stderr . write ( "Error: " + error . message + '\n' ) else : raise sys . exit ( returncode )
The main command - line entry point with system interactions .
82
11
20,328
def get_processid ( config ) : pidfile = config . get ( 'daemon' , 'pidfile' , fallback = None ) if pidfile is None : raise ValueError ( "Configuration doesn't have pidfile option!" ) try : with open ( pidfile , 'r' ) as _file : pid = _file . read ( ) . rstrip ( ) try : pid = int ( pid ) except ValueError : raise ValueError ( "stale pid file with invalid data:{}" . format ( pid ) ) else : if pid in [ - 1 , 1 ] : raise ValueError ( "invalid PID ({})" . format ( pid ) ) else : return pid except OSError as exc : if exc . errno == 2 : print ( "CRITICAL: anycast-healthchecker could be down as pid file " "{} doesn't exist" . format ( pidfile ) ) sys . exit ( 2 ) else : raise ValueError ( "error while reading pid file:{}" . format ( exc ) )
Return process id of anycast - healthchecker .
220
11
20,329
def parse_services ( config , services ) : enabled = 0 for service in services : check_disabled = config . getboolean ( service , 'check_disabled' ) if not check_disabled : enabled += 1 return enabled
Parse configuration to return number of enabled service checks .
47
11
20,330
def main():
    """Run check.

    Nagios-style check: verifies the anycast-healthchecker daemon is up
    and that one thread per enabled service check is running. Prints a
    status line and exits with the conventional code (0 OK, 2 CRITICAL,
    3 UNKNOWN).
    """
    arguments = docopt(__doc__)
    config_file = '/etc/anycast-healthchecker.conf'
    config_dir = '/etc/anycast-healthchecker.d'
    config = configparser.ConfigParser()
    config_files = [config_file]
    config_files.extend(glob.glob(os.path.join(config_dir, '*.conf')))
    config.read(config_files)

    try:
        pid = get_processid(config)
    except ValueError as exc:
        print("UNKNOWN: {e}".format(e=exc))
        sys.exit(3)
    else:
        process_up = running(pid)

    if not process_up:
        print("CRITICAL: anycast-healthchecker with pid ({p}) isn't running"
              .format(p=pid))
        sys.exit(3)

    services = config.sections()
    services.remove('daemon')
    if not services:
        print("UNKNOWN: No service checks are configured")
        sys.exit(3)

    enabled_service_checks = parse_services(config, services)
    if enabled_service_checks == 0:
        print("OK: Number of service checks is zero, no threads are running")
        sys.exit(0)
    else:
        # parent process plus number of threads for each service check
        configured_threads = enabled_service_checks + 1
        # 'ps h -T -p <pid>' prints one line per thread of the process.
        cmd = ['/bin/ps', 'h', '-T', '-p', '{n}'.format(n=pid)]
        try:
            if arguments['-v']:
                print("running {}".format(' '.join(cmd)))
            out = subprocess.check_output(cmd, timeout=1)
        except subprocess.CalledProcessError as exc:
            print("UNKNOWN: running '{c}' failed with return code: {r}"
                  .format(c=' '.join(cmd), r=exc.returncode))
            sys.exit(3)
        except subprocess.TimeoutExpired:
            print("UNKNOWN: running '{}' timed out".format(' '.join(cmd)))
            sys.exit(3)
        else:
            output_lines = out.splitlines()
            if arguments['-v']:
                for line in output_lines:
                    print(line)
            running_threads = len(output_lines)
            if running_threads == configured_threads:
                print("OK: UP (pid={p}) and all threads ({t}) are running"
                      .format(p=pid, t=configured_threads - 1))
                sys.exit(0)
            elif running_threads - 1 == 0:  # minus parent process
                print("CRITICAL: No threads are running OpDocs ANYCAST-03")
                sys.exit(2)
            else:
                print("CRITICAL: Found {n} running threads while configured "
                      "number of threads is {c} OpDocs ANYCAST-03"
                      .format(n=running_threads - 1,
                              c=configured_threads - 1))
                sys.exit(2)
Run check .
681
3
20,331
def scary_path ( path ) : if not path : return True assert isinstance ( path , bytes ) return not NOT_SCARY . match ( path )
Whitelist the WORKON_HOME strings we're willing to substitute into strings that we provide for the user's shell to evaluate.
33
26
20,332
def shell_config_for ( shell , vexrc , environ ) : here = os . path . dirname ( os . path . abspath ( __file__ ) ) path = os . path . join ( here , 'shell_configs' , shell ) try : with open ( path , 'rb' ) as inp : data = inp . read ( ) except FileNotFoundError as error : if error . errno != 2 : raise return b'' ve_base = vexrc . get_ve_base ( environ ) . encode ( 'ascii' ) if ve_base and not scary_path ( ve_base ) and os . path . exists ( ve_base ) : data = data . replace ( b'$WORKON_HOME' , ve_base ) return data
return completion config for the named shell .
171
8
20,333
def _run_check ( self ) : cmd = shlex . split ( self . config [ 'check_cmd' ] ) self . log . info ( "running %s" , ' ' . join ( cmd ) ) proc = subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) start_time = time . time ( ) try : outs , errs = proc . communicate ( timeout = self . config [ 'check_timeout' ] ) except subprocess . TimeoutExpired : self . log . error ( "check timed out" ) if proc . poll ( ) is None : try : proc . kill ( ) except PermissionError : self . log . warning ( "failed to kill check due to adequate " "access rights, check could be running " "under another user(root) via sudo" ) return False else : msg = "check duration {t:.3f}ms" . format ( t = ( time . time ( ) - start_time ) * 1000 ) self . log . info ( msg ) if proc . returncode != 0 : self . log . info ( "stderr from the check %s" , errs ) self . log . info ( "stdout from the check %s" , outs ) return proc . returncode == 0
Execute a check command .
286
6
20,334
def _ip_assigned ( self ) : output = [ ] cmd = [ '/sbin/ip' , 'address' , 'show' , 'dev' , self . config [ 'interface' ] , 'to' , self . ip_with_prefixlen , ] if self . ip_check_disabled : self . log . info ( "checking for IP assignment on interface %s is " "disabled" , self . config [ 'interface' ] ) return True self . log . debug ( "running %s" , ' ' . join ( cmd ) ) try : output = subprocess . check_output ( cmd , universal_newlines = True , timeout = 1 ) except subprocess . CalledProcessError as error : self . log . error ( "error checking IP-PREFIX %s: %s" , cmd , error . output ) # Because it is unlikely to ever get an error we return True return True except subprocess . TimeoutExpired : self . log . error ( "timeout running %s" , ' ' . join ( cmd ) ) # Because it is unlikely to ever get a timeout we return True return True except ValueError as error : # We have been getting intermittent ValueErrors, see here # gist.github.com/unixsurfer/67db620d87f667423f6f6e3a04e0bff5 # It has happened ~5 times and this code is executed from multiple # threads and every ~10secs on several (~40) production servers for # more than 18months. # It could be a bug in Python or system returns corrupted data. # As a consequence of the raised exception thread dies and the # service isn't monitored anymore!. So, we now catch the exception. # While checking if an IP is assigned, we get an error unrelated to # that prevents us from knowing if it's assigned. We simply don't # know. A retry logic could be a more proper solution. self . log . error ( "running %s raised ValueError exception:%s" , ' ' . join ( cmd ) , error ) return True else : if self . ip_with_prefixlen in output : # pylint: disable=E1135,R1705 msg = "{i} assigned to loopback interface" . format ( i = self . ip_with_prefixlen ) self . log . debug ( msg ) return True else : msg = ( "{i} isn't assigned to {d} interface" . format ( i = self . ip_with_prefixlen , d = self . config [ 'interface' ] ) ) self . log . 
warning ( msg ) return False self . log . debug ( "I shouldn't land here!, it is a BUG" ) return False
Check if IP prefix is assigned to loopback interface .
583
11
20,335
def _check_disabled ( self ) : if self . config [ 'check_disabled' ] : if self . config [ 'on_disabled' ] == 'withdraw' : self . log . info ( "Check is disabled and ip_prefix will be " "withdrawn" ) self . log . info ( "adding %s in the queue" , self . ip_with_prefixlen ) self . action . put ( self . del_operation ) self . log . info ( "Check is now permanently disabled" ) elif self . config [ 'on_disabled' ] == 'advertise' : self . log . info ( "check is disabled, ip_prefix wont be withdrawn" ) self . log . info ( "adding %s in the queue" , self . ip_with_prefixlen ) self . action . put ( self . add_operation ) self . log . info ( 'check is now permanently disabled' ) return True return False
Check if health check is disabled .
201
7
20,336
def run ( self ) : # Catch all possible exceptions raised by the running thread # and let parent process know about it. try : self . _run ( ) except Exception : # pylint: disable=broad-except self . action . put ( ServiceCheckDiedError ( self . name , traceback . format_exc ( ) ) )
Wrap _run method .
72
6
20,337
def main ( ) : args = docopt ( __doc__ , version = __version__ ) if args [ '--print' ] : for section in DEFAULT_OPTIONS : print ( "[{}]" . format ( section ) ) for key , value in DEFAULT_OPTIONS [ section ] . items ( ) : print ( "{k} = {v}" . format ( k = key , v = value ) ) print ( ) sys . exit ( 0 ) try : config , bird_configuration = load_configuration ( args [ '--file' ] , args [ '--dir' ] , args [ '--service-file' ] ) except ValueError as exc : sys . exit ( 'Invalid configuration: ' + str ( exc ) ) if args [ '--check' ] : print ( "OK" ) sys . exit ( 0 ) if args [ '--print-conf' ] : for section in config : print ( "[{}]" . format ( section ) ) for key , value in config [ section ] . items ( ) : print ( "{k} = {v}" . format ( k = key , v = value ) ) print ( ) sys . exit ( 0 ) try : lock_socket = socket . socket ( socket . AF_UNIX , socket . SOCK_DGRAM ) lock_socket . bind ( '\0' + "{}" . format ( PROGRAM_NAME ) ) except socket . error as exc : sys . exit ( "failed to acquire a lock by creating an abstract namespace" " socket: {}" . format ( exc ) ) else : print ( "acquired a lock by creating an abstract namespace socket: {}" . format ( lock_socket ) ) # Clean old pidfile, if it exists, and write PID to it. pidfile = config . get ( 'daemon' , 'pidfile' ) update_pidfile ( pidfile ) # Register our shutdown handler to various termination signals. shutdown_handler = partial ( shutdown , pidfile ) signal . signal ( signal . SIGHUP , shutdown_handler ) signal . signal ( signal . SIGTERM , shutdown_handler ) signal . signal ( signal . SIGABRT , shutdown_handler ) signal . signal ( signal . SIGINT , shutdown_handler ) # Set up loggers. logger = setup_logger ( config ) # Perform a sanity check on IP-Prefixes ip_prefixes_sanity_check ( config , bird_configuration ) # Create our master process. checker = healthchecker . HealthChecker ( config , bird_configuration ) logger . info ( "starting %s version %s" , PROGRAM_NAME , __version__ ) checker . run ( )
Parse CLI and starts main program .
572
8
20,338
def get_environ ( environ , defaults , ve_path ) : # Copy the parent environment, add in defaults from .vexrc. env = environ . copy ( ) env . update ( defaults ) # Leaving in existing PYTHONHOME can cause some errors if 'PYTHONHOME' in env : del env [ 'PYTHONHOME' ] # Now we have to adjust PATH to find scripts for the virtualenv... # PATH being unset/empty is OK, but ve_path must be set # or there is nothing for us to do here and it's bad. if not ve_path : raise exceptions . BadConfig ( 've_path must be set' ) if platform . system ( ) == 'Windows' : ve_bin = os . path . join ( ve_path , 'Scripts' ) else : ve_bin = os . path . join ( ve_path , 'bin' ) # If user is currently in a virtualenv, DON'T just prepend # to its path (vex foo; echo $PATH -> " /foo/bin:/bar/bin") # but don't incur this cost unless we're already in one. # activate handles this by running 'deactivate' first, we don't # have that so we have to use other ways. # This would not be necessary and things would be simpler if vex # did not have to interoperate with a ubiquitous existing tool. # virtualenv doesn't... current_ve = env . get ( 'VIRTUAL_ENV' , '' ) system_path = environ . get ( 'PATH' , '' ) segments = system_path . split ( os . pathsep ) if current_ve : # Since activate doesn't export _OLD_VIRTUAL_PATH, we are going to # manually remove the virtualenv's bin. # A virtualenv's bin should not normally be on PATH except # via activate or similar, so I'm OK with this solution. current_ve_bin = os . path . join ( current_ve , 'bin' ) try : segments . remove ( current_ve_bin ) except ValueError : raise exceptions . BadConfig ( "something set VIRTUAL_ENV prior to this vex execution, " "implying that a virtualenv is already activated " "and PATH should contain the virtualenv's bin directory. " "Unfortunately, it doesn't: it's {0!r}. " "You might want to check that PATH is not " "getting clobbered somewhere, e.g. in your shell's configs." . format ( system_path ) ) segments . 
insert ( 0 , ve_bin ) env [ 'PATH' ] = os . pathsep . join ( segments ) env [ 'VIRTUAL_ENV' ] = ve_path return env
Make an environment to run with .
596
7
20,339
def extract_key_value ( line , environ ) : segments = line . split ( "=" , 1 ) if len ( segments ) < 2 : return None key , value = segments # foo passes through as-is (with spaces stripped) # '{foo}' passes through literally # "{foo}" substitutes from environ's foo value = value . strip ( ) if value [ 0 ] == "'" and _SQUOTE_RE . match ( value ) : value = value [ 1 : - 1 ] elif value [ 0 ] == '"' and _DQUOTE_RE . match ( value ) : template = value [ 1 : - 1 ] value = template . format ( * * environ ) key = key . strip ( ) value = value . strip ( ) return key , value
Return key value from given line if present else return None .
167
12
20,340
def from_file ( cls , path , environ ) : instance = cls ( ) instance . read ( path , environ ) return instance
Make a Vexrc instance from given file in given environ .
31
14
20,341
def read ( self , path , environ ) : try : inp = open ( path , 'rb' ) except FileNotFoundError as error : if error . errno != 2 : raise return None parsing = parse_vexrc ( inp , environ ) for heading , key , value in parsing : heading = self . default_heading if heading is None else heading if heading not in self . headings : self . headings [ heading ] = OrderedDict ( ) self . headings [ heading ] [ key ] = value parsing . close ( )
Read data from file into this vexrc instance .
119
10
20,342
def get_ve_base ( self , environ ) : # set ve_base to a path we can look for virtualenvs: # 1. .vexrc # 2. WORKON_HOME (as defined for virtualenvwrapper's benefit) # 3. $HOME/.virtualenvs # (unless we got --path, then we don't need it) ve_base_value = self . headings [ self . default_heading ] . get ( 'virtualenvs' ) if ve_base_value : ve_base = os . path . expanduser ( ve_base_value ) else : ve_base = environ . get ( 'WORKON_HOME' , '' ) if not ve_base : # On Cygwin os.name == 'posix' and we want $HOME. if platform . system ( ) == 'Windows' and os . name == 'nt' : _win_drive = environ . get ( 'HOMEDRIVE' ) home = environ . get ( 'HOMEPATH' , '' ) if home : home = os . path . join ( _win_drive , home ) else : home = environ . get ( 'HOME' , '' ) if not home : home = os . path . expanduser ( '~' ) if not home : return '' ve_base = os . path . join ( home , '.virtualenvs' ) # pass through invalid paths so messages can be generated # if not os.path.exists(ve_base) or os.path.isfile(ve_base): # return '' return ve_base or ''
Find a directory to look for virtualenvs in .
344
11
20,343
def get_shell ( self , environ ) : command = self . headings [ self . default_heading ] . get ( 'shell' ) if not command and os . name != 'nt' : command = environ . get ( 'SHELL' , '' ) command = shlex . split ( command ) if command else None return command
Find a command to run .
72
6
20,344
def name ( self ) : if 'name' not in self . metainfo [ 'info' ] and self . path is not None : self . metainfo [ 'info' ] [ 'name' ] = os . path . basename ( self . path ) return self . metainfo [ 'info' ] . get ( 'name' , None )
Name of the torrent
77
4
20,345
def trackers ( self ) : announce_list = self . metainfo . get ( 'announce-list' , None ) if not announce_list : announce = self . metainfo . get ( 'announce' , None ) if announce : return [ [ announce ] ] else : return announce_list
List of tiers of announce URLs or None for no trackers
66
12
20,346
def infohash ( self ) : self . validate ( ) info = self . convert ( ) [ b'info' ] return sha1 ( bencode ( info ) ) . hexdigest ( )
SHA1 info hash
43
4
20,347
def infohash_base32 ( self ) : self . validate ( ) info = self . convert ( ) [ b'info' ] return b32encode ( sha1 ( bencode ( info ) ) . digest ( ) )
Base32 encoded SHA1 info hash
50
7
20,348
def generate ( self , callback = None , interval = 0 ) : if self . path is None : raise RuntimeError ( 'generate() called with no path specified' ) elif self . size <= 0 : raise error . PathEmptyError ( self . path ) elif not os . path . exists ( self . path ) : raise error . PathNotFoundError ( self . path ) if callback is not None : cancel = lambda * status : callback ( * status ) is not None else : cancel = lambda * status : False if os . path . isfile ( self . path ) : pieces = self . _set_pieces_singlefile ( ) elif os . path . isdir ( self . path ) : pieces = self . _set_pieces_multifile ( ) # Iterate over hashed pieces and send status information last_cb_call = 0 for filepath , pieces_done , pieces_total in pieces : now = time . time ( ) if now - last_cb_call >= interval or pieces_done >= pieces_total : last_cb_call = now if cancel ( self , filepath , pieces_done , pieces_total ) : return False return True
Hash pieces and report progress to callback
250
7
20,349
def magnet ( self , name = True , size = True , trackers = True , tracker = False , validate = True ) : if validate : self . validate ( ) parts = [ f'xt=urn:btih:{self.infohash}' ] if name : parts . append ( f'dn={utils.urlquote(self.name)}' ) if size : parts . append ( f'xl={self.size}' ) if self . trackers is not None : if tracker : parts . append ( f'tr={utils.urlquote(self.trackers[0][0])}' ) elif trackers : for tier in self . trackers : for url in tier : parts . append ( f'tr={utils.urlquote(url)}' ) return 'magnet:?' + '&' . join ( parts )
BTIH Magnet URI
182
5
20,350
def read_stream ( cls , stream , validate = True ) : try : content = stream . read ( cls . MAX_TORRENT_FILE_SIZE ) except OSError as e : raise error . ReadError ( e . errno ) else : try : metainfo_enc = bdecode ( content ) except BTFailure as e : raise error . ParseError ( ) if validate : if b'info' not in metainfo_enc : raise error . MetainfoError ( "Missing 'info'" ) elif not isinstance ( metainfo_enc [ b'info' ] , abc . Mapping ) : raise error . MetainfoError ( "'info' is not a dictionary" ) elif b'pieces' not in metainfo_enc [ b'info' ] : raise error . MetainfoError ( "Missing 'pieces' in ['info']" ) # Extract 'pieces' from metainfo because it's the only byte string # that isn't supposed to be decoded to unicode. if b'info' in metainfo_enc and b'pieces' in metainfo_enc [ b'info' ] : pieces = metainfo_enc [ b'info' ] . pop ( b'pieces' ) metainfo = utils . decode_dict ( metainfo_enc ) metainfo [ 'info' ] [ 'pieces' ] = pieces else : metainfo = utils . decode_dict ( metainfo_enc ) torrent = cls ( ) torrent . _metainfo = metainfo # Convert some values from official types to something nicer # (e.g. int -> datetime) for attr in ( 'creation_date' , 'private' ) : setattr ( torrent , attr , getattr ( torrent , attr ) ) # Auto-set 'include_md5' info = torrent . metainfo [ 'info' ] torrent . include_md5 = ( 'length' in info and 'md5sum' in info ) or ( 'files' in info and all ( 'md5sum' in fileinfo for fileinfo in info [ 'files' ] ) ) if validate : torrent . validate ( ) return torrent
Read torrent metainfo from file - like object
482
10
20,351
def read ( cls , filepath , validate = True ) : try : with open ( filepath , 'rb' ) as fh : return cls . read_stream ( fh ) except ( OSError , error . ReadError ) as e : raise error . ReadError ( e . errno , filepath ) except error . ParseError : raise error . ParseError ( filepath )
Read torrent metainfo from file
87
7
20,352
def copy ( self ) : from copy import deepcopy cp = type ( self ) ( ) cp . _metainfo = deepcopy ( self . _metainfo ) return cp
Return a new object with the same metainfo
38
10
20,353
def validated_url ( url ) : try : u = urlparse ( url ) u . port # Trigger 'invalid port' exception except Exception : raise error . URLError ( url ) else : if not u . scheme or not u . netloc : raise error . URLError ( url ) return url
Return url if valid raise URLError otherwise
67
10
20,354
def read_chunks ( filepath , chunk_size ) : try : with open ( filepath , 'rb' ) as f : while True : chunk = f . read ( chunk_size ) if chunk : yield chunk else : break # EOF except OSError as e : raise error . ReadError ( e . errno , filepath )
Generator that yields chunks from file
75
7
20,355
def calc_piece_size ( total_size , max_pieces , min_piece_size , max_piece_size ) : ps = 1 << max ( 0 , math . ceil ( math . log ( total_size / max_pieces , 2 ) ) ) if ps < min_piece_size : ps = min_piece_size if ps > max_piece_size : ps = max_piece_size return ps
Calculate piece size
91
5
20,356
def is_power_of_2 ( num ) : log = math . log2 ( num ) return int ( log ) == float ( log )
Return whether num is a power of two
31
8
20,357
def is_hidden ( path ) : for name in path . split ( os . sep ) : if name != '.' and name != '..' and name and name [ 0 ] == '.' : return True return False
Whether file or directory is hidden
46
6
20,358
def filepaths ( path , exclude = ( ) , hidden = True , empty = True ) : if not os . path . exists ( path ) : raise error . PathNotFoundError ( path ) elif not os . access ( path , os . R_OK , effective_ids = os . access in os . supports_effective_ids ) : raise error . ReadError ( errno . EACCES , path ) if os . path . isfile ( path ) : return [ path ] else : filepaths = [ ] for dirpath , dirnames , filenames in os . walk ( path ) : # Ignore hidden directory if not hidden and is_hidden ( dirpath ) : continue for filename in filenames : # Ignore hidden file if not hidden and is_hidden ( filename ) : continue filepath = os . path . join ( dirpath , filename ) # Ignore excluded file if any ( is_match ( filepath , pattern ) for pattern in exclude ) : continue else : # Ignore empty file if empty or os . path . getsize ( os . path . realpath ( filepath ) ) > 0 : filepaths . append ( filepath ) return sorted ( filepaths , key = lambda fp : fp . casefold ( ) )
Return list of absolute sorted file paths
267
7
20,359
def assert_type ( lst_or_dct , keys , exp_types , must_exist = True , check = None ) : keys = list ( keys ) keychain = [ ] while len ( keys [ : - 1 ] ) > 0 : key = keys . pop ( 0 ) try : lst_or_dct = lst_or_dct [ key ] except ( KeyError , IndexError ) : break keychain . append ( key ) keychain_str = '' . join ( f'[{key!r}]' for key in keychain ) key = keys . pop ( 0 ) if not key_exists_in_list_or_dict ( key , lst_or_dct ) : if not must_exist : return raise error . MetainfoError ( f"Missing {key!r} in {keychain_str}" ) elif not isinstance ( lst_or_dct [ key ] , exp_types ) : exp_types_str = ' or ' . join ( t . __name__ for t in exp_types ) type_str = type ( lst_or_dct [ key ] ) . __name__ raise error . MetainfoError ( f"{keychain_str}[{key!r}] must be {exp_types_str}, " f"not {type_str}: {lst_or_dct[key]!r}" ) elif check is not None and not check ( lst_or_dct [ key ] ) : raise error . MetainfoError ( f"{keychain_str}[{key!r}] is invalid: {lst_or_dct[key]!r}" )
Raise MetainfoError is not of a particular type
374
12
20,360
def error_message_and_exit ( message , error_result ) : if message : error_message ( message ) puts ( json . dumps ( error_result , indent = 2 ) ) sys . exit ( 1 )
Prints error messages in blue the failed task result and quits .
46
14
20,361
def print_prompt_values ( values , message = None , sub_attr = None ) : if message : prompt_message ( message ) for index , entry in enumerate ( values ) : if sub_attr : line = '{:2d}: {}' . format ( index , getattr ( utf8 ( entry ) , sub_attr ) ) else : line = '{:2d}: {}' . format ( index , utf8 ( entry ) ) with indent ( 3 ) : print_message ( line )
Prints prompt title and choices with a bit of formatting .
112
12
20,362
def prompt_for_input ( message , input_type = None ) : while True : output = prompt . query ( message ) if input_type : try : output = input_type ( output ) except ValueError : error_message ( 'Invalid input type' ) continue break return output
Prints prompt instruction and does basic input parsing .
60
10
20,363
def prompt_for_choice ( values , message , input_type = int , output_type = None ) : output = None while not output : index = prompt_for_input ( message , input_type = input_type ) try : output = utf8 ( values [ index ] ) except IndexError : error_message ( 'Selection out of range' ) continue if output_type : output = output_type ( output ) return output
Prints prompt with a list of choices to choose from .
94
12
20,364
def _retrieve_result ( endpoints , token_header ) : request_list = [ ( url , token_header ) for ( task_id , url ) in endpoints ] responses = concurrent_get ( request_list ) # Quick sanity check assert len ( endpoints ) == len ( responses ) responses_dic = { task_id : r . content for ( task_id , _ ) , r in zip ( endpoints , responses ) } return responses_dic
Prepare the request list and execute them concurrently .
101
10
20,365
def _build_endpoint ( self , endpoint_name ) : endpoint_relative = settings . get ( 'asmaster_endpoints' , endpoint_name ) return '%s%s' % ( self . host , endpoint_relative )
Generate an enpoint url from a setting name .
51
11
20,366
def _set_allowed_services_and_actions ( self , services ) : for service in services : self . services [ service [ 'name' ] ] = { } for action in service [ 'actions' ] : name = action . pop ( 'name' ) self . services [ service [ 'name' ] ] [ name ] = action
Expect services to be a list of service dictionaries each with name and actions keys .
72
18
20,367
def list_subscriptions ( self , service ) : data = { 'service' : service , } return self . _perform_post_request ( self . list_subscriptions_endpoint , data , self . token_header )
Asks for a list of all subscribed accounts and devices along with their statuses .
52
17
20,368
def subscribe_account ( self , username , password , service ) : data = { 'service' : service , 'username' : username , 'password' : password , } return self . _perform_post_request ( self . subscribe_account_endpoint , data , self . token_header )
Subscribe an account for a service .
64
7
20,369
def file_id_to_file_name ( file_id ) : if len ( file_id ) == 40 and re . match ( "^[a-f0-9]+$" , file_id ) : return file_id # prefix with "re_" to avoid name collision with real fileids return "re_{}" . format ( hashlib . sha1 ( file_id ) . hexdigest ( ) )
Sometimes file ids are not the file names on the device but are instead generated by the API . These are not guaranteed to be valid file names so need hashing .
93
33
20,370
def sync ( func ) : sync_timeout = 3600 # Match standard synchronous timeout. def wraps ( * args , * * kwargs ) : task = func ( * args , * * kwargs ) task . wait_for_result ( timeout = sync_timeout ) result = json . loads ( task . result ) return result return wraps
Decorator to make a task synchronous .
73
10
20,371
def fetch_data ( self ) : choices = self . available_data choices . insert ( 0 , 'All' ) selected_data_type = utils . select_item ( choices , 'Please select what data to fetch:' , 'Available data:' , ) if selected_data_type == 'All' : selected_data_type = ',' . join ( self . available_data ) utils . pending_message ( 'Performing fetch data task...' ) fetch_data_task = self . client . data ( account = self . account , data = selected_data_type , ) # Wait here for result as rest of sample app relies on it. fetch_data_task . wait_for_result ( timeout = self . timeout ) fetch_data_result = json . loads ( fetch_data_task . result ) # Write the result to file. task_id = fetch_data_task . uuid filepath = utils . get_or_create_filepath ( '%s.json' % task_id ) with open ( filepath , 'w' ) as out : json . dump ( fetch_data_result , out , indent = 2 ) utils . info_message ( 'Fetch data successful. Output file: %s.json' % task_id ) return fetch_data_result
Prompt for a data type choice and execute the fetch_data task . The results are saved to a file in json format .
282
26
20,372
def log_in ( self ) : if not self . password : # Password wasn't give, ask for it now self . password = getpass . getpass ( 'Password: ' ) utils . pending_message ( 'Performing login...' ) login_result = self . client . login ( account = self . account , password = self . password ) if 'error' in login_result : self . handle_failed_login ( login_result ) utils . info_message ( 'Login successful' )
Perform the log_in task to setup the API session for future data requests .
108
17
20,373
def get_devices ( self ) : utils . pending_message ( 'Fetching device list...' ) get_devices_task = self . client . devices ( account = self . account ) # We wait for device list info as this sample relies on it next. get_devices_task . wait_for_result ( timeout = self . timeout ) get_devices_result = json . loads ( get_devices_task . result ) self . devices = get_devices_result [ 'devices' ] utils . info_message ( 'Get devices successful' )
Execute the get_devices task and store the results in self . devices .
120
16
20,374
def download_files ( self , files ) : utils . pending_message ( "Downloading {nfiles} file{plural}..." . format ( nfiles = len ( files ) , plural = 's' if len ( files ) > 1 else '' ) ) for file in files : if 'file_id' not in file : continue def build_callback ( file ) : """Callback to save a download file result to a file on disk.""" def file_callback ( task ) : device_name = self . devices [ self . device_id ] [ 'device_name' ] path_chunks = file [ 'file_path' ] . split ( '/' ) directory = os . path . join ( 'files' , device_name , * path_chunks [ : - 1 ] ) filepath = utils . get_or_create_filepath ( file [ 'filename' ] , directory ) with open ( filepath , 'wb' ) as out : out . write ( task . result ) if settings . getboolean ( 'logging' , 'time_profile' ) : filepath = utils . append_profile_info ( filepath , task . timer ) with indent ( 4 ) : utils . print_message ( filepath ) return file_callback self . client . download_file ( account = self . account , device = self . device_id , file = file [ 'file_id' ] , callback = build_callback ( file ) )
This method uses the download_file task to retrieve binary files such as attachments images and videos .
316
19
20,375
def register_account ( self , username , service ) : data = { 'service' : service , 'username' : username , } return self . _perform_post_request ( self . register_account_endpoint , data , self . token_header )
Register an account against a service . The account that we re querying must be referenced during any future task requests - so we know which account to link the task too .
56
34
20,376
def perform_task ( self , service , task_name , account , payload , callback = None ) : data = { 'service' : service , 'action' : task_name , 'account' : account , } data . update ( payload ) response = self . _perform_post_request ( self . submit_endpoint , data , self . token_header ) task = Task ( uuid = response [ 'task_id' ] , callback = callback ) self . _pending_tasks [ task . uuid ] = task return task
Submit a task to the API . The task is executed asyncronously and a Task object is returned .
117
21
20,377
def task_status ( self , task_id ) : data = { 'task_ids' : task_id , } return self . _perform_post_request ( self . task_status_endpoint , data , self . token_header )
Find the status of a task .
54
7
20,378
def result_consumed ( self , task_id ) : logger . debug ( 'Sending result consumed message.' ) data = { 'task_ids' : task_id , } return self . _perform_post_request ( self . results_consumed_endpoint , data , self . token_header )
Report the result as successfully consumed .
68
7
20,379
def send_mails ( cls ) : if settings . CAS_NEW_VERSION_EMAIL_WARNING and settings . ADMINS : try : obj = cls . objects . get ( ) except cls . DoesNotExist : obj = NewVersionWarning . objects . create ( version = VERSION ) LAST_VERSION = utils . last_version ( ) if LAST_VERSION is not None and LAST_VERSION != obj . version : if utils . decode_version ( VERSION ) < utils . decode_version ( LAST_VERSION ) : try : send_mail ( ( '%sA new version of django-cas-server is available' ) % settings . EMAIL_SUBJECT_PREFIX , u''' A new version of the django-cas-server is available. Your version: %s New version: %s Upgrade using: * pip install -U django-cas-server * fetching the last release on https://github.com/nitmir/django-cas-server/ or on https://pypi.org/project/django-cas-server/ After upgrade, do not forget to run: * ./manage.py migrate * ./manage.py collectstatic and to reload your wsgi server (apache2, uwsgi, gunicord, etc…) --\u0020 django-cas-server ''' . strip ( ) % ( VERSION , LAST_VERSION ) , settings . SERVER_EMAIL , [ "%s <%s>" % admin for admin in settings . ADMINS ] , fail_silently = False , ) obj . version = LAST_VERSION obj . save ( ) except smtplib . SMTPException as error : # pragma: no cover (should not happen) logger . error ( "Unable to send new version mail: %s" % error )
For each new django - cas - server version if the current instance is not up to date send one mail to settings . ADMINS .
401
29
20,380
def get_proxy_url ( self , pgt ) : params = urllib_parse . urlencode ( { 'pgt' : pgt , 'targetService' : self . service_url } ) return "%s/proxy?%s" % ( self . server_url , params )
Returns proxy url given the proxy granting ticket
65
8
20,381
def get_conn ( cls ) : conn = cls . _conn if conn is None or conn . closed : conn = ldap3 . Connection ( settings . CAS_LDAP_SERVER , settings . CAS_LDAP_USER , settings . CAS_LDAP_PASSWORD , client_strategy = "RESTARTABLE" , auto_bind = True ) cls . _conn = conn return conn
Return a connection object to the ldap database
91
10
20,382
def json_encode ( obj ) : try : return json_encode . encoder . encode ( obj ) except AttributeError : json_encode . encoder = DjangoJSONEncoder ( default = six . text_type ) return json_encode ( obj )
Encode a python object to json
58
7
20,383
def context ( params ) : params [ "settings" ] = settings params [ "message_levels" ] = DEFAULT_MESSAGE_LEVELS if settings . CAS_NEW_VERSION_HTML_WARNING : LAST_VERSION = last_version ( ) params [ "VERSION" ] = VERSION params [ "LAST_VERSION" ] = LAST_VERSION if LAST_VERSION is not None : params [ "upgrade_available" ] = decode_version ( VERSION ) < decode_version ( LAST_VERSION ) else : params [ "upgrade_available" ] = False if settings . CAS_INFO_MESSAGES_ORDER : params [ "CAS_INFO_RENDER" ] = [ ] for msg_name in settings . CAS_INFO_MESSAGES_ORDER : if msg_name in settings . CAS_INFO_MESSAGES : if not isinstance ( settings . CAS_INFO_MESSAGES [ msg_name ] , dict ) : continue msg = settings . CAS_INFO_MESSAGES [ msg_name ] . copy ( ) if "message" in msg : msg [ "name" ] = msg_name # use info as default infox type msg [ "type" ] = msg . get ( "type" , "info" ) # make box discardable by default msg [ "discardable" ] = msg . get ( "discardable" , True ) msg_hash = ( six . text_type ( msg [ "message" ] ) . encode ( "utf-8" ) + msg [ "type" ] . encode ( "utf-8" ) ) # hash depend of the rendering language msg [ "hash" ] = hashlib . md5 ( msg_hash ) . hexdigest ( ) params [ "CAS_INFO_RENDER" ] . append ( msg ) return params
Function that add somes variable to the context before template rendering
403
12
20,384
def json_response ( request , data ) : data [ "messages" ] = [ ] for msg in messages . get_messages ( request ) : data [ "messages" ] . append ( { 'message' : msg . message , 'level' : msg . level_tag } ) return HttpResponse ( json . dumps ( data ) , content_type = "application/json" )
Wrapper dumping data to a json and sending it to the user with an HttpResponse
85
18
20,385
def import_attr ( path ) : # if we got a str, decode it to unicode (normally it should only contain ascii) if isinstance ( path , six . binary_type ) : path = path . decode ( "utf-8" ) # if path is not an unicode, return it unchanged (may be it is already the attribute to import) if not isinstance ( path , six . text_type ) : return path if u"." not in path : ValueError ( "%r should be of the form `module.attr` and we just got `attr`" % path ) module , attr = path . rsplit ( u'.' , 1 ) try : return getattr ( import_module ( module ) , attr ) except ImportError : raise ImportError ( "Module %r not found" % module ) except AttributeError : raise AttributeError ( "Module %r has not attribut %r" % ( module , attr ) )
transform a python dotted path to the attr
207
9
20,386
def redirect_params ( url_name , params = None ) : url = reverse ( url_name ) params = urlencode ( params if params else { } ) return HttpResponseRedirect ( url + "?%s" % params )
Redirect to url_name with params as querystring
52
11
20,387
def reverse_params ( url_name , params = None , * * kwargs ) : url = reverse ( url_name , * * kwargs ) params = urlencode ( params if params else { } ) if params : return u"%s?%s" % ( url , params ) else : return url
compute the reverse url of url_name and add to it parameters from params as querystring
69
19
20,388
def set_cookie(response, key, value, max_age):
    """Set the cookie *key* on *response* with value *value*, valid for
    *max_age* seconds, using the session cookie domain/secure settings."""
    # compute an explicit Expires header alongside Max-Age for old browsers
    expiry = datetime.utcnow() + timedelta(seconds=max_age)
    response.set_cookie(
        key,
        value,
        max_age=max_age,
        expires=datetime.strftime(expiry, "%a, %d-%b-%Y %H:%M:%S GMT"),
        domain=settings.SESSION_COOKIE_DOMAIN,
        secure=settings.SESSION_COOKIE_SECURE or None
    )
Set the cookie key on response with value value valid for max_age seconds
117
16
20,389
def get_current_url(request, ignore_params=None):
    """Given a django request, return the current http(s) url, possibly
    ignoring some GET parameters listed in *ignore_params*."""
    if ignore_params is None:
        ignore_params = set()
    scheme = u"https" if request.is_secure() else u"http"
    current = u"%s://%s%s" % (scheme, request.get_host(), request.path)
    if request.GET:
        kept = copy_params(request.GET, ignore_params)
        if kept:
            current += u"?%s" % urlencode(kept)
    return current
Given a django request, return the current http url, possibly ignoring some GET parameters
121
15
20,390
def update_url(url, params):
    """Update the query string of *url* with the key/value pairs in *params*
    and return the resulting url as unicode.

    NOTE: *params* is normalized in place so that all of its keys and values
    are bytes (callers observe this mutation).
    """
    if not isinstance(url, bytes):
        url = url.encode('utf-8')
    # normalize every key and value of params to bytes, in place
    for key, value in list(params.items()):
        if not isinstance(key, bytes):
            del params[key]
            key = key.encode('utf-8')
        if not isinstance(value, bytes):
            value = value.encode('utf-8')
        params[key] = value
    parts = list(urlparse(url))
    query = dict(parse_qsl(parts[4]))
    query.update(params)
    # sort the parameters to make their order deterministic
    encoded = urlencode(sorted(query.items()))
    if not isinstance(encoded, bytes):  # pragma: no cover in python3 urlencode return an unicode
        encoded = encoded.encode("utf-8")
    parts[4] = encoded
    return urlunparse(parts).decode('utf-8')
update parameters using params in the url query string
240
9
20,391
def unpack_nested_exception(error):
    """If exceptions are nested inside ``error.args``, descend into them and
    return the innermost one; otherwise return *error* itself."""
    index = 0
    # scan args left to right; restart the scan each time we descend
    while index < len(error.args):
        arg = error.args[index]
        if isinstance(arg, Exception):
            error = arg
            index = 0
        else:
            index += 1
    return error
If exceptions are stacked, return the innermost one
63
8
20,392
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
    """Generate a random ticket of total length *lg*; when *prefix* is given
    the result is ``prefix-<random part>``."""
    alphabet = string.ascii_letters + string.digits
    # leave room for the prefix and the dash separator
    size = lg - len(prefix or "") - 1
    random_part = u''.join(random.choice(alphabet) for _ in range(size))
    if prefix is None:
        return random_part
    return u'%s-%s' % (prefix, random_part)
Generate a ticket with prefix prefix and length lg
97
11
20,393
def crypt_salt_is_valid(salt):
    """Validate *salt* as a crypt salt supported by the system."""
    if len(salt) < 2:
        return False
    if salt[0] != '$':
        # traditional two-character salt: always accepted
        return True
    if salt[1] == '$':
        # "$$..." is never a valid scheme marker
        return False
    if '$' not in salt[1:]:
        # "$<id>" without a closing '$' is malformed
        return False
    # probe the system crypt implementation with this salt
    hashed = crypt.crypt("", salt)
    return bool(hashed) and '$' in hashed[1:]
Validate a salt as crypt salt
103
7
20,394
def check_password(method, password, hashed_password, charset):
    """
    Check that ``password`` matches ``hashed_password`` using ``method``,
    assuming text is encoded with ``charset``.

    Supported methods:
      * ``plain`` — direct byte comparison
      * ``crypt`` — system crypt(3), salt extracted from the stored hash
      * ``ldap`` — LDAP userPassword scheme, via ``LdapHashUserPassword``
      * ``hex_<digest>`` — hex digest comparison, ``<digest>`` one of
        md5, sha1, sha224, sha256, sha384, sha512

    :raises ValueError: if ``method`` is unknown, or (for ``crypt``) the
        salt is not supported by the system crypt implementation.
    """
    # normalize both values to bytes before any comparison
    if not isinstance(password, six.binary_type):
        password = password.encode(charset)
    if not isinstance(hashed_password, six.binary_type):
        hashed_password = hashed_password.encode(charset)
    if method == "plain":
        return password == hashed_password
    elif method == "crypt":
        # extract the salt: "$id$salt$..." keeps everything up to the last
        # of the first three '$'-separated fields
        if hashed_password.startswith(b'$'):
            salt = b'$'.join(hashed_password.split(b'$', 3)[:-1])
        elif hashed_password.startswith(b'_'):  # pragma: no cover old BSD format not supported
            salt = hashed_password[:9]
        else:
            # traditional DES crypt: the first two characters are the salt
            salt = hashed_password[:2]
        if six.PY3:
            # python3's crypt.crypt only accepts str, not bytes
            password = password.decode(charset)
            salt = salt.decode(charset)
            hashed_password = hashed_password.decode(charset)
        if not crypt_salt_is_valid(salt):
            raise ValueError("System crypt implementation do not support the salt %r" % salt)
        crypted_password = crypt.crypt(password, salt)
        return crypted_password == hashed_password
    elif method == "ldap":
        # re-hash the candidate password with the stored scheme and salt,
        # then compare against the stored value
        scheme = LdapHashUserPassword.get_scheme(hashed_password)
        salt = LdapHashUserPassword.get_salt(hashed_password)
        return LdapHashUserPassword.hash(scheme, password, salt, charset=charset) == hashed_password
    elif (
        method.startswith("hex_") and
        method[4:] in {"md5", "sha1", "sha224", "sha256", "sha384", "sha512"}
    ):
        # compare lowercase hex digests; the stored value is lowercased
        return getattr(hashlib, method[4:])(password).hexdigest().encode("ascii") == hashed_password.lower()
    else:
        raise ValueError("Unknown password method check %r" % method)
Check that password matches hashed_password using method, assuming the encoding is charset.
472
17
20,395
def last_version():
    """
    Fetch the latest published version from pypi and return it.

    The result is memoized on the function itself (``last_version._cache``
    as a ``(timestamp, version, success)`` tuple): a successful fetch is
    cached for 24h, a failed one for 10 min. On a failed fetch (and cold
    cache) this falls through and returns ``None``.
    """
    try:
        last_update, version, success = last_version._cache
    except AttributeError:
        # cold start: no cache attribute yet
        last_update = 0
        version = None
        success = False
    # cache lifetime depends on whether the last fetch succeeded
    cache_delta = 24 * 3600 if success else 600
    if (time.time() - last_update) < cache_delta:
        return version
    else:
        try:
            req = requests.get(settings.CAS_NEW_VERSION_JSON_URL)
            data = json.loads(req.text)
            version = data["info"]["version"]
            last_version._cache = (time.time(), version, True)
            return version
        except (
            KeyError,
            ValueError,
            requests.exceptions.RequestException
        ) as error:  # pragma: no cover (should not happen unless pypi is not available)
            logger.error(
                "Unable to fetch %s: %s" % (settings.CAS_NEW_VERSION_JSON_URL, error)
            )
            # record the failure so we retry in 10 min; note there is no
            # return here, so the caller gets None on this path
            last_version._cache = (time.time(), version, False)
Fetch the last version from pypi and return it. On successful fetch from pypi, the response is cached for 24h; on error, it is cached for 10 min.
223
34
20,396
def regexpr_validator(value):
    """Raise a ValidationError unless *value* compiles as a regular
    expression."""
    try:
        re.compile(value)
    except re.error:
        raise ValidationError(
            _('"%(value)s" is not a valid regular expression'),
            params={'value': value}
        )
Test that value is a valid regular expression
57
8
20,397
def hash(cls, scheme, password, salt=None, charset="utf8"):
    """
    Hash ``password`` with ``scheme`` using ``salt``, the three of them
    encoded in ``charset``; return ``scheme`` concatenated with the
    resulting hash.

    :raises cls.BadSalt: for the crypt fallback, if the system crypt
        implementation does not support the salt.
    """
    scheme = scheme.upper()
    cls._test_scheme(scheme)
    if salt is None or salt == b"":
        # normalize "no salt" to empty bytes and check the scheme allows it
        salt = b""
        cls._test_scheme_nosalt(scheme)
    else:
        cls._test_scheme_salt(scheme)
    try:
        # digest-based schemes: base64(digest(password + salt) + salt)
        return scheme + base64.b64encode(
            cls._schemes_to_hash[scheme](password + salt).digest() + salt
        )
    except KeyError:
        # scheme has no digest constructor: fall back to system crypt(3)
        if six.PY3:
            # python3's crypt.crypt only accepts str, not bytes
            password = password.decode(charset)
            salt = salt.decode(charset)
        if not crypt_salt_is_valid(salt):
            raise cls.BadSalt("System crypt implementation do not support the salt %r" % salt)
        hashed_password = crypt.crypt(password, salt)
        if six.PY3:
            hashed_password = hashed_password.encode(charset)
        return scheme + hashed_password
Hash password with scheme using salt, these three variables being encoded in charset.
231
17
20,398
def get_salt(cls, hashed_passord):
    """
    Return the salt of ``hashed_passord``, possibly empty.

    :raises cls.BadHash: if the base64 payload is invalid or the decoded
        hash is shorter than the scheme's digest length.
    """
    scheme = cls.get_scheme(hashed_passord)
    cls._test_scheme(scheme)
    if scheme in cls.schemes_nosalt:
        # schemes without salt: nothing to extract
        return b""
    elif scheme == b'{CRYPT}':
        # crypt format "$id$salt$hash": keep everything up to the last of
        # the first three '$'-separated fields, minus the scheme prefix
        return b'$'.join(hashed_passord.split(b'$', 3)[:-1])[len(scheme):]
    else:
        try:
            # base64 payload is digest + salt
            hashed_passord = base64.b64decode(hashed_passord[len(scheme):])
        except (TypeError, binascii.Error) as error:
            raise cls.BadHash("Bad base64: %s" % error)
        if len(hashed_passord) < cls._schemes_to_len[scheme]:
            raise cls.BadHash("Hash too short for the scheme %s" % scheme)
        # the salt is whatever follows the fixed-length digest
        return hashed_passord[cls._schemes_to_len[scheme]:]
Return the salt of hashed_passord possibly empty
238
11
20,399
def visit_snippet_latex(self, node):
    """LaTeX document generator visit handler: render a snippet node as a
    highlighted verbatim block preceded by a shaded filename box."""
    code = node.rawsource.rstrip('\n')
    lang = self.hlsettingstack[-1][0]
    # enable line numbers when the snippet is long enough per settings
    linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']

    def warner(msg):
        # attach the current file and node line to highlighter warnings
        self.builder.warn(msg, (self.curfilestack[-1], node.line))

    hlcode = self.highlighter.highlight_block(
        code, lang, warn=warner, linenos=linenos, **highlight_args
    )
    # emit the shaded filename box above the code
    self.body.append(
        '\n{\\colorbox[rgb]{0.9,0.9,0.9}'
        '{\\makebox[\\textwidth][l]'
        '{\\small\\texttt{%s}}}}\n' % (
            # Some filenames have '_', which is special in latex.
            fname.replace('_', r'\_'),
        )
    )
    if self.table:
        # inside a table, plain Verbatim breaks; switch to OriginalVerbatim
        hlcode = hlcode.replace('\\begin{Verbatim}', '\\begin{OriginalVerbatim}')
        self.table.has_problematic = True
        self.table.has_verbatim = True
    hlcode = hlcode.rstrip()[:-14]  # strip \end{Verbatim}
    hlcode = hlcode.rstrip() + '\n'
    self.body.append(
        '\n' + hlcode + '\\end{%sVerbatim}\n' % (self.table and 'Original' or '')
    )
    # Prevent rawsource from appearing in output a second time.
    raise nodes.SkipNode
Latex document generator visit handler
465
6