idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
243,100
def info(gandi, resource, id, altnames, csr, cert, all_status):
    """Display information about one or more certificates."""
    # Base fields shown for every certificate; flags add optional ones.
    fields = ['cn', 'date_created', 'date_end', 'plan', 'status']
    for flag, key in ((id, 'id'), (altnames, 'altnames'),
                      (csr, 'csr'), (cert, 'cert')):
        if flag:
            fields.append(key)

    cert_ids = []
    for res in resource:
        cert_ids.extend(gandi.certificate.usable_ids(res))

    displayed = []
    for position, cert_id in enumerate(set(cert_ids)):
        details = gandi.certificate.info(cert_id)
        if not all_status and details['status'] not in ['valid', 'pending']:
            continue
        if position:
            gandi.separator_line()
        details['plan'] = package_desc(gandi, details['package'])
        output_cert(gandi, details, fields)
        displayed.append(details)
    return displayed
Display information about a certificate .
247
6
243,101
def update(gandi, resource, csr, private_key, country, state, city,
           organisation, branch, altnames, dcv_method):
    """Update a certificate CSR and report how to follow the operation."""
    ids = gandi.certificate.usable_ids(resource)
    if len(ids) > 1:
        # Ambiguous resource: list the candidate ids and bail out.
        gandi.echo('Will not update, %s is not precise enough.' % resource)
        gandi.echo(' * cert : ' +
                   '\n * cert : '.join([str(id_) for id_ in ids]))
        return

    cert_id = ids[0]
    result = gandi.certificate.update(cert_id, csr, private_key, country,
                                      state, city, organisation, branch,
                                      altnames, dcv_method)
    gandi.echo('The certificate update operation is %s' % result['id'])
    gandi.echo('You can follow it with:')
    gandi.echo('$ gandi certificate follow %s' % result['id'])
    gandi.echo('When the operation is DONE, you can retrieve the .crt'
               ' with:')
    gandi.echo('$ gandi certificate export "%s"' % resource)
    return result
Update a certificate CSR .
260
6
243,102
def follow(gandi, resource):
    """Display the status of a certificate operation."""
    oper = gandi.oper.info(int(resource))
    # Only certificate operations may be followed here.
    assert (oper['type'].startswith('certificate_'))
    output_cert_oper(gandi, oper)
    return oper
Get the operation status
56
4
243,103
def change_dcv(gandi, resource, dcv_method):
    """Change the DCV method of a running certificate operation."""
    ids = gandi.certificate.usable_ids(resource)
    if len(ids) > 1:
        gandi.echo('Will not update, %s is not precise enough.' % resource)
        gandi.echo(' * cert : ' +
                   '\n * cert : '.join([str(id_) for id_ in ids]))
        return

    cert_id = ids[0]
    opers = gandi.oper.list({'cert_id': cert_id})
    if not opers:
        gandi.echo('Can not find any operation for this certificate.')
        return

    oper = opers[0]
    # The DCV method can only be changed while the operation is waiting
    # in the right inner step.
    if (oper['step'] != 'RUN'
            and oper['params']['inner_step'] != 'comodo_oper_updated'):
        gandi.echo('This certificate operation is not in the good step to '
                   'update the DCV method.')
        return

    gandi.certificate.change_dcv(oper['id'], dcv_method)
    cert = gandi.certificate.info(cert_id)
    gandi.certificate.advice_dcv_method(
        oper['params']['csr'], cert['package'],
        oper['params'].get('altnames'), dcv_method, cert_id=cert_id)
Change the DCV for a running certificate operation .
325
10
243,104
def resend_dcv(gandi, resource):
    """Resend the DCV validation mail for a running certificate operation."""
    ids = gandi.certificate.usable_ids(resource)
    if len(ids) > 1:
        gandi.echo('Will not update, %s is not precise enough.' % resource)
        gandi.echo(' * cert : ' +
                   '\n * cert : '.join([str(id_) for id_ in ids]))
        return

    cert_id = ids[0]
    opers = gandi.oper.list({'cert_id': cert_id})
    if not opers:
        gandi.echo('Can not find any operation for this certificate.')
        return

    oper = opers[0]
    if (oper['step'] != 'RUN'
            and oper['params']['inner_step'] != 'comodo_oper_updated'):
        gandi.echo('This certificate operation is not in the good step to '
                   'resend the DCV.')
        return
    # Only the email DCV method has a mail to resend.
    if oper['params']['dcv_method'] != 'email':
        gandi.echo('This certificate operation is not in email DCV.')
        return

    gandi.certificate.resend_dcv(oper['id'])
Resend the DCV mail .
271
7
243,105
def delete(gandi, resource, background, force):
    """Revoke a certificate, asking for confirmation unless forced."""
    ids = gandi.certificate.usable_ids(resource)
    if len(ids) > 1:
        gandi.echo('Will not delete, %s is not precise enough.' % resource)
        gandi.echo(' * cert : ' +
                   '\n * cert : '.join([str(id_) for id_ in ids]))
        return
    if not force and not click.confirm(
            "Are you sure to delete the certificate %s?" % resource):
        return
    return gandi.certificate.delete(ids[0], background)
Revoke the certificate .
141
5
243,106
def list(gandi, id, vhosts, dates, fqdns, limit):
    """List hosted certificates."""
    justify = 10
    options = {'items_per_page': limit, 'state': 'created'}
    output_keys = []
    if id:
        output_keys.append('id')
    output_keys.append('subject')
    if dates:
        output_keys.extend(['date_created', 'date_expire'])
        justify = 12  # date columns need a wider label
    if fqdns:
        output_keys.append('fqdns')
    if vhosts:
        output_keys.append('vhosts')

    result = gandi.hostedcert.list(options)
    for position, hcert in enumerate(result):
        if position:
            gandi.separator_line()
        if fqdns or vhosts:
            # fqdns/vhosts are only present in the detailed view.
            hcert = gandi.hostedcert.info(hcert['id'])
        output_hostedcert(gandi, hcert, output_keys, justify)
    return result
List hosted certificates .
223
4
243,107
def info(gandi, resource):
    """Display information about hosted certificates matching *resource*."""
    output_keys = ['id', 'subject', 'date_created', 'date_expire',
                   'fqdns', 'vhosts']
    result = gandi.hostedcert.infos(resource)
    for position, hcert in enumerate(result):
        if position:
            gandi.separator_line()
        output_hostedcert(gandi, hcert, output_keys)
    return result
Display information about a hosted certificate .
103
7
243,108
def create(gandi, private_key, certificate, certificate_id):
    """Create a new hosted certificate from a private key and a certificate.

    The certificate may be given either inline / as a file path
    (--certificate) or by id of an existing certificate (--certificate-id);
    exactly one of the two must be supplied.
    """
    if not certificate and not certificate_id:
        gandi.echo('One of --certificate or --certificate-id is needed.')
        return
    if certificate and certificate_id:
        gandi.echo('Only one of --certificate or --certificate-id is needed.')
        # Bug fix: previously execution fell through and silently ignored
        # --certificate-id; an ambiguous call is now rejected.
        return

    if os.path.isfile(private_key):
        with open(private_key) as fhandle:
            private_key = fhandle.read()

    if certificate:
        if os.path.isfile(certificate):
            with open(certificate) as fhandle:
                certificate = fhandle.read()
    else:
        cert = gandi.certificate.info(certificate_id)
        certificate = gandi.certificate.pretty_format_cert(cert)

    result = gandi.hostedcert.create(private_key, certificate)
    output_keys = ['id', 'subject', 'date_created', 'date_expire',
                   'fqdns', 'vhosts']
    output_hostedcert(gandi, result, output_keys)
    return result
Create a new hosted certificate .
244
6
243,109
def delete(gandi, resource, force):
    """Delete hosted certificates, asking for confirmation unless forced."""
    infos = gandi.hostedcert.infos(resource)
    if not infos:
        return
    if not force:
        listing = '\n'.join(['%s: %s' % (res['id'], res['subject'])
                             for res in infos])
        proceed = click.confirm('Are you sure to delete the following hosted '
                                'certificates ?\n' + listing + '\n')
        if not proceed:
            return
    for res in infos:
        gandi.hostedcert.delete(res['id'])
Delete a hosted certificate .
130
5
243,110
def flatten(l, types=(list, float)):
    """Flatten one level of nesting in *l* into a single list.

    Items that are instances of *types* and are iterable are spliced into
    the result; everything else is appended as-is.

    Bug fix: the previous implementation kept any item matching *types*
    unwrapped and then iterated every item, so a bare float (which matches
    the default *types* but is not iterable) raised TypeError.
    """
    flat = []
    for item in l:
        if isinstance(item, types) and hasattr(item, '__iter__'):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
Flat nested list of lists into a single list .
50
11
243,111
def get_width():
    """Return the usable terminal width in columns."""
    # Ask the kernel for the window size of stdout.
    raw = struct.pack("HHHH", 0, 0, 0, 0)
    raw = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, raw)
    rows, columns, xpixel, ypixel = struct.unpack("HHHH", raw)
    # Keep a small right margin: 39/40 of the width, capped at columns - 2.
    return min(columns * 39 // 40, columns - 2)
Get terminal width
99
3
243,112
def groff2man(data):
    """Render groff-formatted text into man-page output via groff."""
    width = get_width()
    cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)
    process = subprocess.Popen(cmd, shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    man_text, _stderr = process.communicate(data)
    return man_text
Read groff - formatted text and output man pages .
113
11
243,113
def extract_name(self, data):
    """Extract the man page name from the <h1> element of a web page."""
    heading = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
    # Drop any inline markup inside the heading.
    name = re.sub(r'<([^>]+)>', r'', heading)
    # Undo the two HTML entities that occur in template names.
    name = name.replace('&gt;', '>')
    return name.replace('&lt;', '<')
Extract man page name from web page .
103
9
243,114
def cache_all(self):
    """Pre-fetch every man page listed in the index into the local cache."""
    respond = input(
        'By default, cppman fetches pages on-the-fly if corresponding '
        'page is not found in the cache. The "cache-all" option is only '
        'useful if you want to view man pages offline. '
        'Caching all contents will take several minutes, '
        'do you want to continue [y/N]? ')
    if not (respond and 'yes'.startswith(respond.lower())):
        raise KeyboardInterrupt

    try:
        os.makedirs(environ.cache_dir)
    except:  # noqa: E722 -- cache dir may already exist
        pass

    self.success_count = 0
    self.failure_count = 0

    if not os.path.exists(environ.index_db):
        raise RuntimeError("can't find index.db")

    conn = sqlite3.connect(environ.index_db)
    cursor = conn.cursor()
    source = environ.config.source
    print('Caching manpages from %s ...' % source)
    data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
    for name, url, _ in data:
        print('Caching %s ...' % name)
        retries = 3
        while retries > 0:
            try:
                self.cache_man_page(source, url, name)
            except Exception:
                print('Retrying ...')
                retries -= 1
            else:
                self.success_count += 1
                break
        else:
            # All retries exhausted.
            print('Error caching %s ...' % name)
            self.failure_count += 1
    conn.close()

    print('\n%d manual pages cached successfully.' % self.success_count)
    print('%d manual pages failed to cache.' % self.failure_count)
    self.update_mandb(False)
Cache all available man pages
383
5
243,115
def cache_man_page(self, source, url, name):
    """Download *url* and store it as a gzipped groff man page."""
    # Skip if already cached, unless the forced flag asks for a refresh.
    outname = self.get_page_path(source, name)
    if os.path.exists(outname) and not self.forced:
        return
    try:
        os.makedirs(os.path.join(environ.cache_dir, source))
    except OSError:
        pass
    # The scraped HTML often contains errors (e.g. missing closing tags);
    # fixupHTML repairs it before conversion.
    data = util.fixupHTML(urllib.request.urlopen(url).read())
    # Pick the formatter module matching the source (minus its TLD suffix).
    formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
    groff_text = formatter.html2groff(data, name)
    with gzip.open(outname, 'w') as fout:
        fout.write(groff_text.encode('utf-8'))
callback to cache new man page
221
6
243,116
def man(self, pattern):
    """Look up *pattern* in the index and display its man page via the pager.

    Returns the pid of the forked pager child process.
    Raises RuntimeError when the index is missing or no entry matches.
    """
    try:
        avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
    except OSError:
        avail = []

    if not os.path.exists(environ.index_db):
        raise RuntimeError("can't find index.db")

    conn = sqlite3.connect(environ.index_db)
    try:
        cursor = conn.cursor()
        # Try an exact match first, then inside the std namespace, finally
        # any substring match.  The search value is bound as a SQL
        # parameter instead of interpolated into the statement, fixing
        # quoting problems / SQL injection with unusual patterns.  The
        # table name cannot be parameterized and comes from configuration.
        lookups = (
            ('name = ?', pattern),
            ('name = ?', 'std::%s' % pattern),
            ('name LIKE ?', '%' + pattern + '%'),
        )
        row = None
        for condition, value in lookups:
            row = cursor.execute(
                'SELECT name,url FROM "%s" WHERE %s ORDER BY LENGTH(name)'
                % (environ.source, condition), (value,)).fetchone()
            if row is not None:
                break
        if row is None:
            raise RuntimeError('No manual entry for ' + pattern)
        page_name, url = row
    finally:
        conn.close()

    page_filename = self.get_normalized_page_name(page_name)
    if self.forced or page_filename + '.3.gz' not in avail:
        self.cache_man_page(environ.source, url, page_name)

    pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
    columns = (util.get_width() if self.force_columns == -1
               else self.force_columns)

    # Hand the page off to the pager script in a child process.
    pid = os.fork()
    if pid == 0:
        os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
                 self.get_page_path(environ.source, page_name),
                 str(columns), environ.pager_config, page_name)
    return pid
Call viewer . sh to view man page
487
8
243,117
def find(self, pattern):
    """Search the index for pages matching *pattern* and print them.

    Raises RuntimeError when the index is missing or nothing matches.
    """
    if not os.path.exists(environ.index_db):
        raise RuntimeError("can't find index.db")

    conn = sqlite3.connect(environ.index_db)
    # Bind the pattern as a SQL parameter: the old code interpolated it
    # directly into the LIKE clause, which broke on quotes/% and allowed
    # SQL injection.  Also close the connection when done (was leaked).
    try:
        cursor = conn.cursor()
        selected = cursor.execute(
            'SELECT * FROM "%s" WHERE name LIKE ? ORDER BY LENGTH(name)'
            % environ.source, ('%' + pattern + '%',)).fetchall()
    finally:
        conn.close()

    if not selected:
        raise RuntimeError('%s: nothing appropriate.' % pattern)

    highlight = re.compile('(%s)' % re.escape(pattern), re.I)
    for name, url, std in selected:
        if os.isatty(sys.stdout.fileno()):
            # Highlight the match in red and the standard tag in yellow.
            print(highlight.sub(r'\033[1;31m\1\033[0m', name) +
                  (' \033[1;33m[%s]\033[0m' % std if std else ''))
        else:
            print(name + (' [%s]' % std if std else ''))
Find pages in database .
248
5
243,118
def update_mandb(self, quiet=True):
    """Refresh the system man-page database when configured to do so."""
    if not environ.config.UpdateManPath:
        return
    print('\nrunning mandb...')
    flags = ' -q' if quiet else ''
    subprocess.Popen('mandb %s' % flags, shell=True).wait()
Update mandb .
71
4
243,119
def set_default(self):
    """Reset the configuration to DEFAULTS and write it to disk.

    Bug fix: the directory-creation guard used a bare ``except`` which
    also swallowed KeyboardInterrupt/SystemExit; it now only ignores
    OSError (directory already exists, empty dirname, ...).
    """
    try:
        os.makedirs(os.path.dirname(self._configfile))
    except OSError:
        pass
    self._config = configparser.RawConfigParser()
    self._config.add_section('Settings')
    for key, val in self.DEFAULTS.items():
        self._config.set('Settings', key, val)
    with open(self._configfile, 'w') as f:
        self._config.write(f)
Set config to default .
114
5
243,120
def save(self):
    """Store the current configuration back to its file.

    Bug fix: the directory-creation guard used a bare ``except``; it now
    only ignores OSError (directory already exists, empty dirname, ...).
    """
    try:
        os.makedirs(os.path.dirname(self._configfile))
    except OSError:
        pass
    with open(self._configfile, 'w') as f:
        self._config.write(f)
Store config back to file .
56
6
243,121
def get_free_gpus(max_procs=0):
    """Return one boolean per GPU: True when it runs at most *max_procs*
    compute processes.

    Returns an empty list when the NVML drivers cannot be reached.
    Fixes: narrowed bare ``except`` clauses to ``except Exception`` and
    replaced the deprecated ``logger.warn`` alias with ``logger.warning``.
    """
    logger = logging.getLogger(__name__)
    try:
        py3nvml.nvmlInit()
    except Exception:
        str_ = """Couldn't connect to nvml drivers. Check they are installed correctly."""
        warnings.warn(str_, RuntimeWarning)
        logger.warning(str_)
        return []

    num_gpus = py3nvml.nvmlDeviceGetCount()
    gpu_free = [False] * num_gpus
    for i in range(num_gpus):
        try:
            h = py3nvml.nvmlDeviceGetHandleByIndex(i)
        except Exception:
            # Inaccessible device: leave it marked as busy.
            continue
        procs = try_get_info(py3nvml.nvmlDeviceGetComputeRunningProcesses,
                             h, ['something'])
        if len(procs) <= max_procs:
            gpu_free[i] = True
    py3nvml.nvmlShutdown()
    return gpu_free
Checks the number of processes running on your GPUs .
226
11
243,122
def get_num_procs():
    """Return the number of compute processes running on each GPU.

    Returns an empty list when the NVML drivers cannot be reached; a GPU
    whose handle cannot be opened keeps the sentinel value -1.
    Fixes: narrowed bare ``except`` clauses to ``except Exception`` and
    replaced the deprecated ``logger.warn`` alias with ``logger.warning``.
    """
    logger = logging.getLogger(__name__)
    try:
        py3nvml.nvmlInit()
    except Exception:
        str_ = """Couldn't connect to nvml drivers. Check they are installed correctly."""
        warnings.warn(str_, RuntimeWarning)
        logger.warning(str_)
        return []

    num_gpus = py3nvml.nvmlDeviceGetCount()
    gpu_procs = [-1] * num_gpus
    for i in range(num_gpus):
        try:
            h = py3nvml.nvmlDeviceGetHandleByIndex(i)
        except Exception:
            continue
        procs = try_get_info(py3nvml.nvmlDeviceGetComputeRunningProcesses,
                             h, ['something'])
        gpu_procs[i] = len(procs)
    py3nvml.nvmlShutdown()
    return gpu_procs
Gets the number of processes running on each gpu
216
11
243,123
def _extractNVMLErrorsAsClasses():
    """Generate one NVMLError subclass per NVML_ERROR_* constant.

    E.g. NVML_ERROR_ALREADY_INITIALIZED yields NVMLError_AlreadyInitialized;
    each generated class is registered in NVMLError._valClassMapping.
    """
    this_module = sys.modules[__name__]
    error_names = [name for name in dir(this_module)
                   if name.startswith("NVML_ERROR_")]
    for err_name in error_names:
        # e.g. Turn NVML_ERROR_ALREADY_INITIALIZED into NVMLError_AlreadyInitialized
        suffix = err_name.replace("NVML_ERROR_", "")
        class_name = "NVMLError_" + string.capwords(suffix, "_").replace("_", "")
        err_val = getattr(this_module, err_name)

        def gen_new(val):
            # Close over val so each class constructs NVMLError with its own code.
            def new(typ):
                return NVMLError.__new__(typ, val)
            return new

        new_error_class = type(class_name, (NVMLError,),
                               {'__new__': gen_new(err_val)})
        new_error_class.__module__ = __name__
        setattr(this_module, class_name, new_error_class)
        NVMLError._valClassMapping[err_val] = new_error_class
Generates a hierarchy of classes on top of NVMLError class .
287
16
243,124
def _LoadNvmlLibrary():
    """Load the NVML shared library once, in a thread-safe manner."""
    global nvmlLib
    if nvmlLib is None:
        # lock to ensure only one caller loads the library
        libLoadLock.acquire()
        try:
            if nvmlLib is None:  # re-check under the lock
                try:
                    if sys.platform[:3] == "win":
                        searchPaths = [
                            os.path.join(
                                os.getenv("ProgramFiles", r"C:\Program Files"),
                                r"NVIDIA Corporation\NVSMI\nvml.dll"),
                            os.path.join(
                                os.getenv("WinDir", r"C:\Windows"),
                                r"System32\nvml.dll"),
                        ]
                        nvmlPath = next(
                            (p for p in searchPaths if os.path.isfile(p)),
                            None)
                        if nvmlPath == None:
                            _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
                        else:
                            # cdecl calling convention
                            nvmlLib = CDLL(nvmlPath)
                    else:
                        # assume linux
                        nvmlLib = CDLL("libnvidia-ml.so.1")
                except OSError:
                    _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
                if nvmlLib == None:
                    _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
        finally:
            # lock is always freed
            libLoadLock.release()
Load the library if it isn t loaded already
339
9
243,125
def encode_notifications(tokens, notifications):
    """Return the APNS binary encoding of tokens and notifications.

    tokens: a list of tokens, or a single token string.
    notifications: a list of payload dicts, or a single dict.
    NOTE: Python 2 only code (`unicode`, str.decode('hex')).
    """
    fmt = "!BH32sH%ds"

    def structify(token, payload):
        return struct.pack(fmt % len(payload), 0, 32, token,
                           len(payload), payload)

    def binaryify(token):
        return token.decode('hex')

    # Normalize the single-item form to the list form.
    if type(notifications) is dict and type(tokens) in (str, unicode):
        tokens, notifications = ([tokens], [notifications])

    if type(notifications) is list and type(tokens) is list:
        pairs = ((binaryify(t),
                  json.dumps(p, separators=(',', ':'),
                             ensure_ascii=False).encode('utf-8'))
                 for t, p in zip(tokens, notifications))
        return ''.join(structify(t, p) for t, p in pairs)
Returns the encoded bytes for the given tokens and notifications. `tokens` is a list of tokens or a single token string; `notifications` is a list of notifications or a single notification dictionary.
191
31
243,126
def write(self, notifications):
    """Connect to the APNS service (if needed) and send notifications."""
    if not self.factory:
        log.msg('APNSService write (connecting)')
        server, port = ((APNS_SERVER_SANDBOX_HOSTNAME
                         if self.environment == 'sandbox'
                         else APNS_SERVER_HOSTNAME),
                        APNS_SERVER_PORT)
        self.factory = self.clientProtocolFactory()
        context = self.getContextFactory()
        reactor.connectSSL(server, port, self.factory, context)

    client = self.factory.clientProtocol
    if client:
        # Already connected: send straight away.
        return client.sendMessage(notifications)

    # Not connected yet: queue the send on the connection deferred and
    # fail it when the connection takes longer than self.timeout.
    d = self.factory.deferred
    timeout = reactor.callLater(
        self.timeout,
        lambda: d.called or d.errback(
            Exception('Notification timed out after %i seconds'
                      % self.timeout)))

    def cancel_timeout(r):
        try:
            timeout.cancel()
        except:  # noqa: E722 -- timer already fired or was cancelled
            pass
        return r

    d.addCallback(lambda p: p.sendMessage(notifications))
    d.addErrback(log_errback('apns-service-write'))
    d.addBoth(cancel_timeout)
    return d
Connect to the APNS service and send notifications
253
9
243,127
def read(self):
    """Connect to the APNS feedback service and read all data.

    Fixes: replaced the Python-2-only ``except Exception, e`` syntax with
    ``except Exception as e`` (valid on Python 2.6+ and 3), corrected the
    'Feedbcak' typo in the timeout message, and narrowed the bare except
    around the timer cancellation.
    """
    log.msg('APNSService read (connecting)')
    try:
        server, port = ((FEEDBACK_SERVER_SANDBOX_HOSTNAME
                         if self.environment == 'sandbox'
                         else FEEDBACK_SERVER_HOSTNAME),
                        FEEDBACK_SERVER_PORT)
        factory = self.feedbackProtocolFactory()
        context = self.getContextFactory()
        reactor.connectSSL(server, port, factory, context)
        factory.deferred.addErrback(log_errback('apns-feedback-read'))

        # Fail the read if the feedback fetch exceeds self.timeout.
        timeout = reactor.callLater(
            self.timeout,
            lambda: factory.deferred.called or factory.deferred.errback(
                Exception('Feedback fetch timed out after %i seconds'
                          % self.timeout)))

        def cancel_timeout(r):
            try:
                timeout.cancel()
            except Exception:
                pass
            return r

        factory.deferred.addBoth(cancel_timeout)
    except Exception as e:
        log.err('APNService feedback error initializing: %s' % str(e))
        raise
    return factory.deferred
Connect to the feedback service and read all data .
243
10
243,128
def reprovision_and_retry(func):
    """Wrap the errback of API functions with automatic re-provisioning.

    On UnknownAppID, re-provisions from OPTIONS['INITIAL'] and retries the
    call once; if that is also unsuccessful the new error is handed to the
    original errback (which by default re-raises it).

    Fix: replaced the Python-2-only ``except Exception, new_exc`` syntax
    with ``except Exception as new_exc`` (valid on Python 2.6+ and 3).
    """
    @functools.wraps(func)
    def wrapper(*a, **kw):
        errback = kw.get('errback', None)
        if errback is None:
            def errback(e):
                raise e

        def errback_wrapper(e):
            if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS:
                try:
                    for initial in OPTIONS['INITIAL']:
                        provision(*initial)  # retry provisioning the initial setup
                    func(*a, **kw)  # and try the function once more
                except Exception as new_exc:
                    errback(new_exc)  # throwing the new exception
            else:
                errback(e)  # not an instance of UnknownAppID - nothing we can do here

        kw['errback'] = errback_wrapper
        return func(*a, **kw)
    return wrapper
Wraps the errback callback of the API functions, automatically trying to re-provision if the app ID cannot be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error.
203
41
243,129
def pop(self):
    """Return the character at the cursor and advance the cursor by one."""
    current = self.code[self.index]
    self.index += 1
    return current
Removes the current character, moves to the next one, and returns the removed character.
23
15
243,130
def characters(self, numberOfCharacters):
    """Return the next *numberOfCharacters* characters starting at the cursor."""
    end = self.index + numberOfCharacters
    return self.code[self.index:end]
Returns characters at index + number of characters
27
8
243,131
def next_content(self, start, amount=1):
    """Return *amount* characters at or after *start*, skipping whitespace."""
    whitespace = (' ', '\t', '\n')
    length = len(self.code)
    while start < length and self.code[start] in whitespace:
        start += 1
    return self.code[start:start + amount]
Returns the next non-whitespace characters.
60
8
243,132
def prev_content(self, start, amount=1):
    """Return *amount* characters ending just before the first
    non-whitespace character found at or before *start*."""
    whitespace = (' ', '\t', '\n')
    while start > 0 and self.code[start] in whitespace:
        start -= 1
    # `(start or amount)` keeps the lower bound non-negative at position 0.
    begin = (start or amount) - amount
    return self.code[begin:start]
Returns the previous non-whitespace characters.
59
8
243,133
def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:
    """Parse the file containing the mappings from hosts to pass entries.

    A user-specified file takes precedence; otherwise the XDG config
    location is searched.
    """
    LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file)

    def parse(handle):
        config = configparser.ConfigParser()
        config.read_file(handle)
        return config

    # give precedence to the user-specified file
    if mapping_file is not None:
        LOGGER.debug('Parsing command line mapping file')
        return parse(mapping_file)

    # fall back on XDG config location
    xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper')
    if xdg_config_dir is None:
        raise RuntimeError(
            'No mapping configured so far at any XDG config location. '
            'Please create {config_file}'.format(
                config_file=DEFAULT_CONFIG_FILE))

    default_path = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)
    LOGGER.debug('Parsing mapping file %s', default_path)
    with open(default_path, 'r') as file_handle:
        return parse(file_handle)
Parse the file containing the mappings from hosts to pass entries .
271
14
243,134
def parse_request() -> Dict[str, str]:
    """Parse the git credential API request from stdin into a dict."""
    in_lines = sys.stdin.readlines()
    LOGGER.debug('Received request "%s"', in_lines)
    request = {}
    for line in in_lines:
        # skip empty lines to be a bit resilient against protocol errors
        if not line.strip():
            continue
        key, sep, value = line.partition('=')
        assert sep  # every non-blank line must be a key=value pair
        request[key.strip()] = value.strip()
    return request
Parse the request of the git credential API from stdin .
117
13
243,135
def get_password(request, mapping) -> None:
    """Resolve the given credential request in the provided mapping definition.

    Prints password= (and possibly username=) lines for the first matching
    section; exits with status 1 when nothing matches.
    """
    LOGGER.debug('Received request "%s"', request)
    if 'host' not in request:
        LOGGER.error('host= entry missing in request. '
                     'Cannot query without a host')
        return

    host = request['host']
    if 'path' in request:
        host = '/'.join([host, request['path']])

    def skip(line, skip):
        return line[skip:]

    LOGGER.debug('Iterating mapping to match against host "%s"', host)
    for section in mapping.sections():
        if not fnmatch.fnmatch(host, section):
            continue
        LOGGER.debug('Section "%s" matches requested host "%s"',
                     section, host)
        # TODO handle exceptions
        pass_target = mapping.get(section, 'target').replace(
            "${host}", request['host'])

        password_extractor = SpecificLineExtractor(
            0, 0, option_suffix='_password')
        password_extractor.configure(mapping[section])
        username_extractor = _username_extractors[
            mapping[section].get('username_extractor',
                                 fallback=_line_extractor_name)]
        username_extractor.configure(mapping[section])

        LOGGER.debug('Requesting entry "%s" from pass', pass_target)
        output = subprocess.check_output(
            ['pass', 'show', pass_target]).decode('utf-8')
        lines = output.splitlines()

        password = password_extractor.get_value(pass_target, lines)
        username = username_extractor.get_value(pass_target, lines)
        if password:
            print('password={password}'.format(  # noqa: T001
                password=password))
        if 'username' not in request and username:
            print('username={username}'.format(  # noqa: T001
                username=username))
        return

    LOGGER.warning('No mapping matched')
    sys.exit(1)
Resolve the given credential request in the provided mapping definition .
480
12
243,136
def main(argv: Optional[Sequence[str]] = None) -> None:
    """Entry point of the pass-git-helper script."""
    args = parse_arguments(argv=argv)
    if args.logging:
        logging.basicConfig(level=logging.DEBUG)

    handle_skip()

    action = args.action
    request = parse_request()
    LOGGER.debug('Received action %s with request:\n%s', action, request)

    try:
        mapping = parse_mapping(args.mapping)
    except Exception as error:
        LOGGER.critical('Unable to parse mapping file', exc_info=True)
        print(  # noqa: P101
            'Unable to parse mapping file: {error}'.format(error=error),
            file=sys.stderr)
        sys.exit(1)

    if action == 'get':
        get_password(request, mapping)
    else:
        LOGGER.info('Action %s is currently not supported', action)
        sys.exit(1)
Start the pass - git - helper script .
208
9
243,137
def configure(self, config):
    """Read the number of characters to skip from *config*, keeping the
    current value as fallback."""
    option = 'skip{suffix}'.format(suffix=self._option_suffix)
    self._prefix_length = config.getint(option,
                                        fallback=self._prefix_length)
Configure the amount of characters to skip .
51
9
243,138
def insert_metric_changes(db, metrics, metric_mapping, commit):
    """Insert the nonzero metric deltas of *commit* into metric_changes."""
    rows = [
        (commit.sha, metric_mapping[metric.name], metric.value)
        for metric in metrics
        if metric.value != 0
    ]
    db.executemany(
        'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)',
        rows,
    )
Insert into the metric_changes tables .
87
8
243,139
def get_commits(self, since_sha=None):
    """Return Commit objects on the first-parent chain, oldest first."""
    assert self.tempdir
    cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]
    if since_sha:
        # Seed with the starting commit and only walk what follows it.
        commits = [self.get_commit(since_sha)]
        cmd.append('{}..HEAD'.format(since_sha))
    else:
        commits = []
        cmd.append('HEAD')
    output = cmd_output(*cmd, cwd=self.tempdir)
    # Output alternates sha / timestamp lines.
    for sha, date in chunk_iter(output.splitlines(), 2):
        commits.append(Commit(sha, int(date)))
    return commits
Returns a list of Commit objects .
154
7
243,140
def discover(package, cls_match_func):
    """Return the set of classes defined under *package* accepted by
    cls_match_func."""
    matched_classes = set()
    prefix = package.__name__ + '.'
    for _, module_name, _ in pkgutil.walk_packages(package.__path__,
                                                   prefix=prefix):
        module = __import__(module_name, fromlist=[str('__trash')], level=0)
        # Check all the classes in that module
        for _, candidate in inspect.getmembers(module, inspect.isclass):
            # Don't include things that are only there due to a side-effect
            # of importing
            if candidate.__module__ != module.__name__:
                continue
            if cls_match_func(candidate):
                matched_classes.add(candidate)
    return matched_classes
Returns a set of classes in the directory matched by cls_match_func
173
16
243,141
def chunk_iter(iterable, n):
    """Yield successive tuples of up to *n* items from *iterable*.

    Raises ValueError for non-positive *n* (the old code used ``assert``,
    which is silently stripped under ``python -O``).
    """
    if n <= 0:
        raise ValueError('n must be positive, got %r' % (n,))
    iterator = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(iterator, n))
        if not chunk:
            return
        yield chunk
Yields an iterator in chunks
64
7
243,142
def get_metric_parsers(metric_packages=tuple(), include_defaults=True):
    """Collect all metric-parser classes from the given packages.

    include_defaults also pulls in the parsers shipped with
    git_code_debt.metrics.
    """
    metric_parsers = set()
    if include_defaults:
        import git_code_debt.metrics
        metric_parsers |= discover(git_code_debt.metrics, is_metric_cls)
    for metric_package in metric_packages:
        metric_parsers |= discover(metric_package, is_metric_cls)
    return metric_parsers
Gets all of the metric parsers .
116
9
243,143
def timeago_template(locale, index, ago_in):
    """Look up the locale template, falling back to the default locale.

    Fix: the bare ``except`` is narrowed to import/attribute failures, so
    genuine errors inside a locale module are no longer silently hidden.
    """
    try:
        LOCALE = __import__('timeago.locales.' + locale)
        LOCALE = locale_module(LOCALE, locale)
    except (ImportError, AttributeError):
        # Unknown locale: retry with the configured default.
        locale = setting.DEFAULT_LOCALE
        LOCALE = __import__('timeago.locales.' + locale)
        LOCALE = locale_module(LOCALE, locale)
    if isinstance(LOCALE, list):
        return LOCALE[index][ago_in]
    return LOCALE(index, ago_in)
simple locale implement
119
3
243,144
def parse(input):
    """Convert *input* (datetime / date / time / number / string) to a
    datetime, or None when the type is unsupported."""
    # Order matters: datetime is a subclass of date, so test it first.
    converters = (
        (datetime, lambda value: value),
        (date, date_to_datetime),
        (time, time_to_datetime),
        ((int, float), timestamp_to_datetime),
        (str, string_to_data_time),
    )
    for kinds, convert in converters:
        if isinstance(input, kinds):
            return convert(input)
    return None
parse input to datetime
103
5
243,145
def format(date, now=None, locale='en'):
    """Format the gap between *date* and *now* as a human-readable string."""
    if not isinstance(date, timedelta):
        if now is None:
            now = datetime.now()
        date = parser.parse(date)
        now = parser.parse(now)
        if date is None:
            raise ParameterUnvalid('the parameter `date` should be datetime '
                                   '/ timedelta, or datetime formated string.')
        if now is None:
            raise ParameterUnvalid('the parameter `now` should be datetime, '
                                   'or datetime formated string.')
        date = now - date

    # the gap sec
    diff_seconds = int(total_seconds(date))

    # is ago or in: 1 means the date lies in the future
    ago_in = 0
    if diff_seconds < 0:
        ago_in = 1  # date is later then now, is the time in future
        diff_seconds *= -1  # chango to positive

    # Scale the gap down through the unit thresholds.
    tmp = 0
    i = 0
    while i < SEC_ARRAY_LEN:
        tmp = SEC_ARRAY[i]
        if diff_seconds >= tmp:
            i += 1
            diff_seconds /= tmp
        else:
            break
    diff_seconds = int(diff_seconds)
    i *= 2

    # Bump to the plural template ("X minutes" vs "1 minute"); the
    # threshold is 9 for the seconds bucket, 1 for everything else.
    if diff_seconds > (i == 0 and 9 or 1):
        i += 1

    if locale is None:
        locale = DEFAULT_LOCALE

    tmp = timeago_template(locale, i, ago_in)
    if hasattr(tmp, '__call__'):
        tmp = tmp(diff_seconds)
    return '%s' in tmp and tmp % diff_seconds or tmp
the entry method
330
3
243,146
def _is_parent_of ( parent , child ) : if child . is_partition : return child . partition_slave == parent if child . is_toplevel : return child . drive == parent and child != parent return False
Check whether the first device is the parent of the second device .
50
13
243,147
def prune_empty_node(node, seen):
    """Recursively drop empty branches and report whether *node* itself
    ends up empty."""
    if node.methods:
        return False
    if id(node) in seen:
        # Already on the current visiting path: treat as prunable to
        # break reference cycles.
        return True
    seen = seen | {id(node)}
    for child in list(node.branches):
        if not prune_empty_node(child, seen):
            # A non-empty child keeps this node (and stops the scan).
            return False
        node.branches.remove(child)
    return True
Recursively remove empty branches and return whether this makes the node itself empty .
77
16
243,148
async def browse(self, device):
    """Launch the file manager on the mount path of the specified device."""
    device = self._find_device(device)
    # Guard clauses: need a mounted device and a configured browser.
    if not device.is_mounted:
        self._log.error(_("not browsing {0}: not mounted", device))
        return False
    if not self._browser:
        self._log.error(_("not browsing {0}: no program", device))
        return False
    self._log.debug(_('opening {0} on {0.mount_paths[0]}', device))
    self._browser(device.mount_paths[0])
    self._log.info(_('opened {0} on {0.mount_paths[0]}', device))
    return True
Launch file manager on the mount path of the specified device .
161
12
243,149
async def terminal(self, device):
    """Open the configured terminal emulator on the device's mount point."""
    device = self._find_device(device)
    if not device.is_mounted:
        self._log.error(_("not opening terminal {0}: not mounted", device))
        return False
    if not self._terminal:
        self._log.error(_("not opening terminal {0}: no program", device))
        return False
    self._log.debug(_('opening {0} on {0.mount_paths[0]}', device))
    self._terminal(device.mount_paths[0])
    self._log.info(_('opened {0} on {0.mount_paths[0]}', device))
    return True
Launch terminal on the mount path of the specified device .
165
11
243,150
async def mount(self, device):
    """Mount the filesystem on `device` unless it is already mounted."""
    device = self._find_device(device)
    if not self.is_handleable(device) or not device.is_filesystem:
        self._log.warn(_('not mounting {0}: unhandled device', device))
        return False
    if device.is_mounted:
        self._log.info(_('not mounting {0}: already mounted', device))
        return True
    # per-device mount options come from the user configuration
    options = match_config(self._config, device, 'options', None)
    kwargs = dict(options=options)
    self._log.debug(_('mounting {0} with {1}', device, kwargs))
    self._check_device_before_mount(device)
    mount_path = await device.mount(**kwargs)
    self._log.info(_('mounted {0} on {1}', device, mount_path))
    return True
Mount the device if not already mounted .
211
8
243,151
async def unmount(self, device):
    """Unmount the filesystem on `device` if it is mounted."""
    device = self._find_device(device)
    if not self.is_handleable(device) or not device.is_filesystem:
        self._log.warn(_('not unmounting {0}: unhandled device', device))
        return False
    if not device.is_mounted:
        self._log.info(_('not unmounting {0}: not mounted', device))
        return True
    self._log.debug(_('unmounting {0}', device))
    await device.unmount()
    self._log.info(_('unmounted {0}', device))
    return True
Unmount a Device if mounted .
153
7
243,152
async def unlock ( self , device ) :
    """Unlock the crypto device if not already unlocked.

    Tries the password cache and keyfiles first, then falls back to
    prompting the user.  Returns True on success (or when the device is
    already unlocked), False otherwise.
    """
    device = self . _find_device ( device )
    if not self . is_handleable ( device ) or not device . is_crypto :
        self . _log . warn ( _ ( 'not unlocking {0}: unhandled device' , device ) )
        return False
    if device . is_unlocked :
        self . _log . info ( _ ( 'not unlocking {0}: already unlocked' , device ) )
        return True
    if not self . _prompt :
        self . _log . error ( _ ( 'not unlocking {0}: no password prompt' , device ) )
        return False
    # try non-interactive sources before bothering the user
    unlocked = await self . _unlock_from_cache ( device )
    if unlocked :
        return True
    unlocked = await self . _unlock_from_keyfile ( device )
    if unlocked :
        return True
    options = dict (
        allow_keyfile = self . udisks . keyfile_support ,
        allow_cache = self . _cache is not None ,
        cache_hint = self . _cache_hint )
    password = await self . _prompt ( device , options )
    # password can be: None, str, or udiskie.prompt.PasswordResult
    cache_hint = getattr ( password , 'cache_hint' , self . _cache_hint )
    password = getattr ( password , 'password' , password )
    if password is None :
        self . _log . debug ( _ ( 'not unlocking {0}: cancelled by user' , device ) )
        return False
    if isinstance ( password , bytes ) :
        # bytes payload means the prompt returned keyfile contents
        self . _log . debug ( _ ( 'unlocking {0} using keyfile' , device ) )
        await device . unlock_keyfile ( password )
    else :
        self . _log . debug ( _ ( 'unlocking {0}' , device ) )
        await device . unlock ( password )
    self . _update_cache ( device , password , cache_hint )
    self . _log . info ( _ ( 'unlocked {0}' , device ) )
    return True
Unlock the device if not already unlocked .
426
9
243,153
async def lock(self, device):
    """Lock the crypto device if it is currently unlocked."""
    device = self._find_device(device)
    if not self.is_handleable(device) or not device.is_crypto:
        self._log.warn(_('not locking {0}: unhandled device', device))
        return False
    if not device.is_unlocked:
        self._log.info(_('not locking {0}: not unlocked', device))
        return True
    self._log.debug(_('locking {0}', device))
    await device.lock()
    self._log.info(_('locked {0}', device))
    return True
Lock device if unlocked .
145
5
243,154
async def add ( self , device , recursive = None ) :
    """Mount or unlock `device` depending on its type.

    When `recursive` is true, also handles dependent devices: the
    cleartext holder of an unlocked LUKS device, and the partitions of
    a partition table.  Returns True on success, False otherwise.
    """
    device , created = await self . _find_device_losetup ( device )
    if created and recursive is False :
        return device
    if device . is_filesystem :
        success = await self . mount ( device )
    elif device . is_crypto :
        success = await self . unlock ( device )
        if success and recursive :
            # re-read device state so the cleartext holder is visible
            await self . udisks . _sync ( )
            device = self . udisks [ device . object_path ]
            success = await self . add (
                device . luks_cleartext_holder ,
                recursive = True )
    elif ( recursive
            and device . is_partition_table
            and self . is_handleable ( device ) ) :
        tasks = [
            self . add ( dev , recursive = True )
            for dev in self . get_all_handleable ( )
            if dev . is_partition and dev . partition_slave == device
        ]
        results = await gather ( * tasks )
        success = all ( results )
    else :
        self . _log . info ( _ ( 'not adding {0}: unhandled device' , device ) )
        return False
    return success
Mount or unlock the device depending on its type .
239
10
243,155
async def auto_add ( self , device , recursive = None , automount = True ) :
    """Automatically attempt to mount or unlock a device, staying quiet
    when the device is not supported.

    Returns True unless an attempted operation failed.
    """
    device , created = await self . _find_device_losetup ( device )
    if created and recursive is False :
        return device
    if device . is_luks_cleartext and self . udisks . version_info >= ( 2 , 7 , 0 ) :
        await sleep ( 1.5 )  # temporary workaround for #153, unreliable
    success = True
    if not self . is_automount ( device , automount ) :
        pass
    elif device . is_filesystem :
        if not device . is_mounted :
            success = await self . mount ( device )
    elif device . is_crypto :
        if self . _prompt and not device . is_unlocked :
            success = await self . unlock ( device )
        if success and recursive :
            # re-read device state so the cleartext holder is visible
            await self . udisks . _sync ( )
            device = self . udisks [ device . object_path ]
            success = await self . auto_add (
                device . luks_cleartext_holder ,
                recursive = True )
    elif recursive and device . is_partition_table :
        tasks = [
            self . auto_add ( dev , recursive = True )
            for dev in self . get_all_handleable ( )
            if dev . is_partition and dev . partition_slave == device
        ]
        results = await gather ( * tasks )
        success = all ( results )
    else :
        self . _log . debug ( _ ( 'not adding {0}: unhandled device' , device ) )
    return success
Automatically attempt to mount or unlock a device but be quiet if the device is not supported .
325
19
243,156
async def remove ( self , device , force = False , detach = False ,
                   eject = False , lock = False ) :
    """Unmount or lock `device` depending on its type.

    `force` first removes dependent child devices recursively; `eject`,
    `detach` and `lock` additionally apply the respective operation to
    the parent drive / loop device / LUKS device afterwards.
    """
    device = self . _find_device ( device )
    if device . is_filesystem :
        # NOTE(review): if this inner condition is false, `success` is
        # never assigned on this branch — verify against upstream.
        if device . is_mounted or not device . is_loop or detach is False :
            success = await self . unmount ( device )
    elif device . is_crypto :
        if force and device . is_unlocked :
            await self . auto_remove ( device . luks_cleartext_holder , force = True )
        success = await self . lock ( device )
    elif ( force
            and ( device . is_partition_table or device . is_drive )
            and self . is_handleable ( device ) ) :
        kw = dict ( force = True , detach = detach , eject = eject , lock = lock )
        tasks = [
            self . auto_remove ( child , * * kw )
            for child in self . get_all_handleable ( )
            if _is_parent_of ( device , child )
        ]
        results = await gather ( * tasks )
        success = all ( results )
    else :
        self . _log . info ( _ ( 'not removing {0}: unhandled device' , device ) )
        success = False
    # if these operations work, everything is fine, we can return True:
    if lock and device . is_luks_cleartext :
        device = device . luks_cleartext_slave
        if self . is_handleable ( device ) :
            success = await self . lock ( device )
    if eject :
        success = await self . eject ( device )
    if ( detach or detach is None ) and device . is_loop :
        success = await self . delete ( device , remove = False )
    elif detach :
        success = await self . detach ( device )
    return success
Unmount or lock the device depending on device type .
379
11
243,157
async def eject(self, device, force=False):
    """Eject a device's drive, force-removing mounted children first if
    `force` is set.

    Returns True on success, False if the device is unhandled or the
    drive is not ejectable.
    """
    device = self._find_device(device)
    if not self.is_handleable(device):
        # BUG FIX: the message contains a {0} placeholder but no argument
        # was passed, so the literal '{0}' was logged (compare detach()).
        self._log.warn(_('not ejecting {0}: unhandled device', device))
        return False
    drive = device.drive
    if not (drive.is_drive and drive.is_ejectable):
        self._log.warn(_('not ejecting {0}: drive not ejectable', drive))
        return False
    if force:
        # Can't autoremove 'device.drive', because that will be filtered
        # due to block=False:
        await self.auto_remove(device.root, force=True)
    self._log.debug(_('ejecting {0}', device))
    await drive.eject()
    self._log.info(_('ejected {0}', device))
    return True
Eject a device after unmounting all its mounted filesystems .
201
14
243,158
async def detach(self, device, force=False):
    """Detach (power off) a drive, force-removing children first if asked."""
    device = self._find_device(device)
    if not self.is_handleable(device):
        self._log.warn(_('not detaching {0}: unhandled device', device))
        return False
    drive = device.root
    if not drive.is_detachable:
        self._log.warn(_('not detaching {0}: drive not detachable', drive))
        return False
    if force:
        await self.auto_remove(drive, force=True)
    self._log.debug(_('detaching {0}', device))
    await drive.detach()
    self._log.info(_('detached {0}', device))
    return True
Detach a device after unmounting all its mounted filesystems .
168
14
243,159
async def add_all(self, recursive=False):
    """Auto-add all handleable leaf devices present at startup."""
    results = await gather(*(
        self.auto_add(device, recursive=recursive)
        for device in self.get_all_handleable_leaves()))
    return all(results)
Add all handleable devices that available at start .
62
10
243,160
async def remove_all(self, detach=False, eject=False, lock=False):
    """Remove (unmount/lock/eject/detach) all handleable root devices."""
    kw = dict(force=True, detach=detach, eject=eject, lock=lock)
    results = await gather(*(
        self.auto_remove(device, **kw)
        for device in self.get_all_handleable_roots()))
    return all(results)
Remove all filesystems handleable by udiskie .
91
11
243,161
async def losetup ( self , image , read_only = True , offset = None ,
                    size = None , no_part_scan = None ) :
    """Set up a loop device backed by the `image` file.

    Returns the (new or already existing) loop device, or None if
    `image` is not a regular file.
    """
    try :
        device = self . udisks . find ( image )
    except FileNotFoundError :
        pass
    else :
        self . _log . info ( _ ( 'not setting up {0}: already up' , device ) )
        return device
    if not os . path . isfile ( image ) :
        self . _log . error ( _ ( 'not setting up {0}: not a file' , image ) )
        return None
    self . _log . debug ( _ ( 'setting up {0}' , image ) )
    # NOTE(review): the fd is handed to loop_setup and never closed here —
    # presumably ownership passes to the DBus call; confirm no fd leak.
    fd = os . open ( image , os . O_RDONLY )
    device = await self . udisks . loop_setup ( fd , {
        'offset' : offset ,
        'size' : size ,
        'read-only' : read_only ,
        'no-part-scan' : no_part_scan ,
    } )
    self . _log . info (
        _ ( 'set up {0} as {1}' , image , device . device_presentation ) )
    return device
Setup a loop device .
245
5
243,162
async def delete(self, device, remove=True):
    """Tear down a loop device, removing dependent devices first if asked."""
    device = self._find_device(device)
    if not self.is_handleable(device) or not device.is_loop:
        self._log.warn(_('not deleting {0}: unhandled device', device))
        return False
    if remove:
        await self.auto_remove(device, force=True)
    self._log.debug(_('deleting {0}', device))
    await device.delete()
    self._log.info(_('deleted {0}', device))
    return True
Detach the loop device .
134
6
243,163
def is_handleable(self, device):
    """Check whether this device should be handled by udiskie.

    A device without an explicit ignore rule inherits handleability
    from its parent device.
    """
    # TODO: handle paths in first argument
    ignored = self._ignore_device(device)
    if ignored is None and device is not None:
        # no explicit rule: propagate handleability of the parent device
        return self.is_handleable(_get_parent(device))
    return not ignored
Check whether this device should be handled by udiskie .
69
12
243,164
def is_addable(self, device, automount=True):
    """Check whether `device` could currently be added by auto_add."""
    if not self.is_automount(device, automount):
        return False
    if device.is_filesystem:
        return not device.is_mounted
    if device.is_crypto:
        return self._prompt and not device.is_unlocked
    if device.is_partition_table:
        # a partition table is addable if any of its partitions is
        return any(self.is_addable(part)
                   for part in self.get_all_handleable()
                   if part.partition_slave == device)
    return False
Check if device can be added with auto_add .
117
11
243,165
def is_removable(self, device):
    """Check whether `device` could currently be removed by auto_remove."""
    if not self.is_handleable(device):
        return False
    if device.is_filesystem:
        return device.is_mounted
    if device.is_crypto:
        return device.is_unlocked
    if device.is_partition_table or device.is_drive:
        # removable if any child device is removable
        return any(self.is_removable(child)
                   for child in self.get_all_handleable()
                   if _is_parent_of(device, child))
    return False
Check if device can be removed with auto_remove .
110
11
243,166
def get_all_handleable(self):
    """Return all known handleable devices in display order."""
    tree = self.get_device_tree()
    return [node.device
            for node in sorted(tree.values(), key=DevNode._sort_key)
            if not node.ignored and node.device]
Get list of all known handleable devices .
58
9
243,167
def get_all_handleable_roots(self):
    """Return handleable devices that are root nodes of the filtered tree."""
    tree = self.get_device_tree()
    return [node.device
            for node in sorted(tree.values(), key=DevNode._sort_key)
            if not node.ignored and node.device
            # root of the *filtered* tree: parent is '/' or itself ignored
            and (node.root == '/' or tree[node.root].ignored)]
Get list of all handleable devices return only those that represent root nodes within the filtered device tree .
78
20
243,168
def get_all_handleable_leaves(self):
    """Return handleable devices that are leaf nodes of the filtered tree."""
    tree = self.get_device_tree()
    return [node.device
            for node in sorted(tree.values(), key=DevNode._sort_key)
            if not node.ignored and node.device
            # leaf of the *filtered* tree: every child is ignored
            and all(child.ignored for child in node.children)]
Get list of all handleable devices return only those that represent leaf nodes within the filtered device tree .
74
20
243,169
def get_device_tree(self):
    """Build a mapping of object_path -> DevNode for all known devices."""
    root = DevNode(None, None, [], None)
    device_nodes = {
        dev.object_path: DevNode(dev, dev.parent_object_path, [],
                                 self._ignore_device(dev))
        for dev in self.udisks
    }
    # attach every node to its parent (or the synthetic root)
    for node in device_nodes.values():
        device_nodes.get(node.root, root).children.append(node)
    device_nodes['/'] = root
    for node in device_nodes.values():
        node.children.sort(key=DevNode._sort_key)

    # use parent as fallback, update top->down:
    def propagate_ignored(node):
        for child in node.children:
            if child.ignored is None:
                child.ignored = node.ignored
            propagate_ignored(child)
    propagate_ignored(root)
    return device_nodes
Get a tree of all devices .
202
7
243,170
def detect(self, root_device='/'):
    """Build the menu tree of all currently known devices and return the
    node for `root_device`."""
    root = Device(None, [], None, "", [])
    device_nodes = dict(map(self._device_node,
                            self._mounter.get_all_handleable()))
    # insert child devices as branches into their roots:
    for node in device_nodes.values():
        device_nodes.get(node.root, root).branches.append(node)
    device_nodes['/'] = root
    for node in device_nodes.values():
        node.branches.sort(key=lambda branch: branch.label)
    return device_nodes[root_device]
Detect all currently known devices .
150
6
243,171
def _get_device_methods ( self , device ) : if device . is_filesystem : if device . is_mounted : if self . _mounter . _browser : yield 'browse' if self . _mounter . _terminal : yield 'terminal' yield 'unmount' else : yield 'mount' elif device . is_crypto : if device . is_unlocked : yield 'lock' else : yield 'unlock' cache = self . _mounter . _cache if cache and device in cache : yield 'forget_password' if device . is_ejectable and device . has_media : yield 'eject' if device . is_detachable : yield 'detach' if device . is_loop : yield 'delete'
Return an iterable over all available methods the device has .
167
12
243,172
def _device_node(self, device):
    """Create an (object_path, Device) menu entry for `device` with its
    branches left empty."""
    label = device.ui_label
    dev_label = device.ui_device_label
    # determine available methods
    methods = [
        Action(method, device,
               self._labels[method].format(label, dev_label),
               partial(self._actions[method], device))
        for method in self._get_device_methods(device)
    ]
    # find the root device:
    root = device.parent_object_path
    # in this first step leave branches empty
    return device.object_path, Device(root, [], device, dev_label, methods)
Create an empty menu node for the specified device .
136
10
243,173
def samefile(a: str, b: str) -> bool:
    """Check whether two paths refer to the same file.

    Falls back to comparing normalized paths when the files cannot be
    stat'ed (e.g. they do not exist).
    """
    try:
        return os.path.samefile(a, b)
    except OSError:
        return os.path.normpath(a) == os.path.normpath(b)
Check if two paths represent the same file .
55
10
243,174
def sameuuid(a: str, b: str) -> bool:
    """Compare two UUIDs case-insensitively.

    FIX: return a strict bool as the annotation promises — the original
    returned the falsy operand itself ('' or None) when either input was
    empty, leaking the input value to callers.
    """
    return bool(a and b and a.lower() == b.lower())
Compare two UUIDs .
32
6
243,175
def extend(a: dict, b: dict) -> dict:
    """Merge two dicts into a new one; keys from `b` override `a`.

    Uses ``a.copy()`` so subclass copy semantics are preserved — much
    like subclassing works.
    """
    merged = a.copy()
    merged.update(b)
    return merged
Merge two dicts and return a new dict . Much like subclassing works .
29
17
243,176
def decode_ay(ay):
    """Convert a DBus ``ay`` (byte array) value to a str.

    Accepts None, str, bytes, or any byte-sequence; trailing NUL bytes
    are stripped from sequence input.
    """
    if ay is None:
        return ''
    if isinstance(ay, str):
        return ay
    if isinstance(ay, bytes):
        return ay.decode('utf-8')
    # dbus.Array([dbus.Byte]) or any similar sequence type:
    return bytearray(ay).rstrip(bytearray((0,))).decode('utf-8')
Convert binary blob from DBus queries to strings .
99
11
243,177
def format_exc(*exc_info):
    """Format an exception with its traceback as a single string.

    Uses the currently handled exception when no exc_info is given.
    """
    typ, exc, tb = exc_info or sys.exc_info()
    return "".join(traceback.format_exception(typ, exc, tb))
Show exception with traceback .
53
6
243,178
def trigger(self, event, *args):
    """Invoke every handler registered for `event` with `args`."""
    for callback in self._event_handlers[event]:
        callback(*args)
Trigger event by name .
30
5
243,179
def _check(self, args):
    """Exit via DocoptExit when more than one exclusive option is given."""
    given = sum(bool(args[arg]) for arg in self._mapping)
    if given > 1:
        raise DocoptExit(_('These options are mutually exclusive: {0}',
                           ', '.join(self._mapping)))
Exit in case of multiple exclusive arguments .
63
8
243,180
def program_options(self, args):
    """Translate docopt-parsed `args` into program options via the
    registered option rules, skipping rules that yield None."""
    options = {}
    for name, rule in self.option_rules.items():
        value = rule(args)
        if value is not None:
            options[name] = value
    return options
Get program options from docopt parsed options .
49
9
243,181
def run(self):
    """Run the GLib main loop and return the process exit code."""
    self.exit_code = 1
    self.mainloop = GLib.MainLoop()
    try:
        task = ensure_future(self._start_async_tasks())
        task.callbacks.append(self.set_exit_code)
        self.mainloop.run()
        return self.exit_code
    except KeyboardInterrupt:
        return 1
Run the main loop . Returns exit code .
82
9
243,182
async def _start_async_tasks(self):
    """Run asynchronous startup tasks and return an exit code."""
    try:
        self.udisks = await udiskie.udisks2.Daemon.create()
        results = await self._init()
        return 0 if all(results) else 1
    except Exception:
        traceback.print_exc()
        return 1
    finally:
        # always stop the main loop, whether startup succeeded or not
        self.mainloop.quit()
Start asynchronous operations .
80
4
243,183
def device_changed(self, old_state, new_state):
    """Auto-add devices that became mountable through an update event."""
    # udisks2 sometimes adds empty devices and later updates them - which
    # makes is_external become true at a time later than device_added:
    became_addable = (self._mounter.is_addable(new_state)
                      and not self._mounter.is_addable(old_state)
                      and not self._mounter.is_removable(old_state))
    if became_addable:
        self.auto_add(new_state)
Mount newly mountable devices .
110
6
243,184
async def connect_service(bus_name, object_path, interface):
    """Connect to a service object on the system DBus; return an
    InterfaceProxy wrapping it."""
    flags = (Gio.DBusProxyFlags.DO_NOT_LOAD_PROPERTIES
             | Gio.DBusProxyFlags.DO_NOT_CONNECT_SIGNALS)
    proxy = await proxy_new_for_bus(
        Gio.BusType.SYSTEM,
        flags,
        info=None,
        name=bus_name,
        object_path=object_path,
        interface_name=interface,
    )
    return InterfaceProxy(proxy)
Connect to the service object on DBus return InterfaceProxy .
105
12
243,185
def object(self):
    """Return an ObjectProxy for the same underlying remote object."""
    proxy = self._proxy
    return ObjectProxy(proxy.get_connection(),
                       proxy.get_name(),
                       proxy.get_object_path())
Get an ObjectProxy instance for the underlying object .
42
12
243,186
def connect ( self , interface , event , object_path , handler ) :
    """Connect to a DBus signal.

    If `object_path` is None, subscribe for all objects and invoke the
    handler with the object path as its first argument.
    """
    if object_path :
        # The callback's `object_path` parameter deliberately shadows the
        # outer argument: it is the sender path supplied by Gio.
        def callback ( connection , sender_name , object_path ,
                       interface_name , signal_name , parameters ) :
            return handler ( * unpack_variant ( parameters ) )
    else :
        def callback ( connection , sender_name , object_path ,
                       interface_name , signal_name , parameters ) :
            # forward the sender path since we subscribed for all objects
            return handler ( object_path , * unpack_variant ( parameters ) )
    return self . connection . signal_subscribe (
        self . bus_name ,
        interface ,
        event ,
        object_path ,
        None ,
        Gio . DBusSignalFlags . NONE ,
        callback ,
    )
Connect to a DBus signal . If object_path is None subscribe for all objects and invoke the callback with the object_path as its first argument .
142
31
243,187
def require_Gtk ( min_version = 2 ) :
    """Import and initialize Gtk; raise RuntimeError when unavailable.

    Requires an X session and at least the given major Gtk version;
    warns when falling back to GTK 2.
    """
    if not _in_X :
        raise RuntimeError ( 'Not in X session.' )
    if _has_Gtk < min_version :
        raise RuntimeError ( 'Module gi.repository.Gtk not available!' )
    if _has_Gtk == 2 :
        logging . getLogger ( __name__ ) . warn ( _ (
            "Missing runtime dependency GTK 3. Falling back to GTK 2 "
            "for password prompt" ) )
    from gi . repository import Gtk
    # if we attempt to create any GUI elements with no X server running the
    # program will just crash, so let's make a way to catch this case:
    if not Gtk . init_check ( None ) [ 0 ] :
        raise RuntimeError ( _ ( "X server not connected!" ) )
    return Gtk
Make sure Gtk is properly initialized .
179
8
243,188
def _(text, *args, **kwargs):
    """Translate `text` and, when arguments are given, format it with
    str.format."""
    msg = _t.gettext(text)
    return msg.format(*args, **kwargs) if args or kwargs else msg
Translate and then format the text with str . format .
50
13
243,189
def filter_opt(opt):
    """Build a GLib.Variant option dict, dropping entries whose value
    is None."""
    return {key: GLib.Variant(*spec)
            for key, spec in opt.items()
            if spec[1] is not None}
Remove None values from a dictionary .
39
7
243,190
def eject(self, auth_no_user_interaction=None):
    """Eject media from the associated drive via the UDisks2 Drive API."""
    opts = filter_opt({
        'auth.no_user_interaction': ('b', auth_no_user_interaction),
    })
    return self._assocdrive._M.Drive.Eject('(a{sv})', opts)
Eject media from the device .
74
7
243,191
def device_id(self):
    """Return a unique and persistent identifier for the device.

    Prefers a /dev/disk/by-id symlink; falls back to the drive id.
    """
    if self.is_block:
        for symlink in self._P.Block.Symlinks:
            parts = decode_ay(symlink).split('/')
            if parts[-2] == 'by-id':
                return parts[-1]
    elif self.is_drive:
        return self._assocdrive._P.Drive.Id
    return ''
Return a unique and persistent identifier for the device .
85
10
243,192
def is_external ( self ) :
    """Check if the device is external (not a system-internal device)."""
    # NOTE: Checking for equality HintSystem==False returns False if the
    # property is resolved to a None value (interface not available).
    if self . _P . Block . HintSystem == False :  # noqa: E712
        return True
    # NOTE: udisks2 seems to guess incorrectly in some cases. This
    # leads to HintSystem=True for unlocked devices. In order to show
    # the device anyway, it needs to be recursively checked if any
    # parent device is recognized as external.
    if self . is_luks_cleartext and self . luks_cleartext_slave . is_external :
        return True
    if self . is_partition and self . partition_slave . is_external :
        return True
    return False
Check if the device is external .
165
7
243,193
def drive(self):
    """Return the drive containing this device, or None."""
    if self.is_drive:
        return self
    slave = self.luks_cleartext_slave
    if slave:
        # delegate to the underlying encrypted device
        return slave.drive
    if self.is_block:
        return self._daemon[self._P.Block.Drive]
    return None
Get wrapper to the drive containing this device .
66
9
243,194
def root(self):
    """Return the top-level block device on the same drive, or None."""
    drive = self.drive
    return next(
        (device for device in self._daemon
         if not device.is_drive
         and device.is_toplevel
         and device.drive == drive),
        None)
Get the top level block device in the ancestry of this device .
47
13
243,195
def symlinks(self):
    """Return the known symlinks of the block device, decoded to str."""
    # fetch the DBus property chain once instead of twice
    symlinks = self._P.Block.Symlinks
    if not symlinks:
        return []
    return [decode_ay(path) for path in symlinks]
Known symlinks of the block device .
43
8
243,196
def mount(self, fstype=None, options=None, auth_no_user_interaction=None):
    """Mount the filesystem via the UDisks2 Filesystem API."""
    opts = filter_opt({
        'fstype': ('s', fstype),
        'options': ('s', ','.join(options or [])),
        'auth.no_user_interaction': ('b', auth_no_user_interaction),
    })
    return self._M.Filesystem.Mount('(a{sv})', opts)
Mount filesystem .
116
3
243,197
def unmount(self, force=None, auth_no_user_interaction=None):
    """Unmount the filesystem via the UDisks2 Filesystem API."""
    opts = filter_opt({
        'force': ('b', force),
        'auth.no_user_interaction': ('b', auth_no_user_interaction),
    })
    return self._M.Filesystem.Unmount('(a{sv})', opts)
Unmount filesystem .
87
4
243,198
def luks_cleartext_holder(self):
    """Return the unlocked LUKS cleartext device for this device, or None."""
    if not self.is_luks:
        return None
    return next((device for device in self._daemon
                 if device.luks_cleartext_slave == self),
                None)
Get wrapper to the unlocked luks cleartext device .
53
13
243,199
def unlock(self, password, auth_no_user_interaction=None):
    """Unlock the LUKS device with `password` via the UDisks2 API."""
    opts = filter_opt({
        'auth.no_user_interaction': ('b', auth_no_user_interaction),
    })
    return self._M.Encrypted.Unlock('(sa{sv})', password, opts)
Unlock Luks device .
73
6