idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
25,000
def attachfile(self, idlist, attachfile, description, **kwargs):
    """
    Attach a file to the given bug IDs. Returns the ID of the attachment
    or raises an XMLRPC Fault if something goes wrong.

    :param idlist: bug ID or list of bug IDs to attach to
    :param attachfile: filename or file-like object to attach
    :param description: attachment summary shown in the bug
    :raises TypeError: if attachfile is neither a filename nor file-like
    """
    # Track whether we opened the handle ourselves so we can close it;
    # the original leaked the handle when given a filename.
    opened_here = False
    if isinstance(attachfile, str):
        f = open(attachfile, "rb")
        opened_here = True
    elif hasattr(attachfile, 'read'):
        f = attachfile
    else:
        raise TypeError("attachfile must be filename or file-like object")

    try:
        # Back compat: translate legacy kwarg names to the modern API.
        if "contenttype" in kwargs:
            kwargs["content_type"] = kwargs.pop("contenttype")
        if "ispatch" in kwargs:
            kwargs["is_patch"] = kwargs.pop("ispatch")
        if "isprivate" in kwargs:
            kwargs["is_private"] = kwargs.pop("isprivate")
        if "filename" in kwargs:
            kwargs["file_name"] = kwargs.pop("filename")

        kwargs['summary'] = description

        data = f.read()
        if not isinstance(data, bytes):
            data = data.encode(locale.getpreferredencoding())
        kwargs['data'] = Binary(data)

        kwargs['ids'] = self._listify(idlist)

        if 'file_name' not in kwargs and hasattr(f, "name"):
            kwargs['file_name'] = os.path.basename(f.name)
        if 'content_type' not in kwargs:
            # Use .get(): file_name may legitimately be absent here, and
            # the original raised KeyError in that case.
            ctype = None
            if kwargs.get('file_name'):
                ctype = mimetypes.guess_type(
                    kwargs['file_name'], strict=False)[0]
            kwargs['content_type'] = ctype or 'application/octet-stream'
    finally:
        if opened_here:
            f.close()

    ret = self._proxy.Bug.add_attachment(kwargs)

    if "attachments" in ret:
        # Up to BZ 4.2
        ret = [int(k) for k in ret["attachments"].keys()]
    elif "ids" in ret:
        # BZ 4.4+
        ret = ret["ids"]

    if isinstance(ret, list) and len(ret) == 1:
        ret = ret[0]
    return ret
Attach a file to the given bug IDs . Returns the ID of the attachment or raises XMLRPC Fault if something goes wrong .
507
26
25,001
def openattachment(self, attachid):
    """Get the contents of the attachment with the given attachment ID.

    Returns a seekable file-like object positioned at the start, with a
    ``name`` attribute carrying the attachment's file name.
    """
    record = self.get_attachments(None, attachid)["attachments"][str(attachid)]
    # BytesIO(initial_bytes) leaves the stream positioned at 0.
    buf = BytesIO(record["data"].data)
    buf.name = record["file_name"]
    return buf
Get the contents of the attachment with the given attachment ID . Returns a file - like object .
90
19
25,002
def get_attachments(self, ids, attachment_ids,
                    include_fields=None, exclude_fields=None):
    """Wrapper for Bug.attachments. One of ids or attachment_ids
    is required."""
    params = {
        "ids": self._listify(ids) or [],
        "attachment_ids": self._listify(attachment_ids) or [],
    }
    for name, value in (("include_fields", include_fields),
                        ("exclude_fields", exclude_fields)):
        if value:
            params[name] = self._listify(value)
    return self._proxy.Bug.attachments(params)
Wrapper for Bug . attachments . One of ids or attachment_ids is required
131
17
25,003
def createbug(self, *args, **kwargs):
    """Create a bug with the given info and return a new Bug object.

    Check the bugzilla API documentation for valid values; at minimum
    product, component, summary, version and description must be passed.
    """
    payload = self._validate_createbug(*args, **kwargs)
    raw = self._proxy.Bug.create(payload)
    return Bug(self, bug_id=raw["id"], autorefresh=self.bug_autorefresh)
Create a bug with the given info . Returns a new Bug object . Check bugzilla API documentation for valid values at least product component summary version and description need to be passed .
79
35
25,004
def _getusers ( self , ids = None , names = None , match = None ) : params = { } if ids : params [ 'ids' ] = self . _listify ( ids ) if names : params [ 'names' ] = self . _listify ( names ) if match : params [ 'match' ] = self . _listify ( match ) if not params : raise BugzillaError ( '_get() needs one of ids, ' ' names, or match kwarg.' ) return self . _proxy . User . get ( params )
Return a list of users that match criteria .
123
9
25,005
def getusers(self, userlist):
    """Return a list of User objects for the given usernames.

    Results are returned in the same order the names were passed in;
    any unmatched results are appended at the end.
    """
    raw = self._getusers(names=userlist).get('users', [])
    remaining = [User(self, **record) for record in raw]

    ordered = []
    for name in userlist:
        for candidate in remaining:
            if candidate.email == name:
                remaining.remove(candidate)
                ordered.append(candidate)
                break
    ordered += remaining
    return ordered
Return a list of Users for the given list of usernames .
113
7
25,006
def searchusers(self, pattern):
    """Return bugzilla User objects for the given match pattern(s)."""
    matched = self._getusers(match=pattern).get('users', [])
    return [User(self, **raw) for raw in matched]
Return a bugzilla User for the given list of patterns
45
11
25,007
def createuser(self, email, name='', password=''):
    """Create a bugzilla user account and return the new User object."""
    self._proxy.User.create(email, name, password)
    return self.getuser(email)
Return a bugzilla User for the given username
40
9
25,008
def refresh(self, include_fields=None, exclude_fields=None,
            extra_fields=None):
    """Refresh this bug with the latest data from bugzilla."""
    fields = self._bug_fields + (extra_fields or [])
    # pylint: disable=protected-access
    raw = self.bugzilla._getbug(self.bug_id,
                                include_fields=include_fields,
                                exclude_fields=exclude_fields,
                                extra_fields=fields)
    # pylint: enable=protected-access
    self._update_dict(raw)
Refresh the bug with the latest data from bugzilla
106
11
25,009
def _update_dict ( self , newdict ) : if self . bugzilla : self . bugzilla . post_translation ( { } , newdict ) # pylint: disable=protected-access aliases = self . bugzilla . _get_bug_aliases ( ) # pylint: enable=protected-access for newname , oldname in aliases : if oldname not in newdict : continue if newname not in newdict : newdict [ newname ] = newdict [ oldname ] elif newdict [ newname ] != newdict [ oldname ] : log . debug ( "Update dict contained differing alias values " "d[%s]=%s and d[%s]=%s , dropping the value " "d[%s]" , newname , newdict [ newname ] , oldname , newdict [ oldname ] , oldname ) del ( newdict [ oldname ] ) for key in newdict . keys ( ) : if key not in self . _bug_fields : self . _bug_fields . append ( key ) self . __dict__ . update ( newdict ) if 'id' not in self . __dict__ and 'bug_id' not in self . __dict__ : raise TypeError ( "Bug object needs a bug_id" )
Update internal dictionary in a way that ensures no duplicate entries are stored WRT field aliases
278
17
25,010
def deletecc(self, cclist, comment=None):
    """Remove the given email addresses from this bug's CC list."""
    update = self.bugzilla.build_update(comment=comment, cc_remove=cclist)
    log.debug("deletecc: update=%s", update)
    return self.bugzilla.update_bugs(self.bug_id, update)
Removes the given email addresses from the CC list for this bug .
75
14
25,011
def addcomment(self, comment, private=False):
    """Add the given comment to this bug.

    Set private=True to mark the comment as private.
    """
    # Note: fedora bodhi uses this function
    update = self.bugzilla.build_update(comment=comment,
                                        comment_private=private)
    log.debug("addcomment: update=%s", update)
    return self.bugzilla.update_bugs(self.bug_id, update)
Add the given comment to this bug . Set private to True to mark this comment as private .
81
19
25,012
def getcomments(self):
    """Return the list of comment dicts for this bug."""
    result = self.bugzilla.get_comments([self.bug_id])
    return result['bugs'][str(self.bug_id)]['comments']
Returns an array of comment dictionaries for this bug
52
10
25,013
def get_flag_status(self, name):
    """Return the status of the named flag, or None if the flag is unset.

    Only meaningful for simple flags that carry a single value.
    """
    flags = self.get_flags(name)
    if not flags:
        return None
    # This helper only supports single-valued flags.
    assert len(flags) <= 1
    return flags[0]['status']
Return a flag status field
61
5
25,014
def get_attachments(self, include_fields=None, exclude_fields=None):
    """Helper call to Bugzilla.get_attachments for this bug.

    To fetch specific attachment IDs, use that function directly instead.
    """
    # Serve any already-fetched attachments from the instance cache.
    if "attachments" in vars(self):
        return self.attachments
    response = self.bugzilla.get_attachments([self.bug_id], None,
                                             include_fields, exclude_fields)
    return response["bugs"][str(self.bug_id)]
Helper call to Bugzilla . get_attachments . If you want to fetch specific attachment IDs use that function instead
84
23
25,015
def refresh(self):
    """Update this User object with the latest info from bugzilla."""
    fresh = self.bugzilla.getuser(self.email)
    vars(self).update(vars(fresh))
Update User object with latest info from bugzilla
36
9
25,016
def pre_translation(self, query):
    """Translate query parameters in place for possible aliases."""
    old = query.copy()

    # 'bug_id' is the legacy name for 'id'; accept comma-separated strings.
    if 'bug_id' in query:
        bug_id = query.pop('bug_id')
        query['id'] = bug_id if isinstance(bug_id, list) else bug_id.split(',')

    if 'component' in query:
        comp = query['component']
        if not isinstance(comp, list):
            query['component'] = comp.split(',')

    if 'include_fields' not in query and 'column_list' not in query:
        return

    if 'include_fields' not in query:
        query['include_fields'] = []
    if 'column_list' in query:
        query['include_fields'] = query.pop('column_list')

    # We need to do this here for users that don't call build_query
    query.update(self._process_include_fields(
        query["include_fields"], None, None))

    if old != query:
        log.debug("RHBugzilla pretranslated query to: %s", query)
Translates the query for possible aliases
278
8
25,017
def post_translation(self, query, bug):
    """Convert the results of getbug back to the ancient RHBZ value
    formats, modifying `bug` in place.

    :param query: unused, kept for API compatibility
    :param bug: bug dict returned by getbug
    """
    ignore = query

    def _as_list(value):
        # The original used the `x and y or z` idiom, which mishandles
        # falsy values like an empty list (it produced [[]]).
        return value if isinstance(value, list) else [value]

    # RHBZ _still_ returns component and version as lists, which
    # deviates from upstream. Copy the list values to components
    # and versions respectively.
    if 'component' in bug and "components" not in bug:
        vals = _as_list(bug['component'])
        bug['components'] = vals
        if vals:
            bug['component'] = vals[0]

    if 'version' in bug and "versions" not in bug:
        vals = _as_list(bug['version'])
        bug['versions'] = vals
        if vals:
            bug['version'] = vals[0]

    # sub_components isn't too friendly of a format, add a simpler
    # sub_component value
    if 'sub_components' in bug and 'sub_component' not in bug:
        val = bug['sub_components']
        bug['sub_component'] = ""
        if isinstance(val, dict):
            values = []
            for vallist in val.values():
                values += vallist
            bug['sub_component'] = " ".join(values)
Convert the results of getbug back to the ancient RHBZ value formats
272
16
25,018
def delete(self, mail):
    """Remove a mail file from the filesystem, honoring dry-run mode.

    The deletion counter is bumped even in dry-run so reports stay
    comparable between runs.
    """
    self.stats['mail_deleted'] += 1
    if self.conf.dry_run:
        logger.info("Skip deletion of {!r}.".format(mail))
        return
    logger.debug("Deleting {!r}...".format(mail))
    # XXX Investigate the use of maildir's .remove instead. See: https:
    # //github.com/python/cpython/blob/origin/2.7/Lib/mailbox.py#L329-L331
    os.unlink(mail.path)
    logger.info("{} deleted.".format(mail.path))
Delete a mail from the filesystem .
145
7
25,019
def check_differences(self):
    """In-depth check that mail differences stay within configured limits.

    :raises SizeDiffAboveThreshold: when two mails differ too much in size
    :raises ContentDiffAboveThreshold: when contents differ too much
    """
    logger.info("Check that mail differences are within the limits.")
    size_limit = self.conf.size_threshold
    content_limit = self.conf.content_threshold
    if size_limit < 0:
        logger.info("Skip checking for size differences.")
    if content_limit < 0:
        logger.info("Skip checking for content differences.")
    if size_limit < 0 and content_limit < 0:
        return

    # Compare every pair of mails in the set against each other.
    for first, second in combinations(self.pool, 2):
        # Size comparison.
        if size_limit > -1:
            size_delta = abs(first.size - second.size)
            logger.debug("{} and {} differs by {} bytes in size.".format(
                first, second, size_delta))
            if size_delta > size_limit:
                raise SizeDiffAboveThreshold

        # Content comparison.
        if content_limit > -1:
            content_delta = self.diff(first, second)
            logger.debug("{} and {} differs by {} bytes in content.".format(
                first, second, content_delta))
            if content_delta > content_limit:
                if self.conf.show_diff:
                    logger.info(self.pretty_diff(first, second))
                raise ContentDiffAboveThreshold
In - depth check of mail differences .
334
8
25,020
def diff(self, mail_a, mail_b):
    """Return the difference in bytes between two mails' normalized
    bodies."""
    delta = unified_diff(
        mail_a.body_lines, mail_b.body_lines,
        # Ignore difference in filename lengths and timestamps.
        fromfile='a', tofile='b',
        fromfiledate='', tofiledate='',
        n=0, lineterm='\n')
    return len(''.join(delta))
Return difference in bytes between two mails normalized body .
97
11
25,021
def pretty_diff(self, mail_a, mail_b):
    """Return a verbose unified diff between the two mails' normalized
    bodies, labeled with paths and timestamps."""
    lines = unified_diff(
        mail_a.body_lines, mail_b.body_lines,
        fromfile='Normalized body of {}'.format(mail_a.path),
        tofile='Normalized body of {}'.format(mail_b.path),
        fromfiledate='{:0.2f}'.format(mail_a.timestamp),
        tofiledate='{:0.2f}'.format(mail_b.timestamp),
        n=0, lineterm='\n')
    return ''.join(lines)
Returns a verbose unified diff between two mails normalized body .
143
13
25,022
def apply_strategy(self):
    """Invoke the configured deduplication strategy on this set."""
    # CLI strategy names use dashes; method names use underscores.
    method_id = self.conf.strategy.replace('-', '_')
    if not hasattr(DuplicateSet, method_id):
        raise NotImplementedError(
            "DuplicateSet.{}() method.".format(method_id))
    return getattr(self, method_id)()
Apply deduplication with the configured strategy .
80
10
25,023
def dedupe(self):
    """Perform the deduplication and its preliminary checks,
    updating this set's statistics accordingly."""
    if len(self.pool) == 1:
        logger.debug("Ignore set: only one message found.")
        self.stats['mail_unique'] += 1
        self.stats['set_ignored'] += 1
        return

    try:
        # Fine-grained checks on mail differences.
        self.check_differences()
        # Call the deduplication strategy.
        self.apply_strategy()
    except UnicodeDecodeError as err:
        self.stats['set_rejected_encoding'] += 1
        logger.warning("Reject set: unparseable mails due to bad encoding.")
        logger.debug(str(err))
    except SizeDiffAboveThreshold:
        self.stats['set_rejected_size'] += 1
        logger.warning("Reject set: mails are too dissimilar in size.")
    except ContentDiffAboveThreshold:
        self.stats['set_rejected_content'] += 1
        logger.warning("Reject set: mails are too dissimilar in content.")
    else:
        # Count duplicate sets without deletion as skipped.
        if not self.stats['mail_deleted']:
            logger.info("Skip set: no deletion happened.")
            self.stats['set_skipped'] += 1
        else:
            self.stats['set_deduplicated'] += 1
Performs the deduplication and its preliminary checks .
300
12
25,024
def delete_older(self):
    """Delete all duplicates strictly older than the newest timestamp
    in the set."""
    logger.info(
        "Deleting all mails strictly older than the {} timestamp..."
        "".format(self.newest_timestamp))
    # Select candidates for deletion.
    candidates = [
        mail for mail in self.pool
        if mail.timestamp < self.newest_timestamp]
    if len(candidates) == self.size:
        logger.warning(
            "Skip deletion: all {} mails share the same timestamp."
            "".format(self.size))
        # Bail out like delete_oldest/delete_biggest do; the original
        # fell through after the warning and deleted every candidate.
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all older duplicates .
133
6
25,025
def delete_oldest(self):
    """Delete all duplicates sharing the oldest timestamp in the set."""
    logger.info(
        "Deleting all mails sharing the oldest {} timestamp...".format(
            self.oldest_timestamp))
    # Select candidates for deletion.
    candidates = [
        mail for mail in self.pool
        if mail.timestamp == self.oldest_timestamp]
    if len(candidates) == self.size:
        # Refuse to wipe the whole set.
        logger.warning(
            "Skip deletion: all {} mails share the same timestamp."
            "".format(self.size))
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all the oldest duplicates .
133
7
25,026
def delete_bigger(self):
    """Delete all duplicates strictly bigger than the smallest size
    in the set."""
    logger.info(
        "Deleting all mails strictly bigger than {} bytes...".format(
            self.smallest_size))
    # Select candidates for deletion.
    candidates = [
        mail for mail in self.pool
        if mail.size > self.smallest_size]
    if len(candidates) == self.size:
        logger.warning(
            "Skip deletion: all {} mails share the same size."
            "".format(self.size))
        # Bail out like delete_biggest does; the original fell through
        # after the warning and deleted every candidate.
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all bigger duplicates .
130
6
25,027
def delete_biggest(self):
    """Delete all duplicates sharing the biggest size in the set."""
    logger.info(
        "Deleting all mails sharing the biggest size of {} bytes..."
        "".format(self.biggest_size))
    # Select candidates for deletion.
    candidates = [
        mail for mail in self.pool
        if mail.size == self.biggest_size]
    if len(candidates) == self.size:
        # Refuse to wipe the whole set.
        logger.warning(
            "Skip deletion: all {} mails share the same size."
            "".format(self.size))
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all the biggest duplicates .
134
7
25,028
def delete_matching_path(self):
    """Delete all duplicates whose file path matches the configured
    regexp."""
    logger.info(
        "Deleting all mails with file path matching the {} regexp..."
        "".format(self.conf.regexp.pattern))
    # Select candidates for deletion.
    candidates = [
        mail for mail in self.pool
        if re.search(self.conf.regexp, mail.path)]
    if len(candidates) == self.size:
        # Fixed "rexexp" typo in the original user-facing warning.
        logger.warning(
            "Skip deletion: all {} mails matches the regexp.".format(
                self.size))
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all duplicates whose file path match the regexp .
146
12
25,029
def canonical_path(path):
    """Return a normalized, canonical path to a file or folder.

    Expands '~', makes the path absolute, resolves symlinks and
    normalizes case (on case-insensitive filesystems).
    """
    expanded = os.path.expanduser(path)
    absolute = os.path.abspath(expanded)
    resolved = os.path.realpath(absolute)
    return os.path.normcase(resolved)
Return a normalized canonical path to a file or folder .
42
11
25,030
def add_maildir(self, maildir_path):
    """Load up a maildir and compute a hash for each mail found,
    grouping identical mails in self.mails."""
    maildir_path = self.canonical_path(maildir_path)
    logger.info("Opening maildir at {} ...".format(maildir_path))
    # Maildir parser requires a string, not a unicode, as path.
    maildir = Maildir(str(maildir_path), factory=None, create=False)
    # Group folders by hash.
    logger.info("{} mails found.".format(len(maildir)))

    if self.conf.progress:
        bar = ProgressBar(widgets=[Percentage(), Bar()],
                          max_value=len(maildir),
                          redirect_stderr=True,
                          redirect_stdout=True)
    else:
        # No-op stand-in when progress reporting is disabled.
        def bar(x):
            return x

    for mail_id in bar(maildir.iterkeys()):
        self.stats['mail_found'] += 1
        msg_path = self.canonical_path(os.path.join(
            maildir._path, maildir._lookup(mail_id)))
        msg = Mail(msg_path, self.conf)
        try:
            digest = msg.hash_key
        except (InsufficientHeadersError, MissingMessageID) as expt:
            logger.warning(
                "Rejecting {}: {}".format(msg_path, expt.args[0]))
            self.stats['mail_rejected'] += 1
        else:
            logger.debug(
                "Hash is {} for mail {!r}.".format(digest, mail_id))
            # Use a set to deduplicate entries pointing to the same file.
            self.mails.setdefault(digest, set()).add(msg_path)
            self.stats['mail_kept'] += 1
Load up a maildir and compute hash for each mail found .
411
14
25,031
def run(self):
    """Run the deduplication process over every duplicate set."""
    logger.info(
        "The {} strategy will be applied on each duplicate set.".format(
            self.conf.strategy))
    self.stats['set_total'] = len(self.mails)

    for digest, path_set in self.mails.items():
        # Print visual clue to separate duplicate sets.
        logger.info('---')
        dupes = DuplicateSet(digest, path_set, self.conf)
        # Perfom the deduplication.
        dupes.dedupe()
        # Merge stats resulting of actions on duplicate sets.
        self.stats += dupes.stats
Run the deduplication process .
143
8
25,032
def report(self):
    """Print user-friendly statistics and metrics, then sanity-check
    the counters against each other."""
    mail_rows = [
        ("Found", 'mail_found'),
        ("Skipped", 'mail_skipped'),
        ("Rejected", 'mail_rejected'),
        ("Kept", 'mail_kept'),
        ("Unique", 'mail_unique'),
        ("Duplicates", 'mail_duplicates'),
        ("Deleted", 'mail_deleted'),
    ]
    table = [["Mails", "Metric"]]
    table.extend([label, self.stats[key]] for label, key in mail_rows)
    logger.info(tabulate(table, tablefmt='fancy_grid', headers='firstrow'))

    set_rows = [
        ("Total", 'set_total'),
        ("Ignored", 'set_ignored'),
        ("Skipped", 'set_skipped'),
        ("Rejected (bad encoding)", 'set_rejected_encoding'),
        ("Rejected (too dissimilar in size)", 'set_rejected_size'),
        ("Rejected (too dissimilar in content)", 'set_rejected_content'),
        ("Deduplicated", 'set_deduplicated'),
    ]
    table = [["Duplicate sets", "Metric"]]
    table.extend([label, self.stats[key]] for label, key in set_rows)
    logger.info(tabulate(table, tablefmt='fancy_grid', headers='firstrow'))

    # Perform some high-level consistency checks on metrics.
    assert self.stats['mail_found'] == (
        self.stats['mail_skipped'] + self.stats['mail_rejected'] +
        self.stats['mail_kept'])
    assert self.stats['mail_kept'] >= self.stats['mail_unique']
    assert self.stats['mail_kept'] >= self.stats['mail_duplicates']
    assert self.stats['mail_kept'] >= self.stats['mail_deleted']
    assert self.stats['mail_kept'] == (
        self.stats['mail_unique'] + self.stats['mail_duplicates'])
    # NOTE(review): strict '>' kept from the original; looks like it
    # assumes at least one duplicate always survives — confirm upstream.
    assert self.stats['mail_duplicates'] > self.stats['mail_deleted']
    assert self.stats['set_ignored'] == self.stats['mail_unique']
    assert self.stats['set_total'] == (
        self.stats['set_ignored'] + self.stats['set_skipped'] +
        self.stats['set_rejected_encoding'] +
        self.stats['set_rejected_size'] +
        self.stats['set_rejected_content'] +
        self.stats['set_deduplicated'])
Print user - friendly statistics and metrics .
744
8
25,033
def cli(ctx):
    """CLI for maildirs content analysis and deletion."""
    level = logger.level
    try:
        mapping = logging._levelToName
    except AttributeError:
        # Fallback to pre-Python 3.4 internals.
        mapping = logging._levelNames
    logger.debug('Verbosity set to {}.'.format(mapping.get(level, level)))

    # Print help screen and exit if no sub-commands provided.
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())
        ctx.exit()

    # Load up global options to the context.
    ctx.obj = {}
CLI for maildirs content analysis and deletion .
152
11
25,034
def validate_regexp(ctx, param, value):
    """Validate and compile a regular expression CLI option.

    Returns the compiled pattern, or the falsy value unchanged.
    """
    if value:
        try:
            value = re.compile(value)
        except re.error:
            # re.compile raises re.error, not ValueError: with the
            # original `except ValueError` an invalid pattern escaped
            # as a raw traceback instead of a clean usage error.
            raise click.BadParameter('invalid regular expression.')
    return value
Validate and compile regular expression .
47
7
25,035
def validate_maildirs(ctx, param, value):
    """Check that each given folder is a maildir (has the expected
    sub-directories)."""
    for path in value:
        for subdir in MD_SUBDIRS:
            if not os.path.isdir(os.path.join(path, subdir)):
                raise click.BadParameter(
                    '{} is not a maildir (missing {!r} sub-directory).'
                    .format(path, subdir))
    return value
Check that folders are maildirs .
90
8
25,036
def deduplicate(ctx, strategy, time_source, regexp, dry_run, message_id,
                size_threshold, content_threshold, show_diff, maildirs):
    """Deduplicate mails from a set of maildir folders."""
    # Print help screen and exit if no maildir folder provided.
    if not maildirs:
        click.echo(ctx.get_help())
        ctx.exit()

    # Validate exclusive options requirement depending on strategy.
    requirements = [
        (time_source, '-t/--time-source',
         [DELETE_OLDER, DELETE_OLDEST, DELETE_NEWER, DELETE_NEWEST]),
        (regexp, '-r/--regexp',
         [DELETE_MATCHING_PATH, DELETE_NON_MATCHING_PATH])]
    for param_value, param_name, required_strategies in requirements:
        if strategy in required_strategies:
            if not param_value:
                raise click.BadParameter(
                    '{} strategy requires the {} parameter.'.format(
                        strategy, param_name))
        elif param_value:
            raise click.BadParameter(
                '{} parameter not allowed in {} strategy.'.format(
                    param_name, strategy))

    conf = Config(
        strategy=strategy,
        time_source=time_source,
        regexp=regexp,
        dry_run=dry_run,
        show_diff=show_diff,
        message_id=message_id,
        size_threshold=size_threshold,
        content_threshold=content_threshold,
        # progress=progress,
    )

    dedup = Deduplicate(conf)

    logger.info('=== Start phase #1: load mails and compute hashes.')
    for maildir in maildirs:
        dedup.add_maildir(maildir)

    logger.info('=== Start phase #2: deduplicate mails.')
    dedup.run()
    dedup.report()
Deduplicate mails from a set of maildir folders .
426
15
25,037
def hash(ctx, message_id, message):
    """Take a single mail message and show its canonicalised form
    and hash."""
    conf = Config(message_id=message_id)
    mail = Mail(message, conf)
    logger.info(mail.header_text)
    logger.info('-' * 70)
    logger.info('Hash: {}'.format(mail.hash_key))
Take a single mail message and show its canonicalised form and hash .
71
14
25,038
def read_file(*relative_path_elements):
    """Return the stripped content of a file relative to this setup.py.

    :param relative_path_elements: path components joined onto this
        module's directory
    """
    file_path = path.join(path.dirname(__file__), *relative_path_elements)
    # Close the handle deterministically; the original leaked it.
    with io.open(file_path, encoding='utf8') as handle:
        return handle.read().strip()
Return content of a file relative to this setup . py .
64
12
25,039
def message(self):
    """Read the mail file, parse it, and return a Message instance."""
    logger.debug("Parsing mail at {} ...".format(self.path))
    with open(self.path, 'rb') as mail_file:
        # Python 2 parses from a text-mode API; Python 3 from binary.
        parser = (email.message_from_file if PY2
                  else email.message_from_binary_file)
        return parser(mail_file)
Read mail parse it and return a Message instance .
81
10
25,040
def timestamp(self):
    """Compute the normalized canonical timestamp of the mail."""
    # XXX ctime does not refer to creation time on POSIX systems, but
    # rather the last time the inode data changed. Source:
    # https://userprimary.net/posts/2007/11/18
    # /ctime-in-unix-means-last-change-time-not-create-time/
    if self.conf.time_source == CTIME:
        return os.path.getctime(self.path)

    # Otherwise derive the timestamp from the Date header.
    date_header = self.message.get('Date')
    return email.utils.mktime_tz(email.utils.parsedate_tz(date_header))
Compute the normalized canonical timestamp of the mail .
141
10
25,041
def body_lines(self):
    """Return a normalized list of lines from the message's body."""
    if not self.message.is_multipart():
        body = self.message.get_payload(None, decode=True)
    else:
        _, _, body = self.message.as_string().partition("\n\n")

    if isinstance(body, bytes):
        # Try the strict codec first, then UTF-8; fall back to the
        # undecoded payload when neither applies.
        for codec in ('ascii', 'utf-8'):
            try:
                body = body.decode(codec)
            except UnicodeDecodeError:
                continue
            else:
                break
        else:
            body = self.message.get_payload(None, decode=False)

    return body.splitlines(True)
Return a normalized list of lines from the message's body .
136
11
25,042
def subject(self):
    """Return the subject with all runs of whitespace collapsed to a
    single space."""
    # Fetch subject from first message.
    raw = self.message.get('Subject', '')
    collapsed, _ = re.subn(r'\s+', ' ', raw)
    return collapsed
Normalized subject .
51
4
25,043
def hash_key(self):
    """Return the canonical hash of a mail.

    Uses the Message-ID when configured to; otherwise hashes the
    canonical headers.

    :raises MissingMessageID: when configured for Message-ID use but
        the header is absent
    """
    if not self.conf.message_id:
        return hashlib.sha224(self.canonical_headers).hexdigest()

    message_id = self.message.get('Message-Id')
    if message_id:
        return message_id.strip()
    logger.error("No Message-ID in {}: {}".format(
        self.path, self.header_text))
    raise MissingMessageID
Returns the canonical hash of a mail .
98
8
25,044
def canonical_headers(self):
    """Copy selected headers into a new canonicalized byte string.

    :raises InsufficientHeadersError: when too little header data is
        available to compute a reliable hash
    """
    parts = []
    for header in HEADERS:
        if header not in self.message:
            continue
        for value in self.message.get_all(header):
            canonical = self.canonical_header_value(header, value)
            # Keep only values with at least one non-space character.
            if re.search(r'\S', canonical):
                parts.append('{}: {}\n'.format(header, canonical))

    canonical_headers = ''.join(parts).encode('utf-8')
    if len(canonical_headers) > 50:
        return canonical_headers

    # At this point we should have at absolute minimum 3 or 4 headers, e.g.
    # From/To/Date/Subject; if not, something went badly wrong.
    if len(canonical_headers) == 0:
        raise InsufficientHeadersError("No canonical headers found")

    err = textwrap.dedent("""\
        Not enough data from canonical headers to compute reliable hash!
        Headers:
        --------- 8< --------- 8< --------- 8< --------- 8< ---------
        {}
        --------- 8< --------- 8< --------- 8< --------- 8< ---------""")
    raise InsufficientHeadersError(err.format(canonical_headers))
Copy selected headers into a new string .
253
8
25,045
def enumerate(cls):
    """Return a list of available KeepKey devices as
    (path_normal, path_debuglink) two-tuples.

    :raises Exception: on duplicate path/serial pairs or unknown
        USB interfaces
    """
    devices = {}
    for d in hid.enumerate(0, 0):
        vendor_id = d['vendor_id']
        product_id = d['product_id']
        serial_number = d['serial_number']
        interface_number = d['interface_number']
        path = d['path']

        # HIDAPI on Mac cannot detect correct HID interfaces, so device with
        # DebugLink doesn't work on Mac...
        # Fixed `!= None` to the idiomatic `is not None` (PEP 8 E711).
        if devices.get(serial_number) is not None and \
                devices[serial_number][0] == path:
            raise Exception(
                "Two devices with the same path and S/N found. "
                "This is Mac, right? :-/")

        if (vendor_id, product_id) in DEVICE_IDS:
            devices.setdefault(serial_number, [None, None, None])
            if is_normal_link(d):
                devices[serial_number][0] = path
            elif is_debug_link(d):
                devices[serial_number][1] = path
            else:
                raise Exception(
                    "Unknown USB interface number: %d" % interface_number)

    # List of two-tuples (path_normal, path_debuglink)
    return list(devices.values())
Return a list of available KeepKey devices .
278
9
25,046
def is_connected(self):
    """Check if the device is still connected."""
    return any(d['path'] == self.device for d in hid.enumerate(0, 0))
Check if the device is still connected .
37
8
25,047
def session_end(self):
    """End a session. See session_begin for an in-depth description of
    TREZOR sessions."""
    # Decrement the nesting depth, never going below zero.
    self.session_depth = max(0, self.session_depth - 1)
    if self.session_depth == 0:
        self._session_end()
End a session . See session_begin for an in-depth description of TREZOR sessions .
48
19
25,048
def read(self):
    """Read available transport data and try to parse it as a protobuf
    message.

    Returns the parsed object on success, otherwise None (nothing
    ready, or nothing read).
    """
    if not self.ready_to_read():
        return None
    data = self._read()
    return None if data is None else self._parse_message(data)
If there is data available to be read from the transport reads the data and tries to parse it as a protobuf message . If the parsing succeeds return a protobuf object . Otherwise returns None .
45
41
25,049
def read_blocking(self):
    """Same as read, except blocks until data is available."""
    while True:
        data = self._read()
        # Fixed `!= None` to the idiomatic `is not None` (PEP 8 E711).
        if data is not None:
            break
    return self._parse_message(data)
Same as read except blocks until data is available to be read .
35
14
25,050
def _get_cache_name ( function ) : module_name = _inspect . getfile ( function ) module_name = _os . path . abspath ( module_name ) cache_name = module_name # fix for '<string>' or '<stdin>' in exec or interpreter usage. cache_name = cache_name . replace ( '<' , '_lt_' ) cache_name = cache_name . replace ( '>' , '_gt_' ) tmpdir = _os . getenv ( 'TMPDIR' ) or _os . getenv ( 'TEMP' ) or _os . getenv ( 'TMP' ) if tmpdir : cache_name = tmpdir + '/filecache_' + cache_name . replace ( _os . sep , '@' ) cache_name += '.cache' return cache_name
returns a name for the module s cache db .
190
11
25,051
def filecache(seconds_of_validity=None, fail_silently=False):
    """Persistent on-disk memoization decorator.

    Supports both bare ``@filecache`` and parameterized
    ``@filecache(seconds, fail_silently)`` usage. Cached values older
    than seconds_of_validity (when given) are recomputed.
    """
    def filecache_decorator(function):
        @_functools.wraps(function)
        def function_with_cache(*args, **kwargs):
            try:
                key = _args_key(function, args, kwargs)
                if key in function._db:
                    rv = function._db[key]
                    if (seconds_of_validity is None or
                            _time.time() - rv.timesig < seconds_of_validity):
                        return rv.data
            except Exception:
                # in any case of failure, don't let filecache break the program
                _log_error(_traceback.format_exc())
                if not fail_silently:
                    raise

            retval = function(*args, **kwargs)

            # Store in cache. Syncing matters: without it the cache
            # doesn't survive Ctrl-Break!
            try:
                function._db[key] = _retval(_time.time(), retval)
                function._db.sync()
            except Exception:
                # in any case of failure, don't let filecache break the program
                _log_error(_traceback.format_exc())
                if not fail_silently:
                    raise
            return retval

        # make sure cache is loaded, sharing already-open DBs per file
        if not hasattr(function, '_db'):
            cache_name = _get_cache_name(function)
            if cache_name in OPEN_DBS:
                function._db = OPEN_DBS[cache_name]
            else:
                function._db = _shelve.open(cache_name)
                OPEN_DBS[cache_name] = function._db
                atexit.register(function._db.close)
        function_with_cache._db = function._db
        return function_with_cache

    if type(seconds_of_validity) == types.FunctionType:
        # Support '@filecache.filecache' used without parentheses.
        func = seconds_of_validity
        seconds_of_validity = None
        return filecache_decorator(func)
    return filecache_decorator
filecache is called and the decorator should be returned .
513
12
25,052
def entity_data(self, entity_type, entity_id, history_index):
    """Return the data dict for an entity at a specific index of its history."""
    history = self.entity_history(entity_type, entity_id)
    return history[history_index]
Return the data dict for an entity at a specific index of its history .
40
15
25,053
def get_entity(self, entity_type, entity_id, history_index=-1, connected=True):
    """Return an object instance for the given entity_type and id.

    By default the object state matches the most recent history entry;
    pass history_index to get an older snapshot.  Returns None if the
    requested history entry does not exist.
    """
    if history_index < -1:
        # Translate a negative offset (other than -1) into an absolute
        # index into this entity's history.
        history_index += len(self.entity_history(entity_type, entity_id))
        if history_index < 0:
            return None

    try:
        self.entity_data(entity_type, entity_id, history_index)
    except IndexError:
        return None

    cls = get_entity_class(entity_type)
    return cls(entity_id, self.model,
               history_index=history_index,
               connected=connected)
Return an object instance for the given entity_type and id .
136
13
25,054
def on_change(self, callable_):
    """Add a change observer to this entity."""
    observer_args = (callable_, self.entity_type, 'change', self.entity_id)
    self.model.add_observer(*observer_args)
Add a change observer to this entity .
41
8
25,055
def on_remove(self, callable_):
    """Add a remove observer to this entity."""
    observer_args = (callable_, self.entity_type, 'remove', self.entity_id)
    self.model.add_observer(*observer_args)
Add a remove observer to this entity .
41
8
25,056
def dead(self):
    """Return True if this entity no longer exists in the underlying model."""
    if self.data is None:
        return True
    latest = self.model.state.entity_data(
        self.entity_type, self.entity_id, -1)
    return latest is None
Returns True if this entity no longer exists in the underlying model .
42
13
25,057
def previous(self):
    """Return a copy of this object as it was at its previous state in history."""
    prior_index = self._history_index - 1
    # The historical copy is never marked as connected.
    return self.model.state.get_entity(
        self.entity_type, self.entity_id, prior_index, connected=False)
Return a copy of this object as was at its previous state in history .
42
15
25,058
def next(self):
    """Return a copy of this object at its next state in history.

    Returns None if this object is already the latest (history_index -1).
    """
    if self._history_index == -1:
        return None
    new_index = self._history_index + 1
    # The next snapshot is "connected" only when it is the newest entry.
    connected = (
        new_index == len(self.model.state.entity_history(
            self.entity_type, self.entity_id)) - 1
    )
    # BUG FIX: the original passed self._history_index - 1 here (the
    # *previous* state, copy-pasted from previous()); the next state
    # lives at new_index.
    return self.model.state.get_entity(
        self.entity_type, self.entity_id, new_index, connected=connected)
Return a copy of this object at its next state in history .
102
13
25,059
async def connect(self, *args, **kwargs):
    """Connect to a juju model.

    Supports two calling conventions:
      * by model name (possibly empty) -- delegated to
        self._connector.connect_model(); or
      * by explicit endpoint/uuid plus credentials (username/password or
        bakery_client/macaroons) -- delegated to self._connector.connect().

    Raises TypeError/ValueError for ambiguous or incomplete argument
    combinations.
    """
    await self.disconnect()
    # Model-name form: no endpoint given, at most one positional arg.
    if 'endpoint' not in kwargs and len(args) < 2:
        if args and 'model_name' in kwargs:
            raise TypeError('connect() got multiple values for model_name')
        elif args:
            model_name = args[0]
        else:
            model_name = kwargs.pop('model_name', None)
        await self._connector.connect_model(model_name, **kwargs)
    else:
        # Endpoint form: validate that exactly one convention is used and
        # that both an address and some auth mechanism are available.
        if 'model_name' in kwargs:
            raise TypeError('connect() got values for both '
                            'model_name and endpoint')
        if args and 'endpoint' in kwargs:
            raise TypeError('connect() got multiple values for endpoint')
        if len(args) < 2 and 'uuid' not in kwargs:
            raise TypeError('connect() missing value for uuid')
        has_userpass = (len(args) >= 4 or
                        {'username', 'password'}.issubset(kwargs))
        has_macaroons = (len(args) >= 6 or not
                         {'bakery_client', 'macaroons'}.isdisjoint(kwargs))
        if not (has_userpass or has_macaroons):
            raise TypeError('connect() missing auth params')
        # Map positional args onto their keyword equivalents.
        arg_names = [
            'endpoint',
            'uuid',
            'username',
            'password',
            'cacert',
            'bakery_client',
            'macaroons',
            'loop',
            'max_frame_size',
        ]
        for i, arg in enumerate(args):
            kwargs[arg_names[i]] = arg
        if not {'endpoint', 'uuid'}.issubset(kwargs):
            raise ValueError('endpoint and uuid are required '
                             'if model_name not given')
        if not ({'username', 'password'}.issubset(kwargs) or
                {'bakery_client', 'macaroons'}.intersection(kwargs)):
            raise ValueError('Authentication parameters are required '
                             'if model_name not given')
        await self._connector.connect(**kwargs)
    await self._after_connect()
Connect to a juju model .
526
7
25,060
async def add_local_charm_dir(self, charm_dir, series):
    """Upload a local charm directory to the model.

    The directory is zipped into a temporary archive which is uploaded
    via add_local_charm in an executor thread (it performs blocking
    HTTP I/O).  Returns the charm URL of the uploaded charm.
    """
    archive = tempfile.NamedTemporaryFile()
    CharmArchiveGenerator(charm_dir).make_archive(archive.name)
    with archive:
        upload = partial(
            self.add_local_charm, archive, series,
            os.stat(archive.name).st_size)
        charm_url = await self._connector.loop.run_in_executor(None, upload)

    log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url)
    return charm_url
Upload a local charm to the model .
142
8
25,061
def add_local_charm(self, charm_file, series, size=None):
    """Upload a local charm archive to the model.

    :param charm_file: open file-like object (or bytes) holding the
        zipped charm archive
    :param series: charm series, encoded into the upload URL
    :param size: optional Content-Length header value
    :return: the 'charm-url' reported by the API
    :raises JujuError: if the API responds with a non-200 status
    """
    conn, headers, path_prefix = self.connection().https_connection()
    path = "%s/charms?series=%s" % (path_prefix, series)
    headers['Content-Type'] = 'application/zip'
    if size:
        headers['Content-Length'] = size
    conn.request("POST", path, charm_file, headers)
    response = conn.getresponse()
    result = response.read().decode()
    # Idiom fix: was `if not response.status == 200`.
    if response.status != 200:
        raise JujuError(result)
    result = json.loads(result)
    return result['charm-url']
Upload a local charm archive to the model, returning its charm URL.
156
9
25,062
def all_units_idle(self):
    """Return True if all units are idle (vacuously True with no units)."""
    return all(
        unit.data['agent-status']['current'] == 'idle'
        for unit in self.units.values()
    )
Return True if all units are idle .
55
8
25,063
async def reset(self, force=False):
    """Reset the model to a clean state.

    Destroys every application and machine, then blocks until no
    machines remain.

    :param bool force: Forcefully destroy machines.
    """
    log.debug('Resetting model')
    for application in self.applications.values():
        await application.destroy()
    for machine in self.machines.values():
        await machine.destroy(force=force)
    await self.block_until(lambda: len(self.machines) == 0)
Reset the model to a clean state .
77
9
25,064
async def get_info(self):
    """Return a client.ModelInfo object for this Model.

    Also refreshes the cached copy stored on self._info.
    """
    conn = self.connection()
    client_facade = client.ClientFacade.from_connection(conn)
    self._info = await client_facade.ModelInfo()
    log.debug('Got ModelInfo: %s', vars(self.info))
    return self.info
Return a client . ModelInfo object for this Model .
65
11
25,065
def add_observer(self, callable_, entity_type=None, action=None,
                 entity_id=None, predicate=None):
    """Register an on-model-change callback.

    The callback fires for deltas matching the optional entity_type,
    action, entity_id, and predicate filters (None means "any").
    """
    observer = _Observer(
        callable_, entity_type, action, entity_id, predicate)
    self._observers[observer] = callable_
Register an on - model - change callback
68
8
25,066
def _watch(self):
    """Start an asynchronous watch against this model.

    Spawns a task that pulls deltas from the AllWatcher facade, applies
    them to self.state, and notifies registered observers.  The task
    coordinates with _watch_stopping/_watch_stopped/_watch_received
    events for clean shutdown and "first results received" signaling.
    """
    async def _all_watcher():
        try:
            allwatcher = client.AllWatcherFacade.from_connection(
                self.connection())
            while not self._watch_stopping.is_set():
                try:
                    # Wait for the next batch of deltas, but wake up
                    # immediately if a stop is requested.
                    results = await utils.run_with_interrupt(
                        allwatcher.Next(),
                        self._watch_stopping,
                        loop=self._connector.loop)
                except JujuAPIError as e:
                    if 'watcher was stopped' not in str(e):
                        raise
                    if self._watch_stopping.is_set():
                        # this shouldn't ever actually happen, because
                        # the event should trigger before the controller
                        # has a chance to tell us the watcher is stopped
                        # but handle it gracefully, just in case
                        break
                    # controller stopped our watcher for some reason
                    # but we're not actually stopping, so just restart it
                    log.warning('Watcher: watcher stopped, restarting')
                    del allwatcher.Id
                    continue
                except websockets.ConnectionClosed:
                    monitor = self.connection().monitor
                    if monitor.status == monitor.ERROR:
                        # closed unexpectedly, try to reopen
                        log.warning('Watcher: connection closed, reopening')
                        await self.connection().reconnect()
                        if monitor.status != monitor.CONNECTED:
                            # reconnect failed; abort and shutdown
                            log.error('Watcher: automatic reconnect '
                                      'failed; stopping watcher')
                            break
                        # fresh connection needs a fresh watcher id
                        del allwatcher.Id
                        continue
                    else:
                        # closed on request, go ahead and shutdown
                        break
                if self._watch_stopping.is_set():
                    try:
                        await allwatcher.Stop()
                    except websockets.ConnectionClosed:
                        pass  # can't stop on a closed conn
                    break
                for delta in results.deltas:
                    try:
                        delta = get_entity_delta(delta)
                        old_obj, new_obj = self.state.apply_delta(delta)
                        await self._notify_observers(delta, old_obj, new_obj)
                    except KeyError as e:
                        log.debug("unknown delta type: %s", e.args[0])
                # signal that at least one batch has been processed
                self._watch_received.set()
        except CancelledError:
            pass
        except Exception:
            log.exception('Error in watcher')
            raise
        finally:
            self._watch_stopped.set()

    log.debug('Starting watcher task')
    self._watch_received.clear()
    self._watch_stopping.clear()
    self._watch_stopped.clear()
    self._connector.loop.create_task(_all_watcher())
Start an asynchronous watch against this model .
573
8
25,067
async def _notify_observers(self, delta, old_obj, new_obj):
    """Call observing callbacks, notifying them of a change in model state.

    :param delta: The raw change from the watcher
    :param old_obj: The object state prior to the change
    :param new_obj: The object state after the change
    """
    # A delta with no prior state is an addition, whatever the wire says.
    if new_obj and not old_obj:
        delta.type = 'add'
    log.debug('Model changed: %s %s %s',
              delta.entity, delta.type, delta.get_id())
    for observer in self._observers:
        if observer.cares_about(delta):
            asyncio.ensure_future(
                observer(delta, old_obj, new_obj, self),
                loop=self._connector.loop)
Call observing callbacks notifying them of a change in model state
125
13
25,068
async def _wait(self, entity_type, entity_id, action, predicate=None):
    """Block the calling routine until the given action has happened to
    the given entity, then return that entity (or None for a 'remove').
    """
    queue = asyncio.Queue(loop=self._connector.loop)

    async def callback(delta, old, new, model):
        await queue.put(delta.get_id())

    self.add_observer(callback, entity_type, action, entity_id, predicate)
    entity_id = await queue.get()
    # object might not be in the entity_map if we were waiting for a
    # 'remove' action
    return self.state._live_entity_map(entity_type).get(entity_id)
Block the calling routine until a given action has happened to the given entity
142
14
25,069
async def _wait_for_new ( self , entity_type , entity_id ) : # if the entity is already in the model, just return it if entity_id in self . state . _live_entity_map ( entity_type ) : return self . state . _live_entity_map ( entity_type ) [ entity_id ] return await self . _wait ( entity_type , entity_id , None )
Wait for a new object to appear in the Model and return it .
93
14
25,070
async def wait_for_action(self, action_id):
    """Given an action, wait for it to complete (or fail), then return it."""
    if action_id.startswith("action-"):
        # if we've been passed action.tag, transform it into the
        # id that the api deltas will use.
        action_id = action_id[len("action-"):]

    def predicate(delta):
        return delta.data['status'] in ('completed', 'failed')

    return await self._wait('action', action_id, None, predicate)
Given an action, wait for it to complete.
111
9
25,071
async def add_machine(self, spec=None, constraints=None, disks=None,
                      series=None):
    """Start a new, empty machine and optionally a container, or add a
    container to a machine.

    :param str spec: placement directive ('lxd:1', 'zone=us-east-1a', ...)
        or 'ssh:user@host:key_path' to provision an existing host over SSH
    :param str constraints: machine constraints (JSON string)
    :param list disks: list of disk constraint dicts (JSON)
    :param str series: OS series for the new machine
    :raises ValueError: if the API reports an error adding the machine
    """
    params = client.AddMachineParams()
    if spec:
        if spec.startswith("ssh:"):
            # Manual provisioning of an existing host over SSH.
            placement, target, private_key_path = spec.split(":")
            user, host = target.split("@")
            sshProvisioner = provisioner.SSHProvisioner(
                host=host,
                user=user,
                private_key_path=private_key_path,
            )
            params = sshProvisioner.provision_machine()
        else:
            placement = parse_placement(spec)
            if placement:
                params.placement = placement[0]
    params.jobs = ['JobHostUnits']
    if constraints:
        params.constraints = client.Value.from_json(constraints)
    if disks:
        params.disks = [
            client.Constraints.from_json(o)
            for o in disks]
    if series:
        params.series = series
    # Submit the request.
    client_facade = client.ClientFacade.from_connection(self.connection())
    results = await client_facade.AddMachines([params])
    error = results.machines[0].error
    if error:
        raise ValueError("Error adding machine: %s" % error.message)
    machine_id = results.machines[0].machine
    if spec:
        if spec.startswith("ssh:"):
            # Need to run this after AddMachines has been called,
            # as we need the machine_id
            await sshProvisioner.install_agent(
                self.connection(),
                params.nonce,
                machine_id,
            )
    log.debug('Added new machine %s', machine_id)
    return await self._wait_for_new('machine', machine_id)
Start a new empty machine and optionally a container or add a container to a machine .
390
17
25,072
async def add_relation(self, relation1, relation2):
    """Add a relation between two applications.

    :param str relation1: '<application>[:<relation_name>]'
    :param str relation2: '<application>[:<relation_name>]'
    :return: the Relation object, either newly created or pre-existing
    :raises JujuError: if the API says the relation already exists but it
        cannot be found in the local model state
    """
    connection = self.connection()
    app_facade = client.ApplicationFacade.from_connection(connection)
    log.debug('Adding relation %s <-> %s', relation1, relation2)

    def _find_relation(*specs):
        # Scan the local model state for a relation matching the specs.
        for rel in self.relations:
            if rel.matches(*specs):
                return rel
        return None

    try:
        result = await app_facade.AddRelation([relation1, relation2])
    except JujuAPIError as e:
        if 'relation already exists' not in e.message:
            raise
        # Already related: return the existing relation if we can see it.
        rel = _find_relation(relation1, relation2)
        if rel:
            return rel
        raise JujuError('Relation {} {} exists but not in model'.format(
            relation1, relation2))

    specs = ['{}:{}'.format(app, data['name'])
             for app, data in result.endpoints.items()]

    # Wait until the watcher has delivered the new relation to our state.
    await self.block_until(lambda: _find_relation(*specs) is not None)
    return _find_relation(*specs)
Add a relation between two applications .
243
7
25,073
async def add_ssh_key(self, user, key):
    """Add a public SSH key to this model.

    :param str user: The username of the user
    :param str key: The public ssh key
    """
    facade = client.KeyManagerFacade.from_connection(self.connection())
    return await facade.AddKeys([key], user)
Add a public SSH key to this model .
54
9
25,074
def debug_log(self, no_tail=False, exclude_module=None, include_module=None,
              include=None, level=None, limit=0, lines=10, replay=False,
              exclude=None):
    """Get log messages for this model.

    Not yet implemented.
    """
    raise NotImplementedError()
Get log messages for this model .
58
7
25,075
async def _deploy ( self , charm_url , application , series , config , constraints , endpoint_bindings , resources , storage , channel = None , num_units = None , placement = None , devices = None ) : log . info ( 'Deploying %s' , charm_url ) # stringify all config values for API, and convert to YAML config = { k : str ( v ) for k , v in config . items ( ) } config = yaml . dump ( { application : config } , default_flow_style = False ) app_facade = client . ApplicationFacade . from_connection ( self . connection ( ) ) app = client . ApplicationDeploy ( charm_url = charm_url , application = application , series = series , channel = channel , config_yaml = config , constraints = parse_constraints ( constraints ) , endpoint_bindings = endpoint_bindings , num_units = num_units , resources = resources , storage = storage , placement = placement , devices = devices , ) result = await app_facade . Deploy ( [ app ] ) errors = [ r . error . message for r in result . results if r . error ] if errors : raise JujuError ( '\n' . join ( errors ) ) return await self . _wait_for_new ( 'application' , application )
Logic shared between Model . deploy and BundleHandler . deploy .
289
13
25,076
async def destroy_unit(self, *unit_names):
    """Destroy units by name."""
    connection = self.connection()
    app_facade = client.ApplicationFacade.from_connection(connection)
    # BUG FIX: the plural 's' was previously appended when exactly ONE
    # unit was given ('s' if len(unit_names) == 1); it belongs on any
    # other count.
    log.debug(
        'Destroying unit%s %s',
        '' if len(unit_names) == 1 else 's',
        ' '.join(unit_names))
    return await app_facade.DestroyUnits(list(unit_names))
Destroy units by name .
96
5
25,077
async def get_config(self):
    """Return the configuration settings for this model.

    :return: dict mapping config key to ConfigValue
    """
    config_facade = client.ModelConfigFacade.from_connection(
        self.connection())
    result = await config_facade.ModelGet()
    settings = result.config
    # Wrap each raw value in a ConfigValue, in place.
    for key in settings:
        settings[key] = ConfigValue.from_json(settings[key])
    return settings
Return the configuration settings for this model .
76
8
25,078
async def get_constraints(self):
    """Return the machine constraints for this model.

    :return: dict of constraint name -> value (only those that are set)
    """
    constraints = {}
    client_facade = client.ClientFacade.from_connection(self.connection())
    result = await client_facade.GetModelConstraints()

    # GetModelConstraints returns GetConstraintsResults which has a
    # 'constraints' attribute.  If no constraints have been set it is
    # None; otherwise it has an attribute per possible constraint, each
    # of which is None when unset.
    if result.constraints:
        for name in dir(result.constraints):
            if name not in Value._toSchema.keys():
                continue
            value = getattr(result.constraints, name)
            if value is not None:
                constraints[name] = value
    return constraints
Return the machine constraints for this model .
204
8
25,079
def restore_backup(self, bootstrap=False, constraints=None, archive=None,
                   backup_id=None, upload_tools=False):
    """Restore a backup archive to a new controller.

    Not yet implemented.
    """
    raise NotImplementedError()
Restore a backup archive to a new controller .
42
10
25,080
async def set_config(self, config):
    """Set configuration keys on this model.

    :param dict config: values may be plain strings or ConfigValue
        instances (unwrapped before sending).
    """
    config_facade = client.ModelConfigFacade.from_connection(
        self.connection())
    for key, value in config.items():
        if isinstance(value, ConfigValue):
            config[key] = value.value
    await config_facade.ModelSet(config)
Set configuration keys on this model .
74
7
25,081
async def set_constraints(self, constraints):
    """Set machine constraints on this model."""
    client_facade = client.ClientFacade.from_connection(self.connection())
    await client_facade.SetModelConstraints(
        application='',
        constraints=constraints)
Set machine constraints on this model .
55
7
25,082
async def get_action_output(self, action_uuid, wait=None):
    """Get the results of an action by ID.

    :param str action_uuid: Id of the action
    :param int wait: seconds to wait for the action to complete
        (None waits forever)
    :return dict: the action's output, or {} if it produced none
    """
    action_facade = client.ActionFacade.from_connection(
        self.connection())
    entity = [{'tag': tag.action(action_uuid)}]
    # Cannot use self.wait_for_action as the action event has probably
    # already happened and self.wait_for_action works by processing
    # model deltas and checking if they match our type. If the action
    # has already occurred then the delta has gone.

    async def _wait_for_action_status():
        # Poll once per second until the action reaches a terminal state.
        while True:
            action_output = await action_facade.Actions(entity)
            if action_output.results[0].status in ('completed', 'failed'):
                return
            else:
                await asyncio.sleep(1)
    await asyncio.wait_for(
        _wait_for_action_status(),
        timeout=wait)
    action_output = await action_facade.Actions(entity)
    # ActionResult.output is None if the action produced no output
    if action_output.results[0].output is None:
        output = {}
    else:
        output = action_output.results[0].output
    return output
Get the results of an action by ID .
266
9
25,083
async def get_action_status(self, uuid_or_prefix=None, name=None):
    """Get the status of all actions, filtered by ID, ID prefix, or name.

    :param str uuid_or_prefix: match actions by uuid or uuid prefix
    :param str name: match actions by name
    :return dict: action uuid -> status
    """
    results = {}
    action_results = []
    action_facade = client.ActionFacade.from_connection(
        self.connection())
    if name:
        name_results = await action_facade.FindActionsByNames([name])
        action_results.extend(name_results.actions[0].actions)
    if uuid_or_prefix:
        # Collect list of actions matching uuid or prefix
        matching_actions = await action_facade.FindActionTagsByPrefix(
            [uuid_or_prefix])
        entities = []
        # NOTE(review): each iteration *overwrites* 'entities', so only
        # the last match group is queried below — verify this is intended.
        for actions in matching_actions.matches.values():
            entities = [{'tag': a.tag} for a in actions]
        # Get action results matching action tags
        uuid_results = await action_facade.Actions(entities)
        action_results.extend(uuid_results.results)
    for a in action_results:
        results[tag.untag('action-', a.action.tag)] = a.status
    return results
Get the status of all actions, filtered by ID, ID prefix, or name.
240
14
25,084
async def get_status(self, filters=None, utc=False):
    """Return the status of the model.

    :param str filters: Optional entity filters passed to FullStatus.
    :param bool utc: Display time as UTC in RFC3339 format.
        NOTE(review): currently unused by this implementation — confirm.
    """
    client_facade = client.ClientFacade.from_connection(self.connection())
    return await client_facade.FullStatus(filters)
Return the status of the model .
52
7
25,085
def sync_tools(self, all_=False, destination=None, dry_run=False,
               public=False, source=None, stream=None, version=None):
    """Copy Juju tools into this model.

    Not yet implemented.
    """
    raise NotImplementedError()
Copy Juju tools into this model .
47
8
25,086
def upgrade_juju(self, dry_run=False, reset_previous_upgrade=False,
                 upload_tools=False, version=None):
    """Upgrade Juju on all machines in a model.

    Not yet implemented.
    """
    raise NotImplementedError()
Upgrade Juju on all machines in a model .
43
10
25,087
async def get_metrics(self, *tags):
    """Retrieve metrics, optionally filtered to the given entity tags.

    :return: dict mapping unit name -> list of metric dicts
    :raises ValueError: when a given tag is reported invalid by the API
    """
    log.debug("Retrieving metrics for %s",
              ', '.join(tags) if tags else "all units")

    metrics_facade = client.MetricsDebugFacade.from_connection(
        self.connection())
    entities = [client.Entity(tag) for tag in tags]
    metrics_result = await metrics_facade.GetMetrics(entities)

    metrics = collections.defaultdict(list)
    for entity_metrics in metrics_result.results:
        error = entity_metrics.error
        if error:
            if "is not a valid tag" in error:
                raise ValueError(error.message)
            raise Exception(error.message)
        for metric in entity_metrics.metrics:
            metrics[metric.unit].append(vars(metric))
    return metrics
Retrieve metrics .
182
4
25,088
async def scale(self, application, scale):
    """Handle a change of scale to a k8s application."""
    app_name = self.resolve(application)
    target = self.model.applications[app_name]
    return await target.scale(scale=scale)
Handle a change of scale to a k8s application .
37
12
25,089
def make_archive(self, path):
    """Create a zip archive of self.path and write it to *path*.

    Entries matching self._ignore are skipped; symlinks are stored as
    symlink entries rather than followed.  Returns *path*.
    """
    zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
    for dirpath, dirnames, filenames in os.walk(self.path):
        relative_path = dirpath[len(self.path) + 1:]
        if relative_path and not self._ignore(relative_path):
            zf.write(dirpath, relative_path)
        for name in filenames:
            archive_name = os.path.join(relative_path, name)
            if self._ignore(archive_name):
                continue
            real_path = os.path.join(dirpath, name)
            self._check_type(real_path)
            if os.path.islink(real_path):
                self._check_link(real_path)
                self._write_symlink(
                    zf, os.readlink(real_path), archive_name)
            else:
                zf.write(real_path, archive_name)
    zf.close()
    return path
Create archive of directory and write to path .
233
9
25,090
def _check_type ( self , path ) : s = os . stat ( path ) if stat . S_ISDIR ( s . st_mode ) or stat . S_ISREG ( s . st_mode ) : return path raise ValueError ( "Invalid Charm at % %s" % ( path , "Invalid file type for a charm" ) )
Check that the path is a regular file or directory; raise otherwise.
76
3
25,091
def _write_symlink ( self , zf , link_target , link_path ) : info = zipfile . ZipInfo ( ) info . filename = link_path info . create_system = 3 # Magic code for symlinks / py2/3 compat # 27166663808 = (stat.S_IFLNK | 0755) << 16 info . external_attr = 2716663808 zf . writestr ( info , link_target )
Package symlinks with appropriate zipfile metadata .
102
9
25,092
async def set_password(self, password):
    """Update this user's password on the controller and cache it locally."""
    controller = self.controller
    await controller.change_user_password(self.username, password)
    self._user_info.password = password
Update this user s password .
39
6
25,093
async def grant(self, acl='login'):
    """Set the access level of this user on the controller.

    The cached access level is only updated when the controller
    actually granted it.
    """
    granted = await self.controller.grant(self.username, acl)
    if granted:
        self._user_info.access = acl
Set access level of this user on the controller .
42
10
25,094
async def revoke(self):
    """Remove all access rights for this user from the controller."""
    await self.controller.revoke(self.username)
    self._user_info.access = ''
Removes all access rights for this user from the controller .
29
12
25,095
async def disable(self):
    """Disable this user on the controller and update the cached flag."""
    await self.controller.disable_user(self.username)
    self._user_info.disabled = True
Disable this user .
31
4
25,096
async def enable(self):
    """Re-enable this user on the controller and update the cached flag."""
    await self.controller.enable_user(self.username)
    self._user_info.disabled = False
Re - enable this user .
31
6
25,097
async def destroy(self, force=False):
    """Remove this machine from the model, waiting for the removal delta."""
    facade = client.ClientFacade.from_connection(self.connection)
    log.debug('Destroying machine %s', self.id)
    await facade.DestroyMachines(force, [self.id])
    return await self.model._wait('machine', self.id, 'remove')
Remove this machine from the model .
80
7
25,098
async def scp_to(self, source, destination, user='ubuntu', proxy=False,
                 scp_opts=''):
    """Transfer files to this machine via scp.

    :raises NotImplementedError: if proxy=True (not implemented)
    """
    if proxy:
        raise NotImplementedError('proxy option is not implemented')
    target = '%s@%s:%s' % (user, self.dns_name, destination)
    await self._scp(source, target, scp_opts)
Transfer files to this machine .
96
6
25,099
async def _scp(self, source, destination, scp_opts):
    """Execute an scp command.  Requires fully qualified source and destination.

    :raises JujuError: if scp exits with a non-zero status
    """
    cmd = [
        'scp',
        '-i', os.path.expanduser('~/.local/share/juju/ssh/juju_id_rsa'),
        '-o', 'StrictHostKeyChecking=no',
        '-q', '-B',
    ]
    if isinstance(scp_opts, str):
        cmd.extend(scp_opts.split())
    else:
        cmd.extend(scp_opts)
    cmd.extend([source, destination])
    process = await asyncio.create_subprocess_exec(
        *cmd, loop=self.model.loop)
    await process.wait()
    if process.returncode != 0:
        raise JujuError("command failed: %s" % cmd)
Execute an scp command . Requires a fully qualified source and destination .
186
15