idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
226,400
def get_graph ( self ) : st = self . status if st in ( SolverStatus . solved , SolverStatus . unsolved ) : phase = self . _latest_nonfailed_phase ( ) return phase . get_graph ( ) else : return self . get_fail_graph ( )
Returns the most recent solve graph .
63
7
226,401
def get_fail_graph ( self , failure_index = None ) : phase , _ = self . _get_failed_phase ( failure_index ) return phase . get_graph ( )
Returns a graph showing a solve failure .
41
8
226,402
def dump ( self ) : from rez . utils . formatting import columnise rows = [ ] for i , phase in enumerate ( self . phase_stack ) : rows . append ( ( self . _depth_label ( i ) , phase . status , str ( phase ) ) ) print "status: %s (%s)" % ( self . status . name , self . status . description ) print "initial request: %s" % str ( self . request_list ) print print "solve stack:" print '\n' . join ( columnise ( rows ) ) if self . failed_phase_list : rows = [ ] for i , phase in enumerate ( self . failed_phase_list ) : rows . append ( ( "#%d" % i , phase . status , str ( phase ) ) ) print print "previous failures:" print '\n' . join ( columnise ( rows ) )
Print a formatted summary of the current solve state .
195
10
226,403
def make_path_writable ( path ) : from rez . config import config try : orig_mode = os . stat ( path ) . st_mode new_mode = orig_mode if config . make_package_temporarily_writable and not os . access ( path , os . W_OK ) : new_mode = orig_mode | stat . S_IWUSR # make writable if new_mode != orig_mode : os . chmod ( path , new_mode ) except OSError : # ignore access errors here, and just do nothing. It will be more # intuitive for the calling code to fail on access instead. # orig_mode = None new_mode = None # yield, then reset mode back to original try : yield finally : if new_mode != orig_mode : os . chmod ( path , orig_mode )
Temporarily make path writable if possible .
187
10
226,404
def get_existing_path ( path , topmost_path = None ) : prev_path = None if topmost_path : topmost_path = os . path . normpath ( topmost_path ) while True : if os . path . exists ( path ) : return path path = os . path . dirname ( path ) if path == prev_path : return None if topmost_path and os . path . normpath ( path ) == topmost_path : return None prev_path = path
Get the longest parent path in path that exists .
108
10
226,405
def safe_makedirs ( path ) : if not os . path . exists ( path ) : try : os . makedirs ( path ) except OSError : if not os . path . exists ( path ) : raise
Safe makedirs .
49
5
226,406
def safe_remove ( path ) : if not os . path . exists ( path ) : return try : if os . path . isdir ( path ) and not os . path . islink ( path ) : shutil . rmtree ( path ) else : os . remove ( path ) except OSError : if os . path . exists ( path ) : raise
Safely remove the given file or directory .
78
9
226,407
def replacing_symlink ( source , link_name ) : with make_tmp_name ( link_name ) as tmp_link_name : os . symlink ( source , tmp_link_name ) replace_file_or_dir ( link_name , tmp_link_name )
Create symlink that overwrites any existing target .
64
11
226,408
def replacing_copy ( src , dest , follow_symlinks = False ) : with make_tmp_name ( dest ) as tmp_dest : if os . path . islink ( src ) and not follow_symlinks : # special case - copy just a symlink src_ = os . readlink ( src ) os . symlink ( src_ , tmp_dest ) elif os . path . isdir ( src ) : # copy a dir shutil . copytree ( src , tmp_dest , symlinks = ( not follow_symlinks ) ) else : # copy a file shutil . copy2 ( src , tmp_dest ) replace_file_or_dir ( dest , tmp_dest )
Perform copy that overwrites any existing target .
152
10
226,409
def replace_file_or_dir ( dest , source ) : from rez . vendor . atomicwrites import replace_atomic if not os . path . exists ( dest ) : try : os . rename ( source , dest ) return except : if not os . path . exists ( dest ) : raise try : replace_atomic ( source , dest ) return except : pass with make_tmp_name ( dest ) as tmp_dest : os . rename ( dest , tmp_dest ) os . rename ( source , dest )
Replace dest with source .
109
6
226,410
def make_tmp_name ( name ) : path , base = os . path . split ( name ) tmp_base = ".tmp-%s-%s" % ( base , uuid4 ( ) . hex ) tmp_name = os . path . join ( path , tmp_base ) try : yield tmp_name finally : safe_remove ( tmp_name )
Generates a tmp name for a file or dir .
79
11
226,411
def is_subdirectory ( path_a , path_b ) : path_a = os . path . realpath ( path_a ) path_b = os . path . realpath ( path_b ) relative = os . path . relpath ( path_a , path_b ) return ( not relative . startswith ( os . pardir + os . sep ) )
Returns True if path_a is a subdirectory of path_b .
81
15
226,412
def find_matching_symlink ( path , source ) : def to_abs ( target ) : if os . path . isabs ( target ) : return target else : return os . path . normpath ( os . path . join ( path , target ) ) abs_source = to_abs ( source ) for name in os . listdir ( path ) : linkpath = os . path . join ( path , name ) if os . path . islink : source_ = os . readlink ( linkpath ) if to_abs ( source_ ) == abs_source : return name return None
Find a symlink under path that points at source .
127
12
226,413
def copy_or_replace ( src , dst ) : try : shutil . copy ( src , dst ) except ( OSError , IOError ) , e : # It's possible that the file existed, but was owned by someone # else - in that situation, shutil.copy might then fail when it # tries to copy perms. # However, it's possible that we have write perms to the dir - # in which case, we can just delete and replace import errno if e . errno == errno . EPERM : import tempfile # try copying into a temporary location beside the old # file - if we have perms to do that, we should have perms # to then delete the old file, and move the new one into # place if os . path . isdir ( dst ) : dst = os . path . join ( dst , os . path . basename ( src ) ) dst_dir , dst_name = os . path . split ( dst ) dst_temp = tempfile . mktemp ( prefix = dst_name + '.' , dir = dst_dir ) shutil . copy ( src , dst_temp ) if not os . path . isfile ( dst_temp ) : raise RuntimeError ( "shutil.copy completed successfully, but path" " '%s' still did not exist" % dst_temp ) os . remove ( dst ) shutil . move ( dst_temp , dst )
try to copy with mode and if it fails try replacing
306
11
226,414
def movetree ( src , dst ) : try : shutil . move ( src , dst ) except : copytree ( src , dst , symlinks = True , hardlinks = True ) shutil . rmtree ( src )
Attempts a move and falls back to a copy + delete if this fails
48
14
226,415
def safe_chmod ( path , mode ) : if stat . S_IMODE ( os . stat ( path ) . st_mode ) != mode : os . chmod ( path , mode )
Set the permissions mode on path but only if it differs from the current mode .
42
16
226,416
def encode_filesystem_name ( input_str ) : if isinstance ( input_str , str ) : input_str = unicode ( input_str ) elif not isinstance ( input_str , unicode ) : raise TypeError ( "input_str must be a basestring" ) as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-' uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ' result = [ ] for char in input_str : if char in as_is : result . append ( char ) elif char == u'_' : result . append ( '__' ) elif char in uppercase : result . append ( '_%s' % char . lower ( ) ) else : utf8 = char . encode ( 'utf8' ) N = len ( utf8 ) if N == 1 : N = 0 HH = '' . join ( '%x' % ord ( c ) for c in utf8 ) result . append ( '_%d%s' % ( N , HH ) ) return '' . join ( result )
Encodes an arbitrary unicode string to a generic filesystem - compatible non - unicode filename .
257
19
226,417
def decode_filesystem_name ( filename ) : result = [ ] remain = filename i = 0 while remain : # use match, to ensure it matches from the start of the string... match = _FILESYSTEM_TOKEN_RE . match ( remain ) if not match : raise ValueError ( "incorrectly encoded filesystem name %r" " (bad index: %d - %r)" % ( filename , i , remain [ : 2 ] ) ) match_str = match . group ( 0 ) match_len = len ( match_str ) i += match_len remain = remain [ match_len : ] match_dict = match . groupdict ( ) if match_dict [ 'as_is' ] : result . append ( unicode ( match_str ) ) elif match_dict [ 'underscore' ] : result . append ( u'_' ) elif match_dict [ 'uppercase' ] : result . append ( unicode ( match_dict [ 'uppercase' ] . upper ( ) ) ) elif match_dict [ 'N' ] : N = int ( match_dict [ 'N' ] ) if N == 0 : N = 1 # hex-encoded, so need to grab 2*N chars bytes_len = 2 * N i += bytes_len bytes = remain [ : bytes_len ] remain = remain [ bytes_len : ] # need this check to ensure that we don't end up eval'ing # something nasty... if not _HEX_RE . match ( bytes ) : raise ValueError ( "Bad utf8 encoding in name %r" " (bad index: %d - %r)" % ( filename , i , bytes ) ) bytes_repr = '' . join ( '\\x%s' % bytes [ i : i + 2 ] for i in xrange ( 0 , bytes_len , 2 ) ) bytes_repr = "'%s'" % bytes_repr result . append ( eval ( bytes_repr ) . decode ( 'utf8' ) ) else : raise ValueError ( "Unrecognized match type in filesystem name %r" " (bad index: %d - %r)" % ( filename , i , remain [ : 2 ] ) ) return u'' . join ( result )
Decodes a filename encoded using the rules given in encode_filesystem_name to a unicode string .
489
22
226,418
def walk_up_dirs ( path ) : prev_path = None current_path = os . path . abspath ( path ) while current_path != prev_path : yield current_path prev_path = current_path current_path = os . path . dirname ( prev_path )
Yields absolute directories starting with the given path and iterating up through all it s parents until it reaches a root directory
64
25
226,419
def run ( self , * args ) : if self . prefix_char is None : prefix_char = config . suite_alias_prefix_char else : prefix_char = self . prefix_char if prefix_char == '' : # empty prefix char means we don't support the '+' args return self . _run_no_args ( args ) else : return self . _run ( prefix_char , args )
Invoke the wrapped script .
89
6
226,420
def print_about ( self ) : filepath = os . path . join ( self . suite_path , "bin" , self . tool_name ) print "Tool: %s" % self . tool_name print "Path: %s" % filepath print "Suite: %s" % self . suite_path msg = "%s (%r)" % ( self . context . load_path , self . context_name ) print "Context: %s" % msg variants = self . context . get_tool_variants ( self . tool_name ) if variants : if len ( variants ) > 1 : self . _print_conflicting ( variants ) else : variant = iter ( variants ) . next ( ) print "Package: %s" % variant . qualified_package_name return 0
Print an info message about the tool .
172
8
226,421
def print_package_versions ( self ) : variants = self . context . get_tool_variants ( self . tool_name ) if variants : if len ( variants ) > 1 : self . _print_conflicting ( variants ) return 1 else : from rez . packages_ import iter_packages variant = iter ( variants ) . next ( ) it = iter_packages ( name = variant . name ) rows = [ ] colors = [ ] for pkg in sorted ( it , key = lambda x : x . version , reverse = True ) : if pkg . version == variant . version : name = "* %s" % pkg . qualified_name col = heading else : name = " %s" % pkg . qualified_name col = local if pkg . is_local else None label = "(local)" if pkg . is_local else "" rows . append ( ( name , pkg . path , label ) ) colors . append ( col ) _pr = Printer ( ) for col , line in zip ( colors , columnise ( rows ) ) : _pr ( line , col ) return 0
Print a list of versions of the package this tool comes from and indicate which version this tool is from .
238
21
226,422
def generate_hypergraph ( num_nodes , num_edges , r = 0 ) : # Graph creation random_graph = hypergraph ( ) # Nodes nodes = list ( map ( str , list ( range ( num_nodes ) ) ) ) random_graph . add_nodes ( nodes ) # Base edges edges = list ( map ( str , list ( range ( num_nodes , num_nodes + num_edges ) ) ) ) random_graph . add_hyperedges ( edges ) # Connect the edges if 0 == r : # Add each edge with 50/50 probability for e in edges : for n in nodes : if choice ( [ True , False ] ) : random_graph . link ( n , e ) else : # Add only uniform edges for e in edges : # First shuffle the nodes shuffle ( nodes ) # Then take the first r nodes for i in range ( r ) : random_graph . link ( nodes [ i ] , e ) return random_graph
Create a random hyper graph .
213
6
226,423
def check_version ( version , range_ = None ) : if range_ and version not in range_ : raise RezBindError ( "found version %s is not within range %s" % ( str ( version ) , str ( range_ ) ) )
Check that the found software version is within supplied range .
54
11
226,424
def extract_version ( exepath , version_arg , word_index = - 1 , version_rank = 3 ) : if isinstance ( version_arg , basestring ) : version_arg = [ version_arg ] args = [ exepath ] + version_arg stdout , stderr , returncode = _run_command ( args ) if returncode : raise RezBindError ( "failed to execute %s: %s\n(error code %d)" % ( exepath , stderr , returncode ) ) stdout = stdout . strip ( ) . split ( '\n' ) [ 0 ] . strip ( ) log ( "extracting version from output: '%s'" % stdout ) try : strver = stdout . split ( ) [ word_index ] toks = strver . replace ( '.' , ' ' ) . replace ( '-' , ' ' ) . split ( ) strver = '.' . join ( toks [ : version_rank ] ) version = Version ( strver ) except Exception as e : raise RezBindError ( "failed to parse version from output '%s': %s" % ( stdout , str ( e ) ) ) log ( "extracted version: '%s'" % str ( version ) ) return version
Run an executable and get the program version .
280
9
226,425
def construct ( cls , name , version = None ) : other = VersionedObject ( None ) other . name_ = name other . version_ = Version ( ) if version is None else version return other
Create a VersionedObject directly from an object name and version .
43
13
226,426
def construct ( cls , name , range = None ) : other = Requirement ( None ) other . name_ = name other . range_ = VersionRange ( ) if range is None else range return other
Create a requirement directly from an object name and VersionRange .
43
12
226,427
def conflicts_with ( self , other ) : if isinstance ( other , Requirement ) : if ( self . name_ != other . name_ ) or ( self . range is None ) or ( other . range is None ) : return False elif self . conflict : return False if other . conflict else self . range_ . issuperset ( other . range_ ) elif other . conflict : return other . range_ . issuperset ( self . range_ ) else : return not self . range_ . intersects ( other . range_ ) else : # VersionedObject if ( self . name_ != other . name_ ) or ( self . range is None ) : return False if self . conflict : return ( other . version_ in self . range_ ) else : return ( other . version_ not in self . range_ )
Returns True if this requirement conflicts with another Requirement or VersionedObject .
179
15
226,428
def merged ( self , other ) : if self . name_ != other . name_ : return None # cannot merge across object names def _r ( r_ ) : r = Requirement ( None ) r . name_ = r_ . name_ r . negate_ = r_ . negate_ r . conflict_ = r_ . conflict_ r . sep_ = r_ . sep_ return r if self . range is None : return other elif other . range is None : return self elif self . conflict : if other . conflict : r = _r ( self ) r . range_ = self . range_ | other . range_ r . negate_ = ( self . negate_ and other . negate_ and not r . range_ . is_any ( ) ) return r else : range_ = other . range - self . range if range_ is None : return None else : r = _r ( other ) r . range_ = range_ return r elif other . conflict : range_ = self . range_ - other . range_ if range_ is None : return None else : r = _r ( self ) r . range_ = range_ return r else : range_ = self . range_ & other . range_ if range_ is None : return None else : r = _r ( self ) r . range_ = range_ return r
Returns the merged result of two requirements .
287
8
226,429
def read_graph_from_string ( txt ) : if not txt . startswith ( '{' ) : return read_dot ( txt ) # standard dot format def conv ( value ) : if isinstance ( value , basestring ) : return '"' + value + '"' else : return value # our compacted format doc = literal_eval ( txt ) g = digraph ( ) for attrs , values in doc . get ( "nodes" , [ ] ) : attrs = [ ( k , conv ( v ) ) for k , v in attrs ] for value in values : if isinstance ( value , basestring ) : node_name = value attrs_ = attrs else : node_name , label = value attrs_ = attrs + [ ( "label" , conv ( label ) ) ] g . add_node ( node_name , attrs = attrs_ ) for attrs , values in doc . get ( "edges" , [ ] ) : attrs_ = [ ( k , conv ( v ) ) for k , v in attrs ] for value in values : if len ( value ) == 3 : edge = value [ : 2 ] label = value [ - 1 ] else : edge = value label = '' g . add_edge ( edge , label = label , attrs = attrs_ ) return g
Read a graph from a string either in dot format or our own compressed format .
295
16
226,430
def write_compacted ( g ) : d_nodes = { } d_edges = { } def conv ( value ) : if isinstance ( value , basestring ) : return value . strip ( '"' ) else : return value for node in g . nodes ( ) : label = None attrs = [ ] for k , v in sorted ( g . node_attributes ( node ) ) : v_ = conv ( v ) if k == "label" : label = v_ else : attrs . append ( ( k , v_ ) ) value = ( node , label ) if label else node d_nodes . setdefault ( tuple ( attrs ) , [ ] ) . append ( value ) for edge in g . edges ( ) : attrs = [ ( k , conv ( v ) ) for k , v in sorted ( g . edge_attributes ( edge ) ) ] label = str ( g . edge_label ( edge ) ) value = tuple ( list ( edge ) + [ label ] ) if label else edge d_edges . setdefault ( tuple ( attrs ) , [ ] ) . append ( tuple ( value ) ) doc = dict ( nodes = d_nodes . items ( ) , edges = d_edges . items ( ) ) contents = str ( doc ) return contents
Write a graph in our own compacted format .
280
10
226,431
def write_dot ( g ) : lines = [ "digraph g {" ] def attrs_txt ( items ) : if items : txt = ", " . join ( ( '%s="%s"' % ( k , str ( v ) . strip ( '"' ) ) ) for k , v in items ) return '[' + txt + ']' else : return '' for node in g . nodes ( ) : atxt = attrs_txt ( g . node_attributes ( node ) ) txt = "%s %s;" % ( node , atxt ) lines . append ( txt ) for e in g . edges ( ) : edge_from , edge_to = e attrs = g . edge_attributes ( e ) label = str ( g . edge_label ( e ) ) if label : attrs . append ( ( "label" , label ) ) atxt = attrs_txt ( attrs ) txt = "%s -> %s %s;" % ( edge_from , edge_to , atxt ) lines . append ( txt ) lines . append ( "}" ) return '\n' . join ( lines )
Replacement for pygraph . readwrite . dot . write which is dog slow .
248
17
226,432
def prune_graph ( graph_str , package_name ) : # find nodes of interest g = read_dot ( graph_str ) nodes = set ( ) for node , attrs in g . node_attr . iteritems ( ) : attr = [ x for x in attrs if x [ 0 ] == "label" ] if attr : label = attr [ 0 ] [ 1 ] try : req_str = _request_from_label ( label ) request = PackageRequest ( req_str ) except PackageRequestError : continue if request . name == package_name : nodes . add ( node ) if not nodes : raise ValueError ( "The package %r does not appear in the graph." % package_name ) # find nodes upstream from these nodes g_rev = g . reverse ( ) accessible_nodes = set ( ) access = accessibility ( g_rev ) for node in nodes : nodes_ = access . get ( node , [ ] ) accessible_nodes |= set ( nodes_ ) # remove inaccessible nodes inaccessible_nodes = set ( g . nodes ( ) ) - accessible_nodes for node in inaccessible_nodes : g . del_node ( node ) return write_dot ( g )
Prune a package graph so it only contains nodes accessible from the given package .
261
16
226,433
def save_graph ( graph_str , dest_file , fmt = None , image_ratio = None ) : g = pydot . graph_from_dot_data ( graph_str ) # determine the dest format if fmt is None : fmt = os . path . splitext ( dest_file ) [ 1 ] . lower ( ) . strip ( '.' ) or "png" if hasattr ( g , "write_" + fmt ) : write_fn = getattr ( g , "write_" + fmt ) else : raise Exception ( "Unsupported graph format: '%s'" % fmt ) if image_ratio : g . set_ratio ( str ( image_ratio ) ) write_fn ( dest_file ) return fmt
Render a graph to an image file .
163
8
226,434
def view_graph ( graph_str , dest_file = None ) : from rez . system import system from rez . config import config if ( system . platform == "linux" ) and ( not os . getenv ( "DISPLAY" ) ) : print >> sys . stderr , "Unable to open display." sys . exit ( 1 ) dest_file = _write_graph ( graph_str , dest_file = dest_file ) # view graph viewed = False prog = config . image_viewer or 'browser' print "loading image viewer (%s)..." % prog if config . image_viewer : proc = popen ( [ config . image_viewer , dest_file ] ) proc . wait ( ) viewed = not bool ( proc . returncode ) if not viewed : import webbrowser webbrowser . open_new ( "file://" + dest_file )
View a dot graph in an image viewer .
190
9
226,435
def physical_cores ( self ) : try : return self . _physical_cores_base ( ) except Exception as e : from rez . utils . logging_ import print_error print_error ( "Error detecting physical core count, defaulting to 1: %s" % str ( e ) ) return 1
Return the number of physical cpu cores on the system .
68
11
226,436
def logical_cores ( self ) : try : return self . _logical_cores ( ) except Exception as e : from rez . utils . logging_ import print_error print_error ( "Error detecting logical core count, defaulting to 1: %s" % str ( e ) ) return 1
Return the number of cpu cores as reported to the os .
67
12
226,437
def write_and_convert ( self , text ) : cursor = 0 for match in self . ANSI_RE . finditer ( text ) : start , end = match . span ( ) self . write_plain_text ( text , cursor , start ) self . convert_ansi ( * match . groups ( ) ) cursor = end self . write_plain_text ( text , cursor , len ( text ) )
Write the given text to our wrapped stream stripping any ANSI sequences from the text and optionally converting them into win32 calls .
89
25
226,438
def copy ( self ) : other = ContextModel ( self . _context , self . parent ( ) ) other . _stale = self . _stale other . _modified = self . _modified other . request = self . request [ : ] other . packages_path = self . packages_path other . implicit_packages = self . implicit_packages other . package_filter = self . package_filter other . caching = self . caching other . default_patch_lock = self . default_patch_lock other . patch_locks = copy . deepcopy ( self . patch_locks ) return other
Returns a copy of the context .
126
7
226,439
def get_lock_requests ( self ) : d = defaultdict ( list ) if self . _context : for variant in self . _context . resolved_packages : name = variant . name version = variant . version lock = self . patch_locks . get ( name ) if lock is None : lock = self . default_patch_lock request = get_lock_request ( name , version , lock ) if request is not None : d [ lock ] . append ( request ) return d
Take the current context and the current patch locks and determine the effective requests that will be added to the main request .
103
23
226,440
def resolve_context ( self , verbosity = 0 , max_fails = - 1 , timestamp = None , callback = None , buf = None , package_load_callback = None ) : package_filter = PackageFilterList . from_pod ( self . package_filter ) context = ResolvedContext ( self . request , package_paths = self . packages_path , package_filter = package_filter , verbosity = verbosity , max_fails = max_fails , timestamp = timestamp , buf = buf , callback = callback , package_load_callback = package_load_callback , caching = self . caching ) if context . success : if self . _context and self . _context . load_path : context . set_load_path ( self . _context . load_path ) self . _set_context ( context ) self . _modified = True return context
Update the current context by performing a re - resolve .
188
11
226,441
def set_context ( self , context ) : self . _set_context ( context , emit = False ) self . _modified = ( not context . load_path ) self . dataChanged . emit ( self . CONTEXT_CHANGED | self . REQUEST_CHANGED | self . PACKAGES_PATH_CHANGED | self . LOCKS_CHANGED | self . LOADPATH_CHANGED | self . PACKAGE_FILTER_CHANGED | self . CACHING_CHANGED )
Replace the current context with another .
114
8
226,442
def get_resources_dests ( resources_root , rules ) : def get_rel_path ( base , path ) : # normalizes and returns a lstripped-/-separated path base = base . replace ( os . path . sep , '/' ) path = path . replace ( os . path . sep , '/' ) assert path . startswith ( base ) return path [ len ( base ) : ] . lstrip ( '/' ) destinations = { } for base , suffix , dest in rules : prefix = os . path . join ( resources_root , base ) for abs_base in iglob ( prefix ) : abs_glob = os . path . join ( abs_base , suffix ) for abs_path in iglob ( abs_glob ) : resource_file = get_rel_path ( resources_root , abs_path ) if dest is None : # remove the entry if it was here destinations . pop ( resource_file , None ) else : rel_path = get_rel_path ( abs_base , abs_path ) rel_dest = dest . replace ( os . path . sep , '/' ) . rstrip ( '/' ) destinations [ resource_file ] = rel_dest + '/' + rel_path return destinations
Find destinations for resources files
271
5
226,443
def find_cycle ( graph ) : if ( isinstance ( graph , graph_class ) ) : directed = False elif ( isinstance ( graph , digraph_class ) ) : directed = True else : raise InvalidGraphType def find_cycle_to_ancestor ( node , ancestor ) : """ Find a cycle containing both node and ancestor. """ path = [ ] while ( node != ancestor ) : if ( node is None ) : return [ ] path . append ( node ) node = spanning_tree [ node ] path . append ( node ) path . reverse ( ) return path def dfs ( node ) : """ Depth-first search subfunction. """ visited [ node ] = 1 # Explore recursively the connected component for each in graph [ node ] : if ( cycle ) : return if ( each not in visited ) : spanning_tree [ each ] = node dfs ( each ) else : if ( directed or spanning_tree [ node ] != each ) : cycle . extend ( find_cycle_to_ancestor ( node , each ) ) recursionlimit = getrecursionlimit ( ) setrecursionlimit ( max ( len ( graph . nodes ( ) ) * 2 , recursionlimit ) ) visited = { } # List for marking visited and non-visited nodes spanning_tree = { } # Spanning tree cycle = [ ] # Algorithm outer-loop for each in graph : # Select a non-visited node if ( each not in visited ) : spanning_tree [ each ] = None # Explore node's connected component dfs ( each ) if ( cycle ) : setrecursionlimit ( recursionlimit ) return cycle setrecursionlimit ( recursionlimit ) return [ ]
Find a cycle in the given graph . This function will return a list of nodes which form a cycle in the graph or an empty list if no cycle exists .
359
32
226,444
def create_release_vcs ( path , vcs_name = None ) : from rez . plugin_managers import plugin_manager vcs_types = get_release_vcs_types ( ) if vcs_name : if vcs_name not in vcs_types : raise ReleaseVCSError ( "Unknown version control system: %r" % vcs_name ) cls = plugin_manager . get_plugin_class ( 'release_vcs' , vcs_name ) return cls ( path ) classes_by_level = { } for vcs_name in vcs_types : cls = plugin_manager . get_plugin_class ( 'release_vcs' , vcs_name ) result = cls . find_vcs_root ( path ) if not result : continue vcs_path , levels_up = result classes_by_level . setdefault ( levels_up , [ ] ) . append ( ( cls , vcs_path ) ) if not classes_by_level : raise ReleaseVCSError ( "No version control system for package " "releasing is associated with the path %s" % path ) # it's ok to have multiple results, as long as there is only one at the # "closest" directory up from this dir - ie, if we start at: # /blah/foo/pkg_root # and these dirs exist: # /blah/.hg # /blah/foo/.git # ...then this is ok, because /blah/foo/.git is "closer" to the original # dir, and will be picked. However, if these two directories exist: # /blah/foo/.git # /blah/foo/.hg # ...then we error, because we can't decide which to use lowest_level = sorted ( classes_by_level ) [ 0 ] clss = classes_by_level [ lowest_level ] if len ( clss ) > 1 : clss_str = ", " . join ( x [ 0 ] . name ( ) for x in clss ) raise ReleaseVCSError ( "Several version control systems are associated " "with the path %s: %s. Use rez-release --vcs to " "choose." % ( path , clss_str ) ) else : cls , vcs_root = clss [ 0 ] return cls ( pkg_root = path , vcs_root = vcs_root )
Return a new release VCS that can release from this source path .
540
14
226,445
def find_vcs_root ( cls , path ) : if cls . search_parents_for_root ( ) : valid_dirs = walk_up_dirs ( path ) else : valid_dirs = [ path ] for i , current_path in enumerate ( valid_dirs ) : if cls . is_valid_root ( current_path ) : return current_path , i return None
Try to find a version control root directory of this type for the given path .
91
16
226,446
def _cmd ( self , * nargs ) : cmd_str = ' ' . join ( map ( quote , nargs ) ) if self . package . config . debug ( "package_release" ) : print_debug ( "Running command: %s" % cmd_str ) p = popen ( nargs , stdout = subprocess . PIPE , stderr = subprocess . PIPE , cwd = self . pkg_root ) out , err = p . communicate ( ) if p . returncode : print_debug ( "command stdout:" ) print_debug ( out ) print_debug ( "command stderr:" ) print_debug ( err ) raise ReleaseVCSError ( "command failed: %s\n%s" % ( cmd_str , err ) ) out = out . strip ( ) if out : return [ x . rstrip ( ) for x in out . split ( '\n' ) ] else : return [ ]
Convenience function for executing a program such as git etc .
210
13
226,447
def _close ( self , args ) : reply_code = args . read_short ( ) reply_text = args . read_shortstr ( ) class_id = args . read_short ( ) method_id = args . read_short ( ) self . _send_method ( ( 20 , 41 ) ) self . _do_revive ( ) raise error_for_code ( reply_code , reply_text , ( class_id , method_id ) , ChannelError , )
Request a channel close
105
4
226,448
def _x_flow_ok ( self , active ) : args = AMQPWriter ( ) args . write_bit ( active ) self . _send_method ( ( 20 , 21 ) , args )
Confirm a flow method
44
5
226,449
def _x_open ( self ) : if self . is_open : return args = AMQPWriter ( ) args . write_shortstr ( '' ) # out_of_band: deprecated self . _send_method ( ( 20 , 10 ) , args ) return self . wait ( allowed_methods = [ ( 20 , 11 ) , # Channel.open_ok ] )
Open a channel for use
82
5
226,450
def exchange_declare ( self , exchange , type , passive = False , durable = False , auto_delete = True , nowait = False , arguments = None ) : arguments = { } if arguments is None else arguments args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( exchange ) args . write_shortstr ( type ) args . write_bit ( passive ) args . write_bit ( durable ) args . write_bit ( auto_delete ) args . write_bit ( False ) # internal: deprecated args . write_bit ( nowait ) args . write_table ( arguments ) self . _send_method ( ( 40 , 10 ) , args ) if auto_delete : warn ( VDeprecationWarning ( EXCHANGE_AUTODELETE_DEPRECATED ) ) if not nowait : return self . wait ( allowed_methods = [ ( 40 , 11 ) , # Channel.exchange_declare_ok ] )
Declare exchange create if needed
211
6
226,451
def exchange_bind ( self , destination , source = '' , routing_key = '' , nowait = False , arguments = None ) : arguments = { } if arguments is None else arguments args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( destination ) args . write_shortstr ( source ) args . write_shortstr ( routing_key ) args . write_bit ( nowait ) args . write_table ( arguments ) self . _send_method ( ( 40 , 30 ) , args ) if not nowait : return self . wait ( allowed_methods = [ ( 40 , 31 ) , # Channel.exchange_bind_ok ] )
This method binds an exchange to an exchange .
149
9
226,452
def queue_declare ( self , queue = '' , passive = False , durable = False , exclusive = False , auto_delete = True , nowait = False , arguments = None ) : arguments = { } if arguments is None else arguments args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( queue ) args . write_bit ( passive ) args . write_bit ( durable ) args . write_bit ( exclusive ) args . write_bit ( auto_delete ) args . write_bit ( nowait ) args . write_table ( arguments ) self . _send_method ( ( 50 , 10 ) , args ) if not nowait : return self . wait ( allowed_methods = [ ( 50 , 11 ) , # Channel.queue_declare_ok ] )
Declare queue create if needed
174
6
226,453
def _queue_declare_ok ( self , args ) : return queue_declare_ok_t ( args . read_shortstr ( ) , args . read_long ( ) , args . read_long ( ) , )
Confirms a queue definition
50
5
226,454
def queue_delete ( self , queue = '' , if_unused = False , if_empty = False , nowait = False ) : args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( queue ) args . write_bit ( if_unused ) args . write_bit ( if_empty ) args . write_bit ( nowait ) self . _send_method ( ( 50 , 40 ) , args ) if not nowait : return self . wait ( allowed_methods = [ ( 50 , 41 ) , # Channel.queue_delete_ok ] )
Delete a queue
132
3
226,455
def queue_purge ( self , queue = '' , nowait = False ) : args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( queue ) args . write_bit ( nowait ) self . _send_method ( ( 50 , 30 ) , args ) if not nowait : return self . wait ( allowed_methods = [ ( 50 , 31 ) , # Channel.queue_purge_ok ] )
Purge a queue
100
4
226,456
def basic_ack ( self , delivery_tag , multiple = False ) : args = AMQPWriter ( ) args . write_longlong ( delivery_tag ) args . write_bit ( multiple ) self . _send_method ( ( 60 , 80 ) , args )
Acknowledge one or more messages
58
7
226,457
def basic_cancel ( self , consumer_tag , nowait = False ) : if self . connection is not None : self . no_ack_consumers . discard ( consumer_tag ) args = AMQPWriter ( ) args . write_shortstr ( consumer_tag ) args . write_bit ( nowait ) self . _send_method ( ( 60 , 30 ) , args ) return self . wait ( allowed_methods = [ ( 60 , 31 ) , # Channel.basic_cancel_ok ] )
End a queue consumer
112
4
226,458
def _basic_cancel_notify ( self , args ) : consumer_tag = args . read_shortstr ( ) callback = self . _on_cancel ( consumer_tag ) if callback : callback ( consumer_tag ) else : raise ConsumerCancelled ( consumer_tag , ( 60 , 30 ) )
Consumer cancelled by server .
68
5
226,459
def basic_consume ( self , queue = '' , consumer_tag = '' , no_local = False , no_ack = False , exclusive = False , nowait = False , callback = None , arguments = None , on_cancel = None ) : args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( queue ) args . write_shortstr ( consumer_tag ) args . write_bit ( no_local ) args . write_bit ( no_ack ) args . write_bit ( exclusive ) args . write_bit ( nowait ) args . write_table ( arguments or { } ) self . _send_method ( ( 60 , 20 ) , args ) if not nowait : consumer_tag = self . wait ( allowed_methods = [ ( 60 , 21 ) , # Channel.basic_consume_ok ] ) self . callbacks [ consumer_tag ] = callback if on_cancel : self . cancel_callbacks [ consumer_tag ] = on_cancel if no_ack : self . no_ack_consumers . add ( consumer_tag ) return consumer_tag
Start a queue consumer
247
4
226,460
def _basic_deliver ( self , args , msg ) : consumer_tag = args . read_shortstr ( ) delivery_tag = args . read_longlong ( ) redelivered = args . read_bit ( ) exchange = args . read_shortstr ( ) routing_key = args . read_shortstr ( ) msg . channel = self msg . delivery_info = { 'consumer_tag' : consumer_tag , 'delivery_tag' : delivery_tag , 'redelivered' : redelivered , 'exchange' : exchange , 'routing_key' : routing_key , } try : fun = self . callbacks [ consumer_tag ] except KeyError : pass else : fun ( msg )
Notify the client of a consumer message
157
8
226,461
def _basic_get_ok ( self , args , msg ) : delivery_tag = args . read_longlong ( ) redelivered = args . read_bit ( ) exchange = args . read_shortstr ( ) routing_key = args . read_shortstr ( ) message_count = args . read_long ( ) msg . channel = self msg . delivery_info = { 'delivery_tag' : delivery_tag , 'redelivered' : redelivered , 'exchange' : exchange , 'routing_key' : routing_key , 'message_count' : message_count } return msg
Provide client with a message
134
6
226,462
def _basic_publish ( self , msg , exchange = '' , routing_key = '' , mandatory = False , immediate = False ) : args = AMQPWriter ( ) args . write_short ( 0 ) args . write_shortstr ( exchange ) args . write_shortstr ( routing_key ) args . write_bit ( mandatory ) args . write_bit ( immediate ) self . _send_method ( ( 60 , 40 ) , args , msg )
Publish a message
99
4
226,463
def basic_qos ( self , prefetch_size , prefetch_count , a_global ) : args = AMQPWriter ( ) args . write_long ( prefetch_size ) args . write_short ( prefetch_count ) args . write_bit ( a_global ) self . _send_method ( ( 60 , 10 ) , args ) return self . wait ( allowed_methods = [ ( 60 , 11 ) , # Channel.basic_qos_ok ] )
Specify quality of service
106
5
226,464
def basic_recover ( self , requeue = False ) : args = AMQPWriter ( ) args . write_bit ( requeue ) self . _send_method ( ( 60 , 110 ) , args )
Redeliver unacknowledged messages
48
8
226,465
def basic_reject ( self , delivery_tag , requeue ) : args = AMQPWriter ( ) args . write_longlong ( delivery_tag ) args . write_bit ( requeue ) self . _send_method ( ( 60 , 90 ) , args )
Reject an incoming message
61
5
226,466
def _basic_return ( self , args , msg ) : self . returned_messages . put ( basic_return_t ( args . read_short ( ) , args . read_shortstr ( ) , args . read_shortstr ( ) , args . read_shortstr ( ) , msg , ) )
Return a failed message
67
4
226,467
def create_build_process ( process_type , working_dir , build_system , package = None , vcs = None , ensure_latest = True , skip_repo_errors = False , ignore_existing_tag = False , verbose = False , quiet = False ) : from rez . plugin_managers import plugin_manager process_types = get_build_process_types ( ) if process_type not in process_types : raise BuildProcessError ( "Unknown build process: %r" % process_type ) cls = plugin_manager . get_plugin_class ( 'build_process' , process_type ) return cls ( working_dir , # ignored (deprecated) build_system , package = package , # ignored (deprecated) vcs = vcs , ensure_latest = ensure_latest , skip_repo_errors = skip_repo_errors , ignore_existing_tag = ignore_existing_tag , verbose = verbose , quiet = quiet )
Create a BuildProcess instance .
215
6
226,468
def visit_variants ( self , func , variants = None , * * kwargs ) : if variants : present_variants = range ( self . package . num_variants ) invalid_variants = set ( variants ) - set ( present_variants ) if invalid_variants : raise BuildError ( "The package does not contain the variants: %s" % ", " . join ( str ( x ) for x in sorted ( invalid_variants ) ) ) # iterate over variants results = [ ] num_visited = 0 for variant in self . package . iter_variants ( ) : if variants and variant . index not in variants : self . _print_header ( "Skipping variant %s (%s)..." % ( variant . index , self . _n_of_m ( variant ) ) ) continue # visit the variant result = func ( variant , * * kwargs ) results . append ( result ) num_visited += 1 return num_visited , results
Iterate over variants and call a function on each .
213
11
226,469
def create_build_context ( self , variant , build_type , build_path ) : request = variant . get_requires ( build_requires = True , private_build_requires = True ) req_strs = map ( str , request ) quoted_req_strs = map ( quote , req_strs ) self . _print ( "Resolving build environment: %s" , ' ' . join ( quoted_req_strs ) ) if build_type == BuildType . local : packages_path = self . package . config . packages_path else : packages_path = self . package . config . nonlocal_packages_path if self . package . config . is_overridden ( "package_filter" ) : from rez . package_filter import PackageFilterList data = self . package . config . package_filter package_filter = PackageFilterList . from_pod ( data ) else : package_filter = None context = ResolvedContext ( request , package_paths = packages_path , package_filter = package_filter , building = True ) if self . verbose : context . print_info ( ) # save context before possible fail, so user can debug rxt_filepath = os . path . join ( build_path , "build.rxt" ) context . save ( rxt_filepath ) if context . status != ResolverStatus . solved : raise BuildContextResolveError ( context ) return context , rxt_filepath
Create a context to build the variant within .
314
9
226,470
def get_release_data ( self ) : previous_package = self . get_previous_release ( ) if previous_package : previous_version = previous_package . version previous_revision = previous_package . revision else : previous_version = None previous_revision = None if self . vcs is None : return dict ( vcs = "None" , previous_version = previous_version ) revision = None with self . repo_operation ( ) : revision = self . vcs . get_current_revision ( ) changelog = self . get_changelog ( ) # truncate changelog - very large changelogs can cause package load # times to be very high, we don't want that maxlen = config . max_package_changelog_chars if maxlen and changelog and len ( changelog ) > maxlen + 3 : changelog = changelog [ : maxlen ] + "..." return dict ( vcs = self . vcs . name ( ) , revision = revision , changelog = changelog , previous_version = previous_version , previous_revision = previous_revision )
Get release data for this release .
250
7
226,471
def minimal_spanning_tree ( graph , root = None ) : visited = [ ] # List for marking visited and non-visited nodes spanning_tree = { } # MInimal Spanning tree # Initialization if ( root is not None ) : visited . append ( root ) nroot = root spanning_tree [ root ] = None else : nroot = 1 # Algorithm loop while ( nroot is not None ) : ledge = _lightest_edge ( graph , visited ) if ( ledge == None ) : if ( root is not None ) : break nroot = _first_unvisited ( graph , visited ) if ( nroot is not None ) : spanning_tree [ nroot ] = None visited . append ( nroot ) else : spanning_tree [ ledge [ 1 ] ] = ledge [ 0 ] visited . append ( ledge [ 1 ] ) return spanning_tree
Minimal spanning tree .
187
5
226,472
def cut_value ( graph , flow , cut ) : #max flow/min cut value calculation S = [ ] T = [ ] for node in cut . keys ( ) : if cut [ node ] == 0 : S . append ( node ) elif cut [ node ] == 1 : T . append ( node ) value = 0 for node in S : for neigh in graph . neighbors ( node ) : if neigh in T : value = value + flow [ ( node , neigh ) ] for inc in graph . incidents ( node ) : if inc in T : value = value - flow [ ( inc , node ) ] return value
Calculate the value of a cut .
130
9
226,473
def cut_tree ( igraph , caps = None ) : #maximum flow needs a digraph, we get a graph #I think this conversion relies on implementation details outside the api and may break in the future graph = digraph ( ) graph . add_graph ( igraph ) #handle optional argument if not caps : caps = { } for edge in graph . edges ( ) : caps [ edge ] = igraph . edge_weight ( edge ) #temporary flow variable f = { } #we use a numbering of the nodes for easier handling n = { } N = 0 for node in graph . nodes ( ) : n [ N ] = node N = N + 1 #predecessor function p = { } . fromkeys ( range ( N ) , 0 ) p [ 0 ] = None for s in range ( 1 , N ) : t = p [ s ] S = [ ] #max flow calculation ( flow , cut ) = maximum_flow ( graph , n [ s ] , n [ t ] , caps ) for i in range ( N ) : if cut [ n [ i ] ] == 0 : S . append ( i ) value = cut_value ( graph , flow , cut ) f [ s ] = value for i in range ( N ) : if i == s : continue if i in S and p [ i ] == t : p [ i ] = s if p [ t ] in S : p [ s ] = p [ t ] p [ t ] = s f [ s ] = f [ t ] f [ t ] = value #cut tree is a dictionary, where each edge is associated with its weight b = { } for i in range ( 1 , N ) : b [ ( n [ i ] , n [ p [ i ] ] ) ] = f [ i ] return b
Construct a Gomory - Hu cut tree by applying the algorithm of Gusfield .
380
17
226,474
def locate ( self , requirement , prereleases = False ) : result = None r = parse_requirement ( requirement ) if r is None : raise DistlibException ( 'Not a valid requirement: %r' % requirement ) scheme = get_scheme ( self . scheme ) self . matcher = matcher = scheme . matcher ( r . requirement ) logger . debug ( 'matcher: %s (%s)' , matcher , type ( matcher ) . __name__ ) versions = self . get_project ( r . name ) if len ( versions ) > 2 : # urls and digests keys are present # sometimes, versions are invalid slist = [ ] vcls = matcher . version_class for k in versions : if k in ( 'urls' , 'digests' ) : continue try : if not matcher . match ( k ) : logger . debug ( '%s did not match %r' , matcher , k ) else : if prereleases or not vcls ( k ) . is_prerelease : slist . append ( k ) else : logger . debug ( 'skipping pre-release ' 'version %s of %s' , k , matcher . name ) except Exception : # pragma: no cover logger . warning ( 'error matching %s with %r' , matcher , k ) pass # slist.append(k) if len ( slist ) > 1 : slist = sorted ( slist , key = scheme . key ) if slist : logger . debug ( 'sorted list: %s' , slist ) version = slist [ - 1 ] result = versions [ version ] if result : if r . extras : result . extras = r . extras result . download_urls = versions . get ( 'urls' , { } ) . get ( version , set ( ) ) d = { } sd = versions . get ( 'digests' , { } ) for url in result . download_urls : if url in sd : d [ url ] = sd [ url ] result . digests = d self . matcher = None return result
Find the most recent distribution which matches the given requirement .
454
11
226,475
def get_bind_modules ( verbose = False ) : builtin_path = os . path . join ( module_root_path , "bind" ) searchpaths = config . bind_module_path + [ builtin_path ] bindnames = { } for path in searchpaths : if verbose : print "searching %s..." % path if not os . path . isdir ( path ) : continue for filename in os . listdir ( path ) : fpath = os . path . join ( path , filename ) fname , ext = os . path . splitext ( filename ) if os . path . isfile ( fpath ) and ext == ".py" and not fname . startswith ( '_' ) : bindnames [ fname ] = fpath return bindnames
Get available bind modules .
172
5
226,476
def find_bind_module ( name , verbose = False ) : bindnames = get_bind_modules ( verbose = verbose ) bindfile = bindnames . get ( name ) if bindfile : return bindfile if not verbose : return None # suggest close matches fuzzy_matches = get_close_pkgs ( name , bindnames . keys ( ) ) if fuzzy_matches : rows = [ ( x [ 0 ] , bindnames [ x [ 0 ] ] ) for x in fuzzy_matches ] print "'%s' not found. Close matches:" % name print '\n' . join ( columnise ( rows ) ) else : print "No matches." return None
Find the bind module matching the given name .
147
9
226,477
def bind_package ( name , path = None , version_range = None , no_deps = False , bind_args = None , quiet = False ) : pending = set ( [ name ] ) installed_variants = [ ] installed_package_names = set ( ) primary = True # bind package and possibly dependencies while pending : pending_ = pending pending = set ( ) exc_type = None for name_ in pending_ : # turn error on binding of dependencies into a warning - we don't # want to skip binding some dependencies because others failed try : variants_ = _bind_package ( name_ , path = path , version_range = version_range , bind_args = bind_args , quiet = quiet ) except exc_type as e : print_error ( "Could not bind '%s': %s: %s" % ( name_ , e . __class__ . __name__ , str ( e ) ) ) continue installed_variants . extend ( variants_ ) for variant in variants_ : installed_package_names . add ( variant . name ) # add dependencies if not no_deps : for variant in variants_ : for requirement in variant . requires : if not requirement . conflict : pending . add ( requirement . name ) # non-primary packages are treated a little differently primary = False version_range = None bind_args = None exc_type = RezBindError if installed_variants and not quiet : print "The following packages were installed:" print _print_package_list ( installed_variants ) return installed_variants
Bind software available on the current system as a rez package .
330
13
226,478
def create_release_hook ( name , source_path ) : from rez . plugin_managers import plugin_manager return plugin_manager . create_instance ( 'release_hook' , name , source_path = source_path )
Return a new release hook of the given type .
51
10
226,479
def pre_build ( self , user , install_path , variants = None , release_message = None , changelog = None , previous_version = None , previous_revision = None , * * kwargs ) : pass
Pre - build hook .
50
5
226,480
def traversal ( graph , node , order ) : visited = { } if ( order == 'pre' ) : pre = 1 post = 0 elif ( order == 'post' ) : pre = 0 post = 1 for each in _dfs ( graph , visited , node , pre , post ) : yield each
Graph traversal iterator .
66
5
226,481
def is_zipfile ( filename ) : try : fpin = open ( filename , "rb" ) endrec = _EndRecData ( fpin ) fpin . close ( ) if endrec : return True # file has correct magic number except IOError : pass return False
Quickly see if file is a ZIP file by checking the magic number .
58
15
226,482
def _EndRecData ( fpin ) : # Determine file size fpin . seek ( 0 , 2 ) filesize = fpin . tell ( ) # Check to see if this is ZIP file with no archive comment (the # "end of central directory" structure should be the last item in the # file if this is the case). try : fpin . seek ( - sizeEndCentDir , 2 ) except IOError : return None data = fpin . read ( ) if data [ 0 : 4 ] == stringEndArchive and data [ - 2 : ] == "\000\000" : # the signature is correct and there's no comment, unpack structure endrec = struct . unpack ( structEndArchive , data ) endrec = list ( endrec ) # Append a blank comment and record start offset endrec . append ( "" ) endrec . append ( filesize - sizeEndCentDir ) # Try to read the "Zip64 end of central directory" structure return _EndRecData64 ( fpin , - sizeEndCentDir , endrec ) # Either this is not a ZIP file, or it is a ZIP file with an archive # comment. Search the end of the file for the "end of central directory" # record signature. The comment is the last item in the ZIP file and may be # up to 64K long. It is assumed that the "end of central directory" magic # number does not appear in the comment. maxCommentStart = max ( filesize - ( 1 << 16 ) - sizeEndCentDir , 0 ) fpin . seek ( maxCommentStart , 0 ) data = fpin . read ( ) start = data . rfind ( stringEndArchive ) if start >= 0 : # found the magic number; attempt to unpack and interpret recData = data [ start : start + sizeEndCentDir ] endrec = list ( struct . unpack ( structEndArchive , recData ) ) comment = data [ start + sizeEndCentDir : ] # check that comment length is correct if endrec [ _ECD_COMMENT_SIZE ] == len ( comment ) : # Append the archive comment and start offset endrec . append ( comment ) endrec . append ( maxCommentStart + start ) # Try to read the "Zip64 end of central directory" structure return _EndRecData64 ( fpin , maxCommentStart + start - filesize , endrec ) # Unable to find a valid end of central directory structure return
Return data from the End of Central Directory record or None .
521
12
226,483
def _GenerateCRCTable ( ) : poly = 0xedb88320 table = [ 0 ] * 256 for i in range ( 256 ) : crc = i for j in range ( 8 ) : if crc & 1 : crc = ( ( crc >> 1 ) & 0x7FFFFFFF ) ^ poly else : crc = ( ( crc >> 1 ) & 0x7FFFFFFF ) table [ i ] = crc return table
Generate a CRC - 32 table .
100
8
226,484
def _crc32 ( self , ch , crc ) : return ( ( crc >> 8 ) & 0xffffff ) ^ self . crctable [ ( crc ^ ord ( ch ) ) & 0xff ]
Compute the CRC32 primitive on one byte .
48
10
226,485
def readline ( self , size = - 1 ) : if size < 0 : size = sys . maxint elif size == 0 : return '' # check for a newline already in buffer nl , nllen = self . _checkfornewline ( ) if nl >= 0 : # the next line was already in the buffer nl = min ( nl , size ) else : # no line break in buffer - try to read more size -= len ( self . linebuffer ) while nl < 0 and size > 0 : buf = self . read ( min ( size , 100 ) ) if not buf : break self . linebuffer += buf size -= len ( buf ) # check for a newline in buffer nl , nllen = self . _checkfornewline ( ) # we either ran out of bytes in the file, or # met the specified size limit without finding a newline, # so return current buffer if nl < 0 : s = self . linebuffer self . linebuffer = '' return s buf = self . linebuffer [ : nl ] self . lastdiscard = self . linebuffer [ nl : nl + nllen ] self . linebuffer = self . linebuffer [ nl + nllen : ] # line is always returned with \n as newline char (except possibly # for a final incomplete line in the file, which is handled above). return buf + "\n"
Read a line with approx . size . If size is negative read a whole line .
304
17
226,486
def _GetContents ( self ) : try : self . _RealGetContents ( ) except BadZipfile : if not self . _filePassed : self . fp . close ( ) self . fp = None raise
Read the directory making sure we close the file if the format is bad .
47
15
226,487
def namelist ( self ) : l = [ ] for data in self . filelist : l . append ( data . filename ) return l
Return a list of file names in the archive .
30
10
226,488
def printdir ( self ) : print "%-46s %19s %12s" % ( "File Name" , "Modified " , "Size" ) for zinfo in self . filelist : date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo . date_time [ : 6 ] print "%-46s %s %12d" % ( zinfo . filename , date , zinfo . file_size )
Print a table of contents for the zip file .
106
10
226,489
def getinfo ( self , name ) : info = self . NameToInfo . get ( name ) if info is None : raise KeyError ( 'There is no item named %r in the archive' % name ) return info
Return the instance of ZipInfo given name .
47
9
226,490
def extract ( self , member , path = None , pwd = None ) : if not isinstance ( member , ZipInfo ) : member = self . getinfo ( member ) if path is None : path = os . getcwd ( ) return self . _extract_member ( member , path , pwd )
Extract a member from the archive to the current working directory using its full name . Its file information is extracted as accurately as possible . member may be a filename or a ZipInfo object . You can specify a different directory using path .
67
47
226,491
def writepy ( self , pathname , basename = "" ) : dir , name = os . path . split ( pathname ) if os . path . isdir ( pathname ) : initname = os . path . join ( pathname , "__init__.py" ) if os . path . isfile ( initname ) : # This is a package directory, add it if basename : basename = "%s/%s" % ( basename , name ) else : basename = name if self . debug : print "Adding package in" , pathname , "as" , basename fname , arcname = self . _get_codename ( initname [ 0 : - 3 ] , basename ) if self . debug : print "Adding" , arcname self . write ( fname , arcname ) dirlist = os . listdir ( pathname ) dirlist . remove ( "__init__.py" ) # Add all *.py files and package subdirectories for filename in dirlist : path = os . path . join ( pathname , filename ) root , ext = os . path . splitext ( filename ) if os . path . isdir ( path ) : if os . path . isfile ( os . path . join ( path , "__init__.py" ) ) : # This is a package directory, add it self . writepy ( path , basename ) # Recursive call elif ext == ".py" : fname , arcname = self . _get_codename ( path [ 0 : - 3 ] , basename ) if self . debug : print "Adding" , arcname self . write ( fname , arcname ) else : # This is NOT a package directory, add its files at top level if self . debug : print "Adding files from directory" , pathname for filename in os . listdir ( pathname ) : path = os . path . join ( pathname , filename ) root , ext = os . path . splitext ( filename ) if ext == ".py" : fname , arcname = self . _get_codename ( path [ 0 : - 3 ] , basename ) if self . debug : print "Adding" , arcname self . write ( fname , arcname ) else : if pathname [ - 3 : ] != ".py" : raise RuntimeError , 'Files added with writepy() must end with ".py"' fname , arcname = self . _get_codename ( pathname [ 0 : - 3 ] , basename ) if self . debug : print "Adding file" , arcname self . write ( fname , arcname )
Add all files from pathname to the ZIP archive .
570
11
226,492
def get_fileobject ( self , dir = None , * * kwargs ) : if dir is None : dir = os . path . normpath ( os . path . dirname ( self . _path ) ) descriptor , name = tempfile . mkstemp ( dir = dir ) # io.open() will take either the descriptor or the name, but we need # the name later for commit()/replace_atomic() and couldn't find a way # to get the filename from the descriptor. os . close ( descriptor ) kwargs [ 'mode' ] = self . _mode kwargs [ 'file' ] = name return io . open ( * * kwargs )
Return the temporary file to use .
145
7
226,493
def commit ( self , f ) : if self . _overwrite : replace_atomic ( f . name , self . _path ) else : move_atomic ( f . name , self . _path )
Move the temporary file to the target location .
43
9
226,494
def read_pid_from_pidfile ( pidfile_path ) : pid = None try : pidfile = open ( pidfile_path , 'r' ) except IOError : pass else : # According to the FHS 2.3 section on PID files in /var/run: # # The file must consist of the process identifier in # ASCII-encoded decimal, followed by a newline character. # # Programs that read PID files should be somewhat flexible # in what they accept; i.e., they should ignore extra # whitespace, leading zeroes, absence of the trailing # newline, or additional lines in the PID file. line = pidfile . readline ( ) . strip ( ) try : pid = int ( line ) except ValueError : pass pidfile . close ( ) return pid
Read the PID recorded in the named PID file .
170
10
226,495
def apply_saved_layout ( self ) : num_widgets = self . config . get ( self . config_key + "/num_widgets" , int ) if num_widgets : sizes = [ ] for i in range ( num_widgets ) : key = "%s/size_%d" % ( self . config_key , i ) size = self . config . get ( key , int ) sizes . append ( size ) self . setSizes ( sizes ) return True return False
Call this after adding your child widgets .
108
8
226,496
def remove_nones ( * * kwargs ) : return dict ( ( k , v ) for k , v in kwargs . iteritems ( ) if v is not None )
Return diict copy with nones removed .
40
9
226,497
def deep_update ( dict1 , dict2 ) : def flatten ( v ) : if isinstance ( v , ModifyList ) : return v . apply ( [ ] ) elif isinstance ( v , dict ) : return dict ( ( k , flatten ( v_ ) ) for k , v_ in v . iteritems ( ) ) else : return v def merge ( v1 , v2 ) : if isinstance ( v1 , dict ) and isinstance ( v2 , dict ) : deep_update ( v1 , v2 ) return v1 elif isinstance ( v2 , ModifyList ) : v1 = flatten ( v1 ) return v2 . apply ( v1 ) else : return flatten ( v2 ) for k1 , v1 in dict1 . iteritems ( ) : if k1 not in dict2 : dict1 [ k1 ] = flatten ( v1 ) for k2 , v2 in dict2 . iteritems ( ) : v1 = dict1 . get ( k2 ) if v1 is KeyError : dict1 [ k2 ] = flatten ( v2 ) else : dict1 [ k2 ] = merge ( v1 , v2 )
Perform a deep merge of dict2 into dict1 .
259
12
226,498
def deep_del ( data , fn ) : result = { } for k , v in data . iteritems ( ) : if not fn ( v ) : if isinstance ( v , dict ) : result [ k ] = deep_del ( v , fn ) else : result [ k ] = v return result
Create dict copy with removed items .
65
7
226,499
def get_dict_diff_str ( d1 , d2 , title ) : added , removed , changed = get_dict_diff ( d1 , d2 ) lines = [ title ] if added : lines . append ( "Added attributes: %s" % [ '.' . join ( x ) for x in added ] ) if removed : lines . append ( "Removed attributes: %s" % [ '.' . join ( x ) for x in removed ] ) if changed : lines . append ( "Changed attributes: %s" % [ '.' . join ( x ) for x in changed ] ) return '\n' . join ( lines )
Returns same as get_dict_diff but as a readable string .
138
14