idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
38,400 | def read_hypergraph ( string ) : hgr = hypergraph ( ) dotG = pydot . graph_from_dot_data ( string ) for each_node in dotG . get_nodes ( ) : if 'hypernode' == each_node . get ( 'hyper_node_type' ) : hgr . add_node ( each_node . get_name ( ) ) elif 'hyperedge' == each_node . get ( 'hyper_node_type' ) : hgr . add_hyperedge ( each_node . get_name ( ) ) for each_link in dotG . get_edges ( ) : if hgr . has_node ( each_link . get_source ( ) ) : link_hypernode = each_link . get_source ( ) link_hyperedge = each_link . get_destination ( ) elif hgr . has_node ( each_link . get_destination ( ) ) : link_hypernode = each_link . get_destination ( ) link_hyperedge = each_link . get_source ( ) hgr . link ( link_hypernode , link_hyperedge ) return hgr | Read a hypergraph from a string in dot format . Nodes and edges specified in the input will be added to the current hypergraph . |
38,401 | def graph_from_dot_file ( path ) : fd = file ( path , 'rb' ) data = fd . read ( ) fd . close ( ) return graph_from_dot_data ( data ) | Load graph as defined by a DOT file . The file is assumed to be in DOT format . It will be loaded parsed and a Dot class will be returned representing the graph . |
38,402 | def __find_executables ( path ) : success = False progs = { 'dot' : '' , 'twopi' : '' , 'neato' : '' , 'circo' : '' , 'fdp' : '' , 'sfdp' : '' } was_quoted = False path = path . strip ( ) if path . startswith ( '"' ) and path . endswith ( '"' ) : path = path [ 1 : - 1 ] was_quoted = True if os . path . isdir ( path ) : for prg in progs . iterkeys ( ) : if progs [ prg ] : continue if os . path . exists ( os . path . join ( path , prg ) ) : if was_quoted : progs [ prg ] = '"' + os . path . join ( path , prg ) + '"' else : progs [ prg ] = os . path . join ( path , prg ) success = True elif os . path . exists ( os . path . join ( path , prg + '.exe' ) ) : if was_quoted : progs [ prg ] = '"' + os . path . join ( path , prg + '.exe' ) + '"' else : progs [ prg ] = os . path . join ( path , prg + '.exe' ) success = True if success : return progs else : return None | Used by find_graphviz path - single directory as a string If any of the executables are found it will return a dictionary containing the program names as keys and their paths as values . Otherwise returns None |
38,403 | def to_string ( self ) : src = self . parse_node_ref ( self . get_source ( ) ) dst = self . parse_node_ref ( self . get_destination ( ) ) if isinstance ( src , frozendict ) : edge = [ Subgraph ( obj_dict = src ) . to_string ( ) ] elif isinstance ( src , ( int , long ) ) : edge = [ str ( src ) ] else : edge = [ src ] if ( self . get_parent_graph ( ) and self . get_parent_graph ( ) . get_top_graph_type ( ) and self . get_parent_graph ( ) . get_top_graph_type ( ) == 'digraph' ) : edge . append ( '->' ) else : edge . append ( '--' ) if isinstance ( dst , frozendict ) : edge . append ( Subgraph ( obj_dict = dst ) . to_string ( ) ) elif isinstance ( dst , ( int , long ) ) : edge . append ( str ( dst ) ) else : edge . append ( dst ) edge_attr = list ( ) for attr , value in self . obj_dict [ 'attributes' ] . iteritems ( ) : if value is not None : edge_attr . append ( '%s=%s' % ( attr , quote_if_necessary ( value ) ) ) else : edge_attr . append ( attr ) edge_attr = ', ' . join ( edge_attr ) if edge_attr : edge . append ( ' [' + edge_attr + ']' ) return ' ' . join ( edge ) + ';' | Returns a string representation of the edge in dot language . |
38,404 | def get_node ( self , name ) : match = list ( ) if self . obj_dict [ 'nodes' ] . has_key ( name ) : match . extend ( [ Node ( obj_dict = obj_dict ) for obj_dict in self . obj_dict [ 'nodes' ] [ name ] ] ) return match | Retrieve a node from the graph . Given a node s name the corresponding Node instance will be returned . If one or more nodes exist with that name a list of Node instances is returned . An empty list is returned otherwise . |
38,405 | def add_edge ( self , graph_edge ) : if not isinstance ( graph_edge , Edge ) : raise TypeError ( 'add_edge() received a non edge class object: ' + str ( graph_edge ) ) edge_points = ( graph_edge . get_source ( ) , graph_edge . get_destination ( ) ) if self . obj_dict [ 'edges' ] . has_key ( edge_points ) : edge_list = self . obj_dict [ 'edges' ] [ edge_points ] edge_list . append ( graph_edge . obj_dict ) else : self . obj_dict [ 'edges' ] [ edge_points ] = [ graph_edge . obj_dict ] graph_edge . set_sequence ( self . get_next_sequence_number ( ) ) graph_edge . set_parent_graph ( self . get_parent_graph ( ) ) | Adds an edge object to the graph . It takes a edge object as its only argument and returns None . |
38,406 | def add_subgraph ( self , sgraph ) : if not isinstance ( sgraph , Subgraph ) and not isinstance ( sgraph , Cluster ) : raise TypeError ( 'add_subgraph() received a non subgraph class object:' + str ( sgraph ) ) if self . obj_dict [ 'subgraphs' ] . has_key ( sgraph . get_name ( ) ) : sgraph_list = self . obj_dict [ 'subgraphs' ] [ sgraph . get_name ( ) ] sgraph_list . append ( sgraph . obj_dict ) else : self . obj_dict [ 'subgraphs' ] [ sgraph . get_name ( ) ] = [ sgraph . obj_dict ] sgraph . set_sequence ( self . get_next_sequence_number ( ) ) sgraph . set_parent_graph ( self . get_parent_graph ( ) ) | Adds an subgraph object to the graph . It takes a subgraph object as its only argument and returns None . |
38,407 | def to_string ( self ) : graph = list ( ) if self . obj_dict . get ( 'strict' , None ) is not None : if self == self . get_parent_graph ( ) and self . obj_dict [ 'strict' ] : graph . append ( 'strict ' ) if self . obj_dict [ 'name' ] == '' : if 'show_keyword' in self . obj_dict and self . obj_dict [ 'show_keyword' ] : graph . append ( 'subgraph {\n' ) else : graph . append ( '{\n' ) else : graph . append ( '%s %s {\n' % ( self . obj_dict [ 'type' ] , self . obj_dict [ 'name' ] ) ) for attr in self . obj_dict [ 'attributes' ] . iterkeys ( ) : if self . obj_dict [ 'attributes' ] . get ( attr , None ) is not None : val = self . obj_dict [ 'attributes' ] . get ( attr ) if val is not None : graph . append ( '%s=%s' % ( attr , quote_if_necessary ( val ) ) ) else : graph . append ( attr ) graph . append ( ';\n' ) edges_done = set ( ) edge_obj_dicts = list ( ) for e in self . obj_dict [ 'edges' ] . itervalues ( ) : edge_obj_dicts . extend ( e ) if edge_obj_dicts : edge_src_set , edge_dst_set = zip ( * [ obj [ 'points' ] for obj in edge_obj_dicts ] ) edge_src_set , edge_dst_set = set ( edge_src_set ) , set ( edge_dst_set ) else : edge_src_set , edge_dst_set = set ( ) , set ( ) node_obj_dicts = list ( ) for e in self . obj_dict [ 'nodes' ] . itervalues ( ) : node_obj_dicts . extend ( e ) sgraph_obj_dicts = list ( ) for sg in self . obj_dict [ 'subgraphs' ] . itervalues ( ) : sgraph_obj_dicts . extend ( sg ) obj_list = [ ( obj [ 'sequence' ] , obj ) for obj in ( edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts ) ] obj_list . sort ( ) for idx , obj in obj_list : if obj [ 'type' ] == 'node' : node = Node ( obj_dict = obj ) if self . obj_dict . get ( 'suppress_disconnected' , False ) : if ( node . get_name ( ) not in edge_src_set and node . get_name ( ) not in edge_dst_set ) : continue graph . append ( node . to_string ( ) + '\n' ) elif obj [ 'type' ] == 'edge' : edge = Edge ( obj_dict = obj ) if self . obj_dict . 
get ( 'simplify' , False ) and edge in edges_done : continue graph . append ( edge . to_string ( ) + '\n' ) edges_done . add ( edge ) else : sgraph = Subgraph ( obj_dict = obj ) graph . append ( sgraph . to_string ( ) + '\n' ) graph . append ( '}\n' ) return '' . join ( graph ) | Returns a string representation of the graph in dot language . It will return the graph and all its subelements in string from . |
38,408 | def create ( self , prog = None , format = 'ps' ) : if prog is None : prog = self . prog if isinstance ( prog , ( list , tuple ) ) : prog , args = prog [ 0 ] , prog [ 1 : ] else : args = [ ] if self . progs is None : self . progs = find_graphviz ( ) if self . progs is None : raise InvocationException ( 'GraphViz\'s executables not found' ) if not self . progs . has_key ( prog ) : raise InvocationException ( 'GraphViz\'s executable "%s" not found' % prog ) if not os . path . exists ( self . progs [ prog ] ) or not os . path . isfile ( self . progs [ prog ] ) : raise InvocationException ( 'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self . progs [ prog ] ) tmp_fd , tmp_name = tempfile . mkstemp ( ) os . close ( tmp_fd ) self . write ( tmp_name ) tmp_dir = os . path . dirname ( tmp_name ) for img in self . shape_files : f = file ( img , 'rb' ) f_data = f . read ( ) f . close ( ) f = file ( os . path . join ( tmp_dir , os . path . basename ( img ) ) , 'wb' ) f . write ( f_data ) f . close ( ) cmdline = [ self . progs [ prog ] , '-T' + format , tmp_name ] + args p = subprocess . Popen ( cmdline , cwd = tmp_dir , stderr = subprocess . PIPE , stdout = subprocess . PIPE ) stderr = p . stderr stdout = p . stdout stdout_output = list ( ) while True : data = stdout . read ( ) if not data : break stdout_output . append ( data ) stdout . close ( ) stdout_output = '' . join ( stdout_output ) if not stderr . closed : stderr_output = list ( ) while True : data = stderr . read ( ) if not data : break stderr_output . append ( data ) stderr . close ( ) if stderr_output : stderr_output = '' . join ( stderr_output ) status = p . wait ( ) if status != 0 : raise InvocationException ( 'Program terminated with status: %d. stderr follows: %s' % ( status , stderr_output ) ) elif stderr_output : print stderr_output for img in self . shape_files : os . unlink ( os . path . join ( tmp_dir , os . path . basename ( img ) ) ) os . 
unlink ( tmp_name ) return stdout_output | Creates and returns a Postscript representation of the graph . |
38,409 | def dump_yaml ( data , Dumper = _Dumper , default_flow_style = False ) : content = yaml . dump ( data , default_flow_style = default_flow_style , Dumper = Dumper ) return content . strip ( ) | Returns data as yaml - formatted string . |
38,410 | def load_yaml ( filepath ) : with open ( filepath ) as f : txt = f . read ( ) return yaml . load ( txt ) | Convenience function for loading yaml - encoded data from disk . |
38,411 | def memcached_client ( servers = config . memcached_uri , debug = config . debug_memcache ) : key = None try : client , key = scoped_instance_manager . acquire ( servers , debug = debug ) yield client finally : if key : scoped_instance_manager . release ( key ) | Get a shared memcached instance . |
38,412 | def pool_memcached_connections ( func ) : if isgeneratorfunction ( func ) : def wrapper ( * nargs , ** kwargs ) : with memcached_client ( ) : for result in func ( * nargs , ** kwargs ) : yield result else : def wrapper ( * nargs , ** kwargs ) : with memcached_client ( ) : return func ( * nargs , ** kwargs ) return update_wrapper ( wrapper , func ) | Function decorator to pool memcached connections . |
38,413 | def memcached ( servers , key = None , from_cache = None , to_cache = None , time = 0 , min_compress_len = 0 , debug = False ) : def default_key ( func , * nargs , ** kwargs ) : parts = [ func . __module__ ] argnames = getargspec ( func ) . args if argnames : if argnames [ 0 ] == "cls" : cls_ = nargs [ 0 ] parts . append ( cls_ . __name__ ) nargs = nargs [ 1 : ] elif argnames [ 0 ] == "self" : cls_ = nargs [ 0 ] . __class__ parts . append ( cls_ . __name__ ) nargs = nargs [ 1 : ] parts . append ( func . __name__ ) value = ( '.' . join ( parts ) , nargs , tuple ( sorted ( kwargs . items ( ) ) ) ) _ = hash ( value ) return repr ( value ) def identity ( value , * nargs , ** kwargs ) : return value from_cache = from_cache or identity to_cache = to_cache or identity def decorator ( func ) : if servers : def wrapper ( * nargs , ** kwargs ) : with memcached_client ( servers , debug = debug ) as client : if key : cache_key = key ( * nargs , ** kwargs ) else : cache_key = default_key ( func , * nargs , ** kwargs ) result = client . get ( cache_key ) if result is not client . miss : return from_cache ( result , * nargs , ** kwargs ) result = func ( * nargs , ** kwargs ) if isinstance ( result , DoNotCache ) : return result . result cache_result = to_cache ( result , * nargs , ** kwargs ) client . set ( key = cache_key , val = cache_result , time = time , min_compress_len = min_compress_len ) return result else : def wrapper ( * nargs , ** kwargs ) : result = func ( * nargs , ** kwargs ) if isinstance ( result , DoNotCache ) : return result . result return result def forget ( ) : with memcached_client ( servers , debug = debug ) as client : client . flush ( ) wrapper . forget = forget wrapper . __wrapped__ = func return update_wrapper ( wrapper , func ) return decorator | memcached memoization function decorator . |
38,414 | def client ( self ) : if self . _client is None : self . _client = Client_ ( self . servers ) return self . _client | Get the native memcache client . |
38,415 | def flush ( self , hard = False ) : if not self . servers : return if hard : self . client . flush_all ( ) self . reset_stats ( ) else : from uuid import uuid4 tag = uuid4 ( ) . hex if self . debug : tag = "flushed" + tag self . current = tag | Drop existing entries from the cache . |
38,416 | def depth_first_search ( graph , root = None , filter = null ( ) ) : recursionlimit = getrecursionlimit ( ) setrecursionlimit ( max ( len ( graph . nodes ( ) ) * 2 , recursionlimit ) ) def dfs ( node ) : visited [ node ] = 1 pre . append ( node ) for each in graph [ node ] : if ( each not in visited and filter ( each , node ) ) : spanning_tree [ each ] = node dfs ( each ) post . append ( node ) visited = { } spanning_tree = { } pre = [ ] post = [ ] filter . configure ( graph , spanning_tree ) if ( root is not None ) : if filter ( root , None ) : spanning_tree [ root ] = None dfs ( root ) setrecursionlimit ( recursionlimit ) return spanning_tree , pre , post for each in graph : if ( each not in visited and filter ( each , None ) ) : spanning_tree [ each ] = None dfs ( each ) setrecursionlimit ( recursionlimit ) return ( spanning_tree , pre , post ) | Depth - first search . |
38,417 | def breadth_first_search ( graph , root = None , filter = null ( ) ) : def bfs ( ) : while ( queue != [ ] ) : node = queue . pop ( 0 ) for other in graph [ node ] : if ( other not in spanning_tree and filter ( other , node ) ) : queue . append ( other ) ordering . append ( other ) spanning_tree [ other ] = node queue = [ ] spanning_tree = { } ordering = [ ] filter . configure ( graph , spanning_tree ) if ( root is not None ) : if filter ( root , None ) : queue . append ( root ) ordering . append ( root ) spanning_tree [ root ] = None bfs ( ) return spanning_tree , ordering for each in graph : if ( each not in spanning_tree ) : if filter ( each , None ) : queue . append ( each ) ordering . append ( each ) spanning_tree [ each ] = None bfs ( ) return spanning_tree , ordering | Breadth - first search . |
38,418 | def normalize_variables ( cls , variables ) : if variables . get ( 'version' , True ) in ( '' , False , '_NO_VERSION' , None ) : del variables [ 'version' ] return super ( PackageResource , cls ) . normalize_variables ( variables ) | Make sure version is treated consistently |
38,419 | def push_source ( self , newstream , newfile = None ) : "Push an input source onto the lexer's input source stack." if isinstance ( newstream , basestring ) : newstream = StringIO ( newstream ) self . filestack . appendleft ( ( self . infile , self . instream , self . lineno ) ) self . infile = newfile self . instream = newstream self . lineno = 1 | Push an input source onto the lexer s input source stack . |
38,420 | def error_leader ( self , infile = None , lineno = None ) : "Emit a C-compiler-like, Emacs-friendly error-message leader." if infile is None : infile = self . infile if lineno is None : lineno = self . lineno return "\"%s\", line %d: " % ( infile , lineno ) | Emit a C - compiler - like Emacs - friendly error - message leader . |
38,421 | def rez_bin_path ( self ) : binpath = None if sys . argv and sys . argv [ 0 ] : executable = sys . argv [ 0 ] path = which ( "rezolve" , env = { "PATH" : os . path . dirname ( executable ) , "PATHEXT" : os . environ . get ( "PATHEXT" , "" ) } ) binpath = os . path . dirname ( path ) if path else None if not binpath : path = which ( "rezolve" ) if path : binpath = os . path . dirname ( path ) if binpath : validation_file = os . path . join ( binpath , ".rez_production_install" ) if os . path . exists ( validation_file ) : return os . path . realpath ( binpath ) return None | Get path containing rez binaries or None if no binaries are available or Rez is not a production install . |
38,422 | def get_summary_string ( self ) : from rez . plugin_managers import plugin_manager txt = "Rez %s" % __version__ txt += "\n\n%s" % plugin_manager . get_summary_string ( ) return txt | Get a string summarising the state of Rez as a whole . |
38,423 | def clear_caches ( self , hard = False ) : from rez . package_repository import package_repository_manager from rez . utils . memcached import memcached_client package_repository_manager . clear_caches ( ) if hard : with memcached_client ( ) as client : client . flush ( ) | Clear all caches in Rez . |
38,424 | def solve ( self ) : with log_duration ( self . _print , "memcache get (resolve) took %s" ) : solver_dict = self . _get_cached_solve ( ) if solver_dict : self . from_cache = True self . _set_result ( solver_dict ) else : self . from_cache = False solver = self . _solve ( ) solver_dict = self . _solver_to_dict ( solver ) self . _set_result ( solver_dict ) with log_duration ( self . _print , "memcache set (resolve) took %s" ) : self . _set_cached_solve ( solver_dict ) | Perform the solve . |
38,425 | def _set_cached_solve ( self , solver_dict ) : if self . status_ != ResolverStatus . solved : return if not ( self . caching and self . memcached_servers ) : return releases_since_solve = False release_times_dict = { } variant_states_dict = { } for variant in self . resolved_packages_ : time_ = get_last_release_time ( variant . name , self . package_paths ) if time_ == 0 : self . _print ( "Did not send memcache key: a repository could " "not provide a most recent release time for %r" , variant . name ) return if self . timestamp and self . timestamp < time_ : releases_since_solve = True release_times_dict [ variant . name ] = time_ repo = variant . resource . _repository variant_states_dict [ variant . name ] = repo . get_variant_state_handle ( variant . resource ) timestamped = ( self . timestamp and releases_since_solve ) key = self . _memcache_key ( timestamped = timestamped ) data = ( solver_dict , release_times_dict , variant_states_dict ) with self . _memcached_client ( ) as client : client . set ( key , data ) self . _print ( "Sent memcache key: %r" , key ) | Store a solve to memcached . |
38,426 | def _memcache_key ( self , timestamped = False ) : request = tuple ( map ( str , self . package_requests ) ) repo_ids = [ ] for path in self . package_paths : repo = package_repository_manager . get_repository ( path ) repo_ids . append ( repo . uid ) t = [ "resolve" , request , tuple ( repo_ids ) , self . package_filter_hash , self . package_orderers_hash , self . building , config . prune_failed_graph ] if timestamped and self . timestamp : t . append ( self . timestamp ) return str ( tuple ( t ) ) | Makes a key suitable as a memcache entry . |
38,427 | def create_shell ( shell = None , ** kwargs ) : if not shell : shell = config . default_shell if not shell : from rez . system import system shell = system . shell from rez . plugin_managers import plugin_manager return plugin_manager . create_instance ( 'shell' , shell , ** kwargs ) | Returns a Shell of the given type or the current shell type if shell is None . |
38,428 | def startup_capabilities ( cls , rcfile = False , norc = False , stdin = False , command = False ) : raise NotImplementedError | Given a set of options related to shell startup return the actual options that will be applied . |
38,429 | def upload_file ( self , metadata , filename , signer = None , sign_password = None , filetype = 'sdist' , pyversion = 'source' , keystore = None ) : self . check_credentials ( ) if not os . path . exists ( filename ) : raise DistlibException ( 'not found: %s' % filename ) metadata . validate ( ) d = metadata . todict ( ) sig_file = None if signer : if not self . gpg : logger . warning ( 'no signing program available - not signed' ) else : sig_file = self . sign_file ( filename , signer , sign_password , keystore ) with open ( filename , 'rb' ) as f : file_data = f . read ( ) md5_digest = hashlib . md5 ( file_data ) . hexdigest ( ) sha256_digest = hashlib . sha256 ( file_data ) . hexdigest ( ) d . update ( { ':action' : 'file_upload' , 'protcol_version' : '1' , 'filetype' : filetype , 'pyversion' : pyversion , 'md5_digest' : md5_digest , 'sha256_digest' : sha256_digest , } ) files = [ ( 'content' , os . path . basename ( filename ) , file_data ) ] if sig_file : with open ( sig_file , 'rb' ) as f : sig_data = f . read ( ) files . append ( ( 'gpg_signature' , os . path . basename ( sig_file ) , sig_data ) ) shutil . rmtree ( os . path . dirname ( sig_file ) ) request = self . encode_request ( d . items ( ) , files ) return self . send_request ( request ) | Upload a release file to the index . |
38,430 | def _get_dependency_order ( g , node_list ) : access_ = accessibility ( g ) deps = dict ( ( k , set ( v ) - set ( [ k ] ) ) for k , v in access_ . iteritems ( ) ) nodes = node_list + list ( set ( g . nodes ( ) ) - set ( node_list ) ) ordered_nodes = [ ] while nodes : n_ = nodes [ 0 ] n_deps = deps . get ( n_ ) if ( n_ in ordered_nodes ) or ( n_deps is None ) : nodes = nodes [ 1 : ] continue moved = False for i , n in enumerate ( nodes [ 1 : ] ) : if n in n_deps : nodes = [ nodes [ i + 1 ] ] + nodes [ : i + 1 ] + nodes [ i + 2 : ] moved = True break if not moved : ordered_nodes . append ( n_ ) nodes = nodes [ 1 : ] return ordered_nodes | Return list of nodes as close as possible to the ordering in node_list but with child nodes earlier in the list than parents . |
38,431 | def _short_req_str ( package_request ) : if not package_request . conflict : versions = package_request . range . to_versions ( ) if versions and len ( versions ) == len ( package_request . range ) and len ( versions ) > 1 : return "%s-%s(%d)" % ( package_request . name , str ( package_request . range . span ( ) ) , len ( versions ) ) return str ( package_request ) | print shortened version of == X| == Y| == Z ranged requests . |
38,432 | def requires_list ( self ) : requires = self . variant . get_requires ( build_requires = self . building ) reqlist = RequirementList ( requires ) if reqlist . conflict : raise ResolveError ( "The package %s has an internal requirements conflict: %s" % ( str ( self ) , str ( reqlist ) ) ) return reqlist | It is important that this property is calculated lazily . Getting the requires attribute may trigger a package load which may be avoided if this variant is reduced away before that happens . |
38,433 | def sort ( self ) : if self . sorted : return def key ( variant ) : requested_key = [ ] names = set ( ) for i , request in enumerate ( self . solver . request_list ) : if not request . conflict : req = variant . requires_list . get ( request . name ) if req is not None : requested_key . append ( ( - i , req . range ) ) names . add ( req . name ) additional_key = [ ] for request in variant . requires_list : if not request . conflict and request . name not in names : additional_key . append ( ( request . range , request . name ) ) if ( VariantSelectMode [ config . variant_select_mode ] == VariantSelectMode . version_priority ) : k = ( requested_key , - len ( additional_key ) , additional_key , variant . index ) else : k = ( len ( requested_key ) , requested_key , - len ( additional_key ) , additional_key , variant . index ) return k self . variants . sort ( key = key , reverse = True ) self . sorted = True | Sort variants from most correct to consume to least . |
38,434 | def get_intersection ( self , range_ ) : result = [ ] for entry in self . entries : package , value = entry if value is None : continue if package . version not in range_ : continue if isinstance ( value , list ) : variants = value entry_ = _PackageEntry ( package , variants , self . solver ) result . append ( entry_ ) continue if self . solver . package_filter : rule = self . solver . package_filter . excludes ( package ) if rule : if config . debug_package_exclusions : print_debug ( "Package '%s' was excluded by rule '%s'" % ( package . qualified_name , str ( rule ) ) ) entry [ 1 ] = None continue if self . solver . package_load_callback : self . solver . package_load_callback ( package ) variants_ = [ ] for var in package . iter_variants ( ) : variant = PackageVariant ( var , self . solver . building ) variants_ . append ( variant ) entry [ 1 ] = variants_ entry_ = _PackageEntry ( package , variants_ , self . solver ) result . append ( entry_ ) return result or None | Get a list of variants that intersect with the given range . |
38,435 | def intersect ( self , range_ ) : self . solver . intersection_broad_tests_count += 1 if range_ . is_any ( ) : return self if self . solver . optimised : if range_ in self . been_intersected_with : return self if self . pr : self . pr . passive ( "intersecting %s wrt range '%s'..." , self , range_ ) self . solver . intersection_tests_count += 1 with self . solver . timed ( self . solver . intersection_time ) : entries = [ x for x in self . entries if x . version in range_ ] if not entries : return None elif len ( entries ) < len ( self . entries ) : copy_ = self . _copy ( entries ) copy_ . been_intersected_with . add ( range_ ) return copy_ else : self . been_intersected_with . add ( range_ ) return self | Remove variants whose version fall outside of the given range . |
38,436 | def reduce_by ( self , package_request ) : if self . pr : reqstr = _short_req_str ( package_request ) self . pr . passive ( "reducing %s wrt %s..." , self , reqstr ) if self . solver . optimised : if package_request in self . been_reduced_by : return ( self , [ ] ) if ( package_request . range is None ) or ( package_request . name not in self . fam_requires ) : return ( self , [ ] ) with self . solver . timed ( self . solver . reduction_time ) : return self . _reduce_by ( package_request ) | Remove variants whos dependencies conflict with the given package request . |
38,437 | def split ( self ) : self . sort_versions ( ) def _split ( i_entry , n_variants , common_fams = None ) : result = self . entries [ i_entry ] . split ( n_variants ) if result : entry , next_entry = result entries = self . entries [ : i_entry ] + [ entry ] next_entries = [ next_entry ] + self . entries [ i_entry + 1 : ] else : entries = self . entries [ : i_entry + 1 ] next_entries = self . entries [ i_entry + 1 : ] slice_ = self . _copy ( entries ) next_slice = self . _copy ( next_entries ) if self . pr : if common_fams : if len ( common_fams ) == 1 : reason_str = iter ( common_fams ) . next ( ) else : reason_str = ", " . join ( common_fams ) else : reason_str = "first variant" self . pr ( "split (reason: %s) %s into %s and %s" , reason_str , self , slice_ , next_slice ) return slice_ , next_slice if len ( self ) > 2 : fams = self . first_variant . request_fams - self . extracted_fams else : fams = None if not fams : return _split ( 0 , 1 ) prev = None for i , entry in enumerate ( self . entries ) : entry . sort ( ) for j , variant in enumerate ( entry . variants ) : fams = fams & variant . request_fams if not fams : return _split ( * prev ) prev = ( i , j + 1 , fams ) raise RezSystemError ( "Unexpected solver error: common family(s) still in slice being " "split: slice: %s, family(s): %s" % ( self , str ( fams ) ) ) | Split the slice . |
38,438 | def sort_versions ( self ) : if self . sorted : return for orderer in ( self . solver . package_orderers or [ ] ) : entries = orderer . reorder ( self . entries , key = lambda x : x . package ) if entries is not None : self . entries = entries self . sorted = True if self . pr : self . pr ( "sorted: %s packages: %s" , self . package_name , repr ( orderer ) ) return self . entries = sorted ( self . entries , key = lambda x : x . version , reverse = True ) self . sorted = True if self . pr : self . pr ( "sorted: %s packages: version descending" , self . package_name ) | Sort entries by version . |
38,439 | def get_variant_slice ( self , package_name , range_ ) : variant_list = self . variant_lists . get ( package_name ) if variant_list is None : variant_list = _PackageVariantList ( package_name , self . solver ) self . variant_lists [ package_name ] = variant_list entries = variant_list . get_intersection ( range_ ) if not entries : return None slice_ = _PackageVariantSlice ( package_name , entries = entries , solver = self . solver ) return slice_ | Get a list of variants from the cache . |
38,440 | def intersect ( self , range_ ) : new_slice = None if self . package_request . conflict : if self . package_request . range is None : new_slice = self . solver . _get_variant_slice ( self . package_name , range_ ) else : new_range = range_ - self . package_request . range if new_range is not None : new_slice = self . solver . _get_variant_slice ( self . package_name , new_range ) else : new_slice = self . variant_slice . intersect ( range_ ) if new_slice is None : if self . pr : self . pr ( "%s intersected with range '%s' resulted in no packages" , self , range_ ) return None if new_slice is not self . variant_slice : scope = self . _copy ( new_slice ) if self . pr : self . pr ( "%s was intersected to %s by range '%s'" , self , scope , range_ ) return scope return self | Intersect this scope with a package range . |
38,441 | def reduce_by ( self , package_request ) : self . solver . reduction_broad_tests_count += 1 if self . package_request . conflict : return ( self , [ ] ) new_slice , reductions = self . variant_slice . reduce_by ( package_request ) if new_slice is None : self . solver . reductions_count += 1 if self . pr : reqstr = _short_req_str ( package_request ) self . pr ( "%s was reduced to nothing by %s" , self , reqstr ) self . pr . br ( ) return ( None , reductions ) if new_slice is not self . variant_slice : self . solver . reductions_count += 1 scope = self . _copy ( new_slice ) if self . pr : reqstr = _short_req_str ( package_request ) self . pr ( "%s was reduced to %s by %s" , self , scope , reqstr ) self . pr . br ( ) return ( scope , reductions ) return ( self , [ ] ) | Reduce this scope wrt a package request . |
38,442 | def split ( self ) : if self . package_request . conflict or ( len ( self . variant_slice ) == 1 ) : return None else : r = self . variant_slice . split ( ) if r is None : return None else : slice , next_slice = r scope = self . _copy ( slice ) next_scope = self . _copy ( next_slice ) return ( scope , next_scope ) | Split the scope . |
38,443 | def finalise ( self ) : assert ( self . _is_solved ( ) ) g = self . _get_minimal_graph ( ) scopes = dict ( ( x . package_name , x ) for x in self . scopes if not x . package_request . conflict ) fam_cycle = find_cycle ( g ) if fam_cycle : cycle = [ ] for fam in fam_cycle : scope = scopes [ fam ] variant = scope . _get_solved_variant ( ) stmt = VersionedObject . construct ( fam , variant . version ) cycle . append ( stmt ) phase = copy . copy ( self ) phase . scopes = scopes . values ( ) phase . failure_reason = Cycle ( cycle ) phase . status = SolverStatus . cyclic return phase fams = [ x . name for x in self . solver . request_list ] ordered_fams = _get_dependency_order ( g , fams ) scopes_ = [ ] for fam in ordered_fams : scope = scopes [ fam ] if not scope . package_request . conflict : scopes_ . append ( scope ) phase = copy . copy ( self ) phase . scopes = scopes_ return phase | Remove conflict requests detect cyclic dependencies and reorder packages wrt dependency and then request order . |
38,444 | def split ( self ) : assert ( self . status == SolverStatus . exhausted ) scopes = [ ] next_scopes = [ ] split_i = None for i , scope in enumerate ( self . scopes ) : if split_i is None : r = scope . split ( ) if r is not None : scope_ , next_scope = r scopes . append ( scope_ ) next_scopes . append ( next_scope ) split_i = i continue scopes . append ( scope ) next_scopes . append ( scope ) assert split_i is not None phase = copy . copy ( self ) phase . scopes = scopes phase . status = SolverStatus . pending phase . changed_scopes_i = set ( [ split_i ] ) next_phase = copy . copy ( phase ) next_phase . scopes = next_scopes return ( phase , next_phase ) | Split the phase . |
38,445 | def status ( self ) : if self . request_list . conflict : return SolverStatus . failed if self . callback_return == SolverCallbackReturn . fail : return SolverStatus . failed st = self . phase_stack [ - 1 ] . status if st == SolverStatus . cyclic : return SolverStatus . failed elif len ( self . phase_stack ) > 1 : if st == SolverStatus . solved : return SolverStatus . solved else : return SolverStatus . unsolved elif st in ( SolverStatus . pending , SolverStatus . exhausted ) : return SolverStatus . unsolved else : return st | Return the current status of the solve . |
38,446 | def num_fails ( self ) : n = len ( self . failed_phase_list ) if self . phase_stack [ - 1 ] . status in ( SolverStatus . failed , SolverStatus . cyclic ) : n += 1 return n | Return the number of failed solve steps that have been executed . Note that num_solves is inclusive of failures . |
38,447 | def resolved_packages ( self ) : if ( self . status != SolverStatus . solved ) : return None final_phase = self . phase_stack [ - 1 ] return final_phase . _get_solved_variants ( ) | Return a list of PackageVariant objects or None if the resolve did not complete or was unsuccessful . |
38,448 | def reset ( self ) : if not self . request_list . conflict : phase = _ResolvePhase ( self . request_list . requirements , solver = self ) self . pr ( "resetting..." ) self . _init ( ) self . _push_phase ( phase ) | Reset the solver removing any current solve . |
38,449 | def solve ( self ) : if self . solve_begun : raise ResolveError ( "cannot run solve() on a solve that has " "already been started" ) t1 = time . time ( ) pt1 = package_repo_stats . package_load_time while self . status == SolverStatus . unsolved : self . solve_step ( ) if self . status == SolverStatus . unsolved and not self . _do_callback ( ) : break self . load_time = package_repo_stats . package_load_time - pt1 self . solve_time = time . time ( ) - t1 if self . pr . verbosity > 2 : from pprint import pformat self . pr . subheader ( "SOLVE STATS:" ) self . pr ( pformat ( self . solve_stats ) ) elif self . print_stats : from pprint import pformat data = { "solve_stats" : self . solve_stats } print >> ( self . buf or sys . stdout ) , pformat ( data ) | Attempt to solve the request . |
38,450 | def solve_step ( self ) : self . solve_begun = True if self . status != SolverStatus . unsolved : return if self . pr : self . pr . header ( "SOLVE #%d (%d fails so far)..." , self . solve_count + 1 , self . num_fails ) phase = self . _pop_phase ( ) if phase . status == SolverStatus . failed : self . pr ( "discarded failed phase, fetching previous unsolved phase..." ) self . failed_phase_list . append ( phase ) phase = self . _pop_phase ( ) if phase . status == SolverStatus . exhausted : self . pr . subheader ( "SPLITTING:" ) phase , next_phase = phase . split ( ) self . _push_phase ( next_phase ) if self . pr : self . pr ( "new phase: %s" , phase ) new_phase = phase . solve ( ) self . solve_count += 1 if new_phase . status == SolverStatus . failed : self . pr . subheader ( "FAILED:" ) self . _push_phase ( new_phase ) if self . pr and len ( self . phase_stack ) == 1 : self . pr . header ( "FAIL: there is no solution" ) elif new_phase . status == SolverStatus . solved : self . pr . subheader ( "SOLVED:" ) final_phase = new_phase . finalise ( ) self . _push_phase ( final_phase ) if self . pr : if final_phase . status == SolverStatus . cyclic : self . pr . header ( "FAIL: a cycle was detected" ) else : self . pr . header ( "SUCCESS" ) else : self . pr . subheader ( "EXHAUSTED:" ) assert ( new_phase . status == SolverStatus . exhausted ) self . _push_phase ( new_phase ) | Perform a single solve step . |
38,451 | def failure_reason ( self , failure_index = None ) : phase , _ = self . _get_failed_phase ( failure_index ) return phase . failure_reason | Get the reason for a failure . |
38,452 | def failure_packages ( self , failure_index = None ) : phase , _ = self . _get_failed_phase ( failure_index ) fr = phase . failure_reason return fr . involved_requirements ( ) if fr else None | Get packages involved in a failure . |
38,453 | def get_graph ( self ) : st = self . status if st in ( SolverStatus . solved , SolverStatus . unsolved ) : phase = self . _latest_nonfailed_phase ( ) return phase . get_graph ( ) else : return self . get_fail_graph ( ) | Returns the most recent solve graph . |
38,454 | def get_fail_graph ( self , failure_index = None ) : phase , _ = self . _get_failed_phase ( failure_index ) return phase . get_graph ( ) | Returns a graph showing a solve failure . |
38,455 | def dump ( self ) : from rez . utils . formatting import columnise rows = [ ] for i , phase in enumerate ( self . phase_stack ) : rows . append ( ( self . _depth_label ( i ) , phase . status , str ( phase ) ) ) print "status: %s (%s)" % ( self . status . name , self . status . description ) print "initial request: %s" % str ( self . request_list ) print print "solve stack:" print '\n' . join ( columnise ( rows ) ) if self . failed_phase_list : rows = [ ] for i , phase in enumerate ( self . failed_phase_list ) : rows . append ( ( "#%d" % i , phase . status , str ( phase ) ) ) print print "previous failures:" print '\n' . join ( columnise ( rows ) ) | Print a formatted summary of the current solve state . |
38,456 | def make_path_writable ( path ) : from rez . config import config try : orig_mode = os . stat ( path ) . st_mode new_mode = orig_mode if config . make_package_temporarily_writable and not os . access ( path , os . W_OK ) : new_mode = orig_mode | stat . S_IWUSR if new_mode != orig_mode : os . chmod ( path , new_mode ) except OSError : orig_mode = None new_mode = None try : yield finally : if new_mode != orig_mode : os . chmod ( path , orig_mode ) | Temporarily make path writable if possible . |
38,457 | def get_existing_path ( path , topmost_path = None ) : prev_path = None if topmost_path : topmost_path = os . path . normpath ( topmost_path ) while True : if os . path . exists ( path ) : return path path = os . path . dirname ( path ) if path == prev_path : return None if topmost_path and os . path . normpath ( path ) == topmost_path : return None prev_path = path | Get the longest parent path in path that exists . |
38,458 | def safe_makedirs ( path ) : if not os . path . exists ( path ) : try : os . makedirs ( path ) except OSError : if not os . path . exists ( path ) : raise | Safe makedirs . |
38,459 | def safe_remove ( path ) : if not os . path . exists ( path ) : return try : if os . path . isdir ( path ) and not os . path . islink ( path ) : shutil . rmtree ( path ) else : os . remove ( path ) except OSError : if os . path . exists ( path ) : raise | Safely remove the given file or directory . |
38,460 | def replacing_symlink ( source , link_name ) : with make_tmp_name ( link_name ) as tmp_link_name : os . symlink ( source , tmp_link_name ) replace_file_or_dir ( link_name , tmp_link_name ) | Create symlink that overwrites any existing target . |
38,461 | def replacing_copy ( src , dest , follow_symlinks = False ) : with make_tmp_name ( dest ) as tmp_dest : if os . path . islink ( src ) and not follow_symlinks : src_ = os . readlink ( src ) os . symlink ( src_ , tmp_dest ) elif os . path . isdir ( src ) : shutil . copytree ( src , tmp_dest , symlinks = ( not follow_symlinks ) ) else : shutil . copy2 ( src , tmp_dest ) replace_file_or_dir ( dest , tmp_dest ) | Perform copy that overwrites any existing target . |
38,462 | def replace_file_or_dir ( dest , source ) : from rez . vendor . atomicwrites import replace_atomic if not os . path . exists ( dest ) : try : os . rename ( source , dest ) return except : if not os . path . exists ( dest ) : raise try : replace_atomic ( source , dest ) return except : pass with make_tmp_name ( dest ) as tmp_dest : os . rename ( dest , tmp_dest ) os . rename ( source , dest ) | Replace dest with source . |
38,463 | def make_tmp_name ( name ) : path , base = os . path . split ( name ) tmp_base = ".tmp-%s-%s" % ( base , uuid4 ( ) . hex ) tmp_name = os . path . join ( path , tmp_base ) try : yield tmp_name finally : safe_remove ( tmp_name ) | Generates a tmp name for a file or dir . |
38,464 | def is_subdirectory ( path_a , path_b ) : path_a = os . path . realpath ( path_a ) path_b = os . path . realpath ( path_b ) relative = os . path . relpath ( path_a , path_b ) return ( not relative . startswith ( os . pardir + os . sep ) ) | Returns True if path_a is a subdirectory of path_b . |
38,465 | def find_matching_symlink ( path , source ) : def to_abs ( target ) : if os . path . isabs ( target ) : return target else : return os . path . normpath ( os . path . join ( path , target ) ) abs_source = to_abs ( source ) for name in os . listdir ( path ) : linkpath = os . path . join ( path , name ) if os . path . islink : source_ = os . readlink ( linkpath ) if to_abs ( source_ ) == abs_source : return name return None | Find a symlink under path that points at source . |
38,466 | def copy_or_replace ( src , dst ) : try : shutil . copy ( src , dst ) except ( OSError , IOError ) , e : import errno if e . errno == errno . EPERM : import tempfile if os . path . isdir ( dst ) : dst = os . path . join ( dst , os . path . basename ( src ) ) dst_dir , dst_name = os . path . split ( dst ) dst_temp = tempfile . mktemp ( prefix = dst_name + '.' , dir = dst_dir ) shutil . copy ( src , dst_temp ) if not os . path . isfile ( dst_temp ) : raise RuntimeError ( "shutil.copy completed successfully, but path" " '%s' still did not exist" % dst_temp ) os . remove ( dst ) shutil . move ( dst_temp , dst ) | try to copy with mode and if it fails try replacing |
38,467 | def movetree ( src , dst ) : try : shutil . move ( src , dst ) except : copytree ( src , dst , symlinks = True , hardlinks = True ) shutil . rmtree ( src ) | Attempts a move and falls back to a copy + delete if this fails |
38,468 | def safe_chmod ( path , mode ) : if stat . S_IMODE ( os . stat ( path ) . st_mode ) != mode : os . chmod ( path , mode ) | Set the permissions mode on path but only if it differs from the current mode . |
38,469 | def encode_filesystem_name ( input_str ) : if isinstance ( input_str , str ) : input_str = unicode ( input_str ) elif not isinstance ( input_str , unicode ) : raise TypeError ( "input_str must be a basestring" ) as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-' uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ' result = [ ] for char in input_str : if char in as_is : result . append ( char ) elif char == u'_' : result . append ( '__' ) elif char in uppercase : result . append ( '_%s' % char . lower ( ) ) else : utf8 = char . encode ( 'utf8' ) N = len ( utf8 ) if N == 1 : N = 0 HH = '' . join ( '%x' % ord ( c ) for c in utf8 ) result . append ( '_%d%s' % ( N , HH ) ) return '' . join ( result ) | Encodes an arbitrary unicode string to a generic filesystem - compatible non - unicode filename . |
38,470 | def decode_filesystem_name ( filename ) : result = [ ] remain = filename i = 0 while remain : match = _FILESYSTEM_TOKEN_RE . match ( remain ) if not match : raise ValueError ( "incorrectly encoded filesystem name %r" " (bad index: %d - %r)" % ( filename , i , remain [ : 2 ] ) ) match_str = match . group ( 0 ) match_len = len ( match_str ) i += match_len remain = remain [ match_len : ] match_dict = match . groupdict ( ) if match_dict [ 'as_is' ] : result . append ( unicode ( match_str ) ) elif match_dict [ 'underscore' ] : result . append ( u'_' ) elif match_dict [ 'uppercase' ] : result . append ( unicode ( match_dict [ 'uppercase' ] . upper ( ) ) ) elif match_dict [ 'N' ] : N = int ( match_dict [ 'N' ] ) if N == 0 : N = 1 bytes_len = 2 * N i += bytes_len bytes = remain [ : bytes_len ] remain = remain [ bytes_len : ] if not _HEX_RE . match ( bytes ) : raise ValueError ( "Bad utf8 encoding in name %r" " (bad index: %d - %r)" % ( filename , i , bytes ) ) bytes_repr = '' . join ( '\\x%s' % bytes [ i : i + 2 ] for i in xrange ( 0 , bytes_len , 2 ) ) bytes_repr = "'%s'" % bytes_repr result . append ( eval ( bytes_repr ) . decode ( 'utf8' ) ) else : raise ValueError ( "Unrecognized match type in filesystem name %r" " (bad index: %d - %r)" % ( filename , i , remain [ : 2 ] ) ) return u'' . join ( result ) | Decodes a filename encoded using the rules given in encode_filesystem_name to a unicode string . |
38,471 | def walk_up_dirs ( path ) : prev_path = None current_path = os . path . abspath ( path ) while current_path != prev_path : yield current_path prev_path = current_path current_path = os . path . dirname ( prev_path ) | Yields absolute directories starting with the given path and iterating up through all its parents until it reaches a root directory
38,472 | def run ( self , * args ) : if self . prefix_char is None : prefix_char = config . suite_alias_prefix_char else : prefix_char = self . prefix_char if prefix_char == '' : return self . _run_no_args ( args ) else : return self . _run ( prefix_char , args ) | Invoke the wrapped script . |
38,473 | def print_about ( self ) : filepath = os . path . join ( self . suite_path , "bin" , self . tool_name ) print "Tool: %s" % self . tool_name print "Path: %s" % filepath print "Suite: %s" % self . suite_path msg = "%s (%r)" % ( self . context . load_path , self . context_name ) print "Context: %s" % msg variants = self . context . get_tool_variants ( self . tool_name ) if variants : if len ( variants ) > 1 : self . _print_conflicting ( variants ) else : variant = iter ( variants ) . next ( ) print "Package: %s" % variant . qualified_package_name return 0 | Print an info message about the tool . |
38,474 | def print_package_versions ( self ) : variants = self . context . get_tool_variants ( self . tool_name ) if variants : if len ( variants ) > 1 : self . _print_conflicting ( variants ) return 1 else : from rez . packages_ import iter_packages variant = iter ( variants ) . next ( ) it = iter_packages ( name = variant . name ) rows = [ ] colors = [ ] for pkg in sorted ( it , key = lambda x : x . version , reverse = True ) : if pkg . version == variant . version : name = "* %s" % pkg . qualified_name col = heading else : name = " %s" % pkg . qualified_name col = local if pkg . is_local else None label = "(local)" if pkg . is_local else "" rows . append ( ( name , pkg . path , label ) ) colors . append ( col ) _pr = Printer ( ) for col , line in zip ( colors , columnise ( rows ) ) : _pr ( line , col ) return 0 | Print a list of versions of the package this tool comes from and indicate which version this tool is from . |
38,475 | def generate_hypergraph ( num_nodes , num_edges , r = 0 ) : random_graph = hypergraph ( ) nodes = list ( map ( str , list ( range ( num_nodes ) ) ) ) random_graph . add_nodes ( nodes ) edges = list ( map ( str , list ( range ( num_nodes , num_nodes + num_edges ) ) ) ) random_graph . add_hyperedges ( edges ) if 0 == r : for e in edges : for n in nodes : if choice ( [ True , False ] ) : random_graph . link ( n , e ) else : for e in edges : shuffle ( nodes ) for i in range ( r ) : random_graph . link ( nodes [ i ] , e ) return random_graph | Create a random hyper graph . |
38,476 | def check_version ( version , range_ = None ) : if range_ and version not in range_ : raise RezBindError ( "found version %s is not within range %s" % ( str ( version ) , str ( range_ ) ) ) | Check that the found software version is within supplied range . |
38,477 | def extract_version ( exepath , version_arg , word_index = - 1 , version_rank = 3 ) : if isinstance ( version_arg , basestring ) : version_arg = [ version_arg ] args = [ exepath ] + version_arg stdout , stderr , returncode = _run_command ( args ) if returncode : raise RezBindError ( "failed to execute %s: %s\n(error code %d)" % ( exepath , stderr , returncode ) ) stdout = stdout . strip ( ) . split ( '\n' ) [ 0 ] . strip ( ) log ( "extracting version from output: '%s'" % stdout ) try : strver = stdout . split ( ) [ word_index ] toks = strver . replace ( '.' , ' ' ) . replace ( '-' , ' ' ) . split ( ) strver = '.' . join ( toks [ : version_rank ] ) version = Version ( strver ) except Exception as e : raise RezBindError ( "failed to parse version from output '%s': %s" % ( stdout , str ( e ) ) ) log ( "extracted version: '%s'" % str ( version ) ) return version | Run an executable and get the program version . |
38,478 | def construct ( cls , name , version = None ) : other = VersionedObject ( None ) other . name_ = name other . version_ = Version ( ) if version is None else version return other | Create a VersionedObject directly from an object name and version . |
38,479 | def construct ( cls , name , range = None ) : other = Requirement ( None ) other . name_ = name other . range_ = VersionRange ( ) if range is None else range return other | Create a requirement directly from an object name and VersionRange . |
38,480 | def conflicts_with ( self , other ) : if isinstance ( other , Requirement ) : if ( self . name_ != other . name_ ) or ( self . range is None ) or ( other . range is None ) : return False elif self . conflict : return False if other . conflict else self . range_ . issuperset ( other . range_ ) elif other . conflict : return other . range_ . issuperset ( self . range_ ) else : return not self . range_ . intersects ( other . range_ ) else : if ( self . name_ != other . name_ ) or ( self . range is None ) : return False if self . conflict : return ( other . version_ in self . range_ ) else : return ( other . version_ not in self . range_ ) | Returns True if this requirement conflicts with another Requirement or VersionedObject . |
38,481 | def merged ( self , other ) : if self . name_ != other . name_ : return None def _r ( r_ ) : r = Requirement ( None ) r . name_ = r_ . name_ r . negate_ = r_ . negate_ r . conflict_ = r_ . conflict_ r . sep_ = r_ . sep_ return r if self . range is None : return other elif other . range is None : return self elif self . conflict : if other . conflict : r = _r ( self ) r . range_ = self . range_ | other . range_ r . negate_ = ( self . negate_ and other . negate_ and not r . range_ . is_any ( ) ) return r else : range_ = other . range - self . range if range_ is None : return None else : r = _r ( other ) r . range_ = range_ return r elif other . conflict : range_ = self . range_ - other . range_ if range_ is None : return None else : r = _r ( self ) r . range_ = range_ return r else : range_ = self . range_ & other . range_ if range_ is None : return None else : r = _r ( self ) r . range_ = range_ return r | Returns the merged result of two requirements . |
38,482 | def read_graph_from_string ( txt ) : if not txt . startswith ( '{' ) : return read_dot ( txt ) def conv ( value ) : if isinstance ( value , basestring ) : return '"' + value + '"' else : return value doc = literal_eval ( txt ) g = digraph ( ) for attrs , values in doc . get ( "nodes" , [ ] ) : attrs = [ ( k , conv ( v ) ) for k , v in attrs ] for value in values : if isinstance ( value , basestring ) : node_name = value attrs_ = attrs else : node_name , label = value attrs_ = attrs + [ ( "label" , conv ( label ) ) ] g . add_node ( node_name , attrs = attrs_ ) for attrs , values in doc . get ( "edges" , [ ] ) : attrs_ = [ ( k , conv ( v ) ) for k , v in attrs ] for value in values : if len ( value ) == 3 : edge = value [ : 2 ] label = value [ - 1 ] else : edge = value label = '' g . add_edge ( edge , label = label , attrs = attrs_ ) return g | Read a graph from a string either in dot format or our own compressed format . |
38,483 | def write_compacted ( g ) : d_nodes = { } d_edges = { } def conv ( value ) : if isinstance ( value , basestring ) : return value . strip ( '"' ) else : return value for node in g . nodes ( ) : label = None attrs = [ ] for k , v in sorted ( g . node_attributes ( node ) ) : v_ = conv ( v ) if k == "label" : label = v_ else : attrs . append ( ( k , v_ ) ) value = ( node , label ) if label else node d_nodes . setdefault ( tuple ( attrs ) , [ ] ) . append ( value ) for edge in g . edges ( ) : attrs = [ ( k , conv ( v ) ) for k , v in sorted ( g . edge_attributes ( edge ) ) ] label = str ( g . edge_label ( edge ) ) value = tuple ( list ( edge ) + [ label ] ) if label else edge d_edges . setdefault ( tuple ( attrs ) , [ ] ) . append ( tuple ( value ) ) doc = dict ( nodes = d_nodes . items ( ) , edges = d_edges . items ( ) ) contents = str ( doc ) return contents | Write a graph in our own compacted format . |
38,484 | def write_dot ( g ) : lines = [ "digraph g {" ] def attrs_txt ( items ) : if items : txt = ", " . join ( ( '%s="%s"' % ( k , str ( v ) . strip ( '"' ) ) ) for k , v in items ) return '[' + txt + ']' else : return '' for node in g . nodes ( ) : atxt = attrs_txt ( g . node_attributes ( node ) ) txt = "%s %s;" % ( node , atxt ) lines . append ( txt ) for e in g . edges ( ) : edge_from , edge_to = e attrs = g . edge_attributes ( e ) label = str ( g . edge_label ( e ) ) if label : attrs . append ( ( "label" , label ) ) atxt = attrs_txt ( attrs ) txt = "%s -> %s %s;" % ( edge_from , edge_to , atxt ) lines . append ( txt ) lines . append ( "}" ) return '\n' . join ( lines ) | Replacement for pygraph . readwrite . dot . write which is dog slow . |
38,485 | def prune_graph ( graph_str , package_name ) : g = read_dot ( graph_str ) nodes = set ( ) for node , attrs in g . node_attr . iteritems ( ) : attr = [ x for x in attrs if x [ 0 ] == "label" ] if attr : label = attr [ 0 ] [ 1 ] try : req_str = _request_from_label ( label ) request = PackageRequest ( req_str ) except PackageRequestError : continue if request . name == package_name : nodes . add ( node ) if not nodes : raise ValueError ( "The package %r does not appear in the graph." % package_name ) g_rev = g . reverse ( ) accessible_nodes = set ( ) access = accessibility ( g_rev ) for node in nodes : nodes_ = access . get ( node , [ ] ) accessible_nodes |= set ( nodes_ ) inaccessible_nodes = set ( g . nodes ( ) ) - accessible_nodes for node in inaccessible_nodes : g . del_node ( node ) return write_dot ( g ) | Prune a package graph so it only contains nodes accessible from the given package . |
38,486 | def save_graph ( graph_str , dest_file , fmt = None , image_ratio = None ) : g = pydot . graph_from_dot_data ( graph_str ) if fmt is None : fmt = os . path . splitext ( dest_file ) [ 1 ] . lower ( ) . strip ( '.' ) or "png" if hasattr ( g , "write_" + fmt ) : write_fn = getattr ( g , "write_" + fmt ) else : raise Exception ( "Unsupported graph format: '%s'" % fmt ) if image_ratio : g . set_ratio ( str ( image_ratio ) ) write_fn ( dest_file ) return fmt | Render a graph to an image file . |
38,487 | def view_graph ( graph_str , dest_file = None ) : from rez . system import system from rez . config import config if ( system . platform == "linux" ) and ( not os . getenv ( "DISPLAY" ) ) : print >> sys . stderr , "Unable to open display." sys . exit ( 1 ) dest_file = _write_graph ( graph_str , dest_file = dest_file ) viewed = False prog = config . image_viewer or 'browser' print "loading image viewer (%s)..." % prog if config . image_viewer : proc = popen ( [ config . image_viewer , dest_file ] ) proc . wait ( ) viewed = not bool ( proc . returncode ) if not viewed : import webbrowser webbrowser . open_new ( "file://" + dest_file ) | View a dot graph in an image viewer . |
38,488 | def physical_cores ( self ) : try : return self . _physical_cores_base ( ) except Exception as e : from rez . utils . logging_ import print_error print_error ( "Error detecting physical core count, defaulting to 1: %s" % str ( e ) ) return 1 | Return the number of physical cpu cores on the system . |
38,489 | def logical_cores ( self ) : try : return self . _logical_cores ( ) except Exception as e : from rez . utils . logging_ import print_error print_error ( "Error detecting logical core count, defaulting to 1: %s" % str ( e ) ) return 1 | Return the number of cpu cores as reported to the os . |
38,490 | def write_and_convert ( self , text ) : cursor = 0 for match in self . ANSI_RE . finditer ( text ) : start , end = match . span ( ) self . write_plain_text ( text , cursor , start ) self . convert_ansi ( * match . groups ( ) ) cursor = end self . write_plain_text ( text , cursor , len ( text ) ) | Write the given text to our wrapped stream stripping any ANSI sequences from the text and optionally converting them into win32 calls . |
38,491 | def copy ( self ) : other = ContextModel ( self . _context , self . parent ( ) ) other . _stale = self . _stale other . _modified = self . _modified other . request = self . request [ : ] other . packages_path = self . packages_path other . implicit_packages = self . implicit_packages other . package_filter = self . package_filter other . caching = self . caching other . default_patch_lock = self . default_patch_lock other . patch_locks = copy . deepcopy ( self . patch_locks ) return other | Returns a copy of the context . |
38,492 | def get_lock_requests ( self ) : d = defaultdict ( list ) if self . _context : for variant in self . _context . resolved_packages : name = variant . name version = variant . version lock = self . patch_locks . get ( name ) if lock is None : lock = self . default_patch_lock request = get_lock_request ( name , version , lock ) if request is not None : d [ lock ] . append ( request ) return d | Take the current context and the current patch locks and determine the effective requests that will be added to the main request . |
38,493 | def resolve_context ( self , verbosity = 0 , max_fails = - 1 , timestamp = None , callback = None , buf = None , package_load_callback = None ) : package_filter = PackageFilterList . from_pod ( self . package_filter ) context = ResolvedContext ( self . request , package_paths = self . packages_path , package_filter = package_filter , verbosity = verbosity , max_fails = max_fails , timestamp = timestamp , buf = buf , callback = callback , package_load_callback = package_load_callback , caching = self . caching ) if context . success : if self . _context and self . _context . load_path : context . set_load_path ( self . _context . load_path ) self . _set_context ( context ) self . _modified = True return context | Update the current context by performing a re - resolve . |
38,494 | def set_context ( self , context ) : self . _set_context ( context , emit = False ) self . _modified = ( not context . load_path ) self . dataChanged . emit ( self . CONTEXT_CHANGED | self . REQUEST_CHANGED | self . PACKAGES_PATH_CHANGED | self . LOCKS_CHANGED | self . LOADPATH_CHANGED | self . PACKAGE_FILTER_CHANGED | self . CACHING_CHANGED ) | Replace the current context with another . |
38,495 | def get_resources_dests ( resources_root , rules ) : def get_rel_path ( base , path ) : base = base . replace ( os . path . sep , '/' ) path = path . replace ( os . path . sep , '/' ) assert path . startswith ( base ) return path [ len ( base ) : ] . lstrip ( '/' ) destinations = { } for base , suffix , dest in rules : prefix = os . path . join ( resources_root , base ) for abs_base in iglob ( prefix ) : abs_glob = os . path . join ( abs_base , suffix ) for abs_path in iglob ( abs_glob ) : resource_file = get_rel_path ( resources_root , abs_path ) if dest is None : destinations . pop ( resource_file , None ) else : rel_path = get_rel_path ( abs_base , abs_path ) rel_dest = dest . replace ( os . path . sep , '/' ) . rstrip ( '/' ) destinations [ resource_file ] = rel_dest + '/' + rel_path return destinations | Find destinations for resource files
38,496 | def find_cycle ( graph ) : if ( isinstance ( graph , graph_class ) ) : directed = False elif ( isinstance ( graph , digraph_class ) ) : directed = True else : raise InvalidGraphType def find_cycle_to_ancestor ( node , ancestor ) : path = [ ] while ( node != ancestor ) : if ( node is None ) : return [ ] path . append ( node ) node = spanning_tree [ node ] path . append ( node ) path . reverse ( ) return path def dfs ( node ) : visited [ node ] = 1 for each in graph [ node ] : if ( cycle ) : return if ( each not in visited ) : spanning_tree [ each ] = node dfs ( each ) else : if ( directed or spanning_tree [ node ] != each ) : cycle . extend ( find_cycle_to_ancestor ( node , each ) ) recursionlimit = getrecursionlimit ( ) setrecursionlimit ( max ( len ( graph . nodes ( ) ) * 2 , recursionlimit ) ) visited = { } spanning_tree = { } cycle = [ ] for each in graph : if ( each not in visited ) : spanning_tree [ each ] = None dfs ( each ) if ( cycle ) : setrecursionlimit ( recursionlimit ) return cycle setrecursionlimit ( recursionlimit ) return [ ] | Find a cycle in the given graph . This function will return a list of nodes which form a cycle in the graph or an empty list if no cycle exists . |
38,497 | def create_release_vcs ( path , vcs_name = None ) : from rez . plugin_managers import plugin_manager vcs_types = get_release_vcs_types ( ) if vcs_name : if vcs_name not in vcs_types : raise ReleaseVCSError ( "Unknown version control system: %r" % vcs_name ) cls = plugin_manager . get_plugin_class ( 'release_vcs' , vcs_name ) return cls ( path ) classes_by_level = { } for vcs_name in vcs_types : cls = plugin_manager . get_plugin_class ( 'release_vcs' , vcs_name ) result = cls . find_vcs_root ( path ) if not result : continue vcs_path , levels_up = result classes_by_level . setdefault ( levels_up , [ ] ) . append ( ( cls , vcs_path ) ) if not classes_by_level : raise ReleaseVCSError ( "No version control system for package " "releasing is associated with the path %s" % path ) lowest_level = sorted ( classes_by_level ) [ 0 ] clss = classes_by_level [ lowest_level ] if len ( clss ) > 1 : clss_str = ", " . join ( x [ 0 ] . name ( ) for x in clss ) raise ReleaseVCSError ( "Several version control systems are associated " "with the path %s: %s. Use rez-release --vcs to " "choose." % ( path , clss_str ) ) else : cls , vcs_root = clss [ 0 ] return cls ( pkg_root = path , vcs_root = vcs_root ) | Return a new release VCS that can release from this source path . |
38,498 | def find_vcs_root ( cls , path ) : if cls . search_parents_for_root ( ) : valid_dirs = walk_up_dirs ( path ) else : valid_dirs = [ path ] for i , current_path in enumerate ( valid_dirs ) : if cls . is_valid_root ( current_path ) : return current_path , i return None | Try to find a version control root directory of this type for the given path . |
38,499 | def _cmd ( self , * nargs ) : cmd_str = ' ' . join ( map ( quote , nargs ) ) if self . package . config . debug ( "package_release" ) : print_debug ( "Running command: %s" % cmd_str ) p = popen ( nargs , stdout = subprocess . PIPE , stderr = subprocess . PIPE , cwd = self . pkg_root ) out , err = p . communicate ( ) if p . returncode : print_debug ( "command stdout:" ) print_debug ( out ) print_debug ( "command stderr:" ) print_debug ( err ) raise ReleaseVCSError ( "command failed: %s\n%s" % ( cmd_str , err ) ) out = out . strip ( ) if out : return [ x . rstrip ( ) for x in out . split ( '\n' ) ] else : return [ ] | Convenience function for executing a program such as git etc . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.