idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
226,300
def execute_function ( self , func , * nargs , * * kwargs ) : # makes a copy of the func import types fn = types . FunctionType ( func . func_code , func . func_globals . copy ( ) , name = func . func_name , argdefs = func . func_defaults , closure = func . func_closure ) fn . func_globals . update ( self . globals ) error_class = Exception if config . catch_rex_errors else None try : return fn ( * nargs , * * kwargs ) except RexError : raise except error_class as e : from inspect import getfile stack = traceback . format_exc ( ) filename = getfile ( func ) raise RexError ( "Failed to exec %s:\n\n%s" % ( filename , stack ) )
Execute a function object within the execution context .
185
10
226,301
def get_output ( self , style = OutputStyle . file ) : return self . manager . get_output ( style = style )
Returns the result of all previous calls to execute_code .
28
12
226,302
def configure ( self , graph , spanning_tree ) : self . graph = graph self . spanning_tree = spanning_tree
Configure the filter .
26
5
226,303
def iter_packages ( self , name , range_ = None , paths = None ) : for package in iter_packages ( name , range_ , paths ) : if not self . excludes ( package ) : yield package
Same as iter_packages in packages . py but also applies this filter .
45
15
226,304
def copy ( self ) : other = PackageFilter . __new__ ( PackageFilter ) other . _excludes = self . _excludes . copy ( ) other . _includes = self . _includes . copy ( ) return other
Return a shallow copy of the filter .
48
8
226,305
def cost ( self ) : total = 0.0 for family , rules in self . _excludes . iteritems ( ) : cost = sum ( x . cost ( ) for x in rules ) if family : cost = cost / float ( 10 ) total += cost return total
Get the approximate cost of this filter .
57
8
226,306
def add_filter ( self , package_filter ) : filters = self . filters + [ package_filter ] self . filters = sorted ( filters , key = lambda x : x . cost )
Add a filter to the list .
40
7
226,307
def copy ( self ) : other = PackageFilterList . __new__ ( PackageFilterList ) other . filters = [ x . copy ( ) for x in self . filters ] return other
Return a copy of the filter list .
39
8
226,308
def parse_rule ( cls , txt ) : types = { "glob" : GlobRule , "regex" : RegexRule , "range" : RangeRule , "before" : TimestampRule , "after" : TimestampRule } # parse form 'x(y)' into x, y label , txt = Rule . _parse_label ( txt ) if label is None : if '*' in txt : label = "glob" else : label = "range" elif label not in types : raise ConfigurationError ( "'%s' is not a valid package filter type" % label ) rule_cls = types [ label ] txt_ = "%s(%s)" % ( label , txt ) try : rule = rule_cls . _parse ( txt_ ) except Exception as e : raise ConfigurationError ( "Error parsing package filter '%s': %s: %s" % ( txt_ , e . __class__ . __name__ , str ( e ) ) ) return rule
Parse a rule from a string .
224
8
226,309
def read_bit ( self ) : if not self . bitcount : self . bits = ord ( self . input . read ( 1 ) ) self . bitcount = 8 result = ( self . bits & 1 ) == 1 self . bits >>= 1 self . bitcount -= 1 return result
Read a single boolean value .
61
6
226,310
def read_long ( self ) : self . bitcount = self . bits = 0 return unpack ( '>I' , self . input . read ( 4 ) ) [ 0 ]
Read an unsigned 32 - bit integer
39
7
226,311
def read_longlong ( self ) : self . bitcount = self . bits = 0 return unpack ( '>Q' , self . input . read ( 8 ) ) [ 0 ]
Read an unsigned 64 - bit integer
40
7
226,312
def read_float ( self ) : self . bitcount = self . bits = 0 return unpack ( '>d' , self . input . read ( 8 ) ) [ 0 ]
Read float value .
39
4
226,313
def read_shortstr ( self ) : self . bitcount = self . bits = 0 slen = unpack ( 'B' , self . input . read ( 1 ) ) [ 0 ] return self . input . read ( slen ) . decode ( 'utf-8' )
Read a short string that s stored in up to 255 bytes .
60
13
226,314
def _load_properties ( self , raw_bytes ) : r = AMQPReader ( raw_bytes ) # # Read 16-bit shorts until we get one with a low bit set to zero # flags = [ ] while 1 : flag_bits = r . read_short ( ) flags . append ( flag_bits ) if flag_bits & 1 == 0 : break shift = 0 d = { } for key , proptype in self . PROPERTIES : if shift == 0 : if not flags : break flag_bits , flags = flags [ 0 ] , flags [ 1 : ] shift = 15 if flag_bits & ( 1 << shift ) : d [ key ] = getattr ( r , 'read_' + proptype ) ( ) shift -= 1 self . properties = d
Given the raw bytes containing the property - flags and property - list from a content - frame - header parse and insert into a dictionary stored in this object as an attribute named properties .
170
36
226,315
def create_executable_script ( filepath , body , program = None ) : program = program or "python" if callable ( body ) : from rez . utils . sourcecode import SourceCode code = SourceCode ( func = body ) body = code . source if not body . endswith ( '\n' ) : body += '\n' with open ( filepath , 'w' ) as f : # TODO: make cross platform f . write ( "#!/usr/bin/env %s\n" % program ) f . write ( body ) # TODO: Although Windows supports os.chmod you can only set the readonly # flag. Setting the file readonly breaks the unit tests that expect to # clean up the files once the test has run. Temporarily we don't bother # setting the permissions, but this will need to change. if os . name == "posix" : os . chmod ( filepath , stat . S_IRUSR | stat . S_IRGRP | stat . S_IROTH | stat . S_IXUSR | stat . S_IXGRP | stat . S_IXOTH )
Create an executable script .
250
5
226,316
def create_forwarding_script ( filepath , module , func_name , * nargs , * * kwargs ) : doc = dict ( module = module , func_name = func_name ) if nargs : doc [ "nargs" ] = nargs if kwargs : doc [ "kwargs" ] = kwargs body = dump_yaml ( doc ) create_executable_script ( filepath , body , "_rez_fwd" )
Create a forwarding script .
102
5
226,317
def dedup ( seq ) : seen = set ( ) for item in seq : if item not in seen : seen . add ( item ) yield item
Remove duplicates from a list while keeping order .
31
10
226,318
def find_last_sublist ( list_ , sublist ) : for i in reversed ( range ( len ( list_ ) - len ( sublist ) + 1 ) ) : if list_ [ i ] == sublist [ 0 ] and list_ [ i : i + len ( sublist ) ] == sublist : return i return None
Given a list find the last occurance of a sublist within it .
72
15
226,319
def open ( self , section_index = 0 ) : uri = self . _sections [ section_index ] [ 1 ] if len ( uri . split ( ) ) == 1 : self . _open_url ( uri ) else : if self . _verbose : print "running command: %s" % uri p = popen ( uri , shell = True ) p . wait ( )
Launch a help section .
87
5
226,320
def print_info ( self , buf = None ) : buf = buf or sys . stdout print >> buf , "Sections:" for i , section in enumerate ( self . _sections ) : print >> buf , " %s:\t%s (%s)" % ( i + 1 , section [ 0 ] , section [ 1 ] )
Print help sections .
73
4
226,321
def set_servers ( self , servers ) : self . servers = [ _Host ( s , self . debug , dead_retry = self . dead_retry , socket_timeout = self . socket_timeout , flush_on_reconnect = self . flush_on_reconnect ) for s in servers ] self . _init_buckets ( )
Set the pool of servers used by this client .
77
10
226,322
def get_stats ( self , stat_args = None ) : data = [ ] for s in self . servers : if not s . connect ( ) : continue if s . family == socket . AF_INET : name = '%s:%s (%s)' % ( s . ip , s . port , s . weight ) elif s . family == socket . AF_INET6 : name = '[%s]:%s (%s)' % ( s . ip , s . port , s . weight ) else : name = 'unix:%s (%s)' % ( s . address , s . weight ) if not stat_args : s . send_cmd ( 'stats' ) else : s . send_cmd ( 'stats ' + stat_args ) serverData = { } data . append ( ( name , serverData ) ) readline = s . readline while 1 : line = readline ( ) if not line or line . strip ( ) in ( 'END' , 'RESET' ) : break stats = line . split ( ' ' , 2 ) serverData [ stats [ 1 ] ] = stats [ 2 ] return ( data )
Get statistics from each of the servers .
247
8
226,323
def delete_multi ( self , keys , time = 0 , key_prefix = '' ) : self . _statlog ( 'delete_multi' ) server_keys , prefixed_to_orig_key = self . _map_and_prefix_keys ( keys , key_prefix ) # send out all requests on each server before reading anything dead_servers = [ ] rc = 1 for server in server_keys . iterkeys ( ) : bigcmd = [ ] write = bigcmd . append if time != None : for key in server_keys [ server ] : # These are mangled keys write ( "delete %s %d\r\n" % ( key , time ) ) else : for key in server_keys [ server ] : # These are mangled keys write ( "delete %s\r\n" % key ) try : server . send_cmds ( '' . join ( bigcmd ) ) except socket . error , msg : rc = 0 if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) dead_servers . append ( server ) # if any servers died on the way, don't expect them to respond. for server in dead_servers : del server_keys [ server ] for server , keys in server_keys . iteritems ( ) : try : for key in keys : server . expect ( "DELETED" ) except socket . error , msg : if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) rc = 0 return rc
Delete multiple keys in the memcache doing just one query .
335
12
226,324
def delete ( self , key , time = 0 ) : if self . do_check_key : self . check_key ( key ) server , key = self . _get_server ( key ) if not server : return 0 self . _statlog ( 'delete' ) if time != None and time != 0 : cmd = "delete %s %d" % ( key , time ) else : cmd = "delete %s" % key try : server . send_cmd ( cmd ) line = server . readline ( ) if line and line . strip ( ) in [ 'DELETED' , 'NOT_FOUND' ] : return 1 self . debuglog ( 'Delete expected DELETED or NOT_FOUND, got: %s' % repr ( line ) ) except socket . error , msg : if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) return 0
Deletes a key from the memcache .
197
9
226,325
def add ( self , key , val , time = 0 , min_compress_len = 0 ) : return self . _set ( "add" , key , val , time , min_compress_len )
Add new key with value .
46
6
226,326
def append ( self , key , val , time = 0 , min_compress_len = 0 ) : return self . _set ( "append" , key , val , time , min_compress_len )
Append the value to the end of the existing key s value .
46
14
226,327
def prepend ( self , key , val , time = 0 , min_compress_len = 0 ) : return self . _set ( "prepend" , key , val , time , min_compress_len )
Prepend the value to the beginning of the existing key s value .
48
14
226,328
def replace ( self , key , val , time = 0 , min_compress_len = 0 ) : return self . _set ( "replace" , key , val , time , min_compress_len )
Replace existing key with value .
46
7
226,329
def set ( self , key , val , time = 0 , min_compress_len = 0 ) : return self . _set ( "set" , key , val , time , min_compress_len )
Unconditionally sets a key to a given value in the memcache .
46
15
226,330
def set_multi ( self , mapping , time = 0 , key_prefix = '' , min_compress_len = 0 ) : self . _statlog ( 'set_multi' ) server_keys , prefixed_to_orig_key = self . _map_and_prefix_keys ( mapping . iterkeys ( ) , key_prefix ) # send out all requests on each server before reading anything dead_servers = [ ] notstored = [ ] # original keys. for server in server_keys . iterkeys ( ) : bigcmd = [ ] write = bigcmd . append try : for key in server_keys [ server ] : # These are mangled keys store_info = self . _val_to_store_info ( mapping [ prefixed_to_orig_key [ key ] ] , min_compress_len ) if store_info : write ( "set %s %d %d %d\r\n%s\r\n" % ( key , store_info [ 0 ] , time , store_info [ 1 ] , store_info [ 2 ] ) ) else : notstored . append ( prefixed_to_orig_key [ key ] ) server . send_cmds ( '' . join ( bigcmd ) ) except socket . error , msg : if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) dead_servers . append ( server ) # if any servers died on the way, don't expect them to respond. for server in dead_servers : del server_keys [ server ] # short-circuit if there are no servers, just return all keys if not server_keys : return ( mapping . keys ( ) ) for server , keys in server_keys . iteritems ( ) : try : for key in keys : if server . readline ( ) == 'STORED' : continue else : notstored . append ( prefixed_to_orig_key [ key ] ) #un-mangle. except ( _Error , socket . error ) , msg : if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) return notstored
Sets multiple keys in the memcache doing just one query .
472
13
226,331
def get_multi ( self , keys , key_prefix = '' ) : self . _statlog ( 'get_multi' ) server_keys , prefixed_to_orig_key = self . _map_and_prefix_keys ( keys , key_prefix ) # send out all requests on each server before reading anything dead_servers = [ ] for server in server_keys . iterkeys ( ) : try : server . send_cmd ( "get %s" % " " . join ( server_keys [ server ] ) ) except socket . error , msg : if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) dead_servers . append ( server ) # if any servers died on the way, don't expect them to respond. for server in dead_servers : del server_keys [ server ] retvals = { } for server in server_keys . iterkeys ( ) : try : line = server . readline ( ) while line and line != 'END' : rkey , flags , rlen = self . _expectvalue ( server , line ) # Bo Yang reports that this can sometimes be None if rkey is not None : val = self . _recv_value ( server , flags , rlen ) retvals [ prefixed_to_orig_key [ rkey ] ] = val # un-prefix returned key. line = server . readline ( ) except ( _Error , socket . error ) , msg : if isinstance ( msg , tuple ) : msg = msg [ 1 ] server . mark_dead ( msg ) return retvals
Retrieves multiple keys from the memcache doing just one query .
345
14
226,332
def readline ( self , raise_exception = False ) : buf = self . buffer if self . socket : recv = self . socket . recv else : recv = lambda bufsize : '' while True : index = buf . find ( '\r\n' ) if index >= 0 : break data = recv ( 4096 ) if not data : # connection close, let's kill it and raise self . mark_dead ( 'connection closed in readline()' ) if raise_exception : raise _ConnectionDeadError ( ) else : return '' buf += data self . buffer = buf [ index + 2 : ] return buf [ : index ]
Read a line and return it . If raise_exception is set raise _ConnectionDeadError if the read fails otherwise return an empty string .
138
29
226,333
def create_repository ( cls , repository_data ) : location = "memory{%s}" % hex ( id ( repository_data ) ) resource_pool = ResourcePool ( cache_size = None ) repo = MemoryPackageRepository ( location , resource_pool ) repo . data = repository_data return repo
Create a standalone in - memory repository .
68
8
226,334
def read_file ( self , fileob ) : msg = message_from_file ( fileob ) self . _fields [ 'Metadata-Version' ] = msg [ 'metadata-version' ] # When reading, get all the fields we can for field in _ALL_FIELDS : if field not in msg : continue if field in _LISTFIELDS : # we can have multiple lines values = msg . get_all ( field ) if field in _LISTTUPLEFIELDS and values is not None : values = [ tuple ( value . split ( ',' ) ) for value in values ] self . set ( field , values ) else : # single line value = msg [ field ] if value is not None and value != 'UNKNOWN' : self . set ( field , value ) self . set_metadata_version ( )
Read the metadata values from a file object .
179
9
226,335
def check ( self , strict = False ) : self . set_metadata_version ( ) # XXX should check the versions (if the file was loaded) missing , warnings = [ ] , [ ] for attr in ( 'Name' , 'Version' ) : # required by PEP 345 if attr not in self : missing . append ( attr ) if strict and missing != [ ] : msg = 'missing required metadata: %s' % ', ' . join ( missing ) raise MetadataMissingError ( msg ) for attr in ( 'Home-page' , 'Author' ) : if attr not in self : missing . append ( attr ) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self [ 'Metadata-Version' ] != '1.2' : return missing , warnings scheme = get_scheme ( self . scheme ) def are_valid_constraints ( value ) : for v in value : if not scheme . is_valid_matcher ( v . split ( ';' ) [ 0 ] ) : return False return True for fields , controller in ( ( _PREDICATE_FIELDS , are_valid_constraints ) , ( _VERSIONS_FIELDS , scheme . is_valid_constraint_list ) , ( _VERSION_FIELDS , scheme . is_valid_version ) ) : for field in fields : value = self . get ( field , None ) if value is not None and not controller ( value ) : warnings . append ( 'Wrong value for %r: %s' % ( field , value ) ) return missing , warnings
Check if the metadata is compliant . If strict is True then raise if no Name or Version are provided
354
20
226,336
def create_pane ( widgets , horizontal , parent_widget = None , compact = False , compact_spacing = 2 ) : pane = parent_widget or QtGui . QWidget ( ) type_ = QtGui . QHBoxLayout if horizontal else QtGui . QVBoxLayout layout = type_ ( ) if compact : layout . setSpacing ( compact_spacing ) layout . setContentsMargins ( compact_spacing , compact_spacing , compact_spacing , compact_spacing ) for widget in widgets : stretch = 0 if isinstance ( widget , tuple ) : widget , stretch = widget if isinstance ( widget , int ) : layout . addSpacing ( widget ) elif widget : layout . addWidget ( widget , stretch ) else : layout . addStretch ( ) pane . setLayout ( layout ) return pane
Create a widget containing an aligned set of widgets .
180
10
226,337
def get_icon ( name , as_qicon = False ) : filename = name + ".png" icon = icons . get ( filename ) if not icon : path = os . path . dirname ( __file__ ) path = os . path . join ( path , "icons" ) filepath = os . path . join ( path , filename ) if not os . path . exists ( filepath ) : filepath = os . path . join ( path , "pink.png" ) icon = QtGui . QPixmap ( filepath ) icons [ filename ] = icon return QtGui . QIcon ( icon ) if as_qicon else icon
Returns a QPixmap containing the given image or a QIcon if as_qicon is True
141
21
226,338
def interp_color ( a , b , f ) : a_ = ( a . redF ( ) , a . greenF ( ) , a . blueF ( ) ) b_ = ( b . redF ( ) , b . greenF ( ) , b . blueF ( ) ) a_ = [ x * ( 1 - f ) for x in a_ ] b_ = [ x * f for x in b_ ] c = [ x + y for x , y in zip ( a_ , b_ ) ] return QtGui . QColor . fromRgbF ( * c )
Interpolate between two colors .
129
7
226,339
def create_toolbutton ( entries , parent = None ) : btn = QtGui . QToolButton ( parent ) menu = QtGui . QMenu ( ) actions = [ ] for label , slot in entries : action = add_menu_action ( menu , label , slot ) actions . append ( action ) btn . setPopupMode ( QtGui . QToolButton . MenuButtonPopup ) btn . setDefaultAction ( actions [ 0 ] ) btn . setMenu ( menu ) return btn , actions
Create a toolbutton .
113
5
226,340
def get_page ( self , url ) : # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme , netloc , path , _ , _ , _ = urlparse ( url ) if scheme == 'file' and os . path . isdir ( url2pathname ( path ) ) : url = urljoin ( ensure_slash ( url ) , 'index.html' ) if url in self . _page_cache : result = self . _page_cache [ url ] logger . debug ( 'Returning %s from cache: %s' , url , result ) else : host = netloc . split ( ':' , 1 ) [ 0 ] result = None if host in self . _bad_hosts : logger . debug ( 'Skipping %s due to bad host %s' , url , host ) else : req = Request ( url , headers = { 'Accept-encoding' : 'identity' } ) try : logger . debug ( 'Fetching %s' , url ) resp = self . opener . open ( req , timeout = self . timeout ) logger . debug ( 'Fetched %s' , url ) headers = resp . info ( ) content_type = headers . get ( 'Content-Type' , '' ) if HTML_CONTENT_TYPE . match ( content_type ) : final_url = resp . geturl ( ) data = resp . read ( ) encoding = headers . get ( 'Content-Encoding' ) if encoding : decoder = self . decoders [ encoding ] # fail if not found data = decoder ( data ) encoding = 'utf-8' m = CHARSET . search ( content_type ) if m : encoding = m . group ( 1 ) try : data = data . decode ( encoding ) except UnicodeError : data = data . decode ( 'latin-1' ) # fallback result = Page ( data , final_url ) self . _page_cache [ final_url ] = result except HTTPError as e : if e . code != 404 : logger . exception ( 'Fetch failed: %s: %s' , url , e ) except URLError as e : logger . exception ( 'Fetch failed: %s: %s' , url , e ) with self . _lock : self . _bad_hosts . add ( host ) except Exception as e : logger . exception ( 'Fetch failed: %s: %s' , url , e ) finally : self . _page_cache [ url ] = result # even if None (failure) return result
Get the HTML for an URL possibly from an in - memory cache .
559
14
226,341
def get_reverse_dependency_tree ( package_name , depth = None , paths = None , build_requires = False , private_build_requires = False ) : pkgs_list = [ [ package_name ] ] g = digraph ( ) g . add_node ( package_name ) # build reverse lookup it = iter_package_families ( paths ) package_names = set ( x . name for x in it ) if package_name not in package_names : raise PackageFamilyNotFoundError ( "No such package family %r" % package_name ) if depth == 0 : return pkgs_list , g bar = ProgressBar ( "Searching" , len ( package_names ) ) lookup = defaultdict ( set ) for i , package_name_ in enumerate ( package_names ) : it = iter_packages ( name = package_name_ , paths = paths ) packages = list ( it ) if not packages : continue pkg = max ( packages , key = lambda x : x . version ) requires = [ ] for variant in pkg . iter_variants ( ) : pbr = ( private_build_requires and pkg . name == package_name ) requires += variant . get_requires ( build_requires = build_requires , private_build_requires = pbr ) for req in requires : if not req . conflict : lookup [ req . name ] . add ( package_name_ ) bar . next ( ) bar . finish ( ) # perform traversal n = 0 consumed = set ( [ package_name ] ) working_set = set ( [ package_name ] ) node_color = "#F6F6F6" node_fontsize = 10 node_attrs = [ ( "fillcolor" , node_color ) , ( "style" , "filled" ) , ( "fontsize" , node_fontsize ) ] while working_set and ( depth is None or n < depth ) : working_set_ = set ( ) for child in working_set : parents = lookup [ child ] - consumed working_set_ . update ( parents ) consumed . update ( parents ) for parent in parents : g . add_node ( parent , attrs = node_attrs ) g . add_edge ( ( parent , child ) ) if working_set_ : pkgs_list . append ( sorted ( list ( working_set_ ) ) ) working_set = working_set_ n += 1 return pkgs_list , g
Find packages that depend on the given package .
534
9
226,342
def get_plugins ( package_name , paths = None ) : pkg = get_latest_package ( package_name , paths = paths , error = True ) if not pkg . has_plugins : return [ ] it = iter_package_families ( paths ) package_names = set ( x . name for x in it ) bar = ProgressBar ( "Searching" , len ( package_names ) ) plugin_pkgs = [ ] for package_name_ in package_names : bar . next ( ) if package_name_ == package_name : continue # not a plugin of itself plugin_pkg = get_latest_package ( package_name_ , paths = paths ) if not plugin_pkg . plugin_for : continue for plugin_for in plugin_pkg . plugin_for : if plugin_for == pkg . name : plugin_pkgs . append ( package_name_ ) bar . finish ( ) return plugin_pkgs
Find packages that are plugins of the given package .
203
10
226,343
def search ( self , resources_request = None ) : # Find matching package families name_pattern , version_range = self . _parse_request ( resources_request ) family_names = set ( x . name for x in iter_package_families ( paths = self . package_paths ) if fnmatch . fnmatch ( x . name , name_pattern ) ) family_names = sorted ( family_names ) # determine what type of resource we're searching for if self . resource_type : resource_type = self . resource_type elif version_range or len ( family_names ) == 1 : resource_type = "package" else : resource_type = "family" if not family_names : return resource_type , [ ] # return list of family names (validation is n/a in this case) if resource_type == "family" : results = [ ResourceSearchResult ( x , "family" ) for x in family_names ] return "family" , results results = [ ] # iterate over packages/variants for name in family_names : it = iter_packages ( name , version_range , paths = self . package_paths ) packages = sorted ( it , key = lambda x : x . version ) if self . latest and packages : packages = [ packages [ - 1 ] ] for package in packages : # validate and check time (accessing timestamp may cause # validation fail) try : if package . timestamp : if self . after_time and package . timestamp < self . after_time : continue if self . before_time and package . timestamp >= self . before_time : continue if self . validate : package . validate_data ( ) except ResourceContentError as e : if resource_type == "package" : result = ResourceSearchResult ( package , "package" , str ( e ) ) results . append ( result ) continue if resource_type == "package" : result = ResourceSearchResult ( package , "package" ) results . append ( result ) continue # iterate variants try : for variant in package . iter_variants ( ) : if self . validate : try : variant . validate_data ( ) except ResourceContentError as e : result = ResourceSearchResult ( variant , "variant" , str ( e ) ) results . 
append ( result ) continue result = ResourceSearchResult ( variant , "variant" ) results . append ( result ) except ResourceContentError : # this may happen if 'variants' in package is malformed continue return resource_type , results
Search for resources .
535
4
226,344
def print_search_results ( self , search_results , buf = sys . stdout ) : formatted_lines = self . format_search_results ( search_results ) pr = Printer ( buf ) for txt , style in formatted_lines : pr ( txt , style )
Print formatted search results .
61
5
226,345
def format_search_results ( self , search_results ) : formatted_lines = [ ] for search_result in search_results : lines = self . _format_search_result ( search_result ) formatted_lines . extend ( lines ) return formatted_lines
Format search results .
56
4
226,346
def read ( string ) : dotG = pydot . graph_from_dot_data ( string ) if ( dotG . get_type ( ) == "graph" ) : G = graph ( ) elif ( dotG . get_type ( ) == "digraph" ) : G = digraph ( ) elif ( dotG . get_type ( ) == "hypergraph" ) : return read_hypergraph ( string ) else : raise InvalidGraphType # Read nodes... # Note: If the nodes aren't explicitly listed, they need to be for each_node in dotG . get_nodes ( ) : G . add_node ( each_node . get_name ( ) ) for each_attr_key , each_attr_val in each_node . get_attributes ( ) . items ( ) : G . add_node_attribute ( each_node . get_name ( ) , ( each_attr_key , each_attr_val ) ) # Read edges... for each_edge in dotG . get_edges ( ) : # Check if the nodes have been added if not G . has_node ( each_edge . get_source ( ) ) : G . add_node ( each_edge . get_source ( ) ) if not G . has_node ( each_edge . get_destination ( ) ) : G . add_node ( each_edge . get_destination ( ) ) # See if there's a weight if 'weight' in each_edge . get_attributes ( ) . keys ( ) : _wt = each_edge . get_attributes ( ) [ 'weight' ] else : _wt = 1 # See if there is a label if 'label' in each_edge . get_attributes ( ) . keys ( ) : _label = each_edge . get_attributes ( ) [ 'label' ] else : _label = '' G . add_edge ( ( each_edge . get_source ( ) , each_edge . get_destination ( ) ) , wt = _wt , label = _label ) for each_attr_key , each_attr_val in each_edge . get_attributes ( ) . items ( ) : if not each_attr_key in [ 'weight' , 'label' ] : G . add_edge_attribute ( ( each_edge . get_source ( ) , each_edge . get_destination ( ) ) , ( each_attr_key , each_attr_val ) ) return G
Read a graph from a string in Dot language and return it . Nodes and edges specified in the input will be added to the current graph .
544
29
226,347
def read_hypergraph ( string ) : hgr = hypergraph ( ) dotG = pydot . graph_from_dot_data ( string ) # Read the hypernode nodes... # Note 1: We need to assume that all of the nodes are listed since we need to know if they # are a hyperedge or a normal node # Note 2: We should read in all of the nodes before putting in the links for each_node in dotG . get_nodes ( ) : if 'hypernode' == each_node . get ( 'hyper_node_type' ) : hgr . add_node ( each_node . get_name ( ) ) elif 'hyperedge' == each_node . get ( 'hyper_node_type' ) : hgr . add_hyperedge ( each_node . get_name ( ) ) # Now read in the links to connect the hyperedges for each_link in dotG . get_edges ( ) : if hgr . has_node ( each_link . get_source ( ) ) : link_hypernode = each_link . get_source ( ) link_hyperedge = each_link . get_destination ( ) elif hgr . has_node ( each_link . get_destination ( ) ) : link_hypernode = each_link . get_destination ( ) link_hyperedge = each_link . get_source ( ) hgr . link ( link_hypernode , link_hyperedge ) return hgr
Read a hypergraph from a string in dot format . Nodes and edges specified in the input will be added to the current hypergraph .
335
28
226,348
def graph_from_dot_file ( path ) : fd = file ( path , 'rb' ) data = fd . read ( ) fd . close ( ) return graph_from_dot_data ( data )
Load graph as defined by a DOT file . The file is assumed to be in DOT format . It will be loaded parsed and a Dot class will be returned representing the graph .
48
35
226,349
def __find_executables ( path ) : success = False progs = { 'dot' : '' , 'twopi' : '' , 'neato' : '' , 'circo' : '' , 'fdp' : '' , 'sfdp' : '' } was_quoted = False path = path . strip ( ) if path . startswith ( '"' ) and path . endswith ( '"' ) : path = path [ 1 : - 1 ] was_quoted = True if os . path . isdir ( path ) : for prg in progs . iterkeys ( ) : if progs [ prg ] : continue if os . path . exists ( os . path . join ( path , prg ) ) : if was_quoted : progs [ prg ] = '"' + os . path . join ( path , prg ) + '"' else : progs [ prg ] = os . path . join ( path , prg ) success = True elif os . path . exists ( os . path . join ( path , prg + '.exe' ) ) : if was_quoted : progs [ prg ] = '"' + os . path . join ( path , prg + '.exe' ) + '"' else : progs [ prg ] = os . path . join ( path , prg + '.exe' ) success = True if success : return progs else : return None
Used by find_graphviz path - single directory as a string If any of the executables are found it will return a dictionary containing the program names as keys and their paths as values . Otherwise returns None
312
42
226,350
def to_string ( self ) : src = self . parse_node_ref ( self . get_source ( ) ) dst = self . parse_node_ref ( self . get_destination ( ) ) if isinstance ( src , frozendict ) : edge = [ Subgraph ( obj_dict = src ) . to_string ( ) ] elif isinstance ( src , ( int , long ) ) : edge = [ str ( src ) ] else : edge = [ src ] if ( self . get_parent_graph ( ) and self . get_parent_graph ( ) . get_top_graph_type ( ) and self . get_parent_graph ( ) . get_top_graph_type ( ) == 'digraph' ) : edge . append ( '->' ) else : edge . append ( '--' ) if isinstance ( dst , frozendict ) : edge . append ( Subgraph ( obj_dict = dst ) . to_string ( ) ) elif isinstance ( dst , ( int , long ) ) : edge . append ( str ( dst ) ) else : edge . append ( dst ) edge_attr = list ( ) for attr , value in self . obj_dict [ 'attributes' ] . iteritems ( ) : if value is not None : edge_attr . append ( '%s=%s' % ( attr , quote_if_necessary ( value ) ) ) else : edge_attr . append ( attr ) edge_attr = ', ' . join ( edge_attr ) if edge_attr : edge . append ( ' [' + edge_attr + ']' ) return ' ' . join ( edge ) + ';'
Returns a string representation of the edge in dot language .
363
11
226,351
def get_node ( self , name ) : match = list ( ) if self . obj_dict [ 'nodes' ] . has_key ( name ) : match . extend ( [ Node ( obj_dict = obj_dict ) for obj_dict in self . obj_dict [ 'nodes' ] [ name ] ] ) return match
Retrieve a node from the graph . Given a node s name the corresponding Node instance will be returned . If one or more nodes exist with that name a list of Node instances is returned . An empty list is returned otherwise .
73
45
226,352
def add_edge ( self , graph_edge ) : if not isinstance ( graph_edge , Edge ) : raise TypeError ( 'add_edge() received a non edge class object: ' + str ( graph_edge ) ) edge_points = ( graph_edge . get_source ( ) , graph_edge . get_destination ( ) ) if self . obj_dict [ 'edges' ] . has_key ( edge_points ) : edge_list = self . obj_dict [ 'edges' ] [ edge_points ] edge_list . append ( graph_edge . obj_dict ) else : self . obj_dict [ 'edges' ] [ edge_points ] = [ graph_edge . obj_dict ] graph_edge . set_sequence ( self . get_next_sequence_number ( ) ) graph_edge . set_parent_graph ( self . get_parent_graph ( ) )
Adds an edge object to the graph . It takes a edge object as its only argument and returns None .
199
21
226,353
def add_subgraph ( self , sgraph ) : if not isinstance ( sgraph , Subgraph ) and not isinstance ( sgraph , Cluster ) : raise TypeError ( 'add_subgraph() received a non subgraph class object:' + str ( sgraph ) ) if self . obj_dict [ 'subgraphs' ] . has_key ( sgraph . get_name ( ) ) : sgraph_list = self . obj_dict [ 'subgraphs' ] [ sgraph . get_name ( ) ] sgraph_list . append ( sgraph . obj_dict ) else : self . obj_dict [ 'subgraphs' ] [ sgraph . get_name ( ) ] = [ sgraph . obj_dict ] sgraph . set_sequence ( self . get_next_sequence_number ( ) ) sgraph . set_parent_graph ( self . get_parent_graph ( ) )
Adds an subgraph object to the graph . It takes a subgraph object as its only argument and returns None .
199
23
226,354
def to_string ( self ) : graph = list ( ) if self . obj_dict . get ( 'strict' , None ) is not None : if self == self . get_parent_graph ( ) and self . obj_dict [ 'strict' ] : graph . append ( 'strict ' ) if self . obj_dict [ 'name' ] == '' : if 'show_keyword' in self . obj_dict and self . obj_dict [ 'show_keyword' ] : graph . append ( 'subgraph {\n' ) else : graph . append ( '{\n' ) else : graph . append ( '%s %s {\n' % ( self . obj_dict [ 'type' ] , self . obj_dict [ 'name' ] ) ) for attr in self . obj_dict [ 'attributes' ] . iterkeys ( ) : if self . obj_dict [ 'attributes' ] . get ( attr , None ) is not None : val = self . obj_dict [ 'attributes' ] . get ( attr ) if val is not None : graph . append ( '%s=%s' % ( attr , quote_if_necessary ( val ) ) ) else : graph . append ( attr ) graph . append ( ';\n' ) edges_done = set ( ) edge_obj_dicts = list ( ) for e in self . obj_dict [ 'edges' ] . itervalues ( ) : edge_obj_dicts . extend ( e ) if edge_obj_dicts : edge_src_set , edge_dst_set = zip ( * [ obj [ 'points' ] for obj in edge_obj_dicts ] ) edge_src_set , edge_dst_set = set ( edge_src_set ) , set ( edge_dst_set ) else : edge_src_set , edge_dst_set = set ( ) , set ( ) node_obj_dicts = list ( ) for e in self . obj_dict [ 'nodes' ] . itervalues ( ) : node_obj_dicts . extend ( e ) sgraph_obj_dicts = list ( ) for sg in self . obj_dict [ 'subgraphs' ] . itervalues ( ) : sgraph_obj_dicts . extend ( sg ) obj_list = [ ( obj [ 'sequence' ] , obj ) for obj in ( edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts ) ] obj_list . sort ( ) for idx , obj in obj_list : if obj [ 'type' ] == 'node' : node = Node ( obj_dict = obj ) if self . obj_dict . get ( 'suppress_disconnected' , False ) : if ( node . get_name ( ) not in edge_src_set and node . get_name ( ) not in edge_dst_set ) : continue graph . append ( node . to_string ( ) + '\n' ) elif obj [ 'type' ] == 'edge' : edge = Edge ( obj_dict = obj ) if self . obj_dict . 
get ( 'simplify' , False ) and edge in edges_done : continue graph . append ( edge . to_string ( ) + '\n' ) edges_done . add ( edge ) else : sgraph = Subgraph ( obj_dict = obj ) graph . append ( sgraph . to_string ( ) + '\n' ) graph . append ( '}\n' ) return '' . join ( graph )
Returns a string representation of the graph in dot language . It will return the graph and all its subelements in string from .
795
26
226,355
def create ( self , prog = None , format = 'ps' ) : if prog is None : prog = self . prog if isinstance ( prog , ( list , tuple ) ) : prog , args = prog [ 0 ] , prog [ 1 : ] else : args = [ ] if self . progs is None : self . progs = find_graphviz ( ) if self . progs is None : raise InvocationException ( 'GraphViz\'s executables not found' ) if not self . progs . has_key ( prog ) : raise InvocationException ( 'GraphViz\'s executable "%s" not found' % prog ) if not os . path . exists ( self . progs [ prog ] ) or not os . path . isfile ( self . progs [ prog ] ) : raise InvocationException ( 'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self . progs [ prog ] ) tmp_fd , tmp_name = tempfile . mkstemp ( ) os . close ( tmp_fd ) self . write ( tmp_name ) tmp_dir = os . path . dirname ( tmp_name ) # For each of the image files... # for img in self . shape_files : # Get its data # f = file ( img , 'rb' ) f_data = f . read ( ) f . close ( ) # And copy it under a file with the same name in the temporary directory # f = file ( os . path . join ( tmp_dir , os . path . basename ( img ) ) , 'wb' ) f . write ( f_data ) f . close ( ) cmdline = [ self . progs [ prog ] , '-T' + format , tmp_name ] + args p = subprocess . Popen ( cmdline , cwd = tmp_dir , stderr = subprocess . PIPE , stdout = subprocess . PIPE ) stderr = p . stderr stdout = p . stdout stdout_output = list ( ) while True : data = stdout . read ( ) if not data : break stdout_output . append ( data ) stdout . close ( ) stdout_output = '' . join ( stdout_output ) if not stderr . closed : stderr_output = list ( ) while True : data = stderr . read ( ) if not data : break stderr_output . append ( data ) stderr . close ( ) if stderr_output : stderr_output = '' . join ( stderr_output ) #pid, status = os.waitpid(p.pid, 0) status = p . wait ( ) if status != 0 : raise InvocationException ( 'Program terminated with status: %d. 
stderr follows: %s' % ( status , stderr_output ) ) elif stderr_output : print stderr_output # For each of the image files... # for img in self . shape_files : # remove it # os . unlink ( os . path . join ( tmp_dir , os . path . basename ( img ) ) ) os . unlink ( tmp_name ) return stdout_output
Creates and returns a Postscript representation of the graph .
703
12
226,356
def dump_yaml ( data , Dumper = _Dumper , default_flow_style = False ) : content = yaml . dump ( data , default_flow_style = default_flow_style , Dumper = Dumper ) return content . strip ( )
Returns data as yaml - formatted string .
57
9
226,357
def load_yaml ( filepath ) : with open ( filepath ) as f : txt = f . read ( ) return yaml . load ( txt )
Convenience function for loading yaml - encoded data from disk .
36
14
226,358
def memcached_client ( servers = config . memcached_uri , debug = config . debug_memcache ) : key = None try : client , key = scoped_instance_manager . acquire ( servers , debug = debug ) yield client finally : if key : scoped_instance_manager . release ( key )
Get a shared memcached instance .
69
8
226,359
def pool_memcached_connections ( func ) : if isgeneratorfunction ( func ) : def wrapper ( * nargs , * * kwargs ) : with memcached_client ( ) : for result in func ( * nargs , * * kwargs ) : yield result else : def wrapper ( * nargs , * * kwargs ) : with memcached_client ( ) : return func ( * nargs , * * kwargs ) return update_wrapper ( wrapper , func )
Function decorator to pool memcached connections .
110
10
226,360
def memcached ( servers , key = None , from_cache = None , to_cache = None , time = 0 , min_compress_len = 0 , debug = False ) : def default_key ( func , * nargs , * * kwargs ) : parts = [ func . __module__ ] argnames = getargspec ( func ) . args if argnames : if argnames [ 0 ] == "cls" : cls_ = nargs [ 0 ] parts . append ( cls_ . __name__ ) nargs = nargs [ 1 : ] elif argnames [ 0 ] == "self" : cls_ = nargs [ 0 ] . __class__ parts . append ( cls_ . __name__ ) nargs = nargs [ 1 : ] parts . append ( func . __name__ ) value = ( '.' . join ( parts ) , nargs , tuple ( sorted ( kwargs . items ( ) ) ) ) # make sure key is hashable. We don't strictly need it to be, but this # is a way of hopefully avoiding object types that are not ordered (these # would give an unreliable key). If you need to key on unhashable args, # you should provide your own `key` functor. _ = hash ( value ) return repr ( value ) def identity ( value , * nargs , * * kwargs ) : return value from_cache = from_cache or identity to_cache = to_cache or identity def decorator ( func ) : if servers : def wrapper ( * nargs , * * kwargs ) : with memcached_client ( servers , debug = debug ) as client : if key : cache_key = key ( * nargs , * * kwargs ) else : cache_key = default_key ( func , * nargs , * * kwargs ) # get result = client . get ( cache_key ) if result is not client . miss : return from_cache ( result , * nargs , * * kwargs ) # cache miss - run target function result = func ( * nargs , * * kwargs ) if isinstance ( result , DoNotCache ) : return result . result # store cache_result = to_cache ( result , * nargs , * * kwargs ) client . set ( key = cache_key , val = cache_result , time = time , min_compress_len = min_compress_len ) return result else : def wrapper ( * nargs , * * kwargs ) : result = func ( * nargs , * * kwargs ) if isinstance ( result , DoNotCache ) : return result . result return result def forget ( ) : """Forget entries in the cache. 
Note that this does not delete entries from a memcached server - that would be slow and error-prone. Calling this function only ensures that entries set by the current process will no longer be seen during this process. """ with memcached_client ( servers , debug = debug ) as client : client . flush ( ) wrapper . forget = forget wrapper . __wrapped__ = func return update_wrapper ( wrapper , func ) return decorator
memcached memoization function decorator .
683
9
226,361
def client ( self ) : if self . _client is None : self . _client = Client_ ( self . servers ) return self . _client
Get the native memcache client .
31
7
226,362
def flush ( self , hard = False ) : if not self . servers : return if hard : self . client . flush_all ( ) self . reset_stats ( ) else : from uuid import uuid4 tag = uuid4 ( ) . hex if self . debug : tag = "flushed" + tag self . current = tag
Drop existing entries from the cache .
72
7
226,363
def depth_first_search ( graph , root = None , filter = null ( ) ) : recursionlimit = getrecursionlimit ( ) setrecursionlimit ( max ( len ( graph . nodes ( ) ) * 2 , recursionlimit ) ) def dfs ( node ) : """ Depth-first search subfunction. """ visited [ node ] = 1 pre . append ( node ) # Explore recursively the connected component for each in graph [ node ] : if ( each not in visited and filter ( each , node ) ) : spanning_tree [ each ] = node dfs ( each ) post . append ( node ) visited = { } # List for marking visited and non-visited nodes spanning_tree = { } # Spanning tree pre = [ ] # Graph's preordering post = [ ] # Graph's postordering filter . configure ( graph , spanning_tree ) # DFS from one node only if ( root is not None ) : if filter ( root , None ) : spanning_tree [ root ] = None dfs ( root ) setrecursionlimit ( recursionlimit ) return spanning_tree , pre , post # Algorithm loop for each in graph : # Select a non-visited node if ( each not in visited and filter ( each , None ) ) : spanning_tree [ each ] = None # Explore node's connected component dfs ( each ) setrecursionlimit ( recursionlimit ) return ( spanning_tree , pre , post )
Depth - first search .
307
5
226,364
def breadth_first_search ( graph , root = None , filter = null ( ) ) : def bfs ( ) : """ Breadth-first search subfunction. """ while ( queue != [ ] ) : node = queue . pop ( 0 ) for other in graph [ node ] : if ( other not in spanning_tree and filter ( other , node ) ) : queue . append ( other ) ordering . append ( other ) spanning_tree [ other ] = node queue = [ ] # Visiting queue spanning_tree = { } # Spanning tree ordering = [ ] filter . configure ( graph , spanning_tree ) # BFS from one node only if ( root is not None ) : if filter ( root , None ) : queue . append ( root ) ordering . append ( root ) spanning_tree [ root ] = None bfs ( ) return spanning_tree , ordering # Algorithm for each in graph : if ( each not in spanning_tree ) : if filter ( each , None ) : queue . append ( each ) ordering . append ( each ) spanning_tree [ each ] = None bfs ( ) return spanning_tree , ordering
Breadth - first search .
238
7
226,365
def normalize_variables ( cls , variables ) : # if the version is False, empty string, etc, throw it out if variables . get ( 'version' , True ) in ( '' , False , '_NO_VERSION' , None ) : del variables [ 'version' ] return super ( PackageResource , cls ) . normalize_variables ( variables )
Make sure version is treated consistently
80
6
226,366
def push_source ( self , newstream , newfile = None ) : if isinstance ( newstream , basestring ) : newstream = StringIO ( newstream ) self . filestack . appendleft ( ( self . infile , self . instream , self . lineno ) ) self . infile = newfile self . instream = newstream self . lineno = 1
Push an input source onto the lexer s input source stack .
83
13
226,367
def error_leader ( self , infile = None , lineno = None ) : if infile is None : infile = self . infile if lineno is None : lineno = self . lineno return "\"%s\", line %d: " % ( infile , lineno )
Emit a C - compiler - like Emacs - friendly error - message leader .
63
16
226,368
def rez_bin_path ( self ) : binpath = None if sys . argv and sys . argv [ 0 ] : executable = sys . argv [ 0 ] path = which ( "rezolve" , env = { "PATH" : os . path . dirname ( executable ) , "PATHEXT" : os . environ . get ( "PATHEXT" , "" ) } ) binpath = os . path . dirname ( path ) if path else None # TODO: improve this, could still pick up non-production 'rezolve' if not binpath : path = which ( "rezolve" ) if path : binpath = os . path . dirname ( path ) if binpath : validation_file = os . path . join ( binpath , ".rez_production_install" ) if os . path . exists ( validation_file ) : return os . path . realpath ( binpath ) return None
Get path containing rez binaries or None if no binaries are available or Rez is not a production install .
198
21
226,369
def get_summary_string ( self ) : from rez . plugin_managers import plugin_manager txt = "Rez %s" % __version__ txt += "\n\n%s" % plugin_manager . get_summary_string ( ) return txt
Get a string summarising the state of Rez as a whole .
60
13
226,370
def clear_caches ( self , hard = False ) : from rez . package_repository import package_repository_manager from rez . utils . memcached import memcached_client package_repository_manager . clear_caches ( ) if hard : with memcached_client ( ) as client : client . flush ( )
Clear all caches in Rez .
79
6
226,371
def solve ( self ) : with log_duration ( self . _print , "memcache get (resolve) took %s" ) : solver_dict = self . _get_cached_solve ( ) if solver_dict : self . from_cache = True self . _set_result ( solver_dict ) else : self . from_cache = False solver = self . _solve ( ) solver_dict = self . _solver_to_dict ( solver ) self . _set_result ( solver_dict ) with log_duration ( self . _print , "memcache set (resolve) took %s" ) : self . _set_cached_solve ( solver_dict )
Perform the solve .
161
5
226,372
def _set_cached_solve ( self , solver_dict ) : if self . status_ != ResolverStatus . solved : return # don't cache failed solves if not ( self . caching and self . memcached_servers ) : return # most recent release times get stored with solve result in the cache releases_since_solve = False release_times_dict = { } variant_states_dict = { } for variant in self . resolved_packages_ : time_ = get_last_release_time ( variant . name , self . package_paths ) # don't cache if a release time isn't known if time_ == 0 : self . _print ( "Did not send memcache key: a repository could " "not provide a most recent release time for %r" , variant . name ) return if self . timestamp and self . timestamp < time_ : releases_since_solve = True release_times_dict [ variant . name ] = time_ repo = variant . resource . _repository variant_states_dict [ variant . name ] = repo . get_variant_state_handle ( variant . resource ) timestamped = ( self . timestamp and releases_since_solve ) key = self . _memcache_key ( timestamped = timestamped ) data = ( solver_dict , release_times_dict , variant_states_dict ) with self . _memcached_client ( ) as client : client . set ( key , data ) self . _print ( "Sent memcache key: %r" , key )
Store a solve to memcached .
338
8
226,373
def _memcache_key ( self , timestamped = False ) : request = tuple ( map ( str , self . package_requests ) ) repo_ids = [ ] for path in self . package_paths : repo = package_repository_manager . get_repository ( path ) repo_ids . append ( repo . uid ) t = [ "resolve" , request , tuple ( repo_ids ) , self . package_filter_hash , self . package_orderers_hash , self . building , config . prune_failed_graph ] if timestamped and self . timestamp : t . append ( self . timestamp ) return str ( tuple ( t ) )
Makes a key suitable as a memcache entry .
149
11
226,374
def create_shell ( shell = None , * * kwargs ) : if not shell : shell = config . default_shell if not shell : from rez . system import system shell = system . shell from rez . plugin_managers import plugin_manager return plugin_manager . create_instance ( 'shell' , shell , * * kwargs )
Returns a Shell of the given type or the current shell type if shell is None .
76
17
226,375
def startup_capabilities ( cls , rcfile = False , norc = False , stdin = False , command = False ) : raise NotImplementedError
Given a set of options related to shell startup return the actual options that will be applied .
35
18
226,376
def upload_file ( self , metadata , filename , signer = None , sign_password = None , filetype = 'sdist' , pyversion = 'source' , keystore = None ) : self . check_credentials ( ) if not os . path . exists ( filename ) : raise DistlibException ( 'not found: %s' % filename ) metadata . validate ( ) d = metadata . todict ( ) sig_file = None if signer : if not self . gpg : logger . warning ( 'no signing program available - not signed' ) else : sig_file = self . sign_file ( filename , signer , sign_password , keystore ) with open ( filename , 'rb' ) as f : file_data = f . read ( ) md5_digest = hashlib . md5 ( file_data ) . hexdigest ( ) sha256_digest = hashlib . sha256 ( file_data ) . hexdigest ( ) d . update ( { ':action' : 'file_upload' , 'protcol_version' : '1' , 'filetype' : filetype , 'pyversion' : pyversion , 'md5_digest' : md5_digest , 'sha256_digest' : sha256_digest , } ) files = [ ( 'content' , os . path . basename ( filename ) , file_data ) ] if sig_file : with open ( sig_file , 'rb' ) as f : sig_data = f . read ( ) files . append ( ( 'gpg_signature' , os . path . basename ( sig_file ) , sig_data ) ) shutil . rmtree ( os . path . dirname ( sig_file ) ) request = self . encode_request ( d . items ( ) , files ) return self . send_request ( request )
Upload a release file to the index .
411
8
226,377
def _get_dependency_order ( g , node_list ) : access_ = accessibility ( g ) deps = dict ( ( k , set ( v ) - set ( [ k ] ) ) for k , v in access_ . iteritems ( ) ) nodes = node_list + list ( set ( g . nodes ( ) ) - set ( node_list ) ) ordered_nodes = [ ] while nodes : n_ = nodes [ 0 ] n_deps = deps . get ( n_ ) if ( n_ in ordered_nodes ) or ( n_deps is None ) : nodes = nodes [ 1 : ] continue moved = False for i , n in enumerate ( nodes [ 1 : ] ) : if n in n_deps : nodes = [ nodes [ i + 1 ] ] + nodes [ : i + 1 ] + nodes [ i + 2 : ] moved = True break if not moved : ordered_nodes . append ( n_ ) nodes = nodes [ 1 : ] return ordered_nodes
Return list of nodes as close as possible to the ordering in node_list but with child nodes earlier in the list than parents .
220
26
226,378
def _short_req_str ( package_request ) : if not package_request . conflict : versions = package_request . range . to_versions ( ) if versions and len ( versions ) == len ( package_request . range ) and len ( versions ) > 1 : return "%s-%s(%d)" % ( package_request . name , str ( package_request . range . span ( ) ) , len ( versions ) ) return str ( package_request )
print shortened version of == X| == Y| == Z ranged requests .
101
15
226,379
def requires_list ( self ) : requires = self . variant . get_requires ( build_requires = self . building ) reqlist = RequirementList ( requires ) if reqlist . conflict : raise ResolveError ( "The package %s has an internal requirements conflict: %s" % ( str ( self ) , str ( reqlist ) ) ) return reqlist
It is important that this property is calculated lazily . Getting the requires attribute may trigger a package load which may be avoided if this variant is reduced away before that happens .
77
34
226,380
def sort ( self ) : if self . sorted : return def key ( variant ) : requested_key = [ ] names = set ( ) for i , request in enumerate ( self . solver . request_list ) : if not request . conflict : req = variant . requires_list . get ( request . name ) if req is not None : requested_key . append ( ( - i , req . range ) ) names . add ( req . name ) additional_key = [ ] for request in variant . requires_list : if not request . conflict and request . name not in names : additional_key . append ( ( request . range , request . name ) ) if ( VariantSelectMode [ config . variant_select_mode ] == VariantSelectMode . version_priority ) : k = ( requested_key , - len ( additional_key ) , additional_key , variant . index ) else : # VariantSelectMode.intersection_priority k = ( len ( requested_key ) , requested_key , - len ( additional_key ) , additional_key , variant . index ) return k self . variants . sort ( key = key , reverse = True ) self . sorted = True
Sort variants from most correct to consume to least .
249
10
226,381
def get_intersection ( self , range_ ) : result = [ ] for entry in self . entries : package , value = entry if value is None : continue # package was blocked by package filters if package . version not in range_ : continue if isinstance ( value , list ) : variants = value entry_ = _PackageEntry ( package , variants , self . solver ) result . append ( entry_ ) continue # apply package filter if self . solver . package_filter : rule = self . solver . package_filter . excludes ( package ) if rule : if config . debug_package_exclusions : print_debug ( "Package '%s' was excluded by rule '%s'" % ( package . qualified_name , str ( rule ) ) ) entry [ 1 ] = None continue # expand package entry into list of variants if self . solver . package_load_callback : self . solver . package_load_callback ( package ) variants_ = [ ] for var in package . iter_variants ( ) : variant = PackageVariant ( var , self . solver . building ) variants_ . append ( variant ) entry [ 1 ] = variants_ entry_ = _PackageEntry ( package , variants_ , self . solver ) result . append ( entry_ ) return result or None
Get a list of variants that intersect with the given range .
275
12
226,382
def intersect ( self , range_ ) : self . solver . intersection_broad_tests_count += 1 if range_ . is_any ( ) : return self if self . solver . optimised : if range_ in self . been_intersected_with : return self if self . pr : self . pr . passive ( "intersecting %s wrt range '%s'..." , self , range_ ) self . solver . intersection_tests_count += 1 with self . solver . timed ( self . solver . intersection_time ) : # this is faster than iter_intersecting :( entries = [ x for x in self . entries if x . version in range_ ] if not entries : return None elif len ( entries ) < len ( self . entries ) : copy_ = self . _copy ( entries ) copy_ . been_intersected_with . add ( range_ ) return copy_ else : self . been_intersected_with . add ( range_ ) return self
Remove variants whose version fall outside of the given range .
218
11
226,383
def reduce_by ( self , package_request ) : if self . pr : reqstr = _short_req_str ( package_request ) self . pr . passive ( "reducing %s wrt %s..." , self , reqstr ) if self . solver . optimised : if package_request in self . been_reduced_by : return ( self , [ ] ) if ( package_request . range is None ) or ( package_request . name not in self . fam_requires ) : return ( self , [ ] ) with self . solver . timed ( self . solver . reduction_time ) : return self . _reduce_by ( package_request )
Remove variants whos dependencies conflict with the given package request .
147
12
226,384
def split ( self ) : # We sort here in the split in order to sort as late as possible. # Because splits usually happen after intersections/reductions, this # means there can be less entries to sort. # self . sort_versions ( ) def _split ( i_entry , n_variants , common_fams = None ) : # perform a split at a specific point result = self . entries [ i_entry ] . split ( n_variants ) if result : entry , next_entry = result entries = self . entries [ : i_entry ] + [ entry ] next_entries = [ next_entry ] + self . entries [ i_entry + 1 : ] else : entries = self . entries [ : i_entry + 1 ] next_entries = self . entries [ i_entry + 1 : ] slice_ = self . _copy ( entries ) next_slice = self . _copy ( next_entries ) if self . pr : if common_fams : if len ( common_fams ) == 1 : reason_str = iter ( common_fams ) . next ( ) else : reason_str = ", " . join ( common_fams ) else : reason_str = "first variant" self . pr ( "split (reason: %s) %s into %s and %s" , reason_str , self , slice_ , next_slice ) return slice_ , next_slice # determine if we need to find first variant without common dependency if len ( self ) > 2 : fams = self . first_variant . request_fams - self . extracted_fams else : fams = None if not fams : # trivial case, split on first variant return _split ( 0 , 1 ) # find split point - first variant with no dependency shared with previous prev = None for i , entry in enumerate ( self . entries ) : # sort the variants. This is done here in order to do the sort as # late as possible, simply to avoid the cost. entry . sort ( ) for j , variant in enumerate ( entry . variants ) : fams = fams & variant . request_fams if not fams : return _split ( * prev ) prev = ( i , j + 1 , fams ) # should never get here - it's only possible if there's a common # dependency, but if there's a common dependency, split() should never # have been called. 
raise RezSystemError ( "Unexpected solver error: common family(s) still in slice being " "split: slice: %s, family(s): %s" % ( self , str ( fams ) ) )
Split the slice .
567
4
226,385
def sort_versions ( self ) : if self . sorted : return for orderer in ( self . solver . package_orderers or [ ] ) : entries = orderer . reorder ( self . entries , key = lambda x : x . package ) if entries is not None : self . entries = entries self . sorted = True if self . pr : self . pr ( "sorted: %s packages: %s" , self . package_name , repr ( orderer ) ) return # default ordering is version descending self . entries = sorted ( self . entries , key = lambda x : x . version , reverse = True ) self . sorted = True if self . pr : self . pr ( "sorted: %s packages: version descending" , self . package_name )
Sort entries by version .
165
5
226,386
def get_variant_slice ( self , package_name , range_ ) : variant_list = self . variant_lists . get ( package_name ) if variant_list is None : variant_list = _PackageVariantList ( package_name , self . solver ) self . variant_lists [ package_name ] = variant_list entries = variant_list . get_intersection ( range_ ) if not entries : return None slice_ = _PackageVariantSlice ( package_name , entries = entries , solver = self . solver ) return slice_
Get a list of variants from the cache .
123
9
226,387
def intersect ( self , range_ ) : new_slice = None if self . package_request . conflict : if self . package_request . range is None : new_slice = self . solver . _get_variant_slice ( self . package_name , range_ ) else : new_range = range_ - self . package_request . range if new_range is not None : new_slice = self . solver . _get_variant_slice ( self . package_name , new_range ) else : new_slice = self . variant_slice . intersect ( range_ ) # intersection reduced the scope to nothing if new_slice is None : if self . pr : self . pr ( "%s intersected with range '%s' resulted in no packages" , self , range_ ) return None # intersection narrowed the scope if new_slice is not self . variant_slice : scope = self . _copy ( new_slice ) if self . pr : self . pr ( "%s was intersected to %s by range '%s'" , self , scope , range_ ) return scope # intersection did not change the scope return self
Intersect this scope with a package range .
244
9
226,388
def reduce_by ( self , package_request ) : self . solver . reduction_broad_tests_count += 1 if self . package_request . conflict : # conflict scopes don't reduce. Instead, other scopes will be # reduced against a conflict scope. return ( self , [ ] ) # perform the reduction new_slice , reductions = self . variant_slice . reduce_by ( package_request ) # there was total reduction if new_slice is None : self . solver . reductions_count += 1 if self . pr : reqstr = _short_req_str ( package_request ) self . pr ( "%s was reduced to nothing by %s" , self , reqstr ) self . pr . br ( ) return ( None , reductions ) # there was some reduction if new_slice is not self . variant_slice : self . solver . reductions_count += 1 scope = self . _copy ( new_slice ) if self . pr : reqstr = _short_req_str ( package_request ) self . pr ( "%s was reduced to %s by %s" , self , scope , reqstr ) self . pr . br ( ) return ( scope , reductions ) # there was no reduction return ( self , [ ] )
Reduce this scope wrt a package request .
267
10
226,389
def split ( self ) : if self . package_request . conflict or ( len ( self . variant_slice ) == 1 ) : return None else : r = self . variant_slice . split ( ) if r is None : return None else : slice , next_slice = r scope = self . _copy ( slice ) next_scope = self . _copy ( next_slice ) return ( scope , next_scope )
Split the scope .
89
4
226,390
def finalise ( self ) : assert ( self . _is_solved ( ) ) g = self . _get_minimal_graph ( ) scopes = dict ( ( x . package_name , x ) for x in self . scopes if not x . package_request . conflict ) # check for cyclic dependencies fam_cycle = find_cycle ( g ) if fam_cycle : cycle = [ ] for fam in fam_cycle : scope = scopes [ fam ] variant = scope . _get_solved_variant ( ) stmt = VersionedObject . construct ( fam , variant . version ) cycle . append ( stmt ) phase = copy . copy ( self ) phase . scopes = scopes . values ( ) phase . failure_reason = Cycle ( cycle ) phase . status = SolverStatus . cyclic return phase # reorder wrt dependencies, keeping original request order where possible fams = [ x . name for x in self . solver . request_list ] ordered_fams = _get_dependency_order ( g , fams ) scopes_ = [ ] for fam in ordered_fams : scope = scopes [ fam ] if not scope . package_request . conflict : scopes_ . append ( scope ) phase = copy . copy ( self ) phase . scopes = scopes_ return phase
Remove conflict requests detect cyclic dependencies and reorder packages wrt dependency and then request order .
286
19
226,391
def split ( self ) : assert ( self . status == SolverStatus . exhausted ) scopes = [ ] next_scopes = [ ] split_i = None for i , scope in enumerate ( self . scopes ) : if split_i is None : r = scope . split ( ) if r is not None : scope_ , next_scope = r scopes . append ( scope_ ) next_scopes . append ( next_scope ) split_i = i continue scopes . append ( scope ) next_scopes . append ( scope ) assert split_i is not None phase = copy . copy ( self ) phase . scopes = scopes phase . status = SolverStatus . pending phase . changed_scopes_i = set ( [ split_i ] ) # because a scope was narrowed by a split, other scopes need to be # reduced against it #for i in range(len(phase.scopes)): # if i != split_i: # phase.pending_reducts.add((i, split_i)) next_phase = copy . copy ( phase ) next_phase . scopes = next_scopes return ( phase , next_phase )
Split the phase .
255
4
226,392
def status(self):
    """Return the current status of the solve."""
    # a request list that conflicts with itself can never succeed
    if self.request_list.conflict:
        return SolverStatus.failed

    # the solve has failed because a callback has nominated the most
    # recent failure as the reason.
    if self.callback_return == SolverCallbackReturn.fail:
        return SolverStatus.failed

    st = self.phase_stack[-1].status
    if st == SolverStatus.cyclic:
        return SolverStatus.failed

    if len(self.phase_stack) > 1:
        if st == SolverStatus.solved:
            return SolverStatus.solved
        return SolverStatus.unsolved

    if st in (SolverStatus.pending, SolverStatus.exhausted):
        return SolverStatus.unsolved

    return st
Return the current status of the solve .
153
8
226,393
def num_fails(self):
    """Return the number of failed solve steps that have been executed.

    Note that this count is inclusive of a failed/cyclic phase still
    sitting on top of the phase stack.
    """
    count = len(self.failed_phase_list)
    top_status = self.phase_stack[-1].status
    if top_status in (SolverStatus.failed, SolverStatus.cyclic):
        count += 1
    return count
Return the number of failed solve steps that have been executed. Note that num_solves is inclusive of failures.
54
23
226,394
def resolved_packages(self):
    """Return a list of PackageVariant objects, or None if the resolve did
    not complete or was unsuccessful.
    """
    if self.status == SolverStatus.solved:
        return self.phase_stack[-1]._get_solved_variants()
    return None
Return a list of PackageVariant objects, or None if the resolve did not complete or was unsuccessful.
51
20
226,395
def reset(self):
    """Reset the solver, removing any current solve."""
    # a conflicting request list can never be solved; nothing to reset to
    if self.request_list.conflict:
        return

    initial_phase = _ResolvePhase(self.request_list.requirements, solver=self)
    self.pr("resetting...")
    self._init()
    self._push_phase(initial_phase)
Reset the solver removing any current solve .
60
10
226,396
def solve(self):
    """Attempt to solve the request.

    Iteratively runs solve steps until the solve succeeds, fails, or a
    callback aborts it. Records `load_time` and `solve_time` on self.

    Raises:
        ResolveError: If the solve has already been started.
    """
    if self.solve_begun:
        raise ResolveError("cannot run solve() on a solve that has "
                           "already been started")
    t1 = time.time()
    pt1 = package_repo_stats.package_load_time

    # iteratively solve phases
    while self.status == SolverStatus.unsolved:
        self.solve_step()
        # callback may abort an unfinished solve
        if self.status == SolverStatus.unsolved and not self._do_callback():
            break

    # package load time is tracked separately from total solve time
    self.load_time = package_repo_stats.package_load_time - pt1
    self.solve_time = time.time() - t1

    # print stats
    if self.pr.verbosity > 2:
        from pprint import pformat
        self.pr.subheader("SOLVE STATS:")
        self.pr(pformat(self.solve_stats))
    elif self.print_stats:
        from pprint import pformat
        data = {"solve_stats": self.solve_stats}
        # Python 2 chevron print - writes to self.buf if set, else stdout
        print >> (self.buf or sys.stdout), pformat(data)
Attempt to solve the request .
236
6
226,397
def solve_step(self):
    """Perform a single solve step.

    Pops the top phase off the phase stack, splits it if it is exhausted,
    solves it, and pushes the resulting phase(s) back onto the stack. No-op
    if the solve is not currently in the unsolved state.
    """
    self.solve_begun = True
    if self.status != SolverStatus.unsolved:
        return

    if self.pr:
        self.pr.header("SOLVE #%d (%d fails so far)...",
                       self.solve_count + 1, self.num_fails)
    phase = self._pop_phase()

    if phase.status == SolverStatus.failed:  # a previously failed phase
        self.pr("discarded failed phase, fetching previous unsolved phase...")
        self.failed_phase_list.append(phase)
        phase = self._pop_phase()

    if phase.status == SolverStatus.exhausted:
        # narrow the phase; the remainder goes back on the stack for later
        self.pr.subheader("SPLITTING:")
        phase, next_phase = phase.split()
        self._push_phase(next_phase)
        if self.pr:
            self.pr("new phase: %s", phase)

    new_phase = phase.solve()
    self.solve_count += 1

    if new_phase.status == SolverStatus.failed:
        self.pr.subheader("FAILED:")
        self._push_phase(new_phase)
        # stack depth 1 means there are no more phases to fall back to
        if self.pr and len(self.phase_stack) == 1:
            self.pr.header("FAIL: there is no solution")
    elif new_phase.status == SolverStatus.solved:
        # solved, but there may be cyclic dependencies
        self.pr.subheader("SOLVED:")
        final_phase = new_phase.finalise()
        self._push_phase(final_phase)
        if self.pr:
            if final_phase.status == SolverStatus.cyclic:
                self.pr.header("FAIL: a cycle was detected")
            else:
                self.pr.header("SUCCESS")
    else:
        self.pr.subheader("EXHAUSTED:")
        assert(new_phase.status == SolverStatus.exhausted)
        self._push_phase(new_phase)
Perform a single solve step .
436
7
226,398
def failure_reason(self, failure_index=None):
    """Get the reason for a failure.

    Args:
        failure_index: Index of the fail to return the reason for, or None
            for the most recent failure.
    """
    failed_phase = self._get_failed_phase(failure_index)[0]
    return failed_phase.failure_reason
Get the reason for a failure .
37
7
226,399
def failure_packages(self, failure_index=None):
    """Get the packages involved in a failure.

    Args:
        failure_index: Index of the fail to inspect, or None for the most
            recent failure.

    Returns:
        A list of Requirement objects, or None if there is no failure
        reason recorded on the phase.
    """
    failed_phase = self._get_failed_phase(failure_index)[0]
    reason = failed_phase.failure_reason
    if not reason:
        return None
    return reason.involved_requirements()
Get packages involved in a failure .
51
7