Dataset columns (per-row fields):

    column            type      values
    ----------------  --------  -----------------
    repo              string    lengths 7 - 55
    path              string    lengths 4 - 127
    func_name         string    lengths 1 - 88
    original_string   string    lengths 75 - 19.8k
    language          string    1 distinct value
    code              string    lengths 75 - 19.8k
    code_tokens       list      lengths 20 - 707
    docstring         string    lengths 3 - 17.3k
    docstring_tokens  list      lengths 3 - 222
    sha               string    lengths 40 - 40
    url               string    lengths 87 - 242
    partition         string    1 distinct value
    idx               int64     0 - 252k
nerdvegas/rez
src/rez/serialise.py
process_python_objects
def process_python_objects(data, filepath=None):
    """Replace certain values in the given package data dict.

    Does things like:
    * evaluates @early decorated functions, and replaces with return value;
    * converts functions into `SourceCode` instances so they can be serialized
      out to installed packages, and evaluated later;
    * strips some values (modules, __-leading variables) that are never to be
      part of installed packages.

    Returns:
        dict: Updated dict.
    """
    def _process(value):
        if isinstance(value, dict):
            for k, v in value.items():
                value[k] = _process(v)
            return value
        elif isfunction(value):
            func = value

            if hasattr(func, "_early"):
                # run the function now, and replace with return value
                #

                # make a copy of the func with its own globals, and add 'this'
                import types
                fn = types.FunctionType(func.func_code,
                                        func.func_globals.copy(),
                                        name=func.func_name,
                                        argdefs=func.func_defaults,
                                        closure=func.func_closure)

                # apply globals
                fn.func_globals["this"] = EarlyThis(data)
                fn.func_globals.update(get_objects())

                # execute the function
                spec = getargspec(func)
                args = spec.args or []
                if len(args) not in (0, 1):
                    raise ResourceError("@early decorated function must "
                                        "take zero or one args only")
                if args:
                    # this 'data' arg support isn't needed anymore, but I'm
                    # supporting it til I know nobody is using it...
                    #
                    value_ = fn(data)
                else:
                    value_ = fn()

                # process again in case this is a function returning a function
                return _process(value_)

            elif hasattr(func, "_late"):
                return SourceCode(func=func, filepath=filepath,
                                  eval_as_function=True)

            elif func.__name__ in package_rex_keys:
                # if a rex function, the code has to be eval'd NOT as a function,
                # otherwise the globals dict doesn't get updated with any vars
                # defined in the code, and that means rex code like this:
                #
                #     rr = 'test'
                #     env.RR = '{rr}'
                #
                # ..won't work. It was never intentional that the above work, but
                # it does, so now we have to keep it so.
                #
                return SourceCode(func=func, filepath=filepath,
                                  eval_as_function=False)

            else:
                # a normal function. Leave unchanged, it will be stripped after
                return func
        else:
            return value

    def _trim(value):
        if isinstance(value, dict):
            for k, v in value.items():
                if isfunction(v):
                    if v.__name__ == "preprocess":
                        # preprocess is a special case. It has to stay intact
                        # until the `DeveloperPackage` has a chance to apply it;
                        # after which it gets removed from the package attributes.
                        #
                        pass
                    else:
                        del value[k]
                elif ismodule(v) or k.startswith("__"):
                    del value[k]
                else:
                    value[k] = _trim(v)
        return value

    data = _process(data)
    data = _trim(data)
    return data
python
[ "def", "process_python_objects", "(", "data", ",", "filepath", "=", "None", ")", ":", "def", "_process", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", "...
Replace certain values in the given package data dict. Does things like: * evaluates @early decorated functions, and replaces with return value; * converts functions into `SourceCode` instances so they can be serialized out to installed packages, and evaluated later; * strips some values (modules, __-leading variables) that are never to be part of installed packages. Returns: dict: Updated dict.
[ "Replace", "certain", "values", "in", "the", "given", "package", "data", "dict", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/serialise.py#L266-L366
train
227,200
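A minimal sketch of the kind of package.py definition that process_python_objects above operates on; the package contents here are hypothetical, following rez's package.py conventions. The @early function is executed at serialisation time and replaced by its return value; commands() is a rex key, so it is wrapped as a SourceCode instance evaluated as a code block rather than as a function.

    # hypothetical package.py (names are illustrative)
    name = "foo"
    version = "1.0.0"

    @early()
    def authors():
        # evaluated by process_python_objects and replaced by its result
        import getpass
        return [getpass.getuser()]

    def commands():
        # a rex key: becomes SourceCode(eval_as_function=False)
        env.PATH.append("{root}/bin")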
nerdvegas/rez
src/rez/serialise.py
load_yaml
def load_yaml(stream, **kwargs):
    """Load yaml-formatted data from a stream.

    Args:
        stream (file-like object).

    Returns:
        dict.
    """
    # if there's an error parsing the yaml, and you pass yaml.load a string,
    # it will print lines of context, but will print "<string>" instead of a
    # filename; if you pass a stream, it will print the filename, but no lines
    # of context.
    # Get the best of both worlds, by passing it a string, then replacing
    # "<string>" with the filename if there's an error...
    content = stream.read()
    try:
        return yaml.load(content) or {}
    except Exception, e:
        if stream.name and stream.name != '<string>':
            for mark_name in 'context_mark', 'problem_mark':
                mark = getattr(e, mark_name, None)
                if mark is None:
                    continue
                if getattr(mark, 'name') == '<string>':
                    mark.name = stream.name
        raise e
python
[ "def", "load_yaml", "(", "stream", ",", "*", "*", "kwargs", ")", ":", "# if there's an error parsing the yaml, and you pass yaml.load a string,", "# it will print lines of context, but will print \"<string>\" instead of a", "# filename; if you pass a stream, it will print the filename, but n...
Load yaml-formatted data from a stream. Args: stream (file-like object). Returns: dict.
[ "Load", "yaml", "-", "formatted", "data", "from", "a", "stream", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/serialise.py#L369-L395
train
227,201
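A usage sketch for load_yaml above (Python 2, matching the vendored code). The stream's name attribute is substituted into any parse error, so passing an open file yields both the filename and lines of context; "package.yaml" is an illustrative path.

    with open("package.yaml") as f:   # path assumed to exist
        data = load_yaml(f)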
nerdvegas/rez
src/rez/vendor/amqp/connection.py
Connection._blocked
def _blocked(self, args):
    """RabbitMQ Extension."""
    reason = args.read_shortstr()
    if self.on_blocked:
        return self.on_blocked(reason)
python
[ "def", "_blocked", "(", "self", ",", "args", ")", ":", "reason", "=", "args", ".", "read_shortstr", "(", ")", "if", "self", ".", "on_blocked", ":", "return", "self", ".", "on_blocked", "(", "reason", ")" ]
RabbitMQ Extension.
[ "RabbitMQ", "Extension", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L532-L536
train
227,202
nerdvegas/rez
src/rez/vendor/amqp/connection.py
Connection._x_secure_ok
def _x_secure_ok(self, response):
    """Security mechanism response

    This method attempts to authenticate, passing a block of SASL
    data for the security mechanism at the server side.

    PARAMETERS:
        response: longstr

            security response data

            A block of opaque data passed to the security
            mechanism. The contents of this data are defined by
            the SASL security mechanism.

    """
    args = AMQPWriter()
    args.write_longstr(response)
    self._send_method((10, 21), args)
python
[ "def", "_x_secure_ok", "(", "self", ",", "response", ")", ":", "args", "=", "AMQPWriter", "(", ")", "args", ".", "write_longstr", "(", "response", ")", "self", ".", "_send_method", "(", "(", "10", ",", "21", ")", ",", "args", ")" ]
Security mechanism response This method attempts to authenticate, passing a block of SASL data for the security mechanism at the server side. PARAMETERS: response: longstr security response data A block of opaque data passed to the security mechanism. The contents of this data are defined by the SASL security mechanism.
[ "Security", "mechanism", "response" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L662-L680
train
227,203
nerdvegas/rez
src/rez/vendor/amqp/connection.py
Connection._x_start_ok
def _x_start_ok(self, client_properties, mechanism, response, locale):
    """Select security mechanism and locale

    This method selects a SASL security mechanism. ASL uses SASL
    (RFC2222) to negotiate authentication and encryption.

    PARAMETERS:
        client_properties: table

            client properties

        mechanism: shortstr

            selected security mechanism

            A single security mechanisms selected by the client,
            which must be one of those specified by the server.

            RULE:

                The client SHOULD authenticate using the highest-
                level security profile it can handle from the list
                provided by the server.

            RULE:

                The mechanism field MUST contain one of the
                security mechanisms proposed by the server in the
                Start method. If it doesn't, the server MUST close
                the socket.

        response: longstr

            security response data

            A block of opaque data passed to the security
            mechanism. The contents of this data are defined by
            the SASL security mechanism. For the PLAIN security
            mechanism this is defined as a field table holding two
            fields, LOGIN and PASSWORD.

        locale: shortstr

            selected message locale

            A single message local selected by the client, which
            must be one of those specified by the server.

    """
    if self.server_capabilities.get('consumer_cancel_notify'):
        if 'capabilities' not in client_properties:
            client_properties['capabilities'] = {}
        client_properties['capabilities']['consumer_cancel_notify'] = True

    if self.server_capabilities.get('connection.blocked'):
        if 'capabilities' not in client_properties:
            client_properties['capabilities'] = {}
        client_properties['capabilities']['connection.blocked'] = True

    args = AMQPWriter()
    args.write_table(client_properties)
    args.write_shortstr(mechanism)
    args.write_longstr(response)
    args.write_shortstr(locale)
    self._send_method((10, 11), args)
python
[ "def", "_x_start_ok", "(", "self", ",", "client_properties", ",", "mechanism", ",", "response", ",", "locale", ")", ":", "if", "self", ".", "server_capabilities", ".", "get", "(", "'consumer_cancel_notify'", ")", ":", "if", "'capabilities'", "not", "in", "clie...
Select security mechanism and locale This method selects a SASL security mechanism. ASL uses SASL (RFC2222) to negotiate authentication and encryption. PARAMETERS: client_properties: table client properties mechanism: shortstr selected security mechanism A single security mechanisms selected by the client, which must be one of those specified by the server. RULE: The client SHOULD authenticate using the highest- level security profile it can handle from the list provided by the server. RULE: The mechanism field MUST contain one of the security mechanisms proposed by the server in the Start method. If it doesn't, the server MUST close the socket. response: longstr security response data A block of opaque data passed to the security mechanism. The contents of this data are defined by the SASL security mechanism. For the PLAIN security mechanism this is defined as a field table holding two fields, LOGIN and PASSWORD. locale: shortstr selected message locale A single message local selected by the client, which must be one of those specified by the server.
[ "Select", "security", "mechanism", "and", "locale" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L758-L820
train
227,204
nerdvegas/rez
src/rez/vendor/amqp/connection.py
Connection._tune
def _tune(self, args):
    """Propose connection tuning parameters

    This method proposes a set of connection configuration values
    to the client. The client can accept and/or adjust these.

    PARAMETERS:
        channel_max: short

            proposed maximum channels

            The maximum total number of channels that the server
            allows per connection. Zero means that the server does
            not impose a fixed limit, but the number of allowed
            channels may be limited by available server resources.

        frame_max: long

            proposed maximum frame size

            The largest frame size that the server proposes for
            the connection. The client can negotiate a lower
            value. Zero means that the server does not impose any
            specific limit but may reject very large frames if it
            cannot allocate resources for them.

            RULE:

                Until the frame-max has been negotiated, both
                peers MUST accept frames of up to 4096 octets
                large. The minimum non-zero value for the frame-
                max field is 4096.

        heartbeat: short

            desired heartbeat delay

            The delay, in seconds, of the connection heartbeat
            that the server wants. Zero means the server does not
            want a heartbeat.

    """
    client_heartbeat = self.client_heartbeat or 0
    self.channel_max = args.read_short() or self.channel_max
    self.frame_max = args.read_long() or self.frame_max
    self.method_writer.frame_max = self.frame_max
    self.server_heartbeat = args.read_short() or 0

    # negotiate the heartbeat interval to the smaller of the
    # specified values
    if self.server_heartbeat == 0 or client_heartbeat == 0:
        self.heartbeat = max(self.server_heartbeat, client_heartbeat)
    else:
        self.heartbeat = min(self.server_heartbeat, client_heartbeat)

    # Ignore server heartbeat if client_heartbeat is disabled
    if not self.client_heartbeat:
        self.heartbeat = 0

    self._x_tune_ok(self.channel_max, self.frame_max, self.heartbeat)
python
[ "def", "_tune", "(", "self", ",", "args", ")", ":", "client_heartbeat", "=", "self", ".", "client_heartbeat", "or", "0", "self", ".", "channel_max", "=", "args", ".", "read_short", "(", ")", "or", "self", ".", "channel_max", "self", ".", "frame_max", "="...
Propose connection tuning parameters This method proposes a set of connection configuration values to the client. The client can accept and/or adjust these. PARAMETERS: channel_max: short proposed maximum channels The maximum total number of channels that the server allows per connection. Zero means that the server does not impose a fixed limit, but the number of allowed channels may be limited by available server resources. frame_max: long proposed maximum frame size The largest frame size that the server proposes for the connection. The client can negotiate a lower value. Zero means that the server does not impose any specific limit but may reject very large frames if it cannot allocate resources for them. RULE: Until the frame-max has been negotiated, both peers MUST accept frames of up to 4096 octets large. The minimum non-zero value for the frame- max field is 4096. heartbeat: short desired heartbeat delay The delay, in seconds, of the connection heartbeat that the server wants. Zero means the server does not want a heartbeat.
[ "Propose", "connection", "tuning", "parameters" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L822-L881
train
227,205
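The heartbeat negotiation in _tune above can be isolated into a small, self-contained sketch: the smaller of the two non-zero values wins, a zero on either side falls back to the other value, and a disabled client heartbeat zeroes the result entirely.

    def negotiate_heartbeat(server_heartbeat, client_heartbeat):
        # smaller of the two, unless either side sent 0
        if server_heartbeat == 0 or client_heartbeat == 0:
            heartbeat = max(server_heartbeat, client_heartbeat)
        else:
            heartbeat = min(server_heartbeat, client_heartbeat)
        # server heartbeat is ignored outright if the client disabled it
        if not client_heartbeat:
            heartbeat = 0
        return heartbeat

    assert negotiate_heartbeat(60, 30) == 30   # both set: smaller wins
    assert negotiate_heartbeat(0, 30) == 30    # server disabled: client's value
    assert negotiate_heartbeat(60, 0) == 0     # client disabled: no heartbeat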
nerdvegas/rez
src/rez/vendor/amqp/connection.py
Connection.heartbeat_tick
def heartbeat_tick(self, rate=2):
    """Send heartbeat packets, if necessary, and fail if none have been
    received recently. This should be called frequently, on the order of
    once per second.

    :keyword rate: Ignored
    """
    if not self.heartbeat:
        return

    # treat actual data exchange in either direction as a heartbeat
    sent_now = self.method_writer.bytes_sent
    recv_now = self.method_reader.bytes_recv
    if self.prev_sent is None or self.prev_sent != sent_now:
        self.last_heartbeat_sent = monotonic()
    if self.prev_recv is None or self.prev_recv != recv_now:
        self.last_heartbeat_received = monotonic()
    self.prev_sent, self.prev_recv = sent_now, recv_now

    # send a heartbeat if it's time to do so
    if monotonic() > self.last_heartbeat_sent + self.heartbeat:
        self.send_heartbeat()
        self.last_heartbeat_sent = monotonic()

    # if we've missed two intervals' heartbeats, fail; this gives the
    # server enough time to send heartbeats a little late
    if (self.last_heartbeat_received and
            self.last_heartbeat_received + 2 * self.heartbeat < monotonic()):
        raise ConnectionForced('Too many heartbeats missed')
python
[ "def", "heartbeat_tick", "(", "self", ",", "rate", "=", "2", ")", ":", "if", "not", "self", ".", "heartbeat", ":", "return", "# treat actual data exchange in either direction as a heartbeat", "sent_now", "=", "self", ".", "method_writer", ".", "bytes_sent", "recv_no...
Send heartbeat packets, if necessary, and fail if none have been received recently. This should be called frequently, on the order of once per second. :keyword rate: Ignored
[ "Send", "heartbeat", "packets", "if", "necessary", "and", "fail", "if", "none", "have", "been", "received", "recently", ".", "This", "should", "be", "called", "frequently", "on", "the", "order", "of", "once", "per", "second", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L886-L915
train
227,206
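A sketch of the driving loop heartbeat_tick above expects: call it on the order of once per second and treat ConnectionForced as fatal. `conn` is an assumed, already-established Connection instance.

    import time

    while True:
        try:
            conn.heartbeat_tick()   # 'conn' is assumed connected
        except ConnectionForced:
            conn.close()
            raise
        time.sleep(1)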
nerdvegas/rez
src/rez/vendor/amqp/connection.py
Connection._x_tune_ok
def _x_tune_ok(self, channel_max, frame_max, heartbeat):
    """Negotiate connection tuning parameters

    This method sends the client's connection tuning parameters to
    the server. Certain fields are negotiated, others provide
    capability information.

    PARAMETERS:
        channel_max: short

            negotiated maximum channels

            The maximum total number of channels that the client
            will use per connection. May not be higher than the
            value specified by the server.

            RULE:

                The server MAY ignore the channel-max value or MAY
                use it for tuning its resource allocation.

        frame_max: long

            negotiated maximum frame size

            The largest frame size that the client and server will
            use for the connection. Zero means that the client
            does not impose any specific limit but may reject very
            large frames if it cannot allocate resources for them.
            Note that the frame-max limit applies principally to
            content frames, where large contents can be broken
            into frames of arbitrary size.

            RULE:

                Until the frame-max has been negotiated, both
                peers must accept frames of up to 4096 octets
                large. The minimum non-zero value for the frame-
                max field is 4096.

        heartbeat: short

            desired heartbeat delay

            The delay, in seconds, of the connection heartbeat
            that the client wants. Zero means the client does not
            want a heartbeat.

    """
    args = AMQPWriter()
    args.write_short(channel_max)
    args.write_long(frame_max)
    args.write_short(heartbeat or 0)
    self._send_method((10, 31), args)
    self._wait_tune_ok = False
python
[ "def", "_x_tune_ok", "(", "self", ",", "channel_max", ",", "frame_max", ",", "heartbeat", ")", ":", "args", "=", "AMQPWriter", "(", ")", "args", ".", "write_short", "(", "channel_max", ")", "args", ".", "write_long", "(", "frame_max", ")", "args", ".", "...
Negotiate connection tuning parameters This method sends the client's connection tuning parameters to the server. Certain fields are negotiated, others provide capability information. PARAMETERS: channel_max: short negotiated maximum channels The maximum total number of channels that the client will use per connection. May not be higher than the value specified by the server. RULE: The server MAY ignore the channel-max value or MAY use it for tuning its resource allocation. frame_max: long negotiated maximum frame size The largest frame size that the client and server will use for the connection. Zero means that the client does not impose any specific limit but may reject very large frames if it cannot allocate resources for them. Note that the frame-max limit applies principally to content frames, where large contents can be broken into frames of arbitrary size. RULE: Until the frame-max has been negotiated, both peers must accept frames of up to 4096 octets large. The minimum non-zero value for the frame- max field is 4096. heartbeat: short desired heartbeat delay The delay, in seconds, of the connection heartbeat that the client wants. Zero means the client does not want a heartbeat.
[ "Negotiate", "connection", "tuning", "parameters" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L917-L971
train
227,207
nerdvegas/rez
src/rez/status.py
Status.parent_suite
def parent_suite(self):
    """Get the current parent suite.

    A parent suite exists when a context within a suite is active. That is,
    during execution of a tool within a suite, or after a user has entered an
    interactive shell in a suite context, for example via the command-
    line syntax 'tool +i', where 'tool' is an alias in a suite.

    Returns:
        `Suite` object, or None if there is no current parent suite.
    """
    if self.context and self.context.parent_suite_path:
        return Suite.load(self.context.parent_suite_path)
    return None
python
[ "def", "parent_suite", "(", "self", ")", ":", "if", "self", ".", "context", "and", "self", ".", "context", ".", "parent_suite_path", ":", "return", "Suite", ".", "load", "(", "self", ".", "context", ".", "parent_suite_path", ")", "return", "None" ]
Get the current parent suite. A parent suite exists when a context within a suite is active. That is, during execution of a tool within a suite, or after a user has entered an interactive shell in a suite context, for example via the command- line syntax 'tool +i', where 'tool' is an alias in a suite. Returns: `Suite` object, or None if there is no current parent suite.
[ "Get", "the", "current", "parent", "suite", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/status.py#L56-L69
train
227,208
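A usage sketch for parent_suite above, assuming the module-level `status` singleton that rez.status exposes; load_path follows the Suite attribute referenced elsewhere in this corpus.

    from rez.status import status

    suite = status.parent_suite()
    if suite is not None:
        print(suite.load_path)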
nerdvegas/rez
src/rez/status.py
Status.print_info
def print_info(self, obj=None, buf=sys.stdout):
    """Print a status message about the given object.

    If an object is not provided, status info is shown about the current
    environment - what the active context is if any, and what suites are
    visible.

    Args:
        obj (str): String which may be one of the following:
            - A tool name;
            - A package name, possibly versioned;
            - A context filepath;
            - A suite filepath;
            - The name of a context in a visible suite.
    """
    if not obj:
        self._print_info(buf)
        return True

    b = False
    for fn in (self._print_tool_info,
               self._print_package_info,
               self._print_suite_info,
               self._print_context_info):
        b_ = fn(obj, buf, b)
        b |= b_
        if b_:
            print >> buf, ''

    if not b:
        print >> buf, "Rez does not know what '%s' is" % obj
    return b
python
[ "def", "print_info", "(", "self", ",", "obj", "=", "None", ",", "buf", "=", "sys", ".", "stdout", ")", ":", "if", "not", "obj", ":", "self", ".", "_print_info", "(", "buf", ")", "return", "True", "b", "=", "False", "for", "fn", "in", "(", "self",...
Print a status message about the given object. If an object is not provided, status info is shown about the current environment - what the active context is if any, and what suites are visible. Args: obj (str): String which may be one of the following: - A tool name; - A package name, possibly versioned; - A context filepath; - A suite filepath; - The name of a context in a visible suite.
[ "Print", "a", "status", "message", "about", "the", "given", "object", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/status.py#L87-L118
train
227,209
nerdvegas/rez
src/rez/status.py
Status.print_tools
def print_tools(self, pattern=None, buf=sys.stdout):
    """Print a list of visible tools.

    Args:
        pattern (str): Only list tools that match this glob pattern.
    """
    seen = set()
    rows = []

    context = self.context
    if context:
        data = context.get_tools()
        conflicts = set(context.get_conflicting_tools().keys())
        for _, (variant, tools) in sorted(data.items()):
            pkg_str = variant.qualified_package_name
            for tool in tools:
                if pattern and not fnmatch(tool, pattern):
                    continue

                if tool in conflicts:
                    label = "(in conflict)"
                    color = critical
                else:
                    label = ''
                    color = None

                rows.append([tool, '-', pkg_str, "active context", label, color])
                seen.add(tool)

    for suite in self.suites:
        for tool, d in suite.get_tools().iteritems():
            if tool in seen:
                continue
            if pattern and not fnmatch(tool, pattern):
                continue

            label = []
            color = None
            path = which(tool)
            if path:
                path_ = os.path.join(suite.tools_path, tool)
                if path != path_:
                    label.append("(hidden by unknown tool '%s')" % path)
                    color = warning

            variant = d["variant"]
            if isinstance(variant, set):
                pkg_str = ", ".join(variant)
                label.append("(in conflict)")
                color = critical
            else:
                pkg_str = variant.qualified_package_name

            orig_tool = d["tool_name"]
            if orig_tool == tool:
                orig_tool = '-'

            label = ' '.join(label)
            source = ("context '%s' in suite '%s'"
                      % (d["context_name"], suite.load_path))

            rows.append([tool, orig_tool, pkg_str, source, label, color])
            seen.add(tool)

    _pr = Printer(buf)
    if not rows:
        _pr("No matching tools.")
        return False

    headers = [["TOOL", "ALIASING", "PACKAGE", "SOURCE", "", None],
               ["----", "--------", "-------", "------", "", None]]
    rows = headers + sorted(rows, key=lambda x: x[0].lower())
    print_colored_columns(_pr, rows)
    return True
python
[ "def", "print_tools", "(", "self", ",", "pattern", "=", "None", ",", "buf", "=", "sys", ".", "stdout", ")", ":", "seen", "=", "set", "(", ")", "rows", "=", "[", "]", "context", "=", "self", ".", "context", "if", "context", ":", "data", "=", "cont...
Print a list of visible tools. Args: pattern (str): Only list tools that match this glob pattern.
[ "Print", "a", "list", "of", "visible", "tools", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/status.py#L120-L193
train
227,210
nerdvegas/rez
src/rez/developer_package.py
DeveloperPackage.from_path
def from_path(cls, path, format=None):
    """Load a developer package.

    A developer package may for example be a package.yaml or package.py in a
    user's source directory.

    Args:
        path: Directory containing the package definition file, or file
            path for the package file itself
        format: which FileFormat to use, or None to check both .py and .yaml

    Returns:
        `Package` object.
    """
    name = None
    data = None

    if format is None:
        formats = (FileFormat.py, FileFormat.yaml)
    else:
        formats = (format,)

    try:
        mode = os.stat(path).st_mode
    except (IOError, OSError):
        raise PackageMetadataError(
            "Path %r did not exist, or was not accessible" % path)
    is_dir = stat.S_ISDIR(mode)

    for name_ in config.plugins.package_repository.filesystem.package_filenames:
        for format_ in formats:
            if is_dir:
                filepath = os.path.join(path, "%s.%s" % (name_,
                                                         format_.extension))
                exists = os.path.isfile(filepath)
            else:
                # if format was not specified, verify that it has the
                # right extension before trying to load
                if format is None:
                    if os.path.splitext(path)[1] != format_.extension:
                        continue
                filepath = path
                exists = True

            if exists:
                data = load_from_file(filepath, format_, disable_memcache=True)
                break
        if data:
            name = data.get("name")
            if name is not None or isinstance(name, basestring):
                break

    if data is None:
        raise PackageMetadataError("No package definition file found at %s"
                                   % path)

    if name is None or not isinstance(name, basestring):
        raise PackageMetadataError(
            "Error in %r - missing or non-string field 'name'" % filepath)

    package = create_package(name, data, package_cls=cls)

    # preprocessing
    result = package._get_preprocessed(data)

    if result:
        package, data = result

    package.filepath = filepath

    # find all includes, this is needed at install time to copy the right
    # py sourcefiles into the package installation
    package.includes = set()

    def visit(d):
        for k, v in d.iteritems():
            if isinstance(v, SourceCode):
                package.includes |= (v.includes or set())
            elif isinstance(v, dict):
                visit(v)

    visit(data)

    package._validate_includes()

    return package
python
[ "def", "from_path", "(", "cls", ",", "path", ",", "format", "=", "None", ")", ":", "name", "=", "None", "data", "=", "None", "if", "format", "is", "None", ":", "formats", "=", "(", "FileFormat", ".", "py", ",", "FileFormat", ".", "yaml", ")", "else...
Load a developer package. A developer package may for example be a package.yaml or package.py in a user's source directory. Args: path: Directory containing the package definition file, or file path for the package file itself format: which FileFormat to use, or None to check both .py and .yaml Returns: `Package` object.
[ "Load", "a", "developer", "package", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/developer_package.py#L35-L119
train
227,211
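A usage sketch for from_path above. Either a source directory containing the definition file, or the file itself, may be given; the paths are illustrative.

    import os
    from rez.developer_package import DeveloperPackage

    # directory form: probes for package.py, then package.yaml
    pkg = DeveloperPackage.from_path(os.path.expanduser("~/src/foo"))

    # file form: the format is inferred from the extension
    pkg = DeveloperPackage.from_path(os.path.expanduser("~/src/foo/package.py"))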
nerdvegas/rez
src/rez/vendor/yaml/__init__.py
dump_all
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=None,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()
python
[ "def", "dump_all", "(", "documents", ",", "stream", "=", "None", ",", "Dumper", "=", "Dumper", ",", "default_style", "=", "None", ",", "default_flow_style", "=", "None", ",", "canonical", "=", "None", ",", "indent", "=", "None", ",", "width", "=", "None"...
Serialize a sequence of Python objects into a YAML stream. If stream is None, return the produced string instead.
[ "Serialize", "a", "sequence", "of", "Python", "objects", "into", "a", "YAML", "stream", ".", "If", "stream", "is", "None", "return", "the", "produced", "string", "instead", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/yaml/__init__.py#L163-L195
train
227,212
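A usage sketch for dump_all above: with stream=None the produced YAML comes back as a string, and explicit_start=True separates documents with '---' markers.

    docs = [{"name": "foo"}, {"name": "bar"}]
    text = dump_all(docs, explicit_start=True)
    print(text)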
nerdvegas/rez
src/rezgui/objects/ProcessTrackerThread.py
ProcessTrackerThread.running_instances
def running_instances(self, context, process_name):
    """Get a list of running instances.

    Args:
        context (`ResolvedContext`): Context the process is running in.
        process_name (str): Name of the process.

    Returns:
        List of (`subprocess.Popen`, start-time) 2-tuples, where start_time
        is the epoch time the process was added.
    """
    handle = (id(context), process_name)
    it = self.processes.get(handle, {}).itervalues()
    entries = [x for x in it if x[0].poll() is None]
    return entries
python
[ "def", "running_instances", "(", "self", ",", "context", ",", "process_name", ")", ":", "handle", "=", "(", "id", "(", "context", ")", ",", "process_name", ")", "it", "=", "self", ".", "processes", ".", "get", "(", "handle", ",", "{", "}", ")", ".", ...
Get a list of running instances. Args: context (`ResolvedContext`): Context the process is running in. process_name (str): Name of the process. Returns: List of (`subprocess.Popen`, start-time) 2-tuples, where start_time is the epoch time the process was added.
[ "Get", "a", "list", "of", "running", "instances", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/objects/ProcessTrackerThread.py#L24-L38
train
227,213
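A usage sketch for running_instances above; `tracker` and `context` are assumed to be an existing ProcessTrackerThread and ResolvedContext.

    for popen, start_time in tracker.running_instances(context, "maya"):
        # each entry is a still-running Popen and its epoch start time
        print("pid %d started at %s" % (popen.pid, start_time))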
nerdvegas/rez
src/rez/rex.py
ActionManager.get_public_methods
def get_public_methods(self):
    """
    return a list of methods on this class which should be exposed in the rex
    API.
    """
    return self.get_action_methods() + [
        ('getenv', self.getenv),
        ('expandvars', self.expandvars),
        ('defined', self.defined),
        ('undefined', self.undefined)]
python
[ "def", "get_public_methods", "(", "self", ")", ":", "return", "self", ".", "get_action_methods", "(", ")", "+", "[", "(", "'getenv'", ",", "self", ".", "getenv", ")", ",", "(", "'expandvars'", ",", "self", ".", "expandvars", ")", ",", "(", "'defined'", ...
return a list of methods on this class which should be exposed in the rex API.
[ "return", "a", "list", "of", "methods", "on", "this", "class", "which", "should", "be", "exposed", "in", "the", "rex", "API", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L205-L214
train
227,214
nerdvegas/rez
src/rez/rex.py
Python.apply_environ
def apply_environ(self):
    """Apply changes to target environ.
    """
    if self.manager is None:
        raise RezSystemError("You must call 'set_manager' on a Python rex "
                             "interpreter before using it.")

    self.target_environ.update(self.manager.environ)
python
[ "def", "apply_environ", "(", "self", ")", ":", "if", "self", ".", "manager", "is", "None", ":", "raise", "RezSystemError", "(", "\"You must call 'set_manager' on a Python rex \"", "\"interpreter before using it.\"", ")", "self", ".", "target_environ", ".", "update", "...
Apply changes to target environ.
[ "Apply", "changes", "to", "target", "environ", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L564-L571
train
227,215
nerdvegas/rez
src/rez/rex.py
EscapedString.formatted
def formatted(self, func):
    """Return the string with non-literal parts formatted.

    Args:
        func (callable): Callable that translates a string into a
            formatted string.

    Returns:
        `EscapedString` object.
    """
    other = EscapedString.__new__(EscapedString)
    other.strings = []

    for is_literal, value in self.strings:
        if not is_literal:
            value = func(value)
        other.strings.append((is_literal, value))

    return other
python
[ "def", "formatted", "(", "self", ",", "func", ")", ":", "other", "=", "EscapedString", ".", "__new__", "(", "EscapedString", ")", "other", ".", "strings", "=", "[", "]", "for", "is_literal", ",", "value", "in", "self", ".", "strings", ":", "if", "not",...
Return the string with non-literal parts formatted. Args: func (callable): Callable that translates a string into a formatted string. Returns: `EscapedString` object.
[ "Return", "the", "string", "with", "non", "-", "literal", "parts", "formatted", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L764-L781
train
227,216
nerdvegas/rez
src/rez/rex.py
RexExecutor.execute_code
def execute_code(self, code, filename=None, isolate=False):
    """Execute code within the execution context.

    Args:
        code (str or SourceCode): Rex code to execute.
        filename (str): Filename to report if there are syntax errors.
        isolate (bool): If True, do not affect `self.globals` by executing
            this code.
    """
    def _apply():
        self.compile_code(code=code,
                          filename=filename,
                          exec_namespace=self.globals)

    # we want to execute the code using self.globals - if for no other
    # reason that self.formatter is pointing at self.globals, so if we
    # passed in a copy, we would also need to make self.formatter "look" at
    # the same copy - but we don't want to "pollute" our namespace, because
    # the same executor may be used to run multiple packages. Therefore,
    # we save a copy of self.globals before execution, and restore it after
    #
    if isolate:
        saved_globals = dict(self.globals)

        try:
            _apply()
        finally:
            self.globals.clear()
            self.globals.update(saved_globals)
    else:
        _apply()
python
[ "def", "execute_code", "(", "self", ",", "code", ",", "filename", "=", "None", ",", "isolate", "=", "False", ")", ":", "def", "_apply", "(", ")", ":", "self", ".", "compile_code", "(", "code", "=", "code", ",", "filename", "=", "filename", ",", "exec...
Execute code within the execution context. Args: code (str or SourceCode): Rex code to execute. filename (str): Filename to report if there are syntax errors. isolate (bool): If True, do not affect `self.globals` by executing this code.
[ "Execute", "code", "within", "the", "execution", "context", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L1187-L1217
train
227,217
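The isolation pattern in execute_code above (snapshot the globals dict, run, then restore it in place so that other objects holding a reference to the same dict stay valid) also works as a standalone sketch:

    def run_isolated(globals_dict, thunk):
        # clear + update restores contents while keeping dict identity,
        # so anything pointing at globals_dict keeps seeing the same object
        saved = dict(globals_dict)
        try:
            thunk()
        finally:
            globals_dict.clear()
            globals_dict.update(saved)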
nerdvegas/rez
src/rez/rex.py
RexExecutor.execute_function
def execute_function(self, func, *nargs, **kwargs):
    """
    Execute a function object within the execution context.
    @returns The result of the function call.
    """
    # makes a copy of the func
    import types
    fn = types.FunctionType(func.func_code,
                            func.func_globals.copy(),
                            name=func.func_name,
                            argdefs=func.func_defaults,
                            closure=func.func_closure)
    fn.func_globals.update(self.globals)

    error_class = Exception if config.catch_rex_errors else None

    try:
        return fn(*nargs, **kwargs)
    except RexError:
        raise
    except error_class as e:
        from inspect import getfile

        stack = traceback.format_exc()
        filename = getfile(func)

        raise RexError("Failed to exec %s:\n\n%s" % (filename, stack))
python
[ "def", "execute_function", "(", "self", ",", "func", ",", "*", "nargs", ",", "*", "*", "kwargs", ")", ":", "# makes a copy of the func", "import", "types", "fn", "=", "types", ".", "FunctionType", "(", "func", ".", "func_code", ",", "func", ".", "func_glob...
Execute a function object within the execution context. @returns The result of the function call.
[ "Execute", "a", "function", "object", "within", "the", "execution", "context", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L1219-L1245
train
227,218
nerdvegas/rez
src/rez/rex.py
RexExecutor.get_output
def get_output(self, style=OutputStyle.file):
    """Returns the result of all previous calls to execute_code."""
    return self.manager.get_output(style=style)
python
[ "def", "get_output", "(", "self", ",", "style", "=", "OutputStyle", ".", "file", ")", ":", "return", "self", ".", "manager", ".", "get_output", "(", "style", "=", "style", ")" ]
Returns the result of all previous calls to execute_code.
[ "Returns", "the", "result", "of", "all", "previous", "calls", "to", "execute_code", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L1247-L1249
train
227,219
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/filters/find.py
find.configure
def configure(self, graph, spanning_tree):
    """
    Configure the filter.

    @type  graph: graph
    @param graph: Graph.

    @type  spanning_tree: dictionary
    @param spanning_tree: Spanning tree.
    """
    self.graph = graph
    self.spanning_tree = spanning_tree
python
[ "def", "configure", "(", "self", ",", "graph", ",", "spanning_tree", ")", ":", "self", ".", "graph", "=", "graph", "self", ".", "spanning_tree", "=", "spanning_tree" ]
Configure the filter. @type graph: graph @param graph: Graph. @type spanning_tree: dictionary @param spanning_tree: Spanning tree.
[ "Configure", "the", "filter", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/filters/find.py#L47-L58
train
227,220
nerdvegas/rez
src/rez/package_filter.py
PackageFilterBase.iter_packages
def iter_packages(self, name, range_=None, paths=None):
    """Same as iter_packages in packages.py, but also applies this filter.

    Args:
        name (str): Name of the package, eg 'maya'.
        range_ (VersionRange or str): If provided, limits the versions
            returned to those in `range_`.
        paths (list of str, optional): paths to search for packages, defaults
            to `config.packages_path`.

    Returns:
        `Package` iterator.
    """
    for package in iter_packages(name, range_, paths):
        if not self.excludes(package):
            yield package
python
def iter_packages(self, name, range_=None, paths=None):
    """Same as iter_packages in packages.py, but also applies this filter.

    Args:
        name (str): Name of the package, eg 'maya'.
        range_ (VersionRange or str): If provided, limits the versions
            returned to those in `range_`.
        paths (list of str, optional): paths to search for packages, defaults
            to `config.packages_path`.

    Returns:
        `Package` iterator.
    """
    for package in iter_packages(name, range_, paths):
        if not self.excludes(package):
            yield package
[ "def", "iter_packages", "(", "self", ",", "name", ",", "range_", "=", "None", ",", "paths", "=", "None", ")", ":", "for", "package", "in", "iter_packages", "(", "name", ",", "range_", ",", "paths", ")", ":", "if", "not", "self", ".", "excludes", "(",...
Same as iter_packages in packages.py, but also applies this filter. Args: name (str): Name of the package, eg 'maya'. range_ (VersionRange or str): If provided, limits the versions returned to those in `range_`. paths (list of str, optional): paths to search for packages, defaults to `config.packages_path`. Returns: `Package` iterator.
[ "Same", "as", "iter_packages", "in", "packages", ".", "py", "but", "also", "applies", "this", "filter", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_filter.py#L49-L64
train
227,221
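A hedged usage sketch for the record above. The package name is illustrative, and the add_exclusion call reflects rez's PackageFilter API as best I recall, so treat the exact method name as an assumption:

from rez.package_filter import PackageFilter, Rule

f = PackageFilter()
f.add_exclusion(Rule.parse_rule("glob(maya-2015.*)"))

# versions matched by the exclusion rule never surface
for pkg in f.iter_packages("maya"):
    print(pkg.qualified_name)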
nerdvegas/rez
src/rez/package_filter.py
PackageFilter.copy
def copy(self):
    """Return a shallow copy of the filter.

    Adding rules to the copy will not alter the source.
    """
    other = PackageFilter.__new__(PackageFilter)
    other._excludes = self._excludes.copy()
    other._includes = self._includes.copy()
    return other
python
def copy(self):
    """Return a shallow copy of the filter.

    Adding rules to the copy will not alter the source.
    """
    other = PackageFilter.__new__(PackageFilter)
    other._excludes = self._excludes.copy()
    other._includes = self._includes.copy()
    return other
[ "def", "copy", "(", "self", ")", ":", "other", "=", "PackageFilter", ".", "__new__", "(", "PackageFilter", ")", "other", ".", "_excludes", "=", "self", ".", "_excludes", ".", "copy", "(", ")", "other", ".", "_includes", "=", "self", ".", "_includes", "...
Return a shallow copy of the filter. Adding rules to the copy will not alter the source.
[ "Return", "a", "shallow", "copy", "of", "the", "filter", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_filter.py#L126-L134
train
227,222
nerdvegas/rez
src/rez/package_filter.py
PackageFilter.cost
def cost(self):
    """Get the approximate cost of this filter.

    Cost is the total cost of the exclusion rules in this filter. The cost
    of family-specific filters is divided by 10.

    Returns:
        float: The approximate cost of the filter.
    """
    total = 0.0
    for family, rules in self._excludes.iteritems():
        cost = sum(x.cost() for x in rules)
        if family:
            cost = cost / float(10)
        total += cost
    return total
python
def cost(self):
    """Get the approximate cost of this filter.

    Cost is the total cost of the exclusion rules in this filter. The cost
    of family-specific filters is divided by 10.

    Returns:
        float: The approximate cost of the filter.
    """
    total = 0.0
    for family, rules in self._excludes.iteritems():
        cost = sum(x.cost() for x in rules)
        if family:
            cost = cost / float(10)
        total += cost
    return total
[ "def", "cost", "(", "self", ")", ":", "total", "=", "0.0", "for", "family", ",", "rules", "in", "self", ".", "_excludes", ".", "iteritems", "(", ")", ":", "cost", "=", "sum", "(", "x", ".", "cost", "(", ")", "for", "x", "in", "rules", ")", "if"...
Get the approximate cost of this filter. Cost is the total cost of the exclusion rules in this filter. The cost of family-specific filters is divided by 10. Returns: float: The approximate cost of the filter.
[ "Get", "the", "approximate", "cost", "of", "this", "filter", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_filter.py#L149-L164
train
227,223
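To make the costing concrete: one global rule of cost 10 plus one family-specific rule of cost 5 totals 10 + 5/10 = 10.5. A standalone sketch with stand-in rule objects (the class and dict below are illustrative, not rez types):

class FakeRule(object):
    def __init__(self, c):
        self._c = c
    def cost(self):
        return self._c

excludes = {None: [FakeRule(10)],    # global rule, full cost
            "maya": [FakeRule(5)]}   # family-specific rule, cost / 10

total = 0.0
for family, rules in excludes.items():
    cost = sum(x.cost() for x in rules)
    if family:
        cost = cost / float(10)
    total += cost
print(total)  # 10.5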
nerdvegas/rez
src/rez/package_filter.py
PackageFilterList.add_filter
def add_filter(self, package_filter):
    """Add a filter to the list.

    Args:
        package_filter (`PackageFilter`): Filter to add.
    """
    filters = self.filters + [package_filter]
    self.filters = sorted(filters, key=lambda x: x.cost)
python
def add_filter(self, package_filter):
    """Add a filter to the list.

    Args:
        package_filter (`PackageFilter`): Filter to add.
    """
    filters = self.filters + [package_filter]
    self.filters = sorted(filters, key=lambda x: x.cost)
[ "def", "add_filter", "(", "self", ",", "package_filter", ")", ":", "filters", "=", "self", ".", "filters", "+", "[", "package_filter", "]", "self", ".", "filters", "=", "sorted", "(", "filters", ",", "key", "=", "lambda", "x", ":", "x", ".", "cost", ...
Add a filter to the list. Args: package_filter (`PackageFilter`): Filter to add.
[ "Add", "a", "filter", "to", "the", "list", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_filter.py#L210-L217
train
227,224
nerdvegas/rez
src/rez/package_filter.py
PackageFilterList.copy
def copy(self):
    """Return a copy of the filter list.

    Adding rules to the copy will not alter the source.
    """
    other = PackageFilterList.__new__(PackageFilterList)
    other.filters = [x.copy() for x in self.filters]
    return other
python
def copy(self):
    """Return a copy of the filter list.

    Adding rules to the copy will not alter the source.
    """
    other = PackageFilterList.__new__(PackageFilterList)
    other.filters = [x.copy() for x in self.filters]
    return other
[ "def", "copy", "(", "self", ")", ":", "other", "=", "PackageFilterList", ".", "__new__", "(", "PackageFilterList", ")", "other", ".", "filters", "=", "[", "x", ".", "copy", "(", ")", "for", "x", "in", "self", ".", "filters", "]", "return", "other" ]
Return a copy of the filter list. Adding rules to the copy will not alter the source.
[ "Return", "a", "copy", "of", "the", "filter", "list", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_filter.py#L244-L251
train
227,225
nerdvegas/rez
src/rez/package_filter.py
Rule.parse_rule
def parse_rule(cls, txt):
    """Parse a rule from a string.

    See rezconfig.package_filter for an overview of valid strings.

    Args:
        txt (str): String to parse.

    Returns:
        `Rule` instance.
    """
    types = {"glob": GlobRule,
             "regex": RegexRule,
             "range": RangeRule,
             "before": TimestampRule,
             "after": TimestampRule}

    # parse form 'x(y)' into x, y
    label, txt = Rule._parse_label(txt)
    if label is None:
        if '*' in txt:
            label = "glob"
        else:
            label = "range"
    elif label not in types:
        raise ConfigurationError(
            "'%s' is not a valid package filter type" % label)

    rule_cls = types[label]
    txt_ = "%s(%s)" % (label, txt)

    try:
        rule = rule_cls._parse(txt_)
    except Exception as e:
        raise ConfigurationError("Error parsing package filter '%s': %s: %s"
                                 % (txt_, e.__class__.__name__, str(e)))
    return rule
python
def parse_rule(cls, txt):
    """Parse a rule from a string.

    See rezconfig.package_filter for an overview of valid strings.

    Args:
        txt (str): String to parse.

    Returns:
        `Rule` instance.
    """
    types = {"glob": GlobRule,
             "regex": RegexRule,
             "range": RangeRule,
             "before": TimestampRule,
             "after": TimestampRule}

    # parse form 'x(y)' into x, y
    label, txt = Rule._parse_label(txt)
    if label is None:
        if '*' in txt:
            label = "glob"
        else:
            label = "range"
    elif label not in types:
        raise ConfigurationError(
            "'%s' is not a valid package filter type" % label)

    rule_cls = types[label]
    txt_ = "%s(%s)" % (label, txt)

    try:
        rule = rule_cls._parse(txt_)
    except Exception as e:
        raise ConfigurationError("Error parsing package filter '%s': %s: %s"
                                 % (txt_, e.__class__.__name__, str(e)))
    return rule
[ "def", "parse_rule", "(", "cls", ",", "txt", ")", ":", "types", "=", "{", "\"glob\"", ":", "GlobRule", ",", "\"regex\"", ":", "RegexRule", ",", "\"range\"", ":", "RangeRule", ",", "\"before\"", ":", "TimestampRule", ",", "\"after\"", ":", "TimestampRule", ...
Parse a rule from a string. See rezconfig.package_filter for an overview of valid strings. Args: txt (str): String to parse. Returns: `Rule` instance.
[ "Parse", "a", "rule", "from", "a", "string", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_filter.py#L309-L345
train
227,226
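Illustrating the dispatch in the record above (argument strings are illustrative): unlabelled strings containing '*' are treated as globs, other unlabelled strings as version ranges, and explicit 'label(arg)' forms select a rule type directly.

# unlabelled, contains '*'  -> dispatched to GlobRule
Rule.parse_rule("maya-2017.*")
# unlabelled, no '*'        -> dispatched to RangeRule
Rule.parse_rule("maya<2016")
# explicit label forms
Rule.parse_rule("regex(maya-.*beta)")
Rule.parse_rule("after(1428419455)")  # TimestampRule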
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
AMQPReader.read_bit
def read_bit(self):
    """Read a single boolean value."""
    if not self.bitcount:
        self.bits = ord(self.input.read(1))
        self.bitcount = 8
    result = (self.bits & 1) == 1
    self.bits >>= 1
    self.bitcount -= 1
    return result
python
def read_bit(self):
    """Read a single boolean value."""
    if not self.bitcount:
        self.bits = ord(self.input.read(1))
        self.bitcount = 8
    result = (self.bits & 1) == 1
    self.bits >>= 1
    self.bitcount -= 1
    return result
[ "def", "read_bit", "(", "self", ")", ":", "if", "not", "self", ".", "bitcount", ":", "self", ".", "bits", "=", "ord", "(", "self", ".", "input", ".", "read", "(", "1", ")", ")", "self", ".", "bitcount", "=", "8", "result", "=", "(", "self", "."...
Read a single boolean value.
[ "Read", "a", "single", "boolean", "value", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L76-L84
train
227,227
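The bit accumulator above pops bits LSB-first, refilling from the stream one byte at a time. A self-contained re-implementation of the same scheme, for illustration only:

from io import BytesIO

class BitReader(object):
    # same accumulator scheme as AMQPReader.read_bit
    def __init__(self, data):
        self.input = BytesIO(data)
        self.bits = 0
        self.bitcount = 0

    def read_bit(self):
        if not self.bitcount:
            self.bits = ord(self.input.read(1))
            self.bitcount = 8
        result = (self.bits & 1) == 1
        self.bits >>= 1
        self.bitcount -= 1
        return result

r = BitReader(b'\x05')                    # 0b00000101
print([r.read_bit() for _ in range(3)])   # [True, False, True]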
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
AMQPReader.read_long
def read_long(self):
    """Read an unsigned 32-bit integer"""
    self.bitcount = self.bits = 0
    return unpack('>I', self.input.read(4))[0]
python
def read_long(self):
    """Read an unsigned 32-bit integer"""
    self.bitcount = self.bits = 0
    return unpack('>I', self.input.read(4))[0]
[ "def", "read_long", "(", "self", ")", ":", "self", ".", "bitcount", "=", "self", ".", "bits", "=", "0", "return", "unpack", "(", "'>I'", ",", "self", ".", "input", ".", "read", "(", "4", ")", ")", "[", "0", "]" ]
Read an unsigned 32-bit integer
[ "Read", "an", "unsigned", "32", "-", "bit", "integer" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L96-L99
train
227,228
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
AMQPReader.read_longlong
def read_longlong(self):
    """Read an unsigned 64-bit integer"""
    self.bitcount = self.bits = 0
    return unpack('>Q', self.input.read(8))[0]
python
def read_longlong(self):
    """Read an unsigned 64-bit integer"""
    self.bitcount = self.bits = 0
    return unpack('>Q', self.input.read(8))[0]
[ "def", "read_longlong", "(", "self", ")", ":", "self", ".", "bitcount", "=", "self", ".", "bits", "=", "0", "return", "unpack", "(", "'>Q'", ",", "self", ".", "input", ".", "read", "(", "8", ")", ")", "[", "0", "]" ]
Read an unsigned 64-bit integer
[ "Read", "an", "unsigned", "64", "-", "bit", "integer" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L101-L104
train
227,229
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
AMQPReader.read_float
def read_float(self):
    """Read float value."""
    self.bitcount = self.bits = 0
    return unpack('>d', self.input.read(8))[0]
python
def read_float(self):
    """Read float value."""
    self.bitcount = self.bits = 0
    return unpack('>d', self.input.read(8))[0]
[ "def", "read_float", "(", "self", ")", ":", "self", ".", "bitcount", "=", "self", ".", "bits", "=", "0", "return", "unpack", "(", "'>d'", ",", "self", ".", "input", ".", "read", "(", "8", ")", ")", "[", "0", "]" ]
Read float value.
[ "Read", "float", "value", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L106-L109
train
227,230
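The three fixed-width readers above are thin big-endian struct.unpack wrappers; note that read_float actually unpacks '>d', a 64-bit double, despite its name. A round-trip check of the same formats:

from struct import pack, unpack

assert unpack('>I', pack('>I', 4100))[0] == 4100      # 32-bit unsigned
assert unpack('>Q', pack('>Q', 2 ** 40))[0] == 2 ** 40  # 64-bit unsigned
assert unpack('>d', pack('>d', 1.5))[0] == 1.5        # 64-bit double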
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
AMQPReader.read_shortstr
def read_shortstr(self):
    """Read a short string that's stored in up to 255 bytes.

    The encoding isn't specified in the AMQP spec, so
    assume it's utf-8
    """
    self.bitcount = self.bits = 0
    slen = unpack('B', self.input.read(1))[0]
    return self.input.read(slen).decode('utf-8')
python
def read_shortstr(self):
    """Read a short string that's stored in up to 255 bytes.

    The encoding isn't specified in the AMQP spec, so
    assume it's utf-8
    """
    self.bitcount = self.bits = 0
    slen = unpack('B', self.input.read(1))[0]
    return self.input.read(slen).decode('utf-8')
[ "def", "read_shortstr", "(", "self", ")", ":", "self", ".", "bitcount", "=", "self", ".", "bits", "=", "0", "slen", "=", "unpack", "(", "'B'", ",", "self", ".", "input", ".", "read", "(", "1", ")", ")", "[", "0", "]", "return", "self", ".", "in...
Read a short string that's stored in up to 255 bytes. The encoding isn't specified in the AMQP spec, so assume it's utf-8
[ "Read", "a", "short", "string", "that", "s", "stored", "in", "up", "to", "255", "bytes", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L111-L120
train
227,231
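Wire-format sketch for the record above: one length byte followed by that many bytes, decoded as utf-8. The sample bytes are illustrative:

from struct import unpack

raw = b'\x05hello'
slen = unpack('B', raw[:1])[0]      # 5
text = raw[1:1 + slen].decode('utf-8')
print(text)                         # hello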
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
GenericContent._load_properties
def _load_properties(self, raw_bytes):
    """Given the raw bytes containing the property-flags and
    property-list from a content-frame-header, parse and insert
    into a dictionary stored in this object as an attribute named
    'properties'."""
    r = AMQPReader(raw_bytes)

    #
    # Read 16-bit shorts until we get one with a low bit set to zero
    #
    flags = []
    while 1:
        flag_bits = r.read_short()
        flags.append(flag_bits)
        if flag_bits & 1 == 0:
            break

    shift = 0
    d = {}
    for key, proptype in self.PROPERTIES:
        if shift == 0:
            if not flags:
                break
            flag_bits, flags = flags[0], flags[1:]
            shift = 15
        if flag_bits & (1 << shift):
            d[key] = getattr(r, 'read_' + proptype)()
        shift -= 1

    self.properties = d
python
def _load_properties(self, raw_bytes):
    """Given the raw bytes containing the property-flags and
    property-list from a content-frame-header, parse and insert
    into a dictionary stored in this object as an attribute named
    'properties'."""
    r = AMQPReader(raw_bytes)

    #
    # Read 16-bit shorts until we get one with a low bit set to zero
    #
    flags = []
    while 1:
        flag_bits = r.read_short()
        flags.append(flag_bits)
        if flag_bits & 1 == 0:
            break

    shift = 0
    d = {}
    for key, proptype in self.PROPERTIES:
        if shift == 0:
            if not flags:
                break
            flag_bits, flags = flags[0], flags[1:]
            shift = 15
        if flag_bits & (1 << shift):
            d[key] = getattr(r, 'read_' + proptype)()
        shift -= 1

    self.properties = d
[ "def", "_load_properties", "(", "self", ",", "raw_bytes", ")", ":", "r", "=", "AMQPReader", "(", "raw_bytes", ")", "#", "# Read 16-bit shorts until we get one with a low bit set to zero", "#", "flags", "=", "[", "]", "while", "1", ":", "flag_bits", "=", "r", "."...
Given the raw bytes containing the property-flags and property-list from a content-frame-header, parse and insert into a dictionary stored in this object as an attribute named 'properties'.
[ "Given", "the", "raw", "bytes", "containing", "the", "property", "-", "flags", "and", "property", "-", "list", "from", "a", "content", "-", "frame", "-", "header", "parse", "and", "insert", "into", "a", "dictionary", "stored", "in", "this", "object", "as",...
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L451-L479
train
227,232
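A worked example of the flag walk above, simplified to a single 16-bit flag word (the real loop re-arms shift to 15 each time a new word is consumed, and bit 0 of each word signals continuation). The two-entry property table is hypothetical:

# flag word with only bit 15 set: the first property is present,
# and the low continuation bit is 0, so no more flag words follow
flag_bits = 0x8000

PROPERTIES = [('content_type', 'shortstr'), ('delivery_mode', 'octet')]
shift = 15
present = []
for key, proptype in PROPERTIES:
    if flag_bits & (1 << shift):
        present.append(key)
    shift -= 1
print(present)  # ['content_type']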
nerdvegas/rez
src/rez/util.py
create_executable_script
def create_executable_script(filepath, body, program=None):
    """Create an executable script.

    Args:
        filepath (str): File to create.
        body (str or callable): Contents of the script. If a callable, its
            code is used as the script body.
        program (str): Name of program to launch the script, 'python' if None
    """
    program = program or "python"

    if callable(body):
        from rez.utils.sourcecode import SourceCode
        code = SourceCode(func=body)
        body = code.source

    if not body.endswith('\n'):
        body += '\n'

    with open(filepath, 'w') as f:
        # TODO: make cross platform
        f.write("#!/usr/bin/env %s\n" % program)
        f.write(body)

    # TODO: Although Windows supports os.chmod you can only set the readonly
    # flag. Setting the file readonly breaks the unit tests that expect to
    # clean up the files once the test has run. Temporarily we don't bother
    # setting the permissions, but this will need to change.
    if os.name == "posix":
        os.chmod(filepath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
                 | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
python
def create_executable_script(filepath, body, program=None):
    """Create an executable script.

    Args:
        filepath (str): File to create.
        body (str or callable): Contents of the script. If a callable, its
            code is used as the script body.
        program (str): Name of program to launch the script, 'python' if None
    """
    program = program or "python"

    if callable(body):
        from rez.utils.sourcecode import SourceCode
        code = SourceCode(func=body)
        body = code.source

    if not body.endswith('\n'):
        body += '\n'

    with open(filepath, 'w') as f:
        # TODO: make cross platform
        f.write("#!/usr/bin/env %s\n" % program)
        f.write(body)

    # TODO: Although Windows supports os.chmod you can only set the readonly
    # flag. Setting the file readonly breaks the unit tests that expect to
    # clean up the files once the test has run. Temporarily we don't bother
    # setting the permissions, but this will need to change.
    if os.name == "posix":
        os.chmod(filepath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
                 | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
[ "def", "create_executable_script", "(", "filepath", ",", "body", ",", "program", "=", "None", ")", ":", "program", "=", "program", "or", "\"python\"", "if", "callable", "(", "body", ")", ":", "from", "rez", ".", "utils", ".", "sourcecode", "import", "Sourc...
Create an executable script. Args: filepath (str): File to create. body (str or callable): Contents of the script. If a callable, its code is used as the script body. program (str): Name of program to launch the script, 'python' if None
[ "Create", "an", "executable", "script", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/util.py#L31-L60
train
227,233
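A hedged usage sketch of the record above: when body is a callable, its source is extracted via SourceCode and written under a '#!/usr/bin/env python' shebang. The path and function are illustrative:

def body():
    import sys
    print("hello from %s" % sys.argv[0])

# writes the shebang plus body()'s source, then chmods it executable
create_executable_script("/tmp/hello.py", body)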
nerdvegas/rez
src/rez/util.py
create_forwarding_script
def create_forwarding_script(filepath, module, func_name, *nargs, **kwargs):
    """Create a 'forwarding' script.

    A forwarding script is one that executes some arbitrary Rez function.
    This is used internally by Rez to dynamically create a script that uses
    Rez, even though the parent environment may not be configured to do so.
    """
    doc = dict(
        module=module,
        func_name=func_name)

    if nargs:
        doc["nargs"] = nargs
    if kwargs:
        doc["kwargs"] = kwargs

    body = dump_yaml(doc)
    create_executable_script(filepath, body, "_rez_fwd")
python
def create_forwarding_script(filepath, module, func_name, *nargs, **kwargs):
    """Create a 'forwarding' script.

    A forwarding script is one that executes some arbitrary Rez function.
    This is used internally by Rez to dynamically create a script that uses
    Rez, even though the parent environment may not be configured to do so.
    """
    doc = dict(
        module=module,
        func_name=func_name)

    if nargs:
        doc["nargs"] = nargs
    if kwargs:
        doc["kwargs"] = kwargs

    body = dump_yaml(doc)
    create_executable_script(filepath, body, "_rez_fwd")
[ "def", "create_forwarding_script", "(", "filepath", ",", "module", ",", "func_name", ",", "*", "nargs", ",", "*", "*", "kwargs", ")", ":", "doc", "=", "dict", "(", "module", "=", "module", ",", "func_name", "=", "func_name", ")", "if", "nargs", ":", "d...
Create a 'forwarding' script. A forwarding script is one that executes some arbitrary Rez function. This is used internally by Rez to dynamically create a script that uses Rez, even though the parent environment may not be configured to do so.
[ "Create", "a", "forwarding", "script", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/util.py#L63-L80
train
227,234
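The forwarded call is serialized as YAML behind a '_rez_fwd' shebang. A sketch of roughly what a hypothetical call would produce; the module/function names and the exact YAML layout are assumptions, not taken from the record:

create_forwarding_script("/tmp/fwd", "mymod", "main", "arg1", retries=2)
# /tmp/fwd then starts with '#!/usr/bin/env _rez_fwd' followed by YAML
# along the lines of:
#   func_name: main
#   kwargs: {retries: 2}
#   module: mymod
#   nargs: [arg1]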
nerdvegas/rez
src/rez/util.py
dedup
def dedup(seq):
    """Remove duplicates from a list while keeping order."""
    seen = set()
    for item in seq:
        if item not in seen:
            seen.add(item)
            yield item
python
def dedup(seq):
    """Remove duplicates from a list while keeping order."""
    seen = set()
    for item in seq:
        if item not in seen:
            seen.add(item)
            yield item
[ "def", "dedup", "(", "seq", ")", ":", "seen", "=", "set", "(", ")", "for", "item", "in", "seq", ":", "if", "item", "not", "in", "seen", ":", "seen", ".", "add", "(", "item", ")", "yield", "item" ]
Remove duplicates from a list while keeping order.
[ "Remove", "duplicates", "from", "a", "list", "while", "keeping", "order", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/util.py#L83-L89
train
227,235
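Note dedup is a generator, so wrap it in list() if you need a sequence; the first occurrence of each item wins:

print(list(dedup([3, 1, 3, 2, 1])))  # [3, 1, 2]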
nerdvegas/rez
src/rez/util.py
find_last_sublist
def find_last_sublist(list_, sublist):
    """Given a list, find the last occurrence of a sublist within it.

    Returns:
        Index where the sublist starts, or None if there is no match.
    """
    for i in reversed(range(len(list_) - len(sublist) + 1)):
        if list_[i] == sublist[0] and list_[i:i + len(sublist)] == sublist:
            return i
    return None
python
def find_last_sublist(list_, sublist):
    """Given a list, find the last occurrence of a sublist within it.

    Returns:
        Index where the sublist starts, or None if there is no match.
    """
    for i in reversed(range(len(list_) - len(sublist) + 1)):
        if list_[i] == sublist[0] and list_[i:i + len(sublist)] == sublist:
            return i
    return None
[ "def", "find_last_sublist", "(", "list_", ",", "sublist", ")", ":", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "list_", ")", "-", "len", "(", "sublist", ")", "+", "1", ")", ")", ":", "if", "list_", "[", "i", "]", "==", "sublist"...
Given a list, find the last occurrence of a sublist within it. Returns: Index where the sublist starts, or None if there is no match.
[ "Given", "a", "list", "find", "the", "last", "occurance", "of", "a", "sublist", "within", "it", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/util.py#L157-L166
train
227,236
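A quick usage check for the record above: the sublist [1, 2] occurs at indices 0 and 3, and the last index is returned:

print(find_last_sublist([1, 2, 0, 1, 2], [1, 2]))  # 3
print(find_last_sublist([1, 2, 0], [9]))           # None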
nerdvegas/rez
src/rez/package_help.py
PackageHelp.open
def open(self, section_index=0):
    """Launch a help section."""
    uri = self._sections[section_index][1]
    if len(uri.split()) == 1:
        self._open_url(uri)
    else:
        if self._verbose:
            print "running command: %s" % uri
        p = popen(uri, shell=True)
        p.wait()
python
def open(self, section_index=0):
    """Launch a help section."""
    uri = self._sections[section_index][1]
    if len(uri.split()) == 1:
        self._open_url(uri)
    else:
        if self._verbose:
            print "running command: %s" % uri
        p = popen(uri, shell=True)
        p.wait()
[ "def", "open", "(", "self", ",", "section_index", "=", "0", ")", ":", "uri", "=", "self", ".", "_sections", "[", "section_index", "]", "[", "1", "]", "if", "len", "(", "uri", ".", "split", "(", ")", ")", "==", "1", ":", "self", ".", "_open_url", ...
Launch a help section.
[ "Launch", "a", "help", "section", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_help.py#L88-L97
train
227,237
nerdvegas/rez
src/rez/package_help.py
PackageHelp.print_info
def print_info(self, buf=None):
    """Print help sections."""
    buf = buf or sys.stdout
    print >> buf, "Sections:"
    for i, section in enumerate(self._sections):
        print >> buf, "  %s:\t%s (%s)" % (i + 1, section[0], section[1])
python
def print_info(self, buf=None):
    """Print help sections."""
    buf = buf or sys.stdout
    print >> buf, "Sections:"
    for i, section in enumerate(self._sections):
        print >> buf, "  %s:\t%s (%s)" % (i + 1, section[0], section[1])
[ "def", "print_info", "(", "self", ",", "buf", "=", "None", ")", ":", "buf", "=", "buf", "or", "sys", ".", "stdout", "print", ">>", "buf", ",", "\"Sections:\"", "for", "i", ",", "section", "in", "enumerate", "(", "self", ".", "_sections", ")", ":", ...
Print help sections.
[ "Print", "help", "sections", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_help.py#L99-L104
train
227,238
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.set_servers
def set_servers(self, servers):
    """
    Set the pool of servers used by this client.

    @param servers: an array of servers.
    Servers can be passed in two forms:
        1. Strings of the form C{"host:port"}, which implies a default
        weight of 1.
        2. Tuples of the form C{("host:port", weight)}, where C{weight} is
        an integer weight value.
    """
    self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
                          socket_timeout=self.socket_timeout,
                          flush_on_reconnect=self.flush_on_reconnect)
                    for s in servers]
    self._init_buckets()
python
def set_servers(self, servers):
    """
    Set the pool of servers used by this client.

    @param servers: an array of servers.
    Servers can be passed in two forms:
        1. Strings of the form C{"host:port"}, which implies a default
        weight of 1.
        2. Tuples of the form C{("host:port", weight)}, where C{weight} is
        an integer weight value.
    """
    self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
                          socket_timeout=self.socket_timeout,
                          flush_on_reconnect=self.flush_on_reconnect)
                    for s in servers]
    self._init_buckets()
[ "def", "set_servers", "(", "self", ",", "servers", ")", ":", "self", ".", "servers", "=", "[", "_Host", "(", "s", ",", "self", ".", "debug", ",", "dead_retry", "=", "self", ".", "dead_retry", ",", "socket_timeout", "=", "self", ".", "socket_timeout", "...
Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value.
[ "Set", "the", "pool", "of", "servers", "used", "by", "this", "client", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L240-L254
train
227,239
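Restating the two accepted server specs from the docstring above; addresses are illustrative and mc is an existing Client instance:

mc.set_servers([
    "127.0.0.1:11211",        # plain string: weight defaults to 1
    ("10.0.0.2:11211", 3),    # (host, weight): hashed 3x as often
])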
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.get_stats
def get_stats(self, stat_args = None):
    '''Get statistics from each of the servers.

    @param stat_args: Additional arguments to pass to the memcache
        "stats" command.

    @return: A list of tuples ( server_identifier, stats_dictionary ).
        The dictionary contains a number of name/value pairs specifying
        the name of the status field and the string value associated with
        it. The values are not converted from strings.
    '''
    data = []
    for s in self.servers:
        if not s.connect():
            continue
        if s.family == socket.AF_INET:
            name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
        elif s.family == socket.AF_INET6:
            name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight )
        else:
            name = 'unix:%s (%s)' % ( s.address, s.weight )
        if not stat_args:
            s.send_cmd('stats')
        else:
            s.send_cmd('stats ' + stat_args)
        serverData = {}
        data.append(( name, serverData ))
        readline = s.readline
        while 1:
            line = readline()
            if not line or line.strip() in ('END', 'RESET'):
                break
            stats = line.split(' ', 2)
            serverData[stats[1]] = stats[2]

    return(data)
python
def get_stats(self, stat_args = None):
    '''Get statistics from each of the servers.

    @param stat_args: Additional arguments to pass to the memcache
        "stats" command.

    @return: A list of tuples ( server_identifier, stats_dictionary ).
        The dictionary contains a number of name/value pairs specifying
        the name of the status field and the string value associated with
        it. The values are not converted from strings.
    '''
    data = []
    for s in self.servers:
        if not s.connect():
            continue
        if s.family == socket.AF_INET:
            name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
        elif s.family == socket.AF_INET6:
            name = '[%s]:%s (%s)' % ( s.ip, s.port, s.weight )
        else:
            name = 'unix:%s (%s)' % ( s.address, s.weight )
        if not stat_args:
            s.send_cmd('stats')
        else:
            s.send_cmd('stats ' + stat_args)
        serverData = {}
        data.append(( name, serverData ))
        readline = s.readline
        while 1:
            line = readline()
            if not line or line.strip() in ('END', 'RESET'):
                break
            stats = line.split(' ', 2)
            serverData[stats[1]] = stats[2]

    return(data)
[ "def", "get_stats", "(", "self", ",", "stat_args", "=", "None", ")", ":", "data", "=", "[", "]", "for", "s", "in", "self", ".", "servers", ":", "if", "not", "s", ".", "connect", "(", ")", ":", "continue", "if", "s", ".", "family", "==", "socket",...
Get statistics from each of the servers. @param stat_args: Additional arguments to pass to the memcache "stats" command. @return: A list of tuples ( server_identifier, stats_dictionary ). The dictionary contains a number of name/value pairs specifying the name of the status field and the string value associated with it. The values are not converted from strings.
[ "Get", "statistics", "from", "each", "of", "the", "servers", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L256-L290
train
227,240
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.delete_multi
def delete_multi(self, keys, time=0, key_prefix=''):
    '''
    Delete multiple keys in the memcache doing just one query.

    >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
    >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
    1
    >>> mc.delete_multi(['key1', 'key2'])
    1
    >>> mc.get_multi(['key1', 'key2']) == {}
    1

    This method is recommended over iterated regular L{delete}s as it
    reduces total latency, since your app doesn't have to wait for each
    round-trip of L{delete} before sending the next one.

    @param keys: An iterable of keys to clear
    @param time: number of seconds any subsequent set / update commands
        should fail. Defaults to 0 for no delay.
    @param key_prefix: Optional string to prepend to each key when sending
        to memcache. See docs for L{get_multi} and L{set_multi}.
    @return: 1 if no failure in communication with any memcacheds.
    @rtype: int
    '''
    self._statlog('delete_multi')

    server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
        keys, key_prefix)

    # send out all requests on each server before reading anything
    dead_servers = []

    rc = 1
    for server in server_keys.iterkeys():
        bigcmd = []
        write = bigcmd.append
        if time != None:
            for key in server_keys[server]:  # These are mangled keys
                write("delete %s %d\r\n" % (key, time))
        else:
            for key in server_keys[server]:  # These are mangled keys
                write("delete %s\r\n" % key)
        try:
            server.send_cmds(''.join(bigcmd))
        except socket.error, msg:
            rc = 0
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            dead_servers.append(server)

    # if any servers died on the way, don't expect them to respond.
    for server in dead_servers:
        del server_keys[server]

    for server, keys in server_keys.iteritems():
        try:
            for key in keys:
                server.expect("DELETED")
        except socket.error, msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            rc = 0
    return rc
python
def delete_multi(self, keys, time=0, key_prefix=''):
    '''
    Delete multiple keys in the memcache doing just one query.

    >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
    >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
    1
    >>> mc.delete_multi(['key1', 'key2'])
    1
    >>> mc.get_multi(['key1', 'key2']) == {}
    1

    This method is recommended over iterated regular L{delete}s as it
    reduces total latency, since your app doesn't have to wait for each
    round-trip of L{delete} before sending the next one.

    @param keys: An iterable of keys to clear
    @param time: number of seconds any subsequent set / update commands
        should fail. Defaults to 0 for no delay.
    @param key_prefix: Optional string to prepend to each key when sending
        to memcache. See docs for L{get_multi} and L{set_multi}.
    @return: 1 if no failure in communication with any memcacheds.
    @rtype: int
    '''
    self._statlog('delete_multi')

    server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
        keys, key_prefix)

    # send out all requests on each server before reading anything
    dead_servers = []

    rc = 1
    for server in server_keys.iterkeys():
        bigcmd = []
        write = bigcmd.append
        if time != None:
            for key in server_keys[server]:  # These are mangled keys
                write("delete %s %d\r\n" % (key, time))
        else:
            for key in server_keys[server]:  # These are mangled keys
                write("delete %s\r\n" % key)
        try:
            server.send_cmds(''.join(bigcmd))
        except socket.error, msg:
            rc = 0
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            dead_servers.append(server)

    # if any servers died on the way, don't expect them to respond.
    for server in dead_servers:
        del server_keys[server]

    for server, keys in server_keys.iteritems():
        try:
            for key in keys:
                server.expect("DELETED")
        except socket.error, msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            rc = 0
    return rc
[ "def", "delete_multi", "(", "self", ",", "keys", ",", "time", "=", "0", ",", "key_prefix", "=", "''", ")", ":", "self", ".", "_statlog", "(", "'delete_multi'", ")", "server_keys", ",", "prefixed_to_orig_key", "=", "self", ".", "_map_and_prefix_keys", "(", ...
Delete multiple keys in the memcache doing just one query. >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'} 1 >>> mc.delete_multi(['key1', 'key2']) 1 >>> mc.get_multi(['key1', 'key2']) == {} 1 This method is recommended over iterated regular L{delete}s as it reduces total latency, since your app doesn't have to wait for each round-trip of L{delete} before sending the next one. @param keys: An iterable of keys to clear @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @param key_prefix: Optional string to prepend to each key when sending to memcache. See docs for L{get_multi} and L{set_multi}. @return: 1 if no failure in communication with any memcacheds. @rtype: int
[ "Delete", "multiple", "keys", "in", "the", "memcache", "doing", "just", "one", "query", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L365-L429
train
227,241
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.delete
def delete(self, key, time=0):
    '''Deletes a key from the memcache.

    @return: Nonzero on success.
    @param time: number of seconds any subsequent set / update commands
        should fail. Defaults to 0 for no delay.
    @rtype: int
    '''
    if self.do_check_key:
        self.check_key(key)
    server, key = self._get_server(key)
    if not server:
        return 0
    self._statlog('delete')
    if time != None and time != 0:
        cmd = "delete %s %d" % (key, time)
    else:
        cmd = "delete %s" % key

    try:
        server.send_cmd(cmd)
        line = server.readline()
        if line and line.strip() in ['DELETED', 'NOT_FOUND']:
            return 1
        self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
                      % repr(line))
    except socket.error, msg:
        if isinstance(msg, tuple):
            msg = msg[1]
        server.mark_dead(msg)
    return 0
python
def delete(self, key, time=0):
    '''Deletes a key from the memcache.

    @return: Nonzero on success.
    @param time: number of seconds any subsequent set / update commands
        should fail. Defaults to 0 for no delay.
    @rtype: int
    '''
    if self.do_check_key:
        self.check_key(key)
    server, key = self._get_server(key)
    if not server:
        return 0
    self._statlog('delete')
    if time != None and time != 0:
        cmd = "delete %s %d" % (key, time)
    else:
        cmd = "delete %s" % key

    try:
        server.send_cmd(cmd)
        line = server.readline()
        if line and line.strip() in ['DELETED', 'NOT_FOUND']:
            return 1
        self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
                      % repr(line))
    except socket.error, msg:
        if isinstance(msg, tuple):
            msg = msg[1]
        server.mark_dead(msg)
    return 0
[ "def", "delete", "(", "self", ",", "key", ",", "time", "=", "0", ")", ":", "if", "self", ".", "do_check_key", ":", "self", ".", "check_key", "(", "key", ")", "server", ",", "key", "=", "self", ".", "_get_server", "(", "key", ")", "if", "not", "se...
Deletes a key from the memcache. @return: Nonzero on success. @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @rtype: int
[ "Deletes", "a", "key", "from", "the", "memcache", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L431-L459
train
227,242
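Worth noting from the code above: a missing key still counts as success (NOT_FOUND also returns 1), so 0 signals a communication problem rather than a miss. A hedged usage sketch; mc is an existing Client, and the key and handler are hypothetical:

if not mc.delete("session:42"):
    # 0 here means a dead/unreachable server, not merely a missing key
    retry_or_fallback()  # hypothetical handler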
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.add
def add(self, key, val, time = 0, min_compress_len = 0):
    '''
    Add new key with value.

    Like L{set}, but only stores in memcache if the key doesn't already
    exist.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("add", key, val, time, min_compress_len)
python
def add(self, key, val, time = 0, min_compress_len = 0):
    '''
    Add new key with value.

    Like L{set}, but only stores in memcache if the key doesn't already
    exist.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("add", key, val, time, min_compress_len)
[ "def", "add", "(", "self", ",", "key", ",", "val", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ")", ":", "return", "self", ".", "_set", "(", "\"add\"", ",", "key", ",", "val", ",", "time", ",", "min_compress_len", ")" ]
Add new key with value. Like L{set}, but only stores in memcache if the key doesn't already exist. @return: Nonzero on success. @rtype: int
[ "Add", "new", "key", "with", "value", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L517-L526
train
227,243
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.append
def append(self, key, val, time=0, min_compress_len=0):
    '''Append the value to the end of the existing key's value.

    Only stores in memcache if key already exists.
    Also see L{prepend}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("append", key, val, time, min_compress_len)
python
def append(self, key, val, time=0, min_compress_len=0):
    '''Append the value to the end of the existing key's value.

    Only stores in memcache if key already exists.
    Also see L{prepend}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("append", key, val, time, min_compress_len)
[ "def", "append", "(", "self", ",", "key", ",", "val", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ")", ":", "return", "self", ".", "_set", "(", "\"append\"", ",", "key", ",", "val", ",", "time", ",", "min_compress_len", ")" ]
Append the value to the end of the existing key's value. Only stores in memcache if key already exists. Also see L{prepend}. @return: Nonzero on success. @rtype: int
[ "Append", "the", "value", "to", "the", "end", "of", "the", "existing", "key", "s", "value", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L528-L537
train
227,244
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.prepend
def prepend(self, key, val, time=0, min_compress_len=0):
    '''Prepend the value to the beginning of the existing key's value.

    Only stores in memcache if key already exists.
    Also see L{append}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("prepend", key, val, time, min_compress_len)
python
def prepend(self, key, val, time=0, min_compress_len=0):
    '''Prepend the value to the beginning of the existing key's value.

    Only stores in memcache if key already exists.
    Also see L{append}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("prepend", key, val, time, min_compress_len)
[ "def", "prepend", "(", "self", ",", "key", ",", "val", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ")", ":", "return", "self", ".", "_set", "(", "\"prepend\"", ",", "key", ",", "val", ",", "time", ",", "min_compress_len", ")" ]
Prepend the value to the beginning of the existing key's value. Only stores in memcache if key already exists. Also see L{append}. @return: Nonzero on success. @rtype: int
[ "Prepend", "the", "value", "to", "the", "beginning", "of", "the", "existing", "key", "s", "value", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L539-L548
train
227,245
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.replace
def replace(self, key, val, time=0, min_compress_len=0):
    '''Replace existing key with value.

    Like L{set}, but only stores in memcache if the key already exists.
    The opposite of L{add}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("replace", key, val, time, min_compress_len)
python
def replace(self, key, val, time=0, min_compress_len=0):
    '''Replace existing key with value.

    Like L{set}, but only stores in memcache if the key already exists.
    The opposite of L{add}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("replace", key, val, time, min_compress_len)
[ "def", "replace", "(", "self", ",", "key", ",", "val", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ")", ":", "return", "self", ".", "_set", "(", "\"replace\"", ",", "key", ",", "val", ",", "time", ",", "min_compress_len", ")" ]
Replace existing key with value. Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int
[ "Replace", "existing", "key", "with", "value", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L550-L559
train
227,246
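The store variants above all funnel into the same _set helper and differ only in the precondition memcached enforces. A hedged sketch of the contract, with mc an existing Client and keys illustrative:

mc.set("k", "v1")      # unconditional store
mc.add("k", "v2")      # returns 0: 'k' already exists
mc.replace("k", "v3")  # stores: 'k' exists
mc.append("k", "!")    # 'k' is now "v3!"
mc.prepend("k", ">")   # 'k' is now ">v3!"
mc.add("k2", "x")      # stores: 'k2' did not exist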
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.set
def set(self, key, val, time=0, min_compress_len=0):
    '''Unconditionally sets a key to a given value in the memcache.

    The C{key} can optionally be a tuple, with the first element
    being the server hash value and the second being the key, if you
    want to avoid making this module calculate a hash value. You may
    prefer, for example, to keep all of a given user's objects on the
    same memcache server, so you could use the user's unique id as
    the hash value.

    @return: Nonzero on success.
    @rtype: int
    @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>. We
        default to 0 == cache forever.
    @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the zlib.compress()
        routine. If the value being cached is a string, then the
        length of the string is measured, else if the value is an
        object, then the length of the pickle result is measured. If
        the resulting attempt at compression yields a larger string
        than the input, then it is discarded. For backwards
        compatibility, this parameter defaults to 0, indicating don't
        ever try to compress.
    '''
    return self._set("set", key, val, time, min_compress_len)
python
def set(self, key, val, time=0, min_compress_len=0):
    '''Unconditionally sets a key to a given value in the memcache.

    The C{key} can optionally be a tuple, with the first element
    being the server hash value and the second being the key, if you
    want to avoid making this module calculate a hash value. You may
    prefer, for example, to keep all of a given user's objects on the
    same memcache server, so you could use the user's unique id as
    the hash value.

    @return: Nonzero on success.
    @rtype: int
    @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>. We
        default to 0 == cache forever.
    @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the zlib.compress()
        routine. If the value being cached is a string, then the
        length of the string is measured, else if the value is an
        object, then the length of the pickle result is measured. If
        the resulting attempt at compression yields a larger string
        than the input, then it is discarded. For backwards
        compatibility, this parameter defaults to 0, indicating don't
        ever try to compress.
    '''
    return self._set("set", key, val, time, min_compress_len)
[ "def", "set", "(", "self", ",", "key", ",", "val", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ")", ":", "return", "self", ".", "_set", "(", "\"set\"", ",", "key", ",", "val", ",", "time", ",", "min_compress_len", ")" ]
Unconditionally sets a key to a given value in the memcache. The C{key} can optionally be a tuple, with the first element being the server hash value and the second being the key, if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress.
[ "Unconditionally", "sets", "a", "key", "to", "a", "given", "value", "in", "the", "memcache", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L561-L585
train
227,247
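The tuple form of key described above lets the caller pick the server bucket directly, so related entries co-locate. A hedged sketch; ids and keys are illustrative and mc is an existing Client:

user_id = 42
# both entries hash to the same bucket, so they land on one server
mc.set((user_id, "profile:42"), {"name": "ada"})
mc.set((user_id, "prefs:42"), {"theme": "dark"})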
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.set_multi
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
    '''
    Sets multiple keys in the memcache doing just one query.

    >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
    >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
    1

    This method is recommended over regular L{set} as it lowers the
    number of total packets flying around your network, reducing
    total latency, since your app doesn't have to wait for each
    round-trip of L{set} before sending the next one.

    @param mapping: A dict of key/value pairs to set.
    @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>. We
        default to 0 == cache forever.
    @param key_prefix: Optional string to prepend to each key when
        sending to memcache. Allows you to efficiently stuff these
        keys into a pseudo-namespace in memcache:

        >>> notset_keys = mc.set_multi(
        ...     {'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
        >>> len(notset_keys) == 0
        True
        >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
        True

        Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful
        in conjunction with a higher-level layer which applies
        namespaces to data in memcache. In this case, the return result
        would be the list of notset original keys, prefix not applied.

    @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the zlib.compress()
        routine. If the value being cached is a string, then the
        length of the string is measured, else if the value is an
        object, then the length of the pickle result is measured. If
        the resulting attempt at compression yields a larger string
        than the input, then it is discarded. For backwards
        compatibility, this parameter defaults to 0, indicating don't
        ever try to compress.

    @return: List of keys which failed to be stored [ memcache out of
        memory, etc. ].
    @rtype: list
    '''
    self._statlog('set_multi')

    server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
        mapping.iterkeys(), key_prefix)

    # send out all requests on each server before reading anything
    dead_servers = []
    notstored = []  # original keys.

    for server in server_keys.iterkeys():
        bigcmd = []
        write = bigcmd.append
        try:
            for key in server_keys[server]:  # These are mangled keys
                store_info = self._val_to_store_info(
                    mapping[prefixed_to_orig_key[key]],
                    min_compress_len)
                if store_info:
                    write("set %s %d %d %d\r\n%s\r\n" % (key,
                          store_info[0], time, store_info[1],
                          store_info[2]))
                else:
                    notstored.append(prefixed_to_orig_key[key])
            server.send_cmds(''.join(bigcmd))
        except socket.error, msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            dead_servers.append(server)

    # if any servers died on the way, don't expect them to respond.
    for server in dead_servers:
        del server_keys[server]

    # short-circuit if there are no servers, just return all keys
    if not server_keys:
        return(mapping.keys())

    for server, keys in server_keys.iteritems():
        try:
            for key in keys:
                if server.readline() == 'STORED':
                    continue
                else:
                    notstored.append(prefixed_to_orig_key[key])  # un-mangle.
        except (_Error, socket.error), msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
    return notstored
python
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
    '''
    Sets multiple keys in the memcache doing just one query.

    >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
    >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
    1

    This method is recommended over regular L{set} as it lowers the
    number of total packets flying around your network, reducing
    total latency, since your app doesn't have to wait for each
    round-trip of L{set} before sending the next one.

    @param mapping: A dict of key/value pairs to set.
    @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>. We
        default to 0 == cache forever.
    @param key_prefix: Optional string to prepend to each key when
        sending to memcache. Allows you to efficiently stuff these
        keys into a pseudo-namespace in memcache:

        >>> notset_keys = mc.set_multi(
        ...     {'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
        >>> len(notset_keys) == 0
        True
        >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
        True

        Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful
        in conjunction with a higher-level layer which applies
        namespaces to data in memcache. In this case, the return result
        would be the list of notset original keys, prefix not applied.

    @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the zlib.compress()
        routine. If the value being cached is a string, then the
        length of the string is measured, else if the value is an
        object, then the length of the pickle result is measured. If
        the resulting attempt at compression yields a larger string
        than the input, then it is discarded. For backwards
        compatibility, this parameter defaults to 0, indicating don't
        ever try to compress.

    @return: List of keys which failed to be stored [ memcache out of
        memory, etc. ].
    @rtype: list
    '''
    self._statlog('set_multi')

    server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
        mapping.iterkeys(), key_prefix)

    # send out all requests on each server before reading anything
    dead_servers = []
    notstored = []  # original keys.

    for server in server_keys.iterkeys():
        bigcmd = []
        write = bigcmd.append
        try:
            for key in server_keys[server]:  # These are mangled keys
                store_info = self._val_to_store_info(
                    mapping[prefixed_to_orig_key[key]],
                    min_compress_len)
                if store_info:
                    write("set %s %d %d %d\r\n%s\r\n" % (key,
                          store_info[0], time, store_info[1],
                          store_info[2]))
                else:
                    notstored.append(prefixed_to_orig_key[key])
            server.send_cmds(''.join(bigcmd))
        except socket.error, msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
            dead_servers.append(server)

    # if any servers died on the way, don't expect them to respond.
    for server in dead_servers:
        del server_keys[server]

    # short-circuit if there are no servers, just return all keys
    if not server_keys:
        return(mapping.keys())

    for server, keys in server_keys.iteritems():
        try:
            for key in keys:
                if server.readline() == 'STORED':
                    continue
                else:
                    notstored.append(prefixed_to_orig_key[key])  # un-mangle.
        except (_Error, socket.error), msg:
            if isinstance(msg, tuple):
                msg = msg[1]
            server.mark_dead(msg)
    return notstored
[ "def", "set_multi", "(", "self", ",", "mapping", ",", "time", "=", "0", ",", "key_prefix", "=", "''", ",", "min_compress_len", "=", "0", ")", ":", "self", ".", "_statlog", "(", "'set_multi'", ")", "server_keys", ",", "prefixed_to_orig_key", "=", "self", ...
Sets multiple keys in the memcache doing just one query. >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'} 1 This method is recommended over regular L{set} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{set} before sending the next one. @param mapping: A dict of key/value pairs to set. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache: >>> notset_keys = mc.set_multi( ... {'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_') >>> len(notset_keys) == 0 True >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'} True Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache. In this case, the return result would be the list of notset original keys, prefix not applied. @param min_compress_len: The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @return: List of keys which failed to be stored [ memcache out of memory, etc. ]. @rtype: list
[ "Sets", "multiple", "keys", "in", "the", "memcache", "doing", "just", "one", "query", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L658-L755
train
227,248
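One subtlety worth restating from the docstring above: the return value lists the original, unprefixed keys that failed to store, so an empty list means complete success. A short check mirroring the record's own doctest:

failed = mc.set_multi({"k1": 1, "k2": 2}, key_prefix="subspace_")
assert failed == []  # stored as subspace_k1 / subspace_k2
mc.get_multi(["k1", "k2"], key_prefix="subspace_")  # {'k1': 1, 'k2': 2}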
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
Client.get_multi
def get_multi(self, keys, key_prefix=''): ''' Retrieves multiple keys from the memcache doing just one query. >>> success = mc.set("foo", "bar") >>> success = mc.set("baz", 42) >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42} 1 >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] 1 This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'. >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1 get_mult [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == [] 1 >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'} 1 This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the retured dictionary will not have it present. ''' self._statlog('get_multi') server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix) # send out all requests on each server before reading anything dead_servers = [] for server in server_keys.iterkeys(): try: server.send_cmd("get %s" % " ".join(server_keys[server])) except socket.error, msg: if isinstance(msg, tuple): msg = msg[1] server.mark_dead(msg) dead_servers.append(server) # if any servers died on the way, don't expect them to respond. for server in dead_servers: del server_keys[server] retvals = {} for server in server_keys.iterkeys(): try: line = server.readline() while line and line != 'END': rkey, flags, rlen = self._expectvalue(server, line) # Bo Yang reports that this can sometimes be None if rkey is not None: val = self._recv_value(server, flags, rlen) retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key. line = server.readline() except (_Error, socket.error), msg: if isinstance(msg, tuple): msg = msg[1] server.mark_dead(msg) return retvals
python
def get_multi(self, keys, key_prefix=''): ''' Retrieves multiple keys from the memcache doing just one query. >>> success = mc.set("foo", "bar") >>> success = mc.set("baz", 42) >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42} True >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] True This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'. >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} True get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == [] True >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'} True This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present. ''' self._statlog('get_multi') server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix) # send out all requests on each server before reading anything dead_servers = [] for server in server_keys.iterkeys(): try: server.send_cmd("get %s" % " ".join(server_keys[server])) except socket.error, msg: if isinstance(msg, tuple): msg = msg[1] server.mark_dead(msg) dead_servers.append(server) # if any servers died on the way, don't expect them to respond. for server in dead_servers: del server_keys[server] retvals = {} for server in server_keys.iterkeys(): try: line = server.readline() while line and line != 'END': rkey, flags, rlen = self._expectvalue(server, line) # Bo Yang reports that this can sometimes be None if rkey is not None: val = self._recv_value(server, flags, rlen) retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key. line = server.readline() except (_Error, socket.error), msg: if isinstance(msg, tuple): msg = msg[1] server.mark_dead(msg) return retvals
[ "def", "get_multi", "(", "self", ",", "keys", ",", "key_prefix", "=", "''", ")", ":", "self", ".", "_statlog", "(", "'get_multi'", ")", "server_keys", ",", "prefixed_to_orig_key", "=", "self", ".", "_map_and_prefix_keys", "(", "keys", ",", "key_prefix", ")",...
Retrieves multiple keys from the memcache doing just one query. >>> success = mc.set("foo", "bar") >>> success = mc.set("baz", 42) >>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42} True >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] True This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'. >>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} True get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == [] True >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'} True This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
[ "Retrieves", "multiple", "keys", "from", "the", "memcache", "doing", "just", "one", "query", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L908-L978
train
227,249
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
_Host.readline
def readline(self, raise_exception=False): """Read a line and return it. If "raise_exception" is set, raise _ConnectionDeadError if the read fails, otherwise return an empty string. """ buf = self.buffer if self.socket: recv = self.socket.recv else: recv = lambda bufsize: '' while True: index = buf.find('\r\n') if index >= 0: break data = recv(4096) if not data: # connection close, let's kill it and raise self.mark_dead('connection closed in readline()') if raise_exception: raise _ConnectionDeadError() else: return '' buf += data self.buffer = buf[index+2:] return buf[:index]
python
def readline(self, raise_exception=False): """Read a line and return it. If "raise_exception" is set, raise _ConnectionDeadError if the read fails, otherwise return an empty string. """ buf = self.buffer if self.socket: recv = self.socket.recv else: recv = lambda bufsize: '' while True: index = buf.find('\r\n') if index >= 0: break data = recv(4096) if not data: # connection close, let's kill it and raise self.mark_dead('connection closed in readline()') if raise_exception: raise _ConnectionDeadError() else: return '' buf += data self.buffer = buf[index+2:] return buf[:index]
[ "def", "readline", "(", "self", ",", "raise_exception", "=", "False", ")", ":", "buf", "=", "self", ".", "buffer", "if", "self", ".", "socket", ":", "recv", "=", "self", ".", "socket", ".", "recv", "else", ":", "recv", "=", "lambda", "bufsize", ":", ...
Read a line and return it. If "raise_exception" is set, raise _ConnectionDeadError if the read fails, otherwise return an empty string.
[ "Read", "a", "line", "and", "return", "it", ".", "If", "raise_exception", "is", "set", "raise", "_ConnectionDeadError", "if", "the", "read", "fails", "otherwise", "return", "an", "empty", "string", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L1168-L1194
train
227,250
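The buffer-and-scan pattern in readline() above works on any chunked stream; here is a standalone sketch with an in-memory chunk list standing in for the socket (chunk boundaries are arbitrary, and readline_from is a name invented for illustration).

def readline_from(chunks, buffer=""):
    # yield '\r\n'-terminated lines, carrying partial data across chunks
    it = iter(chunks)
    while True:
        index = buffer.find("\r\n")
        if index >= 0:
            line, buffer = buffer[:index], buffer[index + 2:]
            yield line
            continue
        data = next(it, "")
        if not data:  # stream exhausted: mirrors the empty-string return above
            return
        buffer += data

print(list(readline_from(["VALUE foo 0 3\r\nbar", "\r\nEND\r\n"])))
# ['VALUE foo 0 3', 'bar', 'END']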
nerdvegas/rez
src/rezplugins/package_repository/memory.py
MemoryPackageRepository.create_repository
def create_repository(cls, repository_data): """Create a standalone, in-memory repository. Using this function bypasses the `package_repository_manager` singleton. This is usually desired, however, since in-memory repositories are for temporarily storing programmatically created packages, which we do not want to cache and which do not persist. Args: repository_data (dict): Repository data, see class docstring. Returns: `MemoryPackageRepository` object. """ location = "memory{%s}" % hex(id(repository_data)) resource_pool = ResourcePool(cache_size=None) repo = MemoryPackageRepository(location, resource_pool) repo.data = repository_data return repo
python
def create_repository(cls, repository_data): """Create a standalone, in-memory repository. Using this function bypasses the `package_repository_manager` singleton. This is usually desired, however, since in-memory repositories are for temporarily storing programmatically created packages, which we do not want to cache and which do not persist. Args: repository_data (dict): Repository data, see class docstring. Returns: `MemoryPackageRepository` object. """ location = "memory{%s}" % hex(id(repository_data)) resource_pool = ResourcePool(cache_size=None) repo = MemoryPackageRepository(location, resource_pool) repo.data = repository_data return repo
[ "def", "create_repository", "(", "cls", ",", "repository_data", ")", ":", "location", "=", "\"memory{%s}\"", "%", "hex", "(", "id", "(", "repository_data", ")", ")", "resource_pool", "=", "ResourcePool", "(", "cache_size", "=", "None", ")", "repo", "=", "Mem...
Create a standalone, in-memory repository. Using this function bypasses the `package_repository_manager` singleton. This is usually desired, however, since in-memory repositories are for temporarily storing programmatically created packages, which we do not want to cache and which do not persist. Args: repository_data (dict): Repository data, see class docstring. Returns: `MemoryPackageRepository` object.
[ "Create", "a", "standalone", "in", "-", "memory", "repository", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezplugins/package_repository/memory.py#L134-L152
train
227,251
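A hedged sketch of driving create_repository(). The nested {family: {version: package_data}} layout is an assumption drawn from the "see class docstring" note, and the direct rezplugins import path is illustrative.

from rezplugins.package_repository.memory import MemoryPackageRepository

repository_data = {  # assumed schema: family -> version -> package data
    "foo": {
        "1.0.0": {"name": "foo", "version": "1.0.0"},
    },
}
repo = MemoryPackageRepository.create_repository(repository_data)
print(repo.location)  # e.g. "memory{0x...}" - unique per data dict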
nerdvegas/rez
src/build_utils/distlib/metadata.py
LegacyMetadata.read_file
def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) self.set_metadata_version()
python
def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) self.set_metadata_version()
[ "def", "read_file", "(", "self", ",", "fileob", ")", ":", "msg", "=", "message_from_file", "(", "fileob", ")", "self", ".", "_fields", "[", "'Metadata-Version'", "]", "=", "msg", "[", "'metadata-version'", "]", "# When reading, get all the fields we can", "for", ...
Read the metadata values from a file object.
[ "Read", "the", "metadata", "values", "from", "a", "file", "object", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/distlib/metadata.py#L334-L354
train
227,252
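The file being parsed is RFC 822-style, which is why message_from_file() applies. A self-contained sketch (field values made up) showing single vs. repeated list fields:

import io
from email import message_from_file

raw = (
    "Metadata-Version: 1.2\n"
    "Name: example\n"
    "Version: 0.1\n"
    "Classifier: one\n"
    "Classifier: two\n"
)
msg = message_from_file(io.StringIO(raw))
print(msg["metadata-version"])    # '1.2' - header lookup is case-insensitive
print(msg.get_all("Classifier"))  # ['one', 'two'] - list fields repeat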
nerdvegas/rez
src/build_utils/distlib/metadata.py
LegacyMetadata.check
def check(self, strict=False): """Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append('Wrong value for %r: %s' % (field, value)) return missing, warnings
python
def check(self, strict=False): """Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append('Wrong value for %r: %s' % (field, value)) return missing, warnings
[ "def", "check", "(", "self", ",", "strict", "=", "False", ")", ":", "self", ".", "set_metadata_version", "(", ")", "# XXX should check the versions (if the file was loaded)", "missing", ",", "warnings", "=", "[", "]", ",", "[", "]", "for", "attr", "in", "(", ...
Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided
[ "Check", "if", "the", "metadata", "is", "compliant", ".", "If", "strict", "is", "True", "then", "raise", "if", "no", "Name", "or", "Version", "are", "provided" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/distlib/metadata.py#L487-L529
train
227,253
nerdvegas/rez
src/rezgui/util.py
create_pane
def create_pane(widgets, horizontal, parent_widget=None, compact=False, compact_spacing=2): """Create a widget containing an aligned set of widgets. Args: widgets (list of `QWidget`): Entries may also be (widget, stretch) tuples, ints (fixed spacing), or None (a stretch). horizontal (bool). parent_widget (`QWidget`): Owner widget, QWidget is created if this is not provided. compact (bool): If True, apply the tighter spacing/margins given by `compact_spacing`. compact_spacing (int): Spacing and margin size used when `compact` is True. Returns: `QWidget` """ pane = parent_widget or QtGui.QWidget() type_ = QtGui.QHBoxLayout if horizontal else QtGui.QVBoxLayout layout = type_() if compact: layout.setSpacing(compact_spacing) layout.setContentsMargins(compact_spacing, compact_spacing, compact_spacing, compact_spacing) for widget in widgets: stretch = 0 if isinstance(widget, tuple): widget, stretch = widget if isinstance(widget, int): layout.addSpacing(widget) elif widget: layout.addWidget(widget, stretch) else: layout.addStretch() pane.setLayout(layout) return pane
python
def create_pane(widgets, horizontal, parent_widget=None, compact=False, compact_spacing=2): """Create a widget containing an aligned set of widgets. Args: widgets (list of `QWidget`): Entries may also be (widget, stretch) tuples, ints (fixed spacing), or None (a stretch). horizontal (bool). parent_widget (`QWidget`): Owner widget, QWidget is created if this is not provided. compact (bool): If True, apply the tighter spacing/margins given by `compact_spacing`. compact_spacing (int): Spacing and margin size used when `compact` is True. Returns: `QWidget` """ pane = parent_widget or QtGui.QWidget() type_ = QtGui.QHBoxLayout if horizontal else QtGui.QVBoxLayout layout = type_() if compact: layout.setSpacing(compact_spacing) layout.setContentsMargins(compact_spacing, compact_spacing, compact_spacing, compact_spacing) for widget in widgets: stretch = 0 if isinstance(widget, tuple): widget, stretch = widget if isinstance(widget, int): layout.addSpacing(widget) elif widget: layout.addWidget(widget, stretch) else: layout.addStretch() pane.setLayout(layout) return pane
[ "def", "create_pane", "(", "widgets", ",", "horizontal", ",", "parent_widget", "=", "None", ",", "compact", "=", "False", ",", "compact_spacing", "=", "2", ")", ":", "pane", "=", "parent_widget", "or", "QtGui", ".", "QWidget", "(", ")", "type_", "=", "Qt...
Create a widget containing an aligned set of widgets. Args: widgets (list of `QWidget`): Entries may also be (widget, stretch) tuples, ints (fixed spacing), or None (a stretch). horizontal (bool). parent_widget (`QWidget`): Owner widget, QWidget is created if this is not provided. compact (bool): If True, apply the tighter spacing/margins given by `compact_spacing`. compact_spacing (int): Spacing and margin size used when `compact` is True. Returns: `QWidget`
[ "Create", "a", "widget", "containing", "an", "aligned", "set", "of", "widgets", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/util.py#L7-L44
train
227,254
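A hedged usage sketch for create_pane(), assuming a Qt4-era binding (where layouts live in QtGui, as the code above expects) and that a QApplication can be created; the widget choices are illustrative.

import sys
from PySide import QtGui  # assumption: a QtGui-based binding is available
from rezgui.util import create_pane

app = QtGui.QApplication(sys.argv)

# (widget, stretch) tuples set stretch factors, ints add fixed spacing,
# and a None entry becomes a stretch
pane = create_pane(
    [QtGui.QLabel("packages:"), (QtGui.QComboBox(), 1), 10, None],
    horizontal=True, compact=True)
pane.show()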
nerdvegas/rez
src/rezgui/util.py
get_icon
def get_icon(name, as_qicon=False): """Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon` is True""" filename = name + ".png" icon = icons.get(filename) if not icon: path = os.path.dirname(__file__) path = os.path.join(path, "icons") filepath = os.path.join(path, filename) if not os.path.exists(filepath): filepath = os.path.join(path, "pink.png") icon = QtGui.QPixmap(filepath) icons[filename] = icon return QtGui.QIcon(icon) if as_qicon else icon
python
def get_icon(name, as_qicon=False): """Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon` is True""" filename = name + ".png" icon = icons.get(filename) if not icon: path = os.path.dirname(__file__) path = os.path.join(path, "icons") filepath = os.path.join(path, filename) if not os.path.exists(filepath): filepath = os.path.join(path, "pink.png") icon = QtGui.QPixmap(filepath) icons[filename] = icon return QtGui.QIcon(icon) if as_qicon else icon
[ "def", "get_icon", "(", "name", ",", "as_qicon", "=", "False", ")", ":", "filename", "=", "name", "+", "\".png\"", "icon", "=", "icons", ".", "get", "(", "filename", ")", "if", "not", "icon", ":", "path", "=", "os", ".", "path", ".", "dirname", "("...
Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon` is True
[ "Returns", "a", "QPixmap", "containing", "the", "given", "image", "or", "a", "QIcon", "if", "as_qicon", "is", "True" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/util.py#L50-L65
train
227,255
nerdvegas/rez
src/rezgui/util.py
interp_color
def interp_color(a, b, f): """Interpolate between two colors. Returns: `QColor` object. """ a_ = (a.redF(), a.greenF(), a.blueF()) b_ = (b.redF(), b.greenF(), b.blueF()) a_ = [x * (1 - f) for x in a_] b_ = [x * f for x in b_] c = [x + y for x, y in zip(a_, b_)] return QtGui.QColor.fromRgbF(*c)
python
def interp_color(a, b, f): """Interpolate between two colors. Returns: `QColor` object. """ a_ = (a.redF(), a.greenF(), a.blueF()) b_ = (b.redF(), b.greenF(), b.blueF()) a_ = [x * (1 - f) for x in a_] b_ = [x * f for x in b_] c = [x + y for x, y in zip(a_, b_)] return QtGui.QColor.fromRgbF(*c)
[ "def", "interp_color", "(", "a", ",", "b", ",", "f", ")", ":", "a_", "=", "(", "a", ".", "redF", "(", ")", ",", "a", ".", "greenF", "(", ")", ",", "a", ".", "blueF", "(", ")", ")", "b_", "=", "(", "b", ".", "redF", "(", ")", ",", "b", ...
Interpolate between two colors. Returns: `QColor` object.
[ "Interpolate", "between", "two", "colors", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/util.py#L105-L116
train
227,256
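The same channel-wise linear interpolation on plain RGB floats, with no Qt dependency; f=0 returns a, f=1 returns b (the function name is invented for illustration).

def interp_rgb(a, b, f):
    return tuple(x * (1 - f) + y * f for x, y in zip(a, b))

print(interp_rgb((1.0, 0.0, 0.0), (0.0, 0.0, 1.0), 0.5))
# (0.5, 0.0, 0.5) - halfway between red and blue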
nerdvegas/rez
src/rezgui/util.py
create_toolbutton
def create_toolbutton(entries, parent=None): """Create a toolbutton. Args: entries: List of (label, slot) tuples. Returns: 2-tuple: - `QtGui.QToolButton` object; - List of `QtGui.QAction`: The menu actions, in the same order as `entries`. """ btn = QtGui.QToolButton(parent) menu = QtGui.QMenu() actions = [] for label, slot in entries: action = add_menu_action(menu, label, slot) actions.append(action) btn.setPopupMode(QtGui.QToolButton.MenuButtonPopup) btn.setDefaultAction(actions[0]) btn.setMenu(menu) return btn, actions
python
def create_toolbutton(entries, parent=None): """Create a toolbutton. Args: entries: List of (label, slot) tuples. Returns: 2-tuple: - `QtGui.QToolButton` object; - List of `QtGui.QAction`: The menu actions, in the same order as `entries`. """ btn = QtGui.QToolButton(parent) menu = QtGui.QMenu() actions = [] for label, slot in entries: action = add_menu_action(menu, label, slot) actions.append(action) btn.setPopupMode(QtGui.QToolButton.MenuButtonPopup) btn.setDefaultAction(actions[0]) btn.setMenu(menu) return btn, actions
[ "def", "create_toolbutton", "(", "entries", ",", "parent", "=", "None", ")", ":", "btn", "=", "QtGui", ".", "QToolButton", "(", "parent", ")", "menu", "=", "QtGui", ".", "QMenu", "(", ")", "actions", "=", "[", "]", "for", "label", ",", "slot", "in", ...
Create a toolbutton. Args: entries: List of (label, slot) tuples. Returns: 2-tuple: - `QtGui.QToolButton` object; - List of `QtGui.QAction`: The menu actions, in the same order as `entries`.
[ "Create", "a", "toolbutton", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/util.py#L119-L139
train
227,257
nerdvegas/rez
src/build_utils/distlib/locators.py
SimpleScrapingLocator.get_page
def get_page(self, url): """ Get the HTML for a URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). """ # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result
python
def get_page(self, url): """ Get the HTML for a URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). """ # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result
[ "def", "get_page", "(", "self", ",", "url", ")", ":", "# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api", "scheme", ",", "netloc", ",", "path", ",", "_", ",", "_", ",", "_", "=", "urlparse", "(", "url", ")", "if", "scheme", "==", "'file'...
Get the HTML for a URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator).
[ "Get", "the", "HTML", "for", "an", "URL", "possibly", "from", "an", "in", "-", "memory", "cache", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/build_utils/distlib/locators.py#L673-L730
train
227,258
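The cache/bad-host bookkeeping above, reduced to a standalone sketch; fetch stands in for the urllib request logic and every name here is illustrative rather than part of the locators API.

class PageCache(object):
    def __init__(self, fetch):
        self.fetch = fetch      # callable(url) -> page object, or raises
        self._page_cache = {}   # never cleared, as the docstring notes
        self._bad_hosts = set()

    def get_page(self, url, host):
        if url in self._page_cache:
            return self._page_cache[url]
        result = None
        if host not in self._bad_hosts:
            try:
                result = self.fetch(url)
            except IOError:     # network-level failure: avoid this host
                self._bad_hosts.add(host)
        self._page_cache[url] = result  # failures (None) are cached too
        return result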
nerdvegas/rez
src/rez/package_search.py
get_reverse_dependency_tree
def get_reverse_dependency_tree(package_name, depth=None, paths=None, build_requires=False, private_build_requires=False): """Find packages that depend on the given package. This is a reverse dependency lookup. A tree is constructed, showing what packages depend on the given package, with an optional depth limit. A resolve does not occur. Only the latest version of each package is used, and requirements from all variants of that package are used. Args: package_name (str): Name of the package depended on. depth (int): Tree depth limit, unlimited if None. paths (list of str): paths to search for packages, defaults to `config.packages_path`. build_requires (bool): If True, includes packages' build_requires. private_build_requires (bool): If True, include `package_name`'s private_build_requires. Returns: A 2-tuple: - (list of list of str): Lists of package names, where each list is a single depth in the tree. The first list is always [`package_name`]. - `pygraph.digraph` object, where nodes are package names, and `package_name` is always the leaf node. """ pkgs_list = [[package_name]] g = digraph() g.add_node(package_name) # build reverse lookup it = iter_package_families(paths) package_names = set(x.name for x in it) if package_name not in package_names: raise PackageFamilyNotFoundError("No such package family %r" % package_name) if depth == 0: return pkgs_list, g bar = ProgressBar("Searching", len(package_names)) lookup = defaultdict(set) for i, package_name_ in enumerate(package_names): it = iter_packages(name=package_name_, paths=paths) packages = list(it) if not packages: continue pkg = max(packages, key=lambda x: x.version) requires = [] for variant in pkg.iter_variants(): pbr = (private_build_requires and pkg.name == package_name) requires += variant.get_requires( build_requires=build_requires, private_build_requires=pbr ) for req in requires: if not req.conflict: lookup[req.name].add(package_name_) bar.next() bar.finish() # perform traversal n = 0 consumed = set([package_name]) working_set = set([package_name]) node_color = "#F6F6F6" node_fontsize = 10 node_attrs = [("fillcolor", node_color), ("style", "filled"), ("fontsize", node_fontsize)] while working_set and (depth is None or n < depth): working_set_ = set() for child in working_set: parents = lookup[child] - consumed working_set_.update(parents) consumed.update(parents) for parent in parents: g.add_node(parent, attrs=node_attrs) g.add_edge((parent, child)) if working_set_: pkgs_list.append(sorted(list(working_set_))) working_set = working_set_ n += 1 return pkgs_list, g
python
def get_reverse_dependency_tree(package_name, depth=None, paths=None, build_requires=False, private_build_requires=False): """Find packages that depend on the given package. This is a reverse dependency lookup. A tree is constructed, showing what packages depend on the given package, with an optional depth limit. A resolve does not occur. Only the latest version of each package is used, and requirements from all variants of that package are used. Args: package_name (str): Name of the package depended on. depth (int): Tree depth limit, unlimited if None. paths (list of str): paths to search for packages, defaults to `config.packages_path`. build_requires (bool): If True, includes packages' build_requires. private_build_requires (bool): If True, include `package_name`'s private_build_requires. Returns: A 2-tuple: - (list of list of str): Lists of package names, where each list is a single depth in the tree. The first list is always [`package_name`]. - `pygraph.digraph` object, where nodes are package names, and `package_name` is always the leaf node. """ pkgs_list = [[package_name]] g = digraph() g.add_node(package_name) # build reverse lookup it = iter_package_families(paths) package_names = set(x.name for x in it) if package_name not in package_names: raise PackageFamilyNotFoundError("No such package family %r" % package_name) if depth == 0: return pkgs_list, g bar = ProgressBar("Searching", len(package_names)) lookup = defaultdict(set) for i, package_name_ in enumerate(package_names): it = iter_packages(name=package_name_, paths=paths) packages = list(it) if not packages: continue pkg = max(packages, key=lambda x: x.version) requires = [] for variant in pkg.iter_variants(): pbr = (private_build_requires and pkg.name == package_name) requires += variant.get_requires( build_requires=build_requires, private_build_requires=pbr ) for req in requires: if not req.conflict: lookup[req.name].add(package_name_) bar.next() bar.finish() # perform traversal n = 0 consumed = set([package_name]) working_set = set([package_name]) node_color = "#F6F6F6" node_fontsize = 10 node_attrs = [("fillcolor", node_color), ("style", "filled"), ("fontsize", node_fontsize)] while working_set and (depth is None or n < depth): working_set_ = set() for child in working_set: parents = lookup[child] - consumed working_set_.update(parents) consumed.update(parents) for parent in parents: g.add_node(parent, attrs=node_attrs) g.add_edge((parent, child)) if working_set_: pkgs_list.append(sorted(list(working_set_))) working_set = working_set_ n += 1 return pkgs_list, g
[ "def", "get_reverse_dependency_tree", "(", "package_name", ",", "depth", "=", "None", ",", "paths", "=", "None", ",", "build_requires", "=", "False", ",", "private_build_requires", "=", "False", ")", ":", "pkgs_list", "=", "[", "[", "package_name", "]", "]", ...
Find packages that depend on the given package. This is a reverse dependency lookup. A tree is constructed, showing what packages depend on the given package, with an optional depth limit. A resolve does not occur. Only the latest version of each package is used, and requirements from all variants of that package are used. Args: package_name (str): Name of the package depended on. depth (int): Tree depth limit, unlimited if None. paths (list of str): paths to search for packages, defaults to `config.packages_path`. build_requires (bool): If True, includes packages' build_requires. private_build_requires (bool): If True, include `package_name`'s private_build_requires. Returns: A 2-tuple: - (list of list of str): Lists of package names, where each list is a single depth in the tree. The first list is always [`package_name`]. - `pygraph.digraph` object, where nodes are package names, and `package_name` is always the leaf node.
[ "Find", "packages", "that", "depend", "on", "the", "given", "package", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_search.py#L25-L121
train
227,259
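The traversal half of the function in isolation: given a prebuilt reverse lookup of {package: set of packages that require it}, walk outward one depth level at a time. The package names below are made up.

from collections import defaultdict

lookup = defaultdict(set, {
    "python": {"maya", "nuke"},
    "maya": {"studio_tools"},
})

def reverse_tree(package_name, lookup, depth=None):
    levels = [[package_name]]
    consumed = {package_name}
    working_set = {package_name}
    n = 0
    while working_set and (depth is None or n < depth):
        parents = set()
        for child in working_set:
            parents |= (lookup[child] - consumed)
        consumed |= parents
        if parents:
            levels.append(sorted(parents))
        working_set = parents
        n += 1
    return levels

print(reverse_tree("python", lookup))
# [['python'], ['maya', 'nuke'], ['studio_tools']]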
nerdvegas/rez
src/rez/package_search.py
get_plugins
def get_plugins(package_name, paths=None): """Find packages that are plugins of the given package. Args: package_name (str): Name of the package. paths (list of str): Paths to search for packages, defaults to `config.packages_path`. Returns: list of str: The packages that are plugins of the given package. """ pkg = get_latest_package(package_name, paths=paths, error=True) if not pkg.has_plugins: return [] it = iter_package_families(paths) package_names = set(x.name for x in it) bar = ProgressBar("Searching", len(package_names)) plugin_pkgs = [] for package_name_ in package_names: bar.next() if package_name_ == package_name: continue # not a plugin of itself plugin_pkg = get_latest_package(package_name_, paths=paths) if not plugin_pkg.plugin_for: continue for plugin_for in plugin_pkg.plugin_for: if plugin_for == pkg.name: plugin_pkgs.append(package_name_) bar.finish() return plugin_pkgs
python
def get_plugins(package_name, paths=None): """Find packages that are plugins of the given package. Args: package_name (str): Name of the package. paths (list of str): Paths to search for packages, defaults to `config.packages_path`. Returns: list of str: The packages that are plugins of the given package. """ pkg = get_latest_package(package_name, paths=paths, error=True) if not pkg.has_plugins: return [] it = iter_package_families(paths) package_names = set(x.name for x in it) bar = ProgressBar("Searching", len(package_names)) plugin_pkgs = [] for package_name_ in package_names: bar.next() if package_name_ == package_name: continue # not a plugin of itself plugin_pkg = get_latest_package(package_name_, paths=paths) if not plugin_pkg.plugin_for: continue for plugin_for in plugin_pkg.plugin_for: if plugin_for == pkg.name: plugin_pkgs.append(package_name_) bar.finish() return plugin_pkgs
[ "def", "get_plugins", "(", "package_name", ",", "paths", "=", "None", ")", ":", "pkg", "=", "get_latest_package", "(", "package_name", ",", "paths", "=", "paths", ",", "error", "=", "True", ")", "if", "not", "pkg", ".", "has_plugins", ":", "return", "[",...
Find packages that are plugins of the given package. Args: package_name (str): Name of the package. paths (list of str): Paths to search for packages, defaults to `config.packages_path`. Returns: list of str: The packages that are plugins of the given package.
[ "Find", "packages", "that", "are", "plugins", "of", "the", "given", "package", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_search.py#L124-L157
train
227,260
nerdvegas/rez
src/rez/package_search.py
ResourceSearcher.search
def search(self, resources_request=None): """Search for resources. Args: resources_request (str): Resource to search, glob-style patterns are supported. If None, returns all matching resource types. Returns: 2-tuple: - str: resource type (family, package, variant); - List of `ResourceSearchResult`: Matching resources. Will be in alphabetical order if families, and version ascending for packages or variants. """ # Find matching package families name_pattern, version_range = self._parse_request(resources_request) family_names = set( x.name for x in iter_package_families(paths=self.package_paths) if fnmatch.fnmatch(x.name, name_pattern) ) family_names = sorted(family_names) # determine what type of resource we're searching for if self.resource_type: resource_type = self.resource_type elif version_range or len(family_names) == 1: resource_type = "package" else: resource_type = "family" if not family_names: return resource_type, [] # return list of family names (validation is n/a in this case) if resource_type == "family": results = [ResourceSearchResult(x, "family") for x in family_names] return "family", results results = [] # iterate over packages/variants for name in family_names: it = iter_packages(name, version_range, paths=self.package_paths) packages = sorted(it, key=lambda x: x.version) if self.latest and packages: packages = [packages[-1]] for package in packages: # validate and check time (accessing timestamp may cause # validation fail) try: if package.timestamp: if self.after_time and package.timestamp < self.after_time: continue if self.before_time and package.timestamp >= self.before_time: continue if self.validate: package.validate_data() except ResourceContentError as e: if resource_type == "package": result = ResourceSearchResult(package, "package", str(e)) results.append(result) continue if resource_type == "package": result = ResourceSearchResult(package, "package") results.append(result) continue # iterate variants try: for variant in package.iter_variants(): if self.validate: try: variant.validate_data() except ResourceContentError as e: result = ResourceSearchResult( variant, "variant", str(e)) results.append(result) continue result = ResourceSearchResult(variant, "variant") results.append(result) except ResourceContentError: # this may happen if 'variants' in package is malformed continue return resource_type, results
python
def search(self, resources_request=None): """Search for resources. Args: resources_request (str): Resource to search, glob-style patterns are supported. If None, returns all matching resource types. Returns: 2-tuple: - str: resource type (family, package, variant); - List of `ResourceSearchResult`: Matching resources. Will be in alphabetical order if families, and version ascending for packages or variants. """ # Find matching package families name_pattern, version_range = self._parse_request(resources_request) family_names = set( x.name for x in iter_package_families(paths=self.package_paths) if fnmatch.fnmatch(x.name, name_pattern) ) family_names = sorted(family_names) # determine what type of resource we're searching for if self.resource_type: resource_type = self.resource_type elif version_range or len(family_names) == 1: resource_type = "package" else: resource_type = "family" if not family_names: return resource_type, [] # return list of family names (validation is n/a in this case) if resource_type == "family": results = [ResourceSearchResult(x, "family") for x in family_names] return "family", results results = [] # iterate over packages/variants for name in family_names: it = iter_packages(name, version_range, paths=self.package_paths) packages = sorted(it, key=lambda x: x.version) if self.latest and packages: packages = [packages[-1]] for package in packages: # validate and check time (accessing timestamp may cause # validation fail) try: if package.timestamp: if self.after_time and package.timestamp < self.after_time: continue if self.before_time and package.timestamp >= self.before_time: continue if self.validate: package.validate_data() except ResourceContentError as e: if resource_type == "package": result = ResourceSearchResult(package, "package", str(e)) results.append(result) continue if resource_type == "package": result = ResourceSearchResult(package, "package") results.append(result) continue # iterate variants try: for variant in package.iter_variants(): if self.validate: try: variant.validate_data() except ResourceContentError as e: result = ResourceSearchResult( variant, "variant", str(e)) results.append(result) continue result = ResourceSearchResult(variant, "variant") results.append(result) except ResourceContentError: # this may happen if 'variants' in package is malformed continue return resource_type, results
[ "def", "search", "(", "self", ",", "resources_request", "=", "None", ")", ":", "# Find matching package families", "name_pattern", ",", "version_range", "=", "self", ".", "_parse_request", "(", "resources_request", ")", "family_names", "=", "set", "(", "x", ".", ...
Search for resources. Args: resources_request (str): Resource to search, glob-style patterns are supported. If None, returns all matching resource types. Returns: 2-tuple: - str: resource type (family, package, variant); - List of `ResourceSearchResult`: Matching resources. Will be in alphabetical order if families, and version ascending for packages or variants.
[ "Search", "for", "resources", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_search.py#L210-L305
train
227,261
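The family-matching and type-inference steps in isolation, with made-up family names; _parse_request is assumed to have already split the request into a glob pattern and an optional version range.

import fnmatch

family_names = ["maya", "maya_utils", "nuke"]
name_pattern, version_range = "maya*", None

matches = sorted(n for n in family_names if fnmatch.fnmatch(n, name_pattern))

# same rule as above: an explicit range or a single match implies packages
resource_type = "package" if (version_range or len(matches) == 1) else "family"
print(matches, resource_type)  # ['maya', 'maya_utils'] family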
nerdvegas/rez
src/rez/package_search.py
ResourceSearchResultFormatter.print_search_results
def print_search_results(self, search_results, buf=sys.stdout): """Print formatted search results. Args: search_results (list of `ResourceSearchResult`): Search results to format. """ formatted_lines = self.format_search_results(search_results) pr = Printer(buf) for txt, style in formatted_lines: pr(txt, style)
python
def print_search_results(self, search_results, buf=sys.stdout): """Print formatted search results. Args: search_results (list of `ResourceSearchResult`): Search results to format. """ formatted_lines = self.format_search_results(search_results) pr = Printer(buf) for txt, style in formatted_lines: pr(txt, style)
[ "def", "print_search_results", "(", "self", ",", "search_results", ",", "buf", "=", "sys", ".", "stdout", ")", ":", "formatted_lines", "=", "self", ".", "format_search_results", "(", "search_results", ")", "pr", "=", "Printer", "(", "buf", ")", "for", "txt",...
Print formatted search results. Args: search_results (list of `ResourceSearchResult`): Search results to format.
[ "Print", "formatted", "search", "results", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_search.py#L347-L357
train
227,262
nerdvegas/rez
src/rez/package_search.py
ResourceSearchResultFormatter.format_search_results
def format_search_results(self, search_results): """Format search results. Args: search_results (list of `ResourceSearchResult`): Search results to format. Returns: List of 2-tuples: Text and color to print in. """ formatted_lines = [] for search_result in search_results: lines = self._format_search_result(search_result) formatted_lines.extend(lines) return formatted_lines
python
def format_search_results(self, search_results): """Format search results. Args: search_results (list of `ResourceSearchResult`): Search results to format. Returns: List of 2-tuples: Text and color to print in. """ formatted_lines = [] for search_result in search_results: lines = self._format_search_result(search_result) formatted_lines.extend(lines) return formatted_lines
[ "def", "format_search_results", "(", "self", ",", "search_results", ")", ":", "formatted_lines", "=", "[", "]", "for", "search_result", "in", "search_results", ":", "lines", "=", "self", ".", "_format_search_result", "(", "search_result", ")", "formatted_lines", "...
Format search results. Args: search_results (list of `ResourceSearchResult`): Search results to format. Returns: List of 2-tuples: Text and color to print in.
[ "Format", "search", "results", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_search.py#L359-L374
train
227,263
nerdvegas/rez
src/rez/vendor/pygraph/readwrite/dot.py
read
def read(string): """ Read a graph from a string in Dot language and return it. Nodes and edges specified in the input will be added to the current graph. @type string: string @param string: Input string in Dot format specifying a graph. @rtype: graph @return: Graph """ dotG = pydot.graph_from_dot_data(string) if (dotG.get_type() == "graph"): G = graph() elif (dotG.get_type() == "digraph"): G = digraph() elif (dotG.get_type() == "hypergraph"): return read_hypergraph(string) else: raise InvalidGraphType # Read nodes... # Note: If the nodes aren't explicitly listed, they need to be added when the edges are read for each_node in dotG.get_nodes(): G.add_node(each_node.get_name()) for each_attr_key, each_attr_val in each_node.get_attributes().items(): G.add_node_attribute(each_node.get_name(), (each_attr_key, each_attr_val)) # Read edges... for each_edge in dotG.get_edges(): # Check if the nodes have been added if not G.has_node(each_edge.get_source()): G.add_node(each_edge.get_source()) if not G.has_node(each_edge.get_destination()): G.add_node(each_edge.get_destination()) # See if there's a weight if 'weight' in each_edge.get_attributes().keys(): _wt = each_edge.get_attributes()['weight'] else: _wt = 1 # See if there is a label if 'label' in each_edge.get_attributes().keys(): _label = each_edge.get_attributes()['label'] else: _label = '' G.add_edge((each_edge.get_source(), each_edge.get_destination()), wt = _wt, label = _label) for each_attr_key, each_attr_val in each_edge.get_attributes().items(): if not each_attr_key in ['weight', 'label']: G.add_edge_attribute((each_edge.get_source(), each_edge.get_destination()), \ (each_attr_key, each_attr_val)) return G
python
def read(string): """ Read a graph from a string in Dot language and return it. Nodes and edges specified in the input will be added to the current graph. @type string: string @param string: Input string in Dot format specifying a graph. @rtype: graph @return: Graph """ dotG = pydot.graph_from_dot_data(string) if (dotG.get_type() == "graph"): G = graph() elif (dotG.get_type() == "digraph"): G = digraph() elif (dotG.get_type() == "hypergraph"): return read_hypergraph(string) else: raise InvalidGraphType # Read nodes... # Note: If the nodes aren't explicitly listed, they need to be added when the edges are read for each_node in dotG.get_nodes(): G.add_node(each_node.get_name()) for each_attr_key, each_attr_val in each_node.get_attributes().items(): G.add_node_attribute(each_node.get_name(), (each_attr_key, each_attr_val)) # Read edges... for each_edge in dotG.get_edges(): # Check if the nodes have been added if not G.has_node(each_edge.get_source()): G.add_node(each_edge.get_source()) if not G.has_node(each_edge.get_destination()): G.add_node(each_edge.get_destination()) # See if there's a weight if 'weight' in each_edge.get_attributes().keys(): _wt = each_edge.get_attributes()['weight'] else: _wt = 1 # See if there is a label if 'label' in each_edge.get_attributes().keys(): _label = each_edge.get_attributes()['label'] else: _label = '' G.add_edge((each_edge.get_source(), each_edge.get_destination()), wt = _wt, label = _label) for each_attr_key, each_attr_val in each_edge.get_attributes().items(): if not each_attr_key in ['weight', 'label']: G.add_edge_attribute((each_edge.get_source(), each_edge.get_destination()), \ (each_attr_key, each_attr_val)) return G
[ "def", "read", "(", "string", ")", ":", "dotG", "=", "pydot", ".", "graph_from_dot_data", "(", "string", ")", "if", "(", "dotG", ".", "get_type", "(", ")", "==", "\"graph\"", ")", ":", "G", "=", "graph", "(", ")", "elif", "(", "dotG", ".", "get_typ...
Read a graph from a string in Dot language and return it. Nodes and edges specified in the input will be added to the current graph. @type string: string @param string: Input string in Dot format specifying a graph. @rtype: graph @return: Graph
[ "Read", "a", "graph", "from", "a", "string", "in", "Dot", "language", "and", "return", "it", ".", "Nodes", "and", "edges", "specified", "in", "the", "input", "will", "be", "added", "to", "the", "current", "graph", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/readwrite/dot.py#L47-L104
train
227,264
nerdvegas/rez
src/rez/vendor/pygraph/readwrite/dot.py
read_hypergraph
def read_hypergraph(string): """ Read a hypergraph from a string in dot format. Nodes and edges specified in the input will be added to the current hypergraph. @type string: string @param string: Input string in dot format specifying a graph. @rtype: hypergraph @return: Hypergraph """ hgr = hypergraph() dotG = pydot.graph_from_dot_data(string) # Read the hypernode nodes... # Note 1: We need to assume that all of the nodes are listed since we need to know if they # are a hyperedge or a normal node # Note 2: We should read in all of the nodes before putting in the links for each_node in dotG.get_nodes(): if 'hypernode' == each_node.get('hyper_node_type'): hgr.add_node(each_node.get_name()) elif 'hyperedge' == each_node.get('hyper_node_type'): hgr.add_hyperedge(each_node.get_name()) # Now read in the links to connect the hyperedges for each_link in dotG.get_edges(): if hgr.has_node(each_link.get_source()): link_hypernode = each_link.get_source() link_hyperedge = each_link.get_destination() elif hgr.has_node(each_link.get_destination()): link_hypernode = each_link.get_destination() link_hyperedge = each_link.get_source() hgr.link(link_hypernode, link_hyperedge) return hgr
python
def read_hypergraph(string): """ Read a hypergraph from a string in dot format. Nodes and edges specified in the input will be added to the current hypergraph. @type string: string @param string: Input string in dot format specifying a graph. @rtype: hypergraph @return: Hypergraph """ hgr = hypergraph() dotG = pydot.graph_from_dot_data(string) # Read the hypernode nodes... # Note 1: We need to assume that all of the nodes are listed since we need to know if they # are a hyperedge or a normal node # Note 2: We should read in all of the nodes before putting in the links for each_node in dotG.get_nodes(): if 'hypernode' == each_node.get('hyper_node_type'): hgr.add_node(each_node.get_name()) elif 'hyperedge' == each_node.get('hyper_node_type'): hgr.add_hyperedge(each_node.get_name()) # Now read in the links to connect the hyperedges for each_link in dotG.get_edges(): if hgr.has_node(each_link.get_source()): link_hypernode = each_link.get_source() link_hyperedge = each_link.get_destination() elif hgr.has_node(each_link.get_destination()): link_hypernode = each_link.get_destination() link_hyperedge = each_link.get_source() hgr.link(link_hypernode, link_hyperedge) return hgr
[ "def", "read_hypergraph", "(", "string", ")", ":", "hgr", "=", "hypergraph", "(", ")", "dotG", "=", "pydot", ".", "graph_from_dot_data", "(", "string", ")", "# Read the hypernode nodes...", "# Note 1: We need to assume that all of the nodes are listed since we need to know if...
Read a hypergraph from a string in dot format. Nodes and edges specified in the input will be added to the current hypergraph. @type string: string @param string: Input string in dot format specifying a graph. @rtype: hypergraph @return: Hypergraph
[ "Read", "a", "hypergraph", "from", "a", "string", "in", "dot", "format", ".", "Nodes", "and", "edges", "specified", "in", "the", "input", "will", "be", "added", "to", "the", "current", "hypergraph", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/readwrite/dot.py#L179-L213
train
227,265
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
graph_from_dot_file
def graph_from_dot_file(path): """Load graph as defined by a DOT file. The file is assumed to be in DOT format. It will be loaded, parsed and a Dot class will be returned, representing the graph. """ fd = file(path, 'rb') data = fd.read() fd.close() return graph_from_dot_data(data)
python
def graph_from_dot_file(path): """Load graph as defined by a DOT file. The file is assumed to be in DOT format. It will be loaded, parsed and a Dot class will be returned, representing the graph. """ fd = file(path, 'rb') data = fd.read() fd.close() return graph_from_dot_data(data)
[ "def", "graph_from_dot_file", "(", "path", ")", ":", "fd", "=", "file", "(", "path", ",", "'rb'", ")", "data", "=", "fd", ".", "read", "(", ")", "fd", ".", "close", "(", ")", "return", "graph_from_dot_data", "(", "data", ")" ]
Load graph as defined by a DOT file. The file is assumed to be in DOT format. It will be loaded, parsed and a Dot class will be returned, representing the graph.
[ "Load", "graph", "as", "defined", "by", "a", "DOT", "file", ".", "The", "file", "is", "assumed", "to", "be", "in", "DOT", "format", ".", "It", "will", "be", "loaded", "parsed", "and", "a", "Dot", "class", "will", "be", "returned", "representing", "the"...
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L220-L232
train
227,266
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
__find_executables
def __find_executables(path): """Used by find_graphviz path - single directory as a string If any of the executables are found, it will return a dictionary containing the program names as keys and their paths as values. Otherwise returns None """ success = False progs = {'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': '', 'sfdp': ''} was_quoted = False path = path.strip() if path.startswith('"') and path.endswith('"'): path = path[1:-1] was_quoted = True if os.path.isdir(path) : for prg in progs.iterkeys(): if progs[prg]: continue if os.path.exists( os.path.join(path, prg) ): if was_quoted: progs[prg] = '"' + os.path.join(path, prg) + '"' else: progs[prg] = os.path.join(path, prg) success = True elif os.path.exists( os.path.join(path, prg + '.exe') ): if was_quoted: progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"' else: progs[prg] = os.path.join(path, prg + '.exe') success = True if success: return progs else: return None
python
def __find_executables(path): """Used by find_graphviz path - single directory as a string If any of the executables are found, it will return a dictionary containing the program names as keys and their paths as values. Otherwise returns None """ success = False progs = {'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': '', 'sfdp': ''} was_quoted = False path = path.strip() if path.startswith('"') and path.endswith('"'): path = path[1:-1] was_quoted = True if os.path.isdir(path) : for prg in progs.iterkeys(): if progs[prg]: continue if os.path.exists( os.path.join(path, prg) ): if was_quoted: progs[prg] = '"' + os.path.join(path, prg) + '"' else: progs[prg] = os.path.join(path, prg) success = True elif os.path.exists( os.path.join(path, prg + '.exe') ): if was_quoted: progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"' else: progs[prg] = os.path.join(path, prg + '.exe') success = True if success: return progs else: return None
[ "def", "__find_executables", "(", "path", ")", ":", "success", "=", "False", "progs", "=", "{", "'dot'", ":", "''", ",", "'twopi'", ":", "''", ",", "'neato'", ":", "''", ",", "'circo'", ":", "''", ",", "'fdp'", ":", "''", ",", "'sfdp'", ":", "''", ...
Used by find_graphviz path - single directory as a string If any of the executables are found, it will return a dictionary containing the program names as keys and their paths as values. Otherwise returns None
[ "Used", "by", "find_graphviz", "path", "-", "single", "directory", "as", "a", "string", "If", "any", "of", "the", "executables", "are", "found", "it", "will", "return", "a", "dictionary", "containing", "the", "program", "names", "as", "keys", "and", "their",...
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L347-L398
train
227,267
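The probe logic in miniature: check one directory for each Graphviz program, with and without an '.exe' suffix. The quoted-path handling above is omitted, and the directory is illustrative.

import os

def find_graphviz_in(path):
    progs = {}
    for prg in ("dot", "twopi", "neato", "circo", "fdp", "sfdp"):
        for candidate in (prg, prg + ".exe"):
            full = os.path.join(path, candidate)
            if os.path.exists(full):
                progs[prg] = full
                break
    return progs or None  # None mirrors the "nothing found" return above

print(find_graphviz_in("/usr/bin"))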
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
Edge.to_string
def to_string(self): """Returns a string representation of the edge in dot language. """ src = self.parse_node_ref( self.get_source() ) dst = self.parse_node_ref( self.get_destination() ) if isinstance(src, frozendict): edge = [ Subgraph(obj_dict=src).to_string() ] elif isinstance(src, (int, long)): edge = [ str(src) ] else: edge = [ src ] if (self.get_parent_graph() and self.get_parent_graph().get_top_graph_type() and self.get_parent_graph().get_top_graph_type() == 'digraph' ): edge.append( '->' ) else: edge.append( '--' ) if isinstance(dst, frozendict): edge.append( Subgraph(obj_dict=dst).to_string() ) elif isinstance(dst, (int, long)): edge.append( str(dst) ) else: edge.append( dst ) edge_attr = list() for attr, value in self.obj_dict['attributes'].iteritems(): if value is not None: edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) ) else: edge_attr.append( attr ) edge_attr = ', '.join(edge_attr) if edge_attr: edge.append( ' [' + edge_attr + ']' ) return ' '.join(edge) + ';'
python
def to_string(self): """Returns a string representation of the edge in dot language. """ src = self.parse_node_ref( self.get_source() ) dst = self.parse_node_ref( self.get_destination() ) if isinstance(src, frozendict): edge = [ Subgraph(obj_dict=src).to_string() ] elif isinstance(src, (int, long)): edge = [ str(src) ] else: edge = [ src ] if (self.get_parent_graph() and self.get_parent_graph().get_top_graph_type() and self.get_parent_graph().get_top_graph_type() == 'digraph' ): edge.append( '->' ) else: edge.append( '--' ) if isinstance(dst, frozendict): edge.append( Subgraph(obj_dict=dst).to_string() ) elif isinstance(dst, (int, long)): edge.append( str(dst) ) else: edge.append( dst ) edge_attr = list() for attr, value in self.obj_dict['attributes'].iteritems(): if value is not None: edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) ) else: edge_attr.append( attr ) edge_attr = ', '.join(edge_attr) if edge_attr: edge.append( ' [' + edge_attr + ']' ) return ' '.join(edge) + ';'
[ "def", "to_string", "(", "self", ")", ":", "src", "=", "self", ".", "parse_node_ref", "(", "self", ".", "get_source", "(", ")", ")", "dst", "=", "self", ".", "parse_node_ref", "(", "self", ".", "get_destination", "(", ")", ")", "if", "isinstance", "(",...
Returns a string representation of the edge in dot language.
[ "Returns", "a", "string", "representation", "of", "the", "edge", "in", "dot", "language", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L974-L1019
train
227,268
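Edge.to_string above chooses '->' or '--' from the top-level graph type and renders attributes as a bracketed, comma-separated list. A standalone sketch of that formatting rule over plain strings - the helper name and signature are hypothetical, not pydot's API:

def format_edge(src, dst, directed, attrs=None):
    """Render one DOT edge statement, e.g. 'a -> b [color=red];'."""
    connector = "->" if directed else "--"
    parts = [str(src), connector, str(dst)]
    if attrs:
        # attributes without a value are emitted bare, as in the code above
        rendered = ", ".join(
            k if v is None else "%s=%s" % (k, v)
            for k, v in sorted(attrs.items()))
        parts.append("[" + rendered + "]")
    return " ".join(parts) + ";"

print(format_edge("a", "b", directed=True, attrs={"color": "red"}))
# a -> b [color=red];
print(format_edge("a", "b", directed=False))
# a -- b;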
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
Graph.get_node
def get_node(self, name): """Retrieve a node from the graph. Given a node's name, the matching Node instances will be returned. If one or more nodes exist with that name, a list of Node instances is returned. An empty list is returned otherwise. """ match = list() if self.obj_dict['nodes'].has_key(name): match.extend( [ Node( obj_dict = obj_dict ) for obj_dict in self.obj_dict['nodes'][name] ]) return match
python
def get_node(self, name): """Retrieve a node from the graph. Given a node's name, the matching Node instances will be returned. If one or more nodes exist with that name, a list of Node instances is returned. An empty list is returned otherwise. """ match = list() if self.obj_dict['nodes'].has_key(name): match.extend( [ Node( obj_dict = obj_dict ) for obj_dict in self.obj_dict['nodes'][name] ]) return match
[ "def", "get_node", "(", "self", ",", "name", ")", ":", "match", "=", "list", "(", ")", "if", "self", ".", "obj_dict", "[", "'nodes'", "]", ".", "has_key", "(", "name", ")", ":", "match", ".", "extend", "(", "[", "Node", "(", "obj_dict", "=", "obj...
Retrieve a node from the graph. Given a node's name, the matching Node instances will be returned. If one or more nodes exist with that name, a list of Node instances is returned. An empty list is returned otherwise.
[ "Retrieve", "a", "node", "from", "the", "graph", ".", "Given", "a", "node", "s", "name", "the", "corresponding", "Node", "instance", "will", "be", "returned", ".", "If", "one", "or", "more", "nodes", "exist", "with", "that", "name", "a", "list", "of", ...
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L1323-L1340
train
227,269
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
Graph.add_edge
def add_edge(self, graph_edge): """Adds an edge object to the graph. It takes an edge object as its only argument and returns None. """ if not isinstance(graph_edge, Edge): raise TypeError('add_edge() received a non edge class object: ' + str(graph_edge)) edge_points = ( graph_edge.get_source(), graph_edge.get_destination() ) if self.obj_dict['edges'].has_key(edge_points): edge_list = self.obj_dict['edges'][edge_points] edge_list.append(graph_edge.obj_dict) else: self.obj_dict['edges'][edge_points] = [ graph_edge.obj_dict ] graph_edge.set_sequence( self.get_next_sequence_number() ) graph_edge.set_parent_graph( self.get_parent_graph() )
python
def add_edge(self, graph_edge): """Adds an edge object to the graph. It takes an edge object as its only argument and returns None. """ if not isinstance(graph_edge, Edge): raise TypeError('add_edge() received a non edge class object: ' + str(graph_edge)) edge_points = ( graph_edge.get_source(), graph_edge.get_destination() ) if self.obj_dict['edges'].has_key(edge_points): edge_list = self.obj_dict['edges'][edge_points] edge_list.append(graph_edge.obj_dict) else: self.obj_dict['edges'][edge_points] = [ graph_edge.obj_dict ] graph_edge.set_sequence( self.get_next_sequence_number() ) graph_edge.set_parent_graph( self.get_parent_graph() )
[ "def", "add_edge", "(", "self", ",", "graph_edge", ")", ":", "if", "not", "isinstance", "(", "graph_edge", ",", "Edge", ")", ":", "raise", "TypeError", "(", "'add_edge() received a non edge class object: '", "+", "str", "(", "graph_edge", ")", ")", "edge_points"...
Adds an edge object to the graph. It takes an edge object as its only argument and returns None.
[ "Adds", "an", "edge", "object", "to", "the", "graph", ".", "It", "takes", "a", "edge", "object", "as", "its", "only", "argument", "and", "returns", "None", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L1365-L1389
train
227,270
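add_edge above keys the internal 'edges' dict on the (source, destination) pair and appends to a per-key list, so parallel edges between the same endpoints survive. The same multigraph storage pattern in isolation - a hypothetical minimal structure, not pydot's actual obj_dict schema:

from collections import defaultdict

class MultiGraph(object):
    def __init__(self):
        # (src, dst) -> list of edge records; a list per key allows
        # several parallel edges between the same two endpoints
        self.edges = defaultdict(list)
        self._seq = 0

    def add_edge(self, src, dst, **attrs):
        self._seq += 1  # sequence numbers preserve insertion order
        self.edges[(src, dst)].append({"sequence": self._seq, "attrs": attrs})

g = MultiGraph()
g.add_edge("a", "b", color="red")
g.add_edge("a", "b", style="dashed")   # parallel edge, same endpoints
print(len(g.edges[("a", "b")]))        # 2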
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
Graph.add_subgraph
def add_subgraph(self, sgraph): """Adds a subgraph object to the graph. It takes a subgraph object as its only argument and returns None. """ if not isinstance(sgraph, Subgraph) and not isinstance(sgraph, Cluster): raise TypeError('add_subgraph() received a non subgraph class object:' + str(sgraph)) if self.obj_dict['subgraphs'].has_key(sgraph.get_name()): sgraph_list = self.obj_dict['subgraphs'][ sgraph.get_name() ] sgraph_list.append( sgraph.obj_dict ) else: self.obj_dict['subgraphs'][ sgraph.get_name() ] = [ sgraph.obj_dict ] sgraph.set_sequence( self.get_next_sequence_number() ) sgraph.set_parent_graph( self.get_parent_graph() )
python
def add_subgraph(self, sgraph): """Adds a subgraph object to the graph. It takes a subgraph object as its only argument and returns None. """ if not isinstance(sgraph, Subgraph) and not isinstance(sgraph, Cluster): raise TypeError('add_subgraph() received a non subgraph class object:' + str(sgraph)) if self.obj_dict['subgraphs'].has_key(sgraph.get_name()): sgraph_list = self.obj_dict['subgraphs'][ sgraph.get_name() ] sgraph_list.append( sgraph.obj_dict ) else: self.obj_dict['subgraphs'][ sgraph.get_name() ] = [ sgraph.obj_dict ] sgraph.set_sequence( self.get_next_sequence_number() ) sgraph.set_parent_graph( self.get_parent_graph() )
[ "def", "add_subgraph", "(", "self", ",", "sgraph", ")", ":", "if", "not", "isinstance", "(", "sgraph", ",", "Subgraph", ")", "and", "not", "isinstance", "(", "sgraph", ",", "Cluster", ")", ":", "raise", "TypeError", "(", "'add_subgraph() received a non subgrap...
Adds a subgraph object to the graph. It takes a subgraph object as its only argument and returns None.
[ "Adds", "an", "subgraph", "object", "to", "the", "graph", ".", "It", "takes", "a", "subgraph", "object", "as", "its", "only", "argument", "and", "returns", "None", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L1488-L1508
train
227,271
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
Graph.to_string
def to_string(self): """Returns a string representation of the graph in dot language. It will return the graph and all its subelements in string form. """ graph = list() if self.obj_dict.get('strict', None) is not None: if self==self.get_parent_graph() and self.obj_dict['strict']: graph.append('strict ') if self.obj_dict['name'] == '': if 'show_keyword' in self.obj_dict and self.obj_dict['show_keyword']: graph.append( 'subgraph {\n' ) else: graph.append( '{\n' ) else: graph.append( '%s %s {\n' % (self.obj_dict['type'], self.obj_dict['name']) ) for attr in self.obj_dict['attributes'].iterkeys(): if self.obj_dict['attributes'].get(attr, None) is not None: val = self.obj_dict['attributes'].get(attr) if val is not None: graph.append( '%s=%s' % (attr, quote_if_necessary(val)) ) else: graph.append( attr ) graph.append( ';\n' ) edges_done = set() edge_obj_dicts = list() for e in self.obj_dict['edges'].itervalues(): edge_obj_dicts.extend(e) if edge_obj_dicts: edge_src_set, edge_dst_set = zip( *[obj['points'] for obj in edge_obj_dicts] ) edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set) else: edge_src_set, edge_dst_set = set(), set() node_obj_dicts = list() for e in self.obj_dict['nodes'].itervalues(): node_obj_dicts.extend(e) sgraph_obj_dicts = list() for sg in self.obj_dict['subgraphs'].itervalues(): sgraph_obj_dicts.extend(sg) obj_list = [ (obj['sequence'], obj) for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts) ] obj_list.sort() for idx, obj in obj_list: if obj['type'] == 'node': node = Node(obj_dict=obj) if self.obj_dict.get('suppress_disconnected', False): if (node.get_name() not in edge_src_set and node.get_name() not in edge_dst_set): continue graph.append( node.to_string()+'\n' ) elif obj['type'] == 'edge': edge = Edge(obj_dict=obj) if self.obj_dict.get('simplify', False) and edge in edges_done: continue graph.append( edge.to_string() + '\n' ) edges_done.add(edge) else: sgraph = Subgraph(obj_dict=obj) graph.append( sgraph.to_string()+'\n' ) graph.append( '}\n' ) return ''.join(graph)
python
def to_string(self): """Returns a string representation of the graph in dot language. It will return the graph and all its subelements in string form. """ graph = list() if self.obj_dict.get('strict', None) is not None: if self==self.get_parent_graph() and self.obj_dict['strict']: graph.append('strict ') if self.obj_dict['name'] == '': if 'show_keyword' in self.obj_dict and self.obj_dict['show_keyword']: graph.append( 'subgraph {\n' ) else: graph.append( '{\n' ) else: graph.append( '%s %s {\n' % (self.obj_dict['type'], self.obj_dict['name']) ) for attr in self.obj_dict['attributes'].iterkeys(): if self.obj_dict['attributes'].get(attr, None) is not None: val = self.obj_dict['attributes'].get(attr) if val is not None: graph.append( '%s=%s' % (attr, quote_if_necessary(val)) ) else: graph.append( attr ) graph.append( ';\n' ) edges_done = set() edge_obj_dicts = list() for e in self.obj_dict['edges'].itervalues(): edge_obj_dicts.extend(e) if edge_obj_dicts: edge_src_set, edge_dst_set = zip( *[obj['points'] for obj in edge_obj_dicts] ) edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set) else: edge_src_set, edge_dst_set = set(), set() node_obj_dicts = list() for e in self.obj_dict['nodes'].itervalues(): node_obj_dicts.extend(e) sgraph_obj_dicts = list() for sg in self.obj_dict['subgraphs'].itervalues(): sgraph_obj_dicts.extend(sg) obj_list = [ (obj['sequence'], obj) for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts) ] obj_list.sort() for idx, obj in obj_list: if obj['type'] == 'node': node = Node(obj_dict=obj) if self.obj_dict.get('suppress_disconnected', False): if (node.get_name() not in edge_src_set and node.get_name() not in edge_dst_set): continue graph.append( node.to_string()+'\n' ) elif obj['type'] == 'edge': edge = Edge(obj_dict=obj) if self.obj_dict.get('simplify', False) and edge in edges_done: continue graph.append( edge.to_string() + '\n' ) edges_done.add(edge) else: sgraph = Subgraph(obj_dict=obj) graph.append( sgraph.to_string()+'\n' ) graph.append( '}\n' ) return ''.join(graph)
[ "def", "to_string", "(", "self", ")", ":", "graph", "=", "list", "(", ")", "if", "self", ".", "obj_dict", ".", "get", "(", "'strict'", ",", "None", ")", "is", "not", "None", ":", "if", "self", "==", "self", ".", "get_parent_graph", "(", ")", "and",...
Returns a string representation of the graph in dot language. It will return the graph and all its subelements in string form.
[ "Returns", "a", "string", "representation", "of", "the", "graph", "in", "dot", "language", ".", "It", "will", "return", "the", "graph", "and", "all", "its", "subelements", "in", "string", "from", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L1576-L1670
train
227,272
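Graph.to_string above flattens nodes, edges and subgraphs into one list and sorts on each object's 'sequence' number, so the DOT output follows insertion order regardless of element type. That interleaving step on its own, with purely illustrative records:

nodes = [{"type": "node", "sequence": 1, "text": "a;"},
         {"type": "node", "sequence": 4, "text": "b;"}]
edges = [{"type": "edge", "sequence": 2, "text": "a -> b;"}]
subgraphs = [{"type": "subgraph", "sequence": 3, "text": "subgraph s {}"}]

# One sort key restores the order in which elements were added
merged = sorted(nodes + edges + subgraphs, key=lambda obj: obj["sequence"])
print([obj["text"] for obj in merged])
# ['a;', 'a -> b;', 'subgraph s {}', 'b;']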
nerdvegas/rez
src/rez/vendor/pydot/pydot.py
Dot.create
def create(self, prog=None, format='ps'): """Creates and returns a Postscript representation of the graph. create will write the graph to a temporary dot file and process it with the program given by 'prog' (which defaults to 'twopi'), reading the Postscript output and returning it as a string if the operation is successful. On failure None is returned. There's also the preferred possibility of using: create_'format'(prog='program') which are automatically defined for all the supported formats. [create_ps(), create_gif(), create_dia(), ...] If 'prog' is a list instead of a string the first item is expected to be the program name, followed by any optional command-line arguments for it: [ 'twopi', '-Tdot', '-s10' ] """ if prog is None: prog = self.prog if isinstance(prog, (list, tuple)): prog, args = prog[0], prog[1:] else: args = [] if self.progs is None: self.progs = find_graphviz() if self.progs is None: raise InvocationException( 'GraphViz\'s executables not found' ) if not self.progs.has_key(prog): raise InvocationException( 'GraphViz\'s executable "%s" not found' % prog ) if not os.path.exists( self.progs[prog] ) or not os.path.isfile( self.progs[prog] ): raise InvocationException( 'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self.progs[prog] ) tmp_fd, tmp_name = tempfile.mkstemp() os.close(tmp_fd) self.write(tmp_name) tmp_dir = os.path.dirname(tmp_name ) # For each of the image files... # for img in self.shape_files: # Get its data # f = file(img, 'rb') f_data = f.read() f.close() # And copy it under a file with the same name in the temporary directory # f = file( os.path.join( tmp_dir, os.path.basename(img) ), 'wb' ) f.write(f_data) f.close() cmdline = [self.progs[prog], '-T'+format, tmp_name] + args p = subprocess.Popen( cmdline, cwd=tmp_dir, stderr=subprocess.PIPE, stdout=subprocess.PIPE) stderr = p.stderr stdout = p.stdout stdout_output = list() while True: data = stdout.read() if not data: break stdout_output.append(data) stdout.close() stdout_output = ''.join(stdout_output) if not stderr.closed: stderr_output = list() while True: data = stderr.read() if not data: break stderr_output.append(data) stderr.close() if stderr_output: stderr_output = ''.join(stderr_output) #pid, status = os.waitpid(p.pid, 0) status = p.wait() if status != 0 : raise InvocationException( 'Program terminated with status: %d. stderr follows: %s' % ( status, stderr_output) ) elif stderr_output: print stderr_output # For each of the image files... # for img in self.shape_files: # remove it # os.unlink( os.path.join( tmp_dir, os.path.basename(img) ) ) os.unlink(tmp_name) return stdout_output
python
def create(self, prog=None, format='ps'): """Creates and returns a Postscript representation of the graph. create will write the graph to a temporary dot file and process it with the program given by 'prog' (which defaults to 'twopi'), reading the Postscript output and returning it as a string if the operation is successful. On failure None is returned. There's also the preferred possibility of using: create_'format'(prog='program') which are automatically defined for all the supported formats. [create_ps(), create_gif(), create_dia(), ...] If 'prog' is a list instead of a string the first item is expected to be the program name, followed by any optional command-line arguments for it: [ 'twopi', '-Tdot', '-s10' ] """ if prog is None: prog = self.prog if isinstance(prog, (list, tuple)): prog, args = prog[0], prog[1:] else: args = [] if self.progs is None: self.progs = find_graphviz() if self.progs is None: raise InvocationException( 'GraphViz\'s executables not found' ) if not self.progs.has_key(prog): raise InvocationException( 'GraphViz\'s executable "%s" not found' % prog ) if not os.path.exists( self.progs[prog] ) or not os.path.isfile( self.progs[prog] ): raise InvocationException( 'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self.progs[prog] ) tmp_fd, tmp_name = tempfile.mkstemp() os.close(tmp_fd) self.write(tmp_name) tmp_dir = os.path.dirname(tmp_name ) # For each of the image files... # for img in self.shape_files: # Get its data # f = file(img, 'rb') f_data = f.read() f.close() # And copy it under a file with the same name in the temporary directory # f = file( os.path.join( tmp_dir, os.path.basename(img) ), 'wb' ) f.write(f_data) f.close() cmdline = [self.progs[prog], '-T'+format, tmp_name] + args p = subprocess.Popen( cmdline, cwd=tmp_dir, stderr=subprocess.PIPE, stdout=subprocess.PIPE) stderr = p.stderr stdout = p.stdout stdout_output = list() while True: data = stdout.read() if not data: break stdout_output.append(data) stdout.close() stdout_output = ''.join(stdout_output) if not stderr.closed: stderr_output = list() while True: data = stderr.read() if not data: break stderr_output.append(data) stderr.close() if stderr_output: stderr_output = ''.join(stderr_output) #pid, status = os.waitpid(p.pid, 0) status = p.wait() if status != 0 : raise InvocationException( 'Program terminated with status: %d. stderr follows: %s' % ( status, stderr_output) ) elif stderr_output: print stderr_output # For each of the image files... # for img in self.shape_files: # remove it # os.unlink( os.path.join( tmp_dir, os.path.basename(img) ) ) os.unlink(tmp_name) return stdout_output
[ "def", "create", "(", "self", ",", "prog", "=", "None", ",", "format", "=", "'ps'", ")", ":", "if", "prog", "is", "None", ":", "prog", "=", "self", ".", "prog", "if", "isinstance", "(", "prog", ",", "(", "list", ",", "tuple", ")", ")", ":", "pr...
Creates and returns a Postscript representation of the graph. create will write the graph to a temporary dot file and process it with the program given by 'prog' (which defaults to 'twopi'), reading the Postscript output and returning it as a string if the operation is successful. On failure None is returned. There's also the preferred possibility of using: create_'format'(prog='program') which are automatically defined for all the supported formats. [create_ps(), create_gif(), create_dia(), ...] If 'prog' is a list instead of a string the first item is expected to be the program name, followed by any optional command-line arguments for it: [ 'twopi', '-Tdot', '-s10' ]
[ "Creates", "and", "returns", "a", "Postscript", "representation", "of", "the", "graph", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pydot/pydot.py#L1915-L2034
train
227,273
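Dot.create above writes the graph to a temp file, runs the chosen Graphviz program with '-T<format>', and returns stdout, raising when the exit status is non-zero. A condensed sketch of the same pipeline using the modern subprocess API - Python 3 and a 'dot' binary on PATH are assumptions, and the shape-file copying is omitted:

import os
import subprocess
import tempfile

def render_dot(dot_source, prog="dot", fmt="ps"):
    """Run a Graphviz program over `dot_source` and return its raw output."""
    fd, tmp_name = tempfile.mkstemp(suffix=".dot")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(dot_source)
        result = subprocess.run(
            [prog, "-T" + fmt, tmp_name],
            capture_output=True, check=False)
        if result.returncode != 0:
            raise RuntimeError(
                "Program terminated with status: %d. stderr follows: %s"
                % (result.returncode, result.stderr.decode(errors="replace")))
        return result.stdout
    finally:
        os.unlink(tmp_name)

# Usage (requires Graphviz to be installed):
# ps_bytes = render_dot("digraph { a -> b; }", fmt="ps")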
nerdvegas/rez
src/rez/utils/yaml.py
dump_yaml
def dump_yaml(data, Dumper=_Dumper, default_flow_style=False): """Returns data as yaml-formatted string.""" content = yaml.dump(data, default_flow_style=default_flow_style, Dumper=Dumper) return content.strip()
python
def dump_yaml(data, Dumper=_Dumper, default_flow_style=False): """Returns data as yaml-formatted string.""" content = yaml.dump(data, default_flow_style=default_flow_style, Dumper=Dumper) return content.strip()
[ "def", "dump_yaml", "(", "data", ",", "Dumper", "=", "_Dumper", ",", "default_flow_style", "=", "False", ")", ":", "content", "=", "yaml", ".", "dump", "(", "data", ",", "default_flow_style", "=", "default_flow_style", ",", "Dumper", "=", "Dumper", ")", "r...
Returns data as yaml-formatted string.
[ "Returns", "data", "as", "yaml", "-", "formatted", "string", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/yaml.py#L64-L69
train
227,274
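dump_yaml above is a thin wrapper over yaml.dump that defaults to block style and strips the trailing newline. A usage sketch under stock PyYAML - the rez-specific _Dumper is replaced by the default Dumper here, and the sample data is illustrative:

import yaml

data = {"name": "foo", "requires": ["python-2.7", "bar-1+"]}

# default_flow_style=False forces block style (one key per line)
print(yaml.dump(data, default_flow_style=False).strip())
# name: foo
# requires:
# - python-2.7
# - bar-1+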
nerdvegas/rez
src/rez/utils/yaml.py
load_yaml
def load_yaml(filepath): """Convenience function for loading yaml-encoded data from disk.""" with open(filepath) as f: txt = f.read() return yaml.load(txt)
python
def load_yaml(filepath): """Convenience function for loading yaml-encoded data from disk.""" with open(filepath) as f: txt = f.read() return yaml.load(txt)
[ "def", "load_yaml", "(", "filepath", ")", ":", "with", "open", "(", "filepath", ")", "as", "f", ":", "txt", "=", "f", ".", "read", "(", ")", "return", "yaml", ".", "load", "(", "txt", ")" ]
Convenience function for loading yaml-encoded data from disk.
[ "Convenience", "function", "for", "loading", "yaml", "-", "encoded", "data", "from", "disk", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/yaml.py#L72-L76
train
227,275
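load_yaml above calls yaml.load with no explicit Loader, which newer PyYAML releases warn about and which can construct arbitrary Python objects from tagged input. A safer equivalent for untrusted files, as a sketch (the function name is hypothetical):

import yaml

def load_yaml_safe(filepath):
    """Like load_yaml, but restricted to plain YAML types."""
    with open(filepath) as f:
        # safe_load refuses python/object tags that a full load would honour
        return yaml.safe_load(f.read())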
nerdvegas/rez
src/rez/utils/memcached.py
memcached_client
def memcached_client(servers=config.memcached_uri, debug=config.debug_memcache): """Get a shared memcached instance. This function shares the same memcached instance across nested invocations. This is done so that memcached connections can be kept to a minimum, but at the same time unnecessary extra reconnections are avoided. Typically an initial scope (using 'with' construct) is made around parts of code that hit the cache server many times - such as a resolve, or executing a context. On exit of the topmost scope, the memcached client is disconnected. Returns: `Client`: Memcached instance. """ key = None try: client, key = scoped_instance_manager.acquire(servers, debug=debug) yield client finally: if key: scoped_instance_manager.release(key)
python
def memcached_client(servers=config.memcached_uri, debug=config.debug_memcache): """Get a shared memcached instance. This function shares the same memcached instance across nested invocations. This is done so that memcached connections can be kept to a minimum, but at the same time unnecessary extra reconnections are avoided. Typically an initial scope (using 'with' construct) is made around parts of code that hit the cache server many times - such as a resolve, or executing a context. On exit of the topmost scope, the memcached client is disconnected. Returns: `Client`: Memcached instance. """ key = None try: client, key = scoped_instance_manager.acquire(servers, debug=debug) yield client finally: if key: scoped_instance_manager.release(key)
[ "def", "memcached_client", "(", "servers", "=", "config", ".", "memcached_uri", ",", "debug", "=", "config", ".", "debug_memcache", ")", ":", "key", "=", "None", "try", ":", "client", ",", "key", "=", "scoped_instance_manager", ".", "acquire", "(", "servers"...
Get a shared memcached instance. This function shares the same memcached instance across nested invocations. This is done so that memcached connections can be kept to a minimum, but at the same time unnecessary extra reconnections are avoided. Typically an initial scope (using 'with' construct) is made around parts of code that hit the cache server many times - such as a resolve, or executing a context. On exit of the topmost scope, the memcached client is disconnected. Returns: `Client`: Memcached instance.
[ "Get", "a", "shared", "memcached", "instance", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/memcached.py#L207-L226
train
227,276
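memcached_client above hands out one shared client per server list and only disconnects when the outermost 'with' scope exits. The reference-counting shape of that pattern, reduced to a sketch - FakeClient and shared_client are stand-ins, since the real scoped_instance_manager is rez-internal:

from contextlib import contextmanager

class FakeClient(object):
    def disconnect_all(self):
        print("disconnected")

_instances = {}   # servers -> (client, refcount)

@contextmanager
def shared_client(servers):
    client, count = _instances.get(servers, (None, 0))
    if client is None:
        client = FakeClient()          # connect once
    _instances[servers] = (client, count + 1)
    try:
        yield client
    finally:
        client, count = _instances[servers]
        if count == 1:                 # outermost scope: tear down
            del _instances[servers]
            client.disconnect_all()
        else:
            _instances[servers] = (client, count - 1)

with shared_client("127.0.0.1:11211") as a:
    with shared_client("127.0.0.1:11211") as b:
        print(a is b)                  # True - same instance reused
# "disconnected" prints only once, here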
nerdvegas/rez
src/rez/utils/memcached.py
pool_memcached_connections
def pool_memcached_connections(func): """Function decorator to pool memcached connections. Use this to wrap functions that might make multiple calls to memcached. This will cause a single memcached client to be shared for all connections. """ if isgeneratorfunction(func): def wrapper(*nargs, **kwargs): with memcached_client(): for result in func(*nargs, **kwargs): yield result else: def wrapper(*nargs, **kwargs): with memcached_client(): return func(*nargs, **kwargs) return update_wrapper(wrapper, func)
python
def pool_memcached_connections(func): """Function decorator to pool memcached connections. Use this to wrap functions that might make multiple calls to memcached. This will cause a single memcached client to be shared for all connections. """ if isgeneratorfunction(func): def wrapper(*nargs, **kwargs): with memcached_client(): for result in func(*nargs, **kwargs): yield result else: def wrapper(*nargs, **kwargs): with memcached_client(): return func(*nargs, **kwargs) return update_wrapper(wrapper, func)
[ "def", "pool_memcached_connections", "(", "func", ")", ":", "if", "isgeneratorfunction", "(", "func", ")", ":", "def", "wrapper", "(", "*", "nargs", ",", "*", "*", "kwargs", ")", ":", "with", "memcached_client", "(", ")", ":", "for", "result", "in", "fun...
Function decorator to pool memcached connections. Use this to wrap functions that might make multiple calls to memcached. This will cause a single memcached client to be shared for all connections.
[ "Function", "decorator", "to", "pool", "memcached", "connections", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/memcached.py#L229-L245
train
227,277
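pool_memcached_connections above has to special-case generator functions: a plain 'with' block that returns a generator would close the client before the first item is ever consumed, so the generator branch re-yields inside the scope instead. The same dual-path shape generalized over any context manager ('pooled' is a hypothetical name):

from functools import update_wrapper
from inspect import isgeneratorfunction

def pooled(ctx_factory):
    """Run the wrapped callable inside ctx_factory(), generators included."""
    def decorator(func):
        if isgeneratorfunction(func):
            def wrapper(*args, **kwargs):
                # keep the context open across every yield
                with ctx_factory():
                    for item in func(*args, **kwargs):
                        yield item
        else:
            def wrapper(*args, **kwargs):
                with ctx_factory():
                    return func(*args, **kwargs)
        return update_wrapper(wrapper, func)
    return decorator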
nerdvegas/rez
src/rez/utils/memcached.py
memcached
def memcached(servers, key=None, from_cache=None, to_cache=None, time=0, min_compress_len=0, debug=False): """memcached memoization function decorator. The wrapped function is expected to return a value that is stored to a memcached server, first translated by `to_cache` if provided. In the event of a cache hit, the data is translated by `from_cache` if provided, before being returned. If you do not want a result to be cached, wrap the return value of your function in a `DoNotCache` object. Example: @memcached('127.0.0.1:11211') def _listdir(path): return os.listdir(path) Note: If using the default key function, ensure that repr() is implemented on all your arguments and that they are hashable. Note: `from_cache` and `to_cache` both accept the value as first parameter, then the target function's arguments follow. Args: servers (str or list of str): memcached server uri(s), eg '127.0.0.1:11211'. This arg can be None also, in which case memcaching is disabled. key (callable, optional): Function that, given the target function's args, returns the string key to use in memcached. from_cache (callable, optional): If provided, and a cache hit occurs, the cached value will be translated by this function before being returned. to_cache (callable, optional): If provided, and a cache miss occurs, the function's return value will be translated by this function before being cached. time (int): Tells memcached the time at which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. min_compress_len (int): The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. debug (bool): If True, memcache keys are kept human readable, so you can read them if running a foreground memcached proc with 'memcached -vv'. However this increases chances of key clashes so should not be left turned on. """ def default_key(func, *nargs, **kwargs): parts = [func.__module__] argnames = getargspec(func).args if argnames: if argnames[0] == "cls": cls_ = nargs[0] parts.append(cls_.__name__) nargs = nargs[1:] elif argnames[0] == "self": cls_ = nargs[0].__class__ parts.append(cls_.__name__) nargs = nargs[1:] parts.append(func.__name__) value = ('.'.join(parts), nargs, tuple(sorted(kwargs.items()))) # make sure key is hashable. We don't strictly need it to be, but this # is a way of hopefully avoiding object types that are not ordered (these # would give an unreliable key). If you need to key on unhashable args, # you should provide your own `key` functor. _ = hash(value) return repr(value) def identity(value, *nargs, **kwargs): return value from_cache = from_cache or identity to_cache = to_cache or identity def decorator(func): if servers: def wrapper(*nargs, **kwargs): with memcached_client(servers, debug=debug) as client: if key: cache_key = key(*nargs, **kwargs) else: cache_key = default_key(func, *nargs, **kwargs) # get result = client.get(cache_key) if result is not client.miss: return from_cache(result, *nargs, **kwargs) # cache miss - run target function result = func(*nargs, **kwargs) if isinstance(result, DoNotCache): return result.result # store cache_result = to_cache(result, *nargs, **kwargs) client.set(key=cache_key, val=cache_result, time=time, min_compress_len=min_compress_len) return result else: def wrapper(*nargs, **kwargs): result = func(*nargs, **kwargs) if isinstance(result, DoNotCache): return result.result return result def forget(): """Forget entries in the cache. Note that this does not delete entries from a memcached server - that would be slow and error-prone. Calling this function only ensures that entries set by the current process will no longer be seen during this process. """ with memcached_client(servers, debug=debug) as client: client.flush() wrapper.forget = forget wrapper.__wrapped__ = func return update_wrapper(wrapper, func) return decorator
python
def memcached(servers, key=None, from_cache=None, to_cache=None, time=0, min_compress_len=0, debug=False): """memcached memoization function decorator. The wrapped function is expected to return a value that is stored to a memcached server, first translated by `to_cache` if provided. In the event of a cache hit, the data is translated by `from_cache` if provided, before being returned. If you do not want a result to be cached, wrap the return value of your function in a `DoNotCache` object. Example: @memcached('127.0.0.1:11211') def _listdir(path): return os.listdir(path) Note: If using the default key function, ensure that repr() is implemented on all your arguments and that they are hashable. Note: `from_cache` and `to_cache` both accept the value as first parameter, then the target function's arguments follow. Args: servers (str or list of str): memcached server uri(s), eg '127.0.0.1:11211'. This arg can be None also, in which case memcaching is disabled. key (callable, optional): Function that, given the target function's args, returns the string key to use in memcached. from_cache (callable, optional): If provided, and a cache hit occurs, the cached value will be translated by this function before being returned. to_cache (callable, optional): If provided, and a cache miss occurs, the function's return value will be translated by this function before being cached. time (int): Tells memcached the time at which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. min_compress_len (int): The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. debug (bool): If True, memcache keys are kept human readable, so you can read them if running a foreground memcached proc with 'memcached -vv'. However this increases chances of key clashes so should not be left turned on. """ def default_key(func, *nargs, **kwargs): parts = [func.__module__] argnames = getargspec(func).args if argnames: if argnames[0] == "cls": cls_ = nargs[0] parts.append(cls_.__name__) nargs = nargs[1:] elif argnames[0] == "self": cls_ = nargs[0].__class__ parts.append(cls_.__name__) nargs = nargs[1:] parts.append(func.__name__) value = ('.'.join(parts), nargs, tuple(sorted(kwargs.items()))) # make sure key is hashable. We don't strictly need it to be, but this # is a way of hopefully avoiding object types that are not ordered (these # would give an unreliable key). If you need to key on unhashable args, # you should provide your own `key` functor. _ = hash(value) return repr(value) def identity(value, *nargs, **kwargs): return value from_cache = from_cache or identity to_cache = to_cache or identity def decorator(func): if servers: def wrapper(*nargs, **kwargs): with memcached_client(servers, debug=debug) as client: if key: cache_key = key(*nargs, **kwargs) else: cache_key = default_key(func, *nargs, **kwargs) # get result = client.get(cache_key) if result is not client.miss: return from_cache(result, *nargs, **kwargs) # cache miss - run target function result = func(*nargs, **kwargs) if isinstance(result, DoNotCache): return result.result # store cache_result = to_cache(result, *nargs, **kwargs) client.set(key=cache_key, val=cache_result, time=time, min_compress_len=min_compress_len) return result else: def wrapper(*nargs, **kwargs): result = func(*nargs, **kwargs) if isinstance(result, DoNotCache): return result.result return result def forget(): """Forget entries in the cache. Note that this does not delete entries from a memcached server - that would be slow and error-prone. Calling this function only ensures that entries set by the current process will no longer be seen during this process. """ with memcached_client(servers, debug=debug) as client: client.flush() wrapper.forget = forget wrapper.__wrapped__ = func return update_wrapper(wrapper, func) return decorator
[ "def", "memcached", "(", "servers", ",", "key", "=", "None", ",", "from_cache", "=", "None", ",", "to_cache", "=", "None", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ",", "debug", "=", "False", ")", ":", "def", "default_key", "(", "fun...
memcached memoization function decorator. The wrapped function is expected to return a value that is stored to a memcached server, first translated by `to_cache` if provided. In the event of a cache hit, the data is translated by `from_cache` if provided, before being returned. If you do not want a result to be cached, wrap the return value of your function in a `DoNotCache` object. Example: @memcached('127.0.0.1:11211') def _listdir(path): return os.listdir(path) Note: If using the default key function, ensure that repr() is implemented on all your arguments and that they are hashable. Note: `from_cache` and `to_cache` both accept the value as first parameter, then the target function's arguments follow. Args: servers (str or list of str): memcached server uri(s), eg '127.0.0.1:11211'. This arg can be None also, in which case memcaching is disabled. key (callable, optional): Function that, given the target function's args, returns the string key to use in memcached. from_cache (callable, optional): If provided, and a cache hit occurs, the cached value will be translated by this function before being returned. to_cache (callable, optional): If provided, and a cache miss occurs, the function's return value will be translated by this function before being cached. time (int): Tells memcached the time at which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. min_compress_len (int): The threshold length to kick in auto-compression of the value using the zlib.compress() routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. debug (bool): If True, memcache keys are kept human readable, so you can read them if running a foreground memcached proc with 'memcached -vv'. However this increases chances of key clashes so should not be left turned on.
[ "memcached", "memoization", "function", "decorator", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/memcached.py#L248-L375
train
227,278
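The decorator's default_key above builds a key from the module path, an optional class name peeled off a leading 'cls'/'self' argument, the function name, and the repr of the remaining arguments; the hash() call is a cheap early guard against unhashable (hence unreliably ordered) argument types. A simplified Python 3 reconstruction of that key scheme - getfullargspec replaces the Python 2 getargspec used above, and Repo is a made-up class for the demo:

import inspect

def default_cache_key(func, *args, **kwargs):
    parts = [func.__module__]
    argnames = inspect.getfullargspec(func).args
    if argnames and argnames[0] in ("self", "cls"):
        cls = args[0] if argnames[0] == "cls" else args[0].__class__
        parts.append(cls.__name__)
        args = args[1:]   # drop self/cls so the key stays deterministic
    parts.append(func.__name__)
    value = (".".join(parts), args, tuple(sorted(kwargs.items())))
    hash(value)           # raises TypeError early for unhashable args
    return repr(value)

class Repo(object):
    def list_packages(self, path, limit=10):
        pass

print(default_cache_key(Repo.list_packages, Repo(), "/packages", limit=5))
# ('__main__.Repo.list_packages', ('/packages',), (('limit', 5),))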
nerdvegas/rez
src/rez/utils/memcached.py
Client.client
def client(self): """Get the native memcache client. Returns: `memcache.Client` instance. """ if self._client is None: self._client = Client_(self.servers) return self._client
python
def client(self): """Get the native memcache client. Returns: `memcache.Client` instance. """ if self._client is None: self._client = Client_(self.servers) return self._client
[ "def", "client", "(", "self", ")", ":", "if", "self", ".", "_client", "is", "None", ":", "self", ".", "_client", "=", "Client_", "(", "self", ".", "servers", ")", "return", "self", ".", "_client" ]
Get the native memcache client. Returns: `memcache.Client` instance.
[ "Get", "the", "native", "memcache", "client", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/memcached.py#L48-L56
train
227,279
nerdvegas/rez
src/rez/utils/memcached.py
Client.flush
def flush(self, hard=False): """Drop existing entries from the cache. Args: hard (bool): If True, all current entries are flushed from the server(s), which affects all users. If False, only the local process is affected. """ if not self.servers: return if hard: self.client.flush_all() self.reset_stats() else: from uuid import uuid4 tag = uuid4().hex if self.debug: tag = "flushed" + tag self.current = tag
python
def flush(self, hard=False): """Drop existing entries from the cache. Args: hard (bool): If True, all current entries are flushed from the server(s), which affects all users. If False, only the local process is affected. """ if not self.servers: return if hard: self.client.flush_all() self.reset_stats() else: from uuid import uuid4 tag = uuid4().hex if self.debug: tag = "flushed" + tag self.current = tag
[ "def", "flush", "(", "self", ",", "hard", "=", "False", ")", ":", "if", "not", "self", ".", "servers", ":", "return", "if", "hard", ":", "self", ".", "client", ".", "flush_all", "(", ")", "self", ".", "reset_stats", "(", ")", "else", ":", "from", ...
Drop existing entries from the cache. Args: hard (bool): If True, all current entries are flushed from the server(s), which affects all users. If False, only the local process is affected.
[ "Drop", "existing", "entries", "from", "the", "cache", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/memcached.py#L119-L137
train
227,280
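The soft branch of Client.flush above never touches the server: it swaps in a fresh uuid 'tag', which the client presumably mixes into every key it builds, so older entries simply stop matching. A sketch of that namespacing trick - the key layout here is an assumption based on the described behaviour, with a dict standing in for the server:

from uuid import uuid4

class NamespacedCache(object):
    def __init__(self):
        self.store = {}        # stand-in for the memcached server
        self.tag = uuid4().hex

    def _key(self, key):
        return self.tag + ":" + key   # the tag namespaces every entry

    def set(self, key, value):
        self.store[self._key(key)] = value

    def get(self, key):
        return self.store.get(self._key(key))

    def soft_flush(self):
        # nothing is deleted server-side; we just stop seeing old keys
        self.tag = uuid4().hex

c = NamespacedCache()
c.set("answer", 42)
c.soft_flush()
print(c.get("answer"))   # None - entry still exists, but under the old tag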
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/searching.py
depth_first_search
def depth_first_search(graph, root=None, filter=null()): """ Depth-first search. @type graph: graph, digraph @param graph: Graph. @type root: node @param root: Optional root node (will explore only root's connected component) @rtype: tuple @return: A tuple containing a dictionary and two lists: 1. Generated spanning tree 2. Graph's preordering 3. Graph's postordering """ recursionlimit = getrecursionlimit() setrecursionlimit(max(len(graph.nodes())*2,recursionlimit)) def dfs(node): """ Depth-first search subfunction. """ visited[node] = 1 pre.append(node) # Explore recursively the connected component for each in graph[node]: if (each not in visited and filter(each, node)): spanning_tree[each] = node dfs(each) post.append(node) visited = {} # List for marking visited and non-visited nodes spanning_tree = {} # Spanning tree pre = [] # Graph's preordering post = [] # Graph's postordering filter.configure(graph, spanning_tree) # DFS from one node only if (root is not None): if filter(root, None): spanning_tree[root] = None dfs(root) setrecursionlimit(recursionlimit) return spanning_tree, pre, post # Algorithm loop for each in graph: # Select a non-visited node if (each not in visited and filter(each, None)): spanning_tree[each] = None # Explore node's connected component dfs(each) setrecursionlimit(recursionlimit) return (spanning_tree, pre, post)
python
def depth_first_search(graph, root=None, filter=null()): """ Depth-first search. @type graph: graph, digraph @param graph: Graph. @type root: node @param root: Optional root node (will explore only root's connected component) @rtype: tuple @return: A tuple containing a dictionary and two lists: 1. Generated spanning tree 2. Graph's preordering 3. Graph's postordering """ recursionlimit = getrecursionlimit() setrecursionlimit(max(len(graph.nodes())*2,recursionlimit)) def dfs(node): """ Depth-first search subfunction. """ visited[node] = 1 pre.append(node) # Explore recursively the connected component for each in graph[node]: if (each not in visited and filter(each, node)): spanning_tree[each] = node dfs(each) post.append(node) visited = {} # List for marking visited and non-visited nodes spanning_tree = {} # Spanning tree pre = [] # Graph's preordering post = [] # Graph's postordering filter.configure(graph, spanning_tree) # DFS from one node only if (root is not None): if filter(root, None): spanning_tree[root] = None dfs(root) setrecursionlimit(recursionlimit) return spanning_tree, pre, post # Algorithm loop for each in graph: # Select a non-visited node if (each not in visited and filter(each, None)): spanning_tree[each] = None # Explore node's connected component dfs(each) setrecursionlimit(recursionlimit) return (spanning_tree, pre, post)
[ "def", "depth_first_search", "(", "graph", ",", "root", "=", "None", ",", "filter", "=", "null", "(", ")", ")", ":", "recursionlimit", "=", "getrecursionlimit", "(", ")", "setrecursionlimit", "(", "max", "(", "len", "(", "graph", ".", "nodes", "(", ")", ...
Depth-first search. @type graph: graph, digraph @param graph: Graph. @type root: node @param root: Optional root node (will explore only root's connected component) @rtype: tuple @return: A tuple containing a dictionary and two lists: 1. Generated spanning tree 2. Graph's preordering 3. Graph's postordering
[ "Depth", "-", "first", "search", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/searching.py#L39-L96
train
227,281
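depth_first_search above recurses one frame per node and bumps the recursion limit to compensate. The same traversal written iteratively sidesteps the limit while producing the identical spanning tree, preordering and postordering - a sketch over a plain adjacency dict, not the pygraph API:

def dfs(adjacency, root):
    """Iterative DFS: returns (spanning_tree, preorder, postorder)."""
    spanning_tree = {root: None}
    pre, post = [root], []
    stack = [(root, iter(adjacency[root]))]
    while stack:
        node, neighbours = stack[-1]
        for nxt in neighbours:
            if nxt not in spanning_tree:
                spanning_tree[nxt] = node
                pre.append(nxt)
                stack.append((nxt, iter(adjacency[nxt])))
                break
        else:
            post.append(node)   # all children done: postorder emit
            stack.pop()
    return spanning_tree, pre, post

graph = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
print(dfs(graph, "a"))
# ({'a': None, 'b': 'a', 'd': 'b', 'c': 'a'}, ['a', 'b', 'd', 'c'], ['d', 'b', 'c', 'a'])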
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/searching.py
breadth_first_search
def breadth_first_search(graph, root=None, filter=null()): """ Breadth-first search. @type graph: graph, digraph @param graph: Graph. @type root: node @param root: Optional root node (will explore only root's connected component) @rtype: tuple @return: A tuple containing a dictionary and a list. 1. Generated spanning tree 2. Graph's level-based ordering """ def bfs(): """ Breadth-first search subfunction. """ while (queue != []): node = queue.pop(0) for other in graph[node]: if (other not in spanning_tree and filter(other, node)): queue.append(other) ordering.append(other) spanning_tree[other] = node queue = [] # Visiting queue spanning_tree = {} # Spanning tree ordering = [] filter.configure(graph, spanning_tree) # BFS from one node only if (root is not None): if filter(root, None): queue.append(root) ordering.append(root) spanning_tree[root] = None bfs() return spanning_tree, ordering # Algorithm for each in graph: if (each not in spanning_tree): if filter(each, None): queue.append(each) ordering.append(each) spanning_tree[each] = None bfs() return spanning_tree, ordering
python
def breadth_first_search(graph, root=None, filter=null()): """ Breadth-first search. @type graph: graph, digraph @param graph: Graph. @type root: node @param root: Optional root node (will explore only root's connected component) @rtype: tuple @return: A tuple containing a dictionary and a list. 1. Generated spanning tree 2. Graph's level-based ordering """ def bfs(): """ Breadth-first search subfunction. """ while (queue != []): node = queue.pop(0) for other in graph[node]: if (other not in spanning_tree and filter(other, node)): queue.append(other) ordering.append(other) spanning_tree[other] = node queue = [] # Visiting queue spanning_tree = {} # Spanning tree ordering = [] filter.configure(graph, spanning_tree) # BFS from one node only if (root is not None): if filter(root, None): queue.append(root) ordering.append(root) spanning_tree[root] = None bfs() return spanning_tree, ordering # Algorithm for each in graph: if (each not in spanning_tree): if filter(each, None): queue.append(each) ordering.append(each) spanning_tree[each] = None bfs() return spanning_tree, ordering
[ "def", "breadth_first_search", "(", "graph", ",", "root", "=", "None", ",", "filter", "=", "null", "(", ")", ")", ":", "def", "bfs", "(", ")", ":", "\"\"\"\n Breadth-first search subfunction.\n \"\"\"", "while", "(", "queue", "!=", "[", "]", ")",...
Breadth-first search. @type graph: graph, digraph @param graph: Graph. @type root: node @param root: Optional root node (will explore only root's connected component) @rtype: tuple @return: A tuple containing a dictionary and a list. 1. Generated spanning tree 2. Graph's level-based ordering
[ "Breadth", "-", "first", "search", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/searching.py#L101-L153
train
227,282
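breadth_first_search above uses list.pop(0) as its queue, which costs O(n) per dequeue; collections.deque gives the same level ordering at O(1) per operation. A sketch over a plain adjacency dict:

from collections import deque

def bfs(adjacency, root):
    """Iterative BFS: returns (spanning_tree, level_ordering)."""
    spanning_tree = {root: None}
    ordering = [root]
    queue = deque([root])
    while queue:
        node = queue.popleft()          # O(1), unlike list.pop(0)
        for other in adjacency[node]:
            if other not in spanning_tree:
                spanning_tree[other] = node
                ordering.append(other)
                queue.append(other)
    return spanning_tree, ordering

graph = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
print(bfs(graph, "a"))
# ({'a': None, 'b': 'a', 'c': 'a', 'd': 'b'}, ['a', 'b', 'c', 'd'])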
nerdvegas/rez
src/rez/package_resources_.py
PackageResource.normalize_variables
def normalize_variables(cls, variables): """Make sure version is treated consistently """ # if the version is False, empty string, etc, throw it out if variables.get('version', True) in ('', False, '_NO_VERSION', None): del variables['version'] return super(PackageResource, cls).normalize_variables(variables)
python
def normalize_variables(cls, variables): """Make sure version is treated consistently """ # if the version is False, empty string, etc, throw it out if variables.get('version', True) in ('', False, '_NO_VERSION', None): del variables['version'] return super(PackageResource, cls).normalize_variables(variables)
[ "def", "normalize_variables", "(", "cls", ",", "variables", ")", ":", "# if the version is False, empty string, etc, throw it out", "if", "variables", ".", "get", "(", "'version'", ",", "True", ")", "in", "(", "''", ",", "False", ",", "'_NO_VERSION'", ",", "None",...
Make sure version is treated consistently
[ "Make", "sure", "version", "is", "treated", "consistently" ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_resources_.py#L300-L306
train
227,283
nerdvegas/rez
src/rez/vendor/argcomplete/my_shlex.py
shlex.push_source
def push_source(self, newstream, newfile=None): "Push an input source onto the lexer's input source stack." if isinstance(newstream, basestring): newstream = StringIO(newstream) self.filestack.appendleft((self.infile, self.instream, self.lineno)) self.infile = newfile self.instream = newstream self.lineno = 1
python
def push_source(self, newstream, newfile=None): "Push an input source onto the lexer's input source stack." if isinstance(newstream, basestring): newstream = StringIO(newstream) self.filestack.appendleft((self.infile, self.instream, self.lineno)) self.infile = newfile self.instream = newstream self.lineno = 1
[ "def", "push_source", "(", "self", ",", "newstream", ",", "newfile", "=", "None", ")", ":", "if", "isinstance", "(", "newstream", ",", "basestring", ")", ":", "newstream", "=", "StringIO", "(", "newstream", ")", "self", ".", "filestack", ".", "appendleft",...
Push an input source onto the lexer's input source stack.
[ "Push", "an", "input", "source", "onto", "the", "lexer", "s", "input", "source", "stack", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/argcomplete/my_shlex.py#L100-L107
train
227,284
nerdvegas/rez
src/rez/vendor/argcomplete/my_shlex.py
shlex.error_leader
def error_leader(self, infile=None, lineno=None): "Emit a C-compiler-like, Emacs-friendly error-message leader." if infile is None: infile = self.infile if lineno is None: lineno = self.lineno return "\"%s\", line %d: " % (infile, lineno)
python
def error_leader(self, infile=None, lineno=None): "Emit a C-compiler-like, Emacs-friendly error-message leader." if infile is None: infile = self.infile if lineno is None: lineno = self.lineno return "\"%s\", line %d: " % (infile, lineno)
[ "def", "error_leader", "(", "self", ",", "infile", "=", "None", ",", "lineno", "=", "None", ")", ":", "if", "infile", "is", "None", ":", "infile", "=", "self", ".", "infile", "if", "lineno", "is", "None", ":", "lineno", "=", "self", ".", "lineno", ...
Emit a C-compiler-like, Emacs-friendly error-message leader.
[ "Emit", "a", "C", "-", "compiler", "-", "like", "Emacs", "-", "friendly", "error", "-", "message", "leader", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/argcomplete/my_shlex.py#L278-L284
train
227,285
nerdvegas/rez
src/rez/system.py
System.rez_bin_path
def rez_bin_path(self): """Get path containing rez binaries, or None if no binaries are available, or Rez is not a production install. """ binpath = None if sys.argv and sys.argv[0]: executable = sys.argv[0] path = which("rezolve", env={"PATH":os.path.dirname(executable), "PATHEXT":os.environ.get("PATHEXT", "")}) binpath = os.path.dirname(path) if path else None # TODO: improve this, could still pick up non-production 'rezolve' if not binpath: path = which("rezolve") if path: binpath = os.path.dirname(path) if binpath: validation_file = os.path.join(binpath, ".rez_production_install") if os.path.exists(validation_file): return os.path.realpath(binpath) return None
python
def rez_bin_path(self): """Get path containing rez binaries, or None if no binaries are available, or Rez is not a production install. """ binpath = None if sys.argv and sys.argv[0]: executable = sys.argv[0] path = which("rezolve", env={"PATH":os.path.dirname(executable), "PATHEXT":os.environ.get("PATHEXT", "")}) binpath = os.path.dirname(path) if path else None # TODO: improve this, could still pick up non-production 'rezolve' if not binpath: path = which("rezolve") if path: binpath = os.path.dirname(path) if binpath: validation_file = os.path.join(binpath, ".rez_production_install") if os.path.exists(validation_file): return os.path.realpath(binpath) return None
[ "def", "rez_bin_path", "(", "self", ")", ":", "binpath", "=", "None", "if", "sys", ".", "argv", "and", "sys", ".", "argv", "[", "0", "]", ":", "executable", "=", "sys", ".", "argv", "[", "0", "]", "path", "=", "which", "(", "\"rezolve\"", ",", "e...
Get path containing rez binaries, or None if no binaries are available, or Rez is not a production install.
[ "Get", "path", "containing", "rez", "binaries", "or", "None", "if", "no", "binaries", "are", "available", "or", "Rez", "is", "not", "a", "production", "install", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/system.py#L191-L214
train
227,286
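rez_bin_path above looks for the 'rezolve' binary (first beside the current executable, then on PATH) and then requires a '.rez_production_install' sentinel file in that directory. The sentinel check distilled with shutil.which - the sentinel name comes from the code above, everything else is illustrative:

import os
import shutil

def production_bin_path(tool="rezolve",
                        sentinel=".rez_production_install"):
    """Return the tool's bin directory only if it looks like a
    production install (i.e. the sentinel file is present)."""
    path = shutil.which(tool)
    if not path:
        return None
    binpath = os.path.dirname(path)
    if os.path.exists(os.path.join(binpath, sentinel)):
        return os.path.realpath(binpath)
    return None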
nerdvegas/rez
src/rez/system.py
System.get_summary_string
def get_summary_string(self): """Get a string summarising the state of Rez as a whole. Returns: String. """ from rez.plugin_managers import plugin_manager txt = "Rez %s" % __version__ txt += "\n\n%s" % plugin_manager.get_summary_string() return txt
python
def get_summary_string(self): """Get a string summarising the state of Rez as a whole. Returns: String. """ from rez.plugin_managers import plugin_manager txt = "Rez %s" % __version__ txt += "\n\n%s" % plugin_manager.get_summary_string() return txt
[ "def", "get_summary_string", "(", "self", ")", ":", "from", "rez", ".", "plugin_managers", "import", "plugin_manager", "txt", "=", "\"Rez %s\"", "%", "__version__", "txt", "+=", "\"\\n\\n%s\"", "%", "plugin_manager", ".", "get_summary_string", "(", ")", "return", ...
Get a string summarising the state of Rez as a whole. Returns: String.
[ "Get", "a", "string", "summarising", "the", "state", "of", "Rez", "as", "a", "whole", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/system.py#L221-L231
train
227,287
nerdvegas/rez
src/rez/system.py
System.clear_caches
def clear_caches(self, hard=False): """Clear all caches in Rez. Rez caches package contents and iteration during a python session. Thus newly released packages, and changes to existing packages, may not be picked up. You need to clear the cache for these changes to become visible. Args: hard (bool): Perform a 'hard' cache clear. This just means that the memcached cache is also cleared. Generally this is not needed - this option is for debugging purposes. """ from rez.package_repository import package_repository_manager from rez.utils.memcached import memcached_client package_repository_manager.clear_caches() if hard: with memcached_client() as client: client.flush()
python
def clear_caches(self, hard=False): """Clear all caches in Rez. Rez caches package contents and iteration during a python session. Thus newly released packages, and changes to existing packages, may not be picked up. You need to clear the cache for these changes to become visible. Args: hard (bool): Perform a 'hard' cache clear. This just means that the memcached cache is also cleared. Generally this is not needed - this option is for debugging purposes. """ from rez.package_repository import package_repository_manager from rez.utils.memcached import memcached_client package_repository_manager.clear_caches() if hard: with memcached_client() as client: client.flush()
[ "def", "clear_caches", "(", "self", ",", "hard", "=", "False", ")", ":", "from", "rez", ".", "package_repository", "import", "package_repository_manager", "from", "rez", ".", "utils", ".", "memcached", "import", "memcached_client", "package_repository_manager", ".",...
Clear all caches in Rez. Rez caches package contents and iteration during a python session. Thus newly released packages, and changes to existing packages, may not be picked up. You need to clear the cache for these changes to become visible. Args: hard (bool): Perform a 'hard' cache clear. This just means that the memcached cache is also cleared. Generally this is not needed - this option is for debugging purposes.
[ "Clear", "all", "caches", "in", "Rez", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/system.py#L233-L252
train
227,288
nerdvegas/rez
src/rez/resolver.py
Resolver.solve
def solve(self): """Perform the solve. """ with log_duration(self._print, "memcache get (resolve) took %s"): solver_dict = self._get_cached_solve() if solver_dict: self.from_cache = True self._set_result(solver_dict) else: self.from_cache = False solver = self._solve() solver_dict = self._solver_to_dict(solver) self._set_result(solver_dict) with log_duration(self._print, "memcache set (resolve) took %s"): self._set_cached_solve(solver_dict)
python
def solve(self): """Perform the solve. """ with log_duration(self._print, "memcache get (resolve) took %s"): solver_dict = self._get_cached_solve() if solver_dict: self.from_cache = True self._set_result(solver_dict) else: self.from_cache = False solver = self._solve() solver_dict = self._solver_to_dict(solver) self._set_result(solver_dict) with log_duration(self._print, "memcache set (resolve) took %s"): self._set_cached_solve(solver_dict)
[ "def", "solve", "(", "self", ")", ":", "with", "log_duration", "(", "self", ".", "_print", ",", "\"memcache get (resolve) took %s\"", ")", ":", "solver_dict", "=", "self", ".", "_get_cached_solve", "(", ")", "if", "solver_dict", ":", "self", ".", "from_cache",...
Perform the solve.
[ "Perform", "the", "solve", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolver.py#L107-L123
train
227,289
nerdvegas/rez
src/rez/resolver.py
Resolver._set_cached_solve
def _set_cached_solve(self, solver_dict):
    """Store a solve to memcached.

    If there is NOT a resolve timestamp:
    - store the solve to a non-timestamped entry.

    If there IS a resolve timestamp (let us call this T):
    - if NO newer package in the solve has been released since T,
      - then store the solve to a non-timestamped entry;
    - else:
      - store the solve to a timestamped entry.
    """
    if self.status_ != ResolverStatus.solved:
        return  # don't cache failed solves

    if not (self.caching and self.memcached_servers):
        return

    # most recent release times get stored with solve result in the cache
    releases_since_solve = False
    release_times_dict = {}
    variant_states_dict = {}

    for variant in self.resolved_packages_:
        time_ = get_last_release_time(variant.name, self.package_paths)

        # don't cache if a release time isn't known
        if time_ == 0:
            self._print("Did not send memcache key: a repository could "
                        "not provide a most recent release time for %r",
                        variant.name)
            return

        if self.timestamp and self.timestamp < time_:
            releases_since_solve = True

        release_times_dict[variant.name] = time_
        repo = variant.resource._repository
        variant_states_dict[variant.name] = \
            repo.get_variant_state_handle(variant.resource)

    timestamped = (self.timestamp and releases_since_solve)
    key = self._memcache_key(timestamped=timestamped)
    data = (solver_dict, release_times_dict, variant_states_dict)

    with self._memcached_client() as client:
        client.set(key, data)

    self._print("Sent memcache key: %r", key)
python
def _set_cached_solve(self, solver_dict):
    """Store a solve to memcached.

    If there is NOT a resolve timestamp:
    - store the solve to a non-timestamped entry.

    If there IS a resolve timestamp (let us call this T):
    - if NO newer package in the solve has been released since T,
      - then store the solve to a non-timestamped entry;
    - else:
      - store the solve to a timestamped entry.
    """
    if self.status_ != ResolverStatus.solved:
        return  # don't cache failed solves

    if not (self.caching and self.memcached_servers):
        return

    # most recent release times get stored with solve result in the cache
    releases_since_solve = False
    release_times_dict = {}
    variant_states_dict = {}

    for variant in self.resolved_packages_:
        time_ = get_last_release_time(variant.name, self.package_paths)

        # don't cache if a release time isn't known
        if time_ == 0:
            self._print("Did not send memcache key: a repository could "
                        "not provide a most recent release time for %r",
                        variant.name)
            return

        if self.timestamp and self.timestamp < time_:
            releases_since_solve = True

        release_times_dict[variant.name] = time_
        repo = variant.resource._repository
        variant_states_dict[variant.name] = \
            repo.get_variant_state_handle(variant.resource)

    timestamped = (self.timestamp and releases_since_solve)
    key = self._memcache_key(timestamped=timestamped)
    data = (solver_dict, release_times_dict, variant_states_dict)

    with self._memcached_client() as client:
        client.set(key, data)

    self._print("Sent memcache key: %r", key)
[ "def", "_set_cached_solve", "(", "self", ",", "solver_dict", ")", ":", "if", "self", ".", "status_", "!=", "ResolverStatus", ".", "solved", ":", "return", "# don't cache failed solves", "if", "not", "(", "self", ".", "caching", "and", "self", ".", "memcached_s...
Store a solve to memcached.

If there is NOT a resolve timestamp:
- store the solve to a non-timestamped entry.

If there IS a resolve timestamp (let us call this T):
- if NO newer package in the solve has been released since T,
  - then store the solve to a non-timestamped entry;
- else:
  - store the solve to a timestamped entry.
[ "Store", "a", "solve", "to", "memcached", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolver.py#L310-L356
train
227,290
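Editorial note: the timestamped/non-timestamped decision above can be worked through with assumed values. The entry becomes timestamped only when a resolve timestamp exists AND some resolved package was released after it:

# Hypothetical values; only the decision logic mirrors the code above.
timestamp = 1500000000                  # resolve timestamp T
release_times = {"foo": 1400000000,     # released before T
                 "bar": 1600000000}     # released after T
releases_since_solve = any(timestamp < t for t in release_times.values())
timestamped = bool(timestamp and releases_since_solve)
assert timestamped is True   # 'bar' was released after T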
nerdvegas/rez
src/rez/resolver.py
Resolver._memcache_key
def _memcache_key(self, timestamped=False):
    """Makes a key suitable as a memcache entry."""
    request = tuple(map(str, self.package_requests))
    repo_ids = []
    for path in self.package_paths:
        repo = package_repository_manager.get_repository(path)
        repo_ids.append(repo.uid)

    t = ["resolve",
         request,
         tuple(repo_ids),
         self.package_filter_hash,
         self.package_orderers_hash,
         self.building,
         config.prune_failed_graph]

    if timestamped and self.timestamp:
        t.append(self.timestamp)

    return str(tuple(t))
python
def _memcache_key(self, timestamped=False):
    """Makes a key suitable as a memcache entry."""
    request = tuple(map(str, self.package_requests))
    repo_ids = []
    for path in self.package_paths:
        repo = package_repository_manager.get_repository(path)
        repo_ids.append(repo.uid)

    t = ["resolve",
         request,
         tuple(repo_ids),
         self.package_filter_hash,
         self.package_orderers_hash,
         self.building,
         config.prune_failed_graph]

    if timestamped and self.timestamp:
        t.append(self.timestamp)

    return str(tuple(t))
[ "def", "_memcache_key", "(", "self", ",", "timestamped", "=", "False", ")", ":", "request", "=", "tuple", "(", "map", "(", "str", ",", "self", ".", "package_requests", ")", ")", "repo_ids", "=", "[", "]", "for", "path", "in", "self", ".", "package_path...
Makes a key suitable as a memcache entry.
[ "Makes", "a", "key", "suitable", "as", "a", "memcache", "entry", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolver.py#L358-L377
train
227,291
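Editorial note: the key above is simply a stringified tuple containing the request plus everything that could change the solve result. An illustration with made-up values (the shape is from the code; the values are assumptions):

t = ["resolve",
     ("python-2.7", "maya-2018"),   # the request, as strings
     (101, 102),                    # repository uids
     "abc123",                      # package filter hash
     "def456",                      # package orderers hash
     False,                         # building
     True]                          # config.prune_failed_graph
key = str(tuple(t))                 # any difference yields a new key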
nerdvegas/rez
src/rez/shells.py
create_shell
def create_shell(shell=None, **kwargs):
    """Returns a Shell of the given type, or the current shell type if shell
    is None."""
    if not shell:
        shell = config.default_shell

    if not shell:
        from rez.system import system
        shell = system.shell

    from rez.plugin_managers import plugin_manager
    return plugin_manager.create_instance('shell', shell, **kwargs)
python
def create_shell(shell=None, **kwargs):
    """Returns a Shell of the given type, or the current shell type if shell
    is None."""
    if not shell:
        shell = config.default_shell

    if not shell:
        from rez.system import system
        shell = system.shell

    from rez.plugin_managers import plugin_manager
    return plugin_manager.create_instance('shell', shell, **kwargs)
[ "def", "create_shell", "(", "shell", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "shell", ":", "shell", "=", "config", ".", "default_shell", "if", "not", "shell", ":", "from", "rez", ".", "system", "import", "system", "shell", "=", "...
Returns a Shell of the given type, or the current shell type if shell is None.
[ "Returns", "a", "Shell", "of", "the", "given", "type", "or", "the", "current", "shell", "type", "if", "shell", "is", "None", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/shells.py#L25-L35
train
227,292
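Editorial note: create_shell falls through from an explicit argument, to config.default_shell, to the detected system shell. A brief usage sketch (assuming a bash shell plugin is installed):

from rez.shells import create_shell

sh = create_shell()        # current/default shell type
sh = create_shell("bash")  # explicit shell plugin by name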
nerdvegas/rez
src/rez/shells.py
Shell.startup_capabilities
def startup_capabilities(cls, rcfile=False, norc=False, stdin=False,
                         command=False):
    """
    Given a set of options related to shell startup, return the actual
    options that will be applied.
    @returns 4-tuple representing applied value of each option.
    """
    raise NotImplementedError
python
def startup_capabilities(cls, rcfile=False, norc=False, stdin=False,
                         command=False):
    """
    Given a set of options related to shell startup, return the actual
    options that will be applied.
    @returns 4-tuple representing applied value of each option.
    """
    raise NotImplementedError
[ "def", "startup_capabilities", "(", "cls", ",", "rcfile", "=", "False", ",", "norc", "=", "False", ",", "stdin", "=", "False", ",", "command", "=", "False", ")", ":", "raise", "NotImplementedError" ]
Given a set of options related to shell startup, return the actual
options that will be applied.

@returns 4-tuple representing applied value of each option.
[ "Given", "a", "set", "of", "options", "related", "to", "shell", "startup", "return", "the", "actual", "options", "that", "will", "be", "applied", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/shells.py#L54-L61
train
227,293
nerdvegas/rez
src/rez/vendor/distlib/index.py
PackageIndex.upload_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
                filetype='sdist', pyversion='source', keystore=None):
    """
    Upload a release file to the index.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the file to be uploaded.
    :param filename: The pathname of the file to be uploaded.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key used
                          for signing.
    :param filetype: The type of the file being uploaded. This is the
                     distutils command which produced that file, e.g.
                     ``sdist`` or ``bdist_wheel``.
    :param pyversion: The version of Python which the release relates
                      to. For code compatible with any Python, this would
                      be ``source``, otherwise it would be e.g. ``3.2``.
    :param keystore: The path to a directory which contains the keys
                     used in signing. If not specified, the instance's
                     ``gpg_home`` attribute is used instead.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    if not os.path.exists(filename):
        raise DistlibException('not found: %s' % filename)
    metadata.validate()
    d = metadata.todict()
    sig_file = None
    if signer:
        if not self.gpg:
            logger.warning('no signing program available - not signed')
        else:
            sig_file = self.sign_file(filename, signer, sign_password,
                                      keystore)
    with open(filename, 'rb') as f:
        file_data = f.read()
    md5_digest = hashlib.md5(file_data).hexdigest()
    sha256_digest = hashlib.sha256(file_data).hexdigest()
    d.update({
        ':action': 'file_upload',
        'protcol_version': '1',
        'filetype': filetype,
        'pyversion': pyversion,
        'md5_digest': md5_digest,
        'sha256_digest': sha256_digest,
    })
    files = [('content', os.path.basename(filename), file_data)]
    if sig_file:
        with open(sig_file, 'rb') as f:
            sig_data = f.read()
        files.append(('gpg_signature', os.path.basename(sig_file),
                      sig_data))
        shutil.rmtree(os.path.dirname(sig_file))
    request = self.encode_request(d.items(), files)
    return self.send_request(request)
python
def upload_file(self, metadata, filename, signer=None, sign_password=None,
                filetype='sdist', pyversion='source', keystore=None):
    """
    Upload a release file to the index.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the file to be uploaded.
    :param filename: The pathname of the file to be uploaded.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key used
                          for signing.
    :param filetype: The type of the file being uploaded. This is the
                     distutils command which produced that file, e.g.
                     ``sdist`` or ``bdist_wheel``.
    :param pyversion: The version of Python which the release relates
                      to. For code compatible with any Python, this would
                      be ``source``, otherwise it would be e.g. ``3.2``.
    :param keystore: The path to a directory which contains the keys
                     used in signing. If not specified, the instance's
                     ``gpg_home`` attribute is used instead.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    if not os.path.exists(filename):
        raise DistlibException('not found: %s' % filename)
    metadata.validate()
    d = metadata.todict()
    sig_file = None
    if signer:
        if not self.gpg:
            logger.warning('no signing program available - not signed')
        else:
            sig_file = self.sign_file(filename, signer, sign_password,
                                      keystore)
    with open(filename, 'rb') as f:
        file_data = f.read()
    md5_digest = hashlib.md5(file_data).hexdigest()
    sha256_digest = hashlib.sha256(file_data).hexdigest()
    d.update({
        ':action': 'file_upload',
        'protcol_version': '1',
        'filetype': filetype,
        'pyversion': pyversion,
        'md5_digest': md5_digest,
        'sha256_digest': sha256_digest,
    })
    files = [('content', os.path.basename(filename), file_data)]
    if sig_file:
        with open(sig_file, 'rb') as f:
            sig_data = f.read()
        files.append(('gpg_signature', os.path.basename(sig_file),
                      sig_data))
        shutil.rmtree(os.path.dirname(sig_file))
    request = self.encode_request(d.items(), files)
    return self.send_request(request)
[ "def", "upload_file", "(", "self", ",", "metadata", ",", "filename", ",", "signer", "=", "None", ",", "sign_password", "=", "None", ",", "filetype", "=", "'sdist'", ",", "pyversion", "=", "'source'", ",", "keystore", "=", "None", ")", ":", "self", ".", ...
Upload a release file to the index.

:param metadata: A :class:`Metadata` instance defining at least a name
                 and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's private key used
                      for signing.
:param filetype: The type of the file being uploaded. This is the
                 distutils command which produced that file, e.g.
                 ``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates to.
                  For code compatible with any Python, this would be
                  ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys used in
                 signing. If not specified, the instance's ``gpg_home``
                 attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
         request.
[ "Upload", "a", "release", "file", "to", "the", "index", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/distlib/index.py#L238-L293
train
227,294
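Editorial note: the record above reads the file once and hashes the same bytes twice; that digest step is self-contained and easy to extract. (The 'protcol_version' spelling is preserved above because the misspelling exists in distlib upstream, not just in this record.)

import hashlib

# Read a file once, then compute both digests upload_file sends to PyPI.
def file_digests(path):
    with open(path, 'rb') as f:
        data = f.read()
    return (hashlib.md5(data).hexdigest(),
            hashlib.sha256(data).hexdigest())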
nerdvegas/rez
src/rez/solver.py
_get_dependency_order
def _get_dependency_order(g, node_list):
    """Return list of nodes as close as possible to the ordering in node_list,
    but with child nodes earlier in the list than parents."""
    access_ = accessibility(g)
    deps = dict((k, set(v) - set([k])) for k, v in access_.iteritems())
    nodes = node_list + list(set(g.nodes()) - set(node_list))
    ordered_nodes = []

    while nodes:
        n_ = nodes[0]
        n_deps = deps.get(n_)
        if (n_ in ordered_nodes) or (n_deps is None):
            nodes = nodes[1:]
            continue

        moved = False
        for i, n in enumerate(nodes[1:]):
            if n in n_deps:
                nodes = [nodes[i + 1]] + nodes[:i + 1] + nodes[i + 2:]
                moved = True
                break

        if not moved:
            ordered_nodes.append(n_)
            nodes = nodes[1:]

    return ordered_nodes
python
def _get_dependency_order(g, node_list):
    """Return list of nodes as close as possible to the ordering in node_list,
    but with child nodes earlier in the list than parents."""
    access_ = accessibility(g)
    deps = dict((k, set(v) - set([k])) for k, v in access_.iteritems())
    nodes = node_list + list(set(g.nodes()) - set(node_list))
    ordered_nodes = []

    while nodes:
        n_ = nodes[0]
        n_deps = deps.get(n_)
        if (n_ in ordered_nodes) or (n_deps is None):
            nodes = nodes[1:]
            continue

        moved = False
        for i, n in enumerate(nodes[1:]):
            if n in n_deps:
                nodes = [nodes[i + 1]] + nodes[:i + 1] + nodes[i + 2:]
                moved = True
                break

        if not moved:
            ordered_nodes.append(n_)
            nodes = nodes[1:]

    return ordered_nodes
[ "def", "_get_dependency_order", "(", "g", ",", "node_list", ")", ":", "access_", "=", "accessibility", "(", "g", ")", "deps", "=", "dict", "(", "(", "k", ",", "set", "(", "v", ")", "-", "set", "(", "[", "k", "]", ")", ")", "for", "k", ",", "v",...
Return list of nodes as close as possible to the ordering in node_list, but with child nodes earlier in the list than parents.
[ "Return", "list", "of", "nodes", "as", "close", "as", "possible", "to", "the", "ordering", "in", "node_list", "but", "with", "child", "nodes", "earlier", "in", "the", "list", "than", "parents", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1110-L1136
train
227,295
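Editorial note: the invariant above (stay close to the requested order, but never emit a parent before one of its children) can be shown with a simplified stand-in that uses a plain adjacency dict instead of the vendored graph type:

# Simplified, self-contained illustration of the child-first ordering.
def child_first(order, children):
    seen, out = set(), []

    def visit(n):
        if n in seen:
            return
        seen.add(n)
        for c in children.get(n, ()):   # children are emitted first
            visit(c)
        out.append(n)

    for n in order:
        visit(n)
    return out

# 'a' depends on 'b', so 'b' is emitted before 'a':
assert child_first(["a", "b"], {"a": ["b"]}) == ["b", "a"]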
nerdvegas/rez
src/rez/solver.py
_short_req_str
def _short_req_str(package_request):
    """print shortened version of '==X|==Y|==Z' ranged requests."""
    if not package_request.conflict:
        versions = package_request.range.to_versions()
        if versions and len(versions) == len(package_request.range) \
                and len(versions) > 1:
            return "%s-%s(%d)" % (package_request.name,
                                  str(package_request.range.span()),
                                  len(versions))
    return str(package_request)
python
def _short_req_str(package_request):
    """print shortened version of '==X|==Y|==Z' ranged requests."""
    if not package_request.conflict:
        versions = package_request.range.to_versions()
        if versions and len(versions) == len(package_request.range) \
                and len(versions) > 1:
            return "%s-%s(%d)" % (package_request.name,
                                  str(package_request.range.span()),
                                  len(versions))
    return str(package_request)
[ "def", "_short_req_str", "(", "package_request", ")", ":", "if", "not", "package_request", ".", "conflict", ":", "versions", "=", "package_request", ".", "range", ".", "to_versions", "(", ")", "if", "versions", "and", "len", "(", "versions", ")", "==", "len"...
print shortened version of '==X|==Y|==Z' ranged requests.
[ "print", "shortened", "version", "of", "==", "X|", "==", "Y|", "==", "Z", "ranged", "requests", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2275-L2284
train
227,296
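Editorial note: the formatting rule is 'name-<span>(N)' whenever a request is an explicit list of N versions (N > 1). A self-contained sketch; the span rendering below is an assumption standing in for rez's VersionRange.span(), not its actual output format:

def short_req(name, versions):
    if len(versions) > 1:
        span = "%s..%s" % (versions[0], versions[-1])  # assumed rendering
        return "%s-%s(%d)" % (name, span, len(versions))
    return "%s==%s" % (name, versions[0])

print(short_req("foo", ["1.0", "1.1", "1.2"]))  # foo-1.0..1.2(3)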
nerdvegas/rez
src/rez/solver.py
PackageVariant.requires_list
def requires_list(self):
    """
    It is important that this property is calculated lazily. Getting the
    'requires' attribute may trigger a package load, which may be avoided if
    this variant is reduced away before that happens.
    """
    requires = self.variant.get_requires(build_requires=self.building)

    reqlist = RequirementList(requires)
    if reqlist.conflict:
        raise ResolveError(
            "The package %s has an internal requirements conflict: %s"
            % (str(self), str(reqlist)))

    return reqlist
python
def requires_list(self):
    """
    It is important that this property is calculated lazily. Getting the
    'requires' attribute may trigger a package load, which may be avoided if
    this variant is reduced away before that happens.
    """
    requires = self.variant.get_requires(build_requires=self.building)

    reqlist = RequirementList(requires)
    if reqlist.conflict:
        raise ResolveError(
            "The package %s has an internal requirements conflict: %s"
            % (str(self), str(reqlist)))

    return reqlist
[ "def", "requires_list", "(", "self", ")", ":", "requires", "=", "self", ".", "variant", ".", "get_requires", "(", "build_requires", "=", "self", ".", "building", ")", "reqlist", "=", "RequirementList", "(", "requires", ")", "if", "reqlist", ".", "conflict", ...
It is important that this property is calculated lazily. Getting the 'requires' attribute may trigger a package load, which may be avoided if this variant is reduced away before that happens.
[ "It", "is", "important", "that", "this", "property", "is", "calculated", "lazily", ".", "Getting", "the", "requires", "attribute", "may", "trigger", "a", "package", "load", "which", "may", "be", "avoided", "if", "this", "variant", "is", "reduced", "away", "b...
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L299-L313
train
227,297
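Editorial note: the laziness the docstring stresses is the standard compute-on-first-access property pattern. A generic descriptor sketch of that pattern (not rez's own cached-property implementation):

class Lazy(object):
    def __init__(self, fn):
        self.fn = fn

    def __get__(self, obj, owner):
        if obj is None:
            return self
        value = self.fn(obj)                     # computed on first access only
        obj.__dict__[self.fn.__name__] = value   # instance attr shadows descriptor
        return value

# usage sketch: requires_list = Lazy(lambda self: expensive_load(self))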
nerdvegas/rez
src/rez/solver.py
_PackageEntry.sort
def sort(self):
    """Sort variants from most correct to consume, to least.

    Sort rules:

    version_priority:
    - sort by highest versions of packages shared with request;
    - THEN least number of additional packages added to solve;
    - THEN highest versions of additional packages;
    - THEN alphabetical on name of additional packages;
    - THEN variant index.

    intersection_priority:
    - sort by highest number of packages shared with request;
    - THEN sort according to version_priority

    Note:
        In theory 'variant.index' should never factor into the sort unless
        two variants are identical (which shouldn't happen) - this is just
        here as a safety measure so that sorting is guaranteed repeatable
        regardless.
    """
    if self.sorted:
        return

    def key(variant):
        requested_key = []
        names = set()

        for i, request in enumerate(self.solver.request_list):
            if not request.conflict:
                req = variant.requires_list.get(request.name)
                if req is not None:
                    requested_key.append((-i, req.range))
                    names.add(req.name)

        additional_key = []
        for request in variant.requires_list:
            if not request.conflict and request.name not in names:
                additional_key.append((request.range, request.name))

        if (VariantSelectMode[config.variant_select_mode] ==
                VariantSelectMode.version_priority):
            k = (requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)
        else:  # VariantSelectMode.intersection_priority
            k = (len(requested_key),
                 requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)

        return k

    self.variants.sort(key=key, reverse=True)
    self.sorted = True
python
def sort(self):
    """Sort variants from most correct to consume, to least.

    Sort rules:

    version_priority:
    - sort by highest versions of packages shared with request;
    - THEN least number of additional packages added to solve;
    - THEN highest versions of additional packages;
    - THEN alphabetical on name of additional packages;
    - THEN variant index.

    intersection_priority:
    - sort by highest number of packages shared with request;
    - THEN sort according to version_priority

    Note:
        In theory 'variant.index' should never factor into the sort unless
        two variants are identical (which shouldn't happen) - this is just
        here as a safety measure so that sorting is guaranteed repeatable
        regardless.
    """
    if self.sorted:
        return

    def key(variant):
        requested_key = []
        names = set()

        for i, request in enumerate(self.solver.request_list):
            if not request.conflict:
                req = variant.requires_list.get(request.name)
                if req is not None:
                    requested_key.append((-i, req.range))
                    names.add(req.name)

        additional_key = []
        for request in variant.requires_list:
            if not request.conflict and request.name not in names:
                additional_key.append((request.range, request.name))

        if (VariantSelectMode[config.variant_select_mode] ==
                VariantSelectMode.version_priority):
            k = (requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)
        else:  # VariantSelectMode.intersection_priority
            k = (len(requested_key),
                 requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)

        return k

    self.variants.sort(key=key, reverse=True)
    self.sorted = True
[ "def", "sort", "(", "self", ")", ":", "if", "self", ".", "sorted", ":", "return", "def", "key", "(", "variant", ")", ":", "requested_key", "=", "[", "]", "names", "=", "set", "(", ")", "for", "i", ",", "request", "in", "enumerate", "(", "self", "...
Sort variants from most correct to consume, to least.

Sort rules:

version_priority:
- sort by highest versions of packages shared with request;
- THEN least number of additional packages added to solve;
- THEN highest versions of additional packages;
- THEN alphabetical on name of additional packages;
- THEN variant index.

intersection_priority:
- sort by highest number of packages shared with request;
- THEN sort according to version_priority

Note:
    In theory 'variant.index' should never factor into the sort unless
    two variants are identical (which shouldn't happen) - this is just
    here as a safety measure so that sorting is guaranteed repeatable
    regardless.
[ "Sort", "variants", "from", "most", "correct", "to", "consume", "to", "least", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L370-L427
train
227,298
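Editorial note: the THEN-rules above are encoded purely through tuple ordering, since Python compares tuples lexicographically. A reduced illustration with made-up data:

# (shared-with-request terms, -num additional, additional, variant index)
variants = [
    ((2, 0), -1, [("1.0", "extra_a")], 0),
    ((2, 1), -2, [("1.0", "extra_a"), ("1.0", "extra_b")], 1),
]
variants.sort(reverse=True)   # highest shared version wins first
assert variants[0][3] == 1    # later terms never reached when earlier differ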
nerdvegas/rez
src/rez/solver.py
_PackageVariantList.get_intersection
def get_intersection(self, range_):
    """Get a list of variants that intersect with the given range.

    Args:
        range_ (`VersionRange`): Package version range.

    Returns:
        List of `_PackageEntry` objects.
    """
    result = []

    for entry in self.entries:
        package, value = entry

        if value is None:
            continue  # package was blocked by package filters

        if package.version not in range_:
            continue

        if isinstance(value, list):
            variants = value
            entry_ = _PackageEntry(package, variants, self.solver)
            result.append(entry_)
            continue

        # apply package filter
        if self.solver.package_filter:
            rule = self.solver.package_filter.excludes(package)
            if rule:
                if config.debug_package_exclusions:
                    print_debug("Package '%s' was excluded by rule '%s'"
                                % (package.qualified_name, str(rule)))
                entry[1] = None
                continue

        # expand package entry into list of variants
        if self.solver.package_load_callback:
            self.solver.package_load_callback(package)

        variants_ = []
        for var in package.iter_variants():
            variant = PackageVariant(var, self.solver.building)
            variants_.append(variant)

        entry[1] = variants_
        entry_ = _PackageEntry(package, variants_, self.solver)
        result.append(entry_)

    return result or None
python
def get_intersection(self, range_):
    """Get a list of variants that intersect with the given range.

    Args:
        range_ (`VersionRange`): Package version range.

    Returns:
        List of `_PackageEntry` objects.
    """
    result = []

    for entry in self.entries:
        package, value = entry

        if value is None:
            continue  # package was blocked by package filters

        if package.version not in range_:
            continue

        if isinstance(value, list):
            variants = value
            entry_ = _PackageEntry(package, variants, self.solver)
            result.append(entry_)
            continue

        # apply package filter
        if self.solver.package_filter:
            rule = self.solver.package_filter.excludes(package)
            if rule:
                if config.debug_package_exclusions:
                    print_debug("Package '%s' was excluded by rule '%s'"
                                % (package.qualified_name, str(rule)))
                entry[1] = None
                continue

        # expand package entry into list of variants
        if self.solver.package_load_callback:
            self.solver.package_load_callback(package)

        variants_ = []
        for var in package.iter_variants():
            variant = PackageVariant(var, self.solver.building)
            variants_.append(variant)

        entry[1] = variants_
        entry_ = _PackageEntry(package, variants_, self.solver)
        result.append(entry_)

    return result or None
[ "def", "get_intersection", "(", "self", ",", "range_", ")", ":", "result", "=", "[", "]", "for", "entry", "in", "self", ".", "entries", ":", "package", ",", "value", "=", "entry", "if", "value", "is", "None", ":", "continue", "# package was blocked by pack...
Get a list of variants that intersect with the given range.

Args:
    range_ (`VersionRange`): Package version range.

Returns:
    List of `_PackageEntry` objects.
[ "Get", "a", "list", "of", "variants", "that", "intersect", "with", "the", "given", "range", "." ]
1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L453-L502
train
227,299
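Editorial note: get_intersection memoises in place; each entry is a mutable [package, state] pair where state is None (filtered out), a list (already expanded to variants), or an unexpanded marker. A generic sketch of that scheme with illustrative names:

UNEXPANDED = object()

def expand(entry, make_variants):
    package, state = entry
    if state is None:
        return None                          # previously excluded by a filter
    if state is UNEXPANDED:
        entry[1] = make_variants(package)    # expand once, cache in place
    return entry[1]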