INSTRUCTION
stringlengths 1
46.3k
| RESPONSE
stringlengths 75
80.2k
|
|---|---|
If the executable given isn't an absolute path, search $PATH for the interpreter
|
def resolve_interpreter(exe):
    """
    If the executable given isn't an absolute path, search $PATH for the interpreter
    """
    # A bare version number (e.g. "2.7") maps to the interpreter registered
    # for that version, if any.
    python_versions = get_installed_pythons()
    if exe in python_versions:
        exe = python_versions[exe]
    # A non-absolute name is resolved against each $PATH entry; first hit wins.
    if os.path.abspath(exe) != exe:
        for directory in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(directory, exe)
            if os.path.exists(candidate):
                exe = candidate
                break
    if not os.path.exists(exe):
        logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
        raise SystemExit(3)
    if not is_executable(exe):
        logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
        raise SystemExit(3)
    return exe
|
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
|
def make_environment_relocatable(home_dir):
    """
    Makes the already-existing environment use relative paths, and takes out
    the #!-based environment selection in scripts.
    """
    home_dir, _lib_dir, _inc_dir, bin_dir = path_locations(home_dir)
    activate_this = os.path.join(bin_dir, 'activate_this.py')
    # Older environments may predate activate_this.py; warn but still fix up
    # whatever scripts and .pth/.egg-link files are present.
    if not os.path.exists(activate_this):
        logger.fatal(
            'The environment doesn\'t have a file %s -- please re-run virtualenv '
            'on this environment to update it' % activate_this)
    fixup_scripts(home_dir, bin_dir)
    fixup_pth_and_egg_link(home_dir)
|
Return a script that'll work in a relocatable environment.
|
def relative_script(lines):
    "Return a script that'll work in a relocatable environment."
    activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
    # Inserting before a `from __future__ import ...` statement would be a
    # SyntaxError, so place the activation snippet after the last future
    # statement, or directly after the shebang when there is none.
    insert_at = 1
    for idx in range(len(lines) - 1, -1, -1):
        if lines[idx].split()[:3] == ['from', '__future__', 'import']:
            insert_at = idx + 1
            break
    return lines[:insert_at] + ['', activate, ''] + lines[insert_at:]
|
Makes .pth and .egg-link files use relative paths
|
def fixup_pth_and_egg_link(home_dir, sys_path=None):
    """Makes .pth and .egg-link files use relative paths"""
    home_dir = os.path.normcase(os.path.abspath(home_dir))
    if sys_path is None:
        sys_path = sys.path
    for entry in sys_path:
        entry = entry or '.'
        if not os.path.isdir(entry):
            continue
        entry = os.path.normcase(os.path.abspath(entry))
        # Anything outside the environment is left alone.
        if not entry.startswith(home_dir):
            logger.debug('Skipping system (non-environment) directory %s' % entry)
            continue
        for name in os.listdir(entry):
            filename = os.path.join(entry, name)
            if filename.endswith('.pth'):
                if os.access(filename, os.W_OK):
                    fixup_pth_file(filename)
                else:
                    logger.warn('Cannot write .pth file %s, skipping' % filename)
            if filename.endswith('.egg-link'):
                if os.access(filename, os.W_OK):
                    fixup_egg_link(filename)
                else:
                    logger.warn('Cannot write .egg-link file %s, skipping' % filename)
|
Make a filename relative, where the filename is dest, and it is
being referred to from the filename source.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
|
def make_relative_path(source, dest, dest_is_directory=True):
    """
    Return *dest* expressed relative to the directory containing *source*.

    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../another-place/src/Directory'
    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../home/user/src/Directory'
    >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
    './'
    """
    dest_filename = None
    if not dest_is_directory:
        # Split the filename off; only directories participate in the walk.
        dest_filename = os.path.basename(dest)
        dest = os.path.dirname(dest)
    source_dir = os.path.normpath(os.path.abspath(os.path.dirname(source)))
    dest = os.path.normpath(os.path.abspath(dest))
    source_parts = source_dir.strip(os.path.sep).split(os.path.sep)
    dest_parts = dest.strip(os.path.sep).split(os.path.sep)
    # Skip the shared ancestor, then climb out of what remains of source.
    common = 0
    while (common < len(source_parts) and common < len(dest_parts)
           and source_parts[common] == dest_parts[common]):
        common += 1
    parts = ['..'] * (len(source_parts) - common) + dest_parts[common:]
    if dest_filename is not None:
        parts.append(dest_filename)
    if not parts:
        # Special case for the current directory (otherwise it'd be '')
        return './'
    return os.path.sep.join(parts)
|
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.5'`` then the
script will start with ``#!/usr/bin/env python2.5`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
|
def create_bootstrap_script(extra_text, python_version=''):
    """
    Creates a bootstrap script, which is like this script but with
    extend_parser, adjust_options, and after_install hooks.

    The returned string (written to disk, of course) can be used as a
    bootstrap script with your own customizations: it is the standard
    virtualenv.py script with your extra text (Python code) appended.

    If the extra text defines these functions, they will be called:

    ``extend_parser(optparse_parser)``:
        Add or remove options from the parser here.
    ``adjust_options(options, args)``:
        Change options here, or change the args (if you accept different
        kinds of arguments, be sure you modify ``args`` so it is only
        ``[DEST_DIR]``).
    ``after_install(options, home_dir)``:
        Called after everything is installed — the hook you are most
        likely to use. Example::

            def after_install(options, home_dir):
                subprocess.call([join(home_dir, 'bin', 'easy_install'),
                                 'MyPackage'])
                subprocess.call([join(home_dir, 'bin', 'my-package-script'),
                                 'setup', home_dir])

        This immediately installs a package and runs a setup script from
        that package.

    Passing e.g. ``python_version='2.5'`` makes the script start with
    ``#!/usr/bin/env python2.5`` instead of ``#!/usr/bin/env python``; use
    this when the script must run under a particular Python version.
    """
    # Read our own source (the .py, even when running from a .pyc).
    filename = __file__
    if filename.endswith('.pyc'):
        filename = filename[:-1]
    with codecs.open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    py_exe = 'python%s' % python_version
    header = ('#!/usr/bin/env %s\n' % py_exe) + '## WARNING: This file is generated\n'
    # The marker is split so this very file doesn't contain it literally.
    return (header + content).replace('##EXT' 'END##', extra_text)
|
Read a given number of 32-bits unsigned integers from the given file
with the given endianness.
|
def read_data(file, endian, num=1):
    """
    Read a given number of 32-bits unsigned integers from the given file
    with the given endianness.
    """
    # 'L' is a 4-byte unsigned integer under the standard sizing implied by
    # an explicit endianness prefix.
    values = struct.unpack(endian + 'L' * num, file.read(4 * num))
    return values[0] if len(values) == 1 else values
|
If we are in a progress scope, and no log messages have been
shown, write out another '.'.
|
def show_progress(self):
    """If we are in a progress scope, and no log messages have been
    shown, write out another '.'"""
    if not self.in_progress_hanging:
        return
    # Write straight to stdout so the dot lands on the pending progress line.
    sys.stdout.write('.')
    sys.stdout.flush()
|
Returns the level that stdout runs at
|
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
|
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
|
def level_matches(self, level, consumer_level):
    """
    Decide whether a message at *level* reaches a consumer at
    *consumer_level*. An int matches when it is at or above the consumer's
    level; a slice matches when the consumer's level is inside [start, stop).

    >>> l = Logger([])
    >>> l.level_matches(3, 4)
    False
    >>> l.level_matches(3, 2)
    True
    >>> l.level_matches(slice(None, 3), 3)
    False
    >>> l.level_matches(slice(None, 3), 2)
    True
    >>> l.level_matches(slice(1, 3), 1)
    True
    >>> l.level_matches(slice(2, 3), 1)
    False
    """
    if not isinstance(level, slice):
        return level >= consumer_level
    lo, hi = level.start, level.stop
    if lo is not None and consumer_level < lo:
        return False
    if hi is not None and consumer_level >= hi:
        return False
    return True
|
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
|
def update_defaults(self, defaults):
    """
    Updates the given defaults with values from the config files and
    the environ. Does a little special handling for certain types of
    options (lists).

    :param defaults: dict of option defaults to update in place.
    :returns: the same dict, updated.
    """
    # Gather overrides in increasing priority: config files first, then
    # environment variables (which therefore win on key conflicts).
    config = {}
    # 1. config files
    config.update(dict(self.get_config_section('virtualenv')))
    # 2. environmental variables
    config.update(dict(self.get_environ_vars()))
    # Then set the options with those values
    for key, val in config.items():
        # Normalize to long-option form ("--foo-bar") for parser lookup.
        key = key.replace('_', '-')
        if not key.startswith('--'):
            key = '--%s' % key  # only prefer long opts
        option = self.get_option(key)
        if option is not None:
            # ignore empty values
            if not val:
                continue
            # handle multiline configs
            if option.action == 'append':
                val = val.split()
            else:
                option.nargs = 1
            # Boolean-style actions get their string coerced via strtobool.
            if option.action == 'store_false':
                val = not strtobool(val)
            elif option.action in ('store_true', 'count'):
                val = strtobool(val)
            try:
                val = option.convert_value(key, val)
            except optparse.OptionValueError:
                e = sys.exc_info()[1]
                # Fixed typo in the user-facing message ("occured").
                print("An error occurred during configuration: %s" % e)
                sys.exit(3)
            defaults[option.dest] = val
    return defaults
|
Get a section of a configuration
|
def get_config_section(self, name):
    """
    Get a section of a configuration
    """
    # A missing section reads as empty instead of raising NoSectionError.
    return self.config.items(name) if self.config.has_section(name) else []
|
Returns a generator with all environmental vars with prefix VIRTUALENV
|
def get_environ_vars(self, prefix='VIRTUALENV_'):
    """
    Returns a generator with all environmental vars with prefix VIRTUALENV

    Yields ``(name, value)`` pairs where ``name`` is the variable name with
    the prefix removed and lowercased.
    """
    for key, val in os.environ.items():
        if key.startswith(prefix):
            # Strip only the leading prefix; str.replace() would also mangle
            # any later occurrence of the prefix inside the variable name
            # (e.g. VIRTUALENV_VIRTUALENV_X must become 'virtualenv_x').
            yield (key[len(prefix):].lower(), val)
|
Overriding to make updating the defaults after instantiation of
the option parser possible; update_defaults() does the dirty work.
|
def get_default_values(self):
    """
    Overriding optparse's hook so defaults can be refreshed after the parser
    is instantiated; update_defaults() does the dirty work.
    """
    if not self.process_default_values:
        # Old, pre-Optik 1.5 behaviour.
        return optparse.Values(self.defaults)
    defaults = self.update_defaults(self.defaults.copy())  # ours
    for option in self._get_all_options():
        default = defaults.get(option.dest)
        # Raw string defaults still need the option's own type checking.
        if isinstance(default, basestring):
            defaults[option.dest] = option.check_value(option.get_opt_string(), default)
    return optparse.Values(defaults)
|
Read the next method from the source, once one complete method has
been assembled it is placed in the internal queue.
|
def _next_method(self):
    """Read the next method from the source, once one complete method has
    been assembled it is placed in the internal queue.

    Loops reading frames until at least one item (method, complete message,
    exception, or error tuple) has been queued for the caller.
    """
    # Hoist hot attribute lookups into locals for the read loop.
    queue = self.queue
    put = self._quick_put
    read_frame = self.source.read_frame
    while not queue:
        try:
            frame_type, channel, payload = read_frame()
        except Exception as exc:
            #
            # Connection was closed? Framing Error?
            #
            # Queue the exception itself; read_method() re-raises it.
            put(exc)
            break
        # NOTE(review): increments by one per frame despite the name
        # bytes_recv — confirm whether this should count payload bytes.
        self.bytes_recv += 1
        # Heartbeat frames (type 8) are acceptable regardless of what the
        # channel is currently expecting.
        if frame_type not in (self.expected_types[channel], 8):
            put((
                channel,
                UnexpectedFrame(
                    'Received frame {0} while expecting type: {1}'.format(
                        frame_type, self.expected_types[channel]))))
        elif frame_type == 1:
            self._process_method_frame(channel, payload)
        elif frame_type == 2:
            self._process_content_header(channel, payload)
        elif frame_type == 3:
            self._process_content_body(channel, payload)
        elif frame_type == 8:
            self._process_heartbeat(channel, payload)
|
Process Content Header frames
|
def _process_content_header(self, channel, payload):
"""Process Content Header frames"""
partial = self.partial_messages[channel]
partial.add_header(payload)
if partial.complete:
#
# a bodyless message, we're done
#
self._quick_put((channel, partial.method_sig,
partial.args, partial.msg))
self.partial_messages.pop(channel, None)
self.expected_types[channel] = 1
else:
#
# wait for the content-body
#
self.expected_types[channel] = 3
|
Process Content Body frames
|
def _process_content_body(self, channel, payload):
"""Process Content Body frames"""
partial = self.partial_messages[channel]
partial.add_payload(payload)
if partial.complete:
#
# Stick the message in the queue and go back to
# waiting for method frames
#
self._quick_put((channel, partial.method_sig,
partial.args, partial.msg))
self.partial_messages.pop(channel, None)
self.expected_types[channel] = 1
|
Read a method from the peer.
|
def read_method(self):
    """Read a method from the peer."""
    self._next_method()
    item = self._quick_get()
    # Transport-level failures are queued as bare exceptions; channel-level
    # AMQP errors arrive as (channel, error) tuples. Re-raise either.
    if isinstance(item, Exception):
        raise item
    if isinstance(item, tuple) and isinstance(item[1], AMQPError):
        raise item[1]
    return item
|
Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
figure out which subparsers need to be activated (then recursively monkey-patch those).
We save all active ArgumentParsers to extract all their possible option names later.
|
def _patch_argument_parser(self):
    '''
    Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
    all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
    figure out which subparsers need to be activated (then recursively monkey-patch those).
    We save all active ArgumentParsers to extract all their possible option names later.
    '''
    # Shared state mutated via closure by the patched actions below.
    active_parsers = [self._parser]
    parsed_args = argparse.Namespace()
    visited_actions = []
    def patch(parser):
        # Swap the parser's class in place so parse_known_args is hooked.
        parser.__class__ = IntrospectiveArgumentParser
        for action in parser._actions:
            # TODO: accomplish this with super
            # One wrapper subclass per action: records the visit, then only
            # delegates to the original __call__ when that is safe.
            class IntrospectAction(action.__class__):
                def __call__(self, parser, namespace, values, option_string=None):
                    debug('Action stub called on', self)
                    debug('\targs:', parser, namespace, values, option_string)
                    debug('\torig class:', self._orig_class)
                    debug('\torig callable:', self._orig_callable)
                    visited_actions.append(self)
                    if self._orig_class == argparse._SubParsersAction:
                        debug('orig class is a subparsers action: patching and running it')
                        # Recurse into the subparser selected by the first
                        # positional value, and record it as active.
                        active_subparser = self._name_parser_map[values[0]]
                        patch(active_subparser)
                        active_parsers.append(active_subparser)
                        self._orig_callable(parser, namespace, values, option_string=option_string)
                    elif self._orig_class in safe_actions:
                        # Only side-effect-free action types are executed.
                        self._orig_callable(parser, namespace, values, option_string=option_string)
            if getattr(action, "_orig_class", None):
                debug("Action", action, "already patched")
            # NOTE(review): an already-patched action is still re-wrapped
            # below, overwriting _orig_class/_orig_callable — confirm intended.
            action._orig_class = action.__class__
            action._orig_callable = action.__call__
            action.__class__ = IntrospectAction
    patch(self._parser)
    debug("Active parsers:", active_parsers)
    debug("Visited actions:", visited_actions)
    debug("Parse result namespace:", parsed_args)
    return active_parsers, parsed_args
|
Visits the active parsers and their actions, executes their completers or introspects them to collect their
option strings. Returns the resulting completions as a list of strings.
This method is exposed for overriding in subclasses; there is no need to use it directly.
|
def collect_completions(self, active_parsers, parsed_args, cword_prefix, debug):
    '''
    Visits the active parsers and their actions, executes their completers or introspects them to collect their
    option strings. Returns the resulting completions as a list of strings.
    This method is exposed for overriding in subclasses; there is no need to use it directly.
    '''
    completions = []
    for parser in active_parsers:
        debug("Examining parser", parser)
        for action in parser._actions:
            debug("Examining action", action)
            if isinstance(action, argparse._SubParsersAction):
                # Either we are already inside one of this action's
                # subparsers, or we are completing the subcommand name.
                subparser_activated = False
                for subparser in action._name_parser_map.values():
                    if subparser in active_parsers:
                        subparser_activated = True
                if subparser_activated:
                    # Parent parser completions are not valid in the subparser, so flush them
                    completions = []
                else:
                    completions += [subcmd for subcmd in action.choices.keys() if subcmd.startswith(cword_prefix)]
            elif self.always_complete_options or (len(cword_prefix) > 0 and cword_prefix[0] in parser.prefix_chars):
                # Offer this action's option strings ("-f"/"--foo") that
                # match the prefix under the cursor.
                completions += [option for option in action.option_strings if option.startswith(cword_prefix)]
        debug("Active actions (L={l}): {a}".format(l=len(parser.active_actions), a=parser.active_actions))
        # Only run completers if current word does not start with - (is not an optional)
        if len(cword_prefix) == 0 or cword_prefix[0] not in parser.prefix_chars:
            for active_action in parser.active_actions:
                if not active_action.option_strings:  # action is a positional
                    if action_is_satisfied(active_action) and not action_is_open(active_action):
                        # A fully-consumed positional accepts no more values.
                        debug("Skipping", active_action)
                        continue
                debug("Activating completion for", active_action, active_action._orig_class)
                #completer = getattr(active_action, 'completer', DefaultCompleter())
                completer = getattr(active_action, 'completer', None)
                # Without an explicit completer, fall back to the action's
                # declared choices (subcommand names are handled above).
                if completer is None and active_action.choices is not None:
                    if not isinstance(active_action, argparse._SubParsersAction):
                        completer = completers.ChoicesCompleter(active_action.choices)
                if completer:
                    if len(active_action.option_strings) > 0:  # only for optionals
                        if not action_is_satisfied(active_action):
                            # This means the current action will fail to parse if the word under the cursor is not given
                            # to it, so give it exclusive control over completions (flush previous completions)
                            debug("Resetting completions because", active_action, "is unsatisfied")
                            completions = []
                    if callable(completer):
                        completions += [c for c in completer(prefix=cword_prefix, action=active_action,
                                                             parsed_args=parsed_args)
                                        if self.validator(c, cword_prefix)]
                    else:
                        debug("Completer is not callable, trying the readline completer protocol instead")
                        # readline protocol: call complete(text, state) with
                        # increasing state until it returns None.
                        for i in range(9999):
                            next_completion = completer.complete(cword_prefix, i)
                            if next_completion is None:
                                break
                            if self.validator(next_completion, cword_prefix):
                                completions.append(next_completion)
                    debug("Completions:", completions)
                elif not isinstance(active_action, argparse._SubParsersAction):
                    pass
                    """
                    debug("Completer not available, falling back")
                    try:
                        # TODO: what happens if completions contain newlines? How do I make compgen use IFS?
                        bashcomp_cmd = ['bash', '-c', "compgen -A file -- '{p}'".format(p=cword_prefix)]
                        completions += subprocess.check_output(bashcomp_cmd).decode(sys_encoding).splitlines()
                    except subprocess.CalledProcessError:
                        pass
                    """
    return completions
|
Ensures collected completions are Unicode text, de-duplicates them, and excludes those specified by ``exclude``.
Returns the filtered completions as an iterable.
This method is exposed for overriding in subclasses; there is no need to use it directly.
|
def filter_completions(self, completions):
    '''
    Ensures collected completions are Unicode text, de-duplicates them, and excludes those specified by ``exclude``.
    Returns the filtered completions as an iterable.
    This method is exposed for overriding in subclasses; there is no need to use it directly.
    '''
    # On Python 2 normalize byte strings to unicode up front; otherwise the
    # implicit ascii decode on output would fail for non-ascii completions.
    if USING_PYTHON2:
        for i, completion in enumerate(completions):
            if type(completion) != unicode:
                completions[i] = completion.decode(sys_encoding)
    if self.exclude is None:
        self.exclude = set()
    # Single ordered pass: drop excluded entries and later duplicates.
    seen = set(self.exclude)
    filtered = []
    for completion in completions:
        if completion not in seen:
            seen.add(completion)
            filtered.append(completion)
    return filtered
|
If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
completions before the first colon.
If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
adds a space after the completion.
This method is exposed for overriding in subclasses; there is no need to use it directly.
|
def quote_completions(self, completions, cword_prequote, first_colon_pos):
    '''
    If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
    occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
    Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
    completions before the first colon.
    If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
    adds a space after the completion.
    This method is exposed for overriding in subclasses; there is no need to use it directly.
    '''
    # Prefer the wordbreak set exported by the shell integration, then the
    # shell's own COMP_WORDBREAKS, then our built-in default.
    comp_wordbreaks = os.environ.get('_ARGCOMPLETE_COMP_WORDBREAKS', os.environ.get('COMP_WORDBREAKS', self.wordbreaks))
    if USING_PYTHON2:
        comp_wordbreaks = comp_wordbreaks.decode(sys_encoding)
    # Shell metacharacters that must always be escaped, even when the shell
    # did not list them as wordbreaks.
    punctuation_chars = '();<>|&!`'
    for char in punctuation_chars:
        if char not in comp_wordbreaks:
            comp_wordbreaks += char
    # If the word under the cursor was quoted, escape the quote char and add the leading quote back in.
    # Otherwise, escape all COMP_WORDBREAKS chars.
    if cword_prequote == '':
        # Bash mangles completions which contain colons.
        # This workaround has the same effect as __ltrim_colon_completions in bash_completion.
        if first_colon_pos:
            completions = [c[first_colon_pos+1:] for c in completions]
        for wordbreak_char in comp_wordbreaks:
            completions = [c.replace(wordbreak_char, '\\'+wordbreak_char) for c in completions]
    else:
        if cword_prequote == '"':
            # These keep special meaning inside double quotes.
            for char in '`$!':
                completions = [c.replace(char, '\\'+char) for c in completions]
        completions = [cword_prequote+c.replace(cword_prequote, '\\'+cword_prequote) for c in completions]
    # Note: similar functionality in bash is turned off by supplying the "-o nospace" option to complete.
    # We can't use that functionality because bash is not smart enough to recognize continuation characters (/) for
    # which no space should be added.
    # NOTE(review): completions[0][-1] raises IndexError if the single
    # completion is an empty string — confirm that cannot happen here.
    continuation_chars = '=/:'
    if len(completions) == 1 and completions[0][-1] not in continuation_chars:
        if cword_prequote == '' and not completions[0].endswith(' '):
            completions[0] += ' '
    return completions
|
Alternate entry point for using the argcomplete completer in a readline-based REPL. See also
`rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.
Usage:
.. code-block:: python
import argcomplete, argparse, readline
parser = argparse.ArgumentParser()
...
completer = argcomplete.CompletionFinder(parser)
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
result = input("prompt> ")
(Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).
|
def complete(self, text, state):
    '''
    Alternate entry point for using the argcomplete completer in a readline-based REPL. See also
    `rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.
    Usage:
    .. code-block:: python
        import argcomplete, argparse, readline
        parser = argparse.ArgumentParser()
        ...
        completer = argcomplete.CompletionFinder(parser)
        readline.set_completer(completer.complete)
        readline.parse_and_bind("tab: complete")
        result = input("prompt> ")
    (Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).
    '''
    if state == 0:
        # First call for this text: compute and cache the full match list.
        print("Retrieving matches for", text)
        split_result = split_line(text)
        cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos = split_result
        print("Split line into prequote={}, prefix={}, suffix={}, words={}, fcp={}".format(cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos))
        comp_words.insert(0, "prog")
        self.matches = self._get_completions(comp_words, cword_prefix, cword_prequote, first_colon_pos)
        print("Set matches to", self.matches)
    # Subsequent calls walk the cached list until it is exhausted.
    if state >= len(self.matches):
        return None
    print("Returning", self.matches[state])
    return self.matches[state]
|
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
|
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
|
Expands any strings within `data` such as '{system.user}'.
|
def expand_system_vars(data):
    """Expands any strings within `data` such as '{system.user}'."""
    def _walk(value):
        # Strings: expand env vars and '~', then apply {system.*} formatting.
        if isinstance(value, basestring):
            value = expanduser(expandvars(value))
            return scoped_format(value, system=system)
        # Sequences come back as lists with each element expanded.
        if isinstance(value, (list, tuple, set)):
            return [_walk(item) for item in value]
        # Mappings: expand the values, keep the keys.
        if isinstance(value, dict):
            return dict((k, _walk(v)) for k, v in value.iteritems())
        return value
    return _walk(data)
|
Create a separate copy of this config.
|
def copy(self, overrides=None, locked=False):
    """Create a separate copy of this config."""
    duplicate = copy.copy(self)
    if overrides is not None:
        duplicate.overrides = overrides
    duplicate.locked = locked
    # Drop cached values so the copy re-resolves against its own settings.
    duplicate._uncache()
    return duplicate
|
Set a setting to the given value.
Note that `key` can be in dotted form, eg
'plugins.release_hook.emailer.sender'.
|
def override(self, key, value):
    """Set a setting to the given value.
    Note that `key` can be in dotted form, eg
    'plugins.release_hook.emailer.sender'.
    """
    parts = key.split('.')
    if len(parts) == 1:
        self.overrides[key] = value
        self._uncache(key)
        return
    # Dotted keys may only reach into plugin settings.
    if parts[0] != "plugins":
        raise AttributeError("no such setting: %r" % key)
    self.plugins.override(parts[1:], value)
|
Remove a setting override, if one exists.
|
def remove_override(self, key):
    """Remove a setting override, if one exists."""
    if '.' in key:
        # Dotted (plugin) overrides cannot be removed yet.
        raise NotImplementedError
    if key in self.overrides:
        del self.overrides[key]
        self._uncache(key)
|
Returns True if the warning setting is enabled.
|
def warn(self, key):
    """Returns True if the warning setting is enabled."""
    # Global silencers first, then the all-switch or the per-key flag.
    if self.quiet or self.warn_none:
        return False
    return self.warn_all or getattr(self, "warn_%s" % key)
|
Returns True if the debug setting is enabled.
|
def debug(self, key):
    """Returns True if the debug setting is enabled."""
    # Global silencers first, then the all-switch or the per-key flag.
    if self.quiet or self.debug_none:
        return False
    return self.debug_all or getattr(self, "debug_%s" % key)
|
Returns the entire configuration as a dict.
Note that this will force all plugins to be loaded.
|
def data(self):
    """Returns the entire configuration as a dict.
    Note that this will force all plugins to be loaded.
    """
    result = {}
    for key in self._data:
        if key == "plugins":
            result[key] = self.plugins.data()
            continue
        try:
            result[key] = getattr(self, key)
        except AttributeError:
            pass  # unknown key, just leave it unchanged
    return result
|
Returns package search paths with local path removed.
|
def nonlocal_packages_path(self):
    """Returns package search paths with local path removed."""
    # Work on a copy so the configured list itself is left untouched;
    # only the first occurrence of the local path is dropped.
    search_paths = list(self.packages_path)
    if self.local_packages_path in search_paths:
        search_paths.remove(self.local_packages_path)
    return search_paths
|
Swap this config with another.
This is used by the unit tests to swap the config to one that is
shielded from any user config updates. Do not use this method unless
you have good reason.
|
def _swap(self, other):
"""Swap this config with another.
This is used by the unit tests to swap the config to one that is
shielded from any user config updates. Do not use this method unless
you have good reason.
"""
self.__dict__, other.__dict__ = other.__dict__, self.__dict__
|
See comment block at top of 'rezconfig' describing how the main
config is assembled.
|
def _create_main_config(cls, overrides=None):
    """See comment block at top of 'rezconfig' describing how the main
    config is assembled."""
    # Config files, in the order they are loaded: the module's own root
    # config, any files named in $REZ_CONFIG_FILE, then ~/.rezconfig.
    filepaths = [get_module_root_config()]
    env_filepath = os.getenv("REZ_CONFIG_FILE")
    if env_filepath:
        filepaths.extend(env_filepath.split(os.pathsep))
    filepaths.append(os.path.expanduser("~/.rezconfig"))
    return Config(filepaths, overrides)
|
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14", # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit (don't)
"amd64": "64bit",
},
}
|
def platform_mapped(func):
    """Decorates functions for lookups within a config.platform_map dictionary.

    The first level key is mapped to the func.__name__ of the decorated
    function; regular expressions form the second level keys and values.
    Note that there is no guaranteed order within the dictionary evaluation:
    only the first matching regular expression is applied.

    For example:
        config.platform_map = {
            "os": {
                r"Scientific Linux-(.*)": r"Scientific-\1",  # Scientific Linux-x.x -> Scientific-x.x
                r"Ubuntu-14.\d": r"Ubuntu-14",               # Any Ubuntu-14.x -> Ubuntu-14
            },
            "arch": {
                "x86_64": "64bit",  # Maps both x86_64 and amd64 -> 64bit (don't)
                "amd64": "64bit",
            },
        }
    """
    def inner(*args, **kwargs):
        # Lazy import: config itself uses platform, so importing at module
        # scope would create a circular dependency.
        from rez.config import config
        result = func(*args, **kwargs)
        # The wrapped function's name selects the sub-map to apply.
        mapping = config.platform_map.get(func.__name__)
        if mapping:
            for pattern, replacement in mapping.iteritems():
                result, num_changes = re.subn(pattern, replacement, result)
                if num_changes > 0:
                    break
        return result
    return inner
|
Compute and return the PageRank in a directed graph.
@type graph: digraph
@param graph: Digraph.
@type damping_factor: number
@param damping_factor: PageRank damping factor.
@type max_iterations: number
@param max_iterations: Maximum number of iterations.
@type min_delta: number
@param min_delta: Smallest variation required to have a new iteration.
@rtype: Dict
@return: Dict containing all the nodes PageRank.
|
def pagerank(graph, damping_factor=0.85, max_iterations=100, min_delta=0.00001):
    """
    Compute and return the PageRank in a directed graph.

    @type  graph: digraph
    @param graph: Digraph.

    @type  damping_factor: number
    @param damping_factor: PageRank damping factor.

    @type  max_iterations: number
    @param max_iterations: Maximum number of iterations.

    @type  min_delta: number
    @param min_delta: Smallest variation required to have a new iteration.

    @rtype:  Dict
    @return: Dict containing all the nodes PageRank.
    """
    nodes = graph.nodes()
    num_nodes = len(nodes)
    if num_nodes == 0:
        return {}

    # Score every node receives regardless of inbound links.
    base_rank = (1.0 - damping_factor) / num_nodes

    # Every node starts with an equal share of the total rank.
    ranks = dict.fromkeys(nodes, 1.0 / num_nodes)

    for _ in range(max_iterations):
        delta = 0  # accumulated change relative to the previous sweep

        # Recompute each node's rank from the ranks of its referrers.
        for node in nodes:
            score = base_rank
            for referrer in graph.incidents(node):
                score += damping_factor * ranks[referrer] / len(graph.neighbors(referrer))
            delta += abs(ranks[node] - score)
            ranks[node] = score

        # Converged - no further iterations needed.
        if delta < min_delta:
            break

    return ranks
|
Expands a requirement string like 'python-2.*', 'foo-2.*+<*', etc.
Wildcards are expanded to the latest version that matches. There is also a
special wildcard '**' that will expand to the full version, but it cannot
be used in combination with '*'.
Wildcards MUST placehold a whole version token, not partial - while 'foo-2.*'
is valid, 'foo-2.v*' is not.
Wildcards MUST appear at the end of version numbers - while 'foo-1.*.*' is
valid, 'foo-1.*.0' is not.
It is possible that an expansion will result in an invalid request string
(such as 'foo-2+<2'). The appropriate exception will be raised if this
happens.
Examples:
>>> print expand_requirement('python-2.*')
python-2.7
>>> print expand_requirement('python==2.**')
python==2.7.12
>>> print expand_requirement('python<**')
python<3.0.5
Args:
request (str): Request to expand, eg 'python-2.*'
paths (list of str, optional): paths to search for package families,
defaults to `config.packages_path`.
Returns:
str: Expanded request string.
|
def expand_requirement(request, paths=None):
    """Expands a requirement string like 'python-2.*', 'foo-2.*+<*', etc.

    Wildcards are expanded to the latest version that matches. There is also a
    special wildcard '**' that will expand to the full version, but it cannot
    be used in combination with '*'.

    Wildcards MUST placehold a whole version token, not partial - while 'foo-2.*'
    is valid, 'foo-2.v*' is not.

    Wildcards MUST appear at the end of version numbers - while 'foo-1.*.*' is
    valid, 'foo-1.*.0' is not.

    It is possible that an expansion will result in an invalid request string
    (such as 'foo-2+<2'). The appropriate exception will be raised if this
    happens.

    Examples:

        >>> print expand_requirement('python-2.*')
        python-2.7
        >>> print expand_requirement('python==2.**')
        python==2.7.12
        >>> print expand_requirement('python<**')
        python<3.0.5

    Args:
        request (str): Request to expand, eg 'python-2.*'
        paths (list of str, optional): paths to search for package families,
            defaults to `config.packages_path`.

    Returns:
        str: Expanded request string.
    """
    # Fast path - no wildcards, nothing to expand.
    if '*' not in request:
        return request

    from rez.vendor.version.version import VersionRange
    from rez.vendor.version.requirement import Requirement
    from rez.packages_ import get_latest_package
    from uuid import uuid4

    # maps temporary uid tokens back to the wildcard ('*' or '**') they stand in for
    wildcard_map = {}
    # maps original version objects to their expanded versions (see visit_version)
    expanded_versions = {}
    request_ = request

    # replace wildcards with valid version tokens that can be replaced again
    # afterwards. This produces a horrendous, but both valid and temporary,
    # version string. '**' must be replaced first, otherwise each of its two
    # chars would be consumed as separate '*' wildcards.
    #
    while "**" in request_:
        uid = "_%s_" % uuid4().hex
        request_ = request_.replace("**", uid, 1)
        wildcard_map[uid] = "**"
    while '*' in request_:
        uid = "_%s_" % uuid4().hex
        request_ = request_.replace('*', uid, 1)
        wildcard_map[uid] = '*'

    # create the requirement, then expand wildcards
    #
    req = Requirement(request_, invalid_bound_error=False)

    def expand_version(version):
        # Strip trailing wildcard tokens off `version`, then look up the
        # latest matching package and return its version (trimmed to `rank`
        # tokens, or untrimmed for '**'). Returns None if no wildcard present
        # or the syntax is invalid (eg '**.*').
        rank = len(version)
        wildcard_found = False
        while version and str(version[-1]) in wildcard_map:
            token = wildcard_map[str(version[-1])]
            version = version.trim(len(version) - 1)
            if token == "**":
                if wildcard_found:  # catches bad syntax '**.*'
                    return None
                else:
                    wildcard_found = True
                    rank = 0
                    break
            wildcard_found = True
        if not wildcard_found:
            return None
        # find the latest package matching the wildcard-stripped prefix
        range_ = VersionRange(str(version))
        package = get_latest_package(name=req.name, range_=range_, paths=paths)
        if package is None:
            # no matching package; leave the (stripped) version as-is
            return version
        if rank:
            return package.version.trim(rank)
        else:
            return package.version

    def visit_version(version):
        # requirements like 'foo-1' are actually represented internally as
        # 'foo-1+<1_' - '1_' is the next possible version after '1'. So we have
        # to detect this case and remap the uid-ified wildcard back here too.
        #
        for v, expanded_v in expanded_versions.iteritems():
            if version == v.next():
                return expanded_v.next()
        version_ = expand_version(version)
        if version_ is None:
            return None
        expanded_versions[version] = version_
        return version_

    if req.range_ is not None:
        req.range_.visit_versions(visit_version)

    result = str(req)

    # do some cleanup so that long uids aren't left in invalid wildcarded strings
    for uid, token in wildcard_map.iteritems():
        result = result.replace(uid, token)

    # cast back to a Requirement again, then back to a string. This will catch
    # bad version ranges, but will also put OR'd version ranges into the correct
    # order
    expanded_req = Requirement(result)

    return str(expanded_req)
|
Runs a subproc to calculate a package attribute.
|
def exec_command(attr, cmd):
    """Runs a subproc to calculate a package attribute.

    Args:
        attr (str): Name of the package attribute being created.
        cmd: Command to execute (passed through to `popen`).

    Returns:
        Tuple of (stdout, stderr), each stripped of surrounding whitespace.

    Raises:
        InvalidPackageError: If the command exits with a nonzero status.
    """
    import subprocess

    proc = popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    if proc.returncode:
        from rez.exceptions import InvalidPackageError
        raise InvalidPackageError(
            "Error determining package attribute '%s':\n%s" % (attr, stderr))

    return stdout.strip(), stderr.strip()
|
Runs a python subproc to calculate a package attribute.
Args:
attr (str): Name of package attribute being created.
src (list of str): Python code to execute, will be converted into
semicolon-delimited single line of code.
Returns:
str: Output of python process.
|
def exec_python(attr, src, executable="python"):
    """Runs a python subproc to calculate a package attribute.

    Args:
        attr (str): Name of package attribute being created.
        src (list of str): Python code to execute, will be converted into
            semicolon-delimited single line of code.

    Returns:
        str: Output of python process.

    Raises:
        InvalidPackageError: If the python process exits with a nonzero status.
    """
    import subprocess

    # Accept a single statement as well as a list of statements.
    if isinstance(src, basestring):
        src = [src]

    proc = popen([executable, "-c", "; ".join(src)],
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()

    if proc.returncode:
        from rez.exceptions import InvalidPackageError
        raise InvalidPackageError(
            "Error determining package attribute '%s':\n%s" % (attr, stderr))

    return stdout.strip()
|
Find the rez native python package that contains the given module.
This function is used by python 'native' rez installers to find the native
rez python package that represents the python installation that this module
is installed into.
Note:
This function is dependent on the behavior found in the python '_native'
package found in the 'rez-recipes' repository. Specifically, it expects
to find a python package with a '_site_paths' list attribute listing
the site directories associated with the python installation.
Args:
module_name (str): Target python module.
paths (list of str, optional): paths to search for packages,
defaults to `config.packages_path`.
Returns:
`Package`: Native python package containing the named module.
|
def find_site_python(module_name, paths=None):
    """Find the rez native python package that contains the given module.

    This function is used by python 'native' rez installers to find the native
    rez python package that represents the python installation that this module
    is installed into.

    Note:
        This function is dependent on the behavior found in the python '_native'
        package found in the 'rez-recipes' repository. Specifically, it expects
        to find a python package with a '_site_paths' list attribute listing
        the site directories associated with the python installation.

    Args:
        module_name (str): Target python module.
        paths (list of str, optional): paths to search for packages,
            defaults to `config.packages_path`.

    Returns:
        `Package`: Native python package containing the named module.
    """
    from rez.packages_ import iter_packages
    import subprocess
    import ast
    import os

    # Ask the system python where the target module actually lives.
    py_cmd = 'import {x}; print {x}.__path__'.format(x=module_name)

    proc = popen(["python", "-c", py_cmd], stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE)
    out, err = proc.communicate()

    if proc.returncode:
        raise InvalidPackageError(
            "Failed to find installed python module '%s':\n%s"
            % (module_name, err))

    # The subprocess prints a python list literal; evaluate it safely.
    module_paths = ast.literal_eval(out.strip())

    def is_under(path, parent_path):
        # True when 'path' lies strictly inside 'parent_path'.
        return path.startswith(parent_path + os.sep)

    # A package matches when every module path is inside one of its site dirs.
    for package in iter_packages("python", paths=paths):
        if not hasattr(package, "_site_paths"):
            continue
        if all(any(is_under(mp, sp) for sp in package._site_paths)
               for mp in module_paths):
            return package

    raise InvalidPackageError(
        "Failed to find python installation containing the module '%s'. Has "
        "python been installed as a rez package?" % module_name)
|
:exc:`socket.error` and :exc:`IOError` first got
the ``.errno`` attribute in Py2.7
|
def get_errno(exc):
    """:exc:`socket.error` and :exc:`IOError` first got
    the ``.errno`` attribute in Py2.7"""
    # Prefer the real attribute when present - note we return its value
    # even if it happens to be None, matching the original try/except form.
    if hasattr(exc, "errno"):
        return exc.errno

    # Older exception styles packed it as the first of two args:
    # e.args = (errno, reason)
    args = getattr(exc, "args", None)
    if isinstance(args, tuple) and len(args) == 2:
        return args[0]

    return 0
|
A simple function to find an intersection between two arrays.
@type A: List
@param A: First List
@type B: List
@param B: Second List
@rtype: List
@return: List of Intersections
|
def _intersection(A,B):
"""
A simple function to find an intersection between two arrays.
@type A: List
@param A: First List
@type B: List
@param B: Second List
@rtype: List
@return: List of Intersections
"""
intersection = []
for i in A:
if i in B:
intersection.append(i)
return intersection
|
Return a list of transitive edges.
Example of transitivity within graphs: A -> B, B -> C, A -> C
in this case the transitive edge is: A -> C
@attention: This function is only meaningful for directed acyclic graphs.
@type graph: digraph
@param graph: Digraph
@rtype: List
@return: List containing tuples with transitive edges (or an empty array if the digraph
contains a cycle)
|
def transitive_edges(graph):
    """
    Return a list of transitive edges.

    Example of transitivity within graphs: A -> B, B -> C, A -> C
    in this case the transitive edge is: A -> C

    @attention: This function is only meaningful for directed acyclic graphs.

    @type graph: digraph
    @param graph: Digraph

    @rtype: List
    @return: List containing tuples with transitive edges (or an empty array
        if the digraph contains a cycle)
    """
    # Transitivity is only well-defined on acyclic graphs.
    if find_cycle(graph):
        return []

    transitive = []
    for origin in topological_sorting(graph):
        # Every node reachable from 'origin', excluding 'origin' itself.
        reachable = list(traversal(graph, origin, 'pre'))[1:]
        for descendant in reachable:
            # Any node adjacent to both 'origin' and one of its descendants
            # is the far end of a transitive edge - provided that edge
            # actually exists in the graph.
            common = _intersection(graph.neighbors(descendant),
                                   graph.neighbors(origin))
            for node in common:
                if graph.has_edge((origin, node)):
                    transitive.append((origin, node))
    return transitive
|
Compute and return the critical path in an acyclic directed weighted graph.
@attention: This function is only meaningful for directed weighted acyclic graphs
@type graph: digraph
@param graph: Digraph
@rtype: List
@return: List containing all the nodes in the path (or an empty array if the graph
contains a cycle)
|
def critical_path(graph):
    """
    Compute and return the critical path in an acyclic directed weighted graph.

    @attention: This function is only meaningful for directed weighted acyclic
        graphs

    @type graph: digraph
    @param graph: Digraph

    @rtype: List
    @return: List containing all the nodes in the path (or an empty array if
        the graph contains a cycle, or has no nodes)
    """
    # Critical path analysis is undefined on cyclic graphs.
    if len(find_cycle(graph)) != 0:
        return []

    # For every node, record a tuple of (most costly predecessor, cost of the
    # most costly path reaching the node).
    node_tuples = {}

    topological_nodes = topological_sorting(graph)
    for node in topological_nodes:
        node_tuples[node] = (None, 0)

    # Process nodes in topological order so every predecessor is finalized
    # before the node itself is visited.
    for node in topological_nodes:
        # Cost of reaching 'node' through each of its predecessors.
        candidates = [
            (pre, graph.edge_weight((pre, node)) + node_tuples[pre][1])
            for pre in graph.incidents(node)
        ]

        # Keep the most costly predecessor. '>=' preserves the original
        # tie-breaking: the last candidate with the maximum cost wins.
        # (Avoids shadowing builtin 'max' as the original did.)
        best = (None, 0)
        for candidate in candidates:
            if candidate[1] >= best[1]:
                best = candidate
        node_tuples[node] = best

    # The critical node is the one with the highest overall path cost
    # (again, ties resolved in favor of the last one seen).
    best_cost = 0
    critical_node = None
    for node, (_, cost) in list(node_tuples.items()):
        if cost >= best_cost:
            best_cost = cost
            critical_node = node

    # Walk the predecessor chain backwards. The original used recursion here,
    # which could exceed the recursion limit on very long paths (and raised
    # KeyError on an empty graph); this iterative walk handles both.
    path = []
    current = critical_node
    while current is not None:
        path.append(current)
        current = node_tuples[current][0]

    path.reverse()
    return path
|
Uses a 'parse_build_args.py' file to add options, if found.
|
    def bind_cli(cls, parser, group):
        """
        Uses a 'parse_build_args.py' file to add options, if found.

        The file is looked up in the current working directory and executed
        with the name 'parser' bound to `group`, so it can call add_argument()
        etc. to extend that option group. The dest names of any options it
        adds are recorded on `parser` (as '_rezbuild_extra_args') so they can
        be retrieved later in self.build().
        """
        try:
            with open("./parse_build_args.py") as f:
                source = f.read()
        except Exception as e:
            # No (readable) parse_build_args.py - nothing to bind.
            return

        # detect what extra args have been added
        before_args = set(x.dest for x in parser._actions)

        try:
            # NOTE: python 2 exec-statement syntax; runs 'source' with
            # 'parser' aliased to the option group.
            exec source in {"parser": group}
        except Exception as e:
            print_warning("Error in ./parse_build_args.py: %s" % str(e))

        after_args = set(x.dest for x in parser._actions)
        extra_args = after_args - before_args

        # store extra args onto parser so we can get to it in self.build()
        setattr(parser, "_rezbuild_extra_args", list(extra_args))
|
Perform the build.
Note that most of the func args aren't used here - that's because this
info is already passed to the custom build command via environment
variables.
|
    def build(self, context, variant, build_path, install_path, install=False,
              build_type=BuildType.local):
        """Perform the build.

        Note that most of the func args aren't used here - that's because this
        info is already passed to the custom build command via environment
        variables.

        Returns:
            dict: Always contains 'success' (bool); additionally contains
            'build_env_script' (str) when self.write_build_scripts is set.
        """
        ret = {}

        if self.write_build_scripts:
            # write out the script that places the user in a build env, where
            # they can run bez directly themselves.
            build_env_script = os.path.join(build_path, "build-env")
            create_forwarding_script(build_env_script,
                                     module=("build_system", "custom"),
                                     func_name="_FWD__spawn_build_shell",
                                     working_dir=self.working_dir,
                                     build_path=build_path,
                                     variant_index=variant.index,
                                     install=install,
                                     install_path=install_path)
            ret["success"] = True
            ret["build_env_script"] = build_env_script
            return ret

        # get build command
        command = self.package.build_command

        # False just means no build command
        if command is False:
            ret["success"] = True
            return ret

        def expand(txt):
            # substitute {root} and {install} placeholders in the command
            root = self.package.root
            install_ = "install" if install else ''
            return txt.format(root=root, install=install_).strip()

        # a string command is expanded as-is; a list command gets the build
        # args appended and each element expanded individually.
        if isinstance(command, basestring):
            if self.build_args:
                command = command + ' ' + ' '.join(map(quote, self.build_args))

            command = expand(command)
            cmd_str = command
        else:  # list
            command = command + self.build_args
            command = map(expand, command)
            cmd_str = ' '.join(map(quote, command))

        if self.verbose:
            pr = Printer(sys.stdout)
            pr("Running build command: %s" % cmd_str, heading)

        # run the build command
        def _callback(executor):
            # inject the standard rez build env vars into the build shell
            self._add_build_actions(executor,
                                    context=context,
                                    package=self.package,
                                    variant=variant,
                                    build_type=build_type,
                                    install=install,
                                    build_path=build_path,
                                    install_path=install_path)

            if self.opts:
                # write args defined in ./parse_build_args.py out as env vars
                extra_args = getattr(self.opts.parser, "_rezbuild_extra_args", [])

                for key, value in vars(self.opts).iteritems():
                    if key in extra_args:
                        varname = "__PARSE_ARG_%s" % key.upper()

                        # do some value conversions
                        if isinstance(value, bool):
                            value = 1 if value else 0
                        elif isinstance(value, (list, tuple)):
                            value = map(str, value)
                            value = map(quote, value)
                            value = ' '.join(value)

                        executor.env[varname] = value

        retcode, _, _ = context.execute_shell(command=command,
                                              block=True,
                                              cwd=build_path,
                                              actions_callback=_callback)
        ret["success"] = (not retcode)
        return ret
|
Escape the <, >, ^, and & special characters reserved by Windows.
Args:
value (str/EscapedString): String or already escaped string.
Returns:
str: The value escaped for Windows.
|
def escape_string(self, value):
"""Escape the <, >, ^, and & special characters reserved by Windows.
Args:
value (str/EscapedString): String or already escaped string.
Returns:
str: The value escaped for Windows.
"""
if isinstance(value, EscapedString):
return value.formatted(self._escaper)
return self._escaper(value)
|
Create a tag on the toplevel repo if there is no patch repo,
or a tag on the patch repo and bookmark on the top repo if there is a
patch repo
Returns a list where each entry is a dict for each bookmark or tag
created, which looks like {'type': ('bookmark' or 'tag'), 'patch': bool}
|
def _create_tag_highlevel(self, tag_name, message=None):
"""Create a tag on the toplevel repo if there is no patch repo,
or a tag on the patch repo and bookmark on the top repo if there is a
patch repo
Returns a list where each entry is a dict for each bookmark or tag
created, which looks like {'type': ('bookmark' or 'tag'), 'patch': bool}
"""
results = []
if self.patch_path:
# make a tag on the patch queue
tagged = self._create_tag_lowlevel(tag_name, message=message,
patch=True)
if tagged:
results.append({'type': 'tag', 'patch': True})
# use a bookmark on the main repo since we can't change it
self.hg('bookmark', '-f', tag_name)
results.append({'type': 'bookmark', 'patch': False})
else:
tagged = self._create_tag_lowlevel(tag_name, message=message,
patch=False)
if tagged:
results.append({'type': 'tag', 'patch': False})
return results
|
Create a tag on the toplevel or patch repo
If the tag exists, and force is False, no tag is made. If force is True,
and a tag exists, but it is a direct ancestor of the current commit,
and there is no difference in filestate between the current commit
and the tagged commit, no tag is made. Otherwise, the old tag is
overwritten to point at the current commit.
Returns True or False indicating whether the tag was actually committed
|
    def _create_tag_lowlevel(self, tag_name, message=None, force=True,
                             patch=False):
        """Create a tag on the toplevel or patch repo

        If the tag exists, and force is False, no tag is made. If force is True,
        and a tag exists, but it is a direct ancestor of the current commit,
        and there is no difference in filestate between the current commit
        and the tagged commit, no tag is made. Otherwise, the old tag is
        overwritten to point at the current commit.

        Args:
            tag_name (str): Name of the tag to create.
            message (str, optional): Commit message for the tag commit.
            force (bool): Allow overwriting an existing tag (subject to the
                redundancy check described above).
            patch (bool): If True, operate on the patch repo rather than the
                toplevel repo.

        Returns True or False indicating whether the tag was actually committed
        """
        # check if tag already exists, and if it does, if it is a direct
        # ancestor, and there is NO difference in the files between the tagged
        # state and current state
        #
        # This check is mainly to avoid re-creating the same tag over and over
        # on what is essentially the same commit, since tagging will
        # technically create a new commit, and update the working copy to it.
        #
        # Without this check, say you were releasing to three different
        # locations, one right after another; the first would create the tag,
        # and a new tag commit. The second would then recreate the exact same
        # tag, but now pointing at the commit that made the first tag.
        # The third would create the tag a THIRD time, but now pointing at the
        # commit that created the 2nd tag.
        tags = self.get_tags(patch=patch)
        old_commit = tags.get(tag_name)
        if old_commit is not None:
            if not force:
                return False
            old_rev = old_commit['rev']
            # ok, now check to see if direct ancestor...
            if self.is_ancestor(old_rev, '.', patch=patch):
                # ...and if filestates are same
                altered = self.hg('status', '--rev', old_rev, '--rev', '.',
                                  '--no-status')
                # '.hgtags' alone changing just means the previous tag commit
                # itself - still considered "no real change".
                if not altered or altered == ['.hgtags']:
                    force = False
            if not force:
                return False

        tag_args = ['tag', tag_name]
        if message:
            tag_args += ['--message', message]

        # we should be ok with ALWAYS having force flag on now, since we should
        # have already checked if the commit exists.. but be paranoid, in case
        # we've missed some edge case...
        if force:
            tag_args += ['--force']
        self.hg(patch=patch, *tag_args)
        return True
|
Returns True if commit1 is a direct ancestor of commit2, or False
otherwise.
This method considers a commit to be a direct ancestor of itself
|
def is_ancestor(self, commit1, commit2, patch=False):
"""Returns True if commit1 is a direct ancestor of commit2, or False
otherwise.
This method considers a commit to be a direct ancestor of itself"""
result = self.hg("log", "-r", "first(%s::%s)" % (commit1, commit2),
"--template", "exists", patch=patch)
return "exists" in result
|
Apply patch args to a request.
For example, consider:
>>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
["foo-6", "bah-8.1"]
>>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
["foo-5"]
The following rules apply wrt how normal/conflict/weak patches override
(note though that the new request is always added, even if it doesn't
override an existing request):
PATCH OVERRIDES: foo !foo ~foo
----- ---------- --- ---- -----
foo Y Y Y
!foo N N N
~foo N N Y
^foo Y Y Y
Args:
requires (list of str or `version.Requirement`): Request.
patchlist (list of str): List of patch requests.
Returns:
List of `version.Requirement`: Patched request.
|
def get_patched_request(requires, patchlist):
    """Apply patch args to a request.

    For example, consider:

        >>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
        ["foo-6", "bah-8.1"]
        >>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
        ["foo-5"]

    The following rules apply wrt how normal/conflict/weak patches override
    (note though that the new request is always added, even if it doesn't
    override an existing request):

        PATCH  OVERRIDES: foo  !foo  ~foo
        -----  ---------- ---  ----  -----
        foo               Y    Y     Y
        !foo              N    N     N
        ~foo              N    N     Y
        ^foo              Y    Y     Y

    Args:
        requires (list of str or `version.Requirement`): Request.
        patchlist (list of str): List of patch requests.

    Returns:
        List of `version.Requirement`: Patched request.
    """
    # Override table from the docstring: for each patch prefix, whether it
    # overrides an existing 'foo', '!foo', '~foo' request respectively.
    overrides = {
        '':  (True,  True,  True),
        '!': (False, False, False),
        '~': (False, False, True),
        '^': (True,  True,  True)
    }

    # Normalize the incoming request to Requirement objects.
    requires = [x if isinstance(x, Requirement) else Requirement(x)
                for x in requires]
    extra = []

    for patch in patchlist:
        # Split the patch into its prefix char and target package name.
        if patch and patch[0] in ('!', '~', '^'):
            prefix = patch[0]
            pkg_name = Requirement(patch[1:]).name
        else:
            prefix = ''
            pkg_name = Requirement(patch).name

        rule = overrides[prefix]

        # A '^' patch only removes matching requests; it is never inserted
        # itself, so pretend a replacement has already happened.
        replaced = (prefix == '^')

        for i, req in enumerate(requires):
            if req is None or req.name != pkg_name:
                continue

            if not req.conflict:
                replace = rule[0]   # foo
            elif not req.weak:
                replace = rule[1]   # !foo
            else:
                replace = rule[2]   # ~foo

            if replace:
                if replaced:
                    # Already replaced one occurrence (or '^' removal):
                    # drop this requirement entirely.
                    requires[i] = None
                else:
                    requires[i] = Requirement(patch)
                    replaced = True

        if not replaced:
            # The patch overrode nothing in-place; append it to the request.
            extra.append(Requirement(patch))

    return [x for x in requires if x is not None] + extra
|
The execute function of the hook will be called to start the required application
:param app_path: (str) The path of the application executable
:param app_args: (str) Any arguments the application may require
:param version: (str) version of the application being run if set in the "versions" settings
of the Launcher instance, otherwise None
:returns: (dict) The two valid keys are 'command' (str) and 'return_code' (int).
|
    def execute(self, app_path, app_args, version, **kwargs):
        """
        The execute function of the hook will be called to start the required application

        :param app_path: (str) The path of the application executable
        :param app_args: (str) Any arguments the application may require
        :param version: (str) version of the application being run if set in the "versions" settings
                        of the Launcher instance, otherwise None

        :returns: (dict) The two valid keys are 'command' (str) and 'return_code' (int).
        """
        multi_launchapp = self.parent
        extra = multi_launchapp.get_setting("extra")

        # If rez is available, resolve a context from the packages listed in
        # the app's "extra" settings; the app is then launched inside it.
        use_rez = False
        if self.check_rez():
            from rez.resolved_context import ResolvedContext
            from rez.config import config

            # Define variables used to bootstrap tank from overwrite on first reference
            # PYTHONPATH is used by tk-maya
            # NUKE_PATH is used by tk-nuke
            # HIERO_PLUGIN_PATH is used by tk-nuke (nukestudio)
            # KATANA_RESOURCES is used by tk-katana
            config.parent_variables = ["PYTHONPATH", "HOUDINI_PATH", "NUKE_PATH", "HIERO_PLUGIN_PATH", "KATANA_RESOURCES"]

            rez_packages = extra["rez_packages"]
            context = ResolvedContext(rez_packages)
            use_rez = True

        # Build a platform-appropriate launch command line.
        system = sys.platform
        shell_type = 'bash'
        if system == "linux2":
            # on linux, we just run the executable directly
            cmd = "%s %s &" % (app_path, app_args)
        elif self.parent.get_setting("engine") in ["tk-flame", "tk-flare"]:
            # flame and flare works in a different way from other DCCs
            # on both linux and mac, they run unix-style command line
            # and on the mac the more standardized "open" command cannot
            # be utilized.
            cmd = "%s %s &" % (app_path, app_args)
        elif system == "darwin":
            # on the mac, the executable paths are normally pointing
            # to the application bundle and not to the binary file
            # embedded in the bundle, meaning that we should use the
            # built-in mac open command to execute it
            cmd = "open -n \"%s\"" % (app_path)
            if app_args:
                cmd += " --args \"%s\"" % app_args.replace("\"", "\\\"")
        elif system == "win32":
            # on windows, we run the start command in order to avoid
            # any command shells popping up as part of the application launch.
            cmd = "start /B \"App\" \"%s\" %s" % (app_path, app_args)
            shell_type = 'cmd'

        # Execute App in a Rez context
        if use_rez:
            n_env = os.environ.copy()
            proc = context.execute_shell(
                command=cmd,
                parent_environ=n_env,
                shell=shell_type,
                stdin=False,
                block=False
            )
            # Wait for the launched shell to exit, then dump the resolve info.
            exit_code = proc.wait()
            context.print_info(verbosity=True)
        else:
            # run the command to launch the app
            exit_code = os.system(cmd)

        return {
            "command": cmd,
            "return_code": exit_code
        }
|
Checks to see if a Rez package is available in the current environment.
If it is available, add it to the system path, exposing the Rez Python API
:param strict: (bool) If True, raise an error if Rez is not available as a package.
This will prevent the app from being launched.
:returns: A path to the Rez package.
|
    def check_rez(self, strict=True):
        """
        Checks to see if a Rez package is available in the current environment.
        If it is available, add it to the system path, exposing the Rez Python API

        :param strict: (bool) If True, raise an error if Rez is not available as a package.
                       This will prevent the app from being launched.
        :returns: A path to the Rez package (empty string if not found and
                  strict is False).
        """
        system = sys.platform
        # Resolve a 'rez' package via rez-env and echo back its root location.
        if system == "win32":
            rez_cmd = 'rez-env rez -- echo %REZ_REZ_ROOT%'
        else:
            rez_cmd = 'rez-env rez -- printenv REZ_REZ_ROOT'

        process = subprocess.Popen(rez_cmd, stdout=subprocess.PIPE, shell=True)
        rez_path, err = process.communicate()

        # NOTE(review): stderr is not piped, so 'err' is always None here -
        # the check below effectively tests only 'rez_path'; confirm intent.
        if err or not rez_path:
            if strict:
                raise ImportError("Failed to find Rez as a package in the current "
                                  "environment! Try 'rez-bind rez'!")
            else:
                print >> sys.stderr, ("WARNING: Failed to find a Rez package in the current "
                                      "environment. Unable to request Rez packages.")
                rez_path = ""
        else:
            rez_path = rez_path.strip()
            print "Found Rez:", rez_path
            print "Adding Rez to system path..."
            sys.path.append(rez_path)

        return rez_path
|
util func, get last revision of url
|
def get_last_changed_revision(client, url):
    """
    util func, get last revision of url

    Args:
        client: pysvn client instance used to query the repository.
        url (str): Repository URL to inspect.

    Returns:
        The last-changed revision of `url` (as reported by svn.info2).

    Raises:
        ReleaseVCSError: If the svn query fails or returns no results.
    """
    try:
        svn_entries = client.info2(url,
                                   pysvn.Revision(pysvn.opt_revision_kind.head),
                                   recurse=False)
        if not svn_entries:
            raise ReleaseVCSError("svn.info2() returned no results on url %s" % url)
        return svn_entries[0][1].last_changed_rev
    # 'as' form replaces the legacy 'except X, e' comma syntax: it matches
    # the except-style used elsewhere in this file and is also valid py3.
    except pysvn.ClientError as ce:
        raise ReleaseVCSError("svn.info2() raised ClientError: %s" % ce)
|
provide svn with permissions. @TODO this will have to be updated to take
into account automated releases etc.
|
def get_svn_login(realm, username, may_save):
    """
    provide svn with permissions. @TODO this will have to be updated to take
    into account automated releases etc.

    Prompts interactively (via getpass) for the given user's password and
    re-prompts until a non-blank value is entered.

    Returns a (retcode, username, password, may_save) tuple; credentials are
    never saved (final False).
    """
    import getpass

    print "svn requires a password for the user %s:" % username
    pwd = ''
    # keep asking until the user types a non-blank password
    while not pwd.strip():
        pwd = getpass.getpass("--> ")

    return True, username, pwd, False
|
Return a string specifying the given graph as a XML document.
@type G: graph
@param G: Graph.
@rtype: string
@return: String specifying the graph as a XML document.
|
def write(G):
    """
    Return a string specifying the given graph as a XML document.

    @type  G: graph
    @param G: Graph.

    @rtype:  string
    @return: String specifying the graph as a XML document.
    """
    # Hypergraphs have a dedicated writer and document layout.
    if (type(G) == hypergraph):
        return write_hypergraph(G)

    # Pick the root element name from the concrete graph type.
    if (type(G) == graph):
        root_tag = 'graph'
    elif (type(G) == digraph):
        root_tag = 'digraph'
    else:
        raise InvalidGraphType

    # Document root
    grxml = Document()
    grxmlr = grxml.createElement(root_tag)
    grxml.appendChild(grxmlr)

    # Serialize every node along with its attributes.
    for each_node in G.nodes():
        node_elem = grxml.createElement('node')
        node_elem.setAttribute('id', str(each_node))
        grxmlr.appendChild(node_elem)
        for each_attr in G.node_attributes(each_node):
            attr_elem = grxml.createElement('attribute')
            attr_elem.setAttribute('attr', each_attr[0])
            attr_elem.setAttribute('value', each_attr[1])
            node_elem.appendChild(attr_elem)

    # Serialize every edge with its weight, label and attributes.
    for edge_from, edge_to in G.edges():
        edge_elem = grxml.createElement('edge')
        edge_elem.setAttribute('from', str(edge_from))
        edge_elem.setAttribute('to', str(edge_to))
        edge_elem.setAttribute('wt', str(G.edge_weight((edge_from, edge_to))))
        edge_elem.setAttribute('label', str(G.edge_label((edge_from, edge_to))))
        grxmlr.appendChild(edge_elem)
        for attr_name, attr_value in G.edge_attributes((edge_from, edge_to)):
            attr_elem = grxml.createElement('attribute')
            attr_elem.setAttribute('attr', attr_name)
            attr_elem.setAttribute('value', attr_value)
            edge_elem.appendChild(attr_elem)

    return grxml.toprettyxml()
|
Read a graph from a XML document and return it. Nodes and edges specified in the input will
be added to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: graph
@return: Graph
|
def read(string):
    """
    Read a graph from a XML document and return it. Nodes and edges specified
    in the input will be added to the current graph.

    @type  string: string
    @param string: Input string in XML format specifying a graph.

    @rtype:  graph
    @return: Graph
    """
    dom = parseString(string)

    # Choose the graph class from the document's root element.
    if dom.getElementsByTagName("graph"):
        G = graph()
    elif dom.getElementsByTagName("digraph"):
        G = digraph()
    elif dom.getElementsByTagName("hypergraph"):
        # Hypergraphs use a dedicated reader.
        return read_hypergraph(string)
    else:
        raise InvalidGraphType

    # Read nodes...
    for node_elem in dom.getElementsByTagName("node"):
        node_id = node_elem.getAttribute('id')
        G.add_node(node_id)
        for attr_elem in node_elem.getElementsByTagName("attribute"):
            G.add_node_attribute(node_id,
                                 (attr_elem.getAttribute('attr'),
                                  attr_elem.getAttribute('value')))

    # Read edges...
    for edge_elem in dom.getElementsByTagName("edge"):
        edge = (edge_elem.getAttribute('from'), edge_elem.getAttribute('to'))
        if not G.has_edge(edge):
            G.add_edge(edge,
                       wt=float(edge_elem.getAttribute('wt')),
                       label=edge_elem.getAttribute('label'))
        for attr_elem in edge_elem.getElementsByTagName("attribute"):
            attr_tuple = (attr_elem.getAttribute('attr'),
                          attr_elem.getAttribute('value'))
            # avoid adding duplicate attribute tuples to the same edge
            if attr_tuple not in G.edge_attributes(edge):
                G.add_edge_attribute(edge, attr_tuple)

    return G
|
Return a string specifying the given hypergraph as a XML document.
@type hgr: hypergraph
@param hgr: Hypergraph.
@rtype: string
@return: String specifying the graph as a XML document.
|
def write_hypergraph(hgr):
    """
    Return a string specifying the given hypergraph as a XML document.

    @type  hgr: hypergraph
    @param hgr: Hypergraph.

    @rtype:  string
    @return: String specifying the graph as a XML document.
    """
    # Document root
    grxml = Document()
    grxmlr = grxml.createElement('hypergraph')
    grxml.appendChild(grxmlr)

    # Each node...
    nodes = hgr.nodes()
    hyperedges = hgr.hyperedges()

    # Nodes and hyperedges share the same element shape; only the tag name
    # distinguishes them.
    for each_node in (nodes + hyperedges):
        if (each_node in nodes):
            node = grxml.createElement('node')
        else:
            node = grxml.createElement('hyperedge')
        node.setAttribute('id', str(each_node))
        grxmlr.appendChild(node)

        # and its outgoing edge -- only real nodes carry <link> children.
        if each_node in nodes:
            for each_edge in hgr.links(each_node):
                edge = grxml.createElement('link')
                edge.setAttribute('to', str(each_edge))
                node.appendChild(edge)

    return grxml.toprettyxml()
|
Read a graph from a XML document. Nodes and hyperedges specified in the input will be added
to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: hypergraph
@return: Hypergraph
|
def read_hypergraph(string):
    """
    Read a graph from a XML document. Nodes and hyperedges specified in the input will be added
    to the current graph.

    @type  string: string
    @param string: Input string in XML format specifying a graph.

    @rtype:  hypergraph
    @return: Hypergraph
    """
    hgr = hypergraph()
    # Parse the document once and reuse the DOM for both passes (the original
    # re-parsed the identical string a second time).
    dom = parseString(string)

    # First pass: register every node and hyperedge so links can be added.
    for each_node in dom.getElementsByTagName("node"):
        hgr.add_node(each_node.getAttribute('id'))
    for each_node in dom.getElementsByTagName("hyperedge"):
        hgr.add_hyperedge(each_node.getAttribute('id'))

    # Second pass: connect nodes to the hyperedges they reference.
    for each_node in dom.getElementsByTagName("node"):
        for each_edge in each_node.getElementsByTagName("link"):
            hgr.link(str(each_node.getAttribute('id')), str(each_edge.getAttribute('to')))

    return hgr
|
Invoke a diff editor to show the difference between the source of two
packages.
Args:
pkg1 (`Package`): Package to diff.
pkg2 (`Package`): Package to diff against. If None, the next most recent
package version is used.
|
def diff_packages(pkg1, pkg2=None):
"""Invoke a diff editor to show the difference between the source of two
packages.
Args:
pkg1 (`Package`): Package to diff.
pkg2 (`Package`): Package to diff against. If None, the next most recent
package version is used.
"""
if pkg2 is None:
it = iter_packages(pkg1.name)
pkgs = [x for x in it if x.version < pkg1.version]
if not pkgs:
raise RezError("No package to diff with - %s is the earliest "
"package version" % pkg1.qualified_name)
pkgs = sorted(pkgs, key=lambda x: x.version)
pkg2 = pkgs[-1]
def _check_pkg(pkg):
if not (pkg.vcs and pkg.revision):
raise RezError("Cannot diff package %s: it is a legacy format "
"package that does not contain enough information"
% pkg.qualified_name)
_check_pkg(pkg1)
_check_pkg(pkg2)
path = mkdtemp(prefix="rez-pkg-diff")
paths = []
for pkg in (pkg1, pkg2):
print "Exporting %s..." % pkg.qualified_name
path_ = os.path.join(path, pkg.qualified_name)
vcs_cls_1 = plugin_manager.get_plugin_class("release_vcs", pkg1.vcs)
vcs_cls_1.export(revision=pkg.revision, path=path_)
paths.append(path_)
difftool = config.difftool
print "Opening diff viewer %s..." % difftool
proc = Popen([difftool] + paths)
proc.wait()
|
Exit gracefully on ctrl-C.
|
def sigint_handler(signum, frame):
    """Exit gracefully on ctrl-C."""
    global _handled_int
    # Guard so repeated SIGINTs only trigger the shutdown path once.
    if not _handled_int:
        _handled_int = True
        if not _env_var_true("_REZ_QUIET_ON_SIG"):
            print >> sys.stderr, "Interrupted by user"
        # Delegate the actual shutdown to the shared base handler.
        sigbase_handler(signum, frame)
|
Exit gracefully on terminate.
|
def sigterm_handler(signum, frame):
    """Exit gracefully on terminate."""
    global _handled_term
    # Guard so repeated SIGTERMs only trigger the shutdown path once.
    if not _handled_term:
        _handled_term = True
        if not _env_var_true("_REZ_QUIET_ON_SIG"):
            print >> sys.stderr, "Terminated by user"
        # Delegate the actual shutdown to the shared base handler.
        sigbase_handler(signum, frame)
|
Sets up all sub-parsers when help is requested.
|
def format_help(self):
    """Sets up all sub-parsers when help is requested."""
    # Sub-parsers are created lazily elsewhere; force their setup here so
    # the generated help text is complete.
    if self._subparsers:
        for action in self._subparsers._actions:
            if isinstance(action, LazySubParsersAction):
                for parser_name, parser in action._name_parser_map.iteritems():
                    action._setup_subparser(parser_name, parser)
    return super(LazyArgumentParser, self).format_help()
|
Test the validity of a package name string.
Args:
name (str): Name to test.
raise_error (bool): If True, raise an exception on failure
Returns:
bool.
|
def is_valid_package_name(name, raise_error=False):
    """Test the validity of a package name string.

    Args:
        name (str): Name to test.
        raise_error (bool): If True, raise an exception on failure.

    Returns:
        bool.

    Raises:
        PackageRequestError: If `raise_error` is True and the name is invalid.
    """
    # Coerce the regex match object to bool so the return value matches the
    # documented type (the original leaked the match object / None).
    is_valid = bool(PACKAGE_NAME_REGEX.match(name))
    if raise_error and not is_valid:
        raise PackageRequestError("Not a valid package name: %r" % name)
    return is_valid
|
Expand abbreviations in a format string.
If an abbreviation does not match a field, or matches multiple fields, it
is left unchanged.
Example:
>>> fields = ("hey", "there", "dude")
>>> expand_abbreviations("hello {d}", fields)
'hello dude'
Args:
txt (str): Format string.
fields (list of str): Fields to expand to.
Returns:
Expanded string.
|
def expand_abbreviations(txt, fields):
    """Expand abbreviations in a format string.

    If an abbreviation does not match a field, or matches multiple fields, it
    is left unchanged.

    Example:
        >>> fields = ("hey", "there", "dude")
        >>> expand_abbreviations("hello {d}", fields)
        'hello dude'

    Args:
        txt (str): Format string.
        fields (list of str): Fields to expand to.

    Returns:
        Expanded string.
    """
    def _substitute(match):
        token = match.group("var")
        if token in fields:
            # Exact field name: keep as-is.
            return "{%s}" % token
        # Expand only when the prefix matches exactly one field.
        candidates = [field for field in fields if field.startswith(token)]
        if len(candidates) == 1:
            return "{%s}" % candidates[0]
        return "{%s}" % token

    return re.sub(FORMAT_VAR_REGEX, _substitute, txt)
|
Expand shell variables of form $var and ${var}.
Unknown variables are left unchanged.
Args:
text (str): String to expand.
environ (dict): Environ dict to use for expansions, defaults to
os.environ.
Returns:
The expanded string.
|
def expandvars(text, environ=None):
    """Expand shell variables of form $var and ${var}.

    Unknown variables are left unchanged.

    Args:
        text (str): String to expand.
        environ (dict): Environ dict to use for expansions, defaults to
            os.environ.

    Returns:
        The expanded string.
    """
    # Fast path: nothing to expand.
    if '$' not in text:
        return text
    i = 0
    if environ is None:
        environ = os.environ
    while True:
        # Search from `i` so already-substituted text is never re-scanned
        # (avoids infinite loops when a value itself contains '$').
        m = ENV_VAR_REGEX.search(text, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        if name.startswith('{') and name.endswith('}'):
            # ${var} form: strip the braces.
            name = name[1:-1]
        if name in environ:
            # Splice in the value and resume scanning after it.
            tail = text[j:]
            text = text[:i] + environ[name]
            i = len(text)
            text += tail
        else:
            # Unknown variable: leave it untouched and continue past it.
            i = j
    return text
|
Given a nested dict, generate a python code equivalent.
Example:
>>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}}
>>> print dict_to_attributes_code(d)
foo = 'bah'
colors.red = 1
colors.blue = 2
Returns:
str.
|
def dict_to_attributes_code(dict_):
    """Given a nested dict, generate a python code equivalent.

    Example:
        >>> d = {'foo': 'bah', 'colors': {'red': 1, 'blue': 2}}
        >>> print dict_to_attributes_code(d)
        foo = 'bah'
        colors.red = 1
        colors.blue = 2

    Returns:
        str.
    """
    lines = []
    for key, value in dict_.iteritems():
        if isinstance(value, dict):
            # Recurse, then prefix each unindented line with this key to build
            # dotted attribute names. Indented lines are continuation lines of
            # a multi-line value and must not be prefixed.
            txt = dict_to_attributes_code(value)
            lines_ = txt.split('\n')
            for line in lines_:
                if not line.startswith(' '):
                    line = "%s.%s" % (key, line)
                lines.append(line)
        else:
            value_txt = pformat(value)
            if '\n' in value_txt:
                # Multi-line values are emitted on indented continuation
                # lines after a backslash continuation.
                lines.append("%s = \\" % key)
                value_txt = indent(value_txt)
                lines.extend(value_txt.split('\n'))
            else:
                line = "%s = %s" % (key, value_txt)
                lines.append(line)

    return '\n'.join(lines)
|
Print rows of entries in aligned columns.
|
def columnise(rows, padding=2):
    """Print rows of entries in aligned columns."""
    # First pass: find the widest entry for each column index.
    widths = {}
    for row in rows:
        for col, entry in enumerate(row):
            widths[col] = max(widths.get(col, -1), len(str(entry)))

    # Second pass: pad every column except the last out to its column width
    # plus the requested padding.
    lines = []
    for row in rows:
        last = len(row) - 1
        cells = []
        for col, entry in enumerate(row):
            text = str(entry)
            if col < last:
                text = text.ljust(widths[col] + padding)
            cells.append(text)
        lines.append(''.join(cells))
    return lines
|
Like `columnise`, but with colored rows.
Args:
printer (`colorize.Printer`): Printer object.
Note:
The last entry in each row is the row color, or None for no coloring.
|
def print_colored_columns(printer, rows, padding=2):
    """Like `columnise`, but with colored rows.

    Args:
        printer (`colorize.Printer`): Printer object.

    Note:
        The last entry in each row is the row color, or None for no coloring.
    """
    # Separate each row's trailing color entry from its printable cells.
    body_rows = []
    row_colors = []
    for row in rows:
        body_rows.append(row[:-1])
        row_colors.append(row[-1])

    for row_color, line in zip(row_colors, columnise(body_rows, padding=padding)):
        printer(line, row_color)
|
Convert a string into epoch time. Examples of valid strings:
1418350671 # already epoch time
-12s # 12 seconds ago
-5.4m # 5.4 minutes ago
|
def get_epoch_time_from_str(s):
    """Convert a string into epoch time. Examples of valid strings:

        1418350671 # already epoch time
        -12s       # 12 seconds ago
        -5.4m      # 5.4 minutes ago

    Args:
        s (str): Time string.

    Returns:
        int: Epoch time (relative offsets are clamped at zero).

    Raises:
        ValueError: If `s` is not a recognised time format.
    """
    # Plain integer: already epoch time.
    try:
        return int(s)
    except (ValueError, TypeError):
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit and
        # genuine programming errors are no longer swallowed.
        pass

    # Relative offset of the form -<number><unit>.
    try:
        if s.startswith('-'):
            # Seconds per unit suffix.
            chars = {'d': 24 * 60 * 60,
                     'h': 60 * 60,
                     'm': 60,
                     's': 1}
            m = chars.get(s[-1])
            if m:
                n = float(s[1:-1])
                secs = int(n * m)
                now = int(time.time())
                # Clamp to epoch zero so the result is never negative.
                return max((now - secs), 0)
    except (ValueError, TypeError, AttributeError, IndexError):
        # Malformed numbers or non-string input fall through to the error.
        pass

    raise ValueError("'%s' is an unrecognised time format." % s)
|
Print the position string equivalent of a positive integer. Examples:
0: zeroeth
1: first
2: second
14: 14th
21: 21st
|
def positional_number_string(n):
    """Print the position string equivalent of a positive integer. Examples:

        0: zeroeth
        1: first
        2: second
        14: 14th
        21: 21st
    """
    if n > 20:
        # Suffix chosen by the final digit, via the module-level lookup table.
        return "%d%s" % (n, positional_suffix[n % 10])
    if n > 3:
        # 4..20 all take the regular 'th' suffix.
        return "%dth" % n
    # Small values have irregular English names; anything else (0 or below)
    # is reported as "zeroeth", matching the historical behaviour.
    irregular = {1: "first", 2: "second", 3: "third"}
    return irregular.get(n, "zeroeth")
|
Expand '~' to home directory in the given string.
Note that this function deliberately differs from the builtin
os.path.expanduser() on Linux systems, which expands strings such as
'~sclaus' to that user's homedir. This is problematic in rez because the
string '~packagename' may inadvertently convert to a homedir, if a package
happens to match a username.
|
def expanduser(path):
    """Expand '~' to home directory in the given string.

    Note that this function deliberately differs from the builtin
    os.path.expanduser() on Linux systems, which expands strings such as
    '~sclaus' to that user's homedir. This is problematic in rez because the
    string '~packagename' may inadvertently convert to a homedir, if a package
    happens to match a username.
    """
    # Fast path: nothing to expand.
    if '~' not in path:
        return path

    if os.name == "nt":
        # Windows: locate the home directory from the usual environment
        # variables, in order of preference.
        if 'HOME' in os.environ:
            userhome = os.environ['HOME']
        elif 'USERPROFILE' in os.environ:
            userhome = os.environ['USERPROFILE']
        elif 'HOMEPATH' in os.environ:
            drive = os.environ.get('HOMEDRIVE', '')
            userhome = os.path.join(drive, os.environ['HOMEPATH'])
        else:
            # No home information available; leave the path untouched.
            return path
    else:
        userhome = os.path.expanduser('~')

    def _expanduser(path):
        # EXPANDUSER_RE captures the characters surrounding '~' (its two
        # groups are re-emitted around the home dir), so only bare '~'
        # occurrences are replaced -- never '~username'.
        return EXPANDUSER_RE.sub(
            lambda m: m.groups()[0] + userhome + m.groups()[1],
            path)

    # only replace '~' if it's at start of string or is preceeded by pathsep or
    # ';' or whitespace; AND, is followed either by sep, pathsep, ';', ' ' or
    # end-of-string.
    #
    return os.path.normpath(_expanduser(path))
|
Return a string formatted as a python block comment string, like the one
you're currently reading. Special characters are escaped if necessary.
|
def as_block_string(txt):
    """Return a string formatted as a python block comment string, like the one
    you're currently reading. Special characters are escaped if necessary.
    """
    import json
    escaped_lines = []
    for raw_line in txt.split('\n'):
        # json.dumps escapes quotes/backslashes/control chars; strip its
        # surrounding double quotes and any trailing whitespace.
        body = json.dumps(raw_line)
        escaped_lines.append(body[1:-1].rstrip())
    return '"""\n%s\n"""' % '\n'.join(escaped_lines)
|
Format a string.
Args:
s (str): String to format, eg "hello {name}"
pretty (bool): If True, references to non-string attributes such as
lists are converted to basic form, with characters such as
brackets and parenthesis removed. If None, defaults to the
object's 'format_pretty' attribute.
expand (`StringFormatType`): Expansion mode. If None, will default
to the object's 'format_expand' attribute.
Returns:
The formatting string.
|
def format(self, s, pretty=None, expand=None):
    """Format a string.

    Args:
        s (str): String to format, eg "hello {name}"
        pretty (bool): If True, references to non-string attributes such as
            lists are converted to basic form, with characters such as
            brackets and parenthesis removed. If None, defaults to the
            object's 'format_pretty' attribute.
        expand (`StringFormatType`): Expansion mode. If None, will default
            to the object's 'format_expand' attribute.

    Returns:
        The formatting string.
    """
    # Fall back to this object's configured defaults.
    if pretty is None:
        pretty = self.format_pretty
    if expand is None:
        expand = self.format_expand

    formatter = ObjectStringFormatter(self, pretty=pretty, expand=expand)
    return formatter.format(s)
|
Publish an AMQP message.
Returns:
bool: True if message was sent successfully.
|
def publish_message(host, amqp_settings, routing_key, data, async=False):
    """Publish an AMQP message.

    Args:
        host (str): Message broker host.
        amqp_settings (dict): AMQP configuration for the publisher.
        routing_key (str): AMQP routing key.
        data: Message payload.
        async (bool): If True, enqueue the message for the background
            publisher thread instead of sending synchronously.
            NOTE(review): 'async' is a reserved word in Python 3; this
            module is Python-2 only as written.

    Returns:
        bool: True if message was sent successfully. In async mode this only
        reflects successful enqueueing, not delivery.
    """
    global _thread
    global _num_pending

    kwargs = {
        "host": host,
        "amqp_settings": amqp_settings,
        "routing_key": routing_key,
        "data": data
    }

    if not async:
        return _publish_message(**kwargs)

    # Lazily start the single background publisher thread, double-checked
    # under _lock so only one thread is ever created.
    if _thread is None:
        with _lock:
            if _thread is None:
                _thread = threading.Thread(target=_publish_messages_async)
                _thread.daemon = True
                _thread.start()

    with _lock:
        _num_pending += 1

    _queue.put(kwargs)
    return True
|
Publish an AMQP message.
Returns:
bool: True if message was sent successfully.
|
def _publish_message(host, amqp_settings, routing_key, data):
    """Publish an AMQP message.

    Returns:
        bool: True if message was sent successfully.
    """
    # Debug mode: echo to stdout instead of contacting a broker.
    if host == "stdout":
        print("Published to %s: %s" % (routing_key, data))
        return True

    try:
        # remove_nones drops unset settings so the connection uses its own
        # defaults for them.
        conn = Connection(**remove_nones(
            host=host,
            userid=amqp_settings.get("userid"),
            password=amqp_settings.get("password"),
            connect_timeout=amqp_settings.get("connect_timeout")
        ))
    except socket.error as e:
        print_error("Cannot connect to the message broker: %s" % (e))
        return False

    channel = conn.channel()

    # build the message
    msg = basic_message.Message(**remove_nones(
        body=json.dumps(data),
        delivery_mode=amqp_settings.get("message_delivery_mode"),
        content_type="application/json",
        content_encoding="utf-8"
    ))

    # publish the message
    try:
        channel.basic_publish(
            msg,
            amqp_settings["exchange_name"],
            routing_key
        )
    except Exception as e:
        # Best-effort: report and signal failure rather than propagate.
        print_error("Failed to publish message: %s" % (e))
        return False

    return True
|
Returns a copy of the version.
|
def copy(self):
    """Returns a copy of the version."""
    # Build the clone directly (skipping string parsing) and give it
    # shallow copies of the token and separator lists.
    clone = Version(None)
    clone.tokens = list(self.tokens)
    clone.seps = list(self.seps)
    return clone
|
Return a copy of the version, possibly with less tokens.
Args:
len_ (int): New version length. If >= current length, an
unchanged copy of the version is returned.
|
def trim(self, len_):
    """Return a copy of the version, possibly with less tokens.

    Args:
        len_ (int): New version length. If >= current length, an
            unchanged copy of the version is returned.
    """
    trimmed = Version(None)
    # Slicing clamps naturally when len_ exceeds the current length; there
    # is always one fewer separator than tokens.
    trimmed.tokens = self.tokens[:len_]
    trimmed.seps = self.seps[:len_ - 1]
    return trimmed
|
Return 'next' version. Eg, next(1.2) is 1.2_
|
def next(self):
    """Return 'next' version. Eg, next(1.2) is 1.2_"""
    if not self.tokens:
        # The empty version's successor is the infinite version.
        return Version.inf
    successor = self.copy()
    # Replace the final token with its own successor.
    successor.tokens[-1] = successor.tokens[-1].next()
    return successor
|
OR together version ranges.
Calculates the union of this range with one or more other ranges.
Args:
other: VersionRange object (or list of) to OR with.
Returns:
New VersionRange object representing the union.
|
def union(self, other):
    """OR together version ranges.

    Calculates the union of this range with one or more other ranges.

    Args:
        other: VersionRange object (or list of) to OR with.

    Returns:
        New VersionRange object representing the union.
    """
    # Normalize the argument to an iterable of ranges.
    others = other if hasattr(other, "__iter__") else [other]

    combined_bounds = list(self.bounds)
    for other_range in others:
        combined_bounds.extend(other_range.bounds)

    result = VersionRange(None)
    result.bounds = self._union(combined_bounds)
    return result
|
AND together version ranges.
Calculates the intersection of this range with one or more other ranges.
Args:
other: VersionRange object (or list of) to AND with.
Returns:
New VersionRange object representing the intersection, or None if
no ranges intersect.
|
def intersection(self, other):
    """AND together version ranges.

    Calculates the intersection of this range with one or more other ranges.

    Args:
        other: VersionRange object (or list of) to AND with.

    Returns:
        New VersionRange object representing the intersection, or None if
        no ranges intersect.
    """
    # Normalize the argument to an iterable of ranges.
    others = other if hasattr(other, "__iter__") else [other]

    current_bounds = self.bounds
    for other_range in others:
        current_bounds = self._intersection(current_bounds, other_range.bounds)
        if not current_bounds:
            # The ranges are disjoint; no intersection exists.
            return None

    result = VersionRange(None)
    result.bounds = current_bounds
    return result
|
Calculate the inverse of the range.
Returns:
New VersionRange object representing the inverse of this range, or
None if there is no inverse (ie, this range is the any range).
|
def inverse(self):
    """Calculate the inverse of the range.

    Returns:
        New VersionRange object representing the inverse of this range, or
        None if there is no inverse (ie, this range is the any range).
    """
    # The "any" range covers everything, so it has no inverse.
    if self.is_any():
        return None

    result = VersionRange(None)
    result.bounds = self._inverse(self.bounds)
    return result
|
Split into separate contiguous ranges.
Returns:
A list of VersionRange objects. For example, the range "3|5+" will
be split into ["3", "5+"].
|
def split(self):
    """Split into separate contiguous ranges.

    Returns:
        A list of VersionRange objects. For example, the range "3|5+" will
        be split into ["3", "5+"].
    """
    def _single_bound_range(bound):
        # Wrap one bound in its own range.
        single = VersionRange(None)
        single.bounds = [bound]
        return single

    return [_single_bound_range(bound) for bound in self.bounds]
|
Create a range from lower_version..upper_version.
Args:
lower_version: Version object representing lower bound of the range.
upper_version: Version object representing upper bound of the range.
Returns:
`VersionRange` object.
|
def as_span(cls, lower_version=None, upper_version=None,
            lower_inclusive=True, upper_inclusive=True):
    """Create a range from lower_version..upper_version.

    Args:
        lower_version: Version object representing lower bound of the range.
        upper_version: Version object representing upper bound of the range.
        lower_inclusive (bool): If True, the lower bound is included.
        upper_inclusive (bool): If True, the upper bound is included.

    Returns:
        `VersionRange` object.
    """
    # None on either side means unbounded on that side.
    lower = (None if lower_version is None
             else _LowerBound(lower_version, lower_inclusive))
    upper = (None if upper_version is None
             else _UpperBound(upper_version, upper_inclusive))
    bound = _Bound(lower, upper)

    range = cls(None)
    range.bounds = [bound]
    return range
|
Create a range from a version.
Args:
version: Version object. This is used as the upper/lower bound of
the range.
op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', lt'/'<',
'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created
that contains the version superset.
Returns:
`VersionRange` object.
|
def from_version(cls, version, op=None):
    """Create a range from a version.

    Args:
        version: Version object. This is used as the upper/lower bound of
            the range.
        op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', lt'/'<',
            'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created
            that contains the version superset.

    Returns:
        `VersionRange` object.

    Raises:
        VersionError: If `op` is not a recognised operation.
    """
    lower = None
    upper = None

    if op is None:
        # Superset range: [version, version.next()) contains the version
        # and everything below its successor.
        lower = _LowerBound(version, True)
        upper = _UpperBound(version.next(), False)
    elif op in ("eq", "=="):
        # Exact match: inclusive on both sides.
        lower = _LowerBound(version, True)
        upper = _UpperBound(version, True)
    elif op in ("gt", ">"):
        lower = _LowerBound(version, False)
    elif op in ("gte", ">="):
        lower = _LowerBound(version, True)
    elif op in ("lt", "<"):
        upper = _UpperBound(version, False)
    elif op in ("lte", "<="):
        upper = _UpperBound(version, True)
    else:
        raise VersionError("Unknown bound operation '%s'" % op)

    bound = _Bound(lower, upper)
    range = cls(None)
    range.bounds = [bound]
    return range
|
Create a range from a list of versions.
This method creates a range that contains only the given versions and
no other. Typically the range looks like (for eg) "==3|==4|==5.1".
Args:
versions: List of Version objects.
Returns:
`VersionRange` object.
|
def from_versions(cls, versions):
    """Create a range from a list of versions.

    This method creates a range that contains only the given versions and
    no other. Typically the range looks like (for eg) "==3|==4|==5.1".

    Args:
        versions: List of Version objects.

    Returns:
        `VersionRange` object.
    """
    range = cls(None)
    range.bounds = []
    # Sort and de-duplicate so bounds are in ascending order with no repeats.
    for version in dedup(sorted(versions)):
        # Each version becomes an exact (inclusive both sides) bound.
        lower = _LowerBound(version, True)
        upper = _UpperBound(version, True)
        bound = _Bound(lower, upper)
        range.bounds.append(bound)
    return range
|
Returns exact version ranges as Version objects, or None if there
are no exact version ranges present.
|
def to_versions(self):
    """Returns exact version ranges as Version objects, or None if there
    are no exact version ranges present.
    """
    # An exact range has identical, inclusive lower and upper bounds.
    exact_versions = [
        bound.lower.version for bound in self.bounds
        if bound.lower.inclusive
        and bound.upper.inclusive
        and bound.lower.version == bound.upper.version
    ]
    return exact_versions or None
|
Returns True if version is contained in this range.
|
def contains_version(self, version):
    """Returns True if version is contained in this range."""
    if len(self.bounds) < 5:
        # not worth overhead of binary search
        for bound in self.bounds:
            i = bound.version_containment(version)
            if i == 0:
                return True
            if i == -1:
                # Early exit -- presumably version_containment returning -1
                # means no later bound can contain it (bounds kept ordered);
                # confirm against _Bound.version_containment.
                return False
    else:
        # Larger bound lists use the binary-search helper, which returns
        # (index, contained).
        _, contains = self._contains_version(version)
        return contains

    return False
|
Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect.
|
def iter_intersecting(self, iterable, key=None, descending=False):
    """Like `iter_intersect_test`, but returns intersections only.

    Returns:
        An iterator that returns items from `iterable` that intersect.
    """
    # Delegate to the shared iterator, selecting intersecting-only mode.
    return _ContainsVersionIterator(self, iterable, key, descending,
                                    mode=_ContainsVersionIterator.MODE_INTERSECTING)
|
Like `iter_intersect_test`, but returns non-intersections only.
Returns:
An iterator that returns items from `iterable` that don't intersect.
|
def iter_non_intersecting(self, iterable, key=None, descending=False):
    """Like `iter_intersect_test`, but returns non-intersections only.

    Returns:
        An iterator that returns items from `iterable` that don't intersect.
    """
    # Delegate to the shared iterator, selecting non-intersecting-only mode.
    return _ContainsVersionIterator(self, iterable, key, descending,
                                    mode=_ContainsVersionIterator.MODE_NON_INTERSECTING)
|
Return a contiguous range that is a superset of this range.
Returns:
A VersionRange object representing the span of this range. For
example, the span of "2+<4|6+<8" would be "2+<8".
|
def span(self):
    """Return a contiguous range that is a superset of this range.

    Returns:
        A VersionRange object representing the span of this range. For
        example, the span of "2+<4|6+<8" would be "2+<8".
    """
    # Join the first bound's lower limit to the last bound's upper limit.
    spanning_bound = _Bound(self.bounds[0].lower, self.bounds[-1].upper)
    result = VersionRange(None)
    result.bounds = [spanning_bound]
    return result
|
Visit each version in the range, and apply a function to each.
This is for advanced usage only.
If `func` returns a `Version`, this call will change the versions in
place.
It is possible to change versions in a way that is nonsensical - for
example setting an upper bound to a smaller version than the lower bound.
Use at your own risk.
Args:
func (callable): Takes a `Version` instance arg, and is applied to
every version in the range. If `func` returns a `Version`, it
will replace the existing version, updating this `VersionRange`
instance in place.
|
def visit_versions(self, func):
    """Visit each version in the range, and apply a function to each.

    This is for advanced usage only.

    If `func` returns a `Version`, this call will change the versions in
    place.

    It is possible to change versions in a way that is nonsensical - for
    example setting an upper bound to a smaller version than the lower bound.
    Use at your own risk.

    Args:
        func (callable): Takes a `Version` instance arg, and is applied to
            every version in the range. If `func` returns a `Version`, it
            will replace the existing version, updating this `VersionRange`
            instance in place.
    """
    for bound in self.bounds:
        # Skip the shared min/inf sentinel bounds -- they carry no real
        # version and must never be mutated.
        if bound.lower is not _LowerBound.min:
            result = func(bound.lower.version)
            if isinstance(result, Version):
                bound.lower.version = result

        if bound.upper is not _UpperBound.inf:
            result = func(bound.upper.version)
            if isinstance(result, Version):
                bound.upper.version = result
|
Spawn an async request to a remote webserver.
|
def async_send(self, url, data, headers, success_cb, failure_cb):
    """
    Spawn an async request to a remote webserver.
    """
    # this can be optimized by making a custom self.send that does not
    # read the response since we don't use it.
    #
    # NOTE(review): the lock is acquired here but not released in this
    # method -- presumably self._done releases it when the greenlet
    # finishes; confirm against _done's implementation.
    self._lock.acquire()
    return gevent.spawn(
        super(GeventedHTTPTransport, self).send, url, data, headers
    ).link(lambda x: self._done(x, success_cb, failure_cb))
|
Sends a request to a remote webserver using HTTP POST.
|
def send(self, url, data, headers):
    """
    Sends a request to a remote webserver using HTTP POST.
    """
    req = urllib2.Request(url, headers=headers)

    try:
        response = urlopen(
            url=req,
            data=data,
            timeout=self.timeout,
            verify_ssl=self.verify_ssl,
            ca_certs=self.ca_certs,
        )
    except urllib2.HTTPError as exc:
        # Sentry puts a descriptive error message in this header when
        # available.
        msg = exc.headers.get('x-sentry-error')
        code = exc.getcode()
        if code == 429:
            # Rate limited: honour the server-supplied backoff when it can
            # be parsed, otherwise fall back to no delay.
            try:
                retry_after = int(exc.headers.get('retry-after'))
            except (ValueError, TypeError):
                retry_after = 0
            raise RateLimited(msg, retry_after)
        elif msg:
            raise APIError(msg, code)
        else:
            # No server message: re-raise the original HTTPError untouched.
            raise
    return response
|
raven-js will pass both Authorization and X-Sentry-Auth depending on the browser
and server configurations.
|
def extract_auth_vars(request):
    """
    raven-js will pass both Authorization and X-Sentry-Auth depending on the browser
    and server configurations.
    """
    # Prefer the dedicated X-Sentry-Auth header, then Authorization.
    for header in ('HTTP_X_SENTRY_AUTH', 'HTTP_AUTHORIZATION'):
        value = request.META.get(header, '')
        if value.startswith('Sentry'):
            return value

    # Try to construct from GET request
    pairs = ['%s=%s' % (key, value)
             for key, value in request.GET.items()
             if key.startswith('sentry_') and key != 'sentry_data']
    if pairs:
        return 'Sentry %s' % ', '.join(pairs)
    return None
|
Convert exception info to a value for the values list.
|
def _get_value(self, exc_type, exc_value, exc_traceback):
    """
    Convert exception info to a value for the values list.
    """
    # Serialize the traceback frames, applying the client's transformer and
    # locals-capture setting.
    stack_info = get_stack_info(
        iter_traceback_frames(exc_traceback),
        transformer=self.transform,
        capture_locals=self.client.capture_locals,
    )

    exc_module = getattr(exc_type, '__module__', None)
    if exc_module:
        exc_module = str(exc_module)
    # Fall back to a placeholder for exotic exception objects with no name.
    exc_type = getattr(exc_type, '__name__', '<unknown>')

    return {
        'value': to_unicode(exc_value),
        'type': str(exc_type),
        'module': to_unicode(exc_module),
        'stacktrace': stack_info,
    }
|
Records a breadcrumb for all active clients. This is what integration
code should use rather than invoking the `captureBreadcrumb` method
on a specific client.
|
def record(message=None, timestamp=None, level=None, category=None,
           data=None, type=None, processor=None):
    """Records a breadcrumb for all active clients. This is what integration
    code should use rather than invoking the `captureBreadcrumb` method
    on a specific client.
    """
    # Default the timestamp to "now".
    if timestamp is None:
        timestamp = time()
    # Broadcast the breadcrumb to every active client context.
    for ctx in raven.context.get_active_contexts():
        ctx.breadcrumbs.record(timestamp, level, message, category,
                               data, type, processor)
|
Ignores a logger during breadcrumb recording.
|
def ignore_logger(name_or_logger, allow_level=None):
    """Ignores a logger during breadcrumb recording.
    """
    def handler(logger, level, msg, args, kwargs):
        # Returning True suppresses default handling; records at or above
        # allow_level (when given) are let through.
        if allow_level is None:
            return True
        return level < allow_level

    register_special_log_handler(name_or_logger, handler)
|
Registers a callback for log handling. The callback is invoked
with given arguments: `logger`, `level`, `msg`, `args` and `kwargs`
which are the values passed to the logging system. If the callback
returns true value the default handling is disabled. Only one callback
can be registered per one logger name. Logger tree is not traversed
so calling this method with `spammy_module` argument will not silence
messages from `spammy_module.child`.
|
def register_special_log_handler(name_or_logger, callback):
    """Registers a callback for log handling. The callback is invoked
    with given arguments: `logger`, `level`, `msg`, `args` and `kwargs`
    which are the values passed to the logging system. If the callback
    returns true value the default handling is disabled. Only one callback
    can be registered per one logger name. Logger tree is not traversed
    so calling this method with `spammy_module` argument will not silence
    messages from `spammy_module.child`.
    """
    # Accept either a logger name or a logger instance.
    if isinstance(name_or_logger, string_types):
        name = name_or_logger
    else:
        name = name_or_logger.name
    # Last registration wins for a given name.
    special_logger_handlers[name] = callback
|
If installed this causes Django's queries to be captured.
|
def install_sql_hook():
    """If installed this causes Django's queries to be captured."""
    try:
        # Newer Django versions expose the cursor wrapper under 'utils';
        # older ones under 'util'.
        from django.db.backends.utils import CursorWrapper
    except ImportError:
        from django.db.backends.util import CursorWrapper

    try:
        real_execute = CursorWrapper.execute
        real_executemany = CursorWrapper.executemany
    except AttributeError:
        # XXX(mitsuhiko): On some very old django versions (<1.6) this
        # trickery would have to look different but I can't be bothered.
        return

    def record_many_sql(vendor, alias, start, sql, param_list):
        # Record one entry per parameter set of an executemany call, all
        # sharing the call's total duration.
        duration = time.time() - start
        for params in param_list:
            record_sql(vendor, alias, start, duration, sql, params)

    def execute(self, sql, params=None):
        start = time.time()
        try:
            return real_execute(self, sql, params)
        finally:
            # Record in a finally block so failing queries are captured too.
            record_sql(self.db.vendor, getattr(self.db, 'alias', None),
                       start, time.time() - start, sql, params)

    def executemany(self, sql, param_list):
        start = time.time()
        try:
            return real_executemany(self, sql, param_list)
        finally:
            record_many_sql(self.db.vendor, getattr(self.db, 'alias', None),
                            start, sql, param_list)

    # Monkey-patch the cursor wrapper, and ignore Django's own SQL logger
    # during breadcrumb recording -- presumably to avoid duplicate entries.
    CursorWrapper.execute = execute
    CursorWrapper.executemany = executemany
    breadcrumbs.ignore_logger('django.db.backends')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.