INSTRUCTION
stringlengths 1
46.3k
| RESPONSE
stringlengths 75
80.2k
|
|---|---|
Return a list with all (following) lines. The sizehint parameter
is ignored in this implementation.
|
def readlines(self, sizehint = -1):
    """Return a list with all (following) lines. The sizehint parameter
    is ignored in this implementation.
    """
    lines = []
    line = self.readline()
    while line:
        lines.append(line)
        line = self.readline()
    return lines
|
Read the directory, making sure we close the file if the format
is bad.
|
def _GetContents(self):
    """Read the directory, making sure we close the file if the format
    is bad."""
    try:
        self._RealGetContents()
    except BadZipfile:
        # Only close the underlying file object if this instance opened
        # it; a caller-supplied file object remains the caller's to close.
        if not self._filePassed:
            self.fp.close()
            self.fp = None
        # Propagate the original BadZipfile to the caller.
        raise
|
Return a list of file names in the archive.
|
def namelist(self):
    """Return a list of file names in the archive."""
    return [info.filename for info in self.filelist]
|
Print a table of contents for the zip file.
|
def printdir(self):
    """Print a table of contents for the zip file."""
    # Header row; column widths match the per-file format below.
    print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
    for zinfo in self.filelist:
        # date_time is a (year, month, day, hour, minute, second) tuple.
        date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
        print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
|
Return the instance of ZipInfo given 'name'.
|
def getinfo(self, name):
    """Return the instance of ZipInfo given 'name'."""
    info = self.NameToInfo.get(name)
    if info is not None:
        return info
    raise KeyError(
        'There is no item named %r in the archive' % name)
|
Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
|
def extract(self, member, path=None, pwd=None):
    """Extract a member from the archive to the current working directory,
    using its full name. Its file information is extracted as accurately
    as possible. `member' may be a filename or a ZipInfo object. You can
    specify a different directory using `path'.
    """
    info = member if isinstance(member, ZipInfo) else self.getinfo(member)
    dest = os.getcwd() if path is None else path
    return self._extract_member(info, dest, pwd)
|
Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
|
def extractall(self, path=None, members=None, pwd=None):
    """Extract all members from the archive to the current working
    directory. `path' specifies a different directory to extract to.
    `members' is optional and must be a subset of the list returned
    by namelist().
    """
    names = self.namelist() if members is None else members
    for name in names:
        self.extract(name, path, pwd)
|
Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
|
def _extract_member(self, member, targetpath, pwd):
    """Extract the ZipInfo object 'member' to a physical
    file on the path targetpath.

    Returns the path extracted to. A member whose name ends in '/' is a
    directory entry: only the directory is created.
    """
    # build the destination pathname, replacing
    # forward slashes to platform specific separators.
    # Strip trailing path separator, unless it represents the root.
    if (targetpath[-1:] in (os.path.sep, os.path.altsep)
            and len(os.path.splitdrive(targetpath)[1]) > 1):
        targetpath = targetpath[:-1]
    # don't include leading "/" from file name if present
    # NOTE(review): member.filename[0] raises IndexError for an empty
    # member name -- presumably names are never empty; confirm upstream.
    if member.filename[0] == '/':
        targetpath = os.path.join(targetpath, member.filename[1:])
    else:
        targetpath = os.path.join(targetpath, member.filename)
    targetpath = os.path.normpath(targetpath)
    # Create all upper directories if necessary.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        os.makedirs(upperdirs)
    # Trailing '/' marks a directory entry: create it and stop here.
    if member.filename[-1] == '/':
        if not os.path.isdir(targetpath):
            os.mkdir(targetpath)
        return targetpath
    # Stream the (decompressed) member bytes into the target file.
    # `file()` is the Python 2 built-in, equivalent to open().
    source = self.open(member, pwd=pwd)
    target = file(targetpath, "wb")
    shutil.copyfileobj(source, target)
    source.close()
    target.close()
    return targetpath
|
Check for errors before writing a file to the archive.
|
def _writecheck(self, zinfo):
    """Check for errors before writing a file to the archive.

    Raises RuntimeError for a bad mode, a closed archive, or an
    unavailable/unsupported compression method; raises LargeZipFile
    when ZIP64 would be required but `allowZip64` was not enabled.
    """
    if zinfo.filename in self.NameToInfo:
        if self.debug:      # Warning for duplicate names
            print "Duplicate name:", zinfo.filename
    if self.mode not in ("w", "a"):
        raise RuntimeError, 'write() requires mode "w" or "a"'
    if not self.fp:
        raise RuntimeError, \
              "Attempt to write ZIP archive that was already closed"
    if zinfo.compress_type == ZIP_DEFLATED and not zlib:
        raise RuntimeError, \
              "Compression requires the (missing) zlib module"
    if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
        raise RuntimeError, \
              "That compression method is not supported"
    # Sizes/offsets past ZIP64_LIMIT need the ZIP64 extension, which must
    # have been enabled explicitly by the caller.
    if zinfo.file_size > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Filesize would require ZIP64 extensions")
    if zinfo.header_offset > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Zipfile size would require ZIP64 extensions")
|
Put the bytes from filename into the archive under the name
arcname.
|
def write(self, filename, arcname=None, compress_type=None):
    """Put the bytes from filename into the archive under the name
    arcname.

    Args:
        filename: Path on disk of the file (or directory) to add.
        arcname: Name to store it under in the archive; defaults to
            `filename` with drive letter and leading separators removed.
        compress_type: Compression method; defaults to the archive's
            `self.compression`.
    """
    if not self.fp:
        raise RuntimeError(
            "Attempt to write to ZIP archive that was already closed")
    st = os.stat(filename)
    isdir = stat.S_ISDIR(st.st_mode)
    mtime = time.localtime(st.st_mtime)
    date_time = mtime[0:6]
    # Create ZipInfo instance to store file information
    if arcname is None:
        arcname = filename
    arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
    while arcname[0] in (os.sep, os.altsep):
        arcname = arcname[1:]
    if isdir:
        # Directory entries are stored with a trailing slash.
        arcname += '/'
    zinfo = ZipInfo(arcname, date_time)
    zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
    if compress_type is None:
        zinfo.compress_type = self.compression
    else:
        zinfo.compress_type = compress_type
    zinfo.file_size = st.st_size
    zinfo.flag_bits = 0x00
    zinfo.header_offset = self.fp.tell()    # Start of header bytes
    self._writecheck(zinfo)
    self._didModify = True
    if isdir:
        # Directories carry no data: record the entry, write the local
        # header only, and return.
        zinfo.file_size = 0
        zinfo.compress_size = 0
        zinfo.CRC = 0
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
        self.fp.write(zinfo.FileHeader())
        return
    fp = open(filename, "rb")
    # Must overwrite CRC and sizes with correct data later
    zinfo.CRC = CRC = 0
    zinfo.compress_size = compress_size = 0
    zinfo.file_size = file_size = 0
    self.fp.write(zinfo.FileHeader())
    if zinfo.compress_type == ZIP_DEFLATED:
        # Negative wbits: raw deflate stream, no zlib header/trailer.
        cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                zlib.DEFLATED, -15)
    else:
        cmpr = None
    # Stream the file in 8K chunks, tracking CRC and both sizes.
    while 1:
        buf = fp.read(1024 * 8)
        if not buf:
            break
        file_size = file_size + len(buf)
        CRC = crc32(buf, CRC) & 0xffffffff
        if cmpr:
            buf = cmpr.compress(buf)
            compress_size = compress_size + len(buf)
        self.fp.write(buf)
    fp.close()
    if cmpr:
        # Flush any bytes the compressor is still holding.
        buf = cmpr.flush()
        compress_size = compress_size + len(buf)
        self.fp.write(buf)
        zinfo.compress_size = compress_size
    else:
        zinfo.compress_size = file_size
    zinfo.CRC = CRC
    zinfo.file_size = file_size
    # Seek backwards and write CRC and file sizes
    position = self.fp.tell()       # Preserve current position in file
    self.fp.seek(zinfo.header_offset + 14, 0)
    self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                              zinfo.file_size))
    self.fp.seek(position, 0)
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
|
Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
|
def writepy(self, pathname, basename = ""):
    """Add all files from "pathname" to the ZIP archive.

    If pathname is a package directory, search the directory and
    all package subdirectories recursively for all *.py and enter
    the modules into the archive. If pathname is a plain
    directory, listdir *.py and enter all modules. Else, pathname
    must be a Python *.py file and the module will be put into the
    archive. Added modules are always module.pyo or module.pyc.
    This method will compile the module.py into module.pyc if
    necessary.
    """
    dir, name = os.path.split(pathname)
    if os.path.isdir(pathname):
        initname = os.path.join(pathname, "__init__.py")
        if os.path.isfile(initname):
            # This is a package directory, add it
            if basename:
                basename = "%s/%s" % (basename, name)
            else:
                basename = name
            if self.debug:
                print "Adding package in", pathname, "as", basename
            # _get_codename maps a module path to the compiled file to
            # store plus its archive name.
            fname, arcname = self._get_codename(initname[0:-3], basename)
            if self.debug:
                print "Adding", arcname
            self.write(fname, arcname)
            dirlist = os.listdir(pathname)
            dirlist.remove("__init__.py")
            # Add all *.py files and package subdirectories
            for filename in dirlist:
                path = os.path.join(pathname, filename)
                root, ext = os.path.splitext(filename)
                if os.path.isdir(path):
                    if os.path.isfile(os.path.join(path, "__init__.py")):
                        # This is a package directory, add it
                        self.writepy(path, basename)    # Recursive call
                elif ext == ".py":
                    fname, arcname = self._get_codename(path[0:-3],
                                                        basename)
                    if self.debug:
                        print "Adding", arcname
                    self.write(fname, arcname)
        else:
            # This is NOT a package directory, add its files at top level
            if self.debug:
                print "Adding files from directory", pathname
            for filename in os.listdir(pathname):
                path = os.path.join(pathname, filename)
                root, ext = os.path.splitext(filename)
                if ext == ".py":
                    fname, arcname = self._get_codename(path[0:-3],
                                                        basename)
                    if self.debug:
                        print "Adding", arcname
                    self.write(fname, arcname)
    else:
        # Single-file case: must be a .py file.
        if pathname[-3:] != ".py":
            raise RuntimeError, \
                  'Files added with writepy() must end with ".py"'
        fname, arcname = self._get_codename(pathname[0:-3], basename)
        if self.debug:
            print "Adding file", arcname
        self.write(fname, arcname)
|
Return the temporary file to use.
|
def get_fileobject(self, dir=None, **kwargs):
    '''Return the temporary file to use.'''
    target_dir = os.path.normpath(os.path.dirname(self._path)) if dir is None else dir
    fd, tmp_name = tempfile.mkstemp(dir=target_dir)
    # io.open() will take either the descriptor or the name, but we need
    # the name later for commit()/replace_atomic() and couldn't find a way
    # to get the filename from the descriptor.
    os.close(fd)
    kwargs.update(mode=self._mode, file=tmp_name)
    return io.open(**kwargs)
|
Move the temporary file to the target location.
|
def commit(self, f):
    '''Move the temporary file to the target location.'''
    # Overwrite mode replaces an existing target atomically; otherwise
    # the move fails if the target already exists.
    mover = replace_atomic if self._overwrite else move_atomic
    mover(f.name, self._path)
|
Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
|
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file. If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.
    """
    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character.
    #
    #   Programs that read PID files should be somewhat flexible
    #   in what they accept; i.e., they should ignore extra
    #   whitespace, leading zeroes, absence of the trailing
    #   newline, or additional lines in the PID file.
    try:
        # `with` guarantees the file handle is closed even if readline()
        # raises (the original leaked the handle on such an error).
        with open(pidfile_path, 'r') as pidfile:
            line = pidfile.readline().strip()
    except IOError:
        return None
    try:
        return int(line)
    except ValueError:
        # Content is not a valid decimal PID.
        return None
|
Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
|
def acquire(self, timeout=None):
    """ Acquire the lock.
    Creates the PID file for this lock, or raises an error if
    the lock could not be acquired.

    Args:
        timeout: Seconds to keep retrying before giving up. None means
            use `self.timeout`; a non-positive effective timeout fails
            immediately with AlreadyLocked instead of LockTimeout.

    Raises:
        LockTimeout: timed out waiting for the lock (timeout > 0).
        AlreadyLocked: lock is held and effective timeout <= 0.
        LockFailed: PID file creation failed for a reason other than
            the file already existing.
    """
    # NOTE(review): the `x and y or z` idiom means a passed-in timeout
    # of 0 is falsy and silently falls back to self.timeout; confirm
    # that is intended (an `if timeout is None` test would avoid it).
    timeout = timeout is not None and timeout or self.timeout
    end_time = time.time()
    if timeout is not None and timeout > 0:
        end_time += timeout
    while True:
        try:
            write_pid_to_pidfile(self.path)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                # The lock creation failed. Maybe sleep a bit.
                if timeout is not None and time.time() > end_time:
                    if timeout > 0:
                        raise LockTimeout("Timeout waiting to acquire"
                                          " lock for %s" %
                                          self.path)
                    else:
                        raise AlreadyLocked("%s is already locked" %
                                            self.path)
                # Retry after a tenth of the timeout (0.1s fallback).
                time.sleep(timeout is not None and timeout/10 or 0.1)
            else:
                raise LockFailed("failed to create %s" % self.path)
        else:
            # PID file created successfully: lock acquired.
            return
|
Call this after adding your child widgets.
|
def apply_saved_layout(self):
    """Call this after adding your child widgets.

    Restores previously-saved widget sizes from self.config. Returns
    True when a saved layout was found and applied, else False.
    """
    num_widgets = self.config.get(self.config_key + "/num_widgets", int)
    if not num_widgets:
        return False
    sizes = [self.config.get("%s/size_%d" % (self.config_key, i), int)
             for i in range(num_widgets)]
    self.setSizes(sizes)
    return True
|
Return dict copy with Nones removed.
|
def remove_nones(**kwargs):
    """Return a dict copy of the keyword arguments with None values removed.

    Returns:
        dict: `kwargs` minus every key whose value is None.
    """
    # items() (rather than Python-2-only iteritems()) keeps this helper
    # working on both Python 2 and 3; also fixes the "diict" docstring typo.
    return dict((k, v) for k, v in kwargs.items() if v is not None)
|
Perform a deep merge of `dict2` into `dict1`.
Note that `dict2` and any nested dicts are unchanged.
Supports `ModifyList` instances.
|
def deep_update(dict1, dict2):
    """Perform a deep merge of `dict2` into `dict1`.

    Note that `dict2` and any nested dicts are unchanged.
    Supports `ModifyList` instances.
    """
    def flatten(v):
        # Resolve ModifyLists to plain lists; deep-copy nested dicts.
        if isinstance(v, ModifyList):
            return v.apply([])
        elif isinstance(v, dict):
            return dict((k, flatten(v_)) for k, v_ in v.items())
        else:
            return v

    def merge(v1, v2):
        # Merge a value from dict2 (v2) onto an existing value (v1).
        if isinstance(v1, dict) and isinstance(v2, dict):
            deep_update(v1, v2)
            return v1
        elif isinstance(v2, ModifyList):
            v1 = flatten(v1)
            return v2.apply(v1)
        else:
            return flatten(v2)

    # Flatten values that exist only in dict1 (replacing values in-place
    # while iterating is safe; the key set does not change).
    for k1, v1 in dict1.items():
        if k1 not in dict2:
            dict1[k1] = flatten(v1)

    for k2, v2 in dict2.items():
        # BUGFIX: use KeyError as a missing-key sentinel. The previous
        # `dict1.get(k2)` returned None for missing keys, so the
        # `v1 is KeyError` branch was unreachable and a missing key whose
        # incoming value was a ModifyList got merged onto None instead of
        # being flattened onto an empty list.
        v1 = dict1.get(k2, KeyError)
        if v1 is KeyError:
            dict1[k2] = flatten(v2)
        else:
            dict1[k2] = merge(v1, v2)
|
Create dict copy with removed items.
Recursively remove items where fn(value) is True.
Returns:
dict: New dict with matching items removed.
|
def deep_del(data, fn):
    """Create dict copy with removed items.

    Recursively remove items where fn(value) is True.

    Args:
        data (dict): Source dict; it is not modified.
        fn (callable): Predicate taking a value; True means "remove".

    Returns:
        dict: New dict with matching items removed.
    """
    result = {}
    # items() (rather than Python-2-only iteritems()) keeps this helper
    # working on both Python 2 and 3.
    for k, v in data.items():
        if not fn(v):
            if isinstance(v, dict):
                result[k] = deep_del(v, fn)
            else:
                result[k] = v
    return result
|
Get added/removed/changed keys between two dicts.
Each key in the return value is a list, which is the namespaced key that
was affected.
Returns:
3-tuple:
- list of added keys;
- list of removed keys;
- list of changed keys.
|
def get_dict_diff(d1, d2):
    """Get added/removed/changed keys between two dicts.

    Each key in the return value is a list, which is the namespaced key
    that was affected.

    Returns:
        3-tuple:
        - list of added keys;
        - list of removed keys;
        - list of changed keys.
    """
    def _diff(d1_, d2_, namespace):
        added = []
        removed = []
        changed = []
        # items()/plain key iteration (rather than Python-2-only
        # iteritems()/iterkeys()) keeps this working on Python 2 and 3.
        for k1, v1 in d1_.items():
            if k1 not in d2_:
                removed.append(namespace + [k1])
            else:
                v2 = d2_[k1]
                if v2 != v1:
                    if isinstance(v1, dict) and isinstance(v2, dict):
                        # Recurse into nested dicts, extending the namespace.
                        namespace_ = namespace + [k1]
                        added_, removed_, changed_ = _diff(v1, v2, namespace_)
                        added.extend(added_)
                        removed.extend(removed_)
                        changed.extend(changed_)
                    else:
                        changed.append(namespace + [k1])
        for k2 in d2_:
            if k2 not in d1_:
                added.append(namespace + [k2])
        return added, removed, changed
    return _diff(d1, d2, [])
|
Returns same as `get_dict_diff`, but as a readable string.
|
def get_dict_diff_str(d1, d2, title):
    """Return the same information as `get_dict_diff`, but as a readable
    string, headed by `title`.
    """
    added, removed, changed = get_dict_diff(d1, d2)
    lines = [title]
    for label, keys in (("Added", added),
                        ("Removed", removed),
                        ("Changed", changed)):
        if keys:
            lines.append("%s attributes: %s"
                         % (label, ['.'.join(x) for x in keys]))
    return '\n'.join(lines)
|
Recursively convert dict and UserDict types.
Note that `d` is unchanged.
Args:
to_class (type): Dict-like type to convert values to, usually UserDict
subclass, or dict.
from_class (type): Dict-like type to convert values from. If a tuple,
multiple types are converted.
Returns:
Converted data as `to_class` instance.
|
def convert_dicts(d, to_class=AttrDictWrapper, from_class=dict):
    """Recursively convert dict and UserDict types.

    Note that `d` is unchanged.

    Args:
        to_class (type): Dict-like type to convert values to, usually UserDict
            subclass, or dict.
        from_class (type): Dict-like type to convert values from. If a tuple,
            multiple types are converted.

    Returns:
        Converted data as `to_class` instance.
    """
    converted = to_class()
    for key, value in d.iteritems():
        # Recurse into dict-like values; everything else is kept as-is.
        converted[key] = (
            convert_dicts(value, to_class=to_class, from_class=from_class)
            if isinstance(value, from_class) else value)
    return converted
|
Get completion strings based on an object's attributes/keys.
Completion also works on dynamic attributes (eg implemented via __getattr__)
if they are iterable.
Args:
instance (object): Object to introspect.
prefix (str): Prefix to match, can be dot-separated to access nested
attributes.
types (tuple): Attribute types to match, any if None.
instance_types (tuple): Class types to recurse into when a dotted
prefix is given, any if None.
Returns:
List of strings.
|
def get_object_completions(instance, prefix, types=None, instance_types=None):
    """Get completion strings based on an object's attributes/keys.

    Completion also works on dynamic attributes (eg implemented via
    __getattr__) if they are iterable.

    Args:
        instance (object): Object to introspect.
        prefix (str): Prefix to match, can be dot-separated to access nested
            attributes.
        types (tuple): Attribute types to match, any if None.
        instance_types (tuple): Class types to recurse into when a dotted
            prefix is given, any if None.

    Returns:
        List of strings.
    """
    # Walk down dotted prefix components ("a.b.c" -> descend into a.b,
    # then complete on "c"), recording consumed components in word_toks.
    word_toks = []
    toks = prefix.split('.')
    while len(toks) > 1:
        attr = toks[0]
        toks = toks[1:]
        word_toks.append(attr)
        try:
            instance = getattr(instance, attr)
        except AttributeError:
            return []
        if instance_types and not isinstance(instance, instance_types):
            return []
    prefix = toks[-1]
    words = []
    attrs = dir(instance)
    # Dynamic attributes: iterating the object may yield extra string keys
    # (dict-like objects); non-iterables raise TypeError and are skipped.
    try:
        for attr in instance:
            if isinstance(attr, basestring):
                attrs.append(attr)
    except TypeError:
        pass
    for attr in attrs:
        # Skip private names and anything defined on the class itself
        # (methods etc.); keep only matching, non-callable values.
        if attr.startswith(prefix) and not attr.startswith('_') \
                and not hasattr(instance.__class__, attr):
            value = getattr(instance, attr)
            if types and not isinstance(value, types):
                continue
            if not callable(value):
                words.append(attr)
    qual_words = ['.'.join(word_toks + [x]) for x in words]
    # With exactly one match, expand one level deeper so child attributes
    # are offered too.
    # NOTE(review): `value` here is whatever was bound on the *last* loop
    # iteration (and is unbound if the loop body never ran); also
    # `instance_types` is not forwarded to the recursive call -- confirm
    # both are intended.
    if len(words) == 1 and value is not None and \
            (instance_types is None or isinstance(value, instance_types)):
        qual_word = qual_words[0]
        words = get_object_completions(value, '', types)
        for word in words:
            qual_words.append("%s.%s" % (qual_word, word))
    return qual_words
|
tab_index = self.tabs["help"]
if self.help_widget.success:
self.setTabEnabled(tab_index, True)
else:
self.setTabEnabled(tab_index, False)
disabled_tabs.add(tab_index)
|
def _set_packagebase(self, variant):
    """Point this tab widget at `variant` (a Variant or Package, or None).

    Refreshes every non-lazy (or currently visible) child tab, updates the
    'variants' and 'tools' tab labels/enabled states, and moves off the
    current tab if it has just become disabled.
    """
    self.setEnabled(variant is not None)
    self.variant = variant
    is_package = isinstance(variant, Package)
    prev_index = self.currentIndex()
    disabled_tabs = set()
    # Lazy tabs are only refreshed when they are the visible tab.
    for d in self.tabs.itervalues():
        index = d["index"]
        if (not d["lazy"]) or (self.currentIndex() == index):
            self.widget(index).set_variant(variant)
    tab_index = self.tabs["variants"]["index"]
    if (isinstance(variant, Variant) and variant.index is not None) \
            or (is_package and variant.num_variants):
        # A package reports its own variant count; a variant reports its
        # parent package's count.
        n = variant.num_variants if is_package else variant.parent.num_variants
        label = "variants (%d)" % n
        self.setTabEnabled(tab_index, True)
    else:
        label = "variants"
        self.setTabEnabled(tab_index, False)
        disabled_tabs.add(tab_index)
    self.setTabText(tab_index, label)
    tab_index = self.tabs["tools"]["index"]
    if variant and variant.tools:
        label = "tools (%d)" % len(variant.tools)
        self.setTabEnabled(tab_index, True)
    else:
        label = "tools"
        self.setTabEnabled(tab_index, False)
        disabled_tabs.add(tab_index)
    self.setTabText(tab_index, label)
    # (Removed a dead, commented-out triple-quoted block that toggled a
    # "help" tab -- it was a no-op string statement.)
    # If the previously-selected tab is now disabled, fall back to tab 0.
    if prev_index in disabled_tabs:
        self.setCurrentIndex(0)
|
Use this around code in your package repository that is loading a
package, for example from file or cache.
|
def package_loading(self):
    """Use this around code in your package repository that is loading a
    package, for example from file or cache.

    Measures the wall-clock time spent in the wrapped block and adds it
    to `self.package_load_time`.
    """
    # NOTE(review): written as a generator-based context manager;
    # presumably decorated with @contextmanager at the definition site
    # (decorator not visible in this chunk) -- confirm.
    # If the wrapped block raises, the elapsed time is not recorded
    # (there is no try/finally around the yield).
    t1 = time.time()
    yield None
    t2 = time.time()
    self.package_load_time += t2 - t1
|
Determine if the repository contains any packages.
Returns:
True if there are no packages, False if there are at least one.
|
def is_empty(self):
    """Determine if the repository contains any packages.
    Returns:
        True if there are no packages, False if there are at least one.
    """
    # The repository is non-empty as soon as a single package is found;
    # any() stops at the first package of each family.
    for family in self.iter_package_families():
        if any(True for _pkg in self.iter_packages(family)):
            return False
    return True
|
Create a `ResourceHandle`
Nearly all `ResourceHandle` creation should go through here, because it
gives the various resource classes a chance to normalize / standardize
the resource handles, to improve caching / comparison / etc.
|
def make_resource_handle(self, resource_key, **variables):
    """Create a `ResourceHandle`

    Nearly all `ResourceHandle` creation should go through here, because it
    gives the various resource classes a chance to normalize / standardize
    the resource handles, to improve caching / comparison / etc.

    Args:
        resource_key (str): Name of the resource type.
        variables: Data to identify / store on the resource. Any
            "repository_type"/"location" entries must match this repository.

    Raises:
        ResourceError: if the supplied variables name a different repository.

    Returns:
        `ResourceHandle` with canonical repository variables stamped on.
    """
    # If the caller supplied repository_type/location they must agree
    # with this repository; either way the canonical values are set below.
    if variables.get("repository_type", self.name()) != self.name():
        raise ResourceError("repository_type mismatch - requested %r, "
                            "repository_type is %r"
                            % (variables["repository_type"], self.name()))
    variables["repository_type"] = self.name()
    if variables.get("location", self.location) != self.location:
        raise ResourceError("location mismatch - requested %r, repository "
                            "location is %r" % (variables["location"],
                                                self.location))
    variables["location"] = self.location
    # Let the resource class canonicalize its variables before the handle
    # is built, so equal resources hash/compare equal.
    resource_cls = self.pool.get_resource_class(resource_key)
    variables = resource_cls.normalize_variables(variables)
    return ResourceHandle(resource_key, variables)
|
Get a resource.
Attempts to get and return a cached version of the resource if
available, otherwise a new resource object is created and returned.
Args:
resource_key (`str`): Name of the type of `Resources` to find
variables: data to identify / store on the resource
Returns:
`PackageRepositoryResource` instance.
|
def get_resource(self, resource_key, **variables):
    """Get a resource.

    Attempts to get and return a cached version of the resource if
    available, otherwise a new resource object is created and returned.

    Args:
        resource_key (`str`): Name of the type of `Resources` to find
        variables: data to identify / store on the resource

    Returns:
        `PackageRepositoryResource` instance.
    """
    # The handle was just built by make_resource_handle, so the repository
    # checks in get_resource_from_handle can safely be skipped.
    return self.get_resource_from_handle(
        self.make_resource_handle(resource_key, **variables),
        verify_repo=False)
|
Get a resource.
Args:
resource_handle (`ResourceHandle`): Handle of the resource.
Returns:
`PackageRepositoryResource` instance.
|
def get_resource_from_handle(self, resource_handle, verify_repo=True):
    """Get a resource.

    Args:
        resource_handle (`ResourceHandle`): Handle of the resource.
        verify_repo (bool): If True, check that the handle's
            repository_type/location actually name this repository.

    Raises:
        ResourceError: handle names a different repository (only when
            `verify_repo` is True).

    Returns:
        `PackageRepositoryResource` instance.
    """
    if verify_repo:
        # we could fix the handle at this point, but handles should
        # always be made from repo.make_resource_handle... for now,
        # at least, error to catch any "incorrect" construction of
        # handles...
        if resource_handle.variables.get("repository_type") != self.name():
            raise ResourceError("repository_type mismatch - requested %r, "
                                "repository_type is %r"
                                % (resource_handle.variables["repository_type"],
                                   self.name()))
        if resource_handle.variables.get("location") != self.location:
            raise ResourceError("location mismatch - requested %r, "
                                "repository location is %r "
                                % (resource_handle.variables["location"],
                                   self.location))
    # Resources come from the shared pool; stamp the owning repository
    # on the returned instance.
    resource = self.pool.get_resource_from_handle(resource_handle)
    resource._repository = self
    return resource
|
Get a package repository.
Args:
path (str): Entry from the 'packages_path' config setting. This may
simply be a path (which is managed by the 'filesystem' package
repository plugin), or a string in the form "type@location",
where 'type' identifies the repository plugin type to use.
Returns:
`PackageRepository` instance.
|
def get_repository(self, path):
    """Get a package repository.

    Args:
        path (str): Entry from the 'packages_path' config setting. This may
            simply be a path (which is managed by the 'filesystem' package
            repository plugin), or a string in the form "type@location",
            where 'type' identifies the repository plugin type to use.

    Returns:
        `PackageRepository` instance.
    """
    # normalise to "type@location" form
    if '@' in path:
        repo_type, _, location = path.partition('@')
    else:
        repo_type, location = "filesystem", path
    if repo_type == "filesystem":
        # choice of abspath here vs realpath is deliberate. Realpath gives
        # canonical path, which can be a problem if two studios are sharing
        # packages, and have mirrored package paths, but some are actually
        # different paths, symlinked to look the same. It happened!
        location = os.path.abspath(location)
    return self._get_repository("%s@%s" % (repo_type, location))
|
Test that `path_1` and `path_2` refer to the same repository.
This is more reliable than testing that the strings match, since slightly
different strings might refer to the same repository (consider small
differences in a filesystem path for example, eg '//svr/foo', '/svr/foo').
Returns:
True if the paths refer to the same repository, False otherwise.
|
def are_same(self, path_1, path_2):
    """Test that `path_1` and `path_2` refer to the same repository.

    This is more reliable than testing that the strings match, since
    slightly different strings might refer to the same repository
    (consider small differences in a filesystem path for example, eg
    '//svr/foo', '/svr/foo').

    Returns:
        True if the paths refer to the same repository, False otherwise.
    """
    # Identical strings are trivially the same repository.
    if path_1 == path_2:
        return True
    # Otherwise compare the repositories' unique identifiers.
    uid_1 = self.get_repository(path_1).uid
    uid_2 = self.get_repository(path_2).uid
    return uid_1 == uid_2
|
Get a resource.
Attempts to get and return a cached version of the resource if
available, otherwise a new resource object is created and returned.
Args:
resource_key (`str`): Name of the type of `Resources` to find
repository_type (`str`): What sort of repository to look for the
resource in
location (`str`): location for the repository
variables: data to identify / store on the resource
Returns:
`PackageRepositoryResource` instance.
|
def get_resource(self, resource_key, repository_type, location,
                 **variables):
    """Get a resource.

    Attempts to get and return a cached version of the resource if
    available, otherwise a new resource object is created and returned.

    Args:
        resource_key (`str`): Name of the type of `Resources` to find
        repository_type (`str`): What sort of repository to look for the
            resource in
        location (`str`): location for the repository
        variables: data to identify / store on the resource

    Returns:
        `PackageRepositoryResource` instance.
    """
    path = "%s@%s" % (repository_type, location)
    repo = self.get_repository(path)
    # BUGFIX: resource_key must be forwarded -- the repository-level
    # get_resource(resource_key, **variables) takes it as a required
    # positional argument, so omitting it raised TypeError.
    resource = repo.get_resource(resource_key, **variables)
    return resource
|
Get a resource.
Args:
resource_handle (`ResourceHandle`): Handle of the resource.
Returns:
`PackageRepositoryResource` instance.
|
def get_resource_from_handle(self, resource_handle):
    """Get a resource.

    Args:
        resource_handle (`ResourceHandle`): Handle of the resource.

    Returns:
        `PackageRepositoryResource` instance.
    """
    repo_type = resource_handle.get("repository_type")
    location = resource_handle.get("location")
    # Both fields are needed to route the handle to a repository.
    if not (repo_type and location):
        raise ValueError("PackageRepositoryManager requires "
                         "resource_handle objects to have a "
                         "repository_type and location defined")
    repo = self.get_repository("%s@%s" % (repo_type, location))
    return repo.get_resource_from_handle(resource_handle)
|
Clear all cached data.
|
def clear_caches(self):
    """Clear all cached data."""
    # Drop both memoized repository lookups, then the resource pool cache.
    for cached_func in (self.get_repository, self._get_repository):
        cached_func.cache_clear()
    self.pool.clear_caches()
|
Given a few parameters from the Connection constructor,
select and create a subclass of _AbstractTransport.
|
def create_transport(host, connect_timeout, ssl=False):
    """Given a few parameters from the Connection constructor,
    select and create a subclass of _AbstractTransport."""
    # Any truthy `ssl` value (True, or an options dict) selects SSL.
    if not ssl:
        return TCPTransport(host, connect_timeout)
    return SSLTransport(host, connect_timeout, ssl)
|
Wrap the socket in an SSL object.
|
def _setup_transport(self):
    """Wrap the socket in an SSL object."""
    # sslopts, when set, carries keyword arguments for wrap_socket
    # (keyfile, certfile, etc.).
    # NOTE(review): ssl.wrap_socket was deprecated in later Python
    # versions in favour of SSLContext.wrap_socket -- fine for this
    # (Python 2 era) codebase.
    if hasattr(self, 'sslopts'):
        self.sock = ssl.wrap_socket(self.sock, **self.sslopts)
    else:
        self.sock = ssl.wrap_socket(self.sock)
    # Force the handshake now so errors surface here, not on first read.
    self.sock.do_handshake()
    # SSL sockets read via .read(); the plain TCP transport uses .recv.
    self._quick_recv = self.sock.read
|
Write a string out to the SSL socket fully.
|
def _write(self, s):
"""Write a string out to the SSL socket fully."""
try:
write = self.sock.write
except AttributeError:
# Works around a bug in python socket library
raise IOError('Socket closed')
else:
while s:
n = write(s)
if not n:
raise IOError('Socket closed')
s = s[n:]
|
Setup to _write() directly to the socket, and
do our own buffered reads.
|
def _setup_transport(self):
    """Setup to _write() directly to the socket, and
    do our own buffered reads."""
    # Plain TCP: writes go straight to sendall, reads are buffered
    # locally starting from an empty buffer.
    self._read_buffer = EMPTY_BUFFER
    self._write = self.sock.sendall
    self._quick_recv = self.sock.recv
|
Read exactly n bytes from the socket
|
def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)):
    """Read exactly n bytes from the socket"""
    # Pull from the socket until the local buffer holds at least n bytes,
    # retrying transient errors (EAGAIN/EINTR) except on the initial read.
    recv = self._quick_recv
    rbuf = self._read_buffer
    try:
        while len(rbuf) < n:
            try:
                s = recv(n - len(rbuf))
            except socket.error as exc:
                if not initial and exc.errno in _errnos:
                    continue
                raise
            # Empty read means the peer closed the connection.
            if not s:
                raise IOError('Socket closed')
            rbuf += s
    except:
        # Preserve whatever was read so a later call can resume.
        self._read_buffer = rbuf
        raise
    # Hand back exactly n bytes; the remainder stays buffered.
    result, self._read_buffer = rbuf[:n], rbuf[n:]
    return result
|
Return (pyver, abi, arch) tuples compatible with this Python.
|
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.

    Builds the PEP 425-style compatibility tag set: most-specific
    (interpreter+ABI+arch) first, then interpreter-only tags, then
    generic 'py' tags.
    """
    # Current version first, then older minors of the same major version.
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))
    # ABI tags advertised by the interpreter's import suffixes.
    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []
    arches = [ARCH]
    if sys.platform == 'darwin':
        # macOS: also accept fat/universal binary variants and all older
        # OS minor versions.
        m = re.match('(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            # NOTE(review): this rebinds `major` (previously the Python
            # major version) to the macOS major version -- harmless here
            # since the Python value is no longer needed, but confusing.
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                minor -= 1
    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            # Also the bare major-version tag (eg "cp3").
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return set(result)
|
Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on 'config.plugin_path' named after the package. This is
useful if one wants to distribute different parts of a single logical
package as multiple directories.
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that 'plugin_path' is a sequence. Items of 'plugin_path'
that are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
|
def extend_path(path, name):
    """Extend a package's path.

    Intended use is to place the following code in a package's __init__.py:

        from pkgutil import extend_path
        __path__ = extend_path(__path__, __name__)

    This appends to the package's __path__ every subdirectory, named after
    the package, of the directories listed in 'config.plugin_path' that
    contains an __init__.py. This is useful when a single logical package
    is distributed across multiple directories.

    If 'path' is not a list (as is the case for frozen packages) it is
    returned unchanged. The input path is never modified; an extended
    copy is returned, with new items only appended at the end.
    """
    if not isinstance(path, list):
        # Frozen packages carry a non-list __path__; leave it untouched.
        return path

    # Reconstitute the dotted package name as a relative path,
    # e.g. "foo.bar" -> "foo/bar".
    package_subpath = os.path.join(*name.split('.'))
    # Honour platforms where os.extsep != '.'
    init_py = "__init__" + os.extsep + "py"

    result = list(path)
    for plugin_dir in config.plugin_path:
        if not os.path.isdir(plugin_dir):
            if config.debug("plugins"):
                print_debug("skipped nonexistant rez plugin path: %s"
                            % plugin_dir)
            continue
        candidate = os.path.join(plugin_dir, package_subpath)
        # XXX may still add duplicate entries on case-insensitive
        # filesystems.
        if candidate not in result \
                and os.path.isfile(os.path.join(candidate, init_py)):
            result.append(candidate)
    return result
|
Returns the class registered under the given plugin name.
|
def get_plugin_class(self, plugin_name):
"""Returns the class registered under the given plugin name."""
try:
return self.plugin_classes[plugin_name]
except KeyError:
raise RezPluginError("Unrecognised %s plugin: '%s'"
% (self.pretty_type_name, plugin_name))
|
Returns the module containing the plugin of the given name.
|
def get_plugin_module(self, plugin_name):
"""Returns the module containing the plugin of the given name."""
try:
return self.plugin_modules[plugin_name]
except KeyError:
raise RezPluginError("Unrecognised %s plugin: '%s'"
% (self.pretty_type_name, plugin_name))
|
Returns the merged configuration data schema for this plugin
type.
|
def config_schema(self):
"""Returns the merged configuration data schema for this plugin
type."""
from rez.config import _plugin_config_dict
d = _plugin_config_dict.get(self.type_name, {})
for name, plugin_class in self.plugin_classes.iteritems():
if hasattr(plugin_class, "schema_dict") \
and plugin_class.schema_dict:
d_ = {name: plugin_class.schema_dict}
deep_update(d, d_)
return dict_to_schema(d, required=True, modifier=expand_system_vars)
|
Return the class registered under the given plugin name.
|
def get_plugin_class(self, plugin_type, plugin_name):
"""Return the class registered under the given plugin name."""
plugin = self._get_plugin_type(plugin_type)
return plugin.get_plugin_class(plugin_name)
|
Return the module defining the class registered under the given
plugin name.
|
def get_plugin_module(self, plugin_type, plugin_name):
"""Return the module defining the class registered under the given
plugin name."""
plugin = self._get_plugin_type(plugin_type)
return plugin.get_plugin_module(plugin_name)
|
Create and return an instance of the given plugin.
|
def create_instance(self, plugin_type, plugin_name, **instance_kwargs):
"""Create and return an instance of the given plugin."""
plugin_type = self._get_plugin_type(plugin_type)
return plugin_type.create_instance(plugin_name, **instance_kwargs)
|
Get a formatted string summarising the plugins that were loaded.
|
def get_summary_string(self):
"""Get a formatted string summarising the plugins that were loaded."""
rows = [["PLUGIN TYPE", "NAME", "DESCRIPTION", "STATUS"],
["-----------", "----", "-----------", "------"]]
for plugin_type in sorted(self.get_plugin_types()):
type_name = plugin_type.replace('_', ' ')
for name in sorted(self.get_plugins(plugin_type)):
module = self.get_plugin_module(plugin_type, name)
desc = (getattr(module, "__doc__", None) or '').strip()
rows.append((type_name, name, desc, "loaded"))
for (name, reason) in sorted(self.get_failed_plugins(plugin_type)):
msg = "FAILED: %s" % reason
rows.append((type_name, name, '', msg))
return '\n'.join(columnise(rows))
|
Get the part of the source which is causing a problem.
|
def get_fragment(self, offset):
"""
Get the part of the source which is causing a problem.
"""
fragment_len = 10
s = '%r' % (self.source[offset:offset + fragment_len])
if offset + fragment_len < len(self.source):
s += '...'
return s
|
Evaluate a source string or node, using ``filename`` when
displaying errors.
|
def evaluate(self, node, filename=None):
"""
Evaluate a source string or node, using ``filename`` when
displaying errors.
"""
if isinstance(node, string_types):
self.source = node
kwargs = {'mode': 'eval'}
if filename:
kwargs['filename'] = filename
try:
node = ast.parse(node, **kwargs)
except SyntaxError as e:
s = self.get_fragment(e.offset)
raise SyntaxError('syntax error %s' % s)
node_type = node.__class__.__name__.lower()
handler = self.get_handler(node_type)
if handler is None:
if self.source is None:
s = '(source not available)'
else:
s = self.get_fragment(node.col_offset)
raise SyntaxError("don't know how to evaluate %r %s" % (
node_type, s))
return handler(node)
|
Decorator to prevent infinite repr recursion.
|
def recursive_repr(func):
    """Decorator to prevent infinite repr recursion."""
    # (object id, thread id) pairs currently inside a repr call.
    active = set()

    @wraps(func)
    def wrapper(self):
        "Return ellipsis on recursive re-entry to function."
        key = id(self), get_ident()
        if key in active:
            return '...'
        active.add(key)
        try:
            return func(self)
        finally:
            active.discard(key)

    return wrapper
|
Reset sorted list load.
The *load* specifies the load-factor of the list. The default load
factor of '1000' works well for lists from tens to tens of millions of
elements. Good practice is to use a value that is the cube root of the
list size. With billions of elements, the best load factor depends on
your usage. It's best to leave the load factor at the default until
you start benchmarking.
|
def _reset(self, load):
"""
Reset sorted list load.
The *load* specifies the load-factor of the list. The default load
factor of '1000' works well for lists from tens to tens of millions of
elements. Good practice is to use a value that is the cube root of the
list size. With billions of elements, the best load factor depends on
your usage. It's best to leave the load factor at the default until
you start benchmarking.
"""
values = reduce(iadd, self._lists, [])
self._clear()
self._load = load
self._half = load >> 1
self._dual = load << 1
self._update(values)
|
Splits sublists that are more than double the load level.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see self._loc.
|
def _expand(self, pos):
"""Splits sublists that are more than double the load level.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see self._loc.
"""
_lists = self._lists
_index = self._index
if len(_lists[pos]) > self._dual:
_maxes = self._maxes
_load = self._load
_lists_pos = _lists[pos]
half = _lists_pos[_load:]
del _lists_pos[_load:]
_maxes[pos] = _lists_pos[-1]
_lists.insert(pos + 1, half)
_maxes.insert(pos + 1, half[-1])
del _index[:]
else:
if _index:
child = self._offset + pos
while child:
_index[child] += 1
child = (child - 1) >> 1
_index[0] += 1
|
Update the list by adding all elements from *iterable*.
|
def update(self, iterable):
"""Update the list by adding all elements from *iterable*."""
_lists = self._lists
_maxes = self._maxes
values = sorted(iterable)
if _maxes:
if len(values) * 4 >= self._len:
values.extend(chain.from_iterable(_lists))
values.sort()
self._clear()
else:
_add = self.add
for val in values:
_add(val)
return
_load = self._load
_lists.extend(values[pos:(pos + _load)]
for pos in range(0, len(values), _load))
_maxes.extend(sublist[-1] for sublist in _lists)
self._len = len(values)
del self._index[:]
|
Remove the first occurrence of *val*.
If *val* is not a member, does nothing.
|
def discard(self, val):
"""
Remove the first occurrence of *val*.
If *val* is not a member, does nothing.
"""
_maxes = self._maxes
if not _maxes:
return
pos = bisect_left(_maxes, val)
if pos == len(_maxes):
return
_lists = self._lists
idx = bisect_left(_lists[pos], val)
if _lists[pos][idx] == val:
self._delete(pos, idx)
|
Convert an index into a pair (alpha, beta) that can be used to access
the corresponding _lists[alpha][beta] position.
Most queries require the index be built. Details of the index are
described in self._build_index.
Indexing requires traversing the tree to a leaf node. Each node has
two children which are easily computable. Given an index, pos, the
left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.
When the index is less than the left-child, traversal moves to the
left sub-tree. Otherwise, the index is decremented by the left-child
and traversal moves to the right sub-tree.
At a child node, the indexing pair is computed from the relative
position of the child node as compared with the offset and the remaining
index.
For example, using the index from self._build_index:
_index = 14 5 9 3 2 4 5
_offset = 3
Tree:
14
5 9
3 2 4 5
Indexing position 8 involves iterating like so:
1. Starting at the root, position 0, 8 is compared with the left-child
node (5) which it is greater than. When greater the index is
decremented and the position is updated to the right child node.
2. At node 9 with index 3, we again compare the index to the left-child
node with value 4. Because the index is the less than the left-child
node, we simply traverse to the left.
3. At node 4 with index 3, we recognize that we are at a leaf node and
stop iterating.
4. To compute the sublist index, we subtract the offset from the index
of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
simply use the index remaining from iteration. In this case, 3.
The final index pair from our example is (2, 3) which corresponds to
index 8 in the sorted list.
|
    def _pos(self, idx):
        """Convert an index into a pair (alpha, beta) that can be used to access
        the corresponding _lists[alpha][beta] position.

        Most queries require the index be built. Details of the index are
        described in self._build_index.

        Indexing requires traversing the tree to a leaf node. Each node has
        two children which are easily computable. Given an index, pos, the
        left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.

        When the index is less than the left-child, traversal moves to the
        left sub-tree. Otherwise, the index is decremented by the left-child
        and traversal moves to the right sub-tree.

        At a child node, the indexing pair is computed from the relative
        position of the child node as compared with the offset and the remaining
        index.

        For example, using the index from self._build_index:

            _index = 14 5 9 3 2 4 5
            _offset = 3

        Tree:

                 14
              5      9
            3   2  4   5

        Indexing position 8 involves iterating like so:

        1. Starting at the root, position 0, 8 is compared with the left-child
           node (5) which it is greater than. When greater the index is
           decremented and the position is updated to the right child node.

        2. At node 9 with index 3, we again compare the index to the left-child
           node with value 4. Because the index is the less than the left-child
           node, we simply traverse to the left.

        3. At node 4 with index 3, we recognize that we are at a leaf node and
           stop iterating.

        4. To compute the sublist index, we subtract the offset from the index
           of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
           simply use the index remaining from iteration. In this case, 3.

        The final index pair from our example is (2, 3) which corresponds to
        index 8 in the sorted list.
        """
        if idx < 0:
            # Fast path: negative index that lands inside the last sublist.
            last_len = len(self._lists[-1])
            if (-idx) <= last_len:
                return len(self._lists) - 1, last_len + idx
            # Otherwise normalize to a non-negative index.
            idx += self._len
            if idx < 0:
                raise IndexError('list index out of range')
        elif idx >= self._len:
            raise IndexError('list index out of range')
        if idx < len(self._lists[0]):
            # Fast path: index lands inside the first sublist.
            return 0, idx
        _index = self._index
        if not _index:
            # Lazily (re)build the positional index tree; it is filled
            # in place so the `_index` reference above stays valid.
            self._build_index()
        pos = 0
        child = 1
        len_index = len(_index)
        while child < len_index:
            index_child = _index[child]
            if idx < index_child:
                # Index falls within the left subtree.
                pos = child
            else:
                # Skip over the left subtree and descend right.
                idx -= index_child
                pos = child + 1
            child = (pos << 1) + 1
        # pos is now a leaf; convert it to a sublist number.
        return (pos - self._offset, idx)
|
Build an index for indexing the sorted list.
Indexes are represented as binary trees in a dense array notation
similar to a binary heap.
For example, given a _lists representation storing integers:
[0]: 1 2 3
[1]: 4 5
[2]: 6 7 8 9
[3]: 10 11 12 13 14
The first transformation maps the sub-lists by their length. The
first row of the index is the length of the sub-lists.
[0]: 3 2 4 5
Each row after that is the sum of consecutive pairs of the previous row:
[1]: 5 9
[2]: 14
Finally, the index is built by concatenating these lists together:
_index = 14 5 9 3 2 4 5
An offset storing the start of the first row is also stored:
_offset = 3
When built, the index can be used for efficient indexing into the list.
See the comment and notes on self._pos for details.
|
    def _build_index(self):
        """Build an index for indexing the sorted list.

        Indexes are represented as binary trees in a dense array notation
        similar to a binary heap.

        For example, given a _lists representation storing integers:

            [0]: 1 2 3
            [1]: 4 5
            [2]: 6 7 8 9
            [3]: 10 11 12 13 14

        The first transformation maps the sub-lists by their length. The
        first row of the index is the length of the sub-lists.

            [0]: 3 2 4 5

        Each row after that is the sum of consecutive pairs of the previous row:

            [1]: 5 9
            [2]: 14

        Finally, the index is built by concatenating these lists together:

            _index = 14 5 9 3 2 4 5

        An offset storing the start of the first row is also stored:

            _offset = 3

        When built, the index can be used for efficient indexing into the list.
        See the comment and notes on self._pos for details.
        """
        # Leaf row: one entry per sublist, holding its length.
        row0 = list(map(len, self._lists))
        if len(row0) == 1:
            # Single sublist: the tree is just that one leaf.
            self._index[:] = row0
            self._offset = 0
            return
        # head and tail are the SAME iterator (iter() of an iterator returns
        # itself), so zip(head, tail) yields non-overlapping consecutive
        # pairs which starmap(add, ...) sums.
        head = iter(row0)
        tail = iter(head)
        row1 = list(starmap(add, zip(head, tail)))
        if len(row0) & 1:
            # Odd leaf count: carry the unpaired last leaf up unchanged.
            row1.append(row0[-1])
        if len(row1) == 1:
            # Two-level tree: root row plus leaf row.
            self._index[:] = row1 + row0
            self._offset = 1
            return
        # Pad row1 with zeros up to the next power of two so the tree is
        # complete.  NOTE(review): log_e is a module-level alias defined
        # elsewhere in this file (presumably math.log); used base-2 here.
        size = 2 ** (int(log_e(len(row1) - 1, 2)) + 1)
        row1.extend(repeat(0, size - len(row1)))
        tree = [row0, row1]
        # Keep summing consecutive pairs until a single root remains.
        while len(tree[-1]) > 1:
            head = iter(tree[-1])
            tail = iter(head)
            row = list(starmap(add, zip(head, tail)))
            tree.append(row)
        # Concatenate the rows root-first into the flat index (in place).
        reduce(iadd, reversed(tree), self._index)
        self._offset = size * 2 - 1
|
Returns an iterator that slices `self` using two index pairs,
`(min_pos, min_idx)` and `(max_pos, max_idx)`; the first inclusive
and the latter exclusive. See `_pos` for details on how an index
is converted to an index pair.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
|
def _islice(self, min_pos, min_idx, max_pos, max_idx, reverse):
"""
Returns an iterator that slices `self` using two index pairs,
`(min_pos, min_idx)` and `(max_pos, max_idx)`; the first inclusive
and the latter exclusive. See `_pos` for details on how an index
is converted to an index pair.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
"""
_lists = self._lists
if min_pos > max_pos:
return iter(())
elif min_pos == max_pos and not reverse:
return iter(_lists[min_pos][min_idx:max_idx])
elif min_pos == max_pos and reverse:
return reversed(_lists[min_pos][min_idx:max_idx])
elif min_pos + 1 == max_pos and not reverse:
return chain(_lists[min_pos][min_idx:], _lists[max_pos][:max_idx])
elif min_pos + 1 == max_pos and reverse:
return chain(
reversed(_lists[max_pos][:max_idx]),
reversed(_lists[min_pos][min_idx:]),
)
elif not reverse:
return chain(
_lists[min_pos][min_idx:],
chain.from_iterable(_lists[(min_pos + 1):max_pos]),
_lists[max_pos][:max_idx],
)
temp = map(reversed, reversed(_lists[(min_pos + 1):max_pos]))
return chain(
reversed(_lists[max_pos][:max_idx]),
chain.from_iterable(temp),
reversed(_lists[min_pos][min_idx:]),
)
|
Create an iterator of values between `minimum` and `maximum`.
`inclusive` is a pair of booleans that indicates whether the minimum
and maximum ought to be included in the range, respectively. The
default is (True, True) such that the range is inclusive of both
minimum and maximum.
Both `minimum` and `maximum` default to `None` which is automatically
inclusive of the start and end of the list, respectively.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
|
    def irange(self, minimum=None, maximum=None, inclusive=(True, True),
               reverse=False):
        """
        Create an iterator of values between `minimum` and `maximum`.

        `inclusive` is a pair of booleans that indicates whether the minimum
        and maximum ought to be included in the range, respectively. The
        default is (True, True) such that the range is inclusive of both
        minimum and maximum.

        Both `minimum` and `maximum` default to `None` which is automatically
        inclusive of the start and end of the list, respectively.

        When `reverse` is `True` the values are yielded from the iterator in
        reverse order; `reverse` defaults to `False`.
        """
        _maxes = self._maxes
        if not _maxes:
            # Empty list: nothing to iterate.
            return iter(())
        _lists = self._lists
        # Calculate the minimum (pos, idx) pair. By default this location
        # will be inclusive in our calculation.
        if minimum is None:
            min_pos = 0
            min_idx = 0
        else:
            if inclusive[0]:
                # Inclusive bound: first location whose value is >= minimum.
                min_pos = bisect_left(_maxes, minimum)
                if min_pos == len(_maxes):
                    # minimum is beyond every stored value: empty range.
                    return iter(())
                min_idx = bisect_left(_lists[min_pos], minimum)
            else:
                # Exclusive bound: skip past values equal to minimum.
                min_pos = bisect_right(_maxes, minimum)
                if min_pos == len(_maxes):
                    return iter(())
                min_idx = bisect_right(_lists[min_pos], minimum)
        # Calculate the maximum (pos, idx) pair. By default this location
        # will be exclusive in our calculation.
        if maximum is None:
            max_pos = len(_maxes) - 1
            max_idx = len(_lists[max_pos])
        else:
            if inclusive[1]:
                # Inclusive bound: include every value equal to maximum.
                max_pos = bisect_right(_maxes, maximum)
                if max_pos == len(_maxes):
                    # maximum extends past the last sublist: clamp to end.
                    max_pos -= 1
                    max_idx = len(_lists[max_pos])
                else:
                    max_idx = bisect_right(_lists[max_pos], maximum)
            else:
                # Exclusive bound: stop before values equal to maximum.
                max_pos = bisect_left(_maxes, maximum)
                if max_pos == len(_maxes):
                    max_pos -= 1
                    max_idx = len(_lists[max_pos])
                else:
                    max_idx = bisect_left(_lists[max_pos], maximum)
        return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
|
Similar to the *bisect* module in the standard library, this returns an
appropriate index to insert *val*. If *val* is already present, the
insertion point will be before (to the left of) any existing entries.
|
def bisect_left(self, val):
"""
Similar to the *bisect* module in the standard library, this returns an
appropriate index to insert *val*. If *val* is already present, the
insertion point will be before (to the left of) any existing entries.
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_left(_maxes, val)
if pos == len(_maxes):
return self._len
idx = bisect_left(self._lists[pos], val)
return self._loc(pos, idx)
|
Return the number of occurrences of *val* in the list.
|
def count(self, val):
"""Return the number of occurrences of *val* in the list."""
# pylint: disable=arguments-differ
_maxes = self._maxes
if not _maxes:
return 0
pos_left = bisect_left(_maxes, val)
if pos_left == len(_maxes):
return 0
_lists = self._lists
idx_left = bisect_left(_lists[pos_left], val)
pos_right = bisect_right(_maxes, val)
if pos_right == len(_maxes):
return self._len - self._loc(pos_left, idx_left)
idx_right = bisect_right(_lists[pos_right], val)
if pos_left == pos_right:
return idx_right - idx_left
right = self._loc(pos_right, idx_right)
left = self._loc(pos_left, idx_left)
return right - left
|
Append the element *val* to the list. Raises a ValueError if the *val*
would violate the sort order.
|
def append(self, val):
"""
Append the element *val* to the list. Raises a ValueError if the *val*
would violate the sort order.
"""
# pylint: disable=arguments-differ
_lists = self._lists
_maxes = self._maxes
if not _maxes:
_maxes.append(val)
_lists.append([val])
self._len = 1
return
pos = len(_lists) - 1
if val < _lists[pos][-1]:
msg = '{0!r} not in sort order at index {1}'.format(val, self._len)
raise ValueError(msg)
_maxes[pos] = val
_lists[pos].append(val)
self._len += 1
self._expand(pos)
|
Remove and return item at *idx* (default last). Raises IndexError if
list is empty or index is out of range. Negative indices are supported,
as for slice indices.
|
def pop(self, idx=-1):
"""
Remove and return item at *idx* (default last). Raises IndexError if
list is empty or index is out of range. Negative indices are supported,
as for slice indices.
"""
# pylint: disable=arguments-differ
if not self._len:
raise IndexError('pop index out of range')
_lists = self._lists
if idx == 0:
val = _lists[0][0]
self._delete(0, 0)
return val
if idx == -1:
pos = len(_lists) - 1
loc = len(_lists[pos]) - 1
val = _lists[pos][loc]
self._delete(pos, loc)
return val
if 0 <= idx < len(_lists[0]):
val = _lists[0][idx]
self._delete(0, idx)
return val
len_last = len(_lists[-1])
if -len_last < idx < 0:
pos = len(_lists) - 1
loc = len_last + idx
val = _lists[pos][loc]
self._delete(pos, loc)
return val
pos, idx = self._pos(idx)
val = _lists[pos][idx]
self._delete(pos, idx)
return val
|
Return the smallest *k* such that ``L[k] == val`` and ``start <= k < stop``.
Raises ValueError if *val* is not present. *stop* defaults to the end of the
list. *start* defaults to the beginning. Negative indices are supported,
as for slice indices.
|
    def index(self, val, start=None, stop=None):
        """
        Return the smallest *k* such that ``L[k] == val`` and
        ``start <= k < stop``. Raises ValueError if *val* is not present.
        *stop* defaults to the end of the list. *start* defaults to the
        beginning. Negative indices are supported, as for slice indices.
        """
        # pylint: disable=arguments-differ
        _len = self._len
        if not _len:
            raise ValueError('{0!r} is not in list'.format(val))
        # Normalize start/stop the same way slice indices are normalized.
        if start is None:
            start = 0
        if start < 0:
            start += _len
        if start < 0:
            start = 0
        if stop is None:
            stop = _len
        if stop < 0:
            stop += _len
        if stop > _len:
            stop = _len
        if stop <= start:
            # Empty search window.
            raise ValueError('{0!r} is not in list'.format(val))
        _maxes = self._maxes
        pos_left = bisect_left(_maxes, val)
        if pos_left == len(_maxes):
            # val is greater than every element.
            raise ValueError('{0!r} is not in list'.format(val))
        _lists = self._lists
        idx_left = bisect_left(_lists[pos_left], val)
        if _lists[pos_left][idx_left] != val:
            raise ValueError('{0!r} is not in list'.format(val))
        # From here on, stop is the largest acceptable index (inclusive).
        stop -= 1
        left = self._loc(pos_left, idx_left)
        if start <= left:
            if left <= stop:
                # Leftmost occurrence already lies inside the window.
                return left
        else:
            # Leftmost occurrence is before the window; accept `start`
            # itself when the run of equal values extends through it.
            right = self._bisect_right(val) - 1
            if start <= right:
                return start
        raise ValueError('{0!r} is not in list'.format(val))
|
Add the element *val* to the list.
|
def add(self, val):
"""Add the element *val* to the list."""
_lists = self._lists
_keys = self._keys
_maxes = self._maxes
key = self._key(val)
if _maxes:
pos = bisect_right(_maxes, key)
if pos == len(_maxes):
pos -= 1
_lists[pos].append(val)
_keys[pos].append(key)
_maxes[pos] = key
else:
idx = bisect_right(_keys[pos], key)
_lists[pos].insert(idx, val)
_keys[pos].insert(idx, key)
self._expand(pos)
else:
_lists.append([val])
_keys.append([key])
_maxes.append(key)
self._len += 1
|
Splits sublists that are more than double the load level.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see self._loc.
|
def _expand(self, pos):
"""Splits sublists that are more than double the load level.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see self._loc.
"""
_lists = self._lists
_keys = self._keys
_index = self._index
if len(_keys[pos]) > self._dual:
_maxes = self._maxes
_load = self._load
_lists_pos = _lists[pos]
_keys_pos = _keys[pos]
half = _lists_pos[_load:]
half_keys = _keys_pos[_load:]
del _lists_pos[_load:]
del _keys_pos[_load:]
_maxes[pos] = _keys_pos[-1]
_lists.insert(pos + 1, half)
_keys.insert(pos + 1, half_keys)
_maxes.insert(pos + 1, half_keys[-1])
del _index[:]
else:
if _index:
child = self._offset + pos
while child:
_index[child] += 1
child = (child - 1) >> 1
_index[0] += 1
|
Create an iterator of values between `min_key` and `max_key`.
`inclusive` is a pair of booleans that indicates whether the min_key
and max_key ought to be included in the range, respectively. The
default is (True, True) such that the range is inclusive of both
`min_key` and `max_key`.
Both `min_key` and `max_key` default to `None` which is automatically
inclusive of the start and end of the list, respectively.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
|
    def irange_key(self, min_key=None, max_key=None, inclusive=(True, True),
                   reverse=False):
        """
        Create an iterator of values between `min_key` and `max_key`.

        `inclusive` is a pair of booleans that indicates whether the min_key
        and max_key ought to be included in the range, respectively. The
        default is (True, True) such that the range is inclusive of both
        `min_key` and `max_key`.

        Both `min_key` and `max_key` default to `None` which is automatically
        inclusive of the start and end of the list, respectively.

        When `reverse` is `True` the values are yielded from the iterator in
        reverse order; `reverse` defaults to `False`.
        """
        _maxes = self._maxes
        if not _maxes:
            # Empty list: nothing to iterate.
            return iter(())
        _keys = self._keys
        # Calculate the minimum (pos, idx) pair. By default this location
        # will be inclusive in our calculation.
        if min_key is None:
            min_pos = 0
            min_idx = 0
        else:
            if inclusive[0]:
                # Inclusive bound: first location whose key is >= min_key.
                min_pos = bisect_left(_maxes, min_key)
                if min_pos == len(_maxes):
                    # min_key is beyond every stored key: empty range.
                    return iter(())
                min_idx = bisect_left(_keys[min_pos], min_key)
            else:
                # Exclusive bound: skip past keys equal to min_key.
                min_pos = bisect_right(_maxes, min_key)
                if min_pos == len(_maxes):
                    return iter(())
                min_idx = bisect_right(_keys[min_pos], min_key)
        # Calculate the maximum (pos, idx) pair. By default this location
        # will be exclusive in our calculation.
        if max_key is None:
            max_pos = len(_maxes) - 1
            max_idx = len(_keys[max_pos])
        else:
            if inclusive[1]:
                # Inclusive bound: include every key equal to max_key.
                max_pos = bisect_right(_maxes, max_key)
                if max_pos == len(_maxes):
                    # max_key extends past the last sublist: clamp to end.
                    max_pos -= 1
                    max_idx = len(_keys[max_pos])
                else:
                    max_idx = bisect_right(_keys[max_pos], max_key)
            else:
                # Exclusive bound: stop before keys equal to max_key.
                max_pos = bisect_left(_maxes, max_key)
                if max_pos == len(_maxes):
                    max_pos -= 1
                    max_idx = len(_keys[max_pos])
                else:
                    max_idx = bisect_left(_keys[max_pos], max_key)
        return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
|
Similar to the *bisect* module in the standard library, this returns an
appropriate index to insert a value with a given *key*. If values with
*key* are already present, the insertion point will be before (to the
left of) any existing entries.
|
def bisect_key_left(self, key):
"""
Similar to the *bisect* module in the standard library, this returns an
appropriate index to insert a value with a given *key*. If values with
*key* are already present, the insertion point will be before (to the
left of) any existing entries.
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return self._len
idx = bisect_left(self._keys[pos], key)
return self._loc(pos, idx)
|
Same as *bisect_key_left*, but if *key* is already present, the insertion
point will be after (to the right of) any existing entries.
|
def bisect_key_right(self, key):
"""
Same as *bisect_key_left*, but if *key* is already present, the insertion
point will be after (to the right of) any existing entries.
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_right(_maxes, key)
if pos == len(_maxes):
return self._len
idx = bisect_right(self._keys[pos], key)
return self._loc(pos, idx)
|
Return the number of occurrences of *val* in the list.
|
def count(self, val):
"""Return the number of occurrences of *val* in the list."""
_maxes = self._maxes
if not _maxes:
return 0
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return 0
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
total = 0
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
return total
if _lists[pos][idx] == val:
total += 1
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
return total
len_sublist = len(_keys[pos])
idx = 0
|
Return the smallest *k* such that ``L[k] == val`` and ``start <= k < stop``.
Raises ValueError if *val* is not present. *stop* defaults to the end of the
list. *start* defaults to the beginning. Negative indices are supported,
as for slice indices.
|
    def index(self, val, start=None, stop=None):
        """
        Return the smallest *k* such that ``L[k] == val`` and
        ``start <= k < stop``. Raises ValueError if *val* is not present.
        *stop* defaults to the end of the list. *start* defaults to the
        beginning. Negative indices are supported, as for slice indices.
        """
        _len = self._len
        if not _len:
            raise ValueError('{0!r} is not in list'.format(val))
        # Normalize start/stop the same way slice indices are normalized.
        if start is None:
            start = 0
        if start < 0:
            start += _len
        if start < 0:
            start = 0
        if stop is None:
            stop = _len
        if stop < 0:
            stop += _len
        if stop > _len:
            stop = _len
        if stop <= start:
            # Empty search window.
            raise ValueError('{0!r} is not in list'.format(val))
        _maxes = self._maxes
        key = self._key(val)
        pos = bisect_left(_maxes, key)
        if pos == len(_maxes):
            # key is greater than every stored key.
            raise ValueError('{0!r} is not in list'.format(val))
        # From here on, stop is the largest acceptable index (inclusive).
        stop -= 1
        _lists = self._lists
        _keys = self._keys
        idx = bisect_left(_keys[pos], key)
        len_keys = len(_keys)
        len_sublist = len(_keys[pos])
        # Scan forward through the run of equal keys; distinct values can
        # share a key, so each candidate value is compared individually.
        while True:
            if _keys[pos][idx] != key:
                # Ran past the key run without finding val.
                raise ValueError('{0!r} is not in list'.format(val))
            if _lists[pos][idx] == val:
                loc = self._loc(pos, idx)
                if start <= loc <= stop:
                    return loc
                elif loc > stop:
                    # Past the window; no later occurrence can qualify.
                    break
            idx += 1
            if idx == len_sublist:
                # Continue scanning into the next sublist.
                pos += 1
                if pos == len_keys:
                    raise ValueError('{0!r} is not in list'.format(val))
                len_sublist = len(_keys[pos])
                idx = 0
        raise ValueError('{0!r} is not in list'.format(val))
|
View a graph.
|
def view_graph(graph_str, parent=None, prune_to=None):
    """View a graph."""
    from rezgui.dialogs.ImageViewerDialog import ImageViewerDialog
    from rez.config import config

    # Re-use a previously rendered image for the same graph/prune combo.
    cache_key = hash((graph_str, prune_to))
    filepath = graph_file_lookup.get(cache_key)
    if filepath and not os.path.exists(filepath):
        # Cached file was deleted out from under us; re-render.
        filepath = None

    if filepath is None:
        # Render the graph into a fresh tempfile.
        suffix = ".%s" % config.dot_image_format
        fd, filepath = tempfile.mkstemp(suffix=suffix, prefix="rez-graph-")
        os.close(fd)
        dlg = WriteGraphDialog(graph_str, filepath, parent, prune_to=prune_to)
        if not dlg.write_graph():
            return

    # Remember the rendered file, then display it.
    graph_file_lookup[cache_key] = filepath
    ImageViewerDialog(filepath, parent).exec_()
|
Select the latest versioned package in the given range.
If there are no packages in the range, the selection is cleared.
|
def select_version(self, version_range):
"""Select the latest versioned package in the given range.
If there are no packages in the range, the selection is cleared.
"""
row = -1
version = None
for i, package in self.packages.iteritems():
if package.version in version_range \
and (version is None or version < package.version):
version = package.version
row = i
self.clearSelection()
if row != -1:
self.selectRow(row)
return version
|
Initialize sorted set from existing set.
|
def _fromset(cls, values, key=None):
"""Initialize sorted set from existing set."""
sorted_set = object.__new__(cls)
sorted_set._set = values # pylint: disable=protected-access
sorted_set.__init__(key=key)
return sorted_set
|
Create a shallow copy of the sorted set.
|
def copy(self):
    """Return a shallow copy of the sorted set."""
    duplicate_values = set(self._set)
    return self._fromset(duplicate_values, key=self._key)
|
Remove the first occurrence of *value*. If *value* is not a member,
does nothing.
|
def discard(self, value):
    """Remove *value* if it is a member; silently do nothing otherwise."""
    members = self._set
    if value not in members:
        return
    # Keep the backing set and the sorted list in sync.
    members.remove(value)
    self._list.discard(value)
|
Remove and return item at *index* (default last). Raises IndexError if
set is empty or index is out of range. Negative indexes are supported,
as for slice indices.
|
def pop(self, index=-1):
    """Remove and return the item at *index* (default: last).

    Raises IndexError if the set is empty or *index* is out of range.
    Negative indexes are supported, as for slice indices.
    """
    # pylint: disable=arguments-differ
    # The sorted list defines ordering; remove there first, then keep
    # the backing set in sync.
    removed = self._list.pop(index)
    self._set.remove(removed)
    return removed
|
Remove first occurrence of *value*. Raises KeyError if
*value* is not present.
|
def remove(self, value):
    """
    Remove first occurrence of *value*.

    Raises KeyError if *value* is not present.
    """
    # set.remove raises KeyError for a missing value, so the sorted list
    # is only touched when the value actually exists. (The docstring
    # previously claimed ValueError, which is not what set.remove raises.)
    self._set.remove(value)
    self._list.remove(value)
|
Return a new set with elements in the set that are not in the
*iterables*.
|
def difference(self, *iterables):
    """Return a new sorted set with the elements that are in this set
    but in none of the *iterables*.
    """
    remaining = self._set.difference(*iterables)
    return self._fromset(remaining, key=self._key)
|
Update the set, removing elements found in any of the *iterables*.
|
def difference_update(self, *iterables):
    """Update the set, removing elements found in any of the *iterables*.

    Returns self, so calls can be chained.
    """
    _set = self._set
    values = set(chain(*iterables))
    if (4 * len(values)) > len(_set):
        # Many removals relative to our size: subtract in bulk and
        # rebuild the sorted list wholesale, which is cheaper than
        # discarding one element at a time.
        _list = self._list
        _set.difference_update(values)
        _list.clear()
        _list.update(_set)
    else:
        # Few removals: discard individually, keeping list and set in sync.
        _discard = self.discard
        for value in values:
            _discard(value)
    return self
|
Return a new set with elements common to the set and all *iterables*.
|
def intersection(self, *iterables):
    """Return a new sorted set of elements common to this set and every
    one of the *iterables*.
    """
    common = self._set.intersection(*iterables)
    return self._fromset(common, key=self._key)
|
Update the set, keeping only elements found in it and all *iterables*.
|
def intersection_update(self, *iterables):
    """Update the set, keeping only elements found in it and all
    *iterables*. Returns self, so calls can be chained.
    """
    backing_set = self._set
    backing_list = self._list
    backing_set.intersection_update(*iterables)
    # Re-derive the sorted list from the shrunken set.
    backing_list.clear()
    backing_list.update(backing_set)
    return self
|
Return a new set with elements in either *self* or *that* but not both.
|
def symmetric_difference(self, that):
    """Return a new sorted set with the elements found in exactly one of
    *self* and *that* (not in both).
    """
    exclusive = self._set.symmetric_difference(that)
    return self._fromset(exclusive, key=self._key)
|
Update the set, keeping only elements found in either *self* or *that*,
but not in both.
|
def symmetric_difference_update(self, that):
    """Update the set, keeping only elements found in either *self* or
    *that*, but not in both. Returns self, so calls can be chained.
    """
    backing_set = self._set
    backing_list = self._list
    backing_set.symmetric_difference_update(that)
    # Re-derive the sorted list from the updated set.
    backing_list.clear()
    backing_list.update(backing_set)
    return self
|
Return a new SortedSet with elements from the set and all *iterables*.
|
def union(self, *iterables):
    """Return a new sorted set containing the elements of this set plus
    those of every iterable in *iterables*.
    """
    combined = chain(iter(self), *iterables)
    return self.__class__(combined, key=self._key)
|
Update the set, adding elements from all *iterables*.
|
def update(self, *iterables):
    """Update the set, adding elements from all *iterables*.

    Returns self, so calls can be chained.
    """
    _set = self._set
    incoming = set(chain(*iterables))
    if (4 * len(incoming)) > len(_set):
        # Bulk addition: merge into the set, then rebuild the sorted
        # list wholesale rather than inserting one element at a time.
        _list = self._list
        _set.update(incoming)
        _list.clear()
        _list.update(_set)
    else:
        # Few additions: add individually, keeping list and set in sync.
        _add = self.add
        for value in incoming:
            _add(value)
    return self
|
Parser setup common to both rez-build and rez-release.
|
def setup_parser_common(parser):
    """Parser setup common to both rez-build and rez-release."""
    from rez.build_process_ import get_build_process_types
    from rez.build_system import get_valid_build_systems

    process_types = get_build_process_types()
    parser.add_argument(
        "--process", type=str, choices=process_types, default="local",
        help="the build process to use (default: %(default)s).")

    # Offer only the build systems valid for the current package; when
    # exactly one applies, also expose its system-specific CLI options.
    package = get_current_developer_package()
    valid_systems = get_valid_build_systems(os.getcwd(), package=package)
    if valid_systems:
        if len(valid_systems) == 1:
            build_cls = valid_systems[0]
            group = parser.add_argument_group(
                "%s build system arguments" % build_cls.name())
            build_cls.bind_cli(parser, group)
        types = [x.name() for x in valid_systems]
    else:
        types = None

    parser.add_argument(
        "-b", "--build-system", dest="buildsys", choices=types,
        help="the build system to use. If not specified, it is detected. Set "
        "'build_system' or 'build_command' to specify the build system in the "
        "package itself.")
    parser.add_argument(
        "--variants", nargs='+', type=int, metavar="INDEX",
        help="select variants to build (zero-indexed).")
    parser.add_argument(
        "--ba", "--build-args", dest="build_args", metavar="ARGS",
        help="arguments to pass to the build system. Alternatively, list these "
        "after a '--'.")
    parser.add_argument(
        "--cba", "--child-build-args", dest="child_build_args", metavar="ARGS",
        help="arguments to pass to the child build system, if any. "
        "Alternatively, list these after a second '--'.")
|
Format a string with respect to a set of objects' attributes.
Example:
>>> class Foo(object):
>>> def __init__(self):
>>> self.name = "Dave"
>>> print scoped_format("hello {foo.name}", foo=Foo())
hello Dave
Args:
objects (dict): Dict of objects to format with. If a value is a dict,
its values, and any further nested dicts, will also format with dot
notation.
pretty (bool): See `ObjectStringFormatter`.
expand (bool): See `ObjectStringFormatter`.
|
def scoped_format(txt, **objects):
    """Format a string with respect to a set of objects' attributes.

    Example:

        >>> class Foo(object):
        >>>     def __init__(self):
        >>>         self.name = "Dave"
        >>> print scoped_format("hello {foo.name}", foo=Foo())
        hello Dave

    Args:
        objects (dict): Dict of objects to format with. If a value is a dict,
            its values, and any further nested dicts, will also format with
            dot notation.
        pretty (bool): See `ObjectStringFormatter`.
        expand (bool): See `ObjectStringFormatter`.
    """
    # 'pretty'/'expand' are pulled out of the kwargs so they are not
    # treated as formattable objects.
    pretty = objects.pop("pretty", RecursiveAttribute.format_pretty)
    expand = objects.pop("expand", RecursiveAttribute.format_expand)
    # (A RecursiveAttribute instance was previously built here but never
    # used; that dead construction has been removed.)
    formatter = scoped_formatter(**objects)
    return formatter.format(txt, pretty=pretty, expand=expand)
|
Get an equivalent dict representation.
|
def to_dict(self):
    """Return an equivalent plain-dict representation, recursing into
    any nested RecursiveAttribute values.
    """
    result = {}
    for key, value in self.__dict__["data"].iteritems():
        if isinstance(value, RecursiveAttribute):
            result[key] = value.to_dict()
        else:
            result[key] = value
    return result
|
Get the value of a setting.
If `type` is not provided, the key must be for a known setting,
present in `self.default_settings`. Conversely if `type` IS provided,
the key must be for an unknown setting.
|
def value(self, key, type_=None):
    """Get the value of a setting.

    If `type_` is not provided, the key must be for a known setting,
    present in `self.default_settings`. Conversely if `type_` IS provided,
    the key must be for an unknown setting.
    """
    if type_ is not None:
        # Unknown setting: there is no default to fall back on.
        stored = self._value(key, None)
        if stored is None:
            return None
        return self._convert_value(stored, type_)

    # Known setting: coerce the stored value to the default's exact type.
    default = self._default_value(key)
    stored = self._value(key, default)
    if type(stored) == type(default):
        return stored
    return self._convert_value(stored, type(default))
|
Get a list of strings.
|
def get_string_list(self, key):
    """Read the settings array stored under *key* and return its
    'entry' values as a list of strings.
    """
    entries = []
    count = self.beginReadArray(key)
    for index in range(count):
        self.setArrayIndex(index)
        entries.append(str(self._value("entry")))
    self.endArray()
    return entries
|
Prepend a fixed-length string list with a new string.
The oldest string will be removed from the list. If the string is
already in the list, it is shuffled to the top. Use this to implement
things like a 'most recent files' entry.
|
def prepend_string_list(self, key, value, max_length_key):
    """Prepend a fixed-length string list with a new string.

    The oldest string will be removed from the list. If the string is
    already in the list, it is shuffled to the top. Use this to implement
    things like a 'most recent files' entry.
    """
    max_len = self.get(max_length_key)
    # Put the new value first, drop any duplicate of it, then truncate.
    existing = [x for x in self.get_string_list(key) if x != value]
    entries = ([value] + existing)[:max_len]

    self.beginWriteArray(key)
    for index, entry in enumerate(entries):
        self.setArrayIndex(index)
        self.setValue("entry", entry)
    self.endArray()
|
Insert item into the queue, with the given priority.
|
def insert(self, item, priority):
    """Insert *item* into the queue with the given *priority*."""
    entry = HeapItem(item, priority)
    heappush(self.heap, entry)
|
Accessibility matrix (transitive closure).
@type graph: graph, digraph, hypergraph
@param graph: Graph.
@rtype: dictionary
@return: Accessibility information for each node.
|
def accessibility(graph):
    """
    Accessibility matrix (transitive closure).

    @type  graph: graph, digraph, hypergraph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Accessibility information for each node.
    """
    # The recursive DFS can go as deep as the node count, so temporarily
    # raise the interpreter recursion limit.
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes()) * 2, recursionlimit))

    result = {}
    for node in graph:
        # Mark every node reachable from `node` via depth-first search.
        reached = {}
        _dfs(graph, reached, 1, node)
        result[node] = list(reached.keys())

    setrecursionlimit(recursionlimit)
    return result
|
Mutual-accessibility matrix (strongly connected components).
@type graph: graph, digraph
@param graph: Graph.
@rtype: dictionary
@return: Mutual-accessibility information for each node.
|
def mutual_accessibility(graph):
    """
    Mutual-accessibility matrix (strongly connected components).

    Each node maps to the sorted list of nodes in its strongly connected
    component (every node in a component can reach every other).

    @type  graph: graph, digraph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Mutual-accessibility information for each node.
    """
    # The recursive visit() below can go as deep as the node count, so
    # temporarily raise the interpreter recursion limit.
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
    mutual_access = {}
    stack = []
    low = {}
    # Low-link DFS: `low[n]` starts as n's discovery index and is lowered
    # to the smallest index reachable from n's DFS subtree.
    def visit(node):
        # Already discovered: nothing to do.
        if node in low:
            return
        num = len(low)  # discovery index of this node
        low[node] = num
        stack_pos = len(stack)
        stack.append(node)
        for successor in graph.neighbors(node):
            visit(successor)
            low[node] = min(low[node], low[successor])
        # If nothing in the subtree reaches an earlier node, everything
        # pushed since stack_pos forms one strongly connected component.
        if num == low[node]:
            component = stack[stack_pos:]
            del stack[stack_pos:]
            component.sort()
            for each in component:
                mutual_access[each] = component
            # Mark members as finished: len(graph) exceeds any discovery
            # index, so they can no longer lower an ancestor's low-link.
            for item in component:
                low[item] = len(graph)
    for node in graph:
        visit(node)
    setrecursionlimit(recursionlimit)
    return mutual_access
|
Connected components.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: dictionary
@return: Pairing that associates each node to its connected component.
|
def connected_components(graph):
    """
    Connected components.

    @type  graph: graph, hypergraph
    @param graph: Graph.

    @rtype:  dictionary
    @return: Pairing that associates each node to its connected component.
    """
    # The recursive DFS can go as deep as the node count, so temporarily
    # raise the interpreter recursion limit.
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes()) * 2, recursionlimit))

    component_of = {}
    component_id = 1
    # Flood-fill each not-yet-visited node with a fresh component id.
    for node in graph:
        if node not in component_of:
            _dfs(graph, component_of, component_id, node)
            component_id += 1

    setrecursionlimit(recursionlimit)
    return component_of
|
Depth-first search subfunction adapted for accessibility algorithms.
@type graph: graph, digraph, hypergraph
@param graph: Graph.
@type visited: dictionary
@param visited: List of nodes (visited nodes are marked non-zero).
@type count: number
@param count: Counter of connected components.
@type node: node
@param node: Node to be explored by DFS.
|
def _dfs(graph, visited, count, node):
"""
Depth-first search subfunction adapted for accessibility algorithms.
@type graph: graph, digraph, hypergraph
@param graph: Graph.
@type visited: dictionary
@param visited: List of nodes (visited nodes are marked non-zero).
@type count: number
@param count: Counter of connected components.
@type node: node
@param node: Node to be explored by DFS.
"""
visited[node] = count
# Explore recursively the connected component
for each in graph[node]:
if (each not in visited):
_dfs(graph, visited, count, each)
|
Return the cut-edges of the given graph.
A cut edge, or bridge, is an edge of a graph whose removal increases the number of connected
components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-edges.
|
def cut_edges(graph):
    """
    Return the cut-edges of the given graph.

    A cut edge, or bridge, is an edge of a graph whose removal increases
    the number of connected components in the graph.

    @type  graph: graph, hypergraph
    @param graph: Graph.

    @rtype:  list
    @return: List of cut-edges.
    """
    # The recursive DFS helper can go as deep as the node count, so
    # temporarily raise the interpreter recursion limit.
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
    # Dispatch if we have a hypergraph
    if 'hypergraph' == graph.__class__.__name__:
        return _cut_hyperedges(graph)
    pre = {}  # Pre-ordering (discovery order of nodes)
    low = {}  # Lowest pre[] reachable from this node going down the spanning tree + one backedge
    spanning_tree = {}
    reply = []
    pre[None] = 0
    # Run the cut-detecting DFS from every undiscovered node, building a
    # spanning forest. NOTE(review): _cut_dfs is defined elsewhere;
    # presumably it appends the found bridges to `reply` — confirm there.
    for each in graph:
        if (each not in pre):
            spanning_tree[each] = None  # roots have no parent
            _cut_dfs(graph, spanning_tree, pre, low, reply, each)
    setrecursionlimit(recursionlimit)
    return reply
|
Return the cut-hyperedges of the given hypergraph.
@type hypergraph: hypergraph
@param hypergraph: Hypergraph
@rtype: list
@return: List of cut-nodes.
|
def _cut_hyperedges(hypergraph):
    """
    Return the cut-hyperedges of the given hypergraph.

    @type  hypergraph: hypergraph
    @param hypergraph: Hypergraph

    @rtype:  list
    @return: List of cut-nodes.
    """
    # Cut hyperedges are the cut-nodes of the underlying bipartite graph
    # whose entries are tagged as hyperedge ('h') nodes.
    return [node for node, tag in cut_nodes(hypergraph.graph) if tag == 'h']
|
Return the cut-nodes of the given graph.
A cut node, or articulation point, is a node of a graph whose removal increases the number of
connected components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-nodes.
|
def cut_nodes(graph):
    """
    Return the cut-nodes of the given graph.

    A cut node, or articulation point, is a node of a graph whose removal
    increases the number of connected components in the graph.

    @type  graph: graph, hypergraph
    @param graph: Graph.

    @rtype:  list
    @return: List of cut-nodes.
    """
    # The recursive DFS helper can go as deep as the node count, so
    # temporarily raise the interpreter recursion limit.
    recursionlimit = getrecursionlimit()
    setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
    # Dispatch if we have a hypergraph
    if 'hypergraph' == graph.__class__.__name__:
        return _cut_hypernodes(graph)
    pre = {}  # Pre-ordering (discovery order of nodes)
    low = {}  # Lowest pre[] reachable from this node going down the spanning tree + one backedge
    reply = {}  # used as a set: node -> 1 marks a cut-node
    spanning_tree = {}
    pre[None] = 0
    # Create spanning trees, calculate pre[], low[].
    # NOTE(review): _cut_dfs is defined elsewhere — it is assumed to fill
    # pre/low/spanning_tree for every reachable node; confirm there.
    for each in graph:
        if (each not in pre):
            spanning_tree[each] = None  # roots have no parent
            _cut_dfs(graph, spanning_tree, pre, low, [], each)
    # Find cuts
    for each in graph:
        # If node is not a root
        if (spanning_tree[each] is not None):
            for other in graph[each]:
                # No back-edge from the child's subtree to an ancestor of
                # `each` means removing `each` disconnects that subtree.
                if (low[other] >= pre[each] and spanning_tree[other] == each):
                    reply[each] = 1
        # If node is a root
        else:
            children = 0
            for other in graph:
                if (spanning_tree[other] == each):
                    children = children + 1
            # root is cut-vertex iff it has two or more children
            if (children >= 2):
                reply[each] = 1
    setrecursionlimit(recursionlimit)
    return list(reply.keys())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.