repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
nerdvegas/rez | src/rez/solver.py | _PackageVariantSlice.intersect | def intersect(self, range_):
self.solver.intersection_broad_tests_count += 1
"""Remove variants whose version fall outside of the given range."""
if range_.is_any():
return self
if self.solver.optimised:
if range_ in self.been_intersected_with:
return self
if self.pr:
self.pr.passive("intersecting %s wrt range '%s'...", self, range_)
self.solver.intersection_tests_count += 1
with self.solver.timed(self.solver.intersection_time):
# this is faster than iter_intersecting :(
entries = [x for x in self.entries if x.version in range_]
if not entries:
return None
elif len(entries) < len(self.entries):
copy_ = self._copy(entries)
copy_.been_intersected_with.add(range_)
return copy_
else:
self.been_intersected_with.add(range_)
return self | python | def intersect(self, range_):
self.solver.intersection_broad_tests_count += 1
"""Remove variants whose version fall outside of the given range."""
if range_.is_any():
return self
if self.solver.optimised:
if range_ in self.been_intersected_with:
return self
if self.pr:
self.pr.passive("intersecting %s wrt range '%s'...", self, range_)
self.solver.intersection_tests_count += 1
with self.solver.timed(self.solver.intersection_time):
# this is faster than iter_intersecting :(
entries = [x for x in self.entries if x.version in range_]
if not entries:
return None
elif len(entries) < len(self.entries):
copy_ = self._copy(entries)
copy_.been_intersected_with.add(range_)
return copy_
else:
self.been_intersected_with.add(range_)
return self | [
"def",
"intersect",
"(",
"self",
",",
"range_",
")",
":",
"self",
".",
"solver",
".",
"intersection_broad_tests_count",
"+=",
"1",
"if",
"range_",
".",
"is_any",
"(",
")",
":",
"return",
"self",
"if",
"self",
".",
"solver",
".",
"optimised",
":",
"if",
... | Remove variants whose version fall outside of the given range. | [
"Remove",
"variants",
"whose",
"version",
"fall",
"outside",
"of",
"the",
"given",
"range",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L594-L622 | train | 227,300 |
nerdvegas/rez | src/rez/solver.py | _PackageVariantSlice.reduce_by | def reduce_by(self, package_request):
"""Remove variants whos dependencies conflict with the given package
request.
Returns:
(VariantSlice, [Reduction]) tuple, where slice may be None if all
variants were reduced.
"""
if self.pr:
reqstr = _short_req_str(package_request)
self.pr.passive("reducing %s wrt %s...", self, reqstr)
if self.solver.optimised:
if package_request in self.been_reduced_by:
return (self, [])
if (package_request.range is None) or \
(package_request.name not in self.fam_requires):
return (self, [])
with self.solver.timed(self.solver.reduction_time):
return self._reduce_by(package_request) | python | def reduce_by(self, package_request):
"""Remove variants whos dependencies conflict with the given package
request.
Returns:
(VariantSlice, [Reduction]) tuple, where slice may be None if all
variants were reduced.
"""
if self.pr:
reqstr = _short_req_str(package_request)
self.pr.passive("reducing %s wrt %s...", self, reqstr)
if self.solver.optimised:
if package_request in self.been_reduced_by:
return (self, [])
if (package_request.range is None) or \
(package_request.name not in self.fam_requires):
return (self, [])
with self.solver.timed(self.solver.reduction_time):
return self._reduce_by(package_request) | [
"def",
"reduce_by",
"(",
"self",
",",
"package_request",
")",
":",
"if",
"self",
".",
"pr",
":",
"reqstr",
"=",
"_short_req_str",
"(",
"package_request",
")",
"self",
".",
"pr",
".",
"passive",
"(",
"\"reducing %s wrt %s...\"",
",",
"self",
",",
"reqstr",
... | Remove variants whos dependencies conflict with the given package
request.
Returns:
(VariantSlice, [Reduction]) tuple, where slice may be None if all
variants were reduced. | [
"Remove",
"variants",
"whos",
"dependencies",
"conflict",
"with",
"the",
"given",
"package",
"request",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L624-L645 | train | 227,301 |
nerdvegas/rez | src/rez/solver.py | _PackageVariantSlice.split | def split(self):
"""Split the slice.
Returns:
(`_PackageVariantSlice`, `_PackageVariantSlice`) tuple, where the
first is the preferred slice.
"""
# We sort here in the split in order to sort as late as possible.
# Because splits usually happen after intersections/reductions, this
# means there can be less entries to sort.
#
self.sort_versions()
def _split(i_entry, n_variants, common_fams=None):
# perform a split at a specific point
result = self.entries[i_entry].split(n_variants)
if result:
entry, next_entry = result
entries = self.entries[:i_entry] + [entry]
next_entries = [next_entry] + self.entries[i_entry + 1:]
else:
entries = self.entries[:i_entry + 1]
next_entries = self.entries[i_entry + 1:]
slice_ = self._copy(entries)
next_slice = self._copy(next_entries)
if self.pr:
if common_fams:
if len(common_fams) == 1:
reason_str = iter(common_fams).next()
else:
reason_str = ", ".join(common_fams)
else:
reason_str = "first variant"
self.pr("split (reason: %s) %s into %s and %s",
reason_str, self, slice_, next_slice)
return slice_, next_slice
# determine if we need to find first variant without common dependency
if len(self) > 2:
fams = self.first_variant.request_fams - self.extracted_fams
else:
fams = None
if not fams:
# trivial case, split on first variant
return _split(0, 1)
# find split point - first variant with no dependency shared with previous
prev = None
for i, entry in enumerate(self.entries):
# sort the variants. This is done here in order to do the sort as
# late as possible, simply to avoid the cost.
entry.sort()
for j, variant in enumerate(entry.variants):
fams = fams & variant.request_fams
if not fams:
return _split(*prev)
prev = (i, j + 1, fams)
# should never get here - it's only possible if there's a common
# dependency, but if there's a common dependency, split() should never
# have been called.
raise RezSystemError(
"Unexpected solver error: common family(s) still in slice being "
"split: slice: %s, family(s): %s" % (self, str(fams))) | python | def split(self):
"""Split the slice.
Returns:
(`_PackageVariantSlice`, `_PackageVariantSlice`) tuple, where the
first is the preferred slice.
"""
# We sort here in the split in order to sort as late as possible.
# Because splits usually happen after intersections/reductions, this
# means there can be less entries to sort.
#
self.sort_versions()
def _split(i_entry, n_variants, common_fams=None):
# perform a split at a specific point
result = self.entries[i_entry].split(n_variants)
if result:
entry, next_entry = result
entries = self.entries[:i_entry] + [entry]
next_entries = [next_entry] + self.entries[i_entry + 1:]
else:
entries = self.entries[:i_entry + 1]
next_entries = self.entries[i_entry + 1:]
slice_ = self._copy(entries)
next_slice = self._copy(next_entries)
if self.pr:
if common_fams:
if len(common_fams) == 1:
reason_str = iter(common_fams).next()
else:
reason_str = ", ".join(common_fams)
else:
reason_str = "first variant"
self.pr("split (reason: %s) %s into %s and %s",
reason_str, self, slice_, next_slice)
return slice_, next_slice
# determine if we need to find first variant without common dependency
if len(self) > 2:
fams = self.first_variant.request_fams - self.extracted_fams
else:
fams = None
if not fams:
# trivial case, split on first variant
return _split(0, 1)
# find split point - first variant with no dependency shared with previous
prev = None
for i, entry in enumerate(self.entries):
# sort the variants. This is done here in order to do the sort as
# late as possible, simply to avoid the cost.
entry.sort()
for j, variant in enumerate(entry.variants):
fams = fams & variant.request_fams
if not fams:
return _split(*prev)
prev = (i, j + 1, fams)
# should never get here - it's only possible if there's a common
# dependency, but if there's a common dependency, split() should never
# have been called.
raise RezSystemError(
"Unexpected solver error: common family(s) still in slice being "
"split: slice: %s, family(s): %s" % (self, str(fams))) | [
"def",
"split",
"(",
"self",
")",
":",
"# We sort here in the split in order to sort as late as possible.",
"# Because splits usually happen after intersections/reductions, this",
"# means there can be less entries to sort.",
"#",
"self",
".",
"sort_versions",
"(",
")",
"def",
"_spli... | Split the slice.
Returns:
(`_PackageVariantSlice`, `_PackageVariantSlice`) tuple, where the
first is the preferred slice. | [
"Split",
"the",
"slice",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L730-L801 | train | 227,302 |
nerdvegas/rez | src/rez/solver.py | _PackageVariantSlice.sort_versions | def sort_versions(self):
"""Sort entries by version.
The order is typically descending, but package order functions can
change this.
"""
if self.sorted:
return
for orderer in (self.solver.package_orderers or []):
entries = orderer.reorder(self.entries, key=lambda x: x.package)
if entries is not None:
self.entries = entries
self.sorted = True
if self.pr:
self.pr("sorted: %s packages: %s", self.package_name, repr(orderer))
return
# default ordering is version descending
self.entries = sorted(self.entries, key=lambda x: x.version, reverse=True)
self.sorted = True
if self.pr:
self.pr("sorted: %s packages: version descending", self.package_name) | python | def sort_versions(self):
"""Sort entries by version.
The order is typically descending, but package order functions can
change this.
"""
if self.sorted:
return
for orderer in (self.solver.package_orderers or []):
entries = orderer.reorder(self.entries, key=lambda x: x.package)
if entries is not None:
self.entries = entries
self.sorted = True
if self.pr:
self.pr("sorted: %s packages: %s", self.package_name, repr(orderer))
return
# default ordering is version descending
self.entries = sorted(self.entries, key=lambda x: x.version, reverse=True)
self.sorted = True
if self.pr:
self.pr("sorted: %s packages: version descending", self.package_name) | [
"def",
"sort_versions",
"(",
"self",
")",
":",
"if",
"self",
".",
"sorted",
":",
"return",
"for",
"orderer",
"in",
"(",
"self",
".",
"solver",
".",
"package_orderers",
"or",
"[",
"]",
")",
":",
"entries",
"=",
"orderer",
".",
"reorder",
"(",
"self",
... | Sort entries by version.
The order is typically descending, but package order functions can
change this. | [
"Sort",
"entries",
"by",
"version",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L803-L827 | train | 227,303 |
nerdvegas/rez | src/rez/solver.py | PackageVariantCache.get_variant_slice | def get_variant_slice(self, package_name, range_):
"""Get a list of variants from the cache.
Args:
package_name (str): Name of package.
range_ (`VersionRange`): Package version range.
Returns:
`_PackageVariantSlice` object.
"""
variant_list = self.variant_lists.get(package_name)
if variant_list is None:
variant_list = _PackageVariantList(package_name, self.solver)
self.variant_lists[package_name] = variant_list
entries = variant_list.get_intersection(range_)
if not entries:
return None
slice_ = _PackageVariantSlice(package_name,
entries=entries,
solver=self.solver)
return slice_ | python | def get_variant_slice(self, package_name, range_):
"""Get a list of variants from the cache.
Args:
package_name (str): Name of package.
range_ (`VersionRange`): Package version range.
Returns:
`_PackageVariantSlice` object.
"""
variant_list = self.variant_lists.get(package_name)
if variant_list is None:
variant_list = _PackageVariantList(package_name, self.solver)
self.variant_lists[package_name] = variant_list
entries = variant_list.get_intersection(range_)
if not entries:
return None
slice_ = _PackageVariantSlice(package_name,
entries=entries,
solver=self.solver)
return slice_ | [
"def",
"get_variant_slice",
"(",
"self",
",",
"package_name",
",",
"range_",
")",
":",
"variant_list",
"=",
"self",
".",
"variant_lists",
".",
"get",
"(",
"package_name",
")",
"if",
"variant_list",
"is",
"None",
":",
"variant_list",
"=",
"_PackageVariantList",
... | Get a list of variants from the cache.
Args:
package_name (str): Name of package.
range_ (`VersionRange`): Package version range.
Returns:
`_PackageVariantSlice` object. | [
"Get",
"a",
"list",
"of",
"variants",
"from",
"the",
"cache",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L902-L925 | train | 227,304 |
nerdvegas/rez | src/rez/solver.py | _PackageScope.intersect | def intersect(self, range_):
"""Intersect this scope with a package range.
Returns:
A new copy of this scope, with variants whos version fall outside
of the given range removed. If there were no removals, self is
returned. If all variants were removed, None is returned.
"""
new_slice = None
if self.package_request.conflict:
if self.package_request.range is None:
new_slice = self.solver._get_variant_slice(
self.package_name, range_)
else:
new_range = range_ - self.package_request.range
if new_range is not None:
new_slice = self.solver._get_variant_slice(
self.package_name, new_range)
else:
new_slice = self.variant_slice.intersect(range_)
# intersection reduced the scope to nothing
if new_slice is None:
if self.pr:
self.pr("%s intersected with range '%s' resulted in no packages",
self, range_)
return None
# intersection narrowed the scope
if new_slice is not self.variant_slice:
scope = self._copy(new_slice)
if self.pr:
self.pr("%s was intersected to %s by range '%s'",
self, scope, range_)
return scope
# intersection did not change the scope
return self | python | def intersect(self, range_):
"""Intersect this scope with a package range.
Returns:
A new copy of this scope, with variants whos version fall outside
of the given range removed. If there were no removals, self is
returned. If all variants were removed, None is returned.
"""
new_slice = None
if self.package_request.conflict:
if self.package_request.range is None:
new_slice = self.solver._get_variant_slice(
self.package_name, range_)
else:
new_range = range_ - self.package_request.range
if new_range is not None:
new_slice = self.solver._get_variant_slice(
self.package_name, new_range)
else:
new_slice = self.variant_slice.intersect(range_)
# intersection reduced the scope to nothing
if new_slice is None:
if self.pr:
self.pr("%s intersected with range '%s' resulted in no packages",
self, range_)
return None
# intersection narrowed the scope
if new_slice is not self.variant_slice:
scope = self._copy(new_slice)
if self.pr:
self.pr("%s was intersected to %s by range '%s'",
self, scope, range_)
return scope
# intersection did not change the scope
return self | [
"def",
"intersect",
"(",
"self",
",",
"range_",
")",
":",
"new_slice",
"=",
"None",
"if",
"self",
".",
"package_request",
".",
"conflict",
":",
"if",
"self",
".",
"package_request",
".",
"range",
"is",
"None",
":",
"new_slice",
"=",
"self",
".",
"solver"... | Intersect this scope with a package range.
Returns:
A new copy of this scope, with variants whos version fall outside
of the given range removed. If there were no removals, self is
returned. If all variants were removed, None is returned. | [
"Intersect",
"this",
"scope",
"with",
"a",
"package",
"range",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L956-L994 | train | 227,305 |
nerdvegas/rez | src/rez/solver.py | _PackageScope.reduce_by | def reduce_by(self, package_request):
"""Reduce this scope wrt a package request.
Returns:
A (_PackageScope, [Reduction]) tuple, where the scope is a new
scope copy with reductions applied, or self if there were no
reductions, or None if the scope was completely reduced.
"""
self.solver.reduction_broad_tests_count += 1
if self.package_request.conflict:
# conflict scopes don't reduce. Instead, other scopes will be
# reduced against a conflict scope.
return (self, [])
# perform the reduction
new_slice, reductions = self.variant_slice.reduce_by(package_request)
# there was total reduction
if new_slice is None:
self.solver.reductions_count += 1
if self.pr:
reqstr = _short_req_str(package_request)
self.pr("%s was reduced to nothing by %s", self, reqstr)
self.pr.br()
return (None, reductions)
# there was some reduction
if new_slice is not self.variant_slice:
self.solver.reductions_count += 1
scope = self._copy(new_slice)
if self.pr:
reqstr = _short_req_str(package_request)
self.pr("%s was reduced to %s by %s", self, scope, reqstr)
self.pr.br()
return (scope, reductions)
# there was no reduction
return (self, []) | python | def reduce_by(self, package_request):
"""Reduce this scope wrt a package request.
Returns:
A (_PackageScope, [Reduction]) tuple, where the scope is a new
scope copy with reductions applied, or self if there were no
reductions, or None if the scope was completely reduced.
"""
self.solver.reduction_broad_tests_count += 1
if self.package_request.conflict:
# conflict scopes don't reduce. Instead, other scopes will be
# reduced against a conflict scope.
return (self, [])
# perform the reduction
new_slice, reductions = self.variant_slice.reduce_by(package_request)
# there was total reduction
if new_slice is None:
self.solver.reductions_count += 1
if self.pr:
reqstr = _short_req_str(package_request)
self.pr("%s was reduced to nothing by %s", self, reqstr)
self.pr.br()
return (None, reductions)
# there was some reduction
if new_slice is not self.variant_slice:
self.solver.reductions_count += 1
scope = self._copy(new_slice)
if self.pr:
reqstr = _short_req_str(package_request)
self.pr("%s was reduced to %s by %s", self, scope, reqstr)
self.pr.br()
return (scope, reductions)
# there was no reduction
return (self, []) | [
"def",
"reduce_by",
"(",
"self",
",",
"package_request",
")",
":",
"self",
".",
"solver",
".",
"reduction_broad_tests_count",
"+=",
"1",
"if",
"self",
".",
"package_request",
".",
"conflict",
":",
"# conflict scopes don't reduce. Instead, other scopes will be",
"# reduc... | Reduce this scope wrt a package request.
Returns:
A (_PackageScope, [Reduction]) tuple, where the scope is a new
scope copy with reductions applied, or self if there were no
reductions, or None if the scope was completely reduced. | [
"Reduce",
"this",
"scope",
"wrt",
"a",
"package",
"request",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L996-L1037 | train | 227,306 |
nerdvegas/rez | src/rez/solver.py | _PackageScope.split | def split(self):
"""Split the scope.
Returns:
A (_PackageScope, _PackageScope) tuple, where the first scope is
guaranteed to have a common dependency. Or None, if splitting is
not applicable to this scope.
"""
if self.package_request.conflict or (len(self.variant_slice) == 1):
return None
else:
r = self.variant_slice.split()
if r is None:
return None
else:
slice, next_slice = r
scope = self._copy(slice)
next_scope = self._copy(next_slice)
return (scope, next_scope) | python | def split(self):
"""Split the scope.
Returns:
A (_PackageScope, _PackageScope) tuple, where the first scope is
guaranteed to have a common dependency. Or None, if splitting is
not applicable to this scope.
"""
if self.package_request.conflict or (len(self.variant_slice) == 1):
return None
else:
r = self.variant_slice.split()
if r is None:
return None
else:
slice, next_slice = r
scope = self._copy(slice)
next_scope = self._copy(next_slice)
return (scope, next_scope) | [
"def",
"split",
"(",
"self",
")",
":",
"if",
"self",
".",
"package_request",
".",
"conflict",
"or",
"(",
"len",
"(",
"self",
".",
"variant_slice",
")",
"==",
"1",
")",
":",
"return",
"None",
"else",
":",
"r",
"=",
"self",
".",
"variant_slice",
".",
... | Split the scope.
Returns:
A (_PackageScope, _PackageScope) tuple, where the first scope is
guaranteed to have a common dependency. Or None, if splitting is
not applicable to this scope. | [
"Split",
"the",
"scope",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1059-L1077 | train | 227,307 |
nerdvegas/rez | src/rez/solver.py | _ResolvePhase.finalise | def finalise(self):
"""Remove conflict requests, detect cyclic dependencies, and reorder
packages wrt dependency and then request order.
Returns:
A new copy of the phase with conflict requests removed and packages
correctly ordered; or, if cyclic dependencies were detected, a new
phase marked as cyclic.
"""
assert(self._is_solved())
g = self._get_minimal_graph()
scopes = dict((x.package_name, x) for x in self.scopes
if not x.package_request.conflict)
# check for cyclic dependencies
fam_cycle = find_cycle(g)
if fam_cycle:
cycle = []
for fam in fam_cycle:
scope = scopes[fam]
variant = scope._get_solved_variant()
stmt = VersionedObject.construct(fam, variant.version)
cycle.append(stmt)
phase = copy.copy(self)
phase.scopes = scopes.values()
phase.failure_reason = Cycle(cycle)
phase.status = SolverStatus.cyclic
return phase
# reorder wrt dependencies, keeping original request order where possible
fams = [x.name for x in self.solver.request_list]
ordered_fams = _get_dependency_order(g, fams)
scopes_ = []
for fam in ordered_fams:
scope = scopes[fam]
if not scope.package_request.conflict:
scopes_.append(scope)
phase = copy.copy(self)
phase.scopes = scopes_
return phase | python | def finalise(self):
"""Remove conflict requests, detect cyclic dependencies, and reorder
packages wrt dependency and then request order.
Returns:
A new copy of the phase with conflict requests removed and packages
correctly ordered; or, if cyclic dependencies were detected, a new
phase marked as cyclic.
"""
assert(self._is_solved())
g = self._get_minimal_graph()
scopes = dict((x.package_name, x) for x in self.scopes
if not x.package_request.conflict)
# check for cyclic dependencies
fam_cycle = find_cycle(g)
if fam_cycle:
cycle = []
for fam in fam_cycle:
scope = scopes[fam]
variant = scope._get_solved_variant()
stmt = VersionedObject.construct(fam, variant.version)
cycle.append(stmt)
phase = copy.copy(self)
phase.scopes = scopes.values()
phase.failure_reason = Cycle(cycle)
phase.status = SolverStatus.cyclic
return phase
# reorder wrt dependencies, keeping original request order where possible
fams = [x.name for x in self.solver.request_list]
ordered_fams = _get_dependency_order(g, fams)
scopes_ = []
for fam in ordered_fams:
scope = scopes[fam]
if not scope.package_request.conflict:
scopes_.append(scope)
phase = copy.copy(self)
phase.scopes = scopes_
return phase | [
"def",
"finalise",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"_is_solved",
"(",
")",
")",
"g",
"=",
"self",
".",
"_get_minimal_graph",
"(",
")",
"scopes",
"=",
"dict",
"(",
"(",
"x",
".",
"package_name",
",",
"x",
")",
"for",
"x",
"in",
... | Remove conflict requests, detect cyclic dependencies, and reorder
packages wrt dependency and then request order.
Returns:
A new copy of the phase with conflict requests removed and packages
correctly ordered; or, if cyclic dependencies were detected, a new
phase marked as cyclic. | [
"Remove",
"conflict",
"requests",
"detect",
"cyclic",
"dependencies",
"and",
"reorder",
"packages",
"wrt",
"dependency",
"and",
"then",
"request",
"order",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1368-L1410 | train | 227,308 |
nerdvegas/rez | src/rez/solver.py | _ResolvePhase.split | def split(self):
"""Split the phase.
When a phase is exhausted, it gets split into a pair of phases to be
further solved. The split happens like so:
1) Select the first unsolved package scope.
2) Find some common dependency in the first N variants of the scope.
3) Split the scope into two: [:N] and [N:].
4) Create two copies of the phase, containing each half of the split
scope.
The result of this split is that we have a new phase (the first phase),
which contains a package scope with a common dependency. This
dependency can now be intersected with the current resolve, thus
progressing it.
Returns:
A 2-tuple of _ResolvePhase objects, where the first phase is the
best contender for resolving.
"""
assert(self.status == SolverStatus.exhausted)
scopes = []
next_scopes = []
split_i = None
for i, scope in enumerate(self.scopes):
if split_i is None:
r = scope.split()
if r is not None:
scope_, next_scope = r
scopes.append(scope_)
next_scopes.append(next_scope)
split_i = i
continue
scopes.append(scope)
next_scopes.append(scope)
assert split_i is not None
phase = copy.copy(self)
phase.scopes = scopes
phase.status = SolverStatus.pending
phase.changed_scopes_i = set([split_i])
# because a scope was narrowed by a split, other scopes need to be
# reduced against it
#for i in range(len(phase.scopes)):
# if i != split_i:
# phase.pending_reducts.add((i, split_i))
next_phase = copy.copy(phase)
next_phase.scopes = next_scopes
return (phase, next_phase) | python | def split(self):
"""Split the phase.
When a phase is exhausted, it gets split into a pair of phases to be
further solved. The split happens like so:
1) Select the first unsolved package scope.
2) Find some common dependency in the first N variants of the scope.
3) Split the scope into two: [:N] and [N:].
4) Create two copies of the phase, containing each half of the split
scope.
The result of this split is that we have a new phase (the first phase),
which contains a package scope with a common dependency. This
dependency can now be intersected with the current resolve, thus
progressing it.
Returns:
A 2-tuple of _ResolvePhase objects, where the first phase is the
best contender for resolving.
"""
assert(self.status == SolverStatus.exhausted)
scopes = []
next_scopes = []
split_i = None
for i, scope in enumerate(self.scopes):
if split_i is None:
r = scope.split()
if r is not None:
scope_, next_scope = r
scopes.append(scope_)
next_scopes.append(next_scope)
split_i = i
continue
scopes.append(scope)
next_scopes.append(scope)
assert split_i is not None
phase = copy.copy(self)
phase.scopes = scopes
phase.status = SolverStatus.pending
phase.changed_scopes_i = set([split_i])
# because a scope was narrowed by a split, other scopes need to be
# reduced against it
#for i in range(len(phase.scopes)):
# if i != split_i:
# phase.pending_reducts.add((i, split_i))
next_phase = copy.copy(phase)
next_phase.scopes = next_scopes
return (phase, next_phase) | [
"def",
"split",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"status",
"==",
"SolverStatus",
".",
"exhausted",
")",
"scopes",
"=",
"[",
"]",
"next_scopes",
"=",
"[",
"]",
"split_i",
"=",
"None",
"for",
"i",
",",
"scope",
"in",
"enumerate",
"(",... | Split the phase.
When a phase is exhausted, it gets split into a pair of phases to be
further solved. The split happens like so:
1) Select the first unsolved package scope.
2) Find some common dependency in the first N variants of the scope.
3) Split the scope into two: [:N] and [N:].
4) Create two copies of the phase, containing each half of the split
scope.
The result of this split is that we have a new phase (the first phase),
which contains a package scope with a common dependency. This
dependency can now be intersected with the current resolve, thus
progressing it.
Returns:
A 2-tuple of _ResolvePhase objects, where the first phase is the
best contender for resolving. | [
"Split",
"the",
"phase",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1412-L1466 | train | 227,309 |
nerdvegas/rez | src/rez/solver.py | Solver.status | def status(self):
"""Return the current status of the solve.
Returns:
SolverStatus: Enum representation of the state of the solver.
"""
if self.request_list.conflict:
return SolverStatus.failed
if self.callback_return == SolverCallbackReturn.fail:
# the solve has failed because a callback has nominated the most
# recent failure as the reason.
return SolverStatus.failed
st = self.phase_stack[-1].status
if st == SolverStatus.cyclic:
return SolverStatus.failed
elif len(self.phase_stack) > 1:
if st == SolverStatus.solved:
return SolverStatus.solved
else:
return SolverStatus.unsolved
elif st in (SolverStatus.pending, SolverStatus.exhausted):
return SolverStatus.unsolved
else:
return st | python | def status(self):
"""Return the current status of the solve.
Returns:
SolverStatus: Enum representation of the state of the solver.
"""
if self.request_list.conflict:
return SolverStatus.failed
if self.callback_return == SolverCallbackReturn.fail:
# the solve has failed because a callback has nominated the most
# recent failure as the reason.
return SolverStatus.failed
st = self.phase_stack[-1].status
if st == SolverStatus.cyclic:
return SolverStatus.failed
elif len(self.phase_stack) > 1:
if st == SolverStatus.solved:
return SolverStatus.solved
else:
return SolverStatus.unsolved
elif st in (SolverStatus.pending, SolverStatus.exhausted):
return SolverStatus.unsolved
else:
return st | [
"def",
"status",
"(",
"self",
")",
":",
"if",
"self",
".",
"request_list",
".",
"conflict",
":",
"return",
"SolverStatus",
".",
"failed",
"if",
"self",
".",
"callback_return",
"==",
"SolverCallbackReturn",
".",
"fail",
":",
"# the solve has failed because a callba... | Return the current status of the solve.
Returns:
SolverStatus: Enum representation of the state of the solver. | [
"Return",
"the",
"current",
"status",
"of",
"the",
"solve",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1881-L1906 | train | 227,310 |
nerdvegas/rez | src/rez/solver.py | Solver.num_fails | def num_fails(self):
"""Return the number of failed solve steps that have been executed.
Note that num_solves is inclusive of failures."""
n = len(self.failed_phase_list)
if self.phase_stack[-1].status in (SolverStatus.failed, SolverStatus.cyclic):
n += 1
return n | python | def num_fails(self):
"""Return the number of failed solve steps that have been executed.
Note that num_solves is inclusive of failures."""
n = len(self.failed_phase_list)
if self.phase_stack[-1].status in (SolverStatus.failed, SolverStatus.cyclic):
n += 1
return n | [
"def",
"num_fails",
"(",
"self",
")",
":",
"n",
"=",
"len",
"(",
"self",
".",
"failed_phase_list",
")",
"if",
"self",
".",
"phase_stack",
"[",
"-",
"1",
"]",
".",
"status",
"in",
"(",
"SolverStatus",
".",
"failed",
",",
"SolverStatus",
".",
"cyclic",
... | Return the number of failed solve steps that have been executed.
Note that num_solves is inclusive of failures. | [
"Return",
"the",
"number",
"of",
"failed",
"solve",
"steps",
"that",
"have",
"been",
"executed",
".",
"Note",
"that",
"num_solves",
"is",
"inclusive",
"of",
"failures",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1914-L1920 | train | 227,311 |
nerdvegas/rez | src/rez/solver.py | Solver.resolved_packages | def resolved_packages(self):
"""Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful.
"""
if (self.status != SolverStatus.solved):
return None
final_phase = self.phase_stack[-1]
return final_phase._get_solved_variants() | python | def resolved_packages(self):
"""Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful.
"""
if (self.status != SolverStatus.solved):
return None
final_phase = self.phase_stack[-1]
return final_phase._get_solved_variants() | [
"def",
"resolved_packages",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"status",
"!=",
"SolverStatus",
".",
"solved",
")",
":",
"return",
"None",
"final_phase",
"=",
"self",
".",
"phase_stack",
"[",
"-",
"1",
"]",
"return",
"final_phase",
".",
"_get_... | Return a list of PackageVariant objects, or None if the resolve did
not complete or was unsuccessful. | [
"Return",
"a",
"list",
"of",
"PackageVariant",
"objects",
"or",
"None",
"if",
"the",
"resolve",
"did",
"not",
"complete",
"or",
"was",
"unsuccessful",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1928-L1936 | train | 227,312 |
nerdvegas/rez | src/rez/solver.py | Solver.reset | def reset(self):
"""Reset the solver, removing any current solve."""
if not self.request_list.conflict:
phase = _ResolvePhase(self.request_list.requirements, solver=self)
self.pr("resetting...")
self._init()
self._push_phase(phase) | python | def reset(self):
"""Reset the solver, removing any current solve."""
if not self.request_list.conflict:
phase = _ResolvePhase(self.request_list.requirements, solver=self)
self.pr("resetting...")
self._init()
self._push_phase(phase) | [
"def",
"reset",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"request_list",
".",
"conflict",
":",
"phase",
"=",
"_ResolvePhase",
"(",
"self",
".",
"request_list",
".",
"requirements",
",",
"solver",
"=",
"self",
")",
"self",
".",
"pr",
"(",
"\"res... | Reset the solver, removing any current solve. | [
"Reset",
"the",
"solver",
"removing",
"any",
"current",
"solve",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1938-L1944 | train | 227,313 |
nerdvegas/rez | src/rez/solver.py | Solver.solve | def solve(self):
"""Attempt to solve the request.
"""
if self.solve_begun:
raise ResolveError("cannot run solve() on a solve that has "
"already been started")
t1 = time.time()
pt1 = package_repo_stats.package_load_time
# iteratively solve phases
while self.status == SolverStatus.unsolved:
self.solve_step()
if self.status == SolverStatus.unsolved and not self._do_callback():
break
self.load_time = package_repo_stats.package_load_time - pt1
self.solve_time = time.time() - t1
# print stats
if self.pr.verbosity > 2:
from pprint import pformat
self.pr.subheader("SOLVE STATS:")
self.pr(pformat(self.solve_stats))
elif self.print_stats:
from pprint import pformat
data = {"solve_stats": self.solve_stats}
print >> (self.buf or sys.stdout), pformat(data) | python | def solve(self):
"""Attempt to solve the request.
"""
if self.solve_begun:
raise ResolveError("cannot run solve() on a solve that has "
"already been started")
t1 = time.time()
pt1 = package_repo_stats.package_load_time
# iteratively solve phases
while self.status == SolverStatus.unsolved:
self.solve_step()
if self.status == SolverStatus.unsolved and not self._do_callback():
break
self.load_time = package_repo_stats.package_load_time - pt1
self.solve_time = time.time() - t1
# print stats
if self.pr.verbosity > 2:
from pprint import pformat
self.pr.subheader("SOLVE STATS:")
self.pr(pformat(self.solve_stats))
elif self.print_stats:
from pprint import pformat
data = {"solve_stats": self.solve_stats}
print >> (self.buf or sys.stdout), pformat(data) | [
"def",
"solve",
"(",
"self",
")",
":",
"if",
"self",
".",
"solve_begun",
":",
"raise",
"ResolveError",
"(",
"\"cannot run solve() on a solve that has \"",
"\"already been started\"",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"pt1",
"=",
"package_repo_stats",
... | Attempt to solve the request. | [
"Attempt",
"to",
"solve",
"the",
"request",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1946-L1974 | train | 227,314 |
nerdvegas/rez | src/rez/solver.py | Solver.solve_step | def solve_step(self):
"""Perform a single solve step.
"""
self.solve_begun = True
if self.status != SolverStatus.unsolved:
return
if self.pr:
self.pr.header("SOLVE #%d (%d fails so far)...",
self.solve_count + 1, self.num_fails)
phase = self._pop_phase()
if phase.status == SolverStatus.failed: # a previously failed phase
self.pr("discarded failed phase, fetching previous unsolved phase...")
self.failed_phase_list.append(phase)
phase = self._pop_phase()
if phase.status == SolverStatus.exhausted:
self.pr.subheader("SPLITTING:")
phase, next_phase = phase.split()
self._push_phase(next_phase)
if self.pr:
self.pr("new phase: %s", phase)
new_phase = phase.solve()
self.solve_count += 1
if new_phase.status == SolverStatus.failed:
self.pr.subheader("FAILED:")
self._push_phase(new_phase)
if self.pr and len(self.phase_stack) == 1:
self.pr.header("FAIL: there is no solution")
elif new_phase.status == SolverStatus.solved:
# solved, but there may be cyclic dependencies
self.pr.subheader("SOLVED:")
final_phase = new_phase.finalise()
self._push_phase(final_phase)
if self.pr:
if final_phase.status == SolverStatus.cyclic:
self.pr.header("FAIL: a cycle was detected")
else:
self.pr.header("SUCCESS")
else:
self.pr.subheader("EXHAUSTED:")
assert(new_phase.status == SolverStatus.exhausted)
self._push_phase(new_phase) | python | def solve_step(self):
"""Perform a single solve step.
"""
self.solve_begun = True
if self.status != SolverStatus.unsolved:
return
if self.pr:
self.pr.header("SOLVE #%d (%d fails so far)...",
self.solve_count + 1, self.num_fails)
phase = self._pop_phase()
if phase.status == SolverStatus.failed: # a previously failed phase
self.pr("discarded failed phase, fetching previous unsolved phase...")
self.failed_phase_list.append(phase)
phase = self._pop_phase()
if phase.status == SolverStatus.exhausted:
self.pr.subheader("SPLITTING:")
phase, next_phase = phase.split()
self._push_phase(next_phase)
if self.pr:
self.pr("new phase: %s", phase)
new_phase = phase.solve()
self.solve_count += 1
if new_phase.status == SolverStatus.failed:
self.pr.subheader("FAILED:")
self._push_phase(new_phase)
if self.pr and len(self.phase_stack) == 1:
self.pr.header("FAIL: there is no solution")
elif new_phase.status == SolverStatus.solved:
# solved, but there may be cyclic dependencies
self.pr.subheader("SOLVED:")
final_phase = new_phase.finalise()
self._push_phase(final_phase)
if self.pr:
if final_phase.status == SolverStatus.cyclic:
self.pr.header("FAIL: a cycle was detected")
else:
self.pr.header("SUCCESS")
else:
self.pr.subheader("EXHAUSTED:")
assert(new_phase.status == SolverStatus.exhausted)
self._push_phase(new_phase) | [
"def",
"solve_step",
"(",
"self",
")",
":",
"self",
".",
"solve_begun",
"=",
"True",
"if",
"self",
".",
"status",
"!=",
"SolverStatus",
".",
"unsolved",
":",
"return",
"if",
"self",
".",
"pr",
":",
"self",
".",
"pr",
".",
"header",
"(",
"\"SOLVE #%d (%... | Perform a single solve step. | [
"Perform",
"a",
"single",
"solve",
"step",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2013-L2062 | train | 227,315 |
nerdvegas/rez | src/rez/solver.py | Solver.failure_reason | def failure_reason(self, failure_index=None):
"""Get the reason for a failure.
Args:
failure_index: Index of the fail to return the graph for (can be
negative). If None, the most appropriate failure is chosen
according to these rules:
- If the fail is cyclic, the most recent fail (the one containing
the cycle) is used;
- If a callback has caused a failure, the most recent fail is used;
- Otherwise, the first fail is used.
Returns:
A `FailureReason` subclass instance describing the failure.
"""
phase, _ = self._get_failed_phase(failure_index)
return phase.failure_reason | python | def failure_reason(self, failure_index=None):
"""Get the reason for a failure.
Args:
failure_index: Index of the fail to return the graph for (can be
negative). If None, the most appropriate failure is chosen
according to these rules:
- If the fail is cyclic, the most recent fail (the one containing
the cycle) is used;
- If a callback has caused a failure, the most recent fail is used;
- Otherwise, the first fail is used.
Returns:
A `FailureReason` subclass instance describing the failure.
"""
phase, _ = self._get_failed_phase(failure_index)
return phase.failure_reason | [
"def",
"failure_reason",
"(",
"self",
",",
"failure_index",
"=",
"None",
")",
":",
"phase",
",",
"_",
"=",
"self",
".",
"_get_failed_phase",
"(",
"failure_index",
")",
"return",
"phase",
".",
"failure_reason"
] | Get the reason for a failure.
Args:
failure_index: Index of the fail to return the graph for (can be
negative). If None, the most appropriate failure is chosen
according to these rules:
- If the fail is cyclic, the most recent fail (the one containing
the cycle) is used;
- If a callback has caused a failure, the most recent fail is used;
- Otherwise, the first fail is used.
Returns:
A `FailureReason` subclass instance describing the failure. | [
"Get",
"the",
"reason",
"for",
"a",
"failure",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2064-L2080 | train | 227,316 |
nerdvegas/rez | src/rez/solver.py | Solver.failure_packages | def failure_packages(self, failure_index=None):
"""Get packages involved in a failure.
Args:
failure_index: See `failure_reason`.
Returns:
A list of Requirement objects.
"""
phase, _ = self._get_failed_phase(failure_index)
fr = phase.failure_reason
return fr.involved_requirements() if fr else None | python | def failure_packages(self, failure_index=None):
"""Get packages involved in a failure.
Args:
failure_index: See `failure_reason`.
Returns:
A list of Requirement objects.
"""
phase, _ = self._get_failed_phase(failure_index)
fr = phase.failure_reason
return fr.involved_requirements() if fr else None | [
"def",
"failure_packages",
"(",
"self",
",",
"failure_index",
"=",
"None",
")",
":",
"phase",
",",
"_",
"=",
"self",
".",
"_get_failed_phase",
"(",
"failure_index",
")",
"fr",
"=",
"phase",
".",
"failure_reason",
"return",
"fr",
".",
"involved_requirements",
... | Get packages involved in a failure.
Args:
failure_index: See `failure_reason`.
Returns:
A list of Requirement objects. | [
"Get",
"packages",
"involved",
"in",
"a",
"failure",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2092-L2103 | train | 227,317 |
nerdvegas/rez | src/rez/solver.py | Solver.get_graph | def get_graph(self):
"""Returns the most recent solve graph.
This gives a graph showing the latest state of the solve. The specific
graph returned depends on the solve status. When status is:
unsolved: latest unsolved graph is returned;
solved: final solved graph is returned;
failed: most appropriate failure graph is returned (see `failure_reason`);
cyclic: last failure is returned (contains cycle).
Returns:
A pygraph.digraph object.
"""
st = self.status
if st in (SolverStatus.solved, SolverStatus.unsolved):
phase = self._latest_nonfailed_phase()
return phase.get_graph()
else:
return self.get_fail_graph() | python | def get_graph(self):
"""Returns the most recent solve graph.
This gives a graph showing the latest state of the solve. The specific
graph returned depends on the solve status. When status is:
unsolved: latest unsolved graph is returned;
solved: final solved graph is returned;
failed: most appropriate failure graph is returned (see `failure_reason`);
cyclic: last failure is returned (contains cycle).
Returns:
A pygraph.digraph object.
"""
st = self.status
if st in (SolverStatus.solved, SolverStatus.unsolved):
phase = self._latest_nonfailed_phase()
return phase.get_graph()
else:
return self.get_fail_graph() | [
"def",
"get_graph",
"(",
"self",
")",
":",
"st",
"=",
"self",
".",
"status",
"if",
"st",
"in",
"(",
"SolverStatus",
".",
"solved",
",",
"SolverStatus",
".",
"unsolved",
")",
":",
"phase",
"=",
"self",
".",
"_latest_nonfailed_phase",
"(",
")",
"return",
... | Returns the most recent solve graph.
This gives a graph showing the latest state of the solve. The specific
graph returned depends on the solve status. When status is:
unsolved: latest unsolved graph is returned;
solved: final solved graph is returned;
failed: most appropriate failure graph is returned (see `failure_reason`);
cyclic: last failure is returned (contains cycle).
Returns:
A pygraph.digraph object. | [
"Returns",
"the",
"most",
"recent",
"solve",
"graph",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2105-L2123 | train | 227,318 |
nerdvegas/rez | src/rez/solver.py | Solver.get_fail_graph | def get_fail_graph(self, failure_index=None):
"""Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
A pygraph.digraph object.
"""
phase, _ = self._get_failed_phase(failure_index)
return phase.get_graph() | python | def get_fail_graph(self, failure_index=None):
"""Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
A pygraph.digraph object.
"""
phase, _ = self._get_failed_phase(failure_index)
return phase.get_graph() | [
"def",
"get_fail_graph",
"(",
"self",
",",
"failure_index",
"=",
"None",
")",
":",
"phase",
",",
"_",
"=",
"self",
".",
"_get_failed_phase",
"(",
"failure_index",
")",
"return",
"phase",
".",
"get_graph",
"(",
")"
] | Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
A pygraph.digraph object. | [
"Returns",
"a",
"graph",
"showing",
"a",
"solve",
"failure",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2125-L2135 | train | 227,319 |
nerdvegas/rez | src/rez/solver.py | Solver.dump | def dump(self):
"""Print a formatted summary of the current solve state."""
from rez.utils.formatting import columnise
rows = []
for i, phase in enumerate(self.phase_stack):
rows.append((self._depth_label(i), phase.status, str(phase)))
print "status: %s (%s)" % (self.status.name, self.status.description)
print "initial request: %s" % str(self.request_list)
print
print "solve stack:"
print '\n'.join(columnise(rows))
if self.failed_phase_list:
rows = []
for i, phase in enumerate(self.failed_phase_list):
rows.append(("#%d" % i, phase.status, str(phase)))
print
print "previous failures:"
print '\n'.join(columnise(rows)) | python | def dump(self):
"""Print a formatted summary of the current solve state."""
from rez.utils.formatting import columnise
rows = []
for i, phase in enumerate(self.phase_stack):
rows.append((self._depth_label(i), phase.status, str(phase)))
print "status: %s (%s)" % (self.status.name, self.status.description)
print "initial request: %s" % str(self.request_list)
print
print "solve stack:"
print '\n'.join(columnise(rows))
if self.failed_phase_list:
rows = []
for i, phase in enumerate(self.failed_phase_list):
rows.append(("#%d" % i, phase.status, str(phase)))
print
print "previous failures:"
print '\n'.join(columnise(rows)) | [
"def",
"dump",
"(",
"self",
")",
":",
"from",
"rez",
".",
"utils",
".",
"formatting",
"import",
"columnise",
"rows",
"=",
"[",
"]",
"for",
"i",
",",
"phase",
"in",
"enumerate",
"(",
"self",
".",
"phase_stack",
")",
":",
"rows",
".",
"append",
"(",
... | Print a formatted summary of the current solve state. | [
"Print",
"a",
"formatted",
"summary",
"of",
"the",
"current",
"solve",
"state",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2137-L2157 | train | 227,320 |
nerdvegas/rez | src/rez/utils/filesystem.py | make_path_writable | def make_path_writable(path):
"""Temporarily make `path` writable, if possible.
Does nothing if:
- config setting 'make_package_temporarily_writable' is False;
- this can't be done (eg we don't own `path`).
Args:
path (str): Path to make temporarily writable
"""
from rez.config import config
try:
orig_mode = os.stat(path).st_mode
new_mode = orig_mode
if config.make_package_temporarily_writable and \
not os.access(path, os.W_OK):
new_mode = orig_mode | stat.S_IWUSR
# make writable
if new_mode != orig_mode:
os.chmod(path, new_mode)
except OSError:
# ignore access errors here, and just do nothing. It will be more
# intuitive for the calling code to fail on access instead.
#
orig_mode = None
new_mode = None
# yield, then reset mode back to original
try:
yield
finally:
if new_mode != orig_mode:
os.chmod(path, orig_mode) | python | def make_path_writable(path):
"""Temporarily make `path` writable, if possible.
Does nothing if:
- config setting 'make_package_temporarily_writable' is False;
- this can't be done (eg we don't own `path`).
Args:
path (str): Path to make temporarily writable
"""
from rez.config import config
try:
orig_mode = os.stat(path).st_mode
new_mode = orig_mode
if config.make_package_temporarily_writable and \
not os.access(path, os.W_OK):
new_mode = orig_mode | stat.S_IWUSR
# make writable
if new_mode != orig_mode:
os.chmod(path, new_mode)
except OSError:
# ignore access errors here, and just do nothing. It will be more
# intuitive for the calling code to fail on access instead.
#
orig_mode = None
new_mode = None
# yield, then reset mode back to original
try:
yield
finally:
if new_mode != orig_mode:
os.chmod(path, orig_mode) | [
"def",
"make_path_writable",
"(",
"path",
")",
":",
"from",
"rez",
".",
"config",
"import",
"config",
"try",
":",
"orig_mode",
"=",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
"new_mode",
"=",
"orig_mode",
"if",
"config",
".",
"make_package_tempor... | Temporarily make `path` writable, if possible.
Does nothing if:
- config setting 'make_package_temporarily_writable' is False;
- this can't be done (eg we don't own `path`).
Args:
path (str): Path to make temporarily writable | [
"Temporarily",
"make",
"path",
"writable",
"if",
"possible",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L76-L112 | train | 227,321 |
nerdvegas/rez | src/rez/utils/filesystem.py | get_existing_path | def get_existing_path(path, topmost_path=None):
"""Get the longest parent path in `path` that exists.
If `path` exists, it is returned.
Args:
path (str): Path to test
topmost_path (str): Do not test this path or above
Returns:
str: Existing path, or None if no path was found.
"""
prev_path = None
if topmost_path:
topmost_path = os.path.normpath(topmost_path)
while True:
if os.path.exists(path):
return path
path = os.path.dirname(path)
if path == prev_path:
return None
if topmost_path and os.path.normpath(path) == topmost_path:
return None
prev_path = path | python | def get_existing_path(path, topmost_path=None):
"""Get the longest parent path in `path` that exists.
If `path` exists, it is returned.
Args:
path (str): Path to test
topmost_path (str): Do not test this path or above
Returns:
str: Existing path, or None if no path was found.
"""
prev_path = None
if topmost_path:
topmost_path = os.path.normpath(topmost_path)
while True:
if os.path.exists(path):
return path
path = os.path.dirname(path)
if path == prev_path:
return None
if topmost_path and os.path.normpath(path) == topmost_path:
return None
prev_path = path | [
"def",
"get_existing_path",
"(",
"path",
",",
"topmost_path",
"=",
"None",
")",
":",
"prev_path",
"=",
"None",
"if",
"topmost_path",
":",
"topmost_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"topmost_path",
")",
"while",
"True",
":",
"if",
"os",
... | Get the longest parent path in `path` that exists.
If `path` exists, it is returned.
Args:
path (str): Path to test
topmost_path (str): Do not test this path or above
Returns:
str: Existing path, or None if no path was found. | [
"Get",
"the",
"longest",
"parent",
"path",
"in",
"path",
"that",
"exists",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L126-L154 | train | 227,322 |
nerdvegas/rez | src/rez/utils/filesystem.py | safe_makedirs | def safe_makedirs(path):
"""Safe makedirs.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if not os.path.exists(path):
raise | python | def safe_makedirs(path):
"""Safe makedirs.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if not os.path.exists(path):
raise | [
"def",
"safe_makedirs",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"("... | Safe makedirs.
Works in a multithreaded scenario. | [
"Safe",
"makedirs",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L157-L167 | train | 227,323 |
nerdvegas/rez | src/rez/utils/filesystem.py | safe_remove | def safe_remove(path):
"""Safely remove the given file or directory.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
return
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
if os.path.exists(path):
raise | python | def safe_remove(path):
"""Safely remove the given file or directory.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
return
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
if os.path.exists(path):
raise | [
"def",
"safe_remove",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"try",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"not",
"os",
".",
"path",
".",
"islink",
"(... | Safely remove the given file or directory.
Works in a multithreaded scenario. | [
"Safely",
"remove",
"the",
"given",
"file",
"or",
"directory",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L170-L185 | train | 227,324 |
nerdvegas/rez | src/rez/utils/filesystem.py | replacing_symlink | def replacing_symlink(source, link_name):
"""Create symlink that overwrites any existing target.
"""
with make_tmp_name(link_name) as tmp_link_name:
os.symlink(source, tmp_link_name)
replace_file_or_dir(link_name, tmp_link_name) | python | def replacing_symlink(source, link_name):
"""Create symlink that overwrites any existing target.
"""
with make_tmp_name(link_name) as tmp_link_name:
os.symlink(source, tmp_link_name)
replace_file_or_dir(link_name, tmp_link_name) | [
"def",
"replacing_symlink",
"(",
"source",
",",
"link_name",
")",
":",
"with",
"make_tmp_name",
"(",
"link_name",
")",
"as",
"tmp_link_name",
":",
"os",
".",
"symlink",
"(",
"source",
",",
"tmp_link_name",
")",
"replace_file_or_dir",
"(",
"link_name",
",",
"tm... | Create symlink that overwrites any existing target. | [
"Create",
"symlink",
"that",
"overwrites",
"any",
"existing",
"target",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L188-L193 | train | 227,325 |
nerdvegas/rez | src/rez/utils/filesystem.py | replacing_copy | def replacing_copy(src, dest, follow_symlinks=False):
"""Perform copy that overwrites any existing target.
Will copy/copytree `src` to `dest`, and will remove `dest` if it exists,
regardless of what it is.
If `follow_symlinks` is False, symlinks are preserved, otherwise their
contents are copied.
Note that this behavior is different to `shutil.copy`, which copies src
into dest if dest is an existing dir.
"""
with make_tmp_name(dest) as tmp_dest:
if os.path.islink(src) and not follow_symlinks:
# special case - copy just a symlink
src_ = os.readlink(src)
os.symlink(src_, tmp_dest)
elif os.path.isdir(src):
# copy a dir
shutil.copytree(src, tmp_dest, symlinks=(not follow_symlinks))
else:
# copy a file
shutil.copy2(src, tmp_dest)
replace_file_or_dir(dest, tmp_dest) | python | def replacing_copy(src, dest, follow_symlinks=False):
"""Perform copy that overwrites any existing target.
Will copy/copytree `src` to `dest`, and will remove `dest` if it exists,
regardless of what it is.
If `follow_symlinks` is False, symlinks are preserved, otherwise their
contents are copied.
Note that this behavior is different to `shutil.copy`, which copies src
into dest if dest is an existing dir.
"""
with make_tmp_name(dest) as tmp_dest:
if os.path.islink(src) and not follow_symlinks:
# special case - copy just a symlink
src_ = os.readlink(src)
os.symlink(src_, tmp_dest)
elif os.path.isdir(src):
# copy a dir
shutil.copytree(src, tmp_dest, symlinks=(not follow_symlinks))
else:
# copy a file
shutil.copy2(src, tmp_dest)
replace_file_or_dir(dest, tmp_dest) | [
"def",
"replacing_copy",
"(",
"src",
",",
"dest",
",",
"follow_symlinks",
"=",
"False",
")",
":",
"with",
"make_tmp_name",
"(",
"dest",
")",
"as",
"tmp_dest",
":",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"src",
")",
"and",
"not",
"follow_symlinks",
... | Perform copy that overwrites any existing target.
Will copy/copytree `src` to `dest`, and will remove `dest` if it exists,
regardless of what it is.
If `follow_symlinks` is False, symlinks are preserved, otherwise their
contents are copied.
Note that this behavior is different to `shutil.copy`, which copies src
into dest if dest is an existing dir. | [
"Perform",
"copy",
"that",
"overwrites",
"any",
"existing",
"target",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L196-L220 | train | 227,326 |
nerdvegas/rez | src/rez/utils/filesystem.py | replace_file_or_dir | def replace_file_or_dir(dest, source):
"""Replace `dest` with `source`.
Acts like an `os.rename` if `dest` does not exist. Otherwise, `dest` is
deleted and `src` is renamed to `dest`.
"""
from rez.vendor.atomicwrites import replace_atomic
if not os.path.exists(dest):
try:
os.rename(source, dest)
return
except:
if not os.path.exists(dest):
raise
try:
replace_atomic(source, dest)
return
except:
pass
with make_tmp_name(dest) as tmp_dest:
os.rename(dest, tmp_dest)
os.rename(source, dest) | python | def replace_file_or_dir(dest, source):
"""Replace `dest` with `source`.
Acts like an `os.rename` if `dest` does not exist. Otherwise, `dest` is
deleted and `src` is renamed to `dest`.
"""
from rez.vendor.atomicwrites import replace_atomic
if not os.path.exists(dest):
try:
os.rename(source, dest)
return
except:
if not os.path.exists(dest):
raise
try:
replace_atomic(source, dest)
return
except:
pass
with make_tmp_name(dest) as tmp_dest:
os.rename(dest, tmp_dest)
os.rename(source, dest) | [
"def",
"replace_file_or_dir",
"(",
"dest",
",",
"source",
")",
":",
"from",
"rez",
".",
"vendor",
".",
"atomicwrites",
"import",
"replace_atomic",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"try",
":",
"os",
".",
"rename",
"(... | Replace `dest` with `source`.
Acts like an `os.rename` if `dest` does not exist. Otherwise, `dest` is
deleted and `src` is renamed to `dest`. | [
"Replace",
"dest",
"with",
"source",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L223-L247 | train | 227,327 |
nerdvegas/rez | src/rez/utils/filesystem.py | make_tmp_name | def make_tmp_name(name):
"""Generates a tmp name for a file or dir.
This is a tempname that sits in the same dir as `name`. If it exists on
disk at context exit time, it is deleted.
"""
path, base = os.path.split(name)
tmp_base = ".tmp-%s-%s" % (base, uuid4().hex)
tmp_name = os.path.join(path, tmp_base)
try:
yield tmp_name
finally:
safe_remove(tmp_name) | python | def make_tmp_name(name):
"""Generates a tmp name for a file or dir.
This is a tempname that sits in the same dir as `name`. If it exists on
disk at context exit time, it is deleted.
"""
path, base = os.path.split(name)
tmp_base = ".tmp-%s-%s" % (base, uuid4().hex)
tmp_name = os.path.join(path, tmp_base)
try:
yield tmp_name
finally:
safe_remove(tmp_name) | [
"def",
"make_tmp_name",
"(",
"name",
")",
":",
"path",
",",
"base",
"=",
"os",
".",
"path",
".",
"split",
"(",
"name",
")",
"tmp_base",
"=",
"\".tmp-%s-%s\"",
"%",
"(",
"base",
",",
"uuid4",
"(",
")",
".",
"hex",
")",
"tmp_name",
"=",
"os",
".",
... | Generates a tmp name for a file or dir.
This is a tempname that sits in the same dir as `name`. If it exists on
disk at context exit time, it is deleted. | [
"Generates",
"a",
"tmp",
"name",
"for",
"a",
"file",
"or",
"dir",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L267-L280 | train | 227,328 |
nerdvegas/rez | src/rez/utils/filesystem.py | is_subdirectory | def is_subdirectory(path_a, path_b):
"""Returns True if `path_a` is a subdirectory of `path_b`."""
path_a = os.path.realpath(path_a)
path_b = os.path.realpath(path_b)
relative = os.path.relpath(path_a, path_b)
return (not relative.startswith(os.pardir + os.sep)) | python | def is_subdirectory(path_a, path_b):
"""Returns True if `path_a` is a subdirectory of `path_b`."""
path_a = os.path.realpath(path_a)
path_b = os.path.realpath(path_b)
relative = os.path.relpath(path_a, path_b)
return (not relative.startswith(os.pardir + os.sep)) | [
"def",
"is_subdirectory",
"(",
"path_a",
",",
"path_b",
")",
":",
"path_a",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"path_a",
")",
"path_b",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"path_b",
")",
"relative",
"=",
"os",
".",
"path",
".",
... | Returns True if `path_a` is a subdirectory of `path_b`. | [
"Returns",
"True",
"if",
"path_a",
"is",
"a",
"subdirectory",
"of",
"path_b",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L283-L288 | train | 227,329 |
nerdvegas/rez | src/rez/utils/filesystem.py | find_matching_symlink | def find_matching_symlink(path, source):
"""Find a symlink under `path` that points at `source`.
If source is relative, it is considered relative to `path`.
Returns:
str: Name of symlink found, or None.
"""
def to_abs(target):
if os.path.isabs(target):
return target
else:
return os.path.normpath(os.path.join(path, target))
abs_source = to_abs(source)
for name in os.listdir(path):
linkpath = os.path.join(path, name)
if os.path.islink:
source_ = os.readlink(linkpath)
if to_abs(source_) == abs_source:
return name
return None | python | def find_matching_symlink(path, source):
"""Find a symlink under `path` that points at `source`.
If source is relative, it is considered relative to `path`.
Returns:
str: Name of symlink found, or None.
"""
def to_abs(target):
if os.path.isabs(target):
return target
else:
return os.path.normpath(os.path.join(path, target))
abs_source = to_abs(source)
for name in os.listdir(path):
linkpath = os.path.join(path, name)
if os.path.islink:
source_ = os.readlink(linkpath)
if to_abs(source_) == abs_source:
return name
return None | [
"def",
"find_matching_symlink",
"(",
"path",
",",
"source",
")",
":",
"def",
"to_abs",
"(",
"target",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"target",
")",
":",
"return",
"target",
"else",
":",
"return",
"os",
".",
"path",
".",
"normpa... | Find a symlink under `path` that points at `source`.
If source is relative, it is considered relative to `path`.
Returns:
str: Name of symlink found, or None. | [
"Find",
"a",
"symlink",
"under",
"path",
"that",
"points",
"at",
"source",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L291-L314 | train | 227,330 |
nerdvegas/rez | src/rez/utils/filesystem.py | copy_or_replace | def copy_or_replace(src, dst):
'''try to copy with mode, and if it fails, try replacing
'''
try:
shutil.copy(src, dst)
except (OSError, IOError), e:
# It's possible that the file existed, but was owned by someone
# else - in that situation, shutil.copy might then fail when it
# tries to copy perms.
# However, it's possible that we have write perms to the dir -
# in which case, we can just delete and replace
import errno
if e.errno == errno.EPERM:
import tempfile
# try copying into a temporary location beside the old
# file - if we have perms to do that, we should have perms
# to then delete the old file, and move the new one into
# place
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
dst_dir, dst_name = os.path.split(dst)
dst_temp = tempfile.mktemp(prefix=dst_name + '.', dir=dst_dir)
shutil.copy(src, dst_temp)
if not os.path.isfile(dst_temp):
raise RuntimeError(
"shutil.copy completed successfully, but path"
" '%s' still did not exist" % dst_temp)
os.remove(dst)
shutil.move(dst_temp, dst) | python | def copy_or_replace(src, dst):
'''try to copy with mode, and if it fails, try replacing
'''
try:
shutil.copy(src, dst)
except (OSError, IOError), e:
# It's possible that the file existed, but was owned by someone
# else - in that situation, shutil.copy might then fail when it
# tries to copy perms.
# However, it's possible that we have write perms to the dir -
# in which case, we can just delete and replace
import errno
if e.errno == errno.EPERM:
import tempfile
# try copying into a temporary location beside the old
# file - if we have perms to do that, we should have perms
# to then delete the old file, and move the new one into
# place
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
dst_dir, dst_name = os.path.split(dst)
dst_temp = tempfile.mktemp(prefix=dst_name + '.', dir=dst_dir)
shutil.copy(src, dst_temp)
if not os.path.isfile(dst_temp):
raise RuntimeError(
"shutil.copy completed successfully, but path"
" '%s' still did not exist" % dst_temp)
os.remove(dst)
shutil.move(dst_temp, dst) | [
"def",
"copy_or_replace",
"(",
"src",
",",
"dst",
")",
":",
"try",
":",
"shutil",
".",
"copy",
"(",
"src",
",",
"dst",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
",",
"e",
":",
"# It's possible that the file existed, but was owned by someone",
"# else... | try to copy with mode, and if it fails, try replacing | [
"try",
"to",
"copy",
"with",
"mode",
"and",
"if",
"it",
"fails",
"try",
"replacing"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L317-L347 | train | 227,331 |
nerdvegas/rez | src/rez/utils/filesystem.py | movetree | def movetree(src, dst):
"""Attempts a move, and falls back to a copy+delete if this fails
"""
try:
shutil.move(src, dst)
except:
copytree(src, dst, symlinks=True, hardlinks=True)
shutil.rmtree(src) | python | def movetree(src, dst):
"""Attempts a move, and falls back to a copy+delete if this fails
"""
try:
shutil.move(src, dst)
except:
copytree(src, dst, symlinks=True, hardlinks=True)
shutil.rmtree(src) | [
"def",
"movetree",
"(",
"src",
",",
"dst",
")",
":",
"try",
":",
"shutil",
".",
"move",
"(",
"src",
",",
"dst",
")",
"except",
":",
"copytree",
"(",
"src",
",",
"dst",
",",
"symlinks",
"=",
"True",
",",
"hardlinks",
"=",
"True",
")",
"shutil",
".... | Attempts a move, and falls back to a copy+delete if this fails | [
"Attempts",
"a",
"move",
"and",
"falls",
"back",
"to",
"a",
"copy",
"+",
"delete",
"if",
"this",
"fails"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L404-L411 | train | 227,332 |
nerdvegas/rez | src/rez/utils/filesystem.py | safe_chmod | def safe_chmod(path, mode):
"""Set the permissions mode on path, but only if it differs from the current mode.
"""
if stat.S_IMODE(os.stat(path).st_mode) != mode:
os.chmod(path, mode) | python | def safe_chmod(path, mode):
"""Set the permissions mode on path, but only if it differs from the current mode.
"""
if stat.S_IMODE(os.stat(path).st_mode) != mode:
os.chmod(path, mode) | [
"def",
"safe_chmod",
"(",
"path",
",",
"mode",
")",
":",
"if",
"stat",
".",
"S_IMODE",
"(",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
")",
"!=",
"mode",
":",
"os",
".",
"chmod",
"(",
"path",
",",
"mode",
")"
] | Set the permissions mode on path, but only if it differs from the current mode. | [
"Set",
"the",
"permissions",
"mode",
"on",
"path",
"but",
"only",
"if",
"it",
"differs",
"from",
"the",
"current",
"mode",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L414-L418 | train | 227,333 |
nerdvegas/rez | src/rez/utils/filesystem.py | encode_filesystem_name | def encode_filesystem_name(input_str):
"""Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
comaptibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt
"""
if isinstance(input_str, str):
input_str = unicode(input_str)
elif not isinstance(input_str, unicode):
raise TypeError("input_str must be a basestring")
as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
result = []
for char in input_str:
if char in as_is:
result.append(char)
elif char == u'_':
result.append('__')
elif char in uppercase:
result.append('_%s' % char.lower())
else:
utf8 = char.encode('utf8')
N = len(utf8)
if N == 1:
N = 0
HH = ''.join('%x' % ord(c) for c in utf8)
result.append('_%d%s' % (N, HH))
return ''.join(result) | python | def encode_filesystem_name(input_str):
"""Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
comaptibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt
"""
if isinstance(input_str, str):
input_str = unicode(input_str)
elif not isinstance(input_str, unicode):
raise TypeError("input_str must be a basestring")
as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
result = []
for char in input_str:
if char in as_is:
result.append(char)
elif char == u'_':
result.append('__')
elif char in uppercase:
result.append('_%s' % char.lower())
else:
utf8 = char.encode('utf8')
N = len(utf8)
if N == 1:
N = 0
HH = ''.join('%x' % ord(c) for c in utf8)
result.append('_%d%s' % (N, HH))
return ''.join(result) | [
"def",
"encode_filesystem_name",
"(",
"input_str",
")",
":",
"if",
"isinstance",
"(",
"input_str",
",",
"str",
")",
":",
"input_str",
"=",
"unicode",
"(",
"input_str",
")",
"elif",
"not",
"isinstance",
"(",
"input_str",
",",
"unicode",
")",
":",
"raise",
"... | Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
comaptibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt | [
"Encodes",
"an",
"arbitrary",
"unicode",
"string",
"to",
"a",
"generic",
"filesystem",
"-",
"compatible",
"non",
"-",
"unicode",
"filename",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L433-L499 | train | 227,334 |
nerdvegas/rez | src/rez/utils/filesystem.py | decode_filesystem_name | def decode_filesystem_name(filename):
"""Decodes a filename encoded using the rules given in encode_filesystem_name
to a unicode string.
"""
result = []
remain = filename
i = 0
while remain:
# use match, to ensure it matches from the start of the string...
match = _FILESYSTEM_TOKEN_RE.match(remain)
if not match:
raise ValueError("incorrectly encoded filesystem name %r"
" (bad index: %d - %r)" % (filename, i,
remain[:2]))
match_str = match.group(0)
match_len = len(match_str)
i += match_len
remain = remain[match_len:]
match_dict = match.groupdict()
if match_dict['as_is']:
result.append(unicode(match_str))
elif match_dict['underscore']:
result.append(u'_')
elif match_dict['uppercase']:
result.append(unicode(match_dict['uppercase'].upper()))
elif match_dict['N']:
N = int(match_dict['N'])
if N == 0:
N = 1
# hex-encoded, so need to grab 2*N chars
bytes_len = 2 * N
i += bytes_len
bytes = remain[:bytes_len]
remain = remain[bytes_len:]
# need this check to ensure that we don't end up eval'ing
# something nasty...
if not _HEX_RE.match(bytes):
raise ValueError("Bad utf8 encoding in name %r"
" (bad index: %d - %r)" % (filename, i, bytes))
bytes_repr = ''.join('\\x%s' % bytes[i:i + 2]
for i in xrange(0, bytes_len, 2))
bytes_repr = "'%s'" % bytes_repr
result.append(eval(bytes_repr).decode('utf8'))
else:
raise ValueError("Unrecognized match type in filesystem name %r"
" (bad index: %d - %r)" % (filename, i, remain[:2]))
return u''.join(result) | python | def decode_filesystem_name(filename):
"""Decodes a filename encoded using the rules given in encode_filesystem_name
to a unicode string.
"""
result = []
remain = filename
i = 0
while remain:
# use match, to ensure it matches from the start of the string...
match = _FILESYSTEM_TOKEN_RE.match(remain)
if not match:
raise ValueError("incorrectly encoded filesystem name %r"
" (bad index: %d - %r)" % (filename, i,
remain[:2]))
match_str = match.group(0)
match_len = len(match_str)
i += match_len
remain = remain[match_len:]
match_dict = match.groupdict()
if match_dict['as_is']:
result.append(unicode(match_str))
elif match_dict['underscore']:
result.append(u'_')
elif match_dict['uppercase']:
result.append(unicode(match_dict['uppercase'].upper()))
elif match_dict['N']:
N = int(match_dict['N'])
if N == 0:
N = 1
# hex-encoded, so need to grab 2*N chars
bytes_len = 2 * N
i += bytes_len
bytes = remain[:bytes_len]
remain = remain[bytes_len:]
# need this check to ensure that we don't end up eval'ing
# something nasty...
if not _HEX_RE.match(bytes):
raise ValueError("Bad utf8 encoding in name %r"
" (bad index: %d - %r)" % (filename, i, bytes))
bytes_repr = ''.join('\\x%s' % bytes[i:i + 2]
for i in xrange(0, bytes_len, 2))
bytes_repr = "'%s'" % bytes_repr
result.append(eval(bytes_repr).decode('utf8'))
else:
raise ValueError("Unrecognized match type in filesystem name %r"
" (bad index: %d - %r)" % (filename, i, remain[:2]))
return u''.join(result) | [
"def",
"decode_filesystem_name",
"(",
"filename",
")",
":",
"result",
"=",
"[",
"]",
"remain",
"=",
"filename",
"i",
"=",
"0",
"while",
"remain",
":",
"# use match, to ensure it matches from the start of the string...",
"match",
"=",
"_FILESYSTEM_TOKEN_RE",
".",
"matc... | Decodes a filename encoded using the rules given in encode_filesystem_name
to a unicode string. | [
"Decodes",
"a",
"filename",
"encoded",
"using",
"the",
"rules",
"given",
"in",
"encode_filesystem_name",
"to",
"a",
"unicode",
"string",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L506-L555 | train | 227,335 |
nerdvegas/rez | src/rez/utils/filesystem.py | walk_up_dirs | def walk_up_dirs(path):
"""Yields absolute directories starting with the given path, and iterating
up through all it's parents, until it reaches a root directory"""
prev_path = None
current_path = os.path.abspath(path)
while current_path != prev_path:
yield current_path
prev_path = current_path
current_path = os.path.dirname(prev_path) | python | def walk_up_dirs(path):
"""Yields absolute directories starting with the given path, and iterating
up through all it's parents, until it reaches a root directory"""
prev_path = None
current_path = os.path.abspath(path)
while current_path != prev_path:
yield current_path
prev_path = current_path
current_path = os.path.dirname(prev_path) | [
"def",
"walk_up_dirs",
"(",
"path",
")",
":",
"prev_path",
"=",
"None",
"current_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"while",
"current_path",
"!=",
"prev_path",
":",
"yield",
"current_path",
"prev_path",
"=",
"current_path",
"cur... | Yields absolute directories starting with the given path, and iterating
up through all it's parents, until it reaches a root directory | [
"Yields",
"absolute",
"directories",
"starting",
"with",
"the",
"given",
"path",
"and",
"iterating",
"up",
"through",
"all",
"it",
"s",
"parents",
"until",
"it",
"reaches",
"a",
"root",
"directory"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L575-L583 | train | 227,336 |
nerdvegas/rez | src/rez/wrapper.py | Wrapper.run | def run(self, *args):
"""Invoke the wrapped script.
Returns:
Return code of the command, or 0 if the command is not run.
"""
if self.prefix_char is None:
prefix_char = config.suite_alias_prefix_char
else:
prefix_char = self.prefix_char
if prefix_char == '':
# empty prefix char means we don't support the '+' args
return self._run_no_args(args)
else:
return self._run(prefix_char, args) | python | def run(self, *args):
"""Invoke the wrapped script.
Returns:
Return code of the command, or 0 if the command is not run.
"""
if self.prefix_char is None:
prefix_char = config.suite_alias_prefix_char
else:
prefix_char = self.prefix_char
if prefix_char == '':
# empty prefix char means we don't support the '+' args
return self._run_no_args(args)
else:
return self._run(prefix_char, args) | [
"def",
"run",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"prefix_char",
"is",
"None",
":",
"prefix_char",
"=",
"config",
".",
"suite_alias_prefix_char",
"else",
":",
"prefix_char",
"=",
"self",
".",
"prefix_char",
"if",
"prefix_char",
"=="... | Invoke the wrapped script.
Returns:
Return code of the command, or 0 if the command is not run. | [
"Invoke",
"the",
"wrapped",
"script",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/wrapper.py#L66-L81 | train | 227,337 |
nerdvegas/rez | src/rez/wrapper.py | Wrapper.print_about | def print_about(self):
"""Print an info message about the tool."""
filepath = os.path.join(self.suite_path, "bin", self.tool_name)
print "Tool: %s" % self.tool_name
print "Path: %s" % filepath
print "Suite: %s" % self.suite_path
msg = "%s (%r)" % (self.context.load_path, self.context_name)
print "Context: %s" % msg
variants = self.context.get_tool_variants(self.tool_name)
if variants:
if len(variants) > 1:
self._print_conflicting(variants)
else:
variant = iter(variants).next()
print "Package: %s" % variant.qualified_package_name
return 0 | python | def print_about(self):
"""Print an info message about the tool."""
filepath = os.path.join(self.suite_path, "bin", self.tool_name)
print "Tool: %s" % self.tool_name
print "Path: %s" % filepath
print "Suite: %s" % self.suite_path
msg = "%s (%r)" % (self.context.load_path, self.context_name)
print "Context: %s" % msg
variants = self.context.get_tool_variants(self.tool_name)
if variants:
if len(variants) > 1:
self._print_conflicting(variants)
else:
variant = iter(variants).next()
print "Package: %s" % variant.qualified_package_name
return 0 | [
"def",
"print_about",
"(",
"self",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"suite_path",
",",
"\"bin\"",
",",
"self",
".",
"tool_name",
")",
"print",
"\"Tool: %s\"",
"%",
"self",
".",
"tool_name",
"print",
"\"Path... | Print an info message about the tool. | [
"Print",
"an",
"info",
"message",
"about",
"the",
"tool",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/wrapper.py#L202-L219 | train | 227,338 |
nerdvegas/rez | src/rez/wrapper.py | Wrapper.print_package_versions | def print_package_versions(self):
"""Print a list of versions of the package this tool comes from, and
indicate which version this tool is from."""
variants = self.context.get_tool_variants(self.tool_name)
if variants:
if len(variants) > 1:
self._print_conflicting(variants)
return 1
else:
from rez.packages_ import iter_packages
variant = iter(variants).next()
it = iter_packages(name=variant.name)
rows = []
colors = []
for pkg in sorted(it, key=lambda x: x.version, reverse=True):
if pkg.version == variant.version:
name = "* %s" % pkg.qualified_name
col = heading
else:
name = " %s" % pkg.qualified_name
col = local if pkg.is_local else None
label = "(local)" if pkg.is_local else ""
rows.append((name, pkg.path, label))
colors.append(col)
_pr = Printer()
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
return 0 | python | def print_package_versions(self):
"""Print a list of versions of the package this tool comes from, and
indicate which version this tool is from."""
variants = self.context.get_tool_variants(self.tool_name)
if variants:
if len(variants) > 1:
self._print_conflicting(variants)
return 1
else:
from rez.packages_ import iter_packages
variant = iter(variants).next()
it = iter_packages(name=variant.name)
rows = []
colors = []
for pkg in sorted(it, key=lambda x: x.version, reverse=True):
if pkg.version == variant.version:
name = "* %s" % pkg.qualified_name
col = heading
else:
name = " %s" % pkg.qualified_name
col = local if pkg.is_local else None
label = "(local)" if pkg.is_local else ""
rows.append((name, pkg.path, label))
colors.append(col)
_pr = Printer()
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
return 0 | [
"def",
"print_package_versions",
"(",
"self",
")",
":",
"variants",
"=",
"self",
".",
"context",
".",
"get_tool_variants",
"(",
"self",
".",
"tool_name",
")",
"if",
"variants",
":",
"if",
"len",
"(",
"variants",
")",
">",
"1",
":",
"self",
".",
"_print_c... | Print a list of versions of the package this tool comes from, and
indicate which version this tool is from. | [
"Print",
"a",
"list",
"of",
"versions",
"of",
"the",
"package",
"this",
"tool",
"comes",
"from",
"and",
"indicate",
"which",
"version",
"this",
"tool",
"is",
"from",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/wrapper.py#L221-L251 | train | 227,339 |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/generators.py | generate_hypergraph | def generate_hypergraph(num_nodes, num_edges, r = 0):
"""
Create a random hyper graph.
@type num_nodes: number
@param num_nodes: Number of nodes.
@type num_edges: number
@param num_edges: Number of edges.
@type r: number
@param r: Uniform edges of size r.
"""
# Graph creation
random_graph = hypergraph()
# Nodes
nodes = list(map(str, list(range(num_nodes))))
random_graph.add_nodes(nodes)
# Base edges
edges = list(map(str, list(range(num_nodes, num_nodes+num_edges))))
random_graph.add_hyperedges(edges)
# Connect the edges
if 0 == r:
# Add each edge with 50/50 probability
for e in edges:
for n in nodes:
if choice([True, False]):
random_graph.link(n, e)
else:
# Add only uniform edges
for e in edges:
# First shuffle the nodes
shuffle(nodes)
# Then take the first r nodes
for i in range(r):
random_graph.link(nodes[i], e)
return random_graph | python | def generate_hypergraph(num_nodes, num_edges, r = 0):
"""
Create a random hyper graph.
@type num_nodes: number
@param num_nodes: Number of nodes.
@type num_edges: number
@param num_edges: Number of edges.
@type r: number
@param r: Uniform edges of size r.
"""
# Graph creation
random_graph = hypergraph()
# Nodes
nodes = list(map(str, list(range(num_nodes))))
random_graph.add_nodes(nodes)
# Base edges
edges = list(map(str, list(range(num_nodes, num_nodes+num_edges))))
random_graph.add_hyperedges(edges)
# Connect the edges
if 0 == r:
# Add each edge with 50/50 probability
for e in edges:
for n in nodes:
if choice([True, False]):
random_graph.link(n, e)
else:
# Add only uniform edges
for e in edges:
# First shuffle the nodes
shuffle(nodes)
# Then take the first r nodes
for i in range(r):
random_graph.link(nodes[i], e)
return random_graph | [
"def",
"generate_hypergraph",
"(",
"num_nodes",
",",
"num_edges",
",",
"r",
"=",
"0",
")",
":",
"# Graph creation",
"random_graph",
"=",
"hypergraph",
"(",
")",
"# Nodes",
"nodes",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"list",
"(",
"range",
"(",
"num... | Create a random hyper graph.
@type num_nodes: number
@param num_nodes: Number of nodes.
@type num_edges: number
@param num_edges: Number of edges.
@type r: number
@param r: Uniform edges of size r. | [
"Create",
"a",
"random",
"hyper",
"graph",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/generators.py#L90-L132 | train | 227,340 |
nerdvegas/rez | src/rez/bind/_utils.py | check_version | def check_version(version, range_=None):
"""Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object.
"""
if range_ and version not in range_:
raise RezBindError("found version %s is not within range %s"
% (str(version), str(range_))) | python | def check_version(version, range_=None):
"""Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object.
"""
if range_ and version not in range_:
raise RezBindError("found version %s is not within range %s"
% (str(version), str(range_))) | [
"def",
"check_version",
"(",
"version",
",",
"range_",
"=",
"None",
")",
":",
"if",
"range_",
"and",
"version",
"not",
"in",
"range_",
":",
"raise",
"RezBindError",
"(",
"\"found version %s is not within range %s\"",
"%",
"(",
"str",
"(",
"version",
")",
",",
... | Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object. | [
"Check",
"that",
"the",
"found",
"software",
"version",
"is",
"within",
"supplied",
"range",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/bind/_utils.py#L45-L54 | train | 227,341 |
nerdvegas/rez | src/rez/bind/_utils.py | extract_version | def extract_version(exepath, version_arg, word_index=-1, version_rank=3):
"""Run an executable and get the program version.
Args:
exepath: Filepath to executable.
version_arg: Arg to pass to program, eg "-V". Can also be a list.
word_index: Expect the Nth word of output to be the version.
version_rank: Cap the version to this many tokens.
Returns:
`Version` object.
"""
if isinstance(version_arg, basestring):
version_arg = [version_arg]
args = [exepath] + version_arg
stdout, stderr, returncode = _run_command(args)
if returncode:
raise RezBindError("failed to execute %s: %s\n(error code %d)"
% (exepath, stderr, returncode))
stdout = stdout.strip().split('\n')[0].strip()
log("extracting version from output: '%s'" % stdout)
try:
strver = stdout.split()[word_index]
toks = strver.replace('.', ' ').replace('-', ' ').split()
strver = '.'.join(toks[:version_rank])
version = Version(strver)
except Exception as e:
raise RezBindError("failed to parse version from output '%s': %s"
% (stdout, str(e)))
log("extracted version: '%s'" % str(version))
return version | python | def extract_version(exepath, version_arg, word_index=-1, version_rank=3):
"""Run an executable and get the program version.
Args:
exepath: Filepath to executable.
version_arg: Arg to pass to program, eg "-V". Can also be a list.
word_index: Expect the Nth word of output to be the version.
version_rank: Cap the version to this many tokens.
Returns:
`Version` object.
"""
if isinstance(version_arg, basestring):
version_arg = [version_arg]
args = [exepath] + version_arg
stdout, stderr, returncode = _run_command(args)
if returncode:
raise RezBindError("failed to execute %s: %s\n(error code %d)"
% (exepath, stderr, returncode))
stdout = stdout.strip().split('\n')[0].strip()
log("extracting version from output: '%s'" % stdout)
try:
strver = stdout.split()[word_index]
toks = strver.replace('.', ' ').replace('-', ' ').split()
strver = '.'.join(toks[:version_rank])
version = Version(strver)
except Exception as e:
raise RezBindError("failed to parse version from output '%s': %s"
% (stdout, str(e)))
log("extracted version: '%s'" % str(version))
return version | [
"def",
"extract_version",
"(",
"exepath",
",",
"version_arg",
",",
"word_index",
"=",
"-",
"1",
",",
"version_rank",
"=",
"3",
")",
":",
"if",
"isinstance",
"(",
"version_arg",
",",
"basestring",
")",
":",
"version_arg",
"=",
"[",
"version_arg",
"]",
"args... | Run an executable and get the program version.
Args:
exepath: Filepath to executable.
version_arg: Arg to pass to program, eg "-V". Can also be a list.
word_index: Expect the Nth word of output to be the version.
version_rank: Cap the version to this many tokens.
Returns:
`Version` object. | [
"Run",
"an",
"executable",
"and",
"get",
"the",
"program",
"version",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/bind/_utils.py#L80-L114 | train | 227,342 |
nerdvegas/rez | src/rez/vendor/version/requirement.py | VersionedObject.construct | def construct(cls, name, version=None):
"""Create a VersionedObject directly from an object name and version.
Args:
name: Object name string.
version: Version object.
"""
other = VersionedObject(None)
other.name_ = name
other.version_ = Version() if version is None else version
return other | python | def construct(cls, name, version=None):
"""Create a VersionedObject directly from an object name and version.
Args:
name: Object name string.
version: Version object.
"""
other = VersionedObject(None)
other.name_ = name
other.version_ = Version() if version is None else version
return other | [
"def",
"construct",
"(",
"cls",
",",
"name",
",",
"version",
"=",
"None",
")",
":",
"other",
"=",
"VersionedObject",
"(",
"None",
")",
"other",
".",
"name_",
"=",
"name",
"other",
".",
"version_",
"=",
"Version",
"(",
")",
"if",
"version",
"is",
"Non... | Create a VersionedObject directly from an object name and version.
Args:
name: Object name string.
version: Version object. | [
"Create",
"a",
"VersionedObject",
"directly",
"from",
"an",
"object",
"name",
"and",
"version",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/requirement.py#L37-L47 | train | 227,343 |
nerdvegas/rez | src/rez/vendor/version/requirement.py | Requirement.construct | def construct(cls, name, range=None):
"""Create a requirement directly from an object name and VersionRange.
Args:
name: Object name string.
range: VersionRange object. If None, an unversioned requirement is
created.
"""
other = Requirement(None)
other.name_ = name
other.range_ = VersionRange() if range is None else range
return other | python | def construct(cls, name, range=None):
"""Create a requirement directly from an object name and VersionRange.
Args:
name: Object name string.
range: VersionRange object. If None, an unversioned requirement is
created.
"""
other = Requirement(None)
other.name_ = name
other.range_ = VersionRange() if range is None else range
return other | [
"def",
"construct",
"(",
"cls",
",",
"name",
",",
"range",
"=",
"None",
")",
":",
"other",
"=",
"Requirement",
"(",
"None",
")",
"other",
".",
"name_",
"=",
"name",
"other",
".",
"range_",
"=",
"VersionRange",
"(",
")",
"if",
"range",
"is",
"None",
... | Create a requirement directly from an object name and VersionRange.
Args:
name: Object name string.
range: VersionRange object. If None, an unversioned requirement is
created. | [
"Create",
"a",
"requirement",
"directly",
"from",
"an",
"object",
"name",
"and",
"VersionRange",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/requirement.py#L152-L163 | train | 227,344 |
nerdvegas/rez | src/rez/vendor/version/requirement.py | Requirement.conflicts_with | def conflicts_with(self, other):
"""Returns True if this requirement conflicts with another `Requirement`
or `VersionedObject`."""
if isinstance(other, Requirement):
if (self.name_ != other.name_) or (self.range is None) \
or (other.range is None):
return False
elif self.conflict:
return False if other.conflict \
else self.range_.issuperset(other.range_)
elif other.conflict:
return other.range_.issuperset(self.range_)
else:
return not self.range_.intersects(other.range_)
else: # VersionedObject
if (self.name_ != other.name_) or (self.range is None):
return False
if self.conflict:
return (other.version_ in self.range_)
else:
return (other.version_ not in self.range_) | python | def conflicts_with(self, other):
"""Returns True if this requirement conflicts with another `Requirement`
or `VersionedObject`."""
if isinstance(other, Requirement):
if (self.name_ != other.name_) or (self.range is None) \
or (other.range is None):
return False
elif self.conflict:
return False if other.conflict \
else self.range_.issuperset(other.range_)
elif other.conflict:
return other.range_.issuperset(self.range_)
else:
return not self.range_.intersects(other.range_)
else: # VersionedObject
if (self.name_ != other.name_) or (self.range is None):
return False
if self.conflict:
return (other.version_ in self.range_)
else:
return (other.version_ not in self.range_) | [
"def",
"conflicts_with",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"Requirement",
")",
":",
"if",
"(",
"self",
".",
"name_",
"!=",
"other",
".",
"name_",
")",
"or",
"(",
"self",
".",
"range",
"is",
"None",
")",
"or... | Returns True if this requirement conflicts with another `Requirement`
or `VersionedObject`. | [
"Returns",
"True",
"if",
"this",
"requirement",
"conflicts",
"with",
"another",
"Requirement",
"or",
"VersionedObject",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/requirement.py#L196-L216 | train | 227,345 |
nerdvegas/rez | src/rez/vendor/version/requirement.py | Requirement.merged | def merged(self, other):
"""Returns the merged result of two requirements.
Two requirements can be in conflict and if so, this function returns
None. For example, requests for "foo-4" and "foo-6" are in conflict,
since both cannot be satisfied with a single version of foo.
Some example successful requirements merges are:
- "foo-3+" and "!foo-5+" == "foo-3+<5"
- "foo-1" and "foo-1.5" == "foo-1.5"
- "!foo-2" and "!foo-5" == "!foo-2|5"
"""
if self.name_ != other.name_:
return None # cannot merge across object names
def _r(r_):
r = Requirement(None)
r.name_ = r_.name_
r.negate_ = r_.negate_
r.conflict_ = r_.conflict_
r.sep_ = r_.sep_
return r
if self.range is None:
return other
elif other.range is None:
return self
elif self.conflict:
if other.conflict:
r = _r(self)
r.range_ = self.range_ | other.range_
r.negate_ = (self.negate_ and other.negate_
and not r.range_.is_any())
return r
else:
range_ = other.range - self.range
if range_ is None:
return None
else:
r = _r(other)
r.range_ = range_
return r
elif other.conflict:
range_ = self.range_ - other.range_
if range_ is None:
return None
else:
r = _r(self)
r.range_ = range_
return r
else:
range_ = self.range_ & other.range_
if range_ is None:
return None
else:
r = _r(self)
r.range_ = range_
return r | python | def merged(self, other):
"""Returns the merged result of two requirements.
Two requirements can be in conflict and if so, this function returns
None. For example, requests for "foo-4" and "foo-6" are in conflict,
since both cannot be satisfied with a single version of foo.
Some example successful requirements merges are:
- "foo-3+" and "!foo-5+" == "foo-3+<5"
- "foo-1" and "foo-1.5" == "foo-1.5"
- "!foo-2" and "!foo-5" == "!foo-2|5"
"""
if self.name_ != other.name_:
return None # cannot merge across object names
def _r(r_):
r = Requirement(None)
r.name_ = r_.name_
r.negate_ = r_.negate_
r.conflict_ = r_.conflict_
r.sep_ = r_.sep_
return r
if self.range is None:
return other
elif other.range is None:
return self
elif self.conflict:
if other.conflict:
r = _r(self)
r.range_ = self.range_ | other.range_
r.negate_ = (self.negate_ and other.negate_
and not r.range_.is_any())
return r
else:
range_ = other.range - self.range
if range_ is None:
return None
else:
r = _r(other)
r.range_ = range_
return r
elif other.conflict:
range_ = self.range_ - other.range_
if range_ is None:
return None
else:
r = _r(self)
r.range_ = range_
return r
else:
range_ = self.range_ & other.range_
if range_ is None:
return None
else:
r = _r(self)
r.range_ = range_
return r | [
"def",
"merged",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"name_",
"!=",
"other",
".",
"name_",
":",
"return",
"None",
"# cannot merge across object names",
"def",
"_r",
"(",
"r_",
")",
":",
"r",
"=",
"Requirement",
"(",
"None",
")",
"r"... | Returns the merged result of two requirements.
Two requirements can be in conflict and if so, this function returns
None. For example, requests for "foo-4" and "foo-6" are in conflict,
since both cannot be satisfied with a single version of foo.
Some example successful requirements merges are:
- "foo-3+" and "!foo-5+" == "foo-3+<5"
- "foo-1" and "foo-1.5" == "foo-1.5"
- "!foo-2" and "!foo-5" == "!foo-2|5" | [
"Returns",
"the",
"merged",
"result",
"of",
"two",
"requirements",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/requirement.py#L218-L275 | train | 227,346 |
nerdvegas/rez | src/rez/utils/graph_utils.py | read_graph_from_string | def read_graph_from_string(txt):
"""Read a graph from a string, either in dot format, or our own
compressed format.
Returns:
`pygraph.digraph`: Graph object.
"""
if not txt.startswith('{'):
return read_dot(txt) # standard dot format
def conv(value):
if isinstance(value, basestring):
return '"' + value + '"'
else:
return value
# our compacted format
doc = literal_eval(txt)
g = digraph()
for attrs, values in doc.get("nodes", []):
attrs = [(k, conv(v)) for k, v in attrs]
for value in values:
if isinstance(value, basestring):
node_name = value
attrs_ = attrs
else:
node_name, label = value
attrs_ = attrs + [("label", conv(label))]
g.add_node(node_name, attrs=attrs_)
for attrs, values in doc.get("edges", []):
attrs_ = [(k, conv(v)) for k, v in attrs]
for value in values:
if len(value) == 3:
edge = value[:2]
label = value[-1]
else:
edge = value
label = ''
g.add_edge(edge, label=label, attrs=attrs_)
return g | python | def read_graph_from_string(txt):
"""Read a graph from a string, either in dot format, or our own
compressed format.
Returns:
`pygraph.digraph`: Graph object.
"""
if not txt.startswith('{'):
return read_dot(txt) # standard dot format
def conv(value):
if isinstance(value, basestring):
return '"' + value + '"'
else:
return value
# our compacted format
doc = literal_eval(txt)
g = digraph()
for attrs, values in doc.get("nodes", []):
attrs = [(k, conv(v)) for k, v in attrs]
for value in values:
if isinstance(value, basestring):
node_name = value
attrs_ = attrs
else:
node_name, label = value
attrs_ = attrs + [("label", conv(label))]
g.add_node(node_name, attrs=attrs_)
for attrs, values in doc.get("edges", []):
attrs_ = [(k, conv(v)) for k, v in attrs]
for value in values:
if len(value) == 3:
edge = value[:2]
label = value[-1]
else:
edge = value
label = ''
g.add_edge(edge, label=label, attrs=attrs_)
return g | [
"def",
"read_graph_from_string",
"(",
"txt",
")",
":",
"if",
"not",
"txt",
".",
"startswith",
"(",
"'{'",
")",
":",
"return",
"read_dot",
"(",
"txt",
")",
"# standard dot format",
"def",
"conv",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
"... | Read a graph from a string, either in dot format, or our own
compressed format.
Returns:
`pygraph.digraph`: Graph object. | [
"Read",
"a",
"graph",
"from",
"a",
"string",
"either",
"in",
"dot",
"format",
"or",
"our",
"own",
"compressed",
"format",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L20-L66 | train | 227,347 |
nerdvegas/rez | src/rez/utils/graph_utils.py | write_compacted | def write_compacted(g):
"""Write a graph in our own compacted format.
Returns:
str.
"""
d_nodes = {}
d_edges = {}
def conv(value):
if isinstance(value, basestring):
return value.strip('"')
else:
return value
for node in g.nodes():
label = None
attrs = []
for k, v in sorted(g.node_attributes(node)):
v_ = conv(v)
if k == "label":
label = v_
else:
attrs.append((k, v_))
value = (node, label) if label else node
d_nodes.setdefault(tuple(attrs), []).append(value)
for edge in g.edges():
attrs = [(k, conv(v)) for k, v in sorted(g.edge_attributes(edge))]
label = str(g.edge_label(edge))
value = tuple(list(edge) + [label]) if label else edge
d_edges.setdefault(tuple(attrs), []).append(tuple(value))
doc = dict(nodes=d_nodes.items(), edges=d_edges.items())
contents = str(doc)
return contents | python | def write_compacted(g):
"""Write a graph in our own compacted format.
Returns:
str.
"""
d_nodes = {}
d_edges = {}
def conv(value):
if isinstance(value, basestring):
return value.strip('"')
else:
return value
for node in g.nodes():
label = None
attrs = []
for k, v in sorted(g.node_attributes(node)):
v_ = conv(v)
if k == "label":
label = v_
else:
attrs.append((k, v_))
value = (node, label) if label else node
d_nodes.setdefault(tuple(attrs), []).append(value)
for edge in g.edges():
attrs = [(k, conv(v)) for k, v in sorted(g.edge_attributes(edge))]
label = str(g.edge_label(edge))
value = tuple(list(edge) + [label]) if label else edge
d_edges.setdefault(tuple(attrs), []).append(tuple(value))
doc = dict(nodes=d_nodes.items(), edges=d_edges.items())
contents = str(doc)
return contents | [
"def",
"write_compacted",
"(",
"g",
")",
":",
"d_nodes",
"=",
"{",
"}",
"d_edges",
"=",
"{",
"}",
"def",
"conv",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"return",
"value",
".",
"strip",
"(",
"'\"'",
")... | Write a graph in our own compacted format.
Returns:
str. | [
"Write",
"a",
"graph",
"in",
"our",
"own",
"compacted",
"format",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L69-L106 | train | 227,348 |
nerdvegas/rez | src/rez/utils/graph_utils.py | write_dot | def write_dot(g):
"""Replacement for pygraph.readwrite.dot.write, which is dog slow.
Note:
This isn't a general replacement. It will work for the graphs that
Rez generates, but there are no guarantees beyond that.
Args:
g (`pygraph.digraph`): Input graph.
Returns:
str: Graph in dot format.
"""
lines = ["digraph g {"]
def attrs_txt(items):
if items:
txt = ", ".join(('%s="%s"' % (k, str(v).strip('"')))
for k, v in items)
return '[' + txt + ']'
else:
return ''
for node in g.nodes():
atxt = attrs_txt(g.node_attributes(node))
txt = "%s %s;" % (node, atxt)
lines.append(txt)
for e in g.edges():
edge_from, edge_to = e
attrs = g.edge_attributes(e)
label = str(g.edge_label(e))
if label:
attrs.append(("label", label))
atxt = attrs_txt(attrs)
txt = "%s -> %s %s;" % (edge_from, edge_to, atxt)
lines.append(txt)
lines.append("}")
return '\n'.join(lines) | python | def write_dot(g):
"""Replacement for pygraph.readwrite.dot.write, which is dog slow.
Note:
This isn't a general replacement. It will work for the graphs that
Rez generates, but there are no guarantees beyond that.
Args:
g (`pygraph.digraph`): Input graph.
Returns:
str: Graph in dot format.
"""
lines = ["digraph g {"]
def attrs_txt(items):
if items:
txt = ", ".join(('%s="%s"' % (k, str(v).strip('"')))
for k, v in items)
return '[' + txt + ']'
else:
return ''
for node in g.nodes():
atxt = attrs_txt(g.node_attributes(node))
txt = "%s %s;" % (node, atxt)
lines.append(txt)
for e in g.edges():
edge_from, edge_to = e
attrs = g.edge_attributes(e)
label = str(g.edge_label(e))
if label:
attrs.append(("label", label))
atxt = attrs_txt(attrs)
txt = "%s -> %s %s;" % (edge_from, edge_to, atxt)
lines.append(txt)
lines.append("}")
return '\n'.join(lines) | [
"def",
"write_dot",
"(",
"g",
")",
":",
"lines",
"=",
"[",
"\"digraph g {\"",
"]",
"def",
"attrs_txt",
"(",
"items",
")",
":",
"if",
"items",
":",
"txt",
"=",
"\", \"",
".",
"join",
"(",
"(",
"'%s=\"%s\"'",
"%",
"(",
"k",
",",
"str",
"(",
"v",
")... | Replacement for pygraph.readwrite.dot.write, which is dog slow.
Note:
This isn't a general replacement. It will work for the graphs that
Rez generates, but there are no guarantees beyond that.
Args:
g (`pygraph.digraph`): Input graph.
Returns:
str: Graph in dot format. | [
"Replacement",
"for",
"pygraph",
".",
"readwrite",
".",
"dot",
".",
"write",
"which",
"is",
"dog",
"slow",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L109-L150 | train | 227,349 |
nerdvegas/rez | src/rez/utils/graph_utils.py | prune_graph | def prune_graph(graph_str, package_name):
"""Prune a package graph so it only contains nodes accessible from the
given package.
Args:
graph_str (str): Dot-language graph string.
package_name (str): Name of package of interest.
Returns:
Pruned graph, as a string.
"""
# find nodes of interest
g = read_dot(graph_str)
nodes = set()
for node, attrs in g.node_attr.iteritems():
attr = [x for x in attrs if x[0] == "label"]
if attr:
label = attr[0][1]
try:
req_str = _request_from_label(label)
request = PackageRequest(req_str)
except PackageRequestError:
continue
if request.name == package_name:
nodes.add(node)
if not nodes:
raise ValueError("The package %r does not appear in the graph."
% package_name)
# find nodes upstream from these nodes
g_rev = g.reverse()
accessible_nodes = set()
access = accessibility(g_rev)
for node in nodes:
nodes_ = access.get(node, [])
accessible_nodes |= set(nodes_)
# remove inaccessible nodes
inaccessible_nodes = set(g.nodes()) - accessible_nodes
for node in inaccessible_nodes:
g.del_node(node)
return write_dot(g) | python | def prune_graph(graph_str, package_name):
"""Prune a package graph so it only contains nodes accessible from the
given package.
Args:
graph_str (str): Dot-language graph string.
package_name (str): Name of package of interest.
Returns:
Pruned graph, as a string.
"""
# find nodes of interest
g = read_dot(graph_str)
nodes = set()
for node, attrs in g.node_attr.iteritems():
attr = [x for x in attrs if x[0] == "label"]
if attr:
label = attr[0][1]
try:
req_str = _request_from_label(label)
request = PackageRequest(req_str)
except PackageRequestError:
continue
if request.name == package_name:
nodes.add(node)
if not nodes:
raise ValueError("The package %r does not appear in the graph."
% package_name)
# find nodes upstream from these nodes
g_rev = g.reverse()
accessible_nodes = set()
access = accessibility(g_rev)
for node in nodes:
nodes_ = access.get(node, [])
accessible_nodes |= set(nodes_)
# remove inaccessible nodes
inaccessible_nodes = set(g.nodes()) - accessible_nodes
for node in inaccessible_nodes:
g.del_node(node)
return write_dot(g) | [
"def",
"prune_graph",
"(",
"graph_str",
",",
"package_name",
")",
":",
"# find nodes of interest",
"g",
"=",
"read_dot",
"(",
"graph_str",
")",
"nodes",
"=",
"set",
"(",
")",
"for",
"node",
",",
"attrs",
"in",
"g",
".",
"node_attr",
".",
"iteritems",
"(",
... | Prune a package graph so it only contains nodes accessible from the
given package.
Args:
graph_str (str): Dot-language graph string.
package_name (str): Name of package of interest.
Returns:
Pruned graph, as a string. | [
"Prune",
"a",
"package",
"graph",
"so",
"it",
"only",
"contains",
"nodes",
"accessible",
"from",
"the",
"given",
"package",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L153-L198 | train | 227,350 |
nerdvegas/rez | src/rez/utils/graph_utils.py | save_graph | def save_graph(graph_str, dest_file, fmt=None, image_ratio=None):
"""Render a graph to an image file.
Args:
graph_str (str): Dot-language graph string.
dest_file (str): Filepath to save the graph to.
fmt (str): Format, eg "png", "jpg".
image_ratio (float): Image ratio.
Returns:
String representing format that was written, such as 'png'.
"""
g = pydot.graph_from_dot_data(graph_str)
# determine the dest format
if fmt is None:
fmt = os.path.splitext(dest_file)[1].lower().strip('.') or "png"
if hasattr(g, "write_" + fmt):
write_fn = getattr(g, "write_" + fmt)
else:
raise Exception("Unsupported graph format: '%s'" % fmt)
if image_ratio:
g.set_ratio(str(image_ratio))
write_fn(dest_file)
return fmt | python | def save_graph(graph_str, dest_file, fmt=None, image_ratio=None):
"""Render a graph to an image file.
Args:
graph_str (str): Dot-language graph string.
dest_file (str): Filepath to save the graph to.
fmt (str): Format, eg "png", "jpg".
image_ratio (float): Image ratio.
Returns:
String representing format that was written, such as 'png'.
"""
g = pydot.graph_from_dot_data(graph_str)
# determine the dest format
if fmt is None:
fmt = os.path.splitext(dest_file)[1].lower().strip('.') or "png"
if hasattr(g, "write_" + fmt):
write_fn = getattr(g, "write_" + fmt)
else:
raise Exception("Unsupported graph format: '%s'" % fmt)
if image_ratio:
g.set_ratio(str(image_ratio))
write_fn(dest_file)
return fmt | [
"def",
"save_graph",
"(",
"graph_str",
",",
"dest_file",
",",
"fmt",
"=",
"None",
",",
"image_ratio",
"=",
"None",
")",
":",
"g",
"=",
"pydot",
".",
"graph_from_dot_data",
"(",
"graph_str",
")",
"# determine the dest format",
"if",
"fmt",
"is",
"None",
":",
... | Render a graph to an image file.
Args:
graph_str (str): Dot-language graph string.
dest_file (str): Filepath to save the graph to.
fmt (str): Format, eg "png", "jpg".
image_ratio (float): Image ratio.
Returns:
String representing format that was written, such as 'png'. | [
"Render",
"a",
"graph",
"to",
"an",
"image",
"file",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L201-L226 | train | 227,351 |
nerdvegas/rez | src/rez/utils/graph_utils.py | view_graph | def view_graph(graph_str, dest_file=None):
"""View a dot graph in an image viewer."""
from rez.system import system
from rez.config import config
if (system.platform == "linux") and (not os.getenv("DISPLAY")):
print >> sys.stderr, "Unable to open display."
sys.exit(1)
dest_file = _write_graph(graph_str, dest_file=dest_file)
# view graph
viewed = False
prog = config.image_viewer or 'browser'
print "loading image viewer (%s)..." % prog
if config.image_viewer:
proc = popen([config.image_viewer, dest_file])
proc.wait()
viewed = not bool(proc.returncode)
if not viewed:
import webbrowser
webbrowser.open_new("file://" + dest_file) | python | def view_graph(graph_str, dest_file=None):
"""View a dot graph in an image viewer."""
from rez.system import system
from rez.config import config
if (system.platform == "linux") and (not os.getenv("DISPLAY")):
print >> sys.stderr, "Unable to open display."
sys.exit(1)
dest_file = _write_graph(graph_str, dest_file=dest_file)
# view graph
viewed = False
prog = config.image_viewer or 'browser'
print "loading image viewer (%s)..." % prog
if config.image_viewer:
proc = popen([config.image_viewer, dest_file])
proc.wait()
viewed = not bool(proc.returncode)
if not viewed:
import webbrowser
webbrowser.open_new("file://" + dest_file) | [
"def",
"view_graph",
"(",
"graph_str",
",",
"dest_file",
"=",
"None",
")",
":",
"from",
"rez",
".",
"system",
"import",
"system",
"from",
"rez",
".",
"config",
"import",
"config",
"if",
"(",
"system",
".",
"platform",
"==",
"\"linux\"",
")",
"and",
"(",
... | View a dot graph in an image viewer. | [
"View",
"a",
"dot",
"graph",
"in",
"an",
"image",
"viewer",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/graph_utils.py#L229-L252 | train | 227,352 |
nerdvegas/rez | src/rez/utils/platform_.py | Platform.physical_cores | def physical_cores(self):
"""Return the number of physical cpu cores on the system."""
try:
return self._physical_cores_base()
except Exception as e:
from rez.utils.logging_ import print_error
print_error("Error detecting physical core count, defaulting to 1: %s"
% str(e))
return 1 | python | def physical_cores(self):
"""Return the number of physical cpu cores on the system."""
try:
return self._physical_cores_base()
except Exception as e:
from rez.utils.logging_ import print_error
print_error("Error detecting physical core count, defaulting to 1: %s"
% str(e))
return 1 | [
"def",
"physical_cores",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_physical_cores_base",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"from",
"rez",
".",
"utils",
".",
"logging_",
"import",
"print_error",
"print_error",
"(",
"\"Error de... | Return the number of physical cpu cores on the system. | [
"Return",
"the",
"number",
"of",
"physical",
"cpu",
"cores",
"on",
"the",
"system",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/platform_.py#L82-L90 | train | 227,353 |
nerdvegas/rez | src/rez/utils/platform_.py | Platform.logical_cores | def logical_cores(self):
"""Return the number of cpu cores as reported to the os.
May be different from physical_cores if, ie, intel's hyperthreading is
enabled.
"""
try:
return self._logical_cores()
except Exception as e:
from rez.utils.logging_ import print_error
print_error("Error detecting logical core count, defaulting to 1: %s"
% str(e))
return 1 | python | def logical_cores(self):
"""Return the number of cpu cores as reported to the os.
May be different from physical_cores if, ie, intel's hyperthreading is
enabled.
"""
try:
return self._logical_cores()
except Exception as e:
from rez.utils.logging_ import print_error
print_error("Error detecting logical core count, defaulting to 1: %s"
% str(e))
return 1 | [
"def",
"logical_cores",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_logical_cores",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"from",
"rez",
".",
"utils",
".",
"logging_",
"import",
"print_error",
"print_error",
"(",
"\"Error detecting... | Return the number of cpu cores as reported to the os.
May be different from physical_cores if, ie, intel's hyperthreading is
enabled. | [
"Return",
"the",
"number",
"of",
"cpu",
"cores",
"as",
"reported",
"to",
"the",
"os",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/platform_.py#L93-L105 | train | 227,354 |
nerdvegas/rez | src/rez/vendor/colorama/ansitowin32.py | AnsiToWin32.write_and_convert | def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text)) | python | def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text)) | [
"def",
"write_and_convert",
"(",
"self",
",",
"text",
")",
":",
"cursor",
"=",
"0",
"for",
"match",
"in",
"self",
".",
"ANSI_RE",
".",
"finditer",
"(",
"text",
")",
":",
"start",
",",
"end",
"=",
"match",
".",
"span",
"(",
")",
"self",
".",
"write_... | Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls. | [
"Write",
"the",
"given",
"text",
"to",
"our",
"wrapped",
"stream",
"stripping",
"any",
"ANSI",
"sequences",
"from",
"the",
"text",
"and",
"optionally",
"converting",
"them",
"into",
"win32",
"calls",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/colorama/ansitowin32.py#L131-L143 | train | 227,355 |
nerdvegas/rez | src/rezgui/models/ContextModel.py | ContextModel.copy | def copy(self):
"""Returns a copy of the context."""
other = ContextModel(self._context, self.parent())
other._stale = self._stale
other._modified = self._modified
other.request = self.request[:]
other.packages_path = self.packages_path
other.implicit_packages = self.implicit_packages
other.package_filter = self.package_filter
other.caching = self.caching
other.default_patch_lock = self.default_patch_lock
other.patch_locks = copy.deepcopy(self.patch_locks)
return other | python | def copy(self):
"""Returns a copy of the context."""
other = ContextModel(self._context, self.parent())
other._stale = self._stale
other._modified = self._modified
other.request = self.request[:]
other.packages_path = self.packages_path
other.implicit_packages = self.implicit_packages
other.package_filter = self.package_filter
other.caching = self.caching
other.default_patch_lock = self.default_patch_lock
other.patch_locks = copy.deepcopy(self.patch_locks)
return other | [
"def",
"copy",
"(",
"self",
")",
":",
"other",
"=",
"ContextModel",
"(",
"self",
".",
"_context",
",",
"self",
".",
"parent",
"(",
")",
")",
"other",
".",
"_stale",
"=",
"self",
".",
"_stale",
"other",
".",
"_modified",
"=",
"self",
".",
"_modified",... | Returns a copy of the context. | [
"Returns",
"a",
"copy",
"of",
"the",
"context",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/models/ContextModel.py#L51-L63 | train | 227,356 |
nerdvegas/rez | src/rezgui/models/ContextModel.py | ContextModel.get_lock_requests | def get_lock_requests(self):
"""Take the current context, and the current patch locks, and determine
the effective requests that will be added to the main request.
Returns:
A dict of (PatchLock, [Requirement]) tuples. Each requirement will be
a weak package reference. If there is no current context, an empty
dict will be returned.
"""
d = defaultdict(list)
if self._context:
for variant in self._context.resolved_packages:
name = variant.name
version = variant.version
lock = self.patch_locks.get(name)
if lock is None:
lock = self.default_patch_lock
request = get_lock_request(name, version, lock)
if request is not None:
d[lock].append(request)
return d | python | def get_lock_requests(self):
"""Take the current context, and the current patch locks, and determine
the effective requests that will be added to the main request.
Returns:
A dict of (PatchLock, [Requirement]) tuples. Each requirement will be
a weak package reference. If there is no current context, an empty
dict will be returned.
"""
d = defaultdict(list)
if self._context:
for variant in self._context.resolved_packages:
name = variant.name
version = variant.version
lock = self.patch_locks.get(name)
if lock is None:
lock = self.default_patch_lock
request = get_lock_request(name, version, lock)
if request is not None:
d[lock].append(request)
return d | [
"def",
"get_lock_requests",
"(",
"self",
")",
":",
"d",
"=",
"defaultdict",
"(",
"list",
")",
"if",
"self",
".",
"_context",
":",
"for",
"variant",
"in",
"self",
".",
"_context",
".",
"resolved_packages",
":",
"name",
"=",
"variant",
".",
"name",
"versio... | Take the current context, and the current patch locks, and determine
the effective requests that will be added to the main request.
Returns:
A dict of (PatchLock, [Requirement]) tuples. Each requirement will be
a weak package reference. If there is no current context, an empty
dict will be returned. | [
"Take",
"the",
"current",
"context",
"and",
"the",
"current",
"patch",
"locks",
"and",
"determine",
"the",
"effective",
"requests",
"that",
"will",
"be",
"added",
"to",
"the",
"main",
"request",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/models/ContextModel.py#L113-L134 | train | 227,357 |
nerdvegas/rez | src/rezgui/models/ContextModel.py | ContextModel.resolve_context | def resolve_context(self, verbosity=0, max_fails=-1, timestamp=None,
callback=None, buf=None, package_load_callback=None):
"""Update the current context by performing a re-resolve.
The newly resolved context is only applied if it is a successful solve.
Returns:
`ResolvedContext` object, which may be a successful or failed solve.
"""
package_filter = PackageFilterList.from_pod(self.package_filter)
context = ResolvedContext(
self.request,
package_paths=self.packages_path,
package_filter=package_filter,
verbosity=verbosity,
max_fails=max_fails,
timestamp=timestamp,
buf=buf,
callback=callback,
package_load_callback=package_load_callback,
caching=self.caching)
if context.success:
if self._context and self._context.load_path:
context.set_load_path(self._context.load_path)
self._set_context(context)
self._modified = True
return context | python | def resolve_context(self, verbosity=0, max_fails=-1, timestamp=None,
callback=None, buf=None, package_load_callback=None):
"""Update the current context by performing a re-resolve.
The newly resolved context is only applied if it is a successful solve.
Returns:
`ResolvedContext` object, which may be a successful or failed solve.
"""
package_filter = PackageFilterList.from_pod(self.package_filter)
context = ResolvedContext(
self.request,
package_paths=self.packages_path,
package_filter=package_filter,
verbosity=verbosity,
max_fails=max_fails,
timestamp=timestamp,
buf=buf,
callback=callback,
package_load_callback=package_load_callback,
caching=self.caching)
if context.success:
if self._context and self._context.load_path:
context.set_load_path(self._context.load_path)
self._set_context(context)
self._modified = True
return context | [
"def",
"resolve_context",
"(",
"self",
",",
"verbosity",
"=",
"0",
",",
"max_fails",
"=",
"-",
"1",
",",
"timestamp",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"buf",
"=",
"None",
",",
"package_load_callback",
"=",
"None",
")",
":",
"package_filter... | Update the current context by performing a re-resolve.
The newly resolved context is only applied if it is a successful solve.
Returns:
`ResolvedContext` object, which may be a successful or failed solve. | [
"Update",
"the",
"current",
"context",
"by",
"performing",
"a",
"re",
"-",
"resolve",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/models/ContextModel.py#L175-L203 | train | 227,358 |
nerdvegas/rez | src/rezgui/models/ContextModel.py | ContextModel.set_context | def set_context(self, context):
"""Replace the current context with another."""
self._set_context(context, emit=False)
self._modified = (not context.load_path)
self.dataChanged.emit(self.CONTEXT_CHANGED |
self.REQUEST_CHANGED |
self.PACKAGES_PATH_CHANGED |
self.LOCKS_CHANGED |
self.LOADPATH_CHANGED |
self.PACKAGE_FILTER_CHANGED |
self.CACHING_CHANGED) | python | def set_context(self, context):
"""Replace the current context with another."""
self._set_context(context, emit=False)
self._modified = (not context.load_path)
self.dataChanged.emit(self.CONTEXT_CHANGED |
self.REQUEST_CHANGED |
self.PACKAGES_PATH_CHANGED |
self.LOCKS_CHANGED |
self.LOADPATH_CHANGED |
self.PACKAGE_FILTER_CHANGED |
self.CACHING_CHANGED) | [
"def",
"set_context",
"(",
"self",
",",
"context",
")",
":",
"self",
".",
"_set_context",
"(",
"context",
",",
"emit",
"=",
"False",
")",
"self",
".",
"_modified",
"=",
"(",
"not",
"context",
".",
"load_path",
")",
"self",
".",
"dataChanged",
".",
"emi... | Replace the current context with another. | [
"Replace",
"the",
"current",
"context",
"with",
"another",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/models/ContextModel.py#L214-L224 | train | 227,359 |
nerdvegas/rez | src/rez/vendor/distlib/util.py | get_resources_dests | def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations | python | def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations | [
"def",
"get_resources_dests",
"(",
"resources_root",
",",
"rules",
")",
":",
"def",
"get_rel_path",
"(",
"base",
",",
"path",
")",
":",
"# normalizes and returns a lstripped-/-separated path",
"base",
"=",
"base",
".",
"replace",
"(",
"os",
".",
"path",
".",
"se... | Find destinations for resources files | [
"Find",
"destinations",
"for",
"resources",
"files"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/distlib/util.py#L123-L147 | train | 227,360 |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/cycles.py | find_cycle | def find_cycle(graph):
"""
Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes.
"""
if (isinstance(graph, graph_class)):
directed = False
elif (isinstance(graph, digraph_class)):
directed = True
else:
raise InvalidGraphType
def find_cycle_to_ancestor(node, ancestor):
"""
Find a cycle containing both node and ancestor.
"""
path = []
while (node != ancestor):
if (node is None):
return []
path.append(node)
node = spanning_tree[node]
path.append(node)
path.reverse()
return path
def dfs(node):
"""
Depth-first search subfunction.
"""
visited[node] = 1
# Explore recursively the connected component
for each in graph[node]:
if (cycle):
return
if (each not in visited):
spanning_tree[each] = node
dfs(each)
else:
if (directed or spanning_tree[node] != each):
cycle.extend(find_cycle_to_ancestor(node, each))
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
visited = {} # List for marking visited and non-visited nodes
spanning_tree = {} # Spanning tree
cycle = []
# Algorithm outer-loop
for each in graph:
# Select a non-visited node
if (each not in visited):
spanning_tree[each] = None
# Explore node's connected component
dfs(each)
if (cycle):
setrecursionlimit(recursionlimit)
return cycle
setrecursionlimit(recursionlimit)
return [] | python | def find_cycle(graph):
"""
Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes.
"""
if (isinstance(graph, graph_class)):
directed = False
elif (isinstance(graph, digraph_class)):
directed = True
else:
raise InvalidGraphType
def find_cycle_to_ancestor(node, ancestor):
"""
Find a cycle containing both node and ancestor.
"""
path = []
while (node != ancestor):
if (node is None):
return []
path.append(node)
node = spanning_tree[node]
path.append(node)
path.reverse()
return path
def dfs(node):
"""
Depth-first search subfunction.
"""
visited[node] = 1
# Explore recursively the connected component
for each in graph[node]:
if (cycle):
return
if (each not in visited):
spanning_tree[each] = node
dfs(each)
else:
if (directed or spanning_tree[node] != each):
cycle.extend(find_cycle_to_ancestor(node, each))
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
visited = {} # List for marking visited and non-visited nodes
spanning_tree = {} # Spanning tree
cycle = []
# Algorithm outer-loop
for each in graph:
# Select a non-visited node
if (each not in visited):
spanning_tree[each] = None
# Explore node's connected component
dfs(each)
if (cycle):
setrecursionlimit(recursionlimit)
return cycle
setrecursionlimit(recursionlimit)
return [] | [
"def",
"find_cycle",
"(",
"graph",
")",
":",
"if",
"(",
"isinstance",
"(",
"graph",
",",
"graph_class",
")",
")",
":",
"directed",
"=",
"False",
"elif",
"(",
"isinstance",
"(",
"graph",
",",
"digraph_class",
")",
")",
":",
"directed",
"=",
"True",
"els... | Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes. | [
"Find",
"a",
"cycle",
"in",
"the",
"given",
"graph",
".",
"This",
"function",
"will",
"return",
"a",
"list",
"of",
"nodes",
"which",
"form",
"a",
"cycle",
"in",
"the",
"graph",
"or",
"an",
"empty",
"list",
"if",
"no",
"cycle",
"exists",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/cycles.py#L38-L108 | train | 227,361 |
nerdvegas/rez | src/rez/release_vcs.py | create_release_vcs | def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
return cls(path)
classes_by_level = {}
for vcs_name in vcs_types:
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue
vcs_path, levels_up = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))
if not classes_by_level:
raise ReleaseVCSError("No version control system for package "
"releasing is associated with the path %s" % path)
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ", ".join(x[0].name() for x in clss)
raise ReleaseVCSError("Several version control systems are associated "
"with the path %s: %s. Use rez-release --vcs to "
"choose." % (path, clss_str))
else:
cls, vcs_root = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root) | python | def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
return cls(path)
classes_by_level = {}
for vcs_name in vcs_types:
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue
vcs_path, levels_up = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))
if not classes_by_level:
raise ReleaseVCSError("No version control system for package "
"releasing is associated with the path %s" % path)
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ", ".join(x[0].name() for x in clss)
raise ReleaseVCSError("Several version control systems are associated "
"with the path %s: %s. Use rez-release --vcs to "
"choose." % (path, clss_str))
else:
cls, vcs_root = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root) | [
"def",
"create_release_vcs",
"(",
"path",
",",
"vcs_name",
"=",
"None",
")",
":",
"from",
"rez",
".",
"plugin_managers",
"import",
"plugin_manager",
"vcs_types",
"=",
"get_release_vcs_types",
"(",
")",
"if",
"vcs_name",
":",
"if",
"vcs_name",
"not",
"in",
"vcs... | Return a new release VCS that can release from this source path. | [
"Return",
"a",
"new",
"release",
"VCS",
"that",
"can",
"release",
"from",
"this",
"source",
"path",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/release_vcs.py#L17-L61 | train | 227,362 |
nerdvegas/rez | src/rez/release_vcs.py | ReleaseVCS.find_vcs_root | def find_vcs_root(cls, path):
"""Try to find a version control root directory of this type for the
given path.
If successful, returns (vcs_root, levels_up), where vcs_root is the
path to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
was found in that path's parent, etc. If not sucessful, returns None
"""
if cls.search_parents_for_root():
valid_dirs = walk_up_dirs(path)
else:
valid_dirs = [path]
for i, current_path in enumerate(valid_dirs):
if cls.is_valid_root(current_path):
return current_path, i
return None | python | def find_vcs_root(cls, path):
"""Try to find a version control root directory of this type for the
given path.
If successful, returns (vcs_root, levels_up), where vcs_root is the
path to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
was found in that path's parent, etc. If not sucessful, returns None
"""
if cls.search_parents_for_root():
valid_dirs = walk_up_dirs(path)
else:
valid_dirs = [path]
for i, current_path in enumerate(valid_dirs):
if cls.is_valid_root(current_path):
return current_path, i
return None | [
"def",
"find_vcs_root",
"(",
"cls",
",",
"path",
")",
":",
"if",
"cls",
".",
"search_parents_for_root",
"(",
")",
":",
"valid_dirs",
"=",
"walk_up_dirs",
"(",
"path",
")",
"else",
":",
"valid_dirs",
"=",
"[",
"path",
"]",
"for",
"i",
",",
"current_path",... | Try to find a version control root directory of this type for the
given path.
If successful, returns (vcs_root, levels_up), where vcs_root is the
path to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
was found in that path's parent, etc. If not sucessful, returns None | [
"Try",
"to",
"find",
"a",
"version",
"control",
"root",
"directory",
"of",
"this",
"type",
"for",
"the",
"given",
"path",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/release_vcs.py#L115-L132 | train | 227,363 |
nerdvegas/rez | src/rez/release_vcs.py | ReleaseVCS._cmd | def _cmd(self, *nargs):
"""Convenience function for executing a program such as 'git' etc."""
cmd_str = ' '.join(map(quote, nargs))
if self.package.config.debug("package_release"):
print_debug("Running command: %s" % cmd_str)
p = popen(nargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.pkg_root)
out, err = p.communicate()
if p.returncode:
print_debug("command stdout:")
print_debug(out)
print_debug("command stderr:")
print_debug(err)
raise ReleaseVCSError("command failed: %s\n%s" % (cmd_str, err))
out = out.strip()
if out:
return [x.rstrip() for x in out.split('\n')]
else:
return [] | python | def _cmd(self, *nargs):
"""Convenience function for executing a program such as 'git' etc."""
cmd_str = ' '.join(map(quote, nargs))
if self.package.config.debug("package_release"):
print_debug("Running command: %s" % cmd_str)
p = popen(nargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.pkg_root)
out, err = p.communicate()
if p.returncode:
print_debug("command stdout:")
print_debug(out)
print_debug("command stderr:")
print_debug(err)
raise ReleaseVCSError("command failed: %s\n%s" % (cmd_str, err))
out = out.strip()
if out:
return [x.rstrip() for x in out.split('\n')]
else:
return [] | [
"def",
"_cmd",
"(",
"self",
",",
"*",
"nargs",
")",
":",
"cmd_str",
"=",
"' '",
".",
"join",
"(",
"map",
"(",
"quote",
",",
"nargs",
")",
")",
"if",
"self",
".",
"package",
".",
"config",
".",
"debug",
"(",
"\"package_release\"",
")",
":",
"print_d... | Convenience function for executing a program such as 'git' etc. | [
"Convenience",
"function",
"for",
"executing",
"a",
"program",
"such",
"as",
"git",
"etc",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/release_vcs.py#L203-L224 | train | 227,364 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._close | def _close(self, args):
"""Request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._send_method((20, 41))
self._do_revive()
raise error_for_code(
reply_code, reply_text, (class_id, method_id), ChannelError,
) | python | def _close(self, args):
"""Request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._send_method((20, 41))
self._do_revive()
raise error_for_code(
reply_code, reply_text, (class_id, method_id), ChannelError,
) | [
"def",
"_close",
"(",
"self",
",",
"args",
")",
":",
"reply_code",
"=",
"args",
".",
"read_short",
"(",
")",
"reply_text",
"=",
"args",
".",
"read_shortstr",
"(",
")",
"class_id",
"=",
"args",
".",
"read_short",
"(",
")",
"method_id",
"=",
"args",
".",... | Request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method. | [
"Request",
"a",
"channel",
"close"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L186-L244 | train | 227,365 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._x_flow_ok | def _x_flow_ok(self, active):
"""Confirm a flow method
Confirms to the peer that a flow command was received and
processed.
PARAMETERS:
active: boolean
current flow setting
Confirms the setting of the processed flow method:
True means the peer will start sending or continue
to send content frames; False means it will not.
"""
args = AMQPWriter()
args.write_bit(active)
self._send_method((20, 21), args) | python | def _x_flow_ok(self, active):
"""Confirm a flow method
Confirms to the peer that a flow command was received and
processed.
PARAMETERS:
active: boolean
current flow setting
Confirms the setting of the processed flow method:
True means the peer will start sending or continue
to send content frames; False means it will not.
"""
args = AMQPWriter()
args.write_bit(active)
self._send_method((20, 21), args) | [
"def",
"_x_flow_ok",
"(",
"self",
",",
"active",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_bit",
"(",
"active",
")",
"self",
".",
"_send_method",
"(",
"(",
"20",
",",
"21",
")",
",",
"args",
")"
] | Confirm a flow method
Confirms to the peer that a flow command was received and
processed.
PARAMETERS:
active: boolean
current flow setting
Confirms the setting of the processed flow method:
True means the peer will start sending or continue
to send content frames; False means it will not. | [
"Confirm",
"a",
"flow",
"method"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L364-L382 | train | 227,366 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._x_open | def _x_open(self):
"""Open a channel for use
This method opens a virtual connection (a channel).
RULE:
This method MUST NOT be called when the channel is already
open.
PARAMETERS:
out_of_band: shortstr (DEPRECATED)
out-of-band settings
Configures out-of-band transfers on this channel. The
syntax and meaning of this field will be formally
defined at a later date.
"""
if self.is_open:
return
args = AMQPWriter()
args.write_shortstr('') # out_of_band: deprecated
self._send_method((20, 10), args)
return self.wait(allowed_methods=[
(20, 11), # Channel.open_ok
]) | python | def _x_open(self):
"""Open a channel for use
This method opens a virtual connection (a channel).
RULE:
This method MUST NOT be called when the channel is already
open.
PARAMETERS:
out_of_band: shortstr (DEPRECATED)
out-of-band settings
Configures out-of-band transfers on this channel. The
syntax and meaning of this field will be formally
defined at a later date.
"""
if self.is_open:
return
args = AMQPWriter()
args.write_shortstr('') # out_of_band: deprecated
self._send_method((20, 10), args)
return self.wait(allowed_methods=[
(20, 11), # Channel.open_ok
]) | [
"def",
"_x_open",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_open",
":",
"return",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_shortstr",
"(",
"''",
")",
"# out_of_band: deprecated",
"self",
".",
"_send_method",
"(",
"(",
"20",
",",
"10",... | Open a channel for use
This method opens a virtual connection (a channel).
RULE:
This method MUST NOT be called when the channel is already
open.
PARAMETERS:
out_of_band: shortstr (DEPRECATED)
out-of-band settings
Configures out-of-band transfers on this channel. The
syntax and meaning of this field will be formally
defined at a later date. | [
"Open",
"a",
"channel",
"for",
"use"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L402-L430 | train | 227,367 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.exchange_declare | def exchange_declare(self, exchange, type, passive=False, durable=False,
auto_delete=True, nowait=False, arguments=None):
"""Declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(exchange)
args.write_shortstr(type)
args.write_bit(passive)
args.write_bit(durable)
args.write_bit(auto_delete)
args.write_bit(False) # internal: deprecated
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((40, 10), args)
if auto_delete:
warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED))
if not nowait:
return self.wait(allowed_methods=[
(40, 11), # Channel.exchange_declare_ok
]) | python | def exchange_declare(self, exchange, type, passive=False, durable=False,
auto_delete=True, nowait=False, arguments=None):
"""Declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(exchange)
args.write_shortstr(type)
args.write_bit(passive)
args.write_bit(durable)
args.write_bit(auto_delete)
args.write_bit(False) # internal: deprecated
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((40, 10), args)
if auto_delete:
warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED))
if not nowait:
return self.wait(allowed_methods=[
(40, 11), # Channel.exchange_declare_ok
]) | [
"def",
"exchange_declare",
"(",
"self",
",",
"exchange",
",",
"type",
",",
"passive",
"=",
"False",
",",
"durable",
"=",
"False",
",",
"auto_delete",
"=",
"True",
",",
"nowait",
"=",
"False",
",",
"arguments",
"=",
"None",
")",
":",
"arguments",
"=",
"... | Declare exchange, create if needed
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True. | [
"Declare",
"exchange",
"create",
"if",
"needed"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L481-L623 | train | 227,368 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.exchange_bind | def exchange_bind(self, destination, source='', routing_key='',
nowait=False, arguments=None):
"""This method binds an exchange to an exchange.
RULE:
A server MUST allow and ignore duplicate bindings - that
is, two or more bind methods for a specific exchanges,
with identical arguments - without treating these as an
error.
RULE:
A server MUST allow cycles of exchange bindings to be
created including allowing an exchange to be bound to
itself.
RULE:
A server MUST not deliver the same message more than once
to a destination exchange, even if the topology of
exchanges and bindings results in multiple (even infinite)
routes to that exchange.
PARAMETERS:
reserved-1: short
destination: shortstr
Specifies the name of the destination exchange to
bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent destination exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
source: shortstr
Specifies the name of the source exchange to bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent source exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
routing-key: shortstr
Specifies the routing key for the binding. The routing
key is used for routing messages depending on the
exchange configuration. Not all exchanges use a
routing key - refer to the specific exchange
documentation.
no-wait: bit
arguments: table
A set of arguments for the binding. The syntax and
semantics of these arguments depends on the exchange
class.
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(destination)
args.write_shortstr(source)
args.write_shortstr(routing_key)
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((40, 30), args)
if not nowait:
return self.wait(allowed_methods=[
(40, 31), # Channel.exchange_bind_ok
]) | python | def exchange_bind(self, destination, source='', routing_key='',
nowait=False, arguments=None):
"""This method binds an exchange to an exchange.
RULE:
A server MUST allow and ignore duplicate bindings - that
is, two or more bind methods for a specific exchanges,
with identical arguments - without treating these as an
error.
RULE:
A server MUST allow cycles of exchange bindings to be
created including allowing an exchange to be bound to
itself.
RULE:
A server MUST not deliver the same message more than once
to a destination exchange, even if the topology of
exchanges and bindings results in multiple (even infinite)
routes to that exchange.
PARAMETERS:
reserved-1: short
destination: shortstr
Specifies the name of the destination exchange to
bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent destination exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
source: shortstr
Specifies the name of the source exchange to bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent source exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
routing-key: shortstr
Specifies the routing key for the binding. The routing
key is used for routing messages depending on the
exchange configuration. Not all exchanges use a
routing key - refer to the specific exchange
documentation.
no-wait: bit
arguments: table
A set of arguments for the binding. The syntax and
semantics of these arguments depends on the exchange
class.
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(destination)
args.write_shortstr(source)
args.write_shortstr(routing_key)
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((40, 30), args)
if not nowait:
return self.wait(allowed_methods=[
(40, 31), # Channel.exchange_bind_ok
]) | [
"def",
"exchange_bind",
"(",
"self",
",",
"destination",
",",
"source",
"=",
"''",
",",
"routing_key",
"=",
"''",
",",
"nowait",
"=",
"False",
",",
"arguments",
"=",
"None",
")",
":",
"arguments",
"=",
"{",
"}",
"if",
"arguments",
"is",
"None",
"else",... | This method binds an exchange to an exchange.
RULE:
A server MUST allow and ignore duplicate bindings - that
is, two or more bind methods for a specific exchanges,
with identical arguments - without treating these as an
error.
RULE:
A server MUST allow cycles of exchange bindings to be
created including allowing an exchange to be bound to
itself.
RULE:
A server MUST not deliver the same message more than once
to a destination exchange, even if the topology of
exchanges and bindings results in multiple (even infinite)
routes to that exchange.
PARAMETERS:
reserved-1: short
destination: shortstr
Specifies the name of the destination exchange to
bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent destination exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
source: shortstr
Specifies the name of the source exchange to bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent source exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
routing-key: shortstr
Specifies the routing key for the binding. The routing
key is used for routing messages depending on the
exchange configuration. Not all exchanges use a
routing key - refer to the specific exchange
documentation.
no-wait: bit
arguments: table
A set of arguments for the binding. The syntax and
semantics of these arguments depends on the exchange
class. | [
"This",
"method",
"binds",
"an",
"exchange",
"to",
"an",
"exchange",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L697-L783 | train | 227,369 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.queue_declare | def queue_declare(self, queue='', passive=False, durable=False,
exclusive=False, auto_delete=True, nowait=False,
arguments=None):
"""Declare queue, create if needed
This method creates or checks a queue. When creating a new
queue the client can specify various properties that control
the durability of the queue and its contents, and the level of
sharing for the queue.
RULE:
The server MUST create a default binding for a newly-
created queue to the default exchange, which is an
exchange of type 'direct'.
RULE:
The server SHOULD support a minimum of 256 queues per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
queue: shortstr
RULE:
The queue name MAY be empty, in which case the
server MUST create a new queue with a unique
generated name and return this to the client in
the Declare-Ok method.
RULE:
Queue names starting with "amq." are reserved for
predeclared and standardised server queues. If
the queue name starts with "amq." and the passive
option is False, the server MUST raise a connection
exception with reply code 403 (access refused).
passive: boolean
do not create queue
If set, the server will not create the queue. The
client can use this to check whether a queue exists
without modifying the server state.
RULE:
If set, and the queue does not already exist, the
server MUST respond with a reply code 404 (not
found) and raise a channel exception.
durable: boolean
request a durable queue
If set when creating a new queue, the queue will be
marked as durable. Durable queues remain active when
a server restarts. Non-durable queues (transient
queues) are purged if/when a server restarts. Note
that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
RULE:
The server MUST recreate the durable queue after a
restart.
RULE:
The server MUST support both durable and transient
queues.
RULE:
The server MUST ignore the durable field if the
queue already exists.
exclusive: boolean
request an exclusive queue
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
RULE:
The server MUST support both exclusive (private)
and non-exclusive (shared) queues.
RULE:
The server MUST raise a channel exception if
'exclusive' is specified and the queue already
exists and is owned by a different connection.
auto_delete: boolean
auto-delete queue when unused
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that a queue
is not being used (or no longer used), and the
point when it deletes the queue. At the least it
must allow a client to create a queue and then
create a consumer to read from it, with a small
but non-zero delay between these two actions. The
server should equally allow for clients that may
be disconnected prematurely, and wish to re-
consume from the same queue without losing
messages. We would recommend a configurable
timeout, with a suitable default value being one
minute.
RULE:
The server MUST ignore the auto-delete field if
the queue already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
Returns a tuple containing 3 items:
the name of the queue (essential for automatically-named queues)
message count
consumer count
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_bit(passive)
args.write_bit(durable)
args.write_bit(exclusive)
args.write_bit(auto_delete)
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((50, 10), args)
if not nowait:
return self.wait(allowed_methods=[
(50, 11), # Channel.queue_declare_ok
]) | python | def queue_declare(self, queue='', passive=False, durable=False,
exclusive=False, auto_delete=True, nowait=False,
arguments=None):
"""Declare queue, create if needed
This method creates or checks a queue. When creating a new
queue the client can specify various properties that control
the durability of the queue and its contents, and the level of
sharing for the queue.
RULE:
The server MUST create a default binding for a newly-
created queue to the default exchange, which is an
exchange of type 'direct'.
RULE:
The server SHOULD support a minimum of 256 queues per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
queue: shortstr
RULE:
The queue name MAY be empty, in which case the
server MUST create a new queue with a unique
generated name and return this to the client in
the Declare-Ok method.
RULE:
Queue names starting with "amq." are reserved for
predeclared and standardised server queues. If
the queue name starts with "amq." and the passive
option is False, the server MUST raise a connection
exception with reply code 403 (access refused).
passive: boolean
do not create queue
If set, the server will not create the queue. The
client can use this to check whether a queue exists
without modifying the server state.
RULE:
If set, and the queue does not already exist, the
server MUST respond with a reply code 404 (not
found) and raise a channel exception.
durable: boolean
request a durable queue
If set when creating a new queue, the queue will be
marked as durable. Durable queues remain active when
a server restarts. Non-durable queues (transient
queues) are purged if/when a server restarts. Note
that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
RULE:
The server MUST recreate the durable queue after a
restart.
RULE:
The server MUST support both durable and transient
queues.
RULE:
The server MUST ignore the durable field if the
queue already exists.
exclusive: boolean
request an exclusive queue
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
RULE:
The server MUST support both exclusive (private)
and non-exclusive (shared) queues.
RULE:
The server MUST raise a channel exception if
'exclusive' is specified and the queue already
exists and is owned by a different connection.
auto_delete: boolean
auto-delete queue when unused
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that a queue
is not being used (or no longer used), and the
point when it deletes the queue. At the least it
must allow a client to create a queue and then
create a consumer to read from it, with a small
but non-zero delay between these two actions. The
server should equally allow for clients that may
be disconnected prematurely, and wish to re-
consume from the same queue without losing
messages. We would recommend a configurable
timeout, with a suitable default value being one
minute.
RULE:
The server MUST ignore the auto-delete field if
the queue already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
Returns a tuple containing 3 items:
the name of the queue (essential for automatically-named queues)
message count
consumer count
"""
arguments = {} if arguments is None else arguments
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_bit(passive)
args.write_bit(durable)
args.write_bit(exclusive)
args.write_bit(auto_delete)
args.write_bit(nowait)
args.write_table(arguments)
self._send_method((50, 10), args)
if not nowait:
return self.wait(allowed_methods=[
(50, 11), # Channel.queue_declare_ok
]) | [
"def",
"queue_declare",
"(",
"self",
",",
"queue",
"=",
"''",
",",
"passive",
"=",
"False",
",",
"durable",
"=",
"False",
",",
"exclusive",
"=",
"False",
",",
"auto_delete",
"=",
"True",
",",
"nowait",
"=",
"False",
",",
"arguments",
"=",
"None",
")",
... | Declare queue, create if needed
This method creates or checks a queue. When creating a new
queue the client can specify various properties that control
the durability of the queue and its contents, and the level of
sharing for the queue.
RULE:
The server MUST create a default binding for a newly-
created queue to the default exchange, which is an
exchange of type 'direct'.
RULE:
The server SHOULD support a minimum of 256 queues per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
queue: shortstr
RULE:
The queue name MAY be empty, in which case the
server MUST create a new queue with a unique
generated name and return this to the client in
the Declare-Ok method.
RULE:
Queue names starting with "amq." are reserved for
predeclared and standardised server queues. If
the queue name starts with "amq." and the passive
option is False, the server MUST raise a connection
exception with reply code 403 (access refused).
passive: boolean
do not create queue
If set, the server will not create the queue. The
client can use this to check whether a queue exists
without modifying the server state.
RULE:
If set, and the queue does not already exist, the
server MUST respond with a reply code 404 (not
found) and raise a channel exception.
durable: boolean
request a durable queue
If set when creating a new queue, the queue will be
marked as durable. Durable queues remain active when
a server restarts. Non-durable queues (transient
queues) are purged if/when a server restarts. Note
that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
RULE:
The server MUST recreate the durable queue after a
restart.
RULE:
The server MUST support both durable and transient
queues.
RULE:
The server MUST ignore the durable field if the
queue already exists.
exclusive: boolean
request an exclusive queue
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
RULE:
The server MUST support both exclusive (private)
and non-exclusive (shared) queues.
RULE:
The server MUST raise a channel exception if
'exclusive' is specified and the queue already
exists and is owned by a different connection.
auto_delete: boolean
auto-delete queue when unused
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that a queue
is not being used (or no longer used), and the
point when it deletes the queue. At the least it
must allow a client to create a queue and then
create a consumer to read from it, with a small
but non-zero delay between these two actions. The
server should equally allow for clients that may
be disconnected prematurely, and wish to re-
consume from the same queue without losing
messages. We would recommend a configurable
timeout, with a suitable default value being one
minute.
RULE:
The server MUST ignore the auto-delete field if
the queue already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
Returns a tuple containing 3 items:
the name of the queue (essential for automatically-named queues)
message count
consumer count | [
"Declare",
"queue",
"create",
"if",
"needed"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1090-L1260 | train | 227,370 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._queue_declare_ok | def _queue_declare_ok(self, args):
"""Confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count.
"""
return queue_declare_ok_t(
args.read_shortstr(),
args.read_long(),
args.read_long(),
) | python | def _queue_declare_ok(self, args):
"""Confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count.
"""
return queue_declare_ok_t(
args.read_shortstr(),
args.read_long(),
args.read_long(),
) | [
"def",
"_queue_declare_ok",
"(",
"self",
",",
"args",
")",
":",
"return",
"queue_declare_ok_t",
"(",
"args",
".",
"read_shortstr",
"(",
")",
",",
"args",
".",
"read_long",
"(",
")",
",",
"args",
".",
"read_long",
"(",
")",
",",
")"
] | Confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count. | [
"Confirms",
"a",
"queue",
"definition"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1262-L1295 | train | 227,371 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.queue_delete | def queue_delete(self, queue='',
if_unused=False, if_empty=False, nowait=False):
"""Delete a queue
This method deletes a queue. When a queue is deleted any
pending messages are sent to a dead-letter queue if this is
defined in the server configuration, and all consumers on the
queue are cancelled.
RULE:
The server SHOULD use a dead-letter queue to hold messages
that were pending on a deleted queue, and MAY provide
facilities for a system administrator to move these
messages back to an active queue.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to delete. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to delete a non-
existing queue causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the queue if it
has no consumers. If the queue has consumers the
server does not delete it but raises a channel
exception instead.
RULE:
The server MUST respect the if-unused flag when
deleting a queue.
if_empty: boolean
delete only if empty
If set, the server will only delete the queue if it
has no messages. If the queue is not empty the server
raises a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_bit(if_unused)
args.write_bit(if_empty)
args.write_bit(nowait)
self._send_method((50, 40), args)
if not nowait:
return self.wait(allowed_methods=[
(50, 41), # Channel.queue_delete_ok
]) | python | def queue_delete(self, queue='',
if_unused=False, if_empty=False, nowait=False):
"""Delete a queue
This method deletes a queue. When a queue is deleted any
pending messages are sent to a dead-letter queue if this is
defined in the server configuration, and all consumers on the
queue are cancelled.
RULE:
The server SHOULD use a dead-letter queue to hold messages
that were pending on a deleted queue, and MAY provide
facilities for a system administrator to move these
messages back to an active queue.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to delete. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to delete a non-
existing queue causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the queue if it
has no consumers. If the queue has consumers the
server does not delete it but raises a channel
exception instead.
RULE:
The server MUST respect the if-unused flag when
deleting a queue.
if_empty: boolean
delete only if empty
If set, the server will only delete the queue if it
has no messages. If the queue is not empty the server
raises a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_bit(if_unused)
args.write_bit(if_empty)
args.write_bit(nowait)
self._send_method((50, 40), args)
if not nowait:
return self.wait(allowed_methods=[
(50, 41), # Channel.queue_delete_ok
]) | [
"def",
"queue_delete",
"(",
"self",
",",
"queue",
"=",
"''",
",",
"if_unused",
"=",
"False",
",",
"if_empty",
"=",
"False",
",",
"nowait",
"=",
"False",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_short",
"(",
"0",
")",
"args",... | Delete a queue
This method deletes a queue. When a queue is deleted any
pending messages are sent to a dead-letter queue if this is
defined in the server configuration, and all consumers on the
queue are cancelled.
RULE:
The server SHOULD use a dead-letter queue to hold messages
that were pending on a deleted queue, and MAY provide
facilities for a system administrator to move these
messages back to an active queue.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to delete. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to delete a non-
existing queue causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the queue if it
has no consumers. If the queue has consumers the
server does does not delete it but raises a channel
exception instead.
RULE:
The server MUST respect the if-unused flag when
deleting a queue.
if_empty: boolean
delete only if empty
If set, the server will only delete the queue if it
has no messages. If the queue is not empty the server
raises a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception. | [
"Delete",
"a",
"queue"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1297-L1375 | train | 227,372 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.queue_purge | def queue_purge(self, queue='', nowait=False):
"""Purge a queue
This method removes all messages from a queue. It does not
cancel consumers. Purged messages are deleted without any
formal "undo" mechanism.
RULE:
A call to purge MUST result in an empty queue.
RULE:
On transacted channels the server MUST not purge messages
that have already been sent to a client but not yet
acknowledged.
RULE:
The server MAY implement a purge queue or log that allows
system administrators to recover accidentally-purged
messages. The server SHOULD NOT keep purged messages in
the same storage spaces as the live messages since the
volumes of purged messages may get very large.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to purge. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to purge a non-
existing queue causes a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
if nowait is False, returns a message_count
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_bit(nowait)
self._send_method((50, 30), args)
if not nowait:
return self.wait(allowed_methods=[
(50, 31), # Channel.queue_purge_ok
]) | python | def queue_purge(self, queue='', nowait=False):
"""Purge a queue
This method removes all messages from a queue. It does not
cancel consumers. Purged messages are deleted without any
formal "undo" mechanism.
RULE:
A call to purge MUST result in an empty queue.
RULE:
On transacted channels the server MUST not purge messages
that have already been sent to a client but not yet
acknowledged.
RULE:
The server MAY implement a purge queue or log that allows
system administrators to recover accidentally-purged
messages. The server SHOULD NOT keep purged messages in
the same storage spaces as the live messages since the
volumes of purged messages may get very large.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to purge. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to purge a non-
existing queue causes a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
if nowait is False, returns a message_count
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_bit(nowait)
self._send_method((50, 30), args)
if not nowait:
return self.wait(allowed_methods=[
(50, 31), # Channel.queue_purge_ok
]) | [
"def",
"queue_purge",
"(",
"self",
",",
"queue",
"=",
"''",
",",
"nowait",
"=",
"False",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_short",
"(",
"0",
")",
"args",
".",
"write_shortstr",
"(",
"queue",
")",
"args",
".",
"write_b... | Purge a queue
This method removes all messages from a queue. It does not
cancel consumers. Purged messages are deleted without any
formal "undo" mechanism.
RULE:
A call to purge MUST result in an empty queue.
RULE:
On transacted channels the server MUST not purge messages
that have already been sent to a client but not yet
acknowledged.
RULE:
The server MAY implement a purge queue or log that allows
system administrators to recover accidentally-purged
messages. The server SHOULD NOT keep purged messages in
the same storage spaces as the live messages since the
volumes of purged messages may get very large.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to purge. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to purge a non-
existing queue causes a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
if nowait is False, returns a message_count | [
"Purge",
"a",
"queue"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1392-L1457 | train | 227,373 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.basic_ack | def basic_ack(self, delivery_tag, multiple=False):
"""Acknowledge one or more messages
This method acknowledges one or more messages delivered via
the Deliver or Get-Ok methods. The client can ask to confirm
a single message or a set of messages up to and including a
specific message.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
multiple: boolean
acknowledge multiple messages
If set to True, the delivery tag is treated as "up to
and including", so that the client can acknowledge
multiple messages with a single method. If set to
False, the delivery tag refers to a single message.
If the multiple field is True, and the delivery tag
is zero, tells the server to acknowledge all
outstanding mesages.
RULE:
The server MUST validate that a non-zero delivery-
tag refers to an delivered message, and raise a
channel exception if this is not the case.
"""
args = AMQPWriter()
args.write_longlong(delivery_tag)
args.write_bit(multiple)
self._send_method((60, 80), args) | python | def basic_ack(self, delivery_tag, multiple=False):
"""Acknowledge one or more messages
This method acknowledges one or more messages delivered via
the Deliver or Get-Ok methods. The client can ask to confirm
a single message or a set of messages up to and including a
specific message.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
multiple: boolean
acknowledge multiple messages
If set to True, the delivery tag is treated as "up to
and including", so that the client can acknowledge
multiple messages with a single method. If set to
False, the delivery tag refers to a single message.
If the multiple field is True, and the delivery tag
is zero, tells the server to acknowledge all
outstanding mesages.
RULE:
The server MUST validate that a non-zero delivery-
tag refers to an delivered message, and raise a
channel exception if this is not the case.
"""
args = AMQPWriter()
args.write_longlong(delivery_tag)
args.write_bit(multiple)
self._send_method((60, 80), args) | [
"def",
"basic_ack",
"(",
"self",
",",
"delivery_tag",
",",
"multiple",
"=",
"False",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_longlong",
"(",
"delivery_tag",
")",
"args",
".",
"write_bit",
"(",
"multiple",
")",
"self",
".",
"_se... | Acknowledge one or more messages
This method acknowledges one or more messages delivered via
the Deliver or Get-Ok methods. The client can ask to confirm
a single message or a set of messages up to and including a
specific message.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
multiple: boolean
acknowledge multiple messages
If set to True, the delivery tag is treated as "up to
and including", so that the client can acknowledge
multiple messages with a single method. If set to
False, the delivery tag refers to a single message.
If the multiple field is True, and the delivery tag
is zero, tells the server to acknowledge all
outstanding mesages.
RULE:
The server MUST validate that a non-zero delivery-
tag refers to an delivered message, and raise a
channel exception if this is not the case. | [
"Acknowledge",
"one",
"or",
"more",
"messages"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1534-L1584 | train | 227,374 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.basic_cancel | def basic_cancel(self, consumer_tag, nowait=False):
"""End a queue consumer
This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send
any more messages for that consumer. The client may receive
an abitrary number of messages in between sending the cancel
method and receiving the cancel-ok reply.
RULE:
If the queue no longer exists when the client sends a
cancel command, or the consumer has been cancelled for
other reasons, this command has no effect.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
"""
if self.connection is not None:
self.no_ack_consumers.discard(consumer_tag)
args = AMQPWriter()
args.write_shortstr(consumer_tag)
args.write_bit(nowait)
self._send_method((60, 30), args)
return self.wait(allowed_methods=[
(60, 31), # Channel.basic_cancel_ok
]) | python | def basic_cancel(self, consumer_tag, nowait=False):
"""End a queue consumer
This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send
any more messages for that consumer. The client may receive
an abitrary number of messages in between sending the cancel
method and receiving the cancel-ok reply.
RULE:
If the queue no longer exists when the client sends a
cancel command, or the consumer has been cancelled for
other reasons, this command has no effect.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
"""
if self.connection is not None:
self.no_ack_consumers.discard(consumer_tag)
args = AMQPWriter()
args.write_shortstr(consumer_tag)
args.write_bit(nowait)
self._send_method((60, 30), args)
return self.wait(allowed_methods=[
(60, 31), # Channel.basic_cancel_ok
]) | [
"def",
"basic_cancel",
"(",
"self",
",",
"consumer_tag",
",",
"nowait",
"=",
"False",
")",
":",
"if",
"self",
".",
"connection",
"is",
"not",
"None",
":",
"self",
".",
"no_ack_consumers",
".",
"discard",
"(",
"consumer_tag",
")",
"args",
"=",
"AMQPWriter",... | End a queue consumer
This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send
any more messages for that consumer. The client may receive
an abitrary number of messages in between sending the cancel
method and receiving the cancel-ok reply.
RULE:
If the queue no longer exists when the client sends a
cancel command, or the consumer has been cancelled for
other reasons, this command has no effect.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception. | [
"End",
"a",
"queue",
"consumer"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1586-L1634 | train | 227,375 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._basic_cancel_notify | def _basic_cancel_notify(self, args):
"""Consumer cancelled by server.
Most likely the queue was deleted.
"""
consumer_tag = args.read_shortstr()
callback = self._on_cancel(consumer_tag)
if callback:
callback(consumer_tag)
else:
raise ConsumerCancelled(consumer_tag, (60, 30)) | python | def _basic_cancel_notify(self, args):
"""Consumer cancelled by server.
Most likely the queue was deleted.
"""
consumer_tag = args.read_shortstr()
callback = self._on_cancel(consumer_tag)
if callback:
callback(consumer_tag)
else:
raise ConsumerCancelled(consumer_tag, (60, 30)) | [
"def",
"_basic_cancel_notify",
"(",
"self",
",",
"args",
")",
":",
"consumer_tag",
"=",
"args",
".",
"read_shortstr",
"(",
")",
"callback",
"=",
"self",
".",
"_on_cancel",
"(",
"consumer_tag",
")",
"if",
"callback",
":",
"callback",
"(",
"consumer_tag",
")",... | Consumer cancelled by server.
Most likely the queue was deleted. | [
"Consumer",
"cancelled",
"by",
"server",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1636-L1647 | train | 227,376 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.basic_consume | def basic_consume(self, queue='', consumer_tag='', no_local=False,
no_ack=False, exclusive=False, nowait=False,
callback=None, arguments=None, on_cancel=None):
"""Start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_shortstr(consumer_tag)
args.write_bit(no_local)
args.write_bit(no_ack)
args.write_bit(exclusive)
args.write_bit(nowait)
args.write_table(arguments or {})
self._send_method((60, 20), args)
if not nowait:
consumer_tag = self.wait(allowed_methods=[
(60, 21), # Channel.basic_consume_ok
])
self.callbacks[consumer_tag] = callback
if on_cancel:
self.cancel_callbacks[consumer_tag] = on_cancel
if no_ack:
self.no_ack_consumers.add(consumer_tag)
return consumer_tag | python | def basic_consume(self, queue='', consumer_tag='', no_local=False,
no_ack=False, exclusive=False, nowait=False,
callback=None, arguments=None, on_cancel=None):
"""Start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(queue)
args.write_shortstr(consumer_tag)
args.write_bit(no_local)
args.write_bit(no_ack)
args.write_bit(exclusive)
args.write_bit(nowait)
args.write_table(arguments or {})
self._send_method((60, 20), args)
if not nowait:
consumer_tag = self.wait(allowed_methods=[
(60, 21), # Channel.basic_consume_ok
])
self.callbacks[consumer_tag] = callback
if on_cancel:
self.cancel_callbacks[consumer_tag] = on_cancel
if no_ack:
self.no_ack_consumers.add(consumer_tag)
return consumer_tag | [
"def",
"basic_consume",
"(",
"self",
",",
"queue",
"=",
"''",
",",
"consumer_tag",
"=",
"''",
",",
"no_local",
"=",
"False",
",",
"no_ack",
"=",
"False",
",",
"exclusive",
"=",
"False",
",",
"nowait",
"=",
"False",
",",
"callback",
"=",
"None",
",",
... | Start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case. | [
"Start",
"a",
"queue",
"consumer"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1677-L1798 | train | 227,377 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._basic_deliver | def _basic_deliver(self, args, msg):
"""Notify the client of a consumer message
This method delivers a message to the client, via a consumer.
In the asynchronous message delivery model, the client starts
a consumer using the Consume method, then the server responds
with Deliver methods as and when messages arrive for that
consumer.
RULE:
The server SHOULD track the number of times a message has
been delivered to clients and when a message is
redelivered a certain number of times - e.g. 5 times -
without being acknowledged, the server SHOULD consider the
message to be unprocessable (possibly causing client
applications to abort), and move the message to a dead
letter queue.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
consumer_tag = args.read_shortstr()
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
msg.channel = self
msg.delivery_info = {
'consumer_tag': consumer_tag,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
try:
fun = self.callbacks[consumer_tag]
except KeyError:
pass
else:
fun(msg) | python | def _basic_deliver(self, args, msg):
"""Notify the client of a consumer message
This method delivers a message to the client, via a consumer.
In the asynchronous message delivery model, the client starts
a consumer using the Consume method, then the server responds
with Deliver methods as and when messages arrive for that
consumer.
RULE:
The server SHOULD track the number of times a message has
been delivered to clients and when a message is
redelivered a certain number of times - e.g. 5 times -
without being acknowledged, the server SHOULD consider the
message to be unprocessable (possibly causing client
applications to abort), and move the message to a dead
letter queue.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
consumer_tag = args.read_shortstr()
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
msg.channel = self
msg.delivery_info = {
'consumer_tag': consumer_tag,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
try:
fun = self.callbacks[consumer_tag]
except KeyError:
pass
else:
fun(msg) | [
"def",
"_basic_deliver",
"(",
"self",
",",
"args",
",",
"msg",
")",
":",
"consumer_tag",
"=",
"args",
".",
"read_shortstr",
"(",
")",
"delivery_tag",
"=",
"args",
".",
"read_longlong",
"(",
")",
"redelivered",
"=",
"args",
".",
"read_bit",
"(",
")",
"exc... | Notify the client of a consumer message
This method delivers a message to the client, via a consumer.
In the asynchronous message delivery model, the client starts
a consumer using the Consume method, then the server responds
with Deliver methods as and when messages arrive for that
consumer.
RULE:
The server SHOULD track the number of times a message has
been delivered to clients and when a message is
redelivered a certain number of times - e.g. 5 times -
without being acknowledged, the server SHOULD consider the
message to be unprocessable (possibly causing client
applications to abort), and move the message to a dead
letter queue.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published. | [
"Notify",
"the",
"client",
"of",
"a",
"consumer",
"message"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1816-L1909 | train | 227,378 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._basic_get_ok | def _basic_get_ok(self, args, msg):
"""Provide client with a message
This method delivers a message to the client following a get
method. A message delivered by 'get-ok' must be acknowledged
unless the no-ack option was set in the get method.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to. If empty, the message
was published to the default exchange.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
message_count: long
number of messages pending
This field reports the number of messages pending on
the queue, excluding the message being delivered.
Note that this figure is indicative, not reliable, and
can change arbitrarily as messages are added to the
queue and removed by other clients.
"""
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
message_count = args.read_long()
msg.channel = self
msg.delivery_info = {
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
'message_count': message_count
}
return msg | python | def _basic_get_ok(self, args, msg):
"""Provide client with a message
This method delivers a message to the client following a get
method. A message delivered by 'get-ok' must be acknowledged
unless the no-ack option was set in the get method.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to. If empty, the message
was published to the default exchange.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
message_count: long
number of messages pending
This field reports the number of messages pending on
the queue, excluding the message being delivered.
Note that this figure is indicative, not reliable, and
can change arbitrarily as messages are added to the
queue and removed by other clients.
"""
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
message_count = args.read_long()
msg.channel = self
msg.delivery_info = {
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
'message_count': message_count
}
return msg | [
"def",
"_basic_get_ok",
"(",
"self",
",",
"args",
",",
"msg",
")",
":",
"delivery_tag",
"=",
"args",
".",
"read_longlong",
"(",
")",
"redelivered",
"=",
"args",
".",
"read_bit",
"(",
")",
"exchange",
"=",
"args",
".",
"read_shortstr",
"(",
")",
"routing_... | Provide client with a message
This method delivers a message to the client following a get
method. A message delivered by 'get-ok' must be acknowledged
unless the no-ack option was set in the get method.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously
delivered to this or another client.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to. If empty, the message
was published to the default exchange.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
message_count: long
number of messages pending
This field reports the number of messages pending on
the queue, excluding the message being delivered.
Note that this figure is indicative, not reliable, and
can change arbitrarily as messages are added to the
queue and removed by other clients. | [
"Provide",
"client",
"with",
"a",
"message"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L1975-L2047 | train | 227,379 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._basic_publish | def _basic_publish(self, msg, exchange='', routing_key='',
mandatory=False, immediate=False):
"""Publish a message
This method publishes a message to a specific exchange. The
message will be routed to queues as defined by the exchange
configuration and distributed to any active consumers when the
transaction, if any, is committed.
PARAMETERS:
exchange: shortstr
Specifies the name of the exchange to publish to. The
exchange name can be empty, meaning the default
exchange. If the exchange name is specified, and that
exchange does not exist, the server will raise a
channel exception.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
RULE:
The exchange MAY refuse basic content in which
case it MUST raise a channel exception with reply
code 540 (not implemented).
routing_key: shortstr
Message routing key
Specifies the routing key for the message. The
routing key is used for routing messages depending on
the exchange configuration.
mandatory: boolean
indicate mandatory routing
This flag tells the server how to react if the message
cannot be routed to a queue. If this flag is True, the
server will return an unroutable message with a Return
method. If this flag is False, the server silently
drops the message.
RULE:
The server SHOULD implement the mandatory flag.
immediate: boolean
request immediate delivery
This flag tells the server how to react if the message
cannot be routed to a queue consumer immediately. If
this flag is set, the server will return an
undeliverable message with a Return method. If this
flag is zero, the server will queue the message, but
with no guarantee that it will ever be consumed.
RULE:
The server SHOULD implement the immediate flag.
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(exchange)
args.write_shortstr(routing_key)
args.write_bit(mandatory)
args.write_bit(immediate)
self._send_method((60, 40), args, msg) | python | def _basic_publish(self, msg, exchange='', routing_key='',
mandatory=False, immediate=False):
"""Publish a message
This method publishes a message to a specific exchange. The
message will be routed to queues as defined by the exchange
configuration and distributed to any active consumers when the
transaction, if any, is committed.
PARAMETERS:
exchange: shortstr
Specifies the name of the exchange to publish to. The
exchange name can be empty, meaning the default
exchange. If the exchange name is specified, and that
exchange does not exist, the server will raise a
channel exception.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
RULE:
The exchange MAY refuse basic content in which
case it MUST raise a channel exception with reply
code 540 (not implemented).
routing_key: shortstr
Message routing key
Specifies the routing key for the message. The
routing key is used for routing messages depending on
the exchange configuration.
mandatory: boolean
indicate mandatory routing
This flag tells the server how to react if the message
cannot be routed to a queue. If this flag is True, the
server will return an unroutable message with a Return
method. If this flag is False, the server silently
drops the message.
RULE:
The server SHOULD implement the mandatory flag.
immediate: boolean
request immediate delivery
This flag tells the server how to react if the message
cannot be routed to a queue consumer immediately. If
this flag is set, the server will return an
undeliverable message with a Return method. If this
flag is zero, the server will queue the message, but
with no guarantee that it will ever be consumed.
RULE:
The server SHOULD implement the immediate flag.
"""
args = AMQPWriter()
args.write_short(0)
args.write_shortstr(exchange)
args.write_shortstr(routing_key)
args.write_bit(mandatory)
args.write_bit(immediate)
self._send_method((60, 40), args, msg) | [
"def",
"_basic_publish",
"(",
"self",
",",
"msg",
",",
"exchange",
"=",
"''",
",",
"routing_key",
"=",
"''",
",",
"mandatory",
"=",
"False",
",",
"immediate",
"=",
"False",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_short",
"(",... | Publish a message
This method publishes a message to a specific exchange. The
message will be routed to queues as defined by the exchange
configuration and distributed to any active consumers when the
transaction, if any, is committed.
PARAMETERS:
exchange: shortstr
Specifies the name of the exchange to publish to. The
exchange name can be empty, meaning the default
exchange. If the exchange name is specified, and that
exchange does not exist, the server will raise a
channel exception.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
RULE:
The exchange MAY refuse basic content in which
case it MUST raise a channel exception with reply
code 540 (not implemented).
routing_key: shortstr
Message routing key
Specifies the routing key for the message. The
routing key is used for routing messages depending on
the exchange configuration.
mandatory: boolean
indicate mandatory routing
This flag tells the server how to react if the message
cannot be routed to a queue. If this flag is True, the
server will return an unroutable message with a Return
method. If this flag is False, the server silently
drops the message.
RULE:
The server SHOULD implement the mandatory flag.
immediate: boolean
request immediate delivery
This flag tells the server how to react if the message
cannot be routed to a queue consumer immediately. If
this flag is set, the server will return an
undeliverable message with a Return method. If this
flag is zero, the server will queue the message, but
with no guarantee that it will ever be consumed.
RULE:
The server SHOULD implement the immediate flag. | [
"Publish",
"a",
"message"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L2049-L2123 | train | 227,380 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.basic_qos | def basic_qos(self, prefetch_size, prefetch_count, a_global):
"""Specify quality of service
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
PARAMETERS:
prefetch_size: long
prefetch window in octets
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The prefetch-size is ignored if the no-ack
option is set.
RULE:
The server MUST ignore this setting when the
client is not processing any messages - i.e. the
prefetch size does not limit the transfer of
single messages to a client, only the sending in
advance of more messages while the client still
has one or more unacknowledged messages.
prefetch_count: short
prefetch window in messages
Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
the prefetch-size field; a message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the no-ack option is set.
RULE:
The server MAY send less data in advance than
allowed by the client's specified prefetch windows
but it MUST NOT send more.
a_global: boolean
apply to entire connection
By default the QoS settings apply to the current
channel only. If this field is set, they are applied
to the entire connection.
"""
args = AMQPWriter()
args.write_long(prefetch_size)
args.write_short(prefetch_count)
args.write_bit(a_global)
self._send_method((60, 10), args)
return self.wait(allowed_methods=[
(60, 11), # Channel.basic_qos_ok
]) | python | def basic_qos(self, prefetch_size, prefetch_count, a_global):
"""Specify quality of service
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
PARAMETERS:
prefetch_size: long
prefetch window in octets
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The prefetch-size is ignored if the no-ack
option is set.
RULE:
The server MUST ignore this setting when the
client is not processing any messages - i.e. the
prefetch size does not limit the transfer of
single messages to a client, only the sending in
advance of more messages while the client still
has one or more unacknowledged messages.
prefetch_count: short
prefetch window in messages
Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
the prefetch-size field; a message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the no-ack option is set.
RULE:
The server MAY send less data in advance than
allowed by the client's specified prefetch windows
but it MUST NOT send more.
a_global: boolean
apply to entire connection
By default the QoS settings apply to the current
channel only. If this field is set, they are applied
to the entire connection.
"""
args = AMQPWriter()
args.write_long(prefetch_size)
args.write_short(prefetch_count)
args.write_bit(a_global)
self._send_method((60, 10), args)
return self.wait(allowed_methods=[
(60, 11), # Channel.basic_qos_ok
]) | [
"def",
"basic_qos",
"(",
"self",
",",
"prefetch_size",
",",
"prefetch_count",
",",
"a_global",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_long",
"(",
"prefetch_size",
")",
"args",
".",
"write_short",
"(",
"prefetch_count",
")",
"args"... | Specify quality of service
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
PARAMETERS:
prefetch_size: long
prefetch window in octets
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The prefetch-size is ignored if the no-ack
option is set.
RULE:
The server MUST ignore this setting when the
client is not processing any messages - i.e. the
prefetch size does not limit the transfer of
single messages to a client, only the sending in
advance of more messages while the client still
has one or more unacknowledged messages.
prefetch_count: short
prefetch window in messages
Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
the prefetch-size field; a message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the no-ack option is set.
RULE:
The server MAY send less data in advance than
allowed by the client's specified prefetch windows
but it MUST NOT send more.
a_global: boolean
apply to entire connection
By default the QoS settings apply to the current
channel only. If this field is set, they are applied
to the entire connection. | [
"Specify",
"quality",
"of",
"service"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L2135-L2206 | train | 227,381 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.basic_recover | def basic_recover(self, requeue=False):
"""Redeliver unacknowledged messages
This method asks the broker to redeliver all unacknowledged
messages on a specified channel. Zero or more messages may be
redelivered. This method is only allowed on non-transacted
channels.
RULE:
The server MUST set the redelivered flag on all messages
that are resent.
RULE:
The server MUST raise a channel exception if this is
called on a transacted channel.
PARAMETERS:
requeue: boolean
requeue the message
If this field is False, the message will be redelivered
to the original recipient. If this field is True, the
server will attempt to requeue the message,
potentially then delivering it to an alternative
subscriber.
"""
args = AMQPWriter()
args.write_bit(requeue)
self._send_method((60, 110), args) | python | def basic_recover(self, requeue=False):
"""Redeliver unacknowledged messages
This method asks the broker to redeliver all unacknowledged
messages on a specified channel. Zero or more messages may be
redelivered. This method is only allowed on non-transacted
channels.
RULE:
The server MUST set the redelivered flag on all messages
that are resent.
RULE:
The server MUST raise a channel exception if this is
called on a transacted channel.
PARAMETERS:
requeue: boolean
requeue the message
If this field is False, the message will be redelivered
to the original recipient. If this field is True, the
server will attempt to requeue the message,
potentially then delivering it to an alternative
subscriber.
"""
args = AMQPWriter()
args.write_bit(requeue)
self._send_method((60, 110), args) | [
"def",
"basic_recover",
"(",
"self",
",",
"requeue",
"=",
"False",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_bit",
"(",
"requeue",
")",
"self",
".",
"_send_method",
"(",
"(",
"60",
",",
"110",
")",
",",
"args",
")"
] | Redeliver unacknowledged messages
This method asks the broker to redeliver all unacknowledged
messages on a specified channel. Zero or more messages may be
redelivered. This method is only allowed on non-transacted
channels.
RULE:
The server MUST set the redelivered flag on all messages
that are resent.
RULE:
The server MUST raise a channel exception if this is
called on a transacted channel.
PARAMETERS:
requeue: boolean
requeue the message
If this field is False, the message will be redelivered
to the original recipient. If this field is True, the
server will attempt to requeue the message,
potentially then delivering it to an alternative
subscriber. | [
"Redeliver",
"unacknowledged",
"messages"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L2218-L2250 | train | 227,382 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel.basic_reject | def basic_reject(self, delivery_tag, requeue):
"""Reject an incoming message
This method allows a client to reject a message. It can be
used to interrupt and cancel large incoming messages, or
return untreatable messages to their original queue.
RULE:
The server SHOULD be capable of accepting and process the
Reject method while sending message content with a Deliver
or Get-Ok method. I.e. the server should read and process
incoming methods while sending output frames. To cancel a
partially-send content, the server sends a content body
frame of size 1 (i.e. with no data except the frame-end
octet).
RULE:
The server SHOULD interpret this method as meaning that
the client is unable to process the message at this time.
RULE:
A client MUST NOT use this method as a means of selecting
messages to process. A rejected message MAY be discarded
or dead-lettered, not necessarily passed to another
client.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
requeue: boolean
requeue the message
If this field is False, the message will be discarded.
If this field is True, the server will attempt to
requeue the message.
RULE:
The server MUST NOT deliver the message to the
same client within the context of the current
channel. The recommended strategy is to attempt
to deliver the message to an alternative consumer,
and if that is not possible, to move the message
to a dead-letter queue. The server MAY use more
sophisticated tracking to hold the message on the
queue and redeliver it to the same client at a
later stage.
"""
args = AMQPWriter()
args.write_longlong(delivery_tag)
args.write_bit(requeue)
self._send_method((60, 90), args) | python | def basic_reject(self, delivery_tag, requeue):
"""Reject an incoming message
This method allows a client to reject a message. It can be
used to interrupt and cancel large incoming messages, or
return untreatable messages to their original queue.
RULE:
The server SHOULD be capable of accepting and process the
Reject method while sending message content with a Deliver
or Get-Ok method. I.e. the server should read and process
incoming methods while sending output frames. To cancel a
partially-send content, the server sends a content body
frame of size 1 (i.e. with no data except the frame-end
octet).
RULE:
The server SHOULD interpret this method as meaning that
the client is unable to process the message at this time.
RULE:
A client MUST NOT use this method as a means of selecting
messages to process. A rejected message MAY be discarded
or dead-lettered, not necessarily passed to another
client.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
requeue: boolean
requeue the message
If this field is False, the message will be discarded.
If this field is True, the server will attempt to
requeue the message.
RULE:
The server MUST NOT deliver the message to the
same client within the context of the current
channel. The recommended strategy is to attempt
to deliver the message to an alternative consumer,
and if that is not possible, to move the message
to a dead-letter queue. The server MAY use more
sophisticated tracking to hold the message on the
queue and redeliver it to the same client at a
later stage.
"""
args = AMQPWriter()
args.write_longlong(delivery_tag)
args.write_bit(requeue)
self._send_method((60, 90), args) | [
"def",
"basic_reject",
"(",
"self",
",",
"delivery_tag",
",",
"requeue",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"args",
".",
"write_longlong",
"(",
"delivery_tag",
")",
"args",
".",
"write_bit",
"(",
"requeue",
")",
"self",
".",
"_send_method",
"("... | Reject an incoming message
This method allows a client to reject a message. It can be
used to interrupt and cancel large incoming messages, or
return untreatable messages to their original queue.
RULE:
The server SHOULD be capable of accepting and process the
Reject method while sending message content with a Deliver
or Get-Ok method. I.e. the server should read and process
incoming methods while sending output frames. To cancel a
partially-send content, the server sends a content body
frame of size 1 (i.e. with no data except the frame-end
octet).
RULE:
The server SHOULD interpret this method as meaning that
the client is unable to process the message at this time.
RULE:
A client MUST NOT use this method as a means of selecting
messages to process. A rejected message MAY be discarded
or dead-lettered, not necessarily passed to another
client.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
requeue: boolean
requeue the message
If this field is False, the message will be discarded.
If this field is True, the server will attempt to
requeue the message.
RULE:
The server MUST NOT deliver the message to the
same client within the context of the current
channel. The recommended strategy is to attempt
to deliver the message to an alternative consumer,
and if that is not possible, to move the message
to a dead-letter queue. The server MAY use more
sophisticated tracking to hold the message on the
queue and redeliver it to the same client at a
later stage. | [
"Reject",
"an",
"incoming",
"message"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L2261-L2334 | train | 227,383 |
nerdvegas/rez | src/rez/vendor/amqp/channel.py | Channel._basic_return | def _basic_return(self, args, msg):
"""Return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
self.returned_messages.put(basic_return_t(
args.read_short(),
args.read_shortstr(),
args.read_shortstr(),
args.read_shortstr(),
msg,
)) | python | def _basic_return(self, args, msg):
"""Return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
self.returned_messages.put(basic_return_t(
args.read_short(),
args.read_shortstr(),
args.read_shortstr(),
args.read_shortstr(),
msg,
)) | [
"def",
"_basic_return",
"(",
"self",
",",
"args",
",",
"msg",
")",
":",
"self",
".",
"returned_messages",
".",
"put",
"(",
"basic_return_t",
"(",
"args",
".",
"read_short",
"(",
")",
",",
"args",
".",
"read_shortstr",
"(",
")",
",",
"args",
".",
"read_... | Return a failed message
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published. | [
"Return",
"a",
"failed",
"message"
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/channel.py#L2336-L2375 | train | 227,384 |
nerdvegas/rez | src/rez/build_process_.py | create_build_process | def create_build_process(process_type, working_dir, build_system, package=None,
vcs=None, ensure_latest=True, skip_repo_errors=False,
ignore_existing_tag=False, verbose=False, quiet=False):
"""Create a `BuildProcess` instance."""
from rez.plugin_managers import plugin_manager
process_types = get_build_process_types()
if process_type not in process_types:
raise BuildProcessError("Unknown build process: %r" % process_type)
cls = plugin_manager.get_plugin_class('build_process', process_type)
return cls(working_dir, # ignored (deprecated)
build_system,
package=package, # ignored (deprecated)
vcs=vcs,
ensure_latest=ensure_latest,
skip_repo_errors=skip_repo_errors,
ignore_existing_tag=ignore_existing_tag,
verbose=verbose,
quiet=quiet) | python | def create_build_process(process_type, working_dir, build_system, package=None,
vcs=None, ensure_latest=True, skip_repo_errors=False,
ignore_existing_tag=False, verbose=False, quiet=False):
"""Create a `BuildProcess` instance."""
from rez.plugin_managers import plugin_manager
process_types = get_build_process_types()
if process_type not in process_types:
raise BuildProcessError("Unknown build process: %r" % process_type)
cls = plugin_manager.get_plugin_class('build_process', process_type)
return cls(working_dir, # ignored (deprecated)
build_system,
package=package, # ignored (deprecated)
vcs=vcs,
ensure_latest=ensure_latest,
skip_repo_errors=skip_repo_errors,
ignore_existing_tag=ignore_existing_tag,
verbose=verbose,
quiet=quiet) | [
"def",
"create_build_process",
"(",
"process_type",
",",
"working_dir",
",",
"build_system",
",",
"package",
"=",
"None",
",",
"vcs",
"=",
"None",
",",
"ensure_latest",
"=",
"True",
",",
"skip_repo_errors",
"=",
"False",
",",
"ignore_existing_tag",
"=",
"False",... | Create a `BuildProcess` instance. | [
"Create",
"a",
"BuildProcess",
"instance",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/build_process_.py#L26-L45 | train | 227,385 |
nerdvegas/rez | src/rez/build_process_.py | BuildProcessHelper.visit_variants | def visit_variants(self, func, variants=None, **kwargs):
"""Iterate over variants and call a function on each."""
if variants:
present_variants = range(self.package.num_variants)
invalid_variants = set(variants) - set(present_variants)
if invalid_variants:
raise BuildError(
"The package does not contain the variants: %s"
% ", ".join(str(x) for x in sorted(invalid_variants)))
# iterate over variants
results = []
num_visited = 0
for variant in self.package.iter_variants():
if variants and variant.index not in variants:
self._print_header(
"Skipping variant %s (%s)..."
% (variant.index, self._n_of_m(variant)))
continue
# visit the variant
result = func(variant, **kwargs)
results.append(result)
num_visited += 1
return num_visited, results | python | def visit_variants(self, func, variants=None, **kwargs):
"""Iterate over variants and call a function on each."""
if variants:
present_variants = range(self.package.num_variants)
invalid_variants = set(variants) - set(present_variants)
if invalid_variants:
raise BuildError(
"The package does not contain the variants: %s"
% ", ".join(str(x) for x in sorted(invalid_variants)))
# iterate over variants
results = []
num_visited = 0
for variant in self.package.iter_variants():
if variants and variant.index not in variants:
self._print_header(
"Skipping variant %s (%s)..."
% (variant.index, self._n_of_m(variant)))
continue
# visit the variant
result = func(variant, **kwargs)
results.append(result)
num_visited += 1
return num_visited, results | [
"def",
"visit_variants",
"(",
"self",
",",
"func",
",",
"variants",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"variants",
":",
"present_variants",
"=",
"range",
"(",
"self",
".",
"package",
".",
"num_variants",
")",
"invalid_variants",
"=",
"... | Iterate over variants and call a function on each. | [
"Iterate",
"over",
"variants",
"and",
"call",
"a",
"function",
"on",
"each",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/build_process_.py#L175-L201 | train | 227,386 |
nerdvegas/rez | src/rez/build_process_.py | BuildProcessHelper.create_build_context | def create_build_context(self, variant, build_type, build_path):
"""Create a context to build the variant within."""
request = variant.get_requires(build_requires=True,
private_build_requires=True)
req_strs = map(str, request)
quoted_req_strs = map(quote, req_strs)
self._print("Resolving build environment: %s", ' '.join(quoted_req_strs))
if build_type == BuildType.local:
packages_path = self.package.config.packages_path
else:
packages_path = self.package.config.nonlocal_packages_path
if self.package.config.is_overridden("package_filter"):
from rez.package_filter import PackageFilterList
data = self.package.config.package_filter
package_filter = PackageFilterList.from_pod(data)
else:
package_filter = None
context = ResolvedContext(request,
package_paths=packages_path,
package_filter=package_filter,
building=True)
if self.verbose:
context.print_info()
# save context before possible fail, so user can debug
rxt_filepath = os.path.join(build_path, "build.rxt")
context.save(rxt_filepath)
if context.status != ResolverStatus.solved:
raise BuildContextResolveError(context)
return context, rxt_filepath | python | def create_build_context(self, variant, build_type, build_path):
"""Create a context to build the variant within."""
request = variant.get_requires(build_requires=True,
private_build_requires=True)
req_strs = map(str, request)
quoted_req_strs = map(quote, req_strs)
self._print("Resolving build environment: %s", ' '.join(quoted_req_strs))
if build_type == BuildType.local:
packages_path = self.package.config.packages_path
else:
packages_path = self.package.config.nonlocal_packages_path
if self.package.config.is_overridden("package_filter"):
from rez.package_filter import PackageFilterList
data = self.package.config.package_filter
package_filter = PackageFilterList.from_pod(data)
else:
package_filter = None
context = ResolvedContext(request,
package_paths=packages_path,
package_filter=package_filter,
building=True)
if self.verbose:
context.print_info()
# save context before possible fail, so user can debug
rxt_filepath = os.path.join(build_path, "build.rxt")
context.save(rxt_filepath)
if context.status != ResolverStatus.solved:
raise BuildContextResolveError(context)
return context, rxt_filepath | [
"def",
"create_build_context",
"(",
"self",
",",
"variant",
",",
"build_type",
",",
"build_path",
")",
":",
"request",
"=",
"variant",
".",
"get_requires",
"(",
"build_requires",
"=",
"True",
",",
"private_build_requires",
"=",
"True",
")",
"req_strs",
"=",
"m... | Create a context to build the variant within. | [
"Create",
"a",
"context",
"to",
"build",
"the",
"variant",
"within",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/build_process_.py#L218-L253 | train | 227,387 |
nerdvegas/rez | src/rez/build_process_.py | BuildProcessHelper.get_release_data | def get_release_data(self):
"""Get release data for this release.
Returns:
dict.
"""
previous_package = self.get_previous_release()
if previous_package:
previous_version = previous_package.version
previous_revision = previous_package.revision
else:
previous_version = None
previous_revision = None
if self.vcs is None:
return dict(vcs="None",
previous_version=previous_version)
revision = None
with self.repo_operation():
revision = self.vcs.get_current_revision()
changelog = self.get_changelog()
# truncate changelog - very large changelogs can cause package load
# times to be very high, we don't want that
maxlen = config.max_package_changelog_chars
if maxlen and changelog and len(changelog) > maxlen + 3:
changelog = changelog[:maxlen] + "..."
return dict(vcs=self.vcs.name(),
revision=revision,
changelog=changelog,
previous_version=previous_version,
previous_revision=previous_revision) | python | def get_release_data(self):
"""Get release data for this release.
Returns:
dict.
"""
previous_package = self.get_previous_release()
if previous_package:
previous_version = previous_package.version
previous_revision = previous_package.revision
else:
previous_version = None
previous_revision = None
if self.vcs is None:
return dict(vcs="None",
previous_version=previous_version)
revision = None
with self.repo_operation():
revision = self.vcs.get_current_revision()
changelog = self.get_changelog()
# truncate changelog - very large changelogs can cause package load
# times to be very high, we don't want that
maxlen = config.max_package_changelog_chars
if maxlen and changelog and len(changelog) > maxlen + 3:
changelog = changelog[:maxlen] + "..."
return dict(vcs=self.vcs.name(),
revision=revision,
changelog=changelog,
previous_version=previous_version,
previous_revision=previous_revision) | [
"def",
"get_release_data",
"(",
"self",
")",
":",
"previous_package",
"=",
"self",
".",
"get_previous_release",
"(",
")",
"if",
"previous_package",
":",
"previous_version",
"=",
"previous_package",
".",
"version",
"previous_revision",
"=",
"previous_package",
".",
"... | Get release data for this release.
Returns:
dict. | [
"Get",
"release",
"data",
"for",
"this",
"release",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/build_process_.py#L368-L402 | train | 227,388 |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/minmax.py | minimal_spanning_tree | def minimal_spanning_tree(graph, root=None):
"""
Minimal spanning tree.
@attention: Minimal spanning tree is meaningful only for weighted graphs.
@type graph: graph
@param graph: Graph.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@rtype: dictionary
@return: Generated spanning tree.
"""
visited = [] # List for marking visited and non-visited nodes
spanning_tree = {} # MInimal Spanning tree
# Initialization
if (root is not None):
visited.append(root)
nroot = root
spanning_tree[root] = None
else:
nroot = 1
# Algorithm loop
while (nroot is not None):
ledge = _lightest_edge(graph, visited)
if (ledge == None):
if (root is not None):
break
nroot = _first_unvisited(graph, visited)
if (nroot is not None):
spanning_tree[nroot] = None
visited.append(nroot)
else:
spanning_tree[ledge[1]] = ledge[0]
visited.append(ledge[1])
return spanning_tree | python | def minimal_spanning_tree(graph, root=None):
"""
Minimal spanning tree.
@attention: Minimal spanning tree is meaningful only for weighted graphs.
@type graph: graph
@param graph: Graph.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@rtype: dictionary
@return: Generated spanning tree.
"""
visited = [] # List for marking visited and non-visited nodes
spanning_tree = {} # MInimal Spanning tree
# Initialization
if (root is not None):
visited.append(root)
nroot = root
spanning_tree[root] = None
else:
nroot = 1
# Algorithm loop
while (nroot is not None):
ledge = _lightest_edge(graph, visited)
if (ledge == None):
if (root is not None):
break
nroot = _first_unvisited(graph, visited)
if (nroot is not None):
spanning_tree[nroot] = None
visited.append(nroot)
else:
spanning_tree[ledge[1]] = ledge[0]
visited.append(ledge[1])
return spanning_tree | [
"def",
"minimal_spanning_tree",
"(",
"graph",
",",
"root",
"=",
"None",
")",
":",
"visited",
"=",
"[",
"]",
"# List for marking visited and non-visited nodes",
"spanning_tree",
"=",
"{",
"}",
"# MInimal Spanning tree",
"# Initialization",
"if",
"(",
"root",
"is",
"n... | Minimal spanning tree.
@attention: Minimal spanning tree is meaningful only for weighted graphs.
@type graph: graph
@param graph: Graph.
@type root: node
@param root: Optional root node (will explore only root's connected component)
@rtype: dictionary
@return: Generated spanning tree. | [
"Minimal",
"spanning",
"tree",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/minmax.py#L46-L86 | train | 227,389 |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/minmax.py | cut_value | def cut_value(graph, flow, cut):
"""
Calculate the value of a cut.
@type graph: digraph
@param graph: Graph
@type flow: dictionary
@param flow: Dictionary containing a flow for each edge.
@type cut: dictionary
@param cut: Dictionary mapping each node to a subset index. The function only considers the flow between
nodes with 0 and 1.
@rtype: float
@return: The value of the flow between the subsets 0 and 1
"""
#max flow/min cut value calculation
S = []
T = []
for node in cut.keys():
if cut[node] == 0:
S.append(node)
elif cut[node] == 1:
T.append(node)
value = 0
for node in S:
for neigh in graph.neighbors(node):
if neigh in T:
value = value + flow[(node,neigh)]
for inc in graph.incidents(node):
if inc in T:
value = value - flow[(inc,node)]
return value | python | def cut_value(graph, flow, cut):
"""
Calculate the value of a cut.
@type graph: digraph
@param graph: Graph
@type flow: dictionary
@param flow: Dictionary containing a flow for each edge.
@type cut: dictionary
@param cut: Dictionary mapping each node to a subset index. The function only considers the flow between
nodes with 0 and 1.
@rtype: float
@return: The value of the flow between the subsets 0 and 1
"""
#max flow/min cut value calculation
S = []
T = []
for node in cut.keys():
if cut[node] == 0:
S.append(node)
elif cut[node] == 1:
T.append(node)
value = 0
for node in S:
for neigh in graph.neighbors(node):
if neigh in T:
value = value + flow[(node,neigh)]
for inc in graph.incidents(node):
if inc in T:
value = value - flow[(inc,node)]
return value | [
"def",
"cut_value",
"(",
"graph",
",",
"flow",
",",
"cut",
")",
":",
"#max flow/min cut value calculation",
"S",
"=",
"[",
"]",
"T",
"=",
"[",
"]",
"for",
"node",
"in",
"cut",
".",
"keys",
"(",
")",
":",
"if",
"cut",
"[",
"node",
"]",
"==",
"0",
... | Calculate the value of a cut.
@type graph: digraph
@param graph: Graph
@type flow: dictionary
@param flow: Dictionary containing a flow for each edge.
@type cut: dictionary
@param cut: Dictionary mapping each node to a subset index. The function only considers the flow between
nodes with 0 and 1.
@rtype: float
@return: The value of the flow between the subsets 0 and 1 | [
"Calculate",
"the",
"value",
"of",
"a",
"cut",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/minmax.py#L412-L445 | train | 227,390 |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/minmax.py | cut_tree | def cut_tree(igraph, caps = None):
"""
Construct a Gomory-Hu cut tree by applying the algorithm of Gusfield.
@type igraph: graph
@param igraph: Graph
@type caps: dictionary
@param caps: Dictionary specifying a maximum capacity for each edge. If not given, the weight of the edge
will be used as its capacity. Otherwise, for each edge (a,b), caps[(a,b)] should be given.
@rtype: dictionary
@return: Gomory-Hu cut tree as a dictionary, where each edge is associated with its weight
"""
#maximum flow needs a digraph, we get a graph
#I think this conversion relies on implementation details outside the api and may break in the future
graph = digraph()
graph.add_graph(igraph)
#handle optional argument
if not caps:
caps = {}
for edge in graph.edges():
caps[edge] = igraph.edge_weight(edge)
#temporary flow variable
f = {}
#we use a numbering of the nodes for easier handling
n = {}
N = 0
for node in graph.nodes():
n[N] = node
N = N + 1
#predecessor function
p = {}.fromkeys(range(N),0)
p[0] = None
for s in range(1,N):
t = p[s]
S = []
#max flow calculation
(flow,cut) = maximum_flow(graph,n[s],n[t],caps)
for i in range(N):
if cut[n[i]] == 0:
S.append(i)
value = cut_value(graph,flow,cut)
f[s] = value
for i in range(N):
if i == s:
continue
if i in S and p[i] == t:
p[i] = s
if p[t] in S:
p[s] = p[t]
p[t] = s
f[s] = f[t]
f[t] = value
#cut tree is a dictionary, where each edge is associated with its weight
b = {}
for i in range(1,N):
b[(n[i],n[p[i]])] = f[i]
return b | python | def cut_tree(igraph, caps = None):
"""
Construct a Gomory-Hu cut tree by applying the algorithm of Gusfield.
@type igraph: graph
@param igraph: Graph
@type caps: dictionary
@param caps: Dictionary specifying a maximum capacity for each edge. If not given, the weight of the edge
will be used as its capacity. Otherwise, for each edge (a,b), caps[(a,b)] should be given.
@rtype: dictionary
@return: Gomory-Hu cut tree as a dictionary, where each edge is associated with its weight
"""
#maximum flow needs a digraph, we get a graph
#I think this conversion relies on implementation details outside the api and may break in the future
graph = digraph()
graph.add_graph(igraph)
#handle optional argument
if not caps:
caps = {}
for edge in graph.edges():
caps[edge] = igraph.edge_weight(edge)
#temporary flow variable
f = {}
#we use a numbering of the nodes for easier handling
n = {}
N = 0
for node in graph.nodes():
n[N] = node
N = N + 1
#predecessor function
p = {}.fromkeys(range(N),0)
p[0] = None
for s in range(1,N):
t = p[s]
S = []
#max flow calculation
(flow,cut) = maximum_flow(graph,n[s],n[t],caps)
for i in range(N):
if cut[n[i]] == 0:
S.append(i)
value = cut_value(graph,flow,cut)
f[s] = value
for i in range(N):
if i == s:
continue
if i in S and p[i] == t:
p[i] = s
if p[t] in S:
p[s] = p[t]
p[t] = s
f[s] = f[t]
f[t] = value
#cut tree is a dictionary, where each edge is associated with its weight
b = {}
for i in range(1,N):
b[(n[i],n[p[i]])] = f[i]
return b | [
"def",
"cut_tree",
"(",
"igraph",
",",
"caps",
"=",
"None",
")",
":",
"#maximum flow needs a digraph, we get a graph",
"#I think this conversion relies on implementation details outside the api and may break in the future",
"graph",
"=",
"digraph",
"(",
")",
"graph",
".",
"add_... | Construct a Gomory-Hu cut tree by applying the algorithm of Gusfield.
@type igraph: graph
@param igraph: Graph
@type caps: dictionary
@param caps: Dictionary specifying a maximum capacity for each edge. If not given, the weight of the edge
will be used as its capacity. Otherwise, for each edge (a,b), caps[(a,b)] should be given.
@rtype: dictionary
@return: Gomory-Hu cut tree as a dictionary, where each edge is associated with its weight | [
"Construct",
"a",
"Gomory",
"-",
"Hu",
"cut",
"tree",
"by",
"applying",
"the",
"algorithm",
"of",
"Gusfield",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/minmax.py#L447-L515 | train | 227,391 |
nerdvegas/rez | src/rez/vendor/distlib/locators.py | Locator.locate | def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result | python | def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result | [
"def",
"locate",
"(",
"self",
",",
"requirement",
",",
"prereleases",
"=",
"False",
")",
":",
"result",
"=",
"None",
"r",
"=",
"parse_requirement",
"(",
"requirement",
")",
"if",
"r",
"is",
"None",
":",
"raise",
"DistlibException",
"(",
"'Not a valid require... | Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located. | [
"Find",
"the",
"most",
"recent",
"distribution",
"which",
"matches",
"the",
"given",
"requirement",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/distlib/locators.py#L313-L370 | train | 227,392 |
nerdvegas/rez | src/rez/package_bind.py | get_bind_modules | def get_bind_modules(verbose=False):
"""Get available bind modules.
Returns:
dict: Map of (name, filepath) listing all bind modules.
"""
builtin_path = os.path.join(module_root_path, "bind")
searchpaths = config.bind_module_path + [builtin_path]
bindnames = {}
for path in searchpaths:
if verbose:
print "searching %s..." % path
if not os.path.isdir(path):
continue
for filename in os.listdir(path):
fpath = os.path.join(path, filename)
fname, ext = os.path.splitext(filename)
if os.path.isfile(fpath) and ext == ".py" \
and not fname.startswith('_'):
bindnames[fname] = fpath
return bindnames | python | def get_bind_modules(verbose=False):
"""Get available bind modules.
Returns:
dict: Map of (name, filepath) listing all bind modules.
"""
builtin_path = os.path.join(module_root_path, "bind")
searchpaths = config.bind_module_path + [builtin_path]
bindnames = {}
for path in searchpaths:
if verbose:
print "searching %s..." % path
if not os.path.isdir(path):
continue
for filename in os.listdir(path):
fpath = os.path.join(path, filename)
fname, ext = os.path.splitext(filename)
if os.path.isfile(fpath) and ext == ".py" \
and not fname.startswith('_'):
bindnames[fname] = fpath
return bindnames | [
"def",
"get_bind_modules",
"(",
"verbose",
"=",
"False",
")",
":",
"builtin_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"module_root_path",
",",
"\"bind\"",
")",
"searchpaths",
"=",
"config",
".",
"bind_module_path",
"+",
"[",
"builtin_path",
"]",
"bind... | Get available bind modules.
Returns:
dict: Map of (name, filepath) listing all bind modules. | [
"Get",
"available",
"bind",
"modules",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_bind.py#L13-L36 | train | 227,393 |
nerdvegas/rez | src/rez/package_bind.py | find_bind_module | def find_bind_module(name, verbose=False):
"""Find the bind module matching the given name.
Args:
name (str): Name of package to find bind module for.
verbose (bool): If True, print extra output.
Returns:
str: Filepath to bind module .py file, or None if not found.
"""
bindnames = get_bind_modules(verbose=verbose)
bindfile = bindnames.get(name)
if bindfile:
return bindfile
if not verbose:
return None
# suggest close matches
fuzzy_matches = get_close_pkgs(name, bindnames.keys())
if fuzzy_matches:
rows = [(x[0], bindnames[x[0]]) for x in fuzzy_matches]
print "'%s' not found. Close matches:" % name
print '\n'.join(columnise(rows))
else:
print "No matches."
return None | python | def find_bind_module(name, verbose=False):
"""Find the bind module matching the given name.
Args:
name (str): Name of package to find bind module for.
verbose (bool): If True, print extra output.
Returns:
str: Filepath to bind module .py file, or None if not found.
"""
bindnames = get_bind_modules(verbose=verbose)
bindfile = bindnames.get(name)
if bindfile:
return bindfile
if not verbose:
return None
# suggest close matches
fuzzy_matches = get_close_pkgs(name, bindnames.keys())
if fuzzy_matches:
rows = [(x[0], bindnames[x[0]]) for x in fuzzy_matches]
print "'%s' not found. Close matches:" % name
print '\n'.join(columnise(rows))
else:
print "No matches."
return None | [
"def",
"find_bind_module",
"(",
"name",
",",
"verbose",
"=",
"False",
")",
":",
"bindnames",
"=",
"get_bind_modules",
"(",
"verbose",
"=",
"verbose",
")",
"bindfile",
"=",
"bindnames",
".",
"get",
"(",
"name",
")",
"if",
"bindfile",
":",
"return",
"bindfil... | Find the bind module matching the given name.
Args:
name (str): Name of package to find bind module for.
verbose (bool): If True, print extra output.
Returns:
str: Filepath to bind module .py file, or None if not found. | [
"Find",
"the",
"bind",
"module",
"matching",
"the",
"given",
"name",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_bind.py#L39-L68 | train | 227,394 |
nerdvegas/rez | src/rez/package_bind.py | bind_package | def bind_package(name, path=None, version_range=None, no_deps=False,
bind_args=None, quiet=False):
"""Bind software available on the current system, as a rez package.
Note:
`bind_args` is provided when software is bound via the 'rez-bind'
command line tool. Bind modules can define their own command line
options, and they will be present in `bind_args` if applicable.
Args:
name (str): Package name.
path (str): Package path to install into; local packages path if None.
version_range (`VersionRange`): If provided, only bind the software if
it falls within this version range.
no_deps (bool): If True, don't bind dependencies.
bind_args (list of str): Command line options.
quiet (bool): If True, suppress superfluous output.
Returns:
List of `Variant`: The variant(s) that were installed as a result of
binding this package.
"""
pending = set([name])
installed_variants = []
installed_package_names = set()
primary = True
# bind package and possibly dependencies
while pending:
pending_ = pending
pending = set()
exc_type = None
for name_ in pending_:
# turn error on binding of dependencies into a warning - we don't
# want to skip binding some dependencies because others failed
try:
variants_ = _bind_package(name_,
path=path,
version_range=version_range,
bind_args=bind_args,
quiet=quiet)
except exc_type as e:
print_error("Could not bind '%s': %s: %s"
% (name_, e.__class__.__name__, str(e)))
continue
installed_variants.extend(variants_)
for variant in variants_:
installed_package_names.add(variant.name)
# add dependencies
if not no_deps:
for variant in variants_:
for requirement in variant.requires:
if not requirement.conflict:
pending.add(requirement.name)
# non-primary packages are treated a little differently
primary = False
version_range = None
bind_args = None
exc_type = RezBindError
if installed_variants and not quiet:
print "The following packages were installed:"
print
_print_package_list(installed_variants)
return installed_variants | python | def bind_package(name, path=None, version_range=None, no_deps=False,
bind_args=None, quiet=False):
"""Bind software available on the current system, as a rez package.
Note:
`bind_args` is provided when software is bound via the 'rez-bind'
command line tool. Bind modules can define their own command line
options, and they will be present in `bind_args` if applicable.
Args:
name (str): Package name.
path (str): Package path to install into; local packages path if None.
version_range (`VersionRange`): If provided, only bind the software if
it falls within this version range.
no_deps (bool): If True, don't bind dependencies.
bind_args (list of str): Command line options.
quiet (bool): If True, suppress superfluous output.
Returns:
List of `Variant`: The variant(s) that were installed as a result of
binding this package.
"""
pending = set([name])
installed_variants = []
installed_package_names = set()
primary = True
# bind package and possibly dependencies
while pending:
pending_ = pending
pending = set()
exc_type = None
for name_ in pending_:
# turn error on binding of dependencies into a warning - we don't
# want to skip binding some dependencies because others failed
try:
variants_ = _bind_package(name_,
path=path,
version_range=version_range,
bind_args=bind_args,
quiet=quiet)
except exc_type as e:
print_error("Could not bind '%s': %s: %s"
% (name_, e.__class__.__name__, str(e)))
continue
installed_variants.extend(variants_)
for variant in variants_:
installed_package_names.add(variant.name)
# add dependencies
if not no_deps:
for variant in variants_:
for requirement in variant.requires:
if not requirement.conflict:
pending.add(requirement.name)
# non-primary packages are treated a little differently
primary = False
version_range = None
bind_args = None
exc_type = RezBindError
if installed_variants and not quiet:
print "The following packages were installed:"
print
_print_package_list(installed_variants)
return installed_variants | [
"def",
"bind_package",
"(",
"name",
",",
"path",
"=",
"None",
",",
"version_range",
"=",
"None",
",",
"no_deps",
"=",
"False",
",",
"bind_args",
"=",
"None",
",",
"quiet",
"=",
"False",
")",
":",
"pending",
"=",
"set",
"(",
"[",
"name",
"]",
")",
"... | Bind software available on the current system, as a rez package.
Note:
`bind_args` is provided when software is bound via the 'rez-bind'
command line tool. Bind modules can define their own command line
options, and they will be present in `bind_args` if applicable.
Args:
name (str): Package name.
path (str): Package path to install into; local packages path if None.
version_range (`VersionRange`): If provided, only bind the software if
it falls within this version range.
no_deps (bool): If True, don't bind dependencies.
bind_args (list of str): Command line options.
quiet (bool): If True, suppress superfluous output.
Returns:
List of `Variant`: The variant(s) that were installed as a result of
binding this package. | [
"Bind",
"software",
"available",
"on",
"the",
"current",
"system",
"as",
"a",
"rez",
"package",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_bind.py#L71-L141 | train | 227,395 |
nerdvegas/rez | src/rez/release_hook.py | create_release_hook | def create_release_hook(name, source_path):
"""Return a new release hook of the given type."""
from rez.plugin_managers import plugin_manager
return plugin_manager.create_instance('release_hook',
name,
source_path=source_path) | python | def create_release_hook(name, source_path):
"""Return a new release hook of the given type."""
from rez.plugin_managers import plugin_manager
return plugin_manager.create_instance('release_hook',
name,
source_path=source_path) | [
"def",
"create_release_hook",
"(",
"name",
",",
"source_path",
")",
":",
"from",
"rez",
".",
"plugin_managers",
"import",
"plugin_manager",
"return",
"plugin_manager",
".",
"create_instance",
"(",
"'release_hook'",
",",
"name",
",",
"source_path",
"=",
"source_path"... | Return a new release hook of the given type. | [
"Return",
"a",
"new",
"release",
"hook",
"of",
"the",
"given",
"type",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/release_hook.py#L12-L17 | train | 227,396 |
nerdvegas/rez | src/rez/release_hook.py | ReleaseHook.pre_build | def pre_build(self, user, install_path, variants=None, release_message=None,
changelog=None, previous_version=None,
previous_revision=None, **kwargs):
"""Pre-build hook.
Args:
user: Name of person who did the release.
install_path: Directory the package was installed into.
variants: List of variant indices we are attempting to build, or
None
release_message: User-supplied release message.
changelog: List of strings describing changes since last release.
previous_version: Version object - previously-release package, or
None if no previous release.
previous_revision: Revision of previously-released package (type
depends on repo - see ReleaseVCS.get_current_revision().
kwargs: Reserved.
Note:
This method should raise a `ReleaseHookCancellingError` if the
release process should be cancelled.
"""
pass | python | def pre_build(self, user, install_path, variants=None, release_message=None,
changelog=None, previous_version=None,
previous_revision=None, **kwargs):
"""Pre-build hook.
Args:
user: Name of person who did the release.
install_path: Directory the package was installed into.
variants: List of variant indices we are attempting to build, or
None
release_message: User-supplied release message.
changelog: List of strings describing changes since last release.
previous_version: Version object - previously-release package, or
None if no previous release.
previous_revision: Revision of previously-released package (type
depends on repo - see ReleaseVCS.get_current_revision().
kwargs: Reserved.
Note:
This method should raise a `ReleaseHookCancellingError` if the
release process should be cancelled.
"""
pass | [
"def",
"pre_build",
"(",
"self",
",",
"user",
",",
"install_path",
",",
"variants",
"=",
"None",
",",
"release_message",
"=",
"None",
",",
"changelog",
"=",
"None",
",",
"previous_version",
"=",
"None",
",",
"previous_revision",
"=",
"None",
",",
"*",
"*",... | Pre-build hook.
Args:
user: Name of person who did the release.
install_path: Directory the package was installed into.
variants: List of variant indices we are attempting to build, or
None
release_message: User-supplied release message.
changelog: List of strings describing changes since last release.
previous_version: Version object - previously-release package, or
None if no previous release.
previous_revision: Revision of previously-released package (type
depends on repo - see ReleaseVCS.get_current_revision().
kwargs: Reserved.
Note:
This method should raise a `ReleaseHookCancellingError` if the
release process should be cancelled. | [
"Pre",
"-",
"build",
"hook",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/release_hook.py#L56-L78 | train | 227,397 |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/traversal.py | traversal | def traversal(graph, node, order):
"""
Graph traversal iterator.
@type graph: graph, digraph
@param graph: Graph.
@type node: node
@param node: Node.
@type order: string
@param order: traversal ordering. Possible values are:
2. 'pre' - Preordering (default)
1. 'post' - Postordering
@rtype: iterator
@return: Traversal iterator.
"""
visited = {}
if (order == 'pre'):
pre = 1
post = 0
elif (order == 'post'):
pre = 0
post = 1
for each in _dfs(graph, visited, node, pre, post):
yield each | python | def traversal(graph, node, order):
"""
Graph traversal iterator.
@type graph: graph, digraph
@param graph: Graph.
@type node: node
@param node: Node.
@type order: string
@param order: traversal ordering. Possible values are:
2. 'pre' - Preordering (default)
1. 'post' - Postordering
@rtype: iterator
@return: Traversal iterator.
"""
visited = {}
if (order == 'pre'):
pre = 1
post = 0
elif (order == 'post'):
pre = 0
post = 1
for each in _dfs(graph, visited, node, pre, post):
yield each | [
"def",
"traversal",
"(",
"graph",
",",
"node",
",",
"order",
")",
":",
"visited",
"=",
"{",
"}",
"if",
"(",
"order",
"==",
"'pre'",
")",
":",
"pre",
"=",
"1",
"post",
"=",
"0",
"elif",
"(",
"order",
"==",
"'post'",
")",
":",
"pre",
"=",
"0",
... | Graph traversal iterator.
@type graph: graph, digraph
@param graph: Graph.
@type node: node
@param node: Node.
@type order: string
@param order: traversal ordering. Possible values are:
2. 'pre' - Preordering (default)
1. 'post' - Postordering
@rtype: iterator
@return: Traversal iterator. | [
"Graph",
"traversal",
"iterator",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/traversal.py#L34-L61 | train | 227,398 |
nerdvegas/rez | src/rez/backport/zipfile.py | is_zipfile | def is_zipfile(filename):
"""Quickly see if file is a ZIP file by checking the magic number."""
try:
fpin = open(filename, "rb")
endrec = _EndRecData(fpin)
fpin.close()
if endrec:
return True # file has correct magic number
except IOError:
pass
return False | python | def is_zipfile(filename):
"""Quickly see if file is a ZIP file by checking the magic number."""
try:
fpin = open(filename, "rb")
endrec = _EndRecData(fpin)
fpin.close()
if endrec:
return True # file has correct magic number
except IOError:
pass
return False | [
"def",
"is_zipfile",
"(",
"filename",
")",
":",
"try",
":",
"fpin",
"=",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"endrec",
"=",
"_EndRecData",
"(",
"fpin",
")",
"fpin",
".",
"close",
"(",
")",
"if",
"endrec",
":",
"return",
"True",
"# file has cor... | Quickly see if file is a ZIP file by checking the magic number. | [
"Quickly",
"see",
"if",
"file",
"is",
"a",
"ZIP",
"file",
"by",
"checking",
"the",
"magic",
"number",
"."
] | 1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7 | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/backport/zipfile.py#L133-L143 | train | 227,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.