text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def potentials_difference(p1, p2):
    """Compare two lists of ECP potentials and print their differences.

    Prints a line for every mismatching value. Returns ``inf`` when the
    lists differ structurally (length, angular momentum, number of
    primitives, or number of general contractions); otherwise returns
    the maximum relative difference over all exponents and coefficients.
    """
    npot = len(p1)
    if len(p2) != npot:
        print("Different number of potentials")
        return float('inf')

    sorted1 = sort_potentials(p1)
    sorted2 = sort_potentials(p2)

    worst = 0.0
    for idx in range(npot):
        pot_a = sorted1[idx]
        pot_b = sorted2[idx]

        if pot_a['angular_momentum'] != pot_b['angular_momentum']:
            print("Different angular momentum for potential {}".format(idx))
            return float('inf')

        nprim = len(pot_a['gaussian_exponents'])
        if len(pot_b['gaussian_exponents']) != nprim:
            print("Different number of primitives for potential {}".format(idx))
            return float('inf')

        ngen = len(pot_a['coefficients'])
        if len(pot_b['coefficients']) != ngen:
            print("Different number of general contractions for potential {}".format(idx))
            return float('inf')

        for p in range(nprim):
            # Gaussian exponents
            e1 = pot_a['gaussian_exponents'][p]
            e2 = pot_b['gaussian_exponents'][p]
            rd = _reldiff(e1, e2)
            if rd > 0.0:
                print(" Gaussian Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, rd))
            worst = max(worst, rd)

            # r exponents
            e1 = pot_a['r_exponents'][p]
            e2 = pot_b['r_exponents'][p]
            rd = _reldiff(e1, e2)
            if rd > 0.0:
                print(" R Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, rd))
            worst = max(worst, rd)

            # coefficients for every general contraction
            for g in range(ngen):
                c1 = pot_a['coefficients'][g][p]
                c2 = pot_b['coefficients'][g][p]
                rd = _reldiff(c1, c2)
                if rd > 0.0:
                    print(" Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, rd))
                worst = max(worst, rd)

    print()
    print("Max relative difference for these potentials: {}".format(worst))
    return worst
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def basis_comparison_report(bs1, bs2, uncontract_general=False):
    '''
    Compares two basis set dictionaries and prints a report about their differences
    '''
    # Element list from bs1 is captured before any uncontraction
    bs1_elements = list(bs1['elements'].keys())
    if uncontract_general:
        bs1 = manip.uncontract_general(bs1)
        bs2 = manip.uncontract_general(bs2)

    not_in_bs1 = []                   # found in bs2, not in bs1
    not_in_bs2 = bs1_elements.copy()  # found in bs1, not in bs2
    no_diff = []                      # elements with no difference
    some_diff = []                    # elements that differ
    big_diff = []                     # elements that differ substantially

    for el, data2 in bs2['elements'].items():
        if el not in bs1_elements:
            not_in_bs1.append(el)
            continue

        print()
        print("-------------------------------------")
        print(" Element ", el)
        data1 = bs1['elements'][el]

        max_rdiff_el = 0.0
        max_rdiff_ecp = 0.0

        # Either both or neither side should carry shells / ECP data
        if 'electron_shells' in data2 and 'electron_shells' not in data1:
            print("bs2 has electron_shells, but bs1 does not")
            max_rdiff_el = float('inf')
        if 'electron_shells' in data1 and 'electron_shells' not in data2:
            print("bs1 has electron_shells, but bs2 does not")
            max_rdiff_el = float('inf')
        if 'ecp_potentials' in data2 and 'ecp_potentials' not in data1:
            print("bs2 has ecp_potentials, but bs1 does not")
            max_rdiff_ecp = float('inf')
        if 'ecp_potentials' in data1 and 'ecp_potentials' not in data2:
            print("bs1 has ecp_potentials, but bs2 does not")
            max_rdiff_ecp = float('inf')

        if 'electron_shells' in data2 and 'electron_shells' in data1:
            max_rdiff_el = max(max_rdiff_el,
                               shells_difference(data2['electron_shells'], data1['electron_shells']))

        if 'ecp_potentials' in data2 and 'ecp_potentials' in data1:
            necp2 = data2['ecp_electrons']
            necp1 = data1['ecp_electrons']
            if int(necp2) != int(necp1):
                print('Different number of electrons replaced by ECP ({} vs {})'.format(necp2, necp1))
                max_rdiff_ecp = float('inf')
            else:
                max_rdiff_ecp = max(max_rdiff_ecp,
                                    potentials_difference(data2['ecp_potentials'],
                                                          data1['ecp_potentials']))

        # Classify this element by the worst difference seen
        max_rdiff = max(max_rdiff_el, max_rdiff_ecp)
        if max_rdiff == float('inf'):
            big_diff.append(el)
        elif max_rdiff == 0.0:
            no_diff.append(el)
        else:
            some_diff.append(el)

        not_in_bs2.remove(el)

    print()
    print(" Not in bs1: ", _print_list(not_in_bs1))
    print(" Not in bs2: ", _print_list(not_in_bs2))
    print(" No difference: ", _print_list(no_diff))
    print("Some difference: ", _print_list(some_diff))
    print(" BIG difference: ", _print_list(big_diff))
    print()

    # True only when the two basis sets are identical
    return not (not_in_bs1 or not_in_bs2 or some_diff or big_diff)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare_basis_against_file(basis_name,
                               src_filepath,
                               file_type=None,
                               version=None,
                               uncontract_general=False,
                               data_dir=None):
    '''Compare a basis set in the BSE against a reference file'''
    # Read the reference file first, then fetch the BSE data to compare against
    reference = read_formatted_basis(src_filepath, file_type)
    bse = get_basis(basis_name, version=version, data_dir=data_dir)
    return basis_comparison_report(reference, bse, uncontract_general=uncontract_general)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_list_basis_sets(args):
    '''Handles the list-basis-sets subcommand'''
    metadata = api.filter_basis_sets(args.substr, args.family, args.role, args.data_dir)
    # With descriptions: two columns (name, description); otherwise names only
    if not args.no_description:
        rows = format_columns([(name, info['description']) for name, info in metadata.items()])
    else:
        rows = metadata.keys()
    return '\n'.join(rows)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_list_formats(args):
    '''Handles the list-formats subcommand'''
    all_formats = api.get_formats()
    # Names only, or name/description columns
    if not args.no_description:
        rows = format_columns(all_formats.items())
    else:
        rows = all_formats.keys()
    return '\n'.join(rows)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_list_ref_formats(args):
    '''Handles the list-ref-formats subcommand'''
    ref_formats = api.get_reference_formats()
    # Names only, or name/description columns
    if not args.no_description:
        rows = format_columns(ref_formats.items())
    else:
        rows = ref_formats.keys()
    return '\n'.join(rows)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_list_roles(args):
    '''Handles the list-roles subcommand'''
    roles = api.get_roles()
    # Names only, or name/description columns
    if not args.no_description:
        rows = format_columns(roles.items())
    else:
        rows = roles.keys()
    return '\n'.join(rows)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_lookup_by_role(args):
    '''Handles the lookup-by-role subcommand'''
    # Direct delegation to the API lookup
    return api.lookup_basis_by_role(args.basis, args.role, args.data_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_get_basis(args):
    '''Handles the get-basis subcommand'''
    # Collect the CLI options into keyword arguments for the API call
    kwargs = {
        'name': args.basis,
        'elements': args.elements,
        'version': args.version,
        'fmt': args.fmt,
        'uncontract_general': args.unc_gen,
        'uncontract_spdf': args.unc_spdf,
        'uncontract_segmented': args.unc_seg,
        'make_general': args.make_gen,
        'optimize_general': args.opt_gen,
        'data_dir': args.data_dir,
        'header': not args.noheader,
    }
    return api.get_basis(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_get_refs(args):
    '''Handles the get-refs subcommand'''
    # Forward the CLI options straight to the API
    return api.get_references(basis_name=args.basis,
                              elements=args.elements,
                              version=args.version,
                              fmt=args.reffmt,
                              data_dir=args.data_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_get_info(args):
    '''Handles the get-info subcommand'''
    bs_meta = api.get_metadata(args.data_dir)[args.basis]

    divider = '-' * 80
    out = [divider, args.basis, divider]
    out.append(' Display Name: ' + bs_meta['display_name'])
    out.append(' Description: ' + bs_meta['description'])
    out.append(' Role: ' + bs_meta['role'])
    out.append(' Family: ' + bs_meta['family'])
    out.append(' Function Types: ' + ','.join(bs_meta['functiontypes']))
    out.append(' Latest Version: ' + bs_meta['latest_version'])
    out.append('')

    aux = bs_meta['auxiliaries']
    if len(aux) == 0:
        out.append('Auxiliary Basis Sets: None')
    else:
        out.append('Auxiliary Basis Sets:')
        out.extend(format_columns(list(aux.items()), ' '))

    out.append('')
    out.append('Versions:')
    # Three columns: version, covered elements, revision description
    versions = bs_meta['versions']
    out.extend(format_columns([(ver, compact_elements(info['elements']), info['revdesc'])
                               for ver, info in versions.items()],
                              ' '))
    return '\n'.join(out)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_get_versions(args):
    '''Handles the get-versions subcommand

    Lists all versions of a basis set, optionally with the revision
    description of each version.

    Raises:
        KeyError: if the named basis set does not exist.
    '''
    name = args.basis.lower()
    metadata = api.get_metadata(args.data_dir)
    # Fixed: PEP 8 membership test (`name not in` rather than `not name in`)
    if name not in metadata:
        raise KeyError(
            "Basis set {} does not exist. For a complete list of basis sets, use the 'list-basis-sets' command".format(
                name))
    version_data = {k: v['revdesc'] for k, v in metadata[name]['versions'].items()}
    if args.no_description:
        liststr = version_data.keys()
    else:
        liststr = format_columns(version_data.items())
    return '\n'.join(liststr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bse_cli_create_bundle(args):
    '''Handles the create-bundle subcommand'''
    bundle.create_bundle(args.bundle_file, args.fmt, args.reffmt, args.archive_type, args.data_dir)
    # Report the file that was just written
    return "Created {}".format(args.bundle_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wait_ready(self, timeout=120):
    """Wait until WDA is back to normal.

    Polls ``self.status()`` every 2 seconds until it succeeds or the
    deadline passes.

    Args:
        timeout (float): maximum number of seconds to wait.

    Returns:
        bool: True if WDA responded before the deadline, False otherwise.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            self.status()
            return True
        except Exception:
            # Fixed: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit, making the wait uninterruptible.
            time.sleep(2)
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def screenshot(self, png_filename=None, format='raw'):
    """Take a PNG screenshot.

    Args:
        png_filename (string): optional file name to save the image to.
        format (string): 'raw' (default) returns bytes; 'pillow'
            returns a PIL.Image.

    Returns:
        raw PNG bytes or a PIL.Image.

    Raises:
        WDAError: if a save was requested but the data is not valid PNG.
        ValueError: for an unknown format.
    """
    b64data = self.http.get('screenshot').value
    data = base64.b64decode(b64data)
    signature = b"\x89PNG\r\n\x1a\n"
    # Validate the PNG signature only when we are about to write a file
    if png_filename and not data.startswith(signature):
        raise WDAError(-1, "screenshot png format error")
    if png_filename:
        with open(png_filename, 'wb') as fp:
            fp.write(data)
    if format == 'raw':
        return data
    if format == 'pillow':
        from PIL import Image
        return Image.open(io.BytesIO(data))
    raise ValueError("unknown format")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tap_hold(self, x, y, duration=1.0):
    """Tap at (x, y) and hold for a moment.

    Args:
        x, y (int): screen position.
        duration (float): seconds to keep the press down.

    Backed by WDA's /wda/touchAndHold endpoint.
    """
    payload = {'x': x, 'y': y, 'duration': duration}
    return self.http.post('/wda/touchAndHold', data=payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def screenshot(self):
    """Take a screenshot and return it as a PIL.Image.

    Returns:
        PIL.Image: the decoded screenshot.
    """
    raw = base64.b64decode(self.http.get('/screenshot').value)
    # Import lazily so PIL is only required when screenshots are used
    from PIL import Image
    return Image.open(io.BytesIO(raw))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_keys(self, value):
    """Send keystrokes to the device.

    A string is expanded into a list of single characters before being
    posted to /wda/keys; other values are posted as-is.
    """
    keys = list(value) if isinstance(value, six.string_types) else value
    return self.http.post('/wda/keys', data={'value': keys})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def click_exists(self, timeout=0):
    """Wait for the element and click it if it appears.

    Args:
        timeout (float): seconds to wait for the element.

    Returns:
        bool: True when the element was found and clicked.
    """
    target = self.get(timeout=timeout, raise_error=False)
    if target is None:
        return False
    target.click()
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_graph_data(self, graph, benchmark):
    """Iterate over the data sets of *graph* for *benchmark*.

    Yields
    ------
    param_idx
        Flat index into the parameter permutations for parameterized
        benchmarks; None when the benchmark is not parameterized.
    entry_name
        Name of the data set (the benchmark name, with the parameter
        combination appended when parameterized).
    steps
        Steps to consider in regression detection, limited to those at
        or after the configured starting revision.
    threshold
        User-specified threshold for regression detection.
    """
    if benchmark.get('params'):
        # One data set per parameter combination, paired with its steps
        combos = itertools.product(*benchmark['params'])
        entries = enumerate(zip(combos, graph.get_steps()))
    else:
        entries = [(None, (None, graph.get_steps()))]

    for idx, (combo, steps) in entries:
        if combo is None:
            entry_name = benchmark['name']
        else:
            entry_name = benchmark['name'] + '({0})'.format(', '.join(combo))

        start_revision = self._get_start_revision(graph, benchmark, entry_name)
        threshold = self._get_threshold(graph, benchmark, entry_name)

        if start_revision is None:
            # Regression detection disabled for this entry
            continue

        allowed = [step for step in steps if step[1] >= start_revision]
        yield idx, entry_name, allowed, threshold
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_start_revision(self, graph, benchmark, entry_name):
    """ Compute the first revision allowed by asv.conf.json. Revisions correspond to linearized commit history and the regression detection runs on this order --- the starting commit thus corresponds to a specific starting revision. """
    # Default: the earliest revision we know about at all.
    start_revision = min(six.itervalues(self.revisions))
    # Config regexes are matched against "entry_name@branch" when the
    # graph is branch-specific.
    if graph.params.get('branch'):
        branch_suffix = '@' + graph.params.get('branch')
    else:
        branch_suffix = ''
    for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
        if re.match(regex, entry_name + branch_suffix):
            if start_commit is None:
                # Disable regression detection completely
                return None
            if self.conf.branches == [None]:
                key = (start_commit, None)
            else:
                key = (start_commit, graph.params.get('branch'))
            if key not in self._start_revisions:
                # Resolve the configured commit to a revision number once
                # and cache the result in self._start_revisions.
                spec = self.repo.get_new_range_spec(*key)
                start_hash = self.repo.get_hash_from_name(start_commit)
                # Try the start commit itself first, then the commits in
                # the range; take the first one with a known revision.
                for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
                    rev = self.revisions.get(commit)
                    if rev is not None:
                        self._start_revisions[key] = rev
                        break
                else:
                    # Commit not found in the branch --- warn and ignore.
                    log.warning(("Commit {0} specified in `regressions_first_commits` "
                                 "not found in branch").format(start_commit))
                    self._start_revisions[key] = -1
            # Detection starts strictly after the resolved revision;
            # a cached -1 (not found) leaves start_revision at >= 0.
            start_revision = max(start_revision, self._start_revisions[key] + 1)
    return start_revision
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_threshold(self, graph, benchmark, entry_name):
    """Compute the regression threshold configured in asv.conf.json.

    Matches the configured regexes against the entry name (with an
    "@branch" suffix for branch-specific graphs) and returns the largest
    matching threshold, or the default of 0.05 when nothing matches.

    Raises:
        util.UserError: when a configured threshold is not a float.
    """
    branch = graph.params.get('branch')
    target = entry_name + ('@' + branch if branch else '')

    max_threshold = None
    for regex, threshold in six.iteritems(self.conf.regressions_thresholds):
        if not re.match(regex, target):
            continue
        try:
            value = float(threshold)
        except ValueError:
            raise util.UserError("Non-float threshold in asv.conf.json: {!r}".format(threshold))
        max_threshold = value if max_threshold is None else max(value, max_threshold)

    # Default threshold: 5%
    return 0.05 if max_threshold is None else max_threshold
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __check_submodules(self):
    """
    Verify that the submodules are checked out and clean.

    Does nothing when not running from a git checkout (no ``.git``).

    Raises:
        ValueError: if a submodule listed in .gitmodules is missing on
            disk, or `git submodule status` reports one as uninitialized
            ('-') or checked out at a different commit ('+').
    """
    if not os.path.exists('.git'):
        # Not a git checkout (e.g. building from an sdist)
        return
    if not os.path.exists('.gitmodules'):
        # Robustness fix: a git checkout without submodules has no
        # .gitmodules file; the original open() crashed here.
        return
    with open('.gitmodules') as f:
        for l in f:
            if 'path' in l:
                p = l.split('=')[-1].strip()
                if not os.path.exists(p):
                    raise ValueError('Submodule %s missing' % p)

    proc = subprocess.Popen(['git', 'submodule', 'status'],
                            stdout=subprocess.PIPE)
    status, _ = proc.communicate()
    status = status.decode("ascii", "replace")
    for line in status.splitlines():
        # '-' = submodule not initialized; '+' = checked-out commit
        # differs from the one recorded in the index
        if line.startswith('-') or line.startswith('+'):
            raise ValueError('Submodule not clean: %s' % line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve_potts_autogamma(y, w, beta=None, **kw):
    """Solve Potts problem with automatically determined gamma. The optimal value is determined by minimizing the information measure:: f(gamma) = beta J(x(gamma)) + log sum(abs(x(gamma) - y)**p) where x(gamma) is the solution to the Potts problem for a fixed gamma. The minimization is only performed rather roughly. Parameters beta : float or 'bic' Penalty parameter. Default is 4*ln(n)/n, similar to Bayesian information criterion for gaussian model with unknown variance assuming 4 DOF per breakpoint. """
    n = len(y)
    if n == 0:
        # Nothing to segment
        return [], [], [], None
    mu_dist = get_mu_dist(y, w)
    mu, dist = mu_dist.mu, mu_dist.dist
    if beta is None:
        # Default penalty, BIC-like: 4*ln(n)/n
        beta = 4 * math.log(n) / n
    # gamma_0: cost of the single-interval solution; used as the scale
    # for the gamma search below.
    gamma_0 = dist(0, n-1)
    if gamma_0 == 0:
        # Zero variance
        gamma_0 = 1.0
    # One-element lists act as mutable cells so the closure f() below can
    # record the best solution seen so far.
    best_r = [None]
    best_v = [None]
    best_d = [None]
    best_obj = [float('inf')]
    best_gamma = [None]
    def f(x):
        # Evaluate the objective at gamma = gamma_0 * exp(x)
        gamma = gamma_0 * math.exp(x)
        r, v, d = solve_potts_approx(y, w, gamma=gamma, mu_dist=mu_dist, **kw)
        # MLE fit noise correlation
        def sigma_star(rights, values, rho):
            """
            |E_0| + sum_{j>0} |E_j - rho E_{j-1}|
            """
            l = 1
            E_prev = y[0] - values[0]
            s = abs(E_prev)
            for r, v in zip(rights, values):
                for yv in y[l:r]:
                    E = yv - v
                    s += abs(E - rho*E_prev)
                    E_prev = E
                l = r
            return s
        rho_best = golden_search(lambda rho: sigma_star(r, v, rho), -1, 1,
                                 xatol=0.05, expand_bounds=True)
        # Measurement noise floor
        if len(v) > 2:
            # based on the smallest jump between adjacent interval means
            absdiff = [abs(v[j+1] - v[j]) for j in range(len(v) - 1)]
            sigma_0 = 0.1 * min(absdiff)
        else:
            # too few intervals; fall back to the magnitudes themselves
            absv = [abs(z) for z in v]
            sigma_0 = 0.001 * min(absv)
        # keep the floor strictly positive so the log below is defined
        sigma_0 = max(1e-300, sigma_0)
        # Objective function
        s = sigma_star(r, v, rho_best)
        obj = beta*len(r) + math.log(sigma_0 + s)
        # Done
        if obj < best_obj[0]:
            best_r[0] = r
            best_v[0] = v
            best_d[0] = d
            best_gamma[0] = gamma
            best_obj[0] = obj
        return obj
    # Try to find best gamma (golden section search on log-scale); we
    # don't need an accurate value for it however
    a = math.log(0.1/n)
    b = 0.0
    golden_search(f, a, b, xatol=abs(a)*0.1, ftol=0, expand_bounds=True)
    return best_r[0], best_v[0], best_d[0], best_gamma[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_pieces(gamma, right, values, dists, mu_dist, max_size):
    """ Combine consecutive intervals in Potts model solution, if doing that reduces the cost function. """
    mu, dist = mu_dist.mu, mu_dist.dist
    right = list(right)
    # Combine consecutive intervals, if it results to decrease of cost
    # function
    while True:
        min_change = 0
        min_change_j = len(right)
        l = 0  # left edge of the interval pair under consideration
        for j in range(1, len(right)):
            if min_change_j < j - 2:
                # NOTE(review): presumably stops scanning once a merge
                # candidate is found well behind the cursor — TODO confirm
                break
            # Check whether merging consecutive intervals results to
            # decrease in the cost function
            change = dist(l, right[j]-1) - (dist(l, right[j-1]-1) + dist(right[j-1], right[j]-1) + gamma)
            if change <= min_change:
                min_change = change
                min_change_j = j-1
            l = right[j-1]
        if min_change_j < len(right):
            # Perform the best merge found and rescan
            del right[min_change_j]
        else:
            # No merge decreases the cost; stop
            break
    # Check whether perturbing boundary positions leads to improvement
    # in the cost function. The restricted Potts minimization can
    # return sub-optimal boundaries due to the interval maximum size
    # restriction.
    l = 0
    for j in range(1, len(right)):
        prev_score = dist(l, right[j-1]-1) + dist(right[j-1], right[j]-1)
        new_off = 0
        for off in range(-max_size, max_size+1):
            # Keep the shifted boundary strictly inside (l, right[j]-1)
            if right[j-1] + off - 1 <= l or right[j-1] + off >= right[j] - 1 or off == 0:
                continue
            new_score = dist(l, right[j-1]+off-1) + dist(right[j-1]+off, right[j]-1)
            if new_score < prev_score:
                new_off = off
                prev_score = new_score
        if new_off != 0:
            right[j-1] += new_off
        l = right[j-1]
    # Rebuild values and dists lists
    l = 0
    values = []
    dists = []
    for j in range(len(right)):
        dists.append(dist(l, right[j]-1))
        values.append(mu(l, right[j]-1))
        l = right[j]
    return right, values, dists
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def weighted_median(y, w):
    """Compute the weighted median of values `y` with weights `w`.

    Returns the first value whose cumulative weight exceeds half the
    total weight; when the cumulative weight lands exactly on the
    midpoint, the average of the two straddling values is returned.
    """
    pairs = sorted(zip(y, w))
    half = sum(w) / 2

    chosen = []
    cumulative = 0
    for val, weight in pairs:
        cumulative += weight
        if cumulative > half:
            chosen.append(val)
            break
        if cumulative == half:
            # exactly at the midpoint: also include the next value
            chosen.append(val)
    else:
        # loop finished without crossing the midpoint
        chosen = y

    return sum(chosen) / len(chosen)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_python(python):
    """Find a Python executable matching the given specifier.

    *python* is either "pypy[version]" or a CPython version string such
    as "3.8". Returns the executable path, the current interpreter when
    it matches, or None when nothing suitable is found.
    """
    is_pypy = python.startswith("pypy")

    # Split the specifier into the executable name to look for and the
    # version it implies.
    if is_pypy:
        executable = python
        python_version = '2' if python == 'pypy' else python[4:]
    else:
        python_version = python
        executable = "python{0}".format(python_version)

    # First preference: a matching executable on PATH
    try:
        return util.which(executable)
    except IOError:
        pass

    # Second preference: the interpreter we are currently running under
    current_is_pypy = hasattr(sys, 'pypy_version_info')
    current_versions = ['{0[0]}'.format(sys.version_info),
                        '{0[0]}.{0[1]}'.format(sys.version_info)]
    if is_pypy == current_is_pypy and python_version in current_versions:
        return sys.executable

    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name(self):
    """ Get a name to uniquely identify this environment. """
    # get_env_name adds a "py" prefix itself, so strip the duplicated
    # leading "py" from "pypy..." specifiers.
    spec = self._python[2:] if self._python.startswith('pypy') else self._python
    return environment.get_env_name(self.tool_name, spec, self._requirements)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup(self):
    """Create the virtualenv on disk and install the requirements.

    The environment is built with the `virtualenv` module of the
    running interpreter, targeting ``self._executable``, then the
    configured requirements are pip-installed into it.
    """
    log.info("Creating virtualenv for {0}".format(self.name))
    cmd = [sys.executable, "-mvirtualenv",
           '--no-site-packages',
           "-p", self._executable,
           self._path]
    util.check_call(cmd)

    log.info("Installing requirements for {0}".format(self.name))
    self._install_requirements()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _basicsize(t, base=0, heap=False, obj=None):
    '''Get non-zero basicsize of type,
       including the header sizes.
    '''
    size = max(getattr(t, '__basicsize__', 0), base)
    # Determine whether a GC header must be included
    if t != _Type_type:
        gc = getattr(t, '__flags__', 0) & _Py_TPFLAGS_HAVE_GC
    elif heap:
        # type object, allocated on heap
        gc = True
    else:
        # None has no __flags__ attr
        gc = getattr(obj, '__flags__', 0) & _Py_TPFLAGS_HEAPTYPE
    if gc:
        size += _sizeof_CPyGC_Head
    # plus the reference counter fields
    return size + _sizeof_Crefcounts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _derive_typedef(typ):
    '''Return single, existing super type typedef or None.
    '''
    # Only an unambiguous (exactly one) super-type match qualifies
    matches = [td for td in _values(_typedefs) if _issubclass(typ, td.type)]
    return matches[0] if len(matches) == 1 else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _infer_dict(obj):
    '''Return True for likely dict object.
    '''
    # Accept either the Python 3 or the Python 2 dict interface;
    # written without all() for ancient-Python compatibility.
    attr_sets = (('__len__', 'get', 'has_key', 'items', 'keys', 'values'),
                 ('__len__', 'get', 'has_key', 'iteritems', 'iterkeys', 'itervalues'))
    for ats in attr_sets:
        complete = True
        for name in ats:
            if not _callable(getattr(obj, name, None)):
                complete = False
                break
        if complete:
            return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _isdictclass(obj):
    '''Return True for known dict objects.
    '''
    # known == registered in _dict_classes under the class's module
    cls = getattr(obj, '__class__', None)
    return cls and cls.__name__ in _dict_classes.get(cls.__module__, ())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _lengstr(obj):
    '''Object length as a string.
    '''
    n = leng(obj)
    if n is None:  # object has no length
        return ''
    # '!' marks an extended length, i.e. larger than len(obj)
    fmt = ' leng %d!' if n > _len(obj) else ' leng %d'
    return fmt % n
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _objs_opts(objs, all=None, **opts):
    '''Return given or 'all' objects
       and the remaining options.
    '''
    if objs:  # explicitly given objects win
        selected = objs
    elif all in (False, None):
        selected = ()
    elif all is True:
        # 'all' objects: modules first, then globals and stack
        # (may contain duplicate objects)
        selected = tuple(_values(sys.modules)) + (
            globals(), stack(sys.getrecursionlimit())[2:])
    else:
        raise ValueError('invalid option: %s=%r' % ('all', all))
    return selected, opts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _p100(part, total, prec=1):
    '''Return percentage as string.
    '''
    denom = float(total)
    if not denom:
        # undefined for a zero total
        return 'n/a'
    return '%.*f%%' % (prec, part * 100.0 / denom)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _printf(fmt, *args, **print3opts):
    '''Formatted print.
    '''
    text = fmt % args if args else fmt
    if print3opts:
        # emulate Python 3 print(..., file=..., end=...)
        out = print3opts.get('file', None) or sys.stdout
        out.write(text)
        out.write(print3opts.get('end', linesep))
    else:
        print(text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _refs(obj, named, *ats, **kwds):
    '''Return specific attribute objects of an object.
    '''
    # cf. inspect.getmembers(); when *named*, wrap each value together
    # with its attribute name.
    for name in ats:
        if hasattr(obj, name):
            attr = getattr(obj, name)
            yield _NamedRef(name, attr) if named else attr
    if kwds:  # kwds are _dir2() args
        for name, attr in _dir2(obj, **kwds):
            yield _NamedRef(name, attr) if named else attr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _SI(size, K=1024, i='i'):
    '''Return size as SI string.

    E.g. a few-KiB size yields " or 2.0 KiB"; returns '' when the size
    does not exceed one multiple of K.
    '''
    if 1 < K < size:
        f = float(size)
        # Fixed: prefixes must ascend Kilo, Mega, Giga, Tera, Peta, Exa;
        # the original 'KMGPTE' swapped Peta and Tera, mislabelling
        # tera-scale sizes as "PiB" and peta-scale as "TiB".
        for si in iter('KMGTPE'):
            f /= K
            if f < K:
                return ' or %.1f %s%sB' % (f, si, i)
    return ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _module_refs(obj, named):
    '''Return specific referents of a module object.
    '''
    if obj.__name__ == __name__:
        # never size this very module
        return ()
    # a module is essentially its __dict__
    return _dict_refs(obj.__dict__, named)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _len_frame(obj):
    '''Length of a frame object.
    '''
    # a frame's length is that of its code object, if any
    code = getattr(obj, 'f_code', None)
    return _len_code(code) if code else 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _len_slice(obj):
    '''Slice length.
    '''
    try:
        return (obj.stop - obj.start + 1) // obj.step
    except (AttributeError, TypeError):
        # missing attributes or non-numeric/None start/stop/step
        return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _claskey(obj, style):
    '''Wrap an old- or new-style class object.
    '''
    # cache exactly one wrapper per class (keyed by id)
    key = _claskeys.get(id(obj), None)
    if not key:
        key = _Claskey(obj, style)
        _claskeys[id(obj)] = key
    return key
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _typedef_both(t, base=0, item=0, leng=None, refs=None, kind=_kind_static, heap=False):
    '''Add new typedef for both data and code.
    '''
    td = _Typedef(base=_basicsize(t, base=base),
                  item=_itemsize(t, item),
                  refs=refs, leng=leng,
                  both=True, kind=kind, type=t)
    # register the typedef for type t
    td.save(t, base=base, heap=heap)
    return td
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _typedef_code(t, base=0, refs=None, kind=_kind_static, heap=False):
    '''Add new typedef for code only.
    '''
    td = _Typedef(base=_basicsize(t, base=base),
                  refs=refs,
                  both=False, kind=kind, type=t)
    # register the typedef for type t
    td.save(t, base=base, heap=heap)
    return td
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _typedef(obj, derive=False, infer=False):
    '''Create a new typedef for an object.
    '''
    t = type(obj)
    v = _Typedef(base=_basicsize(t, obj=obj),
                 kind=_kind_dynamic, type=t)
    ##_printf('new %r %r/%r %s', t, _basicsize(t), _itemsize(t), _repr(dir(obj)))
    # The branches below are ordered from most to least specific;
    # the order matters (e.g. modules are also "callable-free" objects).
    if ismodule(obj):  # handle module like dict
        v.dup(item=_dict_typedef.item + _sizeof_CPyModuleObject,
              leng=_len_module,
              refs=_module_refs)
    elif isframe(obj):
        v.set(base=_basicsize(t, base=_sizeof_CPyFrameObject, obj=obj),
              item=_itemsize(t),
              leng=_len_frame,
              refs=_frame_refs)
    elif iscode(obj):
        v.set(base=_basicsize(t, base=_sizeof_CPyCodeObject, obj=obj),
              item=_sizeof_Cvoidp,
              leng=_len_code,
              refs=_co_refs,
              both=False)  # code only
    elif _callable(obj):
        # classes, built-ins, functions, methods and callable instances
        if isclass(obj):  # class or type
            v.set(refs=_class_refs,
                  both=False)  # code only
            if obj.__module__ in _builtin_modules:
                v.set(kind=_kind_ignored)
        elif isbuiltin(obj):  # function or method
            v.set(both=False,  # code only
                  kind=_kind_ignored)
        elif isfunction(obj):
            v.set(refs=_func_refs,
                  both=False)  # code only
        elif ismethod(obj):
            v.set(refs=_im_refs,
                  both=False)  # code only
        elif isclass(t):  # callable instance, e.g. SCons,
            # handle like any other instance further below
            v.set(item=_itemsize(t), safe_len=True,
                  refs=_inst_refs)  # not code only!
        else:
            v.set(both=False)  # code only
    elif _issubclass(t, dict):
        v.dup(kind=_kind_derived)
    elif _isdictclass(obj) or (infer and _infer_dict(obj)):
        # registered dict-like class, or inferred as dict when requested
        v.dup(kind=_kind_inferred)
    elif getattr(obj, '__module__', None) in _builtin_modules:
        v.set(kind=_kind_ignored)
    else:  # assume an instance of some class
        if derive:
            # try to reuse the typedef of a unique super type
            p = _derive_typedef(t)
            if p:  # duplicate parent
                v.dup(other=p, kind=_kind_derived)
                return v
        if _issubclass(t, Exception):
            v.set(item=_itemsize(t), safe_len=True,
                  refs=_exc_refs,
                  kind=_kind_derived)
        elif isinstance(obj, Exception):
            v.set(item=_itemsize(t), safe_len=True,
                  refs=_exc_refs)
        else:
            v.set(item=_itemsize(t), safe_len=True,
                  refs=_inst_refs)
    return v
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adict(*classes):
    '''Register the given classes to be sized like dicts.

    Returns True only if every argument was a dict-like class.
    '''
    all_ok = True
    for klass in classes:
        # if class is dict-like, add its name to _dict_classes[module]
        if not (isclass(klass) and _infer_dict(klass)):
            all_ok = False
            continue
        names = _dict_classes.get(klass.__module__, ())
        if klass.__name__ not in names: # extend tuple
            _dict_classes[klass.__module__] = names + (klass.__name__,)
    return all_ok
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def asizeof(*objs, **opts):
    '''Return the combined size in bytes of all objects passed as
    positional arguments.

    The available options and defaults are:

         *align=8*       -- size alignment
         *all=False*     -- all current objects
         *clip=80*       -- clip ``repr()`` strings
         *code=False*    -- incl. (byte)code size
         *derive=False*  -- derive from super type
         *ignored=True*  -- ignore certain types
         *infer=False*   -- try to infer types
         *limit=100*     -- recursion limit
         *stats=0.0*     -- print statistics

    Set *align* to a power of 2 to align sizes; any value less than 2
    avoids alignment.  With *all* true and no positional arguments,
    all current module, global and stack objects are sized.  A
    positive *clip* truncates all repr() strings to at most *clip*
    characters.  The (byte)code size of callables is included only
    with *code*.  With *derive*, new types are handled like a unique
    existing (super) type.  With *ignored* false, normally ignored
    base types (object, super, etc.) are included.  With *infer*, new
    dict-like types are inferred from duck-typed attributes (get,
    has_key, items, keys, values).  A positive *limit* recursively
    accumulates referent sizes up to that depth; *limit=0* sums only
    flat sizes.  A positive *stats* prints up to 8 statistic tables;
    its fractional part (x100) is the cutoff percentage for simple
    profiles.
    '''
    objects, prefs = _objs_opts(objs, **opts)
    if not objects:
        return 0
    _asizer.reset(**prefs)
    total = _asizer.asizeof(*objects)
    _asizer.print_stats(objs=objects, opts=opts)  # show opts as _kwdstr
    _asizer._clear()
    return total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def asizesof(*objs, **opts):
    '''Return a tuple with the size in bytes of each object passed as
    a positional argument, using these options:

         *align=8*       -- size alignment
         *clip=80*       -- clip ``repr()`` strings
         *code=False*    -- incl. (byte)code size
         *derive=False*  -- derive from super type
         *ignored=True*  -- ignore certain types
         *infer=False*   -- try to infer types
         *limit=100*     -- recursion limit
         *stats=0.0*     -- print statistics

    See function **asizeof** for a description of the options.  The
    length of the returned tuple equals the number of given objects.
    The *all* option is not supported here.
    '''
    if 'all' in opts:
        raise KeyError('invalid option: %s=%r' % ('all', opts['all']))
    if not objs:
        return ()
    _asizer.reset(**opts)
    sizes = _asizer.asizesof(*objs)
    _asizer.print_stats(objs, opts=opts, sizes=sizes)  # show opts as _kwdstr
    _asizer._clear()
    return sizes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _typedefof(obj, save=False, **opts):
    '''Return the typedef for *obj*, creating (and optionally caching) it.'''
    key = _objkey(obj)
    typedef = _typedefs.get(key, None)
    if typedef:
        return typedef
    typedef = _typedef(obj, **opts)  # new typedef
    if save:
        _typedefs[key] = typedef
    return typedef
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def args(self):  # as args tuple
    '''Return all attributes as a positional-arguments tuple.'''
    return tuple(getattr(self, name) for name in
                 ('base', 'item', 'leng', 'refs', 'both', 'kind', 'type'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dup(self, other=None, **kwds):
    '''Duplicate the attributes of *other* (default: the dict typedef),
    overridden by any keyword arguments.
    '''
    source = _dict_typedef if other is None else other
    attrs = source.kwds()
    attrs.update(kwds)
    self.reset(**attrs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flat(self, obj, mask=0):
    '''Return the aligned flat (non-recursive) size of *obj*.'''
    size = self.base
    if self.leng and self.item > 0:  # include variable-size items
        size += self.leng(obj) * self.item
    if _getsizeof:  # sys.getsizeof() result prevails
        size = _getsizeof(obj, size)
    if mask:  # round up to the alignment boundary
        size = (size + mask) & ~mask
    return size
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kwds(self):
    '''Return all attributes as a keywords dict.'''
    # no dict(refs=self.refs, ..., kind=self.kind) in Python 2.0
    values = {}
    for name in ('base', 'item', 'leng', 'refs', 'both', 'kind', 'type'):
        values[name] = getattr(self, name)
    return _kwds(**values)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def save(self, t, base=0, heap=False):
    '''Save this typedef plus its class typedef.

    Registers *self* under the instance key of type *t* and, when
    needed, registers a separate code-only typedef for the metaclass
    under the class key.  Raises KeyError if *t* is already
    registered and is not one of the builtin special cases.
    '''
    c, k = _keytuple(t)
    if k and k not in _typedefs: # instance key
        _typedefs[k] = self
        if c and c not in _typedefs: # class key
            if t.__module__ in _builtin_modules:
                k = _kind_ignored # default
            else:
                k = self.kind
            _typedefs[c] = _Typedef(base=_basicsize(type(t), base=base, heap=heap),
                                    refs=_type_refs,
                                    both=False, kind=k, type=t)
    elif isbuiltin(t) and t not in _typedefs: # array, range, xrange in Python 2.x
        _typedefs[t] = _Typedef(base=_basicsize(t, base=base),
                                both=False, kind=_kind_ignored, type=t)
    else: # already registered, refuse to overwrite
        raise KeyError('asizeof typedef %r bad: %r %r' % (self, (c, k), self.both))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, safe_len=False, **kwds):
    '''Set one or more attributes; with *safe_len*, install the safe
    length function when this typedef has a non-zero item size.
    '''
    if kwds:  # merge with current attributes, re-validating via reset
        merged = self.kwds()
        merged.update(kwds)
        self.reset(**merged)
    if safe_len and self.item:
        self.leng = _len
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self, base=0, item=0, leng=None, refs=None,
          both=True, kind=None, type=None):
    '''Reset all specified attributes, validating each value.

    Attributes are assigned in declaration order; a ValueError for a
    later attribute leaves the earlier ones already assigned, as
    before.
    '''
    for name, value, valid in (
            ('base', base, base >= 0),
            ('item', item, item >= 0),
            ('leng', leng, leng in _all_lengs),   # XXX or _callable(leng)
            ('refs', refs, refs in _all_refs),    # XXX or _callable(refs)
            ('both', both, both in (False, True)),
            ('kind', kind, kind in _all_kinds)):
        if not valid:
            raise ValueError('invalid option: %s=%r' % (name, value))
        setattr(self, name, value)
    self.type = type
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, obj, size):
    '''Record one more sized object in this profile.'''
    self.number += 1
    self.total += size
    if size > self.high:  # track the largest instance
        self.high = size
    try:  # prefer keeping a weak reference
        self.objref, self.weak = Weakref.ref(obj), True
    except TypeError:  # obj is not weak-referenceable
        self.objref, self.weak = obj, False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _printf(self, *args, **kwargs):
    '''Forward to the module-level _printf, directing output to the
    configured stream unless the call already specifies a file.
    '''
    stream = self._stream
    if stream and not kwargs.get('file'):
        kwargs['file'] = stream
    _printf(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clear(self):
    '''Reset the per-run sizing state.'''
    self._depth = 0      # recursion depth reached
    self._duplicate = 0
    self._incl = ''      # or ' (incl. code)'
    self._missed = 0     # objects missed due to errors
    self._profile = False
    self._profs = {}
    self._seen = {}
    self._total = 0      # total size
    excluded = self._excl_d
    for key in _keys(excluded):  # keep keys, zero the counters in place
        excluded[key] = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prof(self, key):
    '''Return the _Prof entry for *key*, creating it on first use.'''
    entry = self._profs.get(key, None)
    if not entry:
        entry = _Prof()
        self._profs[key] = entry
    return entry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _sizer(self, obj, deep, sized):
    '''Size an object, recursively.

    *deep* is the current recursion depth (0 for a given object);
    *sized* is an optional factory producing Asized results for
    detailed reports.  Returns either the accumulated size or, when
    *sized* is given, the object built by *sized*.
    '''
    s, f, i = 0, 0, id(obj)
    # skip obj if seen before
    # or if ref of a given obj
    if i in self._seen:
        if deep:
            self._seen[i] += 1
            if sized:
                s = sized(s, f, name=self._nameof(obj))
            return s
    else:
        self._seen[i] = 0
    try:
        k, rs = _objkey(obj), []
        if k in self._excl_d:
            self._excl_d[k] += 1
        else:
            v = _typedefs.get(k, None)
            if not v: # new typedef
                _typedefs[k] = v = _typedef(obj, derive=self._derive_,
                                            infer=self._infer_)
            if (v.both or self._code_) and v.kind is not self._ign_d:
                s = f = v.flat(obj, self._mask) # flat size
                if self._profile: # profile type
                    self._prof(k).update(obj, s)
            # recurse, but not for nested modules
            if v.refs and deep < self._limit_ and not (deep and ismodule(obj)):
                # add sizes of referents
                r, z, d = v.refs, self._sizer, deep + 1
                if sized and deep < self._detail_:
                    # use named referents
                    for o in r(obj, True):
                        if isinstance(o, _NamedRef):
                            t = z(o.ref, d, sized)
                            t.name = o.name
                        else:
                            t = z(o, d, sized)
                            t.name = self._nameof(o)
                        rs.append(t)
                        s += t.size
                else: # no sum(<generator_expression>) in Python 2.2
                    for o in r(obj, False):
                        s += z(o, d, None)
                # recursion depth
                if self._depth < d:
                    self._depth = d
        self._seen[i] += 1
    except RuntimeError: # XXX RecursionLimitExceeded:
        self._missed += 1
    if sized:
        s = sized(s, f, name=self._nameof(obj), refs=rs)
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exclude_refs(self, *objs):
    '''Exclude any references to the specified objects from sizing.

    The objects themselves are still sized when passed as positional
    arguments in subsequent calls to **asizeof** and **asizesof**.
    '''
    seen = self._seen
    for obj in objs:
        seen.setdefault(id(obj), 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exclude_types(self, *objs):
    '''Exclude the given objects' instances and types from sizing,
    even when passed positionally to later **asizeof**/**asizesof**
    calls.
    '''
    excluded = self._excl_d
    for obj in objs:
        for key in _keytuple(obj):
            if key and key not in excluded:
                excluded[key] = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def print_summary(self, w=0, objs=(), **print3opts):
    '''Print the summary statistics.

    *w=0*            -- indentation for each line
    *objs=()*        -- optional, list of objects
    *print3options*  -- print options, as in Python 3.0
    '''
    self._printf('%*d bytes%s%s', w, self._total, _SI(self._total), self._incl, **print3opts)
    if self._mask:
        self._printf('%*d byte aligned', w, self._mask + 1, **print3opts)
    self._printf('%*d byte sizeof(void*)', w, _sizeof_Cvoidp, **print3opts)
    n = len(objs or ())
    if n > 0:
        d = self._duplicate or ''
        if d:
            d = ', %d duplicate' % self._duplicate
        self._printf('%*d object%s given%s', w, n, _plural(n), d, **print3opts)
    # sized = seen entries with a non-zero visit count
    t = _sum([1 for t in _values(self._seen) if t != 0]) # [] for Python 2.2
    self._printf('%*d object%s sized', w, t, _plural(t), **print3opts)
    if self._excl_d:
        t = _sum(_values(self._excl_d))
        self._printf('%*d object%s excluded', w, t, _plural(t), **print3opts)
    t = _sum(_values(self._seen))
    self._printf('%*d object%s seen', w, t, _plural(t), **print3opts)
    if self._missed > 0:
        self._printf('%*d object%s missed', w, self._missed, _plural(self._missed), **print3opts)
    if self._depth > 0:
        self._printf('%*d recursion depth', w, self._depth, **print3opts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def print_typedefs(self, w=0, **print3opts):
    '''Print the types and dict tables.

    *w=0*            -- indentation for each line
    *print3options*  -- print options, as in Python 3.0
    '''
    for k in _all_kinds:
        # XXX Python 3.0 doesn't sort type objects
        t = [(self._prepr(a), v) for a, v in _items(_typedefs) if v.kind == k and (v.both or self._code_)]
        if t:
            self._printf('%s%*d %s type%s: basicsize, itemsize, _len_(), _refs()',
                         linesep, w, len(t), k, _plural(len(t)), **print3opts)
            for a, v in _sorted(t):
                self._printf('%*s %s: %s', w, '', a, v, **print3opts)
    # dict and dict-like classes
    t = _sum([len(v) for v in _values(_dict_classes)]) # [] for Python 2.2
    if t:
        self._printf('%s%*d dict/-like classes:', linesep, w, t, **print3opts)
        for m, v in _items(_dict_classes):
            self._printf('%*s %s: %s', w, '', m, self._prepr(v), **print3opts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self, align=8, clip=80, code=False, derive=False,
          detail=0, ignored=True, infer=False, limit=100, stats=0,
          stream=None):
    '''Reset options and clear all state.

    The available options and default values are:

         *align=8*       -- size alignment
         *clip=80*       -- clip repr() strings
         *code=False*    -- incl. (byte)code size
         *derive=False*  -- derive from super type
         *detail=0*      -- Asized refs level
         *ignored=True*  -- ignore certain types
         *infer=False*   -- try to infer types
         *limit=100*     -- recursion limit
         *stats=0.0*     -- print statistics, see function **asizeof**
         *stream=None*   -- output stream for printing

    See function **asizeof** for a description of the options.
    '''
    # remember the option values
    self._align_ = align
    self._clip_ = clip
    self._code_ = code
    self._derive_ = derive
    self._detail_ = detail  # for Asized only
    self._infer_ = infer
    self._limit_ = limit
    self._stats_ = stats
    self._stream = stream
    self._ign_d = _kind_ignored if ignored else None
    # clear state, then apply the derived settings
    self._clear()
    self.set(align=align, code=code, stats=stats)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_conda():
"""Find the conda executable robustly across conda versions. Returns ------- conda : str Path to the conda executable. Raises ------ IOError If the executable cannot be found in either the CONDA_EXE environment variable or in the PATH. Notes ----- In POSIX platforms in conda >= 4.4, conda can be set up as a bash function rather than an executable. (This is to enable the syntax ``conda activate env-name``.) In this case, the environment variable ``CONDA_EXE`` contains the path to the conda executable. In other cases, we use standard search for the appropriate name in the PATH. See https://github.com/airspeed-velocity/asv/issues/645 for more details. """ |
if 'CONDA_EXE' in os.environ:
conda = os.environ['CONDA_EXE']
else:
conda = util.which('conda')
return conda |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recvall(sock, size):
    """Read exactly *size* bytes from *sock*.

    Raises RuntimeError when the peer closes the connection before
    *size* bytes were received.
    """
    chunks = []
    received = 0
    while received < size:
        chunk = sock.recv(size - received)
        if not chunk:  # connection closed early
            raise RuntimeError("did not receive data from socket "
                               "(size {}, got only {!r})".format(size, b"".join(chunks)))
        chunks.append(chunk)
        received += len(chunk)
    return b"".join(chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_source_code(items):
    """Return the concatenated, dedented source code of *items*.

    Consecutive methods of the same class are grouped under a single
    ``class ...:`` header; items whose source is unavailable are
    skipped.
    """
    chunks = []
    previous_class = None
    for item in items:
        try:
            lines, _ = inspect.getsourcelines(item)
        except TypeError:
            continue
        if not lines:
            continue
        source = textwrap.dedent("\n".join(line.rstrip() for line in lines))
        class_name = None
        if inspect.ismethod(item):
            # Recover the defining class name (Py2 im_class or
            # Py3 __qualname__)
            if hasattr(item, 'im_class'):
                class_name = item.im_class.__name__
            elif hasattr(item, '__qualname__'):
                parts = item.__qualname__.split('.')
                if len(parts) > 1:
                    class_name = parts[-2]
        if class_name:
            indented = source.replace("\n", "\n ")
            if previous_class != class_name:
                source = "class {0}:\n {1}".format(class_name, indented)
            else:
                source = " {1}".format(class_name, indented)
        chunks.append(source)
        previous_class = class_name
    return "\n\n".join(chunks).rstrip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disc_modules(module_name, ignore_import_errors=False):
    """Recursively import *module_name* and every sub-module in the package.

    Yields
    ------
    module
        Each imported module in the package tree.
    """
    try:
        module = import_module(module_name)
    except BaseException:
        if not ignore_import_errors:
            raise
        traceback.print_exc()
        return
    yield module
    if getattr(module, '__path__', None):
        prefix = module_name + '.'
        for _, sub_name, _ in pkgutil.iter_modules(module.__path__, prefix):
            for sub_module in disc_modules(
                    sub_name, ignore_import_errors=ignore_import_errors):
                yield sub_module
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disc_benchmarks(root, ignore_import_errors=False):
    """Discover all benchmarks in a directory tree, yielding Benchmark objects.

    For each top-level class definition, looks at every method; for
    each free function, looks at the function itself.  Only items
    with the special benchmark names produce a Benchmark.
    """
    root_name = os.path.basename(root)
    for module in disc_modules(root_name,
                               ignore_import_errors=ignore_import_errors):
        for attr_name, module_attr in module.__dict__.items():
            if attr_name.startswith('_'):
                continue
            if inspect.isclass(module_attr):
                for name, class_attr in inspect.getmembers(module_attr):
                    if (inspect.isfunction(class_attr) or
                            inspect.ismethod(class_attr)):
                        benchmark = _get_benchmark(name, module, module_attr,
                                                   class_attr)
                        if benchmark is not None:
                            yield benchmark
            elif inspect.isfunction(module_attr):
                benchmark = _get_benchmark(attr_name, module, None, module_attr)
                if benchmark is not None:
                    yield benchmark
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_benchmark_from_name(root, name, extra_params=None):
    """Create a benchmark from a fully-qualified benchmark name.

    Parameters
    ----------
    root : str
        Path to the root of a benchmark suite.
    name : str
        Fully-qualified name of a specific benchmark, optionally with
        a ``-<param_idx>`` suffix selecting one parameter combination.
    extra_params : dict, optional
        Extra attributes to override on the resulting benchmark.
    """
    if '-' in name:
        try:
            name, param_idx = name.split('-', 1)
            param_idx = int(param_idx)
        except ValueError:
            raise ValueError("Benchmark id %r is invalid" % (name,))
    else:
        param_idx = None
    update_sys_path(root)
    benchmark = None
    # try to directly import benchmark function by guessing its import module
    # name
    parts = name.split('.')
    for i in [1, 2]:
        # i == 1: free function; i == 2: method of a class
        path = os.path.join(root, *parts[:-i]) + '.py'
        if not os.path.isfile(path):
            continue
        modname = '.'.join([os.path.basename(root)] + parts[:-i])
        module = import_module(modname)
        try:
            module_attr = getattr(module, parts[-i])
        except AttributeError:
            break
        if i == 1 and inspect.isfunction(module_attr):
            benchmark = _get_benchmark(parts[-i], module, None, module_attr)
            break
        elif i == 2 and inspect.isclass(module_attr):
            try:
                class_attr = getattr(module_attr, parts[-1])
            except AttributeError:
                break
            if (inspect.isfunction(class_attr) or
                    inspect.ismethod(class_attr)):
                benchmark = _get_benchmark(parts[-1], module, module_attr,
                                           class_attr)
                break
    if benchmark is None:
        # fall back to full discovery
        for benchmark in disc_benchmarks(root):
            if benchmark.name == name:
                break
        else:
            raise ValueError(
                "Could not find benchmark '{0}'".format(name))
    if param_idx is not None:
        benchmark.set_param_idx(param_idx)
    if extra_params:
        class ExtraBenchmarkAttrs:
            pass
        for key, value in extra_params.items():
            setattr(ExtraBenchmarkAttrs, key, value)
        benchmark._attr_sources.insert(0, ExtraBenchmarkAttrs)
    return benchmark
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_benchmarks(root, fp):
    """Stream all discovered benchmarks to *fp* as a JSON list."""
    update_sys_path(root)
    # Streaming of JSON back out to the master process
    fp.write('[')
    first = True
    for benchmark in disc_benchmarks(root):
        if not first:
            fp.write(', ')
        # keep only plain, JSON-serializable public attributes
        serializable = {
            key: value
            for key, value in benchmark.__dict__.items()
            if not key.startswith('_') and
            isinstance(value, (str, int, float, list, dict, bool))
        }
        json.dump(serializable, fp, skipkeys=True)
        first = False
    fp.write(']')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert_param(self, param):
    """Prepend *param* to the current parameter tuple."""
    self._current_params = (param,) + tuple(self._current_params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_cache_dir(self, commit_hash):
""" Get the cache dir and timestamp file corresponding to a given commit hash. """ |
path = os.path.join(self._path, commit_hash)
stamp = path + ".timestamp"
return path, stamp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_atom(dest, entries, author, title, address, updated=None, link=None, language="en"):
    """Write an atom feed to a file.

    Parameters
    ----------
    dest : str or file-like
        Destination file path, or a file-like object.
    entries : list of FeedEntry
        Feed entries.
    author : str
        Author of the feed.
    title : str
        Title for the feed.
    address : str
        Address (domain name or email) used when building unique IDs.
    updated : datetime, optional
        Time stamp for the feed; defaults to the newest entry (or now).
    link : str, optional
        Link for the feed.
    language : str, optional
        Language of the feed.  Default is 'en'.
    """
    if updated is None:
        if entries:
            updated = max(entry.updated for entry in entries)
        else:
            updated = datetime.datetime.utcnow()
    root = etree.Element(ATOM_NS + 'feed')
    # id (obligatory)
    el = etree.Element(ATOM_NS + 'id')
    el.text = _get_id(address, None, ["feed", author, title])
    root.append(el)
    # author (obligatory)
    el = etree.Element(ATOM_NS + 'author')
    el2 = etree.Element(ATOM_NS + 'name')
    el2.text = author
    el.append(el2)
    root.append(el)
    # title (obligatory)
    el = etree.Element(ATOM_NS + 'title')
    el.attrib[XML_NS + 'lang'] = language
    el.text = title
    root.append(el)
    # updated (obligatory)
    el = etree.Element(ATOM_NS + 'updated')
    el.text = updated.strftime('%Y-%m-%dT%H:%M:%SZ')
    root.append(el)
    # link
    if link is not None:
        el = etree.Element(ATOM_NS + 'link')
        el.attrib[ATOM_NS + 'href'] = link
        root.append(el)
    # entries
    for entry in entries:
        root.append(entry.get_atom(address, language))
    tree = etree.ElementTree(root)
    def write(f):
        # Python 2.6's ElementTree needs a compatibility shim
        if sys.version_info[:2] < (2, 7):
            _etree_py26_write(f, tree)
        else:
            tree.write(f, xml_declaration=True, default_namespace=ATOM_NS[1:-1],
                       encoding=str('utf-8'))
    if hasattr(dest, 'write'):
        write(dest)
    else:
        with util.long_path_open(dest, 'wb') as f:
            write(f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _etree_py26_write(f, tree):
    """Compatibility workaround for the ElementTree shipped with Python 2.6.

    Emits the XML declaration manually and temporarily monkeypatches
    ``etree.fixtag`` so the Atom default namespace and ``xml:lang``
    attributes serialize correctly.
    """
    f.write("<?xml version='1.0' encoding='utf-8'?>\n".encode('utf-8'))
    if etree.VERSION[:3] == '1.2':
        # very old ElementTree: provide our own fixtag replacement
        def fixtag(tag, namespaces):
            if tag == XML_NS + 'lang':
                return 'xml:lang', ""
            if '}' in tag:
                j = tag.index('}') + 1
                tag = tag[j:]
            xmlns = ''
            if tag == 'feed':
                xmlns = ('xmlns', str('http://www.w3.org/2005/Atom'))
                namespaces['http://www.w3.org/2005/Atom'] = 'xmlns'
            return tag, xmlns
    else:
        fixtag = etree.fixtag
    old_fixtag = etree.fixtag
    etree.fixtag = fixtag
    try:
        tree.write(f, encoding=str('utf-8'))
    finally:
        # always restore the original fixtag
        etree.fixtag = old_fixtag
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_id(owner, date, content):
""" Generate an unique Atom id for the given content """ |
h = hashlib.sha256()
# Hash still contains the original project url, keep as is
h.update("github.com/spacetelescope/asv".encode('utf-8'))
for x in content:
if x is None:
h.update(",".encode('utf-8'))
else:
h.update(x.encode('utf-8'))
h.update(",".encode('utf-8'))
if date is None:
date = datetime.datetime(1970, 1, 1)
return "tag:{0},{1}:/{2}".format(owner, date.strftime('%Y-%m-%d'), h.hexdigest()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def InitializeDebuggeeLabels(self, flags):
    """Initialize debuggee labels from environment variables and flags.

    The caller passes all the flags that the debuglet got.  This
    function only uses the flags that label the debuggee, and flags
    take precedence over environment variables.

    Args:
      flags: dictionary of debuglet command line flags.
    """
    self._debuggee_labels = {}
    for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):
        # var_names is a list of possible environment variables that may contain
        # the label value. Find the first one that is set.
        for name in var_names:
            value = os.environ.get(name)
            if value:
                # Special case for module. We omit the "default" module
                # to stay consistent with AppEngine.
                if label == labels.Debuggee.MODULE and value == 'default':
                    break
                self._debuggee_labels[label] = value
                break
    if flags:
        # flags override any environment-derived labels
        self._debuggee_labels.update(
            {name: value for (name, value) in six.iteritems(flags)
             if name in _DEBUGGEE_LABELS})
    self._debuggee_labels['projectid'] = self._project_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SetupAuth(self, project_id=None, project_number=None, service_account_json_file=None):
    """Sets up authentication with Google APIs.

    Uses the credentials from service_account_json_file if provided,
    falling back to application default credentials.  See
    https://cloud.google.com/docs/authentication/production.

    Args:
      project_id: GCP project ID (e.g. myproject). If not provided, will
          attempt to retrieve it from the credentials.
      project_number: GCP project number (e.g. 72386324623). If not
          provided, project_id will be used in its place.
      service_account_json_file: JSON file to use for credentials. If not
          provided, will default to application default credentials.

    Raises:
      NoProjectIdError: If the project id cannot be determined.
    """
    if service_account_json_file:
        self._credentials = (
            service_account.Credentials.from_service_account_file(
                service_account_json_file, scopes=_CLOUD_PLATFORM_SCOPE))
        if not project_id:
            # fall back to the project id embedded in the key file
            with open(service_account_json_file) as f:
                project_id = json.load(f).get('project_id')
    else:
        self._credentials, credentials_project_id = google.auth.default(
            scopes=_CLOUD_PLATFORM_SCOPE)
        project_id = project_id or credentials_project_id
    if not project_id:
        raise NoProjectIdError(
            'Unable to determine the project id from the API credentials. '
            'Please specify the project id using the --project_id flag.')
    self._project_id = project_id
    self._project_number = project_number or project_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Start(self):
    """Start the main worker thread (a daemon)."""
    self._shutdown = False
    thread = threading.Thread(target=self._MainThreadProc)
    thread.name = 'Cloud Debugger main worker thread'
    thread.daemon = True
    self._main_thread = thread
    thread.start()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Stop(self):
    """Signal the worker threads to shut down and wait until they exit."""
    self._shutdown = True
    self._new_updates.set()  # Wake up the transmission thread.
    for attr in ('_main_thread', '_transmission_thread'):
        thread = getattr(self, attr)
        if thread is not None:
            thread.join()
            setattr(self, attr, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def EnqueueBreakpointUpdate(self, breakpoint):
    """Queue *breakpoint* for asynchronous transmission to the backend.

    Returns immediately; the transmission thread (started lazily
    here) performs the actual upload and retries transient errors.

    Args:
      breakpoint: breakpoint in either final or non-final state.
    """
    with self._transmission_thread_startup_lock:
        if self._transmission_thread is None:
            thread = threading.Thread(target=self._TransmissionThreadProc)
            thread.name = 'Cloud Debugger transmission thread'
            thread.daemon = True
            self._transmission_thread = thread
            thread.start()
    self._transmission_queue.append((breakpoint, 0))
    self._new_updates.set()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _MainThreadProc(self):
    """Worker loop: (re)register the debuggee and poll for breakpoints."""
    needs_registration = True
    while not self._shutdown:
        if needs_registration:
            service = self._BuildService()
            needs_registration, delay = self._RegisterDebuggee(service)
        if not needs_registration:
            needs_registration, delay = self._ListActiveBreakpoints(service)
            if self.on_idle is not None:
                self.on_idle()
        if not self._shutdown:
            time.sleep(delay)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _TransmissionThreadProc(self):
"""Entry point for the transmission worker thread.""" |
reconnect = True
while not self._shutdown:
self._new_updates.clear()
if reconnect:
service = self._BuildService()
reconnect = False
reconnect, delay = self._TransmitBreakpointUpdates(service)
self._new_updates.wait(delay) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _RegisterDebuggee(self, service):
  """Single attempt to register the debuggee. If the registration succeeds, sets self._debuggee_id to the registered debuggee ID. Args: service: client to use for API calls Returns: (registration_required, delay) tuple """ |
  # NOTE(review): BaseException (not Exception) is caught below so the polling
  # thread survives any failure; this also swallows KeyboardInterrupt.
  try:
    # Building the debuggee message can itself fail (e.g. reading app files),
    # hence the outer try.
    request = {'debuggee': self._GetDebuggee()}
    try:
      response = service.debuggees().register(body=request).execute()
      # self._project_number will refer to the project id on initialization if
      # the project number is not available. The project field in the debuggee
      # will always refer to the project number. Update so the server will not
      # have to do id->number translations in the future.
      project_number = response['debuggee'].get('project')
      self._project_number = project_number or self._project_number
      self._debuggee_id = response['debuggee']['id']
      native.LogInfo('Debuggee registered successfully, ID: %s' % (
          self._debuggee_id))
      self.register_backoff.Succeeded()
      return (False, 0)  # Proceed immediately to list active breakpoints.
    except BaseException:
      # Transient registration failure; fall through to the backoff return.
      native.LogInfo('Failed to register debuggee: %s, %s' %
                     (request, traceback.format_exc()))
  except BaseException:
    native.LogWarning('Debuggee information not available: ' +
                      traceback.format_exc())
  # Registration is still required; retry after an exponentially growing delay.
  return (True, self.register_backoff.Failed())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ListActiveBreakpoints(self, service):
  """Single attempt query the list of active breakpoints. Must not be called before the debuggee has been registered. If the request fails, this function resets self._debuggee_id, which triggers repeated debuggee registration. Args: service: client to use for API calls Returns: (registration_required, delay) tuple """ |
  try:
    # Hanging-get style query: waitToken lets the server resume the wait from
    # where the previous call left off (successOnTimeout avoids an HTTP error
    # when the long poll simply times out).
    response = service.debuggees().breakpoints().list(
        debuggeeId=self._debuggee_id, waitToken=self._wait_token,
        successOnTimeout=True).execute()
    if not response.get('waitExpired'):
      # Only advance the token and process results when the server returned
      # fresh data rather than a timeout.
      self._wait_token = response.get('nextWaitToken')
      breakpoints = response.get('breakpoints') or []
      if self._breakpoints != breakpoints:
        self._breakpoints = breakpoints
        native.LogInfo(
            'Breakpoints list changed, %d active, wait token: %s' % (
                len(self._breakpoints), self._wait_token))
        # Hand the callback its own deep copy so that later mutations on
        # either side cannot leak across.
        self.on_active_breakpoints_changed(copy.deepcopy(self._breakpoints))
  except BaseException:
    native.LogInfo('Failed to query active breakpoints: ' +
                   traceback.format_exc())
    # Forget debuggee ID to trigger repeated debuggee registration. Once the
    # registration succeeds, the worker thread will retry this query
    self._debuggee_id = None
    return (True, self.list_backoff.Failed())
  self.list_backoff.Succeeded()
  return (False, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _TransmitBreakpointUpdates(self, service):
  """Tries to send pending breakpoint updates to the backend. Sends all the pending breakpoint updates. In case of transient failures, the breakpoint is inserted back to the top of the queue. Application failures are not retried (for example updating breakpoint in a final state). Each pending breakpoint maintains a retry counter. After repeated transient failures the breakpoint is discarded and dropped from the queue. Args: service: client to use for API calls Returns: (reconnect, timeout) tuple. The first element ("reconnect") is set to true on unexpected HTTP responses. The caller should discard the HTTP connection and create a new one. The second element ("timeout") is set to None if all pending breakpoints were sent successfully. Otherwise returns time interval in seconds to stall before retrying. """ |
  reconnect = False
  retry_list = []
  # There is only one consumer, so two step pop is safe.
  while self._transmission_queue:
    breakpoint, retry_count = self._transmission_queue.popleft()
    try:
      service.debuggees().breakpoints().update(
          debuggeeId=self._debuggee_id, id=breakpoint['id'],
          body={'breakpoint': breakpoint}).execute()
      native.LogInfo('Breakpoint %s update transmitted successfully' % (
          breakpoint['id']))
    except apiclient.errors.HttpError as err:
      # Treat 400 error codes (except timeout) as application error that will
      # not be retried. All other errors are assumed to be transient.
      status = err.resp.status
      is_transient = ((status >= 500) or (status == 408))
      if is_transient and retry_count < self.max_transmit_attempts - 1:
        # Transient failure with retry budget left: re-queue with a bumped
        # retry counter.
        native.LogInfo('Failed to send breakpoint %s update: %s' % (
            breakpoint['id'], traceback.format_exc()))
        retry_list.append((breakpoint, retry_count + 1))
      elif is_transient:
        # Retry budget exhausted: drop the update on the floor.
        native.LogWarning(
            'Breakpoint %s retry count exceeded maximum' % breakpoint['id'])
      else:
        # This is very common if multiple instances are sending final update
        # simultaneously.
        native.LogInfo('%s, breakpoint: %s' % (err, breakpoint['id']))
    except BaseException:
      # Non-HTTP failure: ask the caller to rebuild the connection.
      native.LogWarning(
          'Fatal error sending breakpoint %s update: %s' % (
              breakpoint['id'], traceback.format_exc()))
      reconnect = True
  # Failed-but-retriable updates go back into the queue to be retried after
  # the backoff delay returned below.
  self._transmission_queue.extend(retry_list)
  if not self._transmission_queue:
    self.update_backoff.Succeeded()
    # Nothing to send, wait until next breakpoint update.
    return (reconnect, None)
  else:
    return (reconnect, self.update_backoff.Failed())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _GetDebuggee(self):
  """Builds the debuggee structure."""
  # Agent version string, e.g. 'google.com/python37-gcp/v2'.
  python_version = ''.join(platform.python_version().split('.')[:2])
  agent_major = 'v' + version.__version__.split('.')[0]
  agent_version = ('google.com/python%s-gcp/%s' % (python_version,
                                                   agent_major))

  debuggee = {
      'project': self._project_number,
      'description': self._GetDebuggeeDescription(),
      'labels': self._debuggee_labels,
      'agentVersion': agent_version,
  }

  # Attach the source context when the deployment provides one.
  source_context = self._ReadAppJsonFile('source-context.json')
  if source_context:
    debuggee['sourceContexts'] = [source_context]

  # The uniquifier is derived from everything assembled above.
  debuggee['uniquifier'] = self._ComputeUniquifier(debuggee)
  return debuggee
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _GetDebuggeeDescription(self):
  """Formats debuggee description based on debuggee labels."""
  # Join the label values in the canonical _DESCRIPTION_LABELS order,
  # skipping labels that were not provided.
  parts = []
  for label in _DESCRIPTION_LABELS:
    if label in self._debuggee_labels:
      parts.append(self._debuggee_labels[label])
  return '-'.join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ComputeUniquifier(self, debuggee):
"""Computes debuggee uniquifier. The debuggee uniquifier has to be identical on all instances. Therefore the uniquifier should not include any random numbers and should only be based on inputs that are guaranteed to be the same on all instances. Args: debuggee: complete debuggee message without the uniquifier Returns: Hex string of SHA1 hash of project information, debuggee labels and debuglet version. """ |
uniquifier = hashlib.sha1()
# Compute hash of application files if we don't have source context. This
# way we can still distinguish between different deployments.
if ('minorversion' not in debuggee.get('labels', []) and
'sourceContexts' not in debuggee):
uniquifier_computer.ComputeApplicationUniquifier(uniquifier)
return uniquifier.hexdigest() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ReadAppJsonFile(self, relative_path):
"""Reads JSON file from an application directory. Args: relative_path: file name relative to application root directory. Returns: Parsed JSON data or None if the file does not exist, can't be read or not a valid JSON file. """ |
try:
with open(os.path.join(sys.path[0], relative_path), 'r') as f:
return json.load(f)
except (IOError, ValueError):
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def NormalizePath(path):
  """Removes any Python system path prefix from the given path.

  Python keeps almost all paths absolute. This is not what we actually want
  to return. This loops through system paths (directories in which Python
  will load modules). If "path" is relative to one of them, the directory
  prefix is removed.

  Args:
    path: absolute path to normalize (relative paths will not be altered)

  Returns:
    Relative path if "path" is within one of the sys.path directories or the
    input otherwise.
  """
  normalized = os.path.normpath(path)
  for root in sys.path:
    if not root:
      continue
    # os.path.join with '' guarantees a single trailing path separator.
    prefix = os.path.join(root, '')
    if normalized.startswith(prefix):
      return normalized[len(prefix):]
  return normalized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def DetermineType(value):
  """Determines the type of val, returning a "full path" string.

  For example:
    DetermineType(5) -> __builtin__.int
    DetermineType(Foo()) -> com.google.bar.Foo

  Args:
    value: Any value, the value is irrelevant as only the type metadata
    is checked

  Returns:
    Type path string. None if type cannot be determined.
  """
  value_type = type(value)
  name = getattr(value_type, '__name__', None)
  if name is None:
    # Anonymous/odd types without __name__ cannot be described.
    return None
  module = getattr(value_type, '__module__', '')
  # Prefix with the module path when one is available.
  return '%s.%s' % (module, name) if module else name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetLoggingLocation():
  """Search for and return the file and line number from the log collector.

  Returns:
    (pathname, lineno, func_name) The full path, line number, and function
    name for the logpoint location. (None, None, None) when not found.
  """
  current = inspect.currentframe()
  # Only frames executing in this same file can carry the marker local.
  this_file = current.f_code.co_filename
  caller = current.f_back
  while caller is not None:
    if (caller.f_code.co_filename == this_file and
        'cdbg_logging_location' in caller.f_locals):
      location = caller.f_locals['cdbg_logging_location']
      # A malformed marker is treated the same as no marker at all.
      return location if len(location) == 3 else (None, None, None)
    caller = caller.f_back
  return (None, None, None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SetLogger(logger):
  """Sets the logger object to use for all 'LOG' breakpoint actions.""" |
  # Rebind the module-level logging callables so logpoint messages are
  # emitted through the application's own logger.
  global log_info_message
  global log_warning_message
  global log_error_message
  log_info_message = logger.info
  log_warning_message = logger.warning
  log_error_message = logger.error
  # NOTE(review): LineNoFilter is defined elsewhere in this module;
  # presumably it rewrites the record's reported location — verify there.
  logger.addFilter(LineNoFilter())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _EvaluateExpression(frame, expression):
"""Compiles and evaluates watched expression. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: (False, status) on error or (True, value) on success. """ |
try:
code = compile(expression, '<watched_expression>', 'eval')
except (TypeError, ValueError) as e:
# expression string contains null bytes.
return (False, {
'isError': True,
'refersTo': 'VARIABLE_NAME',
'description': {
'format': 'Invalid expression',
'parameters': [str(e)]}})
except SyntaxError as e:
return (False, {
'isError': True,
'refersTo': 'VARIABLE_NAME',
'description': {
'format': 'Expression could not be compiled: $0',
'parameters': [e.msg]}})
try:
return (True, native.CallImmutable(frame, code))
except BaseException as e: # pylint: disable=broad-except
return (False, {
'isError': True,
'refersTo': 'VARIABLE_VALUE',
'description': {
'format': 'Exception occurred: $0',
'parameters': [str(e)]}}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _GetFrameCodeObjectName(frame):
"""Gets the code object name for the frame. Args: frame: the frame to get the name from Returns: The function name if the code is a static function or the class name with the method name if it is an member function. """ |
# This functions under the assumption that member functions will name their
# first parameter argument 'self' but has some edge-cases.
if frame.f_code.co_argcount >= 1 and 'self' == frame.f_code.co_varnames[0]:
return (frame.f_locals['self'].__class__.__name__ +
'.' + frame.f_code.co_name)
else:
return frame.f_code.co_name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Collect(self, top_frame):
  """Collects call stack, local variables and objects. Starts collection from the specified frame. We don't start from the top frame to exclude the frames due to debugger. Updates the content of self.breakpoint. Args: top_frame: top frame to start data collection. """ |
  # Evaluate call stack.
  frame = top_frame
  top_line = self.breakpoint['location']['line']
  breakpoint_frames = self.breakpoint['stackFrames']
  try:
    # Evaluate watched expressions.
    if 'expressions' in self.breakpoint:
      self.breakpoint['evaluatedExpressions'] = [
          self._CaptureExpression(top_frame, expression) for expression
          in self.breakpoint['expressions']]
    # Walk the stack outward, capturing at most max_frames frames; only the
    # first max_expand_frames get their arguments/locals captured.
    while frame and (len(breakpoint_frames) < self.max_frames):
      # For the top frame report the breakpoint's configured line rather
      # than f_lineno.
      line = top_line if frame == top_frame else frame.f_lineno
      code = frame.f_code
      if len(breakpoint_frames) < self.max_expand_frames:
        frame_arguments, frame_locals = self.CaptureFrameLocals(frame)
      else:
        frame_arguments = []
        frame_locals = []
      breakpoint_frames.append({
          'function': _GetFrameCodeObjectName(frame),
          'location': {
              'path': NormalizePath(code.co_filename),
              'line': line
          },
          'arguments': frame_arguments,
          'locals': frame_locals
      })
      frame = frame.f_back
  except BaseException as e:  # pylint: disable=broad-except
    # The variable table will get serialized even though there was a failure.
    # The results can be useful for diagnosing the internal error.
    self.breakpoint['status'] = {
        'isError': True,
        'description': {
            'format': ('INTERNAL ERROR: Failed while capturing locals '
                       'of frame $0: $1'),
            'parameters': [str(len(breakpoint_frames)), str(e)]}}
  # Number of entries in _var_table. Starts at 1 (index 0 is the 'buffer full'
  # status value).
  num_vars = 1
  # Explore variables table in BFS fashion. The variables table will grow
  # inside CaptureVariable as we encounter new references.
  while (num_vars < len(self._var_table)) and (
      self._total_size < self.max_size):
    self._var_table[num_vars] = self.CaptureVariable(
        self._var_table[num_vars], 0, self.default_capture_limits,
        can_enqueue=False)
    # Move on to the next entry in the variable table.
    num_vars += 1
  # Trim variables table and change make all references to variables that
  # didn't make it point to var_index of 0 ("buffer full")
  self.TrimVariableTable(num_vars)
  # Attach auxiliary context (environment labels, request log id, user id).
  self._CaptureEnvironmentLabels()
  self._CaptureRequestLogId()
  self._CaptureUserId()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.