text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_files(files):
    """Expand list of paths to include all text files matching the pattern.

    :param files: iterable of file and/or directory paths.
    :return: list of file paths; directories are walked recursively and only
        files with a recognized text extension are kept. Plain file paths
        are passed through unchanged.
    """
    expanded_files = []
    legal_extensions = [".md", ".txt", ".rtf", ".html", ".tex", ".markdown"]
    for f in files:
        # If it's a directory, recursively walk through it and find the files.
        if os.path.isdir(f):
            for dir_, _, filenames in os.walk(f):
                for filename in filenames:
                    # Compare the extension case-insensitively so files
                    # like "README.MD" are also picked up.
                    _, file_extension = os.path.splitext(filename)
                    if file_extension.lower() in legal_extensions:
                        expanded_files.append(os.path.join(dir_, filename))
        # Otherwise add the file directly.
        else:
            expanded_files.append(f)
    return expanded_files
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_vtech(text):
    """Suggest the correct name.

    source:     Virginia Tech Division of Student Affairs
    source_url: http://bit.ly/2en1zbv
    """
    institution = [
        ["Virginia Polytechnic Institute and State University",
         ["Virginia Polytechnic and State University"]],
    ]
    return preferred_forms_check(
        text,
        institution,
        "institution.vtech",
        "Incorrect name. Use '{}' instead of '{}'.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_decade_apostrophes_short(text):
    """Check the text for dates of the form X0's.

    Decades written as e.g. "80's" should not carry an apostrophe.
    """
    err = "dates_times.dates"
    msg = u"Apostrophes aren't needed for decades."
    # Raw string: avoids Python 3 invalid-escape warnings for \d.
    regex = r"\d0's"
    return existence_check(
        text, [regex], err, msg, excluded_topics=["50 Cent"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_decade_apostrophes_long(text):
    """Check the text for dates of the form XXX0's.

    Decades written as e.g. "1980's" should not carry an apostrophe.
    """
    err = "dates_times.dates"
    msg = u"Apostrophes aren't needed for decades."
    # Raw string: avoids Python 3 invalid-escape warnings for \d.
    regex = r"\d\d\d0's"
    return existence_check(text, [regex], err, msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close_cache_shelves_after(f):
    """Decorator that ensures cache shelves are closed after the call.

    Fixes two defects in the original: the wrapped function's return value
    is now propagated to the caller, and the shelves are closed even when
    the call raises.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        finally:
            close_cache_shelves()
    return wrapped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def memoize(f):
    """Cache results of computations on disk.

    Results are keyed by a SHA-256 hash of the function's qualified name
    plus its call arguments, and stored in a shelve file under the XDG
    cache directory (migrating from the legacy ~/.proselint location).
    """
    # Determine the location of the cache.
    cache_dirname = os.path.join(_get_xdg_cache_home(), 'proselint')
    legacy_cache_dirname = os.path.join(os.path.expanduser("~"), ".proselint")
    if not os.path.isdir(cache_dirname):
        # Migrate the cache from the legacy path to XDG compliant location.
        if os.path.isdir(legacy_cache_dirname):
            os.rename(legacy_cache_dirname, cache_dirname)
        # Create the cache if it does not already exist.
        else:
            os.makedirs(cache_dirname)
    # One shelve file per decorated function.
    cache_filename = f.__module__ + "." + f.__name__
    cachepath = os.path.join(cache_dirname, cache_filename)
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        # handle instance methods
        if hasattr(f, '__self__'):
            args = args[1:]
        signature = (f.__module__ + '.' + f.__name__).encode("utf-8")
        tempargdict = inspect.getcallargs(f, *args, **kwargs)
        # Fold every argument value into the signature. NOTE(review): this
        # assumes each value has .encode(), i.e. is a string — non-string
        # arguments raise and fall through to the TypeError handler below.
        for item in list(tempargdict.items()):
            signature += item[1].encode("utf-8")
        key = hashlib.sha256(signature).hexdigest()
        try:
            cache = _get_cache(cachepath)
            return cache[key]
        except KeyError:
            # Cache miss: compute, store, and persist the result.
            value = f(*args, **kwargs)
            cache[key] = value
            cache.sync()
            return value
        except TypeError:
            # Un-encodable arguments: warn and fall back to an uncached call.
            call_to = f.__module__ + '.' + f.__name__
            print('Warning: could not disk cache call to %s;'
                  'it probably has unhashable args. Error: %s' %
                  (call_to, traceback.format_exc()))
            return f(*args, **kwargs)
    return wrapped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_checks(options):
    """Extract the checks.

    :param options: options dict containing a "checks" mapping of
        check-module name -> enabled flag.
    :return: list of check functions from the enabled check modules.
    """
    sys.path.append(proselint_path)
    checks = []
    check_names = [key for (key, val)
                   in list(options["checks"].items()) if val]
    for check_name in check_names:
        module = importlib.import_module("checks." + check_name)
        for d in dir(module):
            # Check functions are named "check..." by convention;
            # a simple prefix test replaces the needless regex match.
            if d.startswith("check"):
                checks.append(getattr(module, d))
    return checks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_options():
    """Read various proselintrc files, allowing user overrides.

    Defaults are read from the first existing system/bundled config; user
    overrides come from the XDG config location, falling back to the
    legacy ~/.proselintrc. Files are opened with context managers so the
    handles are always closed (the original leaked them).
    """
    possible_defaults = (
        '/etc/proselintrc',
        os.path.join(proselint_path, '.proselintrc'),
    )
    options = {}
    has_overrides = False
    # Load the first default config that exists.
    for filename in possible_defaults:
        try:
            with open(filename) as f:
                options = json.load(f)
            break
        except IOError:
            pass
    # Read user configuration from the XDG location.
    try:
        xdg_config = os.path.join(
            _get_xdg_config_home(), 'proselint', 'config')
        with open(xdg_config) as f:
            user_options = json.load(f)
        has_overrides = True
    except IOError:
        pass
    # Read user configuration from the legacy path.
    if not has_overrides:
        try:
            legacy_config = os.path.join(
                os.path.expanduser('~'), '.proselintrc')
            with open(legacy_config) as f:
                user_options = json.load(f)
            has_overrides = True
        except IOError:
            pass
    # Merge the user overrides into the defaults; unknown check keys
    # are silently ignored.
    if has_overrides:
        if 'max_errors' in user_options:
            options['max_errors'] = user_options['max_errors']
        if 'checks' in user_options:
            for (key, value) in user_options['checks'].items():
                try:
                    options['checks'][key] = value
                except KeyError:
                    pass
    return options
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def errors_to_json(errors):
    """Convert the errors to JSON.

    Positions (line, column, start, end) are converted from 0-based to
    1-based indexing for the output document.
    """
    out = [
        {
            "check": check,
            "message": message,
            "line": line + 1,
            "column": column + 1,
            "start": start + 1,
            "end": end + 1,
            "extent": extent,
            "severity": severity,
            "replacements": replacements,
        }
        for (check, message, line, column, start,
             end, extent, severity, replacements) in errors
    ]
    return json.dumps(
        dict(status="success", data={"errors": out}), sort_keys=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def line_and_column(text, position):
    """Return the line number and column of a position in a string.

    Both values are 0-based. Returns None implicitly if the position lies
    beyond the text.
    """
    offset = 0
    for line_no, line in enumerate(text.splitlines(True)):
        # The comparison ignores trailing whitespace on the line, so a
        # position at the line terminator is attributed to the next line.
        if offset + len(line.rstrip()) >= position:
            return (line_no, position - offset)
        offset += len(line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lint(input_file, debug=False):
    """Run the linter on the input file.

    :param input_file: a string of text or a file-like object to read.
    :param debug: accepted for API compatibility; unused here.
    :return: list of error tuples sorted by (line, column), truncated to
        the configured max_errors.
    """
    options = load_options()
    if isinstance(input_file, string_types):
        text = input_file
    else:
        text = input_file.read()
    # Get the checks.
    checks = get_checks(options)
    # Apply all the checks.
    errors = []
    for check in checks:
        result = check(text)
        for error in result:
            # Use a distinct name for the error-code field so the `check`
            # loop variable is not shadowed (fixes the original shadowing).
            (start, end, check_name, message, replacements) = error
            (line, column) = line_and_column(text, start)
            if not is_quoted(start, text):
                errors += [(check_name, message, line, column, start, end,
                            end - start, "warning", replacements)]
            # Stop collecting from this check once over the cap; the final
            # truncation below enforces the limit globally.
            if len(errors) > options["max_errors"]:
                break
    # Sort the errors by line and column number.
    errors = sorted(errors[:options["max_errors"]], key=lambda e: (e[2], e[3]))
    return errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assert_error(text, check, n=1):
    """Assert that text has n errors of type check."""
    assert_error.description = "No {} error for '{}'".format(check, text)
    found_checks = [error[0] for error in lint(text)]
    assert check in found_checks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consistency_check(text, word_pairs, err, msg, offset=0):
    """Build a consistency checker for the given word_pairs.

    When both forms of a pair appear in the text, each occurrence of the
    less frequent form is flagged, suggesting the more frequent one.
    """
    errors = []
    msg = " ".join(msg.split())
    for pair in word_pairs:
        matches = [list(re.finditer(pair[0], text)),
                   list(re.finditer(pair[1], text))]
        if matches[0] and matches[1]:
            # Index of the minority form; ties flag the first form.
            minority = 1 if len(matches[0]) > len(matches[1]) else 0
            majority = 1 - minority
            for match in matches[minority]:
                errors.append((
                    match.start() + offset,
                    match.end() + offset,
                    err,
                    msg.format(pair[majority], match.group(0)),
                    pair[majority]))
    return errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preferred_forms_check(text, list, err, msg, ignore_case=True, offset=0,
                          max_errors=float("inf")):
    """Build a checker that suggests the preferred form.

    :param text: the text to check.
    :param list: pairs of [preferred form, [regexes for discouraged forms]].
        (Name kept for API compatibility despite shadowing the builtin.)
    :param err: the error code for flagged matches.
    :param msg: message template; formatted with (preferred, found).
    :param ignore_case: match case-insensitively when True.
    :param offset: value added to reported positions.
    :param max_errors: cap on the number of errors returned.
    :return: list of (start, end, err, message, replacement) tuples.
    """
    if ignore_case:
        flags = re.IGNORECASE
    else:
        flags = 0
    msg = " ".join(msg.split())
    errors = []
    # Require a non-word character on either side of the match. Raw string
    # avoids Python 3 invalid-escape warnings for \W.
    regex = r"[\W^]{}[\W$]"
    for p in list:
        for r in p[1]:
            for m in re.finditer(regex.format(r), text, flags=flags):
                txt = m.group(0).strip()
                errors.append((
                    m.start() + 1 + offset,
                    m.end() + offset,
                    err,
                    msg.format(p[0], txt),
                    p[0]))
    errors = truncate_to_max(errors, max_errors)
    return errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def existence_check(text, list, err, msg, ignore_case=True, str=False,
                    max_errors=float("inf"), offset=0, require_padding=True,
                    dotall=False, excluded_topics=None, join=False):
    """Build a checker that blacklists certain words.

    :param text: the text to check.
    :param list: regexes for the blacklisted forms. (Name kept for API
        compatibility despite shadowing the builtin; likewise `str`.)
    :param err: the error code for flagged matches.
    :param msg: message template; formatted with the matched text.
    :param ignore_case: match case-insensitively when True.
    :param str: enable re.UNICODE matching when True.
    :param max_errors: cap on the number of errors returned.
    :param offset: value added to reported positions.
    :param require_padding: require non-word characters around each match.
    :param dotall: enable re.DOTALL matching when True.
    :param excluded_topics: skip the check if the text matches any of
        these topics.
    :param join: unused; accepted for API compatibility.
    :return: list of (start, end, err, message, None) tuples.
    """
    flags = 0
    msg = " ".join(msg.split())
    if ignore_case:
        flags = flags | re.IGNORECASE
    if str:
        flags = flags | re.UNICODE
    if dotall:
        flags = flags | re.DOTALL
    # Raw strings avoid Python 3 invalid-escape warnings for \W.
    if require_padding:
        regex = r"(?:^|\W){}[\W$]"
    else:
        regex = r"{}"
    errors = []
    # If the topic of the text is in the excluded list, return immediately.
    if excluded_topics:
        tps = topics(text)
        if any([t in excluded_topics for t in tps]):
            return errors
    # Combine all patterns into one alternation and scan once.
    rx = "|".join(regex.format(w) for w in list)
    for m in re.finditer(rx, text, flags=flags):
        txt = m.group(0).strip()
        errors.append((
            m.start() + 1 + offset,
            m.end() + offset,
            err,
            msg.format(txt),
            None))
    errors = truncate_to_max(errors, max_errors)
    return errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def truncate_to_max(errors, max_errors):
    """If max_errors was specified, truncate the list of errors.

    Give the total number of times that the error was found elsewhere.
    """
    if len(errors) <= max_errors:
        return errors
    first_start, first_end, first_err, first_msg, replacements = errors[0]
    # Annotate the first error with how often the issue recurs.
    if len(errors) == (max_errors + 1):
        first_msg += " Found once elsewhere."
    else:
        first_msg += " Found {} times elsewhere.".format(len(errors))
    kept = errors[1:max_errors]
    return [(first_start, first_end, first_err, first_msg,
             replacements)] + kept
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detector_50_Cent(text):
    """Determine whether 50 Cent is a topic.

    Returns ("50 Cent", 1.0) when more than two keywords occur as
    substrings of the text, otherwise ("50 Cent", 0.0).
    """
    keywords = (
        "50 Cent",
        "rap",
        "hip hop",
        "Curtis James Jackson III",
        "Curtis Jackson",
        "Eminem",
        "Dre",
        "Get Rich or Die Tryin'",
        "G-Unit",
        "Street King Immortal",
        "In da Club",
        "Interscope",
    )
    hits = sum(1 for word in keywords if word in text)
    return ("50 Cent", 1.0 if hits > 2 else 0.0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def topics(text):
    """Return a list of topics.

    Runs every detector and keeps the names of topics detected with
    confidence above 0.95.
    """
    detectors = [
        detector_50_Cent
    ]
    results = [detector(text) for detector in detectors]
    return [name for name, confidence in results if confidence > 0.95]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_ellipsis(text):
    """Use an ellipsis instead of three dots."""
    err = "typography.symbols.ellipsis"
    msg = u"'...' is an approximation, use the ellipsis symbol '…'."
    # Raw string: avoids Python 3 invalid-escape warnings for \.
    regex = r"\.\.\."
    return existence_check(text, [regex], err, msg, max_errors=3,
                           require_padding=False, offset=0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_sentence_spacing(text):
    """Use no more than two spaces after a period."""
    err = "typography.symbols.sentence_spacing"
    msg = u"More than two spaces after the period; use 1 or 2."
    # Raw string: avoids Python 3 invalid-escape warnings for \.
    regex = r"\. {3}"
    return existence_check(
        text, [regex], err, msg, max_errors=3, require_padding=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_email():
    """Check the mail account and lint new mail.

    Logs in to the Gmail account twice (once over SMTP for sending
    replies, once via the gmail library for reading), submits the body of
    each unread message addressed to the editor for linting, and replies
    with the results once the linting job completes.

    NOTE(review): depends on module-level globals (`user`, `password`,
    `user_to`, `conn`, `api_url`, `name`, `tagline`, `url`, `quoted`) —
    confirm they are configured before this is called.
    """
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    server.login(user, password)
    g = gmail.login(user, password)
    # Check for unread messages.
    unread = g.inbox().mail(unread=True)
    # Submit a job to lint each email sent to editor@proselint.com. Record the
    # resulting job_ids somewhere (in Redis, I suppose), keyed by a hash of the
    # email.
    for u in unread:
        u.fetch()
        # Identify the email by a hash of sender + subject + body.
        signature = (u.fr.decode('utf-8') +
                     u.subject.decode('utf-8') +
                     u.body.decode('utf-8'))
        hash = hashlib.sha256(signature.encode('utf-8')).hexdigest()
        if user_to in u.to or user_to in u.headers.get('Cc', []):
            job_id = conn.get(hash)
            if not job_id:
                # If the email hasn't been sent for processing, send it.
                r = requests.post(api_url, data={"text": u.body})
                conn.set(hash, r.json()["job_id"])
                print("Email {} sent for processing.".format(hash))
            else:
                # Otherwise, check whether the results are ready, and if so,
                # reply with them.
                r = requests.get(api_url, params={"job_id": job_id})
                if r.json()["status"] == "success":
                    reply = quoted(u.body)
                    errors = r.json()['data']['errors']
                    reply += "\r\n\r\n".join([json.dumps(e) for e in errors])
                    msg = MIMEMultipart()
                    msg["From"] = "{} <{}>".format(name, user)
                    msg["To"] = u.fr
                    msg["Subject"] = "Re: " + u.subject
                    # Thread the reply under the original message.
                    if u.headers.get('Message-ID'):
                        msg.add_header("In-Reply-To", u.headers['Message-ID'])
                        msg.add_header("References", u.headers['Message-ID'])
                    body = reply + "\r\n\r\n--\r\n" + tagline + "\r\n" + url
                    msg.attach(MIMEText(body, "plain"))
                    text = msg.as_string()
                    server.sendmail(user, u.fr, text)
                    # Mark the email as read.
                    u.read()
                    u.archive()
                    print("Email {} has been replied to.".format(hash))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_bottleneck(text):
    """Avoid mixing metaphors about bottles and their necks.

    source:     Sir Ernest Gowers
    source_url: http://bit.ly/1CQPH61
    """
    err = "mixed_metaphors.misc.bottleneck"
    msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
    # Phrases where the bottleneck metaphor gets mangled.
    phrases = [
        "biggest bottleneck",
        "big bottleneck",
        "large bottleneck",
        "largest bottleneck",
        "world-wide bottleneck",
        "huge bottleneck",
        "massive bottleneck",
    ]
    return existence_check(text, phrases, err, msg, max_errors=1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_misc(text):
    """Avoid mixing metaphors.

    source:     Garner's Modern American Usage
    source_url: http://bit.ly/1T4alrY
    """
    err = "mixed_metaphors.misc.misc"
    msg = u"Mixed metaphor. Try '{}'."
    # Pairs of [correct idiom, [regexes for the mangled versions]].
    preferences = [
        ["cream rises to the top", ["cream rises to the crop"]],
        ["fasten your seatbelts", ["button your seatbelts"]],
        ["a minute to decompress", ["a minute to decompose"]],
        ["sharpest tool in the shed", ["sharpest marble in the (shed|box)"]],
        ["not rocket science", ["not rocket surgery"]],
    ]
    return preferred_forms_check(text, preferences, err, msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checkout(self, revision, options):
    """
    Checkout a specific revision.

    :param revision: The revision identifier.
    :type revision: :class:`Revision`

    :param options: Any additional options (unused here).
    :type options: ``dict``
    """
    self.repo.git.checkout(revision.key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_cache_path(path):
    """
    Generate a reusable path to cache results.

    Will use the --path of the target and hash into a 9-character
    directory within the HOME folder.

    :return: The cache path
    :rtype: ``str``
    """
    logger.debug(f"Generating cache for {path}")
    # First 9 hex chars of the SHA-1 of the target path.
    sha = hashlib.sha1(str(path).encode()).hexdigest()[:9]
    cache_path = str(pathlib.Path.home() / ".wily" / sha)
    logger.debug(f"Cache path is {cache_path}")
    return cache_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(config_path=DEFAULT_CONFIG_PATH):
    """
    Load config file and set values to defaults where no present.

    :return: The configuration ``WilyConfig``
    :rtype: :class:`wily.config.WilyConfig`
    """
    if not pathlib.Path(config_path).exists():
        logger.debug(f"Could not locate {config_path}, using default config.")
        return DEFAULT_CONFIG
    config = configparser.ConfigParser(default_section=DEFAULT_CONFIG_SECTION)
    config.read(config_path)
    section = DEFAULT_CONFIG_SECTION
    # Fall back to the package defaults for any missing option.
    operators = config.get(section=section, option="operators",
                           fallback=DEFAULT_OPERATORS)
    archiver = config.get(section=section, option="archiver",
                          fallback=DEFAULT_ARCHIVER)
    path = config.get(section=section, option="path", fallback=".")
    max_revisions = int(config.get(section=section, option="max_revisions",
                                   fallback=DEFAULT_MAX_REVISIONS))
    return WilyConfig(operators=operators, archiver=archiver, path=path,
                      max_revisions=max_revisions)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cli(ctx, debug, config, path, cache):
    """ \U0001F98A Inspect and search through the complexity of your source code. To get started, run setup: $ wily setup To reindex any changes in your source code: $ wily build <src> Then explore basic metrics with: $ wily report <file> You can also graph specific metrics in a browser with: $ wily graph <file> <metric> """
    ctx.ensure_object(dict)
    ctx.obj["DEBUG"] = debug
    # Verbosity follows the --debug flag.
    logger.setLevel("DEBUG" if debug else "INFO")
    ctx.obj["CONFIG"] = load_config(config)
    if path:
        logger.debug(f"Fixing path to {path}")
        ctx.obj["CONFIG"].path = path
    if cache:
        logger.debug(f"Fixing cache to {cache}")
        ctx.obj["CONFIG"].cache_path = cache
    logger.debug(f"Loaded configuration from {config}")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(ctx, max_revisions, targets, operators, archiver):
    """Build the wily cache."""
    from wily.commands.build import build

    config = ctx.obj["CONFIG"]
    # Apply any command-line overrides to the loaded configuration.
    if max_revisions:
        logger.debug(f"Fixing revisions to {max_revisions}")
        config.max_revisions = max_revisions
    if operators:
        logger.debug(f"Fixing operators to {operators}")
        config.operators = operators.strip().split(",")
    if archiver:
        logger.debug(f"Fixing archiver to {archiver}")
        config.archiver = archiver
    if targets:
        logger.debug(f"Fixing targets to {targets}")
        config.targets = targets
    build(config=config,
          archiver=resolve_archiver(config.archiver),
          operators=resolve_operators(config.operators))
    logger.info(
        "Completed building wily history, run `wily report <file>` or `wily index` to see more."
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report(ctx, file, metrics, number, message, format, console_format, output):
    """Show metrics for a given file."""
    from wily.commands.report import report

    config = ctx.obj["CONFIG"]
    if not exists(config):
        handle_no_cache(ctx)
    if not metrics:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")
    # Resolve the output path relative to the working directory.
    if output:
        new_output = Path().cwd() / Path(output)
    else:
        new_output = Path().cwd() / "wily_report" / "index.html"
    logger.debug(f"Running report on {file} for metric {metrics}")
    logger.debug(f"Output format is {format}")
    report(config=config,
           path=file,
           metrics=metrics,
           n=number,
           output=new_output,
           include_message=message,
           format=ReportFormat[format],
           console_format=console_format)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff(ctx, files, metrics, all, detail):
    """Show the differences in metrics for each file."""
    from wily.commands.diff import diff

    config = ctx.obj["CONFIG"]
    if not exists(config):
        handle_no_cache(ctx)
    # Use the metrics from the command line if supplied, else the defaults.
    if metrics:
        metrics = metrics.split(",")
        logger.info(f"Using specified metrics {metrics}")
    else:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")
    logger.debug(f"Running diff on {files} for metric {metrics}")
    diff(config=config, files=files, metrics=metrics,
         changes_only=not all, detail=detail)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def graph(ctx, path, metrics, output, x_axis, changes):
    """ Graph a specific metric for a given file, if a path is given, all files within path will be graphed. Some common examples: Graph all .py files within src/ for the raw.loc metric $ wily graph src/ raw.loc Graph test.py against raw.loc and cyclomatic.complexity metrics $ wily graph src/test.py raw.loc cyclomatic.complexity Graph test.py against raw.loc and raw.sloc on the x-axis $ wily graph src/test.py raw.loc --x-axis raw.sloc """
    from wily.commands.graph import graph

    if not exists(ctx.obj["CONFIG"]):
        handle_no_cache(ctx)
    logger.debug(f"Running report on {path} for metrics {metrics}")
    graph(config=ctx.obj["CONFIG"],
          path=path,
          metrics=metrics,
          output=output,
          x_axis=x_axis,
          changes=changes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_metrics(ctx):
    """List the available metrics."""
    from wily.commands.list_metrics import list_metrics

    # A cache must exist before metrics can be listed.
    if not exists(ctx.obj["CONFIG"]):
        handle_no_cache(ctx)
    list_metrics()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_no_cache(context):
    """Handle lack-of-cache error, prompt user for index process."""
    logger.error(
        f"Could not locate wily cache, the cache is required to provide insights."
    )
    p = input("Do you want to run setup and index your project now? [y/N]")
    if p.lower() != "y":
        exit(1)
    # Gather build parameters interactively, then run the build command.
    revisions = int(
        input("How many previous git revisions do you want to index? : "))
    paths = input(
        "Path to your source files; comma-separated for multiple: ").split(",")
    context.invoke(build, max_revisions=revisions, targets=paths,
                   operators=None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metric_parts(metric):
    """Convert a metric name into the operator and metric names."""
    op, met = resolve_metric_as_tuple(metric)
    return op.name, met.name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def graph(config, path, metrics, output=None, x_axis=None, changes=True, text=False):
    """
    Graph information about the cache and runtime.

    :param config: The configuration.
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the files.
    :type path: ``list``

    :param metrics: The Y and Z-axis metrics to report on.
    :type metrics: ``tuple``

    :param output: Save report to specified path instead of opening browser.
    :type output: ``str``

    :param x_axis: Metric to plot on the x-axis instead of revision history.
    :param changes: Only plot revisions where the y-value changed.
    :param text: Render point labels as on-plot text as well as hover text.
    """
    logger.debug("Running report command")
    data = []
    state = State(config)
    abs_path = config.path / pathlib.Path(path)
    # Default x-axis is revision history (dates); otherwise another metric.
    if x_axis is None:
        x_axis = "history"
    else:
        x_operator, x_key = metric_parts(x_axis)
    # A directory target expands to every Python file beneath it.
    if abs_path.is_dir():
        paths = [
            p.relative_to(config.path) for p in pathlib.Path(abs_path).glob("**/*.py")
        ]
    else:
        paths = [path]
    operator, key = metric_parts(metrics[0])
    if len(metrics) == 1:  # only y-axis
        z_axis = None
    else:
        # A second metric drives the marker size (z-axis).
        z_axis = resolve_metric(metrics[1])
        z_operator, z_key = metric_parts(metrics[1])
    # One scatter trace per file.
    for path in paths:
        x = []
        y = []
        z = []
        labels = []
        last_y = None
        for rev in state.index[state.default_archiver].revisions:
            labels.append(f"{rev.revision.author_name} <br>{rev.revision.message}")
            try:
                val = rev.get(config, state.default_archiver, operator, str(path), key)
                # Skip revisions where the value is unchanged, unless
                # `changes` is disabled.
                if val != last_y or not changes:
                    y.append(val)
                    if z_axis:
                        z.append(
                            rev.get(
                                config,
                                state.default_archiver,
                                z_operator,
                                str(path),
                                z_key,
                            )
                        )
                    if x_axis == "history":
                        x.append(format_datetime(rev.revision.date))
                    else:
                        x.append(
                            rev.get(
                                config,
                                state.default_archiver,
                                x_operator,
                                str(path),
                                x_key,
                            )
                        )
                last_y = val
            except KeyError:
                # missing data
                pass
        # Create traces
        trace = go.Scatter(
            x=x,
            y=y,
            mode="lines+markers+text" if text else "lines+markers",
            name=f"{path}",
            ids=state.index[state.default_archiver].revision_keys,
            text=labels,
            marker=dict(
                size=0 if z_axis is None else z,
                color=list(range(len(y))),
                # colorscale='Viridis',
            ),
            xcalendar="gregorian",
            hoveron="points+fills",
        )
        data.append(trace)
    # Write to the requested file, or to a temp report opened in a browser.
    if output:
        filename = output
        auto_open = False
    else:
        filename = "wily-report.html"
        auto_open = True
    y_metric = resolve_metric(metrics[0])
    title = f"{x_axis.capitalize()} of {y_metric.description} for {path}"
    plotly.offline.plot(
        {
            "data": data,
            "layout": go.Layout(
                title=title,
                xaxis={"title": x_axis},
                yaxis={"title": y_metric.description},
            ),
        },
        auto_open=auto_open,
        filename=filename,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_metrics():
    """List metrics available."""
    for name, operator in ALL_OPERATORS.items():
        print(f"{name} operator:")
        # Only print a table for operators that expose metrics.
        if len(operator.cls.metrics) > 0:
            table = tabulate.tabulate(
                headers=("Name", "Description", "Type"),
                tabular_data=operator.cls.metrics,
                tablefmt=DEFAULT_GRID_STYLE,
            )
            print(table)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_operator(operator, revision, config):
    """Run an operator for the multiprocessing pool. Not called directly."""
    op_instance = operator.cls(config)
    logger.debug(f"Running {operator.name} operator on {revision.key}")
    return operator.name, op_instance.run(revision, config)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(config, archiver, operators):
    """
    Build the history given a archiver and collection of operators.

    :param config: The wily configuration
    :type  config: :namedtuple:`wily.config.WilyConfig`

    :param archiver: The archiver to use
    :type  archiver: :namedtuple:`wily.archivers.Archiver`

    :param operators: The list of operators to execute
    :type operators: `list` of :namedtuple:`wily.operators.Operator`
    """
    try:
        logger.debug(f"Using {archiver.name} archiver module")
        archiver = archiver.cls(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except InvalidGitRepositoryError:
        # TODO: This logic shouldn't really be here (SoC)
        logger.info(f"Defaulting back to the filesystem archiver, not a valid git repo")
        archiver = FilesystemArchiver(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except Exception as e:
        if hasattr(e, "message"):
            logger.error(f"Failed to setup archiver: '{e.message}'")
        else:
            logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
        exit(1)
    state = State(config, archiver=archiver)
    # Check for existence of cache, else provision
    state.ensure_exists()
    index = state.index[archiver.name]
    # remove existing revisions from the list
    revisions = [revision for revision in revisions if revision not in index]
    logger.info(
        f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'."
    )
    _op_desc = ",".join([operator.name for operator in operators])
    logger.info(f"Running operators - {_op_desc}")
    # Progress bar advances once per (revision, operator) pair.
    bar = Bar("Processing", max=len(revisions) * len(operators))
    state.operators = operators
    try:
        # One worker per operator: each revision is checked out once and
        # every operator runs against it in parallel.
        with multiprocessing.Pool(processes=len(operators)) as pool:
            for revision in revisions:
                # Checkout target revision
                archiver.checkout(revision, config.checkout_options)
                stats = {"operator_data": {}}
                # Run each operator as a seperate process
                data = pool.starmap(
                    run_operator,
                    [(operator, revision, config) for operator in operators],
                )
                # Map the data back into a dictionary
                for operator_name, result in data:
                    # aggregate values to directories
                    roots = []
                    # find all unique directories in the results
                    for entry in result.keys():
                        parent = pathlib.Path(entry).parents[0]
                        if parent not in roots:
                            roots.append(parent)
                    for root in roots:
                        # find all matching entries recursively
                        aggregates = [
                            path
                            for path in result.keys()
                            if root in pathlib.Path(path).parents
                        ]
                        result[str(root)] = {}
                        # aggregate values
                        for metric in resolve_operator(operator_name).cls.metrics:
                            func = metric.aggregate
                            values = [
                                result[aggregate][metric.name]
                                for aggregate in aggregates
                                if aggregate in result
                                and metric.name in result[aggregate]
                            ]
                            if len(values) > 0:
                                result[str(root)][metric.name] = func(values)
                    stats["operator_data"][operator_name] = result
                    bar.next()
                # Record this revision's stats in the index.
                ir = index.add(revision, operators=operators)
                ir.store(config, archiver, stats)
        index.save()
        bar.finish()
    except Exception as e:
        logger.error(f"Failed to build cache: '{e}'")
        raise e
    finally:
        # Reset the archive after every run back to the head of the branch
        archiver.finish()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, revision, operators):
    """
    Add a revision to the index.

    :param revision: The revision.
    :type revision: :class:`Revision` or :class:`LazyRevision`

    :param operators: The operators recorded for this revision.
    :return: The :class:`IndexedRevision` that was added.
    """
    operator_names = [operator.name for operator in operators]
    ir = IndexedRevision(revision=revision, operators=operator_names)
    self._revisions[revision.key] = ir
    return ir
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Save the index data back to the wily cache."""
    serialized = [rev.asdict() for rev in self._revisions.values()]
    logger.debug("Saving data")
    cache.store_archiver_index(self.config, self.archiver, serialized)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_exists(self):
    """Ensure that cache directory exists."""
    if cache.exists(self.config):
        logger.debug(f"Cache {self.config.cache_path} exists")
        return
    # No cache yet: provision one.
    logger.debug("Wily cache not found, creating.")
    cache.create(self.config)
    logger.debug("Created wily cache")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_version(f):
    """
    Add the version of wily to the help heading.

    :param f: function to decorate
    :return: decorated function (same object, with ``__doc__`` prefixed)
    """
    # ``__doc__`` is ``None`` for an undocumented function; fall back to an
    # empty string so the concatenation below cannot raise ``TypeError``.
    doc = f.__doc__ or ""
    f.__doc__ = "Version: " + __version__ + "\n\n" + doc
    return f
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_metric_as_tuple(metric):
    """
    Resolve a metric key to its (operator, metric) pair.

    :param metric: the metric name, optionally prefixed "operator.metric".
    :type metric: ``str``

    :rtype: :class:`Metric`
    :raises ValueError: If no registered metric has that name.
    """
    if "." in metric:
        # Discard the operator prefix; matching is on the metric name alone.
        _, metric = metric.split(".")

    matches = [
        (operator, match) for operator, match in ALL_METRICS if match[0] == metric
    ]
    # An empty list is falsy; the original `not r or len(r) == 0` was redundant.
    if not matches:
        raise ValueError(f"Metric {metric} not recognised.")
    return matches[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_metric(revision, operator, path, key):
    """
    Get a metric from the cache.

    :param revision: The revision data mapping.
    :param operator: The operator name.
    :type operator: ``str``
    :param path: The path to the file, or ``"file:entry"`` for a
        function/class entry within the file.
    :type path: ``str``
    :param key: The key of the data.
    :type key: ``str``
    :return: Data from the cache.
    """
    if ":" not in path:
        return revision[operator][path][key]
    # "file:entry" addresses a nested entry (e.g. a function in a module).
    file_part, entry = path.split(":")
    return revision[operator][file_part][entry][key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_index(config):
    """Create the root index file for the wily cache."""
    index_path = pathlib.Path(config.cache_path) / "index.json"
    payload = json.dumps({"version": __version__}, indent=2)
    index_path.write_text(payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(config):
    """
    Create a wily cache.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :return: The path to the cache
    :rtype: ``str``
    """
    cache_path = config.cache_path
    if exists(config):
        logger.debug("Wily cache exists, skipping")
    else:
        logger.debug(f"Creating wily cache {cache_path}")
        pathlib.Path(cache_path).mkdir(parents=True, exist_ok=True)
        create_index(config)
    return cache_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(config):
    """
    Delete a wily cache.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`
    """
    if exists(config):
        shutil.rmtree(config.cache_path)
        logger.debug("Deleted wily cache")
    else:
        logger.debug("Wily cache does not exist, skipping")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(config, archiver, revision, stats):
    """
    Store a revision record within an archiver folder.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The archiver; its ``name`` attribute names the folder.

    :param revision: The revision; its ``key`` attribute names the file.

    :param stats: The collected data
    :type stats: ``dict``

    :return: The absolute path to the created file
    :rtype: `pathlib.Path`

    :raises RuntimeError: If a record for this revision already exists,
        which indicates a corrupt index.
    """
    root = pathlib.Path(config.cache_path) / archiver.name
    if not root.exists():
        logger.debug("Creating wily cache")
        root.mkdir()

    # Fix absolute path references: rewrite operator-data keys so they are
    # relative to the configured project path.
    if config.path != ".":
        for operator, operator_data in list(stats["operator_data"].items()):
            if operator_data:
                new_operator_data = operator_data.copy()
                for k, v in list(operator_data.items()):
                    new_key = os.path.relpath(str(k), str(config.path))
                    del new_operator_data[k]
                    new_operator_data[new_key] = v
                del stats["operator_data"][operator]
                stats["operator_data"][operator] = new_operator_data

    logger.debug(f"Creating {revision.key} output")
    filename = root / (revision.key + ".json")
    if filename.exists():
        # Name the offending file; the original message had a broken
        # f-string that always printed "(unknown)".
        raise RuntimeError(f"File {filename} already exists, index may be corrupt.")
    with open(filename, "w") as out:
        out.write(json.dumps(stats, indent=2))
    return filename
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_archiver_index(config, archiver, index):
    """
    Store an archiver's index record for faster search.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The archiver; its ``name`` attribute names the folder.

    :param index: The archiver index record
    :type index: ``dict``

    :rtype: `pathlib.Path`
    """
    root = pathlib.Path(config.cache_path) / archiver.name
    if not root.exists():
        root.mkdir()
        logger.debug("Created archiver directory")

    # Newest revisions first.
    ordered = sorted(index, key=lambda record: record["date"], reverse=True)
    index_file = root / "index.json"
    with index_file.open("w") as out:
        out.write(json.dumps(ordered, indent=2))
    logger.debug(f"Created index output")
    return index_file
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_archivers(config):
    """
    List the names of archivers with data.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :return: A list of archiver names
    :rtype: ``list`` of ``str``
    """
    root = pathlib.Path(config.cache_path)
    # An archiver "has data" when its directory exists in the cache.
    return [name for name in ALL_ARCHIVERS if (root / name).exists()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_default_metrics(config):
    """
    Get the default metrics for a configuration.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :return: Return the list of default metrics in this index
    :rtype: ``list`` of ``str``
    """
    default_metrics = []
    for archiver in list_archivers(config):
        index = get_archiver_index(config, archiver)
        if not index:
            logger.warning("No records found in the index, no metrics available")
            return []
        # Take the operators from the most recent revision.
        for operator in index[0]["operators"]:
            resolved = resolve_operator(operator)
            default_index = resolved.cls.default_metric_index
            if default_index is not None:
                metric = resolved.cls.metrics[default_index]
                default_metrics.append(
                    "{0}.{1}".format(resolved.cls.name, metric.name)
                )
    return default_metrics
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_archiver_index(config, archiver):
    """
    Check if this archiver has an index file.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``

    :return: Whether the archiver's ``index.json`` exists.
    :rtype: ``bool``
    """
    index_path = pathlib.Path(config.cache_path) / archiver / "index.json"
    return index_path.exists()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_archiver_index(config, archiver):
    """
    Get the contents of the archiver index file.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``

    :return: The index data
    :rtype: ``dict``
    """
    index_file = pathlib.Path(config.cache_path) / archiver / "index.json"
    with index_file.open("r") as index_f:
        return json.load(index_f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(config, archiver, revision):
    """
    Get the data for a given revision.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``

    :param revision: The revision ID
    :type revision: ``str``

    :return: The data record for that revision
    :rtype: ``dict``
    """
    # TODO : string escaping!!!
    revision_file = pathlib.Path(config.cache_path) / archiver / f"{revision}.json"
    with revision_file.open("r") as rev_f:
        return json.load(rev_f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mode(data):
    """
    Return the modal value of an iterable with discrete values.

    If there is more than one modal value, the one first encountered in
    ``data`` wins (``Counter.most_common`` orders equal counts by first
    insertion).

    :param data: A non-empty iterable of hashable values.
    :return: The most common value.
    :raises IndexError: If ``data`` is empty.
    """
    # most_common(1) returns [(value, count)]; only the value is needed,
    # so the unused `freq` binding from the original is gone.
    return Counter(data).most_common(1)[0][0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(fp, encode_nominal=False, return_type=DENSE):
    '''Load a file-like object containing the ARFF document and convert it into
    a Python object.

    :param fp: a file-like object.
    :param encode_nominal: boolean, if True perform a label encoding
        while reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
    :return: a dictionary.
    '''
    return ArffDecoder().decode(
        fp, encode_nominal=encode_nominal, return_type=return_type
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loads(s, encode_nominal=False, return_type=DENSE):
    '''Convert a string instance containing the ARFF document into a Python
    object.

    :param s: a string object.
    :param encode_nominal: boolean, if True perform a label encoding
        while reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
    :return: a dictionary.
    '''
    return ArffDecoder().decode(
        s, encode_nominal=encode_nominal, return_type=return_type
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump(obj, fp):
    '''Serialize an object representing the ARFF document to a given file-like
    object.

    :param obj: a dictionary.
    :param fp: a file-like object.
    :return: the same file-like object, for convenience.
    '''
    generator = ArffEncoder().iter_encode(obj)
    # Hold one line back so every line except the final one gets a newline.
    pending = next(generator)
    for current in generator:
        fp.write(pending + u'\n')
        pending = current
    fp.write(pending)
    return fp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
    '''Parse an ARFF document into a ``dict`` (the worker behind ``decode``).

    :param s: a string, or an iterable of lines, with the ARFF content.
    :param encode_nominal: if True, nominal attribute values are
        label-encoded while reading.
    :param matrix_type: data-layout constant (e.g. DENSE) used to pick
        the data helper object.
    :return: a dict with ``description``, ``relation``, ``attributes``
        and ``data`` keys.
    '''
    # Make sure this method is idempotent
    self._current_line = 0

    # If string, convert to a list of lines
    if isinstance(s, basestring):
        s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')

    # Create the return object
    obj = {
        u'description': u'',
        u'relation': u'',
        u'attributes': [],
        u'data': []
    }
    # Maps attribute name -> line it was declared on, to reject duplicates.
    attribute_names = {}

    # Create the data helper object
    data = _get_data_object_for_decoding(matrix_type)

    # Read all lines; a simple state machine over the header tokens.
    STATE = _TK_DESCRIPTION
    s = iter(s)
    for row in s:
        self._current_line += 1
        # Ignore empty lines
        row = row.strip(' \r\n')
        if not row: continue

        u_row = row.upper()

        # DESCRIPTION -----------------------------------------------------
        if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
            obj['description'] += self._decode_comment(row) + '\n'
        # -----------------------------------------------------------------

        # RELATION --------------------------------------------------------
        elif u_row.startswith(_TK_RELATION):
            if STATE != _TK_DESCRIPTION:
                raise BadLayout()

            STATE = _TK_RELATION
            obj['relation'] = self._decode_relation(row)
        # -----------------------------------------------------------------

        # ATTRIBUTE -------------------------------------------------------
        elif u_row.startswith(_TK_ATTRIBUTE):
            if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
                raise BadLayout()

            STATE = _TK_ATTRIBUTE

            attr = self._decode_attribute(row)
            if attr[0] in attribute_names:
                raise BadAttributeName(attr[0], attribute_names[attr[0]])
            else:
                attribute_names[attr[0]] = self._current_line
            obj['attributes'].append(attr)

            # A list/tuple type means a nominal attribute; otherwise the
            # type name selects a plain value conversor.
            if isinstance(attr[1], (list, tuple)):
                if encode_nominal:
                    conversor = EncodedNominalConversor(attr[1])
                else:
                    conversor = NominalConversor(attr[1])
            else:
                CONVERSOR_MAP = {'STRING': unicode,
                                 'INTEGER': lambda x: int(float(x)),
                                 'NUMERIC': float,
                                 'REAL': float}
                conversor = CONVERSOR_MAP[attr[1]]

            self._conversors.append(conversor)
        # -----------------------------------------------------------------

        # DATA ------------------------------------------------------------
        elif u_row.startswith(_TK_DATA):
            if STATE != _TK_ATTRIBUTE:
                raise BadLayout()

            break
        # -----------------------------------------------------------------

        # COMMENT ---------------------------------------------------------
        elif u_row.startswith(_TK_COMMENT):
            pass
        # -----------------------------------------------------------------
        else:
            # Never found @DATA
            raise BadLayout()

    def stream():
        # Continues consuming the same iterator, i.e. the lines after @DATA.
        for row in s:
            self._current_line += 1
            row = row.strip()
            # Ignore empty lines and comment lines.
            if row and not row.startswith(_TK_COMMENT):
                yield row

    # Alter the data object
    obj['data'] = data.decode_rows(stream(), self._conversors)
    # Drop the trailing newline appended by the description loop above.
    if obj['description'].endswith('\n'):
        obj['description'] = obj['description'][:-1]

    return obj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(self, s, encode_nominal=False, return_type=DENSE):
    '''Returns the Python representation of a given ARFF file.

    When a file object is passed as an argument, this method reads lines
    iteratively, avoiding to load unnecessary information to the memory.

    :param s: a string or file object with the ARFF file.
    :param encode_nominal: boolean, if True perform a label encoding
        while reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
    '''
    try:
        return self._decode(
            s, encode_nominal=encode_nominal, matrix_type=return_type
        )
    except ArffException as error:
        # Stamp the failure with the line being parsed when it occurred.
        error.line = self._current_line
        raise error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode(self, obj):
    '''Encodes a given object to an ARFF file.

    :param obj: the object containing the ARFF information.
    :return: the ARFF file as an unicode string.
    '''
    # join() consumes the generator directly; no intermediate list needed.
    return u'\n'.join(self.iter_encode(obj))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_encode(self, obj):
    '''The iterative version of `arff.ArffEncoder.encode`.

    This encodes iteratively a given object and return, one-by-one, the
    lines of the ARFF file.

    :param obj: the object containing the ARFF information.
    :return: (yields) the ARFF file as unicode strings.
    :raises BadObject: if the relation name or the attribute list is
        missing or malformed.
    '''
    # DESCRIPTION: each line becomes a leading comment.
    if obj.get('description', None):
        for row in obj['description'].split('\n'):
            yield self._encode_comment(row)

    # RELATION: mandatory; validated before anything else is emitted.
    if not obj.get('relation'):
        raise BadObject('Relation name not found or with invalid value.')

    yield self._encode_relation(obj['relation'])
    yield u''

    # ATTRIBUTES: mandatory, validated one declaration at a time.
    if not obj.get('attributes'):
        raise BadObject('Attributes not found.')

    attribute_names = set()
    for attr in obj['attributes']:
        # Verify for bad object format
        if not isinstance(attr, (tuple, list)) or \
                len(attr) != 2 or \
                not isinstance(attr[0], basestring):
            raise BadObject('Invalid attribute declaration "%s"'%str(attr))

        if isinstance(attr[1], basestring):
            # Verify for invalid types
            if attr[1] not in _SIMPLE_TYPES:
                raise BadObject('Invalid attribute type "%s"'%str(attr))

        # Verify for bad object format
        elif not isinstance(attr[1], (tuple, list)):
            raise BadObject('Invalid attribute type "%s"'%str(attr))

        # Verify attribute name is not used twice
        if attr[0] in attribute_names:
            raise BadObject('Trying to use attribute name "%s" for the '
                            'second time.' % str(attr[0]))
        else:
            attribute_names.add(attr[0])

        yield self._encode_attribute(attr[0], attr[1])
    yield u''
    attributes = obj['attributes']

    # DATA: optional; delegated to the layout-specific data helper.
    yield _TK_DATA
    if 'data' in obj:
        data = _get_data_object_for_encoding(obj.get('data'))
        for line in data.encode_data(obj.get('data'), attributes):
            yield line

    yield u''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def begin(self):
    """Initialize library, must be called once before other functions are
    called.

    :raises RuntimeError: if the underlying ws2811 driver fails to start.
    """
    ret = ws.ws2811_init(self._leds)
    if ret != 0:
        message = ws.ws2811_get_return_t_str(ret)
        raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(ret, message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show(self):
    """Update the display with the data from the LED buffer.

    :raises RuntimeError: if rendering fails in the ws2811 driver.
    """
    ret = ws.ws2811_render(self._leds)
    if ret != 0:
        message = ws.ws2811_get_return_t_str(ret)
        raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(ret, message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def startService(self):
    """
    Start the writer thread and register this service as a destination.
    """
    Service.startService(self)
    thread = threading.Thread(target=self._writer)
    # Publish the thread on self before starting it.
    self._thread = thread
    thread.start()
    addDestination(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stopService(self):
    """
    Stop the writer thread, wait for it to finish.
    """
    Service.stopService(self)
    removeDestination(self)
    self._reactor.callFromThread(self._reactor.stop)
    # Join the writer thread off the main reactor's thread pool so the
    # caller gets a Deferred that fires once the thread has exited.
    pool = self._mainReactor.getThreadPool()
    return deferToThreadPool(self._mainReactor, pool, self._thread.join)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, logger=None, action=None):
    """
    Write the message to the given logger.

    This will additionally include a timestamp, the action context if any,
    and any other fields.

    @type logger: L{eliot.ILogger} or C{None} indicating the default one.

    @param action: The L{Action} which is the context for this message. If
        C{None}, the L{Action} will be deduced from the current call stack.
    """
    destination = _output._DEFAULT_LOGGER if logger is None else logger
    destination.write(self._freeze(action=action), self._serializer)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_logging(dsk, ignore=None):
    """
    Add logging to a Dask graph.

    @param dsk: The Dask graph.
    @param ignore: Unused; accepted for call-compatibility.

    @return: New Dask graph.
    """
    ctx = current_action()
    result = {}

    # Use topological sort to ensure Eliot actions are in logical order of
    # execution in Dask:
    keys = toposort(dsk)

    # 1. Give each key a string name. Some keys are just aliases to other
    # keys, so make sure we have underlying key available. Later on might
    # want to shorten them as well.
    def simplify(k):
        if isinstance(k, str):
            return k
        return "-".join(str(o) for o in k)

    key_names = {}
    for key in keys:
        value = dsk[key]
        if not callable(value) and value in keys:
            # It's an alias for another key:
            key_names[key] = key_names[value]
        else:
            key_names[key] = simplify(key)

    # 2. Create Eliot child Actions for each key, in topological order:
    key_to_action_id = {
        key: str(ctx.serialize_task_id(), "utf-8")
        for key in keys
    }

    # 3. Replace function with wrapper that logs appropriate Action:
    for key in keys:
        func = dsk[key][0]
        args = dsk[key][1:]
        if not callable(func):
            # This key is just an alias for another key, no need to add
            # logging:
            result[key] = dsk[key]
            continue
        wrapped_func = _RunWithEliotContext(
            task_id=key_to_action_id[key],
            func=func,
            key=key_names[key],
            dependencies=[key_names[k] for k in get_dependencies(dsk, key)],
        )
        result[key] = (wrapped_func, ) + tuple(args)

    # The rewritten graph must cover exactly the same keys as the input.
    assert result.keys() == dsk.keys()
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eliot_friendly_generator_function(original):
    """
    Decorate a generator function so that the Eliot action context is
    preserved across ``yield`` expressions.

    The returned wrapper is itself a generator function. It exposes a
    ``debug`` attribute (initially ``False``); when set to ``True``, a
    ``yielded`` message is logged each time the wrapped generator yields.
    """
    @wraps(original)
    def wrapper(*a, **kw):
        # Keep track of whether the next value to deliver to the generator is
        # a non-exception or an exception.
        ok = True

        # Keep track of the next value to deliver to the generator.
        value_in = None

        # Create the generator with a call to the generator function.  This
        # happens with whatever Eliot action context happens to be active,
        # which is fine and correct and also irrelevant because no code in the
        # generator function can run until we call send or throw on it.
        gen = original(*a, **kw)

        # Initialize the per-generator context to a copy of the current context.
        context = copy_context()
        while True:
            try:
                # Whichever way we invoke the generator, we will do it
                # with the Eliot action context stack we've saved for it.
                # Then the context manager will re-save it and restore the
                # "outside" stack for us.
                #
                # Regarding the support of Twisted's inlineCallbacks-like
                # functionality (see eliot.twisted.inline_callbacks):
                #
                # The invocation may raise the inlineCallbacks internal
                # control flow exception _DefGen_Return. It is not wrong to
                # just let that propagate upwards here but inlineCallbacks
                # does think it is wrong. The behavior triggers a
                # DeprecationWarning to try to get us to fix our code. We
                # could explicitly handle and re-raise the _DefGen_Return but
                # only at the expense of depending on a private Twisted API.
                # For now, I'm opting to try to encourage Twisted to fix the
                # situation (or at least not worsen it):
                # https://twistedmatrix.com/trac/ticket/9590
                #
                # Alternatively, _DefGen_Return is only required on Python 2.
                # When Python 2 support is dropped, this concern can be
                # eliminated by always using `return value` instead of
                # `returnValue(value)` (and adding the necessary logic to the
                # StopIteration handler below).
                def go():
                    if ok:
                        value_out = gen.send(value_in)
                    else:
                        value_out = gen.throw(*value_in)
                    # We have obtained a value from the generator.  In
                    # giving it to us, it has given up control.  Note this
                    # fact here.  Importantly, this is within the
                    # generator's action context so that we get a good
                    # indication of where the yield occurred.
                    #
                    # This is noisy, enable only for debugging:
                    if wrapper.debug:
                        Message.log(message_type=u"yielded")
                    return value_out

                value_out = context.run(go)
            except StopIteration:
                # When the generator raises this, it is signaling
                # completion.  Leave the loop.
                break
            else:
                try:
                    # Pass the generator's result along to whoever is
                    # driving.  Capture the result as the next value to
                    # send inward.
                    value_in = yield value_out
                except:
                    # Or capture the exception if that's the flavor of the
                    # next value. This could possibly include GeneratorExit
                    # which turns out to be just fine because throwing it into
                    # the inner generator effectively propagates the close
                    # (and with the right context!) just as you would want.
                    # True, the GeneratorExit does get re-throwing out of the
                    # gen.throw call and hits _the_generator_context's
                    # contextmanager. But @contextmanager extremely
                    # conveniently eats it for us! Thanks, @contextmanager!
                    ok = False
                    value_in = exc_info()
                else:
                    ok = True

    # Debug logging is off by default; callers may flip this attribute.
    wrapper.debug = False
    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sd_journal_send(**kwargs):
    """
    Send a message to the journald log.

    @param kwargs: Mapping between field names to values, both as bytes.

    @raise IOError: If the operation failed.
    """
    # The function uses printf formatting, so we need to quote
    # percentages.
    fields = []
    for key, value in kwargs.items():
        payload = key.encode("ascii") + b'=' + value.replace(b"%", b"%%")
        fields.append(_ffi.new("char[]", payload))
    fields.append(_ffi.NULL)
    result = _journald.sd_journal_send(*fields)
    if result != 0:
        raise IOError(-result, strerror(-result))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inline_callbacks(original, debug=False):
    """
    Decorate a function like ``inlineCallbacks`` would but in a more
    Eliot-friendly way. Use it just like ``inlineCallbacks`` but where you
    want Eliot action contexts to Do The Right Thing inside the decorated
    function.
    """
    wrapped = eliot_friendly_generator_function(original)
    if debug:
        wrapped.debug = True
    return inlineCallbacks(wrapped)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addCallbacks(self, callback, errback=None,
                 callbackArgs=None, callbackKeywords=None,
                 errbackArgs=None, errbackKeywords=None):
    """
    Add a pair of callbacks that will be run in the context of an eliot
    action.

    @return: C{self}
    @rtype: L{DeferredContext}

    @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been
        called. This indicates a programmer error.
    """
    if self._finishAdded:
        raise AlreadyFinished()
    if errback is None:
        errback = _passthrough

    def wrap(hook):
        # Run the hook inside this context's action.
        def run_in_context(*args, **kwargs):
            return self._action.run(hook, *args, **kwargs)
        return run_in_context

    self.result.addCallbacks(
        wrap(callback), wrap(errback),
        callbackArgs, callbackKeywords, errbackArgs, errbackKeywords)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addCallback(self, callback, *args, **kw):
    """
    Add a success callback that will be run in the context of an eliot
    action.

    @return: C{self}
    @rtype: L{DeferredContext}

    @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been
        called. This indicates a programmer error.
    """
    # Failures pass straight through untouched.
    return self.addCallbacks(
        callback, errback=_passthrough, callbackArgs=args, callbackKeywords=kw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addErrback(self, errback, *args, **kw):
    """
    Add a failure callback that will be run in the context of an eliot
    action.

    @return: C{self}
    @rtype: L{DeferredContext}

    @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been
        called. This indicates a programmer error.
    """
    # Successful results pass straight through untouched.
    return self.addCallbacks(
        _passthrough, errback, errbackArgs=args, errbackKeywords=kw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addBoth(self, callback, *args, **kw):
    """
    Add a single callback as both success and failure callbacks.

    @return: C{self}
    @rtype: L{DeferredContext}

    @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been
        called. This indicates a programmer error.
    """
    return self.addCallbacks(
        callback, callback,
        callbackArgs=args, callbackKeywords=kw,
        errbackArgs=args, errbackKeywords=kw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addActionFinish(self):
    """
    Indicates all callbacks that should run within the action's context have
    been added, and that the action should therefore finish once those
    callbacks have fired.

    @return: The wrapped L{Deferred}.

    @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been
        called previously. This indicates a programmer error.
    """
    if self._finishAdded:
        raise AlreadyFinished()
    self._finishAdded = True

    def finish_action(result):
        # A Failure carries the exception; a success finishes with None.
        exception = result.value if isinstance(result, Failure) else None
        self._action.finish(exception)
        return result

    self.result.addBoth(finish_action)
    return self.result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _loads(s):
    """
    Support decoding bytes.
    """
    if isinstance(s, bytes):
        return pyjson.loads(s.decode("utf-8"))
    return pyjson.loads(s)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _dumps(obj, cls=pyjson.JSONEncoder):
    """
    Encode to bytes, and presume bytes in inputs are UTF-8 encoded strings.
    """
    class _BytesFriendly(cls):
        """
        JSON encoder that additionally accepts L{bytes}.
        """
        def default(self, o):
            if not isinstance(o, bytes):
                return cls.default(self, o)
            warnings.warn(
                "Eliot will soon stop supporting encoding bytes in JSON"
                " on Python 3", DeprecationWarning
            )
            return o.decode("utf-8")

    return pyjson.dumps(obj, cls=_BytesFriendly).encode("utf-8")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_module(name, original_module):
    """
    Load a copy of a module, distinct from what you'd get if you imported
    it directly.

    :param str name: The name of the new module.
    :param original_module: The original module we're recreating.

    :return: A new, distinct module.
    :raises NotImplementedError: on Python 2 under PyInstaller, where the
        module source is not available on disk.
    """
    module = ModuleType(name)
    if PY3:
        import importlib.util

        # Recompile from the module's spec so the copy shares no module-level
        # state with the originally-imported module object.
        spec = importlib.util.find_spec(original_module.__name__)
        source = spec.loader.get_code(original_module.__name__)
    else:
        if getattr(sys, "frozen", False):
            raise NotImplementedError("Can't load modules on Python 2 with PyInstaller")
        path = original_module.__file__
        if path.endswith(".pyc") or path.endswith(".pyo"):
            # Re-read the .py source rather than compiled bytecode.
            path = path[:-1]
        with open(path) as f:
            source = f.read()
    # Execute the source in the new module's own namespace.
    exec_(source, module.__dict__, module.__dict__)
    return module
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _writeTracebackMessage(logger, typ, exception, traceback):
    """
    Write a traceback to the log.

    @param typ: The class of the exception.

    @param exception: The L{Exception} instance.

    @param traceback: The traceback, a C{str}.
    """
    extra_fields = _error_extraction.get_fields_for_exception(logger, exception)
    message = TRACEBACK_MESSAGE(
        reason=exception, traceback=traceback, exception=typ)
    message.bind(**extra_fields).write(logger)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_traceback(logger=None, exc_info=None):
    """
    Write the latest traceback to the log.

    This should be used inside an C{except} block. For example:

         try:
             dostuff()
         except:
             write_traceback(logger)

    Or you can pass the result of C{sys.exc_info()} to the C{exc_info}
    parameter.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    typ, exception, tb = exc_info
    lines = _traceback_no_io.format_exception(typ, exception, tb)
    _writeTracebackMessage(logger, typ, exception, "".join(lines))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exclusively(f):
    """
    Decorate a function to make it thread-safe by serializing invocations
    using a per-instance lock.

    The decorated callable must be used on objects that expose a ``_lock``
    attribute supporting the context-manager protocol.
    """
    @wraps(f)
    def _serialized(self, *args, **kwargs):
        # Hold the instance's lock for the duration of the call:
        with self._lock:
            return f(self, *args, **kwargs)

    return _serialized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_file(output_file, encoder=EliotJSONEncoder):
    """
    Add a destination that writes a JSON message per line to the given file.

    @param output_file: A file-like object.

    @param encoder: A JSON encoder class; defaults to L{EliotJSONEncoder}.
    """
    destination = FileDestination(file=output_file, encoder=encoder)
    Logger._destinations.add(destination)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send(self, message):
    """
    Deliver a message to all destinations.

    The passed in message might be mutated.

    @param message: A message dictionary that can be serialized to JSON.
    @type message: L{dict}

    @raises _DestinationsSendError: If one or more destinations raised;
        carries every failure's C{sys.exc_info()}.
    """
    message.update(self._globalFields)
    failures = []
    for destination in self._destinations:
        try:
            destination(message)
        except:
            # Destinations should never raise, but collect any failure so
            # the remaining destinations still get the message:
            failures.append(sys.exc_info())
    if failures:
        raise _DestinationsSendError(failures)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, *destinations):
    """
    Adds new destinations.

    A destination should never ever throw an exception. Seriously. A
    destination should not mutate the dictionary it is given.

    @param destinations: A list of callables that takes message
        dictionaries.
    """
    pending = None
    if not self._any_added:
        # First real destinations: drain the buffering destination that
        # has been accumulating messages until now, then drop it.
        self._any_added = True
        pending = self._destinations[0].messages
        self._destinations = []
    self._destinations.extend(destinations)
    if pending:
        # Re-deliver everything that was buffered before any destination
        # was registered:
        for message in pending:
            self.send(message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode(timestamp):
    """
    Convert seconds since epoch to TAI64N string.

    @param timestamp: Seconds since UTC Unix epoch as C{float}.

    @return: TAI64N-encoded time, as C{unicode}.
    """
    whole_seconds = int(timestamp)
    # Fractional part of the timestamp, expressed in nanoseconds:
    nanoseconds = int((timestamp - whole_seconds) * 1000000000)
    packed = struct.pack(_STRUCTURE, whole_seconds + _OFFSET, nanoseconds)
    return "@" + b2a_hex(packed).decode("ascii")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(tai64n):
    """
    Convert TAI64N string to seconds since epoch.

    Note that dates before 2013 may not decode accurately due to leap
    second issues. If you need correct decoding for earlier dates you can
    try the tai64n package available from PyPI
    (U{https://pypi.python.org/pypi/tai64n}).

    @param tai64n: TAI64N-encoded time, as C{unicode}.

    @return: Seconds since UTC Unix epoch as C{float}.
    """
    # Skip the leading "@" marker before hex-decoding:
    whole_seconds, nanoseconds = struct.unpack(
        _STRUCTURE, a2b_hex(tai64n[1:]))
    return (whole_seconds - _OFFSET) + nanoseconds / 1000000000.0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preserve_context(f):
    """
    Package up the given function with the current Eliot context, and then
    restore context and call given function when the resulting callable is
    run. This allows continuing the action context within a different
    thread.

    The result should only be used once, since it relies on
    L{Action.serialize_task_id} whose results should only be deserialized
    once.

    @param f: A callable.

    @return: One-time use callable that calls given function in context of
        a child of current Eliot action.
    """
    current = current_action()
    if current is None:
        # No action context to carry over; hand back the callable as-is.
        return f
    task_id = current.serialize_task_id()
    # A non-blocking acquire doubles as a "was this already called?" flag:
    first_call = threading.Lock()

    def _with_context(*args, **kwargs):
        if not first_call.acquire(False):
            raise TooManyCalls(f)
        with Action.continue_task(task_id=task_id):
            return f(*args, **kwargs)

    return _with_context
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_task_id(self):
    """
    Create a unique identifier for the current location within the task.

    The format is C{b"<task_uuid>@<task_level>"}.

    @return: L{bytes} encoding the current location within the task.
    """
    task_uuid = self._identification[TASK_UUID_FIELD]
    level = self._nextTaskLevel().toString()
    return "{}@{}".format(task_uuid, level).encode("ascii")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def continue_task(cls, logger=None, task_id=_TASK_ID_NOT_SUPPLIED):
    """
    Start a new action which is part of a serialized task.

    @param logger: The L{eliot.ILogger} to which to write messages, or
        C{None} if the default one should be used.

    @param task_id: A serialized task identifier, the output of
        L{Action.serialize_task_id}, either ASCII-encoded bytes or unicode
        string. Required.

    @return: The new L{Action} instance.
    """
    if task_id is _TASK_ID_NOT_SUPPLIED:
        raise RuntimeError("You must supply a task_id keyword argument.")
    if isinstance(task_id, bytes):
        task_id = task_id.decode("ascii")
    # Identifier format is "<uuid>@<level>":
    task_uuid, level_string = task_id.split("@")
    new_action = cls(
        logger, task_uuid, TaskLevel.fromString(level_string),
        "eliot:remote_task")
    new_action._start({})
    return new_action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start(self, fields):
    """
    Log the start message. The action identification fields, and any
    additional given fields, will be logged.

    In general you shouldn't call this yourself, instead using a C{with}
    block or L{Action.finish}.
    """
    fields[ACTION_STATUS_FIELD] = STARTED_STATUS
    fields.update(self._identification)
    # Use the start serializer only when serializers were configured:
    serializer = (
        None if self._serializers is None else self._serializers.start)
    Message(fields, serializer).write(self._logger, self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finish(self, exception=None):
    """
    Log the finish message.

    The action identification fields, and any additional given fields,
    will be logged.

    In general you shouldn't call this yourself, instead using a C{with}
    block or L{Action.finish}.

    @param exception: C{None}, in which case the fields added with
        L{Action.addSuccessFields} are used. Or an L{Exception}, in
        which case an C{"exception"} field is added with the given
        L{Exception} type and C{"reason"} with its contents.
    """
    if self._finished:
        # Finishing is idempotent; only the first call logs anything.
        return
    self._finished = True

    if exception is None:
        fields = self._successFields
        fields[ACTION_STATUS_FIELD] = SUCCEEDED_STATUS
        serializer_name = "success"
    else:
        fields = _error_extraction.get_fields_for_exception(
            self._logger, exception)
        fields[EXCEPTION_FIELD] = "{}.{}".format(
            exception.__class__.__module__, exception.__class__.__name__)
        fields[REASON_FIELD] = safeunicode(exception)
        fields[ACTION_STATUS_FIELD] = FAILED_STATUS
        serializer_name = "failure"
    serializer = (
        None if self._serializers is None
        else getattr(self._serializers, serializer_name))
    fields.update(self._identification)
    Message(fields, serializer).write(self._logger, self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def context(self):
    """
    Create a context manager that ensures code runs within action's
    context.

    The action does NOT finish when the context is exited.
    """
    token = _ACTION_CONTEXT.set(self)
    try:
        yield self
    finally:
        # Restore whatever action was current before we entered:
        _ACTION_CONTEXT.reset(token)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def children(self):
    """
    The list of child messages and actions sorted by task level, excluding
    the start and end messages.
    """
    ordered = sorted(
        self._children.values(), key=lambda child: child.task_level)
    return pvector(ordered)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start(self, start_message):
    """
    Start this action given its start message.

    @param WrittenMessage start_message: A start message that has the
        same level as this action.

    @raise InvalidStartMessage: If C{start_message} does not have a
        C{ACTION_STATUS_FIELD} of C{STARTED_STATUS}, or if it has a
        C{task_level} indicating that it is not the first message of an
        action.
    """
    status = start_message.contents.get(ACTION_STATUS_FIELD, None)
    if status != STARTED_STATUS:
        raise InvalidStartMessage.wrong_status(start_message)
    # The first message of an action always carries a trailing level of 1:
    if start_message.task_level.level[-1] != 1:
        raise InvalidStartMessage.wrong_task_level(start_message)
    return self.set(start_message=start_message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, message):
    """
    Serialize the given message in-place, converting inputs to outputs.

    We do this in-place for performance reasons. There are more fields in
    a message than there are L{Field} objects because of the timestamp,
    task_level and task_uuid fields. By only iterating over our L{Fields}
    we therefore reduce the number of function calls in a critical code
    path.

    @param message: A C{dict}.
    """
    for field_name, field in self.fields.items():
        message[field_name] = field.serialize(message[field_name])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, message):
    """
    Validate the given message.

    @param message: A C{dict}.

    @raises ValidationError: If the message has the wrong fields or one of
        its field values fail validation.
    """
    for field_name, field in self.fields.items():
        if field_name not in message:
            raise ValidationError(
                message, "Field %r is missing" % (field_name, ))
        field.validate(message[field_name])

    if not self.allow_additional_fields:
        # Reject any field that is neither declared nor reserved:
        allowed = set(self.fields) | set(RESERVED_FIELDS)
        for field_name in message:
            if field_name not in allowed:
                raise ValidationError(
                    message, "Unexpected field %r" % (field_name, ))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pretty_format(message):
    """
    Convert a message dictionary into a human-readable string.

    @param message: Message to parse, as dictionary.

    @return: Unicode string.
    """
    # Fields rendered specially (header line / fixed order below) rather
    # than in the sorted key/value listing:
    skip = {
        TIMESTAMP_FIELD, TASK_UUID_FIELD, TASK_LEVEL_FIELD, MESSAGE_TYPE_FIELD,
        ACTION_TYPE_FIELD, ACTION_STATUS_FIELD}
    def add_field(previous, key, value):
        # NOTE(review): `previous` is unused -- presumably a leftover from
        # an earlier accumulator-style implementation; confirm before
        # removing.
        # pformat gives a repr, so literal newlines/tabs inside strings
        # appear as the two-character escapes "\n"/"\t"; expand them so
        # multi-line values display readably:
        value = unicode(pprint.pformat(value, width=40)).replace(
            "\\n", "\n ").replace("\\t", "\t")
        # Reindent second line and later to match up with first line's
        # indentation:
        lines = value.split("\n")
        # indent lines are " <key length>| <value>"
        indent = "{}| ".format(" " * (2 + len(key)))
        value = "\n".join([lines[0]] + [indent + l for l in lines[1:]])
        return " %s: %s\n" % (key, value)
    remaining = ""
    # Show the type/status fields first, in a fixed order:
    for field in [ACTION_TYPE_FIELD, MESSAGE_TYPE_FIELD, ACTION_STATUS_FIELD]:
        if field in message:
            remaining += add_field(remaining, field, message[field])
    # Then all other fields, sorted by key:
    for (key, value) in sorted(message.items()):
        if key not in skip:
            remaining += add_field(remaining, key, value)
    level = "/" + "/".join(map(unicode, message[TASK_LEVEL_FIELD]))
    return "%s -> %s\n%sZ\n%s" % (
        message[TASK_UUID_FIELD],
        level,
        # If we were returning or storing the datetime we'd want to use an
        # explicit timezone instead of a naive datetime, but since we're
        # just using it for formatting we needn't bother.
        datetime.utcfromtimestamp(message[TIMESTAMP_FIELD]).isoformat(
            sep=str(" ")),
        remaining, )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _main():
    """
    Command-line program that reads in JSON from stdin and writes out
    pretty-printed messages to stdout.

    Exits immediately (after printing usage help) if any command-line
    arguments are given.
    """
    if argv[1:]:
        stdout.write(_CLI_HELP)
        raise SystemExit()
    for line in stdin:
        try:
            message = loads(line)
        except ValueError:
            # BUGFIX: on Python 3 iterating stdin yields str, so
            # rstrip(b"\n") raised TypeError; the text separator "\n"
            # works on Python 2 (where lines are byte strings) as well.
            stdout.write("Not JSON: {}\n\n".format(line.rstrip("\n")))
            continue
        if REQUIRED_FIELDS - set(message.keys()):
            stdout.write(
                "Not an Eliot message: {}\n\n".format(line.rstrip("\n")))
            continue
        result = pretty_format(message) + "\n"
        if PY2:
            # Python 2 stdout expects bytes:
            result = result.encode("utf-8")
        stdout.write(result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fields_for_exception(self, logger, exception):
    """
    Given an exception instance, return fields to add to the failed action
    message.

    @param logger: ``ILogger`` currently being used.

    @param exception: An exception instance.

    @return: Dictionary with fields to include.
    """
    # Walk the MRO so a registered extractor for a base class also applies
    # to subclasses; only the most specific registered class is used.
    for klass in getmro(exception.__class__):
        if klass not in self.registry:
            continue
        extractor = self.registry[klass]
        try:
            return extractor(exception)
        except:
            # A broken extractor must not break logging; record its
            # traceback and fall back to no extra fields.
            from ._traceback import write_traceback
            write_traceback(logger)
            return {}
    return {}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.