text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_performance_signatures(self, project, **params):
    """Fetch the performance signatures for a project.

    Keyword arguments are forwarded to the server as query parameters
    (e.g. a time range). Returns a PerformanceSignatureCollection
    wrapping the raw JSON response.
    """
    raw = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT, project, **params)
    return PerformanceSignatureCollection(raw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_performance_data(self, project, **params):
    """Fetch a dictionary of PerformanceSeries objects for a project.

    You can restrict which signatures are returned by passing a
    signature filter in ``params``. Returns a dict mapping each
    signature key in the response to a PerformanceSeries.
    """
    raw = self._get_json(self.PERFORMANCE_DATA_ENDPOINT, project, **params)
    series = {}
    for signature, data in raw.items():
        series[signature] = PerformanceSeries(data)
    return series
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_matchers():
    """Yield matcher functions from treeherder.autoclassify.matchers.

    A matcher is any function in that module whose name ends in
    ``_matcher``. Filtering by suffix is currently overkill but protects
    against an unwary engineer adding new functions to the matchers
    module that shouldn't be treated as matchers.
    """
    from . import matchers

    def _looks_like_matcher(obj):
        return inspect.isfunction(obj) and obj.__name__.endswith("_matcher")

    for _name, func in inspect.getmembers(matchers, _looks_like_matcher):
        yield func
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_best_matches(errors, matchers):
    """Find the best match for each error.

    We use the Good Enough™ ratio as a watershed level for match scores.

    :param errors: iterable of TextLogError instances
    :param matchers: iterable of matcher functions (see get_matchers)
    :yields: the single best (still unsaved) TextLogErrorMatch per error,
             skipping errors that produced no matches
    """
    for text_log_error in errors:
        matches = find_all_matches(text_log_error, matchers)  # TextLogErrorMatch instances, unsaved!
        # key prefers the highest score, breaking ties on the newest
        # (highest id) classified failure
        best_match = first(matches, key=lambda m: (-m.score, -m.classified_failure_id))
        if not best_match:
            continue
        # record which matcher produced the winning score, for monitoring
        newrelic.agent.record_custom_event('highest_scored_matcher', {
            'matcher': best_match.matcher_name,
            'score': best_match.score,
            'text_log_error': best_match.text_log_error_id,
        })
        yield best_match
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_all_matches(text_log_error, matchers):
    """Find matches for the given error using the given matcher functions.

    Yields *unsaved* TextLogErrorMatch instances, one per (score, id)
    pair produced by each matcher.
    """
    for matcher_func in matchers:
        matches = matcher_func(text_log_error)
        # matches: iterator of (score, ClassifiedFailure.id)
        # NOTE(review): if a matcher ever returns a lazy generator this
        # truthiness check will never skip it (generators are always
        # truthy) — presumably matchers return lists or None; confirm
        # against the matchers module.
        if not matches:
            continue
        for score, classified_failure_id in matches:
            yield TextLogErrorMatch(
                score=score,
                matcher_name=matcher_func.__name__,
                classified_failure_id=classified_failure_id,
                text_log_error=text_log_error,
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_best_match(text_log_error):
    """Get the best TextLogErrorMatch for a given TextLogError.

    Matches are filtered by a score cut-off, then the highest-scoring
    one is returned (ties broken by the newest classified failure).
    Returns None when no match clears the cut-off.
    """
    score_cut_off = 0.7
    return (text_log_error.matches.filter(score__gt=score_cut_off)
            .order_by("-score", "-classified_failure_id")
            .select_related('classified_failure')
            .first())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mark_best_classification(text_log_error, classified_failure):
    """Set the given ClassifiedFailure as best for the given TextLogError.

    Wrapper for setting best_classification on both TextLogError and
    FailureLine. Handles the duplication of best_classification on
    FailureLine so you don't have to: the related failure line is
    re-submitted to Elasticsearch after the metadata is saved.
    """
    text_log_error.metadata.best_classification = classified_failure
    text_log_error.metadata.save(update_fields=['best_classification'])
    # keep the Elasticsearch copy of the failure line in sync
    text_log_error.metadata.failure_line.elastic_search_insert()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mark_best_classifications(errors):
    """Mark the best classification for each TextLogError in ``errors``.

    Convenience wrapper around mark_best_classification: looks up the
    best match for every error, quietly skipping those with no match
    above the cut-off score.
    """
    for error in errors:
        match = get_best_match(error)
        if match:
            mark_best_classification(error, match.classified_failure)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_db(matches):
    """Save TextLogErrorMatch instances to the DB.

    We loop over each Match instance instead of calling bulk_create() so
    we can catch any potential IntegrityErrors (duplicate matches) and
    continue with the remaining saves.
    """
    for match in matches:
        try:
            match.save()
        except IntegrityError:
            # The format string has three placeholders, so the values
            # must be passed as separate lazy arguments; the previous
            # code passed a single tuple, which raises a formatting
            # error inside the logging machinery.
            logger.warning(
                "Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i",
                match.text_log_error_id,
                match.matcher_name,
                match.classified_failure_id,
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_json_schema(filename):
    """Get a JSON Schema by filename.

    Schemas are looked up under the relative ``schemas`` directory and
    parsed with YAML (a superset of JSON).
    """
    file_path = os.path.join("schemas", filename)
    with open(file_path) as f:
        # safe_load avoids constructing arbitrary Python objects from
        # YAML tags and silences the yaml.load() deprecation warning.
        schema = yaml.safe_load(f)
    return schema
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_job_info_artifact(job, job_info_artifact):
    """Store the contents of the job info artifact in job details.

    The artifact's ``blob`` is a JSON string holding a ``job_details``
    list; each entry is persisted as a JobDetail row (keyed on job +
    title/value, with ``url`` applied via update_or_create defaults).
    Over-long values are truncated with a warning rather than rejected.
    """
    job_details = json.loads(job_info_artifact['blob'])['job_details']
    for job_detail in job_details:
        job_detail_dict = {
            'title': job_detail.get('title'),
            'value': job_detail['value'],
            'url': job_detail.get('url')
        }
        # truncate any value exceeding its model field's max_length
        for (k, v) in job_detail_dict.items():
            max_field_length = JobDetail._meta.get_field(k).max_length
            if v is not None and len(v) > max_field_length:
                logger.warning("Job detail '%s' for job_guid %s too long, truncating",
                               v[:max_field_length], job.guid)
                job_detail_dict[k] = v[:max_field_length]
        # move the url field to be updated in defaults now that it's
        # had its size trimmed, if necessary
        job_detail_dict['defaults'] = {'url': job_detail_dict['url']}
        del job_detail_dict['url']
        JobDetail.objects.update_or_create(
            job=job,
            **job_detail_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_text_log_summary_artifact(job, text_log_summary_artifact):
    """Store the contents of the text log summary artifact.

    Creates a TextLogStep row for each step in the artifact's JSON blob
    (plus TextLogError rows for any step errors), all inside a single
    transaction, then warms the error-summary cache for the job.
    """
    step_data = json.loads(
        text_log_summary_artifact['blob'])['step_data']
    # invert the RESULTS choices so we can map result names -> stored values
    result_map = {v: k for (k, v) in TextLogStep.RESULTS}
    with transaction.atomic():
        for step in step_data['steps']:
            # truncate the step name to fit the model field
            name = step['name'][:TextLogStep._meta.get_field('name').max_length]
            # process start/end times if we have them
            # we currently don't support timezones in treeherder, so
            # just ignore that when importing/updating the bug to avoid
            # a ValueError (though by default the text log summaries
            # we produce should have time expressed in UTC anyway)
            time_kwargs = {}
            for tkey in ('started', 'finished'):
                if step.get(tkey):
                    time_kwargs[tkey] = dateutil.parser.parse(
                        step[tkey], ignoretz=True)
            log_step = TextLogStep.objects.create(
                job=job,
                started_line_number=step['started_linenumber'],
                finished_line_number=step['finished_linenumber'],
                name=name,
                result=result_map[step['result']],
                **time_kwargs)
            if step.get('errors'):
                for error in step['errors']:
                    TextLogError.objects.create(
                        step=log_step,
                        line_number=error['linenumber'],
                        # astral_filter presumably strips non-BMP chars
                        # the DB can't store — confirm at its definition
                        line=astral_filter(error['line']))
    # get error summary immediately (to warm the cache)
    error_summary.get_error_summary(job)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_artifact_json_blobs(artifacts):
    """Ensure JSON artifact blobs passed as dicts are converted to JSON.

    Mutates each artifact of type 'json' (case-insensitive) in place,
    replacing any non-string blob with its JSON-encoded form, and
    returns the same list for convenience.
    """
    for item in artifacts:
        is_json = item['type'].lower() == 'json'
        if is_json and not isinstance(item['blob'], str):
            item['blob'] = json.dumps(item['blob'])
    return artifacts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_option_collection_hash(self):
    """Get the option collection hash mapping.

    Returns a dict keyed by ``option_collection_hash`` whose values are
    the corresponding ``options`` lists, as reported by the endpoint.
    """
    response = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT)
    return {row['option_collection_hash']: row['options'] for row in response}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_pushes(self, project, **params):
    """Gets pushes from project, filtered by parameters.

    By default this method will just return the latest 10 pushes
    (if they exist).

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    return self._get_json_list(self.PUSH_ENDPOINT, project, **params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_jobs(self, project, **params):
    """Gets jobs from project, filtered by parameters.

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    return self._get_json_list(self.JOBS_ENDPOINT, project, **params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_job_log_url(self, project, **params):
    """Gets job log url, filtered by parameters.

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    return self._get_json(self.JOB_LOG_URL_ENDPOINT, project,
                          **params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_documents(index=INDEX_NAME):
    """Yield every document in the given index.

    Yields the full Elasticsearch hit objects, so callers can read hit
    metadata as well as the document source.
    """
    match_all = {
        'query': {
            'match_all': {}
        }
    }
    yield from raw_query(match_all, index=index)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index'):
    """Wrapper of elasticsearch's bulk method.

    Converts an iterable of models to document operations and submits
    them to Elasticsearch. Returns a count of operations when done.

    https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk
    https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
    """
    # Use the index/doc_type *arguments* when building operations;
    # previously the module constants were used here, so passing a
    # custom index or doc_type silently had no effect on the ops.
    actions = compact(dict_to_op(
        to_dict(model),
        index_name=index,
        doc_type=doc_type,
        op_type=action,
    ) for model in iterable)
    # fail fast if there are no actions
    if not actions:
        return 0
    items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index)
    return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_index(index=INDEX_NAME):
    """Return a document count for the given index.

    https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.count
    https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html
    """
    # Refresh the index first so the count reflects recent writes.
    refresh_index()
    match_all = {
        'query': {
            'match_all': {}
        }
    }
    response = es_conn.count(index=index, doc_type=DOC_TYPE, body=match_all)
    return response['count']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_document(id, index=INDEX_NAME, doc_type=DOC_TYPE, **kwargs):
    """Thin wrapper to get a single document by ID.

    Returns only the document's ``_source`` payload, not the full hit.

    https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get
    https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
    """
    result = es_conn.get(index=index, doc_type=doc_type, id=id, **kwargs)
    return result['_source']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raw_query(query, index=INDEX_NAME, doc_type=DOC_TYPE):
    """Thin wrapper of the search function to provide useful defaults.

    Returns the raw list of hits, including per-hit metadata.

    https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
    http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
    """
    # Honour the doc_type argument; previously the module-level DOC_TYPE
    # constant was always passed, silently ignoring the caller's value.
    result = es_conn.search(index=index, doc_type=doc_type, body=query)
    return result['hits']['hits']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(query, index=INDEX_NAME, doc_type=DOC_TYPE):
    """Thin wrapper of the main query function returning just the objects.

    Strips the Elasticsearch hit metadata, returning only each hit's
    ``_source`` payload.
    """
    hits = raw_query(query, index=index, doc_type=doc_type)
    return [hit['_source'] for hit in hits]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_expiration_field_for_expired_jobs(self):
    """Clear the expiration date of every job priority that has expired.

    Sets ``expiration_date`` to None on expired rows (the previous
    docstring's "Set the expiration date" wording was misleading).
    """
    # Only select rows where there is an expiration date set
    for job in JobPriority.objects.filter(expiration_date__isnull=False):
        if job.has_expired():
            job.expiration_date = None
            job.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adjust_jobs_priority(self, high_value_jobs, priority=1):
    """For every job priority determine if we need to increase or decrease it.

    Currently, high value jobs have a priority of 1 and a timeout of 0.
    Priorities not listed in ``high_value_jobs`` are demoted to
    SETA_LOW_VALUE_PRIORITY; listed ones are promoted to ``priority``.
    Rows already at the target priority are left untouched.
    """
    # Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100
    # for jobs update via load_preseed) are updated
    for jp in JobPriority.objects.filter(expiration_date__isnull=True):
        if jp.unique_identifier() not in high_value_jobs:
            if jp.priority != SETA_LOW_VALUE_PRIORITY:
                logger.warning('Decreasing priority of %s', jp.unique_identifier())
                jp.priority = SETA_LOW_VALUE_PRIORITY
                jp.save(update_fields=['priority'])
        elif jp.priority != priority:
            logger.warning('Increasing priority of %s', jp.unique_identifier())
            jp.priority = priority
            jp.save(update_fields=['priority'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_job_list_response(self, job_qs, offset, count, return_type):
    '''
    custom method to serialize + format jobs information

    It's worth doing this big ugly thing (as opposed to using
    the django rest framework serializer or whatever) as
    this function is often in the critical path

    :param job_qs: Job queryset to slice and serialize
    :param offset: index of the first row to return
    :param count: maximum number of rows to return
    :param return_type: 'dict' for a list of dicts; 'list' for a list
                        of value lists plus a job_property_names key
    '''
    option_collection_map = OptionCollection.objects.get_option_collection_map()
    results = []
    # _property_query_mapping rows appear to be
    # (output name, queryset field, optional transform) — the transform
    # at index 2 is applied to the raw value below.
    for values in job_qs[offset:(offset+count)].values_list(
            *[pq[1] for pq in self._property_query_mapping]):
        # resolve the platform option (e.g. opt/debug) via its hash
        platform_option = option_collection_map.get(
            values[self._option_collection_hash_idx],
            "")
        # some values need to be transformed
        values = list(values)
        for (i, _) in enumerate(values):
            func = self._property_query_mapping[i][2]
            if func:
                values[i] = func(values[i])
        # append results differently depending on if we are returning
        # a dictionary or a list
        if return_type == 'dict':
            results.append(dict(zip(
                [pq[0] for pq in self._property_query_mapping] +
                ['platform_option'],
                values + [platform_option])))
        else:
            results.append(values + [platform_option])
    response_dict = {
        'results': results
    }
    if return_type == 'list':
        response_dict.update({
            'job_property_names': [pq[0] for pq in self._property_query_mapping] + ['platform_option']
        })
    return response_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self, request, project, pk=None):
    """GET method implementation for detail view.

    Return a single job with log_references and artifact names and
    links to the artifact blobs. Responds 404 when the job does not
    exist in the given project.
    """
    try:
        job = Job.objects.select_related(
            *self._default_select_related + ['taskcluster_metadata']).get(
                repository__name=project, id=pk)
    except Job.DoesNotExist:
        return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
    resp = serializers.JobSerializer(job, read_only=True).data
    resp["resource_uri"] = reverse("jobs-detail",
                                   kwargs={"project": project, "pk": pk})
    resp["logs"] = []
    for (name, url) in JobLog.objects.filter(job=job).values_list(
            'name', 'url'):
        resp["logs"].append({'name': name, 'url': url})
    platform_option = job.get_platform_option()
    if platform_option:
        resp["platform_option"] = platform_option
    # taskcluster metadata is optional — not every job has the relation
    try:
        resp['taskcluster_metadata'] = {
            'task_id': job.taskcluster_metadata.task_id,
            'retry_id': job.taskcluster_metadata.retry_id
        }
    except ObjectDoesNotExist:
        pass
    # translate the stored autoclassify status value to its display name
    status_map = {k: v for k, v in Job.AUTOCLASSIFY_STATUSES}
    resp["autoclassify_status"] = status_map[job.autoclassify_status]
    return Response(resp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bug_suggestions(self, request, project, pk=None):
    """Return the set of bug suggestions for this job.

    Responds 404 when no job with the given id exists in the project.
    """
    try:
        job = Job.objects.get(repository__name=project, id=pk)
    except ObjectDoesNotExist:
        message = "No job with id: {0}".format(pk)
        return Response(message, status=HTTP_404_NOT_FOUND)
    summary = get_error_summary(job)
    return Response(summary)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def similar_jobs(self, request, project, pk=None):
    """Get a list of jobs similar to the one selected.

    "Similar" means same job_type in the same repository, excluding the
    job itself. Supports offset/count paging and the same return_type
    handling as the job list endpoint.
    """
    try:
        repository = Repository.objects.get(name=project)
    except Repository.DoesNotExist:
        return Response({
            "detail": "No project with name {}".format(project)
        }, status=HTTP_404_NOT_FOUND)
    try:
        job = Job.objects.get(repository=repository, id=pk)
    except ObjectDoesNotExist:
        return Response("No job with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)
    filter_params = request.query_params.copy()
    try:
        offset = int(filter_params.get("offset", 0))
        # we don't need a big page size on this endoint,
        # let's cap it to 50 elements
        count = int(filter_params.get("count", 50))
    except ValueError:
        return Response("Invalid value for offset or count",
                        status=HTTP_400_BAD_REQUEST)
    return_type = filter_params.get("return_type", "dict").lower()
    # apply any remaining query params as job filters
    jobs = JobFilter({k: v for (k, v) in filter_params.items()},
                     queryset=Job.objects.filter(
                         job_type_id=job.job_type_id,
                         repository=repository).exclude(
                             id=job.id).select_related(
                                 *self._default_select_related)).qs
    # similar jobs we want in descending order from most recent
    jobs = jobs.order_by('-start_time')
    response_body = self._get_job_list_response(jobs, offset, count,
                                                return_type)
    response_body["meta"] = dict(offset=offset, count=count,
                                 repository=project)
    return Response(response_body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def structured_iterator(failure_lines):
    """Yield (FailureLine, TBPL-formatted string) tuples.

    Lines whose summary is empty are skipped. Once the input is
    exhausted the generator keeps yielding (None, None) forever, so
    consumers can keep pulling without hitting StopIteration.
    """
    formatter = TbplFormatter()
    for line in failure_lines:
        formatted = failure_line_summary(formatter, line)
        if formatted:
            yield line, formatted
    while True:
        yield None, None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def failure_line_summary(formatter, failure_line):
    """Create a mozlog-formatted error summary string from a failure_line.

    Create a string which can be compared to a TextLogError.line string
    to see if they match. Returns None for truncated lines, unknown
    mozlog actions, or an empty formatted log.
    """
    if failure_line.action == "test_result":
        action = "test_status" if failure_line.subtest is not None else "test_end"
    elif failure_line.action == "truncated":
        return
    else:
        action = failure_line.action
    try:
        mozlog_func = getattr(formatter, action)
    except AttributeError:
        logger.warning('Unknown mozlog function "%s"', action)
        return
    formatted_log = mozlog_func(failure_line.to_mozlog_format())
    split_log = first(formatted_log.split("\n", 1))
    if not split_log:
        # Pass the log as a lazy %-style argument; previously it was
        # passed positionally with no placeholder in the message, which
        # raises a formatting error inside the logging machinery.
        logger.debug('Failed to split log: %s', formatted_log)
        return
    return split_log.strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tls_redis_url(redis_url):
    """Return the TLS version of a Heroku REDIS_URL string.

    Whilst Redis server (like memcached) doesn't natively support TLS,
    Heroku runs an stunnel daemon on their Redis instances, which can be
    connected to directly by Redis clients that support TLS (avoiding
    the need for stunnel on the client). The stunnel port is one higher
    than the Redis server port, and the informal `rediss://` scheme is
    used to instruct clients to wrap the connection with TLS.

    Will convert 'redis://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8409'
    into its `rediss://` equivalent on the next port up, with certificate
    validation disabled via the ssl_cert_reqs query argument.

    See: https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel
    """
    url = furl(redis_url)
    url.port += 1      # stunnel listens one port above the Redis server
    url.scheme += 's'  # redis:// -> rediss://
    # Disable TLS certificate validation (restoring the behaviour of the older redis-py 2.x),
    # since for now Heroku Redis uses self-signed certificates:
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1510000
    url.args['ssl_cert_reqs'] = 'none'
    return str(url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
    """Iterate over each line of the log, running each parser against it.

    Stream lines from the gzip file and run each parser against it,
    building the ``artifact`` as we go. Raises LogSizeException when
    the reported Content-Length exceeds MAX_DOWNLOAD_SIZE_IN_BYTES.
    """
    with make_request(self.url, stream=True) as response:
        # -1 when the server doesn't report a Content-Length
        download_size_in_bytes = int(response.headers.get('Content-Length', -1))
        # Temporary annotation of log size to help set thresholds in bug 1295997.
        newrelic.agent.add_custom_parameter(
            'unstructured_log_size',
            download_size_in_bytes
        )
        newrelic.agent.add_custom_parameter(
            'unstructured_log_encoding',
            response.headers.get('Content-Encoding', 'None')
        )
        if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
            raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)
        # Lines must be explicitly decoded since `iter_lines()`` returns bytes by default
        # and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline
        # characters such as `\u0085` (which can appear in test output) are treated the same
        # as `\n` or `\r`, and so split into unwanted additional lines by `iter_lines()`.
        for line in response.iter_lines():
            for builder in self.builders:
                # Using `replace` to prevent malformed unicode (which might possibly exist
                # in test message output) from breaking parsing of the rest of the log.
                builder.parse_line(line.decode('utf-8', 'replace'))
    # gather the artifacts from all builders
    for builder in self.builders:
        # Run end-of-parsing actions for this parser,
        # in case the artifact needs clean-up/summarising.
        builder.finish_parse()
        name = builder.name
        artifact = builder.get_artifact()
        # skip empty performance_data artifacts entirely
        if name == 'performance_data' and not artifact[name]:
            continue
        self.artifacts[name] = artifact
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_hash(options):
    """Return an option_collection_hash for an iterable of option names.

    The options are sorted first so the hash is independent of input
    order. (sorted() accepts any iterable directly, so the redundant
    list() copy was dropped.)
    """
    sha_hash = sha1()
    # equivalent to looping over the options and calling sha_hash.update()
    sha_hash.update(''.join(sorted(options)).encode('utf-8'))
    return sha_hash.hexdigest()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
    """Delete data older than cycle_interval, in chunks of chunk_size.

    Related data (FailureLine rows and their Elasticsearch documents)
    is removed before the jobs themselves so no orphans are left if the
    process is interrupted. Returns the number of jobs deleted.
    """
    # Retrieve list of jobs to delete
    jobs_max_timestamp = datetime.datetime.now() - cycle_interval
    jobs_cycled = 0
    while True:
        jobs_chunk = list(self.filter(repository=repository, submit_time__lt=jobs_max_timestamp)
                          .values_list('guid', flat=True)[:chunk_size])
        if not jobs_chunk:
            # no more jobs to cycle, we're done!
            return jobs_cycled
        # Remove ORM entries for these jobs that don't currently have a
        # foreign key relation
        lines = FailureLine.objects.filter(job_guid__in=jobs_chunk)
        if settings.ELASTICSEARCH_URL:
            # To delete the data from elasticsearch we need the document
            # id. However selecting all this data can be rather slow, so
            # split the job into multiple smaller chunks.
            failures = itertools.chain.from_iterable(
                chunked_qs(
                    lines,
                    chunk_size=chunk_size,
                    fields=['id', 'test'],
                ),
            )
            bulk(failures, action='delete')
        lines.delete()
        # cycle jobs *after* related data has been deleted, to be sure
        # we don't have any orphan data
        try:
            self.filter(guid__in=jobs_chunk).delete()
        except UnicodeDecodeError as e:
            # Some TextLogError `line` fields contain invalid Unicode, which causes a
            # UnicodeDecodeError since Django's .delete() fetches all fields (even those
            # not required for the delete). As such we delete the offending `TextLogError`s
            # separately (using only() to prevent pulling in `line`), before trying again.
            # This can likely be removed once all pre-Python 3 migration `TextLogError`s
            # have expired (check New Relic Insights at that point to confirm). See:
            # https://bugzilla.mozilla.org/show_bug.cgi?id=1528710
            newrelic.agent.record_custom_event('cycle_data UnicodeDecodeError workaround', {
                'exception': str(e),
            })
            TextLogError.objects.filter(step__job__guid__in=jobs_chunk).only('id').delete()
            self.filter(guid__in=jobs_chunk).delete()
        jobs_cycled += len(jobs_chunk)
        if sleep_time:
            # Allow some time for other queries to get through
            time.sleep(sleep_time)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_fully_verified(self):
    """Determine if this Job is fully verified based on the state of its Errors.

    An Error (TextLogError or FailureLine) is considered Verified once its
    related TextLogErrorMetadata has best_is_verified set to True. A Job is
    then considered Verified once all of its Errors' TextLogErrorMetadata
    instances are verified.
    """
    # We only need a boolean, so let the DB stop at the first unverified
    # row with exists() instead of counting all of them.
    has_unverified_errors = TextLogError.objects.filter(
        _metadata__best_is_verified=False,
        step__job=self).exists()
    if has_unverified_errors:
        logger.error("Job %r has unverified TextLogErrors", self)
        return False
    logger.info("Job %r is fully verified", self)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_after_verification(self, user):
    """Updates a job's state after being verified by a sheriff.

    If the job is fully verified and no human has already classified
    it, an 'autoclassified intermittent' JobNote is created on behalf
    of the given user.
    """
    if not self.is_fully_verified():
        return
    classification = 'autoclassified intermittent'
    # any note with a *different* classification implies a human
    # classified this job already
    already_classified = (JobNote.objects.filter(job=self)
                          .exclude(failure_classification__name=classification)
                          .exists())
    if already_classified:
        # Don't add an autoclassification note if a Human already
        # classified this job.
        return
    JobNote.create_autoclassify_job_note(job=self, user=user)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_manual_classification_line(self):
    """If this Job has a single TextLogError line, return that TextLogError.

    Some Jobs only have one related [via TextLogStep] TextLogError. This
    method checks if this Job is one of those (returning None if not) by:
    * checking the number of related TextLogErrors
    * counting the number of search results for the single TextLogError
    * checking there is a related FailureLine
    * checking the related FailureLine is in a given state

    If all these checks pass the TextLogError is returned; any failure
    returns None.
    """
    try:
        text_log_error = TextLogError.objects.get(step__job=self)
    except (TextLogError.DoesNotExist, TextLogError.MultipleObjectsReturned):
        return None
    # Can this TextLogError be converted into a single "useful search"?
    # FIXME: what is the significance of only one search result here?
    from treeherder.model.error_summary import get_useful_search_results
    search_results = get_useful_search_results(self)
    if len(search_results) != 1:
        return None
    # Check that we have a related FailureLine
    failure_line = text_log_error.get_failure_line()
    if failure_line is None:
        return None
    # Check our FailureLine is in a state we expect for
    # auto-classification.
    if not (failure_line.action == "test_result" and
            failure_line.test and
            failure_line.status and
            failure_line.expected):
        return None
    return text_log_error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_failure_type(self):
    """Updates the failure type of this Note's Job.

    Set the linked Job's failure type to that of the most recent JobNote,
    or to 'not classified' if there are no JobNotes. This is called when
    JobNotes are created (via .save()) and deleted (via .delete()) and is
    used to resolve the FailureClassification which has been denormalised
    onto Job.
    """
    # update the job classification from the most recent note, if any
    note = JobNote.objects.filter(job=self.job).order_by('-created').first()
    if note:
        self.job.failure_classification_id = note.failure_classification.id
    else:
        # no notes left: fall back to the sentinel classification
        self.job.failure_classification_id = FailureClassification.objects.get(name='not classified').id
    self.job.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ensure_classification(self):
""" Ensures a single TextLogError's related bugs have Classifications. If the linked Job has a single meaningful TextLogError: - find the bugs currently related to it via a Classification - find the bugs mapped to the job related to this note - find the bugs that are mapped but not classified - link this subset of bugs to Classifications - if there's only one new bug and no existing ones, verify it """ |
# if this note was automatically filed, don't update the auto-classification information
if not self.user:
    return
# only intermittent-style classifications should create bug links
if self.failure_classification.name not in ["intermittent", "intermittent needs filing"]:
    return
# if the linked Job has more than one TextLogError, ignore
text_log_error = self.job.get_manual_classification_line()
if not text_log_error:
    return
# evaluate the QuerySet here so it can be used when creating new_bugs below
existing_bugs = list(ClassifiedFailure.objects.filter(error_matches__text_log_error=text_log_error)
                     .values_list('bug_number', flat=True))
# bugs mapped to this job which don't yet have a Classification
new_bugs = (self.job.bugjobmap_set.exclude(bug_id__in=existing_bugs)
            .values_list('bug_id', flat=True))
if not new_bugs:
    return
# Create Match instances for each new bug, creating the
# ClassifiedFailure rows on demand
for bug_number in new_bugs:
    classification, _ = ClassifiedFailure.objects.get_or_create(bug_number=bug_number)
    text_log_error.create_match("ManualDetector", classification)
# if there's only one new bug and no existing ones, verify it
# (`classification` still holds the single instance from the loop above)
if len(new_bugs) == 1 and not existing_bugs:
    text_log_error.verify_classification(classification)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_autoclassify_job_note(self, job, user=None):
    """Create a JobNote, possibly via auto-classification.

    Creates BugJobMap rows linking the given Job to the bugs of its
    verified Classifications, then creates and returns a JobNote.

    Args:
        job: the Job being classified.
        user: the User performing the classification, or None when the
            note is filed by the auto-classifier (which selects the
            'autoclassified intermittent' classification).
    """
    # Only insert bugs for verified failures since these are automatically
    # mirrored to ES and the mirroring can't be undone
    # TODO: Decide whether this should change now that we're no longer mirroring.
    bug_numbers = set(ClassifiedFailure.objects
                      .filter(best_for_errors__text_log_error__step__job=job,
                              best_for_errors__best_is_verified=True)
                      .exclude(bug_number=None)
                      .exclude(bug_number=0)
                      .values_list('bug_number', flat=True))
    # Bug fix: use flat=True so this is a set of ints (previously it held
    # 1-tuples, so the set difference below removed nothing and duplicate
    # maps were attempted), and scope the lookup to *this* job since the
    # job/bug pair is what must be unique.
    existing_maps = set(BugJobMap.objects.filter(job=job, bug_id__in=bug_numbers)
                        .values_list('bug_id', flat=True))
    for bug_number in (bug_numbers - existing_maps):
        BugJobMap.objects.create(job_id=job.id, bug_id=bug_number, user=user)
    # if user is not specified, then this is an autoclassified job note and
    # we should mark it as such
    classification_name = 'intermittent' if user else 'autoclassified intermittent'
    classification = FailureClassification.objects.get(name=classification_name)
    return JobNote.objects.create(job=job,
                                  failure_classification=classification,
                                  user=user,
                                  text="")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unstructured_bugs(self):
    """Return Bug Suggestions entries matching this line's components.

    Scans the job's "useful search" results for ones containing every
    serialized component of this line and collects their suggested bugs,
    de-duplicated by bug id while preserving first-seen order.
    """
    components = self._serialized_components()
    if not components:
        return []
    from treeherder.model.error_summary import get_useful_search_results
    job = Job.objects.get(guid=self.job_guid)
    suggestions = []
    seen_ids = set()
    for result in get_useful_search_results(job):
        # every component must appear in the search string for a match
        if not all(component in result["search"] for component in components):
            continue
        for bug in itertools.chain(result["bugs"]["open_recent"],
                                   result["bugs"]["all_others"]):
            if bug["id"] in seen_ids:
                continue
            seen_ids.add(bug["id"])
            suggestions.append(bug)
    return suggestions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_mozlog_format(self):
    """Convert a FailureLine into a mozlog formatted dictionary.

    Fields with falsy values are omitted from the result.
    """
    fields = (
        ("action", self.action),
        ("line_number", self.line),
        ("test", self.test),
        ("subtest", self.subtest),
        ("status", self.status),
        ("expected", self.expected),
        ("message", self.message),
        ("signature", self.signature),
        ("level", self.level),
        ("stack", self.stack),
        ("stackwalk_stdout", self.stackwalk_stdout),
        ("stackwalk_stderr", self.stackwalk_stderr),
    )
    # drop empty/None values so the payload stays compact
    return {key: value for key, value in fields if value}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_bug(self, bug_number):
    """Set the bug number of this ClassifiedFailure.

    If another ClassifiedFailure already carries the same bug number,
    this instance is merged into it via replace_with().  Returns the
    surviving instance in either case.
    """
    # no-op when the number is unchanged
    if bug_number == self.bug_number:
        return self
    existing = ClassifiedFailure.objects.filter(bug_number=bug_number).first()
    if not existing:
        self.bug_number = bug_number
        self.save(update_fields=['bug_number'])
        return self
    # another ClassifiedFailure owns this bug number: merge into it
    self.replace_with(existing)
    return existing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_with(self, other):
    """Merge this ClassifiedFailure into *other* and delete it.

    Stale TextLogErrorMatch rows (flagged by update_matches) are removed
    and related TextLogErrorMetadata best_classification pointers are
    repointed at *other* before this instance is deleted.
    """
    stale_match_ids = list(self.update_matches(other))
    TextLogErrorMatch.objects.filter(id__in=stale_match_ids).delete()
    # repoint metadata that considered us the best classification
    self.best_for_errors.update(best_classification=other)
    self.delete()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_matches(self, other):
""" Update this instance's Matches to point to the given other's Matches. Find Matches with the same TextLogError as our Matches, updating their score if less than ours and mark our matches for deletion. If there are no other matches, update ours to point to the other ClassifiedFailure. """ |
for match in self.error_matches.all():
    # matches against the same TextLogError already owned by `other`
    other_matches = TextLogErrorMatch.objects.filter(
        classified_failure=other,
        text_log_error=match.text_log_error,
    )
    if not other_matches:
        # no conflict: simply repoint our match at the other classification
        match.classified_failure = other
        match.save(update_fields=['classified_failure'])
        continue
    # if any of our matches have higher scores than other's matches,
    # overwrite with our score.
    other_matches.filter(score__lt=match.score).update(score=match.score)
    # our match is now redundant; yield its id so the caller can delete it
    yield match.id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_match(self, matcher_name, classification):
    """Record a TextLogErrorMatch linking this error to a classification.

    A fresh ClassifiedFailure is created when None is passed.  Typically
    used for manual "matches" or in tests.
    """
    if classification is None:
        classification = ClassifiedFailure.objects.create()
    # manual matches always carry a perfect score
    TextLogErrorMatch.objects.create(
        score=1,
        matcher_name=matcher_name,
        classified_failure=classification,
        text_log_error=self,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_classification(self, classification):
""" Mark the given ClassifiedFailure as verified. Handles the classification not currently being related to this TextLogError and no Metadata existing. """ |
# ensure the classification is related to this error before verifying it
if classification not in self.classified_failures.all():
    self.create_match("ManualDetector", classification)
# create a TextLogErrorMetadata instance for this TextLogError if it
# doesn't exist. We can't use update_or_create here since OneToOne
# relations don't use an object manager so a missing relation is simply
# None as opposed to RelatedManager.
if self.metadata is None:
    TextLogErrorMetadata.objects.create(text_log_error=self,
                                        best_classification=classification,
                                        best_is_verified=True)
else:
    self.metadata.best_classification = classification
    self.metadata.best_is_verified = True
    self.metadata.save(update_fields=['best_classification', 'best_is_verified'])
# NOTE(review): when the create() branch above ran, self.metadata may still
# be the cached None from before — confirm the OneToOne cache is refreshed
# before this dereference.
self.metadata.failure_line.elastic_search_insert()
# Send an event to New Relic when verifying an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
if not match:
    return
newrelic.agent.record_custom_event('user_verified_classification', {
    'matcher': match.matcher_name,
    'job_id': self.id,
})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_bug(self, request):
""" Create a bugzilla bug with passed params """ |
# refuse to file bugs when the server has no Bugzilla API credentials
if settings.BUGFILER_API_KEY is None:
    return Response({"failure": "Bugzilla API key not set!"},
                    status=HTTP_400_BAD_REQUEST)
params = request.data
# Arbitrarily cap crash signatures at 2048 characters to prevent perf issues on bmo
crash_signature = params.get("crash_signature")
if crash_signature and len(crash_signature) > 2048:
    return Response({"failure": "Crash signature can't be more than 2048 characters."},
                    status=HTTP_400_BAD_REQUEST)
# obfuscate the reporter's email address to reduce scraping/spam
description = u"**Filed by:** {}\n{}".format(
    request.user.email.replace('@', " [at] "),
    params.get("comment", "")
).encode("utf-8")
# NOTE(review): params.get("summary") may be None, which would raise
# AttributeError here — confirm upstream validation guarantees a summary.
summary = params.get("summary").encode("utf-8").strip()
url = settings.BUGFILER_API_URL + "/rest/bug"
headers = {
    'x-bugzilla-api-key': settings.BUGFILER_API_KEY,
    'Accept': 'application/json'
}
data = {
    'product': params.get("product"),
    'component': params.get("component"),
    'summary': summary,
    'keywords': params.get("keywords"),
    'blocks': params.get("blocks"),
    'depends_on': params.get("depends_on"),
    'see_also': params.get("see_also"),
    'version': params.get("version"),
    'cf_crash_signature': params.get("crash_signature"),
    'severity': params.get("severity"),
    'priority': params.get("priority"),
    'description': description,
    'comment_tags': "treeherder",
}
try:
    response = make_request(url, method='POST', headers=headers, json=data)
except requests.exceptions.HTTPError as e:
    # surface Bugzilla's error message when available, else the raw body
    try:
        message = e.response.json()['message']
    except (ValueError, KeyError):
        message = e.response.text
    return Response({"failure": message}, status=HTTP_400_BAD_REQUEST)
return Response({"success": response.json()["id"]})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chunked_qs(qs, chunk_size=10000, fields=None):
    """Yield lists of up to chunk_size rows from qs, ordered by id.

    Uses keyset pagination on `id` rather than OFFSET slicing.  Note:
    while Django 2.0 provides chunking via QuerySet.iterator() we can't
    make use of this while using MySQL, which doesn't support streaming
    results.

    https://docs.djangoproject.com/en/2.0/ref/models/querysets/#iterator
    """
    last_seen_id = 0
    while True:
        page = qs.filter(id__gt=last_seen_id).order_by('id')
        if fields is not None:
            page = page.only(*fields)
        # Materialise the page here so the QuerySet executes once and the
        # final row's id is available below (slices ignore .last()).
        rows = list(page[:chunk_size])
        if not rows:
            break
        yield rows
        # resume strictly after the largest id we have seen
        last_seen_id = rows[-1].id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chunked_qs_reverse(qs, chunk_size=10000):
    """Yield lists of up to chunk_size rows from qs, in descending id order.

    Iterates the given QuerySet in reverse, chunk_size rows at a time.
    Kept separate from chunked_qs since the pagination direction makes the
    two just different enough to share awkwardly.
    """
    if not qs:
        return
    qs = qs.order_by('-id')
    # Can't use .only() here in case the query used select_related
    max_id = qs.first().id
    while True:
        chunk = qs.filter(id__lte=max_id)  # upper bound of this chunk
        # materialise so we can read the smallest id yielded below
        rows = list(chunk[:chunk_size])
        if not rows:
            break
        yield rows
        # Bug fix: continue strictly below the smallest id just yielded.
        # The previous `max_id -= chunk_size` assumed contiguous ids and
        # re-yielded rows whenever ids were sparse.
        max_id = rows[-1].id - 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, request, project):
    """Add a new relation between a job and a bug."""
    payload = request.data
    try:
        BugJobMap.create(
            job_id=int(payload['job_id']),
            bug_id=int(payload['bug_id']),
            user=request.user,
        )
        message = "Bug job map saved"
    except IntegrityError:
        # the job/bug pair already exists; treat as a no-op
        message = "Bug job map skipped: mapping already exists"
    return Response({"message": message})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy(self, request, project, pk=None):
    """Delete a bug-job-map entry.

    pk is a composite key; the code parses the job id first then the bug
    id (an earlier docstring claimed "bug_id-job_id" — the parse order
    here is authoritative).
    """
    job_part, bug_part = pk.split("-")
    job = Job.objects.get(repository__name=project, id=int(job_part))
    BugJobMap.objects.filter(job=job, bug_id=int(bug_part)).delete()
    return Response({"message": "Bug job map deleted"})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self, request, project, pk=None):
    """Retrieve a bug-job-map entry.

    pk is a composite key parsed as job id then bug id.
    """
    job_part, bug_part = pk.split("-")
    job = Job.objects.get(repository__name=project, id=int(job_part))
    try:
        mapping = BugJobMap.objects.get(job=job, bug_id=int(bug_part))
    except BugJobMap.DoesNotExist:
        return Response("Object not found", status=HTTP_404_NOT_FOUND)
    return Response(BugJobMapSerializer(mapping).data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_unicode_character_to_ascii_repr(match_obj):
    """Convert a matched unicode character to an ASCII representation.

    For example the emoji 🍆 becomes the literal <U+01F346>.
    """
    character = match_obj.group(0)
    # upper-case hex code point, zero-padded to six digits
    return '<U+{:06X}>'.format(ord(character))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_push_logs():
    """Queue one fetch_hg_push_log subtask per active hg repository."""
    active_hg_repos = Repository.objects.filter(dvcs_type='hg',
                                                active_status="active")
    for repo in active_hg_repos:
        # pushlog parsing runs on its own dedicated queue
        fetch_hg_push_log.apply_async(
            args=(repo.name, repo.url),
            queue='pushlog'
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_hg_push_log(repo_name, repo_url):
    """Run a HgPushlog ETL process for the given repository."""
    # tag the transaction so New Relic traces identify the repository
    newrelic.agent.add_custom_parameter("repo_name", repo_name)
    pushlog_url = repo_url + '/json-pushes/?full=1&version=2'
    HgPushlogProcess().run(pushlog_url, repo_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_pulse_jobs(pulse_job, exchange, routing_key):
    """Load a pending job message received from a pulse exchange."""
    # record the message origin for New Relic debugging
    newrelic.agent.add_custom_parameter("exchange", exchange)
    newrelic.agent.add_custom_parameter("routing_key", routing_key)
    loader = JobLoader()
    loader.process_job(pulse_job)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_pulse_pushes(body, exchange, routing_key):
    """Load a pending push message received from a pulse exchange."""
    # record the message origin for New Relic debugging
    newrelic.agent.add_custom_parameter("exchange", exchange)
    newrelic.agent.add_custom_parameter("routing_key", routing_key)
    loader = PushLoader()
    loader.process(body, exchange)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_failure_lines(job_log):
    """Store failure lines parsed from the structured errorsummary log
    referenced by the given JobLog."""
    logger.debug('Running store_failure_lines for job %s', job_log.job.id)
    failureline.store_failure_lines(job_log)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self, request, project, pk=None):
    """GET implementation returning a single serialised JobNote."""
    try:
        note = JobNote.objects.get(id=pk)
    except JobNote.DoesNotExist:
        return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
    return Response(JobNoteSerializer(note).data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, request, project):
    """POST implementation: store a new JobNote for a job in this project."""
    data = request.data
    job = Job.objects.get(repository__name=project, id=int(data['job_id']))
    JobNote.objects.create(
        job=job,
        failure_classification_id=int(data['failure_classification_id']),
        user=request.user,
        text=data.get('text', ''),
    )
    message = 'note stored for job {0}'.format(data['job_id'])
    return Response({'message': message})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy(self, request, project, pk=None):
    """Delete the JobNote identified by pk."""
    try:
        JobNote.objects.get(id=pk).delete()
    except JobNote.DoesNotExist:
        return Response("No note with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)
    return Response({"message": "Note deleted"})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_text_log_artifacts(job_log):
    """Parse the raw text log at job_log.url into a list of artifact dicts."""
    collection = ArtifactBuilderCollection(job_log.url)
    collection.parse()
    # one artifact dict per builder, with the payload JSON-serialised
    return [
        {
            "job_guid": job_log.job.guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(artifact),
        }
        for name, artifact in collection.artifacts.items()
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_log_artifacts(job_log):
"""Post a list of artifacts to a job.""" |
logger.debug("Downloading/parsing log for log %s", job_log.id)
try:
    artifact_list = extract_text_log_artifacts(job_log)
except LogSizeException as e:
    # oversized logs are skipped outright rather than retried
    job_log.update_status(JobLog.SKIPPED_SIZE)
    logger.warning('Skipping parsing log for %s: %s', job_log.id, e)
    return
except Exception as e:
    job_log.update_status(JobLog.FAILED)
    # Unrecoverable http error (doesn't exist or permission denied).
    # Apparently this can happen somewhat often with taskcluster if
    # the job fails (bug 1154248), so just warn rather than raising,
    # to prevent the noise/load from retrying.
    if isinstance(e, HTTPError) and e.response.status_code in (403, 404):
        logger.warning("Unable to retrieve log for %s: %s", job_log.id, e)
        return
    # anything else is unexpected: log and re-raise so the task retries
    logger.error("Failed to download/parse log for %s: %s", job_log.id, e)
    raise
try:
    serialized_artifacts = serialize_artifact_json_blobs(artifact_list)
    store_job_artifacts(serialized_artifacts)
    job_log.update_status(JobLog.PARSED)
    logger.debug("Stored artifact for %s %s", job_log.job.repository.name,
                 job_log.job.id)
except Exception as e:
    logger.error("Failed to store parsed artifact for %s: %s", job_log.id, e)
    raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_error_summary(job):
    """Build (and cache) the list of bug suggestions for a job.

    Returns the cached summary when present; otherwise generates one
    suggestion entry per TextLogError and caches the result.  Jobs with
    no text log errors return an empty list and are not cached.
    """
    cache_key = 'error-summary-{}'.format(job.id)
    cached = cache.get(cache_key)
    if cached is not None:
        return cached
    errors = TextLogError.objects.filter(step__job=job)
    # nothing to summarise and nothing worth caching
    if not errors:
        return []
    # memoise terms generated per error line to avoid excessive querying
    term_cache = {}
    summary = [bug_suggestions_line(error, term_cache) for error in errors]
    cache.set(cache_key, summary, BUG_SUGGESTION_CACHE_TIMEOUT)
    return summary
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_error_search_term(error_line):
""" Generate a search term from the given error_line string. Attempt to build a search term that will yield meaningful results when used in a MySQL FTS query. """ |
if not error_line:
    return None
# This is strongly inspired by
# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73
tokens = error_line.split(" | ")
search_term = None
if len(tokens) >= 3:
    # If this is process output then discard the token with the PID
    if len(tokens) > 3 and OUTPUT_RE.match(tokens[0]):
        tokens = tokens[1:]
    # it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
    test_name_or_path = tokens[1]
    message = tokens[2]
    # Leak failure messages are of the form:
    # leakcheck | .*\d+ bytes leaked (Object-1, Object-2, Object-3, ...)
    match = LEAK_RE.search(message)
    if match:
        # whichever capture group matched carries the leaked-object list
        search_term = match.group(1) if match.group(1) is not None else match.group(2)
    else:
        # For reftests, remove the reference path from the tokens as this is
        # not very unique
        test_name_or_path = REFTEST_RE.sub("", test_name_or_path)
        for splitter in ("/", "\\"):
            # if this is a path, we are interested in the last part
            test_name_or_path = test_name_or_path.split(splitter)[-1]
        search_term = test_name_or_path
# If the failure line was not in the pipe symbol delimited format or the search term
# will likely return too many (or irrelevant) results (eg: too short or matches terms
# on the blacklist), then we fall back to searching for the entire failure line if
# it is suitable.
if not (search_term and is_helpful_search_term(search_term)):
    if is_helpful_search_term(error_line):
        search_term = error_line
    else:
        search_term = None
# Searching for extremely long search terms is undesirable, since:
# a) Bugzilla's max summary length is 256 characters, and once "Intermittent "
#    and platform/suite information is prefixed, there are even fewer characters
#    left for us to use for the failure string against which we need to match.
# b) For long search terms, the additional length does little to prevent against
#    false positives, but means we're more susceptible to false negatives due to
#    run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
    search_term = search_term[:100]
return search_term
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_crash_signature(error_line):
    """Try to get a crash signature from the given error_line string.

    Returns None when the line doesn't match the crash pattern or the
    captured signature would be an unhelpful search term.
    """
    match = CRASH_RE.match(error_line)
    if match and is_helpful_search_term(match.group(1)):
        return match.group(1)
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_helpful_search_term(search_term):
    """Decide whether the given search term would yield useful suggestions.

    A term is "helpful" when it is longer than four characters (after
    stripping surrounding whitespace) and is not one of the generic
    strings below that would match an excessive number of bug summaries.
    """
    term = search_term.strip()
    # generic strings that match far too many bug summaries
    unhelpful_terms = {
        'automation.py',
        'remoteautomation.py',
        'Shutdown',
        'undefined',
        'Main app process exited normally',
        'Traceback (most recent call last):',
        'Return code: 0',
        'Return code: 1',
        'Return code: 2',
        'Return code: 9',
        'Return code: 10',
        'mozalloc_abort(char const*)',
        'mozalloc_abort',
        'Exiting 1',
        'Exiting 9',
        'CrashingThread(void *)',
        'libSystem.B.dylib + 0xd7a',
        'linux-gate.so + 0x424',
        'TypeError: content is null',
        'leakcheck',
        'ImportError: No module named pygtk',
        '# TBPL FAILURE #',
    }
    return len(term) > 4 and term not in unhelpful_terms
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_failures_fixed_by_commit():
""" Return all job failures annotated with "fixed by commit" grouped by reason given for annotation. It returns a dictionary with a revision or bug ID as the key (bug ID is used for intermittent failures and the revision is used for real failures). For SETA's purposes we only care about revisions (real failures). The failures for *real failures* will contain all jobs that have been starred as "fixed by commit". Notice that the data does not tell you on which repository a root failure was fixed. For instance, in the raw data you might see a reference to 9fa614d8310d which is a back out and it is referenced by 12 starred jobs: https://treeherder.mozilla.org/#/jobs?repo=autoland&filter-searchStr=android%20debug%20cpp&tochange=9fa614d8310db9aabe85cc3c3cff6281fe1edb0c The raw data will show those 12 jobs. The returned data will look like this: { "44d29bac3654": [ ["android-4-0-armv7-api15", "opt", "android-lint"], ["android-4-0-armv7-api15", "opt", "android-api-15-gradle-dependencies"], ] } """ |
failures = defaultdict(list)
option_collection_map = models.OptionCollection.objects.get_option_collection_map()
# failure_classification=2 — presumably the "fixed by commit" row; TODO
# confirm against the FailureClassification fixtures
fixed_by_commit_data_set = models.JobNote.objects.filter(
    failure_classification=2,
    created__gt=timezone.now() - timedelta(days=SETA_FIXED_BY_COMMIT_DAYS),
    text__isnull=False,
    job__repository__name__in=SETA_FIXED_BY_COMMIT_REPOS
).exclude(
    job__signature__build_platform__in=SETA_UNSUPPORTED_PLATFORMS
).exclude(
    text=""
).select_related('job', 'job__signature', 'job__job_type')
# check if at least one fixed by commit job meets our requirements without populating queryset
if not fixed_by_commit_data_set.exists():
    logger.warning("We couldn't find any fixed-by-commit jobs")
    return failures
# now process the fixed by commit jobs in batches using django's queryset iterator
for job_note in fixed_by_commit_data_set.iterator():
    # if we have http://hg.mozilla.org/rev/<rev> and <rev>, we will only use <rev>
    revision_id = job_note.text.strip('/')
    revision_id = revision_id.split('/')[-1]
    # This prevents the empty string case and ignores bug ids
    if not revision_id or len(revision_id) < 12:
        continue
    # We currently don't guarantee that text is actually a revision
    # Even if not perfect the main idea is that a bunch of jobs were annotated with
    # a unique identifier. The assumption is that the text is unique
    #
    # I've seen these values being used:
    #  * 12 char revision
    #  * 40 char revision
    #  * link to revision on hg
    #  * revisionA & revisionB
    #  * should be fixed by <revision>
    #  * bug id
    #
    # Note that if some jobs are annotated with the 12char revision and others with the
    # 40char revision we will have two disjunct set of failures
    #
    # Some of this will be improved in https://bugzilla.mozilla.org/show_bug.cgi?id=1323536
    try:
        # check if jobtype is supported by SETA (see treeherder/seta/settings.py)
        if job_note.job.signature.build_system_type != 'buildbot':
            if not job_note.job.job_type.name.startswith(tuple(SETA_SUPPORTED_TC_JOBTYPES)):
                continue
        testtype = parse_testtype(
            build_system_type=job_note.job.signature.build_system_type,  # e.g. taskcluster
            job_type_name=job_note.job.job_type.name,  # e.g. Mochitest
            platform_option=job_note.job.get_platform_option(option_collection_map),  # e.g. 'opt'
            ref_data_name=job_note.job.signature.name,  # buildername or task label
        )
        if testtype:
            if is_job_blacklisted(testtype):
                continue
        else:
            logger.warning('We were unable to parse %s/%s',
                           job_note.job.job_type.name, job_note.job.signature.name)
            continue
        # we now have a legit fixed-by-commit job failure
        failures[revision_id].append(unique_key(
            testtype=testtype,
            buildtype=job_note.job.get_platform_option(option_collection_map),  # e.g. 'opt'
            platform=job_note.job.signature.build_platform
        ))
    except models.Job.DoesNotExist:
        logger.warning('job_note %s has no job associated to it', job_note.id)
        continue
logger.warning("Number of fixed_by_commit revisions: %s", len(failures))
return failures
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score_matches(matches, score_multiplier=(1, 1)):
    """Yield (score, classified_failure_id) pairs for the given matches.

    Each match's stored score is scaled by the optional multiplier,
    expressed as a (numerator, denominator) pair.
    """
    numerator, denominator = score_multiplier
    for candidate in matches:
        # scale the stored score by the supplied ratio
        adjusted_score = candidate.score * numerator / denominator
        yield (adjusted_score, candidate.classified_failure_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def time_boxed(func, iterable, time_budget, *args):
    """Apply func to each item of iterable within a time budget.

    Yields func(item, *args) for each item, stopping once the elapsed
    time exceeds time_budget (given in milliseconds).  The budget is
    checked after each yield, so at least one item is always processed —
    which is also useful for testing.
    """
    deadline = time.time() + time_budget / 1000  # budget is in milliseconds
    for item in iterable:
        yield func(item, *args)
        if time.time() - (deadline - time_budget / 1000) > time_budget / 1000:
            return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def job_priority_index(job_priorities):
    """Build a lookup table over the job priorities rows.

    Maps each priority's unique (testtype, buildtype, platform) key to its
    primary key and build system type, reducing repeated iteration over
    the DB rows.  Raises DuplicateKeyError on a repeated key, which a
    unique composite index in models.py should make impossible.
    """
    index = {}
    for priority in job_priorities:
        key = priority.unique_identifier()
        if key in index:
            msg = '"{}" should be a unique job priority and that is unexpected.'.format(key)
            raise DuplicateKeyError(msg)
        # key is (testtype, buildtype, platform)
        index[key] = {'pk': priority.id, 'build_system_type': priority.buildsystem}
    return index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, pulse_job):
    """ Transform a pulse job into a job that can be written to disk. Log References and artifacts will also be transformed and loaded with the job. We can rely on the structure of ``pulse_job`` because it will already have been validated against the JSON Schema at this point. """ |
    job_guid = pulse_job["taskId"]
    # Normalized payload; optional "display" fields fall back to "unknown"
    # when the producer omitted them.
    x = {
        "job": {
            "job_guid": job_guid,
            "name": pulse_job["display"].get("jobName", "unknown"),
            "job_symbol": self._get_job_symbol(pulse_job),
            "group_name": pulse_job["display"].get("groupName", "unknown"),
            "group_symbol": pulse_job["display"].get("groupSymbol"),
            "product_name": pulse_job.get("productName", "unknown"),
            "state": pulse_job["state"],
            "result": self._get_result(pulse_job),
            "reason": pulse_job.get("reason", "unknown"),
            "who": pulse_job.get("owner", "unknown"),
            "build_system_type": pulse_job["buildSystem"],
            "tier": pulse_job.get("tier", 1),
            "machine": self._get_machine(pulse_job),
            "option_collection": self._get_option_collection(pulse_job),
            "log_references": self._get_log_references(pulse_job),
            "artifacts": self._get_artifacts(pulse_job, job_guid),
        },
        "superseded": pulse_job.get("coalesced", []),
        "revision": pulse_job["origin"]["revision"]
    }
    # some or all the time fields may not be present in some cases
    for k, v in self.TIME_FIELD_MAP.items():
        if v in pulse_job:
            x["job"][k] = to_timestamp(pulse_job[v])
    # if only one platform is given, use it.
    # Prefer buildMachine, then runMachine, then an empty dict.
    default_platform = pulse_job.get(
        "buildMachine",
        pulse_job.get("runMachine", {}))
    for k, v in self.PLATFORM_FIELD_MAP.items():
        # Fall back to the default machine when the platform-specific
        # entry is absent from the pulse message.
        platform_src = pulse_job[v] if v in pulse_job else default_platform
        x["job"][k] = self._get_platform(platform_src)
    # add some taskcluster metadata if it's available
    # currently taskcluster doesn't pass the taskId directly, so we'll
    # derive it from the guid, where it is stored in uncompressed
    # guid form of a slug (see: https://github.com/taskcluster/slugid)
    # FIXME: add support for processing the taskcluster information
    # properly, when it's available:
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1323110#c7
    try:
        # Guids that don't contain a '/' (no retry id) raise ValueError
        # here and the taskcluster metadata is simply skipped.
        (decoded_task_id, retry_id) = job_guid.split('/')
        # As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
        real_task_id = slugid.encode(uuid.UUID(decoded_task_id))
        x["job"].update({
            "taskcluster_task_id": real_task_id,
            "taskcluster_retry_id": int(retry_id)
        })
    # TODO: Figure out what exception types we actually expect here.
    except Exception:
        pass
    return x
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self, request, project, pk=None):
    """ GET method implementation for detail view of ``push`` """
    try:
        # Look the push up by primary key within the named repository.
        push = Push.objects.get(repository__name=project, id=pk)
        return Response(PushSerializer(push).data)
    except Push.DoesNotExist:
        return Response("No push with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def status(self, request, project, pk=None):
    """ Return a count of the jobs belonging to this push grouped by job status. """
    try:
        target = Push.objects.get(id=pk)
    except Push.DoesNotExist:
        message = "No push with id: {0}".format(pk)
        return Response(message, status=HTTP_404_NOT_FOUND)
    return Response(target.get_status())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def health(self, request, project):
    """ Return a calculated assessment of the health of this push. """ |
    # The push is identified by the ``revision`` query parameter within the
    # given project (repository name).
    revision = request.query_params.get('revision')
    try:
        push = Push.objects.get(revision=revision, repository__name=project)
    except Push.DoesNotExist:
        return Response("No push with revision: {0}".format(revision),
                        status=HTTP_404_NOT_FOUND)
    # Only the test-failure metric is computed so far; the push fails as a
    # whole as soon as any failure still needs investigation.
    push_health_test_failures = get_push_health_test_failures(push, REPO_GROUPS['trunk'])
    test_result = 'fail' if len(push_health_test_failures['needInvestigation']) else 'pass'
    return Response({
        'revision': revision,
        'id': push.id,
        'result': test_result,
        'metrics': [
            {
                'name': 'Tests',
                'result': test_result,
                'failures': push_health_test_failures,
            },
            # The remaining metrics are hard-coded placeholders until
            # their computations are implemented.
            {
                'name': 'Builds (Not yet implemented)',
                'result': 'pass',
                'details': ['Wow, everything passed!'],
            },
            {
                'name': 'Linting (Not yet implemented)',
                'result': 'pass',
                'details': ['Gosh, this code is really nicely formatted.'],
            },
            {
                'name': 'Coverage (Not yet implemented)',
                'result': 'pass',
                'details': [
                    'Covered 42% of the tests that are needed for feature ``foo``.',
                    'Covered 100% of the tests that are needed for feature ``bar``.',
                    'The ratio of people to cake is too many...',
                ],
            },
            {
                'name': 'Performance (Not yet implemented)',
                'result': 'pass',
                'details': ['Ludicrous Speed'],
            },
        ],
    })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decisiontask(self, request, project):
    """ Return the decision task ids for the pushes. """
    push_ids = request.query_params.getlist('push_ids')
    decision_jobs = Job.objects.filter(
        push_id__in=push_ids,
        job_type=JobType.objects.get(name='Gecko Decision Task'),
    ).select_related('taskcluster_metadata')
    if not decision_jobs:
        return Response("No decision tasks found for pushes: {}".format(push_ids),
                        status=HTTP_404_NOT_FOUND)
    # Map each push id to the task id of its decision job.
    task_ids = {}
    for job in decision_jobs:
        task_ids[job.push_id] = job.taskcluster_metadata.task_id
    return Response(task_ids)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def language_file_exists(language_code):
    """ Check if TinyMCE has a language file for the specified lang code :param language_code: language code :type language_code: str :return: check result :rtype: bool """
    lang_path = os.path.join(
        'tinymce', 'js', 'tinymce', 'langs', '{0}.js'.format(language_code))
    # staticfiles finders return None when the asset is not present.
    return finders.find(lang_path) is not None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_language_config():
    """ Creates a language configuration for TinyMCE4 based on Django project settings :return: language- and locale-related parameters for TinyMCE 4 :rtype: dict """
    code = convert_language_code(get_language() or settings.LANGUAGE_CODE)
    if not language_file_exists(code):
        # Try the bare two-letter language before giving up.
        code = code[:2]
        if not language_file_exists(code):
            # Fall back to English if Tiny MCE 4 does not have required translation
            code = 'en'
    return {
        'language': code,
        'directionality': 'rtl' if get_language_bidi() else 'ltr',
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_spellcheck_config():
    """ Create TinyMCE spellchecker config based on Django settings :return: spellchecker parameters for TinyMCE :rtype: dict """
    config = {}
    if not mce_settings.USE_SPELLCHECKER:
        return config
    from enchant import list_languages
    available = list_languages()
    if settings.DEBUG:
        logger.info('Enchant languages: {0}'.format(available))
    names = []
    for code, label in settings.LANGUAGES:
        code = convert_language_code(code)
        if code not in available:
            # Retry with the bare two-letter language code.
            code = code[:2]
        if code not in available:
            logger.warning('Missing {0} spellchecker dictionary!'.format(code))
            continue
        # The first language with a dictionary becomes the default.
        config.setdefault('spellchecker_language', code)
        names.append('{0}={1}'.format(label, code))
    config['spellchecker_languages'] = ','.join(names)
    return config
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_language_code(django_lang):
    """ Converts Django language codes "ll-cc" into ISO codes "ll_CC" or "ll" :param django_lang: Django language code as ll-cc :type django_lang: str :return: ISO language code as ll_CC :rtype: str """
    parts = django_lang.split('-')
    if len(parts) > 1:
        # Country/script part is upper-cased per ISO convention.
        return '{0}_{1}'.format(parts[0], parts[1].upper())
    return parts[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_managed():
    """ Check if a Django project is being managed with ``manage.py`` or ``django-admin`` scripts :return: Check result :rtype: bool """
    # Fix: the dot in 'manage.py' is now escaped so it only matches a
    # literal dot (previously it matched any character).  The bare 'django'
    # alternative already covers 'django-admin'; it is kept explicit for
    # readability.
    pattern = re.compile(r'manage\.py|django-admin|django')
    return any(pattern.search(arg) is not None for arg in sys.argv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spell_check(request):
    """ Implements the TinyMCE 4 spellchecker protocol :param request: Django http request with JSON-RPC payload from TinyMCE 4 containing a language code and a text to check for errors. :type request: django.http.request.HttpRequest :return: Django http response containing JSON-RPC payload with spellcheck results for TinyMCE 4 :rtype: django.http.JsonResponse """ |
    # The request body is a JSON-RPC payload; its ``id`` is echoed back.
    data = json.loads(request.body.decode('utf-8'))
    output = {'id': data['id']}
    error = None
    status = 200
    try:
        if data['params']['lang'] not in list_languages():
            error = 'Missing {0} dictionary!'.format(data['params']['lang'])
            raise LookupError(error)
        spell_checker = checker.SpellChecker(data['params']['lang'])
        spell_checker.set_text(strip_tags(data['params']['text']))
        # Iterating a pyenchant SpellChecker steps through misspelled words;
        # ``.word``/``.suggest()`` refer to the current one (per the
        # pyenchant API), hence the unused loop variable ``err``.
        output['result'] = {spell_checker.word: spell_checker.suggest()
                            for err in spell_checker}
    except NameError:
        # Presumably raised when pyenchant is absent and ``list_languages``
        # was never imported at module level -- TODO confirm.
        error = 'The pyenchant package is not installed!'
        logger.exception(error)
    except LookupError:
        # ``error`` was set just before the raise above, so it is reused here.
        logger.exception(error)
    except Exception:
        error = 'Unknown error!'
        logger.exception(error)
    if error is not None:
        # Any failure is reported back with HTTP 500 and an ``error`` field.
        output['error'] = error
        status = 500
    return JsonResponse(output, status=status)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def css(request):
    """ Custom CSS for TinyMCE 4 widget By default it fixes widget's position in Django Admin :param request: Django http request :type request: django.http.request.HttpRequest :return: Django http response with CSS file for TinyMCE 4 :rtype: django.http.HttpResponse """
    if 'grappelli' in settings.INSTALLED_APPS:
        margin_left = 0
    elif VERSION[:2] <= (1, 8):
        margin_left = 110  # old style admin
    else:
        margin_left = 170  # Django >= 1.9 style admin
    context = {
        'margin_left': margin_left,
        # Django >= 2.0 ships the responsive admin skin.
        'responsive_admin': VERSION[:2] >= (2, 0),
    }
    body = render_to_string('tinymce/tinymce4.css',
                            context=context,
                            request=request)
    return HttpResponse(body, content_type='text/css; charset=utf-8')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filebrowser(request):
    """ JavaScript callback function for `django-filebrowser`_ :param request: Django http request :type request: django.http.request.HttpRequest :return: Django http response with filebrowser JavaScript code for for TinyMCE 4 :rtype: django.http.HttpResponse .. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser """
    try:
        # Older filebrowser setups expose the URL without a namespace.
        fb_url = reverse('fb_browse')
    except Exception:
        # Fix: a bare ``except:`` previously also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers NoReverseMatch here.
        fb_url = reverse('filebrowser:fb_browse')
    return HttpResponse(jsmin(render_to_string('tinymce/filebrowser.js',
                                               context={'fb_url': fb_url},
                                               request=request)),
                        content_type='application/javascript; charset=utf-8')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(text):
    """Flag offensive words based on the GLAAD reference guide."""
    err = "glaad.offensive_terms"
    msg = "Offensive term. Remove it or consider the context."
    offensive = [
        "fag",
        "faggot",
        "dyke",
        "sodomite",
        "homosexual agenda",
        "gay agenda",
        "transvestite",
        "homosexual lifestyle",
        "gay lifestyle"
        # "homo" is excluded: it may create false positives without
        # additional context.
        # FIXME use topic detector to decide whether "homo" is offensive
    ]
    return existence_check(text, offensive, err, msg, join=True,
                           ignore_case=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(check=None):
    """ Compute the linter's score on the corpus. Proselint's score reflects the desire to have a linter that catches many errors, but which takes false alarms seriously. It is better not to say something than to say the wrong thing, and the harm from saying the wrong thing is greater than the benefit of saying the right thing. Thus our score metric is defined as: TP * (TP / (FP + TP)) ^ k, where TP is the number of true positives (hits), FP is the number of false positives (false alarms), and k > 0 is a temperature parameter that determines the penalty for imprecision. In general, we should choose a large value of k, one that strongly discourages the creation of rules that can't be trusted. Suppose that k = 2. Then if the linter detects 100 errors, of which 10 are false positives, the score is 81. """ |
    # NOTE(review): the ``check`` parameter is never used in the body.
    tp = 0  # true positives (hits)
    fp = 0  # false positives (false alarms)
    parent_directory = os.path.dirname(proselint_path)
    path_to_corpus = os.path.join(parent_directory, "corpora", "0.1.0")
    for root, _, files in os.walk(path_to_corpus):
        files = [f for f in files if f.endswith(".md")]
        for f in files:
            fullpath = os.path.join(root, f)
            # Run the linter.
            print("Linting {}".format(f))
            out = subprocess.check_output(["proselint", fullpath])
            # Determine the number of errors.
            # NOTE(review): check_output returns bytes on Python 3 while
            # ``regex`` is a str, so re.finditer would raise TypeError
            # there -- presumably this ran under Python 2; confirm or
            # decode ``out`` first.
            regex = r".+?:(?P<line>\d+):(?P<col>\d+): (?P<message>.+)"
            num_errors = len(tuple(re.finditer(regex, out)))
            print("Found {} errors.".format(num_errors))
            # Open the document.
            # NOTE(review): "open" is the macOS file opener -- this step
            # presumably assumes macOS; confirm before running elsewhere.
            subprocess.call(["open", fullpath])
            # Ask the scorer how many of the errors were false alarms?
            input_val = None
            while not isinstance(input_val, int):
                try:
                    input_val = input("# of false alarms? ")
                    if input_val == "exit":
                        # Typing "exit" aborts scoring with no return value.
                        return
                    else:
                        input_val = int(input_val)
                        fp += input_val
                        tp += (num_errors - input_val)
                except ValueError:
                    # Non-numeric input: re-prompt.
                    pass
            print("Currently {} hits and {} false alarms\n---".format(tp, fp))
    # Score formula with temperature k = 2 (see docstring).
    if (tp + fp) > 0:
        return tp * (1.0 * tp / (tp + fp)) ** 2
    else:
        return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_broken_link(url):
    """Determine whether the link returns a 404 error."""
    # A browser-like User-Agent avoids being rejected by some servers.
    request = urllib_request.Request(
        url, headers={'User-Agent': 'Mozilla/5.0'})
    try:
        urllib_request.urlopen(request).read()
    except (urllib_request.URLError, SocketError):
        return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(text):
    """Advice on sudden vs suddenly."""
    message = u"Suddenly is nondescript, slows the action, and warns your reader."
    return existence_check(text,
                           ["Suddenly,"],
                           "misc.suddenly",
                           message,
                           max_errors=3,
                           require_padding=False,
                           offset=-1,
                           ignore_case=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(text):
    """Avoid 'very'."""
    message = ("Substitute 'damn' every time you're "
               "inclined to write 'very'; your editor will delete it "
               "and the writing will be just as it should be.")
    return existence_check(text, ["very"], "weasel_words.very", message,
                           max_errors=1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_p_equals_zero(text):
    """Check for p = 0.000."""
    # A p-value reported as exactly zero is almost always a rounding artifact.
    variants = [
        "p = 0.00",
        "p = 0.000",
        "p = 0.0000",
    ]
    return existence_check(
        text, variants,
        "psychology.p_equals_zero",
        "Unless p really equals zero, you should use more decimal places.",
        join=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rate():
    """Set rate limits for authenticated and nonauthenticated users."""
    credentials = request.authorization
    # Authenticated clients get a 10x higher limit.
    if credentials and check_auth(credentials.username, credentials.password):
        return "600/minute"
    return "60/minute"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lint():
    """Run linter on the provided text and return the results.""" |
    # Two modes: submitting ``text`` enqueues a background job (202 + job
    # id); polling with ``job_id`` returns either the job status or the
    # finished results.
    if 'text' in request.values:
        text = unquote(request.values['text'])
        job = q.enqueue(worker_function, text)
        return jsonify(job_id=job.id), 202
    elif 'job_id' in request.values:
        job = q.fetch_job(request.values['job_id'])
        if not job:
            return jsonify(
                status="error",
                message="No job with requested job_id."), 404
        elif job.result is None:
            # rq leaves ``result`` as None until the worker finishes.
            return jsonify(
                status="error",
                message="Job is not yet ready."), 202
        else:
            errors = []
            for i, e in enumerate(job.result):
                app.logger.debug(e)
                # Error tuple layout: e[0]=check, e[1]=message, e[2]=line,
                # e[3]=column, e[4]=start, e[5]=end, e[7]=severity,
                # e[8]=replacements.  e[6] is skipped; extent is recomputed
                # as end - start.
                errors.append({
                    "check": e[0],
                    "message": e[1],
                    "line": e[2],
                    "column": e[3],
                    "start": e[4],
                    "end": e[5],
                    "extent": e[5] - e[4],
                    "severity": e[7],
                    "replacements": e[8],
                    "source_name": "",
                    "source_url": "",
                })
            return jsonify(
                status="success",
                data={"errors": errors})
    # NOTE(review): if neither 'text' nor 'job_id' is present the function
    # falls through and returns None -- Flask would raise; confirm intended.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_without_your_collusion(text):
    """Check for the illogical phrase 'without your collusion'."""
    err = "misc.illogic.collusion"
    # Fix: 'collusion' requires another party, so the suggested word is
    # 'acquiescence' (the original message misspelled it as 'aquiescence').
    msg = "It's impossible to defraud yourself. Try 'acquiescence'."
    regex = "without your collusion"
    return existence_check(
        text, [regex], err, msg, require_padding=False, offset=-1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_exclamations_ppm(text):
    """Make sure that the exclamation ppm is under 30."""
    err = "leonard.exclamation.30ppm"
    msg = u"More than 30 ppm of exclamations. Keep them under control."
    pattern = r"\w!"
    hits = re.findall(pattern, text)
    word_count = len(text.split(" "))
    parts_per_million = (len(hits) * 1.0 / word_count) * 1e6
    # Flag only when over the threshold AND there is more than a single
    # exclamation; report the position of the first one.
    if parts_per_million > 30 and len(hits) > 1:
        position = re.search(pattern, text).start() + 1
        return [(position, position + 1, err, msg, ".")]
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(text):
    """Suggest preferred forms given the reference document."""
    err = "glaad.terms"
    msg = "Possibly offensive term. Consider using '{}' instead of '{}'."
    preferences = [
        ["gay man", ["homosexual man"]],
        ["gay men", ["homosexual men"]],
        ["lesbian", ["homosexual woman"]],
        ["lesbians", ["homosexual women"]],
        ["gay people", ["homosexual people"]],
        ["gay couple", ["homosexual couple"]],
        ["sexual orientation", ["sexual preference"]],
        ["openly gay", ["admitted homosexual", "avowed homosexual"]],
        ["equal rights", ["special rights"]],
    ]
    return preferred_forms_check(text, preferences, err, msg,
                                 ignore_case=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _delete_compiled_python_files():
"""Remove files with a 'pyc' extension.""" |
for path, _, files in os.walk(os.getcwd()):
for fname in [f for f in files if os.path.splitext(f)[1] == ".pyc"]:
try:
os.remove(os.path.join(path, fname))
except OSError:
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_errors(filename, errors, output_json=False, compact=False):
    """Print the errors, resulting from lint, for filename."""
    if output_json:
        click.echo(errors_to_json(errors))
        return
    for error in errors:
        (check, message, line, column, start, end,
         extent, severity, replacements) = error
        # Compact output replaces the filename with "-".
        label = "-" if compact else filename
        click.echo("{0}:{1}:{2}: {3} {4}".format(
            label, 1 + line, 1 + column, check, message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proselint(paths=None, version=None, clean=None, debug=None, output_json=None, time=None, demo=None, compact=None):
    """A CLI for proselint, a linter for prose.""" |
    # Parameters are presumably wired to click options; the decorators are
    # not visible here -- confirm against the CLI module.
    if time:
        # --time: print timing diagnostics and do nothing else.
        click.echo(timing_test())
        return
    # In debug or clean mode, delete cache & *.pyc files before running.
    if debug or clean:
        clear_cache()
    # Use the demo file by default.
    if demo:
        paths = [demo_file]
    # Expand the list of directories and files.
    filepaths = extract_files(list(paths))
    # Lint the files
    num_errors = 0
    # Use stdin if no paths were specified
    if len(paths) == 0:
        filepaths.append('-')
    for fp in filepaths:
        try:
            if fp == '-':
                # '-' is the conventional stdin marker.
                fp = '<stdin>'
                f = sys.stdin
            else:
                f = click.open_file(
                    fp, 'r', encoding="utf-8", errors="replace")
            errors = lint(f, debug=debug)
            num_errors += len(errors)
            print_errors(fp, errors, output_json, compact=compact)
        except Exception:
            # A failure on one file is reported but does not stop the run.
            traceback.print_exc()
    # Return an exit code
    close_cache_shelves()
    # Exit non-zero when any file produced lint errors.
    if num_errors > 0:
        sys.exit(1)
    else:
        sys.exit(0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.