repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
mozilla/treeherder | treeherder/webapp/graphql/helpers.py | collect_fields | def collect_fields(node):
"""
Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230
"""
fields = set()
for leaf in node:
if leaf.get('kind', None) == "Field":
fields.add(leaf["name"]["value"])
if leaf.get("selection_set", None):
fields = fields.union(collect_fields(leaf["selection_set"]["selections"]))
return fields | python | def collect_fields(node):
"""
Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230
"""
fields = set()
for leaf in node:
if leaf.get('kind', None) == "Field":
fields.add(leaf["name"]["value"])
if leaf.get("selection_set", None):
fields = fields.union(collect_fields(leaf["selection_set"]["selections"]))
return fields | [
"def",
"collect_fields",
"(",
"node",
")",
":",
"fields",
"=",
"set",
"(",
")",
"for",
"leaf",
"in",
"node",
":",
"if",
"leaf",
".",
"get",
"(",
"'kind'",
",",
"None",
")",
"==",
"\"Field\"",
":",
"fields",
".",
"add",
"(",
"leaf",
"[",
"\"name\"",... | Get all the unique field names that are eligible for optimization
Requested a function like this be added to the ``info`` object
upstream in graphene_django:
https://github.com/graphql-python/graphene-django/issues/230 | [
"Get",
"all",
"the",
"unique",
"field",
"names",
"that",
"are",
"eligible",
"for",
"optimization"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/graphql/helpers.py#L2-L18 | train | 205,500 |
mozilla/treeherder | treeherder/webapp/graphql/helpers.py | optimize | def optimize(qs, info_dict, field_map):
"""Add either select_related or prefetch_related to fields of the qs"""
fields = collect_fields(info_dict)
for field in fields:
if field in field_map:
field_name, opt = field_map[field]
qs = (qs.prefetch_related(field_name)
if opt == "prefetch" else qs.select_related(field_name))
return qs | python | def optimize(qs, info_dict, field_map):
"""Add either select_related or prefetch_related to fields of the qs"""
fields = collect_fields(info_dict)
for field in fields:
if field in field_map:
field_name, opt = field_map[field]
qs = (qs.prefetch_related(field_name)
if opt == "prefetch" else qs.select_related(field_name))
return qs | [
"def",
"optimize",
"(",
"qs",
",",
"info_dict",
",",
"field_map",
")",
":",
"fields",
"=",
"collect_fields",
"(",
"info_dict",
")",
"for",
"field",
"in",
"fields",
":",
"if",
"field",
"in",
"field_map",
":",
"field_name",
",",
"opt",
"=",
"field_map",
"[... | Add either select_related or prefetch_related to fields of the qs | [
"Add",
"either",
"select_related",
"or",
"prefetch_related",
"to",
"fields",
"of",
"the",
"qs"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/graphql/helpers.py#L21-L30 | train | 205,501 |
mozilla/treeherder | treeherder/services/elasticsearch/connection.py | build_connection | def build_connection(url):
"""
Build an Elasticsearch connection with the given url
Elastic.co's Heroku addon doesn't create credientials with access to the
cluster by default so they aren't exposed in the URL they provide either.
This function works around the situation by grabbing our credentials from
the environment via Django settings and building a connection with them.
"""
username = os.environ.get('ELASTICSEARCH_USERNAME')
password = os.environ.get('ELASTICSEARCH_PASSWORD')
if username and password:
return Elasticsearch(url, http_auth=(username, password))
return Elasticsearch(url) | python | def build_connection(url):
"""
Build an Elasticsearch connection with the given url
Elastic.co's Heroku addon doesn't create credientials with access to the
cluster by default so they aren't exposed in the URL they provide either.
This function works around the situation by grabbing our credentials from
the environment via Django settings and building a connection with them.
"""
username = os.environ.get('ELASTICSEARCH_USERNAME')
password = os.environ.get('ELASTICSEARCH_PASSWORD')
if username and password:
return Elasticsearch(url, http_auth=(username, password))
return Elasticsearch(url) | [
"def",
"build_connection",
"(",
"url",
")",
":",
"username",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'ELASTICSEARCH_USERNAME'",
")",
"password",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'ELASTICSEARCH_PASSWORD'",
")",
"if",
"username",
"and",
"passw... | Build an Elasticsearch connection with the given url
Elastic.co's Heroku addon doesn't create credientials with access to the
cluster by default so they aren't exposed in the URL they provide either.
This function works around the situation by grabbing our credentials from
the environment via Django settings and building a connection with them. | [
"Build",
"an",
"Elasticsearch",
"connection",
"with",
"the",
"given",
"url"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/connection.py#L9-L24 | train | 205,502 |
mozilla/treeherder | treeherder/log_parser/artifactbuilders.py | ArtifactBuilderBase.get_artifact | def get_artifact(self):
"""Return the job artifact built by the parser."""
self.artifact[self.parser.name] = self.parser.get_artifact()
return self.artifact | python | def get_artifact(self):
"""Return the job artifact built by the parser."""
self.artifact[self.parser.name] = self.parser.get_artifact()
return self.artifact | [
"def",
"get_artifact",
"(",
"self",
")",
":",
"self",
".",
"artifact",
"[",
"self",
".",
"parser",
".",
"name",
"]",
"=",
"self",
".",
"parser",
".",
"get_artifact",
"(",
")",
"return",
"self",
".",
"artifact"
] | Return the job artifact built by the parser. | [
"Return",
"the",
"job",
"artifact",
"built",
"by",
"the",
"parser",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/artifactbuilders.py#L61-L64 | train | 205,503 |
mozilla/treeherder | treeherder/autoclassify/matchers.py | precise_matcher | def precise_matcher(text_log_error):
"""Query for TextLogErrorMatches identical to matches of the given TextLogError."""
failure_line = text_log_error.metadata.failure_line
logger.debug("Looking for test match in failure %d", failure_line.id)
if failure_line.action != "test_result" or failure_line.message is None:
return
f = {
'text_log_error___metadata__failure_line__action': 'test_result',
'text_log_error___metadata__failure_line__test': failure_line.test,
'text_log_error___metadata__failure_line__subtest': failure_line.subtest,
'text_log_error___metadata__failure_line__status': failure_line.status,
'text_log_error___metadata__failure_line__expected': failure_line.expected,
'text_log_error___metadata__failure_line__message': failure_line.message
}
qwargs = (
Q(text_log_error___metadata__best_classification=None)
& (Q(text_log_error___metadata__best_is_verified=True)
| Q(text_log_error__step__job=text_log_error.step.job))
)
qs = (TextLogErrorMatch.objects.filter(**f)
.exclude(qwargs)
.order_by('-score', '-classified_failure'))
if not qs:
return
# chunk through the QuerySet because it could potentially be very large
# time bound each call to the scoring function to avoid job timeouts
# returns an iterable of (score, classified_failure_id) tuples
chunks = chunked_qs_reverse(qs, chunk_size=20000)
return chain.from_iterable(time_boxed(score_matches, chunks, time_budget=500)) | python | def precise_matcher(text_log_error):
"""Query for TextLogErrorMatches identical to matches of the given TextLogError."""
failure_line = text_log_error.metadata.failure_line
logger.debug("Looking for test match in failure %d", failure_line.id)
if failure_line.action != "test_result" or failure_line.message is None:
return
f = {
'text_log_error___metadata__failure_line__action': 'test_result',
'text_log_error___metadata__failure_line__test': failure_line.test,
'text_log_error___metadata__failure_line__subtest': failure_line.subtest,
'text_log_error___metadata__failure_line__status': failure_line.status,
'text_log_error___metadata__failure_line__expected': failure_line.expected,
'text_log_error___metadata__failure_line__message': failure_line.message
}
qwargs = (
Q(text_log_error___metadata__best_classification=None)
& (Q(text_log_error___metadata__best_is_verified=True)
| Q(text_log_error__step__job=text_log_error.step.job))
)
qs = (TextLogErrorMatch.objects.filter(**f)
.exclude(qwargs)
.order_by('-score', '-classified_failure'))
if not qs:
return
# chunk through the QuerySet because it could potentially be very large
# time bound each call to the scoring function to avoid job timeouts
# returns an iterable of (score, classified_failure_id) tuples
chunks = chunked_qs_reverse(qs, chunk_size=20000)
return chain.from_iterable(time_boxed(score_matches, chunks, time_budget=500)) | [
"def",
"precise_matcher",
"(",
"text_log_error",
")",
":",
"failure_line",
"=",
"text_log_error",
".",
"metadata",
".",
"failure_line",
"logger",
".",
"debug",
"(",
"\"Looking for test match in failure %d\"",
",",
"failure_line",
".",
"id",
")",
"if",
"failure_line",
... | Query for TextLogErrorMatches identical to matches of the given TextLogError. | [
"Query",
"for",
"TextLogErrorMatches",
"identical",
"to",
"matches",
"of",
"the",
"given",
"TextLogError",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/matchers.py#L20-L52 | train | 205,504 |
mozilla/treeherder | treeherder/autoclassify/matchers.py | elasticsearch_matcher | def elasticsearch_matcher(text_log_error):
"""
Query Elasticsearch and score the results.
Uses a filtered search checking test, status, expected, and the message
as a phrase query with non-alphabet tokens removed.
"""
# Note: Elasticsearch is currently disabled in all environments (see bug 1527868).
if not settings.ELASTICSEARCH_URL:
return []
failure_line = text_log_error.metadata.failure_line
if failure_line.action != "test_result" or not failure_line.message:
logger.debug("Skipped elasticsearch matching")
return
filters = [
{'term': {'test': failure_line.test}},
{'term': {'status': failure_line.status}},
{'term': {'expected': failure_line.expected}},
{'exists': {'field': 'best_classification'}}
]
if failure_line.subtest:
query = filters.append({'term': {'subtest': failure_line.subtest}})
query = {
'query': {
'bool': {
'filter': filters,
'must': [{
'match_phrase': {
'message': failure_line.message[:1024],
},
}],
},
},
}
try:
results = search(query)
except Exception:
logger.error("Elasticsearch lookup failed: %s %s %s %s %s",
failure_line.test, failure_line.subtest, failure_line.status,
failure_line.expected, failure_line.message)
raise
if len(results) > 1:
args = (
text_log_error.id,
failure_line.id,
len(results),
)
logger.info('text_log_error=%i failure_line=%i Elasticsearch produced %i results' % args)
newrelic.agent.record_custom_event('es_matches', {
'num_results': len(results),
'text_log_error_id': text_log_error.id,
'failure_line_id': failure_line.id,
})
scorer = MatchScorer(failure_line.message)
matches = [(item, item['message']) for item in results]
best_match = scorer.best_match(matches)
if not best_match:
return
score, es_result = best_match
# TODO: score all results and return
# TODO: just return results with score above cut off?
return [(score, es_result['best_classification'])] | python | def elasticsearch_matcher(text_log_error):
"""
Query Elasticsearch and score the results.
Uses a filtered search checking test, status, expected, and the message
as a phrase query with non-alphabet tokens removed.
"""
# Note: Elasticsearch is currently disabled in all environments (see bug 1527868).
if not settings.ELASTICSEARCH_URL:
return []
failure_line = text_log_error.metadata.failure_line
if failure_line.action != "test_result" or not failure_line.message:
logger.debug("Skipped elasticsearch matching")
return
filters = [
{'term': {'test': failure_line.test}},
{'term': {'status': failure_line.status}},
{'term': {'expected': failure_line.expected}},
{'exists': {'field': 'best_classification'}}
]
if failure_line.subtest:
query = filters.append({'term': {'subtest': failure_line.subtest}})
query = {
'query': {
'bool': {
'filter': filters,
'must': [{
'match_phrase': {
'message': failure_line.message[:1024],
},
}],
},
},
}
try:
results = search(query)
except Exception:
logger.error("Elasticsearch lookup failed: %s %s %s %s %s",
failure_line.test, failure_line.subtest, failure_line.status,
failure_line.expected, failure_line.message)
raise
if len(results) > 1:
args = (
text_log_error.id,
failure_line.id,
len(results),
)
logger.info('text_log_error=%i failure_line=%i Elasticsearch produced %i results' % args)
newrelic.agent.record_custom_event('es_matches', {
'num_results': len(results),
'text_log_error_id': text_log_error.id,
'failure_line_id': failure_line.id,
})
scorer = MatchScorer(failure_line.message)
matches = [(item, item['message']) for item in results]
best_match = scorer.best_match(matches)
if not best_match:
return
score, es_result = best_match
# TODO: score all results and return
# TODO: just return results with score above cut off?
return [(score, es_result['best_classification'])] | [
"def",
"elasticsearch_matcher",
"(",
"text_log_error",
")",
":",
"# Note: Elasticsearch is currently disabled in all environments (see bug 1527868).",
"if",
"not",
"settings",
".",
"ELASTICSEARCH_URL",
":",
"return",
"[",
"]",
"failure_line",
"=",
"text_log_error",
".",
"meta... | Query Elasticsearch and score the results.
Uses a filtered search checking test, status, expected, and the message
as a phrase query with non-alphabet tokens removed. | [
"Query",
"Elasticsearch",
"and",
"score",
"the",
"results",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/matchers.py#L56-L125 | train | 205,505 |
mozilla/treeherder | treeherder/autoclassify/matchers.py | crash_signature_matcher | def crash_signature_matcher(text_log_error):
"""
Query for TextLogErrorMatches with the same crash signature.
Produces two queries, first checking if the same test produces matches
and secondly checking without the same test but lowering the produced
scores.
"""
failure_line = text_log_error.metadata.failure_line
if (failure_line.action != "crash" or
failure_line.signature is None or
failure_line.signature == "None"):
return
f = {
'text_log_error___metadata__failure_line__action': 'crash',
'text_log_error___metadata__failure_line__signature': failure_line.signature,
}
qwargs = (
Q(text_log_error___metadata__best_classification=None)
& (Q(text_log_error___metadata__best_is_verified=True)
| Q(text_log_error__step__job=text_log_error.step.job))
)
qs = (TextLogErrorMatch.objects.filter(**f)
.exclude(qwargs)
.select_related('text_log_error', 'text_log_error___metadata')
.order_by('-score', '-classified_failure'))
size = 20000
time_budget = 500
# See if we can get any matches when filtering by the same test
first_attempt = qs.filter(text_log_error___metadata__failure_line__test=failure_line.test)
chunks = chunked_qs_reverse(first_attempt, chunk_size=size)
scored_matches = chain.from_iterable(time_boxed(score_matches, chunks, time_budget))
if scored_matches:
return scored_matches
# try again without filtering to the test but applying a .8 score multiplyer
chunks = chunked_qs_reverse(qs, chunk_size=size)
scored_matches = chain.from_iterable(time_boxed(
score_matches,
chunks,
time_budget,
score_multiplier=(8, 10),
))
return scored_matches | python | def crash_signature_matcher(text_log_error):
"""
Query for TextLogErrorMatches with the same crash signature.
Produces two queries, first checking if the same test produces matches
and secondly checking without the same test but lowering the produced
scores.
"""
failure_line = text_log_error.metadata.failure_line
if (failure_line.action != "crash" or
failure_line.signature is None or
failure_line.signature == "None"):
return
f = {
'text_log_error___metadata__failure_line__action': 'crash',
'text_log_error___metadata__failure_line__signature': failure_line.signature,
}
qwargs = (
Q(text_log_error___metadata__best_classification=None)
& (Q(text_log_error___metadata__best_is_verified=True)
| Q(text_log_error__step__job=text_log_error.step.job))
)
qs = (TextLogErrorMatch.objects.filter(**f)
.exclude(qwargs)
.select_related('text_log_error', 'text_log_error___metadata')
.order_by('-score', '-classified_failure'))
size = 20000
time_budget = 500
# See if we can get any matches when filtering by the same test
first_attempt = qs.filter(text_log_error___metadata__failure_line__test=failure_line.test)
chunks = chunked_qs_reverse(first_attempt, chunk_size=size)
scored_matches = chain.from_iterable(time_boxed(score_matches, chunks, time_budget))
if scored_matches:
return scored_matches
# try again without filtering to the test but applying a .8 score multiplyer
chunks = chunked_qs_reverse(qs, chunk_size=size)
scored_matches = chain.from_iterable(time_boxed(
score_matches,
chunks,
time_budget,
score_multiplier=(8, 10),
))
return scored_matches | [
"def",
"crash_signature_matcher",
"(",
"text_log_error",
")",
":",
"failure_line",
"=",
"text_log_error",
".",
"metadata",
".",
"failure_line",
"if",
"(",
"failure_line",
".",
"action",
"!=",
"\"crash\"",
"or",
"failure_line",
".",
"signature",
"is",
"None",
"or",... | Query for TextLogErrorMatches with the same crash signature.
Produces two queries, first checking if the same test produces matches
and secondly checking without the same test but lowering the produced
scores. | [
"Query",
"for",
"TextLogErrorMatches",
"with",
"the",
"same",
"crash",
"signature",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/matchers.py#L129-L176 | train | 205,506 |
mozilla/treeherder | treeherder/autoclassify/matchers.py | MatchScorer.best_match | def best_match(self, matches):
"""
Find the most similar string to self.target.
Given a list of candidate strings find the closest match to
self.target, returning the best match with a score indicating closeness
of match.
:param matches: A list of candidate matches
:returns: A tuple of (score, best_match)
"""
best_match = None
for match, message in matches:
self.matcher.set_seq1(message)
ratio = self.matcher.quick_ratio()
if best_match is None or ratio >= best_match[0]:
new_ratio = self.matcher.ratio()
if best_match is None or new_ratio > best_match[0]:
best_match = (new_ratio, match)
return best_match | python | def best_match(self, matches):
"""
Find the most similar string to self.target.
Given a list of candidate strings find the closest match to
self.target, returning the best match with a score indicating closeness
of match.
:param matches: A list of candidate matches
:returns: A tuple of (score, best_match)
"""
best_match = None
for match, message in matches:
self.matcher.set_seq1(message)
ratio = self.matcher.quick_ratio()
if best_match is None or ratio >= best_match[0]:
new_ratio = self.matcher.ratio()
if best_match is None or new_ratio > best_match[0]:
best_match = (new_ratio, match)
return best_match | [
"def",
"best_match",
"(",
"self",
",",
"matches",
")",
":",
"best_match",
"=",
"None",
"for",
"match",
",",
"message",
"in",
"matches",
":",
"self",
".",
"matcher",
".",
"set_seq1",
"(",
"message",
")",
"ratio",
"=",
"self",
".",
"matcher",
".",
"quick... | Find the most similar string to self.target.
Given a list of candidate strings find the closest match to
self.target, returning the best match with a score indicating closeness
of match.
:param matches: A list of candidate matches
:returns: A tuple of (score, best_match) | [
"Find",
"the",
"most",
"similar",
"string",
"to",
"self",
".",
"target",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/matchers.py#L186-L205 | train | 205,507 |
mozilla/treeherder | treeherder/etl/push.py | store_push_data | def store_push_data(repository, pushes):
"""
Stores push data in the treeherder database
pushes = [
{
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
"push_timestamp": 1378293517,
"author": "some-sheriff@mozilla.com",
"revisions": [
{
"comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer",
"author": "John Doe <jdoe@mozilla.com>",
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80"
},
...
]
},
...
]
returns = {
}
"""
if not pushes:
logger.info("No new pushes to store")
return
for push in pushes:
store_push(repository, push) | python | def store_push_data(repository, pushes):
"""
Stores push data in the treeherder database
pushes = [
{
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
"push_timestamp": 1378293517,
"author": "some-sheriff@mozilla.com",
"revisions": [
{
"comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer",
"author": "John Doe <jdoe@mozilla.com>",
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80"
},
...
]
},
...
]
returns = {
}
"""
if not pushes:
logger.info("No new pushes to store")
return
for push in pushes:
store_push(repository, push) | [
"def",
"store_push_data",
"(",
"repository",
",",
"pushes",
")",
":",
"if",
"not",
"pushes",
":",
"logger",
".",
"info",
"(",
"\"No new pushes to store\"",
")",
"return",
"for",
"push",
"in",
"pushes",
":",
"store_push",
"(",
"repository",
",",
"push",
")"
] | Stores push data in the treeherder database
pushes = [
{
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
"push_timestamp": 1378293517,
"author": "some-sheriff@mozilla.com",
"revisions": [
{
"comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer",
"author": "John Doe <jdoe@mozilla.com>",
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80"
},
...
]
},
...
]
returns = {
} | [
"Stores",
"push",
"data",
"in",
"the",
"treeherder",
"database"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/push.py#L36-L67 | train | 205,508 |
mozilla/treeherder | treeherder/perf/models.py | PerformanceDatumManager.cycle_data | def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
"""Delete data older than cycle_interval, splitting the target data
into chunks of chunk_size size."""
max_timestamp = datetime.datetime.now() - cycle_interval
# seperate datums into chunks
while True:
perf_datums_to_cycle = list(self.filter(
repository=repository,
push_timestamp__lt=max_timestamp).values_list('id', flat=True)[:chunk_size])
if not perf_datums_to_cycle:
# we're done!
break
self.filter(id__in=perf_datums_to_cycle).delete()
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time)
# also remove any signatures which are (no longer) associated with
# a job
for signature in PerformanceSignature.objects.filter(
repository=repository):
if not self.filter(signature=signature).exists():
signature.delete() | python | def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
"""Delete data older than cycle_interval, splitting the target data
into chunks of chunk_size size."""
max_timestamp = datetime.datetime.now() - cycle_interval
# seperate datums into chunks
while True:
perf_datums_to_cycle = list(self.filter(
repository=repository,
push_timestamp__lt=max_timestamp).values_list('id', flat=True)[:chunk_size])
if not perf_datums_to_cycle:
# we're done!
break
self.filter(id__in=perf_datums_to_cycle).delete()
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time)
# also remove any signatures which are (no longer) associated with
# a job
for signature in PerformanceSignature.objects.filter(
repository=repository):
if not self.filter(signature=signature).exists():
signature.delete() | [
"def",
"cycle_data",
"(",
"self",
",",
"repository",
",",
"cycle_interval",
",",
"chunk_size",
",",
"sleep_time",
")",
":",
"max_timestamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"cycle_interval",
"# seperate datums into chunks",
"while",
... | Delete data older than cycle_interval, splitting the target data
into chunks of chunk_size size. | [
"Delete",
"data",
"older",
"than",
"cycle_interval",
"splitting",
"the",
"target",
"data",
"into",
"chunks",
"of",
"chunk_size",
"size",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/perf/models.py#L110-L134 | train | 205,509 |
mozilla/treeherder | treeherder/webapp/api/serializers.py | FailureLineNoStackSerializer.to_representation | def to_representation(self, failure_line):
"""
Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead.
"""
try:
matches = failure_line.error.matches.all()
except AttributeError: # failure_line.error can return None
matches = []
tle_serializer = TextLogErrorMatchSerializer(matches, many=True)
classified_failures = models.ClassifiedFailure.objects.filter(error_matches__in=matches)
cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
response = super().to_representation(failure_line)
response['matches'] = tle_serializer.data
response['classified_failures'] = cf_serializer.data
return response | python | def to_representation(self, failure_line):
"""
Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead.
"""
try:
matches = failure_line.error.matches.all()
except AttributeError: # failure_line.error can return None
matches = []
tle_serializer = TextLogErrorMatchSerializer(matches, many=True)
classified_failures = models.ClassifiedFailure.objects.filter(error_matches__in=matches)
cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
response = super().to_representation(failure_line)
response['matches'] = tle_serializer.data
response['classified_failures'] = cf_serializer.data
return response | [
"def",
"to_representation",
"(",
"self",
",",
"failure_line",
")",
":",
"try",
":",
"matches",
"=",
"failure_line",
".",
"error",
".",
"matches",
".",
"all",
"(",
")",
"except",
"AttributeError",
":",
"# failure_line.error can return None",
"matches",
"=",
"[",
... | Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead. | [
"Manually",
"add",
"matches",
"our",
"wrapper",
"of",
"the",
"TLEMetadata",
"-",
">",
"TLE",
"relation",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/serializers.py#L131-L150 | train | 205,510 |
mozilla/treeherder | treeherder/intermittents_commenter/commenter.py | Commenter.check_whiteboard_status | def check_whiteboard_status(self, whiteboard):
"""Extracts stockwell text from a bug's whiteboard status to
determine whether it matches specified stockwell text;
returns a boolean."""
stockwell_text = re.search(r'\[stockwell (.+?)\]', whiteboard)
if stockwell_text is not None:
text = stockwell_text.group(1).split(':')[0]
if text == 'fixed' or text == 'disable-recommended' or text == 'infra' or text == 'disabled':
return True
return False | python | def check_whiteboard_status(self, whiteboard):
"""Extracts stockwell text from a bug's whiteboard status to
determine whether it matches specified stockwell text;
returns a boolean."""
stockwell_text = re.search(r'\[stockwell (.+?)\]', whiteboard)
if stockwell_text is not None:
text = stockwell_text.group(1).split(':')[0]
if text == 'fixed' or text == 'disable-recommended' or text == 'infra' or text == 'disabled':
return True
return False | [
"def",
"check_whiteboard_status",
"(",
"self",
",",
"whiteboard",
")",
":",
"stockwell_text",
"=",
"re",
".",
"search",
"(",
"r'\\[stockwell (.+?)\\]'",
",",
"whiteboard",
")",
"if",
"stockwell_text",
"is",
"not",
"None",
":",
"text",
"=",
"stockwell_text",
".",... | Extracts stockwell text from a bug's whiteboard status to
determine whether it matches specified stockwell text;
returns a boolean. | [
"Extracts",
"stockwell",
"text",
"from",
"a",
"bug",
"s",
"whiteboard",
"status",
"to",
"determine",
"whether",
"it",
"matches",
"specified",
"stockwell",
"text",
";",
"returns",
"a",
"boolean",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/intermittents_commenter/commenter.py#L186-L196 | train | 205,511 |
mozilla/treeherder | treeherder/intermittents_commenter/commenter.py | Commenter.fetch_bug_details | def fetch_bug_details(self, bug_ids):
"""Fetches bug metadata from bugzilla and returns an encoded
dict if successful, otherwise returns None."""
params = {'include_fields': 'product, component, priority, whiteboard, id'}
params['id'] = bug_ids
try:
response = self.session.get(settings.BZ_API_URL + '/rest/bug', headers=self.session.headers,
params=params, timeout=30)
response.raise_for_status()
except RequestException as e:
logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
return None
if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
return None
data = response.json()
if 'bugs' not in data:
return None
return data['bugs'] | python | def fetch_bug_details(self, bug_ids):
"""Fetches bug metadata from bugzilla and returns an encoded
dict if successful, otherwise returns None."""
params = {'include_fields': 'product, component, priority, whiteboard, id'}
params['id'] = bug_ids
try:
response = self.session.get(settings.BZ_API_URL + '/rest/bug', headers=self.session.headers,
params=params, timeout=30)
response.raise_for_status()
except RequestException as e:
logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
return None
if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
return None
data = response.json()
if 'bugs' not in data:
return None
return data['bugs'] | [
"def",
"fetch_bug_details",
"(",
"self",
",",
"bug_ids",
")",
":",
"params",
"=",
"{",
"'include_fields'",
":",
"'product, component, priority, whiteboard, id'",
"}",
"params",
"[",
"'id'",
"]",
"=",
"bug_ids",
"try",
":",
"response",
"=",
"self",
".",
"session"... | Fetches bug metadata from bugzilla and returns an encoded
dict if successful, otherwise returns None. | [
"Fetches",
"bug",
"metadata",
"from",
"bugzilla",
"and",
"returns",
"an",
"encoded",
"dict",
"if",
"successful",
"otherwise",
"returns",
"None",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/intermittents_commenter/commenter.py#L213-L234 | train | 205,512 |
mozilla/treeherder | treeherder/intermittents_commenter/commenter.py | Commenter.get_alt_date_bug_totals | def get_alt_date_bug_totals(self, startday, endday, bug_ids):
"""use previously fetched bug_ids to check for total failures
exceeding 150 in 21 days"""
bugs = (BugJobMap.failures.by_date(startday, endday)
.filter(bug_id__in=bug_ids)
.values('bug_id')
.annotate(total=Count('id'))
.values('bug_id', 'total'))
return {bug['bug_id']: bug['total'] for bug in bugs if bug['total'] >= 150} | python | def get_alt_date_bug_totals(self, startday, endday, bug_ids):
"""use previously fetched bug_ids to check for total failures
exceeding 150 in 21 days"""
bugs = (BugJobMap.failures.by_date(startday, endday)
.filter(bug_id__in=bug_ids)
.values('bug_id')
.annotate(total=Count('id'))
.values('bug_id', 'total'))
return {bug['bug_id']: bug['total'] for bug in bugs if bug['total'] >= 150} | [
"def",
"get_alt_date_bug_totals",
"(",
"self",
",",
"startday",
",",
"endday",
",",
"bug_ids",
")",
":",
"bugs",
"=",
"(",
"BugJobMap",
".",
"failures",
".",
"by_date",
"(",
"startday",
",",
"endday",
")",
".",
"filter",
"(",
"bug_id__in",
"=",
"bug_ids",
... | use previously fetched bug_ids to check for total failures
exceeding 150 in 21 days | [
"use",
"previously",
"fetched",
"bug_ids",
"to",
"check",
"for",
"total",
"failures",
"exceeding",
"150",
"in",
"21",
"days"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/intermittents_commenter/commenter.py#L302-L311 | train | 205,513 |
mozilla/treeherder | treeherder/etl/seta.py | transform | def transform(testtype):
'''
A lot of these transformations are from tasks before task labels and some of them are if we
grab data directly from Treeherder jobs endpoint instead of runnable jobs API.
'''
# XXX: Evaluate which of these transformations are still valid
if testtype.startswith('[funsize'):
return None
testtype = testtype.split('/opt-')[-1]
testtype = testtype.split('/debug-')[-1]
# this is plain-reftests for android
testtype = testtype.replace('plain-', '')
testtype = testtype.strip()
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313844
testtype = testtype.replace('browser-chrome-e10s', 'e10s-browser-chrome')
testtype = testtype.replace('devtools-chrome-e10s', 'e10s-devtools-chrome')
testtype = testtype.replace('[TC] Android 4.3 API15+ ', '')
# mochitest-gl-1 <-- Android 4.3 armv7 API 15+ mozilla-inbound opt test mochitest-gl-1
# mochitest-webgl-9 <-- test-android-4.3-arm7-api-15/opt-mochitest-webgl-9
testtype = testtype.replace('webgl-', 'gl-')
return testtype | python | def transform(testtype):
'''
A lot of these transformations are from tasks before task labels and some of them are if we
grab data directly from Treeherder jobs endpoint instead of runnable jobs API.
'''
# XXX: Evaluate which of these transformations are still valid
if testtype.startswith('[funsize'):
return None
testtype = testtype.split('/opt-')[-1]
testtype = testtype.split('/debug-')[-1]
# this is plain-reftests for android
testtype = testtype.replace('plain-', '')
testtype = testtype.strip()
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313844
testtype = testtype.replace('browser-chrome-e10s', 'e10s-browser-chrome')
testtype = testtype.replace('devtools-chrome-e10s', 'e10s-devtools-chrome')
testtype = testtype.replace('[TC] Android 4.3 API15+ ', '')
# mochitest-gl-1 <-- Android 4.3 armv7 API 15+ mozilla-inbound opt test mochitest-gl-1
# mochitest-webgl-9 <-- test-android-4.3-arm7-api-15/opt-mochitest-webgl-9
testtype = testtype.replace('webgl-', 'gl-')
return testtype | [
"def",
"transform",
"(",
"testtype",
")",
":",
"# XXX: Evaluate which of these transformations are still valid",
"if",
"testtype",
".",
"startswith",
"(",
"'[funsize'",
")",
":",
"return",
"None",
"testtype",
"=",
"testtype",
".",
"split",
"(",
"'/opt-'",
")",
"[",
... | A lot of these transformations are from tasks before task labels and some of them are if we
grab data directly from Treeherder jobs endpoint instead of runnable jobs API. | [
"A",
"lot",
"of",
"these",
"transformations",
"are",
"from",
"tasks",
"before",
"task",
"labels",
"and",
"some",
"of",
"them",
"are",
"if",
"we",
"grab",
"data",
"directly",
"from",
"Treeherder",
"jobs",
"endpoint",
"instead",
"of",
"runnable",
"jobs",
"API"... | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/seta.py#L41-L67 | train | 205,514 |
mozilla/treeherder | treeherder/auth/backends.py | AuthBackend._get_username_from_userinfo | def _get_username_from_userinfo(self, user_info):
"""
Get the user's username from the jwt sub property
"""
subject = user_info['sub']
email = user_info['email']
if "Mozilla-LDAP" in subject:
return "mozilla-ldap/" + email
elif "email" in subject:
return "email/" + email
elif "github" in subject:
return "github/" + email
elif "google" in subject:
return "google/" + email
# Firefox account
elif "oauth2" in subject:
return "oauth2/" + email
else:
raise AuthenticationFailed("Unrecognized identity") | python | def _get_username_from_userinfo(self, user_info):
"""
Get the user's username from the jwt sub property
"""
subject = user_info['sub']
email = user_info['email']
if "Mozilla-LDAP" in subject:
return "mozilla-ldap/" + email
elif "email" in subject:
return "email/" + email
elif "github" in subject:
return "github/" + email
elif "google" in subject:
return "google/" + email
# Firefox account
elif "oauth2" in subject:
return "oauth2/" + email
else:
raise AuthenticationFailed("Unrecognized identity") | [
"def",
"_get_username_from_userinfo",
"(",
"self",
",",
"user_info",
")",
":",
"subject",
"=",
"user_info",
"[",
"'sub'",
"]",
"email",
"=",
"user_info",
"[",
"'email'",
"]",
"if",
"\"Mozilla-LDAP\"",
"in",
"subject",
":",
"return",
"\"mozilla-ldap/\"",
"+",
"... | Get the user's username from the jwt sub property | [
"Get",
"the",
"user",
"s",
"username",
"from",
"the",
"jwt",
"sub",
"property"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/auth/backends.py#L72-L92 | train | 205,515 |
mozilla/treeherder | treeherder/auth/backends.py | AuthBackend._get_user_info | def _get_user_info(self, access_token, id_token):
"""
Extracts the user info payload from the Id Token.
Example return value:
{
"at_hash": "<HASH>",
"aud": "<HASH>",
"email_verified": true,
"email": "fsurname@mozilla.com",
"exp": 1551259495,
"family_name": "Surname",
"given_name": "Firstname",
"https://sso.mozilla.com/claim/groups": [
"all_scm_level_1",
"all_scm_level_2",
"all_scm_level_3",
# ...
],
"iat": 1550654695,
"iss": "https://auth.mozilla.auth0.com/",
"name": "Firstname Surname",
"nickname": "Firstname Surname",
"nonce": "<HASH>",
"picture": "<GRAVATAR_URL>",
"sub": "ad|Mozilla-LDAP|fsurname",
"updated_at": "2019-02-20T09:24:55.449Z",
}
"""
# JWT Validator
# Per https://auth0.com/docs/quickstart/backend/python/01-authorization#create-the-jwt-validation-decorator
try:
unverified_header = jwt.get_unverified_header(id_token)
except jwt.JWTError:
raise AuthError('Unable to decode the Id token header')
if 'kid' not in unverified_header:
raise AuthError('Id token header missing RSA key ID')
rsa_key = None
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
break
if not rsa_key:
raise AuthError('Id token using unrecognised RSA key ID')
try:
# https://python-jose.readthedocs.io/en/latest/jwt/api.html#jose.jwt.decode
user_info = jwt.decode(
id_token,
rsa_key,
algorithms=['RS256'],
audience=AUTH0_CLIENTID,
access_token=access_token,
issuer="https://"+AUTH0_DOMAIN+"/"
)
return user_info
except jwt.ExpiredSignatureError:
raise AuthError('Id token is expired')
except jwt.JWTClaimsError:
raise AuthError("Incorrect claims: please check the audience and issuer")
except jwt.JWTError:
raise AuthError("Invalid header: Unable to parse authentication") | python | def _get_user_info(self, access_token, id_token):
"""
Extracts the user info payload from the Id Token.
Example return value:
{
"at_hash": "<HASH>",
"aud": "<HASH>",
"email_verified": true,
"email": "fsurname@mozilla.com",
"exp": 1551259495,
"family_name": "Surname",
"given_name": "Firstname",
"https://sso.mozilla.com/claim/groups": [
"all_scm_level_1",
"all_scm_level_2",
"all_scm_level_3",
# ...
],
"iat": 1550654695,
"iss": "https://auth.mozilla.auth0.com/",
"name": "Firstname Surname",
"nickname": "Firstname Surname",
"nonce": "<HASH>",
"picture": "<GRAVATAR_URL>",
"sub": "ad|Mozilla-LDAP|fsurname",
"updated_at": "2019-02-20T09:24:55.449Z",
}
"""
# JWT Validator
# Per https://auth0.com/docs/quickstart/backend/python/01-authorization#create-the-jwt-validation-decorator
try:
unverified_header = jwt.get_unverified_header(id_token)
except jwt.JWTError:
raise AuthError('Unable to decode the Id token header')
if 'kid' not in unverified_header:
raise AuthError('Id token header missing RSA key ID')
rsa_key = None
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
break
if not rsa_key:
raise AuthError('Id token using unrecognised RSA key ID')
try:
# https://python-jose.readthedocs.io/en/latest/jwt/api.html#jose.jwt.decode
user_info = jwt.decode(
id_token,
rsa_key,
algorithms=['RS256'],
audience=AUTH0_CLIENTID,
access_token=access_token,
issuer="https://"+AUTH0_DOMAIN+"/"
)
return user_info
except jwt.ExpiredSignatureError:
raise AuthError('Id token is expired')
except jwt.JWTClaimsError:
raise AuthError("Incorrect claims: please check the audience and issuer")
except jwt.JWTError:
raise AuthError("Invalid header: Unable to parse authentication") | [
"def",
"_get_user_info",
"(",
"self",
",",
"access_token",
",",
"id_token",
")",
":",
"# JWT Validator",
"# Per https://auth0.com/docs/quickstart/backend/python/01-authorization#create-the-jwt-validation-decorator",
"try",
":",
"unverified_header",
"=",
"jwt",
".",
"get_unverifie... | Extracts the user info payload from the Id Token.
Example return value:
{
"at_hash": "<HASH>",
"aud": "<HASH>",
"email_verified": true,
"email": "fsurname@mozilla.com",
"exp": 1551259495,
"family_name": "Surname",
"given_name": "Firstname",
"https://sso.mozilla.com/claim/groups": [
"all_scm_level_1",
"all_scm_level_2",
"all_scm_level_3",
# ...
],
"iat": 1550654695,
"iss": "https://auth.mozilla.auth0.com/",
"name": "Firstname Surname",
"nickname": "Firstname Surname",
"nonce": "<HASH>",
"picture": "<GRAVATAR_URL>",
"sub": "ad|Mozilla-LDAP|fsurname",
"updated_at": "2019-02-20T09:24:55.449Z",
} | [
"Extracts",
"the",
"user",
"info",
"payload",
"from",
"the",
"Id",
"Token",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/auth/backends.py#L94-L167 | train | 205,516 |
mozilla/treeherder | treeherder/auth/backends.py | AuthBackend._calculate_session_expiry | def _calculate_session_expiry(self, request, user_info):
"""Returns the number of seconds after which the Django session should expire."""
access_token_expiry_timestamp = self._get_access_token_expiry(request)
id_token_expiry_timestamp = self._get_id_token_expiry(user_info)
now_in_seconds = int(time.time())
# The session length is set to match whichever token expiration time is closer.
earliest_expiration_timestamp = min(access_token_expiry_timestamp, id_token_expiry_timestamp)
seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds
if seconds_until_expiry <= 0:
raise AuthError('Session expiry time has already passed!')
return seconds_until_expiry | python | def _calculate_session_expiry(self, request, user_info):
"""Returns the number of seconds after which the Django session should expire."""
access_token_expiry_timestamp = self._get_access_token_expiry(request)
id_token_expiry_timestamp = self._get_id_token_expiry(user_info)
now_in_seconds = int(time.time())
# The session length is set to match whichever token expiration time is closer.
earliest_expiration_timestamp = min(access_token_expiry_timestamp, id_token_expiry_timestamp)
seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds
if seconds_until_expiry <= 0:
raise AuthError('Session expiry time has already passed!')
return seconds_until_expiry | [
"def",
"_calculate_session_expiry",
"(",
"self",
",",
"request",
",",
"user_info",
")",
":",
"access_token_expiry_timestamp",
"=",
"self",
".",
"_get_access_token_expiry",
"(",
"request",
")",
"id_token_expiry_timestamp",
"=",
"self",
".",
"_get_id_token_expiry",
"(",
... | Returns the number of seconds after which the Django session should expire. | [
"Returns",
"the",
"number",
"of",
"seconds",
"after",
"which",
"the",
"Django",
"session",
"should",
"expire",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/auth/backends.py#L169-L182 | train | 205,517 |
mozilla/treeherder | treeherder/seta/update_job_priority.py | _unique_key | def _unique_key(job):
"""Return a key to query our uniqueness mapping system.
This makes sure that we use a consistent key between our code and selecting jobs from the
table.
"""
return unique_key(testtype=str(job['testtype']),
buildtype=str(job['platform_option']),
platform=str(job['platform'])) | python | def _unique_key(job):
"""Return a key to query our uniqueness mapping system.
This makes sure that we use a consistent key between our code and selecting jobs from the
table.
"""
return unique_key(testtype=str(job['testtype']),
buildtype=str(job['platform_option']),
platform=str(job['platform'])) | [
"def",
"_unique_key",
"(",
"job",
")",
":",
"return",
"unique_key",
"(",
"testtype",
"=",
"str",
"(",
"job",
"[",
"'testtype'",
"]",
")",
",",
"buildtype",
"=",
"str",
"(",
"job",
"[",
"'platform_option'",
"]",
")",
",",
"platform",
"=",
"str",
"(",
... | Return a key to query our uniqueness mapping system.
This makes sure that we use a consistent key between our code and selecting jobs from the
table. | [
"Return",
"a",
"key",
"to",
"query",
"our",
"uniqueness",
"mapping",
"system",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/update_job_priority.py#L38-L46 | train | 205,518 |
mozilla/treeherder | treeherder/seta/update_job_priority.py | _sanitize_data | def _sanitize_data(runnable_jobs_data):
"""We receive data from runnable jobs api and return the sanitized data that meets our needs.
This is a loop to remove duplicates (including buildsystem -> * transformations if needed)
By doing this, it allows us to have a single database query
It returns sanitized_list which will contain a subset which excludes:
* jobs that don't specify the platform
* jobs that don't specify the testtype
* if the job appears again, we replace build_system_type with '*'. By doing so, if a job appears
under both 'buildbot' and 'taskcluster', its build_system_type will be '*'
"""
job_build_system_type = {}
sanitized_list = []
for job in runnable_jobs_data:
if not valid_platform(job['platform']):
logger.info('Invalid platform %s', job['platform'])
continue
testtype = parse_testtype(
build_system_type=job['build_system_type'],
job_type_name=job['job_type_name'],
platform_option=job['platform_option'],
ref_data_name=job['ref_data_name']
)
if not testtype:
continue
# NOTE: This is *all* the data we need from the runnable API
new_job = {
'build_system_type': job['build_system_type'], # e.g. {buildbot,taskcluster,*}
'platform': job['platform'], # e.g. windows8-64
'platform_option': job['platform_option'], # e.g. {opt,debug}
'testtype': testtype, # e.g. web-platform-tests-1
}
key = _unique_key(new_job)
# Let's build a map of all the jobs and if duplicated change the build_system_type to *
if key not in job_build_system_type:
job_build_system_type[key] = job['build_system_type']
sanitized_list.append(new_job)
elif new_job['build_system_type'] != job_build_system_type[key]:
new_job['build_system_type'] = job_build_system_type[key]
# This will *replace* the previous build system type with '*'
# This guarantees that we don't have duplicates
sanitized_list[sanitized_list.index(new_job)]['build_system_type'] = '*'
return sanitized_list | python | def _sanitize_data(runnable_jobs_data):
"""We receive data from runnable jobs api and return the sanitized data that meets our needs.
This is a loop to remove duplicates (including buildsystem -> * transformations if needed)
By doing this, it allows us to have a single database query
It returns sanitized_list which will contain a subset which excludes:
* jobs that don't specify the platform
* jobs that don't specify the testtype
* if the job appears again, we replace build_system_type with '*'. By doing so, if a job appears
under both 'buildbot' and 'taskcluster', its build_system_type will be '*'
"""
job_build_system_type = {}
sanitized_list = []
for job in runnable_jobs_data:
if not valid_platform(job['platform']):
logger.info('Invalid platform %s', job['platform'])
continue
testtype = parse_testtype(
build_system_type=job['build_system_type'],
job_type_name=job['job_type_name'],
platform_option=job['platform_option'],
ref_data_name=job['ref_data_name']
)
if not testtype:
continue
# NOTE: This is *all* the data we need from the runnable API
new_job = {
'build_system_type': job['build_system_type'], # e.g. {buildbot,taskcluster,*}
'platform': job['platform'], # e.g. windows8-64
'platform_option': job['platform_option'], # e.g. {opt,debug}
'testtype': testtype, # e.g. web-platform-tests-1
}
key = _unique_key(new_job)
# Let's build a map of all the jobs and if duplicated change the build_system_type to *
if key not in job_build_system_type:
job_build_system_type[key] = job['build_system_type']
sanitized_list.append(new_job)
elif new_job['build_system_type'] != job_build_system_type[key]:
new_job['build_system_type'] = job_build_system_type[key]
# This will *replace* the previous build system type with '*'
# This guarantees that we don't have duplicates
sanitized_list[sanitized_list.index(new_job)]['build_system_type'] = '*'
return sanitized_list | [
"def",
"_sanitize_data",
"(",
"runnable_jobs_data",
")",
":",
"job_build_system_type",
"=",
"{",
"}",
"sanitized_list",
"=",
"[",
"]",
"for",
"job",
"in",
"runnable_jobs_data",
":",
"if",
"not",
"valid_platform",
"(",
"job",
"[",
"'platform'",
"]",
")",
":",
... | We receive data from runnable jobs api and return the sanitized data that meets our needs.
This is a loop to remove duplicates (including buildsystem -> * transformations if needed)
By doing this, it allows us to have a single database query
It returns sanitized_list which will contain a subset which excludes:
* jobs that don't specify the platform
* jobs that don't specify the testtype
* if the job appears again, we replace build_system_type with '*'. By doing so, if a job appears
under both 'buildbot' and 'taskcluster', its build_system_type will be '*' | [
"We",
"receive",
"data",
"from",
"runnable",
"jobs",
"api",
"and",
"return",
"the",
"sanitized",
"data",
"that",
"meets",
"our",
"needs",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/update_job_priority.py#L49-L97 | train | 205,519 |
mozilla/treeherder | treeherder/seta/update_job_priority.py | _update_table | def _update_table(data):
"""Add new jobs to the priority table and update the build system if required.
data - it is a list of dictionaries that describe a job type
returns the number of new, failed and updated jobs
"""
jp_index, priority, expiration_date = _initialize_values()
total_jobs = len(data)
new_jobs, failed_changes, updated_jobs = 0, 0, 0
# Loop through sanitized jobs, add new jobs and update the build system if needed
for job in data:
key = _unique_key(job)
if key in jp_index:
# We already know about this job, we might need to update the build system
# We're seeing the job again with another build system (e.g. buildbot vs
# taskcluster). We need to change it to '*'
if jp_index[key]['build_system_type'] != '*' and jp_index[key]['build_system_type'] != job["build_system_type"]:
db_job = JobPriority.objects.get(pk=jp_index[key]['pk'])
db_job.buildsystem = '*'
db_job.save()
logger.info('Updated %s/%s from %s to %s',
db_job.testtype, db_job.buildtype,
job['build_system_type'], db_job.buildsystem)
updated_jobs += 1
else:
# We have a new job from runnablejobs to add to our master list
try:
jobpriority = JobPriority(
testtype=str(job["testtype"]),
buildtype=str(job["platform_option"]),
platform=str(job["platform"]),
priority=priority,
expiration_date=expiration_date,
buildsystem=job["build_system_type"]
)
jobpriority.save()
logger.info('New job was found (%s,%s,%s,%s)',
job['testtype'], job['platform_option'], job['platform'],
job["build_system_type"])
new_jobs += 1
except Exception as error:
logger.warning(str(error))
failed_changes += 1
logger.info('We have %s new jobs and %s updated jobs out of %s total jobs processed.',
new_jobs, updated_jobs, total_jobs)
if failed_changes != 0:
logger.warning('We have failed %s changes out of %s total jobs processed.',
failed_changes, total_jobs)
return new_jobs, failed_changes, updated_jobs | python | def _update_table(data):
"""Add new jobs to the priority table and update the build system if required.
data - it is a list of dictionaries that describe a job type
returns the number of new, failed and updated jobs
"""
jp_index, priority, expiration_date = _initialize_values()
total_jobs = len(data)
new_jobs, failed_changes, updated_jobs = 0, 0, 0
# Loop through sanitized jobs, add new jobs and update the build system if needed
for job in data:
key = _unique_key(job)
if key in jp_index:
# We already know about this job, we might need to update the build system
# We're seeing the job again with another build system (e.g. buildbot vs
# taskcluster). We need to change it to '*'
if jp_index[key]['build_system_type'] != '*' and jp_index[key]['build_system_type'] != job["build_system_type"]:
db_job = JobPriority.objects.get(pk=jp_index[key]['pk'])
db_job.buildsystem = '*'
db_job.save()
logger.info('Updated %s/%s from %s to %s',
db_job.testtype, db_job.buildtype,
job['build_system_type'], db_job.buildsystem)
updated_jobs += 1
else:
# We have a new job from runnablejobs to add to our master list
try:
jobpriority = JobPriority(
testtype=str(job["testtype"]),
buildtype=str(job["platform_option"]),
platform=str(job["platform"]),
priority=priority,
expiration_date=expiration_date,
buildsystem=job["build_system_type"]
)
jobpriority.save()
logger.info('New job was found (%s,%s,%s,%s)',
job['testtype'], job['platform_option'], job['platform'],
job["build_system_type"])
new_jobs += 1
except Exception as error:
logger.warning(str(error))
failed_changes += 1
logger.info('We have %s new jobs and %s updated jobs out of %s total jobs processed.',
new_jobs, updated_jobs, total_jobs)
if failed_changes != 0:
logger.warning('We have failed %s changes out of %s total jobs processed.',
failed_changes, total_jobs)
return new_jobs, failed_changes, updated_jobs | [
"def",
"_update_table",
"(",
"data",
")",
":",
"jp_index",
",",
"priority",
",",
"expiration_date",
"=",
"_initialize_values",
"(",
")",
"total_jobs",
"=",
"len",
"(",
"data",
")",
"new_jobs",
",",
"failed_changes",
",",
"updated_jobs",
"=",
"0",
",",
"0",
... | Add new jobs to the priority table and update the build system if required.
data - it is a list of dictionaries that describe a job type
returns the number of new, failed and updated jobs | [
"Add",
"new",
"jobs",
"to",
"the",
"priority",
"table",
"and",
"update",
"the",
"build",
"system",
"if",
"required",
".",
"data",
"-",
"it",
"is",
"a",
"list",
"of",
"dictionaries",
"that",
"describe",
"a",
"job",
"type"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/update_job_priority.py#L126-L180 | train | 205,520 |
mozilla/treeherder | treeherder/seta/preseed.py | load_preseed | def load_preseed():
""" Update JobPriority information from preseed.json
The preseed data has these fields: buildtype, testtype, platform, priority, expiration_date
The expiration_date field defaults to 2 weeks when inserted in the table
The expiration_date field has the format "YYYY-MM-DD", however, it can have "*" to indicate to never expire
The default priority is 1, however, if we want to force coalescing we can do that
The fields buildtype, testtype and platform can have * which makes ut match all flavors of
the * field. For example: (linux64, pgo, *) matches all Linux 64 pgo tests
"""
if not JobPriority.objects.exists():
return
preseed = preseed_data()
for job in preseed:
queryset = JobPriority.objects.all()
for field in ('testtype', 'buildtype', 'platform'):
if job[field] != '*':
queryset = queryset.filter(**{field: job[field]})
# Deal with the case where we have a new entry in preseed
if not queryset:
create_new_entry(job)
else:
# We can have wildcards, so loop on all returned values in data
for jp in queryset:
process_job_priority(jp, job) | python | def load_preseed():
""" Update JobPriority information from preseed.json
The preseed data has these fields: buildtype, testtype, platform, priority, expiration_date
The expiration_date field defaults to 2 weeks when inserted in the table
The expiration_date field has the format "YYYY-MM-DD", however, it can have "*" to indicate to never expire
The default priority is 1, however, if we want to force coalescing we can do that
The fields buildtype, testtype and platform can have * which makes ut match all flavors of
the * field. For example: (linux64, pgo, *) matches all Linux 64 pgo tests
"""
if not JobPriority.objects.exists():
return
preseed = preseed_data()
for job in preseed:
queryset = JobPriority.objects.all()
for field in ('testtype', 'buildtype', 'platform'):
if job[field] != '*':
queryset = queryset.filter(**{field: job[field]})
# Deal with the case where we have a new entry in preseed
if not queryset:
create_new_entry(job)
else:
# We can have wildcards, so loop on all returned values in data
for jp in queryset:
process_job_priority(jp, job) | [
"def",
"load_preseed",
"(",
")",
":",
"if",
"not",
"JobPriority",
".",
"objects",
".",
"exists",
"(",
")",
":",
"return",
"preseed",
"=",
"preseed_data",
"(",
")",
"for",
"job",
"in",
"preseed",
":",
"queryset",
"=",
"JobPriority",
".",
"objects",
".",
... | Update JobPriority information from preseed.json
The preseed data has these fields: buildtype, testtype, platform, priority, expiration_date
The expiration_date field defaults to 2 weeks when inserted in the table
The expiration_date field has the format "YYYY-MM-DD", however, it can have "*" to indicate to never expire
The default priority is 1, however, if we want to force coalescing we can do that
The fields buildtype, testtype and platform can have * which makes ut match all flavors of
the * field. For example: (linux64, pgo, *) matches all Linux 64 pgo tests | [
"Update",
"JobPriority",
"information",
"from",
"preseed",
".",
"json"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/preseed.py#L13-L40 | train | 205,521 |
mozilla/treeherder | treeherder/client/thclient/perfherder.py | PerformanceTimeInterval.all_valid_time_intervals | def all_valid_time_intervals():
'''
Helper method to return all possible valid time intervals for data
stored by Perfherder
'''
return [PerformanceTimeInterval.DAY,
PerformanceTimeInterval.WEEK,
PerformanceTimeInterval.TWO_WEEKS,
PerformanceTimeInterval.SIXTY_DAYS,
PerformanceTimeInterval.NINETY_DAYS,
PerformanceTimeInterval.ONE_YEAR] | python | def all_valid_time_intervals():
'''
Helper method to return all possible valid time intervals for data
stored by Perfherder
'''
return [PerformanceTimeInterval.DAY,
PerformanceTimeInterval.WEEK,
PerformanceTimeInterval.TWO_WEEKS,
PerformanceTimeInterval.SIXTY_DAYS,
PerformanceTimeInterval.NINETY_DAYS,
PerformanceTimeInterval.ONE_YEAR] | [
"def",
"all_valid_time_intervals",
"(",
")",
":",
"return",
"[",
"PerformanceTimeInterval",
".",
"DAY",
",",
"PerformanceTimeInterval",
".",
"WEEK",
",",
"PerformanceTimeInterval",
".",
"TWO_WEEKS",
",",
"PerformanceTimeInterval",
".",
"SIXTY_DAYS",
",",
"PerformanceTim... | Helper method to return all possible valid time intervals for data
stored by Perfherder | [
"Helper",
"method",
"to",
"return",
"all",
"possible",
"valid",
"time",
"intervals",
"for",
"data",
"stored",
"by",
"Perfherder"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/perfherder.py#L16-L26 | train | 205,522 |
mozilla/treeherder | treeherder/client/thclient/perfherder.py | PerformanceSignatureCollection.get_property_names | def get_property_names(self):
'''
Returns all property names in this collection of signatures
'''
property_names = set()
for signature_value in self.values():
for property_name in signature_value.keys():
property_names.add(property_name)
return property_names | python | def get_property_names(self):
'''
Returns all property names in this collection of signatures
'''
property_names = set()
for signature_value in self.values():
for property_name in signature_value.keys():
property_names.add(property_name)
return property_names | [
"def",
"get_property_names",
"(",
"self",
")",
":",
"property_names",
"=",
"set",
"(",
")",
"for",
"signature_value",
"in",
"self",
".",
"values",
"(",
")",
":",
"for",
"property_name",
"in",
"signature_value",
".",
"keys",
"(",
")",
":",
"property_names",
... | Returns all property names in this collection of signatures | [
"Returns",
"all",
"property",
"names",
"in",
"this",
"collection",
"of",
"signatures"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/perfherder.py#L66-L74 | train | 205,523 |
mozilla/treeherder | treeherder/client/thclient/perfherder.py | PerformanceSignatureCollection.get_property_values | def get_property_values(self, property_name):
'''
Returns all property values for a particular property name in this collection
'''
property_values = set()
for signature_value in self.values():
if signature_value.get(property_name):
property_values.add(signature_value[property_name])
return property_values | python | def get_property_values(self, property_name):
'''
Returns all property values for a particular property name in this collection
'''
property_values = set()
for signature_value in self.values():
if signature_value.get(property_name):
property_values.add(signature_value[property_name])
return property_values | [
"def",
"get_property_values",
"(",
"self",
",",
"property_name",
")",
":",
"property_values",
"=",
"set",
"(",
")",
"for",
"signature_value",
"in",
"self",
".",
"values",
"(",
")",
":",
"if",
"signature_value",
".",
"get",
"(",
"property_name",
")",
":",
"... | Returns all property values for a particular property name in this collection | [
"Returns",
"all",
"property",
"values",
"for",
"a",
"particular",
"property",
"name",
"in",
"this",
"collection"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/perfherder.py#L76-L84 | train | 205,524 |
mozilla/treeherder | treeherder/client/thclient/perfherder.py | PerfherderClient.get_performance_signatures | def get_performance_signatures(self, project, **params):
'''
Gets a set of performance signatures associated with a project and time range
'''
results = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT, project, **params)
return PerformanceSignatureCollection(results) | python | def get_performance_signatures(self, project, **params):
'''
Gets a set of performance signatures associated with a project and time range
'''
results = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT, project, **params)
return PerformanceSignatureCollection(results) | [
"def",
"get_performance_signatures",
"(",
"self",
",",
"project",
",",
"*",
"*",
"params",
")",
":",
"results",
"=",
"self",
".",
"_get_json",
"(",
"self",
".",
"PERFORMANCE_SIGNATURES_ENDPOINT",
",",
"project",
",",
"*",
"*",
"params",
")",
"return",
"Perfo... | Gets a set of performance signatures associated with a project and time range | [
"Gets",
"a",
"set",
"of",
"performance",
"signatures",
"associated",
"with",
"a",
"project",
"and",
"time",
"range"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/perfherder.py#L112-L117 | train | 205,525 |
mozilla/treeherder | treeherder/client/thclient/perfherder.py | PerfherderClient.get_performance_data | def get_performance_data(self, project, **params):
'''
Gets a dictionary of PerformanceSeries objects
You can specify which signatures to get by passing signature to this function
'''
results = self._get_json(self.PERFORMANCE_DATA_ENDPOINT, project, **params)
return {k: PerformanceSeries(v) for k, v in results.items()} | python | def get_performance_data(self, project, **params):
'''
Gets a dictionary of PerformanceSeries objects
You can specify which signatures to get by passing signature to this function
'''
results = self._get_json(self.PERFORMANCE_DATA_ENDPOINT, project, **params)
return {k: PerformanceSeries(v) for k, v in results.items()} | [
"def",
"get_performance_data",
"(",
"self",
",",
"project",
",",
"*",
"*",
"params",
")",
":",
"results",
"=",
"self",
".",
"_get_json",
"(",
"self",
".",
"PERFORMANCE_DATA_ENDPOINT",
",",
"project",
",",
"*",
"*",
"params",
")",
"return",
"{",
"k",
":",... | Gets a dictionary of PerformanceSeries objects
You can specify which signatures to get by passing signature to this function | [
"Gets",
"a",
"dictionary",
"of",
"PerformanceSeries",
"objects"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/perfherder.py#L119-L127 | train | 205,526 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | get_matchers | def get_matchers():
"""
Get matcher functions from treeherder.autoclassify.matchers
We classify matchers as any function treeherder.autoclassify.matchers with
a name ending in _matcher. This is currently overkill but protects against
the unwarey engineer adding new functions to the matchers module that
shouldn't be treated as matchers.
"""
from . import matchers
def is_matcher_func(member):
return inspect.isfunction(member) and member.__name__.endswith("_matcher")
members = inspect.getmembers(matchers, is_matcher_func)
for name, func in members:
yield func | python | def get_matchers():
"""
Get matcher functions from treeherder.autoclassify.matchers
We classify matchers as any function treeherder.autoclassify.matchers with
a name ending in _matcher. This is currently overkill but protects against
the unwarey engineer adding new functions to the matchers module that
shouldn't be treated as matchers.
"""
from . import matchers
def is_matcher_func(member):
return inspect.isfunction(member) and member.__name__.endswith("_matcher")
members = inspect.getmembers(matchers, is_matcher_func)
for name, func in members:
yield func | [
"def",
"get_matchers",
"(",
")",
":",
"from",
".",
"import",
"matchers",
"def",
"is_matcher_func",
"(",
"member",
")",
":",
"return",
"inspect",
".",
"isfunction",
"(",
"member",
")",
"and",
"member",
".",
"__name__",
".",
"endswith",
"(",
"\"_matcher\"",
... | Get matcher functions from treeherder.autoclassify.matchers
We classify matchers as any function treeherder.autoclassify.matchers with
a name ending in _matcher. This is currently overkill but protects against
the unwarey engineer adding new functions to the matchers module that
shouldn't be treated as matchers. | [
"Get",
"matcher",
"functions",
"from",
"treeherder",
".",
"autoclassify",
".",
"matchers"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L20-L37 | train | 205,527 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | find_best_matches | def find_best_matches(errors, matchers):
"""
Find the best match for each error
We use the Good Enough™ ratio as a watershed level for match scores.
"""
for text_log_error in errors:
matches = find_all_matches(text_log_error, matchers) # TextLogErrorMatch instances, unsaved!
best_match = first(matches, key=lambda m: (-m.score, -m.classified_failure_id))
if not best_match:
continue
newrelic.agent.record_custom_event('highest_scored_matcher', {
'matcher': best_match.matcher_name,
'score': best_match.score,
'text_log_error': best_match.text_log_error_id,
})
yield best_match | python | def find_best_matches(errors, matchers):
"""
Find the best match for each error
We use the Good Enough™ ratio as a watershed level for match scores.
"""
for text_log_error in errors:
matches = find_all_matches(text_log_error, matchers) # TextLogErrorMatch instances, unsaved!
best_match = first(matches, key=lambda m: (-m.score, -m.classified_failure_id))
if not best_match:
continue
newrelic.agent.record_custom_event('highest_scored_matcher', {
'matcher': best_match.matcher_name,
'score': best_match.score,
'text_log_error': best_match.text_log_error_id,
})
yield best_match | [
"def",
"find_best_matches",
"(",
"errors",
",",
"matchers",
")",
":",
"for",
"text_log_error",
"in",
"errors",
":",
"matches",
"=",
"find_all_matches",
"(",
"text_log_error",
",",
"matchers",
")",
"# TextLogErrorMatch instances, unsaved!",
"best_match",
"=",
"first",
... | Find the best match for each error
We use the Good Enough™ ratio as a watershed level for match scores. | [
"Find",
"the",
"best",
"match",
"for",
"each",
"error"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L91-L110 | train | 205,528 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | find_all_matches | def find_all_matches(text_log_error, matchers):
"""
Find matches for the given error using the given matcher classes
Returns *unsaved* TextLogErrorMatch instances.
"""
for matcher_func in matchers:
matches = matcher_func(text_log_error)
# matches: iterator of (score, ClassifiedFailure.id)
if not matches:
continue
for score, classified_failure_id in matches:
yield TextLogErrorMatch(
score=score,
matcher_name=matcher_func.__name__,
classified_failure_id=classified_failure_id,
text_log_error=text_log_error,
) | python | def find_all_matches(text_log_error, matchers):
"""
Find matches for the given error using the given matcher classes
Returns *unsaved* TextLogErrorMatch instances.
"""
for matcher_func in matchers:
matches = matcher_func(text_log_error)
# matches: iterator of (score, ClassifiedFailure.id)
if not matches:
continue
for score, classified_failure_id in matches:
yield TextLogErrorMatch(
score=score,
matcher_name=matcher_func.__name__,
classified_failure_id=classified_failure_id,
text_log_error=text_log_error,
) | [
"def",
"find_all_matches",
"(",
"text_log_error",
",",
"matchers",
")",
":",
"for",
"matcher_func",
"in",
"matchers",
":",
"matches",
"=",
"matcher_func",
"(",
"text_log_error",
")",
"# matches: iterator of (score, ClassifiedFailure.id)",
"if",
"not",
"matches",
":",
... | Find matches for the given error using the given matcher classes
Returns *unsaved* TextLogErrorMatch instances. | [
"Find",
"matches",
"for",
"the",
"given",
"error",
"using",
"the",
"given",
"matcher",
"classes"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L113-L131 | train | 205,529 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | get_best_match | def get_best_match(text_log_error):
"""
Get the best TextLogErrorMatch for a given TextLogErrorMatch.
Matches are further filtered by the score cut off.
"""
score_cut_off = 0.7
return (text_log_error.matches.filter(score__gt=score_cut_off)
.order_by("-score", "-classified_failure_id")
.select_related('classified_failure')
.first()) | python | def get_best_match(text_log_error):
"""
Get the best TextLogErrorMatch for a given TextLogErrorMatch.
Matches are further filtered by the score cut off.
"""
score_cut_off = 0.7
return (text_log_error.matches.filter(score__gt=score_cut_off)
.order_by("-score", "-classified_failure_id")
.select_related('classified_failure')
.first()) | [
"def",
"get_best_match",
"(",
"text_log_error",
")",
":",
"score_cut_off",
"=",
"0.7",
"return",
"(",
"text_log_error",
".",
"matches",
".",
"filter",
"(",
"score__gt",
"=",
"score_cut_off",
")",
".",
"order_by",
"(",
"\"-score\"",
",",
"\"-classified_failure_id\"... | Get the best TextLogErrorMatch for a given TextLogErrorMatch.
Matches are further filtered by the score cut off. | [
"Get",
"the",
"best",
"TextLogErrorMatch",
"for",
"a",
"given",
"TextLogErrorMatch",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L134-L144 | train | 205,530 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | mark_best_classification | def mark_best_classification(text_log_error, classified_failure):
"""
Wrapper for setting best_classification on both TextLogError and FailureLine.
Set the given ClassifiedFailure as best_classification for the given
TextLogError. Handles the duplication of best_classification on FailureLine
so you don't have to!
"""
text_log_error.metadata.best_classification = classified_failure
text_log_error.metadata.save(update_fields=['best_classification'])
text_log_error.metadata.failure_line.elastic_search_insert() | python | def mark_best_classification(text_log_error, classified_failure):
"""
Wrapper for setting best_classification on both TextLogError and FailureLine.
Set the given ClassifiedFailure as best_classification for the given
TextLogError. Handles the duplication of best_classification on FailureLine
so you don't have to!
"""
text_log_error.metadata.best_classification = classified_failure
text_log_error.metadata.save(update_fields=['best_classification'])
text_log_error.metadata.failure_line.elastic_search_insert() | [
"def",
"mark_best_classification",
"(",
"text_log_error",
",",
"classified_failure",
")",
":",
"text_log_error",
".",
"metadata",
".",
"best_classification",
"=",
"classified_failure",
"text_log_error",
".",
"metadata",
".",
"save",
"(",
"update_fields",
"=",
"[",
"'b... | Wrapper for setting best_classification on both TextLogError and FailureLine.
Set the given ClassifiedFailure as best_classification for the given
TextLogError. Handles the duplication of best_classification on FailureLine
so you don't have to! | [
"Wrapper",
"for",
"setting",
"best_classification",
"on",
"both",
"TextLogError",
"and",
"FailureLine",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L147-L157 | train | 205,531 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | mark_best_classifications | def mark_best_classifications(errors):
"""
Convenience wrapper around mark_best_classification.
Finds the best match for each TextLogError in errors, handling no match
meeting the cut off score and then mark_best_classification to save that
information.
"""
for text_log_error in errors:
best_match = get_best_match(text_log_error)
if not best_match:
continue
mark_best_classification(text_log_error, best_match.classified_failure) | python | def mark_best_classifications(errors):
"""
Convenience wrapper around mark_best_classification.
Finds the best match for each TextLogError in errors, handling no match
meeting the cut off score and then mark_best_classification to save that
information.
"""
for text_log_error in errors:
best_match = get_best_match(text_log_error)
if not best_match:
continue
mark_best_classification(text_log_error, best_match.classified_failure) | [
"def",
"mark_best_classifications",
"(",
"errors",
")",
":",
"for",
"text_log_error",
"in",
"errors",
":",
"best_match",
"=",
"get_best_match",
"(",
"text_log_error",
")",
"if",
"not",
"best_match",
":",
"continue",
"mark_best_classification",
"(",
"text_log_error",
... | Convenience wrapper around mark_best_classification.
Finds the best match for each TextLogError in errors, handling no match
meeting the cut off score and then mark_best_classification to save that
information. | [
"Convenience",
"wrapper",
"around",
"mark_best_classification",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L160-L173 | train | 205,532 |
mozilla/treeherder | treeherder/autoclassify/autoclassify.py | update_db | def update_db(matches):
"""
Save TextLogErrorMatch instances to the DB
We loop each Match instance instead of calling bulk_create() so we can
catch any potential IntegrityErrors and continue.
"""
for match in matches:
try:
match.save()
except IntegrityError:
args = (match.text_log_error_id, match.matcher_name, match.classified_failure_id)
logger.warning(
"Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i",
args,
) | python | def update_db(matches):
"""
Save TextLogErrorMatch instances to the DB
We loop each Match instance instead of calling bulk_create() so we can
catch any potential IntegrityErrors and continue.
"""
for match in matches:
try:
match.save()
except IntegrityError:
args = (match.text_log_error_id, match.matcher_name, match.classified_failure_id)
logger.warning(
"Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i",
args,
) | [
"def",
"update_db",
"(",
"matches",
")",
":",
"for",
"match",
"in",
"matches",
":",
"try",
":",
"match",
".",
"save",
"(",
")",
"except",
"IntegrityError",
":",
"args",
"=",
"(",
"match",
".",
"text_log_error_id",
",",
"match",
".",
"matcher_name",
",",
... | Save TextLogErrorMatch instances to the DB
We loop each Match instance instead of calling bulk_create() so we can
catch any potential IntegrityErrors and continue. | [
"Save",
"TextLogErrorMatch",
"instances",
"to",
"the",
"DB"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/autoclassify.py#L176-L191 | train | 205,533 |
mozilla/treeherder | treeherder/etl/schema.py | get_json_schema | def get_json_schema(filename):
"""
Get a JSON Schema by filename.
"""
file_path = os.path.join("schemas", filename)
with open(file_path) as f:
schema = yaml.load(f)
return schema | python | def get_json_schema(filename):
"""
Get a JSON Schema by filename.
"""
file_path = os.path.join("schemas", filename)
with open(file_path) as f:
schema = yaml.load(f)
return schema | [
"def",
"get_json_schema",
"(",
"filename",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"schemas\"",
",",
"filename",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"f",
":",
"schema",
"=",
"yaml",
".",
"load",
"(",
"f",
")",... | Get a JSON Schema by filename. | [
"Get",
"a",
"JSON",
"Schema",
"by",
"filename",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/schema.py#L6-L14 | train | 205,534 |
mozilla/treeherder | treeherder/etl/artifact.py | store_job_info_artifact | def store_job_info_artifact(job, job_info_artifact):
"""
Store the contents of the job info artifact
in job details
"""
job_details = json.loads(job_info_artifact['blob'])['job_details']
for job_detail in job_details:
job_detail_dict = {
'title': job_detail.get('title'),
'value': job_detail['value'],
'url': job_detail.get('url')
}
for (k, v) in job_detail_dict.items():
max_field_length = JobDetail._meta.get_field(k).max_length
if v is not None and len(v) > max_field_length:
logger.warning("Job detail '%s' for job_guid %s too long, truncating",
v[:max_field_length], job.guid)
job_detail_dict[k] = v[:max_field_length]
# move the url field to be updated in defaults now that it's
# had its size trimmed, if necessary
job_detail_dict['defaults'] = {'url': job_detail_dict['url']}
del job_detail_dict['url']
JobDetail.objects.update_or_create(
job=job,
**job_detail_dict) | python | def store_job_info_artifact(job, job_info_artifact):
"""
Store the contents of the job info artifact
in job details
"""
job_details = json.loads(job_info_artifact['blob'])['job_details']
for job_detail in job_details:
job_detail_dict = {
'title': job_detail.get('title'),
'value': job_detail['value'],
'url': job_detail.get('url')
}
for (k, v) in job_detail_dict.items():
max_field_length = JobDetail._meta.get_field(k).max_length
if v is not None and len(v) > max_field_length:
logger.warning("Job detail '%s' for job_guid %s too long, truncating",
v[:max_field_length], job.guid)
job_detail_dict[k] = v[:max_field_length]
# move the url field to be updated in defaults now that it's
# had its size trimmed, if necessary
job_detail_dict['defaults'] = {'url': job_detail_dict['url']}
del job_detail_dict['url']
JobDetail.objects.update_or_create(
job=job,
**job_detail_dict) | [
"def",
"store_job_info_artifact",
"(",
"job",
",",
"job_info_artifact",
")",
":",
"job_details",
"=",
"json",
".",
"loads",
"(",
"job_info_artifact",
"[",
"'blob'",
"]",
")",
"[",
"'job_details'",
"]",
"for",
"job_detail",
"in",
"job_details",
":",
"job_detail_d... | Store the contents of the job info artifact
in job details | [
"Store",
"the",
"contents",
"of",
"the",
"job",
"info",
"artifact",
"in",
"job",
"details"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/artifact.py#L19-L45 | train | 205,535 |
mozilla/treeherder | treeherder/etl/artifact.py | store_text_log_summary_artifact | def store_text_log_summary_artifact(job, text_log_summary_artifact):
"""
Store the contents of the text log summary artifact
"""
step_data = json.loads(
text_log_summary_artifact['blob'])['step_data']
result_map = {v: k for (k, v) in TextLogStep.RESULTS}
with transaction.atomic():
for step in step_data['steps']:
name = step['name'][:TextLogStep._meta.get_field('name').max_length]
# process start/end times if we have them
# we currently don't support timezones in treeherder, so
# just ignore that when importing/updating the bug to avoid
# a ValueError (though by default the text log summaries
# we produce should have time expressed in UTC anyway)
time_kwargs = {}
for tkey in ('started', 'finished'):
if step.get(tkey):
time_kwargs[tkey] = dateutil.parser.parse(
step[tkey], ignoretz=True)
log_step = TextLogStep.objects.create(
job=job,
started_line_number=step['started_linenumber'],
finished_line_number=step['finished_linenumber'],
name=name,
result=result_map[step['result']],
**time_kwargs)
if step.get('errors'):
for error in step['errors']:
TextLogError.objects.create(
step=log_step,
line_number=error['linenumber'],
line=astral_filter(error['line']))
# get error summary immediately (to warm the cache)
error_summary.get_error_summary(job) | python | def store_text_log_summary_artifact(job, text_log_summary_artifact):
"""
Store the contents of the text log summary artifact
"""
step_data = json.loads(
text_log_summary_artifact['blob'])['step_data']
result_map = {v: k for (k, v) in TextLogStep.RESULTS}
with transaction.atomic():
for step in step_data['steps']:
name = step['name'][:TextLogStep._meta.get_field('name').max_length]
# process start/end times if we have them
# we currently don't support timezones in treeherder, so
# just ignore that when importing/updating the bug to avoid
# a ValueError (though by default the text log summaries
# we produce should have time expressed in UTC anyway)
time_kwargs = {}
for tkey in ('started', 'finished'):
if step.get(tkey):
time_kwargs[tkey] = dateutil.parser.parse(
step[tkey], ignoretz=True)
log_step = TextLogStep.objects.create(
job=job,
started_line_number=step['started_linenumber'],
finished_line_number=step['finished_linenumber'],
name=name,
result=result_map[step['result']],
**time_kwargs)
if step.get('errors'):
for error in step['errors']:
TextLogError.objects.create(
step=log_step,
line_number=error['linenumber'],
line=astral_filter(error['line']))
# get error summary immediately (to warm the cache)
error_summary.get_error_summary(job) | [
"def",
"store_text_log_summary_artifact",
"(",
"job",
",",
"text_log_summary_artifact",
")",
":",
"step_data",
"=",
"json",
".",
"loads",
"(",
"text_log_summary_artifact",
"[",
"'blob'",
"]",
")",
"[",
"'step_data'",
"]",
"result_map",
"=",
"{",
"v",
":",
"k",
... | Store the contents of the text log summary artifact | [
"Store",
"the",
"contents",
"of",
"the",
"text",
"log",
"summary",
"artifact"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/artifact.py#L48-L85 | train | 205,536 |
mozilla/treeherder | treeherder/etl/artifact.py | serialize_artifact_json_blobs | def serialize_artifact_json_blobs(artifacts):
"""
Ensure that JSON artifact blobs passed as dicts are converted to JSON
"""
for artifact in artifacts:
blob = artifact['blob']
if (artifact['type'].lower() == 'json' and
not isinstance(blob, str)):
artifact['blob'] = json.dumps(blob)
return artifacts | python | def serialize_artifact_json_blobs(artifacts):
"""
Ensure that JSON artifact blobs passed as dicts are converted to JSON
"""
for artifact in artifacts:
blob = artifact['blob']
if (artifact['type'].lower() == 'json' and
not isinstance(blob, str)):
artifact['blob'] = json.dumps(blob)
return artifacts | [
"def",
"serialize_artifact_json_blobs",
"(",
"artifacts",
")",
":",
"for",
"artifact",
"in",
"artifacts",
":",
"blob",
"=",
"artifact",
"[",
"'blob'",
"]",
"if",
"(",
"artifact",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"==",
"'json'",
"and",
"not",
"... | Ensure that JSON artifact blobs passed as dicts are converted to JSON | [
"Ensure",
"that",
"JSON",
"artifact",
"blobs",
"passed",
"as",
"dicts",
"are",
"converted",
"to",
"JSON"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/artifact.py#L140-L150 | train | 205,537 |
mozilla/treeherder | treeherder/client/thclient/client.py | TreeherderClient.get_option_collection_hash | def get_option_collection_hash(self):
"""
Gets option collection hash, a mapping of hash values to build properties
Returns a dictionary with the following structure:
{
hashkey1: [ { key: value }, { key: value }, ... ],
hashkey2: [ { key: value }, { key: value }, ... ],
...
}
"""
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT)
ret = {}
for result in resp:
ret[result['option_collection_hash']] = result['options']
return ret | python | def get_option_collection_hash(self):
"""
Gets option collection hash, a mapping of hash values to build properties
Returns a dictionary with the following structure:
{
hashkey1: [ { key: value }, { key: value }, ... ],
hashkey2: [ { key: value }, { key: value }, ... ],
...
}
"""
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT)
ret = {}
for result in resp:
ret[result['option_collection_hash']] = result['options']
return ret | [
"def",
"get_option_collection_hash",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"_get_json",
"(",
"self",
".",
"OPTION_COLLECTION_HASH_ENDPOINT",
")",
"ret",
"=",
"{",
"}",
"for",
"result",
"in",
"resp",
":",
"ret",
"[",
"result",
"[",
"'option_collect... | Gets option collection hash, a mapping of hash values to build properties
Returns a dictionary with the following structure:
{
hashkey1: [ { key: value }, { key: value }, ... ],
hashkey2: [ { key: value }, { key: value }, ... ],
...
} | [
"Gets",
"option",
"collection",
"hash",
"a",
"mapping",
"of",
"hash",
"values",
"to",
"build",
"properties"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/client.py#L85-L102 | train | 205,538 |
mozilla/treeherder | treeherder/client/thclient/client.py | TreeherderClient.get_pushes | def get_pushes(self, project, **params):
"""
Gets pushes from project, filtered by parameters
By default this method will just return the latest 10 pushes (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.PUSH_ENDPOINT, project, **params) | python | def get_pushes(self, project, **params):
"""
Gets pushes from project, filtered by parameters
By default this method will just return the latest 10 pushes (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.PUSH_ENDPOINT, project, **params) | [
"def",
"get_pushes",
"(",
"self",
",",
"project",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"_get_json_list",
"(",
"self",
".",
"PUSH_ENDPOINT",
",",
"project",
",",
"*",
"*",
"params",
")"
] | Gets pushes from project, filtered by parameters
By default this method will just return the latest 10 pushes (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results | [
"Gets",
"pushes",
"from",
"project",
"filtered",
"by",
"parameters"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/client.py#L131-L140 | train | 205,539 |
mozilla/treeherder | treeherder/client/thclient/client.py | TreeherderClient.get_jobs | def get_jobs(self, project, **params):
"""
Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.JOBS_ENDPOINT, project, **params) | python | def get_jobs(self, project, **params):
"""
Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.JOBS_ENDPOINT, project, **params) | [
"def",
"get_jobs",
"(",
"self",
",",
"project",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"_get_json_list",
"(",
"self",
".",
"JOBS_ENDPOINT",
",",
"project",
",",
"*",
"*",
"params",
")"
] | Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results | [
"Gets",
"jobs",
"from",
"project",
"filtered",
"by",
"parameters"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/client.py#L142-L149 | train | 205,540 |
mozilla/treeherder | treeherder/client/thclient/client.py | TreeherderClient.get_job_log_url | def get_job_log_url(self, project, **params):
"""
Gets job log url, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json(self.JOB_LOG_URL_ENDPOINT, project,
**params) | python | def get_job_log_url(self, project, **params):
"""
Gets job log url, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json(self.JOB_LOG_URL_ENDPOINT, project,
**params) | [
"def",
"get_job_log_url",
"(",
"self",
",",
"project",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"_get_json",
"(",
"self",
".",
"JOB_LOG_URL_ENDPOINT",
",",
"project",
",",
"*",
"*",
"params",
")"
] | Gets job log url, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results | [
"Gets",
"job",
"log",
"url",
"filtered",
"by",
"parameters"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/client/thclient/client.py#L164-L172 | train | 205,541 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | all_documents | def all_documents(index=INDEX_NAME):
"""
Get all documents from the given index.
Returns full Elasticsearch objects so you can get metadata too.
"""
query = {
'query': {
'match_all': {}
}
}
for result in raw_query(query, index=index):
yield result | python | def all_documents(index=INDEX_NAME):
"""
Get all documents from the given index.
Returns full Elasticsearch objects so you can get metadata too.
"""
query = {
'query': {
'match_all': {}
}
}
for result in raw_query(query, index=index):
yield result | [
"def",
"all_documents",
"(",
"index",
"=",
"INDEX_NAME",
")",
":",
"query",
"=",
"{",
"'query'",
":",
"{",
"'match_all'",
":",
"{",
"}",
"}",
"}",
"for",
"result",
"in",
"raw_query",
"(",
"query",
",",
"index",
"=",
"index",
")",
":",
"yield",
"resul... | Get all documents from the given index.
Returns full Elasticsearch objects so you can get metadata too. | [
"Get",
"all",
"documents",
"from",
"the",
"given",
"index",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L18-L30 | train | 205,542 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | bulk | def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index'):
"""
Wrapper of elasticsearch's bulk method
Converts an interable of models to document operations and submits them to
Elasticsearch. Returns a count of operations when done.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
actions = compact(dict_to_op(
to_dict(model),
index_name=INDEX_NAME,
doc_type=DOC_TYPE,
op_type=action,
) for model in iterable)
# fail fast if there are no actions
if not actions:
return 0
items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index)
return items | python | def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index'):
"""
Wrapper of elasticsearch's bulk method
Converts an interable of models to document operations and submits them to
Elasticsearch. Returns a count of operations when done.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
"""
actions = compact(dict_to_op(
to_dict(model),
index_name=INDEX_NAME,
doc_type=DOC_TYPE,
op_type=action,
) for model in iterable)
# fail fast if there are no actions
if not actions:
return 0
items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index)
return items | [
"def",
"bulk",
"(",
"iterable",
",",
"index",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
",",
"action",
"=",
"'index'",
")",
":",
"actions",
"=",
"compact",
"(",
"dict_to_op",
"(",
"to_dict",
"(",
"model",
")",
",",
"index_name",
"=",
"INDEX_NA... | Wrapper of elasticsearch's bulk method
Converts an interable of models to document operations and submits them to
Elasticsearch. Returns a count of operations when done.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.bulk
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html | [
"Wrapper",
"of",
"elasticsearch",
"s",
"bulk",
"method"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L33-L56 | train | 205,543 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | count_index | def count_index(index=INDEX_NAME):
"""
Return a document count for the given index.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.count
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html
"""
refresh_index() # Refresh the index so we can get a correct count
query = {
'query': {
'match_all': {}
}
}
result = es_conn.count(index=index, doc_type=DOC_TYPE, body=query)
return result['count'] | python | def count_index(index=INDEX_NAME):
"""
Return a document count for the given index.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.count
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html
"""
refresh_index() # Refresh the index so we can get a correct count
query = {
'query': {
'match_all': {}
}
}
result = es_conn.count(index=index, doc_type=DOC_TYPE, body=query)
return result['count'] | [
"def",
"count_index",
"(",
"index",
"=",
"INDEX_NAME",
")",
":",
"refresh_index",
"(",
")",
"# Refresh the index so we can get a correct count",
"query",
"=",
"{",
"'query'",
":",
"{",
"'match_all'",
":",
"{",
"}",
"}",
"}",
"result",
"=",
"es_conn",
".",
"cou... | Return a document count for the given index.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.count
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html | [
"Return",
"a",
"document",
"count",
"for",
"the",
"given",
"index",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L59-L74 | train | 205,544 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | get_document | def get_document(id, index=INDEX_NAME, doc_type=DOC_TYPE, **kwargs):
"""
Thin wrapper to get a single document by ID.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
"""
result = es_conn.get(index=index, doc_type=doc_type, id=id, **kwargs)
return result['_source'] | python | def get_document(id, index=INDEX_NAME, doc_type=DOC_TYPE, **kwargs):
"""
Thin wrapper to get a single document by ID.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
"""
result = es_conn.get(index=index, doc_type=doc_type, id=id, **kwargs)
return result['_source'] | [
"def",
"get_document",
"(",
"id",
",",
"index",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"es_conn",
".",
"get",
"(",
"index",
"=",
"index",
",",
"doc_type",
"=",
"doc_type",
",",
"id",
"="... | Thin wrapper to get a single document by ID.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html | [
"Thin",
"wrapper",
"to",
"get",
"a",
"single",
"document",
"by",
"ID",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L77-L85 | train | 205,545 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | index | def index(obj, index=INDEX_NAME, doc_type=DOC_TYPE):
"""
Index the given document.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.index
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
"""
doc = to_dict(obj)
if doc is None:
return
id = doc.pop('id')
return es_conn.index(index, doc_type, doc, id=id) | python | def index(obj, index=INDEX_NAME, doc_type=DOC_TYPE):
"""
Index the given document.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.index
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
"""
doc = to_dict(obj)
if doc is None:
return
id = doc.pop('id')
return es_conn.index(index, doc_type, doc, id=id) | [
"def",
"index",
"(",
"obj",
",",
"index",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
")",
":",
"doc",
"=",
"to_dict",
"(",
"obj",
")",
"if",
"doc",
"is",
"None",
":",
"return",
"id",
"=",
"doc",
".",
"pop",
"(",
"'id'",
")",
"return",
"... | Index the given document.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.index
https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html | [
"Index",
"the",
"given",
"document",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L88-L102 | train | 205,546 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | raw_query | def raw_query(query, index=INDEX_NAME, doc_type=DOC_TYPE):
"""
Thin wrapper of the search function to provide useful defaults
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
"""
result = es_conn.search(index=index, doc_type=DOC_TYPE, body=query)
return result['hits']['hits'] | python | def raw_query(query, index=INDEX_NAME, doc_type=DOC_TYPE):
"""
Thin wrapper of the search function to provide useful defaults
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
"""
result = es_conn.search(index=index, doc_type=DOC_TYPE, body=query)
return result['hits']['hits'] | [
"def",
"raw_query",
"(",
"query",
",",
"index",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
")",
":",
"result",
"=",
"es_conn",
".",
"search",
"(",
"index",
"=",
"index",
",",
"doc_type",
"=",
"DOC_TYPE",
",",
"body",
"=",
"query",
")",
"retur... | Thin wrapper of the search function to provide useful defaults
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html | [
"Thin",
"wrapper",
"of",
"the",
"search",
"function",
"to",
"provide",
"useful",
"defaults"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L105-L113 | train | 205,547 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | reinit_index | def reinit_index(index=INDEX_NAME):
"""
Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
"""
es_conn.indices.delete(index, ignore=404)
try:
es_conn.indices.create(index, INDEX_SETTINGS.get(index, None))
except TransportError as e:
raise Exception('Failed to created index, got: {}'.format(e.error)) | python | def reinit_index(index=INDEX_NAME):
"""
Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
"""
es_conn.indices.delete(index, ignore=404)
try:
es_conn.indices.create(index, INDEX_SETTINGS.get(index, None))
except TransportError as e:
raise Exception('Failed to created index, got: {}'.format(e.error)) | [
"def",
"reinit_index",
"(",
"index",
"=",
"INDEX_NAME",
")",
":",
"es_conn",
".",
"indices",
".",
"delete",
"(",
"index",
",",
"ignore",
"=",
"404",
")",
"try",
":",
"es_conn",
".",
"indices",
".",
"create",
"(",
"index",
",",
"INDEX_SETTINGS",
".",
"g... | Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html | [
"Delete",
"and",
"then",
"initialise",
"the",
"given",
"index",
"name"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L127-L144 | train | 205,548 |
mozilla/treeherder | treeherder/services/elasticsearch/helpers.py | search | def search(query, index=INDEX_NAME, doc_type=DOC_TYPE):
"""
Thin wrapper of the main query function to provide just the resulting objects
"""
results = raw_query(query, index=index, doc_type=doc_type)
return [r['_source'] for r in results] | python | def search(query, index=INDEX_NAME, doc_type=DOC_TYPE):
"""
Thin wrapper of the main query function to provide just the resulting objects
"""
results = raw_query(query, index=index, doc_type=doc_type)
return [r['_source'] for r in results] | [
"def",
"search",
"(",
"query",
",",
"index",
"=",
"INDEX_NAME",
",",
"doc_type",
"=",
"DOC_TYPE",
")",
":",
"results",
"=",
"raw_query",
"(",
"query",
",",
"index",
"=",
"index",
",",
"doc_type",
"=",
"doc_type",
")",
"return",
"[",
"r",
"[",
"'_source... | Thin wrapper of the main query function to provide just the resulting objects | [
"Thin",
"wrapper",
"of",
"the",
"main",
"query",
"function",
"to",
"provide",
"just",
"the",
"resulting",
"objects"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/services/elasticsearch/helpers.py#L147-L152 | train | 205,549 |
mozilla/treeherder | treeherder/seta/models.py | JobPriorityManager.clear_expiration_field_for_expired_jobs | def clear_expiration_field_for_expired_jobs(self):
'''Set the expiration date of every job that has expired.'''
# Only select rows where there is an expiration date set
for job in JobPriority.objects.filter(expiration_date__isnull=False):
if job.has_expired():
job.expiration_date = None
job.save() | python | def clear_expiration_field_for_expired_jobs(self):
'''Set the expiration date of every job that has expired.'''
# Only select rows where there is an expiration date set
for job in JobPriority.objects.filter(expiration_date__isnull=False):
if job.has_expired():
job.expiration_date = None
job.save() | [
"def",
"clear_expiration_field_for_expired_jobs",
"(",
"self",
")",
":",
"# Only select rows where there is an expiration date set",
"for",
"job",
"in",
"JobPriority",
".",
"objects",
".",
"filter",
"(",
"expiration_date__isnull",
"=",
"False",
")",
":",
"if",
"job",
".... | Set the expiration date of every job that has expired. | [
"Set",
"the",
"expiration",
"date",
"of",
"every",
"job",
"that",
"has",
"expired",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/models.py#L13-L19 | train | 205,550 |
mozilla/treeherder | treeherder/seta/models.py | JobPriorityManager.adjust_jobs_priority | def adjust_jobs_priority(self, high_value_jobs, priority=1):
"""For every job priority determine if we need to increase or decrease the job priority
Currently, high value jobs have a priority of 1 and a timeout of 0.
"""
# Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100
# for jobs update via load_preseed) are updated
for jp in JobPriority.objects.filter(expiration_date__isnull=True):
if jp.unique_identifier() not in high_value_jobs:
if jp.priority != SETA_LOW_VALUE_PRIORITY:
logger.warning('Decreasing priority of %s', jp.unique_identifier())
jp.priority = SETA_LOW_VALUE_PRIORITY
jp.save(update_fields=['priority'])
elif jp.priority != priority:
logger.warning('Increasing priority of %s', jp.unique_identifier())
jp.priority = priority
jp.save(update_fields=['priority']) | python | def adjust_jobs_priority(self, high_value_jobs, priority=1):
"""For every job priority determine if we need to increase or decrease the job priority
Currently, high value jobs have a priority of 1 and a timeout of 0.
"""
# Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100
# for jobs update via load_preseed) are updated
for jp in JobPriority.objects.filter(expiration_date__isnull=True):
if jp.unique_identifier() not in high_value_jobs:
if jp.priority != SETA_LOW_VALUE_PRIORITY:
logger.warning('Decreasing priority of %s', jp.unique_identifier())
jp.priority = SETA_LOW_VALUE_PRIORITY
jp.save(update_fields=['priority'])
elif jp.priority != priority:
logger.warning('Increasing priority of %s', jp.unique_identifier())
jp.priority = priority
jp.save(update_fields=['priority']) | [
"def",
"adjust_jobs_priority",
"(",
"self",
",",
"high_value_jobs",
",",
"priority",
"=",
"1",
")",
":",
"# Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100",
"# for jobs update via load_preseed) are updated",
"for",
"jp",
"in",
"JobPriority... | For every job priority determine if we need to increase or decrease the job priority
Currently, high value jobs have a priority of 1 and a timeout of 0. | [
"For",
"every",
"job",
"priority",
"determine",
"if",
"we",
"need",
"to",
"increase",
"or",
"decrease",
"the",
"job",
"priority"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/models.py#L21-L37 | train | 205,551 |
mozilla/treeherder | treeherder/webapp/api/jobs.py | JobsViewSet._get_job_list_response | def _get_job_list_response(self, job_qs, offset, count, return_type):
'''
custom method to serialize + format jobs information
It's worth doing this big ugly thing (as opposed to using
the django rest framework serializer or whatever) as
this function is often in the critical path
'''
option_collection_map = OptionCollection.objects.get_option_collection_map()
results = []
for values in job_qs[offset:(offset+count)].values_list(
*[pq[1] for pq in self._property_query_mapping]):
platform_option = option_collection_map.get(
values[self._option_collection_hash_idx],
"")
# some values need to be transformed
values = list(values)
for (i, _) in enumerate(values):
func = self._property_query_mapping[i][2]
if func:
values[i] = func(values[i])
# append results differently depending on if we are returning
# a dictionary or a list
if return_type == 'dict':
results.append(dict(zip(
[pq[0] for pq in self._property_query_mapping] +
['platform_option'],
values + [platform_option])))
else:
results.append(values + [platform_option])
response_dict = {
'results': results
}
if return_type == 'list':
response_dict.update({
'job_property_names': [pq[0] for pq in self._property_query_mapping] + ['platform_option']
})
return response_dict | python | def _get_job_list_response(self, job_qs, offset, count, return_type):
'''
custom method to serialize + format jobs information
It's worth doing this big ugly thing (as opposed to using
the django rest framework serializer or whatever) as
this function is often in the critical path
'''
option_collection_map = OptionCollection.objects.get_option_collection_map()
results = []
for values in job_qs[offset:(offset+count)].values_list(
*[pq[1] for pq in self._property_query_mapping]):
platform_option = option_collection_map.get(
values[self._option_collection_hash_idx],
"")
# some values need to be transformed
values = list(values)
for (i, _) in enumerate(values):
func = self._property_query_mapping[i][2]
if func:
values[i] = func(values[i])
# append results differently depending on if we are returning
# a dictionary or a list
if return_type == 'dict':
results.append(dict(zip(
[pq[0] for pq in self._property_query_mapping] +
['platform_option'],
values + [platform_option])))
else:
results.append(values + [platform_option])
response_dict = {
'results': results
}
if return_type == 'list':
response_dict.update({
'job_property_names': [pq[0] for pq in self._property_query_mapping] + ['platform_option']
})
return response_dict | [
"def",
"_get_job_list_response",
"(",
"self",
",",
"job_qs",
",",
"offset",
",",
"count",
",",
"return_type",
")",
":",
"option_collection_map",
"=",
"OptionCollection",
".",
"objects",
".",
"get_option_collection_map",
"(",
")",
"results",
"=",
"[",
"]",
"for",... | custom method to serialize + format jobs information
It's worth doing this big ugly thing (as opposed to using
the django rest framework serializer or whatever) as
this function is often in the critical path | [
"custom",
"method",
"to",
"serialize",
"+",
"format",
"jobs",
"information"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/jobs.py#L156-L195 | train | 205,552 |
mozilla/treeherder | treeherder/webapp/api/jobs.py | JobsViewSet.retrieve | def retrieve(self, request, project, pk=None):
"""
GET method implementation for detail view
Return a single job with log_references and
artifact names and links to the artifact blobs.
"""
try:
job = Job.objects.select_related(
*self._default_select_related + ['taskcluster_metadata']).get(
repository__name=project, id=pk)
except Job.DoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
resp = serializers.JobSerializer(job, read_only=True).data
resp["resource_uri"] = reverse("jobs-detail",
kwargs={"project": project, "pk": pk})
resp["logs"] = []
for (name, url) in JobLog.objects.filter(job=job).values_list(
'name', 'url'):
resp["logs"].append({'name': name, 'url': url})
platform_option = job.get_platform_option()
if platform_option:
resp["platform_option"] = platform_option
try:
resp['taskcluster_metadata'] = {
'task_id': job.taskcluster_metadata.task_id,
'retry_id': job.taskcluster_metadata.retry_id
}
except ObjectDoesNotExist:
pass
status_map = {k: v for k, v in Job.AUTOCLASSIFY_STATUSES}
resp["autoclassify_status"] = status_map[job.autoclassify_status]
return Response(resp) | python | def retrieve(self, request, project, pk=None):
"""
GET method implementation for detail view
Return a single job with log_references and
artifact names and links to the artifact blobs.
"""
try:
job = Job.objects.select_related(
*self._default_select_related + ['taskcluster_metadata']).get(
repository__name=project, id=pk)
except Job.DoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
resp = serializers.JobSerializer(job, read_only=True).data
resp["resource_uri"] = reverse("jobs-detail",
kwargs={"project": project, "pk": pk})
resp["logs"] = []
for (name, url) in JobLog.objects.filter(job=job).values_list(
'name', 'url'):
resp["logs"].append({'name': name, 'url': url})
platform_option = job.get_platform_option()
if platform_option:
resp["platform_option"] = platform_option
try:
resp['taskcluster_metadata'] = {
'task_id': job.taskcluster_metadata.task_id,
'retry_id': job.taskcluster_metadata.retry_id
}
except ObjectDoesNotExist:
pass
status_map = {k: v for k, v in Job.AUTOCLASSIFY_STATUSES}
resp["autoclassify_status"] = status_map[job.autoclassify_status]
return Response(resp) | [
"def",
"retrieve",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"try",
":",
"job",
"=",
"Job",
".",
"objects",
".",
"select_related",
"(",
"*",
"self",
".",
"_default_select_related",
"+",
"[",
"'taskcluster_metadata'",
... | GET method implementation for detail view
Return a single job with log_references and
artifact names and links to the artifact blobs. | [
"GET",
"method",
"implementation",
"for",
"detail",
"view"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/jobs.py#L197-L235 | train | 205,553 |
mozilla/treeherder | treeherder/webapp/api/jobs.py | JobsViewSet.bug_suggestions | def bug_suggestions(self, request, project, pk=None):
"""
Gets a set of bug suggestions for this job
"""
try:
job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(get_error_summary(job)) | python | def bug_suggestions(self, request, project, pk=None):
"""
Gets a set of bug suggestions for this job
"""
try:
job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
return Response(get_error_summary(job)) | [
"def",
"bug_suggestions",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"try",
":",
"job",
"=",
"Job",
".",
"objects",
".",
"get",
"(",
"repository__name",
"=",
"project",
",",
"id",
"=",
"pk",
")",
"except",
"ObjectD... | Gets a set of bug suggestions for this job | [
"Gets",
"a",
"set",
"of",
"bug",
"suggestions",
"for",
"this",
"job"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/jobs.py#L344-L353 | train | 205,554 |
mozilla/treeherder | treeherder/webapp/api/jobs.py | JobsViewSet.similar_jobs | def similar_jobs(self, request, project, pk=None):
"""
Get a list of jobs similar to the one selected.
"""
try:
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response({
"detail": "No project with name {}".format(project)
}, status=HTTP_404_NOT_FOUND)
try:
job = Job.objects.get(repository=repository, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND)
filter_params = request.query_params.copy()
try:
offset = int(filter_params.get("offset", 0))
# we don't need a big page size on this endoint,
# let's cap it to 50 elements
count = int(filter_params.get("count", 50))
except ValueError:
return Response("Invalid value for offset or count",
status=HTTP_400_BAD_REQUEST)
return_type = filter_params.get("return_type", "dict").lower()
jobs = JobFilter({k: v for (k, v) in filter_params.items()},
queryset=Job.objects.filter(
job_type_id=job.job_type_id,
repository=repository).exclude(
id=job.id).select_related(
*self._default_select_related)).qs
# similar jobs we want in descending order from most recent
jobs = jobs.order_by('-start_time')
response_body = self._get_job_list_response(jobs, offset, count,
return_type)
response_body["meta"] = dict(offset=offset, count=count,
repository=project)
return Response(response_body) | python | def similar_jobs(self, request, project, pk=None):
"""
Get a list of jobs similar to the one selected.
"""
try:
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response({
"detail": "No project with name {}".format(project)
}, status=HTTP_404_NOT_FOUND)
try:
job = Job.objects.get(repository=repository, id=pk)
except ObjectDoesNotExist:
return Response("No job with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND)
filter_params = request.query_params.copy()
try:
offset = int(filter_params.get("offset", 0))
# we don't need a big page size on this endoint,
# let's cap it to 50 elements
count = int(filter_params.get("count", 50))
except ValueError:
return Response("Invalid value for offset or count",
status=HTTP_400_BAD_REQUEST)
return_type = filter_params.get("return_type", "dict").lower()
jobs = JobFilter({k: v for (k, v) in filter_params.items()},
queryset=Job.objects.filter(
job_type_id=job.job_type_id,
repository=repository).exclude(
id=job.id).select_related(
*self._default_select_related)).qs
# similar jobs we want in descending order from most recent
jobs = jobs.order_by('-start_time')
response_body = self._get_job_list_response(jobs, offset, count,
return_type)
response_body["meta"] = dict(offset=offset, count=count,
repository=project)
return Response(response_body) | [
"def",
"similar_jobs",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"try",
":",
"repository",
"=",
"Repository",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"project",
")",
"except",
"Repository",
".",
"DoesNotExist",
... | Get a list of jobs similar to the one selected. | [
"Get",
"a",
"list",
"of",
"jobs",
"similar",
"to",
"the",
"one",
"selected",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/jobs.py#L356-L401 | train | 205,555 |
mozilla/treeherder | treeherder/log_parser/crossreference.py | structured_iterator | def structured_iterator(failure_lines):
"""Create FailureLine, Tbpl-formatted-string tuples."""
summary = partial(failure_line_summary, TbplFormatter())
for failure_line in failure_lines:
repr_str = summary(failure_line)
if repr_str:
yield failure_line, repr_str
while True:
yield None, None | python | def structured_iterator(failure_lines):
"""Create FailureLine, Tbpl-formatted-string tuples."""
summary = partial(failure_line_summary, TbplFormatter())
for failure_line in failure_lines:
repr_str = summary(failure_line)
if repr_str:
yield failure_line, repr_str
while True:
yield None, None | [
"def",
"structured_iterator",
"(",
"failure_lines",
")",
":",
"summary",
"=",
"partial",
"(",
"failure_line_summary",
",",
"TbplFormatter",
"(",
")",
")",
"for",
"failure_line",
"in",
"failure_lines",
":",
"repr_str",
"=",
"summary",
"(",
"failure_line",
")",
"i... | Create FailureLine, Tbpl-formatted-string tuples. | [
"Create",
"FailureLine",
"Tbpl",
"-",
"formatted",
"-",
"string",
"tuples",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/crossreference.py#L76-L85 | train | 205,556 |
mozilla/treeherder | treeherder/log_parser/crossreference.py | failure_line_summary | def failure_line_summary(formatter, failure_line):
"""
Create a mozlog formatted error summary string from the given failure_line.
Create a string which can be compared to a TextLogError.line string to see
if they match.
"""
if failure_line.action == "test_result":
action = "test_status" if failure_line.subtest is not None else "test_end"
elif failure_line.action == "truncated":
return
else:
action = failure_line.action
try:
mozlog_func = getattr(formatter, action)
except AttributeError:
logger.warning('Unknown mozlog function "%s"', action)
return
formatted_log = mozlog_func(failure_line.to_mozlog_format())
split_log = first(formatted_log.split("\n", 1))
if not split_log:
logger.debug('Failed to split log', formatted_log)
return
return split_log.strip() | python | def failure_line_summary(formatter, failure_line):
"""
Create a mozlog formatted error summary string from the given failure_line.
Create a string which can be compared to a TextLogError.line string to see
if they match.
"""
if failure_line.action == "test_result":
action = "test_status" if failure_line.subtest is not None else "test_end"
elif failure_line.action == "truncated":
return
else:
action = failure_line.action
try:
mozlog_func = getattr(formatter, action)
except AttributeError:
logger.warning('Unknown mozlog function "%s"', action)
return
formatted_log = mozlog_func(failure_line.to_mozlog_format())
split_log = first(formatted_log.split("\n", 1))
if not split_log:
logger.debug('Failed to split log', formatted_log)
return
return split_log.strip() | [
"def",
"failure_line_summary",
"(",
"formatter",
",",
"failure_line",
")",
":",
"if",
"failure_line",
".",
"action",
"==",
"\"test_result\"",
":",
"action",
"=",
"\"test_status\"",
"if",
"failure_line",
".",
"subtest",
"is",
"not",
"None",
"else",
"\"test_end\"",
... | Create a mozlog formatted error summary string from the given failure_line.
Create a string which can be compared to a TextLogError.line string to see
if they match. | [
"Create",
"a",
"mozlog",
"formatted",
"error",
"summary",
"string",
"from",
"the",
"given",
"failure_line",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/crossreference.py#L88-L115 | train | 205,557 |
mozilla/treeherder | treeherder/config/utils.py | get_tls_redis_url | def get_tls_redis_url(redis_url):
"""
Returns the TLS version of a Heroku REDIS_URL string.
Whilst Redis server (like memcached) doesn't natively support TLS, Heroku runs an stunnel
daemon on their Redis instances, which can be connected to directly by Redis clients that
support TLS (avoiding the need for stunnel on the client). The stunnel port is one higher
than the Redis server port, and the informal `rediss://` scheme used to instruct clients
to wrap the connection with TLS.
Will convert 'redis://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8409'
...to: 'rediss://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8410?ssl_cert_reqs=none'
See:
https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel
"""
url = furl(redis_url)
url.port += 1
url.scheme += 's'
# Disable TLS certificate validation (restoring the behaviour of the older redis-py 2.x),
# since for now Heroku Redis uses self-signed certificates:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1510000
url.args['ssl_cert_reqs'] = 'none'
return str(url) | python | def get_tls_redis_url(redis_url):
"""
Returns the TLS version of a Heroku REDIS_URL string.
Whilst Redis server (like memcached) doesn't natively support TLS, Heroku runs an stunnel
daemon on their Redis instances, which can be connected to directly by Redis clients that
support TLS (avoiding the need for stunnel on the client). The stunnel port is one higher
than the Redis server port, and the informal `rediss://` scheme used to instruct clients
to wrap the connection with TLS.
Will convert 'redis://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8409'
...to: 'rediss://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8410?ssl_cert_reqs=none'
See:
https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel
"""
url = furl(redis_url)
url.port += 1
url.scheme += 's'
# Disable TLS certificate validation (restoring the behaviour of the older redis-py 2.x),
# since for now Heroku Redis uses self-signed certificates:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1510000
url.args['ssl_cert_reqs'] = 'none'
return str(url) | [
"def",
"get_tls_redis_url",
"(",
"redis_url",
")",
":",
"url",
"=",
"furl",
"(",
"redis_url",
")",
"url",
".",
"port",
"+=",
"1",
"url",
".",
"scheme",
"+=",
"'s'",
"# Disable TLS certificate validation (restoring the behaviour of the older redis-py 2.x),",
"# since for... | Returns the TLS version of a Heroku REDIS_URL string.
Whilst Redis server (like memcached) doesn't natively support TLS, Heroku runs an stunnel
daemon on their Redis instances, which can be connected to directly by Redis clients that
support TLS (avoiding the need for stunnel on the client). The stunnel port is one higher
than the Redis server port, and the informal `rediss://` scheme used to instruct clients
to wrap the connection with TLS.
Will convert 'redis://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8409'
...to: 'rediss://h:PASSWORD@INSTANCE.compute-1.amazonaws.com:8410?ssl_cert_reqs=none'
See:
https://devcenter.heroku.com/articles/securing-heroku-redis#connecting-directly-to-stunnel | [
"Returns",
"the",
"TLS",
"version",
"of",
"a",
"Heroku",
"REDIS_URL",
"string",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/config/utils.py#L11-L34 | train | 205,558 |
mozilla/treeherder | treeherder/log_parser/artifactbuildercollection.py | ArtifactBuilderCollection.parse | def parse(self):
"""
Iterate over each line of the log, running each parser against it.
Stream lines from the gzip file and run each parser against it,
building the ``artifact`` as we go.
"""
with make_request(self.url, stream=True) as response:
download_size_in_bytes = int(response.headers.get('Content-Length', -1))
# Temporary annotation of log size to help set thresholds in bug 1295997.
newrelic.agent.add_custom_parameter(
'unstructured_log_size',
download_size_in_bytes
)
newrelic.agent.add_custom_parameter(
'unstructured_log_encoding',
response.headers.get('Content-Encoding', 'None')
)
if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)
# Lines must be explicitly decoded since `iter_lines()`` returns bytes by default
# and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline
# characters such as `\u0085` (which can appear in test output) are treated the same
# as `\n` or `\r`, and so split into unwanted additional lines by `iter_lines()`.
for line in response.iter_lines():
for builder in self.builders:
# Using `replace` to prevent malformed unicode (which might possibly exist
# in test message output) from breaking parsing of the rest of the log.
builder.parse_line(line.decode('utf-8', 'replace'))
# gather the artifacts from all builders
for builder in self.builders:
# Run end-of-parsing actions for this parser,
# in case the artifact needs clean-up/summarising.
builder.finish_parse()
name = builder.name
artifact = builder.get_artifact()
if name == 'performance_data' and not artifact[name]:
continue
self.artifacts[name] = artifact | python | def parse(self):
"""
Iterate over each line of the log, running each parser against it.
Stream lines from the gzip file and run each parser against it,
building the ``artifact`` as we go.
"""
with make_request(self.url, stream=True) as response:
download_size_in_bytes = int(response.headers.get('Content-Length', -1))
# Temporary annotation of log size to help set thresholds in bug 1295997.
newrelic.agent.add_custom_parameter(
'unstructured_log_size',
download_size_in_bytes
)
newrelic.agent.add_custom_parameter(
'unstructured_log_encoding',
response.headers.get('Content-Encoding', 'None')
)
if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)
# Lines must be explicitly decoded since `iter_lines()`` returns bytes by default
# and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline
# characters such as `\u0085` (which can appear in test output) are treated the same
# as `\n` or `\r`, and so split into unwanted additional lines by `iter_lines()`.
for line in response.iter_lines():
for builder in self.builders:
# Using `replace` to prevent malformed unicode (which might possibly exist
# in test message output) from breaking parsing of the rest of the log.
builder.parse_line(line.decode('utf-8', 'replace'))
# gather the artifacts from all builders
for builder in self.builders:
# Run end-of-parsing actions for this parser,
# in case the artifact needs clean-up/summarising.
builder.finish_parse()
name = builder.name
artifact = builder.get_artifact()
if name == 'performance_data' and not artifact[name]:
continue
self.artifacts[name] = artifact | [
"def",
"parse",
"(",
"self",
")",
":",
"with",
"make_request",
"(",
"self",
".",
"url",
",",
"stream",
"=",
"True",
")",
"as",
"response",
":",
"download_size_in_bytes",
"=",
"int",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Length'",
",... | Iterate over each line of the log, running each parser against it.
Stream lines from the gzip file and run each parser against it,
building the ``artifact`` as we go. | [
"Iterate",
"over",
"each",
"line",
"of",
"the",
"log",
"running",
"each",
"parser",
"against",
"it",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/artifactbuildercollection.py#L84-L126 | train | 205,559 |
mozilla/treeherder | treeherder/model/models.py | OptionCollection.calculate_hash | def calculate_hash(options):
"""returns an option_collection_hash given a list of options"""
options = sorted(list(options))
sha_hash = sha1()
# equivalent to loop over the options and call sha_hash.update()
sha_hash.update(''.join(options).encode('utf-8'))
return sha_hash.hexdigest() | python | def calculate_hash(options):
"""returns an option_collection_hash given a list of options"""
options = sorted(list(options))
sha_hash = sha1()
# equivalent to loop over the options and call sha_hash.update()
sha_hash.update(''.join(options).encode('utf-8'))
return sha_hash.hexdigest() | [
"def",
"calculate_hash",
"(",
"options",
")",
":",
"options",
"=",
"sorted",
"(",
"list",
"(",
"options",
")",
")",
"sha_hash",
"=",
"sha1",
"(",
")",
"# equivalent to loop over the options and call sha_hash.update()",
"sha_hash",
".",
"update",
"(",
"''",
".",
... | returns an option_collection_hash given a list of options | [
"returns",
"an",
"option_collection_hash",
"given",
"a",
"list",
"of",
"options"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L334-L340 | train | 205,560 |
mozilla/treeherder | treeherder/model/models.py | JobManager.cycle_data | def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
"""
Delete data older than cycle_interval, splitting the target data into
chunks of chunk_size size. Returns the number of result sets deleted
"""
# Retrieve list of jobs to delete
jobs_max_timestamp = datetime.datetime.now() - cycle_interval
jobs_cycled = 0
while True:
jobs_chunk = list(self.filter(repository=repository, submit_time__lt=jobs_max_timestamp)
.values_list('guid', flat=True)[:chunk_size])
if not jobs_chunk:
# no more jobs to cycle, we're done!
return jobs_cycled
# Remove ORM entries for these jobs that don't currently have a
# foreign key relation
lines = FailureLine.objects.filter(job_guid__in=jobs_chunk)
if settings.ELASTICSEARCH_URL:
# To delete the data from elasticsearch we need the document
# id. However selecting all this data can be rather slow, so
# split the job into multiple smaller chunks.
failures = itertools.chain.from_iterable(
chunked_qs(
lines,
chunk_size=chunk_size,
fields=['id', 'test'],
),
)
bulk(failures, action='delete')
lines.delete()
# cycle jobs *after* related data has been deleted, to be sure
# we don't have any orphan data
try:
self.filter(guid__in=jobs_chunk).delete()
except UnicodeDecodeError as e:
# Some TextLogError `line` fields contain invalid Unicode, which causes a
# UnicodeDecodeError since Django's .delete() fetches all fields (even those
# not required for the delete). As such we delete the offending `TextLogError`s
# separately (using only() to prevent pulling in `line`), before trying again.
# This can likely be removed once all pre-Python 3 migration `TextLogError`s
# have expired (check New Relic Insights at that point to confirm). See:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1528710
newrelic.agent.record_custom_event('cycle_data UnicodeDecodeError workaround', {
'exception': str(e),
})
TextLogError.objects.filter(step__job__guid__in=jobs_chunk).only('id').delete()
self.filter(guid__in=jobs_chunk).delete()
jobs_cycled += len(jobs_chunk)
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time) | python | def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
"""
Delete data older than cycle_interval, splitting the target data into
chunks of chunk_size size. Returns the number of result sets deleted
"""
# Retrieve list of jobs to delete
jobs_max_timestamp = datetime.datetime.now() - cycle_interval
jobs_cycled = 0
while True:
jobs_chunk = list(self.filter(repository=repository, submit_time__lt=jobs_max_timestamp)
.values_list('guid', flat=True)[:chunk_size])
if not jobs_chunk:
# no more jobs to cycle, we're done!
return jobs_cycled
# Remove ORM entries for these jobs that don't currently have a
# foreign key relation
lines = FailureLine.objects.filter(job_guid__in=jobs_chunk)
if settings.ELASTICSEARCH_URL:
# To delete the data from elasticsearch we need the document
# id. However selecting all this data can be rather slow, so
# split the job into multiple smaller chunks.
failures = itertools.chain.from_iterable(
chunked_qs(
lines,
chunk_size=chunk_size,
fields=['id', 'test'],
),
)
bulk(failures, action='delete')
lines.delete()
# cycle jobs *after* related data has been deleted, to be sure
# we don't have any orphan data
try:
self.filter(guid__in=jobs_chunk).delete()
except UnicodeDecodeError as e:
# Some TextLogError `line` fields contain invalid Unicode, which causes a
# UnicodeDecodeError since Django's .delete() fetches all fields (even those
# not required for the delete). As such we delete the offending `TextLogError`s
# separately (using only() to prevent pulling in `line`), before trying again.
# This can likely be removed once all pre-Python 3 migration `TextLogError`s
# have expired (check New Relic Insights at that point to confirm). See:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1528710
newrelic.agent.record_custom_event('cycle_data UnicodeDecodeError workaround', {
'exception': str(e),
})
TextLogError.objects.filter(step__job__guid__in=jobs_chunk).only('id').delete()
self.filter(guid__in=jobs_chunk).delete()
jobs_cycled += len(jobs_chunk)
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time) | [
"def",
"cycle_data",
"(",
"self",
",",
"repository",
",",
"cycle_interval",
",",
"chunk_size",
",",
"sleep_time",
")",
":",
"# Retrieve list of jobs to delete",
"jobs_max_timestamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"cycle_interval",
"j... | Delete data older than cycle_interval, splitting the target data into
chunks of chunk_size size. Returns the number of result sets deleted | [
"Delete",
"data",
"older",
"than",
"cycle_interval",
"splitting",
"the",
"target",
"data",
"into",
"chunks",
"of",
"chunk_size",
"size",
".",
"Returns",
"the",
"number",
"of",
"result",
"sets",
"deleted"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L409-L469 | train | 205,561 |
mozilla/treeherder | treeherder/model/models.py | Job.is_fully_verified | def is_fully_verified(self):
"""
Determine if this Job is fully verified based on the state of its Errors.
An Error (TextLogError or FailureLine) is considered Verified once its
related TextLogErrorMetadata has best_is_verified set to True. A Job
is then considered Verified once all its Errors TextLogErrorMetadata
instances are set to True.
"""
unverified_errors = TextLogError.objects.filter(
_metadata__best_is_verified=False,
step__job=self).count()
if unverified_errors:
logger.error("Job %r has unverified TextLogErrors", self)
return False
logger.info("Job %r is fully verified", self)
return True | python | def is_fully_verified(self):
"""
Determine if this Job is fully verified based on the state of its Errors.
An Error (TextLogError or FailureLine) is considered Verified once its
related TextLogErrorMetadata has best_is_verified set to True. A Job
is then considered Verified once all its Errors TextLogErrorMetadata
instances are set to True.
"""
unverified_errors = TextLogError.objects.filter(
_metadata__best_is_verified=False,
step__job=self).count()
if unverified_errors:
logger.error("Job %r has unverified TextLogErrors", self)
return False
logger.info("Job %r is fully verified", self)
return True | [
"def",
"is_fully_verified",
"(",
"self",
")",
":",
"unverified_errors",
"=",
"TextLogError",
".",
"objects",
".",
"filter",
"(",
"_metadata__best_is_verified",
"=",
"False",
",",
"step__job",
"=",
"self",
")",
".",
"count",
"(",
")",
"if",
"unverified_errors",
... | Determine if this Job is fully verified based on the state of its Errors.
An Error (TextLogError or FailureLine) is considered Verified once its
related TextLogErrorMetadata has best_is_verified set to True. A Job
is then considered Verified once all its Errors TextLogErrorMetadata
instances are set to True. | [
"Determine",
"if",
"this",
"Job",
"is",
"fully",
"verified",
"based",
"on",
"the",
"state",
"of",
"its",
"Errors",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L580-L598 | train | 205,562 |
mozilla/treeherder | treeherder/model/models.py | Job.update_after_verification | def update_after_verification(self, user):
"""
Updates a job's state after being verified by a sheriff
"""
if not self.is_fully_verified():
return
classification = 'autoclassified intermittent'
already_classified = (JobNote.objects.filter(job=self)
.exclude(failure_classification__name=classification)
.exists())
if already_classified:
# Don't add an autoclassification note if a Human already
# classified this job.
return
JobNote.create_autoclassify_job_note(job=self, user=user) | python | def update_after_verification(self, user):
"""
Updates a job's state after being verified by a sheriff
"""
if not self.is_fully_verified():
return
classification = 'autoclassified intermittent'
already_classified = (JobNote.objects.filter(job=self)
.exclude(failure_classification__name=classification)
.exists())
if already_classified:
# Don't add an autoclassification note if a Human already
# classified this job.
return
JobNote.create_autoclassify_job_note(job=self, user=user) | [
"def",
"update_after_verification",
"(",
"self",
",",
"user",
")",
":",
"if",
"not",
"self",
".",
"is_fully_verified",
"(",
")",
":",
"return",
"classification",
"=",
"'autoclassified intermittent'",
"already_classified",
"=",
"(",
"JobNote",
".",
"objects",
".",
... | Updates a job's state after being verified by a sheriff | [
"Updates",
"a",
"job",
"s",
"state",
"after",
"being",
"verified",
"by",
"a",
"sheriff"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L600-L617 | train | 205,563 |
mozilla/treeherder | treeherder/model/models.py | Job.get_manual_classification_line | def get_manual_classification_line(self):
"""
If this Job has a single TextLogError line, return that TextLogError.
Some Jobs only have one related [via TextLogStep] TextLogError. This
method checks if this Job is one of those (returning None if not) by:
* checking the number of related TextLogErrors
* counting the number of search results for the single TextLogError
* checking there is a related FailureLine
* checking the related FailureLine is in a given state
If all these checks pass the TextLogError is returned, any failure returns None.
"""
try:
text_log_error = TextLogError.objects.get(step__job=self)
except (TextLogError.DoesNotExist, TextLogError.MultipleObjectsReturned):
return None
# Can this TextLogError be converted into a single "useful search"?
# FIXME: what is the significance of only one search result here?
from treeherder.model.error_summary import get_useful_search_results
search_results = get_useful_search_results(self)
if len(search_results) != 1:
return None
# Check that we have a related FailureLine
failure_line = text_log_error.get_failure_line()
if failure_line is None:
return None
# Check our FailureLine is in a state we expect for
# auto-classification.
if not (failure_line.action == "test_result" and
failure_line.test and
failure_line.status and
failure_line.expected):
return None
return text_log_error | python | def get_manual_classification_line(self):
"""
If this Job has a single TextLogError line, return that TextLogError.
Some Jobs only have one related [via TextLogStep] TextLogError. This
method checks if this Job is one of those (returning None if not) by:
* checking the number of related TextLogErrors
* counting the number of search results for the single TextLogError
* checking there is a related FailureLine
* checking the related FailureLine is in a given state
If all these checks pass the TextLogError is returned, any failure returns None.
"""
try:
text_log_error = TextLogError.objects.get(step__job=self)
except (TextLogError.DoesNotExist, TextLogError.MultipleObjectsReturned):
return None
# Can this TextLogError be converted into a single "useful search"?
# FIXME: what is the significance of only one search result here?
from treeherder.model.error_summary import get_useful_search_results
search_results = get_useful_search_results(self)
if len(search_results) != 1:
return None
# Check that we have a related FailureLine
failure_line = text_log_error.get_failure_line()
if failure_line is None:
return None
# Check our FailureLine is in a state we expect for
# auto-classification.
if not (failure_line.action == "test_result" and
failure_line.test and
failure_line.status and
failure_line.expected):
return None
return text_log_error | [
"def",
"get_manual_classification_line",
"(",
"self",
")",
":",
"try",
":",
"text_log_error",
"=",
"TextLogError",
".",
"objects",
".",
"get",
"(",
"step__job",
"=",
"self",
")",
"except",
"(",
"TextLogError",
".",
"DoesNotExist",
",",
"TextLogError",
".",
"Mu... | If this Job has a single TextLogError line, return that TextLogError.
Some Jobs only have one related [via TextLogStep] TextLogError. This
method checks if this Job is one of those (returning None if not) by:
* checking the number of related TextLogErrors
* counting the number of search results for the single TextLogError
* checking there is a related FailureLine
* checking the related FailureLine is in a given state
If all these checks pass the TextLogError is returned, any failure returns None. | [
"If",
"this",
"Job",
"has",
"a",
"single",
"TextLogError",
"line",
"return",
"that",
"TextLogError",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L619-L657 | train | 205,564 |
mozilla/treeherder | treeherder/model/models.py | JobNote._update_failure_type | def _update_failure_type(self):
"""
Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolved the FailureClassification which has
been denormalised onto Job.
"""
# update the job classification
note = JobNote.objects.filter(job=self.job).order_by('-created').first()
if note:
self.job.failure_classification_id = note.failure_classification.id
else:
self.job.failure_classification_id = FailureClassification.objects.get(name='not classified').id
self.job.save() | python | def _update_failure_type(self):
"""
Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolved the FailureClassification which has
been denormalised onto Job.
"""
# update the job classification
note = JobNote.objects.filter(job=self.job).order_by('-created').first()
if note:
self.job.failure_classification_id = note.failure_classification.id
else:
self.job.failure_classification_id = FailureClassification.objects.get(name='not classified').id
self.job.save() | [
"def",
"_update_failure_type",
"(",
"self",
")",
":",
"# update the job classification",
"note",
"=",
"JobNote",
".",
"objects",
".",
"filter",
"(",
"job",
"=",
"self",
".",
"job",
")",
".",
"order_by",
"(",
"'-created'",
")",
".",
"first",
"(",
")",
"if",... | Updates the failure type of this Note's Job.
Set the linked Job's failure type to that of the most recent JobNote or
set to Not Classified if there are no JobNotes.
This is called when JobNotes are created (via .save()) and deleted (via
.delete()) and is used to resolved the FailureClassification which has
been denormalised onto Job. | [
"Updates",
"the",
"failure",
"type",
"of",
"this",
"Note",
"s",
"Job",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L832-L849 | train | 205,565 |
mozilla/treeherder | treeherder/model/models.py | JobNote._ensure_classification | def _ensure_classification(self):
"""
Ensures a single TextLogError's related bugs have Classifications.
If the linked Job has a single meaningful TextLogError:
- find the bugs currently related to it via a Classification
- find the bugs mapped to the job related to this note
- find the bugs that are mapped but not classified
- link this subset of bugs to Classifications
- if there's only one new bug and no existing ones, verify it
"""
# if this note was automatically filed, don't update the auto-classification information
if not self.user:
return
# if the failure type isn't intermittent, ignore
if self.failure_classification.name not in ["intermittent", "intermittent needs filing"]:
return
# if the linked Job has more than one TextLogError, ignore
text_log_error = self.job.get_manual_classification_line()
if not text_log_error:
return
# evaluate the QuerySet here so it can be used when creating new_bugs below
existing_bugs = list(ClassifiedFailure.objects.filter(error_matches__text_log_error=text_log_error)
.values_list('bug_number', flat=True))
new_bugs = (self.job.bugjobmap_set.exclude(bug_id__in=existing_bugs)
.values_list('bug_id', flat=True))
if not new_bugs:
return
# Create Match instances for each new bug
for bug_number in new_bugs:
classification, _ = ClassifiedFailure.objects.get_or_create(bug_number=bug_number)
text_log_error.create_match("ManualDetector", classification)
# if there's only one new bug and no existing ones, verify it
if len(new_bugs) == 1 and not existing_bugs:
text_log_error.verify_classification(classification) | python | def _ensure_classification(self):
"""
Ensures a single TextLogError's related bugs have Classifications.
If the linked Job has a single meaningful TextLogError:
- find the bugs currently related to it via a Classification
- find the bugs mapped to the job related to this note
- find the bugs that are mapped but not classified
- link this subset of bugs to Classifications
- if there's only one new bug and no existing ones, verify it
"""
# if this note was automatically filed, don't update the auto-classification information
if not self.user:
return
# if the failure type isn't intermittent, ignore
if self.failure_classification.name not in ["intermittent", "intermittent needs filing"]:
return
# if the linked Job has more than one TextLogError, ignore
text_log_error = self.job.get_manual_classification_line()
if not text_log_error:
return
# evaluate the QuerySet here so it can be used when creating new_bugs below
existing_bugs = list(ClassifiedFailure.objects.filter(error_matches__text_log_error=text_log_error)
.values_list('bug_number', flat=True))
new_bugs = (self.job.bugjobmap_set.exclude(bug_id__in=existing_bugs)
.values_list('bug_id', flat=True))
if not new_bugs:
return
# Create Match instances for each new bug
for bug_number in new_bugs:
classification, _ = ClassifiedFailure.objects.get_or_create(bug_number=bug_number)
text_log_error.create_match("ManualDetector", classification)
# if there's only one new bug and no existing ones, verify it
if len(new_bugs) == 1 and not existing_bugs:
text_log_error.verify_classification(classification) | [
"def",
"_ensure_classification",
"(",
"self",
")",
":",
"# if this note was automatically filed, don't update the auto-classification information",
"if",
"not",
"self",
".",
"user",
":",
"return",
"# if the failure type isn't intermittent, ignore",
"if",
"self",
".",
"failure_cla... | Ensures a single TextLogError's related bugs have Classifications.
If the linked Job has a single meaningful TextLogError:
- find the bugs currently related to it via a Classification
- find the bugs mapped to the job related to this note
- find the bugs that are mapped but not classified
- link this subset of bugs to Classifications
- if there's only one new bug and no existing ones, verify it | [
"Ensures",
"a",
"single",
"TextLogError",
"s",
"related",
"bugs",
"have",
"Classifications",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L851-L892 | train | 205,566 |
mozilla/treeherder | treeherder/model/models.py | JobNote.create_autoclassify_job_note | def create_autoclassify_job_note(self, job, user=None):
"""
Create a JobNote, possibly via auto-classification.
Create mappings from the given Job to Bugs via verified Classifications
of this Job.
Also creates a JobNote.
"""
# Only insert bugs for verified failures since these are automatically
# mirrored to ES and the mirroring can't be undone
# TODO: Decide whether this should change now that we're no longer mirroring.
bug_numbers = set(ClassifiedFailure.objects
.filter(best_for_errors__text_log_error__step__job=job,
best_for_errors__best_is_verified=True)
.exclude(bug_number=None)
.exclude(bug_number=0)
.values_list('bug_number', flat=True))
existing_maps = set(BugJobMap.objects.filter(bug_id__in=bug_numbers)
.values_list('bug_id'))
for bug_number in (bug_numbers - existing_maps):
BugJobMap.objects.create(job_id=job.id, bug_id=bug_number, user=user)
# if user is not specified, then this is an autoclassified job note and
# we should mark it as such
classification_name = 'intermittent' if user else 'autoclassified intermittent'
classification = FailureClassification.objects.get(name=classification_name)
return JobNote.objects.create(job=job,
failure_classification=classification,
user=user,
text="") | python | def create_autoclassify_job_note(self, job, user=None):
"""
Create a JobNote, possibly via auto-classification.
Create mappings from the given Job to Bugs via verified Classifications
of this Job.
Also creates a JobNote.
"""
# Only insert bugs for verified failures since these are automatically
# mirrored to ES and the mirroring can't be undone
# TODO: Decide whether this should change now that we're no longer mirroring.
bug_numbers = set(ClassifiedFailure.objects
.filter(best_for_errors__text_log_error__step__job=job,
best_for_errors__best_is_verified=True)
.exclude(bug_number=None)
.exclude(bug_number=0)
.values_list('bug_number', flat=True))
existing_maps = set(BugJobMap.objects.filter(bug_id__in=bug_numbers)
.values_list('bug_id'))
for bug_number in (bug_numbers - existing_maps):
BugJobMap.objects.create(job_id=job.id, bug_id=bug_number, user=user)
# if user is not specified, then this is an autoclassified job note and
# we should mark it as such
classification_name = 'intermittent' if user else 'autoclassified intermittent'
classification = FailureClassification.objects.get(name=classification_name)
return JobNote.objects.create(job=job,
failure_classification=classification,
user=user,
text="") | [
"def",
"create_autoclassify_job_note",
"(",
"self",
",",
"job",
",",
"user",
"=",
"None",
")",
":",
"# Only insert bugs for verified failures since these are automatically",
"# mirrored to ES and the mirroring can't be undone",
"# TODO: Decide whether this should change now that we're no... | Create a JobNote, possibly via auto-classification.
Create mappings from the given Job to Bugs via verified Classifications
of this Job.
Also creates a JobNote. | [
"Create",
"a",
"JobNote",
"possibly",
"via",
"auto",
"-",
"classification",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L911-L944 | train | 205,567 |
mozilla/treeherder | treeherder/model/models.py | FailureLine.unstructured_bugs | def unstructured_bugs(self):
"""
Get bugs that match this line in the Bug Suggestions artifact for this job.
"""
components = self._serialized_components()
if not components:
return []
from treeherder.model.error_summary import get_useful_search_results
job = Job.objects.get(guid=self.job_guid)
rv = []
ids_seen = set()
for item in get_useful_search_results(job):
if all(component in item["search"] for component in components):
for suggestion in itertools.chain(item["bugs"]["open_recent"],
item["bugs"]["all_others"]):
if suggestion["id"] not in ids_seen:
ids_seen.add(suggestion["id"])
rv.append(suggestion)
return rv | python | def unstructured_bugs(self):
"""
Get bugs that match this line in the Bug Suggestions artifact for this job.
"""
components = self._serialized_components()
if not components:
return []
from treeherder.model.error_summary import get_useful_search_results
job = Job.objects.get(guid=self.job_guid)
rv = []
ids_seen = set()
for item in get_useful_search_results(job):
if all(component in item["search"] for component in components):
for suggestion in itertools.chain(item["bugs"]["open_recent"],
item["bugs"]["all_others"]):
if suggestion["id"] not in ids_seen:
ids_seen.add(suggestion["id"])
rv.append(suggestion)
return rv | [
"def",
"unstructured_bugs",
"(",
"self",
")",
":",
"components",
"=",
"self",
".",
"_serialized_components",
"(",
")",
"if",
"not",
"components",
":",
"return",
"[",
"]",
"from",
"treeherder",
".",
"model",
".",
"error_summary",
"import",
"get_useful_search_resu... | Get bugs that match this line in the Bug Suggestions artifact for this job. | [
"Get",
"bugs",
"that",
"match",
"this",
"line",
"in",
"the",
"Bug",
"Suggestions",
"artifact",
"for",
"this",
"job",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1024-L1044 | train | 205,568 |
mozilla/treeherder | treeherder/model/models.py | FailureLine.to_mozlog_format | def to_mozlog_format(self):
"""Convert a FailureLine into a mozlog formatted dictionary."""
data = {
"action": self.action,
"line_number": self.line,
"test": self.test,
"subtest": self.subtest,
"status": self.status,
"expected": self.expected,
"message": self.message,
"signature": self.signature,
"level": self.level,
"stack": self.stack,
"stackwalk_stdout": self.stackwalk_stdout,
"stackwalk_stderr": self.stackwalk_stderr,
}
# Remove empty values
data = {k: v for k, v in data.items() if v}
return data | python | def to_mozlog_format(self):
"""Convert a FailureLine into a mozlog formatted dictionary."""
data = {
"action": self.action,
"line_number": self.line,
"test": self.test,
"subtest": self.subtest,
"status": self.status,
"expected": self.expected,
"message": self.message,
"signature": self.signature,
"level": self.level,
"stack": self.stack,
"stackwalk_stdout": self.stackwalk_stdout,
"stackwalk_stderr": self.stackwalk_stderr,
}
# Remove empty values
data = {k: v for k, v in data.items() if v}
return data | [
"def",
"to_mozlog_format",
"(",
"self",
")",
":",
"data",
"=",
"{",
"\"action\"",
":",
"self",
".",
"action",
",",
"\"line_number\"",
":",
"self",
".",
"line",
",",
"\"test\"",
":",
"self",
".",
"test",
",",
"\"subtest\"",
":",
"self",
".",
"subtest",
... | Convert a FailureLine into a mozlog formatted dictionary. | [
"Convert",
"a",
"FailureLine",
"into",
"a",
"mozlog",
"formatted",
"dictionary",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1081-L1101 | train | 205,569 |
mozilla/treeherder | treeherder/model/models.py | ClassifiedFailure.set_bug | def set_bug(self, bug_number):
"""
Set the bug number of this Classified Failure
If an existing ClassifiedFailure exists with the same bug number
replace this instance with the existing one.
"""
if bug_number == self.bug_number:
return self
other = ClassifiedFailure.objects.filter(bug_number=bug_number).first()
if not other:
self.bug_number = bug_number
self.save(update_fields=['bug_number'])
return self
self.replace_with(other)
return other | python | def set_bug(self, bug_number):
"""
Set the bug number of this Classified Failure
If an existing ClassifiedFailure exists with the same bug number
replace this instance with the existing one.
"""
if bug_number == self.bug_number:
return self
other = ClassifiedFailure.objects.filter(bug_number=bug_number).first()
if not other:
self.bug_number = bug_number
self.save(update_fields=['bug_number'])
return self
self.replace_with(other)
return other | [
"def",
"set_bug",
"(",
"self",
",",
"bug_number",
")",
":",
"if",
"bug_number",
"==",
"self",
".",
"bug_number",
":",
"return",
"self",
"other",
"=",
"ClassifiedFailure",
".",
"objects",
".",
"filter",
"(",
"bug_number",
"=",
"bug_number",
")",
".",
"first... | Set the bug number of this Classified Failure
If an existing ClassifiedFailure exists with the same bug number
replace this instance with the existing one. | [
"Set",
"the",
"bug",
"number",
"of",
"this",
"Classified",
"Failure"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1149-L1167 | train | 205,570 |
mozilla/treeherder | treeherder/model/models.py | ClassifiedFailure.replace_with | def replace_with(self, other):
"""
Replace this instance with the given other.
Deletes stale Match objects and updates related TextLogErrorMetadatas'
best_classifications to point to the given other.
"""
match_ids_to_delete = list(self.update_matches(other))
TextLogErrorMatch.objects.filter(id__in=match_ids_to_delete).delete()
# Update best classifications
self.best_for_errors.update(best_classification=other)
self.delete() | python | def replace_with(self, other):
"""
Replace this instance with the given other.
Deletes stale Match objects and updates related TextLogErrorMetadatas'
best_classifications to point to the given other.
"""
match_ids_to_delete = list(self.update_matches(other))
TextLogErrorMatch.objects.filter(id__in=match_ids_to_delete).delete()
# Update best classifications
self.best_for_errors.update(best_classification=other)
self.delete() | [
"def",
"replace_with",
"(",
"self",
",",
"other",
")",
":",
"match_ids_to_delete",
"=",
"list",
"(",
"self",
".",
"update_matches",
"(",
"other",
")",
")",
"TextLogErrorMatch",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"match_ids_to_delete",
")",
".... | Replace this instance with the given other.
Deletes stale Match objects and updates related TextLogErrorMetadatas'
best_classifications to point to the given other. | [
"Replace",
"this",
"instance",
"with",
"the",
"given",
"other",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1170-L1183 | train | 205,571 |
mozilla/treeherder | treeherder/model/models.py | ClassifiedFailure.update_matches | def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id | python | def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id | [
"def",
"update_matches",
"(",
"self",
",",
"other",
")",
":",
"for",
"match",
"in",
"self",
".",
"error_matches",
".",
"all",
"(",
")",
":",
"other_matches",
"=",
"TextLogErrorMatch",
".",
"objects",
".",
"filter",
"(",
"classified_failure",
"=",
"other",
... | Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure. | [
"Update",
"this",
"instance",
"s",
"Matches",
"to",
"point",
"to",
"the",
"given",
"other",
"s",
"Matches",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1185-L1210 | train | 205,572 |
mozilla/treeherder | treeherder/model/models.py | TextLogError.create_match | def create_match(self, matcher_name, classification):
"""
Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests.
"""
if classification is None:
classification = ClassifiedFailure.objects.create()
TextLogErrorMatch.objects.create(
text_log_error=self,
classified_failure=classification,
matcher_name=matcher_name,
score=1,
) | python | def create_match(self, matcher_name, classification):
"""
Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests.
"""
if classification is None:
classification = ClassifiedFailure.objects.create()
TextLogErrorMatch.objects.create(
text_log_error=self,
classified_failure=classification,
matcher_name=matcher_name,
score=1,
) | [
"def",
"create_match",
"(",
"self",
",",
"matcher_name",
",",
"classification",
")",
":",
"if",
"classification",
"is",
"None",
":",
"classification",
"=",
"ClassifiedFailure",
".",
"objects",
".",
"create",
"(",
")",
"TextLogErrorMatch",
".",
"objects",
".",
... | Create a TextLogErrorMatch instance
Typically used for manual "matches" or tests. | [
"Create",
"a",
"TextLogErrorMatch",
"instance"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1287-L1301 | train | 205,573 |
mozilla/treeherder | treeherder/model/models.py | TextLogError.verify_classification | def verify_classification(self, classification):
"""
Mark the given ClassifiedFailure as verified.
Handles the classification not currently being related to this
TextLogError and no Metadata existing.
"""
if classification not in self.classified_failures.all():
self.create_match("ManualDetector", classification)
# create a TextLogErrorMetadata instance for this TextLogError if it
# doesn't exist. We can't use update_or_create here since OneToOne
# relations don't use an object manager so a missing relation is simply
# None as opposed to RelatedManager.
if self.metadata is None:
TextLogErrorMetadata.objects.create(text_log_error=self,
best_classification=classification,
best_is_verified=True)
else:
self.metadata.best_classification = classification
self.metadata.best_is_verified = True
self.metadata.save(update_fields=['best_classification', 'best_is_verified'])
self.metadata.failure_line.elastic_search_insert()
# Send event to NewRelic when a verifing an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
if not match:
return
newrelic.agent.record_custom_event('user_verified_classification', {
'matcher': match.matcher_name,
'job_id': self.id,
}) | python | def verify_classification(self, classification):
"""
Mark the given ClassifiedFailure as verified.
Handles the classification not currently being related to this
TextLogError and no Metadata existing.
"""
if classification not in self.classified_failures.all():
self.create_match("ManualDetector", classification)
# create a TextLogErrorMetadata instance for this TextLogError if it
# doesn't exist. We can't use update_or_create here since OneToOne
# relations don't use an object manager so a missing relation is simply
# None as opposed to RelatedManager.
if self.metadata is None:
TextLogErrorMetadata.objects.create(text_log_error=self,
best_classification=classification,
best_is_verified=True)
else:
self.metadata.best_classification = classification
self.metadata.best_is_verified = True
self.metadata.save(update_fields=['best_classification', 'best_is_verified'])
self.metadata.failure_line.elastic_search_insert()
# Send event to NewRelic when a verifing an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
if not match:
return
newrelic.agent.record_custom_event('user_verified_classification', {
'matcher': match.matcher_name,
'job_id': self.id,
}) | [
"def",
"verify_classification",
"(",
"self",
",",
"classification",
")",
":",
"if",
"classification",
"not",
"in",
"self",
".",
"classified_failures",
".",
"all",
"(",
")",
":",
"self",
".",
"create_match",
"(",
"\"ManualDetector\"",
",",
"classification",
")",
... | Mark the given ClassifiedFailure as verified.
Handles the classification not currently being related to this
TextLogError and no Metadata existing. | [
"Mark",
"the",
"given",
"ClassifiedFailure",
"as",
"verified",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1303-L1336 | train | 205,574 |
mozilla/treeherder | treeherder/webapp/api/bugzilla.py | BugzillaViewSet.create_bug | def create_bug(self, request):
"""
Create a bugzilla bug with passed params
"""
if settings.BUGFILER_API_KEY is None:
return Response({"failure": "Bugzilla API key not set!"},
status=HTTP_400_BAD_REQUEST)
params = request.data
# Arbitrarily cap crash signatures at 2048 characters to prevent perf issues on bmo
crash_signature = params.get("crash_signature")
if crash_signature and len(crash_signature) > 2048:
return Response({"failure": "Crash signature can't be more than 2048 characters."},
status=HTTP_400_BAD_REQUEST)
description = u"**Filed by:** {}\n{}".format(
request.user.email.replace('@', " [at] "),
params.get("comment", "")
).encode("utf-8")
summary = params.get("summary").encode("utf-8").strip()
url = settings.BUGFILER_API_URL + "/rest/bug"
headers = {
'x-bugzilla-api-key': settings.BUGFILER_API_KEY,
'Accept': 'application/json'
}
data = {
'product': params.get("product"),
'component': params.get("component"),
'summary': summary,
'keywords': params.get("keywords"),
'blocks': params.get("blocks"),
'depends_on': params.get("depends_on"),
'see_also': params.get("see_also"),
'version': params.get("version"),
'cf_crash_signature': params.get("crash_signature"),
'severity': params.get("severity"),
'priority': params.get("priority"),
'description': description,
'comment_tags': "treeherder",
}
try:
response = make_request(url, method='POST', headers=headers, json=data)
except requests.exceptions.HTTPError as e:
try:
message = e.response.json()['message']
except (ValueError, KeyError):
message = e.response.text
return Response({"failure": message}, status=HTTP_400_BAD_REQUEST)
return Response({"success": response.json()["id"]}) | python | def create_bug(self, request):
"""
Create a bugzilla bug with passed params
"""
if settings.BUGFILER_API_KEY is None:
return Response({"failure": "Bugzilla API key not set!"},
status=HTTP_400_BAD_REQUEST)
params = request.data
# Arbitrarily cap crash signatures at 2048 characters to prevent perf issues on bmo
crash_signature = params.get("crash_signature")
if crash_signature and len(crash_signature) > 2048:
return Response({"failure": "Crash signature can't be more than 2048 characters."},
status=HTTP_400_BAD_REQUEST)
description = u"**Filed by:** {}\n{}".format(
request.user.email.replace('@', " [at] "),
params.get("comment", "")
).encode("utf-8")
summary = params.get("summary").encode("utf-8").strip()
url = settings.BUGFILER_API_URL + "/rest/bug"
headers = {
'x-bugzilla-api-key': settings.BUGFILER_API_KEY,
'Accept': 'application/json'
}
data = {
'product': params.get("product"),
'component': params.get("component"),
'summary': summary,
'keywords': params.get("keywords"),
'blocks': params.get("blocks"),
'depends_on': params.get("depends_on"),
'see_also': params.get("see_also"),
'version': params.get("version"),
'cf_crash_signature': params.get("crash_signature"),
'severity': params.get("severity"),
'priority': params.get("priority"),
'description': description,
'comment_tags': "treeherder",
}
try:
response = make_request(url, method='POST', headers=headers, json=data)
except requests.exceptions.HTTPError as e:
try:
message = e.response.json()['message']
except (ValueError, KeyError):
message = e.response.text
return Response({"failure": message}, status=HTTP_400_BAD_REQUEST)
return Response({"success": response.json()["id"]}) | [
"def",
"create_bug",
"(",
"self",
",",
"request",
")",
":",
"if",
"settings",
".",
"BUGFILER_API_KEY",
"is",
"None",
":",
"return",
"Response",
"(",
"{",
"\"failure\"",
":",
"\"Bugzilla API key not set!\"",
"}",
",",
"status",
"=",
"HTTP_400_BAD_REQUEST",
")",
... | Create a bugzilla bug with passed params | [
"Create",
"a",
"bugzilla",
"bug",
"with",
"passed",
"params"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/bugzilla.py#L16-L67 | train | 205,575 |
mozilla/treeherder | treeherder/utils/queryset.py | chunked_qs | def chunked_qs(qs, chunk_size=10000, fields=None):
"""
Generator to iterate over the given QuerySet, chunk_size rows at a time
Usage:
>>> qs = FailureLine.objects.filter(action='test_result')
>>> for qs in chunked_qs(qs, chunk_size=10000, fields=['id', 'message']):
... for line in qs:
... print(line.message)
Note: While Django 2.0 provides chunking [1] via QuerySet.iterator() we
can't make use of this while using MySQL which doesn't support streaming
results.
[1]: https://docs.djangoproject.com/en/2.0/ref/models/querysets/#iterator
"""
min_id = 0
while True:
chunk = qs.filter(id__gt=min_id).order_by('id')
if fields is not None:
chunk = chunk.only(*fields)
# Cast to a list to execute the QuerySet here and allow us to get the
# last ID when updating min_id. We can't use .last() later as it
# ignores the slicing we do.
rows = list(chunk[:chunk_size])
total = len(rows)
if total < 1:
break
yield rows
# update the minimum ID for next iteration
min_id = rows[-1].id | python | def chunked_qs(qs, chunk_size=10000, fields=None):
"""
Generator to iterate over the given QuerySet, chunk_size rows at a time
Usage:
>>> qs = FailureLine.objects.filter(action='test_result')
>>> for qs in chunked_qs(qs, chunk_size=10000, fields=['id', 'message']):
... for line in qs:
... print(line.message)
Note: While Django 2.0 provides chunking [1] via QuerySet.iterator() we
can't make use of this while using MySQL which doesn't support streaming
results.
[1]: https://docs.djangoproject.com/en/2.0/ref/models/querysets/#iterator
"""
min_id = 0
while True:
chunk = qs.filter(id__gt=min_id).order_by('id')
if fields is not None:
chunk = chunk.only(*fields)
# Cast to a list to execute the QuerySet here and allow us to get the
# last ID when updating min_id. We can't use .last() later as it
# ignores the slicing we do.
rows = list(chunk[:chunk_size])
total = len(rows)
if total < 1:
break
yield rows
# update the minimum ID for next iteration
min_id = rows[-1].id | [
"def",
"chunked_qs",
"(",
"qs",
",",
"chunk_size",
"=",
"10000",
",",
"fields",
"=",
"None",
")",
":",
"min_id",
"=",
"0",
"while",
"True",
":",
"chunk",
"=",
"qs",
".",
"filter",
"(",
"id__gt",
"=",
"min_id",
")",
".",
"order_by",
"(",
"'id'",
")"... | Generator to iterate over the given QuerySet, chunk_size rows at a time
Usage:
>>> qs = FailureLine.objects.filter(action='test_result')
>>> for qs in chunked_qs(qs, chunk_size=10000, fields=['id', 'message']):
... for line in qs:
... print(line.message)
Note: While Django 2.0 provides chunking [1] via QuerySet.iterator() we
can't make use of this while using MySQL which doesn't support streaming
results.
[1]: https://docs.djangoproject.com/en/2.0/ref/models/querysets/#iterator | [
"Generator",
"to",
"iterate",
"over",
"the",
"given",
"QuerySet",
"chunk_size",
"rows",
"at",
"a",
"time"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/utils/queryset.py#L1-L39 | train | 205,576 |
mozilla/treeherder | treeherder/utils/queryset.py | chunked_qs_reverse | def chunked_qs_reverse(qs, chunk_size=10000):
"""
Generator to iterate over the given QuerySet in reverse chunk_size rows at a time
Usage:
>>> qs = FailureLine.objects.filter(action='test_result')
>>> for qs in chunked_qs_reverse(qs, chunk_size=100):
... for line in qs:
... print(line.message)
Note: This method is just different enough that it seemed easier to keep
this function separate to chunked_qs.
"""
if not qs:
return
qs = qs.order_by('-id')
# Can't use .only() here in case the query used select_related
max_id = qs.first().id
while True:
chunk = qs.filter(id__lte=max_id) # upper bound of this chunk
rows = chunk[:chunk_size]
if len(rows) < 1:
break
yield rows
# update the maximum ID for next iteration
max_id = max_id - chunk_size | python | def chunked_qs_reverse(qs, chunk_size=10000):
"""
Generator to iterate over the given QuerySet in reverse chunk_size rows at a time
Usage:
>>> qs = FailureLine.objects.filter(action='test_result')
>>> for qs in chunked_qs_reverse(qs, chunk_size=100):
... for line in qs:
... print(line.message)
Note: This method is just different enough that it seemed easier to keep
this function separate to chunked_qs.
"""
if not qs:
return
qs = qs.order_by('-id')
# Can't use .only() here in case the query used select_related
max_id = qs.first().id
while True:
chunk = qs.filter(id__lte=max_id) # upper bound of this chunk
rows = chunk[:chunk_size]
if len(rows) < 1:
break
yield rows
# update the maximum ID for next iteration
max_id = max_id - chunk_size | [
"def",
"chunked_qs_reverse",
"(",
"qs",
",",
"chunk_size",
"=",
"10000",
")",
":",
"if",
"not",
"qs",
":",
"return",
"qs",
"=",
"qs",
".",
"order_by",
"(",
"'-id'",
")",
"# Can't use .only() here in case the query used select_related",
"max_id",
"=",
"qs",
".",
... | Generator to iterate over the given QuerySet in reverse chunk_size rows at a time
Usage:
>>> qs = FailureLine.objects.filter(action='test_result')
>>> for qs in chunked_qs_reverse(qs, chunk_size=100):
... for line in qs:
... print(line.message)
Note: This method is just different enough that it seemed easier to keep
this function separate to chunked_qs. | [
"Generator",
"to",
"iterate",
"over",
"the",
"given",
"QuerySet",
"in",
"reverse",
"chunk_size",
"rows",
"at",
"a",
"time"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/utils/queryset.py#L42-L74 | train | 205,577 |
mozilla/treeherder | treeherder/webapp/api/bug.py | BugJobMapViewSet.create | def create(self, request, project):
"""Add a new relation between a job and a bug."""
job_id = int(request.data['job_id'])
bug_id = int(request.data['bug_id'])
try:
BugJobMap.create(
job_id=job_id,
bug_id=bug_id,
user=request.user,
)
message = "Bug job map saved"
except IntegrityError:
message = "Bug job map skipped: mapping already exists"
return Response({"message": message}) | python | def create(self, request, project):
"""Add a new relation between a job and a bug."""
job_id = int(request.data['job_id'])
bug_id = int(request.data['bug_id'])
try:
BugJobMap.create(
job_id=job_id,
bug_id=bug_id,
user=request.user,
)
message = "Bug job map saved"
except IntegrityError:
message = "Bug job map skipped: mapping already exists"
return Response({"message": message}) | [
"def",
"create",
"(",
"self",
",",
"request",
",",
"project",
")",
":",
"job_id",
"=",
"int",
"(",
"request",
".",
"data",
"[",
"'job_id'",
"]",
")",
"bug_id",
"=",
"int",
"(",
"request",
".",
"data",
"[",
"'bug_id'",
"]",
")",
"try",
":",
"BugJobM... | Add a new relation between a job and a bug. | [
"Add",
"a",
"new",
"relation",
"between",
"a",
"job",
"and",
"a",
"bug",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/bug.py#L14-L29 | train | 205,578 |
mozilla/treeherder | treeherder/webapp/api/bug.py | BugJobMapViewSet.destroy | def destroy(self, request, project, pk=None):
"""
Delete bug-job-map entry. pk is a composite key in the form
bug_id-job_id
"""
job_id, bug_id = map(int, pk.split("-"))
job = Job.objects.get(repository__name=project, id=job_id)
BugJobMap.objects.filter(job=job, bug_id=bug_id).delete()
return Response({"message": "Bug job map deleted"}) | python | def destroy(self, request, project, pk=None):
"""
Delete bug-job-map entry. pk is a composite key in the form
bug_id-job_id
"""
job_id, bug_id = map(int, pk.split("-"))
job = Job.objects.get(repository__name=project, id=job_id)
BugJobMap.objects.filter(job=job, bug_id=bug_id).delete()
return Response({"message": "Bug job map deleted"}) | [
"def",
"destroy",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"job_id",
",",
"bug_id",
"=",
"map",
"(",
"int",
",",
"pk",
".",
"split",
"(",
"\"-\"",
")",
")",
"job",
"=",
"Job",
".",
"objects",
".",
"get",
"(... | Delete bug-job-map entry. pk is a composite key in the form
bug_id-job_id | [
"Delete",
"bug",
"-",
"job",
"-",
"map",
"entry",
".",
"pk",
"is",
"a",
"composite",
"key",
"in",
"the",
"form",
"bug_id",
"-",
"job_id"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/bug.py#L31-L40 | train | 205,579 |
mozilla/treeherder | treeherder/webapp/api/bug.py | BugJobMapViewSet.retrieve | def retrieve(self, request, project, pk=None):
"""
Retrieve a bug-job-map entry. pk is a composite key in the form
bug_id-job_id
"""
job_id, bug_id = map(int, pk.split("-"))
job = Job.objects.get(repository__name=project, id=job_id)
try:
bug_job_map = BugJobMap.objects.get(job=job, bug_id=bug_id)
serializer = BugJobMapSerializer(bug_job_map)
return Response(serializer.data)
except BugJobMap.DoesNotExist:
return Response("Object not found", status=HTTP_404_NOT_FOUND) | python | def retrieve(self, request, project, pk=None):
"""
Retrieve a bug-job-map entry. pk is a composite key in the form
bug_id-job_id
"""
job_id, bug_id = map(int, pk.split("-"))
job = Job.objects.get(repository__name=project, id=job_id)
try:
bug_job_map = BugJobMap.objects.get(job=job, bug_id=bug_id)
serializer = BugJobMapSerializer(bug_job_map)
return Response(serializer.data)
except BugJobMap.DoesNotExist:
return Response("Object not found", status=HTTP_404_NOT_FOUND) | [
"def",
"retrieve",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"job_id",
",",
"bug_id",
"=",
"map",
"(",
"int",
",",
"pk",
".",
"split",
"(",
"\"-\"",
")",
")",
"job",
"=",
"Job",
".",
"objects",
".",
"get",
"... | Retrieve a bug-job-map entry. pk is a composite key in the form
bug_id-job_id | [
"Retrieve",
"a",
"bug",
"-",
"job",
"-",
"map",
"entry",
".",
"pk",
"is",
"a",
"composite",
"key",
"in",
"the",
"form",
"bug_id",
"-",
"job_id"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/bug.py#L42-L55 | train | 205,580 |
mozilla/treeherder | treeherder/etl/text.py | convert_unicode_character_to_ascii_repr | def convert_unicode_character_to_ascii_repr(match_obj):
"""
Converts a matched pattern from a unicode character to an ASCII representation
For example the emoji 🍆 would get converted to the literal <U+01F346>
"""
match = match_obj.group(0)
code_point = ord(match)
hex_repr = hex(code_point)
hex_code_point = hex_repr[2:]
hex_value = hex_code_point.zfill(6).upper()
return '<U+{}>'.format(hex_value) | python | def convert_unicode_character_to_ascii_repr(match_obj):
"""
Converts a matched pattern from a unicode character to an ASCII representation
For example the emoji 🍆 would get converted to the literal <U+01F346>
"""
match = match_obj.group(0)
code_point = ord(match)
hex_repr = hex(code_point)
hex_code_point = hex_repr[2:]
hex_value = hex_code_point.zfill(6).upper()
return '<U+{}>'.format(hex_value) | [
"def",
"convert_unicode_character_to_ascii_repr",
"(",
"match_obj",
")",
":",
"match",
"=",
"match_obj",
".",
"group",
"(",
"0",
")",
"code_point",
"=",
"ord",
"(",
"match",
")",
"hex_repr",
"=",
"hex",
"(",
"code_point",
")",
"hex_code_point",
"=",
"hex_repr"... | Converts a matched pattern from a unicode character to an ASCII representation
For example the emoji 🍆 would get converted to the literal <U+01F346> | [
"Converts",
"a",
"matched",
"pattern",
"from",
"a",
"unicode",
"character",
"to",
"an",
"ASCII",
"representation"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/text.py#L8-L22 | train | 205,581 |
mozilla/treeherder | treeherder/etl/tasks/pushlog_tasks.py | fetch_push_logs | def fetch_push_logs():
"""
Run several fetch_hg_push_log subtasks, one per repository
"""
for repo in Repository.objects.filter(dvcs_type='hg',
active_status="active"):
fetch_hg_push_log.apply_async(
args=(repo.name, repo.url),
queue='pushlog'
) | python | def fetch_push_logs():
"""
Run several fetch_hg_push_log subtasks, one per repository
"""
for repo in Repository.objects.filter(dvcs_type='hg',
active_status="active"):
fetch_hg_push_log.apply_async(
args=(repo.name, repo.url),
queue='pushlog'
) | [
"def",
"fetch_push_logs",
"(",
")",
":",
"for",
"repo",
"in",
"Repository",
".",
"objects",
".",
"filter",
"(",
"dvcs_type",
"=",
"'hg'",
",",
"active_status",
"=",
"\"active\"",
")",
":",
"fetch_hg_push_log",
".",
"apply_async",
"(",
"args",
"=",
"(",
"re... | Run several fetch_hg_push_log subtasks, one per repository | [
"Run",
"several",
"fetch_hg_push_log",
"subtasks",
"one",
"per",
"repository"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/tasks/pushlog_tasks.py#L9-L18 | train | 205,582 |
mozilla/treeherder | treeherder/etl/tasks/pushlog_tasks.py | fetch_hg_push_log | def fetch_hg_push_log(repo_name, repo_url):
"""
Run a HgPushlog etl process
"""
newrelic.agent.add_custom_parameter("repo_name", repo_name)
process = HgPushlogProcess()
process.run(repo_url + '/json-pushes/?full=1&version=2', repo_name) | python | def fetch_hg_push_log(repo_name, repo_url):
"""
Run a HgPushlog etl process
"""
newrelic.agent.add_custom_parameter("repo_name", repo_name)
process = HgPushlogProcess()
process.run(repo_url + '/json-pushes/?full=1&version=2', repo_name) | [
"def",
"fetch_hg_push_log",
"(",
"repo_name",
",",
"repo_url",
")",
":",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"\"repo_name\"",
",",
"repo_name",
")",
"process",
"=",
"HgPushlogProcess",
"(",
")",
"process",
".",
"run",
"(",
"repo_url",
"... | Run a HgPushlog etl process | [
"Run",
"a",
"HgPushlog",
"etl",
"process"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/tasks/pushlog_tasks.py#L22-L28 | train | 205,583 |
mozilla/treeherder | treeherder/etl/tasks/pulse_tasks.py | store_pulse_jobs | def store_pulse_jobs(pulse_job, exchange, routing_key):
"""
Fetches the jobs pending from pulse exchanges and loads them.
"""
newrelic.agent.add_custom_parameter("exchange", exchange)
newrelic.agent.add_custom_parameter("routing_key", routing_key)
JobLoader().process_job(pulse_job) | python | def store_pulse_jobs(pulse_job, exchange, routing_key):
"""
Fetches the jobs pending from pulse exchanges and loads them.
"""
newrelic.agent.add_custom_parameter("exchange", exchange)
newrelic.agent.add_custom_parameter("routing_key", routing_key)
JobLoader().process_job(pulse_job) | [
"def",
"store_pulse_jobs",
"(",
"pulse_job",
",",
"exchange",
",",
"routing_key",
")",
":",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"\"exchange\"",
",",
"exchange",
")",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"\"routing_key... | Fetches the jobs pending from pulse exchanges and loads them. | [
"Fetches",
"the",
"jobs",
"pending",
"from",
"pulse",
"exchanges",
"and",
"loads",
"them",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/tasks/pulse_tasks.py#L12-L19 | train | 205,584 |
mozilla/treeherder | treeherder/etl/tasks/pulse_tasks.py | store_pulse_pushes | def store_pulse_pushes(body, exchange, routing_key):
"""
Fetches the pushes pending from pulse exchanges and loads them.
"""
newrelic.agent.add_custom_parameter("exchange", exchange)
newrelic.agent.add_custom_parameter("routing_key", routing_key)
PushLoader().process(body, exchange) | python | def store_pulse_pushes(body, exchange, routing_key):
"""
Fetches the pushes pending from pulse exchanges and loads them.
"""
newrelic.agent.add_custom_parameter("exchange", exchange)
newrelic.agent.add_custom_parameter("routing_key", routing_key)
PushLoader().process(body, exchange) | [
"def",
"store_pulse_pushes",
"(",
"body",
",",
"exchange",
",",
"routing_key",
")",
":",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"\"exchange\"",
",",
"exchange",
")",
"newrelic",
".",
"agent",
".",
"add_custom_parameter",
"(",
"\"routing_key\""... | Fetches the pushes pending from pulse exchanges and loads them. | [
"Fetches",
"the",
"pushes",
"pending",
"from",
"pulse",
"exchanges",
"and",
"loads",
"them",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/tasks/pulse_tasks.py#L23-L30 | train | 205,585 |
mozilla/treeherder | treeherder/log_parser/tasks.py | store_failure_lines | def store_failure_lines(job_log):
"""Store the failure lines from a log corresponding to the structured
errorsummary file."""
logger.debug('Running store_failure_lines for job %s', job_log.job.id)
failureline.store_failure_lines(job_log) | python | def store_failure_lines(job_log):
"""Store the failure lines from a log corresponding to the structured
errorsummary file."""
logger.debug('Running store_failure_lines for job %s', job_log.job.id)
failureline.store_failure_lines(job_log) | [
"def",
"store_failure_lines",
"(",
"job_log",
")",
":",
"logger",
".",
"debug",
"(",
"'Running store_failure_lines for job %s'",
",",
"job_log",
".",
"job",
".",
"id",
")",
"failureline",
".",
"store_failure_lines",
"(",
"job_log",
")"
] | Store the failure lines from a log corresponding to the structured
errorsummary file. | [
"Store",
"the",
"failure",
"lines",
"from",
"a",
"log",
"corresponding",
"to",
"the",
"structured",
"errorsummary",
"file",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/tasks.py#L102-L106 | train | 205,586 |
mozilla/treeherder | treeherder/webapp/api/note.py | NoteViewSet.retrieve | def retrieve(self, request, project, pk=None):
"""
GET method implementation for a note detail
"""
try:
serializer = JobNoteSerializer(JobNote.objects.get(id=pk))
return Response(serializer.data)
except JobNote.DoesNotExist:
return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND) | python | def retrieve(self, request, project, pk=None):
"""
GET method implementation for a note detail
"""
try:
serializer = JobNoteSerializer(JobNote.objects.get(id=pk))
return Response(serializer.data)
except JobNote.DoesNotExist:
return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND) | [
"def",
"retrieve",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"try",
":",
"serializer",
"=",
"JobNoteSerializer",
"(",
"JobNote",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"pk",
")",
")",
"return",
"Response",
"(... | GET method implementation for a note detail | [
"GET",
"method",
"implementation",
"for",
"a",
"note",
"detail"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/note.py#L17-L26 | train | 205,587 |
mozilla/treeherder | treeherder/webapp/api/note.py | NoteViewSet.create | def create(self, request, project):
"""
POST method implementation
"""
JobNote.objects.create(
job=Job.objects.get(repository__name=project,
id=int(request.data['job_id'])),
failure_classification_id=int(request.data['failure_classification_id']),
user=request.user,
text=request.data.get('text', ''))
return Response(
{'message': 'note stored for job {0}'.format(
request.data['job_id']
)}
) | python | def create(self, request, project):
"""
POST method implementation
"""
JobNote.objects.create(
job=Job.objects.get(repository__name=project,
id=int(request.data['job_id'])),
failure_classification_id=int(request.data['failure_classification_id']),
user=request.user,
text=request.data.get('text', ''))
return Response(
{'message': 'note stored for job {0}'.format(
request.data['job_id']
)}
) | [
"def",
"create",
"(",
"self",
",",
"request",
",",
"project",
")",
":",
"JobNote",
".",
"objects",
".",
"create",
"(",
"job",
"=",
"Job",
".",
"objects",
".",
"get",
"(",
"repository__name",
"=",
"project",
",",
"id",
"=",
"int",
"(",
"request",
".",... | POST method implementation | [
"POST",
"method",
"implementation"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/note.py#L47-L62 | train | 205,588 |
mozilla/treeherder | treeherder/webapp/api/note.py | NoteViewSet.destroy | def destroy(self, request, project, pk=None):
"""
Delete a note entry
"""
try:
note = JobNote.objects.get(id=pk)
note.delete()
return Response({"message": "Note deleted"})
except JobNote.DoesNotExist:
return Response("No note with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND) | python | def destroy(self, request, project, pk=None):
"""
Delete a note entry
"""
try:
note = JobNote.objects.get(id=pk)
note.delete()
return Response({"message": "Note deleted"})
except JobNote.DoesNotExist:
return Response("No note with id: {0}".format(pk),
status=HTTP_404_NOT_FOUND) | [
"def",
"destroy",
"(",
"self",
",",
"request",
",",
"project",
",",
"pk",
"=",
"None",
")",
":",
"try",
":",
"note",
"=",
"JobNote",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"pk",
")",
"note",
".",
"delete",
"(",
")",
"return",
"Response",
"("... | Delete a note entry | [
"Delete",
"a",
"note",
"entry"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/note.py#L64-L74 | train | 205,589 |
mozilla/treeherder | treeherder/log_parser/utils.py | extract_text_log_artifacts | def extract_text_log_artifacts(job_log):
"""Generate a set of artifacts by parsing from the raw text log."""
# parse a log given its url
artifact_bc = ArtifactBuilderCollection(job_log.url)
artifact_bc.parse()
artifact_list = []
for name, artifact in artifact_bc.artifacts.items():
artifact_list.append({
"job_guid": job_log.job.guid,
"name": name,
"type": 'json',
"blob": json.dumps(artifact)
})
return artifact_list | python | def extract_text_log_artifacts(job_log):
"""Generate a set of artifacts by parsing from the raw text log."""
# parse a log given its url
artifact_bc = ArtifactBuilderCollection(job_log.url)
artifact_bc.parse()
artifact_list = []
for name, artifact in artifact_bc.artifacts.items():
artifact_list.append({
"job_guid": job_log.job.guid,
"name": name,
"type": 'json',
"blob": json.dumps(artifact)
})
return artifact_list | [
"def",
"extract_text_log_artifacts",
"(",
"job_log",
")",
":",
"# parse a log given its url",
"artifact_bc",
"=",
"ArtifactBuilderCollection",
"(",
"job_log",
".",
"url",
")",
"artifact_bc",
".",
"parse",
"(",
")",
"artifact_list",
"=",
"[",
"]",
"for",
"name",
",... | Generate a set of artifacts by parsing from the raw text log. | [
"Generate",
"a",
"set",
"of",
"artifacts",
"by",
"parsing",
"from",
"the",
"raw",
"text",
"log",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/utils.py#L15-L31 | train | 205,590 |
mozilla/treeherder | treeherder/log_parser/utils.py | post_log_artifacts | def post_log_artifacts(job_log):
"""Post a list of artifacts to a job."""
logger.debug("Downloading/parsing log for log %s", job_log.id)
try:
artifact_list = extract_text_log_artifacts(job_log)
except LogSizeException as e:
job_log.update_status(JobLog.SKIPPED_SIZE)
logger.warning('Skipping parsing log for %s: %s', job_log.id, e)
return
except Exception as e:
job_log.update_status(JobLog.FAILED)
# Unrecoverable http error (doesn't exist or permission denied).
# Apparently this can happen somewhat often with taskcluster if
# the job fails (bug 1154248), so just warn rather than raising,
# to prevent the noise/load from retrying.
if isinstance(e, HTTPError) and e.response.status_code in (403, 404):
logger.warning("Unable to retrieve log for %s: %s", job_log.id, e)
return
logger.error("Failed to download/parse log for %s: %s", job_log.id, e)
raise
try:
serialized_artifacts = serialize_artifact_json_blobs(artifact_list)
store_job_artifacts(serialized_artifacts)
job_log.update_status(JobLog.PARSED)
logger.debug("Stored artifact for %s %s", job_log.job.repository.name,
job_log.job.id)
except Exception as e:
logger.error("Failed to store parsed artifact for %s: %s", job_log.id, e)
raise | python | def post_log_artifacts(job_log):
"""Post a list of artifacts to a job."""
logger.debug("Downloading/parsing log for log %s", job_log.id)
try:
artifact_list = extract_text_log_artifacts(job_log)
except LogSizeException as e:
job_log.update_status(JobLog.SKIPPED_SIZE)
logger.warning('Skipping parsing log for %s: %s', job_log.id, e)
return
except Exception as e:
job_log.update_status(JobLog.FAILED)
# Unrecoverable http error (doesn't exist or permission denied).
# Apparently this can happen somewhat often with taskcluster if
# the job fails (bug 1154248), so just warn rather than raising,
# to prevent the noise/load from retrying.
if isinstance(e, HTTPError) and e.response.status_code in (403, 404):
logger.warning("Unable to retrieve log for %s: %s", job_log.id, e)
return
logger.error("Failed to download/parse log for %s: %s", job_log.id, e)
raise
try:
serialized_artifacts = serialize_artifact_json_blobs(artifact_list)
store_job_artifacts(serialized_artifacts)
job_log.update_status(JobLog.PARSED)
logger.debug("Stored artifact for %s %s", job_log.job.repository.name,
job_log.job.id)
except Exception as e:
logger.error("Failed to store parsed artifact for %s: %s", job_log.id, e)
raise | [
"def",
"post_log_artifacts",
"(",
"job_log",
")",
":",
"logger",
".",
"debug",
"(",
"\"Downloading/parsing log for log %s\"",
",",
"job_log",
".",
"id",
")",
"try",
":",
"artifact_list",
"=",
"extract_text_log_artifacts",
"(",
"job_log",
")",
"except",
"LogSizeExcep... | Post a list of artifacts to a job. | [
"Post",
"a",
"list",
"of",
"artifacts",
"to",
"a",
"job",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/utils.py#L34-L66 | train | 205,591 |
mozilla/treeherder | treeherder/model/error_summary.py | get_error_summary | def get_error_summary(job):
"""
Create a list of bug suggestions for a job.
Caches the results if there are any.
"""
cache_key = 'error-summary-{}'.format(job.id)
cached_error_summary = cache.get(cache_key)
if cached_error_summary is not None:
return cached_error_summary
# don't cache or do anything if we have no text log errors to get
# results for
errors = TextLogError.objects.filter(step__job=job)
if not errors:
return []
# cache terms generated from error line to save excessive querying
term_cache = {}
error_summary = [bug_suggestions_line(err, term_cache) for err in errors]
cache.set(cache_key, error_summary, BUG_SUGGESTION_CACHE_TIMEOUT)
return error_summary | python | def get_error_summary(job):
"""
Create a list of bug suggestions for a job.
Caches the results if there are any.
"""
cache_key = 'error-summary-{}'.format(job.id)
cached_error_summary = cache.get(cache_key)
if cached_error_summary is not None:
return cached_error_summary
# don't cache or do anything if we have no text log errors to get
# results for
errors = TextLogError.objects.filter(step__job=job)
if not errors:
return []
# cache terms generated from error line to save excessive querying
term_cache = {}
error_summary = [bug_suggestions_line(err, term_cache) for err in errors]
cache.set(cache_key, error_summary, BUG_SUGGESTION_CACHE_TIMEOUT)
return error_summary | [
"def",
"get_error_summary",
"(",
"job",
")",
":",
"cache_key",
"=",
"'error-summary-{}'",
".",
"format",
"(",
"job",
".",
"id",
")",
"cached_error_summary",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"cached_error_summary",
"is",
"not",
"None",
"... | Create a list of bug suggestions for a job.
Caches the results if there are any. | [
"Create",
"a",
"list",
"of",
"bug",
"suggestions",
"for",
"a",
"job",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/error_summary.py#L25-L48 | train | 205,592 |
mozilla/treeherder | treeherder/model/error_summary.py | get_error_search_term | def get_error_search_term(error_line):
"""
Generate a search term from the given error_line string.
Attempt to build a search term that will yield meaningful results when used
in a MySQL FTS query.
"""
if not error_line:
return None
# This is strongly inspired by
# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73
tokens = error_line.split(" | ")
search_term = None
if len(tokens) >= 3:
# If this is process output then discard the token with the PID
if len(tokens) > 3 and OUTPUT_RE.match(tokens[0]):
tokens = tokens[1:]
# it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
test_name_or_path = tokens[1]
message = tokens[2]
# Leak failure messages are of the form:
# leakcheck | .*\d+ bytes leaked (Object-1, Object-2, Object-3, ...)
match = LEAK_RE.search(message)
if match:
search_term = match.group(1) if match.group(1) is not None else match.group(2)
else:
# For reftests, remove the reference path from the tokens as this is
# not very unique
test_name_or_path = REFTEST_RE.sub("", test_name_or_path)
for splitter in ("/", "\\"):
# if this is a path, we are interested in the last part
test_name_or_path = test_name_or_path.split(splitter)[-1]
search_term = test_name_or_path
# If the failure line was not in the pipe symbol delimited format or the search term
# will likely return too many (or irrelevant) results (eg: too short or matches terms
# on the blacklist), then we fall back to searching for the entire failure line if
# it is suitable.
if not (search_term and is_helpful_search_term(search_term)):
if is_helpful_search_term(error_line):
search_term = error_line
else:
search_term = None
# Searching for extremely long search terms is undesirable, since:
# a) Bugzilla's max summary length is 256 characters, and once "Intermittent "
# and platform/suite information is prefixed, there are even fewer characters
# left for us to use for the failure string against which we need to match.
# b) For long search terms, the additional length does little to prevent against
# false positives, but means we're more susceptible to false negatives due to
# run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
search_term = search_term[:100]
return search_term | python | def get_error_search_term(error_line):
"""
Generate a search term from the given error_line string.
Attempt to build a search term that will yield meaningful results when used
in a MySQL FTS query.
"""
if not error_line:
return None
# This is strongly inspired by
# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73
tokens = error_line.split(" | ")
search_term = None
if len(tokens) >= 3:
# If this is process output then discard the token with the PID
if len(tokens) > 3 and OUTPUT_RE.match(tokens[0]):
tokens = tokens[1:]
# it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
test_name_or_path = tokens[1]
message = tokens[2]
# Leak failure messages are of the form:
# leakcheck | .*\d+ bytes leaked (Object-1, Object-2, Object-3, ...)
match = LEAK_RE.search(message)
if match:
search_term = match.group(1) if match.group(1) is not None else match.group(2)
else:
# For reftests, remove the reference path from the tokens as this is
# not very unique
test_name_or_path = REFTEST_RE.sub("", test_name_or_path)
for splitter in ("/", "\\"):
# if this is a path, we are interested in the last part
test_name_or_path = test_name_or_path.split(splitter)[-1]
search_term = test_name_or_path
# If the failure line was not in the pipe symbol delimited format or the search term
# will likely return too many (or irrelevant) results (eg: too short or matches terms
# on the blacklist), then we fall back to searching for the entire failure line if
# it is suitable.
if not (search_term and is_helpful_search_term(search_term)):
if is_helpful_search_term(error_line):
search_term = error_line
else:
search_term = None
# Searching for extremely long search terms is undesirable, since:
# a) Bugzilla's max summary length is 256 characters, and once "Intermittent "
# and platform/suite information is prefixed, there are even fewer characters
# left for us to use for the failure string against which we need to match.
# b) For long search terms, the additional length does little to prevent against
# false positives, but means we're more susceptible to false negatives due to
# run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
search_term = search_term[:100]
return search_term | [
"def",
"get_error_search_term",
"(",
"error_line",
")",
":",
"if",
"not",
"error_line",
":",
"return",
"None",
"# This is strongly inspired by",
"# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73",
"tokens",
"=",
"error_line",
".",
"split",... | Generate a search term from the given error_line string.
Attempt to build a search term that will yield meaningful results when used
in a MySQL FTS query. | [
"Generate",
"a",
"search",
"term",
"from",
"the",
"given",
"error_line",
"string",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/error_summary.py#L103-L160 | train | 205,593 |
mozilla/treeherder | treeherder/model/error_summary.py | get_crash_signature | def get_crash_signature(error_line):
"""Try to get a crash signature from the given error_line string."""
search_term = None
match = CRASH_RE.match(error_line)
if match and is_helpful_search_term(match.group(1)):
search_term = match.group(1)
return search_term | python | def get_crash_signature(error_line):
"""Try to get a crash signature from the given error_line string."""
search_term = None
match = CRASH_RE.match(error_line)
if match and is_helpful_search_term(match.group(1)):
search_term = match.group(1)
return search_term | [
"def",
"get_crash_signature",
"(",
"error_line",
")",
":",
"search_term",
"=",
"None",
"match",
"=",
"CRASH_RE",
".",
"match",
"(",
"error_line",
")",
"if",
"match",
"and",
"is_helpful_search_term",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
":",
"sea... | Try to get a crash signature from the given error_line string. | [
"Try",
"to",
"get",
"a",
"crash",
"signature",
"from",
"the",
"given",
"error_line",
"string",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/error_summary.py#L163-L169 | train | 205,594 |
mozilla/treeherder | treeherder/model/error_summary.py | is_helpful_search_term | def is_helpful_search_term(search_term):
"""
Decide if the given search_term string is helpful or not.
We define "helpful" here as search terms that won't match an excessive
number of bug summaries. Very short terms and those matching generic
strings (listed in the blacklist) are deemed unhelpful since they wouldn't
result in useful suggestions.
"""
# Search terms that will match too many bug summaries
# and so not result in useful suggestions.
search_term = search_term.strip()
blacklist = [
'automation.py',
'remoteautomation.py',
'Shutdown',
'undefined',
'Main app process exited normally',
'Traceback (most recent call last):',
'Return code: 0',
'Return code: 1',
'Return code: 2',
'Return code: 9',
'Return code: 10',
'mozalloc_abort(char const*)',
'mozalloc_abort',
'Exiting 1',
'Exiting 9',
'CrashingThread(void *)',
'libSystem.B.dylib + 0xd7a',
'linux-gate.so + 0x424',
'TypeError: content is null',
'leakcheck',
'ImportError: No module named pygtk',
'# TBPL FAILURE #'
]
return len(search_term) > 4 and search_term not in blacklist | python | def is_helpful_search_term(search_term):
"""
Decide if the given search_term string is helpful or not.
We define "helpful" here as search terms that won't match an excessive
number of bug summaries. Very short terms and those matching generic
strings (listed in the blacklist) are deemed unhelpful since they wouldn't
result in useful suggestions.
"""
# Search terms that will match too many bug summaries
# and so not result in useful suggestions.
search_term = search_term.strip()
blacklist = [
'automation.py',
'remoteautomation.py',
'Shutdown',
'undefined',
'Main app process exited normally',
'Traceback (most recent call last):',
'Return code: 0',
'Return code: 1',
'Return code: 2',
'Return code: 9',
'Return code: 10',
'mozalloc_abort(char const*)',
'mozalloc_abort',
'Exiting 1',
'Exiting 9',
'CrashingThread(void *)',
'libSystem.B.dylib + 0xd7a',
'linux-gate.so + 0x424',
'TypeError: content is null',
'leakcheck',
'ImportError: No module named pygtk',
'# TBPL FAILURE #'
]
return len(search_term) > 4 and search_term not in blacklist | [
"def",
"is_helpful_search_term",
"(",
"search_term",
")",
":",
"# Search terms that will match too many bug summaries",
"# and so not result in useful suggestions.",
"search_term",
"=",
"search_term",
".",
"strip",
"(",
")",
"blacklist",
"=",
"[",
"'automation.py'",
",",
"'re... | Decide if the given search_term string is helpful or not.
We define "helpful" here as search terms that won't match an excessive
number of bug summaries. Very short terms and those matching generic
strings (listed in the blacklist) are deemed unhelpful since they wouldn't
result in useful suggestions. | [
"Decide",
"if",
"the",
"given",
"search_term",
"string",
"is",
"helpful",
"or",
"not",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/error_summary.py#L172-L210 | train | 205,595 |
mozilla/treeherder | treeherder/seta/analyze_failures.py | get_failures_fixed_by_commit | def get_failures_fixed_by_commit():
""" Return all job failures annotated with "fixed by commit" grouped by reason given for annotation.
It returns a dictionary with a revision or bug ID as the key (bug ID is used for
intermittent failures and the revision is used for real failures). For SETA's purposes
we only care about revisions (real failures).
The failures for *real failures* will contain all jobs that have been starred as "fixed by commit".
Notice that the data does not tell you on which repository a root failure was fixed.
For instance, in the raw data you might see a reference to 9fa614d8310d which is a back out
and it is referenced by 12 starred jobs:
https://treeherder.mozilla.org/#/jobs?repo=autoland&filter-searchStr=android%20debug%20cpp&tochange=9fa614d8310db9aabe85cc3c3cff6281fe1edb0c
The raw data will show those 12 jobs.
The returned data will look like this:
{
"44d29bac3654": [
["android-4-0-armv7-api15", "opt", "android-lint"],
["android-4-0-armv7-api15", "opt", "android-api-15-gradle-dependencies"],
]
}
"""
failures = defaultdict(list)
option_collection_map = models.OptionCollection.objects.get_option_collection_map()
fixed_by_commit_data_set = models.JobNote.objects.filter(
failure_classification=2,
created__gt=timezone.now() - timedelta(days=SETA_FIXED_BY_COMMIT_DAYS),
text__isnull=False,
job__repository__name__in=SETA_FIXED_BY_COMMIT_REPOS
).exclude(
job__signature__build_platform__in=SETA_UNSUPPORTED_PLATFORMS
).exclude(
text=""
).select_related('job', 'job__signature', 'job__job_type')
# check if at least one fixed by commit job meets our requirements without populating queryset
if not fixed_by_commit_data_set.exists():
logger.warning("We couldn't find any fixed-by-commit jobs")
return failures
# now process the fixed by commit jobs in batches using django's queryset iterator
for job_note in fixed_by_commit_data_set.iterator():
# if we have http://hg.mozilla.org/rev/<rev> and <rev>, we will only use <rev>
revision_id = job_note.text.strip('/')
revision_id = revision_id.split('/')[-1]
# This prevents the empty string case and ignores bug ids
if not revision_id or len(revision_id) < 12:
continue
# We currently don't guarantee that text is actually a revision
# Even if not perfect the main idea is that a bunch of jobs were annotated with
# a unique identifier. The assumption is that the text is unique
#
# I've seen these values being used:
# * 12 char revision
# * 40 char revision
# * link to revision on hg
# * revisionA & revisionB
# * should be fixed by <revision>
# * bug id
#
# Note that if some jobs are annotated with the 12char revision and others with the
# 40char revision we will have two disjunct set of failures
#
# Some of this will be improved in https://bugzilla.mozilla.org/show_bug.cgi?id=1323536
try:
# check if jobtype is supported by SETA (see treeherder/seta/settings.py)
if job_note.job.signature.build_system_type != 'buildbot':
if not job_note.job.job_type.name.startswith(tuple(SETA_SUPPORTED_TC_JOBTYPES)):
continue
testtype = parse_testtype(
build_system_type=job_note.job.signature.build_system_type, # e.g. taskcluster
job_type_name=job_note.job.job_type.name, # e.g. Mochitest
platform_option=job_note.job.get_platform_option(option_collection_map), # e.g. 'opt'
ref_data_name=job_note.job.signature.name, # buildername or task label
)
if testtype:
if is_job_blacklisted(testtype):
continue
else:
logger.warning('We were unable to parse %s/%s',
job_note.job.job_type.name, job_note.job.signature.name)
continue
# we now have a legit fixed-by-commit job failure
failures[revision_id].append(unique_key(
testtype=testtype,
buildtype=job_note.job.get_platform_option(option_collection_map), # e.g. 'opt'
platform=job_note.job.signature.build_platform
))
except models.Job.DoesNotExist:
logger.warning('job_note %s has no job associated to it', job_note.id)
continue
logger.warning("Number of fixed_by_commit revisions: %s", len(failures))
return failures | python | def get_failures_fixed_by_commit():
""" Return all job failures annotated with "fixed by commit" grouped by reason given for annotation.
It returns a dictionary with a revision or bug ID as the key (bug ID is used for
intermittent failures and the revision is used for real failures). For SETA's purposes
we only care about revisions (real failures).
The failures for *real failures* will contain all jobs that have been starred as "fixed by commit".
Notice that the data does not tell you on which repository a root failure was fixed.
For instance, in the raw data you might see a reference to 9fa614d8310d which is a back out
and it is referenced by 12 starred jobs:
https://treeherder.mozilla.org/#/jobs?repo=autoland&filter-searchStr=android%20debug%20cpp&tochange=9fa614d8310db9aabe85cc3c3cff6281fe1edb0c
The raw data will show those 12 jobs.
The returned data will look like this:
{
"44d29bac3654": [
["android-4-0-armv7-api15", "opt", "android-lint"],
["android-4-0-armv7-api15", "opt", "android-api-15-gradle-dependencies"],
]
}
"""
failures = defaultdict(list)
option_collection_map = models.OptionCollection.objects.get_option_collection_map()
fixed_by_commit_data_set = models.JobNote.objects.filter(
failure_classification=2,
created__gt=timezone.now() - timedelta(days=SETA_FIXED_BY_COMMIT_DAYS),
text__isnull=False,
job__repository__name__in=SETA_FIXED_BY_COMMIT_REPOS
).exclude(
job__signature__build_platform__in=SETA_UNSUPPORTED_PLATFORMS
).exclude(
text=""
).select_related('job', 'job__signature', 'job__job_type')
# check if at least one fixed by commit job meets our requirements without populating queryset
if not fixed_by_commit_data_set.exists():
logger.warning("We couldn't find any fixed-by-commit jobs")
return failures
# now process the fixed by commit jobs in batches using django's queryset iterator
for job_note in fixed_by_commit_data_set.iterator():
# if we have http://hg.mozilla.org/rev/<rev> and <rev>, we will only use <rev>
revision_id = job_note.text.strip('/')
revision_id = revision_id.split('/')[-1]
# This prevents the empty string case and ignores bug ids
if not revision_id or len(revision_id) < 12:
continue
# We currently don't guarantee that text is actually a revision
# Even if not perfect the main idea is that a bunch of jobs were annotated with
# a unique identifier. The assumption is that the text is unique
#
# I've seen these values being used:
# * 12 char revision
# * 40 char revision
# * link to revision on hg
# * revisionA & revisionB
# * should be fixed by <revision>
# * bug id
#
# Note that if some jobs are annotated with the 12char revision and others with the
# 40char revision we will have two disjunct set of failures
#
# Some of this will be improved in https://bugzilla.mozilla.org/show_bug.cgi?id=1323536
try:
# check if jobtype is supported by SETA (see treeherder/seta/settings.py)
if job_note.job.signature.build_system_type != 'buildbot':
if not job_note.job.job_type.name.startswith(tuple(SETA_SUPPORTED_TC_JOBTYPES)):
continue
testtype = parse_testtype(
build_system_type=job_note.job.signature.build_system_type, # e.g. taskcluster
job_type_name=job_note.job.job_type.name, # e.g. Mochitest
platform_option=job_note.job.get_platform_option(option_collection_map), # e.g. 'opt'
ref_data_name=job_note.job.signature.name, # buildername or task label
)
if testtype:
if is_job_blacklisted(testtype):
continue
else:
logger.warning('We were unable to parse %s/%s',
job_note.job.job_type.name, job_note.job.signature.name)
continue
# we now have a legit fixed-by-commit job failure
failures[revision_id].append(unique_key(
testtype=testtype,
buildtype=job_note.job.get_platform_option(option_collection_map), # e.g. 'opt'
platform=job_note.job.signature.build_platform
))
except models.Job.DoesNotExist:
logger.warning('job_note %s has no job associated to it', job_note.id)
continue
logger.warning("Number of fixed_by_commit revisions: %s", len(failures))
return failures | [
"def",
"get_failures_fixed_by_commit",
"(",
")",
":",
"failures",
"=",
"defaultdict",
"(",
"list",
")",
"option_collection_map",
"=",
"models",
".",
"OptionCollection",
".",
"objects",
".",
"get_option_collection_map",
"(",
")",
"fixed_by_commit_data_set",
"=",
"model... | Return all job failures annotated with "fixed by commit" grouped by reason given for annotation.
It returns a dictionary with a revision or bug ID as the key (bug ID is used for
intermittent failures and the revision is used for real failures). For SETA's purposes
we only care about revisions (real failures).
The failures for *real failures* will contain all jobs that have been starred as "fixed by commit".
Notice that the data does not tell you on which repository a root failure was fixed.
For instance, in the raw data you might see a reference to 9fa614d8310d which is a back out
and it is referenced by 12 starred jobs:
https://treeherder.mozilla.org/#/jobs?repo=autoland&filter-searchStr=android%20debug%20cpp&tochange=9fa614d8310db9aabe85cc3c3cff6281fe1edb0c
The raw data will show those 12 jobs.
The returned data will look like this:
{
"44d29bac3654": [
["android-4-0-armv7-api15", "opt", "android-lint"],
["android-4-0-armv7-api15", "opt", "android-api-15-gradle-dependencies"],
]
} | [
"Return",
"all",
"job",
"failures",
"annotated",
"with",
"fixed",
"by",
"commit",
"grouped",
"by",
"reason",
"given",
"for",
"annotation",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/analyze_failures.py#L39-L140 | train | 205,596 |
def score_matches(matches, score_multiplier=(1, 1)):
    """
    Yield (score, classified_failure_id) pairs for the given matches.

    Each TextLogErrorMatch's stored score is scaled by the optional
    multiplier, expressed as a (dividend, divisor) pair.
    """
    # The multiplier is constant for the whole batch, so unpack it once.
    dividend, divisor = score_multiplier
    for candidate in matches:
        scaled_score = candidate.score * dividend / divisor
        yield (scaled_score, candidate.classified_failure_id)
"def",
"score_matches",
"(",
"matches",
",",
"score_multiplier",
"=",
"(",
"1",
",",
"1",
")",
")",
":",
"for",
"match",
"in",
"matches",
":",
"# generate a new score from the current match",
"dividend",
",",
"divisor",
"=",
"score_multiplier",
"score",
"=",
"ma... | Get scores for the given matches.
Given a QuerySet of TextLogErrorMatches produce a score for each one until
Good Enough™. An optional score multiplier can be passed in. | [
"Get",
"scores",
"for",
"the",
"given",
"matches",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/utils.py#L5-L17 | train | 205,597 |
def time_boxed(func, iterable, time_budget, *args):
    """
    Apply *func* to each item of *iterable* within a given time budget.

    ``time_budget`` is expressed in milliseconds. The remaining budget is
    checked after each item is produced, so at least one item is always
    processed even when the budget is already exhausted.
    """
    deadline = time.time() + time_budget / 1000  # budget in milliseconds
    for item in iterable:
        yield func(item, *args)
        if time.time() > deadline:
            # Checking after the yield guarantees we always run at least
            # one iteration, which is useful for testing.
            return
"def",
"time_boxed",
"(",
"func",
",",
"iterable",
",",
"time_budget",
",",
"*",
"args",
")",
":",
"time_budget",
"=",
"time_budget",
"/",
"1000",
"# budget in milliseconds",
"start",
"=",
"time",
".",
"time",
"(",
")",
"for",
"thing",
"in",
"iterable",
":... | Apply a function to the items of an iterable within a given time budget.
Loop the given iterable, calling the given function on each item. The expended
time is compared to the given time budget after each iteration. | [
"Apply",
"a",
"function",
"to",
"the",
"items",
"of",
"an",
"iterable",
"within",
"a",
"given",
"time",
"budget",
"."
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/utils.py#L20-L37 | train | 205,598 |
def job_priority_index(job_priorities):
    """
    Build a lookup dict for rows of the job priorities table.

    Maps each priority's (testtype, buildtype, platform) identifier to its
    primary key and build system type, so callers can resolve a priority
    without repeatedly scanning the DB rows.
    """
    index = {}
    for priority in job_priorities:
        identifier = priority.unique_identifier()
        if identifier in index:
            # A unique composite index over these 3 fields in models.py
            # guarantees this cannot happen; a collision means bad data.
            msg = '"{}" should be a unique job priority and that is unexpected.'.format(identifier)
            raise DuplicateKeyError(msg)
        # identifier is the (testtype, buildtype, platform) tuple
        index[identifier] = {'pk': priority.id, 'build_system_type': priority.buildsystem}
    return index
"def",
"job_priority_index",
"(",
"job_priorities",
")",
":",
"jp_index",
"=",
"{",
"}",
"# Creating this data structure which reduces how many times we iterate through the DB rows",
"for",
"jp",
"in",
"job_priorities",
":",
"key",
"=",
"jp",
".",
"unique_identifier",
"(",
... | This structure helps with finding data from the job priorities table | [
"This",
"structure",
"helps",
"with",
"finding",
"data",
"from",
"the",
"job",
"priorities",
"table"
] | cc47bdec872e5c668d0f01df89517390a164cda3 | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/common.py#L10-L25 | train | 205,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.