Dataset schema:
  id         int64   (0 to 190k)
  prompt     string  (lengths 21 to 13.4M)
  docstring  string  (lengths 1 to 12k)
157,003
import collections

from firebase_admin import auth
from google.cloud import ndb
from googleapiclient.discovery import build
import jwt
import requests

from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from libs import request_cache


class AuthError(Exception):
  """Auth error."""


The provided code snippet includes necessary dependencies for implementing the `create_session_cookie` function. Write a Python function `def create_session_cookie(id_token, expires_in)` to solve the following problem:
Create a new session cookie.

Here is the function:

def create_session_cookie(id_token, expires_in):
  """Create a new session cookie."""
  try:
    return auth.create_session_cookie(id_token, expires_in=expires_in)
  except auth.AuthError:
    raise AuthError('Failed to create session cookie.')
Create a new session cookie.
157,004
import collections

from firebase_admin import auth
from google.cloud import ndb
from googleapiclient.discovery import build
import jwt
import requests

from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from libs import request_cache


def decode_claims(session_cookie):
  """Decode the claims for the current session cookie."""
  try:
    return auth.verify_session_cookie(session_cookie, check_revoked=True)
  except (ValueError, auth.AuthError):
    raise AuthError('Invalid session cookie.')


The provided code snippet includes necessary dependencies for implementing the `revoke_session_cookie` function. Write a Python function `def revoke_session_cookie(session_cookie)` to solve the following problem:
Revoke a session cookie.

Here is the function:

def revoke_session_cookie(session_cookie):
  """Revoke a session cookie."""
  decoded_claims = decode_claims(session_cookie)
  auth.revoke_refresh_tokens(decoded_claims['sub'])
Revoke a session cookie.
157,005
from flask import Flask
from flask import redirect
from flask import request
from google.cloud import ndb

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from handlers import auth
from handlers import base_handler
from handlers import bots
from handlers import commit_range
from handlers import configuration
from handlers import corpora
from handlers import coverage_report
from handlers import crash_query
from handlers import crash_stats
from handlers import domain_verifier
from handlers import download
from handlers import external_update
from handlers import fuzzer_stats
from handlers import fuzzers
from handlers import gcs_redirector
from handlers import help_redirector
from handlers import home
from handlers import issue_redirector
from handlers import jobs
from handlers import login
from handlers import report_csp_failure
from handlers import revisions_info
from handlers import testcase_list
from handlers import upload_testcase
from handlers import viewer
from handlers.cron import cleanup
from handlers.cron import predator_pull
from handlers.cron import triage
from handlers.testcase_detail import crash_stats as crash_stats_on_testcase
from handlers.testcase_detail import create_issue
from handlers.testcase_detail import delete
from handlers.testcase_detail import download_testcase
from handlers.testcase_detail import find_similar_issues
from handlers.testcase_detail import mark_fixed
from handlers.testcase_detail import mark_security
from handlers.testcase_detail import mark_unconfirmed
from handlers.testcase_detail import redo
from handlers.testcase_detail import remove_duplicate
from handlers.testcase_detail import remove_group
from handlers.testcase_detail import remove_issue
from handlers.testcase_detail import show as show_testcase
from handlers.testcase_detail import testcase_variants
from handlers.testcase_detail import update_from_trunk
from handlers.testcase_detail import update_issue

ndb_client = ndb.Client()


The provided code snippet includes necessary dependencies for implementing the `ndb_wsgi_middleware` function. Write a Python function `def ndb_wsgi_middleware(wsgi_app)` to solve the following problem:
WSGI middleware for ndb_datastore context allocation to the app.

Here is the function:

def ndb_wsgi_middleware(wsgi_app):
  """WSGI middleware for ndb_datastore context allocation to the app."""

  def middleware(environ, start_response):
    with ndb_client.context():
      return wsgi_app(environ, start_response)

  return middleware
WSGI middleware for ndb_datastore context allocation to the app.
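A brief usage sketch of the record above (the app name is illustrative, not part of the record): since the middleware just wraps the WSGI callable, installing it is a single reassignment on the Flask app.

# Hypothetical usage: wrap a Flask app so every request runs in an NDB context.
from flask import Flask

server = Flask(__name__)
server.wsgi_app = ndb_wsgi_middleware(server.wsgi_app)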
157,006
from flask import Flask
from flask import redirect
from flask import request
from google.cloud import ndb

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from handlers import auth
from handlers import base_handler
from handlers import bots
from handlers import commit_range
from handlers import configuration
from handlers import corpora
from handlers import coverage_report
from handlers import crash_query
from handlers import crash_stats
from handlers import domain_verifier
from handlers import download
from handlers import external_update
from handlers import fuzzer_stats
from handlers import fuzzers
from handlers import gcs_redirector
from handlers import help_redirector
from handlers import home
from handlers import issue_redirector
from handlers import jobs
from handlers import login
from handlers import report_csp_failure
from handlers import revisions_info
from handlers import testcase_list
from handlers import upload_testcase
from handlers import viewer
from handlers.cron import cleanup
from handlers.cron import predator_pull
from handlers.cron import triage
from handlers.testcase_detail import crash_stats as crash_stats_on_testcase
from handlers.testcase_detail import create_issue
from handlers.testcase_detail import delete
from handlers.testcase_detail import download_testcase
from handlers.testcase_detail import find_similar_issues
from handlers.testcase_detail import mark_fixed
from handlers.testcase_detail import mark_security
from handlers.testcase_detail import mark_unconfirmed
from handlers.testcase_detail import redo
from handlers.testcase_detail import remove_duplicate
from handlers.testcase_detail import remove_group
from handlers.testcase_detail import remove_issue
from handlers.testcase_detail import show as show_testcase
from handlers.testcase_detail import testcase_variants
from handlers.testcase_detail import update_from_trunk
from handlers.testcase_detail import update_issue


The provided code snippet includes necessary dependencies for implementing the `register_routes` function. Write a Python function `def register_routes(flask_app, routes)` to solve the following problem:
Utility function to register all routes to the flask app.

Here is the function:

def register_routes(flask_app, routes):
  """Utility function to register all routes to the flask app."""
  for route, handler in routes:
    flask_app.add_url_rule(route, view_func=handler.as_view(route))
Utility function to register all routes to the flask app.
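A usage sketch for the record above; the route paths and handler classes are illustrative assumptions (the handler modules come from the imports in the record, but the class names are not shown there).

# Hypothetical route table; each handler is assumed to be a Flask MethodView.
routes = [
    ('/', home.Handler),
    ('/jobs', jobs.Handler),
]
register_routes(flask_app, routes)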
157,007
from flask import Flask
from flask import redirect
from flask import request
from google.cloud import ndb

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from handlers import auth
from handlers import base_handler
from handlers import bots
from handlers import commit_range
from handlers import configuration
from handlers import corpora
from handlers import coverage_report
from handlers import crash_query
from handlers import crash_stats
from handlers import domain_verifier
from handlers import download
from handlers import external_update
from handlers import fuzzer_stats
from handlers import fuzzers
from handlers import gcs_redirector
from handlers import help_redirector
from handlers import home
from handlers import issue_redirector
from handlers import jobs
from handlers import login
from handlers import report_csp_failure
from handlers import revisions_info
from handlers import testcase_list
from handlers import upload_testcase
from handlers import viewer
from handlers.cron import cleanup
from handlers.cron import predator_pull
from handlers.cron import triage
from handlers.testcase_detail import crash_stats as crash_stats_on_testcase
from handlers.testcase_detail import create_issue
from handlers.testcase_detail import delete
from handlers.testcase_detail import download_testcase
from handlers.testcase_detail import find_similar_issues
from handlers.testcase_detail import mark_fixed
from handlers.testcase_detail import mark_security
from handlers.testcase_detail import mark_unconfirmed
from handlers.testcase_detail import redo
from handlers.testcase_detail import remove_duplicate
from handlers.testcase_detail import remove_group
from handlers.testcase_detail import remove_issue
from handlers.testcase_detail import show as show_testcase
from handlers.testcase_detail import testcase_variants
from handlers.testcase_detail import update_from_trunk
from handlers.testcase_detail import update_issue

main_domain = config.get('domains.main')
redirect_domains = config.get('domains.redirects')


The provided code snippet includes necessary dependencies for implementing the `redirect_handler` function. Write a Python function `def redirect_handler()` to solve the following problem:
Redirection handler.

Here is the function:

def redirect_handler():
  """Redirection handler."""
  if not redirect_domains:
    return None

  if request.host in redirect_domains:
    return redirect('https://' + main_domain + request.full_path)

  return None
Redirection handler.
157,008
def _levenshtein_distance(string_1, string_2):
  """Levenshtein distance calculation: iterative with two matrix rows, based
  on the Wikipedia article and code by Christopher P. Matthews."""
  if string_1 == string_2:
    return 0
  if not string_1:
    return len(string_2)
  if not string_2:
    return len(string_1)

  v0 = list(range(len(string_2) + 1))
  v1 = [None] * (len(string_2) + 1)
  for i in range(len(string_1)):
    v1[0] = i + 1
    for j in range(len(string_2)):
      cost = 0 if string_1[i] == string_2[j] else 1
      v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
    for j in range(len(v0)):
      v0[j] = v1[j]

  return v1[len(string_2)]


The provided code snippet includes necessary dependencies for implementing the `_similarity_ratio` function. Write a Python function `def _similarity_ratio(string_1, string_2)` to solve the following problem:
Return a ratio on how similar two strings are.

Here is the function:

def _similarity_ratio(string_1, string_2):
  """Return a ratio on how similar two strings are."""
  length_sum = len(string_1) + len(string_2)
  if length_sum == 0:
    return 1.0

  return (length_sum - _levenshtein_distance(string_1, string_2)) / (
      1.0 * length_sum)
Return a ratio on how similar two strings are.
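A quick worked check of the record above (values follow from the code as written): for 'kitten' and 'sitting' the Levenshtein distance is 3 and the combined length is 13, so the ratio is (13 - 3) / 13, about 0.77.

# Worked example for _similarity_ratio and its helper.
assert _levenshtein_distance('kitten', 'sitting') == 3
assert abs(_similarity_ratio('kitten', 'sitting') - 10 / 13.0) < 1e-9
assert _similarity_ratio('', '') == 1.0  # Two empty strings are identical.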
157,009
The provided code snippet includes necessary dependencies for implementing the `longest_common_subsequence` function. Write a Python function `def longest_common_subsequence(first_frames, second_frames)` to solve the following problem:
Count number of frames which are the same (taking into account order).

Here is the function:

def longest_common_subsequence(first_frames, second_frames):
  """Count number of frames which are the same (taking into account order)."""
  first_len = len(first_frames)
  second_len = len(second_frames)
  solution = [[0 for _ in range(second_len + 1)] for _ in range(first_len + 1)]
  for i in range(1, first_len + 1):
    for j in range(1, second_len + 1):
      if first_frames[i - 1] == second_frames[j - 1]:
        solution[i][j] = solution[i - 1][j - 1] + 1
      else:
        solution[i][j] = max(solution[i - 1][j], solution[i][j - 1])
  return solution[first_len][second_len]
Count number of frames which are the same (taking into account order).
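A worked check of the dynamic program above: 'a', 'c', 'd' appear in both frame lists in the same order, so the longest common subsequence has length 3.

# Worked example for longest_common_subsequence.
assert longest_common_subsequence(['a', 'b', 'c', 'd'], ['a', 'c', 'd']) == 3
assert longest_common_subsequence(['x'], ['y']) == 0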
157,010
import re

from clusterfuzz._internal.datastore.data_types import MISSING_VALUE_STRING
from clusterfuzz._internal.datastore.data_types import SecuritySeverity
from clusterfuzz._internal.system import environment

SEVERITY_ORDER = [
    SecuritySeverity.LOW, SecuritySeverity.MEDIUM, SecuritySeverity.HIGH,
    SecuritySeverity.CRITICAL
]


class SecuritySeverity:
  """Enum for Security Severity."""
  CRITICAL = 0
  HIGH = 1
  MEDIUM = 2
  LOW = 3
  MISSING = 4

  @classmethod
  def is_valid(cls, security_severity):
    """Return bool on whether a severity is valid."""
    return security_severity in [cls.CRITICAL, cls.HIGH, cls.MEDIUM, cls.LOW]

  @classmethod
  def list(cls):
    """Return the list of severities for a dropdown menu."""
    return [
        {'value': cls.CRITICAL, 'name': 'Critical'},
        {'value': cls.HIGH, 'name': 'High', 'default': True},
        {'value': cls.MEDIUM, 'name': 'Medium'},
        {'value': cls.LOW, 'name': 'Low'},
        {'value': cls.MISSING, 'name': 'Missing'},
    ]


The provided code snippet includes necessary dependencies for implementing the `_modify_severity` function. Write a Python function `def _modify_severity(severity, delta, min_severity=SecuritySeverity.LOW, max_severity=SecuritySeverity.CRITICAL)` to solve the following problem:
Increase/decrease the given |severity| by |delta|.

Here is the function:

def _modify_severity(severity,
                     delta,
                     min_severity=SecuritySeverity.LOW,
                     max_severity=SecuritySeverity.CRITICAL):
  """Increase/decrease the given |severity| by |delta|."""
  min_index = SEVERITY_ORDER.index(min_severity)
  max_index = SEVERITY_ORDER.index(max_severity)
  assert min_index != -1 and max_index != -1

  severity_index = SEVERITY_ORDER.index(severity)
  assert severity_index != -1

  max_index = min(len(SEVERITY_ORDER) - 1, max_index)
  severity_index += delta
  severity_index = min(severity_index, max_index)
  severity_index = max(severity_index, min_index)

  return SEVERITY_ORDER[severity_index]
Increase/decrease the given |severity| by |delta|.
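A worked check of the clamping behavior above: SEVERITY_ORDER is [LOW, MEDIUM, HIGH, CRITICAL], so a delta of +1 bumps LOW to MEDIUM, and any overshoot is clamped at the max severity.

# Worked example for _modify_severity.
assert _modify_severity(SecuritySeverity.LOW, 1) == SecuritySeverity.MEDIUM
assert _modify_severity(SecuritySeverity.CRITICAL, 5) == SecuritySeverity.CRITICAL
assert _modify_severity(SecuritySeverity.MEDIUM, -1) == SecuritySeverity.LOW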
157,011
import os
import re
import subprocess
import sys

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms.android import fetch_artifact
from clusterfuzz._internal.platforms.android import settings
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.platforms.linux import lkl
from clusterfuzz._internal.platforms.linux.lkl import constants as lkl_constants
from clusterfuzz._internal.system import archive
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell


def fix_filename(file_name):
  """Clean up the filename, nulls out tool specific ones."""
  file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
  file_name = re.sub('.*crtstuff.c:0', '', file_name)
  file_name = re.sub(':0$', '', file_name)

  # If we don't have a file name, just bail out.
  if not file_name or file_name.startswith('??'):
    return ''

  return os.path.normpath(file_name)


def fix_function_name(function_name):
  """Clean up function name."""
  if function_name.startswith('??'):
    return ''

  return function_name


The provided code snippet includes necessary dependencies for implementing the `get_stack_frame` function. Write a Python function `def get_stack_frame(binary, addr, function_name, file_name)` to solve the following problem:
Return a stack frame entry.

Here is the function:

def get_stack_frame(binary, addr, function_name, file_name):
  """Return a stack frame entry."""
  # Cleanup file and function name.
  file_name = fix_filename(file_name)
  function_name = fix_function_name(function_name)

  # Check if we don't have any symbols at all. If yes, this is probably
  # a system library. In this case, just return the binary name.
  if not function_name and not file_name:
    return '%s in %s' % (addr, os.path.basename(binary))

  # We just have a file name. Probably running in global context.
  if not function_name:
    # Filter the filename to act as a function name.
    filtered_file_name = os.path.basename(file_name)
    return '%s in %s %s' % (addr, filtered_file_name, file_name)

  # Regular stack frame.
  return '%s in %s %s' % (addr, function_name, file_name)
Return a stack frame entry.
157,012
import os
import re
import subprocess
import sys

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms.android import fetch_artifact
from clusterfuzz._internal.platforms.android import settings
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.platforms.linux import lkl
from clusterfuzz._internal.platforms.linux.lkl import constants as lkl_constants
from clusterfuzz._internal.system import archive
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `is_valid_arch` function. Write a Python function `def is_valid_arch(s)` to solve the following problem:
Check if this is a valid supported architecture.

Here is the function:

def is_valid_arch(s):
  """Check if this is a valid supported architecture."""
  return s in [
      "i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
      "armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"
  ]
Check if this is a valid supported architecture.
157,013
import os
import re
import subprocess
import sys

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms.android import fetch_artifact
from clusterfuzz._internal.platforms.android import settings
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.platforms.linux import lkl
from clusterfuzz._internal.platforms.linux.lkl import constants as lkl_constants
from clusterfuzz._internal.system import archive
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `guess_arch` function. Write a Python function `def guess_arch(address)` to solve the following problem:
Guess which architecture we're running on (32/64). 10 = len('0x') + 8 hex digits.

Here is the function:

def guess_arch(address):
  """Guess which architecture we're running on (32/64).
  10 = len('0x') + 8 hex digits."""
  if len(address) > 10:
    return 'x86_64'
  else:
    return 'i386'
Guess which architecture we're running on (32/64). 10 = len('0x') + 8 hex digits.
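A worked check of the length heuristic above: '0xbffff8c0' is exactly 10 characters (2 for '0x' plus 8 hex digits), so it is treated as a 32-bit address; anything longer is treated as 64-bit.

# Worked example for guess_arch.
assert guess_arch('0xbffff8c0') == 'i386'
assert guess_arch('0x7fff5fbff8c0') == 'x86_64'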
157,014
import os
import re
import subprocess
import sys

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms.android import fetch_artifact
from clusterfuzz._internal.platforms.android import settings
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.platforms.linux import lkl
from clusterfuzz._internal.platforms.linux.lkl import constants as lkl_constants
from clusterfuzz._internal.system import archive
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell

llvm_symbolizer_path = ''


class LLVMSymbolizer(Symbolizer):

  def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
    super().__init__()
    self.symbolizer_path = symbolizer_path
    self.default_arch = default_arch
    self.system = system
    self.dsym_hints = dsym_hints
    self.pipe = self.open_llvm_symbolizer()

  def open_llvm_symbolizer(self):
    if not os.path.exists(self.symbolizer_path):
      return None

    # Setup symbolizer command line.
    cmd = [
        self.symbolizer_path,
        '--default-arch=%s' % self.default_arch,
        '--demangle',
        '--functions=linkage',
        '--inlining=%s' % stack_inlining,
    ]
    if self.system == 'darwin':
      for hint in self.dsym_hints:
        cmd.append('--dsym-hint=%s' % hint)

    # Set LD_LIBRARY_PATH to use the right libstdc++.
    env_copy = environment.copy()
    env_copy['LD_LIBRARY_PATH'] = os.path.dirname(self.symbolizer_path)

    # FIXME: Since we are not using process_handler.run_process here, we can
    # run into issues with unicode environment variable and values. Add this
    # explicit hack to convert these into strings.
    env_copy = {str(key): str(value) for key, value in env_copy.items()}

    # Run the symbolizer.
    pipe = subprocess.Popen(
        cmd, env=env_copy, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    global pipes
    pipes.append(pipe)

    return pipe

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if not binary.strip():
      return ['%s in' % addr]

    result = []
    try:
      symbolizer_input = '"%s" %s' % (binary, offset)
      self.pipe.stdin.write(symbolizer_input.encode('utf-8') + b'\n')
      self.pipe.stdin.flush()
      while True:
        function_name = self.pipe.stdout.readline().rstrip().decode('utf-8')
        if not function_name:
          break

        file_name = self.pipe.stdout.readline().rstrip().decode('utf-8')
        result.append(get_stack_frame(binary, addr, function_name, file_name))
    except Exception:
      logs.log_error('Symbolization using llvm-symbolizer failed for: "%s".' %
                     symbolizer_input)
      result = []

    if not result:
      result = None
    return result


def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
  return LLVMSymbolizer(llvm_symbolizer_path, default_arch, system, dsym_hints)
null
157,015
import os
import re
import subprocess
import sys

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms.android import fetch_artifact
from clusterfuzz._internal.platforms.android import settings
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.platforms.linux import lkl
from clusterfuzz._internal.platforms.linux.lkl import constants as lkl_constants
from clusterfuzz._internal.system import archive
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell


class Addr2LineSymbolizer(Symbolizer):

  def __init__(self, binary):
    super().__init__()
    self.binary = binary
    self.pipe = self.open_addr2line()

  def open_addr2line(self):
    cmd = ['addr2line', '--demangle', '-f', '-e', self.binary]
    pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    global pipes
    pipes.append(pipe)

    return pipe

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if self.binary != binary:
      return None
    if not binary.strip():
      return ['%s in' % addr]

    try:
      symbolizer_input = str(offset).encode('utf-8')
      self.pipe.stdin.write(symbolizer_input + b'\n')
      self.pipe.stdin.flush()
      function_name = self.pipe.stdout.readline().rstrip().decode('utf-8')
      file_name = self.pipe.stdout.readline().rstrip().decode('utf-8')
    except Exception:
      logs.log_error('Symbolization using addr2line failed for: "%s %s".' %
                     (binary, str(offset)))
      function_name = ''
      file_name = ''

    return [get_stack_frame(binary, addr, function_name, file_name)]


class DarwinSymbolizer(Symbolizer):

  def __init__(self, addr, binary, arch):
    super().__init__()
    self.binary = binary
    self.arch = arch
    self.open_atos()

  def open_atos(self):
    cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
    self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if self.binary != binary:
      return None

    try:
      atos_line = self.atos.convert('0x%x' % int(offset, 16))
      while 'got symbolicator for' in atos_line:
        atos_line = self.atos.readline()

      # A well-formed atos response looks like this:
      #   foo(type1, type2) (in object.name) (filename.cc:80)
      match = re.match(r'^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
      if match:
        function_name = match.group(1)
        function_name = re.sub(r'\(.*?\)', '', function_name)
        file_name = match.group(3)
        return [get_stack_frame(binary, addr, function_name, file_name)]
      else:
        return ['%s in %s' % (addr, atos_line)]
    except Exception:
      logs.log_error('Symbolization using atos failed for: "%s %s".' %
                     (binary, str(offset)))
      return ['{} ({}:{}+{})'.format(addr, binary, self.arch, offset)]


def SystemSymbolizerFactory(system, addr, binary, arch):
  if system == 'darwin':
    return DarwinSymbolizer(addr, binary, arch)
  elif system.startswith('linux'):
    return Addr2LineSymbolizer(binary)
null
157,016
import inspect

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.protos import process_state_pb2


The provided code snippet includes necessary dependencies for implementing the `unsigned_to_signed` function. Write a Python function `def unsigned_to_signed(address)` to solve the following problem:
Convert unsigned address to signed int64 (as defined in the proto).

Here is the function:

def unsigned_to_signed(address):
  """Convert unsigned address to signed int64 (as defined in the proto)."""
  return (address - 2**64) if address >= 2**63 else address
Convert unsigned address to signed int64 (as defined in the proto).
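A worked check of the two's-complement conversion above: values at or above 2**63 wrap around to negative int64 values, everything below passes through unchanged.

# Worked example for unsigned_to_signed.
assert unsigned_to_signed(2**64 - 1) == -1
assert unsigned_to_signed(0xdeadbeef) == 0xdeadbeef  # Below 2**63, unchanged.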
157,017
import inspect

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.protos import process_state_pb2


The provided code snippet includes necessary dependencies for implementing the `format_address_to_dec` function. Write a Python function `def format_address_to_dec(address, base=16)` to solve the following problem:
Addresses may be formatted as decimal, hex string with 0x or 0X prefix, or without any prefix. Convert to decimal int.

Here is the function:

def format_address_to_dec(address, base=16):
  """Addresses may be formatted as decimal, hex string with 0x or 0X prefix,
  or without any prefix. Convert to decimal int."""
  if address is None:
    return None

  address = str(address).replace('`', '').strip()
  if not address:
    return None

  # This is required for Chrome Win and Mac stacks, which mix decimal and hex.
  try_bases = [base, 16] if base != 16 else [base]

  for base_try in try_bases:
    try:
      address = int(address, base_try)
      return address
    except Exception:
      continue

  logs.log_warn('Error formatting address %s to decimal int64 in bases %s.' %
                (str(address), str(try_bases)))
  return None
Addresses may be formatted as decimal, hex string with 0x or 0X prefix, or without any prefix. Convert to decimal int.
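Worked checks of the parsing fallbacks above: hex with or without a prefix, windbg-style backticks (stripped before parsing), and the retry with base 16 when base-10 parsing fails on hex input.

# Worked examples for format_address_to_dec.
assert format_address_to_dec('0xff') == 255
assert format_address_to_dec('00000000`0000002c') == 0x2c
assert format_address_to_dec('42', base=10) == 42
assert format_address_to_dec('ff', base=10) == 255  # Retried with base 16.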
157,018
import copy
import datetime
import os
import queue
import subprocess
import sys
import threading
import time

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.crash_analysis import crash_analyzer
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.platforms import linux
from clusterfuzz._internal.platforms import windows
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `close_queue` function. Write a Python function `def close_queue(queue_to_close)` to solve the following problem:
Close the queue.

Here is the function:

def close_queue(queue_to_close):
  """Close the queue."""
  if environment.is_trusted_host():
    # We don't use multiprocessing.Queue on trusted hosts.
    return

  try:
    queue_to_close.close()
  except:
    logs.log_error('Unable to close queue.')
Close the queue.
157,019
import copy
import datetime
import os
import queue
import subprocess
import sys
import threading
import time

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.crash_analysis import crash_analyzer
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.platforms import linux
from clusterfuzz._internal.platforms import windows
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell

try:
  import multiprocessing
  import mozprocess
  import psutil
except ImportError:
  pass


The provided code snippet includes necessary dependencies for implementing the `get_process` function. Write a Python function `def get_process()` to solve the following problem:
Return a multiprocessing process object (with bug fixes).

Here is the function:

def get_process():
  """Return a multiprocessing process object (with bug fixes)."""
  if environment.is_trusted_host():
    # forking/multiprocessing is unsupported because of the RPC connection.
    return threading.Thread

  # FIXME(unassigned): Remove this hack after real bug is fixed.
  # pylint: disable=protected-access
  multiprocessing.current_process()._identity = ()

  return multiprocessing.Process
Return a multiprocessing process object (with bug fixes).
157,020
import copy
import datetime
import os
import queue
import subprocess
import sys
import threading
import time

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.crash_analysis import crash_analyzer
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.platforms import linux
from clusterfuzz._internal.platforms import windows
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell

try:
  import multiprocessing
  import mozprocess
  import psutil
except ImportError:
  pass


The provided code snippet includes necessary dependencies for implementing the `get_queue` function. Write a Python function `def get_queue()` to solve the following problem:
Return a multiprocessing queue object.

Here is the function:

def get_queue():
  """Return a multiprocessing queue object."""
  if environment.is_trusted_host():
    # We don't use multiprocessing.Process on trusted hosts. No need to use
    # multiprocessing.Queue.
    return queue.Queue()

  try:
    result_queue = multiprocessing.Queue()
  except:
    # FIXME: Invalid cross-device link error. Happens sometimes with
    # chroot jobs even though /dev/shm and /run/shm are mounted.
    logs.log_error('Unable to get multiprocessing queue.')
    return None

  return result_queue
Return a multiprocessing queue object.
157,021
import copy
import datetime
import os
import queue
import subprocess
import sys
import threading
import time

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.crash_analysis import crash_analyzer
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.platforms import linux
from clusterfuzz._internal.platforms import windows
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell

THREAD_FINISH_WAIT_TIME = 5


def get_runtime_snapshot():
  """Return a list of current processes and their command lines as string."""
  process_strings = []
  for process in psutil.process_iter():
    try:
      process_info = process.as_dict(attrs=['name', 'cmdline', 'pid', 'ppid'])
      process_string = '{name} ({pid}, {ppid})'.format(
          name=process_info['name'],
          pid=process_info['pid'],
          ppid=process_info['ppid'])
      process_cmd_line = process_info['cmdline']
      if process_cmd_line:
        process_string += f': {" ".join(process_cmd_line)}'
      process_strings.append(process_string)
    except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
      # Ignore the error, use whatever info is available for access.
      pass

  return '\n'.join(sorted(process_strings))


The provided code snippet includes necessary dependencies for implementing the `terminate_hung_threads` function. Write a Python function `def terminate_hung_threads(threads)` to solve the following problem:
Terminate hung threads.

Here is the function:

def terminate_hung_threads(threads):
  """Terminate hung threads."""
  start_time = time.time()
  while time.time() - start_time < THREAD_FINISH_WAIT_TIME:
    if not any([thread.is_alive() for thread in threads]):
      # No threads are alive, so we're done.
      return

    time.sleep(0.1)

  logs.log_warn('Hang detected.', snapshot=get_runtime_snapshot())

  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import host
    # Bail out on trusted hosts since we're using threads and can't clean up.
    host.host_exit_no_return()

  # Terminate all threads that are still alive.
  try:
    [thread.terminate() for thread in threads if thread.is_alive()]
  except:
    pass
Terminate hung threads.
157,022
import copy
import datetime
import os
import queue
import subprocess
import sys
import threading
import time

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.crash_analysis import crash_analyzer
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.platforms import android
from clusterfuzz._internal.platforms import linux
from clusterfuzz._internal.platforms import windows
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `scripts_are_running` function. Write a Python function `def scripts_are_running(expected_scripts)` to solve the following problem:
Check if all target scripts are running as expected.

Here is the function:

def scripts_are_running(expected_scripts):
  """Check if all target scripts are running as expected."""
  scripts_left = expected_scripts.copy()
  for process in psutil.process_iter():
    for expected_script in scripts_left:
      if any(expected_script == os.path.basename(cmdline)
             for cmdline in process.cmdline()):
        scripts_left.remove(expected_script)
    if not scripts_left:
      return True

  return False
Check if all target scripts are running as expected.
157,023
from collections import namedtuple
import os
import shutil
import signal
import subprocess
import tempfile

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `_get_minijail_path` function. Write a Python function `def _get_minijail_path()` to solve the following problem:
Get the minijail path.

Returns:
  The path to the minijail binary.

Here is the function:

def _get_minijail_path():
  """Get the minijail path.

  Returns:
    The path to the minijail binary.
  """
  return os.path.join(environment.get_platform_resources_directory(),
                      'minijail0')
Get the minijail path. Returns: The path to the minijail binary.
157,024
from collections import namedtuple
import os
import shutil
import signal
import subprocess
import tempfile

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `_get_minijail_user_namespace_args` function. Write a Python function `def _get_minijail_user_namespace_args()` to solve the following problem:
Get user namespace arguments for minijail.

Returns:
  A list representing arguments to minijail.

Here is the function:

def _get_minijail_user_namespace_args():
  """Get user namespace arguments for minijail.

  Returns:
    A list representing arguments to minijail.
  """
  arguments = ['-U']  # User namespace option

  # root (uid 0 in namespace) -> USER.
  # The reason for this is that minijail does setresuid(0, 0, 0) before doing
  # a chroot, which means uid 0 needs access to the chroot dir (owned by
  # USER).
  #
  # Note that we also run fuzzers as uid 0 (but with no capabilities in
  # permitted/effective/inherited sets which *should* mean there's nothing
  # special about it). This is because the uid running the fuzzer also needs
  # access to things owned by USER (fuzzer binaries, supporting files), and
  # USER can only be mapped once.
  uid_map = [
      f'0 {os.getuid()} 1',
  ]
  arguments.extend(['-m', ','.join(uid_map)])

  return arguments
Get user namespace arguments for minijail. Returns: A list representing arguments to minijail.
157,025
from collections import namedtuple
import os
import shutil
import signal
import subprocess
import tempfile

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `_create_chroot_dir` function. Write a Python function `def _create_chroot_dir(base_dir)` to solve the following problem:
Create dir for chroot.

Here is the function:

def _create_chroot_dir(base_dir):
  """Create dir for chroot."""
  return tempfile.mkdtemp(dir=base_dir)
Create dir for chroot.
157,026
from collections import namedtuple
import os
import shutil
import signal
import subprocess
import tempfile

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import shell


The provided code snippet includes necessary dependencies for implementing the `_create_tmp_mount` function. Write a Python function `def _create_tmp_mount(base_dir)` to solve the following problem:
Create a tmp mount in base_dir.

Here is the function:

def _create_tmp_mount(base_dir):
  """Create a tmp mount in base_dir."""
  return tempfile.mkdtemp(dir=base_dir)
Create a tmp mount in base_dir.
157,027
import contextlib
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import uuid

from clusterfuzz._internal.base import persistent_cache
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment

FILE_COPY_BUFFER_SIZE = 10 * 1024 * 1024


The provided code snippet includes necessary dependencies for implementing the `copy_file` function. Write a Python function `def copy_file(source_file_path, destination_file_path)` to solve the following problem:
Faster version of shutil.copy with buffer size.

Here is the function:

def copy_file(source_file_path, destination_file_path):
  """Faster version of shutil.copy with buffer size."""
  if not os.path.exists(source_file_path):
    logs.log_error('Source file %s for copy not found.' % source_file_path)
    return False

  error_occurred = False
  try:
    with open(source_file_path, 'rb') as source_file_handle:
      with open(destination_file_path, 'wb') as destination_file_handle:
        shutil.copyfileobj(source_file_handle, destination_file_handle,
                           FILE_COPY_BUFFER_SIZE)
  except:
    error_occurred = True

  # Make sure that the destination file actually exists.
  error_occurred |= not os.path.exists(destination_file_path)

  if error_occurred:
    logs.log_warn('Failed to copy source file %s to destination file %s.' %
                  (source_file_path, destination_file_path))
    return False

  return True
Faster version of shutil.copy with buffer size.
157,028
import contextlib
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import uuid

from clusterfuzz._internal.base import persistent_cache
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment


def walk(directory, **kwargs):
  """Wrapper around walk to resolve compatibility issues."""
  return os.walk(directory, **kwargs)


The provided code snippet includes necessary dependencies for implementing the `remove_empty_files` function. Write a Python function `def remove_empty_files(root_path)` to solve the following problem:
Removes empty files in a path recursively

Here is the function:

def remove_empty_files(root_path):
  """Removes empty files in a path recursively"""
  for directory, _, filenames in walk(root_path):
    for filename in filenames:
      path = os.path.join(directory, filename)
      if os.path.getsize(path) > 0:
        continue

      try:
        os.remove(path)
      except:
        logs.log_error('Unable to remove the empty file: %s (%s).' %
                       (path, sys.exc_info()[0]))
Removes empty files in a path recursively
157,029
import contextlib
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import uuid

from clusterfuzz._internal.base import persistent_cache
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment


def _get_random_filename():
  return str(uuid.uuid4()).lower()


The provided code snippet includes necessary dependencies for implementing the `get_tempfile` function. Write a Python function `def get_tempfile(prefix='', suffix='')` to solve the following problem:
Returns path to a temporary file.

Here is the function:

@contextlib.contextmanager
def get_tempfile(prefix='', suffix=''):
  """Returns path to a temporary file."""
  tempdir = environment.get_value('BOT_TMPDIR', '/tmp')
  os.makedirs(tempdir, exist_ok=True)
  basename = _get_random_filename()
  filename = f'{prefix}{basename}{suffix}'
  filepath = os.path.join(tempdir, filename)
  yield filepath
  if os.path.exists(filepath):
    os.remove(filepath)
Returns path to a temporary file.
157,030
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_current_memory_tool_var():
  """Get the environment variable name for the current job type's
  sanitizer."""
  memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
  if not memory_tool_name:
    return None

  return memory_tool_name + '_OPTIONS'


def get_memory_tool_options(env_var, default_value=None):
  """Get the current memory tool options as a dict. Returns |default_value|
  if |env_var| isn't set. Otherwise returns a dictionary containing the
  memory tool options and their values."""
  env_value = get_value(env_var)
  if env_value is not None:
    return _parse_memory_tool_options(env_value)

  return default_value


def set_memory_tool_options(env_var, options_dict):
  """Set current memory tool options."""
  set_value(env_var, join_memory_tool_options(options_dict))


The provided code snippet includes necessary dependencies for implementing the `disable_lsan` function. Write a Python function `def disable_lsan()` to solve the following problem:
Disable leak detection (if enabled).

Here is the function:

def disable_lsan():
  """Disable leak detection (if enabled)."""
  if get_current_memory_tool_var() != 'ASAN_OPTIONS':
    return

  sanitizer_options = get_memory_tool_options('ASAN_OPTIONS', {})
  sanitizer_options['detect_leaks'] = 0
  set_memory_tool_options('ASAN_OPTIONS', sanitizer_options)
Disable leak detection (if enabled).
157,031
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_memory_tool_name(job_name):
  """Figures out name of memory debugging tool."""
  for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
    if tool_matches(tool, job_name):
      return tool

  # If no tool specified, assume it is ASAN. Also takes care of LSAN job type.
  return 'ASAN'


def get_value(environment_variable, default_value=None, env=None):
  """Return an environment variable value."""
  if env is None:
    env = os.environ

  value_string = env.get(environment_variable)

  # value_string will be None if the variable is not defined.
  if value_string is None:
    return default_value

  # Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
  # so we don't want to eval it.
  if environment_variable == 'ANDROID_SERIAL':
    return value_string

  # Evaluate the value of the environment variable with string fallback.
  return _eval_value(value_string)


The provided code snippet includes necessary dependencies for implementing the `get_instrumented_libraries_paths` function. Write a Python function `def get_instrumented_libraries_paths()` to solve the following problem:
Get the instrumented libraries path for the current sanitizer.

Here is the function:

def get_instrumented_libraries_paths():
  """Get the instrumented libraries path for the current sanitizer."""
  memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
  if not memory_tool_name:
    return None

  if memory_tool_name == 'MSAN':
    if 'no-origins' in get_value('BUILD_URL', ''):
      memory_tool_name += '_NO_ORIGINS'
    else:
      memory_tool_name += '_CHAINED'

  paths = get_value('INSTRUMENTED_LIBRARIES_PATHS_' + memory_tool_name)
  if not paths:
    return None

  return paths.split(':')
Get the instrumented libraries path for the current sanitizer.
157,032
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_config_directory():
  """Return the path to the configs directory."""
  config_dir = get_value('CONFIG_DIR_OVERRIDE')
  if config_dir:
    return config_dir

  if is_running_on_app_engine():
    # Root is already src/appengine.
    return 'config'

  # Running on bot, give path to config folder inside appengine dir.
  return os.path.join(get_root_directory(), 'src', 'appengine', 'config')


The provided code snippet includes necessary dependencies for implementing the `get_gae_config_directory` function. Write a Python function `def get_gae_config_directory()` to solve the following problem:
Return the path to the google appengine configs directory.

Here is the function:

def get_gae_config_directory():
  """Return the path to the google appengine configs directory."""
  return os.path.join(get_config_directory(), 'gae')
Return the path to the google appengine configs directory.
157,033
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_engine_for_job(job_name=None):
  """Get the engine for the given job."""
  if not job_name:
    job_name = get_value('JOB_NAME')

  for engine in fuzzing.ENGINES:
    if engine.lower() in job_name:
      return engine

  return None


The provided code snippet includes necessary dependencies for implementing the `is_honggfuzz_job` function. Write a Python function `def is_honggfuzz_job(job_name=None)` to solve the following problem:
Return True if the current job uses honggfuzz.

Here is the function:

def is_honggfuzz_job(job_name=None):
  """Return True if the current job uses honggfuzz."""
  return get_engine_for_job(job_name) == 'honggfuzz'
Return True if the current job uses honggfuzz.
157,034
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_engine_for_job(job_name=None):
  """Get the engine for the given job."""
  if not job_name:
    job_name = get_value('JOB_NAME')

  for engine in fuzzing.ENGINES:
    if engine.lower() in job_name:
      return engine

  return None


The provided code snippet includes necessary dependencies for implementing the `is_centipede_fuzzer_job` function. Write a Python function `def is_centipede_fuzzer_job(job_name=None)` to solve the following problem:
Return True if the current job uses Centipede.

Here is the function:

def is_centipede_fuzzer_job(job_name=None):
  """Return True if the current job uses Centipede."""
  return get_engine_for_job(job_name) == 'centipede'
Return True if the current job uses Centipede.
157,035
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


The provided code snippet includes necessary dependencies for implementing the `is_posix` function. Write a Python function `def is_posix()` to solve the following problem:
Return True if we are on a posix platform (linux/unix and mac os).

Here is the function:

def is_posix():
  """Return True if we are on a posix platform (linux/unix and mac os)."""
  return os.name == 'posix'
Return True if we are on a posix platform (linux/unix and mac os).
157,036
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def set_value(environment_variable, value, env=None):
  """Set an environment variable."""
  if env is None:
    env = os.environ

  value_str = str(value)
  environment_variable_str = str(environment_variable)
  value_str = value_str.replace('%ROOT_DIR%', os.getenv('ROOT_DIR', ''))
  env[environment_variable_str] = value_str

  if is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import \
        environment as untrusted_env
    untrusted_env.forward_environment_variable(environment_variable_str,
                                               value_str)


The provided code snippet includes necessary dependencies for implementing the `set_environment_parameters_from_file` function. Write a Python function `def set_environment_parameters_from_file(file_path)` to solve the following problem:
Set environment variables from a file.

Here is the function:

def set_environment_parameters_from_file(file_path):
  """Set environment variables from a file."""
  if not os.path.exists(file_path):
    return

  with open(file_path) as f:
    file_data = f.read()

  for line in file_data.splitlines():
    if line.startswith('#') or not line.strip():
      continue

    m = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
    if m:
      environment_variable = m.group(1)
      environment_variable_value = m.group(2)
      set_value(environment_variable, environment_variable_value)
Set environment variables from a file.
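A minimal sketch of the file format the parser above accepts: `#` comments and blank lines are skipped, and each remaining line is split on the first `=` (surrounding spaces allowed). The file path and variable names below are illustrative.

# Hypothetical contents of /tmp/extra.env:
#
#   # sanitizer settings
#   ASAN_OPTIONS = detect_leaks=0
#   APP_ARGS=--no-sandbox
#
# set_environment_parameters_from_file('/tmp/extra.env') would then call
# set_value('ASAN_OPTIONS', 'detect_leaks=0') and
# set_value('APP_ARGS', '--no-sandbox').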
157,037
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_value(environment_variable, default_value=None, env=None):
  """Return an environment variable value."""
  if env is None:
    env = os.environ

  value_string = env.get(environment_variable)

  # value_string will be None if the variable is not defined.
  if value_string is None:
    return default_value

  # Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
  # so we don't want to eval it.
  if environment_variable == 'ANDROID_SERIAL':
    return value_string

  # Evaluate the value of the environment variable with string fallback.
  return _eval_value(value_string)


def is_trusted_host(ensure_connected=True):
  """Return whether or not the current bot is a trusted host."""
  return get_value('TRUSTED_HOST') and (not ensure_connected or
                                        get_value('WORKER_BOT_NAME'))


def set_default_vars():
  """Set default environment vars and values."""
  env_file_path = os.path.join(get_value('ROOT_DIR'), 'bot', 'env.yaml')
  with open(env_file_path) as file_handle:
    env_file_contents = file_handle.read()

  env_vars_and_values = yaml.safe_load(env_file_contents)
  for variable, value in env_vars_and_values.items():
    # We cannot call set_value here.
    os.environ[variable] = str(value)


The provided code snippet includes necessary dependencies for implementing the `set_bot_environment` function. Write a Python function `def set_bot_environment()` to solve the following problem:
Set environment for the bots.

Here is the function:

def set_bot_environment():
  """Set environment for the bots."""
  root_dir = get_value('ROOT_DIR')

  if not root_dir:
    # Error, bail out.
    return False

  # Reset our current working directory. Our last job might
  # have left us in a non-existent temp directory.
  # Or ROOT_DIR might be deleted and recreated.
  os.chdir(root_dir)

  # Set some default directories. These can be overriden by config files
  # below.
  bot_dir = os.path.join(root_dir, 'bot')
  if is_trusted_host(ensure_connected=False):
    worker_root_dir = os.environ['WORKER_ROOT_DIR']
    os.environ['BUILDS_DIR'] = os.path.join(worker_root_dir, 'bot', 'builds')
  else:
    os.environ['BUILDS_DIR'] = os.path.join(bot_dir, 'builds')

  os.environ['BUILD_URLS_DIR'] = os.path.join(bot_dir, 'build-urls')
  os.environ['LOG_DIR'] = os.path.join(bot_dir, 'logs')
  os.environ['CACHE_DIR'] = os.path.join(bot_dir, 'cache')

  inputs_dir = os.path.join(bot_dir, 'inputs')
  os.environ['INPUT_DIR'] = inputs_dir
  os.environ['CRASH_STACKTRACES_DIR'] = os.path.join(inputs_dir,
                                                     'crash-stacks')
  os.environ['FUZZERS_DIR'] = os.path.join(inputs_dir, 'fuzzers')
  os.environ['DATA_BUNDLES_DIR'] = os.path.join(inputs_dir, 'data-bundles')
  os.environ['FUZZ_INPUTS'] = os.path.join(inputs_dir, 'fuzzer-testcases')
  os.environ['FUZZ_INPUTS_MEMORY'] = os.environ['FUZZ_INPUTS']
  os.environ['FUZZ_INPUTS_DISK'] = os.path.join(inputs_dir,
                                                'fuzzer-testcases-disk')
  os.environ['FUZZ_DATA'] = os.path.join(inputs_dir,
                                         'fuzzer-common-data-bundles')
  os.environ['IMAGES_DIR'] = os.path.join(inputs_dir, 'images')
  os.environ['SYMBOLS_DIR'] = os.path.join(inputs_dir, 'symbols')
  os.environ['USER_PROFILE_ROOT_DIR'] = os.path.join(inputs_dir,
                                                     'user-profile-dirs')

  # Set bot name.
  if not get_value('BOT_NAME'):
    # If not defined, default to host name.
    os.environ['BOT_NAME'] = socket.gethostname().lower()

  # Local temp directory (non-tmpfs).
  local_tmp_dir = os.path.join(bot_dir, 'tmp')

  # Set BOT_TMPDIR if not already set.
  if not get_value('BOT_TMPDIR'):
    os.environ['BOT_TMPDIR'] = local_tmp_dir

  # Add common environment variables needed by Bazel test runner.
  # See https://docs.bazel.build/versions/master/test-encyclopedia.html.
  # NOTE: Do not use a tmpfs folder as some fuzz targets don't work.
  os.environ['TEST_TMPDIR'] = local_tmp_dir
  os.environ['TZ'] = 'UTC'

  # Sets the default configuration. Can be overridden by job environment.
  set_default_vars()

  # Set environment variable from local project configuration.
  from clusterfuzz._internal.config import local_config
  local_config.ProjectConfig().set_environment()

  # Success.
  return True
Set environment for the bots.
157,038
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def is_running_on_app_engine():
  """Return True if we are running on appengine (local or production)."""
  return (os.getenv('GAE_ENV') or
          is_running_on_app_engine_development() or
          os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'))


The provided code snippet includes necessary dependencies for implementing the `appengine_noop` function. Write a Python function `def appengine_noop(func)` to solve the following problem:
Wrap a function into no-op and return None if running on App Engine.

Here is the function:

def appengine_noop(func):
  """Wrap a function into no-op and return None if running on App Engine."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    if is_running_on_app_engine():
      return None

    return func(*args, **kwargs)

  return wrapper
Wrap a function into no-op and return None if running on App Engine.
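A usage sketch for the decorator above; the decorated function's name and body are illustrative, not from the record.

# Hypothetical usage: becomes a no-op on App Engine, runs normally on bots.
@appengine_noop
def write_local_scratch_file():
  with open('/tmp/scratch', 'w') as f:
    f.write('bot-only side effect')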
157,039
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def is_running_on_app_engine():
  """Return True if we are running on appengine (local or production)."""
  return (os.getenv('GAE_ENV') or
          is_running_on_app_engine_development() or
          os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'))


The provided code snippet includes necessary dependencies for implementing the `bot_noop` function. Write a Python function `def bot_noop(func)` to solve the following problem:
Wrap a function into no-op and return None if running on bot.

Here is the function:

def bot_noop(func):
  """Wrap a function into no-op and return None if running on bot."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    is_bot = not is_running_on_app_engine()
    if is_bot:
      return None

    return func(*args, **kwargs)

  return wrapper
Wrap a function into no-op and return None if running on bot.
157,040
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def is_running_on_app_engine_development():
  """Return True if running on the local development appengine server."""
  return (os.getenv('GAE_ENV') == 'dev' or
          os.getenv('SERVER_SOFTWARE', '').startswith('Development/'))


def is_local_development():
  """Return True if running in local development environment (e.g. running
  a bot locally, excludes tests)."""
  return bool(get_value('LOCAL_DEVELOPMENT') and not get_value('PY_UNITTESTS'))


The provided code snippet includes necessary dependencies for implementing the `local_noop` function. Write a Python function `def local_noop(func)` to solve the following problem:
Wrap a function into no-op and return None if running in local development environment.

Here is the function:

def local_noop(func):
  """Wrap a function into no-op and return None if running in local
  development environment."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    if (is_local_development() or is_running_on_app_engine_development()):
      return None

    return func(*args, **kwargs)

  return wrapper
Wrap a function into no-op and return None if running in local development environment.
157,041
import ast
import functools
import os
import re
import socket
import subprocess
import sys

import yaml

from clusterfuzz._internal import fuzzing


def get_value(environment_variable, default_value=None, env=None):
  """Return an environment variable value."""
  if env is None:
    env = os.environ

  value_string = env.get(environment_variable)

  # value_string will be None if the variable is not defined.
  if value_string is None:
    return default_value

  # Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
  # so we don't want to eval it.
  if environment_variable == 'ANDROID_SERIAL':
    return value_string

  # Evaluate the value of the environment variable with string fallback.
  return _eval_value(value_string)


The provided code snippet includes necessary dependencies for implementing the `if_redis_available` function. Write a Python function `def if_redis_available(func)` to solve the following problem:
Wrap a function if redis is available and return None if not.

Here is the function:

def if_redis_available(func):
  """Wrap a function if redis is available and return None if not."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    if get_value('REDIS_HOST'):
      return func(*args, **kwargs)

    return None

  return wrapper
Wrap a function if redis is available and return None if not.
157,042
import abc import dataclasses import os import tarfile from typing import BinaryIO from typing import Callable from typing import List from typing import Optional from typing import Union import zipfile from clusterfuzz._internal.metrics import logs StrBytesPathLike = Union[str, bytes, os.PathLike] The provided code snippet includes necessary dependencies for implementing the `_is_attempting_path_traversal` function. Write a Python function `def _is_attempting_path_traversal(archive_name: StrBytesPathLike, output_dir: StrBytesPathLike, filename: StrBytesPathLike) -> bool` to solve the following problem: Detects whether there is a path traversal attempt. Args: archive_name: the name of the archive. output_dir: the output directory. filename: the name of the file being checked. Returns: Whether there is a path traversal attempt Here is the function: def _is_attempting_path_traversal(archive_name: StrBytesPathLike, output_dir: StrBytesPathLike, filename: StrBytesPathLike) -> bool: """Detects whether there is a path traversal attempt. Args: archive_name: the name of the archive. output_dir: the output directory. filename: the name of the file being checked. Returns: Whether there is a path traversal attempt """ output_dir = os.path.realpath(output_dir) absolute_file_path = os.path.join(output_dir, os.path.normpath(filename)) real_file_path = os.path.realpath(absolute_file_path) if real_file_path == output_dir: # Workaround for https://bugs.python.org/issue28488. # Ignore directories named '.'. return False if real_file_path != absolute_file_path: logs.log_error('Directory traversal attempted while unpacking archive %s ' '(file path=%s, actual file path=%s). Aborting.' % (archive_name, absolute_file_path, real_file_path)) return True return False
Detects whether there is a path traversal attempt. Args: archive_name: the name of the archive. output_dir: the output directory. filename: the name of the file being checked. Returns: Whether there is a path traversal attempt
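A self-contained sketch of the same realpath/normpath comparison, useful for seeing which inputs trip the check (the paths are illustrative):

import os

def is_path_traversal(output_dir, filename):
    output_dir = os.path.realpath(output_dir)
    absolute_file_path = os.path.join(output_dir, os.path.normpath(filename))
    real_file_path = os.path.realpath(absolute_file_path)
    if real_file_path == output_dir:
        return False  # A member named '.' resolves to the output dir itself.
    return real_file_path != absolute_file_path

print(is_path_traversal('/tmp/out', 'data/file.txt'))     # False: stays inside.
print(is_path_traversal('/tmp/out', '../../etc/passwd'))  # True: escapes output_dir.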
157,043
import abc import dataclasses import os import tarfile from typing import BinaryIO from typing import Callable from typing import List from typing import Optional from typing import Union import zipfile from clusterfuzz._internal.metrics import logs class ArchiveType: """Type of the archive.""" UNKNOWN = 0 ZIP = 1 TAR = 2 TAR_LZMA = 3 def get_archive_type(archive_path: str) -> ArchiveType: """Get the type of the archive. Args: archive_path: the path to the archive. Returns: the type of the archive, or ArchiveType.UNKNOWN if unknown. """ def has_extension(extensions): """Returns True if |archive_path| endswith an extension in |extensions|.""" for extension in extensions: if archive_path.endswith(extension): return True return False if has_extension(ZIP_FILE_EXTENSIONS): return ArchiveType.ZIP if has_extension(TAR_FILE_EXTENSIONS): return ArchiveType.TAR if has_extension(LZMA_FILE_EXTENSIONS): return ArchiveType.TAR_LZMA return ArchiveType.UNKNOWN The provided code snippet includes necessary dependencies for implementing the `is_archive` function. Write a Python function `def is_archive(filename: str) -> bool` to solve the following problem: Return true if the file is an archive. Args: filename: the path to a file. Returns: whether the provided file is an archive. Here is the function: def is_archive(filename: str) -> bool: """Return true if the file is an archive. Args: filename: the path to a file. Returns: whether the provided file is an archive. """ return get_archive_type(filename) != ArchiveType.UNKNOWN
Return true if the file is an archive. Args: filename: the path to a file. Returns: whether the provided file is an archive.
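The record above references module-level extension lists that are not shown; a sketch with assumed values illustrates the dispatch, with ArchiveType, get_archive_type, and is_archive from above in scope:

ZIP_FILE_EXTENSIONS = ['.zip']                     # Assumed values; the real
TAR_FILE_EXTENSIONS = ['.tar', '.tar.gz', '.tgz']  # lists live at module level.
LZMA_FILE_EXTENSIONS = ['.tar.lzma', '.tar.xz']

for name in ('corpus.zip', 'build.tar.gz', 'readme.txt'):
    print(name, is_archive(name))  # True, True, False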
157,044
import os import subprocess import sys import tempfile import threading import time import urllib.request from clusterfuzz._internal.base import utils from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment def _end_process(terminate_function, process_result): """Ends a running process. Ignores exceptions. Args: terminate_function: The function to terminate the process. process_result: A ProcessResult object to which timeout information will be written. """ try: terminate_function() except OSError: logs.log('Process already killed.') process_result.timed_out = True class ProcessResult: """Object representing result of a process execution. Returned by ProcessRunner.run_and_wait(). Attributes: command: A list of arguments representing the command line that was run. return_code: Exit code of the process. output: Process output. time_executed: Number of seconds process ran for. timed_out: Whether or not the process timed out. """ def __init__(self, command=None, return_code=None, output=None, time_executed=None, timed_out=False): """Inits the ProcessResult.""" self.command = command self.return_code = return_code self.output = output self.time_executed = time_executed self.timed_out = timed_out The provided code snippet includes necessary dependencies for implementing the `wait_process` function. Write a Python function `def wait_process(process, timeout, input_data=None, terminate_before_kill=False, terminate_wait_time=None)` to solve the following problem: Waits until either the process exits or times out. Args: process: A subprocess.Popen object. timeout: Maximum number of seconds to wait for before sending a signal. input_data: Input to be sent to the process. terminate_before_kill: A bool indicating that SIGTERM should be sent to the process first before SIGKILL (to let the SIGTERM handler run). terminate_wait_time: Maximum number of seconds to wait for the SIGTERM handler. Returns: A ProcessResult. Here is the function: def wait_process(process, timeout, input_data=None, terminate_before_kill=False, terminate_wait_time=None): """Waits until either the process exits or times out. Args: process: A subprocess.Popen object. timeout: Maximum number of seconds to wait for before sending a signal. input_data: Input to be sent to the process. terminate_before_kill: A bool indicating that SIGTERM should be sent to the process first before SIGKILL (to let the SIGTERM handler run). terminate_wait_time: Maximum number of seconds to wait for the SIGTERM handler. Returns: A ProcessResult. """ result = ProcessResult() is_windows = environment.platform() == 'WINDOWS' # On Windows, terminate() just calls Win32 API function TerminateProcess() # which is equivalent to process kill. So, skip terminate_before_kill. if terminate_before_kill and not is_windows: first_timeout_function = process.terminate # Use a second timer to send the process kill. second_timer = threading.Timer(timeout + terminate_wait_time, _end_process, [process.kill, result]) else: first_timeout_function = process.kill second_timer = None first_timer = threading.Timer(timeout, _end_process, [first_timeout_function, result]) output = None start_time = time.time() try: first_timer.start() if second_timer: second_timer.start() output = process.communicate(input_data)[0] finally: first_timer.cancel() if second_timer: second_timer.cancel() result.return_code = process.poll() result.output = output result.time_executed = time.time() - start_time return result
Waits until either the process exits or times out. Args: process: A subprocess.Popen object. timeout: Maximum number of seconds to wait for before sending a signal. input_data: Input to be sent to the process. terminate_before_kill: A bool indicating that SIGTERM should be sent to the process first before SIGKILL (to let the SIGTERM handler run). terminate_wait_time: Maximum number of seconds to wait for the SIGTERM handler. Returns: A ProcessResult.
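A usage sketch, assuming the wait_process and ProcessResult definitions above are in scope and a POSIX system (the 'sleep' binary is illustrative):

import subprocess

proc = subprocess.Popen(['sleep', '30'], stdout=subprocess.PIPE)
result = wait_process(
    proc, timeout=2, terminate_before_kill=True, terminate_wait_time=5)
# After ~2s a SIGTERM fires; a SIGKILL would follow 5s later if needed.
print(result.timed_out, result.return_code, round(result.time_executed, 1))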
157,045
import random import struct The provided code snippet includes necessary dependencies for implementing the `get_pack_format_and_mask_for_num_bytes` function. Write a Python function `def get_pack_format_and_mask_for_num_bytes(num_bytes, signed=False, little_endian=True)` to solve the following problem: Return the struct pack format and bit mask for the integer values of size |num_bytes|. Here is the function: def get_pack_format_and_mask_for_num_bytes(num_bytes, signed=False, little_endian=True): """Return the struct pack format and bit mask for the integer values of size |num_bytes|.""" if num_bytes == 1: pack_fmt = 'B' mask = (1 << 8) - 1 elif num_bytes == 2: pack_fmt = 'H' mask = (1 << 16) - 1 elif num_bytes == 4: pack_fmt = 'I' mask = (1 << 32) - 1 elif num_bytes == 8: pack_fmt = 'Q' mask = (1 << 64) - 1 else: raise ValueError if signed: pack_fmt = pack_fmt.lower() if num_bytes > 1: if little_endian: pack_fmt = '<' + pack_fmt else: pack_fmt = '>' + pack_fmt return pack_fmt, mask
Return the struct pack format and bit mask for the integer values of size |num_bytes|.
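A short demonstration, assuming the function above is importable, of pairing the returned format with struct and using the mask to keep mutated values in range:

import struct

pack_fmt, mask = get_pack_format_and_mask_for_num_bytes(4)
print(pack_fmt, hex(mask))           # <I 0xffffffff
value = (0xFFFFFFFF + 1) & mask      # Mask wraps overflow back into 4 bytes.
print(struct.pack(pack_fmt, value))  # b'\x00\x00\x00\x00'

pack_fmt, _ = get_pack_format_and_mask_for_num_bytes(
    2, signed=True, little_endian=False)
print(pack_fmt, struct.pack(pack_fmt, -1))  # >h b'\xff\xff'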
157,046
import ast import base64 import bisect import os import re import time import urllib.parse from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import source_mapper from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment def get_component_range_list(start_revision, end_revision, job_type, platform_id=None): """Gets revision variable ranges for a changeset range.""" start_component_revisions_dict = get_component_revisions_dict( start_revision, job_type, platform_id=platform_id) if start_revision == end_revision: end_component_revisions_dict = start_component_revisions_dict else: end_component_revisions_dict = get_component_revisions_dict( end_revision, job_type, platform_id=platform_id) if (start_component_revisions_dict is None or end_component_revisions_dict is None): return [] component_revisions = [] keys = get_components_list(end_component_revisions_dict, job_type) for key in keys: if not start_component_revisions_dict: # 0 start revision, can only show link text. end_component_display_revision = _get_display_revision( end_component_revisions_dict[key]) component_name = end_component_revisions_dict[key]['name'] component_revisions.append({ 'component': component_name, 'link_text': '0:%s' % end_component_display_revision }) continue if key not in start_component_revisions_dict: logs.log_warn('Key %s not found in start revision %s for job %s.' % (key, start_revision, job_type)) continue start_component_revision_dict = start_component_revisions_dict[key] end_component_revision_dict = end_component_revisions_dict[key] component_revisions.append({ 'component': start_component_revision_dict['name'], 'link_text': _get_link_text(start_component_revision_dict, end_component_revision_dict), 'link_url': _get_link_url(start_component_revision_dict, end_component_revision_dict) }) return component_revisions The provided code snippet includes necessary dependencies for implementing the `get_component_list` function. Write a Python function `def get_component_list(revision, job_type)` to solve the following problem: Gets mapped revisions for a given revision. Here is the function: def get_component_list(revision, job_type): """Gets mapped revisions for a given revision.""" return get_component_range_list(revision, revision, job_type)
Gets mapped revisions for a given revision.
157,047
import ast import base64 import bisect import os import re import time import urllib.parse from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import source_mapper from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment The provided code snippet includes necessary dependencies for implementing the `format_revision_list` function. Write a Python function `def format_revision_list(revisions, use_html=True)` to solve the following problem: Converts component revision list to html. Here is the function: def format_revision_list(revisions, use_html=True): """Converts component revision list to html.""" result = '' for revision in revisions: if revision['component']: result += '%s: ' % revision['component'] if 'link_url' in revision and revision['link_url'] and use_html: result += '<a target="_blank" href="{link_url}">{link_text}</a>'.format( link_url=revision['link_url'], link_text=revision['link_text']) else: result += revision['link_text'] if use_html: result += '<br />' else: result += '\n' return result
Converts component revision list to html.
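An input/output sketch, assuming format_revision_list above is in scope (the URL is a placeholder):

revisions = [
    {'component': 'v8', 'link_text': '1234:1240',
     'link_url': 'https://chromium.example/range'},  # Placeholder URL.
    {'component': '', 'link_text': 'deadbeef'},
]
print(format_revision_list(revisions, use_html=False))
# v8: 1234:1240
# deadbeef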
157,048
import ast import base64 import bisect import os import re import time import urllib.parse from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import source_mapper from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment def _get_display_revision(component_revision_dict): """Return display revision for a component revision dict.""" if 'commit_pos' in component_revision_dict: return component_revision_dict['commit_pos'] return component_revision_dict['rev'] or '<empty>' def _get_revision(component_revision_dict): """Return revision for a component revision dict.""" return component_revision_dict['rev'] def get_components_list(component_revisions_dict, job_type): """Return a prioritized order of components based on job type.""" components = sorted(component_revisions_dict.keys()) if utils.is_chromium(): # Components prioritization only applies to non-chromium projects. return components project_name = data_handler.get_project_name(job_type) if not project_name: # No project name found in job environment, return list as-is. return components main_repo = data_handler.get_main_repo(job_type) project_src = '/src/' + project_name for component in components.copy(): if component_revisions_dict[component]['url'] == main_repo: # Matches recorded main repo. components.remove(component) components.insert(0, component) break if component == project_src: components.remove(component) components.insert(0, component) break if project_name.lower() in os.path.basename(component).lower(): components.remove(component) components.insert(0, component) # Keep trying in case an exact match is found later. return components def get_component_revisions_dict(revision, job_type, platform_id=None): """Retrieve revision vars dict.""" if revision == 0 or revision == '0' or revision is None: # Return empty dict for zero start revision. return {} revision_vars_url_format = _get_revision_vars_url_format( job_type, platform_id=platform_id) if not revision_vars_url_format: return None project_name = data_handler.get_project_name(job_type) revisions_dict = {} if utils.is_chromium(): component = data_handler.get_component_name(job_type) repository = data_handler.get_repository_for_component(component) if repository and not _is_clank(revision_vars_url_format): revision_hash = _git_commit_position_to_git_hash_for_chromium( revision, repository) if revision_hash is None: return None # FIXME: While we check for this explicitly appended component in all # applicable cases that we know of within this codebase, if the dict # is shared with an external service (e.g. Predator) we may need to clean # this up beforehand. revisions_dict['/src'] = { 'name': _get_component_display_name(component, project_name), 'url': _git_url_for_chromium_repository(repository), 'rev': revision_hash, 'commit_pos': revision } # Use revision hash for info url later. revision = revision_hash revision_vars_url = revision_vars_url_format % revision url_content = _get_url_content(revision_vars_url) if not url_content: logs.log_error( 'Failed to get component revisions from %s.' % revision_vars_url) return None # Parse as per DEPS format. if _is_deps(revision_vars_url): deps_revisions_dict = deps_to_revisions_dict(url_content) if not deps_revisions_dict: return None revisions_dict.update(deps_revisions_dict) return revisions_dict # Parse as per Clank DEPS format. if _is_clank(revision_vars_url): return _clank_revision_file_to_revisions_dict(url_content) # Default case: parse content as yaml. revisions_dict = _to_dict(url_content) if revisions_dict is None: logs.log_error( 'Failed to parse component revisions from %s.' % revision_vars_url) return None # Parse as per source map format. if revision_vars_url.endswith(SOURCE_MAP_EXTENSION): revisions_dict = _src_map_to_revisions_dict(revisions_dict, project_name) return revisions_dict The provided code snippet includes necessary dependencies for implementing the `get_real_revision` function. Write a Python function `def get_real_revision(revision, job_type, display=False, platform_id=None)` to solve the following problem: Convert the revision number into a real revision hash (e.g. git hash). Here is the function: def get_real_revision(revision, job_type, display=False, platform_id=None): """Convert the revision number into a real revision hash (e.g. git hash).""" if revision is None: # Bail early when caller passes revision from a non-existent attribute. return None component_revisions_dict = get_component_revisions_dict( revision, job_type, platform_id=platform_id) if not component_revisions_dict: return str(revision) keys = list(component_revisions_dict.keys()) key = ('/src' if '/src' in keys else get_components_list( component_revisions_dict, job_type)[0]) helper = _get_display_revision if display else _get_revision return helper(component_revisions_dict[key])
Convert the revision number into a real revision hash (e.g. git hash).
157,049
import re from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config SOURCE_STRIP_REGEX = re.compile(r'^[/]?src[/]?') class ComponentPath: def __init__(self, source=None, relative_path=None, display_path=None): self.source = source self.relative_path = relative_path self.display_path = display_path def __eq__(self, other): return (self.source == other.source and self.relative_path == other.relative_path and self.display_path == other.display_path) def normalize_source_path(path): """Normalizes source path for comparison with component sources.""" # Account for ../../ at start of path due to working directory # out/<build_dir>/ at time of build generation (chromium only). path = utils.remove_prefix(path, '../../') # Remove /proc/self/cwd prefix added by Bazel. path = utils.remove_prefix(path, '/proc/self/cwd/') # Cross-platform way to determine path absoluteness. is_path_absolute = path.startswith('/') or DRIVE_LETTER_REGEX.match(path) # Normalize backslashes into slashes. normalized_path = path.replace('\\', '/') if is_path_absolute: source_start_id_index = normalized_path.find(SOURCE_START_ID) if source_start_id_index == -1: # This absolute path does not have source start id, so we cannot # figure out a relative path. Bail out. return None return normalized_path[source_start_id_index + len(SOURCE_START_ID):] return normalized_path The provided code snippet includes necessary dependencies for implementing the `get_component_source_and_relative_path` function. Write a Python function `def get_component_source_and_relative_path(path, revisions_dict)` to solve the following problem: Get component source and relative path given a revisions dictionary and path. Here is the function: def get_component_source_and_relative_path(path, revisions_dict): """Get component source and relative path given a revisions dictionary and path.""" if not revisions_dict: return ComponentPath() normalized_path = normalize_source_path(path) if normalized_path is None: return ComponentPath() component_sources = sorted(list(revisions_dict.keys()), key=len, reverse=True) default_component_source = None for component_source in component_sources: # Trailing slash is important so that we match the exact component source. # E.g. without slash, we would match src/webrtc_overrides with src/webrtc # which is incorrect. stripped_component_source = ( SOURCE_STRIP_REGEX.sub('', component_source) + '/') if normalized_path.startswith(stripped_component_source): relative_path = utils.strip_from_left(normalized_path, stripped_component_source) return ComponentPath(component_source, relative_path, normalized_path) if stripped_component_source == '/': default_component_source = component_source if default_component_source is None: return ComponentPath() return ComponentPath(default_component_source, normalized_path, normalized_path)
Get component source and relative path given a revisions dictionary and path.
157,050
import re from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config def should_linkify_java_stack_frames(): return local_config.Config(local_config.PROJECT_PATH).get('linkify_java')
null
157,051
from collections import namedtuple import os import re import shutil import subprocess import time from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import build_archive from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import revisions from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import fuzzer_selection from clusterfuzz._internal.google_cloud_utils import blobs from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.platforms import android from clusterfuzz._internal.system import archive from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell MAX_EVICTED_BUILDS = 100 MIN_FREE_DISK_SPACE_CHROMIUM = 10 * 1024 * 1024 * 1024 MIN_FREE_DISK_SPACE_DEFAULT = 5 * 1024 * 1024 * 1024 def _evict_build(current_build_dir): """Remove the least recently used build to make room.""" builds_directory = environment.get_value('BUILDS_DIR') least_recently_used = None least_recently_used_timestamp = None for build_directory in os.listdir(builds_directory): absolute_build_directory = os.path.abspath( os.path.join(builds_directory, build_directory)) if not os.path.isdir(absolute_build_directory): continue if os.path.commonpath( [absolute_build_directory, os.path.abspath(current_build_dir)]) == absolute_build_directory: # Don't evict the build we're trying to extract. This could be a parent # directory of where we're currently extracting to. continue build = BaseBuild(absolute_build_directory) timestamp = build.last_used_time() if (least_recently_used_timestamp is None or timestamp < least_recently_used_timestamp): least_recently_used_timestamp = timestamp least_recently_used = build if not least_recently_used: return False logs.log( 'Deleting build %s to save space.' % least_recently_used.base_build_dir) least_recently_used.delete() return True The provided code snippet includes necessary dependencies for implementing the `_make_space` function. Write a Python function `def _make_space(requested_size, current_build_dir=None)` to solve the following problem: Try to make the requested number of bytes available by deleting builds. Here is the function: def _make_space(requested_size, current_build_dir=None): """Try to make the requested number of bytes available by deleting builds.""" if utils.is_chromium(): min_free_disk_space = MIN_FREE_DISK_SPACE_CHROMIUM else: min_free_disk_space = MIN_FREE_DISK_SPACE_DEFAULT builds_directory = environment.get_value('BUILDS_DIR') error_message = 'Need at least %d GB of free disk space.' % ( (min_free_disk_space + requested_size) // 1024**3) for _ in range(MAX_EVICTED_BUILDS): free_disk_space = shell.get_free_disk_space(builds_directory) if free_disk_space is None: # Can't determine free disk space, bail out. return False if requested_size + min_free_disk_space < free_disk_space: return True if not _evict_build(current_build_dir): logs.log_error(error_message) return False free_disk_space = shell.get_free_disk_space(builds_directory) result = requested_size + min_free_disk_space < free_disk_space if not result: logs.log_error(error_message) return result
Try to make the requested number of bytes available by deleting builds.
157,052
from collections import namedtuple import os import re import shutil import subprocess import time from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import build_archive from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import revisions from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import fuzzer_selection from clusterfuzz._internal.google_cloud_utils import blobs from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.platforms import android from clusterfuzz._internal.system import archive from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell The provided code snippet includes necessary dependencies for implementing the `_handle_unrecoverable_error_on_windows` function. Write a Python function `def _handle_unrecoverable_error_on_windows()` to solve the following problem: Handle non-recoverable error on Windows. This is usually either due to disk corruption or processes failing to terminate using regular methods. Force a restart for recovery. Here is the function: def _handle_unrecoverable_error_on_windows(): """Handle non-recoverable error on Windows. This is usually either due to disk corruption or processes failing to terminate using regular methods. Force a restart for recovery.""" if environment.platform() != 'WINDOWS': return logs.log_error('Unrecoverable error, restarting machine...') time.sleep(60) utils.restart_machine()
Handle non-recoverable error on Windows. This is usually either due to disk corruption or processes failing to terminate using regular methods. Force a restart for recovery.
157,053
from collections import namedtuple import os import re import shutil import subprocess import time from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import build_archive from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import revisions from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import fuzzer_selection from clusterfuzz._internal.google_cloud_utils import blobs from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.platforms import android from clusterfuzz._internal.system import archive from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell The provided code snippet includes necessary dependencies for implementing the `_setup_build_directories` function. Write a Python function `def _setup_build_directories(base_build_dir)` to solve the following problem: Set up build directories for a job. Here is the function: def _setup_build_directories(base_build_dir): """Set up build directories for a job.""" # Create the root build directory for this job. shell.create_directory(base_build_dir, create_intermediates=True) custom_binary_directory = os.path.join(base_build_dir, 'custom') revision_build_directory = os.path.join(base_build_dir, 'revisions') sym_build_directory = os.path.join(base_build_dir, 'symbolized') sym_debug_build_directory = os.path.join(sym_build_directory, 'debug') sym_release_build_directory = os.path.join(sym_build_directory, 'release') build_directories = [ custom_binary_directory, revision_build_directory, sym_build_directory, sym_debug_build_directory, sym_release_build_directory ] for build_directory in build_directories: shell.create_directory(build_directory)
Set up build directories for a job.
157,054
from collections import namedtuple import os import re import shutil import subprocess import time from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import build_archive from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import revisions from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import fuzzer_selection from clusterfuzz._internal.google_cloud_utils import blobs from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.platforms import android from clusterfuzz._internal.system import archive from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell PATCHELF_SIZE_LIMIT = 1.5 * 1024 * 1024 * 1024 def _set_rpaths_chrpath(binary_path, rpaths): """Set rpaths using chrpath.""" chrpath = environment.get_default_tool_path('chrpath') if not chrpath: raise BuildManagerError('Failed to find chrpath') subprocess.check_output( [chrpath, '-r', ':'.join(rpaths), binary_path], stderr=subprocess.PIPE) def _set_rpaths_patchelf(binary_path, rpaths): """Set rpaths using patchelf.""" patchelf = shutil.which('patchelf') if not patchelf: raise BuildManagerError('Failed to find patchelf') subprocess.check_output( [patchelf, '--force-rpath', '--set-rpath', ':'.join(rpaths), binary_path], stderr=subprocess.PIPE) The provided code snippet includes necessary dependencies for implementing the `set_rpaths` function. Write a Python function `def set_rpaths(binary_path, rpaths)` to solve the following problem: Set rpath of a binary. Here is the function: def set_rpaths(binary_path, rpaths): """Set rpath of a binary.""" # Patchelf handles rpath patching much better, and allows e.g. extending the # length of the rpath. However, it loads the entire binary into memory so # does not work for large binaries, so use chrpath for larger binaries. binary_size = os.path.getsize(binary_path) if binary_size >= PATCHELF_SIZE_LIMIT: _set_rpaths_chrpath(binary_path, rpaths) else: _set_rpaths_patchelf(binary_path, rpaths)
Set rpath of a binary.
157,055
from collections import namedtuple import os import re import shutil import subprocess import time from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import build_archive from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import revisions from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import fuzzer_selection from clusterfuzz._internal.google_cloud_utils import blobs from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.platforms import android from clusterfuzz._internal.system import archive from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell class BuildManagerError(Exception): """Build manager exceptions.""" The provided code snippet includes necessary dependencies for implementing the `get_rpaths` function. Write a Python function `def get_rpaths(binary_path)` to solve the following problem: Get rpath of a binary. Here is the function: def get_rpaths(binary_path): """Get rpath of a binary.""" chrpath = environment.get_default_tool_path('chrpath') if not chrpath: raise BuildManagerError('Failed to find chrpath') try: rpaths = subprocess.check_output( [chrpath, '-l', binary_path], stderr=subprocess.PIPE).strip().decode('utf-8') except subprocess.CalledProcessError as e: if b'no rpath or runpath tag found' in e.output: return [] raise if rpaths: search_marker = 'RPATH=' start_index = rpaths.index(search_marker) + len(search_marker) return rpaths[start_index:].split(':') return []
Get rpath of a binary.
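The parsing step assumes chrpath's 'FILE: RPATH=a:b' output shape; a standalone sketch of just that slice-and-split:

rpaths = 'build/d8: RPATH=$ORIGIN:/usr/local/lib'  # Assumed chrpath -l output shape.
search_marker = 'RPATH='
start_index = rpaths.index(search_marker) + len(search_marker)
print(rpaths[start_index:].split(':'))  # ['$ORIGIN', '/usr/local/lib']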
157,056
import datetime import functools import itertools import json import os import random import re from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import fuzz_target_utils from clusterfuzz._internal.google_cloud_utils import big_query from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import fuzzer_logs from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell def _logs_bucket_key_fn(func, args, kwargs): # pylint: disable=unused-argument return 'fuzzer_logs_bucket:' + args[1]
null
157,057
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment class _MockMetric: """Mock metric object, used for when monitoring isn't available.""" def _mock_method(self, *args, **kwargs): # pylint: disable=unused-argument pass def __getattr__(self, _): return self._mock_method def check_module_loaded(module): """Used for mocking.""" return module is not None The provided code snippet includes necessary dependencies for implementing the `stub_unavailable` function. Write a Python function `def stub_unavailable(module)` to solve the following problem: Decorator to stub out functions on failed imports. Here is the function: def stub_unavailable(module): """Decorator to stub out functions on failed imports.""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): if check_module_loaded(module): return func(*args, **kwargs) return _MockMetric() return wrapper return decorator
Decorator to stub out functions on failed imports.
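A usage sketch; `optional_client` stands in for an import that may have failed, assuming stub_unavailable, check_module_loaded, and _MockMetric from above are in scope:

optional_client = None  # e.g. the result of a failed optional import.

@stub_unavailable(optional_client)
def create_time_series():
    return optional_client.create()  # Would crash if actually called.

stub = create_time_series()  # Returns a _MockMetric instead of running the body.
stub.anything('is', 'safe')  # Every attribute is a harmless no-op method.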
157,058
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment The provided code snippet includes necessary dependencies for implementing the `_time_to_timestamp` function. Write a Python function `def _time_to_timestamp(timestamp, time_seconds)` to solve the following problem: Convert result of time.time() to Timestamp. Here is the function: def _time_to_timestamp(timestamp, time_seconds): """Convert result of time.time() to Timestamp.""" timestamp.seconds = int(time_seconds) timestamp.nanos = int((time_seconds - timestamp.seconds) * 10**9)
Convert result of time.time() to Timestamp.
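A sketch with a stand-in for the protobuf Timestamp (the real code mutates a monitoring_v3 timestamp in place), assuming _time_to_timestamp above is in scope:

import dataclasses

@dataclasses.dataclass
class FakeTimestamp:  # Stand-in for the protobuf Timestamp message.
    seconds: int = 0
    nanos: int = 0

ts = FakeTimestamp()
_time_to_timestamp(ts, 1700000000.25)
print(ts.seconds, ts.nanos)  # 1700000000 250000000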
157,059
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment class _FlusherThread(threading.Thread): """Flusher thread.""" def __init__(self): super().__init__() self.daemon = True self.stop_event = threading.Event() def run(self): """Run the flusher thread.""" create_time_series = _retry_wrap(_monitoring_v3_client.create_time_series) project_path = _monitoring_v3_client.project_path( utils.get_application_id()) while True: try: if self.stop_event.wait(FLUSH_INTERVAL_SECONDS): return time_series = [] end_time = time.time() for metric, labels, start_time, value in _metrics_store.iter_values(): if (metric.metric_kind == monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE): start_time = end_time series = monitoring_v3.types.TimeSeries() # pylint: disable=no-member metric.monitoring_v3_time_series(series, labels, start_time, end_time, value) time_series.append(series) if len(time_series) == MAX_TIME_SERIES_PER_CALL: create_time_series(project_path, time_series) time_series = [] if time_series: create_time_series(project_path, time_series) except Exception: logs.log_error('Failed to flush metrics.') def stop(self): self.stop_event.set() self.join() _monitoring_v3_client = None _flusher_thread = None def check_module_loaded(module): """Used for mocking.""" return module is not None def _initialize_monitored_resource(): """Monitored resources.""" global _monitored_resource _monitored_resource = monitoring_v3.types.MonitoredResource() # pylint: disable=no-member # TODO(ochang): Use generic_node when that is available. _monitored_resource.type = 'gce_instance' # The project ID must be the same as the one we write metrics to, not the ID # where the instance lives. _monitored_resource.labels['project_id'] = utils.get_application_id() # Use bot name here instead as that's more useful to us. _monitored_resource.labels['instance_id'] = environment.get_value('BOT_NAME') if compute_metadata.is_gce(): # Returned in the form projects/{id}/zones/{zone} zone = compute_metadata.get('instance/zone').split('/')[-1] _monitored_resource.labels['zone'] = zone else: # Default zone for instances not on GCE. _monitored_resource.labels['zone'] = 'us-central1-f' The provided code snippet includes necessary dependencies for implementing the `initialize` function. Write a Python function `def initialize()` to solve the following problem: Initialize if monitoring is enabled for this bot. Here is the function: def initialize(): """Initialize if monitoring is enabled for this bot.""" global _monitoring_v3_client global _flusher_thread if environment.get_value('LOCAL_DEVELOPMENT'): return if not local_config.ProjectConfig().get('monitoring.enabled'): return if check_module_loaded(monitoring_v3): _initialize_monitored_resource() _monitoring_v3_client = monitoring_v3.MetricServiceClient( credentials=credentials.get_default()[0]) _flusher_thread = _FlusherThread() _flusher_thread.start()
Initialize if monitoring is enabled for this bot.
157,060
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment _flusher_thread = None The provided code snippet includes necessary dependencies for implementing the `stop` function. Write a Python function `def stop()` to solve the following problem: Stops monitoring and cleans up (only if monitoring is enabled). Here is the function: def stop(): """Stops monitoring and cleans up (only if monitoring is enabled).""" if _flusher_thread: _flusher_thread.stop()
Stops monitoring and cleans up (only if monitoring is enabled).
157,061
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment _metrics_store = _MetricsStore() The provided code snippet includes necessary dependencies for implementing the `metrics_store` function. Write a Python function `def metrics_store()` to solve the following problem: Get the per-process metrics store. Here is the function: def metrics_store(): """Get the per-process metrics store.""" return _metrics_store
Get the per-process metrics store.
157,062
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment The provided code snippet includes necessary dependencies for implementing the `_get_region` function. Write a Python function `def _get_region(bot_name)` to solve the following problem: Get bot region. Here is the function: def _get_region(bot_name): """Get bot region.""" try: regions = local_config.MonitoringRegionsConfig() except errors.BadConfigError: return 'unknown' for pattern in regions.get('patterns'): if re.match(pattern['pattern'], bot_name): return pattern['name'] return 'unknown'
Get bot region.
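The regions config maps bot-name regexes to region names; a self-contained sketch with made-up entries:

import re

patterns = [
    {'pattern': r'^oss-fuzz-linux-', 'name': 'us-central1'},  # Hypothetical
    {'pattern': r'^clusterfuzz-win-', 'name': 'us-east1'},    # config entries.
]

def get_region_sketch(bot_name):
    for pattern in patterns:
        if re.match(pattern['pattern'], bot_name):
            return pattern['name']
    return 'unknown'

print(get_region_sketch('oss-fuzz-linux-0007'))  # us-central1
print(get_region_sketch('mystery-host'))         # unknown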
157,063
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment class _CounterMetric(Metric): """Counter metric.""" def value_type(self): return monitoring_v3.enums.MetricDescriptor.ValueType.INT64 def metric_kind(self): return monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE def default_value(self): return 0 def increment(self, labels=None): self.increment_by(1, labels=labels) def increment_by(self, count, labels=None): _metrics_store.increment(self, labels, count) def _set_value(self, point, value): """Get Point.""" point.int64_value = value The provided code snippet includes necessary dependencies for implementing the `CounterMetric` function. Write a Python function `def CounterMetric(name, description, field_spec)` to solve the following problem: Build _CounterMetric. Here is the function: def CounterMetric(name, description, field_spec): """Build _CounterMetric.""" return _CounterMetric(name, field_spec=field_spec, description=description)
Build _CounterMetric.
157,064
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment class _GaugeMetric(Metric): """Gauge metric.""" def value_type(self): return monitoring_v3.enums.MetricDescriptor.ValueType.INT64 def metric_kind(self): return monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE def default_value(self): return 0 def set(self, value, labels=None): _metrics_store.put(self, labels, value) def _set_value(self, point, value): """Get Point.""" point.int64_value = value The provided code snippet includes necessary dependencies for implementing the `GaugeMetric` function. Write a Python function `def GaugeMetric(name, description, field_spec)` to solve the following problem: Build _GaugeMetric. Here is the function: def GaugeMetric(name, description, field_spec): """Build _GaugeMetric.""" return _GaugeMetric(name, field_spec=field_spec, description=description)
Build _GaugeMetric.
157,065
import bisect import collections import functools import itertools import re import threading import time from google.api_core import exceptions from google.api_core import retry from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import compute_metadata from clusterfuzz._internal.google_cloud_utils import credentials from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment class _CumulativeDistributionMetric(Metric): """Cumulative distribution metric.""" def __init__(self, name, description, bucketer, field_spec=None): super().__init__(name, description=description, field_spec=field_spec) self.bucketer = bucketer def value_type(self): return monitoring_v3.enums.MetricDescriptor.ValueType.DISTRIBUTION def metric_kind(self): return monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE def default_value(self): return _Distribution(self.bucketer) def add(self, value, labels=None): _metrics_store.increment(self, labels, value) def _set_value(self, point, value): value.monitoring_v3_distribution(point.distribution_value) The provided code snippet includes necessary dependencies for implementing the `CumulativeDistributionMetric` function. Write a Python function `def CumulativeDistributionMetric(name, description, bucketer, field_spec)` to solve the following problem: Build _CumulativeDistributionMetric. Here is the function: def CumulativeDistributionMetric(name, description, bucketer, field_spec): """Build _CumulativeDistributionMetric.""" return _CumulativeDistributionMetric( name, description=description, bucketer=bucketer, field_spec=field_spec)
Build _CumulativeDistributionMetric.
157,066
import datetime import json import logging from logging import config import os import socket import sys import time import traceback from typing import Any STACKDRIVER_LOG_MESSAGE_LIMIT = 80000 def truncate(msg, limit): """We need to truncate the message in the middle if it gets too long.""" if len(msg) <= limit: return msg half = limit // 2 return '\n'.join([ msg[:half], '...%d characters truncated...' % (len(msg) - limit), msg[-half:] ]) def _handle_unserializable(unserializable: Any) -> str: try: return str(unserializable, 'utf-8') except TypeError: return str(unserializable) def update_entry_with_exc(entry, exc_info): """Update the dict `entry` with exc_info.""" if not exc_info: return error = exc_info[1] error_extras = getattr(error, 'extras', {}) entry['task_payload'] = ( entry.get('task_payload') or error_extras.pop('task_payload', None)) entry['extras'].update(error_extras) entry['serviceContext'] = {'service': 'bots'} # Reference: # https://cloud.google.com/error-reporting/docs/formatting-error-messages, if exc_info[0]: # we need to set the result of traceback.format_exception to the field # `message`. And we move our entry['message'] += '\n' + ''.join( traceback.format_exception(exc_info[0], exc_info[1], exc_info[2])) else: # If we log error without exception, we need to set # `context.reportLocation`. location = entry.get('location', {}) entry['context'] = { 'reportLocation': { 'filePath': location.get('path', ''), 'lineNumber': location.get('line', 0), 'functionName': location.get('method', '') } } The provided code snippet includes necessary dependencies for implementing the `format_record` function. Write a Python function `def format_record(record: logging.LogRecord) -> str` to solve the following problem: Format LogEntry into JSON string. Here is the function: def format_record(record: logging.LogRecord) -> str: """Format LogEntry into JSON string.""" entry = { 'message': truncate(record.getMessage(), STACKDRIVER_LOG_MESSAGE_LIMIT), 'created': ( datetime.datetime.utcfromtimestamp(record.created).isoformat() + 'Z'), 'severity': record.levelname, 'bot_name': os.getenv('BOT_NAME'), 'task_payload': os.getenv('TASK_PAYLOAD'), 'name': record.name, } initial_payload = os.getenv('INITIAL_TASK_PAYLOAD') if initial_payload: entry['actual_task_payload'] = entry['task_payload'] entry['task_payload'] = initial_payload entry['location'] = getattr(record, 'location', {'error': True}) entry['extras'] = getattr(record, 'extras', {}) update_entry_with_exc(entry, record.exc_info) if not entry['extras']: del entry['extras'] worker_bot_name = os.environ.get('WORKER_BOT_NAME') if worker_bot_name: entry['worker_bot_name'] = worker_bot_name fuzz_target = os.getenv('FUZZ_TARGET') if fuzz_target: entry['fuzz_target'] = fuzz_target # Log bot shutdown cases as WARNINGs since this is expected for preemptibles. if (entry['severity'] in ['ERROR', 'CRITICAL'] and 'IOError: [Errno 4] Interrupted function call' in entry['message']): entry['severity'] = 'WARNING' return json.dumps(entry, default=_handle_unserializable)
Format LogEntry into JSON string.
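A quick round-trip, assuming format_record and its helpers above are in scope; fields beyond the LogRecord basics come from environment variables and default to null:

import logging

record = logging.LogRecord(
    name='fuzz', level=logging.ERROR, pathname='worker.py', lineno=42,
    msg='crash detected in %s', args=('target',), exc_info=None)
print(format_record(record))
# {"message": "crash detected in target", "created": "...Z",
#  "severity": "ERROR", "bot_name": null, ...}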
157,067
import datetime import json import math from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.google_cloud_utils import big_query from clusterfuzz._internal.system import environment def _get_first_or_last_successful_hour(is_last): """Get the first successful hour.""" order = data_types.BuildCrashStatsJobHistory.end_time_in_hours if is_last: order = -order item = data_types.BuildCrashStatsJobHistory.query().order(order).get() if not item: return None return item.end_time_in_hours The provided code snippet includes necessary dependencies for implementing the `get_min_hour` function. Write a Python function `def get_min_hour()` to solve the following problem: Get the first hour that ran successfully (for the date-time picker). Here is the function: def get_min_hour(): """Get the first hour that ran successfully (for the date-time picker).""" hour = _get_first_or_last_successful_hour(is_last=False) # `hour` is None when we haven't run build_crash_stats at all. # Therefore, there's no crash stats data. # # On the UI, the date-time picker chooses a point in time. Therefore, # if we choose, say, 3pm, this means we want the crash stats until 2:59pm. # Therefore, we need to increment by 1. return (hour or 0) + 1
Get the first hour that ran successfully (for the date-time picker).
157,068
import datetime import json import math from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.google_cloud_utils import big_query from clusterfuzz._internal.system import environment def get_last_successful_hour(): """Get the last hour that ran successfully. We want to run the next hour.""" return _get_first_or_last_successful_hour(is_last=True) The provided code snippet includes necessary dependencies for implementing the `get_max_hour` function. Write a Python function `def get_max_hour()` to solve the following problem: Get the last hour that can be selected by the date-time picker. Here is the function: def get_max_hour(): """Get the last hour that can be selected by the date-time picker.""" hour = get_last_successful_hour() # `hour` is None when we haven't run build_crash_stats at all. # Therefore, there's no crash stats data. # # On the UI, the date-time picker chooses a point in time. Therefore, # if we choose, say, 3pm, this means we want the crash stats until 2:59pm. # Therefore, we need to increment by 1. return (hour or 0) + 1
Get the last hour that can be selected by the date-time picker.
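The same end-exclusive off-by-one applies to both picker bounds; a tiny self-contained sketch of the arithmetic:

def picker_bound(hour_from_stats):
    # None means build_crash_stats has never run, so there is no data yet.
    return (hour_from_stats or 0) + 1

print(picker_bound(None))    # 1
print(picker_bound(473000))  # 473001: selecting it covers data up to 473000:59.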
157,069
import datetime import json import math from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.google_cloud_utils import big_query from clusterfuzz._internal.system import environment def get_datetime(hours): """Get datetime obj from hours from epoch.""" return datetime.datetime.utcfromtimestamp(hours * 60 * 60) The provided code snippet includes necessary dependencies for implementing the `get_last_crash_time` function. Write a Python function `def get_last_crash_time(testcase)` to solve the following problem: Return timestamp for last crash with same crash params as testcase. Here is the function: def get_last_crash_time(testcase): """Return timestamp for last crash with same crash params as testcase.""" client = big_query.Client() where_clause = ('crash_type = {crash_type} AND ' 'crash_state = {crash_state} AND ' 'security_flag = {security_flag} AND ' 'project = {project}').format( crash_type=json.dumps(testcase.crash_type), crash_state=json.dumps(testcase.crash_state), security_flag=json.dumps(testcase.security_flag), project=json.dumps(testcase.project_name), ) sql = """ SELECT hour FROM main.crash_stats WHERE {where_clause} ORDER by hour DESC LIMIT 1 """.format(where_clause=where_clause) result = client.query(query=sql) if result and result.rows: return get_datetime(result.rows[0]['hour']) return None
Return timestamp for last crash with same crash params as testcase.
157,070
from clusterfuzz._internal.base import utils from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment The provided code snippet includes necessary dependencies for implementing the `start_if_needed` function. Write a Python function `def start_if_needed(service)` to solve the following problem: Start Google Cloud Profiler if |USE_PYTHON_PROFILER| environment variable is set. Here is the function: def start_if_needed(service): """Start Google Cloud Profiler if |USE_PYTHON_PROFILER| environment variable is set.""" if not environment.get_value('USE_PYTHON_PROFILER'): return True project_id = utils.get_application_id() service_with_platform = '{service}_{platform}'.format( service=service, platform=environment.platform().lower()) try: # Import the package here since it is only needed when profiler is enabled. # Also, this is supported on Linux only. import googlecloudprofiler googlecloudprofiler.start( project_id=project_id, service=service_with_platform) except Exception: logs.log_error( 'Failed to start the profiler for service %s.' % service_with_platform) return False return True
Start Google Cloud Profiler if |USE_PYTHON_PROFILER| environment variable is set.
157,071
import datetime import itertools import json from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.crash_analysis import crash_analyzer from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs from . import grouper The provided code snippet includes necessary dependencies for implementing the `_create_filed_bug_metadata` function. Write a Python function `def _create_filed_bug_metadata(testcase)` to solve the following problem: Create a dummy bug entry for a test case. Here is the function: def _create_filed_bug_metadata(testcase): """Create a dummy bug entry for a test case.""" metadata = data_types.FiledBug() metadata.timestamp = datetime.datetime.utcnow() metadata.testcase_id = testcase.key.id() metadata.bug_information = int(testcase.bug_information) metadata.group_id = testcase.group_id metadata.crash_type = testcase.crash_type metadata.crash_state = testcase.crash_state metadata.security_flag = testcase.security_flag metadata.platform_id = testcase.platform_id metadata.project_name = testcase.project_name metadata.job_type = testcase.job_type metadata.put()
Create a dummy bug entry for a test case.
157,072
import datetime import itertools import json from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.crash_analysis import crash_analyzer from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs from . import grouper The provided code snippet includes necessary dependencies for implementing the `_get_excluded_jobs` function. Write a Python function `def _get_excluded_jobs()` to solve the following problem: Return list of jobs excluded from bug filing. Here is the function: def _get_excluded_jobs(): """Return list of jobs excluded from bug filing.""" excluded_jobs = [] jobs = ndb_utils.get_all_from_model(data_types.Job) for job in jobs: job_environment = job.get_environment() # Exclude experimental jobs. if utils.string_is_true(job_environment.get('EXPERIMENTAL')): excluded_jobs.append(job.name) return excluded_jobs
Return list of jobs excluded from bug filing.
157,073
import datetime import itertools import json from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.crash_analysis import crash_analyzer from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs from . import grouper The provided code snippet includes necessary dependencies for implementing the `_is_bug_filed` function. Write a Python function `def _is_bug_filed(testcase)` to solve the following problem: Indicate if the bug is already filed. Here is the function: def _is_bug_filed(testcase): """Indicate if the bug is already filed.""" # Check if the testcase is already associated with a bug. if testcase.bug_information: return True # Re-check our stored metadata so that we don't file the same testcase twice. is_bug_filed_for_testcase = data_types.FiledBug.query( data_types.FiledBug.testcase_id == testcase.key.id()).get() if is_bug_filed_for_testcase: return True return False
Indicate if the bug is already filed.
157,074
import datetime import itertools import json from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.crash_analysis import crash_analyzer from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs from . import grouper UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES = [ 'Out-of-memory', 'Stack-overflow', 'Timeout' ] The provided code snippet includes necessary dependencies for implementing the `_is_crash_important` function. Write a Python function `def _is_crash_important(testcase)` to solve the following problem: Indicate if the crash is important to file. Here is the function: def _is_crash_important(testcase): """Indicate if the crash is important to file.""" if not testcase.one_time_crasher_flag: # A reproducible crash is an important crash. return True if testcase.status != 'Processed': # A duplicate or unreproducible crash is not an important crash. return False # Testcase is unreproducible. Only those crashes that are crashing frequently # are important. if testcase.crash_type in UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES: return False # Ensure that there is no reproducible testcase in our group. if testcase.group_id: other_reproducible_testcase = data_types.Testcase.query( data_types.Testcase.group_id == testcase.group_id, ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get() if other_reproducible_testcase: # There is another reproducible testcase in our group. So, this crash is # not important. return False # Get crash statistics data on this unreproducible crash for last X days. last_hour = crash_stats.get_last_successful_hour() if not last_hour: # No crash stats available, skip. return False _, rows = crash_stats.get( end=last_hour, block='day', days=data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE, group_by='reproducible_flag', where_clause=( 'crash_type = %s AND crash_state = %s AND security_flag = %s' % (json.dumps(testcase.crash_type), json.dumps(testcase.crash_state), json.dumps(testcase.security_flag))), group_having_clause='', sort_by='total_count', offset=0, limit=1) # Calculate total crash count and crash days count. crash_days_indices = set() total_crash_count = 0 for row in rows: if 'groups' not in row: continue total_crash_count += row['totalCount'] for group in row['groups']: for index in group['indices']: crash_days_indices.add(index['hour']) crash_days_count = len(crash_days_indices) # An unreproducible testcase is only important if it crashed at least once # every day for the last X days and its total crash count exceeded our # threshold limit. return (crash_days_count == data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE and total_crash_count >= data_types.FILE_UNREPRODUCIBLE_TESTCASE_MIN_CRASH_THRESHOLD)
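The final frequency check is easy to misread; this standalone sketch replicates just the counting loop over hypothetical `crash_stats` rows (the row shape mirrors what the function consumes above, and the thresholds are illustrative stand-ins for the `data_types` constants):

rows = [{
    'totalCount': 40,
    'groups': [{
        # Three distinct `hour` block indices, i.e. crashes on three days.
        'indices': [{'hour': h} for h in (100, 124, 148)],
    }],
}]

crash_days_indices = set()
total_crash_count = 0
for row in rows:
  if 'groups' not in row:
    continue
  total_crash_count += row['totalCount']
  for group in row['groups']:
    for index in group['indices']:
      crash_days_indices.add(index['hour'])

# Important only if it crashed on every one of the last N days *and* crossed
# the total-count threshold (N=3 and threshold=25 here, for the sketch).
print(len(crash_days_indices) == 3 and total_crash_count >= 25)  # True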
Indicate if the crash is important to file.
157,075
import datetime import itertools import json from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.crash_analysis import crash_analyzer from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs from . import grouper def _add_triage_message(testcase, message): """Add a triage message.""" if testcase.get_metadata(TRIAGE_MESSAGE_KEY) == message: # Message already exists, skip update. return # Re-fetch testcase to get latest entity and avoid race condition in updates. testcase = data_handler.get_testcase_by_id(testcase.key.id()) testcase.set_metadata(TRIAGE_MESSAGE_KEY, message) The provided code snippet includes necessary dependencies for implementing the `_check_and_update_similar_bug` function. Write a Python function `def _check_and_update_similar_bug(testcase, issue_tracker)` to solve the following problem: Get list of similar open issues and ones that were recently closed. Here is the function: def _check_and_update_similar_bug(testcase, issue_tracker): """Get list of similar open issues and ones that were recently closed.""" # Get similar testcases from the same group. similar_testcases_from_group = [] if testcase.group_id: group_query = data_types.Testcase.query( data_types.Testcase.group_id == testcase.group_id) similar_testcases_from_group = ndb_utils.get_all_from_query( group_query, batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2) # Get testcases with the same crash params. These might not be in a group # if they were just fixed. same_crash_params_query = data_types.Testcase.query( data_types.Testcase.crash_type == testcase.crash_type, data_types.Testcase.crash_state == testcase.crash_state, data_types.Testcase.security_flag == testcase.security_flag, data_types.Testcase.project_name == testcase.project_name, data_types.Testcase.status == 'Processed') similar_testcases_from_query = ndb_utils.get_all_from_query( same_crash_params_query, batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2) for similar_testcase in itertools.chain(similar_testcases_from_group, similar_testcases_from_query): # Exclude ourself from comparison. if similar_testcase.key.id() == testcase.key.id(): continue # Exclude similar testcases without bug information. if not similar_testcase.bug_information: continue # Get the issue object given its ID. issue = issue_tracker.get_issue(similar_testcase.bug_information) if not issue: continue # If the reproducible issue is not verified yet, bug is still valid and # might be caused by non-availability of latest builds. In that case, # don't file a new bug yet. if similar_testcase.open and not similar_testcase.one_time_crasher_flag: return True # If the issue is still open, no need to file a duplicate bug. if issue.is_open: return True # If the issue indicates that this crash needs to be ignored, no need to # file another one. 
policy = issue_tracker_policy.get(issue_tracker.project) ignore_label = policy.label('ignore') if ignore_label in issue.labels: _add_triage_message( testcase, ('Skipping filing a bug since similar testcase ({testcase_id}) in ' 'issue ({issue_id}) is blacklisted with the {ignore_label} label.' ).format( testcase_id=similar_testcase.key.id(), issue_id=issue.id, ignore_label=ignore_label)) return True # If this testcase is not reproducible, and a similar non-reproducible bug # was previously filed, don't file it again to avoid spam. if (testcase.one_time_crasher_flag and similar_testcase.one_time_crasher_flag): _add_triage_message( testcase, 'Skipping filing unreproducible bug since one was already filed ' f'({similar_testcase.key.id()}).') return True # If the issue is recently closed, wait a certain time period to make sure # our fixed verification has completed. if (issue.closed_time and not dates.time_has_expired( issue.closed_time, hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED)): _add_triage_message( testcase, ('Delaying filing a bug since similar testcase ' '({testcase_id}) in issue ({issue_id}) was just fixed.').format( testcase_id=similar_testcase.key.id(), issue_id=issue.id)) return True return False
Get list of similar open issues and ones that were recently closed.
157,076
import datetime import itertools import json from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.crash_analysis import crash_analyzer from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs from . import grouper def _add_triage_message(testcase, message): """Add a triage message.""" if testcase.get_metadata(TRIAGE_MESSAGE_KEY) == message: # Message already exists, skip update. return # Re-fetch testcase to get latest entity and avoid race condition in updates. testcase = data_handler.get_testcase_by_id(testcase.key.id()) testcase.set_metadata(TRIAGE_MESSAGE_KEY, message) The provided code snippet includes necessary dependencies for implementing the `_file_issue` function. Write a Python function `def _file_issue(testcase, issue_tracker, throttler)` to solve the following problem: File an issue for the testcase. Here is the function: def _file_issue(testcase, issue_tracker, throttler): """File an issue for the testcase.""" filed = False file_exception = None if throttler.should_throttle(testcase): _add_triage_message(testcase, 'Skipping filing as it is throttled.') return False if crash_analyzer.is_experimental_crash(testcase.crash_type): logs.log(f'Skipping bug filing for {testcase.key.id()} as it ' 'has an experimental crash type.') _add_triage_message( testcase, 'Skipping filing as this is an experimental crash type.') return False try: _, file_exception = issue_filer.file_issue(testcase, issue_tracker) filed = True except Exception as e: file_exception = e if file_exception: logs.log_error(f'Failed to file issue for testcase {testcase.key.id()}.') _add_triage_message( testcase, f'Failed to file issue due to exception: {str(file_exception)}') return filed
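The throttler only needs to expose `should_throttle(testcase)`; a hypothetical stand-in for local experimentation (not the real throttler class used by the cron):

class NeverThrottle:
  """Hypothetical stand-in that never throttles bug filing."""

  def should_throttle(self, testcase):  # pylint: disable=unused-argument
    return False

# filed = _file_issue(testcase, issue_tracker, NeverThrottle())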
File an issue for the testcase.
157,077
import logging from clusterfuzz._internal.base import external_users from clusterfuzz._internal.base import memoize from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils The provided code snippet includes necessary dependencies for implementing the `get_open_testcases_with_bugs` function. Write a Python function `def get_open_testcases_with_bugs()` to solve the following problem: Return iterator to open testcases with bugs. Here is the function: def get_open_testcases_with_bugs(): """Return iterator to open testcases with bugs.""" return data_types.Testcase.query( ndb_utils.is_true(data_types.Testcase.open), data_types.Testcase.status == 'Processed', data_types.Testcase.bug_information != '').order( # pylint: disable=g-explicit-bool-comparison data_types.Testcase.bug_information, data_types.Testcase.key)
Return iterator to open testcases with bugs.
157,078
import logging from clusterfuzz._internal.base import external_users from clusterfuzz._internal.base import memoize from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils The provided code snippet includes necessary dependencies for implementing the `cc_users_for_job` function. Write a Python function `def cc_users_for_job(job_type, security_flag)` to solve the following problem: Return users to CC for a job. Here is the function: def cc_users_for_job(job_type, security_flag): """Return users to CC for a job.""" # Memoized per cron run. return external_users.cc_users_for_job(job_type, security_flag)
Return users to CC for a job.
157,079
import datetime from google.cloud import ndb from googleapiclient import discovery from googleapiclient import errors from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.metrics import logs The provided code snippet includes necessary dependencies for implementing the `_datastore_client` function. Write a Python function `def _datastore_client()` to solve the following problem: Returns an api client for datastore. Here is the function: def _datastore_client(): """Returns an api client for datastore.""" return discovery.build('datastore', 'v1')
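A hedged example of driving the client (Datastore v1 exposes managed-export operations via `projects().export`; the project id, bucket, and kinds below are illustrative assumptions):

client = _datastore_client()
# Build a managed-export request; call .execute() to actually run it.
request = client.projects().export(
    projectId='my-project',
    body={
        'outputUrlPrefix': 'gs://my-backup-bucket',
        'entityFilter': {'kinds': ['Testcase']},
    })
# response = request.execute()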
Returns an api client for datastore.
157,080
from collections import namedtuple from concurrent.futures import ThreadPoolExecutor import copy import itertools import json import logging from google.cloud import ndb from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.cron.helpers import bot_manager from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.google_cloud_utils import compute_engine_projects The provided code snippet includes necessary dependencies for implementing the `_get_project_ids` function. Write a Python function `def _get_project_ids()` to solve the following problem: Return the GCE project IDs. Here is the function: def _get_project_ids(): """Return the GCE project IDs.""" return list(local_config.Config(local_config.GCE_CLUSTERS_PATH).get().keys())
Return the GCE project IDs.
157,081
from collections import namedtuple from concurrent.futures import ThreadPoolExecutor import copy import itertools import json import logging from google.cloud import ndb from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.cron.helpers import bot_manager from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.google_cloud_utils import compute_engine_projects The provided code snippet includes necessary dependencies for implementing the `_instance_name_from_url` function. Write a Python function `def _instance_name_from_url(instance_url)` to solve the following problem: Extract instance name from url. Here is the function: def _instance_name_from_url(instance_url): """Extract instance name from url.""" return instance_url.split('/')[-1]
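For example, given an instance URL of the usual GCE shape:

url = ('https://www.googleapis.com/compute/v1/projects/my-project'
       '/zones/us-central1-a/instances/clusterfuzz-bot-0001')
assert _instance_name_from_url(url) == 'clusterfuzz-bot-0001'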
Extract instance name from url.
157,082
from collections import namedtuple from concurrent.futures import ThreadPoolExecutor import copy import itertools import json import logging from google.cloud import ndb from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.cron.helpers import bot_manager from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.google_cloud_utils import compute_engine_projects The provided code snippet includes necessary dependencies for implementing the `get_resource_name` function. Write a Python function `def get_resource_name(prefix, project_name)` to solve the following problem: Get a name that can be used for GCE resources. Here is the function: def get_resource_name(prefix, project_name): """Get a name that can be used for GCE resources.""" # https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers max_name_length = 58 project_name = project_name.lower().replace('_', '-') name = prefix + '-' + project_name return name[:max_name_length]
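For example, the project name is lowercased, underscores are mapped to hyphens, and the result is clamped to GCE's name-length budget:

assert get_resource_name('oss-fuzz-linux', 'My_Cool_Project') == \
    'oss-fuzz-linux-my-cool-project'
# Overly long names are truncated to 58 characters in total.
assert len(get_resource_name('oss-fuzz-linux', 'x' * 100)) == 58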
Get a name that can be used for GCE resources.
157,083
from collections import namedtuple from concurrent.futures import ThreadPoolExecutor import copy import itertools import json import logging from google.cloud import ndb from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.cron.helpers import bot_manager from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.google_cloud_utils import compute_engine_projects The provided code snippet includes necessary dependencies for implementing the `get_template_body` function. Write a Python function `def get_template_body(gce_project, template_name, task_tag=None, disk_size_gb=None, service_account=None, tls_cert=None)` to solve the following problem: Return the instance template body. Here is the function: def get_template_body(gce_project, template_name, task_tag=None, disk_size_gb=None, service_account=None, tls_cert=None): """Return the instance template body.""" template_body = copy.deepcopy( gce_project.get_instance_template(template_name)) if task_tag: template_body['properties']['metadata']['items'].append({ 'key': 'task-tag', 'value': task_tag, }) if disk_size_gb: disk = template_body['properties']['disks'][0] disk['initializeParams']['diskSizeGb'] = disk_size_gb if service_account: template_body['properties']['serviceAccounts'][0]['email'] = service_account if tls_cert: template_body['properties']['metadata']['items'].extend([{ 'key': 'tls-cert', 'value': tls_cert.cert_contents.decode('utf-8'), }, { 'key': 'tls-key', 'value': tls_cert.key_contents.decode('utf-8'), }]) return template_body
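A minimal sketch of the template shape the helper expects, using a hypothetical stand-in for `gce_project` (the real object comes from `compute_engine_projects`; only the fields the helper touches are stubbed):

class FakeGceProject:
  """Hypothetical stand-in exposing just get_instance_template()."""

  def get_instance_template(self, name):
    return {
        'properties': {
            'metadata': {'items': []},
            'disks': [{'initializeParams': {'diskSizeGb': 100}}],
            'serviceAccounts': [{'email': 'default@example.com'}],
        }
    }

body = get_template_body(
    FakeGceProject(), 'clusterfuzz-bot', task_tag='linux', disk_size_gb=200)
assert body['properties']['disks'][0]['initializeParams']['diskSizeGb'] == 200
assert {'key': 'task-tag', 'value': 'linux'} in (
    body['properties']['metadata']['items'])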
Return the instance template body.
157,084
from collections import namedtuple from concurrent.futures import ThreadPoolExecutor import copy import itertools import json import logging from google.cloud import ndb from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.cron.helpers import bot_manager from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.google_cloud_utils import compute_engine_projects def _get_template_disk_size(template): """Get disk size from template.""" return int( template['properties']['disks'][0]['initializeParams']['diskSizeGb']) def _get_template_service_account(template): """Get service account from template.""" return template['properties']['serviceAccounts'][0]['email'] def _get_metadata_value(metadata_items, key): return next((item['value'] for item in metadata_items if item['key'] == key), None) The provided code snippet includes necessary dependencies for implementing the `_template_needs_update` function. Write a Python function `def _template_needs_update(current_template, new_template, resource_name)` to solve the following problem: Return whether or not the template needs an update. Here is the function: def _template_needs_update(current_template, new_template, resource_name): """Return whether or not the template needs an update.""" current_version = json.loads(current_template['description'])['version'] new_version = json.loads(new_template['description'])['version'] if current_version != new_version: logging.info( 'Instance template version out of date ' '(current=%s, new=%s): %s', current_version, new_version, resource_name) return True current_disk_size_gb = _get_template_disk_size(current_template) new_disk_size_gb = _get_template_disk_size(new_template) if current_disk_size_gb != new_disk_size_gb: logging.info( 'Instance template disk size changed ' '(current=%d, new=%d): %s', current_disk_size_gb, new_disk_size_gb, resource_name) return True current_service_account = _get_template_service_account(current_template) new_service_account = _get_template_service_account(new_template) if current_service_account != new_service_account: logging.info('Service account changed ' '(current=%s, new=%s): %s', current_service_account, new_service_account, resource_name) return True current_tls_cert = _get_metadata_value( current_template['properties']['metadata']['items'], 'tls-cert') new_tls_cert = _get_metadata_value( new_template['properties']['metadata']['items'], 'tls-cert') if current_tls_cert != new_tls_cert: logging.info('TLS cert changed.') return True return False
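The version check compares JSON encoded into the template's `description` field; a sketch with two minimal templates (the `_template` helper is hypothetical, but its shape mirrors the accessors above, and `json`/`logging` are already imported in this snippet):

def _template(version, disk_gb=100, email='sa@example.com'):
  return {
      'description': json.dumps({'version': version}),
      'properties': {
          'metadata': {'items': []},
          'disks': [{'initializeParams': {'diskSizeGb': disk_gb}}],
          'serviceAccounts': [{'email': email}],
      },
  }

assert not _template_needs_update(_template(1), _template(1), 'oss-fuzz-foo')
assert _template_needs_update(_template(1), _template(2), 'oss-fuzz-foo')
assert _template_needs_update(
    _template(1), _template(1, disk_gb=200), 'oss-fuzz-foo')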
Return whether or not the template needs an update.
157,085
import collections import datetime import json from googleapiclient.errors import HttpError from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.chrome import build_info from clusterfuzz._internal.crash_analysis import crash_comparer from clusterfuzz._internal.crash_analysis import severity_analyzer from clusterfuzz._internal.cron.libs import mail from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import leak_blacklist from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs The provided code snippet includes necessary dependencies for implementing the `cleanup_reports_metadata` function. Write a Python function `def cleanup_reports_metadata()` to solve the following problem: Delete ReportMetadata for uploaded reports. Here is the function: def cleanup_reports_metadata(): """Delete ReportMetadata for uploaded reports.""" uploaded_reports = ndb_utils.get_all_from_query( data_types.ReportMetadata.query( ndb_utils.is_true(data_types.ReportMetadata.is_uploaded)), keys_only=True) ndb_utils.delete_multi(uploaded_reports)
Delete ReportMetadata for uploaded reports.
157,086
import collections import datetime import json from googleapiclient.errors import HttpError from clusterfuzz._internal.base import dates from clusterfuzz._internal.base import errors from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.chrome import build_info from clusterfuzz._internal.crash_analysis import crash_comparer from clusterfuzz._internal.crash_analysis import severity_analyzer from clusterfuzz._internal.cron.libs import mail from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.fuzzing import leak_blacklist from clusterfuzz._internal.issue_management import issue_filer from clusterfuzz._internal.issue_management import issue_tracker_policy from clusterfuzz._internal.issue_management import issue_tracker_utils from clusterfuzz._internal.metrics import crash_stats from clusterfuzz._internal.metrics import logs def get_top_crashes_for_all_projects_and_platforms(limit=TOP_CRASHES_LIMIT): """Return top crashes for all projects and platforms.""" last_hour = crash_stats.get_last_successful_hour() if not last_hour: # No crash stats available, skip. return {} projects_to_jobs_and_platforms = get_jobs_and_platforms_for_project() top_crashes_by_project_and_platform_map = {} for project_name, project_map in projects_to_jobs_and_platforms.items(): top_crashes_by_project_and_platform_map[project_name] = {} for platform in project_map.platforms: where_clause = ( 'crash_type NOT IN UNNEST' f'({json.dumps(TOP_CRASHES_IGNORE_CRASH_TYPES)}) AND ' 'crash_state NOT IN UNNEST' f'({json.dumps(TOP_CRASHES_IGNORE_CRASH_STATES)}) AND ' f'job_type IN UNNEST({json.dumps(list(project_map.jobs))}) AND ' f'platform LIKE {json.dumps(platform.lower() + "%")} AND ' f'project = {json.dumps(project_name)}') _, rows = crash_stats.get( end=last_hour, block='day', days=TOP_CRASHES_DAYS_LOOKBEHIND, group_by='platform', where_clause=where_clause, group_having_clause='', sort_by='total_count', offset=0, limit=limit) if not rows: continue top_crashes_by_project_and_platform_map[project_name][platform] = [{ 'crashState': row['crashState'], 'crashType': row['crashType'], 'isSecurity': row['isSecurity'], 'totalCount': row['totalCount'], } for row in rows if row['totalCount'] >= TOP_CRASHES_MIN_THRESHOLD] return top_crashes_by_project_and_platform_map def delete_unreproducible_testcase_with_no_issue(testcase): """Delete an unreproducible testcase if it has no associated issue and has been open for a certain time interval.""" # Make sure that this testcase is an unreproducible bug. If not, bail out. if not testcase.one_time_crasher_flag: return # Make sure that this testcase has no associated bug. If not, bail out. if testcase.bug_information: return # Make sure that the testcase is at least # |UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE| days old, otherwise it will be # seen in crash stats anyway. if (testcase.timestamp and not dates.time_has_expired( testcase.timestamp, days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE)): return # Make sure that testcase is not seen in crash stats for a certain time # interval. 
if get_crash_occurrence_platforms( testcase, data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE): return testcase.key.delete() logs.log( f'Deleted unreproducible testcase {testcase.key.id()} with no issue.') def mark_duplicate_testcase_as_closed_with_no_issue(testcase): """Closes a duplicate testcase if it has no associated issue and has been open for a certain time interval.""" # Make sure that this testcase is a duplicate bug. If not, bail out. if testcase.status != 'Duplicate': return # Make sure that this testcase has no associated bug. If not, bail out. if testcase.bug_information: return # Make sure that testcase has been open for a certain time interval. We do # a null timestamp check since some older testcases could be missing it. if (testcase.timestamp and not dates.time_has_expired( testcase.timestamp, days=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE)): return testcase.fixed = 'NA' testcase.open = False testcase.put() logs.log(f'Closed duplicate testcase {testcase.key.id()} with no issue.') def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue): """Mark an issue as fixed if all of its associated reproducible testcases are fixed.""" verified_label = policy.label('verified') if not verified_label: return # If there is no associated issue, then bail out. if not issue or not testcase.bug_information: return # If the issue is closed in a status other than Fixed, like Duplicate, WontFix # or Archived, we shouldn't change it. Bail out. if not issue.is_open and issue.status != policy.status('fixed'): return # Check testcase status, so as to skip unreproducible uploads. if testcase.status not in ['Processed', 'Duplicate']: return # If the testcase is still open, no work needs to be done. Bail out. if testcase.open: return # FIXME: Find a better solution to skip over reproducible tests that are now # showing up as flaky (especially when we are unable to reproduce the crash # in the original crash revision). if testcase.fixed == 'NA': return # We can only verify fixed issues for reproducible testcases. If the testcase # is unreproducible, bail out. Exception is if we explicitly marked this as # fixed. if testcase.one_time_crasher_flag and testcase.fixed != 'Yes': return # Make sure that no other testcases associated with this issue are open. similar_testcase = data_types.Testcase.query( data_types.Testcase.bug_information == testcase.bug_information, ndb_utils.is_true(data_types.Testcase.open), ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get() if similar_testcase: return # As a last check, do the expensive call of actually checking all issue # comments to make sure we didn't do the verification already and we didn't # get called out on issue mistriage. # If a "good" label was set, we ignore past "verified" flipping. good_label = policy.label('good') if good_label and good_label in issue.labels: was_verified_added = verified_label in issue.labels else: was_verified_added = issue_tracker_utils.was_label_added( issue, verified_label) if (was_verified_added or issue_tracker_utils.was_label_added(issue, policy.label('wrong'))): return issue.labels.add(verified_label) comment = f'ClusterFuzz testcase {testcase.key.id()} is verified as fixed' fixed_range_url = data_handler.get_fixed_range_url(testcase) if fixed_range_url: comment += ' in ' + fixed_range_url else: comment += '.' 
if utils.is_oss_fuzz(): comment += OSS_FUZZ_INCORRECT_COMMENT else: comment = _append_generic_incorrect_comment(comment, policy, issue, ' and re-open the issue.') skip_auto_close = data_handler.get_value_from_job_definition( testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE') if not skip_auto_close: issue.status = policy.status('verified') issue.save(new_comment=comment, notify=True) logs.log(f'Mark issue {issue.id} as verified for ' f'fixed testcase {testcase.key.id()}.') issue_filer.notify_issue_update(testcase, 'verified') def mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue): """Mark an unreproducible testcase as fixed if the associated issue is closed.""" # If the testcase is already closed, no more work to do. if not testcase.open: return # Make sure that this testcase is an unreproducible bug. If not, bail out. if not testcase.one_time_crasher_flag: return # Make sure that this testcase has an associated bug. If not, bail out. if not testcase.bug_information: return # Make sure that there is an associated bug and it is in closed state. if not issue or issue.is_open: return testcase.fixed = 'NA' testcase.open = False testcase.put() logs.log(f'Closed unreproducible testcase {testcase.key.id()} ' 'with issue closed.') def mark_unreproducible_testcase_and_issue_as_closed_after_deadline( policy, testcase, issue): """Closes an unreproducible testcase and its associated issue after a certain time period.""" # If the testcase is already closed, no more work to do. if not testcase.open: return # Check testcase status, so as to skip unreproducible uploads. if testcase.status not in ['Processed', 'Duplicate']: return # Make sure that this testcase is an unreproducible bug. If not, bail out. if not testcase.one_time_crasher_flag: return # Make sure that this testcase has an associated bug. If not, bail out. if not testcase.bug_information: return # If this testcase was manually uploaded, don't change issue state as our # reproduction result might be incorrect. if testcase.uploader_email: return # Make sure that there is an associated bug and it is in open state. if not issue or not issue.is_open: return # Skip closing if flag is set. skip_auto_close = data_handler.get_value_from_job_definition( testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE') if skip_auto_close: return # Check if any reproducible open testcases are associated with this bug. # If yes, return. similar_testcase = data_types.Testcase.query( data_types.Testcase.bug_information == testcase.bug_information, ndb_utils.is_true(data_types.Testcase.open), ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get() if similar_testcase: return # Make sure that the testcase is at least # |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE| days old, otherwise it will be # seen in crash stats anyway. if (testcase.timestamp and not dates.time_has_expired( testcase.timestamp, days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)): return # Handle testcase that turned from reproducible to unreproducible. Account # for the recent progression task run time. last_tested_crash_time = testcase.get_metadata('last_tested_crash_time') if (last_tested_crash_time and not dates.time_has_expired( last_tested_crash_time, days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)): return # Make sure that there is no crash seen in the deadline period. 
if get_crash_occurrence_platforms( testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE): return # As a last check, do the expensive call of actually checking all issue # comments to make sure we didn't get called out on issue mistriage. if issue_tracker_utils.was_label_added(issue, policy.label('wrong')): return # Close associated issue and testcase. comment = (f'ClusterFuzz testcase {testcase.key.id()} ' 'is flaky and no longer crashes, so closing issue.') if utils.is_oss_fuzz(): comment += OSS_FUZZ_INCORRECT_COMMENT else: comment = _append_generic_incorrect_comment(comment, policy, issue, ' and re-open the issue.') issue.status = policy.status('wontfix') issue.save(new_comment=comment, notify=True) testcase.fixed = 'NA' testcase.open = False testcase.put() issue_filer.notify_issue_update(testcase, 'wontfix') logs.log(f'Closed unreproducible testcase {testcase.key.id()} ' 'and associated issue.') def mark_na_testcase_issues_as_wontfix(policy, testcase, issue): """Mark issues for testcases with fixed == 'NA' as WontFix.""" # Check for closed, NA testcases. if testcase.open or testcase.fixed != 'NA': return # Nothing to be done if no issue is attached, or if issue is already closed. if not issue or not issue.is_open: return # Make sure that no other testcases associated with this issue are open. similar_testcase = data_types.Testcase.query( data_types.Testcase.bug_information == testcase.bug_information, ndb_utils.is_true(data_types.Testcase.open), ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get() if similar_testcase: return # Make sure that there is no crash seen in the deadline period. if get_crash_occurrence_platforms( testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE): return # As a last check, do the expensive call of actually checking all issue # comments to make sure we didn't get called out on issue mistriage. if issue_tracker_utils.was_label_added(issue, policy.label('wrong')): return skip_auto_close = data_handler.get_value_from_job_definition( testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE') if skip_auto_close: return comment = (f'ClusterFuzz testcase {testcase.key.id()} is closed as invalid, ' 'so closing issue.') issue.status = policy.status('wontfix') issue.save(new_comment=comment, notify=True) issue_filer.notify_issue_update(testcase, 'wontfix') logs.log( f'Closing issue {issue.id} for invalid testcase {testcase.key.id()}.') def mark_testcase_as_triaged_if_needed(testcase, issue): """Mark testcase as triage complete if both testcase and associated issue are closed.""" # Check if testcase is open. If yes, bail out. if testcase.open: return # Check if there is an associated bug in open state. If yes, bail out. if issue: # Get latest issue object to ensure our update went through. issue = issue_tracker_utils.get_issue_for_testcase(testcase) if issue.is_open: return testcase.triaged = True testcase.put() def mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue): """Mark testcase as closed if the associated issue is closed.""" # If the testcase is already closed, no more work to do. if not testcase.open: return # If there is no associated issue, then bail out. if not issue or not testcase.bug_information: return # If the issue is still open, no work needs to be done. Bail out. if issue.is_open: return # Make sure we passed our deadline based on issue closed timestamp. 
if (issue.closed_time and not dates.time_has_expired( issue.closed_time, days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE)): return # If the issue has an ignore label, don't close the testcase and bail out. # This helps to prevent new bugs from getting filed for legit WontFix cases. if issue_tracker_utils.was_label_added(issue, policy.label('ignore')): return testcase.open = False testcase.fixed = 'NA' testcase.put() logs.log(f'Closed testcase {testcase.key.id()} with issue closed.') def mark_testcase_as_closed_if_job_is_invalid(testcase, jobs): """Mark testcase as closed if the associated job type does not exist.""" # If the testcase is already closed, no more work to do. if not testcase.open: return # Check if the testcase job name is in the list of jobs. if testcase.job_type in jobs: return testcase.open = False testcase.fixed = 'NA' testcase.put() logs.log(f'Closed testcase {testcase.key.id()} with invalid job.') def notify_closed_issue_if_testcase_is_open(policy, testcase, issue): """Notify closed issue if associated testcase is still open after a certain time period.""" needs_feedback_label = policy.label('needs_feedback') if not needs_feedback_label: return # If the testcase is already closed, no more work to do. if not testcase.open: return # Check testcase status, so as to skip unreproducible uploads. if testcase.status not in ['Processed', 'Duplicate']: return # If there is no associated issue, then bail out. if not issue or not testcase.bug_information: return # If the issue is still open, no work needs to be done. Bail out. if issue.is_open: return # If we have already passed our deadline based on issue closed timestamp, # no need to notify. We will close the testcase instead. if (issue.closed_time and not dates.time_has_expired( issue.closed_time, days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE)): return # Check if there is ignore label on issue already. If yes, bail out. if issue_tracker_utils.was_label_added(issue, policy.label('ignore')): return # Check if we did add the notification comment already. If yes, bail out. if issue_tracker_utils.was_label_added(issue, needs_feedback_label): return issue.labels.add(needs_feedback_label) if issue.status in [policy.status('fixed'), policy.status('verified')]: issue_comment = ( f'ClusterFuzz testcase {testcase.key.id()} is still reproducing ' 'on tip-of-tree build ' '(trunk).\n\nPlease re-test your fix against this testcase and if the ' 'fix was incorrect or incomplete, please re-open the bug.') wrong_label = policy.label('wrong') if wrong_label: issue_comment += (' Otherwise, ignore this notification and add the ' f'{issue.issue_tracker.label_text(wrong_label)}.') else: # Covers WontFix, Archived cases. issue_comment = ( f'ClusterFuzz testcase {testcase.key.id()} ' 'is still reproducing on tip-of-tree build ' '(trunk).\n\nIf this testcase was not reproducible locally or ' 'unworkable, ignore this notification and we will file another ' 'bug soon with hopefully a better and workable testcase.\n\n') ignore_label = policy.label('ignore') if ignore_label: issue_comment += ( 'Otherwise, if this is not intended to be fixed (e.g. 
this is an ' 'intentional crash), please add the ' f'{issue.issue_tracker.label_text(ignore_label)} to ' 'prevent future bug filing with similar crash stacktrace.') issue.save(new_comment=issue_comment, notify=True) logs.log(f'Notified closed issue for open testcase {testcase.key.id()}.') def notify_issue_if_testcase_is_invalid(policy, testcase, issue): """Leave comments on associated issues when test cases are no longer valid.""" invalid_fuzzer_label = policy.label('invalid_fuzzer') if not invalid_fuzzer_label: return if not issue or not testcase.bug_information: return # If the issue is closed, there's no work to do. if not issue.is_open: return # Currently, this only happens if a test case relies on a fuzzer that has # been deleted. This can be modified if more cases are needed in the future. if not testcase.get_metadata('fuzzer_was_deleted'): return # Check if we added this message once. If yes, bail out. if issue_tracker_utils.was_label_added(issue, invalid_fuzzer_label): return issue_comment = ( f'ClusterFuzz testcase {testcase.key.id()} ' 'is associated with an obsolete fuzzer and can ' 'no longer be processed. Please close the issue if it is no longer ' 'actionable.') issue.labels.add(invalid_fuzzer_label) issue.save(new_comment=issue_comment, notify=True) logs.log(f'Notified issue {issue.id} for ' f'invalid testcase {testcase.key.id()}.') def notify_uploader_when_testcase_is_processed(policy, testcase, issue): """Notify uploader by email when all the testcase tasks are finished.""" testcase_id = testcase.key.id() # Check if this is a user upload. If not, bail out. upload_metadata = data_types.TestcaseUploadMetadata.query( data_types.TestcaseUploadMetadata.testcase_id == testcase_id).get() if not upload_metadata: return # Check that we have a valid email to send the notification. If not, bail out. to_email = upload_metadata.uploader_email if not to_email: return # If this is a bundled archive with multiple testcases, then don't send email # for individual testcases. if upload_metadata.bundled: return # Check if the notification is already sent once. If yes, bail out. if data_handler.is_notification_sent(testcase_id, to_email): return # Make sure all testcase tasks are done (e.g. minimization, regression, etc). if not data_handler.critical_tasks_completed(testcase): return notify = not upload_metadata.quiet_flag # If the same issue was specified at time of upload, update it. if (issue and str(issue.id) == upload_metadata.bug_information and not testcase.duplicate_of): issue_description = data_handler.get_issue_description(testcase) _update_issue_when_uploaded_testcase_is_processed( policy, testcase, issue, issue_description, upload_metadata.bug_summary_update_flag, notify) if notify: issue_description_without_crash_state = data_handler.get_issue_description( testcase, hide_crash_state=True) _send_email_to_uploader(testcase_id, to_email, issue_description_without_crash_state) # Make sure to create notification entry, as we use this to update bug. 
data_handler.create_notification_entry(testcase_id, to_email) def update_os_labels(policy, testcase, issue): """Add OS labels to issue.""" os_label = policy.label('os') if not os_label: return if not issue: return platforms = get_crash_occurrence_platforms(testcase) platforms = platforms.union(get_platforms_from_testcase_variants(testcase)) logs.log( f'Found {len(platforms)} platforms for the testcase {testcase.key.id()}.', platforms=platforms) for platform in platforms: label = os_label.replace('%PLATFORM%', platform.capitalize()) if not issue_tracker_utils.was_label_added(issue, label): issue.labels.add(label) issue.save(notify=False) logs.log(f'Updated labels of issue {issue.id}.', labels=issue.labels) def update_fuzz_blocker_label(policy, testcase, issue, top_crashes_by_project_and_platform_map): """Add top crash label to issue.""" fuzz_blocker_label = policy.label('fuzz_blocker') if not fuzz_blocker_label: return if not issue: return if not testcase.open: return top_crash_platforms = get_top_crash_platforms( testcase, top_crashes_by_project_and_platform_map) if not top_crash_platforms: # Not a top crasher, bail out. return if issue_tracker_utils.was_label_added(issue, fuzz_blocker_label): # Issue was already marked a top crasher, bail out. return if len(top_crash_platforms) == 1: platform_message = f'{top_crash_platforms[0]} platform' else: platform_message = f'{", ".join(top_crash_platforms[:-1])} and ' \ f'{top_crash_platforms[-1]} platforms' fuzzer_name = ( testcase.get_metadata('fuzzer_binary_name') or testcase.fuzzer_name) update_message = ( f'This crash occurs very frequently on {platform_message} and ' f'is likely preventing the fuzzer {fuzzer_name} ' 'from making much progress. Fixing this will allow more bugs ' 'to be found.') if utils.is_oss_fuzz(): update_message += OSS_FUZZ_INCORRECT_COMMENT elif utils.is_chromium(): label_text = issue.issue_tracker.label_text( data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL) update_message += '\n\nMarking this bug as a blocker for next Beta release.' update_message = _append_generic_incorrect_comment( update_message, policy, issue, f' and remove the {label_text}.') issue.labels.add(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL) # Update with the next beta for trunk, and remove existing milestone label. beta_milestone_label = ( f'M-{build_info.get_release_milestone("head", testcase.platform)}') if beta_milestone_label not in issue.labels: issue.labels.remove_by_prefix('M-') issue.labels.add(beta_milestone_label) logs.log(update_message) issue.labels.add(fuzz_blocker_label) issue.save(new_comment=update_message, notify=True) def update_component_labels(policy, testcase, issue): """Add components to the issue if needed.""" if not issue: return components = _get_predator_result_item( testcase, 'suspected_components', default=[]) # Remove components already in issue or whose more specific variants exist. filtered_components = [] for component in components: found_component_in_issue = any( component == issue_component or issue_component.startswith(component + '>') for issue_component in issue.components) if not found_component_in_issue: filtered_components.append(component) if not filtered_components: # If there are no new components to add, then we shouldn't make any changes # to issue. return # Don't run on issues we've already applied automatic components to in case # labels are removed manually. This may cause issues in the event that we # rerun a test case, but it seems like a reasonable tradeoff to avoid spam. 
logs.log( 'google_issue_tracker: Checking if auto_components_label %s (policy %s) ' 'is in %s. Result: %s' % (data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL, policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL), list(issue.labels), issue_tracker_utils.was_label_added( issue, policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)))) if issue_tracker_utils.was_label_added( issue, policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)): return for filtered_component in filtered_components: issue.components.add(filtered_component) issue.labels.add( policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_COMPONENTS_LABEL)) label_text = issue.issue_tracker.label_text( policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_COMPONENTS_LABEL)) issue_comment = ( 'Automatically applying components based on crash stacktrace and ' 'information from OWNERS files.\n\n' f'If this is incorrect, please apply the {label_text}.') issue.save(new_comment=issue_comment, notify=True) def update_issue_ccs_from_owners_file(policy, testcase, issue): """Add cc to an issue based on owners list from owners file. This is currently applicable to fuzz targets only.""" auto_cc_label = policy.label('auto_cc_from_owners') if not auto_cc_label: return if not issue or not issue.is_open: return if testcase.get_metadata('has_issue_ccs_from_owners_file'): return ccs_list = utils.parse_delimited( testcase.get_metadata('issue_owners', ''), delimiter=',', strip=True, remove_empty=True) if not ccs_list: return # Remove unsupported entries. ccs_list = _sanitize_ccs_list(ccs_list) # If we've assigned the ccs before, it likely means we were incorrect. # Don't try again for this particular issue. logs.log( 'google_issue_tracker: Checking if auto_cc_label %s (policy: %s) is in ' '%s. Result: %s' % (auto_cc_label, policy.label(auto_cc_label), list(issue.labels), issue_tracker_utils.was_label_added(issue, auto_cc_label))) if issue_tracker_utils.was_label_added(issue, auto_cc_label): return ccs_added = False actions = list(issue.actions) for cc in ccs_list: if cc in issue.ccs: continue # If cc was previously manually removed from the cc list, we assume that # they were incorrectly added. Don't try to add them again. cc_was_removed = any(cc in action.ccs.removed for action in actions) if cc_was_removed: continue issue.ccs.add(cc) ccs_added = True if not ccs_added: # Everyone we'd expect to see has already been cced on the issue. No need # to spam it with another comment. Also, set the metadata to avoid doing # this again. testcase.set_metadata('has_issue_ccs_from_owners_file', True) return issue_comment = ( 'Automatically adding ccs based on OWNERS file / target commit history.') if utils.is_oss_fuzz(): issue_comment += OSS_FUZZ_INCORRECT_COMMENT + '.' else: issue_comment = _append_generic_incorrect_comment(issue_comment, policy, issue, '.') issue.labels.add(auto_cc_label) issue.save(new_comment=issue_comment, notify=True) def update_issue_labels_for_flaky_testcase(policy, testcase, issue): """Update issue reproducibility label when testcase becomes flaky or unreproducible.""" if not issue or not issue.is_open: return # If the testcase is reproducible, then no change is needed. Bail out. if not testcase.one_time_crasher_flag: return # Make sure that no other reproducible testcases associated with this issue # are open. If yes, no need to update label. 
similar_reproducible_testcase = data_types.Testcase.query( data_types.Testcase.bug_information == testcase.bug_information, ndb_utils.is_true(data_types.Testcase.open), ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get() if similar_reproducible_testcase: return reproducible_label = policy.label('reproducible') unreproducible_label = policy.label('unreproducible') if not reproducible_label or not unreproducible_label: return # Make sure that this issue is not already marked Unreproducible. if unreproducible_label in issue.labels: return issue.labels.remove(reproducible_label) issue.labels.add(unreproducible_label) comment = (f'ClusterFuzz testcase {testcase.key.id()} appears to be flaky, ' f'updating reproducibility {issue.issue_tracker.label_type}.') issue.save(new_comment=comment) def update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue, only_allow_ccs=False): """Assign the issue to an appropriate owner if possible.""" logs.log(f'{update_issue_owner_and_ccs_from_predator_results}') if not issue or not issue.is_open: return logs.log('is_open') # If the issue already has an owner, we don't need to update the bug. if issue.assignee: return logs.log('noassignee') # If there are more than 3 suspected CLs, we can't be confident in the # results. Just skip any sort of notification to CL authors in this case. suspected_cls = _get_predator_result_item(testcase, 'suspected_cls') logs.log(f'suspected_cls {suspected_cls}') if not suspected_cls or len(suspected_cls) > 3: return logs.log('suspected_cls2') # If we've assigned an owner or cc once before, it likely means we were # incorrect. Don't try again for this particular issue. if (issue_tracker_utils.was_label_added( issue, policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL)) or issue_tracker_utils.was_label_added( issue, policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL))): return logs.log('never assigned') # Validate that the suspected CLs have all of the information we need before # continuing. This allows us to assume that they are well-formed later, # avoiding any potential exceptions that would interrupt this task. for suspected_cl in suspected_cls: url = suspected_cl.get('url') description = suspected_cl.get('description') author = suspected_cl.get('author') if not url or not description or not author: logs.log_error(f'Suspected CL for testcase {testcase.key.id()} ' 'is missing required information.') return if len(suspected_cls) == 1 and not only_allow_ccs: logs.log('only 1 CL') suspected_cl = suspected_cls[0] # If this owner has already been assigned before but has since been removed, # don't assign it to them again. for action in issue.actions: if action.assignee == suspected_cls[0]['author']: logs.log('already assigned') return # We have high confidence for the single-CL case, so we assign the owner. logs.log('Updating issue') issue.labels.add( policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_OWNER_LABEL)) issue.assignee = suspected_cl['author'] issue.status = policy.status('assigned') label_text = issue.issue_tracker.label_text( policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL)) issue_comment = ( 'Automatically assigning owner based on suspected regression ' f'changelist {suspected_cl["url"]} ({suspected_cl["description"]}).\n\n' 'If this is incorrect, please let us know why and apply the ' f'{label_text}. 
If you aren\'t the correct owner for this issue, ' 'please unassign yourself as soon as possible so it can be re-triaged.') else: if testcase.get_metadata('has_issue_ccs_from_predator_results'): logs.log('has_issue_ccs_from_predator_results') return issue_comment = ( 'Automatically adding ccs based on suspected regression changelists:' '\n\n') ccs_added = False for suspected_cl in suspected_cls: # Update the comment with the suspected CL, regardless of whether or not # we're ccing the author. This might, for example, catch the attention of # someone who has already been cced. author = suspected_cl['author'] issue_comment += f'{suspected_cl["description"]} by ' \ f'{author} - {suspected_cl["url"]}\n\n' logs.log('Suspected') if author in issue.ccs: logs.log('Author CCed') continue # If an author has previously been manually removed from the cc list, # we assume they were incorrectly added. Don't try to add them again. author_was_removed = False for action in issue.actions: if author in action.ccs.removed: author_was_removed = True logs.log('Breaking') break if author_was_removed: logs.log('Author removed') continue issue.ccs.add(author) ccs_added = True if not ccs_added: # Everyone we'd expect to see has already been cced on the issue. No need # to spam it with another comment. Also, set the metadata to avoid doing # this again (use the predator-results key so the guard above matches). testcase.set_metadata('has_issue_ccs_from_predator_results', True) logs.log('not ccs_added') return label_text = issue.issue_tracker.label_text( policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_WRONG_CL_LABEL)) issue.labels.add( policy.substitution_mapping( data_types.CHROMIUM_ISSUE_PREDATOR_AUTO_CC_LABEL)) issue_comment += ( 'If this is incorrect, please let us know why and apply the ' f'{label_text}.') try: issue.save(new_comment=issue_comment, notify=True) except HttpError: # If we see such an error when we aren't setting an owner, it's unexpected. if only_allow_ccs or not issue.assignee: logs.log_error( f'Unable to update issue for test case {testcase.key.id()}.') return # Retry without setting the owner. They may not be a chromium project # member, in which case we can try falling back to cc. issue = issue_tracker_utils.get_issue_for_testcase(testcase) update_issue_owner_and_ccs_from_predator_results( policy, testcase, issue, only_allow_ccs=True) The provided code snippet includes necessary dependencies for implementing the `cleanup_testcases_and_issues` function. Write a Python function `def cleanup_testcases_and_issues()` to solve the following problem: Clean up unneeded open testcases and their associated issues. Here is the function: def cleanup_testcases_and_issues(): """Clean up unneeded open testcases and their associated issues.""" logs.log('Getting all job type names.') jobs = data_handler.get_all_job_type_names() logs.log('Getting test case keys from query.') testcase_keys = ndb_utils.get_all_from_query( data_types.Testcase.query( ndb_utils.is_false(data_types.Testcase.triaged)), keys_only=True) logs.log('Getting top crashes for all projects and platforms.') top_crashes_by_project_and_platform_map = ( get_top_crashes_for_all_projects_and_platforms()) utils.python_gc() testcases_processed = 0 empty_issue_tracker_policy = issue_tracker_policy.get_empty() for testcase_key in testcase_keys: testcase_id = testcase_key.id() try: testcase = data_handler.get_testcase_by_id(testcase_id) except errors.InvalidTestcaseError: # Already deleted. 
continue logs.log(f'Processing testcase {testcase_id}.') try: issue = issue_tracker_utils.get_issue_for_testcase(testcase) policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase( testcase) if not policy: logs.log('No policy') policy = empty_issue_tracker_policy # Issue updates. update_os_labels(policy, testcase, issue) logs.log('maybe updated os') update_fuzz_blocker_label(policy, testcase, issue, top_crashes_by_project_and_platform_map) logs.log('maybe updated fuzz blocker') update_component_labels(policy, testcase, issue) logs.log('maybe updated component labels') update_issue_ccs_from_owners_file(policy, testcase, issue) logs.log('maybe updated issueccs') update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue) logs.log('maybe updated update_issue_owner_and_ccs_from_predator_results') update_issue_labels_for_flaky_testcase(policy, testcase, issue) # Testcase marking rules. mark_duplicate_testcase_as_closed_with_no_issue(testcase) mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue) mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue) mark_testcase_as_closed_if_job_is_invalid(testcase, jobs) mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue) mark_unreproducible_testcase_and_issue_as_closed_after_deadline( policy, testcase, issue) mark_na_testcase_issues_as_wontfix(policy, testcase, issue) # Notification, to be done at end after testcase state is updated from # previous rules. notify_closed_issue_if_testcase_is_open(policy, testcase, issue) notify_issue_if_testcase_is_invalid(policy, testcase, issue) notify_uploader_when_testcase_is_processed(policy, testcase, issue) # Mark testcase as triage complete if both testcase and associated issue # are closed. This also need to be done before the deletion rules. mark_testcase_as_triaged_if_needed(testcase, issue) # Testcase deletion rules. delete_unreproducible_testcase_with_no_issue(testcase) except Exception: logs.log_error(f'Failed to process testcase {testcase_id}.') testcases_processed += 1 if testcases_processed % 100 == 0: utils.python_gc()
Clean up unneeded open testcases and their associated issues.
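For context, the cron streams over a keys-only query, loads one entity at a time, and forces garbage collection every 100 testcases so a long run does not accumulate memory. A minimal, datastore-free sketch of that pattern; load and handle are hypothetical stand-ins for the datastore fetch and the per-testcase rules:

import gc

def process_in_batches(keys, load, handle, gc_every=100):
  """Stream over keys, loading one entity at a time."""
  for processed, key in enumerate(keys, start=1):
    entity = load(key)
    if entity is None:
      # Entity was deleted after the keys-only query ran; skip it.
      continue
    handle(entity)
    if processed % gc_every == 0:
      # Mirrors the cron's utils.python_gc() call every 100 testcases.
      gc.collect()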
157,087
import collections
import datetime
import json

from googleapiclient.errors import HttpError

from clusterfuzz._internal.base import dates
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.chrome import build_info
from clusterfuzz._internal.crash_analysis import crash_comparer
from clusterfuzz._internal.crash_analysis import severity_analyzer
from clusterfuzz._internal.cron.libs import mail
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import leak_blacklist
from clusterfuzz._internal.issue_management import issue_filer
from clusterfuzz._internal.issue_management import issue_tracker_policy
from clusterfuzz._internal.issue_management import issue_tracker_utils
from clusterfuzz._internal.metrics import crash_stats
from clusterfuzz._internal.metrics import logs

FUZZ_TARGET_UNUSED_THRESHOLD = 15

The provided code snippet includes necessary dependencies for implementing the `cleanup_unused_fuzz_targets_and_jobs` function. Write a Python function `def cleanup_unused_fuzz_targets_and_jobs()` to solve the following problem: Clean up unused FuzzTarget and FuzzTargetJob entities. Here is the function:
def cleanup_unused_fuzz_targets_and_jobs():
  """Clean up unused FuzzTarget and FuzzTargetJob entities."""
  last_run_cutoff = utils.utcnow() - datetime.timedelta(
      days=FUZZ_TARGET_UNUSED_THRESHOLD)

  unused_target_jobs = data_types.FuzzTargetJob.query(
      data_types.FuzzTargetJob.last_run < last_run_cutoff)
  valid_target_jobs = data_types.FuzzTargetJob.query(
      data_types.FuzzTargetJob.last_run >= last_run_cutoff)

  to_delete = [t.key for t in unused_target_jobs]

  valid_fuzz_targets = {t.fuzz_target_name for t in valid_target_jobs}
  for fuzz_target in ndb_utils.get_all_from_model(data_types.FuzzTarget):
    if fuzz_target.fully_qualified_name() not in valid_fuzz_targets:
      to_delete.append(fuzz_target.key)

  ndb_utils.delete_multi(to_delete)
Clean up unused FuzzTarget and FuzzTargetJob entities.
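The core of the routine is a cutoff partition: any job whose last_run is older than the threshold is deleted, and a target survives only if at least one of its jobs is still fresh. A minimal, datastore-free sketch of that logic; the record tuples here are illustrative, not ClusterFuzz's entity types:

import datetime

UNUSED_DAYS = 15

def find_stale(target_jobs, targets, now):
  """Return targets with no job run in the last UNUSED_DAYS days."""
  cutoff = now - datetime.timedelta(days=UNUSED_DAYS)
  fresh = {name for name, last_run in target_jobs if last_run >= cutoff}
  return [t for t in targets if t not in fresh]

now = datetime.datetime(2024, 1, 20)
jobs = [('libpng_read_fuzzer', datetime.datetime(2024, 1, 18)),
        ('old_fuzzer', datetime.datetime(2023, 11, 1))]
assert find_stale(jobs, ['libpng_read_fuzzer', 'old_fuzzer'],
                  now) == ['old_fuzzer']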
157,088
import collections
import datetime
import json

from googleapiclient.errors import HttpError

from clusterfuzz._internal.base import dates
from clusterfuzz._internal.base import errors
from clusterfuzz._internal.base import memoize
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.chrome import build_info
from clusterfuzz._internal.crash_analysis import crash_comparer
from clusterfuzz._internal.crash_analysis import severity_analyzer
from clusterfuzz._internal.cron.libs import mail
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import leak_blacklist
from clusterfuzz._internal.issue_management import issue_filer
from clusterfuzz._internal.issue_management import issue_tracker_policy
from clusterfuzz._internal.issue_management import issue_tracker_utils
from clusterfuzz._internal.metrics import crash_stats
from clusterfuzz._internal.metrics import logs

UNUSED_HEARTBEAT_THRESHOLD = 15

The provided code snippet includes necessary dependencies for implementing the `cleanup_unused_heartbeats` function. Write a Python function `def cleanup_unused_heartbeats()` to solve the following problem: Clean up unused heartbeat entities. Here is the function:
def cleanup_unused_heartbeats():
  """Clean up unused heartbeat entities."""
  cutoff_time = utils.utcnow() - datetime.timedelta(
      days=UNUSED_HEARTBEAT_THRESHOLD)
  unused_heartbeats = ndb_utils.get_all_from_query(
      data_types.Heartbeat.query(
          data_types.Heartbeat.last_beat_time < cutoff_time),
      keys_only=True)

  ndb_utils.delete_multi(unused_heartbeats)
Clean up unused heartbeat entities.
157,089
import logging

import googleapiclient

from clusterfuzz._internal.base import utils

def _create_client(service_name, version='v1'):
  """Create a googleapiclient client."""
  return googleapiclient.discovery.build(service_name, version)

def _service_account_id(project):
  """Return service account ID for project."""
  # From
  # cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/create:
  #
  # The account id that is used to generate the service account email address
  # and a stable unique id. It is unique within a project, must be 6-30
  # characters long, and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])
  # to comply with RFC1035.
  account_id = _ACCOUNT_PREFIX + project.replace('_', '-')
  if not account_id[-1].isalnum():
    # Must end in '[a-z][0-9]'.
    account_id += '0'

  if len(account_id) < _MIN_LEN:
    # Must be at least |min_len| in length.
    account_id = account_id.ljust(_MIN_LEN, '0')

  # Use a hash prefix as the service account name if the project name is too
  # long.
  if len(account_id) > _MAX_LEN:
    account_id = _ACCOUNT_PREFIX + utils.string_hash(project)[:_HASH_PREFIX_LEN]

  assert len(account_id) >= _MIN_LEN and len(account_id) <= _MAX_LEN
  return account_id

def get_service_account(iam, project_id, service_account_id):
  """Try to get a service account. Returns None if it does not exist."""
  try:
    request = iam.projects().serviceAccounts().get(
        name=(f'projects/{project_id}/serviceAccounts/'
              f'{_service_account_email(project_id, service_account_id)}'))
    return request.execute()
  except googleapiclient.errors.HttpError as e:
    if e.resp.status == 404:
      return None

    raise

The provided code snippet includes necessary dependencies for implementing the `get_or_create_service_account` function. Write a Python function `def get_or_create_service_account(project)` to solve the following problem: Get or create service account for the project. Here is the function:
def get_or_create_service_account(project):
  """Get or create service account for the project."""
  iam = _create_client('iam')
  project_id = utils.get_application_id()
  service_account_id = _service_account_id(project)
  service_account = get_service_account(iam, project_id, service_account_id)
  if service_account:
    logging.info('Using existing service account for %s.', project)
    return service_account, True

  logging.info('Creating new service account for %s.', project)
  request = iam.projects().serviceAccounts().create(
      name='projects/' + project_id,
      body={
          'accountId': service_account_id,
          'serviceAccount': {
              'displayName': project,
          }
      })

  return request.execute(), False
Get or create service account for the project.
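A quick illustration of the account-ID normalization rules above. The prefix and length constants are not defined in the snippet, so the values here are assumptions chosen to satisfy the RFC1035-style limits quoted in the docstring:

ACCOUNT_PREFIX = 'bucket-'  # assumed; the real _ACCOUNT_PREFIX is not shown
MIN_LEN, MAX_LEN = 6, 30    # per the 6-30 character limit in the docstring

def service_account_id(project):
  account_id = ACCOUNT_PREFIX + project.replace('_', '-')
  if not account_id[-1].isalnum():
    account_id += '0'  # must end in a letter or digit
  if len(account_id) < MIN_LEN:
    account_id = account_id.ljust(MIN_LEN, '0')
  if len(account_id) > MAX_LEN:
    account_id = (ACCOUNT_PREFIX + 'deadbeef')[:MAX_LEN]  # hash stand-in
  return account_id

assert service_account_id('libpng_proto') == 'bucket-libpng-proto'
assert service_account_id('x-') == 'bucket-x-0'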
157,090
import logging

import googleapiclient

from clusterfuzz._internal.base import utils

def _create_client(service_name, version='v1'):
  """Create a googleapiclient client."""
  return googleapiclient.discovery.build(service_name, version)

def _add_service_account_role(policy, role, service_account):
  """Add a role to a service account. Returns whether or not changes were
  made."""
  binding = _get_or_insert_iam_binding(policy, role)
  service_account_member = 'serviceAccount:' + service_account
  if service_account_member not in binding['members']:
    binding['members'].append(service_account_member)
    return True

  return False

The provided code snippet includes necessary dependencies for implementing the `set_service_account_roles` function. Write a Python function `def set_service_account_roles(service_account)` to solve the following problem: Set roles for service account. Here is the function:
def set_service_account_roles(service_account):
  """Set roles for service account."""
  project_id = utils.get_application_id()
  resource_manager = _create_client('cloudresourcemanager')

  request = resource_manager.projects().getIamPolicy(
      resource=project_id, body={})
  policy = request.execute()

  # Set logging and metrics permissions.
  policy_changed = False
  policy_changed |= _add_service_account_role(
      policy, 'roles/logging.logWriter', service_account['email'])
  policy_changed |= _add_service_account_role(
      policy, 'roles/monitoring.metricWriter', service_account['email'])

  if not policy_changed:
    return

  request = resource_manager.projects().setIamPolicy(
      resource=project_id, body={
          'policy': policy,
      })
  request.execute()
Set roles for service account.
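_get_or_insert_iam_binding is referenced above but not shown. A plausible sketch of what such a helper does on the IAM policy dict (an assumption, not the real implementation):

def get_or_insert_iam_binding(policy, role):
  """Return the binding for |role|, creating an empty one if missing."""
  for binding in policy.setdefault('bindings', []):
    if binding['role'] == role:
      return binding
  binding = {'role': role, 'members': []}
  policy['bindings'].append(binding)
  return binding

policy = {'bindings': []}
b = get_or_insert_iam_binding(policy, 'roles/logging.logWriter')
b['members'].append('serviceAccount:sa@project.iam.gserviceaccount.com')
# A second lookup returns the same binding rather than creating another.
assert get_or_insert_iam_binding(policy, 'roles/logging.logWriter') is b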
157,091
import datetime
import json
import os

from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

def _latest_report_info_dir(bucket):
  """Returns a GCS URL to the latest report info for the given bucket."""
  return f'gs://{bucket}/latest_report_info/'

def _basename(gcs_path):
  """Returns the basename for the given path without file extension."""
  return os.path.splitext(os.path.basename(gcs_path))[0]

def _process_project(project_name, latest_project_info_url, bucket):
  """Collects coverage information for all fuzz targets in the given project
  and the total stats for the project."""
  logs.log('Processing coverage for %s project.' % project_name)
  report_info = _read_json(latest_project_info_url)
  if not report_info:
    logs.log_warn('Skipping code coverage for %s project.' % project_name)
    return

  # Iterate through report_info['fuzzer_stats_dir'] and prepare
  # CoverageInformation entities for individual fuzz targets.
  entities = []
  for fuzzer in storage.list_blobs(
      report_info['fuzzer_stats_dir'], recursive=False):
    fuzzer_stats = _process_fuzzer_stats(fuzzer, report_info, project_name,
                                         bucket)
    if fuzzer_stats:
      entities.append(fuzzer_stats)

  logs.log('Processed coverage for %d targets in %s project.' %
           (len(entities), project_name))

  # Prepare CoverageInformation entity for the total project stats.
  project_stats = _process_project_stats(report_info, project_name)
  if project_stats:
    entities.append(project_stats)

  ndb_utils.put_multi(entities)

The provided code snippet includes necessary dependencies for implementing the `collect_fuzzer_coverage` function. Write a Python function `def collect_fuzzer_coverage(bucket)` to solve the following problem: Actual implementation of the fuzzer coverage task. Here is the function:
def collect_fuzzer_coverage(bucket):
  """Actual implementation of the fuzzer coverage task."""
  url = _latest_report_info_dir(bucket)
  for latest_project_report_info_path in storage.list_blobs(
      url, recursive=False):
    # Path is relative to the bucket.
    project = _basename(latest_project_report_info_path)
    latest_project_info_url = storage.get_cloud_storage_file_path(
        bucket, latest_project_report_info_path)
    _process_project(project, latest_project_info_url, bucket)
Actual implementation of the fuzzer coverage task.
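To make the path handling concrete, here is how the two helpers behave on a typical latest-report blob; the bucket name is illustrative:

import os

def latest_report_info_dir(bucket):
  return f'gs://{bucket}/latest_report_info/'

def basename(gcs_path):
  return os.path.splitext(os.path.basename(gcs_path))[0]

assert (latest_report_info_dir('oss-fuzz-coverage') ==
        'gs://oss-fuzz-coverage/latest_report_info/')
# A blob listed under that prefix maps back to its project name:
assert basename('latest_report_info/libpng.json') == 'libpng'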
157,092
import datetime
import json
import re

from google.cloud import ndb
import requests

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.issue_management import issue_tracker_utils
from clusterfuzz._internal.metrics import logs

class OssFuzzBuildStatusError(Exception):
  """Exceptions for the build status cron."""

def get_build_failure(project_name, build_type):
  """Return the last build failure for the project."""
  key = ndb.Key(data_types.OssFuzzBuildFailure,
                _get_ndb_key(project_name, build_type))
  return key.get()

def close_build_failure(build_failure):
  """Delete the build failure."""
  build_failure.key.delete()

def get_build_time(build):
  """Return a datetime for when the build was done."""
  # Strip the nanosecond precision from the timestamp, since it's not
  # supported by Python.
  stripped_timestamp = TIMESTAMP_PATTERN.match(build['finish_time'])
  if not stripped_timestamp:
    logs.log_error(
        'Invalid timestamp %s for %s.' % (build['finish_time'], build['name']))
    return None

  return datetime.datetime.strptime(
      stripped_timestamp.group(0), TIMESTAMP_FORMAT)

def close_bug(issue_tracker, issue_id, project_name):
  """Close a build failure bug."""
  logs.log('Closing build failure bug (project=%s, issue_id=%s).' %
           (project_name, issue_id))
  issue = issue_tracker.get_original_issue(issue_id)
  issue.status = 'Verified'
  issue.save(
      new_comment='The latest build has succeeded, closing this issue.',
      notify=True)

The provided code snippet includes necessary dependencies for implementing the `_close_fixed_builds` function. Write a Python function `def _close_fixed_builds(projects, build_type)` to solve the following problem: Close bugs for fixed builds. Here is the function:
def _close_fixed_builds(projects, build_type):
  """Close bugs for fixed builds."""
  issue_tracker = issue_tracker_utils.get_issue_tracker()
  if not issue_tracker:
    raise OssFuzzBuildStatusError('Failed to get issue tracker.')

  for project in projects:
    project_name = project['name']
    builds = project['history']
    if not builds:
      continue

    build_failure = get_build_failure(project_name, build_type)
    if not build_failure:
      continue

    build = builds[0]
    if not build['success']:
      continue

    if build_failure.last_checked_timestamp >= get_build_time(build):
      logs.log_error('Latest successful build time for %s in %s config is '
                     'older than or equal to last failure time.' %
                     (project_name, build_type))
      continue

    if build_failure.issue_id is not None:
      close_bug(issue_tracker, build_failure.issue_id, project_name)

    close_build_failure(build_failure)
Close bugs for fixed builds.
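TIMESTAMP_PATTERN and TIMESTAMP_FORMAT are not shown in the snippet. Plausible definitions that match the stated intent (dropping sub-second precision, which strptime cannot parse at nanosecond resolution) would look like this; both constants here are assumptions:

import datetime
import re

TIMESTAMP_PATTERN = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')  # assumed
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S'  # assumed

def build_time(finish_time):
  match = TIMESTAMP_PATTERN.match(finish_time)
  if not match:
    return None
  return datetime.datetime.strptime(match.group(0), TIMESTAMP_FORMAT)

# Cloud Build style timestamp with nanosecond precision:
assert (build_time('2024-01-05T12:34:56.123456789Z') ==
        datetime.datetime(2024, 1, 5, 12, 34, 56))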
157,093
import datetime
import json
import re

from google.cloud import ndb
import requests

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.issue_management import issue_tracker_utils
from clusterfuzz._internal.metrics import logs

MAIN_BUILD_TYPE = FUZZING_BUILD_TYPE
MIN_CONSECUTIVE_BUILD_FAILURES = 3
REMINDER_INTERVAL = 6

class OssFuzzBuildStatusError(Exception):
  """Exceptions for the build status cron."""

def _get_oss_fuzz_project(project_name):
  """Return the OssFuzzProject entity for the given project."""
  return ndb.Key(data_types.OssFuzzProject, project_name).get()

def create_build_failure(project_name, failure, build_type):
  """Create new build failure."""
  return data_types.OssFuzzBuildFailure(
      id=_get_ndb_key(project_name, build_type),
      project_name=project_name,
      last_checked_timestamp=get_build_time(failure),
      build_type=build_type)

def get_build_failure(project_name, build_type):
  """Return the last build failure for the project."""
  key = ndb.Key(data_types.OssFuzzBuildFailure,
                _get_ndb_key(project_name, build_type))
  return key.get()

def get_build_time(build):
  """Return a datetime for when the build was done."""
  # Strip the nanosecond precision from the timestamp, since it's not
  # supported by Python.
  stripped_timestamp = TIMESTAMP_PATTERN.match(build['finish_time'])
  if not stripped_timestamp:
    logs.log_error(
        'Invalid timestamp %s for %s.' % (build['finish_time'], build['name']))
    return None

  return datetime.datetime.strptime(
      stripped_timestamp.group(0), TIMESTAMP_FORMAT)

def file_bug(issue_tracker, project_name, build_id, ccs, build_type):
  """File a new bug for a build failure."""
  logs.log('Filing bug for new build failure (project=%s, build_type=%s, '
           'build_id=%s).' % (project_name, build_type, build_id))

  issue = issue_tracker.new_issue()
  issue.title = '{project_name}: {build_type} build failure'.format(
      project_name=project_name, build_type=build_type.capitalize())
  issue.body = _get_issue_body(project_name, build_id, build_type)
  issue.status = 'New'
  issue.labels.add('Type-Build-Failure')
  issue.labels.add('Proj-' + project_name)

  for cc in ccs:
    issue.ccs.add(cc)

  issue.save()
  return str(issue.id)

def send_reminder(issue_tracker, issue_id, build_id):
  """Send a reminder about the build still failing."""
  issue = issue_tracker.get_original_issue(issue_id)

  comment = ('Friendly reminder that the build is still failing.\n'
             'Please try to fix this failure to ensure that fuzzing '
             'remains productive.\n'
             'Latest build log: {log_link}\n')
  comment = comment.format(log_link=_get_build_link(build_id))
  issue.save(new_comment=comment, notify=True)

The provided code snippet includes necessary dependencies for implementing the `_process_failures` function. Write a Python function `def _process_failures(projects, build_type)` to solve the following problem: Process failures. Here is the function:
def _process_failures(projects, build_type):
  """Process failures."""
  issue_tracker = issue_tracker_utils.get_issue_tracker()
  if not issue_tracker:
    raise OssFuzzBuildStatusError('Failed to get issue tracker.')

  for project in projects:
    project_name = project['name']
    builds = project['history']
    if not builds:
      continue

    build = builds[0]
    if build['success']:
      continue

    # Do not file an issue for non-main build types, if there is a main build
    # failure for the same project, as the root cause might be the same.
    if build_type != MAIN_BUILD_TYPE:
      main_build_failure = get_build_failure(project_name, MAIN_BUILD_TYPE)
      if main_build_failure:
        continue

    build_failure = get_build_failure(project_name, build_type)

    build_time = get_build_time(build)
    if build_failure:
      if build_time <= build_failure.last_checked_timestamp:
        # No updates.
        continue
    else:
      build_failure = create_build_failure(project_name, build, build_type)

    build_failure.last_checked_timestamp = build_time
    build_failure.consecutive_failures += 1
    if build_failure.consecutive_failures >= MIN_CONSECUTIVE_BUILD_FAILURES:
      if build_failure.issue_id is None:
        oss_fuzz_project = _get_oss_fuzz_project(project_name)
        if not oss_fuzz_project:
          logs.log(
              'Project %s is disabled, skipping bug filing.' % project_name)
          continue

        build_failure.issue_id = file_bug(issue_tracker, project_name,
                                          build['build_id'],
                                          oss_fuzz_project.ccs, build_type)
      elif (build_failure.consecutive_failures -
            MIN_CONSECUTIVE_BUILD_FAILURES) % REMINDER_INTERVAL == 0:
        send_reminder(issue_tracker, build_failure.issue_id,
                      build['build_id'])

    build_failure.put()
Process failures.
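The filing/reminder cadence follows directly from the two constants: a bug is filed on the 3rd consecutive failure, and reminders fire every 6 failures after that. A small sketch of which consecutive-failure counts trigger an action:

MIN_CONSECUTIVE_BUILD_FAILURES = 3
REMINDER_INTERVAL = 6

def action_for(consecutive_failures, has_issue):
  if consecutive_failures < MIN_CONSECUTIVE_BUILD_FAILURES:
    return None
  if not has_issue:
    return 'file_bug'
  if (consecutive_failures -
      MIN_CONSECUTIVE_BUILD_FAILURES) % REMINDER_INTERVAL == 0:
    return 'send_reminder'
  return None

assert action_for(2, has_issue=False) is None
assert action_for(3, has_issue=False) == 'file_bug'
assert action_for(9, has_issue=True) == 'send_reminder'  # 3 + 6
assert action_for(10, has_issue=True) is None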
157,094
import datetime
import json
import re

from google.cloud import ndb
import requests

from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.issue_management import issue_tracker_utils
from clusterfuzz._internal.metrics import logs

NO_BUILDS_THRESHOLD = datetime.timedelta(days=2)

def get_build_time(build):
  """Return a datetime for when the build was done."""
  # Strip the nanosecond precision from the timestamp, since it's not
  # supported by Python.
  stripped_timestamp = TIMESTAMP_PATTERN.match(build['finish_time'])
  if not stripped_timestamp:
    logs.log_error(
        'Invalid timestamp %s for %s.' % (build['finish_time'], build['name']))
    return None

  return datetime.datetime.strptime(
      stripped_timestamp.group(0), TIMESTAMP_FORMAT)

The provided code snippet includes necessary dependencies for implementing the `_check_last_get_build_time` function. Write a Python function `def _check_last_get_build_time(projects, build_type)` to solve the following problem: Check that builds are up to date. Here is the function:
def _check_last_get_build_time(projects, build_type):
  """Check that builds are up to date."""
  for project in projects:
    project_name = project['name']
    builds = project['history']
    if not builds:
      continue

    build = builds[0]
    time_since_last_build = utils.utcnow() - get_build_time(build)
    if time_since_last_build >= NO_BUILDS_THRESHOLD:
      # Something likely went wrong with the build infrastructure, log errors.
      logs.log_error('%s has not been built in %s config for %d days.' %
                     (project_name, build_type, time_since_last_build.days))
Check that builds are up to date.
157,095
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

def get_github_url(url):
  """Return contents of URL."""
  github_credentials = db_config.get_value('github_credentials')
  if not github_credentials:
    raise ProjectSetupError('No github credentials.')

  client_id, client_secret = github_credentials.strip().split(';')

  response = requests.get(
      url, auth=(client_id, client_secret), timeout=HTTP_TIMEOUT_SECONDS)
  if response.status_code != 200:
    logs.log_error(
        f'Failed to get github url: {url}.', status_code=response.status_code)
    response.raise_for_status()

  return json.loads(response.text)

def find_github_item_url(github_json, name):
  """Get url of a blob/tree from a github json response."""
  for item in github_json['tree']:
    if item['path'] == name:
      return item['url']

  return None

The provided code snippet includes necessary dependencies for implementing the `get_oss_fuzz_projects` function. Write a Python function `def get_oss_fuzz_projects()` to solve the following problem: Return list of projects for oss-fuzz. Here is the function:
def get_oss_fuzz_projects():
  """Return list of projects for oss-fuzz."""
  ossfuzz_tree_url = ('https://api.github.com/repos/google/oss-fuzz/'
                      'git/trees/master')
  tree = get_github_url(ossfuzz_tree_url)
  projects = []

  projects_url = find_github_item_url(tree, 'projects')
  if not projects_url:
    logs.log_error('No projects found.')
    return []

  tree = get_github_url(projects_url)
  for item in tree['tree']:
    if item['type'] != 'tree':
      continue

    item_json = get_github_url(item['url'])
    project_yaml_url = find_github_item_url(item_json, 'project.yaml')
    if not project_yaml_url:
      continue

    projects_yaml = get_github_url(project_yaml_url)
    info = yaml.safe_load(base64.b64decode(projects_yaml['content']))

    has_dockerfile = (
        find_github_item_url(item_json, 'Dockerfile') or 'dockerfile' in info)
    if not has_dockerfile:
      continue

    projects.append((item['path'], info))

  return projects
Return list of projects for oss-fuzz.
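The GitHub git/trees responses the function walks have the shape sketched below; find_github_item_url is just a linear scan over the 'tree' array. The blob URL here is made up for illustration:

def find_github_item_url(github_json, name):
  for item in github_json['tree']:
    if item['path'] == name:
      return item['url']
  return None

tree = {'tree': [
    {'path': 'projects', 'type': 'tree',
     'url': 'https://api.github.com/repos/google/oss-fuzz/git/trees/abc123'},
    {'path': 'README.md', 'type': 'blob', 'url': 'https://example.invalid'},
]}
assert find_github_item_url(tree, 'projects').endswith('/trees/abc123')
assert find_github_item_url(tree, 'missing') is None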
157,096
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

The provided code snippet includes necessary dependencies for implementing the `get_projects_from_gcs` function. Write a Python function `def get_projects_from_gcs(gcs_url)` to solve the following problem: Get projects from GCS path. Here is the function:
def get_projects_from_gcs(gcs_url):
  """Get projects from GCS path."""
  data = json.loads(storage.read_data(gcs_url))
  return [(project['name'], project) for project in data['projects']]
Get projects from GCS path.
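The GCS file is expected to be a JSON object with a top-level 'projects' array; each entry's full dict is kept as the second tuple element. For example:

import json

data = json.loads(
    '{"projects": [{"name": "libpng", "sanitizers": ["address"]}]}')
projects = [(project['name'], project) for project in data['projects']]
assert projects == [('libpng', {'name': 'libpng', 'sanitizers': ['address']})]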
157,097
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

JOB_MAP = {
    'libfuzzer': {
        'x86_64': {
            'address': LIBFUZZER_ASAN_JOB,
            'memory': LIBFUZZER_MSAN_JOB,
            'undefined': LIBFUZZER_UBSAN_JOB,
            'none': LIBFUZZER_NONE_JOB,
        },
        'i386': {
            'address': LIBFUZZER_ASAN_I386_JOB,
            'none': LIBFUZZER_NONE_I386_JOB,
        },
        'arm': {
            'hardware': LIBFUZZER_HWASAN_JOB,
            'none': LIBFUZZER_NONE_JOB,
        },
    },
    'afl': {
        'x86_64': {
            'address': AFL_ASAN_JOB,
        },
    },
    'honggfuzz': {
        'x86_64': {
            'address': HONGGFUZZ_ASAN_JOB,
        },
    },
    'googlefuzztest': {
        'x86_64': {
            'address': GFT_ASAN_JOB,
            'memory': GFT_MSAN_JOB,
            'undefined': GFT_UBSAN_JOB,
        },
    },
    'none': {
        'x86_64': {
            'address': NO_ENGINE_ASAN_JOB,
        },
    },
    'centipede': {
        'x86_64': {
            'address': CENTIPEDE_ASAN_JOB,
        },
    },
}

DEFAULT_ARCHITECTURES = ['x86_64']
DEFAULT_SANITIZERS = ['address', 'undefined']
DEFAULT_ENGINES = ['libfuzzer', 'afl', 'honggfuzz']

def _to_experimental_job(job_info):
  job_info = copy.copy(job_info)
  job_info.experimental = True
  return job_info

def _process_sanitizers_field(sanitizers):
  """Pre-process sanitizers field into a map from sanitizer name -> dict of
  options."""
  processed_sanitizers = {}
  if not isinstance(sanitizers, list):
    return None

  # each field can either be a Map or a String:
  # sanitizers:
  #   - undefined:
  #       experimental: true
  #   - address
  #   - memory
  for sanitizer in sanitizers:
    if isinstance(sanitizer, str):
      processed_sanitizers[sanitizer] = {}
    elif isinstance(sanitizer, dict):
      for key, value in sanitizer.items():
        processed_sanitizers[key] = value
    else:
      return None

  return processed_sanitizers

The provided code snippet includes necessary dependencies for implementing the `get_jobs_for_project` function. Write a Python function `def get_jobs_for_project(project, info)` to solve the following problem: Return jobs for the project. Here is the function:
def get_jobs_for_project(project, info):
  """Return jobs for the project."""
  sanitizers = _process_sanitizers_field(
      info.get('sanitizers', DEFAULT_SANITIZERS))
  if not sanitizers:
    logs.log_error(f'Invalid sanitizers field for {project}.')
    return []

  engines = info.get('fuzzing_engines', DEFAULT_ENGINES)
  architectures = info.get('architectures', DEFAULT_ARCHITECTURES)

  jobs = []
  for engine in engines:
    if engine not in JOB_MAP:
      continue

    for architecture in architectures:
      if architecture not in JOB_MAP[engine]:
        continue

      for sanitizer, options in sanitizers.items():
        experimental = (
            options.get('experimental', False) or
            info.get('experimental', False))
        if sanitizer in JOB_MAP[engine][architecture]:
          job = JOB_MAP[engine][architecture][sanitizer]
          if experimental:
            job = _to_experimental_job(job)
          jobs.append(job)

  return jobs
Return jobs for the project.
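To see the selection logic end to end, here is the sanitizers pre-processing on a mixed string/map list, plus the triple loop reduced to key lookups. The job names are illustrative strings rather than the Job constants in JOB_MAP above:

job_map = {'libfuzzer': {'x86_64': {'address': 'libfuzzer_asan',
                                    'undefined': 'libfuzzer_ubsan'}}}

info = {'fuzzing_engines': ['libfuzzer'],
        'sanitizers': ['address', {'undefined': {'experimental': True}}]}

sanitizers = {}
for s in info['sanitizers']:
  if isinstance(s, str):
    sanitizers[s] = {}
  else:
    sanitizers.update(s)
assert sanitizers == {'address': {}, 'undefined': {'experimental': True}}

jobs = [job_map[e][a][s]
        for e in info['fuzzing_engines']
        for a in ['x86_64']
        for s in sanitizers if s in job_map[e][a]]
assert jobs == ['libfuzzer_asan', 'libfuzzer_ubsan']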
157,098
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

def _add_users_to_bucket(info, client, bucket_name, iam_policy):
  """Add user account to bucket."""
  ccs = sorted(
      ['user:' + convert_googlemail_to_gmail(cc) for cc in ccs_from_info(info)])
  binding = storage.get_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE)
  if binding:
    # buckets.getIamPolicy can return duplicate members when we add a
    # @gmail.com as well as @googlemail.com address for the same account.
    binding['members'] = sorted(list(set(binding['members'])))
    if binding['members'] == ccs:
      return iam_policy

    filtered_members = [
        member for member in binding['members'] if member in ccs
    ]

    if len(filtered_members) != len(binding['members']):
      # Remove old members.
      binding['members'] = filtered_members
      iam_policy = storage.set_bucket_iam_policy(client, bucket_name,
                                                 iam_policy)

  # We might have no binding either from the start or after filtering members
  # above. Create a new one in those cases.
  binding = storage.get_or_create_bucket_iam_binding(iam_policy,
                                                     OBJECT_VIEWER_IAM_ROLE)

  for cc in ccs:
    if cc in binding['members']:
      continue

    logs.log(f'Adding {cc} to bucket IAM for {bucket_name}.')
    # Add CCs one at a time since the API does not work with invalid or
    # non-Google emails.
    modified_iam_policy = storage.add_single_bucket_iam(
        client, iam_policy, OBJECT_VIEWER_IAM_ROLE, bucket_name, cc)
    if modified_iam_policy:
      iam_policy = modified_iam_policy
      binding = storage.get_bucket_iam_binding(iam_policy,
                                               OBJECT_VIEWER_IAM_ROLE)

  if not binding['members']:
    # Check that the final binding has members. Empty bindings are not valid.
    storage.remove_bucket_iam_binding(iam_policy, OBJECT_VIEWER_IAM_ROLE)

  return iam_policy

def _set_bucket_service_account(service_account, client, bucket_name,
                                iam_policy):
  """Set service account for a bucket."""
  # Add service account as objectAdmin.
  binding = storage.get_or_create_bucket_iam_binding(iam_policy,
                                                     OBJECT_ADMIN_IAM_ROLE)

  members = ['serviceAccount:' + service_account['email']]
  if members == binding['members']:
    # No changes required.
    return iam_policy

  binding['members'] = members
  return storage.set_bucket_iam_policy(client, bucket_name, iam_policy)

The provided code snippet includes necessary dependencies for implementing the `add_bucket_iams` function. Write a Python function `def add_bucket_iams(info, client, bucket_name, service_account)` to solve the following problem: Add CC'ed users to storage bucket IAM. Here is the function:
def add_bucket_iams(info, client, bucket_name, service_account):
  """Add CC'ed users to storage bucket IAM."""
  iam_policy = storage.get_bucket_iam_policy(client, bucket_name)
  if not iam_policy:
    return

  iam_policy = _add_users_to_bucket(info, client, bucket_name, iam_policy)
  _set_bucket_service_account(service_account, client, bucket_name, iam_policy)
Add CC'ed users to storage bucket IAM.
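The gmail/googlemail dedup plus sorted comparison is what lets _add_users_to_bucket short-circuit when the desired cc list already matches the binding. A tiny illustration of that comparison in isolation:

def normalized_members(members):
  # buckets.getIamPolicy may list the same account twice
  # (@gmail.com / @googlemail.com), so dedup before comparing.
  return sorted(set(members))

current = ['user:a@gmail.com', 'user:a@gmail.com', 'user:b@gmail.com']
desired = ['user:a@gmail.com', 'user:b@gmail.com']
assert normalized_members(current) == sorted(desired)  # nothing to change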
157,099
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

The provided code snippet includes necessary dependencies for implementing the `add_service_account_to_bucket` function. Write a Python function `def add_service_account_to_bucket(client, bucket_name, service_account, role)` to solve the following problem: Add service account to the gcr.io images bucket. Here is the function:
def add_service_account_to_bucket(client, bucket_name, service_account, role):
  """Add service account to the gcr.io images bucket."""
  iam_policy = storage.get_bucket_iam_policy(client, bucket_name)
  if not iam_policy:
    return

  binding = storage.get_or_create_bucket_iam_binding(iam_policy, role)

  member = 'serviceAccount:' + service_account['email']
  if member in binding['members']:
    # No changes required.
    return

  binding['members'].append(member)
  storage.set_bucket_iam_policy(client, bucket_name, iam_policy)
Add service account to the gcr.io images bucket.
157,100
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

The provided code snippet includes necessary dependencies for implementing the `has_maintainer` function. Write a Python function `def has_maintainer(info)` to solve the following problem: Return whether or not a project has at least one maintainer. Here is the function:
def has_maintainer(info):
  """Return whether or not a project has at least one maintainer."""
  return info.get('primary_contact') or info.get('auto_ccs')
Return whether or not a project has at least one maintainer.
157,101
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

MEMORY_SAFE_LANGUAGES = {'go', 'java', 'python', 'rust'}
OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT = 1.0
OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT = 0.2

def ccs_from_info(info):
  """Get list of CC's from project info."""

  def _get_ccs(field_name, allow_list=True):
    """Return list of emails to cc given a field name."""
    if field_name not in info:
      return []

    field_value = info.get(field_name)
    if allow_list and isinstance(field_value, list):
      return field_value
    if isinstance(field_value, str):
      return [field_value]
    if field_value is None:
      return []

    raise ProjectSetupError(f'Bad value for field {field_name}: {field_value}.')

  ccs = []
  ccs.extend(_get_ccs('primary_contact', allow_list=False))
  ccs.extend(_get_ccs('auto_ccs'))
  ccs.extend(_get_ccs('vendor_ccs'))
  return [utils.normalize_email(cc) for cc in ccs]

The provided code snippet includes necessary dependencies for implementing the `create_project_settings` function. Write a Python function `def create_project_settings(project, info, service_account)` to solve the following problem: Setup settings for ClusterFuzz (such as CPU distribution). Here is the function:
def create_project_settings(project, info, service_account):
  """Setup settings for ClusterFuzz (such as CPU distribution)."""
  key = ndb.Key(data_types.OssFuzzProject, project)
  oss_fuzz_project = key.get()

  # Expecting to run a blackbox fuzzer, so use high end hosts.
  is_high_end = info.get('blackbox', False)
  ccs = ccs_from_info(info)
  language = info.get('language')

  if oss_fuzz_project:
    if oss_fuzz_project.service_account != service_account['email']:
      oss_fuzz_project.service_account = service_account['email']
      oss_fuzz_project.put()

    if oss_fuzz_project.high_end != is_high_end:
      oss_fuzz_project.high_end = is_high_end
      oss_fuzz_project.put()

    if oss_fuzz_project.ccs != ccs:
      oss_fuzz_project.ccs = ccs
      oss_fuzz_project.put()
  else:
    if language in MEMORY_SAFE_LANGUAGES:
      cpu_weight = OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT
    else:
      cpu_weight = OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT

    data_types.OssFuzzProject(
        id=project,
        name=project,
        high_end=is_high_end,
        cpu_weight=cpu_weight,
        service_account=service_account['email'],
        ccs=ccs).put()
Setup settings for ClusterFuzz (such as CPU distribution).
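The only non-obvious branch is the CPU weight for newly created projects: languages in MEMORY_SAFE_LANGUAGES get a fraction of the default weight, presumably because memory-safety bugs are less likely there. The selection in isolation:

MEMORY_SAFE_LANGUAGES = {'go', 'java', 'python', 'rust'}
DEFAULT_CPU_WEIGHT = 1.0
MEMORY_SAFE_CPU_WEIGHT = 0.2

def cpu_weight_for(language):
  if language in MEMORY_SAFE_LANGUAGES:
    return MEMORY_SAFE_CPU_WEIGHT
  return DEFAULT_CPU_WEIGHT

assert cpu_weight_for('rust') == 0.2
assert cpu_weight_for('c++') == 1.0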
157,102
import base64
import collections
import copy
import json
import re

from google.cloud import ndb
import requests
import yaml

from clusterfuzz._internal.base import tasks
from clusterfuzz._internal.base import untrusted
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.config import db_config
from clusterfuzz._internal.config import local_config
from clusterfuzz._internal.datastore import data_handler
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.datastore import ndb_utils
from clusterfuzz._internal.fuzzing import fuzzer_selection
from clusterfuzz._internal.google_cloud_utils import pubsub
from clusterfuzz._internal.google_cloud_utils import storage
from clusterfuzz._internal.metrics import logs

from . import service_accounts

PUBSUB_PLATFORMS = ['linux']

def _create_pubsub_topic(name, client):
  """Create a pubsub topic and subscription if needed."""
  application_id = utils.get_application_id()

  topic_name = pubsub.topic_name(application_id, name)
  if client.get_topic(topic_name) is None:
    client.create_topic(topic_name)

  subscription_name = pubsub.subscription_name(application_id, name)
  if client.get_subscription(subscription_name) is None:
    client.create_subscription(subscription_name, topic_name)

The provided code snippet includes necessary dependencies for implementing the `create_pubsub_topics_for_untrusted` function. Write a Python function `def create_pubsub_topics_for_untrusted(project)` to solve the following problem: Create pubsub topics from untrusted sources for tasks. Here is the function:
def create_pubsub_topics_for_untrusted(project):
  """Create pubsub topics from untrusted sources for tasks."""
  client = pubsub.PubSubClient()
  for platform in PUBSUB_PLATFORMS:
    name = untrusted.queue_name(project, platform)
    _create_pubsub_topic(name, client)
Create pubsub topics from untrusted sources for tasks.
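The helper's get-then-create sequence is the standard idempotency idiom for Pub/Sub provisioning: a second run makes no changes. A sketch against a minimal fake client; this stand-in is not the pubsub.PubSubClient API, just enough interface to show the pattern:

class FakePubSubClient:
  def __init__(self):
    self.topics, self.subscriptions = set(), set()
  def get_topic(self, name):
    return name if name in self.topics else None
  def create_topic(self, name):
    self.topics.add(name)
  def get_subscription(self, name):
    return name if name in self.subscriptions else None
  def create_subscription(self, name, topic):
    self.subscriptions.add(name)

def ensure_topic(client, topic, subscription):
  if client.get_topic(topic) is None:
    client.create_topic(topic)
  if client.get_subscription(subscription) is None:
    client.create_subscription(subscription, topic)

client = FakePubSubClient()
ensure_topic(client, 'projects/app/topics/jobs-linux',
             'projects/app/subscriptions/jobs-linux')
ensure_topic(client, 'projects/app/topics/jobs-linux',
             'projects/app/subscriptions/jobs-linux')  # no-op on rerun
assert len(client.topics) == 1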