Add files using upload-large-folder tool
Browse files- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/bash_parser.py +235 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/compile_fuzztests.sh +126 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/install_ruby.sh +25 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/install_swift.sh +67 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/precompile_honggfuzz +45 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-image/Dockerfile +46 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/README.md +31 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/bad_build_check +494 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/coverage +549 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/coverage_helper +17 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/download_corpus +30 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/generate_differential_cov_report.py +228 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/install_deps.sh +37 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/install_java.sh +46 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/install_javascript.sh +30 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/jacoco_report_converter.py +174 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/reproduce +34 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/targets_list +10 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/test_all_test.py +38 -0
- local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/test_one.py +47 -0
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/bash_parser.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/python3
|
| 2 |
+
# Copyright 2024 Google LLC
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import os
|
| 17 |
+
import sys
|
| 18 |
+
|
| 19 |
+
from glob import glob
|
| 20 |
+
|
| 21 |
+
import bashlex
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def find_all_bash_scripts_in_src(root='/src/'):
  """Finds all bash scripts under *root* (default ``/src/``).

  These scripts may be needed for reading during AST parsing: when a
  given build script calls another local build script, the callee must
  be read and parsed as well.

  Args:
    root: Directory tree to scan for ``*.sh`` files.

  Returns:
    List of paths to the scripts of interest.
  """
  all_local_scripts = [
      script for dirpath, _, _ in os.walk(root)
      for script in glob(os.path.join(dirpath, '*.sh'))
  ]
  # Scripts belonging to the bundled fuzzing engines are not part of the
  # project's own build, so exclude them.
  to_ignore = {'aflplusplus', 'honggfuzz', '/fuzztest', '/centipede'}
  scripts_we_care_about = [
      script for script in all_local_scripts
      if not any(pattern in script for pattern in to_ignore)
  ]
  print(scripts_we_care_about)
  return scripts_we_care_about
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def should_discard_command(ast_tree) -> bool:
  """Returns True if the command should not be replayed, otherwise False.

  Configure-style commands (``./configure``, ``autoconf``, ``cmake``
  generation, ...) are skipped because the replay script only needs to
  rebuild, not reconfigure. ``cmake --build`` invocations are kept since
  they perform an actual build.
  """
  try:
    first_word = ast_tree.parts[0].word
  except (AttributeError, IndexError):
    # Node has no word parts; nothing to base a discard decision on.
    return False

  # `cmake --build` performs a build rather than a configure: keep it.
  if ('cmake' in first_word and
      any('--build' in part.word for part in ast_tree.parts)):
    return False

  cmds_to_avoid_replaying = {
      'configure', 'autoheader', 'autoconf', 'autoreconf', 'cmake', 'autogen.sh'
  }
  if any(cmd in first_word for cmd in cmds_to_avoid_replaying):
    return True

  # Avoid all "make clean" calls. We don't want to erase previously built
  # files.
  try:
    second_word = ast_tree.parts[1].word
  except (AttributeError, IndexError):
    return False
  if 'make' in first_word and 'clean' in second_word:
    return True

  # No match was found to commands we don't want to replay. There is no
  # indication we should avoid this one.
  return False
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def is_local_redirection(ast_node, all_local_scripts):
  """Return the list of scripts corresponding to the command, in case the
  command is an execution of a local script.

  Two invocation styles are recognised:
    - sourcing by path:            ``. random/path/build.sh``
    - execution via ``$SRC``:      ``$SRC/random/path/build.sh``

  Returns a (possibly empty) list of matching script paths.
  """

  def _matching_scripts(word):
    # On disk, '$SRC' corresponds to a path component named 'src'.
    cmd_to_exec = word.replace('$SRC', 'src')
    return [
        script for script in all_local_scripts
        if script.endswith(cmd_to_exec)
    ]

  # Capture local script called with: . random/path/build.sh
  if len(ast_node.parts) >= 2:
    try:
      first_word = ast_node.parts[0].word
    except AttributeError:
      return []
    if first_word == '.':
      return _matching_scripts(ast_node.parts[1].word)

  # Capture a local script called with: $SRC/random/path/build.sh
  if len(ast_node.parts) >= 1:
    if '$SRC' in ast_node.parts[0].word:
      return _matching_scripts(ast_node.parts[0].word)

  return []
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def handle_ast_command(ast_node, all_scripts_in_fs, raw_script):
  """Generate bash script string for a command node.

  Discarded commands (configure steps, `make clean`, ...) produce ''.
  Calls to other local scripts are expanded recursively via
  parse_script().
  """
  if should_discard_command(ast_node):
    return ''

  # If the command executes another local script, inline its parsed
  # contents in place of the call.
  matches = is_local_redirection(ast_node, all_scripts_in_fs)
  if len(matches) == 1:
    # Return the recursively parsed sub-script. Previously the parsed
    # result was built and then dropped by returning '', which silently
    # lost the nested build script's commands.
    return parse_script(matches[0], all_scripts_in_fs) + '\n'

  # Extract the command verbatim from the raw script string.
  idx_start = ast_node.pos[0]
  idx_end = ast_node.pos[1]
  new_script = raw_script[idx_start:idx_end]

  # If mkdir is used, then ensure that '-p' is provided, as otherwise we
  # will run into failures. We don't have to worry about multiple uses of
  # -p as `mkdir -p -p -p` is valid.
  return new_script.replace('mkdir', 'mkdir -p')
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def handle_ast_list(ast_node, all_scripts_in_fs, raw_script):
  """Handles bashlex AST list node.

  Rebuilds the list by handling each part individually. Ideally the exact
  whitespace/operators between positions would be tracked, but joining
  parts with single spaces and validating the result by re-parsing is
  sufficient for the replay use case.

  (A dead alternative code path that copied the whole list span verbatim,
  guarded by a hard-coded ``try_hard = 1`` flag, has been removed.)
  """
  new_script = ''
  for part in ast_node.parts:
    if part.kind == 'list':
      new_script += handle_ast_list(part, all_scripts_in_fs, raw_script)
    elif part.kind == 'command':
      new_script += handle_ast_command(part, all_scripts_in_fs, raw_script)
    else:
      # Operators and other tokens are copied verbatim from the source.
      idx_start = part.pos[0]
      idx_end = part.pos[1]
      new_script += raw_script[idx_start:idx_end]
    new_script += ' '

  # Make sure what was created is valid syntax, and otherwise return empty
  try:
    bashlex.parse(new_script)
  except:  # pylint: disable=bare-except
    # Maybe return the original here instead of skipping?
    return ''
  return new_script
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def handle_ast_compound(ast_node, all_scripts_in_fs, raw_script):
  """Handles a bashlex compound AST node by copying its source text
  verbatim, followed by a newline."""
  span_begin, span_end = ast_node.pos[0], ast_node.pos[1]
  return raw_script[span_begin:span_end] + '\n'
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def handle_node(ast_node, all_scripts_in_fs, build_script):
  """Generates a bash script string for a given AST node, dispatching on
  its kind."""
  kind = ast_node.kind
  if kind == 'command':
    return handle_ast_command(ast_node, all_scripts_in_fs, build_script)
  if kind == 'list':
    return handle_ast_list(ast_node, all_scripts_in_fs, build_script)
  if kind == 'compound':
    print('todo: handle compound')
    return handle_ast_compound(ast_node, all_scripts_in_fs, build_script)
  if kind == 'pipeline':
    # Pipelines are not supported.
    return ''
  raise Exception(f'Missing node handling: {kind}')
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def parse_script(bash_script, all_scripts) -> str:
  """Top-level bash script parser.

  Reads *bash_script*, parses it with bashlex and rebuilds a replayable
  script node by node. Returns '' if the script fails to parse.
  """
  with open(bash_script, 'r', encoding='utf-8') as f:
    build_script = f.read()

  try:
    parts = bashlex.parse(build_script)
  except bashlex.errors.ParsingError:
    return ''

  pieces = []
  for part in parts:
    pieces.append(handle_node(part, all_scripts, build_script))
    pieces.append('\n')
    # Debug output of the parsed AST.
    print("-" * 45)
    print(part.kind)
    print(part.dump())

  return ''.join(pieces)
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def main():
  """Main function.

  Parses the build script given as argv[1] and writes the resulting
  replayable script into both $OUT and $SRC.
  """
  all_scripts = find_all_bash_scripts_in_src()
  replay_bash_script = parse_script(sys.argv[1], all_scripts)

  print("REPLAYABLE BASH SCRIPT")
  print("#" * 60)
  print(replay_bash_script)
  print("#" * 60)

  out_dir = os.getenv('OUT', '/out')
  src_dir = os.getenv('SRC', '/src')
  for destination in (f'{out_dir}/replay-build-script.sh',
                      f'{src_dir}/replay_build.sh'):
    with open(destination, 'w', encoding='utf-8') as f:
      f.write(replay_bash_script)


if __name__ == "__main__":
  main()
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/compile_fuzztests.sh
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eu
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Builds FuzzTest-based fuzzers with bazel and installs per-entrypoint
# wrapper scripts into $OUT.

set -x

# In order to identify fuzztest test case "bazel query" is used to search
# the project. A search of the entire project is done with a default "...",
# however, some projects may fail to, or have very long processing time, if
# searching the entire project. Additionally, it may include fuzzers in
# dependencies, which should not be built as part of a given project.
# Tensorflow is an example project that will fail when the entire project is
# queried. FUZZTEST_TARGET_FOLDER makes it possible to specify the folder
# where fuzztest fuzzers should be searched for. FUZZTEST_TARGET_FOLDER is
# passed to "bazel query" below.
if [[ ${FUZZTEST_TARGET_FOLDER:-"unset"} == "unset" ]];
then
  export TARGET_FOLDER="..."
else
  TARGET_FOLDER=${FUZZTEST_TARGET_FOLDER}
fi

BUILD_ARGS="--config=oss-fuzz --subcommands"
if [[ ${FUZZTEST_EXTRA_ARGS:-"unset"} != "unset" ]];
then
  BUILD_ARGS="$BUILD_ARGS ${FUZZTEST_EXTRA_ARGS}"
fi

# Trigger setup_configs rule of fuzztest as it generates the necessary
# configuration file based on OSS-Fuzz environment variables.
bazel run @com_google_fuzztest//bazel:setup_configs >> /etc/bazel.bazelrc

# Bazel target names of the fuzz binaries.
FUZZ_TEST_BINARIES=$(bazel query "kind(\"cc_test\", rdeps(${TARGET_FOLDER}, @com_google_fuzztest//fuzztest:fuzztest_gtest_main))")

# Bazel output paths of the fuzz binaries.
FUZZ_TEST_BINARIES_OUT_PATHS=$(bazel cquery "kind(\"cc_test\", rdeps(${TARGET_FOLDER}, @com_google_fuzztest//fuzztest:fuzztest_gtest_main))" --output=files)

# Build the project and fuzz binaries
# Expose `FUZZTEST_EXTRA_TARGETS` environment variable, in the event a project
# includes non-FuzzTest fuzzers then this can be used to compile these in the
# same `bazel build` command as when building the FuzzTest fuzzers.
# This is to avoid having to call `bazel build` twice.
bazel build $BUILD_ARGS -- ${FUZZ_TEST_BINARIES[*]} ${FUZZTEST_EXTRA_TARGETS:-}

# Iterate the fuzz binaries and list each fuzz entrypoint in the binary. For
# each entrypoint create a wrapper script that calls into the binaries the
# given entrypoint as argument.
# The scripts will be named:
# {binary_name}@{fuzztest_entrypoint}
for fuzz_main_file in $FUZZ_TEST_BINARIES_OUT_PATHS; do
  FUZZ_TESTS=$($fuzz_main_file --list_fuzz_tests)
  cp ${fuzz_main_file} $OUT/
  fuzz_basename=$(basename $fuzz_main_file)
  # Remove the execute bit so the raw binary is not picked up as a fuzzer
  # itself; the wrapper script re-adds it at run time.
  chmod -x $OUT/$fuzz_basename
  for fuzz_entrypoint in $FUZZ_TESTS; do
    TARGET_FUZZER="${fuzz_basename}@$fuzz_entrypoint"

    # Write executer script
    echo "#!/bin/sh
# LLVMFuzzerTestOneInput for fuzzer detection.
this_dir=\$(dirname \"\$0\")
chmod +x \$this_dir/$fuzz_basename
\$this_dir/$fuzz_basename --fuzz=$fuzz_entrypoint -- \$@" > $OUT/$TARGET_FUZZER
    chmod +x $OUT/$TARGET_FUZZER
  done
done

# Synchronise coverage directory to bazel output artifacts. This is a
# best-effort basis in that it will include source code in common
# bazel output folders.
# For projects that store results in non-standard folders or want to
# manage what code to include in the coverage report more specifically,
# the FUZZTEST_DO_SYNC environment variable is made available. Projects
# can then implement a custom way of synchronising source code with the
# coverage build. Set FUZZTEST_DO_SYNC to something other than "yes" and
# no effort will be made to automatically synchronise the source code with
# the code coverage visualisation utility.
if [[ "$SANITIZER" = "coverage" && ${FUZZTEST_DO_SYNC:-"yes"} == "yes" ]]
then
  # Synchronize bazel source files to coverage collection.
  declare -r REMAP_PATH="${OUT}/proc/self/cwd"
  mkdir -p "${REMAP_PATH}"

  # Synchronize the folder bazel-BAZEL_OUT_PROJECT.
  # Only source-like files are copied; everything else is excluded.
  declare -r RSYNC_FILTER_ARGS=("--include" "*.h" "--include" "*.cc" "--include" \
    "*.hpp" "--include" "*.cpp" "--include" "*.c" "--include" "*/" "--include" "*.inc" \
    "--exclude" "*")

  # All bazel-* convenience symlinks except bazel-bin and bazel-testlogs.
  project_folders="$(find . -name 'bazel-*' -type l -printf '%P\n' | \
    grep -v -x -F \
    -e 'bazel-bin' \
    -e 'bazel-testlogs')"
  for link in $project_folders; do
    if [[ -d "${PWD}"/$link/external ]]
    then
      rsync -avLk "${RSYNC_FILTER_ARGS[@]}" "${PWD}"/$link/external "${REMAP_PATH}"
    fi
    # k8-opt is a common path for storing bazel output artifacts, e.g. bazel-out/k8-opt.
    # It's the output folder for default amd-64 builds, but projects may specify custom
    # platform output directories, see: https://github.com/bazelbuild/bazel/issues/13818
    # We support the default at the moment, and if a project needs custom synchronizing of
    # output artifacts and code coverage we currently recommend using FUZZTEST_DO_SYNC.
    if [[ -d "${PWD}"/$link/k8-opt ]]
    then
      rsync -avLk "${RSYNC_FILTER_ARGS[@]}" "${PWD}"/$link/k8-opt "${REMAP_PATH}"/$link
    fi
  done

  # Delete symlinks and sync the current folder.
  find . -type l -ls -delete
  rsync -av ${PWD}/ "${REMAP_PATH}"
fi
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/install_ruby.sh
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Installs Ruby 3.3.1 via RVM.

apt update
apt install -y lsb-release software-properties-common gnupg2 binutils xz-utils libyaml-dev
# Import GPG keys before running the RVM installer (presumably the RVM
# release signing keys -- verify against https://rvm.io when updating).
gpg2 --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
curl -sSL https://get.rvm.io | bash

# Load the RVM shell functions into this session.
. /etc/profile.d/rvm.sh

rvm install ruby-3.3.1
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/install_swift.sh
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eux
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Installs the Swift 5.10.1 toolchain and builds a patched llvm-symbolizer
# (installed as llvm-symbolizer-swift).

# Runtime dependencies of the Swift toolchain.
SWIFT_PACKAGES="wget \
binutils \
git \
gnupg2 \
libc6-dev \
libcurl4 \
libedit2 \
libgcc-9-dev \
libpython2.7 \
libsqlite3-0 \
libstdc++-9-dev \
libxml2 \
libz3-dev \
pkg-config \
tzdata \
zlib1g-dev"
# Build-time dependencies for the llvm-symbolizer build below.
SWIFT_SYMBOLIZER_PACKAGES="build-essential make cmake ninja-build git python3 g++-multilib binutils-dev zlib1g-dev"
apt-get update && apt install -y $SWIFT_PACKAGES && \
  apt install -y $SWIFT_SYMBOLIZER_PACKAGES --no-install-recommends

# Fetch and unpack the official Swift release into /usr.
wget -q https://download.swift.org/swift-5.10.1-release/ubuntu2004/swift-5.10.1-RELEASE/swift-5.10.1-RELEASE-ubuntu20.04.tar.gz
tar xzf swift-5.10.1-RELEASE-ubuntu20.04.tar.gz
cp -r swift-5.10.1-RELEASE-ubuntu20.04/usr/* /usr/
rm -rf swift-5.10.1-RELEASE-ubuntu20.04.tar.gz swift-5.10.1-RELEASE-ubuntu20.04/
# TODO: Move to a separate work dir
git clone https://github.com/llvm/llvm-project.git
cd llvm-project
git checkout 63bf228450b8403e0c5e828d276be47ffbcd00d0 # TODO: Keep in sync with base-clang.
# Apply the local symbolizer patch shipped alongside this script in $SRC.
git apply ../llvmsymbol.diff --verbose
cmake -G "Ninja" \
  -DLIBCXX_ENABLE_SHARED=OFF \
  -DLIBCXX_ENABLE_STATIC_ABI_LIBRARY=ON \
  -DLIBCXXABI_ENABLE_SHARED=OFF \
  -DCMAKE_BUILD_TYPE=Release \
  -DLLVM_TARGETS_TO_BUILD=X86 \
  -DCMAKE_C_COMPILER=clang \
  -DCMAKE_CXX_COMPILER=clang++ \
  -DLLVM_BUILD_TESTS=OFF \
  -DLLVM_INCLUDE_TESTS=OFF llvm
ninja -j$(nproc) llvm-symbolizer
cp bin/llvm-symbolizer /usr/local/bin/llvm-symbolizer-swift

cd $SRC
rm -rf llvm-project llvmsymbol.diff

# TODO: Cleanup packages
apt-get remove --purge -y wget zlib1g-dev
apt-get autoremove -y
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-builder/precompile_honggfuzz
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eu
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Precompiles the honggfuzz engine in $SRC/honggfuzz and archives its
# runtime objects into honggfuzz.a.

echo "Precompiling honggfuzz"
export BUILD_OSSFUZZ_STATIC=true

# Build-time dependencies; removed again after the build below.
PACKAGES=(
  libunwind8-dev
  libblocksruntime-dev
  liblzma-dev
  libiberty-dev
  zlib1g-dev
  pkg-config)

apt-get update && apt-get install -y ${PACKAGES[@]}

pushd $SRC/honggfuzz > /dev/null
make clean
# These CFLAGs match honggfuzz's default, with the exception of -mtune to
# improve portability and `-D_HF_LINUX_NO_BFD` to remove assembly instructions
# from the filenames.
CC=clang CFLAGS="-O3 -funroll-loops -D_HF_LINUX_NO_BFD" make

# libhfuzz.a will be added by CC/CXX linker directly during linking,
# but it's defined here to satisfy the build infrastructure
ar rcs honggfuzz.a libhfuzz/*.o libhfcommon/*.o
popd > /dev/null

apt-get remove -y --purge ${PACKAGES[@]}
apt-get autoremove -y
echo "Done."
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-image/Dockerfile
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################

# Base image for all other images.

# Pinned by digest for reproducible builds.
ARG parent_image=ubuntu:20.04@sha256:4a45212e9518f35983a976eead0de5eecc555a2f047134e9dd2cfc589076a00d

FROM $parent_image

ENV DEBIAN_FRONTEND noninteractive
# Install tzdata to match ClusterFuzz
# (https://github.com/google/oss-fuzz/issues/9280).

# Use Azure mirrors for consistent apt repository access.
RUN cp /etc/apt/sources.list /etc/apt/sources.list.backup && \
    sed -i 's|http://archive.ubuntu.com/ubuntu/|http://azure.archive.ubuntu.com/ubuntu/|g' /etc/apt/sources.list && \
    sed -i 's|http://security.ubuntu.com/ubuntu/|http://azure.archive.ubuntu.com/ubuntu/|g' /etc/apt/sources.list


RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y libc6-dev binutils libgcc-9-dev tzdata locales locales-all && \
    apt-get autoremove -y

# Standard OSS-Fuzz directory layout, exposed to all child images.
ENV OUT=/out
ENV SRC=/src
ENV WORK=/work
ENV PATH="$PATH:/out"
ENV HWASAN_OPTIONS=random_tags=0
# set locale to utf8
ENV LC_ALL=C.UTF-8

RUN mkdir -p $OUT $SRC $WORK && chmod a+rwx $OUT $SRC $WORK
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/README.md
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# base-runner
|
| 2 |
+
> Base image for fuzzer runners.
|
| 3 |
+
|
| 4 |
+
```bash
|
| 5 |
+
docker run -ti ghcr.io/aixcc-finals/base-runner <command> <args>
|
| 6 |
+
```
|
| 7 |
+
|
| 8 |
+
## Commands
|
| 9 |
+
|
| 10 |
+
| Command | Description |
|
| 11 |
+
|---------|-------------|
|
| 12 |
+
| `reproduce <fuzzer_name> <fuzzer_options>` | build all fuzz targets and run specified one with testcase `/testcase` and given options. |
|
| 13 |
+
| `run_fuzzer <fuzzer_name> <fuzzer_options>` | runs specified fuzzer combining options with `.options` file |
|
| 14 |
+
| `test_all.py` | runs every binary in `/out` as a fuzzer for a while to ensure it works. |
|
| 15 |
+
| `coverage <fuzzer_name>` | generate a coverage report for the given fuzzer. |
|
| 16 |
+
|
| 17 |
+
# Examples
|
| 18 |
+
|
| 19 |
+
- *Reproduce using latest OSS-Fuzz build:*
|
| 20 |
+
|
| 21 |
+
<pre>
|
| 22 |
+
docker run --rm -ti -v <b><i><testcase_path></i></b>:/testcase gcr.io/oss-fuzz/<b><i>$PROJECT_NAME</i></b> reproduce <b><i><fuzzer_name></i></b>
|
| 23 |
+
</pre>
|
| 24 |
+
|
| 25 |
+
- *Reproduce using local source checkout:*
|
| 26 |
+
|
| 27 |
+
<pre>
|
| 28 |
+
docker run --rm -ti -v <b><i><source_path></i></b>:/src/<b><i>$PROJECT_NAME</i></b> \
|
| 29 |
+
-v <b><i><testcase_path></i></b>:/testcase gcr.io/oss-fuzz/<b><i>$PROJECT_NAME</i></b> \
|
| 30 |
+
reproduce <b><i><fuzzer_name></i></b>
|
| 31 |
+
</pre>
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/bad_build_check
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -u
|
| 2 |
+
# Copyright 2017 Google Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
################################################################################
|
| 17 |
+
|
| 18 |
+
# A minimal number of runs to test fuzz target with a non-empty input.
MIN_NUMBER_OF_RUNS=4

# Minimum number of instrumented edges expected from a properly built target.
# The "example" target has 73 with ASan, 65 with UBSan, and 6648 with MSan.
# Real world targets have greater values (arduinojson: 407, zlib: 664).
# Mercurial's bdiff_fuzzer has 116 PCs when built with ASan.
THRESHOLD_FOR_NUMBER_OF_EDGES=100

# A fuzz target is supposed to have at least two functions, such as
# LLVMFuzzerTestOneInput and an API that is being called from there.
THRESHOLD_FOR_NUMBER_OF_FUNCTIONS=2

# Threshold values for different sanitizers used by instrumentation checks.
# Each value bounds the number of calls into that sanitizer's runtime found
# by disassembling the target binary.
ASAN_CALLS_THRESHOLD_FOR_ASAN_BUILD=1000
ASAN_CALLS_THRESHOLD_FOR_NON_ASAN_BUILD=0

# The value below can definitely be higher (like 500-1000), but avoid being too
# aggressive here while still evaluating the DFT-based fuzzing approach.
DFSAN_CALLS_THRESHOLD_FOR_DFSAN_BUILD=100
DFSAN_CALLS_THRESHOLD_FOR_NON_DFSAN_BUILD=0

MSAN_CALLS_THRESHOLD_FOR_MSAN_BUILD=1000
# Some engines (e.g. honggfuzz) may make a very small number of calls to msan
# for memory poisoning.
MSAN_CALLS_THRESHOLD_FOR_NON_MSAN_BUILD=3

# Usually, a non UBSan build (e.g. ASan) has 165 calls to UBSan runtime. The
# majority of targets built with UBSan have 200+ UBSan calls, but there are
# some very small targets that may have < 200 UBSan calls even in a UBSan build.
# Use the threshold value of 168 (slightly > 165) for UBSan build.
UBSAN_CALLS_THRESHOLD_FOR_UBSAN_BUILD=168

# It would be risky to use the threshold value close to 165 for non UBSan build,
# as UBSan runtime may change any time and thus we could have different number
# of calls to UBSan runtime even in ASan build. With that, we use the threshold
# value of 200 that would detect unnecessary UBSan instrumentation in the vast
# majority of targets, except of a handful very small ones, which would not be
# a big concern either way as the overhead for them would not be significant.
UBSAN_CALLS_THRESHOLD_FOR_NON_UBSAN_BUILD=200

# ASan builds on i386 generally have about 250 UBSan runtime calls.
if [[ $ARCHITECTURE == 'i386' ]]
then
  UBSAN_CALLS_THRESHOLD_FOR_NON_UBSAN_BUILD=280
fi
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Verify that the given fuzz target is correctly built to run with a particular
# engine.
#
# Arguments:
#   $1 - path to the fuzz target binary.
# Globals read: FUZZING_ENGINE, SANITIZER, HELPER, MIN_NUMBER_OF_RUNS,
#   THRESHOLD_FOR_NUMBER_OF_EDGES, THRESHOLD_FOR_NUMBER_OF_FUNCTIONS.
# Returns 1 (with a "BAD BUILD" diagnostic on stdout) when the smoke run for
# the configured engine fails or shows no instrumentation; 0 otherwise.
function check_engine {
  local FUZZER=$1
  local FUZZER_NAME=$(basename $FUZZER)
  local FUZZER_OUTPUT="/tmp/$FUZZER_NAME.output"
  local CHECK_FAILED=0

  if [[ "$FUZZING_ENGINE" == libfuzzer ]]; then
    # Store fuzz target's output into a temp file to be used for further checks.
    $FUZZER -seed=1337 -runs=$MIN_NUMBER_OF_RUNS &>$FUZZER_OUTPUT
    CHECK_FAILED=$(egrep "ERROR: no interesting inputs were found. Is the code instrumented" -c $FUZZER_OUTPUT)
    if (( $CHECK_FAILED > 0 )); then
      echo "BAD BUILD: $FUZZER does not seem to have coverage instrumentation."
      cat $FUZZER_OUTPUT
      # Bail out as the further check does not make any sense, there are 0 PCs.
      return 1
    fi

    # Parse the edge/guard count out of libFuzzer's "INFO: Loaded N module(s)"
    # startup banner.
    local NUMBER_OF_EDGES=$(grep -Po "INFO: Loaded [[:digit:]]+ module.*\(.*(counters|guards)\):[[:space:]]+\K[[:digit:]]+" $FUZZER_OUTPUT)

    # If a fuzz target fails to start, grep won't find anything, so bail out early to let check_startup_crash deal with it.
    [[ -z "$NUMBER_OF_EDGES" ]] && return

    if (( $NUMBER_OF_EDGES < $THRESHOLD_FOR_NUMBER_OF_EDGES )); then
      echo "BAD BUILD: $FUZZER seems to have only partial coverage instrumentation."
    fi
  elif [[ "$FUZZING_ENGINE" == afl ]]; then
    # Give the AFL fork server a generous init timeout, then look for the
    # banner afl-fuzz prints once the target is up and running.
    AFL_FORKSRV_INIT_TMOUT=30000 AFL_NO_UI=1 SKIP_SEED_CORPUS=1 timeout --preserve-status -s INT 35s run_fuzzer $FUZZER_NAME &>$FUZZER_OUTPUT
    CHECK_PASSED=$(egrep "All set and ready to roll" -c $FUZZER_OUTPUT)
    if (( $CHECK_PASSED == 0 )); then
      echo "BAD BUILD: fuzzing $FUZZER with afl-fuzz failed."
      cat $FUZZER_OUTPUT
      return 1
    fi
  elif [[ "$FUZZING_ENGINE" == honggfuzz ]]; then
    # A healthy honggfuzz session prints per-iteration "Sz:... Tm:..." stats.
    SKIP_SEED_CORPUS=1 timeout --preserve-status -s INT 20s run_fuzzer $FUZZER_NAME &>$FUZZER_OUTPUT
    CHECK_PASSED=$(egrep "^Sz:[0-9]+ Tm:[0-9]+" -c $FUZZER_OUTPUT)
    if (( $CHECK_PASSED == 0 )); then
      echo "BAD BUILD: fuzzing $FUZZER with honggfuzz failed."
      cat $FUZZER_OUTPUT
      return 1
    fi
  elif [[ "$FUZZING_ENGINE" == dataflow ]]; then
    $FUZZER &> $FUZZER_OUTPUT
    # The dataflow binary reports how many functions were instrumented.
    local NUMBER_OF_FUNCTIONS=$(grep -Po "INFO:\s+\K[[:digit:]]+(?=\s+instrumented function.*)" $FUZZER_OUTPUT)
    [[ -z "$NUMBER_OF_FUNCTIONS" ]] && NUMBER_OF_FUNCTIONS=0
    if (( $NUMBER_OF_FUNCTIONS < $THRESHOLD_FOR_NUMBER_OF_FUNCTIONS )); then
      echo "BAD BUILD: $FUZZER does not seem to be properly built in 'dataflow' config."
      cat $FUZZER_OUTPUT
      return 1
    fi
  elif [[ "$FUZZING_ENGINE" == centipede \
    && ("${HELPER:-}" == True || "$SANITIZER" == none ) ]]; then
    # Performs run test on unsanitized binaries with auxiliary sanitized
    # binaries if they are built with helper.py.
    # Performs run test on unsanitized binaries without auxiliary sanitized
    # binaries if they are from trial build and production build.
    # TODO(Dongge): Support run test with sanitized binaries for trial and
    # production build.
    SKIP_SEED_CORPUS=1 timeout --preserve-status -s INT 20s run_fuzzer $FUZZER_NAME &>$FUZZER_OUTPUT
    CHECK_PASSED=$(egrep "\[S0.0] begin-fuzz: ft: 0 corp: 0/0" -c $FUZZER_OUTPUT)
    if (( $CHECK_PASSED == 0 )); then
      echo "BAD BUILD: fuzzing $FUZZER with centipede failed."
      cat $FUZZER_OUTPUT
      return 1
    fi
  fi

  return 0
}
|
| 136 |
+
|
| 137 |
+
# Verify that the given fuzz target has been built properly and works.
#
# Arguments:
#   $1 - path to the fuzz target binary.
# Runs a short session under the configured $FUZZING_ENGINE and returns 1
# (printing the captured output) when the target crashes or exits at startup.
function check_startup_crash {
  local FUZZER=$1
  local FUZZER_NAME=$(basename $FUZZER)
  local FUZZER_OUTPUT="/tmp/$FUZZER_NAME.output"
  local CHECK_PASSED=0

  case "$FUZZING_ENGINE" in
    libfuzzer)
      # Skip seed corpus as there is another explicit check that uses seed corpora.
      SKIP_SEED_CORPUS=1 run_fuzzer $FUZZER_NAME -seed=1337 -runs=$MIN_NUMBER_OF_RUNS &>$FUZZER_OUTPUT
      # libFuzzer prints "Done N runs" only when it survived all N iterations.
      CHECK_PASSED=$(egrep "Done $MIN_NUMBER_OF_RUNS runs" -c $FUZZER_OUTPUT)
      ;;
    afl)
      AFL_FORKSRV_INIT_TMOUT=30000 AFL_NO_UI=1 SKIP_SEED_CORPUS=1 timeout --preserve-status -s INT 35s run_fuzzer $FUZZER_NAME &>$FUZZER_OUTPUT
      # AFL reports target crashes explicitly; absence of that message means
      # the target started up fine.
      if [ $(egrep "target binary (crashed|terminated)" -c $FUZZER_OUTPUT) -eq 0 ]; then
        CHECK_PASSED=1
      fi
      ;;
    dataflow)
      # TODO(https://github.com/google/oss-fuzz/issues/1632): add check for
      # binaries compiled with dataflow engine when the interface becomes stable.
      CHECK_PASSED=1
      ;;
    *)
      # TODO: add checks for another fuzzing engines if possible.
      CHECK_PASSED=1
      ;;
  esac

  if [ "$CHECK_PASSED" -eq "0" ]; then
    echo "BAD BUILD: $FUZZER seems to have either startup crash or exit:"
    cat $FUZZER_OUTPUT
    return 1
  fi

  return 0
}
|
| 170 |
+
|
| 171 |
+
# Mixed sanitizers check for ASan build.
#
# Arguments:
#   $1 - fuzz target path (used in diagnostics only).
#   $2..$5 - counts of calls into the ASan, DFSan, MSan and UBSan runtimes
#            found in the binary's disassembly.
# Returns 1 with a "BAD BUILD" message on the first mismatch, 0 otherwise.
function check_asan_build {
  local fuzzer=$1
  local asan_calls=$2
  local dfsan_calls=$3
  local msan_calls=$4
  local ubsan_calls=$5

  # Test each sanitizer separately so the diagnostic names the exact mismatch.
  if (( asan_calls < ASAN_CALLS_THRESHOLD_FOR_ASAN_BUILD )); then
    echo "BAD BUILD: $fuzzer does not seem to be compiled with ASan."
    return 1
  fi

  if (( dfsan_calls > DFSAN_CALLS_THRESHOLD_FOR_NON_DFSAN_BUILD )); then
    echo "BAD BUILD: ASan build of $fuzzer seems to be compiled with DFSan."
    return 1
  fi

  if (( msan_calls > MSAN_CALLS_THRESHOLD_FOR_NON_MSAN_BUILD )); then
    echo "BAD BUILD: ASan build of $fuzzer seems to be compiled with MSan."
    return 1
  fi

  if (( ubsan_calls > UBSAN_CALLS_THRESHOLD_FOR_NON_UBSAN_BUILD )); then
    echo "BAD BUILD: ASan build of $fuzzer seems to be compiled with UBSan."
    return 1
  fi

  return 0
}
|
| 202 |
+
|
| 203 |
+
# Mixed sanitizers check for DFSan build.
#
# Arguments:
#   $1 - fuzz target path (used in diagnostics only).
#   $2..$5 - counts of calls into the ASan, DFSan, MSan and UBSan runtimes
#            found in the binary's disassembly.
# Returns 1 with a "BAD BUILD" message on the first mismatch, 0 otherwise.
function check_dfsan_build {
  local FUZZER=$1
  local ASAN_CALLS=$2
  local DFSAN_CALLS=$3
  local MSAN_CALLS=$4
  local UBSAN_CALLS=$5

  # Perform all the checks for more detailed error message.
  if (( $ASAN_CALLS > $ASAN_CALLS_THRESHOLD_FOR_NON_ASAN_BUILD )); then
    echo "BAD BUILD: DFSan build of $FUZZER seems to be compiled with ASan."
    return 1
  fi

  if (( $DFSAN_CALLS < $DFSAN_CALLS_THRESHOLD_FOR_DFSAN_BUILD )); then
    echo "BAD BUILD: $FUZZER does not seem to be compiled with DFSan."
    return 1
  fi

  # Fixed copy-paste error: these two diagnostics previously said
  # "ASan build of ...", but this function checks the DFSan build.
  if (( $MSAN_CALLS > $MSAN_CALLS_THRESHOLD_FOR_NON_MSAN_BUILD )); then
    echo "BAD BUILD: DFSan build of $FUZZER seems to be compiled with MSan."
    return 1
  fi

  if (( $UBSAN_CALLS > $UBSAN_CALLS_THRESHOLD_FOR_NON_UBSAN_BUILD )); then
    echo "BAD BUILD: DFSan build of $FUZZER seems to be compiled with UBSan."
    return 1
  fi

  return 0
}
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
# Mixed sanitizers check for MSan build.
#
# Arguments:
#   $1 - fuzz target path (used in diagnostics only).
#   $2..$5 - counts of calls into the ASan, DFSan, MSan and UBSan runtimes
#            found in the binary's disassembly.
# Returns 1 with a "BAD BUILD" message on the first mismatch, 0 otherwise.
function check_msan_build {
  local fuzzer=$1
  local asan_calls=$2
  local dfsan_calls=$3
  local msan_calls=$4
  local ubsan_calls=$5

  # Run every comparison in a fixed order so the first failing sanitizer is
  # the one reported.
  if (( asan_calls > ASAN_CALLS_THRESHOLD_FOR_NON_ASAN_BUILD )); then
    echo "BAD BUILD: MSan build of $fuzzer seems to be compiled with ASan."
    return 1
  fi

  if (( dfsan_calls > DFSAN_CALLS_THRESHOLD_FOR_NON_DFSAN_BUILD )); then
    echo "BAD BUILD: MSan build of $fuzzer seems to be compiled with DFSan."
    return 1
  fi

  if (( msan_calls < MSAN_CALLS_THRESHOLD_FOR_MSAN_BUILD )); then
    echo "BAD BUILD: $fuzzer does not seem to be compiled with MSan."
    return 1
  fi

  if (( ubsan_calls > UBSAN_CALLS_THRESHOLD_FOR_NON_UBSAN_BUILD )); then
    echo "BAD BUILD: MSan build of $fuzzer seems to be compiled with UBSan."
    return 1
  fi

  return 0
}
|
| 267 |
+
|
| 268 |
+
# Mixed sanitizers check for UBSan build.
#
# Arguments:
#   $1 - fuzz target path (used in diagnostics only).
#   $2..$5 - counts of calls into the ASan, DFSan, MSan and UBSan runtimes
#            found in the binary's disassembly.
# Returns 1 with a "BAD BUILD" message on the first mismatch, 0 otherwise.
function check_ubsan_build {
  local FUZZER=$1
  local ASAN_CALLS=$2
  local DFSAN_CALLS=$3
  local MSAN_CALLS=$4
  local UBSAN_CALLS=$5

  if [[ "$FUZZING_ENGINE" != libfuzzer ]]; then
    # Ignore UBSan checks for fuzzing engines other than libFuzzer because:
    # A) we (probably) are not going to use those with UBSan
    # B) such builds show indistinguishable number of calls to UBSan
    return 0
  fi

  # Perform all the checks for more detailed error message.
  if (( $ASAN_CALLS > $ASAN_CALLS_THRESHOLD_FOR_NON_ASAN_BUILD )); then
    echo "BAD BUILD: UBSan build of $FUZZER seems to be compiled with ASan."
    return 1
  fi

  if (( $DFSAN_CALLS > $DFSAN_CALLS_THRESHOLD_FOR_NON_DFSAN_BUILD )); then
    echo "BAD BUILD: UBSan build of $FUZZER seems to be compiled with DFSan."
    return 1
  fi

  if (( $MSAN_CALLS > $MSAN_CALLS_THRESHOLD_FOR_NON_MSAN_BUILD )); then
    echo "BAD BUILD: UBSan build of $FUZZER seems to be compiled with MSan."
    return 1
  fi

  if (( $UBSAN_CALLS < $UBSAN_CALLS_THRESHOLD_FOR_UBSAN_BUILD )); then
    echo "BAD BUILD: $FUZZER does not seem to be compiled with UBSan."
    return 1
  fi

  # Explicit success status for consistency with the other check_*_build
  # helpers (previously this function fell through and relied on the implicit
  # exit status of the final `if` statement).
  return 0
}
|
| 304 |
+
|
| 305 |
+
# Verify that the given fuzz target is compiled with correct sanitizer.
|
| 306 |
+
function check_mixed_sanitizers {
|
| 307 |
+
local FUZZER=$1
|
| 308 |
+
local result=0
|
| 309 |
+
local CALL_INSN=
|
| 310 |
+
|
| 311 |
+
if [ "${FUZZING_LANGUAGE:-}" = "jvm" ]; then
|
| 312 |
+
# Sanitizer runtime is linked into the Jazzer driver, so this check does not
|
| 313 |
+
# apply.
|
| 314 |
+
return 0
|
| 315 |
+
fi
|
| 316 |
+
|
| 317 |
+
if [ "${FUZZING_LANGUAGE:-}" = "javascript" ]; then
|
| 318 |
+
# Jazzer.js currently does not support using sanitizers with native Node.js addons.
|
| 319 |
+
# This is not relevant anyways since supporting this will be done by preloading
|
| 320 |
+
# the sanitizers in the wrapper script starting Jazzer.js.
|
| 321 |
+
return 0
|
| 322 |
+
fi
|
| 323 |
+
|
| 324 |
+
if [ "${FUZZING_LANGUAGE:-}" = "python" ]; then
|
| 325 |
+
# Sanitizer runtime is loaded via LD_PRELOAD, so this check does not apply.
|
| 326 |
+
return 0
|
| 327 |
+
fi
|
| 328 |
+
|
| 329 |
+
# For fuzztest fuzzers point to the binary instead of launcher script.
|
| 330 |
+
if [[ $FUZZER == *"@"* ]]; then
|
| 331 |
+
FUZZER=(${FUZZER//@/ }[0])
|
| 332 |
+
fi
|
| 333 |
+
|
| 334 |
+
CALL_INSN=
|
| 335 |
+
if [[ $ARCHITECTURE == "x86_64" ]]
|
| 336 |
+
then
|
| 337 |
+
CALL_INSN="callq?\s+[0-9a-f]+\s+<"
|
| 338 |
+
elif [[ $ARCHITECTURE == "i386" ]]
|
| 339 |
+
then
|
| 340 |
+
CALL_INSN="call\s+[0-9a-f]+\s+<"
|
| 341 |
+
elif [[ $ARCHITECTURE == "aarch64" ]]
|
| 342 |
+
then
|
| 343 |
+
CALL_INSN="bl\s+[0-9a-f]+\s+<"
|
| 344 |
+
else
|
| 345 |
+
echo "UNSUPPORTED ARCHITECTURE"
|
| 346 |
+
exit 1
|
| 347 |
+
fi
|
| 348 |
+
local ASAN_CALLS=$(objdump -dC $FUZZER | egrep "${CALL_INSN}__asan" -c)
|
| 349 |
+
local DFSAN_CALLS=$(objdump -dC $FUZZER | egrep "${CALL_INSN}__dfsan" -c)
|
| 350 |
+
local MSAN_CALLS=$(objdump -dC $FUZZER | egrep "${CALL_INSN}__msan" -c)
|
| 351 |
+
local UBSAN_CALLS=$(objdump -dC $FUZZER | egrep "${CALL_INSN}__ubsan" -c)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
if [[ "$SANITIZER" = address ]]; then
|
| 355 |
+
check_asan_build $FUZZER $ASAN_CALLS $DFSAN_CALLS $MSAN_CALLS $UBSAN_CALLS
|
| 356 |
+
result=$?
|
| 357 |
+
elif [[ "$SANITIZER" = dataflow ]]; then
|
| 358 |
+
check_dfsan_build $FUZZER $ASAN_CALLS $DFSAN_CALLS $MSAN_CALLS $UBSAN_CALLS
|
| 359 |
+
result=$?
|
| 360 |
+
elif [[ "$SANITIZER" = memory ]]; then
|
| 361 |
+
check_msan_build $FUZZER $ASAN_CALLS $DFSAN_CALLS $MSAN_CALLS $UBSAN_CALLS
|
| 362 |
+
result=$?
|
| 363 |
+
elif [[ "$SANITIZER" = undefined ]]; then
|
| 364 |
+
check_ubsan_build $FUZZER $ASAN_CALLS $DFSAN_CALLS $MSAN_CALLS $UBSAN_CALLS
|
| 365 |
+
result=$?
|
| 366 |
+
elif [[ "$SANITIZER" = thread ]]; then
|
| 367 |
+
# TODO(metzman): Implement this.
|
| 368 |
+
result=0
|
| 369 |
+
fi
|
| 370 |
+
|
| 371 |
+
return $result
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
# Verify that the given fuzz target doesn't crash on the seed corpus.
#
# Arguments:
#   $1 - path to the fuzz target binary.
# Only applies to libFuzzer targets; returns 1 (printing the target's output)
# when a seed input crashes the target.
function check_seed_corpus {
  local FUZZER=$1
  local FUZZER_NAME="$(basename $FUZZER)"
  local FUZZER_OUTPUT="/tmp/$FUZZER_NAME.output"

  if [[ "$FUZZING_ENGINE" != libfuzzer ]]; then
    return 0
  fi

  # Set up common fuzzing arguments, otherwise "run_fuzzer" errors out.
  # Use ${FUZZER_ARGS:-} so an unset variable does not abort the script,
  # which runs under `bash -u`.
  if [ -z "${FUZZER_ARGS:-}" ]; then
    export FUZZER_ARGS="-rss_limit_mb=2560 -timeout=25"
  fi

  bash -c "run_fuzzer $FUZZER_NAME -runs=0" &> $FUZZER_OUTPUT
  # Capture the exit status immediately so no later command can clobber $?.
  local result=$?

  # Don't output anything if fuzz target hasn't crashed.
  if [ $result -ne 0 ]; then
    echo "BAD BUILD: $FUZZER has a crashing input in its seed corpus:"
    cat $FUZZER_OUTPUT
    return 1
  fi

  return 0
}
|
| 400 |
+
|
| 401 |
+
function check_architecture {
|
| 402 |
+
local FUZZER=$1
|
| 403 |
+
local FUZZER_NAME=$(basename $FUZZER)
|
| 404 |
+
|
| 405 |
+
if [ "${FUZZING_LANGUAGE:-}" = "jvm" ]; then
|
| 406 |
+
# The native dependencies of a JVM project are not packaged, but loaded
|
| 407 |
+
# dynamically at runtime and thus cannot be checked here.
|
| 408 |
+
return 0;
|
| 409 |
+
fi
|
| 410 |
+
|
| 411 |
+
if [ "${FUZZING_LANGUAGE:-}" = "javascript" ]; then
|
| 412 |
+
# Jazzer.js fuzzers are wrapper scripts that start the fuzz target with
|
| 413 |
+
# the Jazzer.js CLI.
|
| 414 |
+
return 0;
|
| 415 |
+
fi
|
| 416 |
+
|
| 417 |
+
if [ "${FUZZING_LANGUAGE:-}" = "python" ]; then
|
| 418 |
+
FUZZER=${FUZZER}.pkg
|
| 419 |
+
fi
|
| 420 |
+
|
| 421 |
+
# For fuzztest fuzzers point to the binary instead of launcher script.
|
| 422 |
+
if [[ $FUZZER == *"@"* ]]; then
|
| 423 |
+
FUZZER=(${FUZZER//@/ }[0])
|
| 424 |
+
fi
|
| 425 |
+
|
| 426 |
+
FILE_OUTPUT=$(file $FUZZER)
|
| 427 |
+
if [[ $ARCHITECTURE == "x86_64" ]]
|
| 428 |
+
then
|
| 429 |
+
echo $FILE_OUTPUT | grep "x86-64" > /dev/null
|
| 430 |
+
elif [[ $ARCHITECTURE == "i386" ]]
|
| 431 |
+
then
|
| 432 |
+
echo $FILE_OUTPUT | grep "80386" > /dev/null
|
| 433 |
+
elif [[ $ARCHITECTURE == "aarch64" ]]
|
| 434 |
+
then
|
| 435 |
+
echo $FILE_OUTPUT | grep "aarch64" > /dev/null
|
| 436 |
+
else
|
| 437 |
+
echo "UNSUPPORTED ARCHITECTURE"
|
| 438 |
+
return 1
|
| 439 |
+
fi
|
| 440 |
+
result=$?
|
| 441 |
+
if [[ $result != 0 ]]
|
| 442 |
+
then
|
| 443 |
+
echo "BAD BUILD $FUZZER is not built for architecture: $ARCHITECTURE"
|
| 444 |
+
echo "file command output: $FILE_OUTPUT"
|
| 445 |
+
echo "check_mixed_sanitizers test will fail."
|
| 446 |
+
fi
|
| 447 |
+
return $result
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
# Run every build-sanity check against the given fuzz target and return the
# number of checks that failed (0 means the build looks good).
#
# Arguments:
#   $1 - path to the fuzz target binary.
#   $2 - optional auxiliary (sanitized) binary, used for centipede builds.
function main {
  local FUZZER=$1
  local AUXILIARY_FUZZER=${2:-}
  local checks_failed=0
  local result=0

  export RUN_FUZZER_MODE="batch"
  # Each check's exit status is captured immediately after the call and
  # accumulated, so every check runs even if an earlier one failed.
  check_engine $FUZZER
  result=$?
  checks_failed=$(( $checks_failed + $result ))

  check_architecture $FUZZER
  result=$?
  checks_failed=$(( $checks_failed + $result ))

  # Centipede helper builds keep the sanitizers in the auxiliary binary, so
  # run the mixed-sanitizer check against that binary instead.
  if [[ "$FUZZING_ENGINE" == centipede \
    && "$SANITIZER" != none && "${HELPER:-}" == True ]]; then
    check_mixed_sanitizers $AUXILIARY_FUZZER
  else
    check_mixed_sanitizers $FUZZER
  fi
  result=$?
  checks_failed=$(( $checks_failed + $result ))

  check_startup_crash $FUZZER
  result=$?
  checks_failed=$(( $checks_failed + $result ))

  # TODO: re-enable after introducing bug auto-filing for bad builds.
  # check_seed_corpus $FUZZER
  return $checks_failed
}
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
# Script entry point: require one mandatory fuzz target path and an optional
# auxiliary binary, then propagate main's failure count as the exit status.
case $# in
  1|2)
    ;;
  *)
    echo "Usage: $0 <fuzz_target_binary> [<auxiliary_binary>]"
    exit 1
    ;;
esac

# Fuzz target path.
FUZZER=$1
AUXILIARY_FUZZER=${2:-}

main $FUZZER $AUXILIARY_FUZZER
exit $?
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/coverage
ADDED
|
@@ -0,0 +1,549 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -u
|
| 2 |
+
# Copyright 2018 Google Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
################################################################################
|
| 17 |
+
# All work happens relative to the build output directory.
cd $OUT

# Fuzz targets can be passed as arguments; otherwise collect every executable
# in $OUT except the known support binaries.
if (( $# > 0 )); then
  FUZZ_TARGETS="$@"
else
  FUZZ_TARGETS="$(find . -maxdepth 1 -type f -executable -printf '%P\n' | \
    grep -v -x -F \
    -e 'llvm-symbolizer' \
    -e 'jazzer_agent_deploy.jar' \
    -e 'jazzer_driver' \
    -e 'jazzer_driver_with_sanitizer' \
    -e 'sanitizer_with_fuzzer.so')"
fi

COVERAGE_OUTPUT_DIR=${COVERAGE_OUTPUT_DIR:-$OUT}

# Output layout: raw/merged profile dumps, per-fuzzer stats, text reports,
# run logs and the HTML report trees.
DUMPS_DIR="$COVERAGE_OUTPUT_DIR/dumps"
FUZZERS_COVERAGE_DUMPS_DIR="$DUMPS_DIR/fuzzers_coverage"
MERGED_COVERAGE_DIR="$COVERAGE_OUTPUT_DIR/merged_coverage"
FUZZER_STATS_DIR="$COVERAGE_OUTPUT_DIR/fuzzer_stats"
TEXTCOV_REPORT_DIR="$COVERAGE_OUTPUT_DIR/textcov_reports"
LOGS_DIR="$COVERAGE_OUTPUT_DIR/logs"
REPORT_ROOT_DIR="$COVERAGE_OUTPUT_DIR/report"
REPORT_BY_TARGET_ROOT_DIR="$COVERAGE_OUTPUT_DIR/report_target"
PLATFORM=linux
REPORT_PLATFORM_DIR="$COVERAGE_OUTPUT_DIR/report/$PLATFORM"

# Start from a clean slate: recreate every output directory.
for directory in $DUMPS_DIR $FUZZER_STATS_DIR $LOGS_DIR $REPORT_ROOT_DIR $TEXTCOV_REPORT_DIR\
  $REPORT_PLATFORM_DIR $REPORT_BY_TARGET_ROOT_DIR $FUZZERS_COVERAGE_DUMPS_DIR $MERGED_COVERAGE_DIR; do
  rm -rf $directory
  mkdir -p $directory
done

PROFILE_FILE="$DUMPS_DIR/merged.profdata"
SUMMARY_FILE="$REPORT_PLATFORM_DIR/summary.json"
COVERAGE_TARGET_FILE="$FUZZER_STATS_DIR/coverage_targets.txt"

# Use path mapping, as $SRC directory from the builder is copied into $OUT/$SRC.
PATH_EQUIVALENCE_ARGS="-path-equivalence=/,$OUT"

# It's important to use $COVERAGE_EXTRA_ARGS as the last argument, because it
# can contain paths to source files / directories which are positional args.
# NOTE(review): this script runs with `bash -u`; presumably the environment
# always exports COVERAGE_EXTRA_ARGS — confirm, otherwise this expansion
# aborts the script.
LLVM_COV_COMMON_ARGS="$PATH_EQUIVALENCE_ARGS \
  -ignore-filename-regex=.*src/libfuzzer/.* $COVERAGE_EXTRA_ARGS"

# Options to extract branch coverage.
BRANCH_COV_ARGS="--show-branches=count --show-expansions"

# Timeout for running a single fuzz target.
TIMEOUT=1h

# This will be used by llvm-cov command to generate the actual report.
objects=""

# Number of CPUs available, this is needed for running tests in parallel.
# Set the max number of parallel jobs to be the CPU count and a max of 10.
NPROC=$(nproc)
MAX_PARALLEL_COUNT=10

CORPUS_DIR=${CORPUS_DIR:-"/corpus"}
|
| 78 |
+
# Run a single (libFuzzer-style) fuzz target over its corpus and convert the
# resulting raw profile dumps into per-target .profdata, summary JSON and a
# textcov report.
#
# Arguments:
#   $1 - fuzz target name (basename under $OUT; may be a "binary@test"
#        fuzztest launcher name).
function run_fuzz_target {
  local target=$1

  # '%1m' will produce separate dump files for every object. For example, if a
  # fuzz target loads a shared library, we will have dumps for both of them.
  local profraw_file="$DUMPS_DIR/$target.%1m.profraw"
  local profraw_file_mask="$DUMPS_DIR/$target.*.profraw"
  local profdata_file="$DUMPS_DIR/$target.profdata"
  local corpus_real="$CORPUS_DIR/${target}"

  # -merge=1 requires an output directory, create a new, empty dir for that.
  local corpus_dummy="$OUT/dummy_corpus_dir_for_${target}"
  rm -rf $corpus_dummy && mkdir -p $corpus_dummy

  # Use -merge=1 instead of -runs=0 because merge is crash resistant and would
  # let to get coverage using all corpus files even if there are crash inputs.
  # Merge should not introduce any significant overhead compared to -runs=0,
  # because (A) corpuses are already minimized; (B) we do not use sancov, and so
  # libFuzzer always finishes merge with an empty output dir.
  # Use 100s timeout instead of 25s as code coverage builds can be very slow.
  local args="-merge=1 -timeout=100 $corpus_dummy $corpus_real"

  export LLVM_PROFILE_FILE=$profraw_file
  timeout $TIMEOUT $OUT/$target $args &> $LOGS_DIR/$target.log
  # $? must be checked on the next statement: it is the status of the
  # timeout/target invocation above.
  if (( $? != 0 )); then
    echo "Error occured while running $target:"
    cat $LOGS_DIR/$target.log
  fi

  rm -rf $corpus_dummy
  # Sum the sizes of all produced .profraw files; a total of 0 means the
  # target produced no usable profile data.
  if (( $(du -c $profraw_file_mask | tail -n 1 | cut -f 1) == 0 )); then
    # Skip fuzz targets that failed to produce profile dumps.
    return 0
  fi

  # If necessary translate to latest profraw version.
  if [[ $target == *"@"* ]]; then
    # Extract fuzztest binary name from fuzztest wrapper script.
    # NOTE(review): this relies on an array quirk — expanding $target
    # unsubscripted below yields element 0 (the part before '@').
    target=(${target//@/ }[0])
  fi
  profraw_update.py $OUT/$target -i $profraw_file_mask
  llvm-profdata merge -j=1 -sparse $profraw_file_mask -o $profdata_file

  # Delete unnecessary and (potentially) large .profraw files.
  rm $profraw_file_mask

  # Include any shared libraries the target links so their coverage is
  # attributed too.
  shared_libraries=$(coverage_helper shared_libs -build-dir=$OUT -object=$target)

  llvm-cov export -summary-only -instr-profile=$profdata_file -object=$target \
    $shared_libraries $LLVM_COV_COMMON_ARGS > $FUZZER_STATS_DIR/$target.json

  # For introspector.
  llvm-cov show -instr-profile=$profdata_file -object=$target -line-coverage-gt=0 $shared_libraries $BRANCH_COV_ARGS $LLVM_COV_COMMON_ARGS > ${TEXTCOV_REPORT_DIR}/$target.covreport
}
|
| 132 |
+
|
| 133 |
+
function run_go_fuzz_target {
|
| 134 |
+
local target=$1
|
| 135 |
+
|
| 136 |
+
echo "Running go target $target"
|
| 137 |
+
export FUZZ_CORPUS_DIR="$CORPUS_DIR/${target}/"
|
| 138 |
+
export FUZZ_PROFILE_NAME="$DUMPS_DIR/$target.perf"
|
| 139 |
+
|
| 140 |
+
# setup for native go fuzzers
|
| 141 |
+
cd $OUT
|
| 142 |
+
mkdir -p "testdata/fuzz/${target}"
|
| 143 |
+
cp -r "${FUZZ_CORPUS_DIR}" "testdata/fuzz/"
|
| 144 |
+
|
| 145 |
+
# rewrite libFuzzer corpus to Std Go corpus if native fuzzing
|
| 146 |
+
grep "TestFuzzCorpus" $target > /dev/null 2>&1 && $SYSGOPATH/bin/convertcorpus $target "testdata/fuzz/${target}"
|
| 147 |
+
cd -
|
| 148 |
+
|
| 149 |
+
timeout $TIMEOUT $OUT/$target -test.coverprofile $DUMPS_DIR/$target.profdata &> $LOGS_DIR/$target.log
|
| 150 |
+
if (( $? != 0 )); then
|
| 151 |
+
echo "Error occured while running $target:"
|
| 152 |
+
cat $LOGS_DIR/$target.log
|
| 153 |
+
fi
|
| 154 |
+
|
| 155 |
+
# cleanup after native go fuzzers
|
| 156 |
+
rm -r "${OUT}/testdata/fuzz/${target}"
|
| 157 |
+
|
| 158 |
+
# The Go 1.18 fuzzers are renamed to "*_fuzz_.go" during "infra/helper.py build_fuzzers".
|
| 159 |
+
# They are are therefore refered to as "*_fuzz_.go" in the profdata files.
|
| 160 |
+
# Since the copies named "*_fuzz_.go" do not exist in the file tree during
|
| 161 |
+
# the coverage build, we change the references in the .profdata files
|
| 162 |
+
# to the original file names.
|
| 163 |
+
#sed -i "s/_test.go_fuzz_.go/_test.go/g" $DUMPS_DIR/$target.profdata
|
| 164 |
+
# translate from golangish paths to current absolute paths
|
| 165 |
+
cat $OUT/$target.gocovpath | while read i; do sed -i $i $DUMPS_DIR/$target.profdata; done
|
| 166 |
+
# cf PATH_EQUIVALENCE_ARGS
|
| 167 |
+
sed -i 's=/='$OUT'/=' $DUMPS_DIR/$target.profdata
|
| 168 |
+
$SYSGOPATH/bin/gocovsum $DUMPS_DIR/$target.profdata > $FUZZER_STATS_DIR/$target.json
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
function run_python_fuzz_target {
|
| 172 |
+
local target=$1
|
| 173 |
+
local zipped_sources="$DUMPS_DIR/$target.deps.zip"
|
| 174 |
+
local corpus_real="$CORPUS_DIR/${target}"
|
| 175 |
+
# Write dummy stats file
|
| 176 |
+
echo "{}" > "$FUZZER_STATS_DIR/$target.json"
|
| 177 |
+
|
| 178 |
+
# Run fuzzer
|
| 179 |
+
$OUT/$target $corpus_real -atheris_runs=$(ls -la $corpus_real | wc -l) > $LOGS_DIR/$target.log 2>&1
|
| 180 |
+
if (( $? != 0 )); then
|
| 181 |
+
echo "Error happened getting coverage of $target"
|
| 182 |
+
echo "This is likely because Atheris did not exit gracefully"
|
| 183 |
+
cat $LOGS_DIR/$target.log
|
| 184 |
+
return 0
|
| 185 |
+
fi
|
| 186 |
+
mv .coverage $OUT/.coverage_$target
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
function run_java_fuzz_target {
|
| 190 |
+
local target=$1
|
| 191 |
+
|
| 192 |
+
local exec_file="$DUMPS_DIR/$target.exec"
|
| 193 |
+
local class_dump_dir="$DUMPS_DIR/${target}_classes/"
|
| 194 |
+
mkdir "$class_dump_dir"
|
| 195 |
+
local corpus_real="$CORPUS_DIR/${target}"
|
| 196 |
+
|
| 197 |
+
# -merge=1 requires an output directory, create a new, empty dir for that.
|
| 198 |
+
local corpus_dummy="$OUT/dummy_corpus_dir_for_${target}"
|
| 199 |
+
rm -rf $corpus_dummy && mkdir -p $corpus_dummy
|
| 200 |
+
|
| 201 |
+
# Use 100s timeout instead of 25s as code coverage builds can be very slow.
|
| 202 |
+
local jacoco_args="destfile=$exec_file,classdumpdir=$class_dump_dir,excludes=com.code_intelligence.jazzer.*\\:com.sun.tools.attach.VirtualMachine"
|
| 203 |
+
local args="-merge=1 -timeout=100 --nohooks \
|
| 204 |
+
--additional_jvm_args=-javaagent\\:/opt/jacoco-agent.jar=$jacoco_args \
|
| 205 |
+
$corpus_dummy $corpus_real"
|
| 206 |
+
|
| 207 |
+
timeout $TIMEOUT $OUT/$target $args &> $LOGS_DIR/$target.log
|
| 208 |
+
if (( $? != 0 )); then
|
| 209 |
+
echo "Error occured while running $target:"
|
| 210 |
+
cat $LOGS_DIR/$target.log
|
| 211 |
+
fi
|
| 212 |
+
|
| 213 |
+
if (( $(du -c $exec_file | tail -n 1 | cut -f 1) == 0 )); then
|
| 214 |
+
# Skip fuzz targets that failed to produce .exec files.
|
| 215 |
+
echo "$target failed to produce .exec file."
|
| 216 |
+
return 0
|
| 217 |
+
fi
|
| 218 |
+
|
| 219 |
+
# Generate XML report only as input to jacoco_report_converter.
|
| 220 |
+
# Source files are not needed for the summary.
|
| 221 |
+
local xml_report="$DUMPS_DIR/${target}.xml"
|
| 222 |
+
local summary_file="$FUZZER_STATS_DIR/$target.json"
|
| 223 |
+
java -jar /opt/jacoco-cli.jar report $exec_file \
|
| 224 |
+
--xml $xml_report \
|
| 225 |
+
--classfiles $class_dump_dir
|
| 226 |
+
|
| 227 |
+
# Write llvm-cov summary file.
|
| 228 |
+
jacoco_report_converter.py $xml_report $summary_file
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
function run_javascript_fuzz_target {
|
| 232 |
+
local target=$1
|
| 233 |
+
local corpus_real="$CORPUS_DIR/${target}"
|
| 234 |
+
|
| 235 |
+
# -merge=1 requires an output directory, create a new, empty dir for that.
|
| 236 |
+
local corpus_dummy="$OUT/dummy_corpus_dir_for_${target}"
|
| 237 |
+
rm -rf $corpus_dummy && mkdir -p $corpus_dummy
|
| 238 |
+
|
| 239 |
+
# IstanbulJS currently does not work when the tested program creates
|
| 240 |
+
# subprocesses. For this reason, we first minimize the corpus removing
|
| 241 |
+
# any crashing inputs so that we can report source-based code coverage
|
| 242 |
+
# with a single sweep over the minimized corpus
|
| 243 |
+
local merge_args="-merge=1 -timeout=100 $corpus_dummy $corpus_real"
|
| 244 |
+
timeout $TIMEOUT $OUT/$target $merge_args &> $LOGS_DIR/$target.log
|
| 245 |
+
|
| 246 |
+
# nyc saves the coverage reports in a directory with the default name "coverage"
|
| 247 |
+
local coverage_dir="$DUMPS_DIR/coverage_dir_for_${target}"
|
| 248 |
+
rm -rf $coverage_dir && mkdir -p $coverage_dir
|
| 249 |
+
|
| 250 |
+
local nyc_json_coverage_file="$coverage_dir/coverage-final.json"
|
| 251 |
+
local nyc_json_summary_file="$coverage_dir/coverage-summary.json"
|
| 252 |
+
|
| 253 |
+
local args="-runs=0 $corpus_dummy"
|
| 254 |
+
local jazzerjs_args="--coverage --coverageDirectory $coverage_dir --coverageReporters json --coverageReporters json-summary"
|
| 255 |
+
|
| 256 |
+
JAZZERJS_EXTRA_ARGS=$jazzerjs_args $OUT/$target $args &> $LOGS_DIR/$target.log
|
| 257 |
+
|
| 258 |
+
if (( $? != 0 )); then
|
| 259 |
+
echo "Error occured while running $target:"
|
| 260 |
+
cat $LOGS_DIR/$target.log
|
| 261 |
+
fi
|
| 262 |
+
|
| 263 |
+
if [ ! -s $nyc_json_coverage_file ]; then
|
| 264 |
+
# Skip fuzz targets that failed to produce coverage-final.json file.
|
| 265 |
+
echo "$target failed to produce coverage-final.json file."
|
| 266 |
+
return 0
|
| 267 |
+
fi
|
| 268 |
+
|
| 269 |
+
cp $nyc_json_coverage_file $FUZZERS_COVERAGE_DUMPS_DIR/$target.json
|
| 270 |
+
|
| 271 |
+
local summary_file="$FUZZER_STATS_DIR/$target.json"
|
| 272 |
+
|
| 273 |
+
nyc_report_converter.py $nyc_json_summary_file $summary_file
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
function generate_html {
|
| 277 |
+
local profdata=$1
|
| 278 |
+
local shared_libraries=$2
|
| 279 |
+
local objects=$3
|
| 280 |
+
local output_dir=$4
|
| 281 |
+
|
| 282 |
+
rm -rf "$output_dir"
|
| 283 |
+
mkdir -p "$output_dir/$PLATFORM"
|
| 284 |
+
|
| 285 |
+
local llvm_cov_args="-instr-profile=$profdata $objects $LLVM_COV_COMMON_ARGS"
|
| 286 |
+
llvm-cov show -format=html -output-dir=$output_dir -Xdemangler rcfilt $llvm_cov_args
|
| 287 |
+
|
| 288 |
+
# Export coverage summary in JSON format.
|
| 289 |
+
local summary_file=$output_dir/$PLATFORM/summary.json
|
| 290 |
+
|
| 291 |
+
llvm-cov export -summary-only $llvm_cov_args > $summary_file
|
| 292 |
+
|
| 293 |
+
coverage_helper -v post_process -src-root-dir=/ -summary-file=$summary_file \
|
| 294 |
+
-output-dir=$output_dir $PATH_EQUIVALENCE_ARGS
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
export SYSGOPATH=$GOPATH
|
| 298 |
+
export GOPATH=$OUT/$GOPATH
|
| 299 |
+
# Run each fuzz target, generate raw coverage dumps.
|
| 300 |
+
for fuzz_target in $FUZZ_TARGETS; do
|
| 301 |
+
# Test if fuzz target is a golang one.
|
| 302 |
+
if [[ $FUZZING_LANGUAGE == "go" ]]; then
|
| 303 |
+
# Continue if not a fuzz target.
|
| 304 |
+
if [[ $FUZZING_ENGINE != "none" ]]; then
|
| 305 |
+
grep "FUZZ_CORPUS_DIR" $fuzz_target > /dev/null 2>&1 || grep "testing\.T" $fuzz_target > /dev/null 2>&1 || continue
|
| 306 |
+
fi
|
| 307 |
+
# Log the target in the targets file.
|
| 308 |
+
echo ${fuzz_target} >> $COVERAGE_TARGET_FILE
|
| 309 |
+
|
| 310 |
+
# Run the coverage collection.
|
| 311 |
+
run_go_fuzz_target $fuzz_target &
|
| 312 |
+
elif [[ $FUZZING_LANGUAGE == "python" ]]; then
|
| 313 |
+
echo "Entering python fuzzing"
|
| 314 |
+
# Log the target in the targets file.
|
| 315 |
+
echo ${fuzz_target} >> $COVERAGE_TARGET_FILE
|
| 316 |
+
|
| 317 |
+
# Run the coverage collection.
|
| 318 |
+
run_python_fuzz_target $fuzz_target
|
| 319 |
+
elif [[ $FUZZING_LANGUAGE == "jvm" ]]; then
|
| 320 |
+
# Continue if not a fuzz target.
|
| 321 |
+
if [[ $FUZZING_ENGINE != "none" ]]; then
|
| 322 |
+
grep "LLVMFuzzerTestOneInput" $fuzz_target > /dev/null 2>&1 || continue
|
| 323 |
+
fi
|
| 324 |
+
|
| 325 |
+
echo "Running $fuzz_target"
|
| 326 |
+
# Log the target in the targets file.
|
| 327 |
+
echo ${fuzz_target} >> $COVERAGE_TARGET_FILE
|
| 328 |
+
|
| 329 |
+
# Run the coverage collection.
|
| 330 |
+
run_java_fuzz_target $fuzz_target &
|
| 331 |
+
elif [[ $FUZZING_LANGUAGE == "javascript" ]]; then
|
| 332 |
+
# Continue if not a fuzz target.
|
| 333 |
+
if [[ $FUZZING_ENGINE != "none" ]]; then
|
| 334 |
+
grep "LLVMFuzzerTestOneInput" $fuzz_target > /dev/null 2>&1 || continue
|
| 335 |
+
fi
|
| 336 |
+
|
| 337 |
+
echo "Running $fuzz_target"
|
| 338 |
+
# Log the target in the targets file.
|
| 339 |
+
echo ${fuzz_target} >> $COVERAGE_TARGET_FILE
|
| 340 |
+
|
| 341 |
+
# Run the coverage collection.
|
| 342 |
+
run_javascript_fuzz_target $fuzz_target &
|
| 343 |
+
else
|
| 344 |
+
# Continue if not a fuzz target.
|
| 345 |
+
if [[ $FUZZING_ENGINE != "none" ]]; then
|
| 346 |
+
grep "LLVMFuzzerTestOneInput" $fuzz_target > /dev/null 2>&1 || continue
|
| 347 |
+
fi
|
| 348 |
+
|
| 349 |
+
echo "Running $fuzz_target"
|
| 350 |
+
# Log the target in the targets file.
|
| 351 |
+
echo ${fuzz_target} >> $COVERAGE_TARGET_FILE
|
| 352 |
+
|
| 353 |
+
# Run the coverage collection.
|
| 354 |
+
run_fuzz_target $fuzz_target &
|
| 355 |
+
|
| 356 |
+
# Rewrite object if its a FUZZTEST target
|
| 357 |
+
if [[ $fuzz_target == *"@"* ]]; then
|
| 358 |
+
# Extract fuzztest binary name from fuzztest wrapper script.
|
| 359 |
+
fuzz_target=(${fuzz_target//@/ }[0])
|
| 360 |
+
fi
|
| 361 |
+
if [[ -z $objects ]]; then
|
| 362 |
+
# The first object needs to be passed without -object= flag.
|
| 363 |
+
objects="$fuzz_target"
|
| 364 |
+
else
|
| 365 |
+
objects="$objects -object=$fuzz_target"
|
| 366 |
+
fi
|
| 367 |
+
fi
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
# Limit the number of processes to be spawned.
|
| 371 |
+
n_child_proc=$(jobs -rp | wc -l)
|
| 372 |
+
while [[ "$n_child_proc" -eq "$NPROC" || "$n_child_proc" -gt "$MAX_PARALLEL_COUNT" ]]; do
|
| 373 |
+
sleep 4
|
| 374 |
+
n_child_proc=$(jobs -rp | wc -l)
|
| 375 |
+
done
|
| 376 |
+
done
|
| 377 |
+
|
| 378 |
+
# Wait for background processes to finish.
|
| 379 |
+
wait
|
| 380 |
+
|
| 381 |
+
if [[ $FUZZING_LANGUAGE == "go" ]]; then
|
| 382 |
+
echo $DUMPS_DIR
|
| 383 |
+
$SYSGOPATH/bin/gocovmerge $DUMPS_DIR/*.profdata > fuzz.cov
|
| 384 |
+
gotoolcover -html=fuzz.cov -o $REPORT_ROOT_DIR/index.html
|
| 385 |
+
$SYSGOPATH/bin/gocovsum fuzz.cov > $SUMMARY_FILE
|
| 386 |
+
cp $REPORT_ROOT_DIR/index.html $REPORT_PLATFORM_DIR/index.html
|
| 387 |
+
$SYSGOPATH/bin/pprof-merge $DUMPS_DIR/*.perf.cpu.prof
|
| 388 |
+
mv merged.data $REPORT_ROOT_DIR/cpu.prof
|
| 389 |
+
$SYSGOPATH/bin/pprof-merge $DUMPS_DIR/*.perf.heap.prof
|
| 390 |
+
mv merged.data $REPORT_ROOT_DIR/heap.prof
|
| 391 |
+
#TODO some proxy for go tool pprof -http=127.0.0.1:8001 $DUMPS_DIR/cpu.prof
|
| 392 |
+
echo "Finished generating code coverage report for Go fuzz targets."
|
| 393 |
+
elif [[ $FUZZING_LANGUAGE == "python" ]]; then
|
| 394 |
+
# Extract source files from all dependency zip folders
|
| 395 |
+
mkdir -p /pythoncovmergedfiles/medio
|
| 396 |
+
PYCOVDIR=/pycovdir/
|
| 397 |
+
mkdir $PYCOVDIR
|
| 398 |
+
for fuzzer in $FUZZ_TARGETS; do
|
| 399 |
+
fuzzer_deps=${fuzzer}.pkg.deps.zip
|
| 400 |
+
unzip $OUT/${fuzzer_deps}
|
| 401 |
+
rsync -r ./medio /pythoncovmergedfiles/medio
|
| 402 |
+
rm -rf ./medio
|
| 403 |
+
|
| 404 |
+
# Translate paths in unzipped folders to paths that we can use
|
| 405 |
+
mv $OUT/.coverage_$fuzzer .coverage
|
| 406 |
+
python3 /usr/local/bin/python_coverage_runner_help.py translate /pythoncovmergedfiles/medio
|
| 407 |
+
cp .new_coverage $PYCOVDIR/.coverage_$fuzzer
|
| 408 |
+
cp .new_coverage $OUT/coverage_d_$fuzzer
|
| 409 |
+
done
|
| 410 |
+
|
| 411 |
+
# Combine coverage
|
| 412 |
+
cd $PYCOVDIR
|
| 413 |
+
python3 /usr/local/bin/python_coverage_runner_help.py combine .coverage_*
|
| 414 |
+
python3 /usr/local/bin/python_coverage_runner_help.py html
|
| 415 |
+
# Produce all_cov file used by fuzz introspector.
|
| 416 |
+
python3 /usr/local/bin/python_coverage_runner_help.py json -o ${TEXTCOV_REPORT_DIR}/all_cov.json
|
| 417 |
+
|
| 418 |
+
# Generate .json with similar format to llvm-cov output.
|
| 419 |
+
python3 /usr/local/bin/python_coverage_runner_help.py \
|
| 420 |
+
convert-to-summary-json ${TEXTCOV_REPORT_DIR}/all_cov.json $SUMMARY_FILE
|
| 421 |
+
|
| 422 |
+
# Copy coverage date out
|
| 423 |
+
cp htmlcov/status.json ${TEXTCOV_REPORT_DIR}/html_status.json
|
| 424 |
+
|
| 425 |
+
mv htmlcov/* $REPORT_PLATFORM_DIR/
|
| 426 |
+
mv .coverage_* $REPORT_PLATFORM_DIR/
|
| 427 |
+
elif [[ $FUZZING_LANGUAGE == "jvm" ]]; then
|
| 428 |
+
|
| 429 |
+
# From this point on the script does not tolerate any errors.
|
| 430 |
+
set -e
|
| 431 |
+
|
| 432 |
+
# Merge .exec files from the individual targets.
|
| 433 |
+
jacoco_merged_exec=$DUMPS_DIR/jacoco.merged.exec
|
| 434 |
+
java -jar /opt/jacoco-cli.jar merge $DUMPS_DIR/*.exec \
|
| 435 |
+
--destfile $jacoco_merged_exec
|
| 436 |
+
|
| 437 |
+
# Prepare classes directory for jacoco process
|
| 438 |
+
classes_dir=$DUMPS_DIR/classes
|
| 439 |
+
mkdir $classes_dir
|
| 440 |
+
|
| 441 |
+
# Only copy class files found in $OUT/$SRC to ensure they are
|
| 442 |
+
# lively compiled from the project, avoiding inclusion of
|
| 443 |
+
# dependency classes. This also includes the fuzzer classes.
|
| 444 |
+
find "$OUT/$SRC" -type f -name "*.class" | while read -r class_file; do
|
| 445 |
+
# Skip module-info.class
|
| 446 |
+
if [[ "$(basename "$class_file")" == "module-info.class" ]]; then
|
| 447 |
+
continue
|
| 448 |
+
fi
|
| 449 |
+
|
| 450 |
+
# Use javap to extract the fully qualified name of the class and copy it to $classes_dir
|
| 451 |
+
fqn=$(javap -verbose "$class_file" 2>/dev/null | grep "this_class:" | grep -oP '(?<=// ).*')
|
| 452 |
+
if [ -n "$fqn" ]; then
|
| 453 |
+
mkdir -p $classes_dir/$(dirname $fqn)
|
| 454 |
+
cp $class_file $classes_dir/$fqn.class
|
| 455 |
+
fi
|
| 456 |
+
done
|
| 457 |
+
|
| 458 |
+
# Heuristically determine source directories based on Maven structure.
|
| 459 |
+
# Always include the $SRC root as it likely contains the fuzzer sources.
|
| 460 |
+
sourcefiles_args=(--sourcefiles $OUT/$SRC)
|
| 461 |
+
source_dirs=$(find $OUT/$SRC -type d -name 'java')
|
| 462 |
+
for source_dir in $source_dirs; do
|
| 463 |
+
sourcefiles_args+=(--sourcefiles "$source_dir")
|
| 464 |
+
done
|
| 465 |
+
|
| 466 |
+
# Generate HTML and XML reports.
|
| 467 |
+
xml_report=$REPORT_PLATFORM_DIR/index.xml
|
| 468 |
+
java -jar /opt/jacoco-cli.jar report $jacoco_merged_exec \
|
| 469 |
+
--html $REPORT_PLATFORM_DIR \
|
| 470 |
+
--xml $xml_report \
|
| 471 |
+
--classfiles $classes_dir \
|
| 472 |
+
"${sourcefiles_args[@]}"
|
| 473 |
+
|
| 474 |
+
# Also serve the raw exec file and XML report, which can be useful for
|
| 475 |
+
# automated analysis.
|
| 476 |
+
cp $jacoco_merged_exec $REPORT_PLATFORM_DIR/jacoco.exec
|
| 477 |
+
cp $xml_report $REPORT_PLATFORM_DIR/jacoco.xml
|
| 478 |
+
cp $xml_report $TEXTCOV_REPORT_DIR/jacoco.xml
|
| 479 |
+
|
| 480 |
+
# Write llvm-cov summary file.
|
| 481 |
+
jacoco_report_converter.py $xml_report $SUMMARY_FILE
|
| 482 |
+
|
| 483 |
+
set +e
|
| 484 |
+
elif [[ $FUZZING_LANGUAGE == "javascript" ]]; then
|
| 485 |
+
|
| 486 |
+
# From this point on the script does not tolerate any errors.
|
| 487 |
+
set -e
|
| 488 |
+
|
| 489 |
+
json_report=$MERGED_COVERAGE_DIR/coverage.json
|
| 490 |
+
nyc merge $FUZZERS_COVERAGE_DUMPS_DIR $json_report
|
| 491 |
+
|
| 492 |
+
nyc report -t $MERGED_COVERAGE_DIR --report-dir $REPORT_PLATFORM_DIR --reporter=html --reporter=json-summary
|
| 493 |
+
|
| 494 |
+
nyc_json_summary_file=$REPORT_PLATFORM_DIR/coverage-summary.json
|
| 495 |
+
|
| 496 |
+
# Write llvm-cov summary file.
|
| 497 |
+
nyc_report_converter.py $nyc_json_summary_file $SUMMARY_FILE
|
| 498 |
+
|
| 499 |
+
set +e
|
| 500 |
+
else
|
| 501 |
+
|
| 502 |
+
# From this point on the script does not tolerate any errors.
|
| 503 |
+
set -e
|
| 504 |
+
|
| 505 |
+
# Merge all dumps from the individual targets.
|
| 506 |
+
rm -f $PROFILE_FILE
|
| 507 |
+
llvm-profdata merge -sparse $DUMPS_DIR/*.profdata -o $PROFILE_FILE
|
| 508 |
+
|
| 509 |
+
# TODO(mmoroz): add script from Chromium for rendering directory view reports.
|
| 510 |
+
# The first path in $objects does not have -object= prefix (llvm-cov format).
|
| 511 |
+
shared_libraries=$(coverage_helper shared_libs -build-dir=$OUT -object=$objects)
|
| 512 |
+
objects="$objects $shared_libraries"
|
| 513 |
+
|
| 514 |
+
generate_html $PROFILE_FILE "$shared_libraries" "$objects" "$REPORT_ROOT_DIR"
|
| 515 |
+
|
| 516 |
+
# Per target reports.
|
| 517 |
+
for fuzz_target in $FUZZ_TARGETS; do
|
| 518 |
+
if [[ $fuzz_target == *"@"* ]]; then
|
| 519 |
+
profdata_path=$DUMPS_DIR/$fuzz_target.profdata
|
| 520 |
+
report_dir=$REPORT_BY_TARGET_ROOT_DIR/$fuzz_target
|
| 521 |
+
# Extract fuzztest binary name from fuzztest wrapper script.
|
| 522 |
+
fuzz_target=(${fuzz_target//@/ }[0])
|
| 523 |
+
else
|
| 524 |
+
profdata_path=$DUMPS_DIR/$fuzz_target.profdata
|
| 525 |
+
report_dir=$REPORT_BY_TARGET_ROOT_DIR/$fuzz_target
|
| 526 |
+
fi
|
| 527 |
+
if [[ ! -f "$profdata_path" ]]; then
|
| 528 |
+
echo "WARNING: $fuzz_target has no profdata generated."
|
| 529 |
+
continue
|
| 530 |
+
fi
|
| 531 |
+
|
| 532 |
+
generate_html $profdata_path "$shared_libraries" "$fuzz_target" "$report_dir"
|
| 533 |
+
done
|
| 534 |
+
|
| 535 |
+
set +e
|
| 536 |
+
fi
|
| 537 |
+
|
| 538 |
+
# Make sure report is readable.
|
| 539 |
+
chmod -R +r $REPORT_ROOT_DIR $REPORT_BY_TARGET_ROOT_DIR
|
| 540 |
+
find $REPORT_ROOT_DIR $REPORT_BY_TARGET_ROOT_DIR -type d -exec chmod +x {} +
|
| 541 |
+
|
| 542 |
+
# HTTP_PORT is optional.
|
| 543 |
+
set +u
|
| 544 |
+
if [[ -n $HTTP_PORT ]]; then
|
| 545 |
+
# Serve the report locally.
|
| 546 |
+
echo "Serving the report on http://127.0.0.1:$HTTP_PORT/linux/index.html"
|
| 547 |
+
cd $REPORT_ROOT_DIR
|
| 548 |
+
python3 -m http.server $HTTP_PORT
|
| 549 |
+
fi
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/coverage_helper
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -u
|
| 2 |
+
# Copyright 2018 Google Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
################################################################################
|
| 17 |
+
python3 $CODE_COVERAGE_SRC/coverage_utils.py $@
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/download_corpus
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -u
|
| 2 |
+
# Copyright 2018 Google Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
################################################################################
|
| 17 |
+
|
| 18 |
+
if (( $# < 1 )); then
|
| 19 |
+
echo "Usage: $0 \"path_download_to url_download_from\" (can be repeated)" >&2
|
| 20 |
+
exit 1
|
| 21 |
+
fi
|
| 22 |
+
|
| 23 |
+
for pair in "$@"; do
|
| 24 |
+
read path url <<< "$pair"
|
| 25 |
+
wget -q -O $path $url
|
| 26 |
+
done
|
| 27 |
+
|
| 28 |
+
# Always exit with 0 as we do not track wget return codes and should not rely
|
| 29 |
+
# on the latest command execution.
|
| 30 |
+
exit 0
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/generate_differential_cov_report.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
#
|
| 3 |
+
# Copyright 2023 Google LLC
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
#
|
| 17 |
+
################################################################################
|
| 18 |
+
"""Script for generating differential coverage reports.
|
| 19 |
+
generate_differential_cov_report.py <profdata-dump-directory> \
|
| 20 |
+
<profdata-directory-to-subtract-from-first> <output-directory>
|
| 21 |
+
"""
|
| 22 |
+
import os
|
| 23 |
+
import shutil
|
| 24 |
+
import subprocess
|
| 25 |
+
import sys
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class ProfData:
|
| 29 |
+
"""Class representing a profdata file."""
|
| 30 |
+
|
| 31 |
+
def __init__(self, text):
|
| 32 |
+
self.function_profs = []
|
| 33 |
+
for function_prof in text.split('\n\n'):
|
| 34 |
+
if not function_prof:
|
| 35 |
+
continue
|
| 36 |
+
self.function_profs.append(FunctionProf(function_prof))
|
| 37 |
+
|
| 38 |
+
def to_string(self):
|
| 39 |
+
"""Convert back to a string."""
|
| 40 |
+
return '\n'.join(
|
| 41 |
+
[function_prof.to_string() for function_prof in self.function_profs])
|
| 42 |
+
|
| 43 |
+
def find_function(self, function, idx=None):
|
| 44 |
+
"""Find the same function in this profdata."""
|
| 45 |
+
if idx is not None:
|
| 46 |
+
try:
|
| 47 |
+
possibility = self.function_profs[idx]
|
| 48 |
+
if function.func_hash == possibility.func_hash:
|
| 49 |
+
return possibility
|
| 50 |
+
except IndexError:
|
| 51 |
+
pass
|
| 52 |
+
for function_prof in self.function_profs:
|
| 53 |
+
if function_prof.func_hash == function.func_hash:
|
| 54 |
+
return function_prof
|
| 55 |
+
return None
|
| 56 |
+
|
| 57 |
+
def subtract(self, subtrahend):
|
| 58 |
+
"""Subtract subtrahend from this profdata."""
|
| 59 |
+
for idx, function_prof in enumerate(self.function_profs):
|
| 60 |
+
subtrahend_function_prof = subtrahend.find_function(function_prof, idx)
|
| 61 |
+
function_prof.subtract(subtrahend_function_prof)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class FunctionProf:
|
| 65 |
+
"""Profile of a function."""
|
| 66 |
+
FUNC_HASH_COMMENT_LINE = '# Func Hash:'
|
| 67 |
+
NUM_COUNTERS_COMMENT_LINE = '# Num Counters:'
|
| 68 |
+
COUNTER_VALUES_COMMENT_LINE = '# Counter Values:'
|
| 69 |
+
|
| 70 |
+
def __init__(self, text):
|
| 71 |
+
print(text)
|
| 72 |
+
lines = text.splitlines()
|
| 73 |
+
self.function = lines[0]
|
| 74 |
+
assert self.FUNC_HASH_COMMENT_LINE == lines[1]
|
| 75 |
+
self.func_hash = lines[2]
|
| 76 |
+
assert self.NUM_COUNTERS_COMMENT_LINE == lines[3]
|
| 77 |
+
self.num_counters = int(lines[4])
|
| 78 |
+
assert self.COUNTER_VALUES_COMMENT_LINE == lines[5]
|
| 79 |
+
self.counter_values = [1 if int(line) else 0 for line in lines[6:]]
|
| 80 |
+
|
| 81 |
+
def to_string(self):
|
| 82 |
+
"""Convert back to text."""
|
| 83 |
+
lines = [
|
| 84 |
+
self.function,
|
| 85 |
+
self.FUNC_HASH_COMMENT_LINE,
|
| 86 |
+
self.func_hash,
|
| 87 |
+
self.NUM_COUNTERS_COMMENT_LINE,
|
| 88 |
+
str(self.num_counters),
|
| 89 |
+
self.COUNTER_VALUES_COMMENT_LINE,
|
| 90 |
+
] + [str(num) for num in self.counter_values]
|
| 91 |
+
return '\n'.join(lines)
|
| 92 |
+
|
| 93 |
+
def subtract(self, subtrahend_prof):
|
| 94 |
+
"""Subtract this other function from this function."""
|
| 95 |
+
if not subtrahend_prof:
|
| 96 |
+
print(self.function, 'has no subtrahend')
|
| 97 |
+
# Nothing to subtract.
|
| 98 |
+
return
|
| 99 |
+
self.counter_values = [
|
| 100 |
+
max(counter1 - counter2, 0) for counter1, counter2 in zip(
|
| 101 |
+
self.counter_values, subtrahend_prof.counter_values)
|
| 102 |
+
]
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_profdata_files(directory):
|
| 106 |
+
"""Returns profdata files in |directory|."""
|
| 107 |
+
profdatas = []
|
| 108 |
+
for filename in os.listdir(directory):
|
| 109 |
+
filename = os.path.join(directory, filename)
|
| 110 |
+
if filename.endswith('.profdata'):
|
| 111 |
+
profdatas.append(filename)
|
| 112 |
+
return profdatas
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def convert_profdata_to_text(profdata):
|
| 116 |
+
"""Convert a profdata binary file to a profdata text file."""
|
| 117 |
+
profdata_text = f'{profdata}.txt'
|
| 118 |
+
if os.path.exists(profdata_text):
|
| 119 |
+
os.remove(profdata_text)
|
| 120 |
+
command = [
|
| 121 |
+
'llvm-profdata', 'merge', '-j=1', '-sparse', profdata, '--text', '-o',
|
| 122 |
+
profdata_text
|
| 123 |
+
]
|
| 124 |
+
print(command)
|
| 125 |
+
subprocess.run(command, check=True)
|
| 126 |
+
return profdata_text
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def convert_text_profdata_to_bin(profdata_text):
|
| 130 |
+
"""Convert a profdata text file to a profdata binary file."""
|
| 131 |
+
profdata = profdata_text.replace('.txt', '').replace('.profdata',
|
| 132 |
+
'') + '.profdata'
|
| 133 |
+
print('bin profdata', profdata)
|
| 134 |
+
if os.path.exists(profdata):
|
| 135 |
+
os.remove(profdata)
|
| 136 |
+
command = [
|
| 137 |
+
'llvm-profdata', 'merge', '-j=1', '-sparse', profdata_text, '-o', profdata
|
| 138 |
+
]
|
| 139 |
+
print(command)
|
| 140 |
+
subprocess.run(command, check=True)
|
| 141 |
+
return profdata
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def get_difference(minuend_filename, subtrahend_filename):
|
| 145 |
+
"""Subtract subtrahend_filename from minuend_filename."""
|
| 146 |
+
with open(minuend_filename, 'r', encoding='utf-8') as minuend_file:
|
| 147 |
+
print('minuend', minuend_filename)
|
| 148 |
+
minuend = ProfData(minuend_file.read())
|
| 149 |
+
with open(subtrahend_filename, 'r', encoding='utf-8') as subtrahend_file:
|
| 150 |
+
print('subtrahend', subtrahend_filename)
|
| 151 |
+
subtrahend = ProfData(subtrahend_file.read())
|
| 152 |
+
|
| 153 |
+
minuend.subtract(subtrahend)
|
| 154 |
+
return minuend
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def profdatas_to_objects(profdatas):
|
| 158 |
+
"""Get the corresponding objects for each profdata."""
|
| 159 |
+
return [
|
| 160 |
+
os.path.splitext(os.path.basename(profdata))[0] for profdata in profdatas
|
| 161 |
+
]
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def generate_differential_cov_reports(minuend_profdatas, subtrahend_profdatas,
                                      difference_dir):
  """Calculate the differences between all profdatas and generate differential
  coverage reports.

  Args:
    minuend_profdatas: profdata files whose coverage is being reported.
    subtrahend_profdatas: profdata files whose coverage is subtracted.
    difference_dir: directory that receives the difference profdatas and the
      generated HTML reports.

  NOTE(review): assumes the two profdata lists are parallel (same order and
  length) — confirm with the caller.
  """
  profdata_objects = profdatas_to_objects(minuend_profdatas)
  # 'merged' is the combined profdata, not a real binary; exclude it from the
  # object list passed to llvm-cov for the merged report.
  real_profdata_objects = [
      binobject for binobject in profdata_objects if binobject != 'merged'
  ]
  for minuend, subtrahend, binobject in zip(minuend_profdatas,
                                            subtrahend_profdatas,
                                            profdata_objects):
    minuend_text = convert_profdata_to_text(minuend)
    subtrahend_text = convert_profdata_to_text(subtrahend)
    difference = get_difference(minuend_text, subtrahend_text)
    basename = os.path.basename(minuend_text)
    difference_text = os.path.join(difference_dir, basename)
    with open(difference_text, 'w', encoding='utf-8') as file_handle:
      file_handle.write(difference.to_string())
    difference_profdata = convert_text_profdata_to_bin(difference_text)
    if not difference_profdata.endswith('merged.profdata'):
      # Per-target report: coverage for this target's binary only.
      generate_html_report(difference_profdata, [binobject],
                           os.path.join(difference_dir, binobject))
    else:
      # Merged report: coverage across all real binaries.
      generate_html_report(difference_profdata, real_profdata_objects,
                           os.path.join(difference_dir, 'merged'))
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def generate_html_report(profdata, objects, directory):
  """Generate an HTML coverage report with llvm-cov.

  Args:
    profdata: path to the binary profdata file to report on.
    objects: binary names, relative to $OUT, to pass to llvm-cov.
    directory: directory in which the 'reports' subdirectory is created.
  """
  # TODO(metzman): Deal with shared libs.
  html_dir = os.path.join(directory, 'reports')
  if os.path.exists(html_dir):
    # Bug fix: html_dir is a directory, so os.remove() would raise
    # IsADirectoryError; use shutil.rmtree instead.
    shutil.rmtree(html_dir)
  os.makedirs(html_dir)
  out_dir = os.getenv('OUT', '/out')
  command = [
      'llvm-cov', 'show', f'-path-equivalence=/,{out_dir}', '-format=html',
      '-Xdemangler', 'rcfilt', f'-instr-profile={profdata}'
  ]

  objects = [os.path.join(out_dir, binobject) for binobject in objects]
  command += objects + ['-o', html_dir]
  print(' '.join(command))
  subprocess.run(command, check=True)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def main():
  """Generate differential coverage reports.

  Expects three command-line arguments: the minuend directory, the
  subtrahend directory, and the output (difference) directory.
  """
  if len(sys.argv) != 4:
    print(
        f'Usage: {sys.argv[0]} <minuend_dir> <subtrahend_dir> <difference_dir>')
    # Bug fix: previously execution fell through after printing usage and
    # crashed with IndexError on sys.argv[1].
    return 1
  minuend_dir = sys.argv[1]
  subtrahend_dir = sys.argv[2]
  difference_dir = sys.argv[3]
  # Start from a clean difference directory so stale reports never linger.
  if os.path.exists(difference_dir):
    shutil.rmtree(difference_dir)
  os.makedirs(difference_dir, exist_ok=True)
  minuend_profdatas = get_profdata_files(minuend_dir)
  subtrahend_profdatas = get_profdata_files(subtrahend_dir)
  generate_differential_cov_reports(minuend_profdatas, subtrahend_profdatas,
                                    difference_dir)
  return 0
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
# Script entry point.
if __name__ == '__main__':
  main()
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/install_deps.sh
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eux
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################

# Install dependencies in a platform-aware way.

# Base packages needed on every architecture; --no-install-recommends keeps
# the image small.
apt-get update && apt-get install -y \
    binutils \
    file \
    ca-certificates \
    fonts-dejavu \
    git \
    libcap2 \
    rsync \
    unzip \
    wget \
    zip --no-install-recommends

case $(uname -m) in
  x86_64)
    # We only need to worry about i386 if we are on x86_64.
    # 32-bit runtime libraries allow running i386 targets on x86_64 hosts.
    apt-get install -y lib32gcc1 libc6-i386
    ;;
esac
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/install_java.sh
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eux
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################

# Install java in a platform-aware way.
# NOTE(review): expects JAVA_HOME and JAVA_15_HOME to already be set in the
# environment (e.g. by the Dockerfile) — confirm before running standalone.

# Map the machine architecture to the JDK download's naming scheme.
ARCHITECTURE=
case $(uname -m) in
  x86_64)
    ARCHITECTURE=x64
    ;;
  aarch64)
    ARCHITECTURE=aarch64
    ;;
  *)
    echo "Error: unsupported architecture: $(uname -m)"
    exit 1
    ;;
esac

# Download both JDK archives for this architecture.
wget -q https://download.java.net/java/GA/jdk17.0.2/dfd4a8d0985749f896bed50d7138ee7f/8/GPL/openjdk-17.0.2_linux-"$ARCHITECTURE"_bin.tar.gz -O /tmp/openjdk-17.0.2_linux-"$ARCHITECTURE"_bin.tar.gz
wget -q https://download.java.net/java/GA/jdk15.0.2/0d1cfde4252546c6931946de8db48ee2/7/GPL/openjdk-15.0.2_linux-"$ARCHITECTURE"_bin.tar.gz -O /tmp/openjdk-15.0.2_linux-"$ARCHITECTURE"_bin.tar.gz
cd /tmp
# Install OpenJDK 17 into $JAVA_HOME and trim its size by removing unused
# components (jmods, bundled sources).
mkdir -p $JAVA_HOME
tar -xz --strip-components=1 -f openjdk-17.0.2_linux-"$ARCHITECTURE"_bin.tar.gz --directory $JAVA_HOME
rm -f openjdk-17.0.2_linux-"$ARCHITECTURE"_bin.tar.gz
rm -rf $JAVA_HOME/jmods $JAVA_HOME/lib/src.zip

# Install OpenJDK 15 and trim its size by removing unused components. Some projects only run with Java 15.
mkdir -p $JAVA_15_HOME
tar -xz --strip-components=1 -f openjdk-15.0.2_linux-"$ARCHITECTURE"_bin.tar.gz --directory $JAVA_15_HOME
rm -f openjdk-15.0.2_linux-"$ARCHITECTURE"_bin.tar.gz
rm -rf $JAVA_15_HOME/jmods $JAVA_15_HOME/lib/src.zip
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/install_javascript.sh
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eux
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# see installation instructions: https://github.com/nodesource/distributions#available-architectures

# Prerequisites for fetching and verifying the NodeSource apt repository key.
apt-get update
apt-get install -y ca-certificates curl gnupg
mkdir -p /etc/apt/keyrings
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg

# Pin the Node.js major version installed from NodeSource.
NODE_MAJOR=20
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list

apt-get update
apt-get install nodejs -y

# Install latest versions of nyc for source-based coverage reporting
npm install --global nyc
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/jacoco_report_converter.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# Copyright 2021 Google LLC
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
################################################################################
|
| 17 |
+
"""Helper script for creating an llvm-cov style JSON summary from a JaCoCo XML
|
| 18 |
+
report."""
|
| 19 |
+
import json
|
| 20 |
+
import os
|
| 21 |
+
import sys
|
| 22 |
+
import xml.etree.ElementTree as ET
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def convert(xml):
  """Turns a JaCoCo XML report into an llvm-cov JSON summary.

  Args:
    xml: the JaCoCo XML report as a string.

  Returns:
    A JSON string in llvm-cov's export summary format.
  """
  summary = {
      'type': 'oss-fuzz.java.coverage.json.export',
      'version': '1.0.0',
      'data': [{
          'totals': {},
          'files': [],
      }],
  }

  report = ET.fromstring(xml)
  # The report root's own counters provide the overall totals.
  totals = make_element_summary(report)
  summary['data'][0]['totals'] = totals

  # Since Java compilation does not track source file location, we match
  # coverage info to source files via the full class name, e.g. we search for
  # a path in /out/src ending in foo/bar/Baz.java for the class foo.bar.Baz.
  # Under the assumptions that a given project only ever contains a single
  # version of a class and that no class name appears as a suffix of another
  # class name, we can assign coverage info to every source file matched in that
  # way.
  src_files = list_src_files()

  for class_element in report.findall('./package/class'):
    # Skip fuzzer classes
    if is_fuzzer_class(class_element):
      continue

    # Skip non class elements
    if 'sourcefilename' not in class_element.attrib:
      continue

    class_name = class_element.attrib['name']
    package_name = os.path.dirname(class_name)
    basename = class_element.attrib['sourcefilename']
    # This path is 'foo/Bar.java' for the class element
    # <class name="foo/Bar" sourcefilename="Bar.java">.
    canonical_path = os.path.join(package_name, basename)

    class_summary = make_element_summary(class_element)
    for src_file in relative_to_src_path(src_files, canonical_path):
      summary['data'][0]['files'].append({
          'filename': src_file,
          'summary': class_summary,
      })

  return json.dumps(summary)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def list_src_files():
  """Returns a map from basename to full paths for all files in $OUT/$SRC.

  Paths are reported relative to $OUT, i.e. '/out//src/foo' becomes
  '/src/foo'.
  """
  filename_to_paths = {}
  out_prefix = os.environ['OUT'] + '/'
  # Sources are copied under $OUT for coverage builds, so walk $OUT/$SRC.
  search_root = out_prefix + os.environ['SRC']
  for parent, _, names in os.walk(search_root):
    for name in names:
      # Strip the '/out/' prefix so paths start at /src/... again.
      relative_path = (parent + '/' + name)[len(out_prefix):]
      filename_to_paths.setdefault(name, []).append(relative_path)
  return filename_to_paths
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def is_fuzzer_class(class_element):
  """Check if the class element represents a fuzz harness class.

  A class counts as a fuzzer if it declares a fuzzerTestOneInput method.
  """
  method_element = class_element.find('./method[@name="fuzzerTestOneInput"]')
  # Bug fix: compare against None explicitly. ElementTree Elements with no
  # children are falsy, so `if method_element:` would wrongly return False
  # for a matching <method> element that happens to be empty.
  return method_element is not None
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def relative_to_src_path(src_files, canonical_path):
  """Returns all paths in src_files ending in canonical_path.

  src_files maps basenames to lists of candidate paths (see list_src_files).
  """
  basename = os.path.basename(canonical_path)
  candidates = src_files.get(basename)
  if candidates is None:
    return []
  # Require a path-separator boundary so 'Bar.java' never matches
  # 'FooBar.java'.
  suffix = "/" + canonical_path
  return [path for path in candidates if path.endswith(suffix)]
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def make_element_summary(element):
  """Returns a coverage summary for an element in the XML report."""
  method_counter = element.find("./counter[@type='METHOD']")
  line_counter = element.find("./counter[@type='LINE']")

  # JaCoCo tracks branch coverage, which counts the covered control-flow edges
  # between llvm-cov's regions instead of the covered regions themselves. For
  # non-trivial code parts, the difference is usually negligible. However, if
  # all methods of a class consist of a single region only (no branches),
  # JaCoCo does not report any branch coverage even if there is instruction
  # coverage. Since this would give incorrect results for CI Fuzz purposes, we
  # increase the regions counter by 1 if there is any amount of instruction
  # coverage.
  instruction_counter = element.find("./counter[@type='INSTRUCTION']")
  has_some_coverage = (instruction_counter is not None and
                       int(instruction_counter.attrib["covered"]) > 0)
  adjustment = 1 if has_some_coverage else 0
  branch_counter = element.find("./counter[@type='BRANCH']")

  return {
      'functions': make_counter_summary(method_counter),
      'lines': make_counter_summary(line_counter),
      'regions': make_counter_summary(branch_counter,
                                      covered_adjustment=adjustment),
  }
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def make_counter_summary(counter_element, covered_adjustment=0):
  """Turns a JaCoCo <counter> element into an llvm-cov totals entry.

  counter_element may be None, in which case only covered_adjustment
  contributes to the counts.
  """
  covered = covered_adjustment
  missed = 0
  if counter_element is not None:
    covered += int(counter_element.attrib['covered'])
    missed += int(counter_element.attrib['missed'])
  total = covered + missed
  # Guard against division by zero for elements with no counts at all.
  percent = (100.0 * covered) / total if total != 0 else 0
  return {
      'covered': covered,
      'notcovered': missed,
      'count': total,
      'percent': percent,
  }
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def main():
  """Produces an llvm-cov style JSON summary from a JaCoCo XML report.

  Expects two command-line arguments: the JaCoCo XML input path and the
  JSON output path. Returns 0 on success, 1 on usage error.
  """
  if len(sys.argv) != 3:
    sys.stderr.write('Usage: %s <path_to_jacoco_xml> <out_path_json>\n' %
                     sys.argv[0])
    return 1

  # JaCoCo emits UTF-8 XML; read and write explicitly as UTF-8 instead of
  # relying on the platform's default encoding.
  with open(sys.argv[1], 'r', encoding='utf-8') as xml_file:
    xml_report = xml_file.read()
  json_summary = convert(xml_report)
  with open(sys.argv[2], 'w', encoding='utf-8') as json_file:
    json_file.write(json_summary)

  return 0
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main())
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/reproduce
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -eux
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################

# Reproduce a crash: run the given fuzzer on $TESTCASE (default /testcase)
# with the libFuzzer engine in interactive mode. Any extra arguments are
# forwarded to the fuzzer.

FUZZER=$1
shift

if [ ! -v TESTCASE ]; then
  TESTCASE="/testcase"
fi

# Bug fix: quote $TESTCASE so paths containing spaces don't break the test.
if [ ! -f "$TESTCASE" ]; then
  echo "Error: $TESTCASE not found, use: docker run -v <path>:$TESTCASE ..."
  exit 1
fi

export RUN_FUZZER_MODE="interactive"
export FUZZING_ENGINE="libfuzzer"
export SKIP_SEED_CORPUS="1"

# Bug fix: quote "$FUZZER", "$@" and "$TESTCASE" so arguments with spaces
# are passed through intact instead of being word-split.
run_fuzzer "$FUZZER" "$@" "$TESTCASE"
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/targets_list
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Print the basename of every fuzz target binary found under $OUT.
for binary in $(find $OUT/ -executable -type f); do
  # Skip shared objects.
  [[ "$binary" != *.so ]] || continue
  # Skip the Jazzer driver binaries; they are not targets themselves.
  [[ $(basename "$binary") != jazzer_driver* ]] || continue
  # Keep only ELF binaries and shell-script wrappers.
  file "$binary" | grep -e ELF -e "shell script" > /dev/null 2>&1 || continue
  # Keep only files that embed a fuzzer entry point symbol/string.
  grep "LLVMFuzzerTestOneInput" "$binary" > /dev/null 2>&1 || continue

  basename "$binary"
done
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/test_all_test.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 Google LLC
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
#
|
| 15 |
+
################################################################################
|
| 16 |
+
"""Tests test_all.py"""
|
| 17 |
+
import unittest
|
| 18 |
+
from unittest import mock
|
| 19 |
+
|
| 20 |
+
import test_all
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TestTestAll(unittest.TestCase):
  """Tests for the test_all function."""

  # Decorators apply bottom-up: the 'builtins.print' patch binds to
  # mock_print, and the unused final parameter receives the patched
  # find_fuzz_targets mock.
  @mock.patch('test_all.find_fuzz_targets', return_value=[])
  @mock.patch('builtins.print')
  def test_test_all_no_fuzz_targets(self, mock_print, _):
    """Tests that test_all returns False when there are no fuzz targets."""
    outdir = '/out'
    allowed_broken_targets_percentage = 0
    self.assertFalse(
        test_all.test_all(outdir, allowed_broken_targets_percentage))
    mock_print.assert_called_with('ERROR: No fuzz targets found.')
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Test-runner entry point.
if __name__ == '__main__':
  unittest.main()
|
local-test-tika-delta-01/fuzz-tooling/infra/base-images/base-runner/test_one.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# Copyright 2021 Google LLC
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
#
|
| 16 |
+
################################################################################
|
| 17 |
+
"""Does bad_build_check on a fuzz target in $OUT."""
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
|
| 21 |
+
import test_all
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_one(fuzz_target):
  """Does bad_build_check on one fuzz target. Returns True on success.

  On failure, the check's combined stdout/stderr is echoed to stdout.
  """
  with test_all.use_different_out_dir():
    target_path = os.path.join(os.environ['OUT'], fuzz_target)
    check_result = test_all.do_bad_build_check(target_path)
    if check_result.returncode == 0:
      return True
    # Surface the check's output so the failure is diagnosable.
    sys.stdout.buffer.write(check_result.stdout + check_result.stderr + b'\n')
    return False
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def main():
  """Does bad_build_check on one fuzz target. Returns 1 on failure, 0 on
  success."""
  if len(sys.argv) != 2:
    # Bug fix: the old call was print('Usage: %d <fuzz_target>', sys.argv[0]),
    # which never applied the (wrong-typed) format and printed the literal
    # format string plus the program name as two separate arguments.
    print('Usage: %s <fuzz_target>' % sys.argv[0])
    return 1

  fuzz_target_binary = sys.argv[1]
  return 0 if test_one(fuzz_target_binary) else 1
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main())
|