python_code
stringlengths
0
679k
repo_name
stringlengths
9
41
file_path
stringlengths
6
149
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

"""Functions to compress and decompress data using the Brotli library."""

import _brotli

# Version string exported by the C extension.
__version__ = _brotli.__version__

# Compression modes understood by the encoder.
MODE_GENERIC = _brotli.MODE_GENERIC
MODE_TEXT = _brotli.MODE_TEXT
MODE_FONT = _brotli.MODE_FONT

# Streaming compressor / decompressor classes (implemented in C).
Compressor = _brotli.Compressor
Decompressor = _brotli.Decompressor


def compress(string, mode=MODE_GENERIC, quality=11, lgwin=22, lgblock=0):
    """Compress a byte string in one shot.

    Args:
      string (bytes): The input data.
      mode (int, optional): MODE_GENERIC (default), MODE_TEXT (for UTF-8
        format text input) or MODE_FONT (for WOFF 2.0).
      quality (int, optional): Compression-speed vs compression-density
        trade-off, range 0 to 11; higher is slower but denser. Defaults to 11.
      lgwin (int, optional): Base-2 logarithm of the sliding window size,
        range 10 to 24. Defaults to 22.
      lgblock (int, optional): Base-2 logarithm of the maximum input block
        size, range 16 to 24, or 0 to derive it from the quality.
        Defaults to 0.

    Returns:
      The compressed byte string.

    Raises:
      brotli.error: If arguments are invalid, or compressor fails.
    """
    encoder = Compressor(mode=mode, quality=quality, lgwin=lgwin,
                         lgblock=lgblock)
    body = encoder.process(string)
    return body + encoder.finish()


# One-shot decompression of a complete brotli stream.
decompress = _brotli.decompress

# Exception type raised if compression or decompression fails.
error = _brotli.error
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/brotli.py
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

import unittest

from . import _test_utils
import brotli


class TestCompress(_test_utils.TestCase):

    # Option combinations exercised by the generated test methods.
    VARIANTS = {'quality': (1, 6, 9, 11), 'lgwin': (10, 15, 20, 24)}

    def _check_decompression(self, test_data, **kwargs):
        # Deliberately drop every keyword: the compression-only options
        # (quality/lgwin) must not reach brotli.decompress.
        kwargs = {}
        # Write decompression to temp file and verify it matches the original.
        uncompressed_path = _test_utils.get_temp_uncompressed_name(test_data)
        compressed_path = _test_utils.get_temp_compressed_name(test_data)
        with open(uncompressed_path, 'wb') as dst:
            with open(compressed_path, 'rb') as src:
                dst.write(brotli.decompress(src.read(), **kwargs))
        self.assertFilesMatch(uncompressed_path, test_data)

    def _compress(self, test_data, **kwargs):
        # Compress test_data into its temp '.bro' file with the given options.
        compressed_path = _test_utils.get_temp_compressed_name(test_data)
        with open(compressed_path, 'wb') as dst:
            with open(test_data, 'rb') as src:
                dst.write(brotli.compress(src.read(), **kwargs))

    def _test_compress(self, test_data, **kwargs):
        # Round-trip: compress, then verify lossless decompression.
        self._compress(test_data, **kwargs)
        self._check_decompression(test_data, **kwargs)


_test_utils.generate_test_methods(TestCompress, variants=TestCompress.VARIANTS)

if __name__ == '__main__':
    unittest.main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/compress_test.py
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

import subprocess
import unittest

from . import _test_utils
import brotli


BRO_ARGS = _test_utils.BRO_ARGS
TEST_ENV = _test_utils.TEST_ENV


def _get_original_name(test_data):
    """Strip the '.compressed' suffix to recover the reference file name."""
    return test_data.split('.compressed')[0]


class TestBroDecompress(_test_utils.TestCase):
    """Drives the `bro` command-line tool in decompression mode."""

    def _check_decompression(self, test_data):
        # Verify decompression matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        original = _get_original_name(test_data)
        self.assertFilesMatch(temp_uncompressed, original)

    def _decompress_file(self, test_data):
        # Decompress via -i/-o file arguments.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        args = BRO_ARGS + ['-f', '-d', '-i', test_data, '-o', temp_uncompressed]
        subprocess.check_call(args, env=TEST_ENV)

    def _decompress_pipe(self, test_data):
        # Decompress via stdin/stdout redirection.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        args = BRO_ARGS + ['-d']
        with open(temp_uncompressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                subprocess.check_call(
                    args, stdin=in_file, stdout=out_file, env=TEST_ENV)

    def _test_decompress_file(self, test_data):
        self._decompress_file(test_data)
        self._check_decompression(test_data)

    def _test_decompress_pipe(self, test_data):
        self._decompress_pipe(test_data)
        self._check_decompression(test_data)


_test_utils.generate_test_methods(TestBroDecompress, for_decompression=True)


class TestBroCompress(_test_utils.TestCase):
    """Drives the `bro` command-line tool in compression mode."""

    VARIANTS = {'quality': (1, 6, 9, 11), 'lgwin': (10, 15, 20, 24)}

    def _check_decompression(self, test_data, **kwargs):
        # Write decompression to temp file and verify it matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        original = test_data
        args = BRO_ARGS + ['-f', '-d']
        args.extend(['-i', temp_compressed, '-o', temp_uncompressed])
        subprocess.check_call(args, env=TEST_ENV)
        self.assertFilesMatch(temp_uncompressed, original)

    def _compress_file(self, test_data, **kwargs):
        # Compress via -i/-o file arguments, honoring quality/lgwin variants.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        args = BRO_ARGS + ['-f']
        if 'quality' in kwargs:
            args.extend(['-q', str(kwargs['quality'])])
        if 'lgwin' in kwargs:
            args.extend(['--lgwin', str(kwargs['lgwin'])])
        args.extend(['-i', test_data, '-o', temp_compressed])
        subprocess.check_call(args, env=TEST_ENV)

    def _compress_pipe(self, test_data, **kwargs):
        # Compress via stdin/stdout redirection.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        # BUG FIX: the original did `args = BRO_ARGS` and then extend()-ed it,
        # aliasing and mutating the module-level BRO_ARGS list so that
        # '-q'/'--lgwin' flags accumulated across generated test methods.
        # Copy the list instead (as _compress_file's `BRO_ARGS + [...]` does).
        args = list(BRO_ARGS)
        if 'quality' in kwargs:
            args.extend(['-q', str(kwargs['quality'])])
        if 'lgwin' in kwargs:
            args.extend(['--lgwin', str(kwargs['lgwin'])])
        with open(temp_compressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                subprocess.check_call(
                    args, stdin=in_file, stdout=out_file, env=TEST_ENV)

    def _test_compress_file(self, test_data, **kwargs):
        self._compress_file(test_data, **kwargs)
        self._check_decompression(test_data)

    def _test_compress_pipe(self, test_data, **kwargs):
        self._compress_pipe(test_data, **kwargs)
        self._check_decompression(test_data)


_test_utils.generate_test_methods(
    TestBroCompress, variants=TestBroCompress.VARIANTS)

if __name__ == '__main__':
    unittest.main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/bro_test.py
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

import unittest

from . import _test_utils
import brotli


def _get_original_name(test_data):
    # The reference file is the compressed name minus its '.compressed' suffix.
    return test_data.split('.compressed')[0]


class TestDecompress(_test_utils.TestCase):

    def _check_decompression(self, test_data):
        # The decompressed temp file must byte-match the reference file.
        produced = _test_utils.get_temp_uncompressed_name(test_data)
        expected = _get_original_name(test_data)
        self.assertFilesMatch(produced, expected)

    def _decompress(self, test_data):
        # One-shot decompression into the temp file.
        produced = _test_utils.get_temp_uncompressed_name(test_data)
        with open(produced, 'wb') as dst:
            with open(test_data, 'rb') as src:
                dst.write(brotli.decompress(src.read()))

    def _test_decompress(self, test_data):
        self._decompress(test_data)
        self._check_decompression(test_data)

    def test_garbage_appended(self):
        # Trailing garbage after a valid stream must be rejected.
        with self.assertRaises(brotli.error):
            brotli.decompress(brotli.compress(b'a') + b'a')


_test_utils.generate_test_methods(TestDecompress, for_decompression=True)

if __name__ == '__main__':
    unittest.main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/decompress_test.py
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

import functools
import unittest

from . import _test_utils
import brotli


def _get_original_name(test_data):
    # The reference file is the compressed name minus its '.compressed' suffix.
    return test_data.split('.compressed')[0]


class TestDecompressor(_test_utils.TestCase):

    # Feed the decompressor one byte at a time to exercise streaming.
    CHUNK_SIZE = 1

    def setUp(self):
        self.decompressor = brotli.Decompressor()

    def tearDown(self):
        self.decompressor = None

    def _check_decompression(self, test_data):
        # The decompressed temp file must byte-match the reference file.
        produced = _test_utils.get_temp_uncompressed_name(test_data)
        expected = _get_original_name(test_data)
        self.assertFilesMatch(produced, expected)

    def _decompress(self, test_data):
        # Stream the input through the decompressor in CHUNK_SIZE pieces.
        produced = _test_utils.get_temp_uncompressed_name(test_data)
        with open(produced, 'wb') as dst:
            with open(test_data, 'rb') as src:
                read_chunk = functools.partial(src.read, self.CHUNK_SIZE)
                for piece in iter(read_chunk, b''):
                    dst.write(self.decompressor.process(piece))
        self.assertTrue(self.decompressor.is_finished())

    def _test_decompress(self, test_data):
        self._decompress(test_data)
        self._check_decompression(test_data)

    def test_garbage_appended(self):
        # Trailing garbage after a valid stream must be rejected.
        with self.assertRaises(brotli.error):
            self.decompressor.process(brotli.compress(b'a') + b'a')

    def test_already_finished(self):
        # Feeding data after the stream is complete must raise.
        self.decompressor.process(brotli.compress(b'a'))
        with self.assertRaises(brotli.error):
            self.decompressor.process(b'a')


_test_utils.generate_test_methods(TestDecompressor, for_decompression=True)

if __name__ == '__main__':
    unittest.main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/decompressor_test.py
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/__init__.py
from __future__ import print_function
import filecmp
import glob
import itertools
import os
import sys
import sysconfig
import tempfile
import unittest


project_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
test_dir = os.getenv("BROTLI_TESTS_PATH")
BRO_ARGS = [os.getenv("BROTLI_WRAPPER")]

# Fall back to in-tree defaults when the environment does not override them.
if test_dir is None:
    test_dir = os.path.join(project_dir, 'tests')
if BRO_ARGS[0] is None:
    python_exe = sys.executable or 'python'
    bro_path = os.path.join(project_dir, 'python', 'bro.py')
    BRO_ARGS = [python_exe, bro_path]

# Platform/version-specific distutils build folder; by default the distutils
# build base is in the same location as setup.py.
platform_lib_name = 'lib.{platform}-{version[0]}.{version[1]}'.format(
    platform=sysconfig.get_platform(), version=sys.version_info)
build_dir = os.path.join(project_dir, 'bin', platform_lib_name)

# Make the freshly-built extension importable here and in child processes
# (sys.path for this interpreter, PYTHONPATH for subprocesses).
if build_dir not in sys.path:
    sys.path.insert(0, build_dir)
TEST_ENV = os.environ.copy()
if 'PYTHONPATH' not in TEST_ENV:
    TEST_ENV['PYTHONPATH'] = build_dir
else:
    TEST_ENV['PYTHONPATH'] = build_dir + os.pathsep + TEST_ENV['PYTHONPATH']

TESTDATA_DIR = os.path.join(test_dir, 'testdata')

TESTDATA_FILES = [
    'empty',  # Empty file
    '10x10y',  # Small text
    'alice29.txt',  # Large text
    'random_org_10k.bin',  # Small data
    'mapsdatazrh',  # Large data
]

TESTDATA_PATHS = [os.path.join(TESTDATA_DIR, f) for f in TESTDATA_FILES]

TESTDATA_PATHS_FOR_DECOMPRESSION = glob.glob(
    os.path.join(TESTDATA_DIR, '*.compressed'))

TEMP_DIR = tempfile.mkdtemp()


def get_temp_compressed_name(filename):
    """Return the temp-dir path for the compressed flavour of *filename*."""
    return os.path.join(TEMP_DIR, os.path.basename(filename + '.bro'))


def get_temp_uncompressed_name(filename):
    """Return the temp-dir path for the decompressed flavour of *filename*."""
    return os.path.join(TEMP_DIR, os.path.basename(filename + '.unbro'))


def bind_method_args(method, *args, **kwargs):
    """Freeze *args/**kwargs into a no-argument bound-style test method."""
    return lambda self: method(self, *args, **kwargs)


def generate_test_methods(test_case_class,
                          for_decompression=False,
                          variants=None):
    # Add test methods for each test data file. This makes identifying
    # problems with specific compression scenarios easier.
    if for_decompression:
        paths = TESTDATA_PATHS_FOR_DECOMPRESSION
    else:
        paths = TESTDATA_PATHS
    opts = []
    if variants:
        # Build the cross-product of all variant options, e.g.
        # {'quality': (1, 6)} -> [('quality', 1)], [('quality', 6)], ...
        opts_list = []
        for key, values in variants.items():
            opts_list.append(list(itertools.product([key], values)))
        for combo in itertools.product(*opts_list):
            combo_name = '_'.join(
                str(item) for item in itertools.chain(*combo))
            opts.append([combo_name, dict(combo)])
    else:
        opts.append(['', {}])
    templates = [m for m in dir(test_case_class) if m.startswith('_test')]
    for method in templates:
        for testdata in paths:
            for (opts_name, opts_dict) in opts:
                stem = os.path.splitext(os.path.basename(testdata))[0]
                name = 'test_{method}_{options}_{file}'.format(
                    method=method, options=opts_name, file=stem)
                func = bind_method_args(
                    getattr(test_case_class, method), testdata, **opts_dict)
                setattr(test_case_class, name, func)


class TestCase(unittest.TestCase):

    def tearDown(self):
        # Best-effort cleanup of temp artefacts; missing files are fine.
        for f in TESTDATA_PATHS:
            try:
                os.unlink(get_temp_compressed_name(f))
            except OSError:
                pass
            try:
                os.unlink(get_temp_uncompressed_name(f))
            except OSError:
                pass

    def assertFilesMatch(self, first, second):
        self.assertTrue(
            filecmp.cmp(first, second, shallow=False),
            'File {} differs from {}'.format(first, second))
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/_test_utils.py
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

import functools
import unittest

from . import _test_utils
import brotli


# Deliberately NOT a TestCase subclass: these template methods must not run
# on their own — they only execute through the concrete quality-specific
# configurations declared at the bottom of the file.
class _TestCompressor(object):

    CHUNK_SIZE = 2048

    def tearDown(self):
        self.compressor = None

    def _check_decompression(self, test_data):
        # Decompress the temp file and verify it matches the original.
        uncompressed_path = _test_utils.get_temp_uncompressed_name(test_data)
        compressed_path = _test_utils.get_temp_compressed_name(test_data)
        with open(uncompressed_path, 'wb') as dst:
            with open(compressed_path, 'rb') as src:
                dst.write(brotli.decompress(src.read()))
        self.assertFilesMatch(uncompressed_path, test_data)

    def _test_single_process(self, test_data):
        # Single-shot: one process() call followed by finish().
        compressed_path = _test_utils.get_temp_compressed_name(test_data)
        with open(compressed_path, 'wb') as dst:
            with open(test_data, 'rb') as src:
                dst.write(self.compressor.process(src.read()))
            dst.write(self.compressor.finish())
        self._check_decompression(test_data)

    def _test_multiple_process(self, test_data):
        # Chunked: repeated process() calls, then finish().
        compressed_path = _test_utils.get_temp_compressed_name(test_data)
        with open(compressed_path, 'wb') as dst:
            with open(test_data, 'rb') as src:
                read_chunk = functools.partial(src.read, self.CHUNK_SIZE)
                for piece in iter(read_chunk, b''):
                    dst.write(self.compressor.process(piece))
            dst.write(self.compressor.finish())
        self._check_decompression(test_data)

    def _test_multiple_process_and_flush(self, test_data):
        # Chunked with a flush() after every chunk, then finish().
        compressed_path = _test_utils.get_temp_compressed_name(test_data)
        with open(compressed_path, 'wb') as dst:
            with open(test_data, 'rb') as src:
                read_chunk = functools.partial(src.read, self.CHUNK_SIZE)
                for piece in iter(read_chunk, b''):
                    dst.write(self.compressor.process(piece))
                    dst.write(self.compressor.flush())
            dst.write(self.compressor.finish())
        self._check_decompression(test_data)


_test_utils.generate_test_methods(_TestCompressor)


class TestCompressorQuality1(_TestCompressor, _test_utils.TestCase):

    def setUp(self):
        self.compressor = brotli.Compressor(quality=1)


class TestCompressorQuality6(_TestCompressor, _test_utils.TestCase):

    def setUp(self):
        self.compressor = brotli.Compressor(quality=6)


class TestCompressorQuality9(_TestCompressor, _test_utils.TestCase):

    def setUp(self):
        self.compressor = brotli.Compressor(quality=9)


class TestCompressorQuality11(_TestCompressor, _test_utils.TestCase):

    def setUp(self):
        self.compressor = brotli.Compressor(quality=11)


if __name__ == '__main__':
    unittest.main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/tests/compressor_test.py
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Unit test for the gtest_xml_output module""" import datetime import errno import os import re import sys from xml.dom import minidom, Node from googletest.test import gtest_test_utils from googletest.test import gtest_xml_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' GTEST_OUTPUT_FLAG = '--gtest_output' GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_' # The flag indicating stacktraces are not supported NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv if SUPPORTS_STACK_TRACES: STACK_TRACE_TEMPLATE = '\nStack trace:\n*' else: STACK_TRACE_TEMPLATE = '' # unittest.main() can't handle unknown flags sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG) EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="26" failures="5" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42"> <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="Succeeds" file="gtest_xml_output_unittest_.cc" line="51" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/> </testsuite> <testsuite name="FailedTest" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="Fails" file="gtest_xml_output_unittest_.cc" line="59" status="run" result="completed" time="*" timestamp="*" classname="FailedTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Expected equality of these values:&#x0A; 1&#x0A; 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Expected equality of these values: 1 2%(stack)s]]></failure> </testcase> </testsuite> <testsuite 
name="MixedResultTest" tests="3" failures="1" disabled="1" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="Succeeds" file="gtest_xml_output_unittest_.cc" line="86" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest"/> <testcase name="Fails" file="gtest_xml_output_unittest_.cc" line="91" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Expected equality of these values:&#x0A; 1&#x0A; 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Expected equality of these values: 1 2%(stack)s]]></failure> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Expected equality of these values:&#x0A; 2&#x0A; 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Expected equality of these values: 2 3%(stack)s]]></failure> </testcase> <testcase name="DISABLED_test" file="gtest_xml_output_unittest_.cc" line="96" status="notrun" result="suppressed" time="*" timestamp="*" classname="MixedResultTest"/> </testsuite> <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="OutputsCData" file="gtest_xml_output_unittest_.cc" line="100" status="run" result="completed" time="*" timestamp="*" classname="XmlQuotingTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Failed&#x0A;XML output: &lt;?xml encoding=&quot;utf-8&quot;&gt;&lt;top&gt;&lt;![CDATA[cdata text]]&gt;&lt;/top&gt;" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Failed XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]&gt;<![CDATA[</top>%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="InvalidCharactersInMessage" file="gtest_xml_output_unittest_.cc" line="107" status="run" result="completed" time="*" timestamp="*" classname="InvalidCharactersTest"> <failure 
message="gtest_xml_output_unittest_.cc:*&#x0A;Failed&#x0A;Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Failed Invalid characters in brackets []%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="DISABLED_test_not_run" file="gtest_xml_output_unittest_.cc" line="66" status="notrun" result="suppressed" time="*" timestamp="*" classname="DisabledTest"/> </testsuite> <testsuite name="SkippedTest" tests="3" failures="1" disabled="0" skipped="2" errors="0" time="*" timestamp="*"> <testcase name="Skipped" status="run" file="gtest_xml_output_unittest_.cc" line="73" result="skipped" time="*" timestamp="*" classname="SkippedTest"> <skipped message="gtest_xml_output_unittest_.cc:*&#x0A;"><![CDATA[gtest_xml_output_unittest_.cc:* %(stack)s]]></skipped> </testcase> <testcase name="SkippedWithMessage" file="gtest_xml_output_unittest_.cc" line="77" status="run" result="skipped" time="*" timestamp="*" classname="SkippedTest"> <skipped message="gtest_xml_output_unittest_.cc:*&#x0A;It is good practice to tell why you skip a test."><![CDATA[gtest_xml_output_unittest_.cc:* It is good practice to tell why you skip a test.%(stack)s]]></skipped> </testcase> <testcase name="SkippedAfterFailure" file="gtest_xml_output_unittest_.cc" line="81" status="run" result="completed" time="*" timestamp="*" classname="SkippedTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Expected equality of these values:&#x0A; 1&#x0A; 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Expected equality of these values: 1 2%(stack)s]]></failure> <skipped message="gtest_xml_output_unittest_.cc:*&#x0A;It is good practice to tell why you skip a test."><![CDATA[gtest_xml_output_unittest_.cc:* It is good practice to tell why you skip a test.%(stack)s]]></skipped> </testcase> </testsuite> <testsuite name="PropertyRecordingTest" tests="4" 
failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye"> <testcase name="OneProperty" file="gtest_xml_output_unittest_.cc" line="119" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest"> <properties> <property name="key_1" value="1"/> </properties> </testcase> <testcase name="IntValuedProperty" file="gtest_xml_output_unittest_.cc" line="123" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest"> <properties> <property name="key_int" value="1"/> </properties> </testcase> <testcase name="ThreeProperties" file="gtest_xml_output_unittest_.cc" line="127" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest"> <properties> <property name="key_1" value="1"/> <property name="key_2" value="2"/> <property name="key_3" value="3"/> </properties> </testcase> <testcase name="TwoValuesForOneKeyUsesLastValue" file="gtest_xml_output_unittest_.cc" line="133" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest"> <properties> <property name="key_1" value="2"/> </properties> </testcase> </testsuite> <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="RecordProperty" file="gtest_xml_output_unittest_.cc" line="138" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest"> <properties> <property name="key" value="1"/> </properties> </testcase> <testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" file="gtest_xml_output_unittest_.cc" line="151" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest"> <properties> <property name="key_for_utility_int" value="1"/> </properties> </testcase> <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" file="gtest_xml_output_unittest_.cc" line="155" status="run" result="completed" time="*" timestamp="*" 
classname="NoFixtureTest"> <properties> <property name="key_for_utility_string" value="1"/> </properties> </testcase> </testsuite> <testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="HasValueParamAttribute/0" file="gtest_xml_output_unittest_.cc" line="162" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" /> <testcase name="HasValueParamAttribute/1" file="gtest_xml_output_unittest_.cc" line="162" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" /> <testcase name="AnotherTestThatHasValueParamAttribute/0" file="gtest_xml_output_unittest_.cc" line="163" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" /> <testcase name="AnotherTestThatHasValueParamAttribute/1" file="gtest_xml_output_unittest_.cc" line="163" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" /> </testsuite> <testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="HasTypeParamAttribute" file="gtest_xml_output_unittest_.cc" line="171" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/0" /> </testsuite> <testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="HasTypeParamAttribute" file="gtest_xml_output_unittest_.cc" line="171" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/1" /> </testsuite> <testsuite name="Single/TypeParameterizedTestSuite/0" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="HasTypeParamAttribute" file="gtest_xml_output_unittest_.cc" line="178" type_param="*" status="run" result="completed" time="*" 
timestamp="*" classname="Single/TypeParameterizedTestSuite/0" /> </testsuite> <testsuite name="Single/TypeParameterizedTestSuite/1" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="HasTypeParamAttribute" file="gtest_xml_output_unittest_.cc" line="178" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/1" /> </testsuite> </testsuites>""" % { 'stack': STACK_TRACE_TEMPLATE } EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42"> <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="Succeeds" file="gtest_xml_output_unittest_.cc" line="51" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/> </testsuite> </testsuites>""" EXPECTED_SHARDED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42"> <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="Succeeds" file="gtest_xml_output_unittest_.cc" line="51" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/> </testsuite> <testsuite name="PropertyRecordingTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye"> <testcase name="IntValuedProperty" file="gtest_xml_output_unittest_.cc" line="123" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest"> <properties> <property name="key_int" value="1"/> </properties> </testcase> </testsuite> <testsuite name="Single/ValueParamTest" tests="1" failures="0" disabled="0" skipped="0" errors="0" time="*" 
timestamp="*"> <testcase name="HasValueParamAttribute/0" file="gtest_xml_output_unittest_.cc" line="162" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" /> </testsuite> </testsuites>""" EXPECTED_NO_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="0" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests"> <testsuite name="NonTestSuiteFailure" tests="1" failures="1" disabled="0" skipped="0" errors="0" time="*" timestamp="*"> <testcase name="" status="run" result="completed" time="*" timestamp="*" classname=""> <failure message="gtest_no_test_unittest.cc:*&#x0A;Expected equality of these values:&#x0A; 1&#x0A; 2" type=""><![CDATA[gtest_no_test_unittest.cc:* Expected equality of these values: 1 2%(stack)s]]></failure> </testcase> </testsuite> </testsuites>""" % { 'stack': STACK_TRACE_TEMPLATE } GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME) SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess( [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase): """ Unit test for Google Test's XML output functionality. """ # This test currently breaks on platforms that do not support typed and # type-parameterized tests, so we don't run it under them. if SUPPORTS_TYPED_TESTS: def testNonEmptyXmlOutput(self): """ Runs a test program that generates a non-empty XML output, and tests that the XML output is expected. """ self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1) def testNoTestXmlOutput(self): """Verifies XML output for a Google Test binary without actual tests. Runs a test program that generates an XML output for a binary without tests, and tests that the XML output is expected. 
""" self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_NO_TEST_XML, 0) def testTimestampValue(self): """Checks whether the timestamp attribute in the XML output is valid. Runs a test program that generates an empty XML output, and checks if the timestamp attribute in the testsuites tag is valid. """ actual = self._GetXmlOutput('gtest_no_test_unittest', [], {}, 0) date_time_str = actual.documentElement.getAttributeNode('timestamp').value # datetime.strptime() is only available in Python 2.5+ so we have to # parse the expected datetime manually. match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str) self.assertTrue( re.match, 'XML datettime string %s has incorrect format' % date_time_str) date_time_from_xml = datetime.datetime( year=int(match.group(1)), month=int(match.group(2)), day=int(match.group(3)), hour=int(match.group(4)), minute=int(match.group(5)), second=int(match.group(6))) time_delta = abs(datetime.datetime.now() - date_time_from_xml) # timestamp value should be near the current local time self.assertTrue(time_delta < datetime.timedelta(seconds=600), 'time_delta is %s' % time_delta) actual.unlink() def testDefaultOutputFile(self): """ Confirms that Google Test produces an XML output file with the expected default name if no name is explicitly specified. """ output_file = os.path.join(gtest_test_utils.GetTempDir(), GTEST_DEFAULT_OUTPUT_FILE) gtest_prog_path = gtest_test_utils.GetTestExecutablePath( 'gtest_no_test_unittest') try: os.remove(output_file) except OSError: e = sys.exc_info()[1] if e.errno != errno.ENOENT: raise p = gtest_test_utils.Subprocess( [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG], working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) self.assert_(os.path.isfile(output_file)) def testSuppressedXmlOutput(self): """ Tests that no XML file is generated if the default XML listener is shut down before RUN_ALL_TESTS is invoked. 
""" xml_path = os.path.join(gtest_test_utils.GetTempDir(), GTEST_PROGRAM_NAME + 'out.xml') if os.path.isfile(xml_path): os.remove(xml_path) command = [GTEST_PROGRAM_PATH, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path), '--shut_down_xml'] p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: # p.signal is available only if p.terminated_by_signal is True. self.assertFalse( p.terminated_by_signal, '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal)) else: self.assert_(p.exited) self.assertEquals(1, p.exit_code, "'%s' exited with code %s, which doesn't match " 'the expected exit code %s.' % (command, p.exit_code, 1)) self.assert_(not os.path.isfile(xml_path)) def testFilteredTestXmlOutput(self): """Verifies XML output when a filter is applied. Runs a test program that executes only some tests and verifies that non-selected tests do not show up in the XML output. """ self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0, extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG]) def testShardedTestXmlOutput(self): """Verifies XML output when run using multiple shards. Runs a test program that executes only one shard and verifies that tests from other shards do not show up in the XML output. """ self._TestXmlOutput( GTEST_PROGRAM_NAME, EXPECTED_SHARDED_TEST_XML, 0, extra_env={SHARD_INDEX_ENV_VAR: '0', TOTAL_SHARDS_ENV_VAR: '10'}) def _GetXmlOutput(self, gtest_prog_name, extra_args, extra_env, expected_exit_code): """ Returns the xml output generated by running the program gtest_prog_name. Furthermore, the program's exit code must be expected_exit_code. 
""" xml_path = os.path.join(gtest_test_utils.GetTempDir(), gtest_prog_name + 'out.xml') gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name) command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] + extra_args) environ_copy = os.environ.copy() if extra_env: environ_copy.update(extra_env) p = gtest_test_utils.Subprocess(command, env=environ_copy) if p.terminated_by_signal: self.assert_(False, '%s was killed by signal %d' % (gtest_prog_name, p.signal)) else: self.assert_(p.exited) self.assertEquals(expected_exit_code, p.exit_code, "'%s' exited with code %s, which doesn't match " 'the expected exit code %s.' % (command, p.exit_code, expected_exit_code)) actual = minidom.parse(xml_path) return actual def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code, extra_args=None, extra_env=None): """ Asserts that the XML document generated by running the program gtest_prog_name matches expected_xml, a string containing another XML document. Furthermore, the program's exit code must be expected_exit_code. """ actual = self._GetXmlOutput(gtest_prog_name, extra_args or [], extra_env or {}, expected_exit_code) expected = minidom.parseString(expected_xml) self.NormalizeXml(actual.documentElement) self.AssertEquivalentNodes(expected.documentElement, actual.documentElement) expected.unlink() actual.unlink() if __name__ == '__main__': os.environ['GTEST_STACK_TRACE_DEPTH'] = '1' gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_xml_output_unittest.py
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. r"""Tests the text output of Google C++ Testing and Mocking Framework. To update the golden file: googletest_output_test.py --build_dir=BUILD/DIR --gengolden where BUILD/DIR contains the built googletest-output-test_ file. 
googletest_output_test.py --gengolden googletest_output_test.py """ import difflib import os import re import sys from googletest.test import gtest_test_utils # The flag for generating the golden file GENGOLDEN_FLAG = '--gengolden' CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS' # The flag indicating stacktraces are not supported NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' IS_WINDOWS = os.name == 'nt' GOLDEN_NAME = 'googletest-output-test-golden-lin.txt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('googletest-output-test_') # At least one command we exercise must not have the # 'internal_skip_environment_and_ad_hoc_tests' argument. COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests']) COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes']) COMMAND_WITH_TIME = ({}, [PROGRAM_PATH, '--gtest_print_time', 'internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=FatalFailureTest.*:LoggingTest.*']) COMMAND_WITH_DISABLED = ( {}, [PROGRAM_PATH, '--gtest_also_run_disabled_tests', 'internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=*DISABLED_*']) COMMAND_WITH_SHARDING = ( {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'}, [PROGRAM_PATH, 'internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=PassingTest.*']) GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME) def ToUnixLineEnding(s): """Changes all Windows/Mac line endings in s to UNIX line endings.""" return s.replace('\r\n', '\n').replace('\r', '\n') def RemoveLocations(test_output): """Removes all file location info from a Google Test program's output. Args: test_output: the output of a Google Test program. Returns: output with all file location info (in the form of 'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or 'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by 'FILE_NAME:#: '. 
""" return re.sub(r'.*[/\\]((googletest-output-test_|gtest).cc)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output) def RemoveStackTraceDetails(output): """Removes all stack traces from a Google Test program's output.""" # *? means "find the shortest string that matches". return re.sub(r'Stack trace:(.|\n)*?\n\n', 'Stack trace: (omitted)\n\n', output) def RemoveStackTraces(output): """Removes all traces of stack traces from a Google Test program's output.""" # *? means "find the shortest string that matches". return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output) def RemoveTime(output): """Removes all time information from a Google Test program's output.""" return re.sub(r'\(\d+ ms', '(? ms', output) def RemoveTypeInfoDetails(test_output): """Removes compiler-specific type info from Google Test program's output. Args: test_output: the output of a Google Test program. Returns: output with type information normalized to canonical form. """ # some compilers output the name of type 'unsigned int' as 'unsigned' return re.sub(r'unsigned int', 'unsigned', test_output) def NormalizeToCurrentPlatform(test_output): """Normalizes platform specific output details for easier comparison.""" if IS_WINDOWS: # Removes the color information that is not present on Windows. test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output) # Changes failure message headers into the Windows format. test_output = re.sub(r': Failure\n', r': error: ', test_output) # Changes file(line_number) to file:line_number. test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output) return test_output def RemoveTestCounts(output): """Removes test counts from a Google Test program's output.""" output = re.sub(r'\d+ tests?, listed below', '? tests, listed below', output) output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output) output = re.sub(r'\d+ tests? from \d+ test cases?', '? tests from ? test cases', output) output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? 
tests from \1', output) return re.sub(r'\d+ tests?\.', '? tests.', output) def RemoveMatchingTests(test_output, pattern): """Removes output of specified tests from a Google Test program's output. This function strips not only the beginning and the end of a test but also all output in between. Args: test_output: A string containing the test output. pattern: A regex string that matches names of test cases or tests to remove. Returns: Contents of test_output with tests whose names match pattern removed. """ test_output = re.sub( r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % ( pattern, pattern), '', test_output) return re.sub(r'.*%s.*\n' % pattern, '', test_output) def NormalizeOutput(output): """Normalizes output (the output of googletest-output-test_.exe).""" output = ToUnixLineEnding(output) output = RemoveLocations(output) output = RemoveStackTraceDetails(output) output = RemoveTime(output) return output def GetShellCommandOutput(env_cmd): """Runs a command in a sub-process, and returns its output in a string. Args: env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra environment variables to set, and element 1 is a string with the command and any flags. Returns: A string with the command's combined standard and diagnostic output. """ # Spawns cmd in a sub-process, and gets its standard I/O file objects. # Set and save the environment properly. environ = os.environ.copy() environ.update(env_cmd[0]) p = gtest_test_utils.Subprocess(env_cmd[1], env=environ) return p.output def GetCommandOutput(env_cmd): """Runs a command and returns its output with all file location info stripped off. Args: env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra environment variables to set, and element 1 is a string with the command and any flags. """ # Disables exception pop-ups on Windows. environ, cmdline = env_cmd environ = dict(environ) # Ensures we are modifying a copy. 
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1' return NormalizeOutput(GetShellCommandOutput((environ, cmdline))) def GetOutputOfAllCommands(): """Returns concatenated output from several representative commands.""" return (GetCommandOutput(COMMAND_WITH_COLOR) + GetCommandOutput(COMMAND_WITH_TIME) + GetCommandOutput(COMMAND_WITH_DISABLED) + GetCommandOutput(COMMAND_WITH_SHARDING)) test_list = GetShellCommandOutput(COMMAND_LIST_TESTS) SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and SUPPORTS_TYPED_TESTS and SUPPORTS_THREADS and SUPPORTS_STACK_TRACES) class GTestOutputTest(gtest_test_utils.TestCase): def RemoveUnsupportedTests(self, test_output): if not SUPPORTS_DEATH_TESTS: test_output = RemoveMatchingTests(test_output, 'DeathTest') if not SUPPORTS_TYPED_TESTS: test_output = RemoveMatchingTests(test_output, 'TypedTest') test_output = RemoveMatchingTests(test_output, 'TypedDeathTest') test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest') if not SUPPORTS_THREADS: test_output = RemoveMatchingTests(test_output, 'ExpectFailureWithThreadsTest') test_output = RemoveMatchingTests(test_output, 'ScopedFakeTestPartResultReporterTest') test_output = RemoveMatchingTests(test_output, 'WorksConcurrently') if not SUPPORTS_STACK_TRACES: test_output = RemoveStackTraces(test_output) return test_output def testOutput(self): output = GetOutputOfAllCommands() golden_file = open(GOLDEN_PATH, 'rb') # A mis-configured source control system can cause \r appear in EOL # sequences when we read the golden file irrespective of an operating # system used. Therefore, we need to strip those \r's from newlines # unconditionally. 
golden = ToUnixLineEnding(golden_file.read().decode()) golden_file.close() # We want the test to pass regardless of certain features being # supported or not. # We still have to remove type name specifics in all cases. normalized_actual = RemoveTypeInfoDetails(output) normalized_golden = RemoveTypeInfoDetails(golden) if CAN_GENERATE_GOLDEN_FILE: self.assertEqual(normalized_golden, normalized_actual, '\n'.join(difflib.unified_diff( normalized_golden.split('\n'), normalized_actual.split('\n'), 'golden', 'actual'))) else: normalized_actual = NormalizeToCurrentPlatform( RemoveTestCounts(normalized_actual)) normalized_golden = NormalizeToCurrentPlatform( RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden))) # This code is very handy when debugging golden file differences: if os.getenv('DEBUG_GTEST_OUTPUT_TEST'): open(os.path.join( gtest_test_utils.GetSourceDir(), '_googletest-output-test_normalized_actual.txt'), 'wb').write( normalized_actual) open(os.path.join( gtest_test_utils.GetSourceDir(), '_googletest-output-test_normalized_golden.txt'), 'wb').write( normalized_golden) self.assertEqual(normalized_golden, normalized_actual) if __name__ == '__main__': if NO_STACKTRACE_SUPPORT_FLAG in sys.argv: # unittest.main() can't handle unknown flags sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG) if GENGOLDEN_FLAG in sys.argv: if CAN_GENERATE_GOLDEN_FILE: output = GetOutputOfAllCommands() golden_file = open(GOLDEN_PATH, 'wb') golden_file.write(output.encode()) golden_file.close() else: message = ( """Unable to write a golden file when compiled in an environment that does not support all the required features (death tests, typed tests, stack traces, and multiple threads). Please build this test and generate the golden file using Blaze on Linux.""") sys.stderr.write(message) sys.exit(1) else: gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-output-test.py
#!/usr/bin/env python # # Copyright 2015 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test warns the user when not initialized properly.""" from googletest.test import gtest_test_utils binary_name = 'googletest-param-test-invalid-name1-test_' COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name) def Assert(condition): if not condition: raise AssertionError def TestExitCodeAndOutput(command): """Runs the given command and verifies its exit code and output.""" err = ('Parameterized test name \'"InvalidWithQuotes"\' is invalid') p = gtest_test_utils.Subprocess(command) Assert(p.terminated_by_signal) # Verify the output message contains appropriate output Assert(err in p.output) class GTestParamTestInvalidName1Test(gtest_test_utils.TestCase): def testExitCodeAndOutput(self): TestExitCodeAndOutput(COMMAND) if __name__ == '__main__': gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-param-test-invalid-name1-test.py
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests the --help flag of Google C++ Testing and Mocking Framework. SYNOPSIS gtest_help_test.py --build_dir=BUILD/DIR # where BUILD/DIR contains the built gtest_help_test_ file. 
gtest_help_test.py """ import os import re import sys from googletest.test import gtest_test_utils IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' IS_GNUHURD = os.name == 'posix' and os.uname()[0] == 'GNU' IS_GNUKFREEBSD = os.name == 'posix' and os.uname()[0] == 'GNU/kFreeBSD' IS_OPENBSD = os.name == 'posix' and os.uname()[0] == 'OpenBSD' IS_WINDOWS = os.name == 'nt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_') FLAG_PREFIX = '--gtest_' DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style' STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to' UNKNOWN_GTEST_PREFIXED_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing' LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing' SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess( [PROGRAM_PATH, LIST_TESTS_FLAG]).output HAS_ABSL_FLAGS = '--has_absl_flags' in sys.argv # The help message must match this regex. HELP_REGEX = re.compile( FLAG_PREFIX + r'list_tests.*' + FLAG_PREFIX + r'filter=.*' + FLAG_PREFIX + r'also_run_disabled_tests.*' + FLAG_PREFIX + r'repeat=.*' + FLAG_PREFIX + r'shuffle.*' + FLAG_PREFIX + r'random_seed=.*' + FLAG_PREFIX + r'color=.*' + FLAG_PREFIX + r'brief.*' + FLAG_PREFIX + r'print_time.*' + FLAG_PREFIX + r'output=.*' + FLAG_PREFIX + r'break_on_failure.*' + FLAG_PREFIX + r'throw_on_failure.*' + FLAG_PREFIX + r'catch_exceptions=0.*', re.DOTALL) def RunWithFlag(flag): """Runs gtest_help_test_ with the given flag. Returns: the exit code and the text output as a tuple. Args: flag: the command-line flag to pass to gtest_help_test_, or None. """ if flag is None: command = [PROGRAM_PATH] else: command = [PROGRAM_PATH, flag] child = gtest_test_utils.Subprocess(command) return child.exit_code, child.output class GTestHelpTest(gtest_test_utils.TestCase): """Tests the --help flag and its equivalent forms.""" def TestHelpFlag(self, flag): """Verifies correct behavior when help flag is specified. 
The right message must be printed and the tests must skipped when the given flag is specified. Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) if HAS_ABSL_FLAGS: # The Abseil flags library prints the ProgramUsageMessage() with # --help and returns 1. self.assertEqual(1, exit_code) else: self.assertEqual(0, exit_code) self.assertTrue(HELP_REGEX.search(output), output) if IS_LINUX or IS_GNUHURD or IS_GNUKFREEBSD or IS_OPENBSD: self.assertIn(STREAM_RESULT_TO_FLAG, output) else: self.assertNotIn(STREAM_RESULT_TO_FLAG, output) if SUPPORTS_DEATH_TESTS and not IS_WINDOWS: self.assertIn(DEATH_TEST_STYLE_FLAG, output) else: self.assertNotIn(DEATH_TEST_STYLE_FLAG, output) def TestUnknownFlagWithAbseil(self, flag): """Verifies correct behavior when an unknown flag is specified. The right message must be printed and the tests must skipped when the given flag is specified. Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) self.assertEqual(1, exit_code) self.assertIn('ERROR: Unknown command line flag', output) def TestNonHelpFlag(self, flag): """Verifies correct behavior when no help flag is specified. Verifies that when no help flag is specified, the tests are run and the help message is not printed. Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) self.assertNotEqual(exit_code, 0) self.assertFalse(HELP_REGEX.search(output), output) def testPrintsHelpWithFullFlag(self): self.TestHelpFlag('--help') def testPrintsHelpWithUnrecognizedGoogleTestFlag(self): # The behavior is slightly different when Abseil flags is # used. Abseil flags rejects all unknown flags, while the builtin # GTest flags implementation interprets an unknown flag with a # '--gtest_' prefix as a request for help. 
if HAS_ABSL_FLAGS: self.TestUnknownFlagWithAbseil(UNKNOWN_GTEST_PREFIXED_FLAG) else: self.TestHelpFlag(UNKNOWN_GTEST_PREFIXED_FLAG) def testRunsTestsWithoutHelpFlag(self): """Verifies that when no help flag is specified, the tests are run and the help message is not printed.""" self.TestNonHelpFlag(None) def testRunsTestsWithGtestInternalFlag(self): """Verifies that the tests are run and no help message is printed when a flag starting with Google Test prefix and 'internal_' is supplied.""" self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING) if __name__ == '__main__': if '--has_absl_flags' in sys.argv: sys.argv.remove('--has_absl_flags') gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_help_test.py
# Copyright 2021 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's global test environment behavior. A user can specify a global test environment via testing::AddGlobalTestEnvironment. Failures in the global environment should result in all unit tests being skipped. This script tests such functionality by invoking googletest-global-environment-unittest_ (a program written with Google Test). 
""" import re from googletest.test import gtest_test_utils def RunAndReturnOutput(args=None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([ gtest_test_utils.GetTestExecutablePath( 'googletest-global-environment-unittest_') ] + (args or [])).output class GTestGlobalEnvironmentUnitTest(gtest_test_utils.TestCase): """Tests global test environment failures.""" def testEnvironmentSetUpFails(self): """Tests the behavior of not specifying the fail_fast.""" # Run the test. txt = RunAndReturnOutput() # We should see the text of the global environment setup error. self.assertIn('Canned environment setup error', txt) # Our test should have been skipped due to the error, and not treated as a # pass. self.assertIn('[ SKIPPED ] 1 test', txt) self.assertIn('[ PASSED ] 0 tests', txt) # The test case shouldn't have been run. self.assertNotIn('Unexpected call', txt) def testEnvironmentSetUpAndTornDownForEachRepeat(self): """Tests the behavior of test environments and gtest_repeat.""" # When --gtest_recreate_environments_when_repeating is true, the global test # environment should be set up and torn down for each iteration. txt = RunAndReturnOutput([ '--gtest_repeat=2', '--gtest_recreate_environments_when_repeating=true', ]) expected_pattern = ('(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' 'Global test environment set-up.' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' 'Global test environment tear-down' '(.|\n)*' r'Repeating all tests \(iteration 2\)' '(.|\n)*' 'Global test environment set-up.' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' 'Global test environment tear-down' '(.|\n)*') self.assertRegex(txt, expected_pattern) def testEnvironmentSetUpAndTornDownOnce(self): """Tests environment and --gtest_recreate_environments_when_repeating.""" # By default the environment should only be set up and torn down once, at # the start and end of the test respectively. 
txt = RunAndReturnOutput([ '--gtest_repeat=2', ]) expected_pattern = ('(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' 'Global test environment set-up.' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' r'Repeating all tests \(iteration 2\)' '(.|\n)*' 'SomeTest.DoesFoo' '(.|\n)*' 'Global test environment tear-down' '(.|\n)*') self.assertRegex(txt, expected_pattern) self.assertEqual(len(re.findall('Global test environment set-up', txt)), 1) self.assertEqual( len(re.findall('Global test environment tear-down', txt)), 1) if __name__ == '__main__': gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-global-environment-unittest.py
# Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for Google C++ Testing and Mocking Framework.""" # Suppresses the 'Import not at the top of the file' lint complaint. 
# pylint: disable-msg=C6204 import os import subprocess import sys IS_WINDOWS = os.name == 'nt' IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0] IS_OS2 = os.name == 'os2' import atexit import shutil import tempfile import unittest as _test_module # pylint: enable-msg=C6204 GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT' # The environment variable for specifying the path to the premature-exit file. PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE' environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets/unsets an environment variable to a given value.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] # Here we expose a class from a particular module, depending on the # environment. The comment suppresses the 'Invalid variable name' lint # complaint. TestCase = _test_module.TestCase # pylint: disable=C6409 # Initially maps a flag to its default value. After # _ParseAndStripGTestFlags() is called, maps a flag to its actual value. _flag_map = {'source_dir': os.path.dirname(sys.argv[0]), 'build_dir': os.path.dirname(sys.argv[0])} _gtest_flags_are_parsed = False def _ParseAndStripGTestFlags(argv): """Parses and strips Google Test flags from argv. This is idempotent.""" # Suppresses the lint complaint about a global variable since we need it # here to maintain module-wide state. global _gtest_flags_are_parsed # pylint: disable=W0603 if _gtest_flags_are_parsed: return _gtest_flags_are_parsed = True for flag in _flag_map: # The environment variable overrides the default value. if flag.upper() in os.environ: _flag_map[flag] = os.environ[flag.upper()] # The command line flag overrides the environment variable. i = 1 # Skips the program name. while i < len(argv): prefix = '--' + flag + '=' if argv[i].startswith(prefix): _flag_map[flag] = argv[i][len(prefix):] del argv[i] break else: # We don't increment i in case we just found a --gtest_* flag # and removed it from argv. 
i += 1 def GetFlag(flag): """Returns the value of the given flag.""" # In case GetFlag() is called before Main(), we always call # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags # are parsed. _ParseAndStripGTestFlags(sys.argv) return _flag_map[flag] def GetSourceDir(): """Returns the absolute path of the directory where the .py files are.""" return os.path.abspath(GetFlag('source_dir')) def GetBuildDir(): """Returns the absolute path of the directory where the test binaries are.""" return os.path.abspath(GetFlag('build_dir')) _temp_dir = None def _RemoveTempDir(): if _temp_dir: shutil.rmtree(_temp_dir, ignore_errors=True) atexit.register(_RemoveTempDir) def GetTempDir(): global _temp_dir if not _temp_dir: _temp_dir = tempfile.mkdtemp() return _temp_dir def GetTestExecutablePath(executable_name, build_dir=None): """Returns the absolute path of the test binary given its name. The function will print a message and abort the program if the resulting file doesn't exist. Args: executable_name: name of the test binary that the test script runs. build_dir: directory where to look for executables, by default the result of GetBuildDir(). Returns: The absolute path of the test binary. """ path = os.path.abspath(os.path.join(build_dir or GetBuildDir(), executable_name)) if (IS_WINDOWS or IS_CYGWIN or IS_OS2) and not path.endswith('.exe'): path += '.exe' if not os.path.exists(path): message = ( 'Unable to find the test binary "%s". Please make sure to provide\n' 'a path to the binary via the --build_dir flag or the BUILD_DIR\n' 'environment variable.' % path) print(message, file=sys.stderr) sys.exit(1) return path def GetExitStatus(exit_code): """Returns the argument to exit(), or -1 if exit() wasn't called. Args: exit_code: the result value of os.system(command). """ if os.name == 'nt': # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns # the argument to exit() directly. 
    # On Windows the raw value already is the exit() argument (see the
    # comment above), so pass it through unchanged.
    return exit_code
  else:
    # On Unix, os.WEXITSTATUS() must be used to extract the exit status
    # from the result of os.system().
    if os.WIFEXITED(exit_code):
      return os.WEXITSTATUS(exit_code)
    else:
      # The child did not exit normally (e.g. it was killed by a signal),
      # so there is no exit() argument to report.
      return -1


class Subprocess:
  """Runs a child process once and records how it terminated.

  All of the work happens in __init__; afterwards the instance is a plain
  record of the outcome (output, exited, terminated_by_signal, exit_code).
  """

  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True if and only if the child process has been
                               terminated by a signal.
        exited                 True if and only if the child process exited
                               normally.
        exit_code              The code with which the child process exited.
                               NOTE: only set when exited is True.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """
    if capture_stderr:
      # Merge stderr into stdout so self.output holds both streams.
      stderr = subprocess.STDOUT
    else:
      stderr = subprocess.PIPE

    p = subprocess.Popen(command,
                         stdout=subprocess.PIPE, stderr=stderr,
                         cwd=working_dir, universal_newlines=True, env=env)
    # communicate returns a tuple with the file object for the child's
    # output.
    self.output = p.communicate()[0]
    self._return_code = p.returncode

    # NOTE(review): this treats any return code with the high bit set as
    # "killed by a signal". Popen.returncode is a small negative int
    # (-signum) on Unix, so the heuristic relies on two's-complement
    # representation of the negative value — confirm on each platform.
    if bool(self._return_code & 0x80000000):
      self.terminated_by_signal = True
      self.exited = False
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code


def Main():
  """Runs the unit test."""

  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
if GTEST_OUTPUT_VAR_NAME in os.environ: del os.environ[GTEST_OUTPUT_VAR_NAME] _test_module.main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_test_utils.py
# Copyright 2018, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for gtest_json_output.""" import re def normalize(obj): """Normalize output object. Args: obj: Google Test's JSON output object to normalize. Returns: Normalized output without any references to transient information that may change from run to run. 
""" def _normalize(key, value): if key == 'time': return re.sub(r'^\d+(\.\d+)?s$', '*', value) elif key == 'timestamp': return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ$', '*', value) elif key == 'failure': value = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value) return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', value) elif key == 'file': return re.sub(r'^.*[/\\](.*)', '\\1', value) else: return normalize(value) if isinstance(obj, dict): return {k: _normalize(k, v) for k, v in obj.items()} if isinstance(obj, list): return [normalize(x) for x in obj] else: return obj
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_json_test_utils.py
#!/usr/bin/env python # # Copyright 2015 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test rejects duplicate parameterized test names.

Runs a test binary that registers two value-parameterized tests under the
same name 'a' and checks that the binary dies with a diagnostic naming
the duplicate.
"""

from googletest.test import gtest_test_utils

binary_name = 'googletest-param-test-invalid-name2-test_'
COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)


def Assert(condition):
  """Raises AssertionError when the condition does not hold."""
  if not condition:
    raise AssertionError


def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""

  err = ('Duplicate parameterized test name \'a\'')

  p = gtest_test_utils.Subprocess(command)
  # The binary is expected to abort (die by a signal) on the duplicate
  # parameterized test name rather than exit normally.
  Assert(p.terminated_by_signal)

  # Check for appropriate output
  Assert(err in p.output)


class GTestParamTestInvalidName2Test(gtest_test_utils.TestCase):
  """Driver that runs the child binary and checks how it fails."""

  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-param-test-invalid-name2-test.py
#!/usr/bin/env python # Copyright 2018, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Unit test for the gtest_json_output module.""" import datetime import errno import json import os import re import sys from googletest.test import gtest_json_test_utils from googletest.test import gtest_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' GTEST_OUTPUT_FLAG = '--gtest_output' GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.json' GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_' # The flag indicating stacktraces are not supported NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support' SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv if SUPPORTS_STACK_TRACES: STACK_TRACE_TEMPLATE = '\nStack trace:\n*' else: STACK_TRACE_TEMPLATE = '' EXPECTED_NON_EMPTY = { u'tests': 26, u'failures': 5, u'disabled': 2, u'errors': 0, u'timestamp': u'*', u'time': u'*', u'ad_hoc_property': u'42', u'name': u'AllTests', u'testsuites': [{ u'name': u'SuccessfulTest', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'Succeeds', u'file': u'gtest_xml_output_unittest_.cc', u'line': 51, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'SuccessfulTest' }] }, { u'name': u'FailedTest', u'tests': 1, u'failures': 1, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'Fails', u'file': u'gtest_xml_output_unittest_.cc', u'line': 59, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'FailedTest', u'failures': [{ u'failure': u'gtest_xml_output_unittest_.cc:*\n' u'Expected equality of these values:\n' u' 1\n 2' + STACK_TRACE_TEMPLATE, u'type': u'' }] }] }, { u'name': u'DisabledTest', u'tests': 1, u'failures': 0, u'disabled': 1, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'DISABLED_test_not_run', u'file': u'gtest_xml_output_unittest_.cc', u'line': 66, u'status': u'NOTRUN', u'result': u'SUPPRESSED', u'time': 
u'*', u'timestamp': u'*', u'classname': u'DisabledTest' }] }, { u'name': u'SkippedTest', u'tests': 3, u'failures': 1, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'Skipped', u'file': 'gtest_xml_output_unittest_.cc', u'line': 73, u'status': u'RUN', u'result': u'SKIPPED', u'time': u'*', u'timestamp': u'*', u'classname': u'SkippedTest' }, { u'name': u'SkippedWithMessage', u'file': 'gtest_xml_output_unittest_.cc', u'line': 77, u'status': u'RUN', u'result': u'SKIPPED', u'time': u'*', u'timestamp': u'*', u'classname': u'SkippedTest' }, { u'name': u'SkippedAfterFailure', u'file': 'gtest_xml_output_unittest_.cc', u'line': 81, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'SkippedTest', u'failures': [{ u'failure': u'gtest_xml_output_unittest_.cc:*\n' u'Expected equality of these values:\n' u' 1\n 2' + STACK_TRACE_TEMPLATE, u'type': u'' }] }] }, { u'name': u'MixedResultTest', u'tests': 3, u'failures': 1, u'disabled': 1, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'Succeeds', u'file': 'gtest_xml_output_unittest_.cc', u'line': 86, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'MixedResultTest' }, { u'name': u'Fails', u'file': u'gtest_xml_output_unittest_.cc', u'line': 91, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'MixedResultTest', u'failures': [{ u'failure': u'gtest_xml_output_unittest_.cc:*\n' u'Expected equality of these values:\n' u' 1\n 2' + STACK_TRACE_TEMPLATE, u'type': u'' }, { u'failure': u'gtest_xml_output_unittest_.cc:*\n' u'Expected equality of these values:\n' u' 2\n 3' + STACK_TRACE_TEMPLATE, u'type': u'' }] }, { u'name': u'DISABLED_test', u'file': u'gtest_xml_output_unittest_.cc', u'line': 96, u'status': u'NOTRUN', u'result': u'SUPPRESSED', u'time': u'*', u'timestamp': u'*', u'classname': u'MixedResultTest' }] }, { u'name': 
u'XmlQuotingTest', u'tests': 1, u'failures': 1, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'OutputsCData', u'file': u'gtest_xml_output_unittest_.cc', u'line': 100, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'XmlQuotingTest', u'failures': [{ u'failure': u'gtest_xml_output_unittest_.cc:*\n' u'Failed\nXML output: <?xml encoding="utf-8">' u'<top><![CDATA[cdata text]]></top>' + STACK_TRACE_TEMPLATE, u'type': u'' }] }] }, { u'name': u'InvalidCharactersTest', u'tests': 1, u'failures': 1, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'InvalidCharactersInMessage', u'file': u'gtest_xml_output_unittest_.cc', u'line': 107, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'InvalidCharactersTest', u'failures': [{ u'failure': u'gtest_xml_output_unittest_.cc:*\n' u'Failed\nInvalid characters in brackets' u' [\x01\x02]' + STACK_TRACE_TEMPLATE, u'type': u'' }] }] }, { u'name': u'PropertyRecordingTest', u'tests': 4, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'SetUpTestSuite': u'yes', u'TearDownTestSuite': u'aye', u'testsuite': [{ u'name': u'OneProperty', u'file': u'gtest_xml_output_unittest_.cc', u'line': 119, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'PropertyRecordingTest', u'key_1': u'1' }, { u'name': u'IntValuedProperty', u'file': u'gtest_xml_output_unittest_.cc', u'line': 123, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'PropertyRecordingTest', u'key_int': u'1' }, { u'name': u'ThreeProperties', u'file': u'gtest_xml_output_unittest_.cc', u'line': 127, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'PropertyRecordingTest', u'key_1': u'1', u'key_2': u'2', u'key_3': u'3' }, { u'name': 
u'TwoValuesForOneKeyUsesLastValue', u'file': u'gtest_xml_output_unittest_.cc', u'line': 133, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'PropertyRecordingTest', u'key_1': u'2' }] }, { u'name': u'NoFixtureTest', u'tests': 3, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'RecordProperty', u'file': u'gtest_xml_output_unittest_.cc', u'line': 138, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'NoFixtureTest', u'key': u'1' }, { u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty', u'file': u'gtest_xml_output_unittest_.cc', u'line': 151, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'NoFixtureTest', u'key_for_utility_int': u'1' }, { u'name': u'ExternalUtilityThatCallsRecordStringValuedProperty', u'file': u'gtest_xml_output_unittest_.cc', u'line': 155, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'NoFixtureTest', u'key_for_utility_string': u'1' }] }, { u'name': u'TypedTest/0', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'int', u'file': u'gtest_xml_output_unittest_.cc', u'line': 171, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'TypedTest/0' }] }, { u'name': u'TypedTest/1', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'long', u'file': u'gtest_xml_output_unittest_.cc', u'line': 171, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'TypedTest/1' }] }, { u'name': u'Single/TypeParameterizedTestSuite/0', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', 
u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'int', u'file': u'gtest_xml_output_unittest_.cc', u'line': 178, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'Single/TypeParameterizedTestSuite/0' }] }, { u'name': u'Single/TypeParameterizedTestSuite/1', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'HasTypeParamAttribute', u'type_param': u'long', u'file': u'gtest_xml_output_unittest_.cc', u'line': 178, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'Single/TypeParameterizedTestSuite/1' }] }, { u'name': u'Single/ValueParamTest', u'tests': 4, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'HasValueParamAttribute/0', u'value_param': u'33', u'file': u'gtest_xml_output_unittest_.cc', u'line': 162, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'Single/ValueParamTest' }, { u'name': u'HasValueParamAttribute/1', u'value_param': u'42', u'file': u'gtest_xml_output_unittest_.cc', u'line': 162, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'Single/ValueParamTest' }, { u'name': u'AnotherTestThatHasValueParamAttribute/0', u'value_param': u'33', u'file': u'gtest_xml_output_unittest_.cc', u'line': 163, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'Single/ValueParamTest' }, { u'name': u'AnotherTestThatHasValueParamAttribute/1', u'value_param': u'42', u'file': u'gtest_xml_output_unittest_.cc', u'line': 163, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'Single/ValueParamTest' }] }] } EXPECTED_FILTERED = { u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'name': u'AllTests', u'ad_hoc_property': u'42', 
u'testsuites': [{ u'name': u'SuccessfulTest', u'tests': 1, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'Succeeds', u'file': u'gtest_xml_output_unittest_.cc', u'line': 51, u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'SuccessfulTest', }] }], } EXPECTED_NO_TEST = { u'tests': 0, u'failures': 0, u'disabled': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'name': u'AllTests', u'testsuites': [{ u'name': u'NonTestSuiteFailure', u'tests': 1, u'failures': 1, u'disabled': 0, u'skipped': 0, u'errors': 0, u'time': u'*', u'timestamp': u'*', u'testsuite': [{ u'name': u'', u'status': u'RUN', u'result': u'COMPLETED', u'time': u'*', u'timestamp': u'*', u'classname': u'', u'failures': [{ u'failure': u'gtest_no_test_unittest.cc:*\n' u'Expected equality of these values:\n' u' 1\n 2' + STACK_TRACE_TEMPLATE, u'type': u'', }] }] }], } GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME) SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess( [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output class GTestJsonOutputUnitTest(gtest_test_utils.TestCase): """Unit test for Google Test's JSON output functionality. """ # This test currently breaks on platforms that do not support typed and # type-parameterized tests, so we don't run it under them. if SUPPORTS_TYPED_TESTS: def testNonEmptyJsonOutput(self): """Verifies JSON output for a Google Test binary with non-empty output. Runs a test program that generates a non-empty JSON output, and tests that the JSON output is expected. """ self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY, 1) def testNoTestJsonOutput(self): """Verifies JSON output for a Google Test binary without actual tests. Runs a test program that generates an JSON output for a binary with no tests, and tests that the JSON output is expected. 
""" self._TestJsonOutput('gtest_no_test_unittest', EXPECTED_NO_TEST, 0) def testTimestampValue(self): """Checks whether the timestamp attribute in the JSON output is valid. Runs a test program that generates an empty JSON output, and checks if the timestamp attribute in the testsuites tag is valid. """ actual = self._GetJsonOutput('gtest_no_test_unittest', [], 0) date_time_str = actual['timestamp'] # datetime.strptime() is only available in Python 2.5+ so we have to # parse the expected datetime manually. match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str) self.assertTrue( re.match, 'JSON datettime string %s has incorrect format' % date_time_str) date_time_from_json = datetime.datetime( year=int(match.group(1)), month=int(match.group(2)), day=int(match.group(3)), hour=int(match.group(4)), minute=int(match.group(5)), second=int(match.group(6))) time_delta = abs(datetime.datetime.now() - date_time_from_json) # timestamp value should be near the current local time self.assertTrue(time_delta < datetime.timedelta(seconds=600), 'time_delta is %s' % time_delta) def testDefaultOutputFile(self): """Verifies the default output file name. Confirms that Google Test produces an JSON output file with the expected default name if no name is explicitly specified. """ output_file = os.path.join(gtest_test_utils.GetTempDir(), GTEST_DEFAULT_OUTPUT_FILE) gtest_prog_path = gtest_test_utils.GetTestExecutablePath( 'gtest_no_test_unittest') try: os.remove(output_file) except OSError: e = sys.exc_info()[1] if e.errno != errno.ENOENT: raise p = gtest_test_utils.Subprocess( [gtest_prog_path, '%s=json' % GTEST_OUTPUT_FLAG], working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) self.assert_(os.path.isfile(output_file)) def testSuppressedJsonOutput(self): """Verifies that no JSON output is generated. Tests that no JSON file is generated if the default JSON listener is shut down before RUN_ALL_TESTS is invoked. 
""" json_path = os.path.join(gtest_test_utils.GetTempDir(), GTEST_PROGRAM_NAME + 'out.json') if os.path.isfile(json_path): os.remove(json_path) command = [GTEST_PROGRAM_PATH, '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path), '--shut_down_xml'] p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: # p.signal is available only if p.terminated_by_signal is True. self.assertFalse( p.terminated_by_signal, '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal)) else: self.assert_(p.exited) self.assertEquals(1, p.exit_code, "'%s' exited with code %s, which doesn't match " 'the expected exit code %s.' % (command, p.exit_code, 1)) self.assert_(not os.path.isfile(json_path)) def testFilteredTestJsonOutput(self): """Verifies JSON output when a filter is applied. Runs a test program that executes only some tests and verifies that non-selected tests do not show up in the JSON output. """ self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED, 0, extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG]) def _GetJsonOutput(self, gtest_prog_name, extra_args, expected_exit_code): """Returns the JSON output generated by running the program gtest_prog_name. Furthermore, the program's exit code must be expected_exit_code. Args: gtest_prog_name: Google Test binary name. extra_args: extra arguments to binary invocation. expected_exit_code: program's exit code. """ json_path = os.path.join(gtest_test_utils.GetTempDir(), gtest_prog_name + 'out.json') gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name) command = ( [gtest_prog_path, '%s=json:%s' % (GTEST_OUTPUT_FLAG, json_path)] + extra_args ) p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: self.assert_(False, '%s was killed by signal %d' % (gtest_prog_name, p.signal)) else: self.assert_(p.exited) self.assertEquals(expected_exit_code, p.exit_code, "'%s' exited with code %s, which doesn't match " 'the expected exit code %s.' 
% (command, p.exit_code, expected_exit_code)) with open(json_path) as f: actual = json.load(f) return actual def _TestJsonOutput(self, gtest_prog_name, expected, expected_exit_code, extra_args=None): """Checks the JSON output generated by the Google Test binary. Asserts that the JSON document generated by running the program gtest_prog_name matches expected_json, a string containing another JSON document. Furthermore, the program's exit code must be expected_exit_code. Args: gtest_prog_name: Google Test binary name. expected: expected output. expected_exit_code: program's exit code. extra_args: extra arguments to binary invocation. """ actual = self._GetJsonOutput(gtest_prog_name, extra_args or [], expected_exit_code) self.assertEqual(expected, gtest_json_test_utils.normalize(actual)) if __name__ == '__main__': if NO_STACKTRACE_SUPPORT_FLAG in sys.argv: # unittest.main() can't handle unknown flags sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG) os.environ['GTEST_STACK_TRACE_DEPTH'] = '1' gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-json-output-unittest.py
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's break-on-failure mode. A user can ask Google Test to seg-fault when an assertion fails, using either the GTEST_BREAK_ON_FAILURE environment variable or the --gtest_break_on_failure flag. This script tests such functionality by invoking googletest-break-on-failure-unittest_ (a program written with Google Test) with different environments and command line flags. 
""" import os from googletest.test import gtest_test_utils # Constants. IS_WINDOWS = os.name == 'nt' # The environment variable for enabling/disabling the break-on-failure mode. BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE' # The command line flag for enabling/disabling the break-on-failure mode. BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure' # The environment variable for enabling/disabling the throw-on-failure mode. THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE' # The environment variable for enabling/disabling the catch-exceptions mode. CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS' # Path to the googletest-break-on-failure-unittest_ program. EXE_PATH = gtest_test_utils.GetTestExecutablePath( 'googletest-break-on-failure-unittest_') environ = gtest_test_utils.environ SetEnvVar = gtest_test_utils.SetEnvVar # Tests in this file run a Google-Test-based test program and expect it # to terminate prematurely. Therefore they are incompatible with # the premature-exit-file protocol by design. Unset the # premature-exit filepath to prevent Google Test from creating # the file. SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None) def Run(command): """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise.""" p = gtest_test_utils.Subprocess(command, env=environ) if p.terminated_by_signal: return 1 else: return 0 # The tests. class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase): """Tests using the GTEST_BREAK_ON_FAILURE environment variable or the --gtest_break_on_failure flag to turn assertion failures into segmentation faults. """ def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault): """Runs googletest-break-on-failure-unittest_ and verifies that it does (or does not) have a seg-fault. Args: env_var_value: value of the GTEST_BREAK_ON_FAILURE environment variable; None if the variable should be unset. flag_value: value of the --gtest_break_on_failure flag; None if the flag should not be present. 
expect_seg_fault: 1 if the program is expected to generate a seg-fault; 0 otherwise. """ SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value) if env_var_value is None: env_var_value_msg = ' is not set' else: env_var_value_msg = '=' + env_var_value if flag_value is None: flag = '' elif flag_value == '0': flag = '--%s=0' % BREAK_ON_FAILURE_FLAG else: flag = '--%s' % BREAK_ON_FAILURE_FLAG command = [EXE_PATH] if flag: command.append(flag) if expect_seg_fault: should_or_not = 'should' else: should_or_not = 'should not' has_seg_fault = Run(command) SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None) msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' % (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command), should_or_not)) self.assert_(has_seg_fault == expect_seg_fault, msg) def testDefaultBehavior(self): """Tests the behavior of the default mode.""" self.RunAndVerify(env_var_value=None, flag_value=None, expect_seg_fault=0) def testEnvVar(self): """Tests using the GTEST_BREAK_ON_FAILURE environment variable.""" self.RunAndVerify(env_var_value='0', flag_value=None, expect_seg_fault=0) self.RunAndVerify(env_var_value='1', flag_value=None, expect_seg_fault=1) def testFlag(self): """Tests using the --gtest_break_on_failure flag.""" self.RunAndVerify(env_var_value=None, flag_value='0', expect_seg_fault=0) self.RunAndVerify(env_var_value=None, flag_value='1', expect_seg_fault=1) def testFlagOverridesEnvVar(self): """Tests that the flag overrides the environment variable.""" self.RunAndVerify(env_var_value='0', flag_value='0', expect_seg_fault=0) self.RunAndVerify(env_var_value='0', flag_value='1', expect_seg_fault=1) self.RunAndVerify(env_var_value='1', flag_value='0', expect_seg_fault=0) self.RunAndVerify(env_var_value='1', flag_value='1', expect_seg_fault=1) def testBreakOnFailureOverridesThrowOnFailure(self): """Tests that gtest_break_on_failure overrides gtest_throw_on_failure.""" SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1') try: 
self.RunAndVerify(env_var_value=None, flag_value='1', expect_seg_fault=1) finally: SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None) if IS_WINDOWS: def testCatchExceptionsDoesNotInterfere(self): """Tests that gtest_catch_exceptions doesn't interfere.""" SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1') try: self.RunAndVerify(env_var_value='1', flag_value='1', expect_seg_fault=1) finally: SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None) if __name__ == '__main__': gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-break-on-failure-unittest.py
#!/usr/bin/env python # # Copyright 2019, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that SetUpTestSuite and TearDownTestSuite errors are noticed."""

from googletest.test import gtest_test_utils

# Helper binary whose test suites deliberately fail in their suite-level
# fixtures (SetUpTestSuite / TearDownTestSuite).
COMMAND = gtest_test_utils.GetTestExecutablePath(
    'googletest-setuptestsuite-test_')


class GTestSetUpTestSuiteTest(gtest_test_utils.TestCase):

  def testSetupErrorAndTearDownError(self):
    # Run the helper binary.  Both of its suites fail at the suite level,
    # so the process as a whole must report failure.
    proc = gtest_test_utils.Subprocess(COMMAND)
    self.assertNotEqual(proc.exit_code, 0, msg=proc.output)

    # The summary must name both failing suites and count them.
    expected_summary = (
        '[  FAILED  ] SetupFailTest: SetUpTestSuite or TearDownTestSuite\n'
        '[  FAILED  ] TearDownFailTest: SetUpTestSuite or TearDownTestSuite\n'
        '\n'
        ' 2 FAILED TEST SUITES\n')
    self.assertIn(expected_summary, proc.output)

if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-setuptestsuite-test.py
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's --gtest_list_tests flag. A user can ask Google Test to list all tests by specifying the --gtest_list_tests flag. This script tests such functionality by invoking googletest-list-tests-unittest_ (a program written with Google Test) the command line flags. """ import re from googletest.test import gtest_test_utils # Constants. # The command line flag for enabling/disabling listing all tests. 
LIST_TESTS_FLAG = 'gtest_list_tests'

# Path to the googletest-list-tests-unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('googletest-list-tests-unittest_')

# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
  Test1
Foo\.
  Bar1
  Bar2
  DISABLED_Bar3
Abc\.
  Xyz
  Def
FooBar\.
  Baz
FooTest\.
  Test1
  DISABLED_Test2
  Test3
TypedTest/0\.  # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
  TestA
  TestB
TypedTest/1\.  # TypeParam = int\s*\*( __ptr64)?
  TestA
  TestB
TypedTest/2\.  # TypeParam = .*MyArray<bool,\s*42>
  TestA
  TestB
My/TypeParamTest/0\.  # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
  TestA
  TestB
My/TypeParamTest/1\.  # TypeParam = int\s*\*( __ptr64)?
  TestA
  TestB
My/TypeParamTest/2\.  # TypeParam = .*MyArray<bool,\s*42>
  TestA
  TestB
MyInstantiation/ValueParamTest\.
  TestA/0  # GetParam\(\) = one line
  TestA/1  # GetParam\(\) = two\\nlines
  TestA/2  # GetParam\(\) = a very\\nlo{241}\.\.\.
  TestB/0  # GetParam\(\) = one line
  TestB/1  # GetParam\(\) = two\\nlines
  TestB/2  # GetParam\(\) = a very\\nlo{241}\.\.\.
""")

# The expected output when running googletest-list-tests-unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
  Test1
Foo\.
  Bar1
  Bar2
  DISABLED_Bar3
FooBar\.
  Baz
FooTest\.
  Test1
  DISABLED_Test2
  Test3
""")

# Utilities.


def Run(args):
  """Runs googletest-list-tests-unittest_ and returns the list of tests printed."""

  return gtest_test_utils.Subprocess([EXE_PATH] + args,
                                     capture_stderr=False).output


# The unit test.


class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""

  def RunAndVerify(self, flag_value, expected_output_re, other_flag):
    """Runs googletest-list-tests-unittest_ and verifies that it prints
    the correct tests.

    Args:
      flag_value:         value of the --gtest_list_tests flag;
                          None if the flag should not be present.
      expected_output_re: regular expression that matches the expected
                          output after running command;
      other_flag:         a different flag to be passed to command
                          along with gtest_list_tests;
                          None if the flag should not be present.
    """

    if flag_value is None:
      flag = ''
      flag_expression = 'not set'
    elif flag_value == '0':
      flag = '--%s=0' % LIST_TESTS_FLAG
      flag_expression = '0'
    else:
      flag = '--%s' % LIST_TESTS_FLAG
      flag_expression = '1'

    args = [flag]

    if other_flag is not None:
      args += [other_flag]

    output = Run(args)

    if expected_output_re:
      # NOTE: self.assert_ was a deprecated alias removed in Python 3.12;
      # assertTrue is the supported spelling.
      self.assertTrue(
          expected_output_re.match(output),
          ('when %s is %s, the output of "%s" is "%s",\n'
           'which does not match regex "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
            expected_output_re.pattern)))
    else:
      self.assertTrue(
          not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
          ('when %s is %s, the output of "%s" is "%s"' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(flag_value=None,
                      expected_output_re=None,
                      other_flag=None)

  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""

    self.RunAndVerify(flag_value='0',
                      expected_output_re=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag=None)

  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""

    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag='--gtest_break_on_failure')

  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""

    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
                      other_flag='--gtest_filter=Foo*')


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-list-tests-unittest.py
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test correctly parses environment variables."""

import os
from googletest.test import gtest_test_utils


IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')

# Private copy of the environment; mutated by SetEnvVar and passed to the
# child process so the parent's environment is never touched.
environ = os.environ.copy()


def AssertEq(expected, actual):
  """Fails (with a diagnostic dump) unless expected equals actual."""

  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print('  Actual: %s' % (actual,))
  raise AssertionError


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value


def GetFlag(flag):
  """Runs googletest-env-var-test_ and returns its output."""

  args = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(args, env=environ).output


def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""

  env_var = 'GTEST_' + flag.upper()

  # With the env var set, the flag must take the env var's value;
  # with it unset, the flag must fall back to its default.
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))


class GTestEnvVarTest(gtest_test_utils.TestCase):

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""

    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    # 'fail_fast' is also driven by a Bazel-specific variable; clear it first.
    SetEnvVar('TESTBRIDGE_TEST_RUNNER_FAIL_FAST', None)
    TestFlag('fail_fast', '1', '0')
    TestFlag('filter', 'FooTest.Bar', '*')
    # 'output' is also driven by $XML_OUTPUT_FILE; clear it first.
    SetEnvVar('XML_OUTPUT_FILE', None)
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('brief', '1', '0')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')

    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')

  def testXmlOutputFile(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""

    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))

  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""

    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))

if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-env-var-test.py
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Unit test for the gtest_xml_output module."""

import os
from xml.dom import minidom, Node
from googletest.test import gtest_test_utils
from googletest.test import gtest_xml_test_utils

GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"

EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
  <testsuite name="PropertyOne" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
    <testcase name="TestSomeProperties" file="gtest_xml_outfile1_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne">
      <properties>
        <property name="SetUpProp" value="1"/>
        <property name="TestSomeProperty" value="1"/>
        <property name="TearDownProp" value="1"/>
      </properties>
    </testcase>
  </testsuite>
</testsuites>
"""

EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
  <testsuite name="PropertyTwo" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
    <testcase name="TestSomeProperties" file="gtest_xml_outfile2_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo">
      <properties>
        <property name="SetUpProp" value="2"/>
        <property name="TestSomeProperty" value="2"/>
        <property name="TearDownProp" value="2"/>
      </properties>
    </testcase>
  </testsuite>
</testsuites>
"""


class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    """Best-effort removal of the output files and their directory."""
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    """Runs test_name with directory-style --gtest_output and checks the XML."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    # NOTE: assert_/assertEquals are deprecated unittest aliases removed in
    # Python 3.12; use assertTrue/assertEqual.
    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)

    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    # libtool-built binaries may be prefixed with 'lt-'; accept either name.
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assertTrue(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                    output_file1)

    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()


if __name__ == "__main__":
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_xml_outfiles_test.py
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test's --gtest_list_tests flag. A user can ask Google Test to list all tests by specifying the --gtest_list_tests flag. If output is requested, via --gtest_output=xml or --gtest_output=json, the tests are listed, with extra information in the output file. This script tests such functionality by invoking gtest_list_output_unittest_ (a program written with Google Test) the command line flags. 
"""

import os
import re

from googletest.test import gtest_test_utils

# Command-line flags under test.
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'

# Expected file contents, one regex per line; _TestOutput strips and matches
# each line of the actual output against the corresponding line here.
EXPECTED_XML = """<\?xml version="1.0" encoding="UTF-8"\?>
<testsuites tests="16" name="AllTests">
  <testsuite name="FooTest" tests="2">
    <testcase name="Test1" file=".*gtest_list_output_unittest_.cc" line="43" />
    <testcase name="Test2" file=".*gtest_list_output_unittest_.cc" line="45" />
  </testsuite>
  <testsuite name="FooTestFixture" tests="2">
    <testcase name="Test3" file=".*gtest_list_output_unittest_.cc" line="48" />
    <testcase name="Test4" file=".*gtest_list_output_unittest_.cc" line="49" />
  </testsuite>
  <testsuite name="TypedTest/0" tests="2">
    <testcase name="Test7" type_param="int" file=".*gtest_list_output_unittest_.cc" line="60" />
    <testcase name="Test8" type_param="int" file=".*gtest_list_output_unittest_.cc" line="61" />
  </testsuite>
  <testsuite name="TypedTest/1" tests="2">
    <testcase name="Test7" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="60" />
    <testcase name="Test8" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="61" />
  </testsuite>
  <testsuite name="Single/TypeParameterizedTestSuite/0" tests="2">
    <testcase name="Test9" type_param="int" file=".*gtest_list_output_unittest_.cc" line="66" />
    <testcase name="Test10" type_param="int" file=".*gtest_list_output_unittest_.cc" line="67" />
  </testsuite>
  <testsuite name="Single/TypeParameterizedTestSuite/1" tests="2">
    <testcase name="Test9" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="66" />
    <testcase name="Test10" type_param="bool" file=".*gtest_list_output_unittest_.cc" line="67" />
  </testsuite>
  <testsuite name="ValueParam/ValueParamTest" tests="4">
    <testcase name="Test5/0" value_param="33" file=".*gtest_list_output_unittest_.cc" line="52" />
    <testcase name="Test5/1" value_param="42" file=".*gtest_list_output_unittest_.cc" line="52" />
    <testcase name="Test6/0" value_param="33" file=".*gtest_list_output_unittest_.cc" line="53" />
    <testcase name="Test6/1" value_param="42" file=".*gtest_list_output_unittest_.cc" line="53" />
  </testsuite>
</testsuites>
"""

EXPECTED_JSON = """{
  "tests": 16,
  "name": "AllTests",
  "testsuites": \[
    {
      "name": "FooTest",
      "tests": 2,
      "testsuite": \[
        {
          "name": "Test1",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 43
        },
        {
          "name": "Test2",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 45
        }
      \]
    },
    {
      "name": "FooTestFixture",
      "tests": 2,
      "testsuite": \[
        {
          "name": "Test3",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 48
        },
        {
          "name": "Test4",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 49
        }
      \]
    },
    {
      "name": "TypedTest\\\\/0",
      "tests": 2,
      "testsuite": \[
        {
          "name": "Test7",
          "type_param": "int",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 60
        },
        {
          "name": "Test8",
          "type_param": "int",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 61
        }
      \]
    },
    {
      "name": "TypedTest\\\\/1",
      "tests": 2,
      "testsuite": \[
        {
          "name": "Test7",
          "type_param": "bool",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 60
        },
        {
          "name": "Test8",
          "type_param": "bool",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 61
        }
      \]
    },
    {
      "name": "Single\\\\/TypeParameterizedTestSuite\\\\/0",
      "tests": 2,
      "testsuite": \[
        {
          "name": "Test9",
          "type_param": "int",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 66
        },
        {
          "name": "Test10",
          "type_param": "int",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 67
        }
      \]
    },
    {
      "name": "Single\\\\/TypeParameterizedTestSuite\\\\/1",
      "tests": 2,
      "testsuite": \[
        {
          "name": "Test9",
          "type_param": "bool",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 66
        },
        {
          "name": "Test10",
          "type_param": "bool",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 67
        }
      \]
    },
    {
      "name": "ValueParam\\\\/ValueParamTest",
      "tests": 4,
      "testsuite": \[
        {
          "name": "Test5\\\\/0",
          "value_param": "33",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 52
        },
        {
          "name": "Test5\\\\/1",
          "value_param": "42",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 52
        },
        {
          "name": "Test6\\\\/0",
          "value_param": "33",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 53
        },
        {
          "name": "Test6\\\\/1",
          "value_param": "42",
          "file": ".*gtest_list_output_unittest_.cc",
          "line": 53
        }
      \]
    }
  \]
}
"""


class GTestListTestsOutputUnitTest(gtest_test_utils.TestCase):
  """Unit test for Google Test's list tests with output to file functionality.
  """

  def testXml(self):
    """Verifies XML output for listing tests in a Google Test binary.

    Runs a test program that generates an empty XML output, and
    tests that the XML output is expected.
    """
    self._TestOutput('xml', EXPECTED_XML)

  def testJSON(self):
    """Verifies JSON output for listing tests in a Google Test binary.

    Runs a test program that generates an empty JSON output, and
    tests that the JSON output is expected.
    """
    self._TestOutput('json', EXPECTED_JSON)

  def _GetOutput(self, out_format):
    """Runs the helper binary with --gtest_list_tests and --gtest_output
    pointing at a temp file, and returns that file's contents.
    """
    file_path = os.path.join(gtest_test_utils.GetTempDir(),
                             'test_out.' + out_format)
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
        'gtest_list_output_unittest_')

    # NOTE(review): GTEST_LIST_TESTS_FLAG is defined above but the literal
    # flag is used here; presumably equivalent — confirm before relying on it.
    command = ([
        gtest_prog_path,
        '%s=%s:%s' % (GTEST_OUTPUT_FLAG, out_format, file_path),
        '--gtest_list_tests'
    ])
    environ_copy = os.environ.copy()
    p = gtest_test_utils.Subprocess(
        command, env=environ_copy, working_dir=gtest_test_utils.GetTempDir())

    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)
    self.assertTrue(os.path.isfile(file_path))
    with open(file_path) as f:
      result = f.read()
    return result

  def _TestOutput(self, test_format, expected_output):
    """Matches each stripped line of the actual output against the
    corresponding (stripped) regex line of expected_output.
    """
    actual = self._GetOutput(test_format)
    actual_lines = actual.splitlines()
    expected_lines = expected_output.splitlines()
    line_count = 0
    for actual_line in actual_lines:
      expected_line = expected_lines[line_count]
      expected_line_re = re.compile(expected_line.strip())
      self.assertTrue(
          expected_line_re.match(actual_line.strip()),
          ('actual output of "%s",\n'
           'which does not match expected regex of "%s"\n'
           'on line %d' % (actual, expected_output, line_count)))
      line_count = line_count + 1


if __name__ == '__main__':
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_list_output_unittest.py
# Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for gtest_xml_output""" import re from xml.dom import minidom, Node from googletest.test import gtest_test_utils GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' class GTestXMLTestCase(gtest_test_utils.TestCase): """ Base class for tests of Google Test's XML output functionality. 
""" def AssertEquivalentNodes(self, expected_node, actual_node): """ Asserts that actual_node (a DOM node object) is equivalent to expected_node (another DOM node object), in that either both of them are CDATA nodes and have the same value, or both are DOM elements and actual_node meets all of the following conditions: * It has the same tag name as expected_node. * It has the same set of attributes as expected_node, each with the same value as the corresponding attribute of expected_node. Exceptions are any attribute named "time", which needs only be convertible to a floating-point number and any attribute named "type_param" which only has to be non-empty. * It has an equivalent set of child nodes (including elements and CDATA sections) as expected_node. Note that we ignore the order of the children as they are not guaranteed to be in any particular order. """ if expected_node.nodeType == Node.CDATA_SECTION_NODE: self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType) self.assertEquals(expected_node.nodeValue, actual_node.nodeValue) return self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType) self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType) self.assertEquals(expected_node.tagName, actual_node.tagName) expected_attributes = expected_node.attributes actual_attributes = actual_node.attributes self.assertEquals( expected_attributes.length, actual_attributes.length, 'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % ( actual_node.tagName, expected_attributes.keys(), actual_attributes.keys())) for i in range(expected_attributes.length): expected_attr = expected_attributes.item(i) actual_attr = actual_attributes.get(expected_attr.name) self.assert_( actual_attr is not None, 'expected attribute %s not found in element %s' % (expected_attr.name, actual_node.tagName)) self.assertEquals( expected_attr.value, actual_attr.value, ' values of attribute %s in element %s differ: %s vs %s' % (expected_attr.name, actual_node.tagName, 
expected_attr.value, actual_attr.value)) expected_children = self._GetChildren(expected_node) actual_children = self._GetChildren(actual_node) self.assertEquals( len(expected_children), len(actual_children), 'number of child elements differ in element ' + actual_node.tagName) for child_id, child in expected_children.items(): self.assert_(child_id in actual_children, '<%s> is not in <%s> (in element %s)' % (child_id, actual_children, actual_node.tagName)) self.AssertEquivalentNodes(child, actual_children[child_id]) identifying_attribute = { 'testsuites': 'name', 'testsuite': 'name', 'testcase': 'name', 'failure': 'message', 'skipped': 'message', 'property': 'name', } def _GetChildren(self, element): """ Fetches all of the child nodes of element, a DOM Element object. Returns them as the values of a dictionary keyed by the IDs of the children. For <testsuites>, <testsuite>, <testcase>, and <property> elements, the ID is the value of their "name" attribute; for <failure> elements, it is the value of the "message" attribute; for <properties> elements, it is the value of their parent's "name" attribute plus the literal string "properties"; CDATA sections and non-whitespace text nodes are concatenated into a single CDATA section with ID "detail". An exception is raised if any element other than the above four is encountered, if two child elements with the same identifying attributes are encountered, or if any other type of node is encountered. 
""" children = {} for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: if child.tagName == 'properties': self.assert_(child.parentNode is not None, 'Encountered <properties> element without a parent') child_id = child.parentNode.getAttribute('name') + '-properties' else: self.assert_(child.tagName in self.identifying_attribute, 'Encountered unknown element <%s>' % child.tagName) child_id = child.getAttribute( self.identifying_attribute[child.tagName]) self.assert_(child_id not in children) children[child_id] = child elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]: if 'detail' not in children: if (child.nodeType == Node.CDATA_SECTION_NODE or not child.nodeValue.isspace()): children['detail'] = child.ownerDocument.createCDATASection( child.nodeValue) else: children['detail'].nodeValue += child.nodeValue else: self.fail('Encountered unexpected node type %d' % child.nodeType) return children def NormalizeXml(self, element): """ Normalizes Google Test's XML output to eliminate references to transient information that may change from run to run. * The "time" attribute of <testsuites>, <testsuite> and <testcase> elements is replaced with a single asterisk, if it contains only digit characters. * The "timestamp" attribute of <testsuites> elements is replaced with a single asterisk, if it contains a valid ISO8601 datetime value. * The "type_param" attribute of <testcase> elements is replaced with a single asterisk (if it sn non-empty) as it is the type name returned by the compiler and is platform dependent. * The line info reported in the first line of the "message" attribute and CDATA section of <failure> elements is replaced with the file's basename and a single asterisk for the line number. * The directory names in file paths are removed. * The stack traces are removed. 
""" if element.tagName == 'testcase': source_file = element.getAttributeNode('file') if source_file: source_file.value = re.sub(r'^.*[/\\](.*)', '\\1', source_file.value) if element.tagName in ('testsuites', 'testsuite', 'testcase'): timestamp = element.getAttributeNode('timestamp') timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d\d\d$', '*', timestamp.value) if element.tagName in ('testsuites', 'testsuite', 'testcase'): time = element.getAttributeNode('time') time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value) type_param = element.getAttributeNode('type_param') if type_param and type_param.value: type_param.value = '*' elif element.tagName == 'failure' or element.tagName == 'skipped': source_line_pat = r'^.*[/\\](.*:)\d+\n' # Replaces the source line information with a normalized form. message = element.getAttributeNode('message') message.value = re.sub(source_line_pat, '\\1*\n', message.value) for child in element.childNodes: if child.nodeType == Node.CDATA_SECTION_NODE: # Replaces the source line information with a normalized form. cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue) # Removes the actual stack trace. child.nodeValue = re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', cdata) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.NormalizeXml(child)
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_xml_test_utils.py
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test correctly determines whether to use colors.""" import os from googletest.test import gtest_test_utils IS_WINDOWS = os.name == 'nt' COLOR_ENV_VAR = 'GTEST_COLOR' COLOR_FLAG = 'gtest_color' COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-color-test_') def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: os.environ[env_var] = value elif env_var in os.environ: del os.environ[env_var] def UsesColor(term, color_env_var, color_flag): """Runs googletest-color-test_ and returns its exit code.""" SetEnvVar('TERM', term) SetEnvVar(COLOR_ENV_VAR, color_env_var) if color_flag is None: args = [] else: args = ['--%s=%s' % (COLOR_FLAG, color_flag)] p = gtest_test_utils.Subprocess([COMMAND] + args) return not p.exited or p.exit_code class GTestColorTest(gtest_test_utils.TestCase): def testNoEnvVarNoFlag(self): """Tests the case when there's neither GTEST_COLOR nor --gtest_color.""" if not IS_WINDOWS: self.assert_(not UsesColor('dumb', None, None)) self.assert_(not UsesColor('emacs', None, None)) self.assert_(not UsesColor('xterm-mono', None, None)) self.assert_(not UsesColor('unknown', None, None)) self.assert_(not UsesColor(None, None, None)) self.assert_(UsesColor('linux', None, None)) self.assert_(UsesColor('cygwin', None, None)) self.assert_(UsesColor('xterm', None, None)) self.assert_(UsesColor('xterm-color', None, None)) self.assert_(UsesColor('xterm-256color', None, None)) def testFlagOnly(self): """Tests the case when there's --gtest_color but not GTEST_COLOR.""" self.assert_(not UsesColor('dumb', None, 'no')) self.assert_(not UsesColor('xterm-color', None, 'no')) if not IS_WINDOWS: self.assert_(not UsesColor('emacs', None, 'auto')) self.assert_(UsesColor('xterm', None, 'auto')) self.assert_(UsesColor('dumb', None, 'yes')) self.assert_(UsesColor('xterm', None, 'yes')) def testEnvVarOnly(self): """Tests the case when there's GTEST_COLOR but not --gtest_color.""" 
self.assert_(not UsesColor('dumb', 'no', None)) self.assert_(not UsesColor('xterm-color', 'no', None)) if not IS_WINDOWS: self.assert_(not UsesColor('dumb', 'auto', None)) self.assert_(UsesColor('xterm-color', 'auto', None)) self.assert_(UsesColor('dumb', 'yes', None)) self.assert_(UsesColor('xterm-color', 'yes', None)) def testEnvVarAndFlag(self): """Tests the case when there are both GTEST_COLOR and --gtest_color.""" self.assert_(not UsesColor('xterm-color', 'no', 'no')) self.assert_(UsesColor('dumb', 'no', 'yes')) self.assert_(UsesColor('xterm-color', 'no', 'auto')) def testAliasesOfYesAndNo(self): """Tests using aliases in specifying --gtest_color.""" self.assert_(UsesColor('dumb', None, 'true')) self.assert_(UsesColor('dumb', None, 'YES')) self.assert_(UsesColor('dumb', None, 'T')) self.assert_(UsesColor('dumb', None, '1')) self.assert_(not UsesColor('xterm', None, 'f')) self.assert_(not UsesColor('xterm', None, 'false')) self.assert_(not UsesColor('xterm', None, '0')) self.assert_(not UsesColor('xterm', None, 'unknown')) if __name__ == '__main__': gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-color-test.py
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking googletest-filter-unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. 
""" import os import re try: from sets import Set as set # For Python 2.3 compatibility except ImportError: pass import sys from googletest.test import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. CAN_PASS_EMPTY_ENV = False if sys.executable: os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. CAN_UNSET_ENV = False if sys.executable: os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)' ]) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. 
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the googletest-filter-unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-filter-unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # Regex for parsing disabled banner from Google Test's output DISABLED_BANNER_REGEX = re.compile(r'^\[\s*DISABLED\s*\] (.*)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in googletest-filter-unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. 
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',

    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',

    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS

# Lazily initialized in setUp(): whether the binary was built with
# value-parameterized tests enabled.
param_tests_present = None

# Utilities.

environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


def RunAndReturnOutput(args=None):
  """Runs the test program and returns its output."""

  return gtest_test_utils.Subprocess([COMMAND] + (args or []),
                                     env=environ).output


def RunAndExtractTestList(args=None):
  """Runs the test program and returns its exit code and a list of tests run."""

  p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  test_case = ''
  test = ''
  for line in p.output.split('\n'):
    match = TEST_CASE_REGEX.match(line)
    if match is not None:
      test_case = match.group(1)
    else:
      match = TEST_REGEX.match(line)
      if match is not None:
        test = match.group(1)
        tests_run.append(test_case + '.' + test)
  return (tests_run, p.exit_code)


def RunAndExtractDisabledBannerList(args=None):
  """Runs the test program and returns tests that printed a disabled banner."""

  p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  banners_printed = []
  for line in p.output.split('\n'):
    match = DISABLED_BANNER_REGEX.match(line)
    if match is not None:
      banners_printed.append(match.group(1))
  return banners_printed


def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment."""

  # Take the snapshot BEFORE entering the try block: if it were taken inside
  # and failed, the finally clause would hit a NameError on original_env.
  original_env = environ.copy()
  try:
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(original_env)


def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""

  extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
               TOTAL_SHARDS_ENV_VAR: str(total_shards)}
  return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)

# The unit test.


class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.

  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""

    for elem in lhs:
      self.assertTrue(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assertTrue(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""

    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(set(set_var), set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""

    global param_tests_present
    if not param_tests_present:
      return list(set(tests_to_run) - set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.

    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of googletest-filter-unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.
    If check_exit_0 is true, also verifies that every shard exits with
    code 0.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
        return 0.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs googletest-filter-unittest_ with the given filter, and enables
    disabled tests.  Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """

    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""

    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""

    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""

    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""

    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""

    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""

    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""

    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""

    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
    ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
    ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""

    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""

    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""

    self.RunAndVerify('*z*', [
        'FooTest.Xyz',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
    ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""

    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BazTest.TestA',
    ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""

    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        'BazTest.TestA',
    ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
    ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
    ])

  def testNegativeFilters(self):
    """Tests filters that contain a negative-pattern section."""

    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestA',
        'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
    ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
    ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
    ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
    ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
    ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""

    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assertFalse(os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assertTrue(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assertFalse(os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assertNotIn('[==========]', output,
                       'Unexpected output during test enumeration.\n'
                       'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                       'correct flag value for listing Google Test tests.')

      self.assertTrue(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testDisabledBanner(self):
    """Tests that the disabled banner prints only tests that match filter."""

    make_filter = lambda s: ['--%s=%s' % (FILTER_FLAG, s)]

    banners = RunAndExtractDisabledBannerList(make_filter('*'))
    self.AssertSetEqual(banners, [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
    ])

    banners = RunAndExtractDisabledBannerList(make_filter('Bar*'))
    self.AssertSetEqual(
        banners, ['BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive'])

    banners = RunAndExtractDisabledBannerList(make_filter('*-Bar*'))
    self.AssertSetEqual(banners, ['BazTest.DISABLED_TestC'])

  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""

      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
      ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-filter-unittest.py
#!/usr/bin/env python # # Copyright 2019 Google LLC. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests Google Test's gtest skip in environment setup behavior. This script invokes gtest_skip_in_environment_setup_test_ and verifies its output. """ import re from googletest.test import gtest_test_utils # Path to the gtest_skip_in_environment_setup_test binary EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_skip_test') OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output # Test. 
class SkipEntireEnvironmentTest(gtest_test_utils.TestCase):

  def testSkipEntireEnvironmentTest(self):
    # The lone skipped test emits its banner exactly as recorded here.
    self.assertIn('Skipped\nskipping single test\n', OUTPUT)

    # A fixture-wide skip prints the same banner for each test in the
    # fixture, so it must occur at least twice in the captured output.
    fixture_banner = 'Skipped\nskipping all tests for this fixture\n'
    twice_pattern = '%s.*%s' % (fixture_banner, fixture_banner)
    self.assertIsNotNone(
        re.search(twice_pattern, OUTPUT, flags=re.DOTALL), repr(OUTPUT))

    # Skipping must never be reported as a failure.
    self.assertNotIn('FAILED', OUTPUT)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_skip_check_output_test.py
#!/usr/bin/env python # # Copyright 2018 Google LLC. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test uses filter provided via testbridge.""" import os from googletest.test import gtest_test_utils binary_name = 'gtest_testbridge_test_' COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name) TESTBRIDGE_NAME = 'TESTBRIDGE_TEST_ONLY' def Assert(condition): if not condition: raise AssertionError class GTestTestFilterTest(gtest_test_utils.TestCase): def testTestExecutionIsFiltered(self): """Tests that the test filter is picked up from the testbridge env var.""" subprocess_env = os.environ.copy() subprocess_env[TESTBRIDGE_NAME] = '*.TestThatSucceeds' p = gtest_test_utils.Subprocess(COMMAND, env=subprocess_env) self.assertEquals(0, p.exit_code) Assert('filter = *.TestThatSucceeds' in p.output) Assert('[ OK ] TestFilterTest.TestThatSucceeds' in p.output) Assert('[ PASSED ] 1 test.' in p.output) if __name__ == '__main__': gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_testbridge_test.py
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests Google Test's throw-on-failure mode with exceptions disabled.

This script invokes googletest-throw-on-failure-test_ (a program written
with Google Test) with different environments and command line flags.
"""

import os
from googletest.test import gtest_test_utils

# Constants.

# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'

# Path to the googletest-throw-on-failure-test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'googletest-throw-on-failure-test_')


# Utilities.


def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """

  env_var = env_var.upper()
  if value is not None:
    os.environ[env_var] = value
  elif env_var in os.environ:
    del os.environ[env_var]


def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""

  print('Running "%s". . .' % ' '.join(command))
  p = gtest_test_utils.Subprocess(command)
  return p.exited and p.exit_code == 0


# The tests.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs googletest-throw-on-failure-test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_THROW_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_throw_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True if and only if the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    failed = not Run(command)

    # Restore the environment so one sub-test cannot leak state into the next.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    # assertEqual replaces the deprecated assert_ alias, which was removed
    # from unittest in Python 3.12.
    self.assertEqual(failed, should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-throw-on-failure-test.py
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Verifies that test shuffling works."""

import os
from googletest.test import gtest_test_utils

# Command to run the googletest-shuffle-test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-shuffle-test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

TEST_FILTER = 'A*.A:A*.B:C*'

# Test lists computed lazily by CalculateTestLists(); module-level so they
# are computed only once for all test methods.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []


def AlsoRunDisabledTestsFlag():
  return '--gtest_also_run_disabled_tests'


def FilterFlag(test_filter):
  return '--gtest_filter=%s' % (test_filter,)


def RepeatFlag(n):
  return '--gtest_repeat=%s' % (n,)


def ShuffleFlag():
  return '--gtest_shuffle'


def RandomSeedFlag(n):
  return '--gtest_random_seed=%s' % (n,)


def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output."""

  environ_copy = os.environ.copy()
  environ_copy.update(extra_env)

  return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output


def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to googletest-shuffle-test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """

  test_iterations = []
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    if line.startswith('----'):
      # A '----' line marks the start of a new iteration's test list.
      tests = []
      test_iterations.append(tests)
    elif line.strip():
      tests.append(line.strip())  # 'TestCaseName.TestName'

  return test_iterations


def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names

  Returns:
    A list of test cases from 'tests', in their original order.
    Consecutive duplicates are removed.
  """

  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    if test_case not in test_cases:
      test_cases.append(test_case)

  return test_cases


def CalculateTestLists():
  """Calculates the list of tests run under different flags."""

  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])


class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    # assertNotEqual replaces the deprecated assert_ alias, which was
    # removed from unittest in Python 3.12.
    self.assertNotEqual(SHUFFLED_ALL_TESTS, ALL_TESTS)
    self.assertNotEqual(SHUFFLED_ACTIVE_TESTS, ACTIVE_TESTS)
    self.assertNotEqual(SHUFFLED_FILTERED_TESTS, FILTERED_TESTS)
    self.assertNotEqual(SHUFFLED_SHARDED_TESTS, SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assertNotEqual(GetTestCases(SHUFFLED_ALL_TESTS),
                        GetTestCases(ALL_TESTS))
    self.assertNotEqual(GetTestCases(SHUFFLED_ACTIVE_TESTS),
                        GetTestCases(ACTIVE_TESTS))
    self.assertNotEqual(GetTestCases(SHUFFLED_FILTERED_TESTS),
                        GetTestCases(FILTERED_TESTS))
    self.assertNotEqual(GetTestCases(SHUFFLED_SHARDED_TESTS),
                        GetTestCases(SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertIn(test, ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertIn(test, ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertIn(test, FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertIn(test, SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assertIn(test, SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assertIn(test, SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assertIn(test, SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assertIn(test, SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assertFalse(non_death_test_found,
                         '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      # Record each test case the first time we enter it (including the very
      # first one; without seeding the list when it is empty, no case is ever
      # recorded and the grouping check below never fires).
      if not test_cases or test_cases[-1] != test_case:
        test_cases.append(test_case)
      # A test case must appear exactly once in the recorded sequence of
      # case transitions; a second appearance means its tests interleave
      # with another case's.
      self.assertEqual(1, test_cases.count(test_case),
                       'Test case %s is not grouped together in %s' %
                       (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assertNotEqual(tests_in_iteration1, tests_in_iteration2)
    self.assertNotEqual(tests_in_iteration1, tests_in_iteration3)
    self.assertNotEqual(tests_in_iteration2, tests_in_iteration3)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-shuffle-test.py
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Verifies that Google Test warns the user when not initialized properly."""

from googletest.test import gtest_test_utils

COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-uninitialized-test_')


def Assert(condition):
  """Raises AssertionError when the given condition does not hold."""
  if not condition:
    raise AssertionError


def AssertEq(expected, actual):
  """Raises AssertionError (after printing both values) unless they match."""
  if expected != actual:
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError


def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""

  # Verifies that 'command' exits with code 1.
  result = gtest_test_utils.Subprocess(command)
  # NOTE(review): the notice check only runs when the binary exited
  # successfully (code 0) — presumably deliberate; confirm against the
  # test binary's intended behavior.
  if result.exited and result.exit_code == 0:
    Assert('IMPORTANT NOTICE' in result.output)
  Assert('InitGoogleTest' in result.output)


class GTestUninitializedTest(gtest_test_utils.TestCase):

  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-uninitialized-test.py
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.  All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests Google Test's exception catching behavior.

This script invokes googletest-catch-exceptions-test_ and
googletest-catch-exceptions-ex-test_ (programs written with
Google Test) and verifies their output.
"""

from googletest.test import gtest_test_utils

# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the googletest-catch-exceptions-ex-test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'googletest-catch-exceptions-ex-test_')

# Path to the googletest-catch-exceptions-test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'googletest-catch-exceptions-no-ex-test_')

environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)

TEST_LIST = gtest_test_utils.Subprocess(
    [EXE_PATH, LIST_TESTS_FLAG], env=environ).output

SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
    [EX_EXE_PATH], env=environ).output


# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # assertIn replaces the deprecated assert_ alias (removed from
      # unittest in Python 3.12) and reports the haystack on failure.
      self.assertIn('SEH exception with code 0x2a thrown '
                    'in the test fixture\'s constructor', test_output)
      self.assertIn('SEH exception with code 0x2a thrown '
                    'in the test fixture\'s destructor', test_output)
      self.assertIn('SEH exception with code 0x2a thrown in SetUpTestSuite()',
                    test_output)
      self.assertIn('SEH exception with code 0x2a thrown in TearDownTestSuite()',
                    test_output)
      self.assertIn('SEH exception with code 0x2a thrown in SetUp()',
                    test_output)
      self.assertIn('SEH exception with code 0x2a thrown in TearDown()',
                    test_output)
      self.assertIn('SEH exception with code 0x2a thrown in the test body',
                    test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)


class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
    expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assertIn('C++ exception with description '
                  '"Standard C++ exception" thrown '
                  'in the test fixture\'s constructor',
                  EX_BINARY_OUTPUT)
    self.assertNotIn('unexpected', EX_BINARY_OUTPUT,
                     'This failure belongs in this test only if '
                     '"CxxExceptionInConstructorTest" (no quotes) '
                     'appears on the same line as words "called unexpectedly"')

  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):

    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assertIn('C++ exception with description '
                    '"Standard C++ exception" thrown '
                    'in the test fixture\'s destructor',
                    EX_BINARY_OUTPUT)
      self.assertIn('CxxExceptionInDestructorTest::TearDownTestSuite() '
                    'called as expected.',
                    EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assertIn('C++ exception with description "Standard C++ exception"'
                  ' thrown in SetUpTestSuite()',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInConstructorTest::TearDownTestSuite() '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertNotIn('CxxExceptionInSetUpTestSuiteTest constructor '
                     'called as expected.',
                     EX_BINARY_OUTPUT)
    self.assertNotIn('CxxExceptionInSetUpTestSuiteTest destructor '
                     'called as expected.',
                     EX_BINARY_OUTPUT)
    self.assertNotIn('CxxExceptionInSetUpTestSuiteTest::SetUp() '
                     'called as expected.',
                     EX_BINARY_OUTPUT)
    self.assertNotIn('CxxExceptionInSetUpTestSuiteTest::TearDown() '
                     'called as expected.',
                     EX_BINARY_OUTPUT)
    self.assertNotIn('CxxExceptionInSetUpTestSuiteTest test body '
                     'called as expected.',
                     EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assertIn('C++ exception with description "Standard C++ exception"'
                  ' thrown in TearDownTestSuite()',
                  EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assertIn('C++ exception with description "Standard C++ exception"'
                  ' thrown in SetUp()',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInSetUpTest::TearDownTestSuite() '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInSetUpTest destructor '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInSetUpTest::TearDown() '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertNotIn('unexpected', EX_BINARY_OUTPUT,
                     'This failure belongs in this test only if '
                     '"CxxExceptionInSetUpTest" (no quotes) '
                     'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assertIn('C++ exception with description "Standard C++ exception"'
                  ' thrown in TearDown()',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInTearDownTest::TearDownTestSuite() '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInTearDownTest destructor '
                  'called as expected.',
                  EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assertIn('C++ exception with description "Standard C++ exception"'
                  ' thrown in the test body',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInTestBodyTest::TearDownTestSuite() '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInTestBodyTest destructor '
                  'called as expected.',
                  EX_BINARY_OUTPUT)
    self.assertIn('CxxExceptionInTestBodyTest::TearDown() '
                  'called as expected.',
                  EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assertIn('Unknown C++ exception thrown in the test body',
                  EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # (Typo fixed: was FITLER_OUT_SEH_TESTS_FLAG.)
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
        env=environ).output

    self.assertIn('Unhandled C++ exception terminating the program',
                  uncaught_exceptions_ex_binary_output)
    self.assertNotIn('unexpected', uncaught_exceptions_ex_binary_output)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-catch-exceptions-test.py
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test for the gtest_json_output module."""

import json
import os
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils

GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'

# Expected (normalized) JSON report for each test binary; '*' entries are
# wildcards matched by gtest_json_test_utils.normalize.
EXPECTED_1 = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [{
        u'name': u'PropertyOne',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'timestamp': u'*',
        u'testsuite': [{
            u'name': u'TestSomeProperties',
            u'file': u'gtest_xml_outfile1_test_.cc',
            u'line': 41,
            u'status': u'RUN',
            u'result': u'COMPLETED',
            u'time': u'*',
            u'timestamp': u'*',
            u'classname': u'PropertyOne',
            u'SetUpProp': u'1',
            u'TestSomeProperty': u'1',
            u'TearDownProp': u'1',
        }],
    }],
}

EXPECTED_2 = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [{
        u'name': u'PropertyTwo',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'timestamp': u'*',
        u'testsuite': [{
            u'name': u'TestSomeProperties',
            u'file': u'gtest_xml_outfile2_test_.cc',
            u'line': 41,
            u'status': u'RUN',
            u'result': u'COMPLETED',
            u'timestamp': u'*',
            u'time': u'*',
            u'classname': u'PropertyTwo',
            u'SetUpProp': u'2',
            u'TestSomeProperty': u'2',
            u'TearDownProp': u'2',
        }],
    }],
}


class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
  """Unit test for Google Test's JSON output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, '')
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    """Best-effort removal of the output files and their directory."""
    # os.error is just an alias for OSError; use the modern name.
    for name in (GTEST_OUTPUT_1_TEST + '.json', GTEST_OUTPUT_2_TEST + '.json'):
      try:
        os.remove(os.path.join(self.output_dir_, name))
      except OSError:
        pass
    try:
      os.rmdir(self.output_dir_)
    except OSError:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_2)

  def _TestOutFile(self, test_name, expected):
    """Runs the named test binary and compares its JSON report to expected."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    # assertTrue/assertEqual replace the deprecated assert_/assertEquals
    # aliases, which were removed from unittest in Python 3.12.
    self.assertTrue(p.exited)
    self.assertEqual(0, p.exit_code)

    output_file_name1 = test_name + '.json'
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    # 'lt-'-prefixed variant — presumably for libtool-wrapped binaries;
    # confirm against the build setup.
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assertTrue(os.path.isfile(output_file1) or
                    os.path.isfile(output_file2),
                    output_file1)

    # Pick whichever variant exists and parse it with a context manager so
    # the handle is closed even if json.load raises.
    output_file = (output_file1 if os.path.isfile(output_file1)
                   else output_file2)
    with open(output_file) as f:
      actual = json.load(f)

    self.assertEqual(expected, gtest_json_test_utils.normalize(actual))


if __name__ == '__main__':
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-json-outfiles-test.py
#!/usr/bin/env python
#
# Copyright 2019 Google LLC.  All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests Google Test's gtest skip in environment setup behavior.

This script invokes gtest_skip_in_environment_setup_test_ and verifies its
output.
"""

from googletest.test import gtest_test_utils

# Location of the compiled gtest_skip_in_environment_setup_test binary.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_skip_in_environment_setup_test')

# Capture the program's output once at import time; both assertions below
# inspect this single run.
OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output


class SkipEntireEnvironmentTest(gtest_test_utils.TestCase):
  """Checks that skipping inside environment setup skips every test."""

  def testSkipEntireEnvironmentTest(self):
    # The skip message must appear, and nothing may have been reported as
    # a failure.
    self.assertIn('Skipping the entire environment', OUTPUT)
    self.assertNotIn('FAILED', OUTPUT)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/gtest_skip_environment_check_output_test.py
#!/usr/bin/env python
#
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test fail_fast.

A user can specify if a Google Test program should continue test execution
after a test failure via the GTEST_FAIL_FAST environment variable or the
--gtest_fail_fast flag. The default value of the flag can also be changed
by Bazel fail fast environment variable TESTBRIDGE_TEST_RUNNER_FAIL_FAST.

This script tests such functionality by invoking
googletest-failfast-unittest_ (a program written with Google Test) with
different environments and command line flags.
"""

import os
from googletest.test import gtest_test_utils

# Constants.

# Bazel testbridge environment variable for fail fast
BAZEL_FAIL_FAST_ENV_VAR = 'TESTBRIDGE_TEST_RUNNER_FAIL_FAST'

# The environment variable for specifying fail fast.
FAIL_FAST_ENV_VAR = 'GTEST_FAIL_FAST'

# The command line flag for specifying fail fast.
FAIL_FAST_FLAG = 'gtest_fail_fast'

# The command line flag to run disabled tests.
RUN_DISABLED_FLAG = 'gtest_also_run_disabled_tests'

# The command line flag for specifying a filter.
FILTER_FLAG = 'gtest_filter'

# Command to run the googletest-failfast-unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath(
    'googletest-failfast-unittest_')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output

# Utilities.

# Private copy of the process environment; SetEnvVar mutates this copy and it
# is passed explicitly to each child process, so os.environ stays untouched.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


def RunAndReturnOutput(test_suite=None, fail_fast=None, run_disabled=False):
  """Runs the test program and returns its output.

  Args:
    test_suite: name of the test suite to filter on via --gtest_filter, or
      None to run all suites.
    fail_fast: None to omit the flag entirely; a str to pass
      --gtest_fail_fast=<value>; a truthy non-str for --gtest_fail_fast; a
      falsy non-str for --nogtest_fail_fast.
    run_disabled: when True, pass --gtest_also_run_disabled_tests.

  Returns:
    A tuple of (text output of the child process, contents of the XML
    report it produced).
  """

  args = []
  xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                          '.GTestFailFastUnitTest.xml')
  args += ['--gtest_output=xml:' + xml_path]
  if fail_fast is not None:
    if isinstance(fail_fast, str):
      args += ['--%s=%s' % (FAIL_FAST_FLAG, fail_fast)]
    elif fail_fast:
      args += ['--%s' % FAIL_FAST_FLAG]
    else:
      args += ['--no%s' % FAIL_FAST_FLAG]
  if test_suite:
    args += ['--%s=%s.*' % (FILTER_FLAG, test_suite)]
  if run_disabled:
    args += ['--%s' % RUN_DISABLED_FLAG]
  txt_out = gtest_test_utils.Subprocess([COMMAND] + args, env=environ).output
  with open(xml_path) as xml_file:
    return txt_out, xml_file.read()


# The unit test.


class GTestFailFastUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag for fail_fast."""

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the fail_fast."""
    txt, _ = RunAndReturnOutput()
    self.assertIn('22 FAILED TEST', txt)

  def testGoogletestFlag(self):
    """Tests specifying fail_fast via the --gtest_fail_fast flag."""
    txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=True)
    self.assertIn('1 FAILED TEST', txt)
    self.assertIn('[  SKIPPED ] 3 tests', txt)

    txt, _ = RunAndReturnOutput(test_suite='HasSimpleTest', fail_fast=False)
    self.assertIn('4 FAILED TEST', txt)
    self.assertNotIn('[  SKIPPED ]', txt)

  def testGoogletestEnvVar(self):
    """Tests the behavior of specifying fail_fast via Googletest env var."""
    try:
      SetEnvVar(FAIL_FAST_ENV_VAR, '1')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)

      SetEnvVar(FAIL_FAST_ENV_VAR, '0')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('4 FAILED TEST', txt)
      self.assertNotIn('[  SKIPPED ]', txt)
    finally:
      SetEnvVar(FAIL_FAST_ENV_VAR, None)

  def testBazelEnvVar(self):
    """Tests the behavior of specifying fail_fast via Bazel testbridge."""
    try:
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '1')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)

      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('4 FAILED TEST', txt)
      self.assertNotIn('[  SKIPPED ]', txt)
    finally:
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)

  def testFlagOverridesEnvVar(self):
    """Tests precedence of flag over env var."""
    try:
      SetEnvVar(FAIL_FAST_ENV_VAR, '0')
      # fail_fast is passed positionally here (second positional parameter).
      txt, _ = RunAndReturnOutput('HasSimpleTest', True)
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)
    finally:
      SetEnvVar(FAIL_FAST_ENV_VAR, None)

  def testGoogletestEnvVarOverridesBazelEnvVar(self):
    """Tests that the Googletest native env var over Bazel testbridge."""
    try:
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, '0')
      SetEnvVar(FAIL_FAST_ENV_VAR, '1')
      txt, _ = RunAndReturnOutput('HasSimpleTest')
      self.assertIn('1 FAILED TEST', txt)
      self.assertIn('[  SKIPPED ] 3 tests', txt)
    finally:
      SetEnvVar(FAIL_FAST_ENV_VAR, None)
      SetEnvVar(BAZEL_FAIL_FAST_ENV_VAR, None)

  def testEventListener(self):
    """Tests that event-listener callbacks still fire under fail_fast."""
    txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=True)
    self.assertIn('1 FAILED TEST', txt)
    self.assertIn('[  SKIPPED ] 3 tests', txt)
    for expected_count, callback in [(1, 'OnTestSuiteStart'),
                                     (5, 'OnTestStart'),
                                     (5, 'OnTestEnd'),
                                     (5, 'OnTestPartResult'),
                                     (1, 'OnTestSuiteEnd')]:
      self.assertEqual(
          expected_count, txt.count(callback),
          'Expected %d calls to callback %s match count on output: %s ' %
          (expected_count, callback, txt))

    txt, _ = RunAndReturnOutput(test_suite='HasSkipTest', fail_fast=False)
    self.assertIn('3 FAILED TEST', txt)
    self.assertIn('[  SKIPPED ] 1 test', txt)
    for expected_count, callback in [(1, 'OnTestSuiteStart'),
                                     (5, 'OnTestStart'),
                                     (5, 'OnTestEnd'),
                                     (5, 'OnTestPartResult'),
                                     (1, 'OnTestSuiteEnd')]:
      self.assertEqual(
          expected_count, txt.count(callback),
          'Expected %d calls to callback %s match count on output: %s ' %
          (expected_count, callback, txt))

  def assertXmlResultCount(self, result, count, xml):
    """Asserts how many times result="<result>" appears in the XML report."""
    self.assertEqual(
        count, xml.count('result="%s"' % result),
        'Expected \'result="%s"\' match count of %s: %s ' %
        (result, count, xml))

  def assertXmlStatusCount(self, status, count, xml):
    """Asserts how many times status="<status>" appears in the XML report."""
    self.assertEqual(
        count, xml.count('status="%s"' % status),
        'Expected \'status="%s"\' match count of %s: %s ' %
        (status, count, xml))

  def assertFailFastXmlAndTxtOutput(self,
                                    fail_fast,
                                    test_suite,
                                    passed_count,
                                    failure_count,
                                    skipped_count,
                                    suppressed_count,
                                    run_disabled=False):
    """Assert XML and text output of a test execution."""

    txt, xml = RunAndReturnOutput(test_suite, fail_fast, run_disabled)
    if failure_count > 0:
      self.assertIn('%s FAILED TEST' % failure_count, txt)
    if suppressed_count > 0:
      self.assertIn('%s DISABLED TEST' % suppressed_count, txt)
    if skipped_count > 0:
      self.assertIn('[  SKIPPED ] %s tests' % skipped_count, txt)
    # "run" covers everything that started; "notrun" covers disabled tests
    # that were suppressed.
    self.assertXmlStatusCount('run',
                              passed_count + failure_count + skipped_count,
                              xml)
    self.assertXmlStatusCount('notrun', suppressed_count, xml)
    self.assertXmlResultCount('completed', passed_count + failure_count, xml)
    self.assertXmlResultCount('skipped', skipped_count, xml)
    self.assertXmlResultCount('suppressed', suppressed_count, xml)

  def assertFailFastBehavior(self,
                             test_suite,
                             passed_count,
                             failure_count,
                             skipped_count,
                             suppressed_count,
                             run_disabled=False):
    """Assert --fail_fast via flag."""

    # Every truthy spelling of the flag must behave identically.
    for fail_fast in ('true', '1', 't', True):
      self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
                                         failure_count, skipped_count,
                                         suppressed_count, run_disabled)

  def assertNotFailFastBehavior(self,
                                test_suite,
                                passed_count,
                                failure_count,
                                skipped_count,
                                suppressed_count,
                                run_disabled=False):
    """Assert --nofail_fast via flag."""

    # Every falsy spelling of the flag must behave identically.
    for fail_fast in ('false', '0', 'f', False):
      self.assertFailFastXmlAndTxtOutput(fail_fast, test_suite, passed_count,
                                         failure_count, skipped_count,
                                         suppressed_count, run_disabled)

  def testFlag_HasFixtureTest(self):
    """Tests the behavior of fail_fast and TEST_F."""
    self.assertFailFastBehavior(
        test_suite='HasFixtureTest',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0)
    self.assertNotFailFastBehavior(
        test_suite='HasFixtureTest',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0)

  def testFlag_HasSimpleTest(self):
    """Tests the behavior of fail_fast and TEST."""
    self.assertFailFastBehavior(
        test_suite='HasSimpleTest',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0)
    self.assertNotFailFastBehavior(
        test_suite='HasSimpleTest',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0)

  def testFlag_HasParametersTest(self):
    """Tests the behavior of fail_fast and TEST_P."""
    self.assertFailFastBehavior(
        test_suite='HasParametersSuite/HasParametersTest',
        passed_count=0,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0)
    self.assertNotFailFastBehavior(
        test_suite='HasParametersSuite/HasParametersTest',
        passed_count=0,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0)

  def testFlag_HasDisabledTest(self):
    """Tests the behavior of fail_fast and Disabled test cases."""
    self.assertFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=1,
        skipped_count=2,
        suppressed_count=1,
        run_disabled=False)
    self.assertNotFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=3,
        skipped_count=0,
        suppressed_count=1,
        run_disabled=False)

  def testFlag_HasDisabledRunDisabledTest(self):
    """Tests the behavior of fail_fast and Disabled test cases enabled."""
    self.assertFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0,
        run_disabled=True)
    self.assertNotFailFastBehavior(
        test_suite='HasDisabledTest',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0,
        run_disabled=True)

  def testFlag_HasDisabledSuiteTest(self):
    """Tests the behavior of fail_fast and Disabled test suites."""
    self.assertFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=0,
        failure_count=0,
        skipped_count=0,
        suppressed_count=5,
        run_disabled=False)
    self.assertNotFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=0,
        failure_count=0,
        skipped_count=0,
        suppressed_count=5,
        run_disabled=False)

  def testFlag_HasDisabledSuiteRunDisabledTest(self):
    """Tests the behavior of fail_fast and Disabled test suites enabled."""
    self.assertFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=1,
        failure_count=1,
        skipped_count=3,
        suppressed_count=0,
        run_disabled=True)
    self.assertNotFailFastBehavior(
        test_suite='DISABLED_HasDisabledSuite',
        passed_count=1,
        failure_count=4,
        skipped_count=0,
        suppressed_count=0,
        run_disabled=True)

  # Death tests are only defined in this method when the child binary
  # advertises them (checked once at import time).
  if SUPPORTS_DEATH_TESTS:

    def testFlag_HasDeathTest(self):
      """Tests the behavior of fail_fast and death tests."""
      self.assertFailFastBehavior(
          test_suite='HasDeathTest',
          passed_count=1,
          failure_count=1,
          skipped_count=3,
          suppressed_count=0)
      self.assertNotFailFastBehavior(
          test_suite='HasDeathTest',
          passed_count=1,
          failure_count=4,
          skipped_count=0,
          suppressed_count=0)


if __name__ == '__main__':
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googletest/test/googletest-failfast-unittest.py
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Tests that leaked mock objects can be caught be Google Mock.""" from googlemock.test import gmock_test_utils PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_') TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*'] TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*'] TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*'] environ = gmock_test_utils.environ SetEnvVar = gmock_test_utils.SetEnvVar # Tests in this file run a Google-Test-based test program and expect it # to terminate prematurely. Therefore they are incompatible with # the premature-exit-file protocol by design. Unset the # premature-exit filepath to prevent Google Test from creating # the file. SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None) class GMockLeakTest(gmock_test_utils.TestCase): def testCatchesLeakedMockByDefault(self): self.assertNotEqual( 0, gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL, env=environ).exit_code) self.assertNotEqual( 0, gmock_test_utils.Subprocess(TEST_WITH_ON_CALL, env=environ).exit_code) def testDoesNotCatchLeakedMockWhenDisabled(self): self.assertEquals( 0, gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL + ['--gmock_catch_leaked_mocks=0'], env=environ).exit_code) self.assertEquals( 0, gmock_test_utils.Subprocess(TEST_WITH_ON_CALL + ['--gmock_catch_leaked_mocks=0'], env=environ).exit_code) def testCatchesLeakedMockWhenEnabled(self): self.assertNotEqual( 0, gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL + ['--gmock_catch_leaked_mocks'], env=environ).exit_code) self.assertNotEqual( 0, gmock_test_utils.Subprocess(TEST_WITH_ON_CALL + ['--gmock_catch_leaked_mocks'], env=environ).exit_code) def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self): self.assertNotEqual( 0, gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL + ['--gmock_catch_leaked_mocks=1'], env=environ).exit_code) def testCatchesMultipleLeakedMocks(self): self.assertNotEqual( 0, 
gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS + ['--gmock_catch_leaked_mocks'], env=environ).exit_code) if __name__ == '__main__': gmock_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googlemock/test/gmock_leak_test.py
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test utilities for Google C++ Mocking Framework.

This module is a thin facade over gtest_test_utils so that gmock test
scripts do not depend on the Google Test utilities module directly.
"""

import os

# pylint: disable=C6204
from googletest.test import gtest_test_utils


def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  return gtest_test_utils.GetSourceDir()


def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting
  file doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  return gtest_test_utils.GetTestExecutablePath(executable_name)


def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code
  # On Unix, the status word must be decoded with os.WEXITSTATUS(); a
  # process that didn't exit normally has no exit status.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)


# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409

# Re-exported utilities from gtest_test_utils, so callers can use a single
# import.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR

# pylint: enable-msg=C6409


def Main():
  """Runs the unit test."""
  gtest_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googlemock/test/gmock_test_utils.py
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

r"""Tests the text output of Google C++ Mocking Framework.

To update the golden file:
  gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
  gmock_output_test.py --gengolden
  gmock_output_test.py
"""

from io import open  # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
from googlemock.test import gmock_test_utils

# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'

PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
# Stack traces and timing are suppressed so the output is deterministic.
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  return s.replace('\r\n', '\n').replace('\r', '\n')


def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""

  output = re.sub(r'.*gtest_main.*\n', '', output)
  output = re.sub(r'\[.*\d+ tests.*\n', '', output)
  output = re.sub(r'\[.* test environment .*\n', '', output)
  output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
  output = re.sub(r'.* FAILED TESTS\n', '', output)
  return output


def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
       output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE:#: '.
  """

  return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)


def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""

  return re.sub(r' error: ', ' Failure\n', output)


def RemoveMemoryAddresses(output):
  """Removes memory addresses from the test output."""

  return re.sub(r'@\w+', '@0x#', output)


def RemoveTestNamesOfLeakedMocks(output):
  """Removes the test names of leaked mock objects from the test output."""

  return re.sub(r'\(used in test .+\) ', '', output)


def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""

  # findall() returns a list of all matches of the regex in output.
  # For example, if '(used in test FooTest.Bar)' is in output, the
  # list will contain 'FooTest.Bar'.
  return re.findall(r'\(used in test (.+)\)', output)


def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """

  output = ToUnixLineEnding(output)
  output = RemoveReportHeaderAndFooter(output)
  output = NormalizeErrorMarker(output)
  output = RemoveLocations(output)
  output = RemoveMemoryAddresses(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))


def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string."""

  return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output


def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns normalized output and a list of leaky tests.

  Args:
    cmd:  the shell command.
  """

  # Disables exception pop-ups on Windows.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))


class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares normalized gmock_output_test_ output against the golden file."""

  def testOutput(self):
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    # Use a context manager so the golden file handle is always closed.
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = golden_file.read().decode('utf-8')

    # The normalized output should match the golden file.
    self.assertEqual(golden, output)

    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEqual(['GMockOutputTest.CatchesLeakedMocks',
                      'GMockOutputTest.CatchesLeakedMocks'],
                     leaky_tests)


if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    # BUG FIX: the golden file is opened in binary mode but `output` is a
    # str, so write() raised TypeError on Python 3.  Encode explicitly,
    # matching the utf-8 decode used when the golden file is read back.
    with open(GOLDEN_PATH, 'wb') as golden_file:
      golden_file.write(output.encode('utf-8'))

    # Suppress the error "googletest was imported but a call to its main()
    # was never detected."
    os._exit(0)
  else:
    gmock_test_utils.Main()
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/GoogleTestLib/googletest/googlemock/test/gmock_output_test.py
import os
import ycm_core

# Base compile flags used when no compilation database is available.
flags = [
    '-Wall',
    '-Wextra',
    '-Werror',
    '-x',
    'c',
    '-Iinclude',
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = 'obj'

if os.path.exists(compilation_database_folder):
    database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
    database = None

SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']


def DirectoryOfThisScript():
    """Returns the absolute directory containing this configuration file."""
    return os.path.dirname(os.path.abspath(__file__))


def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Rewrites relative include paths in compile flags as absolute paths."""
    if not working_directory:
        return list(flags)

    rewritten_flags = []
    expect_path_next = False
    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']

    for flag in flags:
        rewritten = flag

        if expect_path_next:
            # The previous flag was a bare path option, so this flag is the
            # path itself.
            expect_path_next = False
            if not flag.startswith('/'):
                rewritten = os.path.join(working_directory, flag)

        for path_flag in path_flags:
            if flag == path_flag:
                # Bare form, e.g. "-I", "include": path arrives next.
                expect_path_next = True
                break
            if flag.startswith(path_flag):
                # Fused form, e.g. "-Iinclude": rewrite the tail in place.
                tail = flag[len(path_flag):]
                rewritten = path_flag + os.path.join(working_directory, tail)
                break

        if rewritten:
            rewritten_flags.append(rewritten)
    return rewritten_flags


def IsHeaderFile(filename):
    """Returns True when 'filename' has a C/C++ header extension."""
    return os.path.splitext(filename)[1] in ['.h', '.hxx', '.hpp', '.hh']


def GetCompilationInfoForFile(filename):
    """Returns compilation info for a file, with a header fallback.

    The compilation_commands.json file generated by CMake does not have
    entries for header files, so for a header we ask the database for the
    flags of a sibling source file with the same basename, if one exists.
    """
    if IsHeaderFile(filename):
        stem = os.path.splitext(filename)[0]
        for extension in SOURCE_EXTENSIONS:
            candidate = stem + extension
            if os.path.exists(candidate):
                info = database.GetCompilationInfoForFile(candidate)
                if info.compiler_flags_:
                    return info
        return None
    return database.GetCompilationInfoForFile(filename)


def FlagsForFile(filename, **kwargs):
    """YCM entry point: returns the compile flags to use for 'filename'."""
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return
        # a python list, but a "list-like" StringVec object.
        compilation_info = GetCompilationInfoForFile(filename)
        if not compilation_info:
            return None

        final_flags = MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_)

        # NOTE: This is just for YouCompleteMe; it's highly likely that your
        # project does NOT need to remove the stdlib flag. DO NOT USE THIS IN
        # YOUR ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
        try:
            final_flags.remove('-stdlib=libc++')
        except ValueError:
            pass
    else:
        final_flags = MakeRelativePathsInFlagsAbsolute(
            flags, DirectoryOfThisScript())

    return {'flags': final_flags, 'do_cache': True}
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/UnitTestFrameworkPkg/Library/CmockaLib/cmocka/.ycm_extra_conf.py
# @file # Script to Build OVMF UEFI firmware # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from PlatformBuildLib import SettingsManager from PlatformBuildLib import PlatformBuilder # ####################################################################################### # # Common Configuration # # ####################################################################################### # class CommonPlatform(): ''' Common settings for this platform. Define static data here and use for the different parts of stuart ''' PackagesSupported = ("ArmVirtPkg",) ArchSupported = ("AARCH64", "ARM") TargetsSupported = ("DEBUG", "RELEASE", "NOOPT") Scopes = ('armvirt', 'edk2-build') WorkspaceRoot = os.path.realpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), "..", "..")) DscName = os.path.join("ArmVirtPkg", "ArmVirtQemuKernel.dsc") # this platform produces an executable image that is invoked using # the Linux/arm64 kernel boot protocol FvQemuArg = " -kernel " import PlatformBuildLib PlatformBuildLib.CommonPlatform = CommonPlatform
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/ArmVirtPkg/PlatformCI/QemuKernelBuild.py
# @file
# Script to Build ArmVirtPkg UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import io
from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toollib.utility_functions import RunCmd
from edk2toollib.utility_functions import GetHostInfo

    # ####################################################################################### #
    #                                Configuration for Update & Setup                         #
    # ####################################################################################### #


class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager):
    '''Settings provider for the stuart update / setup / pr-eval invocables.

    NOTE: relies on the module-level name ``CommonPlatform`` being injected by
    the platform entry-point script (QemuBuild.py, QemuKernelBuild.py, ...)
    before any method is called.
    '''

    def GetPackagesSupported(self):
        ''' return iterable of edk2 packages supported by this build.
        These should be edk2 workspace relative paths '''
        return CommonPlatform.PackagesSupported

    def GetArchitecturesSupported(self):
        ''' return iterable of edk2 architectures supported by this build '''
        return CommonPlatform.ArchSupported

    def GetTargetsSupported(self):
        ''' return iterable of edk2 target tags supported by this build '''
        return CommonPlatform.TargetsSupported

    def GetRequiredSubmodules(self):
        ''' return iterable containing RequiredSubmodule objects.
        If no RequiredSubmodules return an empty iterable '''
        rs = []

        # intentionally declare this one with recursive false to avoid overhead
        rs.append(RequiredSubmodule(
            "CryptoPkg/Library/OpensslLib/openssl", False))

        # To avoid maintenance of this file for every new submodule
        # lets just parse the .gitmodules and add each if not already in list.
        # The GetRequiredSubmodules is designed to allow a build to optimize
        # the desired submodules but it isn't necessary for this repository.
        result = io.StringIO()
        ret = RunCmd("git", "config --file .gitmodules --get-regexp path",
                     workingdir=self.GetWorkspaceRoot(), outstream=result)
        # Cmd output is expected to look like:
        # submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl
        # submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
        if ret == 0:
            for line in result.getvalue().splitlines():
                _, _, path = line.partition(" ")
                # str.partition never returns None; test truthiness instead so
                # lines without a path component are skipped rather than
                # producing a RequiredSubmodule("").
                if path:
                    if path not in [x.path for x in rs]:
                        # add it with recursive since we don't know
                        rs.append(RequiredSubmodule(path, True))
        return rs

    def SetArchitectures(self, list_of_requested_architectures):
        ''' Confirm the requests architecture list is valid and configure SettingsManager
        to run only the requested architectures.

        Raise Exception if a list_of_requested_architectures is not supported
        '''
        unsupported = set(list_of_requested_architectures) - \
            set(self.GetArchitecturesSupported())
        if unsupported:
            errorString = (
                "Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical(errorString)
            raise Exception(errorString)
        self.ActualArchitectures = list_of_requested_architectures

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        scopes = CommonPlatform.Scopes
        ActualToolChainTag = shell_environment.GetBuildVars().GetValue("TOOL_CHAIN_TAG", "")

        if GetHostInfo().os.upper() == "LINUX" and ActualToolChainTag.upper().startswith("GCC"):
            if "AARCH64" in self.ActualArchitectures:
                scopes += ("gcc_aarch64_linux",)
            if "ARM" in self.ActualArchitectures:
                scopes += ("gcc_arm_linux",)
        return scopes

    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        ''' Filter other cases that this package should be built
        based on changed files. This should cover things that can't
        be detected as dependencies. '''
        build_these_packages = []
        possible_packages = potentialPackagesList.copy()
        for f in changedFilesList:
            # BaseTools files that might change the build
            if "BaseTools" in f:
                # BUGFIX: os.path.splitext() returns a (root, ext) tuple, so the
                # original tuple-vs-string comparison was always True and even
                # .txt/.md BaseTools changes triggered a full build.  Compare the
                # extension component instead.
                if os.path.splitext(f)[1] not in [".txt", ".md"]:
                    build_these_packages = possible_packages
                    break

            # if the azure pipeline platform template file changed
            if "platform-build-run-steps.yml" in f:
                build_these_packages = possible_packages
                break

        return build_these_packages

    def GetPlatformDscAndConfig(self) -> tuple:
        ''' If a platform desires to provide its DSC then Policy 4 will evaluate if
        any of the changes will be built in the dsc.

        The tuple should be (<workspace relative path to dsc file>, <input dictionary of dsc key value pairs>)
        '''
        return (CommonPlatform.DscName, {})

    # ####################################################################################### #
    #                         Actual Configuration for Platform Build                         #
    # ####################################################################################### #


class PlatformBuilder(UefiBuilder, BuildSettingsManager):
    '''Settings provider + builder for the stuart platform-build invocable.'''

    def __init__(self):
        UefiBuilder.__init__(self)

    def AddCommandLineOptions(self, parserObj):
        ''' Add command line options to the argparser '''
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="AARCH64",
                               help="Optional - Architecture to build.  Default = AARCH64")

    def RetrieveCommandLineOptions(self, args):
        '''  Retrieve command line options from the argparser '''

        shell_environment.GetBuildVars().SetValue(
            "TARGET_ARCH", args.build_arch.upper(), "From CmdLine")
        shell_environment.GetBuildVars().SetValue(
            "ACTIVE_PLATFORM", CommonPlatform.DscName, "From CmdLine")

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetPackagesPath(self):
        ''' Return a list of workspace relative paths that should be mapped as edk2 PackagesPath '''
        return ()

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        scopes = CommonPlatform.Scopes
        ActualToolChainTag = shell_environment.GetBuildVars().GetValue("TOOL_CHAIN_TAG", "")
        Arch = shell_environment.GetBuildVars().GetValue("TARGET_ARCH", "")

        if GetHostInfo().os.upper() == "LINUX" and ActualToolChainTag.upper().startswith("GCC"):
            if "AARCH64" == Arch:
                scopes += ("gcc_aarch64_linux",)
            elif "ARM" == Arch:
                scopes += ("gcc_arm_linux",)
        return scopes

    def GetName(self):
        ''' Get the name of the repo, platform, or product being build '''
        ''' Used for naming the log file, among others '''
        # check the startup nsh flag and if set then rename the log file.
        # this helps in CI so we don't overwrite the build log since running
        # uses the stuart_build command.
        if shell_environment.GetBuildVars().GetValue("MAKE_STARTUP_NSH", "FALSE") == "TRUE":
            return "ArmVirtPkg_With_Run"
        return "ArmVirtPkg"

    def GetLoggingLevel(self, loggerType):
        ''' Get the logging level for a given type
        base == lowest logging level supported
        con  == Screen logging
        txt  == plain text file logging
        md   == markdown file logging
        '''
        return logging.DEBUG

    def SetPlatformEnv(self):
        logging.debug("PlatformBuilder SetPlatformEnv")
        self.env.SetValue("PRODUCT_NAME", "ArmVirtQemu", "Platform Hardcoded")
        self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false")
        self.env.SetValue("QEMU_HEADLESS", "FALSE", "Default to false")
        return 0

    def PlatformPreBuild(self):
        return 0

    def PlatformPostBuild(self):
        return 0

    def FlashRomImage(self):
        '''Boot the produced firmware image under QEMU (stuart --FlashOnly/run).'''
        VirtualDrive = os.path.join(self.env.GetValue(
            "BUILD_OUTPUT_BASE"), "VirtualDrive")
        os.makedirs(VirtualDrive, exist_ok=True)
        OutputPath_FV = os.path.join(
            self.env.GetValue("BUILD_OUTPUT_BASE"), "FV")
        Built_FV = os.path.join(OutputPath_FV, "QEMU_EFI.fd")

        # pad fd to 64mb
        with open(Built_FV, "ab") as fvfile:
            fvfile.seek(0, os.SEEK_END)
            additional = b'\0' * ((64 * 1024 * 1024)-fvfile.tell())
            fvfile.write(additional)

        # QEMU must be on that path

        # Unique Command and Args parameters per ARCH
        if (self.env.GetValue("TARGET_ARCH").upper() == "AARCH64"):
            cmd = "qemu-system-aarch64"
            args = "-M virt"
            args += " -cpu cortex-a57"  # emulate cpu
        elif (self.env.GetValue("TARGET_ARCH").upper() == "ARM"):
            cmd = "qemu-system-arm"
            args = "-M virt,highmem=off"
            args += " -cpu cortex-a15"  # emulate cpu
        else:
            raise NotImplementedError()

        # Common Args
        args += CommonPlatform.FvQemuArg + Built_FV  # path to fw
        args += " -m 1024"  # 1gb memory
        # turn off network
        args += " -net none"
        # Serial messages out
        args += " -serial stdio"
        # Mount disk with startup.nsh
        args += f" -drive file=fat:rw:{VirtualDrive},format=raw,media=disk"

        # Conditional Args
        if (self.env.GetValue("QEMU_HEADLESS").upper() == "TRUE"):
            args += " -display none"  # no graphics

        if (self.env.GetValue("MAKE_STARTUP_NSH").upper() == "TRUE"):
            # Use a context manager so the script is flushed and closed before
            # QEMU mounts the virtual drive (original used bare open/close).
            with open(os.path.join(VirtualDrive, "startup.nsh"), "w") as nsh:
                nsh.write("BOOT SUCCESS !!! \n")
                # add commands here
                nsh.write("reset -s\n")

        ret = RunCmd(cmd, args)

        if ret == 0xc0000005:
            # for some reason getting a c0000005 on successful return
            return 0

        return ret
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/ArmVirtPkg/PlatformCI/PlatformBuildLib.py
# @file
# Script to Build the ArmVirtPkg ArmVirtQemu UEFI firmware.
# (Header previously said "OVMF"; this entry point builds the ArmVirt
# NOR-flash image, not OvmfPkg.)
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

# Make the shared PlatformBuildLib module importable when this script is
# invoked directly by stuart (it lives next to this file, not on sys.path).
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #


class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("ArmVirtPkg",)
    ArchSupported = ("AARCH64", "ARM")
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('armvirt', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    DscName = os.path.join("ArmVirtPkg", "ArmVirtQemu.dsc")

    # this platform produces a bootable NOR flash image
    FvQemuArg = " -pflash "


# Inject this platform's configuration into the shared library module so the
# imported SettingsManager/PlatformBuilder classes resolve CommonPlatform.
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/ArmVirtPkg/PlatformCI/QemuBuild.py
# @file
# Script to Build ArmVirtPkg UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

# The shared build library sits beside this script; extend sys.path so it can
# be imported no matter where stuart launches us from.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #


class CommonPlatform():
    '''Static platform description consumed by the shared stuart classes.

    PlatformBuildLib reads these attributes for every stage (setup, update,
    pr-eval and build) of the ArmVirtKvmTool platform.
    '''
    # Workspace layout: two levels up from this PlatformCI directory.
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    PackagesSupported = ("ArmVirtPkg",)
    ArchSupported = ("AARCH64", "ARM")
    TargetsSupported = ("DEBUG", "RELEASE")
    Scopes = ('armvirt', 'edk2-build')

    DscName = os.path.join("ArmVirtPkg", "ArmVirtKvmTool.dsc")

    # KvmTool images are not booted through QEMU here, so no firmware
    # argument is passed on the command line.
    FvQemuArg = ""  # ignored


# Hand this platform's configuration to the shared module so the imported
# SettingsManager / PlatformBuilder classes can resolve CommonPlatform.
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
nvtrust-main
infrastructure/kvm/ovmf/ovmf_source/ArmVirtPkg/PlatformCI/KvmToolBuild.py
# Keycode Map Generator Python Tests
#
# Copyright 2017 Pierre Ossman for Cendio AB
#
# This file is dual license under the terms of the GPLv2 or later
# and 3-clause BSD licenses.

import osx2win32
import osx2win32_name
import osx2xkb
import osx2xkb_name
import html2win32
import html2win32_name
import osx
import osx_name

# Spot-check one entry per generated table: (table, key, expected value).
# NOTE(review): the 0x1d index appears to carry a different meaning in the
# osx2* maps vs. the plain osx tables — verify against the generator input.
_CHECKS = [
    (osx2win32.code_map_osx_to_win32, 0x1d, 0x30),
    (osx2win32_name.name_map_osx_to_win32, 0x1d, "VK_0"),
    (osx2xkb.code_map_osx_to_xkb, 0x1d, "AE10"),
    (osx2xkb_name.name_map_osx_to_xkb, 0x1d, "AE10"),
    (html2win32.code_map_html_to_win32, "ControlLeft", 0x11),
    (html2win32_name.name_map_html_to_win32, "ControlLeft", "VK_CONTROL"),
    (osx.code_table_osx, 0x1d, 0x3b),
    (osx_name.name_table_osx, 0x1d, "Control"),
]

for table, key, expected in _CHECKS:
    assert table[key] == expected
nvtrust-main
infrastructure/kvm/qemu/qemu_source/ui/keycodemapdb/tests/test.py
nvtrust-main
infrastructure/kvm/qemu/qemu_source/ui/keycodemapdb/thirdparty/__init__.py
# Author: Steven J. Bethard <steven.bethard@gmail.com>. # Maintainer: Thomas Waldmann <tw@waldmann-edv.de> """Command-line parsing library This module is an optparse-inspired command-line parsing library that: - handles both optional and positional arguments - produces highly informative usage messages - supports parsers that dispatch to sub-parsers The following is a simple usage example that sums integers from the command-line and writes the result to a file:: parser = argparse.ArgumentParser( description='sum the integers at the command line') parser.add_argument( 'integers', metavar='int', nargs='+', type=int, help='an integer to be summed') parser.add_argument( '--log', default=sys.stdout, type=argparse.FileType('w'), help='the file where the sum should be written') args = parser.parse_args() args.log.write('%s' % sum(args.integers)) args.log.close() The module contains the following public classes: - ArgumentParser -- The main entry point for command-line parsing. As the example above shows, the add_argument() method is used to populate the parser with actions for optional and positional arguments. Then the parse_args() method is invoked to convert the args at the command-line into an object with attributes. - ArgumentError -- The exception raised by ArgumentParser objects when there are errors with the parser's actions. Errors raised while parsing the command-line are caught by ArgumentParser and emitted as command-line messages. - FileType -- A factory for defining types of files to be created. As the example above shows, instances of FileType are typically passed as the type= argument of add_argument() calls. - Action -- The base class for parser actions. Typically actions are selected by passing strings like 'store_true' or 'append_const' to the action= argument of add_argument(). However, for greater customization of ArgumentParser actions, subclasses of Action may be defined and passed as the action= argument. 
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, ArgumentDefaultsHelpFormatter -- Formatter classes which may be passed as the formatter_class= argument to the ArgumentParser constructor. HelpFormatter is the default, RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser not to change the formatting for help text, and ArgumentDefaultsHelpFormatter adds information about argument defaults to the help. All other classes in this module are considered implementation details. (Also note that HelpFormatter and RawDescriptionHelpFormatter are only considered public as object names -- the API of the formatter objects is still considered an implementation detail.) """ __version__ = '1.4.0' # we use our own version number independant of the # one in stdlib and we release this on pypi. __external_lib__ = True # to make sure the tests really test THIS lib, # not the builtin one in Python stdlib __all__ = [ 'ArgumentParser', 'ArgumentError', 'ArgumentTypeError', 'FileType', 'HelpFormatter', 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', 'OPTIONAL', 'PARSER', 'REMAINDER', 'SUPPRESS', 'ZERO_OR_MORE', ] import copy as _copy import os as _os import re as _re import sys as _sys import textwrap as _textwrap from gettext import gettext as _ try: set except NameError: # for python < 2.4 compatibility (sets module is there since 2.3): from sets import Set as set try: basestring except NameError: basestring = str try: sorted except NameError: # for python < 2.4 compatibility: def sorted(iterable, reverse=False): result = list(iterable) result.sort() if reverse: result.reverse() return result def _callable(obj): return hasattr(obj, '__call__') or hasattr(obj, '__bases__') SUPPRESS = '==SUPPRESS==' OPTIONAL = '?' ZERO_OR_MORE = '*' ONE_OR_MORE = '+' PARSER = 'A...' REMAINDER = '...' 
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' # ============================= # Utility functions and classes # ============================= class _AttributeHolder(object): """Abstract base class that provides __repr__. The __repr__ method returns a string in the format:: ClassName(attr=name, attr=name, ...) The attributes are determined either by a class-level attribute, '_kwarg_names', or by inspecting the instance __dict__. """ def __repr__(self): type_name = type(self).__name__ arg_strings = [] for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): arg_strings.append('%s=%r' % (name, value)) return '%s(%s)' % (type_name, ', '.join(arg_strings)) def _get_kwargs(self): return sorted(self.__dict__.items()) def _get_args(self): return [] def _ensure_value(namespace, name, value): if getattr(namespace, name, None) is None: setattr(namespace, name, value) return getattr(namespace, name) # =============== # Formatting Help # =============== class HelpFormatter(object): """Formatter for generating usage messages and argument help strings. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. 
""" def __init__(self, prog, indent_increment=2, max_help_position=24, width=None): # default setting for width if width is None: try: width = int(_os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position self._width = width self._current_indent = 0 self._level = 0 self._action_max_length = 0 self._root_section = self._Section(self, None) self._current_section = self._root_section self._whitespace_matcher = _re.compile(r'\s+') self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== # Section and indentation methods # =============================== def _indent(self): self._current_indent += self._indent_increment self._level += 1 def _dedent(self): self._current_indent -= self._indent_increment assert self._current_indent >= 0, 'Indent decreased below 0.' self._level -= 1 class _Section(object): def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts for func, args in self.items: func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent heading = '%*s%s:\n' % (current_indent, '', self.heading) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def _add_item(self, func, args): self._current_section.items.append((func, args)) # ======================== # Message building methods # ======================== def start_section(self, 
heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def end_section(self): self._current_section = self._current_section.parent self._dedent() def add_text(self, text): if text is not SUPPRESS and text is not None: self._add_item(self._format_text, [text]) def add_usage(self, usage, actions, groups, prefix=None): if usage is not SUPPRESS: args = usage, actions, groups, prefix self._add_item(self._format_usage, args) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) def add_arguments(self, actions): for action in actions: self.add_argument(action) # ======================= # Help-formatting methods # ======================= def format_help(self): help = self._root_section.format_help() if help: help = self._long_break_matcher.sub('\n\n', help) help = help.strip('\n') + '\n' return help def _join_parts(self, part_strings): return ''.join([part for part in part_strings if part and part is not SUPPRESS]) def _format_usage(self, usage, actions, groups, prefix): if prefix is None: prefix = _('usage: ') # if usage is specified, use that if usage is not None: usage = usage % dict(prog=self._prog) # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) # 
split optionals from positionals optionals = [] positionals = [] for action in actions: if action.option_strings: optionals.append(action) else: positionals.append(action) # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) usage = ' '.join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: # break usage into wrappable parts part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) assert ' '.join(opt_parts) == opt_usage assert ' '.join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): lines = [] line = [] if prefix is not None: line_len = len(prefix) - 1 else: line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: lines[0] = lines[0][len(indent):] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: indent = ' ' * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) elif pos_parts: lines = get_lines([prog] + pos_parts, indent, prefix) else: lines = [prog] # if prog is long, put it on its own line else: indent = ' ' * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: lines = [] lines.extend(get_lines(opt_parts, indent)) lines.extend(get_lines(pos_parts, indent)) lines = [prog] + lines # join lines into usage usage = '\n'.join(lines) # prefix with 'usage:' return '%s%s\n\n' 
% (prefix, usage) def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups group_actions = set() inserts = {} for group in groups: try: start = actions.index(group._group_actions[0]) except ValueError: continue else: end = start + len(group._group_actions) if actions[start:end] == group._group_actions: for action in group._group_actions: group_actions.add(action) if not group.required: if start in inserts: inserts[start] += ' [' else: inserts[start] = '[' inserts[end] = ']' else: if start in inserts: inserts[start] += ' (' else: inserts[start] = '(' inserts[end] = ')' for i in range(start + 1, end): inserts[i] = '|' # collect all actions format strings parts = [] for i, action in enumerate(actions): # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) if inserts.get(i) == '|': inserts.pop(i) elif inserts.get(i + 1) == '|': inserts.pop(i + 1) # produce all arg strings elif not action.option_strings: part = self._format_args(action, action.dest) # if it's in a group, strip the outer [] if action in group_actions: if part[0] == '[' and part[-1] == ']': part = part[1:-1] # add the action string to the list parts.append(part) # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: part = '%s' % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) # make it look optional if it's not required or in a group if not action.required and action not in group_actions: part = '[%s]' % part # add the action string to the list parts.append(part) # insert things at the necessary indices for i in sorted(inserts, reverse=True): parts[i:i] = [inserts[i]] # join all 
the action items with spaces text = ' '.join([item for item in parts if item is not None]) # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(r'%s *%s' % (open, close), r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text return text def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' def _format_action(self, action): # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # ho nelp; start on same line and add a final newline if not action.help: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: tup = self._current_indent, '', action_width, action_header action_header = '%*s%-*s ' % tup indent_first = 0 # long action name; start on the next line else: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup indent_first = help_position # collect the pieces of the action help parts = [action_header] # if there was help for the action, add lines of help text if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) # or add a newline if the description doesn't end with one elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, 
add their help as well for subaction in self._iter_indented_subactions(action): parts.append(self._format_action(subaction)) # return a single string return self._join_parts(parts) def _format_action_invocation(self, action): if not action.option_strings: metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: parts = [] # if the Optional doesn't take a value, format is: # -s, --long if action.nargs == 0: parts.extend(action.option_strings) # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) return ', '.join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] result = '{%s}' % ','.join(choice_strs) else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result, ) * tuple_size return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: result = '%s' % get_metavar(1) elif action.nargs == OPTIONAL: result = '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: result = '[%s [%s ...]]' % get_metavar(2) elif action.nargs == ONE_OR_MORE: result = '%s [%s ...]' % get_metavar(2) elif action.nargs == REMAINDER: result = '...' elif action.nargs == PARSER: result = '%s ...' 
% get_metavar(1) else: formats = ['%s' for _ in range(action.nargs)] result = ' '.join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() for subaction in get_subactions(): yield subaction self._dedent() def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent) def _get_help_string(self, action): return action.help class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _fill_text(self, text, width, indent): return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): """Help message formatter which retains formatting of all help text. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _split_lines(self, text, width): return text.splitlines() class ArgumentDefaultsHelpFormatter(HelpFormatter): """Help message formatter which adds default values to argument help. 
Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _get_help_string(self, action):
        # Append ' (default: ...)' to the help text unless the help string
        # already interpolates %(default) or the default is suppressed.
        help = action.help
        if '%(default)' not in action.help:
            if action.default is not SUPPRESS:
                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    help += ' (default: %(default)s)'
        return help


# =====================
# Options and Arguments
# =====================

def _get_action_name(argument):
    # Return a display name for *argument* for use in error messages,
    # preferring option strings, then metavar, then dest.
    if argument is None:
        return None
    elif argument.option_strings:
        return '/'.join(argument.option_strings)
    elif argument.metavar not in (None, SUPPRESS):
        return argument.metavar
    elif argument.dest not in (None, SUPPRESS):
        return argument.dest
    else:
        return None


class ArgumentError(Exception):
    """An error from creating or using an argument (optional or positional).

    The string value of this exception is the message, augmented with
    information about the argument that caused it.
    """

    def __init__(self, argument, message):
        self.argument_name = _get_action_name(argument)
        self.message = message

    def __str__(self):
        if self.argument_name is None:
            format = '%(message)s'
        else:
            format = 'argument %(argument_name)s: %(message)s'
        return format % dict(message=self.message,
                             argument_name=self.argument_name)


class ArgumentTypeError(Exception):
    """An error from trying to convert a command line string to a type."""
    pass


# ==============
# Action classes
# ==============

class Action(_AttributeHolder):
    """Information about how to convert command line strings to Python objects.

    Action objects are used by an ArgumentParser to represent the information
    needed to parse a single argument from one or more strings from the
    command line. The keyword arguments to the Action constructor are also
    all attributes of Action instances.

    Keyword Arguments:

        - option_strings -- A list of command-line option strings which
            should be associated with this action.

        - dest -- The name of the attribute to hold the created object(s)

        - nargs -- The number of command-line arguments that should be
            consumed. By default, one argument will be consumed and a single
            value will be produced.  Other values include:
                - N (an integer) consumes N arguments (and produces a list)
                - '?' consumes zero or one arguments
                - '*' consumes zero or more arguments (and produces a list)
                - '+' consumes one or more arguments (and produces a list)
            Note that the difference between the default and nargs=1 is that
            with the default, a single value will be produced, while with
            nargs=1, a list containing a single value will be produced.

        - const -- The value to be produced if the option is specified and the
            option uses an action that takes no values.

        - default -- The value to be produced if the option is not specified.

        - type -- The type which the command-line arguments should be converted
            to, should be one of 'string', 'int', 'float', 'complex' or a
            callable object that accepts a single string argument. If None,
            'string' is assumed.

        - choices -- A container of values that should be allowed. If not None,
            after a command-line argument has been converted to the appropriate
            type, an exception will be raised if it is not a member of this
            collection.

        - required -- True if the action must always be specified at the
            command line. This is only meaningful for optional command-line
            arguments.

        - help -- The help string describing the argument.

        - metavar -- The name to be used for the option's argument with the
            help string. If None, the 'dest' value will be used as the name.
    """

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        self.option_strings = option_strings
        self.dest = dest
        self.nargs = nargs
        self.const = const
        self.default = default
        self.type = type
        self.choices = choices
        self.required = required
        self.help = help
        self.metavar = metavar

    def _get_kwargs(self):
        # Attributes shown by _AttributeHolder's repr, in display order.
        names = [
            'option_strings',
            'dest',
            'nargs',
            'const',
            'default',
            'type',
            'choices',
            'help',
            'metavar',
        ]
        return [(name, getattr(self, name)) for name in names]

    def __call__(self, parser, namespace, values, option_string=None):
        # Subclasses must override this to apply the parsed values to the
        # namespace; the base class is abstract in effect.
        raise NotImplementedError(_('.__call__() not defined'))


class _StoreAction(Action):

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        if nargs == 0:
            raise ValueError('nargs for store actions must be > 0; if you '
                             'have nothing to store, actions such as store '
                             'true or store const may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_StoreAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=nargs,
            const=const,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)


class _StoreConstAction(Action):

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        super(_StoreConstAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.const)


class _StoreTrueAction(_StoreConstAction):

    def __init__(self,
                 option_strings,
                 dest,
                 default=False,
                 required=False,
                 help=None):
        super(_StoreTrueAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            const=True,
            default=default,
            required=required,
            help=help)


class _StoreFalseAction(_StoreConstAction):

    def __init__(self,
                 option_strings,
                 dest,
                 default=True,
                 required=False,
                 help=None):
        super(_StoreFalseAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            const=False,
            default=default,
            required=required,
            help=help)


class _AppendAction(Action):

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        if nargs == 0:
            raise ValueError('nargs for append actions must be > 0; if arg '
                             'strings are not supplying the value to append, '
                             'the append const action may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_AppendAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=nargs,
            const=const,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # copy before appending so a list shared as a default is not mutated
        items = _copy.copy(_ensure_value(namespace, self.dest, []))
        items.append(values)
        setattr(namespace, self.dest, items)


class _AppendConstAction(Action):

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        super(_AppendConstAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # copy before appending so a list shared as a default is not mutated
        items = _copy.copy(_ensure_value(namespace, self.dest, []))
        items.append(self.const)
        setattr(namespace, self.dest, items)


class _CountAction(Action):

    def __init__(self,
                 option_strings,
                 dest,
                 default=None,
                 required=False,
                 help=None):
        super(_CountAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            default=default,
            required=required,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        new_count = _ensure_value(namespace, self.dest, 0) + 1
        setattr(namespace, self.dest, new_count)


class _HelpAction(Action):

    def __init__(self,
                 option_strings,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help=None):
        super(_HelpAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        parser.exit()


class _VersionAction(Action):

    def __init__(self,
                 option_strings,
                 version=None,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help="show program's version number and exit"):
        super(_VersionAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            help=help)
        self.version = version

    def __call__(self, parser, namespace, values, option_string=None):
        version = self.version
        if version is None:
            # fall back to the parser-level version attribute
            version = parser.version
        formatter = parser._get_formatter()
        formatter.add_text(version)
        parser.exit(message=formatter.format_help())


class _SubParsersAction(Action):

    class _ChoicesPseudoAction(Action):
        # Placeholder action used only so that each sub-command's name and
        # help text appear in the parent parser's help output.

        def __init__(self, name, aliases, help):
            metavar = dest = name
            if aliases:
                metavar += ' (%s)' % ', '.join(aliases)
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=dest, help=help,
                         metavar=metavar)

    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):
        self._prog_prefix = prog
        self._parser_class = parser_class
        self._name_parser_map = {}
        self._choices_actions = []
        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)

    def add_parser(self, name, **kwargs):
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)

        aliases = kwargs.pop('aliases', ())

        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action =
self._ChoicesPseudoAction(name, aliases, help)
            self._choices_actions.append(choice_action)

        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser

        # make parser available under aliases also
        for alias in aliases:
            self._name_parser_map[alias] = parser

        return parser

    def _get_subactions(self):
        return self._choices_actions

    def __call__(self, parser, namespace, values, option_string=None):
        parser_name = values[0]
        arg_strings = values[1:]

        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)

        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)' % tup)
            raise ArgumentError(self, msg)

        # parse all the remaining options into the namespace
        # store any unrecognized options on the object, so that the top
        # level parser can decide what to do with them
        namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
        if arg_strings:
            vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
            getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)


# ==============
# Type classes
# ==============

class FileType(object):
    """Factory for creating file object types

    Instances of FileType are typically passed as type= arguments to the
    ArgumentParser add_argument() method.

    Keyword Arguments:
        - mode -- A string indicating how the file is to be opened. Accepts the
            same values as the builtin open() function.
        - bufsize -- The file's desired buffer size. Accepts the same values as
            the builtin open() function.
    """

    def __init__(self, mode='r', bufsize=None):
        self._mode = mode
        self._bufsize = bufsize

    def __call__(self, string):
        # the special argument "-" means sys.std{in,out}
        if string == '-':
            if 'r' in self._mode:
                return _sys.stdin
            elif 'w' in self._mode:
                return _sys.stdout
            else:
                msg = _('argument "-" with mode %r' % self._mode)
                raise ValueError(msg)

        try:
            # all other arguments are used as file names
            if self._bufsize:
                return open(string, self._mode, self._bufsize)
            else:
                return open(string, self._mode)
        except IOError:
            err = _sys.exc_info()[1]
            message = _("can't open '%s': %s")
            raise ArgumentTypeError(message % (string, err))

    def __repr__(self):
        args = [self._mode, self._bufsize]
        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
        return '%s(%s)' % (type(self).__name__, args_str)


# ===========================
# Optional and Positional Parsing
# ===========================

class Namespace(_AttributeHolder):
    """Simple object for storing attributes.

    Implements equality by attribute names and values, and provides a simple
    string representation.
    """

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])

    # not hashable, since equality is value-based
    __hash__ = None

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self.__dict__


class _ActionsContainer(object):

    def __init__(self,
                 description,
                 prefix_chars,
                 argument_default,
                 conflict_handler):
        super(_ActionsContainer, self).__init__()

        self.description = description
        self.argument_default = argument_default
        self.prefix_chars = prefix_chars
        self.conflict_handler = conflict_handler

        # set up registries
        self._registries = {}

        # register actions
        self.register('action', None, _StoreAction)
        self.register('action', 'store', _StoreAction)
        self.register('action', 'store_const', _StoreConstAction)
        self.register('action', 'store_true', _StoreTrueAction)
        self.register('action', 'store_false', _StoreFalseAction)
        self.register('action', 'append', _AppendAction)
        self.register('action', 'append_const', _AppendConstAction)
        self.register('action', 'count', _CountAction)
        self.register('action', 'help', _HelpAction)
        self.register('action', 'version', _VersionAction)
        self.register('action', 'parsers', _SubParsersAction)

        # raise an exception if the conflict handler is invalid
        self._get_handler()

        # action storage
        self._actions = []
        self._option_string_actions = {}

        # groups
        self._action_groups = []
        self._mutually_exclusive_groups = []

        # defaults storage
        self._defaults = {}

        # determines whether an "option" looks like a negative number
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')

        # whether or not there are any optionals that look like negative
        # numbers -- uses a list so it can be shared and edited
        self._has_negative_number_optionals = []

    # ====================
    # Registration methods
    # ====================

    def register(self, registry_name, value, object):
        registry = self._registries.setdefault(registry_name, {})
        registry[value] = object

    def _registry_get(self, registry_name, value,
default=None):
        return self._registries[registry_name].get(value, default)

    # ==================================
    # Namespace default accessor methods
    # ==================================

    def set_defaults(self, **kwargs):
        self._defaults.update(kwargs)

        # if these defaults match any existing arguments, replace
        # the previous default on the object with the new one
        for action in self._actions:
            if action.dest in kwargs:
                action.default = kwargs[action.dest]

    def get_default(self, dest):
        for action in self._actions:
            if action.dest == dest and action.default is not None:
                return action.default
        return self._defaults.get(dest, None)

    # =======================
    # Adding argument actions
    # =======================

    def add_argument(self, *args, **kwargs):
        """
        add_argument(dest, ..., name=value, ...)
        add_argument(option_string, option_string, ..., name=value, ...)
        """

        # if no positional args are supplied or only one is supplied and
        # it doesn't look like an option string, parse a positional
        # argument
        chars = self.prefix_chars
        if not args or len(args) == 1 and args[0][0] not in chars:
            if args and 'dest' in kwargs:
                raise ValueError('dest supplied twice for positional argument')
            kwargs = self._get_positional_kwargs(*args, **kwargs)

        # otherwise, we're adding an optional argument
        else:
            kwargs = self._get_optional_kwargs(*args, **kwargs)

        # if no default was supplied, use the parser-level default
        if 'default' not in kwargs:
            dest = kwargs['dest']
            if dest in self._defaults:
                kwargs['default'] = self._defaults[dest]
            elif self.argument_default is not None:
                kwargs['default'] = self.argument_default

        # create the action object, and add it to the parser
        action_class = self._pop_action_class(kwargs)
        if not _callable(action_class):
            raise ValueError('unknown action "%s"' % action_class)
        action = action_class(**kwargs)

        # raise an error if the action type is not callable
        type_func = self._registry_get('type', action.type, action.type)
        if not _callable(type_func):
            raise ValueError('%r is not callable' % type_func)

        return self._add_action(action)

    def add_argument_group(self, *args, **kwargs):
        group = _ArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group

    def add_mutually_exclusive_group(self, **kwargs):
        group = _MutuallyExclusiveGroup(self, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group

    def _add_action(self, action):
        # resolve any conflicts
        self._check_conflict(action)

        # add to actions list
        self._actions.append(action)
        action.container = self

        # index the action by any option strings it has
        for option_string in action.option_strings:
            self._option_string_actions[option_string] = action

        # set the flag if any option strings look like negative numbers
        # (the flag is a shared list so groups see the same state)
        for option_string in action.option_strings:
            if self._negative_number_matcher.match(option_string):
                if not self._has_negative_number_optionals:
                    self._has_negative_number_optionals.append(True)

        # return the created action
        return action

    def _remove_action(self, action):
        self._actions.remove(action)

    def _add_container_actions(self, container):
        # collect groups by titles
        title_group_map = {}
        for group in self._action_groups:
            if group.title in title_group_map:
                msg = _('cannot merge actions - two groups are named %r')
                raise ValueError(msg % (group.title))
            title_group_map[group.title] = group

        # map each action to its group
        group_map = {}
        for group in container._action_groups:

            # if a group with the title exists, use that, otherwise
            # create a new group matching the container's group
            if group.title not in title_group_map:
                title_group_map[group.title] = self.add_argument_group(
                    title=group.title,
                    description=group.description,
                    conflict_handler=group.conflict_handler)

            # map the actions to their new group
            for action in group._group_actions:
                group_map[action] = title_group_map[group.title]

        # add container's mutually exclusive groups
        # NOTE: if add_mutually_exclusive_group ever gains title= and
        # description= then this code will need to be expanded as above
        for group in container._mutually_exclusive_groups:
            mutex_group = self.add_mutually_exclusive_group(
                required=group.required)

            # map the actions to their new mutex group
            for action in group._group_actions:
                group_map[action] = mutex_group

        # add all actions to this container or their group
        for action in container._actions:
            group_map.get(action, self)._add_action(action)

    def _get_positional_kwargs(self, dest, **kwargs):
        # make sure required is not specified
        if 'required' in kwargs:
            msg = _("'required' is an invalid argument for positionals")
            raise TypeError(msg)

        # mark positional arguments as required if at least one is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True

        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])

    def _get_optional_kwargs(self, *args, **kwargs):
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                msg = _('invalid option string %r: '
                        'must start with a character %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)

            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if len(option_string) > 1:
                    if option_string[1] in self.prefix_chars:
                        long_option_strings.append(option_string)

        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            if not dest:
                msg = _('dest= is required for options like %r')
                raise ValueError(msg % option_string)
            dest = dest.replace('-', '_')

        # return the updated
keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)

    def _pop_action_class(self, kwargs, default=None):
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)

    def _get_handler(self):
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)

    def _check_conflict(self, action):

        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))

        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)

    def _handle_conflict_error(self, action, conflicting_actions):
        message = _('conflicting option string(s): %s')
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)

    def _handle_conflict_resolve(self, action, conflicting_actions):

        # remove all conflicting options
        for option_string, action in conflicting_actions:

            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)

            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)


class _ArgumentGroup(_ActionsContainer):

    def __init__(self, container, title=None, description=None, **kwargs):
        # add any missing keyword arguments by checking the container
        update = kwargs.setdefault
        update('conflict_handler', container.conflict_handler)
        update('prefix_chars', container.prefix_chars)
        update('argument_default', container.argument_default)
        super_init = super(_ArgumentGroup, self).__init__
        super_init(description=description, **kwargs)

        # group attributes
        self.title = title
        self._group_actions = []

        # share most attributes with the container
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals

    def _add_action(self, action):
        action = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)


class _MutuallyExclusiveGroup(_ArgumentGroup):

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        if action.required:
            msg = _('mutually exclusive arguments must be optional')
            raise ValueError(msg)
        action = self._container._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)


class ArgumentParser(_AttributeHolder, _ActionsContainer):
    """Object for parsing command line strings into Python objects.

    Keyword Arguments:
        - prog -- The name of the program (default: sys.argv[0])
        - usage -- A usage message (default: auto-generated from arguments)
        - description -- A description of what the program does
        - epilog -- Text following the argument descriptions
        - parents -- Parsers whose arguments should be copied into this one
        - formatter_class -- HelpFormatter class for printing help messages
        - prefix_chars -- Characters that prefix optional arguments
        - fromfile_prefix_chars -- Characters that prefix files containing
            additional arguments
        - argument_default -- The default value for all arguments
        - conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/-help option
    """

    # NOTE: the mutable default for 'parents' is only ever iterated below,
    # never mutated, so the usual shared-default pitfall does not apply.
    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):

        if version is not None:
            import warnings
            warnings.warn(
                """The "version" argument to ArgumentParser is deprecated. 
""" """Please use """ """"add_argument(..., action='version', version="N", ...)" """ """instead""", DeprecationWarning) superinit = super(ArgumentParser, self).__init__ superinit(description=description, prefix_chars=prefix_chars, argument_default=argument_default, conflict_handler=conflict_handler) # default setting for prog if prog is None: prog = _os.path.basename(_sys.argv[0]) self.prog = prog self.usage = usage self.epilog = epilog self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help add_group = self.add_argument_group self._positionals = add_group(_('positional arguments')) self._optionals = add_group(_('optional arguments')) self._subparsers = None # register types def identity(string): return string self.register('type', None, identity) # add help and version arguments if necessary # (using explicit default to override global argument_default) if '-' in prefix_chars: default_prefix = '-' else: default_prefix = prefix_chars[0] if self.add_help: self.add_argument( default_prefix+'h', default_prefix*2+'help', action='help', default=SUPPRESS, help=_('show this help message and exit')) if self.version: self.add_argument( default_prefix+'v', default_prefix*2+'version', action='version', default=SUPPRESS, version=self.version, help=_("show program's version number and exit")) # add parent arguments and defaults for parent in parents: self._add_container_actions(parent) try: defaults = parent._defaults except AttributeError: pass else: self._defaults.update(defaults) # ======================= # Pretty __repr__ methods # ======================= def _get_kwargs(self): names = [ 'prog', 'usage', 'description', 'version', 'formatter_class', 'conflict_handler', 'add_help', ] return [(name, getattr(self, name)) for name in names] # ================================== # Optional/Positional adding methods # ================================== def add_subparsers(self, **kwargs): if 
self._subparsers is not None: self.error(_('cannot have multiple subparser arguments')) # add the parser class to the arguments if it's not present kwargs.setdefault('parser_class', type(self)) if 'title' in kwargs or 'description' in kwargs: title = _(kwargs.pop('title', 'subcommands')) description = _(kwargs.pop('description', None)) self._subparsers = self.add_argument_group(title, description) else: self._subparsers = self._positionals # prog defaults to the usage message of this parser, skipping # optional arguments and with no "usage:" prefix if kwargs.get('prog') is None: formatter = self._get_formatter() positionals = self._get_positional_actions() groups = self._mutually_exclusive_groups formatter.add_usage(self.usage, positionals, groups, '') kwargs['prog'] = formatter.format_help().strip() # create the parsers action and add it to the positionals list parsers_class = self._pop_action_class(kwargs, 'parsers') action = parsers_class(option_strings=[], **kwargs) self._subparsers._add_action(action) # return the created parsers action return action def _add_action(self, action): if action.option_strings: self._optionals._add_action(action) else: self._positionals._add_action(action) return action def _get_optional_actions(self): return [action for action in self._actions if action.option_strings] def _get_positional_actions(self): return [action for action in self._actions if not action.option_strings] # ===================================== # Command line argument parsing methods # ===================================== def parse_args(self, args=None, namespace=None): args, argv = self.parse_known_args(args, namespace) if argv: msg = _('unrecognized arguments: %s') self.error(msg % ' '.join(argv)) return args def parse_known_args(self, args=None, namespace=None): # args default to the system args if args is None: args = _sys.argv[1:] # default Namespace built from parser defaults if namespace is None: namespace = Namespace() # add any action defaults that 
aren't present for action in self._actions: if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: if not hasattr(namespace, dest): setattr(namespace, dest, self._defaults[dest]) # parse the arguments and exit if there are any errors try: namespace, args = self._parse_known_args(args, namespace) if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) return namespace, args except ArgumentError: err = _sys.exc_info()[1] self.error(str(err)) def _parse_known_args(self, arg_strings, namespace): # replace arg strings that are file references if self.fromfile_prefix_chars is not None: arg_strings = self._read_args_from_files(arg_strings) # map all mutually exclusive arguments to the other arguments # they can't occur with action_conflicts = {} for mutex_group in self._mutually_exclusive_groups: group_actions = mutex_group._group_actions for i, mutex_action in enumerate(mutex_group._group_actions): conflicts = action_conflicts.setdefault(mutex_action, []) conflicts.extend(group_actions[:i]) conflicts.extend(group_actions[i + 1:]) # find all option indices, and determine the arg_string_pattern # which has an 'O' if there is an option at an index, # an 'A' if there is an argument, or a '-' if there is a '--' option_string_indices = {} arg_string_pattern_parts = [] arg_strings_iter = iter(arg_strings) for i, arg_string in enumerate(arg_strings_iter): # all args after -- are non-options if arg_string == '--': arg_string_pattern_parts.append('-') for arg_string in arg_strings_iter: arg_string_pattern_parts.append('A') # otherwise, add the arg to the arg strings # and note the index if it was an option else: option_tuple = self._parse_optional(arg_string) if option_tuple is None: pattern = 'A' else: 
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)

        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)

        # converts arg strings to the appropriate and then takes the action
        seen_actions = set()
        seen_non_default_actions = set()

        def take_action(action, argument_strings, option_string=None):
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)

            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)

            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)

        # function to convert arg_strings into an optional action
        def consume_optional(start_index):

            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple

            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:

                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1

                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')

                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        char = option_string[0]
                        option_string = char + explicit_arg[0]
                        new_explicit_arg = explicit_arg[1:] or None
                        optionals_map = self._option_string_actions
                        if option_string in optionals_map:
                            action = optionals_map[option_string]
                            explicit_arg = new_explicit_arg
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)

                    # if the action expect exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break

                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)

                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break

            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action, args, option_string)
            return stop

        # the list of Positionals left to be parsed; this is modified
        # by consume_positionals()
        positionals = self._get_positional_actions()

        # function to convert arg_strings into positional actions
        def consume_positionals(start_index):
            # match as many Positionals as possible
            match_partial = self._match_arguments_partial
            selected_pattern = arg_strings_pattern[start_index:]
            arg_counts = match_partial(positionals, selected_pattern)

            # slice off the appropriate arg strings for each Positional
            # and add the Positional and its args to the list
            for action, arg_count in zip(positionals, arg_counts):
                args = arg_strings[start_index: start_index + arg_count]
                start_index += arg_count
                take_action(action, args)

            # slice off the Positionals that we just parsed and return the
            # index at which the Positionals' string args stopped
            positionals[:] = positionals[len(arg_counts):]
            return start_index

        # consume Positionals and Optionals alternately, until we have
        # passed the last option string
        # (extras collects arg strings that matched no known action)
        extras = []
        start_index = 0
        if option_string_indices:
            max_option_string_index = max(option_string_indices)
        else:
            max_option_string_index = -1
        while start_index <= max_option_string_index:

            # consume any Positionals preceding the next option
            next_option_string_index = min([
                index
                for index in option_string_indices
                if index >= start_index])
            if start_index != next_option_string_index:
                positionals_end_index = consume_positionals(start_index)

                # only try to parse the next optional if we didn't consume
                # the option string during the positionals parsing
                if positionals_end_index > start_index:
                    start_index = positionals_end_index
                    continue
                else:
                    start_index = positionals_end_index

            # if we consumed all the positionals we could and we're not
            # at the index of an option string, there were extra arguments
            if start_index not in option_string_indices:
                strings = arg_strings[start_index:next_option_string_index]
                extras.extend(strings)
                start_index = next_option_string_index

            # consume the next
optional and any arguments for it start_index = consume_optional(start_index) # consume any positionals following the last Optional stop_index = consume_positionals(start_index) # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) # if we didn't use all the Positional objects, there were too few # arg strings supplied. if positionals: self.error(_('too few arguments')) # make sure all required actions were present, and convert defaults. for action in self._actions: if action not in seen_actions: if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace if (action.default is not None and isinstance(action.default, basestring) and hasattr(namespace, action.dest) and action.default is getattr(namespace, action.dest)): setattr(namespace, action.dest, self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: for action in group._group_actions: if action in seen_non_default_actions: break # if no actions were used, report the error else: names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS] msg = _('one of the arguments %s is required') self.error(msg % ' '.join(names)) # return the updated namespace and the extra arguments return namespace, extras def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content else: try: args_file = 
open(arg_string[1:]) try: arg_strings = [] for arg_line in args_file.read().splitlines(): for arg in self.convert_arg_line_to_args(arg_line): arg_strings.append(arg) arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) finally: args_file.close() except IOError: err = _sys.exc_info()[1] self.error(str(err)) # return the modified argument list return new_arg_strings def convert_arg_line_to_args(self, arg_line): return [arg_line] def _match_argument(self, action, arg_strings_pattern): # match the pattern for this action to the arg strings nargs_pattern = self._get_nargs_pattern(action) match = _re.match(nargs_pattern, arg_strings_pattern) # raise an exception if we weren't able to find a match if match is None: nargs_errors = { None: _('expected one argument'), OPTIONAL: _('expected at most one argument'), ONE_OR_MORE: _('expected at least one argument'), } default = _('expected %s argument(s)') % action.nargs msg = nargs_errors.get(action.nargs, default) raise ArgumentError(action, msg) # return the number of arguments matched return len(match.group(1)) def _match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the actions list by slicing off the # final actions until we find a match result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] pattern = ''.join([self._get_nargs_pattern(action) for action in actions_slice]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) break # return the list of arg string counts return result def _parse_optional(self, arg_string): # if it's an empty string, it was meant to be a positional if not arg_string: return None # if it doesn't start with a prefix, it was meant to be positional if not arg_string[0] in self.prefix_chars: return None # if the option string is present in the parser, return the action if arg_string in self._option_string_actions: action = 
self._option_string_actions[arg_string] return action, arg_string, None # if it's just a single character, it was meant to be positional if len(arg_string) == 1: return None # if the option string before the "=" is present, return the action if '=' in arg_string: option_string, explicit_arg = arg_string.split('=', 1) if option_string in self._option_string_actions: action = self._option_string_actions[option_string] return action, option_string, explicit_arg # search through all possible prefixes of the option string # and all actions in the parser for possible interpretations option_tuples = self._get_option_tuples(arg_string) # if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: options = ', '.join([option_string for action, option_string, explicit_arg in option_tuples]) tup = arg_string, options self.error(_('ambiguous option: %s could match %s') % tup) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: option_tuple, = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative # number, it was meant to be positional # unless there are negative-number-like options if self._negative_number_matcher.match(arg_string): if not self._has_negative_number_optionals: return None # if it contains a space, it was meant to be a positional if ' ' in arg_string: return None # it was meant to be an optional but there is no such option # in this parser (though it might be a valid option in a subparser) return None, arg_string, None def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: 
if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg = option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = 
nargs_pattern.replace('-', '') # return the pattern return nargs_pattern # ======================== # Value conversion methods # ======================== def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs not in [PARSER, REMAINDER]: arg_strings = [s for s in arg_strings if s != '--'] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: value = action.default if isinstance(value, basestring): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) 
# ArgumentTypeErrors indicate errors except ArgumentTypeError: name = getattr(action.type, '__name__', repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except (TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) msg = _('invalid %s value: %r') raise ArgumentError(action, msg % (name, arg_string)) # return the converted value return result def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: tup = value, ', '.join(map(repr, action.choices)) msg = _('invalid choice: %r (choose from %s)') % tup raise ArgumentError(action, msg) # ======================= # Help-formatting methods # ======================= def format_usage(self): formatter = self._get_formatter() formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def format_version(self): import warnings warnings.warn( 'The format_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() def _get_formatter(self): return self.formatter_class(prog=self.prog) # ===================== # Help-printing methods # ===================== def 
print_usage(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_usage(), file) def print_help(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_help(), file) def print_version(self, file=None): import warnings warnings.warn( 'The print_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) self._print_message(self.format_version(), file) def _print_message(self, message, file=None): if message: if file is None: file = _sys.stderr file.write(message) # =============== # Exiting methods # =============== def exit(self, status=0, message=None): if message: self._print_message(message, _sys.stderr) _sys.exit(status) def error(self, message): """error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, message))
nvtrust-main
infrastructure/kvm/qemu/qemu_source/ui/keycodemapdb/thirdparty/argparse.py
#!/usr/bin/env python3 """ QEMU tooling installer script Copyright (c) 2020-2021 John Snow for Red Hat, Inc. """ import setuptools import pkg_resources def main(): """ QEMU tooling installer """ # https://medium.com/@daveshawley/safely-using-setup-cfg-for-metadata-1babbe54c108 pkg_resources.require('setuptools>=39.2') setuptools.setup() if __name__ == '__main__': main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/setup.py
import asyncio from contextlib import contextmanager import os import socket from tempfile import TemporaryDirectory import avocado from qemu.aqmp import ConnectError, Runstate from qemu.aqmp.protocol import AsyncProtocol, StateError from qemu.aqmp.util import asyncio_run, create_task class NullProtocol(AsyncProtocol[None]): """ NullProtocol is a test mockup of an AsyncProtocol implementation. It adds a fake_session instance variable that enables a code path that bypasses the actual connection logic, but still allows the reader/writers to start. Because the message type is defined as None, an asyncio.Event named 'trigger_input' is created that prohibits the reader from incessantly being able to yield None; this event can be poked to simulate an incoming message. For testing symmetry with do_recv, an interface is added to "send" a Null message. For testing purposes, a "simulate_disconnection" method is also added which allows us to trigger a bottom half disconnect without injecting any real errors into the reader/writer loops; in essence it performs exactly half of what disconnect() normally does. """ def __init__(self, name=None): self.fake_session = False self.trigger_input: asyncio.Event super().__init__(name) async def _establish_session(self): self.trigger_input = asyncio.Event() await super()._establish_session() async def _do_accept(self, address, ssl=None): if not self.fake_session: await super()._do_accept(address, ssl) async def _do_connect(self, address, ssl=None): if not self.fake_session: await super()._do_connect(address, ssl) async def _do_recv(self) -> None: await self.trigger_input.wait() self.trigger_input.clear() def _do_send(self, msg: None) -> None: pass async def send_msg(self) -> None: await self._outgoing.put(None) async def simulate_disconnect(self) -> None: """ Simulates a bottom-half disconnect. This method schedules a disconnection but does not wait for it to complete. 
This is used to put the loop into the DISCONNECTING state without fully quiescing it back to IDLE. This is normally something you cannot coax AsyncProtocol to do on purpose, but it will be similar to what happens with an unhandled Exception in the reader/writer. Under normal circumstances, the library design requires you to await on disconnect(), which awaits the disconnect task and returns bottom half errors as a pre-condition to allowing the loop to return back to IDLE. """ self._schedule_disconnect() class LineProtocol(AsyncProtocol[str]): def __init__(self, name=None): super().__init__(name) self.rx_history = [] async def _do_recv(self) -> str: raw = await self._readline() msg = raw.decode() self.rx_history.append(msg) return msg def _do_send(self, msg: str) -> None: assert self._writer is not None self._writer.write(msg.encode() + b'\n') async def send_msg(self, msg: str) -> None: await self._outgoing.put(msg) def run_as_task(coro, allow_cancellation=False): """ Run a given coroutine as a task. Optionally, wrap it in a try..except block that allows this coroutine to be canceled gracefully. """ async def _runner(): try: await coro except asyncio.CancelledError: if allow_cancellation: return raise return create_task(_runner()) @contextmanager def jammed_socket(): """ Opens up a random unused TCP port on localhost, then jams it. """ socks = [] try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('127.0.0.1', 0)) sock.listen(1) address = sock.getsockname() socks.append(sock) # I don't *fully* understand why, but it takes *two* un-accepted # connections to start jamming the socket. 
for _ in range(2): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(address) socks.append(sock) yield address finally: for sock in socks: sock.close() class Smoke(avocado.Test): def setUp(self): self.proto = NullProtocol() def test__repr__(self): self.assertEqual( repr(self.proto), "<NullProtocol runstate=IDLE>" ) def testRunstate(self): self.assertEqual( self.proto.runstate, Runstate.IDLE ) def testDefaultName(self): self.assertEqual( self.proto.name, None ) def testLogger(self): self.assertEqual( self.proto.logger.name, 'qemu.aqmp.protocol' ) def testName(self): self.proto = NullProtocol('Steve') self.assertEqual( self.proto.name, 'Steve' ) self.assertEqual( self.proto.logger.name, 'qemu.aqmp.protocol.Steve' ) self.assertEqual( repr(self.proto), "<NullProtocol name='Steve' runstate=IDLE>" ) class TestBase(avocado.Test): def setUp(self): self.proto = NullProtocol(type(self).__name__) self.assertEqual(self.proto.runstate, Runstate.IDLE) self.runstate_watcher = None def tearDown(self): self.assertEqual(self.proto.runstate, Runstate.IDLE) async def _asyncSetUp(self): pass async def _asyncTearDown(self): if self.runstate_watcher: await self.runstate_watcher @staticmethod def async_test(async_test_method): """ Decorator; adds SetUp and TearDown to async tests. 
""" async def _wrapper(self, *args, **kwargs): loop = asyncio.get_event_loop() loop.set_debug(True) await self._asyncSetUp() await async_test_method(self, *args, **kwargs) await self._asyncTearDown() return _wrapper # Definitions # The states we expect a "bad" connect/accept attempt to transition through BAD_CONNECTION_STATES = ( Runstate.CONNECTING, Runstate.DISCONNECTING, Runstate.IDLE, ) # The states we expect a "good" session to transition through GOOD_CONNECTION_STATES = ( Runstate.CONNECTING, Runstate.RUNNING, Runstate.DISCONNECTING, Runstate.IDLE, ) # Helpers async def _watch_runstates(self, *states): """ This launches a task alongside (most) tests below to confirm that the sequence of runstate changes that occur is exactly as anticipated. """ async def _watcher(): for state in states: new_state = await self.proto.runstate_changed() self.assertEqual( new_state, state, msg=f"Expected state '{state.name}'", ) self.runstate_watcher = create_task(_watcher()) # Kick the loop and force the task to block on the event. await asyncio.sleep(0) class State(TestBase): @TestBase.async_test async def testSuperfluousDisconnect(self): """ Test calling disconnect() while already disconnected. """ await self._watch_runstates( Runstate.DISCONNECTING, Runstate.IDLE, ) await self.proto.disconnect() class Connect(TestBase): """ Tests primarily related to calling Connect(). 
""" async def _bad_connection(self, family: str): assert family in ('INET', 'UNIX') if family == 'INET': await self.proto.connect(('127.0.0.1', 0)) elif family == 'UNIX': await self.proto.connect('/dev/null') async def _hanging_connection(self): with jammed_socket() as addr: await self.proto.connect(addr) async def _bad_connection_test(self, family: str): await self._watch_runstates(*self.BAD_CONNECTION_STATES) with self.assertRaises(ConnectError) as context: await self._bad_connection(family) self.assertIsInstance(context.exception.exc, OSError) self.assertEqual( context.exception.error_message, "Failed to establish connection" ) @TestBase.async_test async def testBadINET(self): """ Test an immediately rejected call to an IP target. """ await self._bad_connection_test('INET') @TestBase.async_test async def testBadUNIX(self): """ Test an immediately rejected call to a UNIX socket target. """ await self._bad_connection_test('UNIX') @TestBase.async_test async def testCancellation(self): """ Test what happens when a connection attempt is aborted. """ # Note that accept() cannot be cancelled outright, as it isn't a task. # However, we can wrap it in a task and cancel *that*. await self._watch_runstates(*self.BAD_CONNECTION_STATES) task = run_as_task(self._hanging_connection(), allow_cancellation=True) state = await self.proto.runstate_changed() self.assertEqual(state, Runstate.CONNECTING) # This is insider baseball, but the connection attempt has # yielded *just* before the actual connection attempt, so kick # the loop to make sure it's truly wedged. await asyncio.sleep(0) task.cancel() await task @TestBase.async_test async def testTimeout(self): """ Test what happens when a connection attempt times out. 
""" await self._watch_runstates(*self.BAD_CONNECTION_STATES) task = run_as_task(self._hanging_connection()) # More insider baseball: to improve the speed of this test while # guaranteeing that the connection even gets a chance to start, # verify that the connection hangs *first*, then await the # result of the task with a nearly-zero timeout. state = await self.proto.runstate_changed() self.assertEqual(state, Runstate.CONNECTING) await asyncio.sleep(0) with self.assertRaises(asyncio.TimeoutError): await asyncio.wait_for(task, timeout=0) @TestBase.async_test async def testRequire(self): """ Test what happens when a connection attempt is made while CONNECTING. """ await self._watch_runstates(*self.BAD_CONNECTION_STATES) task = run_as_task(self._hanging_connection(), allow_cancellation=True) state = await self.proto.runstate_changed() self.assertEqual(state, Runstate.CONNECTING) with self.assertRaises(StateError) as context: await self._bad_connection('UNIX') self.assertEqual( context.exception.error_message, "NullProtocol is currently connecting." ) self.assertEqual(context.exception.state, Runstate.CONNECTING) self.assertEqual(context.exception.required, Runstate.IDLE) task.cancel() await task @TestBase.async_test async def testImplicitRunstateInit(self): """ Test what happens if we do not wait on the runstate event until AFTER a connection is made, i.e., connect()/accept() themselves initialize the runstate event. All of the above tests force the initialization by waiting on the runstate *first*. """ task = run_as_task(self._hanging_connection(), allow_cancellation=True) # Kick the loop to coerce the state change await asyncio.sleep(0) assert self.proto.runstate == Runstate.CONNECTING # We already missed the transition to CONNECTING await self._watch_runstates(Runstate.DISCONNECTING, Runstate.IDLE) task.cancel() await task class Accept(Connect): """ All of the same tests as Connect, but using the accept() interface. 
""" async def _bad_connection(self, family: str): assert family in ('INET', 'UNIX') if family == 'INET': await self.proto.accept(('example.com', 1)) elif family == 'UNIX': await self.proto.accept('/dev/null') async def _hanging_connection(self): with TemporaryDirectory(suffix='.aqmp') as tmpdir: sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock") await self.proto.accept(sock) class FakeSession(TestBase): def setUp(self): super().setUp() self.proto.fake_session = True async def _asyncSetUp(self): await super()._asyncSetUp() await self._watch_runstates(*self.GOOD_CONNECTION_STATES) async def _asyncTearDown(self): await self.proto.disconnect() await super()._asyncTearDown() #### @TestBase.async_test async def testFakeConnect(self): """Test the full state lifecycle (via connect) with a no-op session.""" await self.proto.connect('/not/a/real/path') self.assertEqual(self.proto.runstate, Runstate.RUNNING) @TestBase.async_test async def testFakeAccept(self): """Test the full state lifecycle (via accept) with a no-op session.""" await self.proto.accept('/not/a/real/path') self.assertEqual(self.proto.runstate, Runstate.RUNNING) @TestBase.async_test async def testFakeRecv(self): """Test receiving a fake/null message.""" await self.proto.accept('/not/a/real/path') logname = self.proto.logger.name with self.assertLogs(logname, level='DEBUG') as context: self.proto.trigger_input.set() self.proto.trigger_input.clear() await asyncio.sleep(0) # Kick reader. self.assertEqual( context.output, [f"DEBUG:{logname}:<-- None"], ) @TestBase.async_test async def testFakeSend(self): """Test sending a fake/null message.""" await self.proto.accept('/not/a/real/path') logname = self.proto.logger.name with self.assertLogs(logname, level='DEBUG') as context: # Cheat: Send a Null message to nobody. await self.proto.send_msg() # Kick writer; awaiting on a queue.put isn't sufficient to yield. 
await asyncio.sleep(0) self.assertEqual( context.output, [f"DEBUG:{logname}:--> None"], ) async def _prod_session_api( self, current_state: Runstate, error_message: str, accept: bool = True ): with self.assertRaises(StateError) as context: if accept: await self.proto.accept('/not/a/real/path') else: await self.proto.connect('/not/a/real/path') self.assertEqual(context.exception.error_message, error_message) self.assertEqual(context.exception.state, current_state) self.assertEqual(context.exception.required, Runstate.IDLE) @TestBase.async_test async def testAcceptRequireRunning(self): """Test that accept() cannot be called when Runstate=RUNNING""" await self.proto.accept('/not/a/real/path') await self._prod_session_api( Runstate.RUNNING, "NullProtocol is already connected and running.", accept=True, ) @TestBase.async_test async def testConnectRequireRunning(self): """Test that connect() cannot be called when Runstate=RUNNING""" await self.proto.accept('/not/a/real/path') await self._prod_session_api( Runstate.RUNNING, "NullProtocol is already connected and running.", accept=False, ) @TestBase.async_test async def testAcceptRequireDisconnecting(self): """Test that accept() cannot be called when Runstate=DISCONNECTING""" await self.proto.accept('/not/a/real/path') # Cheat: force a disconnect. await self.proto.simulate_disconnect() await self._prod_session_api( Runstate.DISCONNECTING, ("NullProtocol is disconnecting." " Call disconnect() to return to IDLE state."), accept=True, ) @TestBase.async_test async def testConnectRequireDisconnecting(self): """Test that connect() cannot be called when Runstate=DISCONNECTING""" await self.proto.accept('/not/a/real/path') # Cheat: force a disconnect. await self.proto.simulate_disconnect() await self._prod_session_api( Runstate.DISCONNECTING, ("NullProtocol is disconnecting." 
" Call disconnect() to return to IDLE state."), accept=False, ) class SimpleSession(TestBase): def setUp(self): super().setUp() self.server = LineProtocol(type(self).__name__ + '-server') async def _asyncSetUp(self): await super()._asyncSetUp() await self._watch_runstates(*self.GOOD_CONNECTION_STATES) async def _asyncTearDown(self): await self.proto.disconnect() try: await self.server.disconnect() except EOFError: pass await super()._asyncTearDown() @TestBase.async_test async def testSmoke(self): with TemporaryDirectory(suffix='.aqmp') as tmpdir: sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock") server_task = create_task(self.server.accept(sock)) # give the server a chance to start listening [...] await asyncio.sleep(0) await self.proto.connect(sock)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/tests/protocol.py
""" AQMP Error Classes This package seeks to provide semantic error classes that are intended to be used directly by clients when they would like to handle particular semantic failures (e.g. "failed to connect") without needing to know the enumeration of possible reasons for that failure. AQMPError serves as the ancestor for all exceptions raised by this package, and is suitable for use in handling semantic errors from this library. In most cases, individual public methods will attempt to catch and re-encapsulate various exceptions to provide a semantic error-handling interface. .. admonition:: AQMP Exception Hierarchy Reference | `Exception` | +-- `AQMPError` | +-- `ConnectError` | +-- `StateError` | +-- `ExecInterruptedError` | +-- `ExecuteError` | +-- `ListenerError` | +-- `ProtocolError` | +-- `DeserializationError` | +-- `UnexpectedTypeError` | +-- `ServerParseError` | +-- `BadReplyError` | +-- `GreetingError` | +-- `NegotiationError` """ class AQMPError(Exception): """Abstract error class for all errors originating from this package.""" class ProtocolError(AQMPError): """ Abstract error class for protocol failures. Semantically, these errors are generally the fault of either the protocol server or as a result of a bug in this library. :param error_message: Human-readable string describing the error. """ def __init__(self, error_message: str): super().__init__(error_message) #: Human-readable error message, without any prefix. self.error_message: str = error_message
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/error.py
""" QMP Data Models This module provides simplistic data classes that represent the few structures that the QMP spec mandates; they are used to verify incoming data to make sure it conforms to spec. """ # pylint: disable=too-few-public-methods from collections import abc import copy from typing import ( Any, Dict, Mapping, Optional, Sequence, ) class Model: """ Abstract data model, representing some QMP object of some kind. :param raw: The raw object to be validated. :raise KeyError: If any required fields are absent. :raise TypeError: If any required fields have the wrong type. """ def __init__(self, raw: Mapping[str, Any]): self._raw = raw def _check_key(self, key: str) -> None: if key not in self._raw: raise KeyError(f"'{self._name}' object requires '{key}' member") def _check_value(self, key: str, type_: type, typestr: str) -> None: assert key in self._raw if not isinstance(self._raw[key], type_): raise TypeError( f"'{self._name}' member '{key}' must be a {typestr}" ) def _check_member(self, key: str, type_: type, typestr: str) -> None: self._check_key(key) self._check_value(key, type_, typestr) @property def _name(self) -> str: return type(self).__name__ def __repr__(self) -> str: return f"{self._name}({self._raw!r})" class Greeting(Model): """ Defined in qmp-spec.txt, section 2.2, "Server Greeting". :param raw: The raw Greeting object. :raise KeyError: If any required fields are absent. :raise TypeError: If any required fields have the wrong type. """ def __init__(self, raw: Mapping[str, Any]): super().__init__(raw) #: 'QMP' member self.QMP: QMPGreeting # pylint: disable=invalid-name self._check_member('QMP', abc.Mapping, "JSON object") self.QMP = QMPGreeting(self._raw['QMP']) def _asdict(self) -> Dict[str, object]: """ For compatibility with the iotests sync QMP wrapper. The legacy QMP interface needs Greetings as a garden-variety Dict. This interface is private in the hopes that it will be able to be dropped again in the near-future. Caller beware! 
""" return dict(copy.deepcopy(self._raw)) class QMPGreeting(Model): """ Defined in qmp-spec.txt, section 2.2, "Server Greeting". :param raw: The raw QMPGreeting object. :raise KeyError: If any required fields are absent. :raise TypeError: If any required fields have the wrong type. """ def __init__(self, raw: Mapping[str, Any]): super().__init__(raw) #: 'version' member self.version: Mapping[str, object] #: 'capabilities' member self.capabilities: Sequence[object] self._check_member('version', abc.Mapping, "JSON object") self.version = self._raw['version'] self._check_member('capabilities', abc.Sequence, "JSON array") self.capabilities = self._raw['capabilities'] class ErrorResponse(Model): """ Defined in qmp-spec.txt, section 2.4.2, "error". :param raw: The raw ErrorResponse object. :raise KeyError: If any required fields are absent. :raise TypeError: If any required fields have the wrong type. """ def __init__(self, raw: Mapping[str, Any]): super().__init__(raw) #: 'error' member self.error: ErrorInfo #: 'id' member self.id: Optional[object] = None # pylint: disable=invalid-name self._check_member('error', abc.Mapping, "JSON object") self.error = ErrorInfo(self._raw['error']) if 'id' in raw: self.id = raw['id'] class ErrorInfo(Model): """ Defined in qmp-spec.txt, section 2.4.2, "error". :param raw: The raw ErrorInfo object. :raise KeyError: If any required fields are absent. :raise TypeError: If any required fields have the wrong type. """ def __init__(self, raw: Mapping[str, Any]): super().__init__(raw) #: 'class' member, with an underscore to avoid conflicts in Python. self.class_: str #: 'desc' member self.desc: str self._check_member('class', str, "string") self.class_ = self._raw['class'] self._check_member('desc', str, "string") self.desc = self._raw['desc']
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/models.py
""" QMP Protocol Implementation This module provides the `QMPClient` class, which can be used to connect and send commands to a QMP server such as QEMU. The QMP class can be used to either connect to a listening server, or used to listen and accept an incoming connection from that server. """ import asyncio import logging import socket import struct from typing import ( Dict, List, Mapping, Optional, Union, cast, ) from .error import AQMPError, ProtocolError from .events import Events from .message import Message from .models import ErrorResponse, Greeting from .protocol import AsyncProtocol, Runstate, require from .util import ( bottom_half, exception_summary, pretty_traceback, upper_half, ) class _WrappedProtocolError(ProtocolError): """ Abstract exception class for Protocol errors that wrap an Exception. :param error_message: Human-readable string describing the error. :param exc: The root-cause exception. """ def __init__(self, error_message: str, exc: Exception): super().__init__(error_message) self.exc = exc def __str__(self) -> str: return f"{self.error_message}: {self.exc!s}" class GreetingError(_WrappedProtocolError): """ An exception occurred during the Greeting phase. :param error_message: Human-readable string describing the error. :param exc: The root-cause exception. """ class NegotiationError(_WrappedProtocolError): """ An exception occurred during the Negotiation phase. :param error_message: Human-readable string describing the error. :param exc: The root-cause exception. """ class ExecuteError(AQMPError): """ Exception raised by `QMPClient.execute()` on RPC failure. :param error_response: The RPC error response object. :param sent: The sent RPC message that caused the failure. :param received: The raw RPC error reply received. 
""" def __init__(self, error_response: ErrorResponse, sent: Message, received: Message): super().__init__(error_response.error.desc) #: The sent `Message` that caused the failure self.sent: Message = sent #: The received `Message` that indicated failure self.received: Message = received #: The parsed error response self.error: ErrorResponse = error_response #: The QMP error class self.error_class: str = error_response.error.class_ class ExecInterruptedError(AQMPError): """ Exception raised by `execute()` (et al) when an RPC is interrupted. This error is raised when an `execute()` statement could not be completed. This can occur because the connection itself was terminated before a reply was received. The true cause of the interruption will be available via `disconnect()`. """ class _MsgProtocolError(ProtocolError): """ Abstract error class for protocol errors that have a `Message` object. This Exception class is used for protocol errors where the `Message` was mechanically understood, but was found to be inappropriate or malformed. :param error_message: Human-readable string describing the error. :param msg: The QMP `Message` that caused the error. """ def __init__(self, error_message: str, msg: Message): super().__init__(error_message) #: The received `Message` that caused the error. self.msg: Message = msg def __str__(self) -> str: return "\n".join([ super().__str__(), f" Message was: {str(self.msg)}\n", ]) class ServerParseError(_MsgProtocolError): """ The Server sent a `Message` indicating parsing failure. i.e. A reply has arrived from the server, but it is missing the "ID" field, indicating a parsing error. :param error_message: Human-readable string describing the error. :param msg: The QMP `Message` that caused the error. """ class BadReplyError(_MsgProtocolError): """ An execution reply was successfully routed, but not understood. 
If a QMP message is received with an 'id' field to allow it to be routed, but is otherwise malformed, this exception will be raised. A reply message is malformed if it is missing either the 'return' or 'error' keys, or if the 'error' value has missing keys or members of the wrong type. :param error_message: Human-readable string describing the error. :param msg: The malformed reply that was received. :param sent: The message that was sent that prompted the error. """ def __init__(self, error_message: str, msg: Message, sent: Message): super().__init__(error_message, msg) #: The sent `Message` that caused the failure self.sent = sent class QMPClient(AsyncProtocol[Message], Events): """ Implements a QMP client connection. QMP can be used to establish a connection as either the transport client or server, though this class always acts as the QMP client. :param name: Optional nickname for the connection, used for logging. Basic script-style usage looks like this:: qmp = QMPClient('my_virtual_machine_name') await qmp.connect(('127.0.0.1', 1234)) ... res = await qmp.execute('block-query') ... await qmp.disconnect() Basic async client-style usage looks like this:: class Client: def __init__(self, name: str): self.qmp = QMPClient(name) async def watch_events(self): try: async for event in self.qmp.events: print(f"Event: {event['event']}") except asyncio.CancelledError: return async def run(self, address='/tmp/qemu.socket'): await self.qmp.connect(address) asyncio.create_task(self.watch_events()) await self.qmp.runstate_changed.wait() await self.disconnect() See `aqmp.events` for more detail on event handling patterns. """ #: Logger object used for debugging messages. 
    logger = logging.getLogger(__name__)

    # Read buffer limit; large enough to accept query-qmp-schema
    _limit = (256 * 1024)

    # Type alias for pending execute() result items
    _PendingT = Union[Message, ExecInterruptedError]

    def __init__(self, name: Optional[str] = None) -> None:
        super().__init__(name)
        Events.__init__(self)

        #: Whether or not to await a greeting after establishing a connection.
        self.await_greeting: bool = True

        #: Whether or not to perform capabilities negotiation upon connection.
        #: Implies `await_greeting`.
        self.negotiate: bool = True

        # Cached Greeting, if one was awaited.
        self._greeting: Optional[Greeting] = None

        # Command ID counter
        self._execute_id = 0

        # Incoming RPC reply messages.
        # The None key is used for a single ID-less execution; see _issue().
        self._pending: Dict[
            Union[str, None],
            'asyncio.Queue[QMPClient._PendingT]'
        ] = {}

    @property
    def greeting(self) -> Optional[Greeting]:
        """The `Greeting` from the QMP server, if any."""
        return self._greeting

    @upper_half
    async def _establish_session(self) -> None:
        """
        Initiate the QMP session.

        Wait for the QMP greeting and perform capabilities negotiation.

        :raise GreetingError: When the greeting is not understood.
        :raise NegotiationError: If the negotiation fails.
        :raise EOFError: When the server unexpectedly hangs up.
        :raise OSError: For underlying stream errors.
        """
        self._greeting = None
        self._pending = {}

        if self.await_greeting or self.negotiate:
            self._greeting = await self._get_greeting()

        if self.negotiate:
            await self._negotiate()

        # This will start the reader/writers:
        await super()._establish_session()

    @upper_half
    async def _get_greeting(self) -> Greeting:
        """
        Receive and parse the QMP server greeting.

        :raise GreetingError: When the greeting is not understood.
        :raise EOFError: When the server unexpectedly hangs up.
        :raise OSError: For underlying stream errors.

        :return: the Greeting object given by the server.
        """
        self.logger.debug("Awaiting greeting ...")

        try:
            msg = await self._recv()
            return Greeting(msg)
        except (ProtocolError, KeyError, TypeError) as err:
            # KeyError/TypeError come from Greeting() model validation.
            emsg = "Did not understand Greeting"
            self.logger.error("%s: %s", emsg, exception_summary(err))
            self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
            raise GreetingError(emsg, err) from err
        except BaseException as err:
            # EOFError, OSError, or something unexpected.
            emsg = "Failed to receive Greeting"
            self.logger.error("%s: %s", emsg, exception_summary(err))
            self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
            raise

    @upper_half
    async def _negotiate(self) -> None:
        """
        Perform QMP capabilities negotiation.

        :raise NegotiationError: When negotiation fails.
        :raise EOFError: When the server unexpectedly hangs up.
        :raise OSError: For underlying stream errors.
        """
        self.logger.debug("Negotiating capabilities ...")

        arguments: Dict[str, List[str]] = {'enable': []}
        if self._greeting and 'oob' in self._greeting.QMP.capabilities:
            arguments['enable'].append('oob')
        msg = self.make_execute_msg('qmp_capabilities', arguments=arguments)

        # It's not safe to use execute() here, because the reader/writers
        # aren't running. AsyncProtocol *requires* that a new session
        # does not fail after the reader/writers are running!
        try:
            await self._send(msg)
            reply = await self._recv()
            assert 'return' in reply
            assert 'error' not in reply
        except (ProtocolError, AssertionError) as err:
            emsg = "Negotiation failed"
            self.logger.error("%s: %s", emsg, exception_summary(err))
            self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
            raise NegotiationError(emsg, err) from err
        except BaseException as err:
            # EOFError, OSError, or something unexpected.
            emsg = "Negotiation failed"
            self.logger.error("%s: %s", emsg, exception_summary(err))
            self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
            raise

    @bottom_half
    async def _bh_disconnect(self) -> None:
        # Tear down the transport, then unblock any callers still waiting
        # in _reply() by feeding their queues an ExecInterruptedError.
        try:
            await super()._bh_disconnect()
        finally:
            if self._pending:
                self.logger.debug("Cancelling pending executions")
            keys = self._pending.keys()
            for key in keys:
                self.logger.debug("Cancelling execution '%s'", key)
                # NOTE(review): put_nowait assumes the queue (maxsize=1) has
                # room; if a reply was already queued but not yet consumed,
                # this would raise QueueFull -- TODO confirm that case
                # cannot occur here.
                self._pending[key].put_nowait(
                    ExecInterruptedError("Disconnected")
                )

            self.logger.debug("QMP Disconnected.")

    @upper_half
    def _cleanup(self) -> None:
        super()._cleanup()
        # _reply() deletes entries and _bh_disconnect() flushes the rest,
        # so nothing may be pending by the time cleanup runs.
        assert not self._pending

    @bottom_half
    async def _on_message(self, msg: Message) -> None:
        """
        Add an incoming message to the appropriate queue/handler.

        :raise ServerParseError: When Message indicates server parse failure.
        """
        # Incoming messages are not fully parsed/validated here;
        # do only light peeking to know how to route the messages.

        if 'event' in msg:
            await self._event_dispatch(msg)
            return

        # Below, we assume everything left is an execute/exec-oob response.

        exec_id = cast(Optional[str], msg.get('id'))

        if exec_id in self._pending:
            await self._pending[exec_id].put(msg)
            return

        # We have a message we can't route back to a caller.

        is_error = 'error' in msg
        has_id = 'id' in msg

        if is_error and not has_id:
            # This is very likely a server parsing error.
            # It doesn't inherently belong to any pending execution.
            # Instead of performing clever recovery, just terminate.
            # See "NOTE" in qmp-spec.txt, section 2.4.2
            raise ServerParseError(
                ("Server sent an error response without an ID, "
                 "but there are no ID-less executions pending. "
                 "Assuming this is a server parser failure."),
                msg
            )

        # qmp-spec.txt, section 2.4:
        # 'Clients should drop all the responses
        # that have an unknown "id" field.'
        self.logger.log(
            logging.ERROR if is_error else logging.WARNING,
            "Unknown ID '%s', message dropped.",
            exec_id,
        )
        self.logger.debug("Unroutable message: %s", str(msg))

    @upper_half
    @bottom_half
    async def _do_recv(self) -> Message:
        """
        Read one line from the stream and parse it into a `Message`.

        :raise OSError: When a stream error is encountered.
        :raise EOFError: When the stream is at EOF.
        :raise ProtocolError:
            When the Message is not understood.
            See also `Message._deserialize`.

        :return: A single QMP `Message`.
        """
        msg_bytes = await self._readline()
        # eager=True: deserialize immediately so parse errors surface here.
        msg = Message(msg_bytes, eager=True)
        return msg

    @upper_half
    @bottom_half
    def _do_send(self, msg: Message) -> None:
        """
        Serialize and write a single `Message` to the stream buffer.

        :raise ValueError: JSON serialization failure
        :raise TypeError: JSON serialization failure
        :raise OSError: When a stream error is encountered.
        """
        assert self._writer is not None
        self._writer.write(bytes(msg))

    @upper_half
    def _get_exec_id(self) -> str:
        """Return a new, unique execution ID of the form '__aqmp#NNNNN'."""
        exec_id = f"__aqmp#{self._execute_id:05d}"
        self._execute_id += 1
        return exec_id

    @upper_half
    async def _issue(self, msg: Message) -> Union[None, str]:
        """
        Issue a QMP `Message` and do not wait for a reply.

        :param msg: The QMP `Message` to send to the server.

        :return: The ID of the `Message` sent.
        """
        msg_id: Optional[str] = None
        if 'id' in msg:
            assert isinstance(msg['id'], str)
            msg_id = msg['id']

        # maxsize=1: each pending execution receives at most one item --
        # its reply, or an ExecInterruptedError on disconnect.
        self._pending[msg_id] = asyncio.Queue(maxsize=1)
        await self._outgoing.put(msg)

        return msg_id

    @upper_half
    async def _reply(self, msg_id: Union[str, None]) -> Message:
        """
        Await a reply to a previously issued QMP message.

        :param msg_id: The ID of the previously issued message.

        :return: The reply from the server.
        :raise ExecInterruptedError:
            When the reply could not be retrieved because the connection
            was lost, or some other problem.
        """
        queue = self._pending[msg_id]
        result = await queue.get()

        try:
            if isinstance(result, ExecInterruptedError):
                raise result
            return result
        finally:
            # The pending slot is single-use; always release it.
            del self._pending[msg_id]

    @upper_half
    async def _execute(self, msg: Message, assign_id: bool = True) -> Message:
        """
        Send a QMP `Message` to the server and await a reply.

        This method *assumes* you are sending some kind of an execute
        statement that *will* receive a reply.

        An execution ID will be assigned if assign_id is `True`. It can
        be disabled, but this requires that an ID is manually assigned
        instead. For manually assigned IDs, you must not use the string
        '__aqmp#' anywhere in the ID.

        :param msg: The QMP `Message` to execute.
        :param assign_id: If True, assign a new execution ID.

        :return: Execution reply from the server.
        :raise ExecInterruptedError:
            When the reply could not be retrieved because the connection
            was lost, or some other problem.
        """
        if assign_id:
            msg['id'] = self._get_exec_id()
        elif 'id' in msg:
            assert isinstance(msg['id'], str)
            assert '__aqmp#' not in msg['id']

        exec_id = await self._issue(msg)
        return await self._reply(exec_id)

    @upper_half
    @require(Runstate.RUNNING)
    async def _raw(
            self,
            msg: Union[Message, Mapping[str, object], bytes],
            assign_id: bool = True,
    ) -> Message:
        """
        Issue a raw `Message` to the QMP server and await a reply.

        :param msg:
            A Message to send to the server. It may be a `Message`, any
            Mapping (including Dict), or raw bytes.
        :param assign_id:
            Assign an arbitrary execution ID to this message. If
            `False`, the existing id must either be absent (and no other
            such pending execution may omit an ID) or a string. If it is
            a string, it must not start with '__aqmp#' and no other such
            pending execution may currently be using that ID.

        :return: Execution reply from the server.
        :raise ExecInterruptedError:
            When the reply could not be retrieved because the connection
            was lost, or some other problem.
        :raise TypeError:
            When assign_id is `False`, an ID is given, and it is not a string.
        :raise ValueError:
            When assign_id is `False`, but the ID is not usable;
            Either because it starts with '__aqmp#' or it is already in-use.
        """
        # 1. convert generic Mapping or bytes to a QMP Message
        # 2. copy Message objects so that we assign an ID only to the copy.
        msg = Message(msg)

        exec_id = msg.get('id')
        if not assign_id and 'id' in msg:
            if not isinstance(exec_id, str):
                raise TypeError(f"ID ('{exec_id}') must be a string.")
            if exec_id.startswith('__aqmp#'):
                raise ValueError(
                    f"ID ('{exec_id}') must not start with '__aqmp#'."
                )

        if not assign_id and exec_id in self._pending:
            raise ValueError(
                f"ID '{exec_id}' is in-use and cannot be used."
            )

        return await self._execute(msg, assign_id=assign_id)

    @upper_half
    @require(Runstate.RUNNING)
    async def execute_msg(self, msg: Message) -> object:
        """
        Execute a QMP command and return its value.

        :param msg: The QMP `Message` to execute.

        :return:
            The command execution return value from the server. The type of
            object returned depends on the command that was issued,
            though most in QEMU return a `dict`.
        :raise ValueError:
            If the QMP `Message` does not have either the 'execute' or
            'exec-oob' fields set.
        :raise ExecuteError: When the server returns an error response.
        :raise ExecInterruptedError: if the connection was terminated early.
        """
        if not ('execute' in msg or 'exec-oob' in msg):
            raise ValueError("Requires 'execute' or 'exec-oob' message")

        # Copy the Message so that the ID assigned by _execute() is
        # local to this method; allowing the ID to be seen in raised
        # Exceptions but without modifying the caller's held copy.
        msg = Message(msg)
        reply = await self._execute(msg)

        if 'error' in reply:
            try:
                error_response = ErrorResponse(reply)
            except (KeyError, TypeError) as err:
                # Error response was malformed.
                raise BadReplyError(
                    "QMP error reply is malformed", reply, msg,
                ) from err

            raise ExecuteError(error_response, msg, reply)

        if 'return' not in reply:
            raise BadReplyError(
                "QMP reply is missing a 'error' or 'return' member",
                reply, msg,
            )

        return reply['return']

    @classmethod
    def make_execute_msg(cls, cmd: str,
                         arguments: Optional[Mapping[str, object]] = None,
                         oob: bool = False) -> Message:
        """
        Create an executable message to be sent by `execute_msg` later.

        :param cmd: QMP command name.
        :param arguments: Arguments (if any). Must be JSON-serializable.
        :param oob: If `True`, execute "out of band".

        :return: An executable QMP `Message`.
        """
        msg = Message({'exec-oob' if oob else 'execute': cmd})
        if arguments is not None:
            msg['arguments'] = arguments
        return msg

    @upper_half
    async def execute(self, cmd: str,
                      arguments: Optional[Mapping[str, object]] = None,
                      oob: bool = False) -> object:
        """
        Execute a QMP command and return its value.

        :param cmd: QMP command name.
        :param arguments: Arguments (if any). Must be JSON-serializable.
        :param oob: If `True`, execute "out of band".

        :return:
            The command execution return value from the server. The type of
            object returned depends on the command that was issued,
            though most in QEMU return a `dict`.
        :raise ExecuteError: When the server returns an error response.
        :raise ExecInterruptedError: if the connection was terminated early.
        """
        msg = self.make_execute_msg(cmd, arguments, oob=oob)
        return await self.execute_msg(msg)

    @upper_half
    @require(Runstate.RUNNING)
    def send_fd_scm(self, fd: int) -> None:
        """
        Send a file descriptor to the remote via SCM_RIGHTS.
        """
        assert self._writer is not None
        sock = self._writer.transport.get_extra_info('socket')

        if sock.family != socket.AF_UNIX:
            raise AQMPError("Sending file descriptors requires a UNIX socket.")

        # Void the warranty sticker.
        # Access to sendmsg in asyncio is scheduled for removal in Python 3.11.
        sock = sock._sock  # pylint: disable=protected-access
        sock.sendmsg(
            [b' '],
            [(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('@i', fd))]
        )
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/qmp_client.py
""" Miscellaneous Utilities This module provides asyncio utilities and compatibility wrappers for Python 3.6 to provide some features that otherwise become available in Python 3.7+. Various logging and debugging utilities are also provided, such as `exception_summary()` and `pretty_traceback()`, used primarily for adding information into the logging stream. """ import asyncio import sys import traceback from typing import ( Any, Coroutine, Optional, TypeVar, cast, ) T = TypeVar('T') # -------------------------- # Section: Utility Functions # -------------------------- async def flush(writer: asyncio.StreamWriter) -> None: """ Utility function to ensure a StreamWriter is *fully* drained. `asyncio.StreamWriter.drain` only promises we will return to below the "high-water mark". This function ensures we flush the entire buffer -- by setting the high water mark to 0 and then calling drain. The flow control limits are restored after the call is completed. """ transport = cast(asyncio.WriteTransport, writer.transport) # https://github.com/python/typeshed/issues/5779 low, high = transport.get_write_buffer_limits() # type: ignore transport.set_write_buffer_limits(0, 0) try: await writer.drain() finally: transport.set_write_buffer_limits(high, low) def upper_half(func: T) -> T: """ Do-nothing decorator that annotates a method as an "upper-half" method. These methods must not call bottom-half functions directly, but can schedule them to run. """ return func def bottom_half(func: T) -> T: """ Do-nothing decorator that annotates a method as a "bottom-half" method. These methods must take great care to handle their own exceptions whenever possible. If they go unhandled, they will cause termination of the loop. These methods do not, in general, have the ability to directly report information to a caller’s context and will usually be collected as a Task result instead. They must not call upper-half functions directly. 
""" return func # ------------------------------- # Section: Compatibility Wrappers # ------------------------------- def create_task(coro: Coroutine[Any, Any, T], loop: Optional[asyncio.AbstractEventLoop] = None ) -> 'asyncio.Future[T]': """ Python 3.6-compatible `asyncio.create_task` wrapper. :param coro: The coroutine to execute in a task. :param loop: Optionally, the loop to create the task in. :return: An `asyncio.Future` object. """ if sys.version_info >= (3, 7): if loop is not None: return loop.create_task(coro) return asyncio.create_task(coro) # pylint: disable=no-member # Python 3.6: return asyncio.ensure_future(coro, loop=loop) def is_closing(writer: asyncio.StreamWriter) -> bool: """ Python 3.6-compatible `asyncio.StreamWriter.is_closing` wrapper. :param writer: The `asyncio.StreamWriter` object. :return: `True` if the writer is closing, or closed. """ if sys.version_info >= (3, 7): return writer.is_closing() # Python 3.6: transport = writer.transport assert isinstance(transport, asyncio.WriteTransport) return transport.is_closing() async def wait_closed(writer: asyncio.StreamWriter) -> None: """ Python 3.6-compatible `asyncio.StreamWriter.wait_closed` wrapper. :param writer: The `asyncio.StreamWriter` to wait on. """ if sys.version_info >= (3, 7): await writer.wait_closed() return # Python 3.6 transport = writer.transport assert isinstance(transport, asyncio.WriteTransport) while not transport.is_closing(): await asyncio.sleep(0) # This is an ugly workaround, but it's the best I can come up with. sock = transport.get_extra_info('socket') if sock is None: # Our transport doesn't have a socket? ... # Nothing we can reasonably do. return while sock.fileno() != -1: await asyncio.sleep(0) def asyncio_run(coro: Coroutine[Any, Any, T], *, debug: bool = False) -> T: """ Python 3.6-compatible `asyncio.run` wrapper. :param coro: A coroutine to execute now. :return: The return value from the coroutine. 
""" if sys.version_info >= (3, 7): return asyncio.run(coro, debug=debug) # Python 3.6 loop = asyncio.get_event_loop() loop.set_debug(debug) ret = loop.run_until_complete(coro) loop.close() return ret # ---------------------------- # Section: Logging & Debugging # ---------------------------- def exception_summary(exc: BaseException) -> str: """ Return a summary string of an arbitrary exception. It will be of the form "ExceptionType: Error Message", if the error string is non-empty, and just "ExceptionType" otherwise. """ name = type(exc).__qualname__ smod = type(exc).__module__ if smod not in ("__main__", "builtins"): name = smod + '.' + name error = str(exc) if error: return f"{name}: {error}" return name def pretty_traceback(prefix: str = " | ") -> str: """ Formats the current traceback, indented to provide visual distinction. This is useful for printing a traceback within a traceback for debugging purposes when encapsulating errors to deliver them up the stack; when those errors are printed, this helps provide a nice visual grouping to quickly identify the parts of the error that belong to the inner exception. :param prefix: The prefix to append to each line of the traceback. :return: A string, formatted something like the following:: | Traceback (most recent call last): | File "foobar.py", line 42, in arbitrary_example | foo.baz() | ArbitraryError: [Errno 42] Something bad happened! """ output = "".join(traceback.format_exception(*sys.exc_info())) exc_lines = [] for line in output.split('\n'): exc_lines.append(prefix + line) # The last line is always empty, omit it return "\n".join(exc_lines[:-1])
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/util.py
""" AQMP Events and EventListeners Asynchronous QMP uses `EventListener` objects to listen for events. An `EventListener` is a FIFO event queue that can be pre-filtered to listen for only specific events. Each `EventListener` instance receives its own copy of events that it hears, so events may be consumed without fear or worry for depriving other listeners of events they need to hear. EventListener Tutorial ---------------------- In all of the following examples, we assume that we have a `QMPClient` instantiated named ``qmp`` that is already connected. `listener()` context blocks with one name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The most basic usage is by using the `listener()` context manager to construct them: .. code:: python with qmp.listener('STOP') as listener: await qmp.execute('stop') await listener.get() The listener is active only for the duration of the ‘with’ block. This instance listens only for ‘STOP’ events. `listener()` context blocks with two or more names ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Multiple events can be selected for by providing any ``Iterable[str]``: .. code:: python with qmp.listener(('STOP', 'RESUME')) as listener: await qmp.execute('stop') event = await listener.get() assert event['event'] == 'STOP' await qmp.execute('cont') event = await listener.get() assert event['event'] == 'RESUME' `listener()` context blocks with no names ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By omitting names entirely, you can listen to ALL events. .. code:: python with qmp.listener() as listener: await qmp.execute('stop') event = await listener.get() assert event['event'] == 'STOP' This isn’t a very good use case for this feature: In a non-trivial running system, we may not know what event will arrive next. Grabbing the top of a FIFO queue returning multiple kinds of events may be prone to error. 
Using async iterators to retrieve events
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you’d like to simply watch what events happen to arrive, you can use
the listener as an async iterator:

.. code:: python

    with qmp.listener() as listener:
        async for event in listener:
            print(f"Event arrived: {event['event']}")

This is analogous to the following code:

.. code:: python

    with qmp.listener() as listener:
        while True:
            event = await listener.get()
            print(f"Event arrived: {event['event']}")

This event stream will never end, so these blocks will never terminate.


Using asyncio.Task to concurrently retrieve events
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Since a listener’s event stream will never terminate, it is not likely
useful to use that form in a script. For longer-running clients, we can
create event handlers by using `asyncio.Task` to create concurrent
coroutines:

.. code:: python

    async def print_events(listener):
        try:
            async for event in listener:
                print(f"Event arrived: {event['event']}")
        except asyncio.CancelledError:
            return

    with qmp.listener() as listener:
        task = asyncio.Task(print_events(listener))
        await qmp.execute('stop')
        await qmp.execute('cont')
        task.cancel()
        await task

However, there is no guarantee that these events will be received by the
time we leave this context block. Once the context block is exited, the
listener will cease to hear any new events, and becomes inert.

Be mindful of the timing: the above example will *probably* print both
STOP/RESUMED events, but that is not *guaranteed*. The example below
outlines how to use listeners outside of a context block.


Using `register_listener()` and `remove_listener()`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To create a listener with a longer lifetime, beyond the scope of a
single block, create a listener and then call `register_listener()`:

..
code:: python class MyClient: def __init__(self, qmp): self.qmp = qmp self.listener = EventListener() async def print_events(self): try: async for event in self.listener: print(f"Event arrived: {event['event']}") except asyncio.CancelledError: return async def run(self): self.task = asyncio.Task(self.print_events) self.qmp.register_listener(self.listener) await qmp.execute('stop') await qmp.execute('cont') async def stop(self): self.task.cancel() await self.task self.qmp.remove_listener(self.listener) The listener can be deactivated by using `remove_listener()`. When it is removed, any possible pending events are cleared and it can be re-registered at a later time. Using the built-in all events listener ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The `QMPClient` object creates its own default listener named :py:obj:`~Events.events` that can be used for the same purpose without having to create your own: .. code:: python async def print_events(listener): try: async for event in listener: print(f"Event arrived: {event['event']}") except asyncio.CancelledError: return task = asyncio.Task(print_events(qmp.events)) await qmp.execute('stop') await qmp.execute('cont') task.cancel() await task Using both .get() and async iterators ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The async iterator and `get()` methods pull events from the same FIFO queue. If you mix the usage of both, be aware: Events are emitted precisely once per listener. If multiple contexts try to pull events from the same listener instance, events are still emitted only precisely once. This restriction can be lifted by creating additional listeners. Creating multiple listeners ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Additional `EventListener` objects can be created at-will. Each one receives its own copy of events, with separate FIFO event queues. .. 
code:: python my_listener = EventListener() qmp.register_listener(my_listener) await qmp.execute('stop') copy1 = await my_listener.get() copy2 = await qmp.events.get() assert copy1 == copy2 In this example, we await an event from both a user-created `EventListener` and the built-in events listener. Both receive the same event. Clearing listeners ~~~~~~~~~~~~~~~~~~ `EventListener` objects can be cleared, clearing all events seen thus far: .. code:: python await qmp.execute('stop') qmp.events.clear() await qmp.execute('cont') event = await qmp.events.get() assert event['event'] == 'RESUME' `EventListener` objects are FIFO queues. If events are not consumed, they will remain in the queue until they are witnessed or discarded via `clear()`. FIFO queues will be drained automatically upon leaving a context block, or when calling `remove_listener()`. Accessing listener history ~~~~~~~~~~~~~~~~~~~~~~~~~~ `EventListener` objects record their history. Even after being cleared, you can obtain a record of all events seen so far: .. code:: python await qmp.execute('stop') await qmp.execute('cont') qmp.events.clear() assert len(qmp.events.history) == 2 assert qmp.events.history[0]['event'] == 'STOP' assert qmp.events.history[1]['event'] == 'RESUME' The history is updated immediately and does not require the event to be witnessed first. Using event filters ~~~~~~~~~~~~~~~~~~~ `EventListener` objects can be given complex filtering criteria if names are not sufficient: .. code:: python def job1_filter(event) -> bool: event_data = event.get('data', {}) event_job_id = event_data.get('id') return event_job_id == "job1" with qmp.listener('JOB_STATUS_CHANGE', job1_filter) as listener: await qmp.execute('blockdev-backup', arguments={'job-id': 'job1', ...}) async for event in listener: if event['data']['status'] == 'concluded': break These filters might be most useful when parameterized. 
`EventListener` objects expect a function that takes only a single argument (the raw event, as a `Message`) and returns a bool; True if the event should be accepted into the stream. You can create a function that adapts this signature to accept configuration parameters: .. code:: python def job_filter(job_id: str) -> EventFilter: def filter(event: Message) -> bool: return event['data']['id'] == job_id return filter with qmp.listener('JOB_STATUS_CHANGE', job_filter('job2')) as listener: await qmp.execute('blockdev-backup', arguments={'job-id': 'job2', ...}) async for event in listener: if event['data']['status'] == 'concluded': break Activating an existing listener with `listen()` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Listeners with complex, long configurations can also be created manually and activated temporarily by using `listen()` instead of `listener()`: .. code:: python listener = EventListener(('BLOCK_JOB_COMPLETED', 'BLOCK_JOB_CANCELLED', 'BLOCK_JOB_ERROR', 'BLOCK_JOB_READY', 'BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE')) with qmp.listen(listener): await qmp.execute('blockdev-backup', arguments={'job-id': 'job3', ...}) async for event in listener: print(event) if event['event'] == 'BLOCK_JOB_COMPLETED': break Any events that are not witnessed by the time the block is left will be cleared from the queue; entering the block is an implicit `register_listener()` and leaving the block is an implicit `remove_listener()`. Activating multiple existing listeners with `listen()` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While `listener()` is only capable of creating a single listener, `listen()` is capable of activating multiple listeners simultaneously: .. 
code:: python

   def job_filter(job_id: str) -> EventFilter:
       def filter(event: Message) -> bool:
           return event['data']['id'] == job_id
       return filter

   jobA = EventListener('JOB_STATUS_CHANGE', job_filter('jobA'))
   jobB = EventListener('JOB_STATUS_CHANGE', job_filter('jobB'))

   with qmp.listen(jobA, jobB):
       await qmp.execute('blockdev-create', arguments={'job-id': 'jobA', ...})
       await qmp.execute('blockdev-create', arguments={'job-id': 'jobB', ...})

       async for event in jobA:
           if event['data']['status'] == 'concluded':
               break
       async for event in jobB:
           if event['data']['status'] == 'concluded':
               break

Extending the `EventListener` class
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the case that a more specialized `EventListener` is desired to
provide either more functionality or more compact syntax for
specialized cases, it can be extended.

One of the key methods to extend or override is
:py:meth:`~EventListener.accept()`. The default implementation checks
an incoming message for:

1. A qualifying name, if any :py:obj:`~EventListener.names` were
   specified at initialization time
2. That :py:obj:`~EventListener.event_filter()` returns True.

This can be modified however you see fit to change the criteria for
inclusion in the stream.

For convenience, a ``JobListener`` class could be created that simply
bakes in configuration so it does not need to be repeated:

.. code:: python

   class JobListener(EventListener):
       def __init__(self, job_id: str):
           super().__init__(('BLOCK_JOB_COMPLETED', 'BLOCK_JOB_CANCELLED',
                             'BLOCK_JOB_ERROR', 'BLOCK_JOB_READY',
                             'BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE'))
           self.job_id = job_id

       def accept(self, event) -> bool:
           if not super().accept(event):
               return False
           if event['event'] in ('BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE'):
               return event['data']['id'] == self.job_id
           return event['data']['device'] == self.job_id

From here on out, you can conjure up a custom-purpose listener that
listens only for job-related events for a specific job-id easily:

..
code:: python listener = JobListener('job4') with qmp.listener(listener): await qmp.execute('blockdev-backup', arguments={'job-id': 'job4', ...}) async for event in listener: print(event) if event['event'] == 'BLOCK_JOB_COMPLETED': break Experimental Interfaces & Design Issues --------------------------------------- These interfaces are not ones I am sure I will keep or otherwise modify heavily. qmp.listener()’s type signature ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `listener()` does not return anything, because it was assumed the caller already had a handle to the listener. However, for ``qmp.listener(EventListener())`` forms, the caller will not have saved a handle to the listener. Because this function can accept *many* listeners, I found it hard to accurately type in a way where it could be used in both “one” or “many” forms conveniently and in a statically type-safe manner. Ultimately, I removed the return altogether, but perhaps with more time I can work out a way to re-add it. API Reference ------------- """ import asyncio from contextlib import contextmanager import logging from typing import ( AsyncIterator, Callable, Iterable, Iterator, List, Optional, Set, Tuple, Union, ) from .error import AQMPError from .message import Message EventNames = Union[str, Iterable[str], None] EventFilter = Callable[[Message], bool] class ListenerError(AQMPError): """ Generic error class for `EventListener`-related problems. """ class EventListener: """ Selectively listens for events with runtime configurable filtering. This class is designed to be directly usable for the most common cases, but it can be extended to provide more rigorous control. :param names: One or more names of events to listen for. When not provided, listen for ALL events. :param event_filter: An optional event filtering function. When names are also provided, this acts as a secondary filter. 
When ``names`` and ``event_filter`` are both provided, the names will be filtered first, and then the filter function will be called second. The event filter function can assume that the format of the event is a known format. """ def __init__( self, names: EventNames = None, event_filter: Optional[EventFilter] = None, ): # Queue of 'heard' events yet to be witnessed by a caller. self._queue: 'asyncio.Queue[Message]' = asyncio.Queue() # Intended as a historical record, NOT a processing queue or backlog. self._history: List[Message] = [] #: Primary event filter, based on one or more event names. self.names: Set[str] = set() if isinstance(names, str): self.names.add(names) elif names is not None: self.names.update(names) #: Optional, secondary event filter. self.event_filter: Optional[EventFilter] = event_filter @property def history(self) -> Tuple[Message, ...]: """ A read-only history of all events seen so far. This represents *every* event, including those not yet witnessed via `get()` or ``async for``. It persists between `clear()` calls and is immutable. """ return tuple(self._history) def accept(self, event: Message) -> bool: """ Determine if this listener accepts this event. This method determines which events will appear in the stream. The default implementation simply checks the event against the list of names and the event_filter to decide if this `EventListener` accepts a given event. It can be overridden/extended to provide custom listener behavior. User code is not expected to need to invoke this method. :param event: The event under consideration. :return: `True`, if this listener accepts this event. """ name_ok = (not self.names) or (event['event'] in self.names) return name_ok and ( (not self.event_filter) or self.event_filter(event) ) async def put(self, event: Message) -> None: """ Conditionally put a new event into the FIFO queue. This method is not designed to be invoked from user code, and it should not need to be overridden. 
It is a public interface so that `QMPClient` has an interface by which it can inform registered listeners of new events. The event will be put into the queue if :py:meth:`~EventListener.accept()` returns `True`. :param event: The new event to put into the FIFO queue. """ if not self.accept(event): return self._history.append(event) await self._queue.put(event) async def get(self) -> Message: """ Wait for the very next event in this stream. If one is already available, return that one. """ return await self._queue.get() def empty(self) -> bool: """ Return `True` if there are no pending events. """ return self._queue.empty() def clear(self) -> List[Message]: """ Clear this listener of all pending events. Called when an `EventListener` is being unregistered, this clears the pending FIFO queue synchronously. It can be also be used to manually clear any pending events, if desired. :return: The cleared events, if any. .. warning:: Take care when discarding events. Cleared events will be silently tossed on the floor. All events that were ever accepted by this listener are visible in `history()`. """ events = [] while True: try: events.append(self._queue.get_nowait()) except asyncio.QueueEmpty: break return events def __aiter__(self) -> AsyncIterator[Message]: return self async def __anext__(self) -> Message: """ Enables the `EventListener` to function as an async iterator. It may be used like this: .. code:: python async for event in listener: print(event) These iterators will never terminate of their own accord; you must provide break conditions or otherwise prepare to run them in an `asyncio.Task` that can be cancelled. """ return await self.get() class Events: """ Events is a mix-in class that adds event functionality to the QMP class. It's designed specifically as a mix-in for `QMPClient`, and it relies upon the class it is being mixed into having a 'logger' property. 
""" def __init__(self) -> None: self._listeners: List[EventListener] = [] #: Default, all-events `EventListener`. self.events: EventListener = EventListener() self.register_listener(self.events) # Parent class needs to have a logger self.logger: logging.Logger async def _event_dispatch(self, msg: Message) -> None: """ Given a new event, propagate it to all of the active listeners. :param msg: The event to propagate. """ for listener in self._listeners: await listener.put(msg) def register_listener(self, listener: EventListener) -> None: """ Register and activate an `EventListener`. :param listener: The listener to activate. :raise ListenerError: If the given listener is already registered. """ if listener in self._listeners: raise ListenerError("Attempted to re-register existing listener") self.logger.debug("Registering %s.", str(listener)) self._listeners.append(listener) def remove_listener(self, listener: EventListener) -> None: """ Unregister and deactivate an `EventListener`. The removed listener will have its pending events cleared via `clear()`. The listener can be re-registered later when desired. :param listener: The listener to deactivate. :raise ListenerError: If the given listener is not registered. """ if listener == self.events: raise ListenerError("Cannot remove the default listener.") self.logger.debug("Removing %s.", str(listener)) listener.clear() self._listeners.remove(listener) @contextmanager def listen(self, *listeners: EventListener) -> Iterator[None]: r""" Context manager: Temporarily listen with an `EventListener`. Accepts one or more `EventListener` objects and registers them, activating them for the duration of the context block. `EventListener` objects will have any pending events in their FIFO queue cleared upon exiting the context block, when they are deactivated. :param \*listeners: One or more EventListeners to activate. :raise ListenerError: If the given listener(s) are already active. 
""" _added = [] try: for listener in listeners: self.register_listener(listener) _added.append(listener) yield finally: for listener in _added: self.remove_listener(listener) @contextmanager def listener( self, names: EventNames = (), event_filter: Optional[EventFilter] = None ) -> Iterator[EventListener]: """ Context manager: Temporarily listen with a new `EventListener`. Creates an `EventListener` object and registers it, activating it for the duration of the context block. :param names: One or more names of events to listen for. When not provided, listen for ALL events. :param event_filter: An optional event filtering function. When names are also provided, this acts as a secondary filter. :return: The newly created and active `EventListener`. """ listener = EventListener(names, event_filter) with self.listen(listener): yield listener
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/events.py
""" Generic Asynchronous Message-based Protocol Support This module provides a generic framework for sending and receiving messages over an asyncio stream. `AsyncProtocol` is an abstract class that implements the core mechanisms of a simple send/receive protocol, and is designed to be extended. In this package, it is used as the implementation for the `QMPClient` class. """ import asyncio from asyncio import StreamReader, StreamWriter from enum import Enum from functools import wraps import logging from ssl import SSLContext from typing import ( Any, Awaitable, Callable, Generic, List, Optional, Tuple, TypeVar, Union, cast, ) from .error import AQMPError from .util import ( bottom_half, create_task, exception_summary, flush, is_closing, pretty_traceback, upper_half, wait_closed, ) T = TypeVar('T') _TaskFN = Callable[[], Awaitable[None]] # aka ``async def func() -> None`` _FutureT = TypeVar('_FutureT', bound=Optional['asyncio.Future[Any]']) class Runstate(Enum): """Protocol session runstate.""" #: Fully quiesced and disconnected. IDLE = 0 #: In the process of connecting or establishing a session. CONNECTING = 1 #: Fully connected and active session. RUNNING = 2 #: In the process of disconnecting. #: Runstate may be returned to `IDLE` by calling `disconnect()`. DISCONNECTING = 3 class ConnectError(AQMPError): """ Raised when the initial connection process has failed. This Exception always wraps a "root cause" exception that can be interrogated for additional information. :param error_message: Human-readable string describing the error. :param exc: The root-cause exception. """ def __init__(self, error_message: str, exc: Exception): super().__init__(error_message) #: Human-readable error string self.error_message: str = error_message #: Wrapped root cause exception self.exc: Exception = exc def __str__(self) -> str: return f"{self.error_message}: {self.exc!s}" class StateError(AQMPError): """ An API command (connect, execute, etc) was issued at an inappropriate time. 
This error is raised when a command like :py:meth:`~AsyncProtocol.connect()` is issued at an inappropriate time. :param error_message: Human-readable string describing the state violation. :param state: The actual `Runstate` seen at the time of the violation. :param required: The `Runstate` required to process this command. """ def __init__(self, error_message: str, state: Runstate, required: Runstate): super().__init__(error_message) self.error_message = error_message self.state = state self.required = required F = TypeVar('F', bound=Callable[..., Any]) # pylint: disable=invalid-name # Don't Panic. def require(required_state: Runstate) -> Callable[[F], F]: """ Decorator: protect a method so it can only be run in a certain `Runstate`. :param required_state: The `Runstate` required to invoke this method. :raise StateError: When the required `Runstate` is not met. """ def _decorator(func: F) -> F: # _decorator is the decorator that is built by calling the # require() decorator factory; e.g.: # # @require(Runstate.IDLE) def foo(): ... # will replace 'foo' with the result of '_decorator(foo)'. @wraps(func) def _wrapper(proto: 'AsyncProtocol[Any]', *args: Any, **kwargs: Any) -> Any: # _wrapper is the function that gets executed prior to the # decorated method. name = type(proto).__name__ if proto.runstate != required_state: if proto.runstate == Runstate.CONNECTING: emsg = f"{name} is currently connecting." elif proto.runstate == Runstate.DISCONNECTING: emsg = (f"{name} is disconnecting." " Call disconnect() to return to IDLE state.") elif proto.runstate == Runstate.RUNNING: emsg = f"{name} is already connected and running." elif proto.runstate == Runstate.IDLE: emsg = f"{name} is disconnected and idle." else: assert False raise StateError(emsg, proto.runstate, required_state) # No StateError, so call the wrapped method. return func(proto, *args, **kwargs) # Return the decorated method; # Transforming Func to Decorated[Func]. 
return cast(F, _wrapper) # Return the decorator instance from the decorator factory. Phew! return _decorator class AsyncProtocol(Generic[T]): """ AsyncProtocol implements a generic async message-based protocol. This protocol assumes the basic unit of information transfer between client and server is a "message", the details of which are left up to the implementation. It assumes the sending and receiving of these messages is full-duplex and not necessarily correlated; i.e. it supports asynchronous inbound messages. It is designed to be extended by a specific protocol which provides the implementations for how to read and send messages. These must be defined in `_do_recv()` and `_do_send()`, respectively. Other callbacks have a default implementation, but are intended to be either extended or overridden: - `_establish_session`: The base implementation starts the reader/writer tasks. A protocol implementation can override this call, inserting actions to be taken prior to starting the reader/writer tasks before the super() call; actions needing to occur afterwards can be written after the super() call. - `_on_message`: Actions to be performed when a message is received. - `_cb_outbound`: Logging/Filtering hook for all outbound messages. - `_cb_inbound`: Logging/Filtering hook for all inbound messages. This hook runs *before* `_on_message()`. :param name: Name used for logging messages, if any. By default, messages will log to 'qemu.aqmp.protocol', but each individual connection can be given its own logger by giving it a name; messages will then log to 'qemu.aqmp.protocol.${name}'. """ # pylint: disable=too-many-instance-attributes #: Logger object for debugging messages from this connection. logger = logging.getLogger(__name__) # Maximum allowable size of read buffer _limit = (64 * 1024) # ------------------------- # Section: Public interface # ------------------------- def __init__(self, name: Optional[str] = None) -> None: #: The nickname for this connection, if any. 
self.name: Optional[str] = name if self.name is not None: self.logger = self.logger.getChild(self.name) # stream I/O self._reader: Optional[StreamReader] = None self._writer: Optional[StreamWriter] = None # Outbound Message queue self._outgoing: asyncio.Queue[T] # Special, long-running tasks: self._reader_task: Optional[asyncio.Future[None]] = None self._writer_task: Optional[asyncio.Future[None]] = None # Aggregate of the above two tasks, used for Exception management. self._bh_tasks: Optional[asyncio.Future[Tuple[None, None]]] = None #: Disconnect task. The disconnect implementation runs in a task #: so that asynchronous disconnects (initiated by the #: reader/writer) are allowed to wait for the reader/writers to #: exit. self._dc_task: Optional[asyncio.Future[None]] = None self._runstate = Runstate.IDLE self._runstate_changed: Optional[asyncio.Event] = None def __repr__(self) -> str: cls_name = type(self).__name__ tokens = [] if self.name is not None: tokens.append(f"name={self.name!r}") tokens.append(f"runstate={self.runstate.name}") return f"<{cls_name} {' '.join(tokens)}>" @property # @upper_half def runstate(self) -> Runstate: """The current `Runstate` of the connection.""" return self._runstate @upper_half async def runstate_changed(self) -> Runstate: """ Wait for the `runstate` to change, then return that runstate. """ await self._runstate_event.wait() return self.runstate @upper_half @require(Runstate.IDLE) async def accept(self, address: Union[str, Tuple[str, int]], ssl: Optional[SSLContext] = None) -> None: """ Accept a connection and begin processing message queues. If this call fails, `runstate` is guaranteed to be set back to `IDLE`. :param address: Address to listen to; UNIX socket path or TCP address/port. :param ssl: SSL context to use, if any. :raise StateError: When the `Runstate` is not `IDLE`. :raise ConnectError: If a connection could not be accepted. 
""" await self._new_session(address, ssl, accept=True) @upper_half @require(Runstate.IDLE) async def connect(self, address: Union[str, Tuple[str, int]], ssl: Optional[SSLContext] = None) -> None: """ Connect to the server and begin processing message queues. If this call fails, `runstate` is guaranteed to be set back to `IDLE`. :param address: Address to connect to; UNIX socket path or TCP address/port. :param ssl: SSL context to use, if any. :raise StateError: When the `Runstate` is not `IDLE`. :raise ConnectError: If a connection cannot be made to the server. """ await self._new_session(address, ssl) @upper_half async def disconnect(self) -> None: """ Disconnect and wait for all tasks to fully stop. If there was an exception that caused the reader/writers to terminate prematurely, it will be raised here. :raise Exception: When the reader or writer terminate unexpectedly. """ self.logger.debug("disconnect() called.") self._schedule_disconnect() await self._wait_disconnect() # -------------------------- # Section: Session machinery # -------------------------- @property def _runstate_event(self) -> asyncio.Event: # asyncio.Event() objects should not be created prior to entrance into # an event loop, so we can ensure we create it in the correct context. # Create it on-demand *only* at the behest of an 'async def' method. if not self._runstate_changed: self._runstate_changed = asyncio.Event() return self._runstate_changed @upper_half @bottom_half def _set_state(self, state: Runstate) -> None: """ Change the `Runstate` of the protocol connection. Signals the `runstate_changed` event. 
""" if state == self._runstate: return self.logger.debug("Transitioning from '%s' to '%s'.", str(self._runstate), str(state)) self._runstate = state self._runstate_event.set() self._runstate_event.clear() @upper_half async def _new_session(self, address: Union[str, Tuple[str, int]], ssl: Optional[SSLContext] = None, accept: bool = False) -> None: """ Establish a new connection and initialize the session. Connect or accept a new connection, then begin the protocol session machinery. If this call fails, `runstate` is guaranteed to be set back to `IDLE`. :param address: Address to connect to/listen on; UNIX socket path or TCP address/port. :param ssl: SSL context to use, if any. :param accept: Accept a connection instead of connecting when `True`. :raise ConnectError: When a connection or session cannot be established. This exception will wrap a more concrete one. In most cases, the wrapped exception will be `OSError` or `EOFError`. If a protocol-level failure occurs while establishing a new session, the wrapped error may also be an `AQMPError`. """ assert self.runstate == Runstate.IDLE try: phase = "connection" await self._establish_connection(address, ssl, accept) phase = "session" await self._establish_session() except BaseException as err: emsg = f"Failed to establish {phase}" self.logger.error("%s: %s", emsg, exception_summary(err)) self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) try: # Reset from CONNECTING back to IDLE. await self.disconnect() except: emsg = "Unexpected bottom half exception" self.logger.critical("%s:\n%s\n", emsg, pretty_traceback()) raise # NB: CancelledError is not a BaseException before Python 3.8 if isinstance(err, asyncio.CancelledError): raise if isinstance(err, Exception): raise ConnectError(emsg, err) from err # Raise BaseExceptions un-wrapped, they're more important. 
raise assert self.runstate == Runstate.RUNNING @upper_half async def _establish_connection( self, address: Union[str, Tuple[str, int]], ssl: Optional[SSLContext] = None, accept: bool = False ) -> None: """ Establish a new connection. :param address: Address to connect to/listen on; UNIX socket path or TCP address/port. :param ssl: SSL context to use, if any. :param accept: Accept a connection instead of connecting when `True`. """ assert self.runstate == Runstate.IDLE self._set_state(Runstate.CONNECTING) # Allow runstate watchers to witness 'CONNECTING' state; some # failures in the streaming layer are synchronous and will not # otherwise yield. await asyncio.sleep(0) if accept: await self._do_accept(address, ssl) else: await self._do_connect(address, ssl) @upper_half async def _do_accept(self, address: Union[str, Tuple[str, int]], ssl: Optional[SSLContext] = None) -> None: """ Acting as the transport server, accept a single connection. :param address: Address to listen on; UNIX socket path or TCP address/port. :param ssl: SSL context to use, if any. :raise OSError: For stream-related errors. """ self.logger.debug("Awaiting connection on %s ...", address) connected = asyncio.Event() server: Optional[asyncio.AbstractServer] = None async def _client_connected_cb(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None: """Used to accept a single incoming connection, see below.""" nonlocal server nonlocal connected # A connection has been accepted; stop listening for new ones. assert server is not None server.close() await server.wait_closed() server = None # Register this client as being connected self._reader, self._writer = (reader, writer) # Signal back: We've accepted a client! 
connected.set() if isinstance(address, tuple): coro = asyncio.start_server( _client_connected_cb, host=address[0], port=address[1], ssl=ssl, backlog=1, limit=self._limit, ) else: coro = asyncio.start_unix_server( _client_connected_cb, path=address, ssl=ssl, backlog=1, limit=self._limit, ) server = await coro # Starts listening await connected.wait() # Waits for the callback to fire (and finish) assert server is None self.logger.debug("Connection accepted.") @upper_half async def _do_connect(self, address: Union[str, Tuple[str, int]], ssl: Optional[SSLContext] = None) -> None: """ Acting as the transport client, initiate a connection to a server. :param address: Address to connect to; UNIX socket path or TCP address/port. :param ssl: SSL context to use, if any. :raise OSError: For stream-related errors. """ self.logger.debug("Connecting to %s ...", address) if isinstance(address, tuple): connect = asyncio.open_connection( address[0], address[1], ssl=ssl, limit=self._limit, ) else: connect = asyncio.open_unix_connection( path=address, ssl=ssl, limit=self._limit, ) self._reader, self._writer = await connect self.logger.debug("Connected.") @upper_half async def _establish_session(self) -> None: """ Establish a new session. Starts the readers/writer tasks; subclasses may perform their own negotiations here. The Runstate will be RUNNING upon successful conclusion. """ assert self.runstate == Runstate.CONNECTING self._outgoing = asyncio.Queue() reader_coro = self._bh_loop_forever(self._bh_recv_message, 'Reader') writer_coro = self._bh_loop_forever(self._bh_send_message, 'Writer') self._reader_task = create_task(reader_coro) self._writer_task = create_task(writer_coro) self._bh_tasks = asyncio.gather( self._reader_task, self._writer_task, ) self._set_state(Runstate.RUNNING) await asyncio.sleep(0) # Allow runstate_event to process @upper_half @bottom_half def _schedule_disconnect(self) -> None: """ Initiate a disconnect; idempotent. 
This method is used both in the upper-half as a direct consequence of `disconnect()`, and in the bottom-half in the case of unhandled exceptions in the reader/writer tasks. It can be invoked no matter what the `runstate` is. """ if not self._dc_task: self._set_state(Runstate.DISCONNECTING) self.logger.debug("Scheduling disconnect.") self._dc_task = create_task(self._bh_disconnect()) @upper_half async def _wait_disconnect(self) -> None: """ Waits for a previously scheduled disconnect to finish. This method will gather any bottom half exceptions and re-raise the one that occurred first; presuming it to be the root cause of any subsequent Exceptions. It is intended to be used in the upper half of the call chain. :raise Exception: Arbitrary exception re-raised on behalf of the reader/writer. """ assert self.runstate == Runstate.DISCONNECTING assert self._dc_task aws: List[Awaitable[object]] = [self._dc_task] if self._bh_tasks: aws.insert(0, self._bh_tasks) all_defined_tasks = asyncio.gather(*aws) # Ensure disconnect is done; Exception (if any) is not raised here: await asyncio.wait((self._dc_task,)) try: await all_defined_tasks # Raise Exceptions from the bottom half. finally: self._cleanup() self._set_state(Runstate.IDLE) @upper_half def _cleanup(self) -> None: """ Fully reset this object to a clean state and return to `IDLE`. """ def _paranoid_task_erase(task: _FutureT) -> Optional[_FutureT]: # Help to erase a task, ENSURING it is fully quiesced first. assert (task is None) or task.done() return None if (task and task.done()) else task assert self.runstate == Runstate.DISCONNECTING self._dc_task = _paranoid_task_erase(self._dc_task) self._reader_task = _paranoid_task_erase(self._reader_task) self._writer_task = _paranoid_task_erase(self._writer_task) self._bh_tasks = _paranoid_task_erase(self._bh_tasks) self._reader = None self._writer = None # NB: _runstate_changed cannot be cleared because we still need it to # send the final runstate changed event ...! 
    # ----------------------------
    # Section: Bottom Half methods
    # ----------------------------

    @bottom_half
    async def _bh_disconnect(self) -> None:
        """
        Disconnect and cancel all outstanding tasks.

        It is designed to be called from its task context,
        :py:obj:`~AsyncProtocol._dc_task`. By running in its own task,
        it is free to wait on any pending actions that may still need to
        occur in either the reader or writer tasks.
        """
        assert self.runstate == Runstate.DISCONNECTING

        def _done(task: Optional['asyncio.Future[Any]']) -> bool:
            return task is not None and task.done()

        # NB: We can't rely on _bh_tasks being done() here, it may not
        # yet have had a chance to run and gather itself.
        tasks = tuple(filter(None, (self._writer_task, self._reader_task)))
        # A task that already finished implies it died with an error.
        error_pathway = _done(self._reader_task) or _done(self._writer_task)

        try:
            # Try to flush the writer, if possible:
            if not error_pathway:
                await self._bh_flush_writer()
        except BaseException as err:
            error_pathway = True
            emsg = "Failed to flush the writer"
            self.logger.error("%s: %s", emsg, exception_summary(err))
            self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
            raise
        finally:
            # Cancel any still-running tasks:
            if self._writer_task is not None and not self._writer_task.done():
                self.logger.debug("Cancelling writer task.")
                self._writer_task.cancel()
            if self._reader_task is not None and not self._reader_task.done():
                self.logger.debug("Cancelling reader task.")
                self._reader_task.cancel()

            # Close out the tasks entirely (Won't raise):
            if tasks:
                self.logger.debug("Waiting for tasks to complete ...")
                await asyncio.wait(tasks)

            # Lastly, close the stream itself. (May raise):
            await self._bh_close_stream(error_pathway)
            self.logger.debug("Disconnected.")

    @bottom_half
    async def _bh_flush_writer(self) -> None:
        # Drain any not-yet-sent messages, then flush the stream itself.
        if not self._writer_task:
            return

        self.logger.debug("Draining the outbound queue ...")
        await self._outgoing.join()
        if self._writer is not None:
            self.logger.debug("Flushing the StreamWriter ...")
            await flush(self._writer)

    @bottom_half
    async def _bh_close_stream(self, error_pathway: bool = False) -> None:
        # NB: Closing the writer also implicitly closes the reader.
        if not self._writer:
            return

        if not is_closing(self._writer):
            self.logger.debug("Closing StreamWriter.")
            self._writer.close()

        self.logger.debug("Waiting for StreamWriter to close ...")
        try:
            await wait_closed(self._writer)
        except Exception:  # pylint: disable=broad-except
            # It's hard to tell if the Stream is already closed or
            # not. Even if one of the tasks has failed, it may have
            # failed for a higher-layered protocol reason. The
            # stream could still be open and perfectly fine.
            # I don't know how to discern its health here.

            if error_pathway:
                # We already know that *something* went wrong. Let's
                # just trust that the Exception we already have is the
                # better one to present to the user, even if we don't
                # genuinely *know* the relationship between the two.
                self.logger.debug(
                    "Discarding Exception from wait_closed:\n%s\n",
                    pretty_traceback(),
                )
            else:
                # Oops, this is a brand-new error!
                raise
        finally:
            self.logger.debug("StreamWriter closed.")

    @bottom_half
    async def _bh_loop_forever(self, async_fn: _TaskFN, name: str) -> None:
        """
        Run one of the bottom-half methods in a loop forever.

        If the bottom half ever raises any exception, schedule a
        disconnect that will terminate the entire loop.

        :param async_fn: The bottom-half method to run in a loop.
        :param name: The name of this task, used for logging.
        """
        try:
            while True:
                await async_fn()
        except asyncio.CancelledError:
            # We have been cancelled by _bh_disconnect, exit gracefully.
            self.logger.debug("Task.%s: cancelled.", name)
            return
        except BaseException as err:
            # EOF is an expected way for a session to end; log it quietly.
            self.logger.log(
                logging.INFO if isinstance(err, EOFError) else logging.ERROR,
                "Task.%s: %s",
                name, exception_summary(err)
            )
            self.logger.debug("Task.%s: failure:\n%s\n",
                              name, pretty_traceback())
            self._schedule_disconnect()
            raise
        finally:
            self.logger.debug("Task.%s: exiting.", name)

    @bottom_half
    async def _bh_send_message(self) -> None:
        """
        Wait for an outgoing message, then send it.

        Designed to be run in `_bh_loop_forever()`.
        """
        msg = await self._outgoing.get()
        try:
            await self._send(msg)
        finally:
            # task_done() even on failure, so _outgoing.join() can't hang.
            self._outgoing.task_done()

    @bottom_half
    async def _bh_recv_message(self) -> None:
        """
        Wait for an incoming message and call `_on_message` to route it.

        Designed to be run in `_bh_loop_forever()`.
        """
        msg = await self._recv()
        await self._on_message(msg)

    # --------------------
    # Section: Message I/O
    # --------------------

    @upper_half
    @bottom_half
    def _cb_outbound(self, msg: T) -> T:
        """
        Callback: outbound message hook.

        This is intended for subclasses to be able to add arbitrary
        hooks to filter or manipulate outgoing messages. The base
        implementation does nothing but log the message without any
        manipulation of the message.

        :param msg: raw outbound message
        :return: final outbound message
        """
        self.logger.debug("--> %s", str(msg))
        return msg

    @upper_half
    @bottom_half
    def _cb_inbound(self, msg: T) -> T:
        """
        Callback: inbound message hook.

        This is intended for subclasses to be able to add arbitrary
        hooks to filter or manipulate incoming messages. The base
        implementation does nothing but log the message without any
        manipulation of the message.

        This method does not "handle" incoming messages; it is a filter.
        The actual "endpoint" for incoming messages is `_on_message()`.

        :param msg: raw inbound message
        :return: processed inbound message
        """
        self.logger.debug("<-- %s", str(msg))
        return msg

    @upper_half
    @bottom_half
    async def _readline(self) -> bytes:
        """
        Wait for a newline from the incoming reader.

        This method is provided as a convenience for upper-layer
        protocols, as many are line-based.

        This method *may* return a sequence of bytes without a trailing
        newline if EOF occurs, but *some* bytes were received. In this
        case, the next call will raise `EOFError`. It is assumed that
        the layer 5 protocol will decide if there is anything meaningful
        to be done with a partial message.

        :raise OSError: For stream-related errors.
        :raise EOFError:
            If the reader stream is at EOF and there are no bytes to return.

        :return: bytes, including the newline.
        """
        assert self._reader is not None

        msg_bytes = await self._reader.readline()
        if not msg_bytes:
            if self._reader.at_eof():
                raise EOFError
        return msg_bytes

    @upper_half
    @bottom_half
    async def _do_recv(self) -> T:
        """
        Abstract: Read from the stream and return a message.

        Very low-level; intended to only be called by `_recv()`.
        """
        raise NotImplementedError

    @upper_half
    @bottom_half
    async def _recv(self) -> T:
        """
        Read an arbitrary protocol message.

        .. warning::
            This method is intended primarily for `_bh_recv_message()`
            to use in an asynchronous task loop. Using it outside of
            this loop will "steal" messages from the normal routing
            mechanism. It is safe to use prior to `_establish_session()`,
            but should not be used otherwise.

        This method uses `_do_recv()` to retrieve the raw message, and
        then transforms it using `_cb_inbound()`.

        :return: A single (filtered, processed) protocol message.
        """
        message = await self._do_recv()
        return self._cb_inbound(message)

    @upper_half
    @bottom_half
    def _do_send(self, msg: T) -> None:
        """
        Abstract: Write a message to the stream.

        Very low-level; intended to only be called by `_send()`.
        """
        raise NotImplementedError

    @upper_half
    @bottom_half
    async def _send(self, msg: T) -> None:
        """
        Send an arbitrary protocol message.

        This method will transform any outgoing messages according to
        `_cb_outbound()`.

        .. warning::
            Like `_recv()`, this method is intended to be called by the
            writer task loop that processes outgoing messages. Calling
            it directly may circumvent logic implemented by the caller
            meant to correlate outgoing and incoming messages.

        :raise OSError: For problems with the underlying stream.
        """
        msg = self._cb_outbound(msg)
        self._do_send(msg)

    @bottom_half
    async def _on_message(self, msg: T) -> None:
        """
        Called to handle the receipt of a new message.

        .. caution::
            This is executed from within the reader loop, so be advised
            that waiting on either the reader or writer task will lead
            to deadlock. Additionally, any unhandled exceptions will
            directly cause the loop to halt, so logic may be best-kept
            to a minimum if at all possible.

        :param msg: The incoming message, already logged/filtered.
        """
        # Nothing to do in the abstract case.
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/protocol.py
""" QEMU Monitor Protocol (QMP) development library & tooling. This package provides a fairly low-level class for communicating asynchronously with QMP protocol servers, as implemented by QEMU, the QEMU Guest Agent, and the QEMU Storage Daemon. `QMPClient` provides the main functionality of this package. All errors raised by this library dervive from `AQMPError`, see `aqmp.error` for additional detail. See `aqmp.events` for an in-depth tutorial on managing QMP events. """ # Copyright (C) 2020, 2021 John Snow for Red Hat, Inc. # # Authors: # John Snow <jsnow@redhat.com> # # Based on earlier work by Luiz Capitulino <lcapitulino@redhat.com>. # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. import logging import warnings from .error import AQMPError from .events import EventListener from .message import Message from .protocol import ConnectError, Runstate, StateError from .qmp_client import ExecInterruptedError, ExecuteError, QMPClient _WMSG = """ The Asynchronous QMP library is currently in development and its API should be considered highly fluid and subject to change. It should not be used by any other scripts checked into the QEMU tree. Proceed with caution! """ warnings.warn(_WMSG, FutureWarning) # Suppress logging unless an application engages it. logging.getLogger('qemu.aqmp').addHandler(logging.NullHandler()) # The order of these fields impact the Sphinx documentation order. __all__ = ( # Classes, most to least important 'QMPClient', 'Message', 'EventListener', 'Runstate', # Exceptions, most generic to most explicit 'AQMPError', 'StateError', 'ConnectError', 'ExecuteError', 'ExecInterruptedError', )
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/__init__.py
"""
QMP Message Format

This module provides the `Message` class, which represents a single QMP
message sent to or from the server.
"""

import json
from json import JSONDecodeError
from typing import (
    Dict,
    Iterator,
    Mapping,
    MutableMapping,
    Optional,
    Union,
)

from .error import ProtocolError


class Message(MutableMapping[str, object]):
    """
    Represents a single QMP protocol message.

    QMP uses JSON objects as its basic communicative unit; so this
    Python object is a :py:obj:`~collections.abc.MutableMapping`. It may
    be instantiated from either another mapping (like a `dict`), or from
    raw `bytes` that still need to be deserialized.

    Once instantiated, it may be treated like any other MutableMapping::

        >>> msg = Message(b'{"hello": "world"}')
        >>> assert msg['hello'] == 'world'
        >>> msg['id'] = 'foobar'
        >>> print(msg)
        {
          "hello": "world",
          "id": "foobar"
        }

    It can be converted to `bytes`::

        >>> msg = Message({"hello": "world"})
        >>> print(bytes(msg))
        b'{"hello":"world"}'

    Or back into a garden-variety `dict`::

        >>> dict(msg)
        {'hello': 'world'}

    :param value: Initial value, if any.
    :param eager:
        When `True`, attempt to serialize or deserialize the initial value
        immediately, so that conversion exceptions are raised during the
        call to ``__init__()``.
    """
    # pylint: disable=too-many-ancestors

    def __init__(self,
                 value: Union[bytes, Mapping[str, object]] = b'{}', *,
                 eager: bool = True):
        # Exactly one of _data (wire form) and _obj (dict form) is
        # guaranteed to be populated; the other is derived lazily and
        # cached. Mutation invalidates the cached wire form.
        self._data: Optional[bytes] = None
        self._obj: Optional[Dict[str, object]] = None

        if isinstance(value, bytes):
            self._data = value
            if eager:
                self._obj = self._deserialize(self._data)
        else:
            self._obj = dict(value)
            if eager:
                self._data = self._serialize(self._obj)

    # Methods necessary to implement the MutableMapping interface, see:
    # https://docs.python.org/3/library/collections.abc.html#collections.abc.MutableMapping

    # We get pop, popitem, clear, update, setdefault, __contains__,
    # keys, items, values, get, __eq__ and __ne__ for free.
    def __getitem__(self, key: str) -> object:
        return self._object[key]

    def __setitem__(self, key: str, value: object) -> None:
        self._object[key] = value
        # The cached wire form no longer matches; drop it.
        self._data = None

    def __delitem__(self, key: str) -> None:
        del self._object[key]
        # The cached wire form no longer matches; drop it.
        self._data = None

    def __iter__(self) -> Iterator[str]:
        return iter(self._object)

    def __len__(self) -> int:
        return len(self._object)

    # Dunder methods not related to MutableMapping:

    def __repr__(self) -> str:
        if self._obj is not None:
            return f"Message({self._object!r})"
        return f"Message({bytes(self)!r})"

    def __str__(self) -> str:
        """Pretty-printed representation of this QMP message."""
        return json.dumps(self._object, indent=2)

    def __bytes__(self) -> bytes:
        """bytes representing this QMP message."""
        if self._data is None:
            self._data = self._serialize(self._obj or {})
        return self._data

    # Conversion Methods

    @property
    def _object(self) -> Dict[str, object]:
        """
        A `dict` representing this QMP message.

        Generated on-demand, if required. This property is private
        because it returns an object that could be used to invalidate
        the internal state of the `Message` object.
        """
        if self._obj is None:
            self._obj = self._deserialize(self._data or b'{}')
        return self._obj

    @classmethod
    def _serialize(cls, value: object) -> bytes:
        """
        Serialize a JSON object as `bytes`.

        :raise ValueError: When the object cannot be serialized.
        :raise TypeError: When the object cannot be serialized.

        :return: `bytes` ready to be sent over the wire.
        """
        return json.dumps(value, separators=(',', ':')).encode('utf-8')

    @classmethod
    def _deserialize(cls, data: bytes) -> Dict[str, object]:
        """
        Deserialize JSON `bytes` into a native Python `dict`.

        :raise DeserializationError:
            If JSON deserialization fails for any reason.
        :raise UnexpectedTypeError:
            If the data does not represent a JSON object.

        :return: A `dict` representing this QMP message.
        """
        try:
            obj = json.loads(data)
        except JSONDecodeError as err:
            emsg = "Failed to deserialize QMP message."
            raise DeserializationError(emsg, data) from err
        if not isinstance(obj, dict):
            raise UnexpectedTypeError(
                "QMP message is not a JSON object.",
                obj
            )
        return obj


class DeserializationError(ProtocolError):
    """
    A QMP message was not understood as JSON.

    When this Exception is raised, ``__cause__`` will be set to the
    `json.JSONDecodeError` Exception, which can be interrogated for
    further details.

    :param error_message: Human-readable string describing the error.
    :param raw: The raw `bytes` that prompted the failure.
    """
    def __init__(self, error_message: str, raw: bytes):
        super().__init__(error_message)
        #: The raw `bytes` that were not understood as JSON.
        self.raw: bytes = raw

    def __str__(self) -> str:
        return "\n".join([
            super().__str__(),
            f"  raw bytes were: {str(self.raw)}",
        ])


class UnexpectedTypeError(ProtocolError):
    """
    A QMP message was JSON, but not a JSON object.

    :param error_message: Human-readable string describing the error.
    :param value: The deserialized JSON value that wasn't an object.
    """
    def __init__(self, error_message: str, value: object):
        super().__init__(error_message)
        #: The JSON value that was expected to be an object.
        self.value: object = value

    def __str__(self) -> str:
        strval = json.dumps(self.value, indent=2)
        return "\n".join([
            super().__str__(),
            f"  json value was: {strval}",
        ])
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/message.py
# Copyright (c) 2021
#
# Authors:
#  Niteesh Babu G S <niteesh.gs@gmail.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.
"""
AQMP TUI

AQMP TUI is an asynchronous interface built on top the of the AQMP library.
It is the successor of QMP-shell and is brought in as a replacement for it.

Example Usage: aqmp-tui <SOCKET | TCP IP:PORT>
Full Usage: aqmp-tui --help
"""

import argparse
import asyncio
import json
import logging
from logging import Handler, LogRecord
import signal
from typing import (
    List,
    Optional,
    Tuple,
    Type,
    Union,
    cast,
)

from pygments import lexers
from pygments import token as Token
import urwid
import urwid_readline

from ..qmp import QEMUMonitorProtocol, QMPBadPortError
from .error import ProtocolError
from .message import DeserializationError, Message, UnexpectedTypeError
from .protocol import ConnectError, Runstate
from .qmp_client import ExecInterruptedError, QMPClient
from .util import create_task, pretty_traceback


# The name of the signal that is used to update the history list
UPDATE_MSG: str = 'UPDATE_MSG'


palette = [
    (Token.Punctuation, '', '', '', 'h15,bold', 'g7'),
    (Token.Text, '', '', '', '', 'g7'),
    (Token.Name.Tag, '', '', '', 'bold,#f88', 'g7'),
    (Token.Literal.Number.Integer, '', '', '', '#fa0', 'g7'),
    (Token.Literal.String.Double, '', '', '', '#6f6', 'g7'),
    (Token.Keyword.Constant, '', '', '', '#6af', 'g7'),
    ('DEBUG', '', '', '', '#ddf', 'g7'),
    ('INFO', '', '', '', 'g100', 'g7'),
    ('WARNING', '', '', '', '#ff6', 'g7'),
    ('ERROR', '', '', '', '#a00', 'g7'),
    ('CRITICAL', '', '', '', '#a00', 'g7'),
    ('background', '', 'black', '', '', 'g7'),
]


def format_json(msg: str) -> str:
    """
    Formats valid/invalid multi-line JSON message into a single-line message.

    Formatting is first tried using the standard json module. If that
    fails due to a decoding error then a simple string manipulation is
    done to achieve a single line JSON string.

    Converting into single line is more aesthetically pleasing when looking
    along with error messages.

    Eg:
    Input:
          [ 1,
            true,
            3 ]
    The above input is not a valid QMP message and produces the following
    error "QMP message is not a JSON object."
    When displaying this in TUI in multiline mode we get

        [ 1,
          true,
          3 ]: QMP message is not a JSON object.

    whereas in singleline mode we get the following

        [1, true, 3]: QMP message is not a JSON object.

    The single line mode is more aesthetically pleasing.

    :param msg:
        The message to formatted into single line.

    :return: Formatted singleline message.
    """
    try:
        msg = json.loads(msg)
        return str(json.dumps(msg))
    except json.decoder.JSONDecodeError:
        # Not valid JSON: just collapse whitespace by hand.
        msg = msg.replace('\n', '')
        words = msg.split(' ')
        words = list(filter(None, words))
        return ' '.join(words)


def has_handler_type(logger: logging.Logger,
                     handler_type: Type[Handler]) -> bool:
    """
    The Logger class has no interface to check if a certain type of
    handler is installed or not. So we provide an interface to do so.

    :param logger:
        Logger object
    :param handler_type:
        The type of the handler to be checked.

    :return: returns True if handler of type `handler_type`.
    """
    for handler in logger.handlers:
        if isinstance(handler, handler_type):
            return True
    return False


class App(QMPClient):
    """
    Implements the AQMP TUI.

    Initializes the widgets and starts the urwid event loop.

    :param address:
        Address of the server to connect to.
    :param num_retries:
        The number of times to retry before stopping to reconnect.
    :param retry_delay:
        The delay(sec) before each retry
    """
    def __init__(self, address: Union[str, Tuple[str, int]],
                 num_retries: int, retry_delay: Optional[int]) -> None:
        urwid.register_signal(type(self), UPDATE_MSG)
        self.window = Window(self)
        self.address = address
        self.aloop: Optional[asyncio.AbstractEventLoop] = None
        self.num_retries = num_retries
        self.retry_delay = retry_delay if retry_delay else 2
        self.retry: bool = False
        self.exiting: bool = False
        super().__init__()

    def add_to_history(self, msg: str, level: Optional[str] = None) -> None:
        """
        Appends the msg to the history list.

        :param msg:
            The raw message to be appended in string type.
        """
        urwid.emit_signal(self, UPDATE_MSG, msg, level)

    def _cb_outbound(self, msg: Message) -> Message:
        """
        Callback: outbound message hook.

        Appends the outgoing messages to the history box.

        :param msg: raw outbound message.
        :return: final outbound message.
        """
        str_msg = str(msg)

        if not has_handler_type(logging.getLogger(), TUILogHandler):
            logging.debug('Request: %s', str_msg)
        self.add_to_history('<-- ' + str_msg)
        return msg

    def _cb_inbound(self, msg: Message) -> Message:
        """
        Callback: inbound message hook.

        Appends the incoming messages to the history box.

        :param msg: raw inbound message.
        :return: final inbound message.
        """
        str_msg = str(msg)

        if not has_handler_type(logging.getLogger(), TUILogHandler):
            # BUGFIX: this is the *inbound* hook; log replies as
            # responses, not requests (the outbound hook logs requests).
            logging.debug('Response: %s', str_msg)
        self.add_to_history('--> ' + str_msg)
        return msg

    async def _send_to_server(self, msg: Message) -> None:
        """
        This coroutine sends the message to the server.
        The message has to be pre-validated.

        :param msg:
            Pre-validated message to be to sent to the server.

        :raise Exception: When an unhandled exception is caught.
        """
        try:
            await self._raw(msg, assign_id='id' not in msg)
        except ExecInterruptedError as err:
            logging.info('Error server disconnected before reply %s',
                         str(err))
            self.add_to_history('Server disconnected before reply', 'ERROR')
        except Exception as err:
            logging.error('Exception from _send_to_server: %s', str(err))
            raise err

    def cb_send_to_server(self, raw_msg: str) -> None:
        """
        Validates and sends the message to the server.
        The raw string message is first converted into a Message object
        and is then sent to the server.

        :param raw_msg:
            The raw string message to be sent to the server.

        :raise Exception: When an unhandled exception is caught.
        """
        try:
            msg = Message(bytes(raw_msg, encoding='utf-8'))
            create_task(self._send_to_server(msg))
        except (DeserializationError, UnexpectedTypeError) as err:
            raw_msg = format_json(raw_msg)
            logging.info('Invalid message: %s', err.error_message)
            self.add_to_history(f'{raw_msg}: {err.error_message}', 'ERROR')

    def unhandled_input(self, key: str) -> None:
        """
        Handles keys which haven't been handled by the child widgets.

        :param key:
            Unhandled key
        """
        if key == 'esc':
            self.kill_app()

    def kill_app(self) -> None:
        """
        Initiates killing of app. A bridge between asynchronous and
        synchronous code.
        """
        create_task(self._kill_app())

    async def _kill_app(self) -> None:
        """
        This coroutine initiates the actual disconnect process and calls
        urwid.ExitMainLoop() to kill the TUI.

        :raise Exception: When an unhandled exception is caught.
        """
        self.exiting = True
        await self.disconnect()
        logging.debug('Disconnect finished. Exiting app')
        raise urwid.ExitMainLoop()

    async def disconnect(self) -> None:
        """
        Overrides the disconnect method to handle the errors locally.
        """
        try:
            await super().disconnect()
        except (OSError, EOFError) as err:
            logging.info('disconnect: %s', str(err))
            self.retry = True
        except ProtocolError as err:
            logging.info('disconnect: %s', str(err))
        except Exception as err:
            logging.error('disconnect: Unhandled exception %s', str(err))
            raise err

    def _set_status(self, msg: str) -> None:
        """
        Sets the message as the status.

        :param msg:
            The message to be displayed in the status bar.
        """
        self.window.footer.set_text(msg)

    def _get_formatted_address(self) -> str:
        """
        Returns a formatted version of the server's address.

        :return: formatted address
        """
        if isinstance(self.address, tuple):
            host, port = self.address
            addr = f'{host}:{port}'
        else:
            addr = f'{self.address}'
        return addr

    async def _initiate_connection(self) -> Optional[ConnectError]:
        """
        Tries connecting to a server a number of times with a delay between
        each try. If all retries failed then return the error faced during
        the last retry.

        :return: Error faced during last retry.
        """
        current_retries = 0
        err = None

        # initial try
        await self.connect_server()
        while self.retry and current_retries < self.num_retries:
            logging.info('Connection Failed, retrying in %d',
                         self.retry_delay)
            status = f'[Retry #{current_retries} ({self.retry_delay}s)]'
            self._set_status(status)

            await asyncio.sleep(self.retry_delay)

            err = await self.connect_server()
            current_retries += 1
        # If all retries failed report the last error
        if err:
            logging.info('All retries failed: %s', err)
            return err
        return None

    async def manage_connection(self) -> None:
        """
        Manage the connection based on the current run state.

        A reconnect is issued when the current state is IDLE and the number
        of retries is not exhausted.
        A disconnect is issued when the current state is DISCONNECTING.
        """
        while not self.exiting:
            if self.runstate == Runstate.IDLE:
                err = await self._initiate_connection()
                # If retry is still true then, we have exhausted all our
                # tries.
                if err:
                    self._set_status(f'[Error: {err.error_message}]')
                else:
                    addr = self._get_formatted_address()
                    self._set_status(f'[Connected {addr}]')
            elif self.runstate == Runstate.DISCONNECTING:
                self._set_status('[Disconnected]')
                await self.disconnect()
                # check if a retry is needed
                if self.runstate == Runstate.IDLE:
                    continue
            await self.runstate_changed()

    async def connect_server(self) -> Optional[ConnectError]:
        """
        Initiates a connection to the server at address `self.address`
        and in case of a failure, sets the status to the respective error.
        """
        try:
            await self.connect(self.address)
            self.retry = False
        except ConnectError as err:
            logging.info('connect_server: ConnectError %s', str(err))
            self.retry = True
            return err
        return None

    def run(self, debug: bool = False) -> None:
        """
        Starts the long running co-routines and the urwid event loop.

        :param debug:
            Enables/Disables asyncio event loop debugging
        """
        screen = urwid.raw_display.Screen()
        screen.set_terminal_properties(256)

        self.aloop = asyncio.get_event_loop()
        self.aloop.set_debug(debug)

        # Gracefully handle SIGTERM and SIGINT signals
        cancel_signals = [signal.SIGTERM, signal.SIGINT]
        for sig in cancel_signals:
            self.aloop.add_signal_handler(sig, self.kill_app)

        event_loop = urwid.AsyncioEventLoop(loop=self.aloop)
        main_loop = urwid.MainLoop(urwid.AttrMap(self.window, 'background'),
                                   unhandled_input=self.unhandled_input,
                                   screen=screen,
                                   palette=palette,
                                   handle_mouse=True,
                                   event_loop=event_loop)

        create_task(self.manage_connection(), self.aloop)
        try:
            main_loop.run()
        except Exception as err:
            logging.error('%s\n%s\n', str(err), pretty_traceback())
            raise err


class StatusBar(urwid.Text):
    """
    A simple statusbar modelled using the Text widget. The status can be
    set using the set_text function. All text set is aligned to right.

    :param text: Initial text to be displayed. Default is empty str.
    """
    def __init__(self, text: str = ''):
        super().__init__(text, align='right')


class Editor(urwid_readline.ReadlineEdit):
    """
    A simple editor modelled using the urwid_readline.ReadlineEdit widget.
    Mimics GNU readline shortcuts and provides history support.

    The readline shortcuts can be found below:
    https://github.com/rr-/urwid_readline#features

    Along with the readline features, this editor also has support for
    history. Pressing the 'up'/'down' switches between the prev/next messages
    available in the history.

    Currently there is no support to save the history to a file. The history
    of previous commands is lost on exit.

    :param parent: Reference to the TUI object.
    """
    def __init__(self, parent: App) -> None:
        super().__init__(caption='> ', multiline=True)
        self.parent = parent
        self.history: List[str] = []
        self.last_index: int = -1
        self.show_history: bool = False

    def keypress(self, size: Tuple[int, int], key: str) -> Optional[str]:
        """
        Handles the keypress on this widget.

        :param size:
            The current size of the widget.
        :param key:
            The key to be handled.

        :return: Unhandled key if any.
        """
        msg = self.get_edit_text()
        if key == 'up' and not msg:
            # Show the history when 'up arrow' is pressed with no input text.
            # NOTE: The show_history logic is necessary because in 'multiline'
            # mode (which we use) 'up arrow' is used to move between lines.
            if not self.history:
                return None
            self.show_history = True
            last_msg = self.history[self.last_index]
            self.set_edit_text(last_msg)
            self.edit_pos = len(last_msg)
        elif key == 'up' and self.show_history:
            self.last_index = max(self.last_index - 1, -len(self.history))
            self.set_edit_text(self.history[self.last_index])
            self.edit_pos = len(self.history[self.last_index])
        elif key == 'down' and self.show_history:
            if self.last_index == -1:
                self.set_edit_text('')
                self.show_history = False
            else:
                self.last_index += 1
                self.set_edit_text(self.history[self.last_index])
                self.edit_pos = len(self.history[self.last_index])
        elif key == 'meta enter':
            # When using multiline, enter inserts a new line into the editor
            # send the input to the server on alt + enter
            self.parent.cb_send_to_server(msg)
            self.history.append(msg)
            self.set_edit_text('')
            self.last_index = -1
            self.show_history = False
        else:
            self.show_history = False
            self.last_index = -1
            return cast(Optional[str], super().keypress(size, key))
        return None


class EditorWidget(urwid.Filler):
    """
    Wrapper around the editor widget.

    The Editor is a flow widget and has to wrapped inside a box widget.
    This class wraps the Editor inside filler widget.

    :param parent: Reference to the TUI object.
    """
    def __init__(self, parent: App) -> None:
        super().__init__(Editor(parent), valign='top')


class HistoryBox(urwid.ListBox):
    """
    This widget is modelled using the ListBox widget, contains the list of
    all messages both QMP messages and log messages to be shown in the TUI.

    The messages are urwid.Text widgets. On every append of a message, the
    focus is shifted to the last appended message.

    :param parent: Reference to the TUI object.
    """
    def __init__(self, parent: App) -> None:
        self.parent = parent
        self.history = urwid.SimpleFocusListWalker([])
        super().__init__(self.history)

    def add_to_history(self,
                       history: Union[str, List[Tuple[str, str]]]) -> None:
        """
        Appends a message to the list and set the focus to the last appended
        message.

        :param history:
            The history item(message/event) to be appended to the list.
        """
        self.history.append(urwid.Text(history))
        self.history.set_focus(len(self.history) - 1)

    def mouse_event(self, size: Tuple[int, int], _event: str, button: float,
                    _x: int, _y: int, focus: bool) -> None:
        # Unfortunately there are no urwid constants that represent the mouse
        # events.
        if button == 4:  # Scroll up event
            super().keypress(size, 'up')
        elif button == 5:  # Scroll down event
            super().keypress(size, 'down')


class HistoryWindow(urwid.Frame):
    """
    This window composes the HistoryBox and EditorWidget in a horizontal split.
    By default the first focus is given to the history box.

    :param parent: Reference to the TUI object.
    """
    def __init__(self, parent: App) -> None:
        self.parent = parent
        self.editor_widget = EditorWidget(parent)
        self.editor = urwid.LineBox(self.editor_widget)
        self.history = HistoryBox(parent)
        self.body = urwid.Pile([('weight', 80, self.history),
                                ('weight', 20, self.editor)])
        super().__init__(self.body)
        urwid.connect_signal(self.parent, UPDATE_MSG, self.cb_add_to_history)

    def cb_add_to_history(self, msg: str, level: Optional[str] = None) -> None:
        """
        Appends a message to the history box

        :param msg:
            The message to be appended to the history box.
        :param level:
            The log level of the message, if it is a log message.
        """
        formatted = []
        if level:
            msg = f'[{level}]: {msg}'
            formatted.append((level, msg))
        else:
            # Highlight JSON messages with pygments.
            lexer = lexers.JsonLexer()  # pylint: disable=no-member
            for token in lexer.get_tokens(msg):
                formatted.append(token)
        self.history.add_to_history(formatted)


class Window(urwid.Frame):
    """
    This window is the top most widget of the TUI and will contain other
    windows. Each child of this widget is responsible for displaying a
    specific functionality.

    :param parent: Reference to the TUI object.
    """
    def __init__(self, parent: App) -> None:
        self.parent = parent
        footer = StatusBar()
        body = HistoryWindow(parent)
        super().__init__(body, footer=footer)


class TUILogHandler(Handler):
    """
    This handler routes all the log messages to the TUI screen.
    It is installed to the root logger to so that the log message from all
    libraries begin used is routed to the screen.

    :param tui: Reference to the TUI object.
    """
    def __init__(self, tui: App) -> None:
        super().__init__()
        self.tui = tui

    def emit(self, record: LogRecord) -> None:
        """
        Emits a record to the TUI screen.

        Appends the log message to the TUI screen
        """
        level = record.levelname
        msg = record.getMessage()
        self.tui.add_to_history(msg, level)


def main() -> None:
    """
    Driver of the whole script, parses arguments, initialize the TUI and
    the logger.
    """
    parser = argparse.ArgumentParser(description='AQMP TUI')
    parser.add_argument('qmp_server', help='Address of the QMP server. '
                        'Format <UNIX socket path | TCP addr:port>')
    parser.add_argument('--num-retries', type=int, default=10,
                        help='Number of times to reconnect before giving up.')
    parser.add_argument('--retry-delay', type=int,
                        help='Time(s) to wait before next retry. '
                        'Default action is to wait 2s between each retry.')
    parser.add_argument('--log-file', help='The Log file name')
    parser.add_argument('--log-level', default='WARNING',
                        help='Log level <CRITICAL|ERROR|WARNING|INFO|DEBUG|>')
    parser.add_argument('--asyncio-debug', action='store_true',
                        help='Enable debug mode for asyncio loop. '
                        'Generates lot of output, makes TUI unusable when '
                        'logs are logged in the TUI. '
                        'Use only when logging to a file.')
    args = parser.parse_args()

    try:
        address = QEMUMonitorProtocol.parse_address(args.qmp_server)
    except QMPBadPortError as err:
        parser.error(str(err))

    app = App(address, args.num_retries, args.retry_delay)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.getLevelName(args.log_level))

    if args.log_file:
        root_logger.addHandler(logging.FileHandler(args.log_file))
    else:
        root_logger.addHandler(TUILogHandler(app))

    app.run(args.asyncio_debug)


if __name__ == '__main__':
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/aqmp/aqmp_tui.py
"""
QEMU Guest Agent Client

Usage:

Start QEMU with:

# qemu [...] -chardev socket,path=/tmp/qga.sock,server,wait=off,id=qga0 \
  -device virtio-serial \
  -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0

Run the script:

$ qemu-ga-client --address=/tmp/qga.sock <command> [args...]

or

$ export QGA_CLIENT_ADDRESS=/tmp/qga.sock
$ qemu-ga-client <command> [args...]

For example:

$ qemu-ga-client cat /etc/resolv.conf
# Generated by NetworkManager
nameserver 10.0.2.3
$ qemu-ga-client fsfreeze status
thawed
$ qemu-ga-client fsfreeze freeze
2 filesystems frozen

See also: https://wiki.qemu.org/Features/QAPI/GuestAgent
"""

# Copyright (C) 2012 Ryota Ozaki <ozaki.ryota@gmail.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.  See
# the COPYING file in the top-level directory.

import argparse
import base64
import errno
import os
import random
import sys
from typing import (
    Any,
    Callable,
    Dict,
    Optional,
    Sequence,
)

from qemu import qmp
from qemu.qmp import SocketAddrT


# This script has not seen many patches or careful attention in quite
# some time. If you would like to improve it, please review the design
# carefully and add docstrings at that point in time. Until then:
#
# pylint: disable=missing-docstring


class QemuGuestAgent(qmp.QEMUMonitorProtocol):
    # Dynamically turns attribute access (e.g. .ping()) into
    # 'guest-*' commands (e.g. 'guest-ping') on the wire.
    def __getattr__(self, name: str) -> Callable[..., Any]:
        def wrapper(**kwds: object) -> object:
            return self.command('guest-' + name.replace('_', '-'), **kwds)
        return wrapper


class QemuGuestAgentClient:
    def __init__(self, address: SocketAddrT):
        self.qga = QemuGuestAgent(address)
        self.qga.connect(negotiate=False)

    def sync(self, timeout: Optional[float] = 3) -> None:
        # Avoid being blocked forever
        if not self.ping(timeout):
            raise EnvironmentError('Agent seems not alive')
        uid = random.randint(0, (1 << 32) - 1)
        while True:
            ret = self.qga.sync(id=uid)
            if isinstance(ret, int) and int(ret) == uid:
                break

    def __file_read_all(self, handle: int) -> bytes:
        eof = False
        data = b''
        while not eof:
            ret = self.qga.file_read(handle=handle, count=1024)
            _data = base64.b64decode(ret['buf-b64'])
            data += _data
            eof = ret['eof']
        return data

    def read(self, path: str) -> bytes:
        handle = self.qga.file_open(path=path)
        try:
            data = self.__file_read_all(handle)
        finally:
            self.qga.file_close(handle=handle)
        return data

    def info(self) -> str:
        info = self.qga.info()

        msgs = []
        msgs.append('version: ' + info['version'])
        msgs.append('supported_commands:')

        enabled = [c['name'] for c in info['supported_commands']
                   if c['enabled']]
        msgs.append('\tenabled: ' + ', '.join(enabled))

        disabled = [c['name'] for c in info['supported_commands']
                    if not c['enabled']]
        msgs.append('\tdisabled: ' + ', '.join(disabled))

        return '\n'.join(msgs)

    @classmethod
    def __gen_ipv4_netmask(cls, prefixlen: int) -> str:
        # Expand a CIDR prefix length into dotted-quad notation.
        mask = int('1' * prefixlen + '0' * (32 - prefixlen), 2)
        return '.'.join([str(mask >> 24),
                         str((mask >> 16) & 0xff),
                         str((mask >> 8) & 0xff),
                         str(mask & 0xff)])

    def ifconfig(self) -> str:
        nifs = self.qga.network_get_interfaces()

        msgs = []
        for nif in nifs:
            msgs.append(nif['name'] + ':')
            if 'ip-addresses' in nif:
                for ipaddr in nif['ip-addresses']:
                    if ipaddr['ip-address-type'] == 'ipv4':
                        addr = ipaddr['ip-address']
                        mask = self.__gen_ipv4_netmask(int(ipaddr['prefix']))
                        msgs.append(f"\tinet {addr}  netmask {mask}")
                    elif ipaddr['ip-address-type'] == 'ipv6':
                        addr = ipaddr['ip-address']
                        prefix = ipaddr['prefix']
                        msgs.append(f"\tinet6 {addr}  prefixlen {prefix}")
            if nif['hardware-address'] != '00:00:00:00:00:00':
                msgs.append("\tether " + nif['hardware-address'])

        return '\n'.join(msgs)

    def ping(self, timeout: Optional[float]) -> bool:
        self.qga.settimeout(timeout)
        try:
            self.qga.ping()
        except TimeoutError:
            return False
        return True

    def fsfreeze(self, cmd: str) -> object:
        if cmd not in ['status', 'freeze', 'thaw']:
            raise Exception('Invalid command: ' + cmd)
        # Can be int (freeze, thaw) or GuestFsfreezeStatus (status)
        return getattr(self.qga, 'fsfreeze' + '_' + cmd)()

    def fstrim(self, minimum: int) -> Dict[str, object]:
        # returns GuestFilesystemTrimResponse
        ret = getattr(self.qga, 'fstrim')(minimum=minimum)
        assert isinstance(ret, dict)
        return ret

    def suspend(self, mode: str) -> None:
        if mode not in ['disk', 'ram', 'hybrid']:
            raise Exception('Invalid mode: ' + mode)

        try:
            getattr(self.qga, 'suspend' + '_' + mode)()
            # On error exception will raise
        except TimeoutError:
            # On success command will timed out
            return

    def shutdown(self, mode: str = 'powerdown') -> None:
        if mode not in ['powerdown', 'halt', 'reboot']:
            raise Exception('Invalid mode: ' + mode)

        try:
            self.qga.shutdown(mode=mode)
        except TimeoutError:
            pass


def _cmd_cat(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    if len(args) != 1:
        print('Invalid argument')
        print('Usage: cat <file>')
        sys.exit(1)
    print(client.read(args[0]))


def _cmd_fsfreeze(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    usage = 'Usage: fsfreeze status|freeze|thaw'
    if len(args) != 1:
        print('Invalid argument')
        print(usage)
        sys.exit(1)
    if args[0] not in ['status', 'freeze', 'thaw']:
        print('Invalid command: ' + args[0])
        print(usage)
        sys.exit(1)

    cmd = args[0]
    ret = client.fsfreeze(cmd)
    if cmd == 'status':
        print(ret)
        return

    assert isinstance(ret, int)
    verb = 'frozen' if cmd == 'freeze' else 'thawed'
    print(f"{ret:d} filesystems {verb}")


def _cmd_fstrim(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    if len(args) == 0:
        minimum = 0
    else:
        minimum = int(args[0])
    print(client.fstrim(minimum))


def _cmd_ifconfig(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    assert not args
    print(client.ifconfig())


def _cmd_info(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    assert not args
    print(client.info())


def _cmd_ping(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    timeout = 3.0 if len(args) == 0 else float(args[0])
    alive = client.ping(timeout)
    if not alive:
        # BUGFIX: report the effective timeout; the original printed
        # args[0], which raises IndexError when no argument was given.
        print("Not responded in %s sec" % timeout)
        sys.exit(1)


def _cmd_suspend(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    usage = 'Usage: suspend disk|ram|hybrid'
    if len(args) != 1:
        print('Invalid argument')
        print(usage)
        sys.exit(1)
    if args[0] not in ['disk', 'ram', 'hybrid']:
        print('Invalid command: ' + args[0])
        print(usage)
        sys.exit(1)
    client.suspend(args[0])


def _cmd_shutdown(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    assert not args
    client.shutdown()


_cmd_powerdown = _cmd_shutdown


def _cmd_halt(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    assert not args
    client.shutdown('halt')


def _cmd_reboot(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
    assert not args
    client.shutdown('reboot')


commands = [m.replace('_cmd_', '') for m in dir() if '_cmd_' in m]


def send_command(address: str, cmd: str, args: Sequence[str]) -> None:
    if not os.path.exists(address):
        print('%s not found' % address)
        sys.exit(1)

    if cmd not in commands:
        print('Invalid command: ' + cmd)
        print('Available commands: ' + ', '.join(commands))
        sys.exit(1)

    try:
        client = QemuGuestAgentClient(address)
    except OSError as err:
        print(err)
        if err.errno == errno.ECONNREFUSED:
            print('Hint: qemu is not running?')
        sys.exit(1)

    if cmd == 'fsfreeze' and args[0] == 'freeze':
        client.sync(60)
    elif cmd != 'ping':
        client.sync()

    globals()['_cmd_' + cmd](client, args)


def main() -> None:
    address = os.environ.get('QGA_CLIENT_ADDRESS')

    parser = argparse.ArgumentParser()
    parser.add_argument('--address', action='store',
                        default=address,
                        help='Specify a ip:port pair or a unix socket path')
    parser.add_argument('command', choices=commands)
    parser.add_argument('args', nargs='*')

    args = parser.parse_args()
    if args.address is None:
        parser.error('address is not specified')
        sys.exit(1)

    send_command(args.address, args.command, args.args)


if __name__ == '__main__':
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/qmp/qemu_ga_client.py
""" QOM Command abstractions. """ ## # Copyright John Snow 2020, for Red Hat, Inc. # Copyright IBM, Corp. 2011 # # Authors: # John Snow <jsnow@redhat.com> # Anthony Liguori <aliguori@amazon.com> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. # # Based on ./scripts/qmp/qom-[set|get|tree|list] ## import argparse import os import sys from typing import ( Any, Dict, List, Optional, Type, TypeVar, ) from . import QEMUMonitorProtocol, QMPError # The following is needed only for a type alias. Subparsers = argparse._SubParsersAction # pylint: disable=protected-access class ObjectPropertyInfo: """ Represents the return type from e.g. qom-list. """ def __init__(self, name: str, type_: str, description: Optional[str] = None, default_value: Optional[object] = None): self.name = name self.type = type_ self.description = description self.default_value = default_value @classmethod def make(cls, value: Dict[str, Any]) -> 'ObjectPropertyInfo': """ Build an ObjectPropertyInfo from a Dict with an unknown shape. """ assert value.keys() >= {'name', 'type'} assert value.keys() <= {'name', 'type', 'description', 'default-value'} return cls(value['name'], value['type'], value.get('description'), value.get('default-value')) @property def child(self) -> bool: """Is this property a child property?""" return self.type.startswith('child<') @property def link(self) -> bool: """Is this property a link property?""" return self.type.startswith('link<') CommandT = TypeVar('CommandT', bound='QOMCommand') class QOMCommand: """ Represents a QOM sub-command. :param args: Parsed arguments, as returned from parser.parse_args. 
""" name: str help: str def __init__(self, args: argparse.Namespace): if args.socket is None: raise QMPError("No QMP socket path or address given") self.qmp = QEMUMonitorProtocol( QEMUMonitorProtocol.parse_address(args.socket) ) self.qmp.connect() @classmethod def register(cls, subparsers: Subparsers) -> None: """ Register this command with the argument parser. :param subparsers: argparse subparsers object, from "add_subparsers". """ subparser = subparsers.add_parser(cls.name, help=cls.help, description=cls.help) cls.configure_parser(subparser) @classmethod def configure_parser(cls, parser: argparse.ArgumentParser) -> None: """ Configure a parser with this command's arguments. :param parser: argparse parser or subparser object. """ default_path = os.environ.get('QMP_SOCKET') parser.add_argument( '--socket', '-s', dest='socket', action='store', help='QMP socket path or address (addr:port).' ' May also be set via QMP_SOCKET environment variable.', default=default_path ) parser.set_defaults(cmd_class=cls) @classmethod def add_path_prop_arg(cls, parser: argparse.ArgumentParser) -> None: """ Add the <path>.<proptery> positional argument to this command. :param parser: The parser to add the argument to. """ parser.add_argument( 'path_prop', metavar='<path>.<property>', action='store', help="QOM path and property, separated by a period '.'" ) def run(self) -> int: """ Run this command. :return: 0 on success, 1 otherwise. """ raise NotImplementedError def qom_list(self, path: str) -> List[ObjectPropertyInfo]: """ :return: a strongly typed list from the 'qom-list' command. """ rsp = self.qmp.command('qom-list', path=path) # qom-list returns List[ObjectPropertyInfo] assert isinstance(rsp, list) return [ObjectPropertyInfo.make(x) for x in rsp] @classmethod def command_runner( cls: Type[CommandT], args: argparse.Namespace ) -> int: """ Run a fully-parsed subcommand, with error-handling for the CLI. :return: The return code from `run()`. 
""" try: cmd = cls(args) return cmd.run() except QMPError as err: print(f"{type(err).__name__}: {err!s}", file=sys.stderr) return -1 @classmethod def entry_point(cls) -> int: """ Build this command's parser, parse arguments, and run the command. :return: `run`'s return code. """ parser = argparse.ArgumentParser(description=cls.help) cls.configure_parser(parser) args = parser.parse_args() return cls.command_runner(args)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/qmp/qom_common.py
""" QEMU Object Model FUSE filesystem tool This script offers a simple FUSE filesystem within which the QOM tree may be browsed, queried and edited using traditional shell tooling. This script requires the 'fusepy' python package. usage: qom-fuse [-h] [--socket SOCKET] <mount> Mount a QOM tree as a FUSE filesystem positional arguments: <mount> Mount point optional arguments: -h, --help show this help message and exit --socket SOCKET, -s SOCKET QMP socket path or address (addr:port). May also be set via QMP_SOCKET environment variable. """ ## # Copyright IBM, Corp. 2012 # Copyright (C) 2020 Red Hat, Inc. # # Authors: # Anthony Liguori <aliguori@us.ibm.com> # Markus Armbruster <armbru@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. ## import argparse from errno import ENOENT, EPERM import stat import sys from typing import ( IO, Dict, Iterator, Mapping, Optional, Union, ) import fuse from fuse import FUSE, FuseOSError, Operations from . import QMPResponseError from .qom_common import QOMCommand fuse.fuse_python_api = (0, 2) class QOMFuse(QOMCommand, Operations): """ QOMFuse implements both fuse.Operations and QOMCommand. Operations implements the FS, and QOMCommand implements the CLI command. 
""" name = 'fuse' help = 'Mount a QOM tree as a FUSE filesystem' fuse: FUSE @classmethod def configure_parser(cls, parser: argparse.ArgumentParser) -> None: super().configure_parser(parser) parser.add_argument( 'mount', metavar='<mount>', action='store', help="Mount point", ) def __init__(self, args: argparse.Namespace): super().__init__(args) self.mount = args.mount self.ino_map: Dict[str, int] = {} self.ino_count = 1 def run(self) -> int: print(f"Mounting QOMFS to '{self.mount}'", file=sys.stderr) self.fuse = FUSE(self, self.mount, foreground=True) return 0 def get_ino(self, path: str) -> int: """Get an inode number for a given QOM path.""" if path in self.ino_map: return self.ino_map[path] self.ino_map[path] = self.ino_count self.ino_count += 1 return self.ino_map[path] def is_object(self, path: str) -> bool: """Is the given QOM path an object?""" try: self.qom_list(path) return True except QMPResponseError: return False def is_property(self, path: str) -> bool: """Is the given QOM path a property?""" path, prop = path.rsplit('/', 1) if path == '': path = '/' try: for item in self.qom_list(path): if item.name == prop: return True return False except QMPResponseError: return False def is_link(self, path: str) -> bool: """Is the given QOM path a link?""" path, prop = path.rsplit('/', 1) if path == '': path = '/' try: for item in self.qom_list(path): if item.name == prop and item.link: return True return False except QMPResponseError: return False def read(self, path: str, size: int, offset: int, fh: IO[bytes]) -> bytes: if not self.is_property(path): raise FuseOSError(ENOENT) path, prop = path.rsplit('/', 1) if path == '': path = '/' try: data = str(self.qmp.command('qom-get', path=path, property=prop)) data += '\n' # make values shell friendly except QMPResponseError as err: raise FuseOSError(EPERM) from err if offset > len(data): return b'' return bytes(data[offset:][:size], encoding='utf-8') def readlink(self, path: str) -> Union[bool, str]: if not 
self.is_link(path): return False path, prop = path.rsplit('/', 1) prefix = '/'.join(['..'] * (len(path.split('/')) - 1)) return prefix + str(self.qmp.command('qom-get', path=path, property=prop)) def getattr(self, path: str, fh: Optional[IO[bytes]] = None) -> Mapping[str, object]: if self.is_link(path): value = { 'st_mode': 0o755 | stat.S_IFLNK, 'st_ino': self.get_ino(path), 'st_dev': 0, 'st_nlink': 2, 'st_uid': 1000, 'st_gid': 1000, 'st_size': 4096, 'st_atime': 0, 'st_mtime': 0, 'st_ctime': 0 } elif self.is_object(path): value = { 'st_mode': 0o755 | stat.S_IFDIR, 'st_ino': self.get_ino(path), 'st_dev': 0, 'st_nlink': 2, 'st_uid': 1000, 'st_gid': 1000, 'st_size': 4096, 'st_atime': 0, 'st_mtime': 0, 'st_ctime': 0 } elif self.is_property(path): value = { 'st_mode': 0o644 | stat.S_IFREG, 'st_ino': self.get_ino(path), 'st_dev': 0, 'st_nlink': 1, 'st_uid': 1000, 'st_gid': 1000, 'st_size': 4096, 'st_atime': 0, 'st_mtime': 0, 'st_ctime': 0 } else: raise FuseOSError(ENOENT) return value def readdir(self, path: str, fh: IO[bytes]) -> Iterator[str]: yield '.' yield '..' for item in self.qom_list(path): yield item.name
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/qmp/qom_fuse.py
""" QEMU Monitor Protocol (QMP) development library & tooling. This package provides a fairly low-level class for communicating to QMP protocol servers, as implemented by QEMU, the QEMU Guest Agent, and the QEMU Storage Daemon. This library is not intended for production use. `QEMUMonitorProtocol` is the primary class of interest, and all errors raised derive from `QMPError`. """ # Copyright (C) 2009, 2010 Red Hat Inc. # # Authors: # Luiz Capitulino <lcapitulino@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. import errno import json import logging import socket import struct from types import TracebackType from typing import ( Any, Dict, List, Optional, TextIO, Tuple, Type, TypeVar, Union, cast, ) #: QMPMessage is an entire QMP message of any kind. QMPMessage = Dict[str, Any] #: QMPReturnValue is the 'return' value of a command. QMPReturnValue = object #: QMPObject is any object in a QMP message. QMPObject = Dict[str, object] # QMPMessage can be outgoing commands or incoming events/returns. # QMPReturnValue is usually a dict/json object, but due to QAPI's # 'returns-whitelist', it can actually be anything. # # {'return': {}} is a QMPMessage, # {} is the QMPReturnValue. 
InternetAddrT = Tuple[str, int] UnixAddrT = str SocketAddrT = Union[InternetAddrT, UnixAddrT] class QMPError(Exception): """ QMP base exception """ class QMPConnectError(QMPError): """ QMP connection exception """ class QMPCapabilitiesError(QMPError): """ QMP negotiate capabilities exception """ class QMPTimeoutError(QMPError): """ QMP timeout exception """ class QMPProtocolError(QMPError): """ QMP protocol error; unexpected response """ class QMPResponseError(QMPError): """ Represents erroneous QMP monitor reply """ def __init__(self, reply: QMPMessage): try: desc = reply['error']['desc'] except KeyError: desc = reply super().__init__(desc) self.reply = reply class QMPBadPortError(QMPError): """ Unable to parse socket address: Port was non-numerical. """ class QEMUMonitorProtocol: """ Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) and then allow to handle commands and events. """ #: Logger object for debugging messages logger = logging.getLogger('QMP') def __init__(self, address: SocketAddrT, server: bool = False, nickname: Optional[str] = None): """ Create a QEMUMonitorProtocol class. 
@param address: QEMU address, can be either a unix socket path (string) or a tuple in the form ( address, port ) for a TCP connection @param server: server mode listens on the socket (bool) @raise OSError on socket connection errors @note No connection is established, this is done by the connect() or accept() methods """ self.__events: List[QMPMessage] = [] self.__address = address self.__sock = self.__get_sock() self.__sockfile: Optional[TextIO] = None self._nickname = nickname if self._nickname: self.logger = logging.getLogger('QMP').getChild(self._nickname) if server: self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.__sock.bind(self.__address) self.__sock.listen(1) def __get_sock(self) -> socket.socket: if isinstance(self.__address, tuple): family = socket.AF_INET else: family = socket.AF_UNIX return socket.socket(family, socket.SOCK_STREAM) def __negotiate_capabilities(self) -> QMPMessage: greeting = self.__json_read() if greeting is None or "QMP" not in greeting: raise QMPConnectError # Greeting seems ok, negotiate capabilities resp = self.cmd('qmp_capabilities') if resp and "return" in resp: return greeting raise QMPCapabilitiesError def __json_read(self, only_event: bool = False) -> Optional[QMPMessage]: assert self.__sockfile is not None while True: data = self.__sockfile.readline() if not data: return None # By definition, any JSON received from QMP is a QMPMessage, # and we are asserting only at static analysis time that it # has a particular shape. resp: QMPMessage = json.loads(data) if 'event' in resp: self.logger.debug("<<< %s", resp) self.__events.append(resp) if not only_event: continue return resp def __get_events(self, wait: Union[bool, float] = False) -> None: """ Check for new events in the stream and cache them in __events. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. 
@raise QMPTimeoutError: If a timeout float is provided and the timeout period elapses. @raise QMPConnectError: If wait is True but no events could be retrieved or if some other error occurred. """ # Current timeout and blocking status current_timeout = self.__sock.gettimeout() # Check for new events regardless and pull them into the cache: self.__sock.settimeout(0) # i.e. setblocking(False) try: self.__json_read() except OSError as err: # EAGAIN: No data available; not critical if err.errno != errno.EAGAIN: raise finally: self.__sock.settimeout(current_timeout) # Wait for new events, if needed. # if wait is 0.0, this means "no wait" and is also implicitly false. if not self.__events and wait: if isinstance(wait, float): self.__sock.settimeout(wait) try: ret = self.__json_read(only_event=True) except socket.timeout as err: raise QMPTimeoutError("Timeout waiting for event") from err except Exception as err: msg = "Error while reading from socket" raise QMPConnectError(msg) from err finally: self.__sock.settimeout(current_timeout) if ret is None: raise QMPConnectError("Error while reading from socket") T = TypeVar('T') def __enter__(self: T) -> T: # Implement context manager enter function. return self def __exit__(self, # pylint: disable=duplicate-code # see https://github.com/PyCQA/pylint/issues/3619 exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: # Implement context manager exit function. self.close() @classmethod def parse_address(cls, address: str) -> SocketAddrT: """ Parse a string into a QMP address. Figure out if the argument is in the port:host form. If it's not, it's probably a file path. """ components = address.split(':') if len(components) == 2: try: port = int(components[1]) except ValueError: msg = f"Bad port: '{components[1]}' in '{address}'." raise QMPBadPortError(msg) from None return (components[0], port) # Treat as filepath. 
return address def connect(self, negotiate: bool = True) -> Optional[QMPMessage]: """ Connect to the QMP Monitor and perform capabilities negotiation. @return QMP greeting dict, or None if negotiate is false @raise OSError on socket connection errors @raise QMPConnectError if the greeting is not received @raise QMPCapabilitiesError if fails to negotiate capabilities """ self.__sock.connect(self.__address) self.__sockfile = self.__sock.makefile(mode='r') if negotiate: return self.__negotiate_capabilities() return None def accept(self, timeout: Optional[float] = 15.0) -> QMPMessage: """ Await connection from QMP Monitor and perform capabilities negotiation. @param timeout: timeout in seconds (nonnegative float number, or None). The value passed will set the behavior of the underneath QMP socket as described in [1]. Default value is set to 15.0. @return QMP greeting dict @raise OSError on socket connection errors @raise QMPConnectError if the greeting is not received @raise QMPCapabilitiesError if fails to negotiate capabilities [1] https://docs.python.org/3/library/socket.html#socket.socket.settimeout """ self.__sock.settimeout(timeout) self.__sock, _ = self.__sock.accept() self.__sockfile = self.__sock.makefile(mode='r') return self.__negotiate_capabilities() def cmd_obj(self, qmp_cmd: QMPMessage) -> QMPMessage: """ Send a QMP command to the QMP Monitor. @param qmp_cmd: QMP command to be sent as a Python dict @return QMP response as a Python dict """ self.logger.debug(">>> %s", qmp_cmd) self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8')) resp = self.__json_read() if resp is None: raise QMPConnectError("Unexpected empty reply from server") self.logger.debug("<<< %s", resp) return resp def cmd(self, name: str, args: Optional[Dict[str, object]] = None, cmd_id: Optional[object] = None) -> QMPMessage: """ Build a QMP command and send it to the QMP Monitor. 
@param name: command name (string) @param args: command arguments (dict) @param cmd_id: command id (dict, list, string or int) """ qmp_cmd: QMPMessage = {'execute': name} if args: qmp_cmd['arguments'] = args if cmd_id: qmp_cmd['id'] = cmd_id return self.cmd_obj(qmp_cmd) def command(self, cmd: str, **kwds: object) -> QMPReturnValue: """ Build and send a QMP command to the monitor, report errors if any """ ret = self.cmd(cmd, kwds) if 'error' in ret: raise QMPResponseError(ret) if 'return' not in ret: raise QMPProtocolError( "'return' key not found in QMP response '{}'".format(str(ret)) ) return cast(QMPReturnValue, ret['return']) def pull_event(self, wait: Union[bool, float] = False) -> Optional[QMPMessage]: """ Pulls a single event. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. @raise QMPTimeoutError: If a timeout float is provided and the timeout period elapses. @raise QMPConnectError: If wait is True but no events could be retrieved or if some other error occurred. @return The first available QMP event, or None. """ self.__get_events(wait) if self.__events: return self.__events.pop(0) return None def get_events(self, wait: bool = False) -> List[QMPMessage]: """ Get a list of available QMP events and clear all pending events. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. @raise QMPTimeoutError: If a timeout float is provided and the timeout period elapses. @raise QMPConnectError: If wait is True but no events could be retrieved or if some other error occurred. @return The list of available QMP events. """ self.__get_events(wait) events = self.__events self.__events = [] return events def clear_events(self) -> None: """ Clear current list of pending events. """ self.__events = [] def close(self) -> None: """ Close the socket and socket file. 
""" if self.__sock: self.__sock.close() if self.__sockfile: self.__sockfile.close() def settimeout(self, timeout: Optional[float]) -> None: """ Set the socket timeout. @param timeout (float): timeout in seconds (non-zero), or None. @note This is a wrap around socket.settimeout @raise ValueError: if timeout was set to 0. """ if timeout == 0: msg = "timeout cannot be 0; this engages non-blocking mode." msg += " Use 'None' instead to disable timeouts." raise ValueError(msg) self.__sock.settimeout(timeout) def send_fd_scm(self, fd: int) -> None: """ Send a file descriptor to the remote via SCM_RIGHTS. """ if self.__sock.family != socket.AF_UNIX: raise RuntimeError("Can't use SCM_RIGHTS on non-AF_UNIX socket.") self.__sock.sendmsg( [b' '], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('@i', fd))] )
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/qmp/__init__.py
# # Copyright (C) 2009, 2010 Red Hat Inc. # # Authors: # Luiz Capitulino <lcapitulino@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # """ Low-level QEMU shell on top of QMP. usage: qmp-shell [-h] [-H] [-N] [-v] [-p] qmp_server positional arguments: qmp_server < UNIX socket path | TCP address:port > optional arguments: -h, --help show this help message and exit -H, --hmp Use HMP interface -N, --skip-negotiation Skip negotiate (for qemu-ga) -v, --verbose Verbose (echo commands sent and received) -p, --pretty Pretty-print JSON Start QEMU with: # qemu [...] -qmp unix:./qmp-sock,server Run the shell: $ qmp-shell ./qmp-sock Commands have the following format: < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] For example: (QEMU) device_add driver=e1000 id=net1 {'return': {}} (QEMU) key=value pairs also support Python or JSON object literal subset notations, without spaces. Dictionaries/objects {} are supported as are arrays []. example-command arg-name1={'key':'value','obj'={'prop':"value"}} Both JSON and Python formatting should work, including both styles of string literal quotes. Both paradigms of literal values should work, including null/true/false for JSON and None/True/False for Python. Transactions have the following multi-line format: transaction( action-name1 [ arg-name1=arg1 ] ... [arg-nameN=argN ] ... action-nameN [ arg-name1=arg1 ] ... [arg-nameN=argN ] ) One line transactions are also supported: transaction( action-name1 ... ) For example: (QEMU) transaction( TRANS> block-dirty-bitmap-add node=drive0 name=bitmap1 TRANS> block-dirty-bitmap-clear node=drive0 name=bitmap0 TRANS> ) {"return": {}} (QEMU) Use the -v and -p options to activate the verbose and pretty-print options, which will echo back the properly formatted JSON-compliant QMP that is being sent to QEMU, which is useful for debugging and documentation generation. 
""" import argparse import ast import json import logging import os import re import readline import sys from typing import ( Iterator, List, NoReturn, Optional, Sequence, ) from qemu import qmp from qemu.qmp import QMPMessage LOG = logging.getLogger(__name__) class QMPCompleter: """ QMPCompleter provides a readline library tab-complete behavior. """ # NB: Python 3.9+ will probably allow us to subclass list[str] directly, # but pylint as of today does not know that List[str] is simply 'list'. def __init__(self) -> None: self._matches: List[str] = [] def append(self, value: str) -> None: """Append a new valid completion to the list of possibilities.""" return self._matches.append(value) def complete(self, text: str, state: int) -> Optional[str]: """readline.set_completer() callback implementation.""" for cmd in self._matches: if cmd.startswith(text): if state == 0: return cmd state -= 1 return None class QMPShellError(qmp.QMPError): """ QMP Shell Base error class. """ class FuzzyJSON(ast.NodeTransformer): """ This extension of ast.NodeTransformer filters literal "true/false/null" values in a Python AST and replaces them by proper "True/False/None" values that Python can properly evaluate. """ @classmethod def visit_Name(cls, # pylint: disable=invalid-name node: ast.Name) -> ast.AST: """ Transform Name nodes with certain values into Constant (keyword) nodes. """ if node.id == 'true': return ast.Constant(value=True) if node.id == 'false': return ast.Constant(value=False) if node.id == 'null': return ast.Constant(value=None) return node class QMPShell(qmp.QEMUMonitorProtocol): """ QMPShell provides a basic readline-based QMP shell. :param address: Address of the QMP server. :param pretty: Pretty-print QMP messages. :param verbose: Echo outgoing QMP messages to console. 
""" def __init__(self, address: qmp.SocketAddrT, pretty: bool = False, verbose: bool = False): super().__init__(address) self._greeting: Optional[QMPMessage] = None self._completer = QMPCompleter() self._transmode = False self._actions: List[QMPMessage] = [] self._histfile = os.path.join(os.path.expanduser('~'), '.qmp-shell_history') self.pretty = pretty self.verbose = verbose def close(self) -> None: # Hook into context manager of parent to save shell history. self._save_history() super().close() def _fill_completion(self) -> None: cmds = self.cmd('query-commands') if 'error' in cmds: return for cmd in cmds['return']: self._completer.append(cmd['name']) def _completer_setup(self) -> None: self._completer = QMPCompleter() self._fill_completion() readline.set_history_length(1024) readline.set_completer(self._completer.complete) readline.parse_and_bind("tab: complete") # NB: default delimiters conflict with some command names # (eg. query-), clearing everything as it doesn't seem to matter readline.set_completer_delims('') try: readline.read_history_file(self._histfile) except FileNotFoundError: pass except IOError as err: msg = f"Failed to read history '{self._histfile}': {err!s}" LOG.warning(msg) def _save_history(self) -> None: try: readline.write_history_file(self._histfile) except IOError as err: msg = f"Failed to save history file '{self._histfile}': {err!s}" LOG.warning(msg) @classmethod def _parse_value(cls, val: str) -> object: try: return int(val) except ValueError: pass if val.lower() == 'true': return True if val.lower() == 'false': return False if val.startswith(('{', '[')): # Try first as pure JSON: try: return json.loads(val) except ValueError: pass # Try once again as FuzzyJSON: try: tree = ast.parse(val, mode='eval') transformed = FuzzyJSON().visit(tree) return ast.literal_eval(transformed) except (SyntaxError, ValueError): pass return val def _cli_expr(self, tokens: Sequence[str], parent: qmp.QMPObject) -> None: for arg in tokens: (key, sep, val) = 
arg.partition('=') if sep != '=': raise QMPShellError( f"Expected a key=value pair, got '{arg!s}'" ) value = self._parse_value(val) optpath = key.split('.') curpath = [] for path in optpath[:-1]: curpath.append(path) obj = parent.get(path, {}) if not isinstance(obj, dict): msg = 'Cannot use "{:s}" as both leaf and non-leaf key' raise QMPShellError(msg.format('.'.join(curpath))) parent[path] = obj parent = obj if optpath[-1] in parent: if isinstance(parent[optpath[-1]], dict): msg = 'Cannot use "{:s}" as both leaf and non-leaf key' raise QMPShellError(msg.format('.'.join(curpath))) raise QMPShellError(f'Cannot set "{key}" multiple times') parent[optpath[-1]] = value def _build_cmd(self, cmdline: str) -> Optional[QMPMessage]: """ Build a QMP input object from a user provided command-line in the following format: < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] """ argument_regex = r'''(?:[^\s"']|"(?:\\.|[^"])*"|'(?:\\.|[^'])*')+''' cmdargs = re.findall(argument_regex, cmdline) qmpcmd: QMPMessage # Transactional CLI entry: if cmdargs and cmdargs[0] == 'transaction(': self._transmode = True self._actions = [] cmdargs.pop(0) # Transactional CLI exit: if cmdargs and cmdargs[0] == ')' and self._transmode: self._transmode = False if len(cmdargs) > 1: msg = 'Unexpected input after close of Transaction sub-shell' raise QMPShellError(msg) qmpcmd = { 'execute': 'transaction', 'arguments': {'actions': self._actions} } return qmpcmd # No args, or no args remaining if not cmdargs: return None if self._transmode: # Parse and cache this Transactional Action finalize = False action = {'type': cmdargs[0], 'data': {}} if cmdargs[-1] == ')': cmdargs.pop(-1) finalize = True self._cli_expr(cmdargs[1:], action['data']) self._actions.append(action) return self._build_cmd(')') if finalize else None # Standard command: parse and return it to be executed. 
qmpcmd = {'execute': cmdargs[0], 'arguments': {}} self._cli_expr(cmdargs[1:], qmpcmd['arguments']) return qmpcmd def _print(self, qmp_message: object) -> None: jsobj = json.dumps(qmp_message, indent=4 if self.pretty else None, sort_keys=self.pretty) print(str(jsobj)) def _execute_cmd(self, cmdline: str) -> bool: try: qmpcmd = self._build_cmd(cmdline) except QMPShellError as err: print( f"Error while parsing command line: {err!s}\n" "command format: <command-name> " "[arg-name1=arg1] ... [arg-nameN=argN", file=sys.stderr ) return True # For transaction mode, we may have just cached the action: if qmpcmd is None: return True if self.verbose: self._print(qmpcmd) resp = self.cmd_obj(qmpcmd) if resp is None: print('Disconnected') return False self._print(resp) return True def connect(self, negotiate: bool = True) -> None: self._greeting = super().connect(negotiate) self._completer_setup() def show_banner(self, msg: str = 'Welcome to the QMP low-level shell!') -> None: """ Print to stdio a greeting, and the QEMU version if available. """ print(msg) if not self._greeting: print('Connected') return version = self._greeting['QMP']['version']['qemu'] print("Connected to QEMU {major}.{minor}.{micro}\n".format(**version)) @property def prompt(self) -> str: """ Return the current shell prompt, including a trailing space. """ if self._transmode: return 'TRANS> ' return '(QEMU) ' def read_exec_command(self) -> bool: """ Read and execute a command. @return True if execution was ok, return False if disconnected. """ try: cmdline = input(self.prompt) except EOFError: print() return False if cmdline == '': for event in self.get_events(): print(event) return True return self._execute_cmd(cmdline) def repl(self) -> Iterator[None]: """ Return an iterator that implements the REPL. """ self.show_banner() while self.read_exec_command(): yield self.close() class HMPShell(QMPShell): """ HMPShell provides a basic readline-based HMP shell, tunnelled via QMP. 
:param address: Address of the QMP server. :param pretty: Pretty-print QMP messages. :param verbose: Echo outgoing QMP messages to console. """ def __init__(self, address: qmp.SocketAddrT, pretty: bool = False, verbose: bool = False): super().__init__(address, pretty, verbose) self._cpu_index = 0 def _cmd_completion(self) -> None: for cmd in self._cmd_passthrough('help')['return'].split('\r\n'): if cmd and cmd[0] != '[' and cmd[0] != '\t': name = cmd.split()[0] # drop help text if name == 'info': continue if name.find('|') != -1: # Command in the form 'foobar|f' or 'f|foobar', take the # full name opt = name.split('|') if len(opt[0]) == 1: name = opt[1] else: name = opt[0] self._completer.append(name) self._completer.append('help ' + name) # help completion def _info_completion(self) -> None: for cmd in self._cmd_passthrough('info')['return'].split('\r\n'): if cmd: self._completer.append('info ' + cmd.split()[1]) def _other_completion(self) -> None: # special cases self._completer.append('help info') def _fill_completion(self) -> None: self._cmd_completion() self._info_completion() self._other_completion() def _cmd_passthrough(self, cmdline: str, cpu_index: int = 0) -> QMPMessage: return self.cmd_obj({ 'execute': 'human-monitor-command', 'arguments': { 'command-line': cmdline, 'cpu-index': cpu_index } }) def _execute_cmd(self, cmdline: str) -> bool: if cmdline.split()[0] == "cpu": # trap the cpu command, it requires special setting try: idx = int(cmdline.split()[1]) if 'return' not in self._cmd_passthrough('info version', idx): print('bad CPU index') return True self._cpu_index = idx except ValueError: print('cpu command takes an integer argument') return True resp = self._cmd_passthrough(cmdline, self._cpu_index) if resp is None: print('Disconnected') return False assert 'return' in resp or 'error' in resp if 'return' in resp: # Success if len(resp['return']) > 0: print(resp['return'], end=' ') else: # Error print('%s: %s' % (resp['error']['class'], 
resp['error']['desc'])) return True def show_banner(self, msg: str = 'Welcome to the HMP shell!') -> None: QMPShell.show_banner(self, msg) def die(msg: str) -> NoReturn: """Write an error to stderr, then exit with a return code of 1.""" sys.stderr.write('ERROR: %s\n' % msg) sys.exit(1) def main() -> None: """ qmp-shell entry point: parse command line arguments and start the REPL. """ parser = argparse.ArgumentParser() parser.add_argument('-H', '--hmp', action='store_true', help='Use HMP interface') parser.add_argument('-N', '--skip-negotiation', action='store_true', help='Skip negotiate (for qemu-ga)') parser.add_argument('-v', '--verbose', action='store_true', help='Verbose (echo commands sent and received)') parser.add_argument('-p', '--pretty', action='store_true', help='Pretty-print JSON') default_server = os.environ.get('QMP_SOCKET') parser.add_argument('qmp_server', action='store', default=default_server, help='< UNIX socket path | TCP address:port >') args = parser.parse_args() if args.qmp_server is None: parser.error("QMP socket or TCP address must be specified") shell_class = HMPShell if args.hmp else QMPShell try: address = shell_class.parse_address(args.qmp_server) except qmp.QMPBadPortError: parser.error(f"Bad port number: {args.qmp_server}") return # pycharm doesn't know error() is noreturn with shell_class(address, args.pretty, args.verbose) as qemu: try: qemu.connect(negotiate=not args.skip_negotiation) except qmp.QMPConnectError: die("Didn't get QMP greeting message") except qmp.QMPCapabilitiesError: die("Couldn't negotiate capabilities") except OSError as err: die(f"Couldn't connect to {args.qmp_server}: {err!s}") for _ in qemu.repl(): pass if __name__ == '__main__': main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/qmp/qmp_shell.py
""" QEMU Object Model testing tools. usage: qom [-h] {set,get,list,tree,fuse} ... Query and manipulate QOM data optional arguments: -h, --help show this help message and exit QOM commands: {set,get,list,tree,fuse} set Set a QOM property value get Get a QOM property value list List QOM properties at a given path tree Show QOM tree from a given path fuse Mount a QOM tree as a FUSE filesystem """ ## # Copyright John Snow 2020, for Red Hat, Inc. # Copyright IBM, Corp. 2011 # # Authors: # John Snow <jsnow@redhat.com> # Anthony Liguori <aliguori@amazon.com> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. # # Based on ./scripts/qmp/qom-[set|get|tree|list] ## import argparse from . import QMPResponseError from .qom_common import QOMCommand try: from .qom_fuse import QOMFuse except ModuleNotFoundError as _err: if _err.name != 'fuse': raise else: assert issubclass(QOMFuse, QOMCommand) class QOMSet(QOMCommand): """ QOM Command - Set a property to a given value. usage: qom-set [-h] [--socket SOCKET] <path>.<property> <value> Set a QOM property value positional arguments: <path>.<property> QOM path and property, separated by a period '.' <value> new QOM property value optional arguments: -h, --help show this help message and exit --socket SOCKET, -s SOCKET QMP socket path or address (addr:port). May also be set via QMP_SOCKET environment variable. 
""" name = 'set' help = 'Set a QOM property value' @classmethod def configure_parser(cls, parser: argparse.ArgumentParser) -> None: super().configure_parser(parser) cls.add_path_prop_arg(parser) parser.add_argument( 'value', metavar='<value>', action='store', help='new QOM property value' ) def __init__(self, args: argparse.Namespace): super().__init__(args) self.path, self.prop = args.path_prop.rsplit('.', 1) self.value = args.value def run(self) -> int: rsp = self.qmp.command( 'qom-set', path=self.path, property=self.prop, value=self.value ) print(rsp) return 0 class QOMGet(QOMCommand): """ QOM Command - Get a property's current value. usage: qom-get [-h] [--socket SOCKET] <path>.<property> Get a QOM property value positional arguments: <path>.<property> QOM path and property, separated by a period '.' optional arguments: -h, --help show this help message and exit --socket SOCKET, -s SOCKET QMP socket path or address (addr:port). May also be set via QMP_SOCKET environment variable. """ name = 'get' help = 'Get a QOM property value' @classmethod def configure_parser(cls, parser: argparse.ArgumentParser) -> None: super().configure_parser(parser) cls.add_path_prop_arg(parser) def __init__(self, args: argparse.Namespace): super().__init__(args) try: tmp = args.path_prop.rsplit('.', 1) except ValueError as err: raise ValueError('Invalid format for <path>.<property>') from err self.path = tmp[0] self.prop = tmp[1] def run(self) -> int: rsp = self.qmp.command( 'qom-get', path=self.path, property=self.prop ) if isinstance(rsp, dict): for key, value in rsp.items(): print(f"{key}: {value}") else: print(rsp) return 0 class QOMList(QOMCommand): """ QOM Command - List the properties at a given path. usage: qom-list [-h] [--socket SOCKET] <path> List QOM properties at a given path positional arguments: <path> QOM path optional arguments: -h, --help show this help message and exit --socket SOCKET, -s SOCKET QMP socket path or address (addr:port). 
May also be set via QMP_SOCKET environment variable. """ name = 'list' help = 'List QOM properties at a given path' @classmethod def configure_parser(cls, parser: argparse.ArgumentParser) -> None: super().configure_parser(parser) parser.add_argument( 'path', metavar='<path>', action='store', help='QOM path', ) def __init__(self, args: argparse.Namespace): super().__init__(args) self.path = args.path def run(self) -> int: rsp = self.qom_list(self.path) for item in rsp: if item.child: print(f"{item.name}/") elif item.link: print(f"@{item.name}/") else: print(item.name) return 0 class QOMTree(QOMCommand): """ QOM Command - Show the full tree below a given path. usage: qom-tree [-h] [--socket SOCKET] [<path>] Show QOM tree from a given path positional arguments: <path> QOM path optional arguments: -h, --help show this help message and exit --socket SOCKET, -s SOCKET QMP socket path or address (addr:port). May also be set via QMP_SOCKET environment variable. """ name = 'tree' help = 'Show QOM tree from a given path' @classmethod def configure_parser(cls, parser: argparse.ArgumentParser) -> None: super().configure_parser(parser) parser.add_argument( 'path', metavar='<path>', action='store', help='QOM path', nargs='?', default='/' ) def __init__(self, args: argparse.Namespace): super().__init__(args) self.path = args.path def _list_node(self, path: str) -> None: print(path) items = self.qom_list(path) for item in items: if item.child: continue try: rsp = self.qmp.command('qom-get', path=path, property=item.name) print(f" {item.name}: {rsp} ({item.type})") except QMPResponseError as err: print(f" {item.name}: <EXCEPTION: {err!s}> ({item.type})") print('') for item in items: if not item.child: continue if path == '/': path = '' self._list_node(f"{path}/{item.name}") def run(self) -> int: self._list_node(self.path) return 0 def main() -> int: """QOM script main entry point.""" parser = argparse.ArgumentParser( description='Query and manipulate QOM data' ) subparsers = 
parser.add_subparsers( title='QOM commands', dest='command' ) for command in QOMCommand.__subclasses__(): command.register(subparsers) args = parser.parse_args() if args.command is None: parser.error('Command not specified.') return 1 cmd_class = args.cmd_class assert isinstance(cmd_class, type(QOMCommand)) return cmd_class.command_runner(args)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/qmp/qom.py
""" QEMU accel module: This module provides utilities for discover and check the availability of accelerators. """ # Copyright (C) 2015-2016 Red Hat Inc. # Copyright (C) 2012 IBM Corp. # # Authors: # Fam Zheng <famz@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # import logging import os import subprocess from typing import List, Optional LOG = logging.getLogger(__name__) # Mapping host architecture to any additional architectures it can # support which often includes its 32 bit cousin. ADDITIONAL_ARCHES = { "x86_64": "i386", "aarch64": "armhf", "ppc64le": "ppc64", } def list_accel(qemu_bin: str) -> List[str]: """ List accelerators enabled in the QEMU binary. @param qemu_bin (str): path to the QEMU binary. @raise Exception: if failed to run ``qemu -accel help`` @return a list of accelerator names. """ if not qemu_bin: return [] try: out = subprocess.check_output([qemu_bin, '-accel', 'help'], universal_newlines=True) except: LOG.debug("Failed to get the list of accelerators in %s", qemu_bin) raise # Skip the first line which is the header. return [acc.strip() for acc in out.splitlines()[1:]] def kvm_available(target_arch: Optional[str] = None, qemu_bin: Optional[str] = None) -> bool: """ Check if KVM is available using the following heuristic: - Kernel module is present in the host; - Target and host arches don't mismatch; - KVM is enabled in the QEMU binary. @param target_arch (str): target architecture @param qemu_bin (str): path to the QEMU binary @return True if kvm is available, otherwise False. """ if not os.access("/dev/kvm", os.R_OK | os.W_OK): return False if target_arch: host_arch = os.uname()[4] if target_arch != host_arch: if target_arch != ADDITIONAL_ARCHES.get(host_arch): return False if qemu_bin and "kvm" not in list_accel(qemu_bin): return False return True def tcg_available(qemu_bin: str) -> bool: """ Check if TCG is available. 
@param qemu_bin (str): path to the QEMU binary """ return 'tcg' in list_accel(qemu_bin)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/utils/accel.py
""" QEMU development and testing utilities This package provides a small handful of utilities for performing various tasks not directly related to the launching of a VM. """ # Copyright (C) 2021 Red Hat Inc. # # Authors: # John Snow <jsnow@redhat.com> # Cleber Rosa <crosa@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # import re from typing import Optional # pylint: disable=import-error from .accel import kvm_available, list_accel, tcg_available __all__ = ( 'get_info_usernet_hostfwd_port', 'kvm_available', 'list_accel', 'tcg_available', ) def get_info_usernet_hostfwd_port(info_usernet_output: str) -> Optional[int]: """ Returns the port given to the hostfwd parameter via info usernet :param info_usernet_output: output generated by hmp command "info usernet" :return: the port number allocated by the hostfwd option """ for line in info_usernet_output.split('\r\n'): regex = r'TCP.HOST_FORWARD.*127\.0\.0\.1\s+(\d+)\s+10\.' match = re.search(regex, line) if match is not None: return int(match[1]) return None
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/utils/__init__.py
""" QEMU Console Socket Module: This python module implements a ConsoleSocket object, which can drain a socket and optionally dump the bytes to file. """ # Copyright 2020 Linaro # # Authors: # Robert Foley <robert.foley@linaro.org> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. # from collections import deque import socket import threading import time from typing import Deque, Optional class ConsoleSocket(socket.socket): """ ConsoleSocket represents a socket attached to a char device. Optionally (if drain==True), drains the socket and places the bytes into an in memory buffer for later processing. Optionally a file path can be passed in and we will also dump the characters to this file for debugging purposes. """ def __init__(self, address: str, file: Optional[str] = None, drain: bool = False): self._recv_timeout_sec = 300.0 self._sleep_time = 0.5 self._buffer: Deque[int] = deque() socket.socket.__init__(self, socket.AF_UNIX, socket.SOCK_STREAM) self.connect(address) self._logfile = None if file: # pylint: disable=consider-using-with self._logfile = open(file, "bw") self._open = True self._drain_thread = None if drain: self._drain_thread = self._thread_start() def __repr__(self) -> str: tmp = super().__repr__() tmp = tmp.rstrip(">") tmp = "%s, logfile=%s, drain_thread=%s>" % (tmp, self._logfile, self._drain_thread) return tmp def _drain_fn(self) -> None: """Drains the socket and runs while the socket is open.""" while self._open: try: self._drain_socket() except socket.timeout: # The socket is expected to timeout since we set a # short timeout to allow the thread to exit when # self._open is set to False. time.sleep(self._sleep_time) def _thread_start(self) -> threading.Thread: """Kick off a thread to drain the socket.""" # Configure socket to not block and timeout. # This allows our drain thread to not block # on recieve and exit smoothly. 
socket.socket.setblocking(self, False) socket.socket.settimeout(self, 1) drain_thread = threading.Thread(target=self._drain_fn) drain_thread.daemon = True drain_thread.start() return drain_thread def close(self) -> None: """Close the base object and wait for the thread to terminate""" if self._open: self._open = False if self._drain_thread is not None: thread, self._drain_thread = self._drain_thread, None thread.join() socket.socket.close(self) if self._logfile: self._logfile.close() self._logfile = None def _drain_socket(self) -> None: """process arriving characters into in memory _buffer""" data = socket.socket.recv(self, 1) if self._logfile: self._logfile.write(data) self._logfile.flush() self._buffer.extend(data) def recv(self, bufsize: int = 1, flags: int = 0) -> bytes: """Return chars from in memory buffer. Maintains the same API as socket.socket.recv. """ if self._drain_thread is None: # Not buffering the socket, pass thru to socket. return socket.socket.recv(self, bufsize, flags) assert not flags, "Cannot pass flags to recv() in drained mode" start_time = time.time() while len(self._buffer) < bufsize: time.sleep(self._sleep_time) elapsed_sec = time.time() - start_time if elapsed_sec > self._recv_timeout_sec: raise socket.timeout return bytes((self._buffer.popleft() for i in range(bufsize))) def setblocking(self, value: bool) -> None: """When not draining we pass thru to the socket, since when draining we control socket blocking. """ if self._drain_thread is None: socket.socket.setblocking(self, value) def settimeout(self, value: Optional[float]) -> None: """When not draining we pass thru to the socket, since when draining we control the timeout. """ if value is not None: self._recv_timeout_sec = value if self._drain_thread is None: socket.socket.settimeout(self, value)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/machine/console_socket.py
""" QEMU development and testing library. This library provides a few high-level classes for driving QEMU from a test suite, not intended for production use. | QEMUQtestProtocol: send/receive qtest messages. | QEMUMachine: Configure and Boot a QEMU VM | +-- QEMUQtestMachine: VM class, with a qtest socket. """ # Copyright (C) 2020-2021 John Snow for Red Hat Inc. # Copyright (C) 2015-2016 Red Hat Inc. # Copyright (C) 2012 IBM Corp. # # Authors: # John Snow <jsnow@redhat.com> # Fam Zheng <fam@euphon.net> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # # pylint: disable=import-error # see: https://github.com/PyCQA/pylint/issues/3624 # see: https://github.com/PyCQA/pylint/issues/3651 from .machine import QEMUMachine from .qtest import QEMUQtestMachine, QEMUQtestProtocol __all__ = ( 'QEMUMachine', 'QEMUQtestProtocol', 'QEMUQtestMachine', )
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/machine/__init__.py
""" QEMU machine module: The machine module primarily provides the QEMUMachine class, which provides facilities for managing the lifetime of a QEMU VM. """ # Copyright (C) 2015-2016 Red Hat Inc. # Copyright (C) 2012 IBM Corp. # # Authors: # Fam Zheng <famz@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # # Based on qmp.py. # import errno from itertools import chain import locale import logging import os import shutil import signal import socket import subprocess import tempfile from types import TracebackType from typing import ( Any, BinaryIO, Dict, List, Optional, Sequence, Tuple, Type, TypeVar, ) from qemu.qmp import ( # pylint: disable=import-error QEMUMonitorProtocol, QMPMessage, QMPReturnValue, SocketAddrT, ) from . import console_socket LOG = logging.getLogger(__name__) class QEMUMachineError(Exception): """ Exception called when an error in QEMUMachine happens. """ class QEMUMachineAddDeviceError(QEMUMachineError): """ Exception raised when a request to add a device can not be fulfilled The failures are caused by limitations, lack of information or conflicting requests on the QEMUMachine methods. This exception does not represent failures reported by the QEMU binary itself. """ class AbnormalShutdown(QEMUMachineError): """ Exception raised when a graceful shutdown was requested, but not performed. """ _T = TypeVar('_T', bound='QEMUMachine') class QEMUMachine: """ A QEMU VM. Use this object as a context manager to ensure the QEMU process terminates:: with VM(binary) as vm: ... 
# vm is guaranteed to be shut down here """ # pylint: disable=too-many-instance-attributes, too-many-public-methods def __init__(self, binary: str, args: Sequence[str] = (), wrapper: Sequence[str] = (), name: Optional[str] = None, base_temp_dir: str = "/var/tmp", monitor_address: Optional[SocketAddrT] = None, sock_dir: Optional[str] = None, drain_console: bool = False, console_log: Optional[str] = None, log_dir: Optional[str] = None, qmp_timer: Optional[float] = None): ''' Initialize a QEMUMachine @param binary: path to the qemu binary @param args: list of extra arguments @param wrapper: list of arguments used as prefix to qemu binary @param name: prefix for socket and log file names (default: qemu-PID) @param base_temp_dir: default location where temp files are created @param monitor_address: address for QMP monitor @param sock_dir: where to create socket (defaults to base_temp_dir) @param drain_console: (optional) True to drain console socket to buffer @param console_log: (optional) path to console log file @param log_dir: where to create and keep log files @param qmp_timer: (optional) default QMP socket timeout @note: Qemu process is not started until launch() is used. ''' # pylint: disable=too-many-arguments # Direct user configuration self._binary = binary self._args = list(args) self._wrapper = wrapper self._qmp_timer = qmp_timer self._name = name or "qemu-%d" % os.getpid() self._base_temp_dir = base_temp_dir self._sock_dir = sock_dir or self._base_temp_dir self._log_dir = log_dir if monitor_address is not None: self._monitor_address = monitor_address self._remove_monitor_sockfile = False else: self._monitor_address = os.path.join( self._sock_dir, f"{self._name}-monitor.sock" ) self._remove_monitor_sockfile = True self._console_log_path = console_log if self._console_log_path: # In order to log the console, buffering needs to be enabled. 
self._drain_console = True else: self._drain_console = drain_console # Runstate self._qemu_log_path: Optional[str] = None self._qemu_log_file: Optional[BinaryIO] = None self._popen: Optional['subprocess.Popen[bytes]'] = None self._events: List[QMPMessage] = [] self._iolog: Optional[str] = None self._qmp_set = True # Enable QMP monitor by default. self._qmp_connection: Optional[QEMUMonitorProtocol] = None self._qemu_full_args: Tuple[str, ...] = () self._temp_dir: Optional[str] = None self._launched = False self._machine: Optional[str] = None self._console_index = 0 self._console_set = False self._console_device_type: Optional[str] = None self._console_address = os.path.join( self._sock_dir, f"{self._name}-console.sock" ) self._console_socket: Optional[socket.socket] = None self._remove_files: List[str] = [] self._user_killed = False def __enter__(self: _T) -> _T: return self def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> None: self.shutdown() def add_monitor_null(self) -> None: """ This can be used to add an unused monitor instance. """ self._args.append('-monitor') self._args.append('null') def add_fd(self: _T, fd: int, fdset: int, opaque: str, opts: str = '') -> _T: """ Pass a file descriptor to the VM """ options = ['fd=%d' % fd, 'set=%d' % fdset, 'opaque=%s' % opaque] if opts: options.append(opts) # This did not exist before 3.4, but since then it is # mandatory for our purpose if hasattr(os, 'set_inheritable'): os.set_inheritable(fd, True) self._args.append('-add-fd') self._args.append(','.join(options)) return self def send_fd_scm(self, fd: Optional[int] = None, file_path: Optional[str] = None) -> int: """ Send an fd or file_path to the remote via SCM_RIGHTS. Exactly one of fd and file_path must be given. If it is file_path, the file will be opened read-only and the new file descriptor will be sent to the remote. 
""" if file_path is not None: assert fd is None with open(file_path, "rb") as passfile: fd = passfile.fileno() self._qmp.send_fd_scm(fd) else: assert fd is not None self._qmp.send_fd_scm(fd) return 0 @staticmethod def _remove_if_exists(path: str) -> None: """ Remove file object at path if it exists """ try: os.remove(path) except OSError as exception: if exception.errno == errno.ENOENT: return raise def is_running(self) -> bool: """Returns true if the VM is running.""" return self._popen is not None and self._popen.poll() is None @property def _subp(self) -> 'subprocess.Popen[bytes]': if self._popen is None: raise QEMUMachineError('Subprocess pipe not present') return self._popen def exitcode(self) -> Optional[int]: """Returns the exit code if possible, or None.""" if self._popen is None: return None return self._popen.poll() def get_pid(self) -> Optional[int]: """Returns the PID of the running process, or None.""" if not self.is_running(): return None return self._subp.pid def _load_io_log(self) -> None: # Assume that the output encoding of QEMU's terminal output is # defined by our locale. If indeterminate, allow open() to fall # back to the platform default. 
_, encoding = locale.getlocale() if self._qemu_log_path is not None: with open(self._qemu_log_path, "r", encoding=encoding) as iolog: self._iolog = iolog.read() @property def _base_args(self) -> List[str]: args = ['-display', 'none', '-vga', 'none'] if self._qmp_set: if isinstance(self._monitor_address, tuple): moncdev = "socket,id=mon,host={},port={}".format( *self._monitor_address ) else: moncdev = f"socket,id=mon,path={self._monitor_address}" args.extend(['-chardev', moncdev, '-mon', 'chardev=mon,mode=control']) if self._machine is not None: args.extend(['-machine', self._machine]) for _ in range(self._console_index): args.extend(['-serial', 'null']) if self._console_set: chardev = ('socket,id=console,path=%s,server=on,wait=off' % self._console_address) args.extend(['-chardev', chardev]) if self._console_device_type is None: args.extend(['-serial', 'chardev:console']) else: device = '%s,chardev=console' % self._console_device_type args.extend(['-device', device]) return args @property def args(self) -> List[str]: """Returns the list of arguments given to the QEMU binary.""" return self._args def _pre_launch(self) -> None: if self._console_set: self._remove_files.append(self._console_address) if self._qmp_set: if self._remove_monitor_sockfile: assert isinstance(self._monitor_address, str) self._remove_files.append(self._monitor_address) self._qmp_connection = QEMUMonitorProtocol( self._monitor_address, server=True, nickname=self._name ) # NOTE: Make sure any opened resources are *definitely* freed in # _post_shutdown()! 
# pylint: disable=consider-using-with self._qemu_log_path = os.path.join(self.log_dir, self._name + ".log") self._qemu_log_file = open(self._qemu_log_path, 'wb') def _post_launch(self) -> None: if self._qmp_connection: self._qmp.accept(self._qmp_timer) def _close_qemu_log_file(self) -> None: if self._qemu_log_file is not None: self._qemu_log_file.close() self._qemu_log_file = None def _post_shutdown(self) -> None: """ Called to cleanup the VM instance after the process has exited. May also be called after a failed launch. """ # Comprehensive reset for the failed launch case: self._early_cleanup() if self._qmp_connection: self._qmp.close() self._qmp_connection = None self._close_qemu_log_file() self._load_io_log() self._qemu_log_path = None if self._temp_dir is not None: shutil.rmtree(self._temp_dir) self._temp_dir = None while len(self._remove_files) > 0: self._remove_if_exists(self._remove_files.pop()) exitcode = self.exitcode() if (exitcode is not None and exitcode < 0 and not (self._user_killed and exitcode == -signal.SIGKILL)): msg = 'qemu received signal %i; command: "%s"' if self._qemu_full_args: command = ' '.join(self._qemu_full_args) else: command = '' LOG.warning(msg, -int(exitcode), command) self._user_killed = False self._launched = False def launch(self) -> None: """ Launch the VM and make sure we cleanup and expose the command line/output in case of exception """ if self._launched: raise QEMUMachineError('VM already launched') self._iolog = None self._qemu_full_args = () try: self._launch() self._launched = True except: self._post_shutdown() LOG.debug('Error launching VM') if self._qemu_full_args: LOG.debug('Command: %r', ' '.join(self._qemu_full_args)) if self._iolog: LOG.debug('Output: %r', self._iolog) raise def _launch(self) -> None: """ Launch the VM and establish a QMP connection """ self._pre_launch() self._qemu_full_args = tuple( chain(self._wrapper, [self._binary], self._base_args, self._args) ) LOG.debug('VM launch command: %r', ' 
'.join(self._qemu_full_args)) # Cleaning up of this subprocess is guaranteed by _do_shutdown. # pylint: disable=consider-using-with self._popen = subprocess.Popen(self._qemu_full_args, stdin=subprocess.DEVNULL, stdout=self._qemu_log_file, stderr=subprocess.STDOUT, shell=False, close_fds=False) self._post_launch() def _early_cleanup(self) -> None: """ Perform any cleanup that needs to happen before the VM exits. May be invoked by both soft and hard shutdown in failover scenarios. Called additionally by _post_shutdown for comprehensive cleanup. """ # If we keep the console socket open, we may deadlock waiting # for QEMU to exit, while QEMU is waiting for the socket to # become writeable. if self._console_socket is not None: self._console_socket.close() self._console_socket = None def _hard_shutdown(self) -> None: """ Perform early cleanup, kill the VM, and wait for it to terminate. :raise subprocess.Timeout: When timeout is exceeds 60 seconds waiting for the QEMU process to terminate. """ self._early_cleanup() self._subp.kill() self._subp.wait(timeout=60) def _soft_shutdown(self, timeout: Optional[int], has_quit: bool = False) -> None: """ Perform early cleanup, attempt to gracefully shut down the VM, and wait for it to terminate. :param timeout: Timeout in seconds for graceful shutdown. A value of None is an infinite wait. :param has_quit: When True, don't attempt to issue 'quit' QMP command :raise ConnectionReset: On QMP communication errors :raise subprocess.TimeoutExpired: When timeout is exceeded waiting for the QEMU process to terminate. """ self._early_cleanup() if self._qmp_connection: if not has_quit: # Might raise ConnectionReset self._qmp.cmd('quit') # May raise subprocess.TimeoutExpired self._subp.wait(timeout=timeout) def _do_shutdown(self, timeout: Optional[int], has_quit: bool = False) -> None: """ Attempt to shutdown the VM gracefully; fallback to a hard shutdown. :param timeout: Timeout in seconds for graceful shutdown. 
A value of None is an infinite wait. :param has_quit: When True, don't attempt to issue 'quit' QMP command :raise AbnormalShutdown: When the VM could not be shut down gracefully. The inner exception will likely be ConnectionReset or subprocess.TimeoutExpired. In rare cases, non-graceful termination may result in its own exceptions, likely subprocess.TimeoutExpired. """ try: self._soft_shutdown(timeout, has_quit) except Exception as exc: self._hard_shutdown() raise AbnormalShutdown("Could not perform graceful shutdown") \ from exc def shutdown(self, has_quit: bool = False, hard: bool = False, timeout: Optional[int] = 30) -> None: """ Terminate the VM (gracefully if possible) and perform cleanup. Cleanup will always be performed. If the VM has not yet been launched, or shutdown(), wait(), or kill() have already been called, this method does nothing. :param has_quit: When true, do not attempt to issue 'quit' QMP command. :param hard: When true, do not attempt graceful shutdown, and suppress the SIGKILL warning log message. :param timeout: Optional timeout in seconds for graceful shutdown. Default 30 seconds, A `None` value is an infinite wait. """ if not self._launched: return try: if hard: self._user_killed = True self._hard_shutdown() else: self._do_shutdown(timeout, has_quit) finally: self._post_shutdown() def kill(self) -> None: """ Terminate the VM forcefully, wait for it to exit, and perform cleanup. """ self.shutdown(hard=True) def wait(self, timeout: Optional[int] = 30) -> None: """ Wait for the VM to power off and perform post-shutdown cleanup. :param timeout: Optional timeout in seconds. Default 30 seconds. A value of `None` is an infinite wait. """ self.shutdown(has_quit=True, timeout=timeout) def set_qmp_monitor(self, enabled: bool = True) -> None: """ Set the QMP monitor. @param enabled: if False, qmp monitor options will be removed from the base arguments of the resulting QEMU command line. Default is True. .. note:: Call this function before launch(). 
""" self._qmp_set = enabled @property def _qmp(self) -> QEMUMonitorProtocol: if self._qmp_connection is None: raise QEMUMachineError("Attempt to access QMP with no connection") return self._qmp_connection @classmethod def _qmp_args(cls, conv_keys: bool, args: Dict[str, Any]) -> Dict[str, object]: if conv_keys: return {k.replace('_', '-'): v for k, v in args.items()} return args def qmp(self, cmd: str, args_dict: Optional[Dict[str, object]] = None, conv_keys: Optional[bool] = None, **args: Any) -> QMPMessage: """ Invoke a QMP command and return the response dict """ if args_dict is not None: assert not args assert conv_keys is None args = args_dict conv_keys = False if conv_keys is None: conv_keys = True qmp_args = self._qmp_args(conv_keys, args) return self._qmp.cmd(cmd, args=qmp_args) def command(self, cmd: str, conv_keys: bool = True, **args: Any) -> QMPReturnValue: """ Invoke a QMP command. On success return the response dict. On failure raise an exception. """ qmp_args = self._qmp_args(conv_keys, args) return self._qmp.command(cmd, **qmp_args) def get_qmp_event(self, wait: bool = False) -> Optional[QMPMessage]: """ Poll for one queued QMP events and return it """ if self._events: return self._events.pop(0) return self._qmp.pull_event(wait=wait) def get_qmp_events(self, wait: bool = False) -> List[QMPMessage]: """ Poll for queued QMP events and return a list of dicts """ events = self._qmp.get_events(wait=wait) events.extend(self._events) del self._events[:] return events @staticmethod def event_match(event: Any, match: Optional[Any]) -> bool: """ Check if an event matches optional match criteria. The match criteria takes the form of a matching subdict. The event is checked to be a superset of the subdict, recursively, with matching values whenever the subdict values are not None. This has a limitation that you cannot explicitly check for None values. Examples, with the subdict queries on the left: - None matches any object. 
- {"foo": None} matches {"foo": {"bar": 1}} - {"foo": None} matches {"foo": 5} - {"foo": {"abc": None}} does not match {"foo": {"bar": 1}} - {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}} """ if match is None: return True try: for key in match: if key in event: if not QEMUMachine.event_match(event[key], match[key]): return False else: return False return True except TypeError: # either match or event wasn't iterable (not a dict) return bool(match == event) def event_wait(self, name: str, timeout: float = 60.0, match: Optional[QMPMessage] = None) -> Optional[QMPMessage]: """ event_wait waits for and returns a named event from QMP with a timeout. name: The event to wait for. timeout: QEMUMonitorProtocol.pull_event timeout parameter. match: Optional match criteria. See event_match for details. """ return self.events_wait([(name, match)], timeout) def events_wait(self, events: Sequence[Tuple[str, Any]], timeout: float = 60.0) -> Optional[QMPMessage]: """ events_wait waits for and returns a single named event from QMP. In the case of multiple qualifying events, this function returns the first one. :param events: A sequence of (name, match_criteria) tuples. The match criteria are optional and may be None. See event_match for details. :param timeout: Optional timeout, in seconds. See QEMUMonitorProtocol.pull_event. :raise QMPTimeoutError: If timeout was non-zero and no matching events were found. :return: A QMP event matching the filter criteria. If timeout was 0 and no event matched, None. """ def _match(event: QMPMessage) -> bool: for name, match in events: if event['event'] == name and self.event_match(event, match): return True return False event: Optional[QMPMessage] # Search cached events for event in self._events: if _match(event): self._events.remove(event) return event # Poll for new events while True: event = self._qmp.pull_event(wait=timeout) if event is None: # NB: None is only returned when timeout is false-ish. 
# Timeouts raise QMPTimeoutError instead! break if _match(event): return event self._events.append(event) return None def get_log(self) -> Optional[str]: """ After self.shutdown or failed qemu execution, this returns the output of the qemu process. """ return self._iolog def add_args(self, *args: str) -> None: """ Adds to the list of extra arguments to be given to the QEMU binary """ self._args.extend(args) def set_machine(self, machine_type: str) -> None: """ Sets the machine type If set, the machine type will be added to the base arguments of the resulting QEMU command line. """ self._machine = machine_type def set_console(self, device_type: Optional[str] = None, console_index: int = 0) -> None: """ Sets the device type for a console device If set, the console device and a backing character device will be added to the base arguments of the resulting QEMU command line. This is a convenience method that will either use the provided device type, or default to a "-serial chardev:console" command line argument. The actual setting of command line arguments will be be done at machine launch time, as it depends on the temporary directory to be created. @param device_type: the device type, such as "isa-serial". If None is given (the default value) a "-serial chardev:console" command line argument will be used instead, resorting to the machine's default device type. @param console_index: the index of the console device to use. If not zero, the command line will create 'index - 1' consoles and connect them to the 'null' backing character device. 
""" self._console_set = True self._console_device_type = device_type self._console_index = console_index @property def console_socket(self) -> socket.socket: """ Returns a socket connected to the console """ if self._console_socket is None: self._console_socket = console_socket.ConsoleSocket( self._console_address, file=self._console_log_path, drain=self._drain_console) return self._console_socket @property def temp_dir(self) -> str: """ Returns a temporary directory to be used for this machine """ if self._temp_dir is None: self._temp_dir = tempfile.mkdtemp(prefix="qemu-machine-", dir=self._base_temp_dir) return self._temp_dir @property def log_dir(self) -> str: """ Returns a directory to be used for writing logs """ if self._log_dir is None: return self.temp_dir return self._log_dir
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/machine/machine.py
""" QEMU qtest library qtest offers the QEMUQtestProtocol and QEMUQTestMachine classes, which offer a connection to QEMU's qtest protocol socket, and a qtest-enabled subclass of QEMUMachine, respectively. """ # Copyright (C) 2015 Red Hat Inc. # # Authors: # Fam Zheng <famz@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. # # Based on qmp.py. # import os import socket from typing import ( List, Optional, Sequence, TextIO, ) from qemu.qmp import SocketAddrT # pylint: disable=import-error from .machine import QEMUMachine class QEMUQtestProtocol: """ QEMUQtestProtocol implements a connection to a qtest socket. :param address: QEMU address, can be either a unix socket path (string) or a tuple in the form ( address, port ) for a TCP connection :param server: server mode, listens on the socket (bool) :raise socket.error: on socket connection errors .. note:: No conection is estabalished by __init__(), this is done by the connect() or accept() methods. """ def __init__(self, address: SocketAddrT, server: bool = False): self._address = address self._sock = self._get_sock() self._sockfile: Optional[TextIO] = None if server: self._sock.bind(self._address) self._sock.listen(1) def _get_sock(self) -> socket.socket: if isinstance(self._address, tuple): family = socket.AF_INET else: family = socket.AF_UNIX return socket.socket(family, socket.SOCK_STREAM) def connect(self) -> None: """ Connect to the qtest socket. @raise socket.error on socket connection errors """ self._sock.connect(self._address) self._sockfile = self._sock.makefile(mode='r') def accept(self) -> None: """ Await connection from QEMU. @raise socket.error on socket connection errors """ self._sock, _ = self._sock.accept() self._sockfile = self._sock.makefile(mode='r') def cmd(self, qtest_cmd: str) -> str: """ Send a qtest command on the wire. 
@param qtest_cmd: qtest command text to be sent """ assert self._sockfile is not None self._sock.sendall((qtest_cmd + "\n").encode('utf-8')) resp = self._sockfile.readline() return resp def close(self) -> None: """ Close this socket. """ self._sock.close() if self._sockfile: self._sockfile.close() self._sockfile = None def settimeout(self, timeout: Optional[float]) -> None: """Set a timeout, in seconds.""" self._sock.settimeout(timeout) class QEMUQtestMachine(QEMUMachine): """ A QEMU VM, with a qtest socket available. """ def __init__(self, binary: str, args: Sequence[str] = (), wrapper: Sequence[str] = (), name: Optional[str] = None, base_temp_dir: str = "/var/tmp", sock_dir: Optional[str] = None, qmp_timer: Optional[float] = None): # pylint: disable=too-many-arguments if name is None: name = "qemu-%d" % os.getpid() if sock_dir is None: sock_dir = base_temp_dir super().__init__(binary, args, wrapper=wrapper, name=name, base_temp_dir=base_temp_dir, sock_dir=sock_dir, qmp_timer=qmp_timer) self._qtest: Optional[QEMUQtestProtocol] = None self._qtest_path = os.path.join(sock_dir, name + "-qtest.sock") @property def _base_args(self) -> List[str]: args = super()._base_args args.extend([ '-qtest', f"unix:path={self._qtest_path}", '-accel', 'qtest' ]) return args def _pre_launch(self) -> None: super()._pre_launch() self._qtest = QEMUQtestProtocol(self._qtest_path, server=True) def _post_launch(self) -> None: assert self._qtest is not None super()._post_launch() self._qtest.accept() def _post_shutdown(self) -> None: super()._post_shutdown() self._remove_if_exists(self._qtest_path) def qtest(self, cmd: str) -> str: """ Send a qtest command to the guest. :param cmd: qtest command to send :return: qtest server response """ if self._qtest is None: raise RuntimeError("qtest socket not available") return self._qtest.cmd(cmd)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/python/qemu/machine/qtest.py
#!/usr/bin/env python3

##
##  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
##  This program is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 2 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License
##  along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

##
## Generate the register and immediate operands for each instruction
##


def calculate_regid_reg(tag):
    """
    Build the implicit-register portion of an instruction's regid string.

    Returns (retstr, mapdict): retstr is one letter per implicit register
    used by the instruction (X.. read/write, S.. read, D.. write) and
    mapdict maps each letter back to the register name.
    """
    def letter_inc(x):
        return chr(ord(x) + 1)
    ordered_implregs = ['SP', 'FP', 'LR']
    srcdst_lett = 'X'
    src_lett = 'S'
    dst_lett = 'D'
    retstr = ""
    mapdict = {}
    for reg in ordered_implregs:
        # NOTE: only implicit *writes* are tracked here (reg_rd is never
        # set), so the read/read-write branches below are currently dead.
        reg_rd = 0
        reg_wr = 0
        if ('A_IMPLICIT_WRITES_' + reg) in hex_common.attribdict[tag]:
            reg_wr = 1
        if reg_rd and reg_wr:
            retstr += srcdst_lett
            mapdict[srcdst_lett] = reg
            srcdst_lett = letter_inc(srcdst_lett)
        elif reg_rd:
            retstr += src_lett
            mapdict[src_lett] = reg
            src_lett = letter_inc(src_lett)
        elif reg_wr:
            retstr += dst_lett
            mapdict[dst_lett] = reg
            dst_lett = letter_inc(dst_lett)
    return retstr, mapdict


def calculate_regid_letters(tag):
    """Return just the implicit-register letters for an instruction tag."""
    retstr, mapdict = calculate_regid_reg(tag)
    return retstr


def strip_reg_prefix(x):
    """Drop the UREG./MREG./GREG. namespace prefix from a register name."""
    y = x.replace('UREG.', '')
    y = y.replace('MREG.', '')
    return y.replace('GREG.', '')


def main():
    """
    Emit REGINFO(...) and IMMINFO(...) macro invocations for every
    instruction tag, describing its register and immediate operands.

    argv[1]: semantics file, argv[2]: attributes file, argv[3]: output.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    tagregs = hex_common.get_tagregs()
    tagimms = hex_common.get_tagimms()

    with open(sys.argv[3], 'w') as f:
        for tag in hex_common.tags:
            regs = tagregs[tag]
            rregs = []
            wregs = []
            regids = ""
            for regtype, regid, toss, numregs in regs:
                # A read-write regid appears in both lists.
                if hex_common.is_read(regid):
                    if regid[0] not in regids:
                        regids += regid[0]
                    rregs.append(regtype + regid + numregs)
                if hex_common.is_written(regid):
                    wregs.append(regtype + regid + numregs)
                    if regid[0] not in regids:
                        regids += regid[0]
            for attrib in hex_common.attribdict[tag]:
                # Registers implied by instruction attributes.
                # (Fix: these lookups must go through hex_common; a bare
                # `attribinfo` is not defined in this module.)
                if hex_common.attribinfo[attrib]['rreg']:
                    rregs.append(
                        strip_reg_prefix(hex_common.attribinfo[attrib]['rreg']))
                if hex_common.attribinfo[attrib]['wreg']:
                    wregs.append(
                        strip_reg_prefix(hex_common.attribinfo[attrib]['wreg']))
            regids += calculate_regid_letters(tag)
            f.write('REGINFO(%s,"%s",\t/*RD:*/\t"%s",\t/*WR:*/\t"%s")\n' %
                    (tag, regids, ",".join(rregs), ",".join(wregs)))

        for tag in hex_common.tags:
            imms = tagimms[tag]
            f.write('IMMINFO(%s' % tag)
            if not imms:
                # Placeholder pair when the instruction has no immediates.
                f.write(''','u',0,0,'U',0,0''')
            for sign, size, shamt in imms:
                if sign == 'r':
                    sign = 's'
                if not shamt:
                    shamt = "0"
                f.write(''','%s',%s,%s''' % (sign, size, shamt))
            if len(imms) == 1:
                # Pad to two immediate slots; `sign` is the (single) loop
                # value from above, so pick the opposite case for the pad.
                if sign.isupper():
                    myu = 'u'
                else:
                    myu = 'U'
                f.write(''','%s',0,0''' % myu)
            f.write(')\n')


if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_op_regs.py
#!/usr/bin/env python3

##
##  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
##  This program is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 2 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License
##  along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

##
## Helpers for gen_helper_function
##


def gen_decl_ea(f):
    """Declare the effective-address temporary used by loads/stores."""
    f.write(" uint32_t EA;\n")


def gen_helper_return_type(f, regtype, regid, regno):
    """Emit the C return type for a single (32-bit) result register."""
    if regno > 1:
        f.write(", ")
    f.write("int32_t")


def gen_helper_return_type_pair(f, regtype, regid, regno):
    """Emit the C return type for a register-pair (64-bit) result."""
    if regno > 1:
        f.write(", ")
    f.write("int64_t")


def gen_helper_arg(f, regtype, regid, regno):
    """Emit a 32-bit source-register parameter (old value, <reg>V)."""
    if regno > 0:
        f.write(", ")
    f.write("int32_t %s%sV" % (regtype, regid))


def gen_helper_arg_new(f, regtype, regid, regno):
    """Emit a 32-bit source-register parameter (new value, <reg>N)."""
    if regno >= 0:
        f.write(", ")
    f.write("int32_t %s%sN" % (regtype, regid))


def gen_helper_arg_pair(f, regtype, regid, regno):
    """Emit a 64-bit register-pair parameter."""
    if regno >= 0:
        f.write(", ")
    f.write("int64_t %s%sV" % (regtype, regid))


def gen_helper_arg_opn(f, regtype, regid, i, tag):
    """Emit the parameter for one source operand, dispatching on its kind."""
    if hex_common.is_pair(regid):
        gen_helper_arg_pair(f, regtype, regid, i)
    elif hex_common.is_single(regid):
        if hex_common.is_old_val(regtype, regid, tag):
            gen_helper_arg(f, regtype, regid, i)
        elif hex_common.is_new_val(regtype, regid, tag):
            gen_helper_arg_new(f, regtype, regid, i)
        else:
            # Fix: the previous diagnostic referenced toss/numregs, which
            # are not in scope here and would raise NameError.
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)


def gen_helper_arg_imm(f, immlett):
    """Emit an immediate parameter (always int32_t)."""
    f.write(", int32_t %s" % (hex_common.imm_name(immlett)))


def gen_helper_dest_decl(f, regtype, regid, regno, subfield=""):
    """Declare and zero-initialize a 32-bit destination variable."""
    f.write(" int32_t %s%sV%s = 0;\n" %
            (regtype, regid, subfield))


def gen_helper_dest_decl_pair(f, regtype, regid, regno, subfield=""):
    """Declare and zero-initialize a 64-bit destination variable."""
    f.write(" int64_t %s%sV%s = 0;\n" %
            (regtype, regid, subfield))


def gen_helper_dest_decl_opn(f, regtype, regid, i):
    """Declare the destination variable for one written operand."""
    if hex_common.is_pair(regid):
        gen_helper_dest_decl_pair(f, regtype, regid, i)
    elif hex_common.is_single(regid):
        gen_helper_dest_decl(f, regtype, regid, i)
    else:
        # Fix: drop out-of-scope toss/numregs from the diagnostic.
        print("Bad register parse: ", regtype, regid)


def gen_helper_return(f, regtype, regid, regno):
    """Emit the return statement for a 32-bit result."""
    f.write(" return %s%sV;\n" % (regtype, regid))


def gen_helper_return_pair(f, regtype, regid, regno):
    """Emit the return statement for a 64-bit result."""
    f.write(" return %s%sV;\n" % (regtype, regid))


def gen_helper_return_opn(f, regtype, regid, i):
    """Emit the return statement for the (single) written operand."""
    if hex_common.is_pair(regid):
        gen_helper_return_pair(f, regtype, regid, i)
    elif hex_common.is_single(regid):
        gen_helper_return(f, regtype, regid, i)
    else:
        # Fix: drop out-of-scope toss/numregs from the diagnostic.
        print("Bad register parse: ", regtype, regid)

##
## Generate the TCG code to call the helper
##     For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
##     We produce:
##       int32_t HELPER(A2_add)(CPUHexagonState *env, int32_t RsV, int32_t RtV)
##       {
##           uint32_t slot __attribute__(unused)) = 4;
##           int32_t RdV = 0;
##           { RdV=RsV+RtV;}
##           COUNT_HELPER(A2_add);
##           return RdV;
##       }
##


def gen_helper_function(f, tag, tagregs, tagimms):
    """Write the complete C helper function for one instruction tag."""
    regs = tagregs[tag]
    imms = tagimms[tag]

    numresults = 0
    numscalarresults = 0
    numscalarreadwrite = 0
    for regtype, regid, toss, numregs in regs:
        if hex_common.is_written(regid):
            numresults += 1
            if hex_common.is_scalar_reg(regtype):
                numscalarresults += 1
        if hex_common.is_readwrite(regid):
            if hex_common.is_scalar_reg(regtype):
                numscalarreadwrite += 1

    if numscalarresults > 1:
        ## The helper is bogus when there is more than one result
        f.write("void HELPER(%s)(CPUHexagonState *env) { BOGUS_HELPER(%s); }\n"
                % (tag, tag))
    else:
        ## The return type of the function is the type of the destination
        ## register
        i = 0
        for regtype, regid, toss, numregs in regs:
            if hex_common.is_written(regid):
                if hex_common.is_pair(regid):
                    gen_helper_return_type_pair(f, regtype, regid, i)
                elif hex_common.is_single(regid):
                    gen_helper_return_type(f, regtype, regid, i)
                else:
                    print("Bad register parse: ", regtype, regid, toss, numregs)
            i += 1

        if numscalarresults == 0:
            f.write("void")
        f.write(" HELPER(%s)(CPUHexagonState *env" % tag)
        i = 1
        ## Arguments to the helper function are the source regs and immediates
        for regtype, regid, toss, numregs in regs:
            if hex_common.is_read(regid):
                gen_helper_arg_opn(f, regtype, regid, i, tag)
                i += 1
        for immlett, bits, immshift in imms:
            gen_helper_arg_imm(f, immlett)
            i += 1
        if hex_common.need_slot(tag):
            if i > 0:
                f.write(", ")
            f.write("uint32_t slot")
            i += 1
        if hex_common.need_part1(tag):
            if i > 0:
                f.write(", ")
            f.write("uint32_t part1")
        f.write(")\n{\n")
        if not hex_common.need_slot(tag):
            # Helpers that ignore the slot still declare it so shared
            # semantics macros compile.
            f.write(" uint32_t slot __attribute__((unused)) = 4;\n")
        if hex_common.need_ea(tag):
            gen_decl_ea(f)
        ## Declare the return variable
        i = 0
        for regtype, regid, toss, numregs in regs:
            if hex_common.is_writeonly(regid):
                gen_helper_dest_decl_opn(f, regtype, regid, i)
            i += 1

        if 'A_FPOP' in hex_common.attribdict[tag]:
            f.write(' arch_fpop_start(env);\n')
        f.write(" %s\n" % hex_common.semdict[tag])
        if 'A_FPOP' in hex_common.attribdict[tag]:
            f.write(' arch_fpop_end(env);\n')

        ## Save/return the return variable
        for regtype, regid, toss, numregs in regs:
            if hex_common.is_written(regid):
                gen_helper_return_opn(f, regtype, regid, i)
        f.write("}\n\n")
        ## End of the helper definition


def main():
    """
    Generate helper function bodies for all non-skipped instructions.

    argv[1]: semantics, argv[2]: attribs, argv[3]: overrides, argv[4]: output.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    hex_common.read_overrides_file(sys.argv[3])
    hex_common.calculate_attribs()
    tagregs = hex_common.get_tagregs()
    tagimms = hex_common.get_tagimms()

    with open(sys.argv[4], 'w') as f:
        for tag in hex_common.tags:
            ## Skip the priv instructions
            if "A_PRIV" in hex_common.attribdict[tag]:
                continue
            ## Skip the guest instructions
            if "A_GUEST" in hex_common.attribdict[tag]:
                continue
            ## Skip the diag instructions
            if tag == "Y6_diag":
                continue
            if tag == "Y6_diag0":
                continue
            if tag == "Y6_diag1":
                continue
            ## Skip instructions with a hand-written TCG override
            if hex_common.skip_qemu_helper(tag):
                continue

            gen_helper_function(f, tag, tagregs, tagimms)


if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_helper_funcs.py
#!/usr/bin/env python3

##
##  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
##  This program is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 2 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License
##  along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

##
## Generate data for printing each instruction (format string + operands)
##


def regprinter(m):
    """
    re.sub callback: rewrite a register reference match into a printf-style
    format fragment ("%d" per register letter, plus "/%s" for system or
    control registers that get a symbolic name).
    """
    # Fix: local was named `str`, shadowing the builtin.
    fmt = m.group(1)
    fmt += ":".join(["%d"] * len(m.group(2)))
    fmt += m.group(3)
    if ('S' in m.group(1)) and (len(m.group(2)) == 1):
        fmt += "/%s"
    elif ('C' in m.group(1)) and (len(m.group(2)) == 1):
        fmt += "/%s"
    return fmt


def spacify(s):
    """
    Insert spaces around top-level assignment operators in a behavior
    string, leaving operators inside parentheses (and pre-spaced ones)
    untouched.
    """
    # Regular expression that matches any operator that contains '=' character:
    opswithequal_re = '[-+^&|!<>=]?='
    # Regular expression that matches any assignment operator.
    assignment_re = '[-+^&|]?='

    # Out of the operators that contain the = sign, if the operator is also an
    # assignment, spaces will be added around it, unless it's enclosed within
    # parentheses, or spaces are already present.

    equals = re.compile(opswithequal_re)
    assign = re.compile(assignment_re)

    slen = len(s)
    paren_count = {}
    i = 0
    pc = 0
    # Precompute the parenthesis nesting depth at every character position.
    while i < slen:
        c = s[i]
        if c == '(':
            pc += 1
        elif c == ')':
            pc -= 1
        paren_count[i] = pc
        i += 1

    # Iterate over all operators that contain the equal sign. If any
    # match is also an assignment operator, add spaces around it if
    # the parenthesis count is 0.
    pos = 0
    out = []
    for m in equals.finditer(s):
        ms = m.start()
        me = m.end()
        # t is the string that matched opswithequal_re.
        t = m.string[ms:me]
        out += s[pos:ms]
        pos = me
        if paren_count[ms] == 0:
            # Check if the entire string t is an assignment.
            am = assign.match(t)
            if am and len(am.group(0)) == me - ms:
                # Don't add spaces if they are already there.
                if ms > 0 and s[ms - 1] != ' ':
                    out.append(' ')
                out += t
                if me < slen and s[me] != ' ':
                    out.append(' ')
                continue
        # If this is not an assignment, just append it to the output
        # string.
        out += t

    # Append the remaining part of the string.
    out += s[pos:len(s)]
    return ''.join(out)


def main():
    """
    Emit DEF_PRINTINFO/DEF_VECX_PRINTINFO entries: a printf format string
    plus the operand expressions needed to print each instruction.

    argv[1]: semantics file, argv[2]: attributes file, argv[3]: output.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])

    immext_casere = re.compile(r'IMMEXT\(([A-Za-z])')

    with open(sys.argv[3], 'w') as f:
        for tag in hex_common.tags:
            if not hex_common.behdict[tag]:
                continue
            extendable_upper_imm = False
            extendable_lower_imm = False
            m = immext_casere.search(hex_common.semdict[tag])
            if m:
                # Upper-case immediate letters are "upper" immediates.
                if m.group(1).isupper():
                    extendable_upper_imm = True
                else:
                    extendable_lower_imm = True

            # Turn the behavior description into a printf format string.
            beh = hex_common.behdict[tag]
            beh = hex_common.regre.sub(regprinter, beh)
            beh = hex_common.absimmre.sub(r"#%s0x%x", beh)
            beh = hex_common.relimmre.sub(r"PC+%s%d", beh)
            beh = spacify(beh)
            # Print out a literal "%s" at the end, used to match empty string
            # so C won't complain at us
            if "A_VECX" in hex_common.attribdict[tag]:
                macname = "DEF_VECX_PRINTINFO"
            else:
                macname = "DEF_PRINTINFO"
            f.write('%s(%s,"%s%%s"' % (macname, tag, beh))

            regs_or_imms = \
                hex_common.reg_or_immre.findall(hex_common.behdict[tag])
            ri = 0
            seenregs = {}
            for allregs, a, b, c, d, allimm, immlett, bits, immshift \
                    in regs_or_imms:
                if a:
                    # register operand: reuse the operand slot if this
                    # register letter was already seen in the format.
                    if b in seenregs:
                        regno = seenregs[b]
                    else:
                        regno = ri
                    if len(b) == 1:
                        f.write(', insn->regno[%d]' % regno)
                        if 'S' in a:
                            f.write(', sreg2str(insn->regno[%d])' % regno)
                        elif 'C' in a:
                            f.write(', creg2str(insn->regno[%d])' % regno)
                    elif len(b) == 2:
                        f.write(', insn->regno[%d] + 1, insn->regno[%d]' %
                                (regno, regno))
                    else:
                        print("Put some stuff to handle quads here")
                    if b not in seenregs:
                        seenregs[b] = ri
                        ri += 1
                else:
                    # immediate operand: prepend "#"/"##" when the
                    # immediate is extendable, then print its value.
                    if immlett.isupper():
                        if extendable_upper_imm:
                            if immlett in 'rR':
                                f.write(',insn->extension_valid?"##":""')
                            else:
                                f.write(',insn->extension_valid?"#":""')
                        else:
                            f.write(',""')
                        ii = 1
                    else:
                        if extendable_lower_imm:
                            if immlett in 'rR':
                                f.write(',insn->extension_valid?"##":""')
                            else:
                                f.write(',insn->extension_valid?"#":""')
                        else:
                            f.write(',""')
                        ii = 0
                    f.write(', insn->immed[%d]' % ii)
            # append empty string so there is at least one more arg
            f.write(',"")\n')


if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_printinsn.py
#!/usr/bin/env python3

##
##  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
##  This program is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 2 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License
##  along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common


def main():
    """
    Emit one OPCODE(tag) line per instruction tag, in semantics-file order.

    argv[1] is the semantics file; argv[3] is the output file.
    """
    hex_common.read_semantics_file(sys.argv[1])

    ##
    ## Generate a list of all the opcodes
    ##
    opcode_lines = ["OPCODE(%s),\n" % tag for tag in hex_common.tags]
    with open(sys.argv[3], 'w') as out:
        out.writelines(opcode_lines)


if __name__ == "__main__":
    main()
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_opcodes_def.py
#!/usr/bin/env python3

##
##  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
##  This program is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 2 of the License, or
##  (at your option) any later version.
##
##  This program is distributed in the hope that it will be useful,
##  but WITHOUT ANY WARRANTY; without even the implied warranty of
##  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##  GNU General Public License for more details.
##
##  You should have received a copy of the GNU General Public License
##  along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string

# Module-level tables populated while parsing the semantics dump.
behdict = {}          # tag -> behavior
semdict = {}          # tag -> semantics
attribdict = {}       # tag -> attributes
macros = {}           # macro -> macro information...
attribinfo = {}       # Register information and misc
tags = []             # list of all tags
overrides = {}        # tags with helper overrides

# We should do this as a hash for performance,
# but to keep order let's keep it as a list.
def uniquify(seq):
    """Return seq with duplicates removed, keeping first-seen order."""
    return list(dict.fromkeys(seq))

# Register reference: type letter, regid letters, optional .L/.H, width.
regre = re.compile(
        r"((?<!DUP)[MNORCPQXSGVZA])([stuvwxyzdefg]+)([.]?[LlHh]?)(\d+S?)")
# Immediate reference: #<sign letter><size>[:<shift>].
immre = re.compile(r"[#]([rRsSuUm])(\d+)(?:[:](\d+))?")
# Either of the above, used when operand order matters.
reg_or_immre = \
    re.compile(r"(((?<!DUP)[MNRCOPQXSGVZA])([stuvwxyzdefg]+)" + \
        r"([.]?[LlHh]?)(\d+S?))|([#]([rRsSuUm])(\d+)[:]?(\d+)?)")
# PC-relative and absolute immediate variants.
relimmre = re.compile(r"[#]([rR])(\d+)(?:[:](\d+))?")
absimmre = re.compile(r"[#]([sSuUm])(\d+)(?:[:](\d+))?")

# Macros whose attribute sets have already been fully expanded.
finished_macros = set()

def expand_macro_attribs(macro, allmac_re):
    """
    Recursively fold the attributes of every sub-macro referenced by
    `macro` into macro.attribs, memoizing via finished_macros.
    """
    if macro.key in finished_macros:
        return macro.attribs
    # Everything in the behavior text that might name a macro.
    candidates = allmac_re.findall(macro.beh)
    for submacro in candidates:
        if not submacro:
            continue
        if not macros[submacro]:
            raise Exception("Couldn't find macro: <%s>" % candidates)
        macro.attribs |= expand_macro_attribs(macros[submacro], allmac_re)
    finished_macros.add(macro.key)
    return macro.attribs

# When qemu needs an attribute that isn't in the imported files,
# we'll add it here.
def add_qemu_macro_attrib(name, attrib):
    """Attach an extra attribute to an already-parsed macro."""
    macros[name].attribs.add(attrib)

immextre = re.compile(r'f(MUST_)?IMMEXT[(]([UuSsRr])')

def calculate_attribs():
    """
    Propagate attributes: add QEMU-specific macro attributes, expand
    sub-macro attributes into each macro, fold macro attributes into
    every instruction that uses them, and tag predicate writers.
    """
    add_qemu_macro_attrib('fREAD_PC', 'A_IMPLICIT_READS_PC')
    add_qemu_macro_attrib('fTRAP', 'A_IMPLICIT_READS_PC')
    add_qemu_macro_attrib('fWRITE_P0', 'A_WRITES_PRED_REG')
    add_qemu_macro_attrib('fWRITE_P1', 'A_WRITES_PRED_REG')
    add_qemu_macro_attrib('fWRITE_P2', 'A_WRITES_PRED_REG')
    add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG')

    # Recurse down macros, find attributes from sub-macros
    macroValues = list(macros.values())
    allmacros_restr = "|".join(set([ m.re.pattern for m in macroValues ]))
    allmacros_re = re.compile(allmacros_restr)
    for macro in macroValues:
        expand_macro_attribs(macro, allmacros_re)
    # Append attributes to all instructions
    for tag in tags:
        for macname in allmacros_re.findall(semdict[tag]):
            if not macname:
                continue
            macro = macros[macname]
            attribdict[tag] |= set(macro.attribs)
    # Figure out which instructions write predicate registers
    tagregs = get_tagregs()
    for tag in tags:
        regs = tagregs[tag]
        for regtype, regid, toss, numregs in regs:
            if regtype == "P" and is_written(regid):
                attribdict[tag].add('A_WRITES_PRED_REG')

def SEMANTICS(tag, beh, sem):
    """Record one instruction's behavior and semantics (called via eval)."""
    #print tag,beh,sem
    behdict[tag] = beh
    semdict[tag] = sem
    attribdict[tag] = set()
    tags.append(tag)        # dicts have no order, this is for order

def ATTRIBUTES(tag, attribstring):
    """Record one instruction's attribute list (called via eval)."""
    attribstring = \
        attribstring.replace("ATTRIBS", "").replace("(", "").replace(")", "")
    if not attribstring:
        return
    attribs = attribstring.split(",")
    for attrib in attribs:
        attribdict[tag].add(attrib.strip())

class Macro(object):
    """One semantics macro: its behavior text, attributes, and a
    word-boundary regex that matches uses of its name."""
    __slots__ = ['key', 'name', 'beh', 'attribs', 're']
    def __init__(self, name, beh, attribs):
        self.key = name
        self.name = name
        self.beh = beh
        self.attribs = set(attribs)
        self.re = re.compile("\\b" + name + "\\b")

def MACROATTRIB(macname, beh, attribstring):
    """Record one macro definition (called via eval)."""
    attribstring = attribstring.replace("(", "").replace(")", "")
    if attribstring:
        attribs = attribstring.split(",")
    else:
        attribs = []
    macros[macname] = Macro(macname, beh, attribs)

def compute_tag_regs(tag):
    """Return the unique register references in a tag's behavior."""
    return uniquify(regre.findall(behdict[tag]))

def compute_tag_immediates(tag):
    """Return the unique immediate references in a tag's behavior."""
    return uniquify(immre.findall(behdict[tag]))

##
## tagregs is the main data structure we'll use
## tagregs[tag] will contain the registers used by an instruction
## Within each entry, we'll use the regtype and regid fields
##     regtype can be one of the following
##         C                control register
##         N                new register value
##         P                predicate register
##         R                GPR register
##         M                modifier register
##     regid can be one of the following
##         d, e             destination register
##         dd               destination register pair
##         s, t, u, v, w    source register
##         ss, tt, uu, vv   source register pair
##         x, y             read-write register
##         xx, yy           read-write register pair
##
def get_tagregs():
    return dict(zip(tags, list(map(compute_tag_regs, tags))))

def get_tagimms():
    return dict(zip(tags, list(map(compute_tag_immediates, tags))))

def is_pair(regid):
    return len(regid) == 2

def is_single(regid):
    return len(regid) == 1

def is_written(regid):
    return regid[0] in "dexy"

def is_writeonly(regid):
    return regid[0] in "de"

def is_read(regid):
    return regid[0] in "stuvwxy"

def is_readwrite(regid):
    return regid[0] in "xy"

def is_scalar_reg(regtype):
    return regtype in "RPC"

def is_old_val(regtype, regid, tag):
    # The semantics reference the pre-instruction value <reg>V.
    return regtype + regid + 'V' in semdict[tag]

def is_new_val(regtype, regid, tag):
    # The semantics reference the in-packet new value <reg>N.
    return regtype + regid + 'N' in semdict[tag]

def need_slot(tag):
    # Conditional execution, loads, and stores need the slot number.
    if ('A_CONDEXEC' in attribdict[tag] or
        'A_STORE' in attribdict[tag] or
        'A_LOAD' in attribdict[tag]):
        return 1
    else:
        return 0

def need_part1(tag):
    return re.compile(r"fPART1").search(semdict[tag])

def need_ea(tag):
    return re.compile(r"\bEA\b").search(semdict[tag])

def skip_qemu_helper(tag):
    # True when a hand-written TCG override exists for this tag.
    return tag in overrides.keys()

def imm_name(immlett):
    return "%siV" % immlett

def read_semantics_file(name):
    """
    Evaluate the semantics dump: each non-comment line is a Python call
    (SEMANTICS/ATTRIBUTES/MACROATTRIB); backslash-continued lines are
    joined before evaluation.
    """
    eval_line = ""
    with open(name, 'rt') as sem_file:
        for line in sem_file.readlines():
            if not line.startswith("#"):
                eval_line += line
                if line.endswith("\\\n"):
                    # Fix: strip the continuation and keep the result --
                    # the previous code discarded rstrip()'s return value.
                    eval_line = eval_line.rstrip("\\\n")
                else:
                    eval(eval_line.strip())
                    eval_line = ""

def read_attribs_file(name):
    """Parse DEF_ATTRIB(...) lines into the attribinfo table."""
    attribre = re.compile(r'DEF_ATTRIB\(([A-Za-z0-9_]+), ([^,]*), ' +
                          r'"([A-Za-z0-9_\.]*)", "([A-Za-z0-9_\.]*)"\)')
    with open(name, 'rt') as attribs_file:
        for line in attribs_file.readlines():
            if not attribre.match(line):
                continue
            (attrib_base, descr, rreg, wreg) = attribre.findall(line)[0]
            attrib_base = 'A_' + attrib_base
            attribinfo[attrib_base] = {'rreg':rreg, 'wreg':wreg, 'descr':descr}

def read_overrides_file(name):
    """Record which tags have an fGEN_TCG_<tag> override defined."""
    overridere = re.compile(r"#define fGEN_TCG_([A-Za-z0-9_]+)\(.*")
    with open(name, 'rt') as overrides_file:
        for line in overrides_file.readlines():
            if not overridere.match(line):
                continue
            tag = overridere.findall(line)[0]
            overrides[tag] = True
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/hex_common.py
#!/usr/bin/env python3 ## ## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, see <http://www.gnu.org/licenses/>. ## import io import re import sys import iset encs = {tag : ''.join(reversed(iset.iset[tag]['enc'].replace(' ', ''))) for tag in iset.tags if iset.iset[tag]['enc'] != 'MISSING ENCODING'} enc_classes = set([iset.iset[tag]['enc_class'] for tag in encs.keys()]) subinsn_enc_classes = \ set([enc_class for enc_class in enc_classes \ if enc_class.startswith('SUBINSN_')]) ext_enc_classes = \ set([enc_class for enc_class in enc_classes \ if enc_class not in ('NORMAL', '16BIT') and \ not enc_class.startswith('SUBINSN_')]) try: subinsn_groupings = iset.subinsn_groupings except AttributeError: subinsn_groupings = {} for (tag, subinsn_grouping) in subinsn_groupings.items(): encs[tag] = ''.join(reversed(subinsn_grouping['enc'].replace(' ', ''))) dectree_normal = {'leaves' : set()} dectree_16bit = {'leaves' : set()} dectree_subinsn_groupings = {'leaves' : set()} dectree_subinsns = {name : {'leaves' : set()} for name in subinsn_enc_classes} dectree_extensions = {name : {'leaves' : set()} for name in ext_enc_classes} for tag in encs.keys(): if tag in subinsn_groupings: dectree_subinsn_groupings['leaves'].add(tag) continue enc_class = iset.iset[tag]['enc_class'] if enc_class.startswith('SUBINSN_'): if len(encs[tag]) != 32: encs[tag] = encs[tag] 
+ '0' * (32 - len(encs[tag])) dectree_subinsns[enc_class]['leaves'].add(tag) elif enc_class == '16BIT': if len(encs[tag]) != 16: raise Exception('Tag "{}" has enc_class "{}" and not an encoding ' + 'width of 16 bits!'.format(tag, enc_class)) dectree_16bit['leaves'].add(tag) else: if len(encs[tag]) != 32: raise Exception('Tag "{}" has enc_class "{}" and not an encoding ' + 'width of 32 bits!'.format(tag, enc_class)) if enc_class == 'NORMAL': dectree_normal['leaves'].add(tag) else: dectree_extensions[enc_class]['leaves'].add(tag) faketags = set() for (tag, enc) in iset.enc_ext_spaces.items(): faketags.add(tag) encs[tag] = ''.join(reversed(enc.replace(' ', ''))) dectree_normal['leaves'].add(tag) faketags |= set(subinsn_groupings.keys()) def every_bit_counts(bitset): for i in range(1, len(next(iter(bitset)))): if len(set([bits[:i] + bits[i+1:] for bits in bitset])) == len(bitset): return False return True def auto_separate(node): tags = node['leaves'] if len(tags) <= 1: return enc_width = len(encs[next(iter(tags))]) opcode_bit_for_all = \ [all([encs[tag][i] in '01' \ for tag in tags]) for i in range(enc_width)] opcode_bit_is_0_for_all = \ [opcode_bit_for_all[i] and all([encs[tag][i] == '0' \ for tag in tags]) for i in range(enc_width)] opcode_bit_is_1_for_all = \ [opcode_bit_for_all[i] and all([encs[tag][i] == '1' \ for tag in tags]) for i in range(enc_width)] differentiator_opcode_bit = \ [opcode_bit_for_all[i] and \ not (opcode_bit_is_0_for_all[i] or \ opcode_bit_is_1_for_all[i]) \ for i in range(enc_width)] best_width = 0 for width in range(4, 0, -1): for lsb in range(enc_width - width, -1, -1): bitset = set([encs[tag][lsb:lsb+width] for tag in tags]) if all(differentiator_opcode_bit[lsb:lsb+width]) and \ (len(bitset) == len(tags) or every_bit_counts(bitset)): best_width = width best_lsb = lsb caught_all_tags = len(bitset) == len(tags) break if best_width != 0: break if best_width == 0: raise Exception('Could not find a way to differentiate the encodings ' + 'of the 
following tags:\n{}'.format('\n'.join(tags))) if caught_all_tags: for width in range(1, best_width): for lsb in range(enc_width - width, -1, -1): bitset = set([encs[tag][lsb:lsb+width] for tag in tags]) if all(differentiator_opcode_bit[lsb:lsb+width]) and \ len(bitset) == len(tags): best_width = width best_lsb = lsb break else: continue break node['separator_lsb'] = best_lsb node['separator_width'] = best_width node['children'] = [] for value in range(2 ** best_width): child = {} bits = ''.join(reversed('{:0{}b}'.format(value, best_width))) child['leaves'] = \ set([tag for tag in tags \ if encs[tag][best_lsb:best_lsb+best_width] == bits]) node['children'].append(child) for child in node['children']: auto_separate(child) auto_separate(dectree_normal) auto_separate(dectree_16bit) if subinsn_groupings: auto_separate(dectree_subinsn_groupings) for dectree_subinsn in dectree_subinsns.values(): auto_separate(dectree_subinsn) for dectree_ext in dectree_extensions.values(): auto_separate(dectree_ext) for tag in faketags: del encs[tag] def table_name(parents, node): path = parents + [node] root = path[0] tag = next(iter(node['leaves'])) if tag in subinsn_groupings: enc_width = len(subinsn_groupings[tag]['enc'].replace(' ', '')) else: tag = next(iter(node['leaves'] - faketags)) enc_width = len(encs[tag]) determining_bits = ['_'] * enc_width for (parent, child) in zip(path[:-1], path[1:]): lsb = parent['separator_lsb'] width = parent['separator_width'] value = parent['children'].index(child) determining_bits[lsb:lsb+width] = \ list(reversed('{:0{}b}'.format(value, width))) if tag in subinsn_groupings: name = 'DECODE_ROOT_EE' else: enc_class = iset.iset[tag]['enc_class'] if enc_class in ext_enc_classes: name = 'DECODE_EXT_{}'.format(enc_class) elif enc_class in subinsn_enc_classes: name = 'DECODE_SUBINSN_{}'.format(enc_class) else: name = 'DECODE_ROOT_{}'.format(enc_width) if node != root: name += '_' + ''.join(reversed(determining_bits)) return name def print_node(f, node, 
parents): if len(node['leaves']) <= 1: return name = table_name(parents, node) lsb = node['separator_lsb'] width = node['separator_width'] print('DECODE_NEW_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\ format(name, 2 ** width, lsb, width), file=f) for child in node['children']: if len(child['leaves']) == 0: print('INVALID()', file=f) elif len(child['leaves']) == 1: (tag,) = child['leaves'] if tag in subinsn_groupings: class_a = subinsn_groupings[tag]['class_a'] class_b = subinsn_groupings[tag]['class_b'] enc = subinsn_groupings[tag]['enc'].replace(' ', '') if 'RESERVED' in tag: print('INVALID()', file=f) else: print('SUBINSNS({},{},{},"{}")'.\ format(tag, class_a, class_b, enc), file=f) elif tag in iset.enc_ext_spaces: enc = iset.enc_ext_spaces[tag].replace(' ', '') print('EXTSPACE({},"{}")'.format(tag, enc), file=f) else: enc = ''.join(reversed(encs[tag])) print('TERMINAL({},"{}")'.format(tag, enc), file=f) else: print('TABLE_LINK({})'.format(table_name(parents + [node], child)), file=f) print('DECODE_END_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\ format(name, 2 ** width, lsb, width), file=f) print(file=f) parents.append(node) for child in node['children']: print_node(f, child, parents) parents.pop() def print_tree(f, tree): print_node(f, tree, []) def print_match_info(f): for tag in sorted(encs.keys(), key=iset.tags.index): enc = ''.join(reversed(encs[tag])) mask = int(re.sub(r'[^1]', r'0', enc.replace('0', '1')), 2) match = int(re.sub(r'[^01]', r'0', enc), 2) suffix = '' print('DECODE{}_MATCH_INFO({},0x{:x}U,0x{:x}U)'.\ format(suffix, tag, mask, match), file=f) regre = re.compile( r'((?<!DUP)[MNORCPQXSGVZA])([stuvwxyzdefg]+)([.]?[LlHh]?)(\d+S?)') immre = re.compile(r'[#]([rRsSuUm])(\d+)(?:[:](\d+))?') def ordered_unique(l): return sorted(set(l), key=l.index) implicit_registers = { 'SP' : 29, 'FP' : 30, 'LR' : 31 } num_registers = { 'R' : 32, 'V' : 32 } def print_op_info(f): for tag in sorted(encs.keys(), key=iset.tags.index): enc = encs[tag] print(file=f) 
print('DECODE_OPINFO({},'.format(tag), file=f) regs = ordered_unique(regre.findall(iset.iset[tag]['syntax'])) imms = ordered_unique(immre.findall(iset.iset[tag]['syntax'])) regno = 0 for reg in regs: reg_type = reg[0] reg_letter = reg[1][0] reg_num_choices = int(reg[3].rstrip('S')) reg_mapping = reg[0] + ''.join(['_' for letter in reg[1]]) + reg[3] reg_enc_fields = re.findall(reg_letter + '+', enc) if len(reg_enc_fields) == 0: raise Exception('Tag "{}" missing register field!'.format(tag)) if len(reg_enc_fields) > 1: raise Exception('Tag "{}" has split register field!'.\ format(tag)) reg_enc_field = reg_enc_fields[0] if 2 ** len(reg_enc_field) != reg_num_choices: raise Exception('Tag "{}" has incorrect register field width!'.\ format(tag)) print(' DECODE_REG({},{},{})'.\ format(regno, len(reg_enc_field), enc.index(reg_enc_field)), file=f) if reg_type in num_registers and \ reg_num_choices != num_registers[reg_type]: print(' DECODE_MAPPED_REG({},{})'.\ format(regno, reg_mapping), file=f) regno += 1 def implicit_register_key(reg): return implicit_registers[reg] for reg in sorted( set([r for r in (iset.iset[tag]['rregs'].split(',') + \ iset.iset[tag]['wregs'].split(',')) \ if r in implicit_registers]), key=implicit_register_key): print(' DECODE_IMPL_REG({},{})'.\ format(regno, implicit_registers[reg]), file=f) regno += 1 if imms and imms[0][0].isupper(): imms = reversed(imms) for imm in imms: if imm[0].isupper(): immno = 1 else: immno = 0 imm_type = imm[0] imm_width = int(imm[1]) imm_shift = imm[2] if imm_shift: imm_shift = int(imm_shift) else: imm_shift = 0 if imm_type.islower(): imm_letter = 'i' else: imm_letter = 'I' remainder = imm_width for m in reversed(list(re.finditer(imm_letter + '+', enc))): remainder -= m.end() - m.start() print(' DECODE_IMM({},{},{},{})'.\ format(immno, m.end() - m.start(), m.start(), remainder), file=f) if remainder != 0: if imm[2]: imm[2] = ':' + imm[2] raise Exception('Tag "{}" has an incorrect number of ' + \ 'encoding bits for 
immediate "{}"'.\ format(tag, ''.join(imm))) if imm_type.lower() in 'sr': print(' DECODE_IMM_SXT({},{})'.\ format(immno, imm_width), file=f) if imm_type.lower() == 'n': print(' DECODE_IMM_NEG({},{})'.\ format(immno, imm_width), file=f) if imm_shift: print(' DECODE_IMM_SHIFT({},{})'.\ format(immno, imm_shift), file=f) print(')', file=f) if __name__ == '__main__': with open(sys.argv[1], 'w') as f: print_tree(f, dectree_normal) print_tree(f, dectree_16bit) if subinsn_groupings: print_tree(f, dectree_subinsn_groupings) for (name, dectree_subinsn) in sorted(dectree_subinsns.items()): print_tree(f, dectree_subinsn) for (name, dectree_ext) in sorted(dectree_extensions.items()): print_tree(f, dectree_ext) print_match_info(f) print_op_info(f)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/dectree.py
#!/usr/bin/env python3

##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

##
## Helpers for gen_helper_prototype
##
## DEF_HELPER type name emitted for each single-register operand type letter.
def_helper_types = {
    'N' : 's32',
    'O' : 's32',
    'P' : 's32',
    'M' : 's32',
    'C' : 's32',
    'R' : 's32',
    'V' : 'ptr',
    'Q' : 'ptr'
}

## DEF_HELPER type name emitted for each register-pair operand type letter.
def_helper_types_pair = {
    'R' : 's64',
    'C' : 's64',
    'S' : 's64',
    'G' : 's64',
    'V' : 'ptr',
    'Q' : 'ptr'
}

def gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i):
    """Write the DEF_HELPER argument type for one register operand of *tag*."""
    if (hex_common.is_pair(regid)):
        f.write(", %s" % (def_helper_types_pair[regtype]))
    elif (hex_common.is_single(regid)):
        f.write(", %s" % (def_helper_types[regtype]))
    else:
        print("Bad register parse: ",regtype,regid,toss,numregs)

##
## Generate the DEF_HELPER prototype for an instruction
## For A2_add: Rd32=add(Rs32,Rt32)
## We produce:
## DEF_HELPER_3(A2_add, s32, env, s32, s32)
##
def gen_helper_prototype(f, tag, tagregs, tagimms):
    """Write the DEF_HELPER_n(...) prototype line for instruction *tag*.

    tagregs/tagimms map a tag to its parsed register and immediate operands.
    """
    regs = tagregs[tag]
    imms = tagimms[tag]

    ## Count results; only a single scalar result can be returned directly
    ## from the helper, so scalar results/read-writes are tracked separately.
    numresults = 0
    numscalarresults = 0
    numscalarreadwrite = 0
    for regtype,regid,toss,numregs in regs:
        if (hex_common.is_written(regid)):
            numresults += 1
            if (hex_common.is_scalar_reg(regtype)):
                numscalarresults += 1
        if (hex_common.is_readwrite(regid)):
            if (hex_common.is_scalar_reg(regtype)):
                numscalarreadwrite += 1

    if (numscalarresults > 1):
        ## The helper is bogus when there is more than one result
        f.write('DEF_HELPER_1(%s, void, env)\n' % tag)
    else:
        ## Figure out how many arguments the helper will take
        if (numscalarresults == 0):
            ## +1 because env is an argument but there is no return slot
            def_helper_size = len(regs)+len(imms)+numscalarreadwrite+1
            if hex_common.need_part1(tag):
                def_helper_size += 1
            if hex_common.need_slot(tag):
                def_helper_size += 1
            f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
            ## The return type is void
            f.write(', void')
        else:
            def_helper_size = len(regs)+len(imms)+numscalarreadwrite
            if hex_common.need_part1(tag):
                def_helper_size += 1
            if hex_common.need_slot(tag):
                def_helper_size += 1
            f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))

        ## Generate the qemu DEF_HELPER type for each result
        i = 0
        for regtype,regid,toss,numregs in regs:
            if (hex_common.is_written(regid)):
                gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
                i += 1

        ## Put the env between the outputs and inputs
        f.write(', env')
        i += 1

        ## Generate the qemu type for each input operand (regs and immediates)
        for regtype,regid,toss,numregs in regs:
            if (hex_common.is_read(regid)):
                gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
                i += 1
        for immlett,bits,immshift in imms:
            f.write(", s32")

        ## Add the arguments for the instruction slot and part1 (if needed).
        ## Consistency fix: part1 used to be emitted as ' , i32' (stray space
        ## before the comma) while slot was ', i32'; emit both the same way.
        if hex_common.need_slot(tag):
            f.write(', i32')
        if hex_common.need_part1(tag):
            f.write(', i32')
        f.write(')\n')

def main():
    """Read the semantics/attribs/overrides inputs (argv[1..3]) and write the
    DEF_HELPER prototypes to argv[4], skipping priv/guest/diag instructions
    and those with a hand-written TCG override.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    hex_common.read_overrides_file(sys.argv[3])
    hex_common.calculate_attribs()
    tagregs = hex_common.get_tagregs()
    tagimms = hex_common.get_tagimms()

    with open(sys.argv[4], 'w') as f:
        for tag in hex_common.tags:
            ## Skip the priv instructions
            if ( "A_PRIV" in hex_common.attribdict[tag] ) :
                continue
            ## Skip the guest instructions
            if ( "A_GUEST" in hex_common.attribdict[tag] ) :
                continue
            ## Skip the diag instructions
            if ( tag == "Y6_diag" ) :
                continue
            if ( tag == "Y6_diag0" ) :
                continue
            if ( tag == "Y6_diag1" ) :
                continue
            ## Instructions with a TCG override don't need a helper
            if ( hex_common.skip_qemu_helper(tag) ):
                continue

            gen_helper_prototype(f, tag, tagregs, tagimms)

if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_helper_protos.py
#!/usr/bin/env python3

##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

def gen_shortcode(f, tag):
    """Write the DEF_SHORTCODE entry mapping *tag* to its semantics string."""
    f.write('DEF_SHORTCODE(%s, %s)\n' % (tag, hex_common.semdict[tag]))

def main():
    """Read the semantics (argv[1]) and attribs (argv[2]) inputs and write
    one DEF_SHORTCODE line per instruction to argv[3], skipping priv/guest/
    diag instructions.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    hex_common.calculate_attribs()
    ## Cleanup: the register/immediate tables (get_tagregs/get_tagimms) were
    ## previously computed here but never used by this generator.

    with open(sys.argv[3], 'w') as f:
        ## Provide a no-op default so the output can be included without the
        ## consumer defining DEF_SHORTCODE first.
        f.write("#ifndef DEF_SHORTCODE\n")
        f.write("#define DEF_SHORTCODE(TAG,SHORTCODE) /* Nothing */\n")
        f.write("#endif\n")

        for tag in hex_common.tags:
            ## Skip the priv instructions
            if ( "A_PRIV" in hex_common.attribdict[tag] ) :
                continue
            ## Skip the guest instructions
            if ( "A_GUEST" in hex_common.attribdict[tag] ) :
                continue
            ## Skip the diag instructions
            if ( tag == "Y6_diag" ) :
                continue
            if ( tag == "Y6_diag0" ) :
                continue
            if ( tag == "Y6_diag1" ) :
                continue

            gen_shortcode(f, tag)

        f.write("#undef DEF_SHORTCODE\n")

if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_shortcode.py
#!/usr/bin/env python3

##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

##
## Helpers for gen_tcg_func
##
def gen_decl_ea_tcg(f, tag):
    ## Declare the effective-address temp; conditional-execution and load
    ## instructions get a local temp (survives branches in the generated TCG).
    if ('A_CONDEXEC' in hex_common.attribdict[tag] or
        'A_LOAD' in hex_common.attribdict[tag]):
        f.write(" TCGv EA = tcg_temp_local_new();\n")
    else:
        f.write(" TCGv EA = tcg_temp_new();\n")

def gen_free_ea_tcg(f):
    f.write(" tcg_temp_free(EA);\n")

def genptr_decl_pair_writable(f, tag, regtype, regid, regno):
    ## Declare a writable register-pair operand and its register number.
    regN = "%s%sN" % (regtype, regid)
    f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
        (regtype, regid))
    if (regtype == "C"):
        ## Control registers are offset by HEX_REG_SA0
        f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
            (regN, regno))
    else:
        f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
    if ('A_CONDEXEC' in hex_common.attribdict[tag]):
        ## Preload both halves of the pair so a not-taken predicated write
        ## still commits the old values.
        f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN)
        f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \
            (regN, regN))
        f.write(" }\n")
        f.write(" if (!is_preloaded(ctx, %s + 1)) {\n" % regN)
        f.write(" tcg_gen_mov_tl(hex_new_value[%s + 1], hex_gpr[%s + 1]);\n" % \
            (regN, regN))
        f.write(" }\n")

def genptr_decl_writable(f, tag, regtype, regid, regno):
    ## Declare a writable single-register operand and its register number.
    regN = "%s%sN" % (regtype, regid)
    f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \
        (regtype, regid))
    if (regtype == "C"):
        f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
            (regN, regno))
    else:
        f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
    if ('A_CONDEXEC' in hex_common.attribdict[tag]):
        f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN)
        f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \
            (regN, regN))
        f.write(" }\n")

def genptr_decl(f, tag, regtype, regid, regno):
    """Declare the TCG variable(s) for one register operand of *tag*."""
    regN = "%s%sN" % (regtype, regid)
    if (regtype == "R"):
        if (regid in {"ss", "tt"}):
            ## Read-only register pair
            f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
                (regtype, regid))
            f.write(" const int %s = insn->regno[%d];\n" % \
                (regN, regno))
        elif (regid in {"dd", "ee", "xx", "yy"}):
            genptr_decl_pair_writable(f, tag, regtype, regid, regno)
        elif (regid in {"s", "t", "u", "v"}):
            ## Read-only single register aliases the GPR directly
            f.write(" TCGv %s%sV = hex_gpr[insn->regno[%d]];\n" % \
                (regtype, regid, regno))
        elif (regid in {"d", "e", "x", "y"}):
            genptr_decl_writable(f, tag, regtype, regid, regno)
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "P"):
        if (regid in {"s", "t", "u", "v"}):
            f.write(" TCGv %s%sV = hex_pred[insn->regno[%d]];\n" % \
                (regtype, regid, regno))
        elif (regid in {"d", "e", "x"}):
            genptr_decl_writable(f, tag, regtype, regid, regno)
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "C"):
        if (regid == "ss"):
            f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
                (regtype, regid))
            f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
                (regN, regno))
        elif (regid == "dd"):
            genptr_decl_pair_writable(f, tag, regtype, regid, regno)
        elif (regid == "s"):
            f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \
                (regtype, regid))
            f.write(" const int %s%sN = insn->regno[%d] + HEX_REG_SA0;\n" % \
                (regtype, regid, regno))
        elif (regid == "d"):
            genptr_decl_writable(f, tag, regtype, regid, regno)
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "M"):
        if (regid == "u"):
            ## Modifier register: offset from HEX_REG_M0
            f.write(" const int %s%sN = insn->regno[%d];\n"% \
                (regtype, regid, regno))
            f.write(" TCGv %s%sV = hex_gpr[%s%sN + HEX_REG_M0];\n" % \
                (regtype, regid, regtype, regid))
        else:
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_decl_new(f, regtype, regid, regno):
    """Declare the TCG variable for a new-value operand of the instruction."""
    if (regtype == "N"):
        if (regid in {"s", "t"}):
            f.write(" TCGv %s%sN = hex_new_value[insn->regno[%d]];\n" % \
                (regtype, regid, regno))
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "P"):
        if (regid in {"t", "u", "v"}):
            f.write(" TCGv %s%sN = hex_new_pred_value[insn->regno[%d]];\n" % \
                (regtype, regid, regno))
        else:
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i):
    """Dispatch declaration for one operand (pair, old-value, or new-value)."""
    if (hex_common.is_pair(regid)):
        genptr_decl(f, tag, regtype, regid, i)
    elif (hex_common.is_single(regid)):
        if hex_common.is_old_val(regtype, regid, tag):
            genptr_decl(f,tag, regtype, regid, i)
        elif hex_common.is_new_val(regtype, regid, tag):
            genptr_decl_new(f,regtype,regid,i)
        else:
            print("Bad register parse: ",regtype,regid,toss,numregs)
    else:
        print("Bad register parse: ",regtype,regid,toss,numregs)

def genptr_decl_imm(f, immlett):
    ## Uppercase immediate letters use immed[1], lowercase use immed[0]
    if (immlett.isupper()):
        i = 1
    else:
        i = 0
    f.write(" int %s = insn->immed[%d];\n" % \
        (hex_common.imm_name(immlett), i))

def genptr_free(f, regtype, regid, regno):
    """Free the TCG temporaries declared for one operand (no-op for aliases).

    Note: *regno* is accepted for symmetry with the decl helpers but unused.
    """
    if (regtype == "R"):
        if (regid in {"dd", "ss", "tt", "xx", "yy"}):
            f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid))
        elif (regid in {"d", "e", "x", "y"}):
            f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid))
        elif (regid not in {"s", "t", "u", "v"}):
            print("Bad register parse: ",regtype,regid)
    elif (regtype == "P"):
        if (regid in {"d", "e", "x"}):
            f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid))
        elif (regid not in {"s", "t", "u", "v"}):
            print("Bad register parse: ",regtype,regid)
    elif (regtype == "C"):
        if (regid in {"dd", "ss"}):
            f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid))
        elif (regid in {"d", "s"}):
            f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid))
        else:
            print("Bad register parse: ",regtype,regid)
    elif (regtype == "M"):
        if (regid != "u"):
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_free_new(f, regtype, regid, regno):
    ## New-value operands alias existing TCG globals, so nothing to free;
    ## just validate the operand spelling.
    if (regtype == "N"):
        if (regid not in {"s", "t"}):
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "P"):
        if (regid not in {"t", "u", "v"}):
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_free_opn(f, regtype, regid, i, tag):
    """Dispatch freeing for one operand."""
    if (hex_common.is_pair(regid)):
        genptr_free(f, regtype, regid, i)
    elif (hex_common.is_single(regid)):
        if hex_common.is_old_val(regtype, regid, tag):
            genptr_free(f, regtype, regid, i)
        elif hex_common.is_new_val(regtype, regid, tag):
            genptr_free_new(f, regtype, regid, i)
        else:
            ## BUG FIX: this error path referenced undefined names
            ## (toss, numregs) and would raise NameError if ever reached;
            ## report only the values that are in scope.
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_src_read(f, regtype, regid):
    """Emit the TCG code that loads the operand's current value."""
    if (regtype == "R"):
        if (regid in {"ss", "tt", "xx", "yy"}):
            ## Assemble the 64-bit pair from the two 32-bit halves
            f.write(" tcg_gen_concat_i32_i64(%s%sV, hex_gpr[%s%sN],\n" % \
                (regtype, regid, regtype, regid))
            f.write(" hex_gpr[%s%sN + 1]);\n" % \
                (regtype, regid))
        elif (regid in {"x", "y"}):
            f.write(" tcg_gen_mov_tl(%s%sV, hex_gpr[%s%sN]);\n" % \
                (regtype,regid,regtype,regid))
        elif (regid not in {"s", "t", "u", "v"}):
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "P"):
        if (regid == "x"):
            f.write(" tcg_gen_mov_tl(%s%sV, hex_pred[%s%sN]);\n" % \
                (regtype, regid, regtype, regid))
        elif (regid not in {"s", "t", "u", "v"}):
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "C"):
        if (regid == "ss"):
            f.write(" gen_read_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \
                (regtype, regid, regtype, regid))
        elif (regid == "s"):
            f.write(" gen_read_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \
                (regtype, regid, regtype, regid))
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "M"):
        if (regid != "u"):
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_src_read_new(f, regtype, regid):
    ## New-value reads need no code (the declaration aliased the value);
    ## just validate the operand spelling.
    if (regtype == "N"):
        if (regid not in {"s", "t"}):
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "P"):
        if (regid not in {"t", "u", "v"}):
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_src_read_opn(f, regtype, regid, tag):
    """Dispatch the source-read emission for one operand."""
    if (hex_common.is_pair(regid)):
        genptr_src_read(f, regtype, regid)
    elif (hex_common.is_single(regid)):
        if hex_common.is_old_val(regtype, regid, tag):
            genptr_src_read(f, regtype, regid)
        elif hex_common.is_new_val(regtype, regid, tag):
            genptr_src_read_new(f, regtype, regid)
        else:
            ## BUG FIX: referenced undefined (toss, numregs); see
            ## genptr_free_opn.
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i):
    """Write the argument name for one operand in the gen_helper_* call."""
    if (i > 0):
        f.write(", ")
    if (hex_common.is_pair(regid)):
        f.write("%s%sV" % (regtype,regid))
    elif (hex_common.is_single(regid)):
        if hex_common.is_old_val(regtype, regid, tag):
            f.write("%s%sV" % (regtype,regid))
        elif hex_common.is_new_val(regtype, regid, tag):
            f.write("%s%sN" % (regtype,regid))
        else:
            print("Bad register parse: ",regtype,regid,toss,numregs)
    else:
        print("Bad register parse: ",regtype,regid,toss,numregs)

def gen_helper_decl_imm(f, immlett):
    ## Immediates are passed to helpers as TCG constants
    f.write(" TCGv tcgv_%s = tcg_const_tl(%s);\n" % \
        (hex_common.imm_name(immlett), hex_common.imm_name(immlett)))

def gen_helper_call_imm(f, immlett):
    f.write(", tcgv_%s" % hex_common.imm_name(immlett))

def gen_helper_free_imm(f, immlett):
    f.write(" tcg_temp_free(tcgv_%s);\n" % hex_common.imm_name(immlett))

def genptr_dst_write_pair(f, tag, regtype, regid):
    """Emit the write-back (and ctx logging) for a register-pair result."""
    if ('A_CONDEXEC' in hex_common.attribdict[tag]):
        f.write(" gen_log_predicated_reg_write_pair(%s%sN, %s%sV, insn->slot);\n" % \
            (regtype, regid, regtype, regid))
    else:
        f.write(" gen_log_reg_write_pair(%s%sN, %s%sV);\n" % \
            (regtype, regid, regtype, regid))
    f.write(" ctx_log_reg_write_pair(ctx, %s%sN);\n" % \
        (regtype, regid))

def genptr_dst_write(f, tag, regtype, regid):
    """Emit the write-back (and ctx logging) for one result operand."""
    if (regtype == "R"):
        if (regid in {"dd", "xx", "yy"}):
            genptr_dst_write_pair(f, tag, regtype, regid)
        elif (regid in {"d", "e", "x", "y"}):
            if ('A_CONDEXEC' in hex_common.attribdict[tag]):
                f.write(" gen_log_predicated_reg_write(%s%sN, %s%sV,\n" % \
                    (regtype, regid, regtype, regid))
                f.write(" insn->slot);\n")
            else:
                f.write(" gen_log_reg_write(%s%sN, %s%sV);\n" % \
                    (regtype, regid, regtype, regid))
            f.write(" ctx_log_reg_write(ctx, %s%sN);\n" % \
                (regtype, regid))
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "P"):
        if (regid in {"d", "e", "x"}):
            f.write(" gen_log_pred_write(ctx, %s%sN, %s%sV);\n" % \
                (regtype, regid, regtype, regid))
            f.write(" ctx_log_pred_write(ctx, %s%sN);\n" % \
                (regtype, regid))
        else:
            print("Bad register parse: ", regtype, regid)
    elif (regtype == "C"):
        if (regid == "dd"):
            f.write(" gen_write_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \
                (regtype, regid, regtype, regid))
        elif (regid == "d"):
            f.write(" gen_write_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \
                (regtype, regid, regtype, regid))
        else:
            print("Bad register parse: ", regtype, regid)
    else:
        print("Bad register parse: ", regtype, regid)

def genptr_dst_write_opn(f, regtype, regid, tag):
    """Dispatch the write-back emission for one operand."""
    if (hex_common.is_pair(regid)):
        genptr_dst_write(f, tag, regtype, regid)
    elif (hex_common.is_single(regid)):
        genptr_dst_write(f, tag, regtype, regid)
    else:
        ## BUG FIX: referenced undefined (toss, numregs); see genptr_free_opn.
        print("Bad register parse: ", regtype, regid)

##
## Generate the TCG code to call the helper
## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
## We produce:
## static void generate_A2_add()
## CPUHexagonState *env
## DisasContext *ctx,
## Insn *insn,
## Packet *pkt)
## {
## TCGv RdV = tcg_temp_local_new();
## const int RdN = insn->regno[0];
## TCGv RsV = hex_gpr[insn->regno[1]];
## TCGv RtV = hex_gpr[insn->regno[2]];
## <GEN>
## gen_log_reg_write(RdN, RdV);
## ctx_log_reg_write(ctx, RdN);
## tcg_temp_free(RdV);
## }
##
## where <GEN> depends on hex_common.skip_qemu_helper(tag)
## if hex_common.skip_qemu_helper(tag) is True
## <GEN> is fGEN_TCG_A2_add({ RdV=RsV+RtV;});
## if hex_common.skip_qemu_helper(tag) is False
## <GEN> is gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
##
def gen_tcg_func(f, tag, regs, imms):
    """Write the generate_<tag>() C function for one instruction."""
    f.write("static void generate_%s(\n" %tag)
    f.write(" CPUHexagonState *env,\n")
    f.write(" DisasContext *ctx,\n")
    f.write(" Insn *insn,\n")
    f.write(" Packet *pkt)\n")
    f.write('{\n')
    if hex_common.need_ea(tag):
        gen_decl_ea_tcg(f, tag)
    i = 0
    ## Declare all the operands (regs and immediates)
    for regtype,regid,toss,numregs in regs:
        genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i)
        i += 1
    for immlett,bits,immshift in imms:
        genptr_decl_imm(f,immlett)

    if 'A_PRIV' in hex_common.attribdict[tag]:
        f.write(' fCHECKFORPRIV();\n')
    if 'A_GUEST' in hex_common.attribdict[tag]:
        f.write(' fCHECKFORGUEST();\n')

    ## Read all the inputs
    for regtype,regid,toss,numregs in regs:
        if (hex_common.is_read(regid)):
            genptr_src_read_opn(f,regtype,regid,tag)

    if ( hex_common.skip_qemu_helper(tag) ):
        ## Inline TCG override supplied by gen_tcg.h
        f.write(" fGEN_TCG_%s(%s);\n" % (tag, hex_common.semdict[tag]))
    else:
        ## Generate the call to the helper
        for immlett,bits,immshift in imms:
            gen_helper_decl_imm(f,immlett)
        if hex_common.need_part1(tag):
            f.write(" TCGv part1 = tcg_const_tl(insn->part1);\n")
        if hex_common.need_slot(tag):
            f.write(" TCGv slot = tcg_constant_tl(insn->slot);\n")
        f.write(" gen_helper_%s(" % (tag))
        i = 0
        ## If there is a scalar result, it is the return type
        for regtype,regid,toss,numregs in regs:
            if (hex_common.is_written(regid)):
                gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
                i += 1
        if (i > 0):
            f.write(", ")
        f.write("cpu_env")
        i = 1
        for regtype,regid,toss,numregs in regs:
            if (hex_common.is_read(regid)):
                gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
                i += 1
        for immlett,bits,immshift in imms:
            gen_helper_call_imm(f,immlett)

        if hex_common.need_slot(tag):
            f.write(", slot")
        if hex_common.need_part1(tag):
            f.write(", part1")
        f.write(");\n")
        ## part1 was created with tcg_const_tl and is freed here; slot was
        ## created with tcg_constant_tl and is not freed.
        if hex_common.need_part1(tag):
            f.write(" tcg_temp_free(part1);\n")
        for immlett,bits,immshift in imms:
            gen_helper_free_imm(f,immlett)

    ## Write all the outputs
    for regtype,regid,toss,numregs in regs:
        if (hex_common.is_written(regid)):
            genptr_dst_write_opn(f,regtype, regid, tag)

    ## Free all the operands (regs and immediates).
    ## Note: the regno argument passed to genptr_free_opn is ignored by the
    ## free helpers, so reusing the leftover value of i here is harmless.
    if hex_common.need_ea(tag):
        gen_free_ea_tcg(f)
    for regtype,regid,toss,numregs in regs:
        genptr_free_opn(f,regtype,regid,i,tag)
        i += 1

    f.write("}\n\n")

def gen_def_tcg_func(f, tag, tagregs, tagimms):
    """Look up the operands for *tag* and emit its generate_ function."""
    regs = tagregs[tag]
    imms = tagimms[tag]

    gen_tcg_func(f, tag, regs, imms)

def main():
    """Read the semantics/attribs/overrides inputs (argv[1..3]) and write the
    generated TCG functions header to argv[4], skipping priv/guest/diag
    instructions.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    hex_common.read_overrides_file(sys.argv[3])
    hex_common.calculate_attribs()
    tagregs = hex_common.get_tagregs()
    tagimms = hex_common.get_tagimms()

    with open(sys.argv[4], 'w') as f:
        f.write("#ifndef HEXAGON_TCG_FUNCS_H\n")
        f.write("#define HEXAGON_TCG_FUNCS_H\n\n")

        for tag in hex_common.tags:
            ## Skip the priv instructions
            if ( "A_PRIV" in hex_common.attribdict[tag] ) :
                continue
            ## Skip the guest instructions
            if ( "A_GUEST" in hex_common.attribdict[tag] ) :
                continue
            ## Skip the diag instructions
            if ( tag == "Y6_diag" ) :
                continue
            if ( tag == "Y6_diag0" ) :
                continue
            if ( tag == "Y6_diag1" ) :
                continue

            gen_def_tcg_func(f, tag, tagregs, tagimms)

        f.write("#endif    /* HEXAGON_TCG_FUNCS_H */\n")

if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_tcg_funcs.py
#!/usr/bin/env python3

##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

def main():
    """Write one OP_ATTRIB(tag, ATTRIBS(...)) line per instruction tag.

    argv[1] is the semantics file, argv[2] the attribs file, and argv[3]
    the output file.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    hex_common.calculate_attribs()

    ##
    ## Generate all the attributes associated with each instruction
    ##
    with open(sys.argv[3], 'w') as out:
        for tag in hex_common.tags:
            attrs = ','.join(sorted(hex_common.attribdict[tag]))
            out.write('OP_ATTRIB(%s,ATTRIBS(%s))\n' % (tag, attrs))

if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_op_attribs.py
#!/usr/bin/env python3

##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sys
import re
import string
import hex_common

def _is_skipped(tag):
    """True for tags that get no table entry: priv, guest, and diag insns."""
    attribs = hex_common.attribdict[tag]
    if "A_PRIV" in attribs or "A_GUEST" in attribs:
        return True
    return tag in ("Y6_diag", "Y6_diag0", "Y6_diag1")

def main():
    """Write the opcode -> generate_<tag> function table header.

    argv[1] is the semantics file, argv[2] the attribs file, and argv[3]
    the output header.
    """
    hex_common.read_semantics_file(sys.argv[1])
    hex_common.read_attribs_file(sys.argv[2])
    hex_common.calculate_attribs()
    tagregs = hex_common.get_tagregs()
    tagimms = hex_common.get_tagimms()

    with open(sys.argv[3], 'w') as out:
        out.write("#ifndef HEXAGON_FUNC_TABLE_H\n")
        out.write("#define HEXAGON_FUNC_TABLE_H\n\n")
        out.write("const SemanticInsn opcode_genptr[XX_LAST_OPCODE] = {\n")
        for tag in hex_common.tags:
            if _is_skipped(tag):
                continue
            out.write(" [%s] = generate_%s,\n" % (tag, tag))
        out.write("};\n\n")
        out.write("#endif /* HEXAGON_FUNC_TABLE_H */\n")

if __name__ == "__main__":
    main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/target/hexagon/gen_tcg_func_table.py
from __future__ import print_function # # Test that signals and debugging mix well together on s390x. # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): """Report success/fail of test""" if cond: print("PASS: %s" % (msg)) else: print("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): """Run through the tests one by one""" illegal_op = gdb.Breakpoint("illegal_op") stg = gdb.Breakpoint("stg") mvc_8 = gdb.Breakpoint("mvc_8") # Expect the following events: # 1x illegal_op breakpoint # 2x stg breakpoint, segv, breakpoint # 2x mvc_8 breakpoint, segv, breakpoint for _ in range(14): gdb.execute("c") report(illegal_op.hit_count == 1, "illegal_op.hit_count == 1") report(stg.hit_count == 4, "stg.hit_count == 4") report(mvc_8.hit_count == 4, "mvc_8.hit_count == 4") # The test must succeed. gdb.Breakpoint("_exit") gdb.execute("c") status = int(gdb.parse_and_eval("$r2")) report(status == 0, "status == 0"); # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval("$pc") == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except (gdb.error): print("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass print("All tests complete: %d failures" % failcount) exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/tcg/s390x/gdbstub/test-signals-s390x.py
from __future__ import print_function # # Test auxiliary vector is loaded via gdbstub # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): "Run through the tests one by one" auxv = gdb.execute("info auxv", False, True) report(isinstance(auxv, str), "Fetched auxv from inferior") report(auxv.find("sha1"), "Found test binary name in auxv") # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval('$pc') == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except (gdb.error): print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass print("All tests complete: %d failures" % failcount) exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py
from __future__ import print_function # # Test some of the softmmu debug features with the multiarch memory # test. It is a port of the original vmlinux focused test case but # using the "memory" test instead. # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print("PASS: %s" % (msg)) else: print("FAIL: %s" % (msg)) global failcount failcount += 1 def check_step(): "Step an instruction, check it moved." start_pc = gdb.parse_and_eval('$pc') gdb.execute("si") end_pc = gdb.parse_and_eval('$pc') return not (start_pc == end_pc) # # Currently it's hard to create a hbreak with the pure python API and # manually matching PC to symbol address is a bit flaky thanks to # function prologues. However internally QEMU's gdbstub treats them # the same as normal breakpoints so it will do for now. # def check_break(sym_name): "Setup breakpoint, continue and check we stopped." sym, ok = gdb.lookup_symbol(sym_name) bp = gdb.Breakpoint(sym_name, gdb.BP_BREAKPOINT) gdb.execute("c") # hopefully we came back end_pc = gdb.parse_and_eval('$pc') report(bp.hit_count == 1, "break @ %s (%s %d hits)" % (end_pc, sym.value(), bp.hit_count)) bp.delete() def do_one_watch(sym, wtype, text): wp = gdb.Breakpoint(sym, gdb.BP_WATCHPOINT, wtype) gdb.execute("c") report_str = "%s for %s" % (text, sym) if wp.hit_count > 0: report(True, report_str) wp.delete() else: report(False, report_str) def check_watches(sym_name): "Watch a symbol for any access." 
# Should hit for any read do_one_watch(sym_name, gdb.WP_ACCESS, "awatch") # Again should hit for reads do_one_watch(sym_name, gdb.WP_READ, "rwatch") # Finally when it is written do_one_watch(sym_name, gdb.WP_WRITE, "watch") def run_test(): "Run through the tests one by one" print("Checking we can step the first few instructions") step_ok = 0 for i in range(3): if check_step(): step_ok += 1 report(step_ok == 3, "single step in boot code") # If we get here we have missed some of the other breakpoints. print("Setup catch-all for _exit") cbp = gdb.Breakpoint("_exit", gdb.BP_BREAKPOINT) check_break("main") check_watches("test_data[128]") report(cbp.hit_count == 0, "didn't reach backstop") # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval('$pc') == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") # Run the actual tests run_test() except (gdb.error): print("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass # Finally kill the inferior and exit gdb with a count of failures gdb.execute("kill") exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/tcg/multiarch/gdbstub/memory.py
from __future__ import print_function
#
# A very simple smoke test for debugging the SHA1 userspace test on
# each target.
#
# This is launched via tests/guest-debug/run-test.py
#

import gdb
import sys

# NOTE(review): initial_vlen is never used in this script; it looks copied
# from the SVE gdbstub tests -- confirm before removing.
initial_vlen = 0
# Running total of failed checks; doubles as the script's exit code.
failcount = 0


def report(cond, msg):
    "Report success/fail of test"
    if cond:
        print("PASS: %s" % (msg))
    else:
        print("FAIL: %s" % (msg))
        global failcount
        failcount += 1


def check_break(sym_name):
    "Setup breakpoint, continue and check we stopped."
    sym, ok = gdb.lookup_symbol(sym_name)
    bp = gdb.Breakpoint(sym_name)

    gdb.execute("c")

    # hopefully we came back
    end_pc = gdb.parse_and_eval('$pc')
    report(bp.hit_count == 1,
           "break @ %s (%s %d hits)" % (end_pc, sym.value(), bp.hit_count))

    # Delete so later continues can't re-trigger this breakpoint.
    bp.delete()


def run_test():
    "Run through the tests one by one"

    check_break("SHA1Init")

    # Check step and inspect values. We do a double next after the
    # breakpoint as depending on the version of gdb we may step the
    # preamble and not the first actual line of source.
    gdb.execute("next")
    gdb.execute("next")

    # 0x67452301 / 0xEFCDAB89 are the standard SHA-1 initial state words
    # that SHA1Init stores into context->state[].
    val_ctx = gdb.parse_and_eval("context->state[0]")
    exp_ctx = 0x67452301
    report(int(val_ctx) == exp_ctx, "context->state[0] == %x" % exp_ctx);

    gdb.execute("next")
    val_ctx = gdb.parse_and_eval("context->state[1]")
    exp_ctx = 0xEFCDAB89
    report(int(val_ctx) == exp_ctx, "context->state[1] == %x" % exp_ctx);

    # finally check we don't barf inspecting registers
    gdb.execute("info registers")


#
# This runs as the script it sourced (via -x, via run-test.py)
#
try:
    inferior = gdb.selected_inferior()
    arch = inferior.architecture()
    print("ATTACHED: %s" % arch.name())
except (gdb.error, AttributeError):
    # Not attached to an inferior (or gdb too old): nothing to test.
    print("SKIPPING (not connected)", file=sys.stderr)
    exit(0)

if gdb.parse_and_eval('$pc') == 0:
    print("SKIP: PC not set")
    exit(0)

try:
    # These are not very useful in scripts
    gdb.execute("set pagination off")
    gdb.execute("set confirm off")

    # Run the actual tests
    run_test()
except (gdb.error):
    print ("GDB Exception: %s" % (sys.exc_info()[0]))
    failcount += 1
    pass

print("All tests complete: %d failures" % failcount)
exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/tcg/multiarch/gdbstub/sha1.py
from __future__ import print_function # # Test the SVE registers are visable and changeable via gdbstub # # This is launched via tests/guest-debug/run-test.py # import gdb import sys MAGIC = 0xDEADBEEF failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): "Run through the tests one by one" gdb.execute("info registers") report(True, "info registers") gdb.execute("info registers vector") report(True, "info registers vector") # Now all the zregs frame = gdb.selected_frame() for i in range(0, 32): rname = "z%d" % (i) zreg = frame.read_register(rname) report(True, "Reading %s" % rname) for j in range(0, 4): cmd = "set $%s.q.u[%d] = 0x%x" % (rname, j, MAGIC) gdb.execute(cmd) report(True, "%s" % cmd) for j in range(0, 4): reg = "$%s.q.u[%d]" % (rname, j) v = gdb.parse_and_eval(reg) report(str(v.type) == "uint128_t", "size of %s" % (reg)) for j in range(0, 8): cmd = "set $%s.d.u[%d] = 0x%x" % (rname, j, MAGIC) gdb.execute(cmd) report(True, "%s" % cmd) for j in range(0, 8): reg = "$%s.d.u[%d]" % (rname, j) v = gdb.parse_and_eval(reg) report(str(v.type) == "uint64_t", "size of %s" % (reg)) report(int(v) == MAGIC, "%s is 0x%x" % (reg, MAGIC)) # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() report(arch.name() == "aarch64", "connected to aarch64") except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") # Run the actual tests run_test() except: print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 print("All tests complete: %d failures" % failcount) exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/tcg/aarch64/gdbstub/test-sve.py
from __future__ import print_function
#
# Test the SVE ZReg reports the right amount of data. It uses the
# sve-ioctl test and examines the register data each time the
# __sve_ld_done breakpoint is hit.
#
# This is launched via tests/guest-debug/run-test.py
#

import gdb
import sys

# Vector length in bytes, captured from $vg before the inferior runs;
# read by TestBreakpoint.stop() to know how many lanes to check.
initial_vlen = 0
# Running total of failed checks; doubles as the script's exit code.
failcount = 0


def report(cond, msg):
    "Report success/fail of test"
    if cond:
        print ("PASS: %s" % (msg))
    else:
        print ("FAIL: %s" % (msg))
        global failcount
        failcount += 1


class TestBreakpoint(gdb.Breakpoint):
    """Breakpoint on __sve_ld_done that validates z0's contents each hit."""

    def __init__(self, sym_name="__sve_ld_done"):
        super(TestBreakpoint, self).__init__(sym_name)
        # self.sym, ok = gdb.lookup_symbol(sym_name)

    def stop(self):
        # The inferior's local 'i' tells us how many leading byte lanes
        # of z0 were loaded with their own index.
        val_i = gdb.parse_and_eval('i')
        global initial_vlen
        try:
            for i in range(0, int(val_i)):
                val_z = gdb.parse_and_eval("$z0.b.u[%d]" % i)
                report(int(val_z) == i, "z0.b.u[%d] == %d" % (i, i))
            # NOTE(review): this deliberately reuses 'i' leaked from the
            # loop above (last index = val_i - 1, so i + 1 == val_i); it
            # would raise NameError if val_i were ever 0 -- confirm the
            # sve-ioctl test guarantees val_i > 0.
            for i in range(i + 1, initial_vlen):
                val_z = gdb.parse_and_eval("$z0.b.u[%d]" % i)
                report(int(val_z) == 0, "z0.b.u[%d] == 0" % (i))
        except gdb.error:
            report(False, "checking zregs (out of range)")

        # Check the aliased V registers are set and GDB has correctly
        # created them for us having recognised and handled SVE.
        try:
            for i in range(0, 16):
                val_z = gdb.parse_and_eval("$z0.b.u[%d]" % i)
                val_v = gdb.parse_and_eval("$v0.b.u[%d]" % i)
                report(int(val_z) == int(val_v),
                       "v0.b.u[%d] == z0.b.u[%d]" % (i, i))
        except gdb.error:
            report(False, "checking vregs (out of range)")


def run_test():
    "Run through the tests one by one"
    print ("Setup breakpoint")
    bp = TestBreakpoint()

    global initial_vlen
    vg = gdb.parse_and_eval("$vg")
    # $vg is the vector length in 64-bit granules; x8 converts to bytes.
    initial_vlen = int(vg) * 8

    gdb.execute("c")


#
# This runs as the script it sourced (via -x, via run-test.py)
#
try:
    inferior = gdb.selected_inferior()
    arch = inferior.architecture()
    report(arch.name() == "aarch64", "connected to aarch64")
except (gdb.error, AttributeError):
    # Not attached to an inferior (or gdb too old): nothing to test.
    print("SKIPPING (not connected)", file=sys.stderr)
    exit(0)

try:
    # These are not very useful in scripts
    gdb.execute("set pagination off")

    # Run the actual tests
    run_test()
# NOTE(review): bare except; the interactive console below is a debugging
# aid that drops the developer into a REPL on unexpected failure.
except:
    print ("GDB Exception: %s" % (sys.exc_info()[0]))
    failcount += 1
    import code
    code.InteractiveConsole(locals=globals()).interact()
    raise

print("All tests complete: %d failures" % failcount)
exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py
#!/usr/bin/env python3
#
# Docker controlling module
#
# Copyright (c) 2016 Red Hat Inc.
#
# Authors:
#  Fam Zheng <famz@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2
# or (at your option) any later version.  See the COPYING file in
# the top-level directory.

import os
import sys
import subprocess
import json
import hashlib
import atexit
import uuid
import argparse
import enum
import tempfile
import re
import signal
from tarfile import TarFile, TarInfo
from io import StringIO, BytesIO
from shutil import copy, rmtree
from pwd import getpwuid
from datetime import datetime, timedelta

# Proxy variables forwarded into builds as --build-arg (see BuildCommand).
FILTERED_ENV_NAMES = ['ftp_proxy', 'http_proxy', 'https_proxy']


# Shared sink for suppressed subprocess output; kept open for the
# lifetime of the process.
DEVNULL = open(os.devnull, 'wb')


class EngineEnum(enum.IntEnum):
    """Container engine selection: auto-detect, docker or podman."""
    AUTO = 1
    DOCKER = 2
    PODMAN = 3

    def __str__(self):
        return self.name.lower()

    def __repr__(self):
        return str(self)

    @staticmethod
    def argparse(s):
        # argparse 'type' hook; returns the raw string on bad input so
        # argparse reports it against 'choices'.
        try:
            return EngineEnum[s.upper()]
        except KeyError:
            return s


USE_ENGINE = EngineEnum.AUTO


def _bytes_checksum(bytes):
    """Calculate a digest string unique to the text content"""
    return hashlib.sha1(bytes).hexdigest()


def _text_checksum(text):
    """Calculate a digest string unique to the text content"""
    return _bytes_checksum(text.encode('utf-8'))


def _read_dockerfile(path):
    # Read a dockerfile as UTF-8 text.
    return open(path, 'rt', encoding='utf-8').read()


def _file_checksum(filename):
    # SHA1 digest of a file's raw bytes.
    return _bytes_checksum(open(filename, 'rb').read())


def _guess_engine_command():
    """ Guess a working engine command or raise exception if not found"""
    commands = []
    if USE_ENGINE in [EngineEnum.AUTO, EngineEnum.PODMAN]:
        commands += [["podman"]]
    if USE_ENGINE in [EngineEnum.AUTO, EngineEnum.DOCKER]:
        commands += [["docker"], ["sudo", "-n", "docker"]]
    for cmd in commands:
        try:
            # docker version will return the client details in stdout
            # but still report a status of 1 if it can't contact the daemon
            if subprocess.call(cmd + ["version"],
                               stdout=DEVNULL, stderr=DEVNULL) == 0:
                return cmd
        except OSError:
            pass
    commands_txt = "\n".join(["  " + " ".join(x) for x in commands])
    raise Exception("Cannot find working engine command. Tried:\n%s" %
                    commands_txt)


def _copy_with_mkdir(src, root_dir, sub_path='.', name=None):
    """Copy src into root_dir, creating sub_path as needed."""
    dest_dir = os.path.normpath("%s/%s" % (root_dir, sub_path))
    try:
        os.makedirs(dest_dir)
    except OSError:
        # we can safely ignore already created directories
        pass

    dest_file = "%s/%s" % (dest_dir, name if name else os.path.basename(src))

    try:
        copy(src, dest_file)
    except FileNotFoundError:
        print("Couldn't copy %s to %s" % (src, dest_file))
        pass


def _get_so_libs(executable):
    """Return a list of libraries associated with an executable.

    The paths may be symbolic links which would need to be resolved to
    ensure the right data is copied."""

    libs = []
    ldd_re = re.compile(r"(?:\S+ => )?(\S*) \(:?0x[0-9a-f]+\)")
    try:
        ldd_output = subprocess.check_output(["ldd", executable]).decode('utf-8')
        for line in ldd_output.split("\n"):
            search = ldd_re.search(line)
            if search:
                try:
                    libs.append(search.group(1))
                except IndexError:
                    pass
    except subprocess.CalledProcessError:
        print("%s had no associated libraries (static build?)" % (executable))

    return libs


def _copy_binary_with_libs(src, bin_dest, dest_dir):
    """Maybe copy a binary and all its dependent libraries.

    If bin_dest isn't set we only copy the support libraries because
    we don't need qemu in the docker path to run (due to persistent
    mapping). Indeed users may get confused if we aren't running what
    is in the image.

    This does rely on the host file-system being fairly multi-arch
    aware so the files don't clash with the guest's layout.
    """

    if bin_dest:
        _copy_with_mkdir(src, dest_dir, os.path.dirname(bin_dest))
    else:
        print("only copying support libraries for %s" % (src))

    libs = _get_so_libs(src)
    if libs:
        for l in libs:
            so_path = os.path.dirname(l)
            name = os.path.basename(l)
            real_l = os.path.realpath(l)
            _copy_with_mkdir(real_l, dest_dir, so_path, name)


def _check_binfmt_misc(executable):
    """Check binfmt_misc has entry for executable in the right place.

    The details of setting up binfmt_misc are outside the scope of
    this script but we should at least fail early with a useful
    message if it won't work.

    Returns the configured binfmt path and a valid flag. For
    persistent configurations we will still want to copy any dependent
    libraries.
    """
    binary = os.path.basename(executable)
    binfmt_entry = "/proc/sys/fs/binfmt_misc/%s" % (binary)

    if not os.path.exists(binfmt_entry):
        print ("No binfmt_misc entry for %s" % (binary))
        return None, False

    with open(binfmt_entry) as x: entry = x.read()

    if re.search("flags:.*F.*\n", entry):
        # Persistent (F) mapping: the kernel holds the interpreter open,
        # so no path needs copying into the image.
        print("binfmt_misc for %s uses persistent(F) mapping to host binary" %
              (binary))
        return None, True

    # NOTE(review): assumes an "interpreter" line is always present when the
    # F flag is absent; m would be None otherwise -- confirm.
    m = re.search("interpreter (\S+)\n", entry)
    interp = m.group(1)
    if interp and interp != executable:
        print("binfmt_misc for %s does not point to %s, using %s" %
              (binary, executable, interp))

    return interp, True


def _read_qemu_dockerfile(img_name):
    """Read the named dockerfile from tests/docker/dockerfiles."""
    # special case for Debian linux-user images
    if img_name.startswith("debian") and img_name.endswith("user"):
        img_name = "debian-bootstrap"

    df = os.path.join(os.path.dirname(__file__), "dockerfiles",
                      img_name + ".docker")
    return _read_dockerfile(df)


def _dockerfile_preprocess(df):
    """Expand 'FROM qemu/...' lines by inlining the referenced dockerfile;
    strips blanks and comments so the checksum is content-only."""
    out = ""
    for l in df.splitlines():
        if len(l.strip()) == 0 or l.startswith("#"):
            continue
        from_pref = "FROM qemu/"
        if l.startswith(from_pref):
            # TODO: Alternatively we could replace this line with "FROM $ID"
            # where $ID is the image's hex id obtained with
            #    $ docker images $IMAGE --format="{{.Id}}"
            # but unfortunately that's not supported by RHEL 7.
            inlining = _read_qemu_dockerfile(l[len(from_pref):])
            out += _dockerfile_preprocess(inlining)
            continue
        out += l + "\n"
    return out


class Docker(object):
    """ Running Docker commands """
    def __init__(self):
        self._command = _guess_engine_command()

        if ("docker" in self._command and
            "TRAVIS" not in os.environ and
            "GITLAB_CI" not in os.environ):
            os.environ["DOCKER_BUILDKIT"] = "1"
            self._buildkit = True
        else:
            self._buildkit = False

        self._instance = None
        # Clean up any container we started, whether we exit normally
        # or are killed by a signal.
        atexit.register(self._kill_instances)
        signal.signal(signal.SIGTERM, self._kill_instances)
        signal.signal(signal.SIGHUP, self._kill_instances)

    def _do(self, cmd, quiet=True, **kwargs):
        """Run an engine subcommand, returning its exit status."""
        if quiet:
            kwargs["stdout"] = DEVNULL
        return subprocess.call(self._command + cmd, **kwargs)

    def _do_check(self, cmd, quiet=True, **kwargs):
        """Run an engine subcommand, raising CalledProcessError on failure."""
        if quiet:
            kwargs["stdout"] = DEVNULL
        return subprocess.check_call(self._command + cmd, **kwargs)

    def _do_kill_instances(self, only_known, only_active=True):
        """Remove containers we started (only_known) or all labelled ones."""
        cmd = ["ps", "-q"]
        if not only_active:
            cmd.append("-a")

        filter = "--filter=label=com.qemu.instance.uuid"
        if only_known:
            if self._instance:
                filter += "=%s" % (self._instance)
            else:
                # no point trying to kill, we finished
                return

        print("filter=%s" % (filter))
        cmd.append(filter)
        for i in self._output(cmd).split():
            self._do(["rm", "-f", i])

    def clean(self):
        """Force-remove all labelled containers, active or not."""
        self._do_kill_instances(False, False)
        return 0

    def _kill_instances(self, *args, **kwargs):
        # atexit/signal handler shim around _do_kill_instances.
        return self._do_kill_instances(True)

    def _output(self, cmd, **kwargs):
        """Run an engine subcommand and return its combined output as str."""
        try:
            return subprocess.check_output(self._command + cmd,
                                           stderr=subprocess.STDOUT,
                                           encoding='utf-8',
                                           **kwargs)
        except TypeError:
            # 'encoding' argument was added in 3.6+
            return subprocess.check_output(self._command + cmd,
                                           stderr=subprocess.STDOUT,
                                           **kwargs).decode('utf-8')

    def inspect_tag(self, tag):
        """Return the raw 'inspect' JSON for tag, or None if it is unknown."""
        try:
            return self._output(["inspect", tag])
        except subprocess.CalledProcessError:
            return None

    def get_image_creation_time(self, info):
        """Extract the Created timestamp from inspect JSON."""
        return json.loads(info)[0]["Created"]

    def get_image_dockerfile_checksum(self, tag):
        """Return the dockerfile checksum label recorded on the image."""
        resp = self.inspect_tag(tag)
        labels = json.loads(resp)[0]["Config"].get("Labels", {})
        return labels.get("com.qemu.dockerfile-checksum", "")

    def build_image(self, tag, docker_dir, dockerfile,
                    quiet=True, user=False, argv=None, registry=None,
                    extra_files_cksum=[]):
        """Build 'tag' from dockerfile with context docker_dir.

        A checksum of the (preprocessed) dockerfile is recorded as a
        LABEL so later runs can detect an up-to-date image.  If a
        registry is given we try to prime the layer cache from it.
        Note: extra_files_cksum's mutable default is safe here as the
        argument is only iterated, never mutated.
        """
        if argv is None:
            argv = []

        # pre-calculate the docker checksum before any
        # substitutions we make for caching
        checksum = _text_checksum(_dockerfile_preprocess(dockerfile))

        if registry is not None:
            sources = re.findall("FROM qemu\/(.*)", dockerfile)
            # Fetch any cache layers we can, may fail
            for s in sources:
                pull_args = ["pull", "%s/qemu/%s" % (registry, s)]
                if self._do(pull_args, quiet=quiet) != 0:
                    registry = None
                    break

        # Make substitutions
        if registry is not None:
            dockerfile = dockerfile.replace("FROM qemu/",
                                            "FROM %s/qemu/" % (registry))

        tmp_df = tempfile.NamedTemporaryFile(mode="w+t",
                                             encoding='utf-8',
                                             dir=docker_dir, suffix=".docker")
        tmp_df.write(dockerfile)

        if user:
            uid = os.getuid()
            uname = getpwuid(uid).pw_name
            tmp_df.write("\n")
            tmp_df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" %
                         (uname, uid, uname))

        tmp_df.write("\n")
        tmp_df.write("LABEL com.qemu.dockerfile-checksum=%s\n" % (checksum))
        for f, c in extra_files_cksum:
            tmp_df.write("LABEL com.qemu.%s-checksum=%s\n" % (f, c))

        tmp_df.flush()

        build_args = ["build", "-t", tag, "-f", tmp_df.name]
        if self._buildkit:
            build_args += ["--build-arg", "BUILDKIT_INLINE_CACHE=1"]

        if registry is not None:
            pull_args = ["pull", "%s/%s" % (registry, tag)]
            self._do(pull_args, quiet=quiet)
            cache = "%s/%s" % (registry, tag)
            build_args += ["--cache-from", cache]
        build_args += argv
        build_args += [docker_dir]

        self._do_check(build_args,
                       quiet=quiet)

    def update_image(self, tag, tarball, quiet=True):
        "Update a tagged image using the tarball as the build context on stdin"
        self._do_check(["build", "-t", tag, "-"], quiet=quiet, stdin=tarball)

    def image_matches_dockerfile(self, tag, dockerfile):
        """True if the image labelled checksum matches this dockerfile."""
        try:
            checksum = self.get_image_dockerfile_checksum(tag)
        except Exception:
            return False
        return checksum == _text_checksum(_dockerfile_preprocess(dockerfile))

    def run(self, cmd, keep, quiet, as_user=False):
        """Run a labelled container; remove it afterwards unless keep."""
        label = uuid.uuid4().hex
        if not keep:
            self._instance = label

        if as_user:
            uid = os.getuid()
            cmd = [ "-u", str(uid) ] + cmd
            # podman requires a bit more fiddling
            if self._command[0] == "podman":
                cmd.insert(0, '--userns=keep-id')

        ret = self._do_check(["run", "--rm", "--label",
                             "com.qemu.instance.uuid=" + label] + cmd,
                             quiet=quiet)
        if not keep:
            self._instance = None
        return ret

    def command(self, cmd, argv, quiet):
        """Pass an arbitrary subcommand straight through to the engine."""
        return self._do([cmd] + argv, quiet=quiet)


class SubCommand(object):
    """A SubCommand template base class"""
    name = None  # Subcommand name

    def shared_args(self, parser):
        parser.add_argument("--quiet", action="store_true",
                            help="Run quietly unless an error occurred")

    def args(self, parser):
        """Setup argument parser"""
        pass

    def run(self, args, argv):
        """Run command.
        args: parsed argument by argument parser.
        argv: remaining arguments from sys.argv.
        """
        pass


class RunCommand(SubCommand):
    """Invoke docker run and take care of cleaning up"""
    name = "run"

    def args(self, parser):
        parser.add_argument("--keep", action="store_true",
                            help="Don't remove image when command completes")
        parser.add_argument("--run-as-current-user", action="store_true",
                            help="Run container using the current user's uid")

    def run(self, args, argv):
        return Docker().run(argv, args.keep, quiet=args.quiet,
                            as_user=args.run_as_current_user)


class BuildCommand(SubCommand):
    """ Build docker image out of a dockerfile. Arg: <tag> <dockerfile>"""
    name = "build"

    def args(self, parser):
        parser.add_argument("--include-executable", "-e",
                            help="""Specify a binary that will be copied to the
                            container together with all its dependent
                            libraries""")
        parser.add_argument("--skip-binfmt",
                            action="store_true",
                            help="""Skip binfmt entry check (used for testing)""")
        parser.add_argument("--extra-files", nargs='*',
                            help="""Specify files that will be copied in the
                            Docker image, fulfilling the ADD directive from the
                            Dockerfile""")
        parser.add_argument("--add-current-user", "-u", dest="user",
                            action="store_true",
                            help="Add the current user to image's passwd")
        parser.add_argument("--registry", "-r",
                            help="cache from docker registry")
        parser.add_argument("-t", dest="tag",
                            help="Image Tag")
        parser.add_argument("-f", dest="dockerfile",
                            help="Dockerfile name")

    def run(self, args, argv):
        dockerfile = _read_dockerfile(args.dockerfile)
        tag = args.tag

        dkr = Docker()
        if "--no-cache" not in argv and \
           dkr.image_matches_dockerfile(tag, dockerfile):
            if not args.quiet:
                print("Image is up to date.")
        else:
            # Create a docker context directory for the build
            docker_dir = tempfile.mkdtemp(prefix="docker_build")

            # Validate binfmt_misc will work
            if args.skip_binfmt:
                qpath = args.include_executable
            elif args.include_executable:
                qpath, enabled = _check_binfmt_misc(args.include_executable)
                if not enabled:
                    return 1

            # Is there a .pre file to run in the build context?
            docker_pre = os.path.splitext(args.dockerfile)[0]+".pre"
            if os.path.exists(docker_pre):
                stdout = DEVNULL if args.quiet else None
                rc = subprocess.call(os.path.realpath(docker_pre),
                                     cwd=docker_dir, stdout=stdout)
                if rc == 3:
                    # Convention: exit code 3 from the .pre hook means
                    # "skip this image" rather than a hard failure.
                    print("Skip")
                    return 0
                elif rc != 0:
                    print("%s exited with code %d" % (docker_pre, rc))
                    return 1

            # Copy any extra files into the Docker context. These can be
            # included by the use of the ADD directive in the Dockerfile.
            cksum = []
            if args.include_executable:
                # FIXME: there is no checksum of this executable and the linked
                # libraries, once the image built any change of this executable
                # or any library won't trigger another build.
                _copy_binary_with_libs(args.include_executable,
                                       qpath, docker_dir)

            for filename in args.extra_files or []:
                _copy_with_mkdir(filename, docker_dir)
                cksum += [(filename, _file_checksum(filename))]

            argv += ["--build-arg=" + k.lower() + "=" + v
                     for k, v in os.environ.items()
                     if k.lower() in FILTERED_ENV_NAMES]
            dkr.build_image(tag, docker_dir, dockerfile,
                            quiet=args.quiet, user=args.user,
                            argv=argv, registry=args.registry,
                            extra_files_cksum=cksum)

            rmtree(docker_dir)

        return 0


class FetchCommand(SubCommand):
    """ Fetch a docker image from the registry. Args: <tag> <registry>"""
    name = "fetch"

    def args(self, parser):
        parser.add_argument("tag",
                            help="Local tag for image")
        parser.add_argument("registry",
                            help="Docker registry")

    def run(self, args, argv):
        dkr = Docker()
        # Pull from the registry then retag locally.
        dkr.command(cmd="pull", quiet=args.quiet,
                    argv=["%s/%s" % (args.registry, args.tag)])
        dkr.command(cmd="tag", quiet=args.quiet,
                    argv=["%s/%s" % (args.registry, args.tag), args.tag])


class UpdateCommand(SubCommand):
    """ Update a docker image. Args: <tag> <actions>"""
    name = "update"

    def args(self, parser):
        parser.add_argument("tag",
                            help="Image Tag")
        parser.add_argument("--executable",
                            help="Executable to copy")
        parser.add_argument("--add-current-user", "-u", dest="user",
                            action="store_true",
                            help="Add the current user to image's passwd")

    def run(self, args, argv):
        # Create a temporary tarball with our whole build context and
        # dockerfile for the update
        tmp = tempfile.NamedTemporaryFile(suffix="dckr.tar.gz")
        tmp_tar = TarFile(fileobj=tmp, mode='w')

        # Create a Docker buildfile
        df = StringIO()
        df.write(u"FROM %s\n" % args.tag)

        if args.executable:
            # Add the executable to the tarball, using the current
            # configured binfmt_misc path. If we don't get a path then we
            # only need the support libraries copied
            ff, enabled = _check_binfmt_misc(args.executable)

            if not enabled:
                print("binfmt_misc not enabled, update disabled")
                return 1

            if ff:
                tmp_tar.add(args.executable, arcname=ff)

            # Add any associated libraries
            libs = _get_so_libs(args.executable)
            if libs:
                for l in libs:
                    so_path = os.path.dirname(l)
                    name = os.path.basename(l)
                    real_l = os.path.realpath(l)
                    try:
                        tmp_tar.add(real_l, arcname="%s/%s" % (so_path, name))
                    except FileNotFoundError:
                        print("Couldn't add %s/%s to archive" % (so_path, name))
                        pass

            df.write(u"ADD . /\n")

        if args.user:
            uid = os.getuid()
            uname = getpwuid(uid).pw_name
            df.write("\n")
            df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" %
                     (uname, uid, uname))

        df_bytes = BytesIO(bytes(df.getvalue(), "UTF-8"))

        df_tar = TarInfo(name="Dockerfile")
        df_tar.size = df_bytes.getbuffer().nbytes
        tmp_tar.addfile(df_tar, fileobj=df_bytes)

        tmp_tar.close()

        # reset the file pointers
        tmp.flush()
        tmp.seek(0)

        # Run the build with our tarball context
        dkr = Docker()
        dkr.update_image(args.tag, tmp, quiet=args.quiet)

        return 0


class CleanCommand(SubCommand):
    """Clean up docker instances"""
    name = "clean"

    def run(self, args, argv):
        Docker().clean()
        return 0


class ImagesCommand(SubCommand):
    """Run "docker images" command"""
    name = "images"

    def run(self, args, argv):
        return Docker().command("images", argv, args.quiet)


class ProbeCommand(SubCommand):
    """Probe if we can run docker automatically"""
    name = "probe"

    def run(self, args, argv):
        try:
            docker = Docker()
            if docker._command[0] == "docker":
                print("docker")
            elif docker._command[0] == "sudo":
                print("sudo docker")
            elif docker._command[0] == "podman":
                print("podman")
        except Exception:
            print("no")

        return


class CcCommand(SubCommand):
    """Compile sources with cc in images"""
    name = "cc"

    def args(self, parser):
        parser.add_argument("--image", "-i", required=True,
                            help="The docker image in which to run cc")
        parser.add_argument("--cc", default="cc",
                            help="The compiler executable to call")
        parser.add_argument("--source-path", "-s", nargs="*", dest="paths",
                            help="""Extra paths to (ro) mount into container for
                            reading sources""")

    def run(self, args, argv):
        if argv and argv[0] == "--":
            argv = argv[1:]
        cwd = os.getcwd()
        cmd = ["-w", cwd,
               "-v", "%s:%s:rw" % (cwd, cwd)]
        if args.paths:
            for p in args.paths:
                cmd += ["-v", "%s:%s:ro,z" % (p, p)]
        cmd += [args.image, args.cc]
        cmd += argv
        return Docker().run(cmd, False, quiet=args.quiet,
                            as_user=True)


class CheckCommand(SubCommand):
    """Check if we need to re-build a docker image out of a dockerfile.
    Arguments: <tag> <dockerfile>"""
    name = "check"

    def args(self, parser):
        parser.add_argument("tag",
                            help="Image Tag")
        parser.add_argument("dockerfile", default=None,
                            help="Dockerfile name", nargs='?')
        parser.add_argument("--checktype", choices=["checksum", "age"],
                            default="checksum", help="check type")
        parser.add_argument("--olderthan", default=60, type=int,
                            help="number of minutes")

    def run(self, args, argv):
        tag = args.tag

        try:
            dkr = Docker()
        except subprocess.CalledProcessError:
            print("Docker not set up")
            return 1

        info = dkr.inspect_tag(tag)
        if info is None:
            print("Image does not exist")
            return 1

        if args.checktype == "checksum":
            if not args.dockerfile:
                print("Need a dockerfile for tag:%s" % (tag))
                return 1

            dockerfile = _read_dockerfile(args.dockerfile)

            if dkr.image_matches_dockerfile(tag, dockerfile):
                if not args.quiet:
                    print("Image is up to date")
                return 0
            else:
                print("Image needs updating")
                return 1
        elif args.checktype == "age":
            # Strip the fractional seconds so strptime can parse it.
            timestr = dkr.get_image_creation_time(info).split(".")[0]
            created = datetime.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
            past = datetime.now() - timedelta(minutes=args.olderthan)
            if created < past:
                print ("Image created @ %s more than %d minutes old" %
                       (timestr, args.olderthan))
                return 1
            else:
                if not args.quiet:
                    print ("Image less than %d minutes old" %
                           (args.olderthan))
                return 0


def main():
    """Parse arguments, pick the subcommand and dispatch to it."""
    global USE_ENGINE

    parser = argparse.ArgumentParser(description="A Docker helper",
                                     usage="%s <subcommand> ..." %
                                     os.path.basename(sys.argv[0]))
    parser.add_argument("--engine", type=EngineEnum.argparse,
                        choices=list(EngineEnum),
                        help="specify which container engine to use")
    subparsers = parser.add_subparsers(title="subcommands", help=None)
    for cls in SubCommand.__subclasses__():
        cmd = cls()
        subp = subparsers.add_parser(cmd.name, help=cmd.__doc__)
        cmd.shared_args(subp)
        cmd.args(subp)
        subp.set_defaults(cmdobj=cmd)
    args, argv = parser.parse_known_args()
    if args.engine:
        USE_ENGINE = args.engine
    return args.cmdobj.run(args, argv)


if __name__ == "__main__":
    sys.exit(main())
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/docker/docker.py
#
# This script needs to be run on startup
# qemu -kernel ${KERNEL} -s -S
# and then:
#   gdb ${KERNEL}.vmlinux -x ${QEMU_SRC}/tests/guest-debug/test-gdbstub.py

import sys  # FIX: was missing, but the exception handler below uses sys.exc_info()

import gdb

# Running total of failed checks; the script's exit status.
failcount = 0


def report(cond, msg):
    "Report success/fail of test"
    global failcount
    if cond:
        print("PASS: %s" % (msg))
    else:
        print("FAIL: %s" % (msg))
        failcount += 1


def check_step():
    "Step an instruction, check it moved."
    start_pc = gdb.parse_and_eval('$pc')
    gdb.execute("si")
    end_pc = gdb.parse_and_eval('$pc')

    return not (start_pc == end_pc)


def check_break(sym_name):
    "Setup breakpoint, continue and check we stopped."
    sym, ok = gdb.lookup_symbol(sym_name)
    bp = gdb.Breakpoint(sym_name)

    gdb.execute("c")

    # hopefully we came back
    end_pc = gdb.parse_and_eval('$pc')
    print("%s == %s %d" % (end_pc, sym.value(), bp.hit_count))
    bp.delete()

    # can we test we hit bp?
    return end_pc == sym.value()


# We need to do hbreak manually as the python interface doesn't export it
def check_hbreak(sym_name):
    "Setup hardware breakpoint, continue and check we stopped."
    sym, ok = gdb.lookup_symbol(sym_name)
    gdb.execute("hbreak %s" % (sym_name))
    gdb.execute("c")

    # hopefully we came back
    end_pc = gdb.parse_and_eval('$pc')
    print("%s == %s" % (end_pc, sym.value()))

    if end_pc == sym.value():
        gdb.execute("d 1")
        return True
    else:
        return False


class WatchPoint(gdb.Breakpoint):
    """Watchpoint on a symbol's storage that reports each hit."""

    def get_wpstr(self, sym_name):
        "Setup sym and wp_str for given symbol."
        self.sym, ok = gdb.lookup_symbol(sym_name)
        wp_addr = gdb.parse_and_eval(sym_name).address
        self.wp_str = '*(%(type)s)(&%(address)s)' % dict(
            type = wp_addr.type, address = sym_name)

        return(self.wp_str)

    def __init__(self, sym_name, type):
        wp_str = self.get_wpstr(sym_name)
        super(WatchPoint, self).__init__(wp_str, gdb.BP_WATCHPOINT, type)

    def stop(self):
        end_pc = gdb.parse_and_eval('$pc')
        print("HIT WP @ %s" % (end_pc))
        return True


def do_one_watch(sym, wtype, text):
    """Set a watchpoint of the given type, continue, and report the hit."""
    wp = WatchPoint(sym, wtype)
    gdb.execute("c")
    report_str = "%s for %s (%s)" % (text, sym, wp.sym.value())

    if wp.hit_count > 0:
        report(True, report_str)
        wp.delete()
    else:
        report(False, report_str)


def check_watches(sym_name):
    "Watch a symbol for any access."

    # Should hit for any read
    do_one_watch(sym_name, gdb.WP_ACCESS, "awatch")

    # Again should hit for reads
    do_one_watch(sym_name, gdb.WP_READ, "rwatch")

    # Finally when it is written
    do_one_watch(sym_name, gdb.WP_WRITE, "watch")


class CatchBreakpoint(gdb.Breakpoint):
    """Breakpoint that records a test failure if it is ever reached."""

    def __init__(self, sym_name):
        super(CatchBreakpoint, self).__init__(sym_name)
        self.sym, ok = gdb.lookup_symbol(sym_name)

    def stop(self):
        end_pc = gdb.parse_and_eval('$pc')
        print("CB: %s == %s" % (end_pc, self.sym.value()))
        if end_pc == self.sym.value():
            report(False, "Hit final catchpoint")
        # Implicit None (falsy) return: execution continues after reporting.


def run_test():
    "Run through the tests one by one"

    print("Checking we can step the first few instructions")
    step_ok = 0
    for i in range(3):
        if check_step():
            step_ok += 1
    report(step_ok == 3, "single step in boot code")

    print("Checking HW breakpoint works")
    break_ok = check_hbreak("kernel_init")
    report(break_ok, "hbreak @ kernel_init")

    # Can't set this up until we are in the kernel proper
    # if we make it to run_init_process we've over-run and
    # one of the tests failed
    print("Setup catch-all for run_init_process")
    cbp = CatchBreakpoint("run_init_process")
    cpb2 = CatchBreakpoint("try_to_run_init_process")

    print("Checking Normal breakpoint works")
    break_ok = check_break("wait_for_completion")
    report(break_ok, "break @ wait_for_completion")

    print("Checking watchpoint works")
    check_watches("system_state")

#
# This runs as the script it sourced (via -x)
#
try:
    print("Connecting to remote")
    gdb.execute("target remote localhost:1234")

    # These are not very useful in scripts
    gdb.execute("set pagination off")
    gdb.execute("set confirm off")

    # Run the actual tests
    run_test()
except Exception:
    # FIX: this handler previously raised NameError because "sys" was never
    # imported, hiding the original error. Also narrowed from a bare except
    # so SystemExit/KeyboardInterrupt propagate untouched.
    print("GDB Exception: %s" % (sys.exc_info()[0]))
    failcount += 1
    import code
    code.InteractiveConsole(locals=globals()).interact()
    raise

# Finally kill the inferior and exit gdb with a count of failures
gdb.execute("kill")
exit(failcount)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/guest-debug/test-gdbstub.py
#!/usr/bin/env python3
#
# Run a gdbstub test case
#
# Copyright (c) 2019 Linaro
#
# Author: Alex Bennée <alex.bennee@linaro.org>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# SPDX-License-Identifier: GPL-2.0-or-later

import argparse
import subprocess
import shutil
import shlex
import os
from time import sleep
from tempfile import TemporaryDirectory


def get_args():
    """Build and parse the command line for the test runner."""
    parser = argparse.ArgumentParser(description="A gdbstub test runner")
    parser.add_argument("--qemu", help="Qemu binary for test",
                        required=True)
    parser.add_argument("--qargs", help="Qemu arguments for test")
    parser.add_argument("--binary", help="Binary to debug",
                        required=True)
    parser.add_argument("--test", help="GDB test script",
                        required=True)
    parser.add_argument("--gdb", help="The gdb binary to use",
                        default=None)
    parser.add_argument("--output", help="A file to redirect output to")

    return parser.parse_args()


def log(output, msg):
    """Write msg (plus newline) to output if given, else print to stdout.

    Flushes immediately so interleaved subprocess output stays ordered.
    """
    if output:
        output.write(msg + "\n")
        output.flush()
    else:
        print(msg)


if __name__ == '__main__':
    args = get_args()

    # Search for a gdb we can use
    if not args.gdb:
        args.gdb = shutil.which("gdb-multiarch")
    if not args.gdb:
        args.gdb = shutil.which("gdb")
    if not args.gdb:
        print("We need gdb to run the test")
        exit(-1)
    if args.output:
        output = open(args.output, "w")
    else:
        output = None

    socket_dir = TemporaryDirectory("qemu-gdbstub")
    socket_name = os.path.join(socket_dir.name, "gdbstub.socket")

    # Launch QEMU with binary; a system emulator gets a -gdb socket,
    # otherwise assume a linux-user binary that takes -g.
    if "system" in args.qemu:
        cmd = "%s %s %s -gdb unix:path=%s,server=on" % (args.qemu, args.qargs,
                                                        args.binary,
                                                        socket_name)
    else:
        cmd = "%s %s -g %s %s" % (args.qemu, args.qargs, socket_name,
                                  args.binary)

    log(output, "QEMU CMD: %s" % (cmd))
    inferior = subprocess.Popen(shlex.split(cmd))

    # Now launch gdb with our test and collect the result
    gdb_cmd = "%s %s" % (args.gdb, args.binary)
    # run quietly and ignore .gdbinit
    gdb_cmd += " -q -n -batch"
    # disable prompts in case of crash
    gdb_cmd += " -ex 'set confirm off'"
    # connect to remote
    gdb_cmd += " -ex 'target remote %s'" % (socket_name)
    # finally the test script itself
    gdb_cmd += " -x %s" % (args.test)

    sleep(1)
    log(output, "GDB CMD: %s" % (gdb_cmd))

    result = subprocess.call(gdb_cmd, shell=True, stdout=output)

    # A negative result is the result of an internal gdb failure like
    # a crash. We force a return of 0 so we don't fail the test on
    # account of broken external tools.
    if result < 0:
        print("GDB crashed? SKIPPING")
        # FIX: previously this path exited without reaping the guest,
        # leaking the QEMU process.
        inferior.kill()
        exit(0)

    try:
        inferior.wait(2)
    except subprocess.TimeoutExpired:
        print("GDB never connected? Killed guest")
        inferior.kill()

    # FIX: close the log file so buffered output is flushed to disk.
    if output:
        output.close()

    exit(result)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/guest-debug/run-test.py
#!/usr/bin/env python3 # NBD server - fault injection utility # # Configuration file syntax: # [inject-error "disconnect-neg1"] # event=neg1 # io=readwrite # when=before # # Note that Python's ConfigParser squashes together all sections with the same # name, so give each [inject-error] a unique name. # # inject-error options: # event - name of the trigger event # "neg1" - first part of negotiation struct # "export" - export struct # "neg2" - second part of negotiation struct # "request" - NBD request struct # "reply" - NBD reply struct # "data" - request/reply data # io - I/O direction that triggers this rule: # "read", "write", or "readwrite" # default: readwrite # when - after how many bytes to inject the fault # -1 - inject error after I/O # 0 - inject error before I/O # integer - inject error after integer bytes # "before" - alias for 0 # "after" - alias for -1 # default: before # # Currently the only error injection action is to terminate the server process. # This resets the TCP connection and thus forces the client to handle # unexpected connection termination. # # Other error injection actions could be added in the future. # # Copyright Red Hat, Inc. 2014 # # Authors: # Stefan Hajnoczi <stefanha@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. 
import sys
import socket
import struct
import collections
import configparser

FAKE_DISK_SIZE = 8 * 1024 * 1024 * 1024  # 8 GB

# Protocol constants
NBD_CMD_READ = 0
NBD_CMD_WRITE = 1
NBD_CMD_DISC = 2
NBD_REQUEST_MAGIC = 0x25609513
NBD_SIMPLE_REPLY_MAGIC = 0x67446698
NBD_PASSWD = 0x4e42444d41474943
NBD_OPTS_MAGIC = 0x49484156454F5054
NBD_CLIENT_MAGIC = 0x0000420281861253
NBD_OPT_EXPORT_NAME = 1 << 0

# Protocol structs
neg_classic_struct = struct.Struct('>QQQI124x')
neg1_struct = struct.Struct('>QQH')
export_tuple = collections.namedtuple('Export', 'reserved magic opt len')
export_struct = struct.Struct('>IQII')
neg2_struct = struct.Struct('>QH124x')
request_tuple = collections.namedtuple('Request',
                                       'magic type handle from_ len')
request_struct = struct.Struct('>IIQQI')
reply_struct = struct.Struct('>IIQ')


def err(msg):
    """Print msg to stderr and terminate with exit status 1."""
    sys.stderr.write(msg + '\n')
    sys.exit(1)


def recvall(sock, bufsize):
    """Receive exactly bufsize bytes, raising on premature disconnect."""
    received = 0
    chunks = []
    while received < bufsize:
        chunk = sock.recv(bufsize - received)
        if len(chunk) == 0:
            raise Exception('unexpected disconnect')
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)


class Rule(object):
    """A fault-injection rule.

    when == 0 means inject before the I/O, -1 means after the I/O, any
    other positive integer means after that many bytes have transferred.
    """

    def __init__(self, name, event, io, when):
        self.name = name
        self.event = event
        self.io = io
        self.when = when

    def match(self, event, io):
        """Return True if this rule applies to event in direction io."""
        if event != self.event:
            return False
        if io != self.io and self.io != 'readwrite':
            return False
        return True


class FaultInjectionSocket(object):
    """Socket wrapper that terminates the process when a rule fires."""

    def __init__(self, sock, rules):
        self.sock = sock
        self.rules = rules

    def check(self, event, io, bufsize=None):
        """Apply matching rules: may exit the process (the injected fault)
        or return a truncated transfer size; otherwise returns bufsize."""
        for rule in self.rules:
            if rule.match(event, io):
                if rule.when == 0 or bufsize is None:
                    print('Closing connection on rule match %s' % rule.name)
                    self.sock.close()
                    sys.stdout.flush()
                    sys.exit(0)
                if rule.when != -1:
                    return rule.when
        return bufsize

    def send(self, buf, event):
        bufsize = self.check(event, 'write', bufsize=len(buf))
        self.sock.sendall(buf[:bufsize])
        self.check(event, 'write')

    def recv(self, bufsize, event):
        bufsize = self.check(event, 'read', bufsize=bufsize)
        data = recvall(self.sock, bufsize)
        self.check(event, 'read')
        return data

    def close(self):
        self.sock.close()


def negotiate_classic(conn):
    """Send the oldstyle (no export name) negotiation blob."""
    buf = neg_classic_struct.pack(NBD_PASSWD, NBD_CLIENT_MAGIC,
                                  FAKE_DISK_SIZE, 0)
    conn.send(buf, event='neg-classic')


def negotiate_export(conn):
    """Perform newstyle negotiation with NBD_OPT_EXPORT_NAME."""
    # Send negotiation part 1
    buf = neg1_struct.pack(NBD_PASSWD, NBD_OPTS_MAGIC, 0)
    conn.send(buf, event='neg1')

    # Receive export option
    buf = conn.recv(export_struct.size, event='export')
    export = export_tuple._make(export_struct.unpack(buf))
    assert export.magic == NBD_OPTS_MAGIC
    assert export.opt == NBD_OPT_EXPORT_NAME
    name = conn.recv(export.len, event='export-name')

    # Send negotiation part 2
    buf = neg2_struct.pack(FAKE_DISK_SIZE, 0)
    conn.send(buf, event='neg2')


def negotiate(conn, use_export):
    '''Negotiate export with client'''
    if use_export:
        negotiate_export(conn)
    else:
        negotiate_classic(conn)


def read_request(conn):
    '''Parse NBD request from client'''
    buf = conn.recv(request_struct.size, event='request')
    req = request_tuple._make(request_struct.unpack(buf))
    assert req.magic == NBD_REQUEST_MAGIC
    return req


def write_reply(conn, error, handle):
    """Send a simple reply header for the request with the given handle."""
    buf = reply_struct.pack(NBD_SIMPLE_REPLY_MAGIC, error, handle)
    conn.send(buf, event='reply')


def handle_connection(conn, use_export):
    """Serve one client: negotiate, then loop over requests until
    disconnect."""
    negotiate(conn, use_export)
    while True:
        req = read_request(conn)
        if req.type == NBD_CMD_READ:
            write_reply(conn, 0, req.handle)
            conn.send(b'\0' * req.len, event='data')
        elif req.type == NBD_CMD_WRITE:
            _ = conn.recv(req.len, event='data')
            write_reply(conn, 0, req.handle)
        elif req.type == NBD_CMD_DISC:
            break
        else:
            print('unrecognized command type %#02x' % req.type)
            break
    conn.close()


def run_server(sock, rules, use_export):
    """Accept clients forever, wrapping each in a FaultInjectionSocket."""
    while True:
        conn, _ = sock.accept()
        handle_connection(FaultInjectionSocket(conn, rules), use_export)


def parse_inject_error(name, options):
    """Build a Rule from an [inject-error] config section; err() on bad
    values."""
    if 'event' not in options:
        err('missing \"event\" option in %s' % name)
    event = options['event']
    # FIX: 'export-name' is fired while reading the client's export name
    # (see negotiate_export()), so it must be an accepted trigger here;
    # previously configs using it were rejected.
    if event not in ('neg-classic', 'neg1', 'export', 'export-name',
                     'neg2', 'request', 'reply', 'data'):
        err('invalid \"event\" option value \"%s\" in %s' % (event, name))
    io = options.get('io', 'readwrite')
    if io not in ('read', 'write', 'readwrite'):
        err('invalid \"io\" option value \"%s\" in %s' % (io, name))
    when = options.get('when', 'before')
    try:
        when = int(when)
    except ValueError:
        if when == 'before':
            when = 0
        elif when == 'after':
            when = -1
        else:
            err('invalid \"when\" option value \"%s\" in %s' % (when, name))
    return Rule(name, event, io, when)


def parse_config(config):
    """Turn every [inject-error*] section into a Rule."""
    rules = []
    for name in config.sections():
        if name.startswith('inject-error'):
            options = dict(config.items(name))
            rules.append(parse_inject_error(name, options))
        else:
            err('invalid config section name: %s' % name)
    return rules


def load_rules(filename):
    """Read the config file and return the list of Rules."""
    config = configparser.RawConfigParser()
    with open(filename, 'rt') as f:
        config.read_file(f, filename)
    return parse_config(config)


def open_socket(path):
    '''Open a TCP or UNIX domain listen socket'''
    if ':' in path:
        host, port = path.split(':', 1)
        sock = socket.socket()
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((host, int(port)))

        # If given port was 0 the final port number is now available
        path = '%s:%d' % sock.getsockname()
    else:
        sock = socket.socket(socket.AF_UNIX)
        sock.bind(path)
    sock.listen(0)
    print('Listening on %s' % path)
    sys.stdout.flush()  # another process may be waiting, show message now
    return sock


def usage(args):
    sys.stderr.write('usage: %s [--classic-negotiation] <tcp-port>|<unix-path> <config-file>\n' % args[0])
    sys.stderr.write('Run an fault injector NBD server with rules defined in a config file.\n')
    sys.exit(1)


def main(args):
    if len(args) != 3 and len(args) != 4:
        usage(args)
    use_export = True
    if args[1] == '--classic-negotiation':
        use_export = False
    elif len(args) == 4:
        usage(args)

    sock = open_socket(args[1 if use_export else 2])
    rules = load_rules(args[2 if use_export else 3])
    run_server(sock, rules, use_export)
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/nbd-fault-injector.py
# TestEnv class to manage test environment variables.
#
# Copyright (c) 2020-2021 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import sys
import tempfile
from pathlib import Path
import shutil
import collections
import random
import subprocess
import glob
from typing import List, Dict, Any, Optional, ContextManager

# Default gdbserver listen address, used when --gdb is requested but
# GDB_OPTIONS is unset or empty.
DEF_GDB_OPTIONS = 'localhost:12345'


def isxfile(path: str) -> bool:
    """Return True if path is a regular file and executable."""
    return os.path.isfile(path) and os.access(path, os.X_OK)


def get_default_machine(qemu_prog: str) -> str:
    """Return the default machine name from `qemu -machine help`.

    Resolves an alias to its canonical name when one exists; returns ''
    if the binary reports no default machine.
    """
    outp = subprocess.run([qemu_prog, '-machine', 'help'], check=True,
                          universal_newlines=True,
                          stdout=subprocess.PIPE).stdout

    machines = outp.split('\n')
    try:
        default_machine = next(m for m in machines if m.endswith(' (default)'))
    except StopIteration:
        return ''
    default_machine = default_machine.split(' ', 1)[0]

    alias_suf = ' (alias of {})'.format(default_machine)
    alias = next((m for m in machines if m.endswith(alias_suf)), None)
    if alias is not None:
        default_machine = alias.split(' ', 1)[0]

    return default_machine


class TestEnv(ContextManager['TestEnv']):
    """
    Manage system environment for running tests

    The following variables are supported/provided. They are represented
    by lower-cased TestEnv attributes.
    """

    # We store environment variables as instance attributes, and there are a
    # lot of them. Silence pylint:
    # pylint: disable=too-many-instance-attributes

    # Names exported to child processes by get_env(); each maps to the
    # lower-cased instance attribute of the same name (if set).
    env_variables = ['PYTHONPATH', 'TEST_DIR', 'SOCK_DIR', 'SAMPLE_IMG_DIR',
                     'OUTPUT_DIR', 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG',
                     'QEMU_IO_PROG', 'QEMU_NBD_PROG', 'QSD_PROG',
                     'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS', 'QEMU_IO_OPTIONS',
                     'QEMU_IO_OPTIONS_NO_FMT', 'QEMU_NBD_OPTIONS', 'IMGOPTS',
                     'IMGFMT', 'IMGPROTO', 'AIOMODE', 'CACHEMODE',
                     'VALGRIND_QEMU', 'CACHEMODE_IS_DEFAULT', 'IMGFMT_GENERIC',
                     'IMGOPTSSYNTAX', 'IMGKEYSECRET', 'QEMU_DEFAULT_MACHINE',
                     'MALLOC_PERTURB_', 'GDB_OPTIONS', 'PRINT_QEMU']

    def prepare_subprocess(self, args: List[str]) -> Dict[str, str]:
        """Return the environment for running the test in args.

        NOTE: mutates *args* in place — appends '-d' in debug mode and
        prefixes the python interpreter for python3-shebang scripts;
        callers appear to rely on this side effect (TODO confirm).
        """
        if self.debug:
            args.append('-d')

        with open(args[0], encoding="utf-8") as f:
            try:
                if f.readline().rstrip() == '#!/usr/bin/env python3':
                    args.insert(0, self.python)
            except UnicodeDecodeError:  # binary test? for future.
                pass

        os_env = os.environ.copy()
        os_env.update(self.get_env())
        return os_env

    def get_env(self) -> Dict[str, str]:
        """Collect all set env_variables attributes into a dict."""
        env = {}
        for v in self.env_variables:
            val = getattr(self, v.lower(), None)
            if val is not None:
                env[v] = val

        return env

    def init_directories(self) -> None:
        """Init directory variables:
             PYTHONPATH
             TEST_DIR
             SOCK_DIR
             SAMPLE_IMG_DIR
             OUTPUT_DIR
        """

        # Path where qemu goodies live in this source tree.
        qemu_srctree_path = Path(__file__, '../../../python').resolve()

        self.pythonpath = os.pathsep.join(filter(None, (
            self.source_iotests,
            str(qemu_srctree_path),
            os.getenv('PYTHONPATH'),
        )))

        self.test_dir = os.getenv('TEST_DIR',
                                  os.path.join(os.getcwd(), 'scratch'))
        Path(self.test_dir).mkdir(parents=True, exist_ok=True)

        try:
            self.sock_dir = os.environ['SOCK_DIR']
            self.tmp_sock_dir = False
            Path(self.sock_dir).mkdir(parents=True, exist_ok=True)
        except KeyError:
            # No SOCK_DIR given: use a temp dir and remove it in close().
            self.sock_dir = tempfile.mkdtemp()
            self.tmp_sock_dir = True

        self.sample_img_dir = os.getenv('SAMPLE_IMG_DIR',
                                        os.path.join(self.source_iotests,
                                                     'sample_images'))

        self.output_dir = os.getcwd()  # OUTPUT_DIR

    def init_binaries(self) -> None:
        """Init binary path variables:
             PYTHON (for bash tests)
             QEMU_PROG, QEMU_IMG_PROG, QEMU_IO_PROG, QEMU_NBD_PROG, QSD_PROG
        """
        self.python = sys.executable

        def root(*names: str) -> str:
            return os.path.join(self.build_root, *names)

        arch = os.uname().machine
        if 'ppc64' in arch:
            arch = 'ppc64'

        self.qemu_prog = os.getenv('QEMU_PROG', root(f'qemu-system-{arch}'))
        if not os.path.exists(self.qemu_prog):
            # Fall back to any executable qemu-system-* in the build root.
            pattern = root('qemu-system-*')
            try:
                progs = sorted(glob.iglob(pattern))
                self.qemu_prog = next(p for p in progs if isxfile(p))
            except StopIteration:
                sys.exit("Not found any Qemu executable binary by pattern "
                         f"'{pattern}'")

        self.qemu_img_prog = os.getenv('QEMU_IMG_PROG', root('qemu-img'))
        self.qemu_io_prog = os.getenv('QEMU_IO_PROG', root('qemu-io'))
        self.qemu_nbd_prog = os.getenv('QEMU_NBD_PROG', root('qemu-nbd'))
        self.qsd_prog = os.getenv('QSD_PROG', root('storage-daemon',
                                                   'qemu-storage-daemon'))

        for b in [self.qemu_img_prog, self.qemu_io_prog, self.qemu_nbd_prog,
                  self.qemu_prog, self.qsd_prog]:
            if not os.path.exists(b):
                sys.exit('No such file: ' + b)
            if not isxfile(b):
                sys.exit('Not executable: ' + b)

    def __init__(self, imgfmt: str, imgproto: str, aiomode: str,
                 cachemode: Optional[str] = None,
                 imgopts: Optional[str] = None,
                 misalign: bool = False,
                 debug: bool = False,
                 valgrind: bool = False,
                 gdb: bool = False,
                 qprint: bool = False) -> None:
        self.imgfmt = imgfmt
        self.imgproto = imgproto
        self.aiomode = aiomode
        self.imgopts = imgopts
        self.misalign = misalign
        self.debug = debug

        if qprint:
            self.print_qemu = 'y'

        if gdb:
            self.gdb_options = os.getenv('GDB_OPTIONS', DEF_GDB_OPTIONS)
            if not self.gdb_options:
                # cover the case 'export GDB_OPTIONS='
                self.gdb_options = DEF_GDB_OPTIONS
        elif 'GDB_OPTIONS' in os.environ:
            # to not propagate it in prepare_subprocess()
            del os.environ['GDB_OPTIONS']

        if valgrind:
            self.valgrind_qemu = 'y'

        if cachemode is None:
            self.cachemode_is_default = 'true'
            self.cachemode = 'writeback'
        else:
            self.cachemode_is_default = 'false'
            self.cachemode = cachemode

        # Initialize generic paths: build_root, build_iotests, source_iotests,
        # which are needed to initialize some environment variables. They are
        # used by init_*() functions as well.

        if os.path.islink(sys.argv[0]):
            # called from the build tree
            self.source_iotests = os.path.dirname(os.readlink(sys.argv[0]))
            self.build_iotests = os.path.dirname(os.path.abspath(sys.argv[0]))
        else:
            # called from the source tree
            self.source_iotests = os.getcwd()
            self.build_iotests = self.source_iotests

        self.build_root = os.path.join(self.build_iotests, '..', '..')

        self.init_directories()
        self.init_binaries()

        self.malloc_perturb_ = os.getenv('MALLOC_PERTURB_',
                                         str(random.randrange(1, 255)))

        # QEMU_OPTIONS
        self.qemu_options = '-nodefaults -display none -accel qtest'
        # Architectures whose binaries have no default machine; pick one.
        machine_map = (
            ('arm', 'virt'),
            ('aarch64', 'virt'),
            ('avr', 'mega2560'),
            ('m68k', 'virt'),
            ('rx', 'gdbsim-r5f562n8'),
            ('tricore', 'tricore_testboard')
        )
        for suffix, machine in machine_map:
            if self.qemu_prog.endswith(f'qemu-system-{suffix}'):
                self.qemu_options += f' -machine {machine}'

        # QEMU_DEFAULT_MACHINE
        self.qemu_default_machine = get_default_machine(self.qemu_prog)

        self.qemu_img_options = os.getenv('QEMU_IMG_OPTIONS')
        self.qemu_nbd_options = os.getenv('QEMU_NBD_OPTIONS')

        is_generic = self.imgfmt not in ['bochs', 'cloop', 'dmg']
        self.imgfmt_generic = 'true' if is_generic else 'false'

        self.qemu_io_options = f'--cache {self.cachemode} --aio {self.aiomode}'
        if self.misalign:
            self.qemu_io_options += ' --misalign'

        self.qemu_io_options_no_fmt = self.qemu_io_options

        if self.imgfmt == 'luks':
            self.imgoptssyntax = 'true'
            self.imgkeysecret = '123456'
            if not self.imgopts:
                self.imgopts = 'iter-time=10'
            elif 'iter-time=' not in self.imgopts:
                self.imgopts += ',iter-time=10'
        else:
            self.imgoptssyntax = 'false'
            self.qemu_io_options += ' -f ' + self.imgfmt

        if self.imgfmt == 'vmdk':
            if not self.imgopts:
                self.imgopts = 'zeroed_grain=on'
            elif 'zeroed_grain=' not in self.imgopts:
                self.imgopts += ',zeroed_grain=on'

    def close(self) -> None:
        """Remove the temporary socket dir if we created one."""
        if self.tmp_sock_dir:
            shutil.rmtree(self.sock_dir)

    def __enter__(self) -> 'TestEnv':
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.close()

    def print_env(self) -> None:
        """Print a summary of the effective test environment."""
        template = """\
QEMU -- "{QEMU_PROG}" {QEMU_OPTIONS}
QEMU_IMG -- "{QEMU_IMG_PROG}" {QEMU_IMG_OPTIONS}
QEMU_IO -- "{QEMU_IO_PROG}" {QEMU_IO_OPTIONS}
QEMU_NBD -- "{QEMU_NBD_PROG}" {QEMU_NBD_OPTIONS}
IMGFMT -- {IMGFMT}{imgopts}
IMGPROTO -- {IMGPROTO}
PLATFORM -- {platform}
TEST_DIR -- {TEST_DIR}
SOCK_DIR -- {SOCK_DIR}
GDB_OPTIONS -- {GDB_OPTIONS}
VALGRIND_QEMU -- {VALGRIND_QEMU}
PRINT_QEMU_OUTPUT -- {PRINT_QEMU}
"""

        # defaultdict(str): unset variables render as empty strings.
        args = collections.defaultdict(str, self.get_env())

        if 'IMGOPTS' in args:
            args['imgopts'] = f" ({args['IMGOPTS']})"

        u = os.uname()
        args['platform'] = f'{u.sysname}/{u.machine} {u.nodename} {u.release}'

        print(template.format_map(args))
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/testenv.py
#!/usr/bin/env python3 # # Tool to manipulate QED image files # # Copyright (C) 2010 IBM, Corp. # # Authors: # Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. import sys import struct import random import optparse # This can be used as a module __all__ = ['QED_F_NEED_CHECK', 'QED'] QED_F_NEED_CHECK = 0x02 header_fmt = '<IIIIQQQQQII' header_size = struct.calcsize(header_fmt) field_names = ['magic', 'cluster_size', 'table_size', 'header_size', 'features', 'compat_features', 'autoclear_features', 'l1_table_offset', 'image_size', 'backing_filename_offset', 'backing_filename_size'] table_elem_fmt = '<Q' table_elem_size = struct.calcsize(table_elem_fmt) def err(msg): sys.stderr.write(msg + '\n') sys.exit(1) def unpack_header(s): fields = struct.unpack(header_fmt, s) return dict((field_names[idx], val) for idx, val in enumerate(fields)) def pack_header(header): fields = tuple(header[x] for x in field_names) return struct.pack(header_fmt, *fields) def unpack_table_elem(s): return struct.unpack(table_elem_fmt, s)[0] def pack_table_elem(elem): return struct.pack(table_elem_fmt, elem) class QED(object): def __init__(self, f): self.f = f self.f.seek(0, 2) self.filesize = f.tell() self.load_header() self.load_l1_table() def raw_pread(self, offset, size): self.f.seek(offset) return self.f.read(size) def raw_pwrite(self, offset, data): self.f.seek(offset) return self.f.write(data) def load_header(self): self.header = unpack_header(self.raw_pread(0, header_size)) def store_header(self): self.raw_pwrite(0, pack_header(self.header)) def read_table(self, offset): size = self.header['table_size'] * self.header['cluster_size'] s = self.raw_pread(offset, size) table = [unpack_table_elem(s[i:i + table_elem_size]) for i in xrange(0, size, table_elem_size)] return table def load_l1_table(self): self.l1_table = self.read_table(self.header['l1_table_offset']) 
self.table_nelems = self.header['table_size'] * self.header['cluster_size'] // table_elem_size def write_table(self, offset, table): s = ''.join(pack_table_elem(x) for x in table) self.raw_pwrite(offset, s) def random_table_item(table): vals = [(index, offset) for index, offset in enumerate(table) if offset != 0] if not vals: err('cannot pick random item because table is empty') return random.choice(vals) def corrupt_table_duplicate(table): '''Corrupt a table by introducing a duplicate offset''' victim_idx, victim_val = random_table_item(table) unique_vals = set(table) if len(unique_vals) == 1: err('no duplication corruption possible in table') dup_val = random.choice(list(unique_vals.difference([victim_val]))) table[victim_idx] = dup_val def corrupt_table_invalidate(qed, table): '''Corrupt a table by introducing an invalid offset''' index, _ = random_table_item(table) table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 * 1024 * 1024) def cmd_show(qed, *args): '''show [header|l1|l2 <offset>]- Show header or l1/l2 tables''' if not args or args[0] == 'header': print(qed.header) elif args[0] == 'l1': print(qed.l1_table) elif len(args) == 2 and args[0] == 'l2': offset = int(args[1]) print(qed.read_table(offset)) else: err('unrecognized sub-command') def cmd_duplicate(qed, table_level): '''duplicate l1|l2 - Duplicate a random table element''' if table_level == 'l1': offset = qed.header['l1_table_offset'] table = qed.l1_table elif table_level == 'l2': _, offset = random_table_item(qed.l1_table) table = qed.read_table(offset) else: err('unrecognized sub-command') corrupt_table_duplicate(table) qed.write_table(offset, table) def cmd_invalidate(qed, table_level): '''invalidate l1|l2 - Plant an invalid table element at random''' if table_level == 'l1': offset = qed.header['l1_table_offset'] table = qed.l1_table elif table_level == 'l2': _, offset = random_table_item(qed.l1_table) table = qed.read_table(offset) else: err('unrecognized sub-command') 
corrupt_table_invalidate(qed, table) qed.write_table(offset, table) def cmd_need_check(qed, *args): '''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit''' if not args: print(bool(qed.header['features'] & QED_F_NEED_CHECK)) return if args[0] == 'on': qed.header['features'] |= QED_F_NEED_CHECK elif args[0] == 'off': qed.header['features'] &= ~QED_F_NEED_CHECK else: err('unrecognized sub-command') qed.store_header() def cmd_zero_cluster(qed, pos, *args): '''zero-cluster <pos> [<n>] - Zero data clusters''' pos, n = int(pos), 1 if args: if len(args) != 1: err('expected one argument') n = int(args[0]) for i in xrange(n): l1_index = pos // qed.header['cluster_size'] // len(qed.l1_table) if qed.l1_table[l1_index] == 0: err('no l2 table allocated') l2_offset = qed.l1_table[l1_index] l2_table = qed.read_table(l2_offset) l2_index = (pos // qed.header['cluster_size']) % len(qed.l1_table) l2_table[l2_index] = 1 # zero the data cluster qed.write_table(l2_offset, l2_table) pos += qed.header['cluster_size'] def cmd_copy_metadata(qed, outfile): '''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)''' out = open(outfile, 'wb') # Match file size out.seek(qed.filesize - 1) out.write('\0') # Copy header clusters out.seek(0) header_size_bytes = qed.header['header_size'] * qed.header['cluster_size'] out.write(qed.raw_pread(0, header_size_bytes)) # Copy L1 table out.seek(qed.header['l1_table_offset']) s = ''.join(pack_table_elem(x) for x in qed.l1_table) out.write(s) # Copy L2 tables for l2_offset in qed.l1_table: if l2_offset == 0: continue l2_table = qed.read_table(l2_offset) out.seek(l2_offset) s = ''.join(pack_table_elem(x) for x in l2_table) out.write(s) out.close() def usage(): print('Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0]) print() print('Supported commands:') for cmd in sorted(x for x in globals() if x.startswith('cmd_')): print(globals()[cmd].__doc__) sys.exit(1) def main(): if len(sys.argv) < 3: usage() filename, cmd 
= sys.argv[1:3] cmd = 'cmd_' + cmd.replace('-', '_') if cmd not in globals(): usage() qed = QED(open(filename, 'r+b')) try: globals()[cmd](qed, *sys.argv[3:]) except TypeError as e: sys.stderr.write(globals()[cmd].__doc__ + '\n') sys.exit(1) if __name__ == '__main__': main()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/qed.py
# Common utilities and Python wrappers for qemu-iotests # # Copyright (C) 2012 IBM Corp. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import atexit import bz2 from collections import OrderedDict import faulthandler import json import logging import os import re import shutil import signal import struct import subprocess import sys import time from typing import (Any, Callable, Dict, Iterable, List, Optional, Sequence, TextIO, Tuple, Type, TypeVar) import unittest from contextlib import contextmanager from qemu.machine import qtest from qemu.qmp import QMPMessage # Use this logger for logging messages directly from the iotests module logger = logging.getLogger('qemu.iotests') logger.addHandler(logging.NullHandler()) # Use this logger for messages that ought to be used for diff output. test_logger = logging.getLogger('qemu.iotests.diff_io') faulthandler.enable() # This will not work if arguments contain spaces but is necessary if we # want to support the override options that ./check supports. 
qemu_img_args = [os.environ.get('QEMU_IMG_PROG', 'qemu-img')] if os.environ.get('QEMU_IMG_OPTIONS'): qemu_img_args += os.environ['QEMU_IMG_OPTIONS'].strip().split(' ') qemu_io_args = [os.environ.get('QEMU_IO_PROG', 'qemu-io')] if os.environ.get('QEMU_IO_OPTIONS'): qemu_io_args += os.environ['QEMU_IO_OPTIONS'].strip().split(' ') qemu_io_args_no_fmt = [os.environ.get('QEMU_IO_PROG', 'qemu-io')] if os.environ.get('QEMU_IO_OPTIONS_NO_FMT'): qemu_io_args_no_fmt += \ os.environ['QEMU_IO_OPTIONS_NO_FMT'].strip().split(' ') qemu_nbd_prog = os.environ.get('QEMU_NBD_PROG', 'qemu-nbd') qemu_nbd_args = [qemu_nbd_prog] if os.environ.get('QEMU_NBD_OPTIONS'): qemu_nbd_args += os.environ['QEMU_NBD_OPTIONS'].strip().split(' ') qemu_prog = os.environ.get('QEMU_PROG', 'qemu') qemu_opts = os.environ.get('QEMU_OPTIONS', '').strip().split(' ') gdb_qemu_env = os.environ.get('GDB_OPTIONS') qemu_gdb = [] if gdb_qemu_env: qemu_gdb = ['gdbserver'] + gdb_qemu_env.strip().split(' ') qemu_print = os.environ.get('PRINT_QEMU', False) imgfmt = os.environ.get('IMGFMT', 'raw') imgproto = os.environ.get('IMGPROTO', 'file') output_dir = os.environ.get('OUTPUT_DIR', '.') try: test_dir = os.environ['TEST_DIR'] sock_dir = os.environ['SOCK_DIR'] cachemode = os.environ['CACHEMODE'] aiomode = os.environ['AIOMODE'] qemu_default_machine = os.environ['QEMU_DEFAULT_MACHINE'] except KeyError: # We are using these variables as proxies to indicate that we're # not being run via "check". There may be other things set up by # "check" that individual test cases rely on. 
sys.stderr.write('Please run this test via the "check" script\n') sys.exit(os.EX_USAGE) qemu_valgrind = [] if os.environ.get('VALGRIND_QEMU') == "y" and \ os.environ.get('NO_VALGRIND') != "y": valgrind_logfile = "--log-file=" + test_dir # %p allows to put the valgrind process PID, since # we don't know it a priori (subprocess.Popen is # not yet invoked) valgrind_logfile += "/%p.valgrind" qemu_valgrind = ['valgrind', valgrind_logfile, '--error-exitcode=99'] luks_default_secret_object = 'secret,id=keysec0,data=' + \ os.environ.get('IMGKEYSECRET', '') luks_default_key_secret_opt = 'key-secret=keysec0' sample_img_dir = os.environ['SAMPLE_IMG_DIR'] def unarchive_sample_image(sample, fname): sample_fname = os.path.join(sample_img_dir, sample + '.bz2') with bz2.open(sample_fname) as f_in, open(fname, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) def qemu_tool_pipe_and_status(tool: str, args: Sequence[str], connect_stderr: bool = True) -> Tuple[str, int]: """ Run a tool and return both its output and its exit code """ stderr = subprocess.STDOUT if connect_stderr else None with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=stderr, universal_newlines=True) as subp: output = subp.communicate()[0] if subp.returncode < 0: cmd = ' '.join(args) sys.stderr.write(f'{tool} received signal \ {-subp.returncode}: {cmd}\n') return (output, subp.returncode) def qemu_img_pipe_and_status(*args: str) -> Tuple[str, int]: """ Run qemu-img and return both its output and its exit code """ full_args = qemu_img_args + list(args) return qemu_tool_pipe_and_status('qemu-img', full_args) def qemu_img(*args: str) -> int: '''Run qemu-img and return the exit code''' return qemu_img_pipe_and_status(*args)[1] def ordered_qmp(qmsg, conv_keys=True): # Dictionaries are not ordered prior to 3.6, therefore: if isinstance(qmsg, list): return [ordered_qmp(atom) for atom in qmsg] if isinstance(qmsg, dict): od = OrderedDict() for k, v in sorted(qmsg.items()): if conv_keys: k = k.replace('_', '-') od[k] = 
ordered_qmp(v, conv_keys=False) return od return qmsg def qemu_img_create(*args): args = list(args) # default luks support if '-f' in args and args[args.index('-f') + 1] == 'luks': if '-o' in args: i = args.index('-o') if 'key-secret' not in args[i + 1]: args[i + 1].append(luks_default_key_secret_opt) args.insert(i + 2, '--object') args.insert(i + 3, luks_default_secret_object) else: args = ['-o', luks_default_key_secret_opt, '--object', luks_default_secret_object] + args args.insert(0, 'create') return qemu_img(*args) def qemu_img_measure(*args): return json.loads(qemu_img_pipe("measure", "--output", "json", *args)) def qemu_img_check(*args): return json.loads(qemu_img_pipe("check", "--output", "json", *args)) def qemu_img_verbose(*args): '''Run qemu-img without suppressing its output and return the exit code''' exitcode = subprocess.call(qemu_img_args + list(args)) if exitcode < 0: sys.stderr.write('qemu-img received signal %i: %s\n' % (-exitcode, ' '.join(qemu_img_args + list(args)))) return exitcode def qemu_img_pipe(*args: str) -> str: '''Run qemu-img and return its output''' return qemu_img_pipe_and_status(*args)[0] def qemu_img_log(*args): result = qemu_img_pipe(*args) log(result, filters=[filter_testfiles]) return result def img_info_log(filename, filter_path=None, imgopts=False, extra_args=()): args = ['info'] if imgopts: args.append('--image-opts') else: args += ['-f', imgfmt] args += extra_args args.append(filename) output = qemu_img_pipe(*args) if not filter_path: filter_path = filename log(filter_img_info(output, filter_path)) def qemu_io(*args): '''Run qemu-io and return the stdout data''' args = qemu_io_args + list(args) return qemu_tool_pipe_and_status('qemu-io', args)[0] def qemu_io_log(*args): result = qemu_io(*args) log(result, filters=[filter_testfiles, filter_qemu_io]) return result def qemu_io_silent(*args): '''Run qemu-io and return the exit code, suppressing stdout''' if '-f' in args or '--image-opts' in args: default_args = 
qemu_io_args_no_fmt
    else:
        default_args = qemu_io_args

    args = default_args + list(args)
    result = subprocess.run(args, stdout=subprocess.DEVNULL, check=False)
    if result.returncode < 0:
        sys.stderr.write('qemu-io received signal %i: %s\n' %
                         (-result.returncode, ' '.join(args)))
    return result.returncode

def qemu_io_silent_check(*args):
    '''Run qemu-io and return True if the subprocess returned 0'''
    args = qemu_io_args + list(args)
    result = subprocess.run(args, stdout=subprocess.DEVNULL,
                            stderr=subprocess.STDOUT, check=False)
    return result.returncode == 0

class QemuIoInteractive:
    # Wraps a long-lived interactive qemu-io process; commands are written
    # to its stdin and replies are read back up to the next prompt.
    def __init__(self, *args):
        self.args = qemu_io_args_no_fmt + list(args)
        # We need to keep the Popen object around, and not
        # close it immediately. Therefore, disable the pylint check:
        # pylint: disable=consider-using-with
        self._p = subprocess.Popen(self.args, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
        # The prompt 'qemu-io> ' is exactly 9 characters long.
        out = self._p.stdout.read(9)
        if out != 'qemu-io> ':
            # Most probably qemu-io just failed to start.
            # Let's collect the whole output and exit.
out += self._p.stdout.read() self._p.wait(timeout=1) raise ValueError(out) def close(self): self._p.communicate('q\n') def _read_output(self): pattern = 'qemu-io> ' n = len(pattern) pos = 0 s = [] while pos != n: c = self._p.stdout.read(1) # check unexpected EOF assert c != '' s.append(c) if c == pattern[pos]: pos += 1 else: pos = 0 return ''.join(s[:-n]) def cmd(self, cmd): # quit command is in close(), '\n' is added automatically assert '\n' not in cmd cmd = cmd.strip() assert cmd not in ('q', 'quit') self._p.stdin.write(cmd + '\n') self._p.stdin.flush() return self._read_output() def qemu_nbd(*args): '''Run qemu-nbd in daemon mode and return the parent's exit code''' return subprocess.call(qemu_nbd_args + ['--fork'] + list(args)) def qemu_nbd_early_pipe(*args: str) -> Tuple[int, str]: '''Run qemu-nbd in daemon mode and return both the parent's exit code and its output in case of an error''' full_args = qemu_nbd_args + ['--fork'] + list(args) output, returncode = qemu_tool_pipe_and_status('qemu-nbd', full_args, connect_stderr=False) return returncode, output if returncode else '' def qemu_nbd_list_log(*args: str) -> str: '''Run qemu-nbd to list remote exports''' full_args = [qemu_nbd_prog, '-L'] + list(args) output, _ = qemu_tool_pipe_and_status('qemu-nbd', full_args) log(output, filters=[filter_testfiles, filter_nbd_exports]) return output @contextmanager def qemu_nbd_popen(*args): '''Context manager running qemu-nbd within the context''' pid_file = file_path("qemu_nbd_popen-nbd-pid-file") assert not os.path.exists(pid_file) cmd = list(qemu_nbd_args) cmd.extend(('--persistent', '--pid-file', pid_file)) cmd.extend(args) log('Start NBD server') with subprocess.Popen(cmd) as p: try: while not os.path.exists(pid_file): if p.poll() is not None: raise RuntimeError( "qemu-nbd terminated with exit code {}: {}" .format(p.returncode, ' '.join(cmd))) time.sleep(0.01) yield finally: if os.path.exists(pid_file): os.remove(pid_file) log('Kill NBD server') p.kill() p.wait() 
def compare_images(img1, img2, fmt1=imgfmt, fmt2=imgfmt): '''Return True if two image files are identical''' return qemu_img('compare', '-f', fmt1, '-F', fmt2, img1, img2) == 0 def create_image(name, size): '''Create a fully-allocated raw image with sector markers''' with open(name, 'wb') as file: i = 0 while i < size: sector = struct.pack('>l504xl', i // 512, i // 512) file.write(sector) i = i + 512 def image_size(img): '''Return image's virtual size''' r = qemu_img_pipe('info', '--output=json', '-f', imgfmt, img) return json.loads(r)['virtual-size'] def is_str(val): return isinstance(val, str) test_dir_re = re.compile(r"%s" % test_dir) def filter_test_dir(msg): return test_dir_re.sub("TEST_DIR", msg) win32_re = re.compile(r"\r") def filter_win32(msg): return win32_re.sub("", msg) qemu_io_re = re.compile(r"[0-9]* ops; [0-9\/:. sec]* " r"\([0-9\/.inf]* [EPTGMKiBbytes]*\/sec " r"and [0-9\/.inf]* ops\/sec\)") def filter_qemu_io(msg): msg = filter_win32(msg) return qemu_io_re.sub("X ops; XX:XX:XX.X " "(XXX YYY/sec and XXX ops/sec)", msg) chown_re = re.compile(r"chown [0-9]+:[0-9]+") def filter_chown(msg): return chown_re.sub("chown UID:GID", msg) def filter_qmp_event(event): '''Filter a QMP event dict''' event = dict(event) if 'timestamp' in event: event['timestamp']['seconds'] = 'SECS' event['timestamp']['microseconds'] = 'USECS' return event def filter_qmp(qmsg, filter_fn): '''Given a string filter, filter a QMP object's values. 
filter_fn takes a (key, value) pair.''' # Iterate through either lists or dicts; if isinstance(qmsg, list): items = enumerate(qmsg) else: items = qmsg.items() for k, v in items: if isinstance(v, (dict, list)): qmsg[k] = filter_qmp(v, filter_fn) else: qmsg[k] = filter_fn(k, v) return qmsg def filter_testfiles(msg): pref1 = os.path.join(test_dir, "%s-" % (os.getpid())) pref2 = os.path.join(sock_dir, "%s-" % (os.getpid())) return msg.replace(pref1, 'TEST_DIR/PID-').replace(pref2, 'SOCK_DIR/PID-') def filter_qmp_testfiles(qmsg): def _filter(_key, value): if is_str(value): return filter_testfiles(value) return value return filter_qmp(qmsg, _filter) def filter_virtio_scsi(output: str) -> str: return re.sub(r'(virtio-scsi)-(ccw|pci)', r'\1', output) def filter_qmp_virtio_scsi(qmsg): def _filter(_key, value): if is_str(value): return filter_virtio_scsi(value) return value return filter_qmp(qmsg, _filter) def filter_generated_node_ids(msg): return re.sub("#block[0-9]+", "NODE_NAME", msg) def filter_img_info(output, filename): lines = [] for line in output.split('\n'): if 'disk size' in line or 'actual-size' in line: continue line = line.replace(filename, 'TEST_IMG') line = filter_testfiles(line) line = line.replace(imgfmt, 'IMGFMT') line = re.sub('iters: [0-9]+', 'iters: XXX', line) line = re.sub('uuid: [-a-f0-9]+', 'uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', line) line = re.sub('cid: [0-9]+', 'cid: XXXXXXXXXX', line) lines.append(line) return '\n'.join(lines) def filter_imgfmt(msg): return msg.replace(imgfmt, 'IMGFMT') def filter_qmp_imgfmt(qmsg): def _filter(_key, value): if is_str(value): return filter_imgfmt(value) return value return filter_qmp(qmsg, _filter) def filter_nbd_exports(output: str) -> str: return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output) Msg = TypeVar('Msg', Dict[str, Any], List[Any], str) def log(msg: Msg, filters: Iterable[Callable[[Msg], Msg]] = (), indent: Optional[int] = None) -> None: """ Logs either a string message or a JSON 
serializable message (like QMP). If indent is provided, JSON serializable messages are pretty-printed. """ for flt in filters: msg = flt(msg) if isinstance(msg, (dict, list)): # Don't sort if it's already sorted do_sort = not isinstance(msg, OrderedDict) test_logger.info(json.dumps(msg, sort_keys=do_sort, indent=indent)) else: test_logger.info(msg) class Timeout: def __init__(self, seconds, errmsg="Timeout"): self.seconds = seconds self.errmsg = errmsg def __enter__(self): if qemu_gdb or qemu_valgrind: return self signal.signal(signal.SIGALRM, self.timeout) signal.setitimer(signal.ITIMER_REAL, self.seconds) return self def __exit__(self, exc_type, value, traceback): if qemu_gdb or qemu_valgrind: return False signal.setitimer(signal.ITIMER_REAL, 0) return False def timeout(self, signum, frame): raise Exception(self.errmsg) def file_pattern(name): return "{0}-{1}".format(os.getpid(), name) class FilePath: """ Context manager generating multiple file names. The generated files are removed when exiting the context. Example usage: with FilePath('a.img', 'b.img') as (img_a, img_b): # Use img_a and img_b here... # a.img and b.img are automatically removed here. By default images are created in iotests.test_dir. To create sockets use iotests.sock_dir: with FilePath('a.sock', base_dir=iotests.sock_dir) as sock: For convenience, calling with one argument yields a single file instead of a tuple with one item. 
""" def __init__(self, *names, base_dir=test_dir): self.paths = [os.path.join(base_dir, file_pattern(name)) for name in names] def __enter__(self): if len(self.paths) == 1: return self.paths[0] else: return self.paths def __exit__(self, exc_type, exc_val, exc_tb): for path in self.paths: try: os.remove(path) except OSError: pass return False def try_remove(img): try: os.remove(img) except OSError: pass def file_path_remover(): for path in reversed(file_path_remover.paths): try_remove(path) def file_path(*names, base_dir=test_dir): ''' Another way to get auto-generated filename that cleans itself up. Use is as simple as: img_a, img_b = file_path('a.img', 'b.img') sock = file_path('socket') ''' if not hasattr(file_path_remover, 'paths'): file_path_remover.paths = [] atexit.register(file_path_remover) paths = [] for name in names: filename = file_pattern(name) path = os.path.join(base_dir, filename) file_path_remover.paths.append(path) paths.append(path) return paths[0] if len(paths) == 1 else paths def remote_filename(path): if imgproto == 'file': return path elif imgproto == 'ssh': return "ssh://%s@127.0.0.1:22%s" % (os.environ.get('USER'), path) else: raise Exception("Protocol %s not supported" % (imgproto)) class VM(qtest.QEMUQtestMachine): '''A QEMU VM''' def __init__(self, path_suffix=''): name = "qemu%s-%d" % (path_suffix, os.getpid()) timer = 15.0 if not (qemu_gdb or qemu_valgrind) else None if qemu_gdb and qemu_valgrind: sys.stderr.write('gdb and valgrind are mutually exclusive\n') sys.exit(1) wrapper = qemu_gdb if qemu_gdb else qemu_valgrind super().__init__(qemu_prog, qemu_opts, wrapper=wrapper, name=name, base_temp_dir=test_dir, sock_dir=sock_dir, qmp_timer=timer) self._num_drives = 0 def _post_shutdown(self) -> None: super()._post_shutdown() if not qemu_valgrind or not self._popen: return valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind" if self.exitcode() == 99: with open(valgrind_filename, encoding='utf-8') as f: print(f.read()) else: 
os.remove(valgrind_filename) def _pre_launch(self) -> None: super()._pre_launch() if qemu_print: # set QEMU binary output to stdout self._close_qemu_log_file() def add_object(self, opts): self._args.append('-object') self._args.append(opts) return self def add_device(self, opts): self._args.append('-device') self._args.append(opts) return self def add_drive_raw(self, opts): self._args.append('-drive') self._args.append(opts) return self def add_drive(self, path, opts='', interface='virtio', img_format=imgfmt): '''Add a virtio-blk drive to the VM''' options = ['if=%s' % interface, 'id=drive%d' % self._num_drives] if path is not None: options.append('file=%s' % path) options.append('format=%s' % img_format) options.append('cache=%s' % cachemode) options.append('aio=%s' % aiomode) if opts: options.append(opts) if img_format == 'luks' and 'key-secret' not in opts: # default luks support if luks_default_secret_object not in self._args: self.add_object(luks_default_secret_object) options.append(luks_default_key_secret_opt) self._args.append('-drive') self._args.append(','.join(options)) self._num_drives += 1 return self def add_blockdev(self, opts): self._args.append('-blockdev') if isinstance(opts, str): self._args.append(opts) else: self._args.append(','.join(opts)) return self def add_incoming(self, addr): self._args.append('-incoming') self._args.append(addr) return self def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage: cmd = 'human-monitor-command' kwargs: Dict[str, Any] = {'command-line': command_line} if use_log: return self.qmp_log(cmd, **kwargs) else: return self.qmp(cmd, **kwargs) def pause_drive(self, drive: str, event: Optional[str] = None) -> None: """Pause drive r/w operations""" if not event: self.pause_drive(drive, "read_aio") self.pause_drive(drive, "write_aio") return self.hmp(f'qemu-io {drive} "break {event} bp_{drive}"') def resume_drive(self, drive: str) -> None: """Resume drive r/w operations""" self.hmp(f'qemu-io {drive} 
"remove_break bp_{drive}"') def hmp_qemu_io(self, drive: str, cmd: str, use_log: bool = False, qdev: bool = False) -> QMPMessage: """Write to a given drive using an HMP command""" d = '-d ' if qdev else '' return self.hmp(f'qemu-io {d}{drive} "{cmd}"', use_log=use_log) def flatten_qmp_object(self, obj, output=None, basestr=''): if output is None: output = {} if isinstance(obj, list): for i, item in enumerate(obj): self.flatten_qmp_object(item, output, basestr + str(i) + '.') elif isinstance(obj, dict): for key in obj: self.flatten_qmp_object(obj[key], output, basestr + key + '.') else: output[basestr[:-1]] = obj # Strip trailing '.' return output def qmp_to_opts(self, obj): obj = self.flatten_qmp_object(obj) output_list = [] for key in obj: output_list += [key + '=' + obj[key]] return ','.join(output_list) def get_qmp_events_filtered(self, wait=60.0): result = [] for ev in self.get_qmp_events(wait=wait): result.append(filter_qmp_event(ev)) return result def qmp_log(self, cmd, filters=(), indent=None, **kwargs): full_cmd = OrderedDict(( ("execute", cmd), ("arguments", ordered_qmp(kwargs)) )) log(full_cmd, filters, indent=indent) result = self.qmp(cmd, **kwargs) log(result, filters, indent=indent) return result # Returns None on success, and an error string on failure def run_job(self, job, auto_finalize=True, auto_dismiss=False, pre_finalize=None, cancel=False, wait=60.0): """ run_job moves a job from creation through to dismissal. :param job: String. ID of recently-launched job :param auto_finalize: Bool. True if the job was launched with auto_finalize. Defaults to True. :param auto_dismiss: Bool. True if the job was launched with auto_dismiss=True. Defaults to False. :param pre_finalize: Callback. A callable that takes no arguments to be invoked prior to issuing job-finalize, if any. :param cancel: Bool. When true, cancels the job after the pre_finalize callback. :param wait: Float. Timeout value specifying how long to wait for any event, in seconds. 
Defaults to 60.0. """ match_device = {'data': {'device': job}} match_id = {'data': {'id': job}} events = [ ('BLOCK_JOB_COMPLETED', match_device), ('BLOCK_JOB_CANCELLED', match_device), ('BLOCK_JOB_ERROR', match_device), ('BLOCK_JOB_READY', match_device), ('BLOCK_JOB_PENDING', match_id), ('JOB_STATUS_CHANGE', match_id) ] error = None while True: ev = filter_qmp_event(self.events_wait(events, timeout=wait)) if ev['event'] != 'JOB_STATUS_CHANGE': log(ev) continue status = ev['data']['status'] if status == 'aborting': result = self.qmp('query-jobs') for j in result['return']: if j['id'] == job: error = j['error'] log('Job failed: %s' % (j['error'])) elif status == 'ready': self.qmp_log('job-complete', id=job) elif status == 'pending' and not auto_finalize: if pre_finalize: pre_finalize() if cancel: self.qmp_log('job-cancel', id=job) else: self.qmp_log('job-finalize', id=job) elif status == 'concluded' and not auto_dismiss: self.qmp_log('job-dismiss', id=job) elif status == 'null': return error # Returns None on success, and an error string on failure def blockdev_create(self, options, job_id='job0', filters=None): if filters is None: filters = [filter_qmp_testfiles] result = self.qmp_log('blockdev-create', filters=filters, job_id=job_id, options=options) if 'return' in result: assert result['return'] == {} job_result = self.run_job(job_id) else: job_result = result['error'] log("") return job_result def enable_migration_events(self, name): log('Enabling migration QMP events on %s...' 
% name) log(self.qmp('migrate-set-capabilities', capabilities=[ { 'capability': 'events', 'state': True } ])) def wait_migration(self, expect_runstate: Optional[str]) -> bool: while True: event = self.event_wait('MIGRATION') # We use the default timeout, and with a timeout, event_wait() # never returns None assert event log(event, filters=[filter_qmp_event]) if event['data']['status'] in ('completed', 'failed'): break if event['data']['status'] == 'completed': # The event may occur in finish-migrate, so wait for the expected # post-migration runstate runstate = None while runstate != expect_runstate: runstate = self.qmp('query-status')['return']['status'] return True else: return False def node_info(self, node_name): nodes = self.qmp('query-named-block-nodes') for x in nodes['return']: if x['node-name'] == node_name: return x return None def query_bitmaps(self): res = self.qmp("query-named-block-nodes") return {device['node-name']: device['dirty-bitmaps'] for device in res['return'] if 'dirty-bitmaps' in device} def get_bitmap(self, node_name, bitmap_name, recording=None, bitmaps=None): """ get a specific bitmap from the object returned by query_bitmaps. :param recording: If specified, filter results by the specified value. :param bitmaps: If specified, use it instead of call query_bitmaps() """ if bitmaps is None: bitmaps = self.query_bitmaps() for bitmap in bitmaps[node_name]: if bitmap.get('name', '') == bitmap_name: if recording is None or bitmap.get('recording') == recording: return bitmap return None def check_bitmap_status(self, node_name, bitmap_name, fields): ret = self.get_bitmap(node_name, bitmap_name) return fields.items() <= ret.items() def assert_block_path(self, root, path, expected_node, graph=None): """ Check whether the node under the given path in the block graph is @expected_node. @root is the node name of the node where the @path is rooted. @path is a string that consists of child names separated by slashes. It must begin with a slash. 
Examples for @root + @path: - root="qcow2-node", path="/backing/file" - root="quorum-node", path="/children.2/file" Hypothetically, @path could be empty, in which case it would point to @root. However, in practice this case is not useful and hence not allowed. @expected_node may be None. (All elements of the path but the leaf must still exist.) @graph may be None or the result of an x-debug-query-block-graph call that has already been performed. """ if graph is None: graph = self.qmp('x-debug-query-block-graph')['return'] iter_path = iter(path.split('/')) # Must start with a / assert next(iter_path) == '' node = next((node for node in graph['nodes'] if node['name'] == root), None) # An empty @path is not allowed, so the root node must be present assert node is not None, 'Root node %s not found' % root for child_name in iter_path: assert node is not None, 'Cannot follow path %s%s' % (root, path) try: node_id = next(edge['child'] for edge in graph['edges'] if (edge['parent'] == node['id'] and edge['name'] == child_name)) node = next(node for node in graph['nodes'] if node['id'] == node_id) except StopIteration: node = None if node is None: assert expected_node is None, \ 'No node found under %s (but expected %s)' % \ (path, expected_node) else: assert node['name'] == expected_node, \ 'Found node %s under %s (but expected %s)' % \ (node['name'], path, expected_node) index_re = re.compile(r'([^\[]+)\[([^\]]+)\]') class QMPTestCase(unittest.TestCase): '''Abstract base class for QMP test cases''' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Many users of this class set a VM property we rely on heavily # in the methods below. 
self.vm = None def dictpath(self, d, path): '''Traverse a path in a nested dict''' for component in path.split('/'): m = index_re.match(component) if m: component, idx = m.groups() idx = int(idx) if not isinstance(d, dict) or component not in d: self.fail(f'failed path traversal for "{path}" in "{d}"') d = d[component] if m: if not isinstance(d, list): self.fail(f'path component "{component}" in "{path}" ' f'is not a list in "{d}"') try: d = d[idx] except IndexError: self.fail(f'invalid index "{idx}" in path "{path}" ' f'in "{d}"') return d def assert_qmp_absent(self, d, path): try: result = self.dictpath(d, path) except AssertionError: return self.fail('path "%s" has value "%s"' % (path, str(result))) def assert_qmp(self, d, path, value): '''Assert that the value for a specific path in a QMP dict matches. When given a list of values, assert that any of them matches.''' result = self.dictpath(d, path) # [] makes no sense as a list of valid values, so treat it as # an actual single value. 
if isinstance(value, list) and value != []: for v in value: if result == v: return self.fail('no match for "%s" in %s' % (str(result), str(value))) else: self.assertEqual(result, value, '"%s" is "%s", expected "%s"' % (path, str(result), str(value))) def assert_no_active_block_jobs(self): result = self.vm.qmp('query-block-jobs') self.assert_qmp(result, 'return', []) def assert_has_block_node(self, node_name=None, file_name=None): """Issue a query-named-block-nodes and assert node_name and/or file_name is present in the result""" def check_equal_or_none(a, b): return a is None or b is None or a == b assert node_name or file_name result = self.vm.qmp('query-named-block-nodes') for x in result["return"]: if check_equal_or_none(x.get("node-name"), node_name) and \ check_equal_or_none(x.get("file"), file_name): return self.fail("Cannot find %s %s in result:\n%s" % (node_name, file_name, result)) def assert_json_filename_equal(self, json_filename, reference): '''Asserts that the given filename is a json: filename and that its content is equal to the given reference object''' self.assertEqual(json_filename[:5], 'json:') self.assertEqual( self.vm.flatten_qmp_object(json.loads(json_filename[5:])), self.vm.flatten_qmp_object(reference) ) def cancel_and_wait(self, drive='drive0', force=False, resume=False, wait=60.0): '''Cancel a block job and wait for it to finish, returning the event''' result = self.vm.qmp('block-job-cancel', device=drive, force=force) self.assert_qmp(result, 'return', {}) if resume: self.vm.resume_drive(drive) cancelled = False result = None while not cancelled: for event in self.vm.get_qmp_events(wait=wait): if event['event'] == 'BLOCK_JOB_COMPLETED' or \ event['event'] == 'BLOCK_JOB_CANCELLED': self.assert_qmp(event, 'data/device', drive) result = event cancelled = True elif event['event'] == 'JOB_STATUS_CHANGE': self.assert_qmp(event, 'data/id', drive) self.assert_no_active_block_jobs() return result def wait_until_completed(self, drive='drive0', 
check_offset=True, wait=60.0, error=None): '''Wait for a block job to finish, returning the event''' while True: for event in self.vm.get_qmp_events(wait=wait): if event['event'] == 'BLOCK_JOB_COMPLETED': self.assert_qmp(event, 'data/device', drive) if error is None: self.assert_qmp_absent(event, 'data/error') if check_offset: self.assert_qmp(event, 'data/offset', event['data']['len']) else: self.assert_qmp(event, 'data/error', error) self.assert_no_active_block_jobs() return event if event['event'] == 'JOB_STATUS_CHANGE': self.assert_qmp(event, 'data/id', drive) def wait_ready(self, drive='drive0'): """Wait until a BLOCK_JOB_READY event, and return the event.""" return self.vm.events_wait([ ('BLOCK_JOB_READY', {'data': {'type': 'mirror', 'device': drive}}), ('BLOCK_JOB_READY', {'data': {'type': 'commit', 'device': drive}}) ]) def wait_ready_and_cancel(self, drive='drive0'): self.wait_ready(drive=drive) event = self.cancel_and_wait(drive=drive) self.assertEqual(event['event'], 'BLOCK_JOB_COMPLETED') self.assert_qmp(event, 'data/type', 'mirror') self.assert_qmp(event, 'data/offset', event['data']['len']) def complete_and_wait(self, drive='drive0', wait_ready=True, completion_error=None): '''Complete a block job and wait for it to finish''' if wait_ready: self.wait_ready(drive=drive) result = self.vm.qmp('block-job-complete', device=drive) self.assert_qmp(result, 'return', {}) event = self.wait_until_completed(drive=drive, error=completion_error) self.assertTrue(event['data']['type'] in ['mirror', 'commit']) def pause_wait(self, job_id='job0'): with Timeout(3, "Timeout waiting for job to pause"): while True: result = self.vm.qmp('query-block-jobs') found = False for job in result['return']: if job['device'] == job_id: found = True if job['paused'] and not job['busy']: return job break assert found def pause_job(self, job_id='job0', wait=True): result = self.vm.qmp('block-job-pause', device=job_id) self.assert_qmp(result, 'return', {}) if wait: return 
self.pause_wait(job_id) return result def case_skip(self, reason): '''Skip this test case''' case_notrun(reason) self.skipTest(reason) def notrun(reason): '''Skip this test suite''' # Each test in qemu-iotests has a number ("seq") seq = os.path.basename(sys.argv[0]) with open('%s/%s.notrun' % (output_dir, seq), 'w', encoding='utf-8') \ as outfile: outfile.write(reason + '\n') logger.warning("%s not run: %s", seq, reason) sys.exit(0) def case_notrun(reason): '''Mark this test case as not having been run (without actually skipping it, that is left to the caller). See QMPTestCase.case_skip() for a variant that actually skips the current test case.''' # Each test in qemu-iotests has a number ("seq") seq = os.path.basename(sys.argv[0]) with open('%s/%s.casenotrun' % (output_dir, seq), 'a', encoding='utf-8') \ as outfile: outfile.write(' [case not run] ' + reason + '\n') def _verify_image_format(supported_fmts: Sequence[str] = (), unsupported_fmts: Sequence[str] = ()) -> None: if 'generic' in supported_fmts and \ os.environ.get('IMGFMT_GENERIC', 'true') == 'true': # similar to # _supported_fmt generic # for bash tests supported_fmts = () not_sup = supported_fmts and (imgfmt not in supported_fmts) if not_sup or (imgfmt in unsupported_fmts): notrun('not suitable for this image format: %s' % imgfmt) if imgfmt == 'luks': verify_working_luks() def _verify_protocol(supported: Sequence[str] = (), unsupported: Sequence[str] = ()) -> None: assert not (supported and unsupported) if 'generic' in supported: return not_sup = supported and (imgproto not in supported) if not_sup or (imgproto in unsupported): notrun('not suitable for this protocol: %s' % imgproto) def _verify_platform(supported: Sequence[str] = (), unsupported: Sequence[str] = ()) -> None: if any((sys.platform.startswith(x) for x in unsupported)): notrun('not suitable for this OS: %s' % sys.platform) if supported: if not any((sys.platform.startswith(x) for x in supported)): notrun('not suitable for this OS: %s' % 
sys.platform) def _verify_cache_mode(supported_cache_modes: Sequence[str] = ()) -> None: if supported_cache_modes and (cachemode not in supported_cache_modes): notrun('not suitable for this cache mode: %s' % cachemode) def _verify_aio_mode(supported_aio_modes: Sequence[str] = ()) -> None: if supported_aio_modes and (aiomode not in supported_aio_modes): notrun('not suitable for this aio mode: %s' % aiomode) def _verify_formats(required_formats: Sequence[str] = ()) -> None: usf_list = list(set(required_formats) - set(supported_formats())) if usf_list: notrun(f'formats {usf_list} are not whitelisted') def _verify_virtio_blk() -> None: out = qemu_pipe('-M', 'none', '-device', 'help') if 'virtio-blk' not in out: notrun('Missing virtio-blk in QEMU binary') def _verify_virtio_scsi_pci_or_ccw() -> None: out = qemu_pipe('-M', 'none', '-device', 'help') if 'virtio-scsi-pci' not in out and 'virtio-scsi-ccw' not in out: notrun('Missing virtio-scsi-pci or virtio-scsi-ccw in QEMU binary') def supports_quorum(): return 'quorum' in qemu_img_pipe('--help') def verify_quorum(): '''Skip test suite if quorum support is not available''' if not supports_quorum(): notrun('quorum support missing') def has_working_luks() -> Tuple[bool, str]: """ Check whether our LUKS driver can actually create images (this extends to LUKS encryption for qcow2). If not, return the reason why. 
""" img_file = f'{test_dir}/luks-test.luks' (output, status) = \ qemu_img_pipe_and_status('create', '-f', 'luks', '--object', luks_default_secret_object, '-o', luks_default_key_secret_opt, '-o', 'iter-time=10', img_file, '1G') try: os.remove(img_file) except OSError: pass if status != 0: reason = output for line in output.splitlines(): if img_file + ':' in line: reason = line.split(img_file + ':', 1)[1].strip() break return (False, reason) else: return (True, '') def verify_working_luks(): """ Skip test suite if LUKS does not work """ (working, reason) = has_working_luks() if not working: notrun(reason) def qemu_pipe(*args: str) -> str: """ Run qemu with an option to print something and exit (e.g. a help option). :return: QEMU's stdout output. """ full_args = [qemu_prog] + qemu_opts + list(args) output, _ = qemu_tool_pipe_and_status('qemu', full_args) return output def supported_formats(read_only=False): '''Set 'read_only' to True to check ro-whitelist Otherwise, rw-whitelist is checked''' if not hasattr(supported_formats, "formats"): supported_formats.formats = {} if read_only not in supported_formats.formats: format_message = qemu_pipe("-drive", "format=help") line = 1 if read_only else 0 supported_formats.formats[read_only] = \ format_message.splitlines()[line].split(":")[1].split() return supported_formats.formats[read_only] def skip_if_unsupported(required_formats=(), read_only=False): '''Skip Test Decorator Runs the test if all the required formats are whitelisted''' def skip_test_decorator(func): def func_wrapper(test_case: QMPTestCase, *args: List[Any], **kwargs: Dict[str, Any]) -> None: if callable(required_formats): fmts = required_formats(test_case) else: fmts = required_formats usf_list = list(set(fmts) - set(supported_formats(read_only))) if usf_list: msg = f'{test_case}: formats {usf_list} are not whitelisted' test_case.case_skip(msg) else: func(test_case, *args, **kwargs) return func_wrapper return skip_test_decorator def skip_for_formats(formats: 
Sequence[str] = ()) \ -> Callable[[Callable[[QMPTestCase, List[Any], Dict[str, Any]], None]], Callable[[QMPTestCase, List[Any], Dict[str, Any]], None]]: '''Skip Test Decorator Skips the test for the given formats''' def skip_test_decorator(func): def func_wrapper(test_case: QMPTestCase, *args: List[Any], **kwargs: Dict[str, Any]) -> None: if imgfmt in formats: msg = f'{test_case}: Skipped for format {imgfmt}' test_case.case_skip(msg) else: func(test_case, *args, **kwargs) return func_wrapper return skip_test_decorator def skip_if_user_is_root(func): '''Skip Test Decorator Runs the test only without root permissions''' def func_wrapper(*args, **kwargs): if os.getuid() == 0: case_notrun('{}: cannot be run as root'.format(args[0])) return None else: return func(*args, **kwargs) return func_wrapper # We need to filter out the time taken from the output so that # qemu-iotest can reliably diff the results against master output, # and hide skipped tests from the reference output. class ReproducibleTestResult(unittest.TextTestResult): def addSkip(self, test, reason): # Same as TextTestResult, but print dot instead of "s" unittest.TestResult.addSkip(self, test, reason) if self.showAll: self.stream.writeln("skipped {0!r}".format(reason)) elif self.dots: self.stream.write(".") self.stream.flush() class ReproducibleStreamWrapper: def __init__(self, stream: TextIO): self.stream = stream def __getattr__(self, attr): if attr in ('stream', '__getstate__'): raise AttributeError(attr) return getattr(self.stream, attr) def write(self, arg=None): arg = re.sub(r'Ran (\d+) tests? 
in [\d.]+s', r'Ran \1 tests', arg) arg = re.sub(r' \(skipped=\d+\)', r'', arg) self.stream.write(arg) class ReproducibleTestRunner(unittest.TextTestRunner): def __init__(self, stream: Optional[TextIO] = None, resultclass: Type[unittest.TestResult] = ReproducibleTestResult, **kwargs: Any) -> None: rstream = ReproducibleStreamWrapper(stream or sys.stdout) super().__init__(stream=rstream, # type: ignore descriptions=True, resultclass=resultclass, **kwargs) def execute_unittest(argv: List[str], debug: bool = False) -> None: """Executes unittests within the calling module.""" # Some tests have warnings, especially ResourceWarnings for unclosed # files and sockets. Ignore them for now to ensure reproducibility of # the test output. unittest.main(argv=argv, testRunner=ReproducibleTestRunner, verbosity=2 if debug else 1, warnings=None if sys.warnoptions else 'ignore') def execute_setup_common(supported_fmts: Sequence[str] = (), supported_platforms: Sequence[str] = (), supported_cache_modes: Sequence[str] = (), supported_aio_modes: Sequence[str] = (), unsupported_fmts: Sequence[str] = (), supported_protocols: Sequence[str] = (), unsupported_protocols: Sequence[str] = (), required_fmts: Sequence[str] = ()) -> bool: """ Perform necessary setup for either script-style or unittest-style tests. :return: Bool; Whether or not debug mode has been requested via the CLI. """ # Note: Python 3.6 and pylint do not like 'Collection' so use 'Sequence'. 
debug = '-d' in sys.argv if debug: sys.argv.remove('-d') logging.basicConfig(level=(logging.DEBUG if debug else logging.WARN)) _verify_image_format(supported_fmts, unsupported_fmts) _verify_protocol(supported_protocols, unsupported_protocols) _verify_platform(supported=supported_platforms) _verify_cache_mode(supported_cache_modes) _verify_aio_mode(supported_aio_modes) _verify_formats(required_fmts) _verify_virtio_blk() return debug def execute_test(*args, test_function=None, **kwargs): """Run either unittest or script-style tests.""" debug = execute_setup_common(*args, **kwargs) if not test_function: execute_unittest(sys.argv, debug) else: test_function() def activate_logging(): """Activate iotests.log() output to stdout for script-style tests.""" handler = logging.StreamHandler(stream=sys.stdout) formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) test_logger.addHandler(handler) test_logger.setLevel(logging.INFO) test_logger.propagate = False # This is called from script-style iotests without a single point of entry def script_initialize(*args, **kwargs): """Initialize script-style tests without running any tests.""" activate_logging() execute_setup_common(*args, **kwargs) # This is called from script-style iotests with a single point of entry def script_main(test_function, *args, **kwargs): """Run script-style tests outside of the unittest framework""" activate_logging() execute_test(*args, test_function=test_function, **kwargs) # This is called from unittest style iotests def main(*args, **kwargs): """Run tests using the unittest framework""" execute_test(*args, **kwargs)
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/iotests.py
# TestFinder class, define set of tests to run.
#
# Copyright (c) 2020-2021 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import glob
import re
from collections import defaultdict
from contextlib import contextmanager
from typing import Optional, List, Iterator, Set


@contextmanager
def chdir(path: Optional[str] = None) -> Iterator[None]:
    """Temporarily change the working directory to *path*.

    If *path* is None this is a no-op, so callers can use it
    unconditionally.  The previous directory is always restored,
    even if the body raises.
    """
    if path is None:
        yield
        return

    saved_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(saved_dir)


class TestFinder:
    """Discover iotests and their group annotations and select tests to run.

    Tests are either three-digit legacy names ('001', ...) in the test
    directory root, or named scripts under tests/ that have a matching
    '<name>.out' reference file.  Group membership is read from a
    '# group: ...' comment line inside each test file.
    """

    def __init__(self, test_dir: Optional[str] = None) -> None:
        # group name -> set of test file names belonging to that group
        self.groups = defaultdict(set)

        with chdir(test_dir):
            self.all_tests = glob.glob('[0-9][0-9][0-9]')
            # A file under tests/ counts as a test only if it has a
            # corresponding reference output file next to it.
            self.all_tests += [f for f in glob.iglob('tests/*')
                               if not f.endswith('.out') and
                               os.path.isfile(f + '.out')]

            for t in self.all_tests:
                with open(t, encoding="utf-8") as f:
                    for line in f:
                        if line.startswith('# group: '):
                            for g in line.split()[2:]:
                                self.groups[g].add(t)
                            # Only the first group line is honored.
                            break

    def add_group_file(self, fname: str) -> None:
        """Merge extra group assignments from a group file.

        Each non-empty, non-comment line is '<test> <group> [<group> ...]'.
        """
        with open(fname, encoding="utf-8") as f:
            for line in f:
                line = line.strip()

                if (not line) or line[0] == '#':
                    continue

                words = line.split()
                test_file = self.parse_test_name(words[0])
                groups = words[1:]

                for g in groups:
                    self.groups[g].add(test_file)

    def parse_test_name(self, name: str) -> str:
        """Normalize a user-supplied test name to its canonical file name.

        Raises ValueError for path-like names or for tests that do not
        exist in the discovered test set.
        """
        if '/' in name:
            raise ValueError('Paths are unsupported for test selection, '
                             f'requiring "{name}" is wrong')

        if re.fullmatch(r'\d+', name):
            # Numbered tests are old naming convention. We should convert them
            # to three-digit-length, like 1 --> 001.
            name = f'{int(name):03}'
        else:
            # Named tests all should be in tests/ subdirectory
            name = os.path.join('tests', name)

        if name not in self.all_tests:
            raise ValueError(f'Test "{name}" is not found')

        return name

    def find_tests(self, groups: Optional[List[str]] = None,
                   exclude_groups: Optional[List[str]] = None,
                   tests: Optional[List[str]] = None,
                   start_from: Optional[str] = None) -> List[str]:
        """Find tests

        Algorithm:

        1. a. if some @groups specified
             a.1 Take all tests from @groups
             a.2 Drop tests, which are in at least one of @exclude_groups
                 or in 'disabled' group (if 'disabled' is not listed in
                 @groups)
             a.3 Add tests from @tests (don't exclude anything from them)

           b. else, if some @tests specified:
             b.1 exclude_groups must be not specified, so just take @tests

           c. else (only @exclude_groups list is non-empty):
             c.1 Take all tests
             c.2 Drop tests, which are in at least one of @exclude_groups
                 or in 'disabled' group

        2. sort

        3. If start_from specified, drop tests from first one to
           @start_from (not inclusive)
        """
        if groups is None:
            groups = []
        if exclude_groups is None:
            exclude_groups = []
        if tests is None:
            tests = []

        res: Set[str] = set()
        if groups:
            # Some groups specified. exclude_groups supported, additionally
            # selecting some individual tests supported as well.
            res.update(*(self.groups[g] for g in groups))
        elif tests:
            # Some individual tests specified, but no groups. In this case
            # we don't support exclude_groups.
            if exclude_groups:
                raise ValueError("Can't exclude from individually specified "
                                 "tests.")
        else:
            # No tests no groups: start from all tests, exclude_groups
            # supported.
            res.update(self.all_tests)

        if 'disabled' not in groups and 'disabled' not in exclude_groups:
            # Don't want to modify function argument, so create new list.
            exclude_groups = exclude_groups + ['disabled']

        res = res.difference(*(self.groups[g] for g in exclude_groups))

        # We want to add @tests. But for compatibility with old test names,
        # we should convert any number < 100 to number padded by
        # leading zeroes, like 1 -> 001 and 23 -> 023.
        for t in tests:
            res.add(self.parse_test_name(t))

        sequence = sorted(res)

        if start_from is not None:
            del sequence[:sequence.index(self.parse_test_name(start_from))]

        return sequence
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/findtests.py
# Library for manipulations with qcow2 image
#
# Copyright (c) 2020 Virtuozzo International GmbH.
# Copyright (C) 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import struct
import string
import json


class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that defers to an object's to_json() method if present."""
    def default(self, obj):
        if hasattr(obj, 'to_json'):
            return obj.to_json()
        else:
            return json.JSONEncoder.default(self, obj)


class Qcow2Field:
    """Wrapper used as a pretty-printer for a raw header field value."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)


class Flags64(Qcow2Field):
    """Render a 64-bit bitmask as the list of set bit positions."""

    def __str__(self):
        bits = []
        for bit in range(64):
            if self.value & (1 << bit):
                bits.append(bit)
        return str(bits)


class BitmapFlags(Qcow2Field):
    """Render bitmap-directory-entry flags with their symbolic names."""

    flags = {
        0x1: 'in-use',
        0x2: 'auto'
    }

    def __str__(self):
        bits = []
        for bit in range(64):
            flag = self.value & (1 << bit)
            if flag:
                bits.append(self.flags.get(flag, f'bit-{bit}'))
        return f'{self.value:#x} ({bits})'


class Enum(Qcow2Field):
    """Render a value through a subclass-provided 'mapping' dict."""

    def __str__(self):
        return f'{self.value:#x} ({self.mapping.get(self.value, "<unknown>")})'


class Qcow2StructMeta(type):
    """Metaclass that derives the struct format string from 'fields'."""

    # Mapping from c types to python struct format
    ctypes = {
        'u8': 'B',
        'u16': 'H',
        'u32': 'I',
        'u64': 'Q'
    }

    def __init__(self, name, bases, attrs):
        if 'fields' in attrs:
            # qcow2 is big-endian on disk, hence the '>' prefix.
            self.fmt = '>' + ''.join(self.ctypes[f[0]] for f in self.fields)


class Qcow2Struct(metaclass=Qcow2StructMeta):

    """Qcow2Struct: base class for qcow2 data structures

    Successors should define fields class variable, which is: list of
    tuples, each of three elements:
        - c-type (one of 'u8', 'u16', 'u32', 'u64')
        - format (format_spec to use with .format() when dump or 'mask' to
          dump bitmasks)
        - field name
    """

    def __init__(self, fd=None, offset=None, data=None):
        """
        Two variants:
            1. Specify data. fd and offset must be None.
            2. Specify fd and offset, data must be None. offset may be
               omitted in this case, than current position of fd is used.
        """
        if data is None:
            assert fd is not None
            buf_size = struct.calcsize(self.fmt)
            if offset is not None:
                fd.seek(offset)
            data = fd.read(buf_size)
        else:
            assert fd is None and offset is None

        values = struct.unpack(self.fmt, data)
        # Expose each parsed field as an instance attribute.
        self.__dict__ = dict((field[2], values[i])
                             for i, field in enumerate(self.fields))

    def dump(self, is_json=False):
        """Print all fields, either human-readable or as JSON."""
        if is_json:
            print(json.dumps(self.to_json(), indent=4, cls=ComplexEncoder))
            return

        for f in self.fields:
            value = self.__dict__[f[2]]
            if isinstance(f[1], str):
                value_str = f[1].format(value)
            else:
                value_str = str(f[1](value))

            print('{:<25} {}'.format(f[2], value_str))

    def to_json(self):
        return dict((f[2], self.__dict__[f[2]]) for f in self.fields)


class Qcow2BitmapExt(Qcow2Struct):
    """The 'bitmaps' header extension payload."""

    fields = (
        ('u32', '{}', 'nb_bitmaps'),
        ('u32', '{}', 'reserved32'),
        ('u64', '{:#x}', 'bitmap_directory_size'),
        ('u64', '{:#x}', 'bitmap_directory_offset')
    )

    def __init__(self, fd, cluster_size):
        super().__init__(fd=fd)
        # Extension payloads are padded to 8-byte alignment.
        tail = struct.calcsize(self.fmt) % 8
        if tail:
            fd.seek(8 - tail, 1)
        position = fd.tell()
        self.cluster_size = cluster_size
        self.read_bitmap_directory(fd)
        # Restore the stream position so the caller can keep scanning
        # header extensions sequentially.
        fd.seek(position)

    def read_bitmap_directory(self, fd):
        fd.seek(self.bitmap_directory_offset)
        self.bitmap_directory = \
            [Qcow2BitmapDirEntry(fd, cluster_size=self.cluster_size)
             for _ in range(self.nb_bitmaps)]

    def dump(self):
        super().dump()
        for entry in self.bitmap_directory:
            print()
            entry.dump()

    def to_json(self):
        fields_dict = super().to_json()
        fields_dict['bitmap_directory'] = self.bitmap_directory
        return fields_dict


class Qcow2BitmapDirEntry(Qcow2Struct):
    """One entry of the bitmap directory, plus its bitmap table."""

    fields = (
        ('u64', '{:#x}', 'bitmap_table_offset'),
        ('u32', '{}', 'bitmap_table_size'),
        ('u32', BitmapFlags, 'flags'),
        ('u8', '{}', 'type'),
        ('u8', '{}', 'granularity_bits'),
        ('u16', '{}', 'name_size'),
        ('u32', '{}', 'extra_data_size')
    )

    def __init__(self, fd, cluster_size):
        super().__init__(fd=fd)
        self.cluster_size = cluster_size
        # Seek relative to the current position in the file
        fd.seek(self.extra_data_size, 1)
        bitmap_name = fd.read(self.name_size)
        self.name = bitmap_name.decode('ascii')
        # Move position to the end of the entry in the directory
        entry_raw_size = self.bitmap_dir_entry_raw_size()
        padding = ((entry_raw_size + 7) & ~7) - entry_raw_size
        fd.seek(padding, 1)
        self.bitmap_table = Qcow2BitmapTable(
            fd=fd,
            offset=self.bitmap_table_offset,
            nb_entries=self.bitmap_table_size,
            cluster_size=self.cluster_size)

    def bitmap_dir_entry_raw_size(self):
        return struct.calcsize(self.fmt) + self.name_size + \
            self.extra_data_size

    def dump(self):
        print(f'{"Bitmap name":<25} {self.name}')
        super(Qcow2BitmapDirEntry, self).dump()
        self.bitmap_table.dump()

    def to_json(self):
        # Put the name ahead of the dict
        return {
            'name': self.name,
            **super().to_json(),
            'bitmap_table': self.bitmap_table
        }


class Qcow2BitmapTableEntry(Qcow2Struct):
    """One bitmap-table entry, decoded into offset/reserved/type."""

    fields = (
        ('u64', '{}', 'entry'),
    )

    BME_TABLE_ENTRY_RESERVED_MASK = 0xff000000000001fe
    BME_TABLE_ENTRY_OFFSET_MASK = 0x00fffffffffffe00
    BME_TABLE_ENTRY_FLAG_ALL_ONES = 1

    def __init__(self, fd):
        super().__init__(fd=fd)
        self.reserved = self.entry & self.BME_TABLE_ENTRY_RESERVED_MASK
        self.offset = self.entry & self.BME_TABLE_ENTRY_OFFSET_MASK
        if self.offset:
            if self.entry & self.BME_TABLE_ENTRY_FLAG_ALL_ONES:
                # An offset together with the all-ones flag is not a legal
                # combination.
                self.type = 'invalid'
            else:
                self.type = 'serialized'
        elif self.entry & self.BME_TABLE_ENTRY_FLAG_ALL_ONES:
            self.type = 'all-ones'
        else:
            self.type = 'all-zeroes'

    def to_json(self):
        return {'type': self.type, 'offset': self.offset,
                'reserved': self.reserved}


class Qcow2BitmapTable:
    """A bitmap's table of cluster entries."""

    def __init__(self, fd, offset, nb_entries, cluster_size):
        self.cluster_size = cluster_size
        position = fd.tell()
        fd.seek(offset)
        self.entries = [Qcow2BitmapTableEntry(fd)
                        for _ in range(nb_entries)]
        # Leave the caller's stream position untouched.
        fd.seek(position)

    def dump(self):
        bitmap_table = enumerate(self.entries)
        print(f'{"Bitmap table":<14} {"type":<15} {"size":<12} {"offset"}')
        for i, entry in bitmap_table:
            if entry.type == 'serialized':
                size = self.cluster_size
            else:
                size = 0
            print(f'{i:<14} {entry.type:<15} {size:<12} {entry.offset}')

    def to_json(self):
        return self.entries


QCOW2_EXT_MAGIC_BITMAPS = 0x23852875


class QcowHeaderExtension(Qcow2Struct):

    class Magic(Enum):
        mapping = {
            0xe2792aca: 'Backing format',
            0x6803f857: 'Feature table',
            0x0537be77: 'Crypto header',
            QCOW2_EXT_MAGIC_BITMAPS: 'Bitmaps',
            0x44415441: 'Data file'
        }

        def to_json(self):
            return self.mapping.get(self.value, "<unknown>")

    fields = (
        ('u32', Magic, 'magic'),
        ('u32', '{}', 'length')
        # length bytes of data follows
        # then padding to next multiply of 8
    )

    def __init__(self, magic=None, length=None, data=None, fd=None,
                 cluster_size=None):
        """
        Support both loading from fd and creation from user data.
        For fd-based creation current position in a file will be used to
        read the data.

        The cluster_size value may be obtained by dependent structures.

        This should be somehow refactored and functionality should be
        moved to superclass (to allow creation of any qcow2 struct), but
        then, fields of variable length (data here) should be supported
        in base class somehow. Note also, that we probably want to parse
        different extensions. Should they be subclasses of this class, or
        how to do it better? Should it be something like QAPI union with
        discriminator field (magic here). So, it's a TODO. We'll see how
        to properly refactor this when we have more qcow2 structures.
        """
        if fd is None:
            assert all(v is not None for v in (magic, length, data))
            self.magic = magic
            self.length = length
            if length % 8 != 0:
                padding = 8 - (length % 8)
                data += b'\0' * padding
            self.data = data
            # Fix: the user-creation path previously never set self.obj,
            # so dump()/to_json() on such an extension raised
            # AttributeError.
            self.obj = None
        else:
            assert all(v is None for v in (magic, length, data))
            super().__init__(fd=fd)
            if self.magic == QCOW2_EXT_MAGIC_BITMAPS:
                self.obj = Qcow2BitmapExt(fd=fd, cluster_size=cluster_size)
                self.data = None
            else:
                padded = (self.length + 7) & ~7
                self.data = fd.read(padded)
                assert self.data is not None
                self.obj = None

        if self.data is not None:
            data_str = self.data[:self.length]
            if all(c in string.printable.encode('ascii') for c in data_str):
                data_str = f"'{ data_str.decode('ascii') }'"
            else:
                data_str = '<binary>'
            self.data_str = data_str

    def dump(self):
        super().dump()

        if self.obj is None:
            print(f'{"data":<25} {self.data_str}')
        else:
            self.obj.dump()

    def to_json(self):
        # Put the name ahead of the dict
        res = {'name': self.Magic(self.magic), **super().to_json()}
        if self.obj is not None:
            res['data'] = self.obj
        else:
            res['data_str'] = self.data_str

        return res

    @classmethod
    def create(cls, magic, data):
        """Create an in-memory extension; length is taken from data."""
        return QcowHeaderExtension(magic, len(data), data)


class QcowHeader(Qcow2Struct):
    """The qcow2 file header, its extensions and the backing file name."""

    fields = (
        # Version 2 header fields
        ('u32', '{:#x}', 'magic'),
        ('u32', '{}', 'version'),
        ('u64', '{:#x}', 'backing_file_offset'),
        ('u32', '{:#x}', 'backing_file_size'),
        ('u32', '{}', 'cluster_bits'),
        ('u64', '{}', 'size'),
        ('u32', '{}', 'crypt_method'),
        ('u32', '{}', 'l1_size'),
        ('u64', '{:#x}', 'l1_table_offset'),
        ('u64', '{:#x}', 'refcount_table_offset'),
        ('u32', '{}', 'refcount_table_clusters'),
        ('u32', '{}', 'nb_snapshots'),
        ('u64', '{:#x}', 'snapshot_offset'),
        # Version 3 header fields
        ('u64', Flags64, 'incompatible_features'),
        ('u64', Flags64, 'compatible_features'),
        ('u64', Flags64, 'autoclear_features'),
        ('u32', '{}', 'refcount_order'),
        ('u32', '{}', 'header_length'),
    )

    def __init__(self, fd):
        super().__init__(fd=fd, offset=0)

        self.set_defaults()
        self.cluster_size = 1 << self.cluster_bits

        fd.seek(self.header_length)
        self.load_extensions(fd)

        if self.backing_file_offset:
            fd.seek(self.backing_file_offset)
            self.backing_file = fd.read(self.backing_file_size)
        else:
            self.backing_file = None

    def set_defaults(self):
        # A version 2 header stops after snapshot_offset; fill in the
        # version 3 fields with their specified defaults.
        if self.version == 2:
            self.incompatible_features = 0
            self.compatible_features = 0
            self.autoclear_features = 0
            self.refcount_order = 4
            self.header_length = 72

    def load_extensions(self, fd):
        self.extensions = []

        if self.backing_file_offset != 0:
            end = min(self.cluster_size, self.backing_file_offset)
        else:
            end = self.cluster_size

        while fd.tell() < end:
            ext = QcowHeaderExtension(fd=fd, cluster_size=self.cluster_size)
            if ext.magic == 0:
                # Magic 0 is the end-of-extensions terminator.
                break
            else:
                self.extensions.append(ext)

    def update_extensions(self, fd):
        fd.seek(self.header_length)
        # Fix: write the terminator without appending it to
        # self.extensions — the old code mutated the list, so a second
        # update() or dump_extensions() saw a stale terminator entry.
        for ex in self.extensions + [QcowHeaderExtension(0, 0, b'')]:
            buf = struct.pack('>II', ex.magic, ex.length)
            fd.write(buf)
            fd.write(ex.data)

        if self.backing_file is not None:
            self.backing_file_offset = fd.tell()
            fd.write(self.backing_file)

        if fd.tell() > self.cluster_size:
            raise Exception('I think I just broke the image...')

    def update(self, fd):
        header_bytes = self.header_length

        self.update_extensions(fd)

        fd.seek(0)
        header = tuple(self.__dict__[f] for t, p, f in QcowHeader.fields)
        buf = struct.pack(QcowHeader.fmt, *header)
        # Fix: write exactly header_length bytes. The previous
        # 'buf[0:header_bytes-1]' slice was an off-by-one that dropped
        # the last byte of the header on every rewrite (masked only
        # because in-place rewrites usually left the same byte on disk).
        buf = buf[0:header_bytes]
        fd.write(buf)

    def dump_extensions(self, is_json=False):
        if is_json:
            print(json.dumps(self.extensions, indent=4, cls=ComplexEncoder))
            return

        for ex in self.extensions:
            print('Header extension:')
            ex.dump()
            print()
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/qcow2_format.py
# Class for actually running tests.
#
# Copyright (c) 2020-2021 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
from pathlib import Path
import datetime
import time
import difflib
import subprocess
import contextlib
import json
import termios
import sys
from contextlib import contextmanager
from typing import List, Optional, Iterator, Any, Sequence, Dict, \
        ContextManager

from testenv import TestEnv


def silent_unlink(path: Path) -> None:
    """Remove *path*, swallowing any OSError (e.g. file absent)."""
    with contextlib.suppress(OSError):
        path.unlink()


def _rstripped_lines(fname: str) -> List[str]:
    """Read *fname* and return its lines with trailing whitespace removed."""
    with open(fname, encoding="utf-8") as f:
        return [line.rstrip() for line in f]


def file_diff(file1: str, file2: str) -> List[str]:
    """Return a unified diff of the two files as a list of lines.

    Trailing whitespace is stripped on both sides before comparing.
    There are a lot of mess about it in iotests.
    TODO: fix all tests to not produce extra spaces, fix all .out files
    and use strict diff here!
    """
    seq1 = _rstripped_lines(file1)
    seq2 = _rstripped_lines(file2)
    return [line.rstrip()
            for line in difflib.unified_diff(seq1, seq2, file1, file2)]


# We want to save current tty settings during test run,
# since an aborting qemu call may leave things screwed up.
@contextmanager
def savetty() -> Iterator[None]:
    """Save the terminal attributes on entry and restore them on exit.

    No-op when stdin is not a tty.
    """
    isterm = sys.stdin.isatty()
    if isterm:
        fd = sys.stdin.fileno()
        attr = termios.tcgetattr(fd)

    try:
        yield
    finally:
        if isterm:
            termios.tcsetattr(fd, termios.TCSADRAIN, attr)


class LastElapsedTime(ContextManager['LastElapsedTime']):
    """ Cache for elapsed time for tests, to show it during new test run

    It is safe to use get() at any time. To use update(), you must either
    use it inside with-block or use save() after update().
    """
    def __init__(self, cache_file: str, env: TestEnv) -> None:
        self.env = env
        self.cache_file = cache_file
        # Nested mapping: test name -> image protocol -> image format
        # -> elapsed seconds.
        self.cache: Dict[str, Dict[str, Dict[str, float]]]

        try:
            with open(cache_file, encoding="utf-8") as f:
                self.cache = json.load(f)
        except (OSError, ValueError):
            # Missing or corrupt cache file: start from scratch.
            self.cache = {}

    def get(self, test: str,
            default: Optional[float] = None) -> Optional[float]:
        """Return the cached elapsed time for the current proto/format."""
        if test not in self.cache:
            return default

        if self.env.imgproto not in self.cache[test]:
            return default

        return self.cache[test][self.env.imgproto].get(self.env.imgfmt,
                                                       default)

    def update(self, test: str, elapsed: float) -> None:
        """Record *elapsed* for *test* under the current proto/format."""
        d = self.cache.setdefault(test, {})
        d.setdefault(self.env.imgproto, {})[self.env.imgfmt] = elapsed

    def save(self) -> None:
        """Persist the cache to disk."""
        with open(self.cache_file, 'w', encoding="utf-8") as f:
            json.dump(self.cache, f)

    def __enter__(self) -> 'LastElapsedTime':
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self.save()


class TestResult:
    """Plain record describing the outcome of one test run."""
    def __init__(self, status: str, description: str = '',
                 elapsed: Optional[float] = None, diff: Sequence[str] = (),
                 casenotrun: str = '', interrupted: bool = False) -> None:
        self.status = status
        self.description = description
        self.elapsed = elapsed
        self.diff = diff
        self.casenotrun = casenotrun
        self.interrupted = interrupted


class TestRunner(ContextManager['TestRunner']):
    """Run a list of iotests in a prepared TestEnv and report results."""
    def __init__(self, env: TestEnv, makecheck: bool = False,
                 color: str = 'auto') -> None:
        self.env = env
        self.makecheck = makecheck
        self.last_elapsed = LastElapsedTime('.last-elapsed-cache', env)

        assert color in ('auto', 'on', 'off')
        self.color = (color == 'on') or (color == 'auto' and
                                         sys.stdout.isatty())

        self._stack: contextlib.ExitStack

    def __enter__(self) -> 'TestRunner':
        # Enter env, elapsed-time cache and tty saver as one unit so
        # that __exit__ unwinds them all in reverse order.
        self._stack = contextlib.ExitStack()
        self._stack.enter_context(self.env)
        self._stack.enter_context(self.last_elapsed)
        self._stack.enter_context(savetty())
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        self._stack.close()

    def test_print_one_line(self, test: str,
                            starttime: str,
                            endtime: Optional[str] = None,
                            status: str = '...',
                            lasttime: Optional[float] = None,
                            thistime: Optional[float] = None,
                            description: str = '',
                            test_field_width: Optional[int] = None,
                            end: str = '\n') -> None:
        """ Print short test info before/after test run """
        test = os.path.basename(test)

        if test_field_width is None:
            test_field_width = 8

        if self.makecheck and status != '...':
            # makecheck mode: terse one-line-per-finished-test output.
            if status and status != 'pass':
                status = f' [{status}]'
            else:
                status = ''

            print(f' TEST iotest-{self.env.imgfmt}: {test}{status}')
            return

        if lasttime:
            lasttime_s = f' (last: {lasttime:.1f}s)'
        else:
            lasttime_s = ''
        if thistime:
            thistime_s = f'{thistime:.1f}s'
        else:
            thistime_s = '...'
        if endtime:
            endtime = f'[{endtime}]'
        else:
            endtime = ''

        if self.color:
            # ANSI colors: green pass, bold red fail, yellow not-run.
            if status == 'pass':
                col = '\033[32m'
            elif status == 'fail':
                col = '\033[1m\033[31m'
            elif status == 'not run':
                col = '\033[33m'
            else:
                col = ''
            col_end = '\033[0m'
        else:
            col = ''
            col_end = ''

        print(f'{test:{test_field_width}} {col}{status:10}{col_end} '
              f'[{starttime}] {endtime:13}{thistime_s:5} {lasttime_s:14} '
              f'{description}', end=end)

    def find_reference(self, test: str) -> str:
        """Return the best-matching reference output file for *test*.

        Preference order: cachemode-specific, format-specific,
        machine-specific, then the plain '<test>.out'.
        """
        if self.env.cachemode == 'none':
            ref = f'{test}.out.nocache'
            if os.path.isfile(ref):
                return ref

        ref = f'{test}.out.{self.env.imgfmt}'
        if os.path.isfile(ref):
            return ref

        ref = f'{test}.{self.env.qemu_default_machine}.out'
        if os.path.isfile(ref):
            return ref

        return f'{test}.out'

    def do_run_test(self, test: str) -> TestResult:
        """Execute one test script and classify its outcome."""
        f_test = Path(test)
        f_bad = Path(f_test.name + '.out.bad')
        f_notrun = Path(f_test.name + '.notrun')
        f_casenotrun = Path(f_test.name + '.casenotrun')
        f_reference = Path(self.find_reference(test))

        if not f_test.exists():
            return TestResult(status='fail',
                              description=f'No such test file: {f_test}')

        if not os.access(str(f_test), os.X_OK):
            sys.exit(f'Not executable: {f_test}')

        if not f_reference.exists():
            return TestResult(status='not run',
                              description='No qualified output '
                                          f'(expected {f_reference})')

        # Remove any stale status files from a previous run.
        for p in (f_bad, f_notrun, f_casenotrun):
            silent_unlink(p)

        args = [str(f_test.resolve())]
        env = self.env.prepare_subprocess(args)

        t0 = time.time()
        with f_bad.open('w', encoding="utf-8") as f:
            with subprocess.Popen(args, cwd=str(f_test.parent), env=env,
                                  stdout=f, stderr=subprocess.STDOUT) as proc:
                try:
                    proc.wait()
                except KeyboardInterrupt:
                    proc.terminate()
                    proc.wait()
                    return TestResult(status='not run',
                                      description='Interrupted by user',
                                      interrupted=True)
                ret = proc.returncode

        elapsed = round(time.time() - t0, 1)

        if ret != 0:
            return TestResult(status='fail', elapsed=elapsed,
                              description=f'failed, exit status {ret}',
                              diff=file_diff(str(f_reference), str(f_bad)))

        if f_notrun.exists():
            return TestResult(
                status='not run',
                description=f_notrun.read_text(encoding='utf-8').strip())

        casenotrun = ''
        if f_casenotrun.exists():
            casenotrun = f_casenotrun.read_text(encoding='utf-8')

        diff = file_diff(str(f_reference), str(f_bad))
        if diff:
            return TestResult(status='fail', elapsed=elapsed,
                              description=f'output mismatch (see {f_bad})',
                              diff=diff, casenotrun=casenotrun)
        else:
            # Output matched: drop the .out.bad file and remember timing.
            f_bad.unlink()
            self.last_elapsed.update(test, elapsed)
            return TestResult(status='pass', elapsed=elapsed,
                              casenotrun=casenotrun)

    def run_test(self, test: str,
                 test_field_width: Optional[int] = None) -> TestResult:
        """Run one test with progress printing before and after."""
        last_el = self.last_elapsed.get(test)
        start = datetime.datetime.now().strftime('%H:%M:%S')

        if not self.makecheck:
            # Provisional line, overwritten ('\r') once the test ends.
            self.test_print_one_line(test=test, starttime=start,
                                     lasttime=last_el, end='\r',
                                     test_field_width=test_field_width)

        res = self.do_run_test(test)

        end = datetime.datetime.now().strftime('%H:%M:%S')
        self.test_print_one_line(test=test, status=res.status,
                                 starttime=start, endtime=end,
                                 lasttime=last_el, thistime=res.elapsed,
                                 description=res.description,
                                 test_field_width=test_field_width)

        if res.casenotrun:
            print(res.casenotrun)

        return res

    def run_tests(self, tests: List[str]) -> bool:
        """Run all *tests*; return True iff none failed."""
        n_run = 0
        failed = []
        notrun = []
        casenotrun = []

        if not self.makecheck:
            self.env.print_env()

        test_field_width = max(len(os.path.basename(t)) for t in tests) + 2

        for t in tests:
            name = os.path.basename(t)

            res = self.run_test(t, test_field_width=test_field_width)

            assert res.status in ('pass', 'fail', 'not run')

            if res.casenotrun:
                casenotrun.append(t)

            if res.status != 'not run':
                n_run += 1

            if res.status == 'fail':
                failed.append(name)
                if self.makecheck:
                    self.env.print_env()
                if res.diff:
                    print('\n'.join(res.diff))
            elif res.status == 'not run':
                notrun.append(name)

            if res.interrupted:
                # Ctrl-C: stop running further tests.
                break

        if notrun:
            print('Not run:', ' '.join(notrun))

        if casenotrun:
            print('Some cases not run in:', ' '.join(casenotrun))

        if failed:
            print('Failures:', ' '.join(failed))
            print(f'Failed {len(failed)} of {n_run} iotests')
            return False
        else:
            print(f'Passed all {n_run} iotests')
            return True
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/testrunner.py
#!/usr/bin/env python3
#
# Manipulations with qcow2 image
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import sys

from qcow2_format import (
    QcowHeader,
    QcowHeaderExtension
)


# Set from the '-j' command line flag; selects JSON output for dumps.
is_json = False


def cmd_dump_header(fd):
    """Dump the image header followed by its header extensions."""
    h = QcowHeader(fd)
    h.dump(is_json)
    print()
    h.dump_extensions(is_json)


def cmd_dump_header_exts(fd):
    """Dump only the header extensions."""
    h = QcowHeader(fd)
    h.dump_extensions(is_json)


def cmd_set_header(fd, name, value):
    """Set header field *name* to integer *value* and rewrite the header."""
    try:
        value = int(value, 0)
    except ValueError:
        print("'%s' is not a valid number" % value)
        sys.exit(1)

    fields = (field[2] for field in QcowHeader.fields)
    if name not in fields:
        print("'%s' is not a known header field" % name)
        sys.exit(1)

    h = QcowHeader(fd)
    h.__dict__[name] = value
    h.update(fd)


def cmd_add_header_ext(fd, magic, data):
    """Append a header extension with the given magic and ASCII data."""
    try:
        magic = int(magic, 0)
    except ValueError:
        print("'%s' is not a valid magic number" % magic)
        sys.exit(1)

    h = QcowHeader(fd)
    h.extensions.append(QcowHeaderExtension.create(magic,
                                                   data.encode('ascii')))
    h.update(fd)


def cmd_add_header_ext_stdio(fd, magic):
    """Like add-header-ext, but read the extension data from stdin."""
    data = sys.stdin.read()

    cmd_add_header_ext(fd, magic, data)


def cmd_del_header_ext(fd, magic):
    """Delete every header extension whose magic matches *magic*."""
    try:
        magic = int(magic, 0)
    except ValueError:
        print("'%s' is not a valid magic number" % magic)
        sys.exit(1)

    h = QcowHeader(fd)
    # Fix: the old code called h.extensions.remove(ex) while iterating
    # the same list, which skips the element following each removed one,
    # so consecutive extensions with the same magic were not all deleted.
    # Filter into a new list instead.
    kept = [ex for ex in h.extensions if ex.magic != magic]
    if len(kept) == len(h.extensions):
        print("No such header extension")
        return

    h.extensions = kept
    h.update(fd)


def cmd_set_feature_bit(fd, group, bit):
    """Set feature bit *bit* in the given feature *group*."""
    try:
        bit = int(bit, 0)
        if bit < 0 or bit >= 64:
            raise ValueError
    except ValueError:
        print("'%s' is not a valid bit number in range [0, 64)" % bit)
        sys.exit(1)

    h = QcowHeader(fd)
    if group == 'incompatible':
        h.incompatible_features |= 1 << bit
    elif group == 'compatible':
        h.compatible_features |= 1 << bit
    elif group == 'autoclear':
        h.autoclear_features |= 1 << bit
    else:
        print("'%s' is not a valid group, try "
              "'incompatible', 'compatible', or 'autoclear'" % group)
        sys.exit(1)

    h.update(fd)


# Command table: [name, handler, number of arguments, description].
cmds = [
    ['dump-header', cmd_dump_header, 0,
     'Dump image header and header extensions'],
    ['dump-header-exts', cmd_dump_header_exts, 0,
     'Dump image header extensions'],
    ['set-header', cmd_set_header, 2, 'Set a field in the header'],
    ['add-header-ext', cmd_add_header_ext, 2, 'Add a header extension'],
    ['add-header-ext-stdio', cmd_add_header_ext_stdio, 1,
     'Add a header extension, data from stdin'],
    ['del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension'],
    ['set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'],
]


def main(filename, cmd, args):
    """Dispatch *cmd* with *args* against the image file *filename*."""
    # 'with' guarantees the image is closed even if a handler raises.
    with open(filename, "r+b") as fd:
        for name, handler, num_args, desc in cmds:
            if name != cmd:
                continue
            elif len(args) != num_args:
                usage()
                return
            else:
                handler(fd, *args)
                return
        print("Unknown command '%s'" % cmd)


def usage():
    print("Usage: %s <file> <cmd> [<arg>, ...] [<key>, ...]" % sys.argv[0])
    print("")
    print("Supported commands:")
    for name, handler, num_args, desc in cmds:
        print("    %-20s - %s" % (name, desc))
    print("")
    print("Supported keys:")
    print("    %-20s - %s" % ('-j', 'Dump in JSON format'))


if __name__ == '__main__':
    if len(sys.argv) < 3:
        usage()
        sys.exit(1)

    is_json = '-j' in sys.argv
    if is_json:
        sys.argv.remove('-j')

    main(sys.argv[1], sys.argv[2], sys.argv[3:])
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/qemu-iotests/qcow2.py
# # VM testing base class # # Copyright 2017-2019 Red Hat Inc. # # Authors: # Fam Zheng <famz@redhat.com> # Gerd Hoffmann <kraxel@redhat.com> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. # import os import re import sys import socket import logging import time import datetime sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine from qemu.utils import get_info_usernet_hostfwd_port, kvm_available import subprocess import hashlib import argparse import atexit import tempfile import shutil import multiprocessing import traceback import shlex SSH_KEY_FILE = os.path.join(os.path.dirname(__file__), "..", "keys", "id_rsa") SSH_PUB_KEY_FILE = os.path.join(os.path.dirname(__file__), "..", "keys", "id_rsa.pub") # This is the standard configuration. # Any or all of these can be overridden by # passing in a config argument to the VM constructor. DEFAULT_CONFIG = { 'cpu' : "max", 'machine' : 'pc', 'guest_user' : "qemu", 'guest_pass' : "qemupass", 'root_user' : "root", 'root_pass' : "qemupass", 'ssh_key_file' : SSH_KEY_FILE, 'ssh_pub_key_file': SSH_PUB_KEY_FILE, 'memory' : "4G", 'extra_args' : [], 'qemu_args' : "", 'dns' : "", 'ssh_port' : 0, 'install_cmds' : "", 'boot_dev_type' : "block", 'ssh_timeout' : 1, } BOOT_DEVICE = { 'block' : "-drive file={},if=none,id=drive0,cache=writeback "\ "-device virtio-blk,drive=drive0,bootindex=0", 'scsi' : "-device virtio-scsi-device,id=scsi "\ "-drive file={},format=raw,if=none,id=hd0 "\ "-device scsi-hd,drive=hd0,bootindex=0", } class BaseVM(object): envvars = [ "https_proxy", "http_proxy", "ftp_proxy", "no_proxy", ] # The script to run in the guest that builds QEMU BUILD_SCRIPT = "" # The guest name, to be overridden by subclasses name = "#base" # The guest architecture, to be overridden by subclasses arch = "#arch" # command to halt the guest, can be overridden by subclasses poweroff = "poweroff" # Time to wait 
for shutdown to finish. shutdown_timeout_default = 30 # enable IPv6 networking ipv6 = True # This is the timeout on the wait for console bytes. socket_timeout = 120 # Scale up some timeouts under TCG. # 4 is arbitrary, but greater than 2, # since we found we need to wait more than twice as long. tcg_timeout_multiplier = 4 def __init__(self, args, config=None): self._guest = None self._genisoimage = args.genisoimage self._build_path = args.build_path self._efi_aarch64 = args.efi_aarch64 self._source_path = args.source_path # Allow input config to override defaults. self._config = DEFAULT_CONFIG.copy() if config != None: self._config.update(config) self.validate_ssh_keys() self._tmpdir = os.path.realpath(tempfile.mkdtemp(prefix="vm-test-", suffix=".tmp", dir=".")) atexit.register(shutil.rmtree, self._tmpdir) # Copy the key files to a temporary directory. # Also chmod the key file to agree with ssh requirements. self._config['ssh_key'] = \ open(self._config['ssh_key_file']).read().rstrip() self._config['ssh_pub_key'] = \ open(self._config['ssh_pub_key_file']).read().rstrip() self._ssh_tmp_key_file = os.path.join(self._tmpdir, "id_rsa") open(self._ssh_tmp_key_file, "w").write(self._config['ssh_key']) subprocess.check_call(["chmod", "600", self._ssh_tmp_key_file]) self._ssh_tmp_pub_key_file = os.path.join(self._tmpdir, "id_rsa.pub") open(self._ssh_tmp_pub_key_file, "w").write(self._config['ssh_pub_key']) self.debug = args.debug self._console_log_path = None if args.log_console: self._console_log_path = \ os.path.join(os.path.expanduser("~/.cache/qemu-vm"), "{}.install.log".format(self.name)) self._stderr = sys.stderr self._devnull = open(os.devnull, "w") if self.debug: self._stdout = sys.stdout else: self._stdout = self._devnull netdev = "user,id=vnet,hostfwd=:127.0.0.1:{}-:22" self._args = [ \ "-nodefaults", "-m", self._config['memory'], "-cpu", self._config['cpu'], "-netdev", netdev.format(self._config['ssh_port']) + (",ipv6=no" if not self.ipv6 else "") + (",dns=" + 
self._config['dns'] if self._config['dns'] else ""), "-device", "virtio-net-pci,netdev=vnet", "-vnc", "127.0.0.1:0,to=20"] if args.jobs and args.jobs > 1: self._args += ["-smp", "%d" % args.jobs] if kvm_available(self.arch): self._shutdown_timeout = self.shutdown_timeout_default self._args += ["-enable-kvm"] else: logging.info("KVM not available, not using -enable-kvm") self._shutdown_timeout = \ self.shutdown_timeout_default * self.tcg_timeout_multiplier self._data_args = [] if self._config['qemu_args'] != None: qemu_args = self._config['qemu_args'] qemu_args = qemu_args.replace('\n',' ').replace('\r','') # shlex groups quoted arguments together # we need this to keep the quoted args together for when # the QEMU command is issued later. args = shlex.split(qemu_args) self._config['extra_args'] = [] for arg in args: if arg: # Preserve quotes around arguments. # shlex above takes them out, so add them in. if " " in arg: arg = '"{}"'.format(arg) self._config['extra_args'].append(arg) def validate_ssh_keys(self): """Check to see if the ssh key files exist.""" if 'ssh_key_file' not in self._config or\ not os.path.exists(self._config['ssh_key_file']): raise Exception("ssh key file not found.") if 'ssh_pub_key_file' not in self._config or\ not os.path.exists(self._config['ssh_pub_key_file']): raise Exception("ssh pub key file not found.") def wait_boot(self, wait_string=None): """Wait for the standard string we expect on completion of a normal boot. The user can also choose to override with an alternate string to wait for.""" if wait_string is None: if self.login_prompt is None: raise Exception("self.login_prompt not defined") wait_string = self.login_prompt # Intentionally bump up the default timeout under TCG, # since the console wait below takes longer. 
timeout = self.socket_timeout if not kvm_available(self.arch): timeout *= 8 self.console_init(timeout=timeout) self.console_wait(wait_string) def _download_with_cache(self, url, sha256sum=None, sha512sum=None): def check_sha256sum(fname): if not sha256sum: return True checksum = subprocess.check_output(["sha256sum", fname]).split()[0] return sha256sum == checksum.decode("utf-8") def check_sha512sum(fname): if not sha512sum: return True checksum = subprocess.check_output(["sha512sum", fname]).split()[0] return sha512sum == checksum.decode("utf-8") cache_dir = os.path.expanduser("~/.cache/qemu-vm/download") if not os.path.exists(cache_dir): os.makedirs(cache_dir) fname = os.path.join(cache_dir, hashlib.sha1(url.encode("utf-8")).hexdigest()) if os.path.exists(fname) and check_sha256sum(fname) and check_sha512sum(fname): return fname logging.debug("Downloading %s to %s...", url, fname) subprocess.check_call(["wget", "-c", url, "-O", fname + ".download"], stdout=self._stdout, stderr=self._stderr) os.rename(fname + ".download", fname) return fname def _ssh_do(self, user, cmd, check): ssh_cmd = ["ssh", "-t", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=" + os.devnull, "-o", "ConnectTimeout={}".format(self._config["ssh_timeout"]), "-p", str(self.ssh_port), "-i", self._ssh_tmp_key_file] # If not in debug mode, set ssh to quiet mode to # avoid printing the results of commands. 
if not self.debug: ssh_cmd.append("-q") for var in self.envvars: ssh_cmd += ['-o', "SendEnv=%s" % var ] assert not isinstance(cmd, str) ssh_cmd += ["%s@127.0.0.1" % user] + list(cmd) logging.debug("ssh_cmd: %s", " ".join(ssh_cmd)) r = subprocess.call(ssh_cmd) if check and r != 0: raise Exception("SSH command failed: %s" % cmd) return r def ssh(self, *cmd): return self._ssh_do(self._config["guest_user"], cmd, False) def ssh_root(self, *cmd): return self._ssh_do(self._config["root_user"], cmd, False) def ssh_check(self, *cmd): self._ssh_do(self._config["guest_user"], cmd, True) def ssh_root_check(self, *cmd): self._ssh_do(self._config["root_user"], cmd, True) def build_image(self, img): raise NotImplementedError def exec_qemu_img(self, *args): cmd = [os.environ.get("QEMU_IMG", "qemu-img")] cmd.extend(list(args)) subprocess.check_call(cmd) def add_source_dir(self, src_dir): name = "data-" + hashlib.sha1(src_dir.encode("utf-8")).hexdigest()[:5] tarfile = os.path.join(self._tmpdir, name + ".tar") logging.debug("Creating archive %s for src_dir dir: %s", tarfile, src_dir) subprocess.check_call(["./scripts/archive-source.sh", tarfile], cwd=src_dir, stdin=self._devnull, stdout=self._stdout, stderr=self._stderr) self._data_args += ["-drive", "file=%s,if=none,id=%s,cache=writeback,format=raw" % \ (tarfile, name), "-device", "virtio-blk,drive=%s,serial=%s,bootindex=1" % (name, name)] def boot(self, img, extra_args=[]): boot_dev = BOOT_DEVICE[self._config['boot_dev_type']] boot_params = boot_dev.format(img) args = self._args + boot_params.split(' ') args += self._data_args + extra_args + self._config['extra_args'] logging.debug("QEMU args: %s", " ".join(args)) qemu_path = get_qemu_path(self.arch, self._build_path) # Since console_log_path is only set when the user provides the # log_console option, we will set drain_console=True so the # console is always drained. 
guest = QEMUMachine(binary=qemu_path, args=args, console_log=self._console_log_path, drain_console=True) guest.set_machine(self._config['machine']) guest.set_console() try: guest.launch() except: logging.error("Failed to launch QEMU, command line:") logging.error(" ".join([qemu_path] + args)) logging.error("Log:") logging.error(guest.get_log()) logging.error("QEMU version >= 2.10 is required") raise atexit.register(self.shutdown) self._guest = guest # Init console so we can start consuming the chars. self.console_init() usernet_info = guest.qmp("human-monitor-command", command_line="info usernet").get("return") self.ssh_port = get_info_usernet_hostfwd_port(usernet_info) if not self.ssh_port: raise Exception("Cannot find ssh port from 'info usernet':\n%s" % \ usernet_info) def console_init(self, timeout = None): if timeout == None: timeout = self.socket_timeout vm = self._guest vm.console_socket.settimeout(timeout) self.console_raw_path = os.path.join(vm._temp_dir, vm._name + "-console.raw") self.console_raw_file = open(self.console_raw_path, 'wb') def console_log(self, text): for line in re.split("[\r\n]", text): # filter out terminal escape sequences line = re.sub("\x1b\[[0-9;?]*[a-zA-Z]", "", line) line = re.sub("\x1b\([0-9;?]*[a-zA-Z]", "", line) # replace unprintable chars line = re.sub("\x1b", "<esc>", line) line = re.sub("[\x00-\x1f]", ".", line) line = re.sub("[\x80-\xff]", ".", line) if line == "": continue # log console line sys.stderr.write("con recv: %s\n" % line) def console_wait(self, expect, expectalt = None): vm = self._guest output = "" while True: try: chars = vm.console_socket.recv(1) if self.console_raw_file: self.console_raw_file.write(chars) self.console_raw_file.flush() except socket.timeout: sys.stderr.write("console: *** read timeout ***\n") sys.stderr.write("console: waiting for: '%s'\n" % expect) if not expectalt is None: sys.stderr.write("console: waiting for: '%s' (alt)\n" % expectalt) sys.stderr.write("console: line buffer:\n") 
sys.stderr.write("\n") self.console_log(output.rstrip()) sys.stderr.write("\n") raise output += chars.decode("latin1") if expect in output: break if not expectalt is None and expectalt in output: break if "\r" in output or "\n" in output: lines = re.split("[\r\n]", output) output = lines.pop() if self.debug: self.console_log("\n".join(lines)) if self.debug: self.console_log(output) if not expectalt is None and expectalt in output: return False return True def console_consume(self): vm = self._guest output = "" vm.console_socket.setblocking(0) while True: try: chars = vm.console_socket.recv(1) except: break output += chars.decode("latin1") if "\r" in output or "\n" in output: lines = re.split("[\r\n]", output) output = lines.pop() if self.debug: self.console_log("\n".join(lines)) if self.debug: self.console_log(output) vm.console_socket.setblocking(1) def console_send(self, command): vm = self._guest if self.debug: logline = re.sub("\n", "<enter>", command) logline = re.sub("[\x00-\x1f]", ".", logline) sys.stderr.write("con send: %s\n" % logline) for char in list(command): vm.console_socket.send(char.encode("utf-8")) time.sleep(0.01) def console_wait_send(self, wait, command): self.console_wait(wait) self.console_send(command) def console_ssh_init(self, prompt, user, pw): sshkey_cmd = "echo '%s' > .ssh/authorized_keys\n" \ % self._config['ssh_pub_key'].rstrip() self.console_wait_send("login:", "%s\n" % user) self.console_wait_send("Password:", "%s\n" % pw) self.console_wait_send(prompt, "mkdir .ssh\n") self.console_wait_send(prompt, sshkey_cmd) self.console_wait_send(prompt, "chmod 755 .ssh\n") self.console_wait_send(prompt, "chmod 644 .ssh/authorized_keys\n") def console_sshd_config(self, prompt): self.console_wait(prompt) self.console_send("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config\n") for var in self.envvars: self.console_wait(prompt) self.console_send("echo 'AcceptEnv %s' >> /etc/ssh/sshd_config\n" % var) def print_step(self, text): 
sys.stderr.write("### %s ...\n" % text) def wait_ssh(self, wait_root=False, seconds=300, cmd="exit 0"): # Allow more time for VM to boot under TCG. if not kvm_available(self.arch): seconds *= self.tcg_timeout_multiplier starttime = datetime.datetime.now() endtime = starttime + datetime.timedelta(seconds=seconds) cmd_success = False while datetime.datetime.now() < endtime: if wait_root and self.ssh_root(cmd) == 0: cmd_success = True break elif self.ssh(cmd) == 0: cmd_success = True break seconds = (endtime - datetime.datetime.now()).total_seconds() logging.debug("%ds before timeout", seconds) time.sleep(1) if not cmd_success: raise Exception("Timeout while waiting for guest ssh") def shutdown(self): self._guest.shutdown(timeout=self._shutdown_timeout) def wait(self): self._guest.wait(timeout=self._shutdown_timeout) def graceful_shutdown(self): self.ssh_root(self.poweroff) self._guest.wait(timeout=self._shutdown_timeout) def qmp(self, *args, **kwargs): return self._guest.qmp(*args, **kwargs) def gen_cloud_init_iso(self): cidir = self._tmpdir mdata = open(os.path.join(cidir, "meta-data"), "w") name = self.name.replace(".","-") mdata.writelines(["instance-id: {}-vm-0\n".format(name), "local-hostname: {}-guest\n".format(name)]) mdata.close() udata = open(os.path.join(cidir, "user-data"), "w") print("guest user:pw {}:{}".format(self._config['guest_user'], self._config['guest_pass'])) udata.writelines(["#cloud-config\n", "chpasswd:\n", " list: |\n", " root:%s\n" % self._config['root_pass'], " %s:%s\n" % (self._config['guest_user'], self._config['guest_pass']), " expire: False\n", "users:\n", " - name: %s\n" % self._config['guest_user'], " sudo: ALL=(ALL) NOPASSWD:ALL\n", " ssh-authorized-keys:\n", " - %s\n" % self._config['ssh_pub_key'], " - name: root\n", " ssh-authorized-keys:\n", " - %s\n" % self._config['ssh_pub_key'], "locale: en_US.UTF-8\n"]) proxy = os.environ.get("http_proxy") if not proxy is None: udata.writelines(["apt:\n", " proxy: %s" % proxy]) udata.close() 
subprocess.check_call([self._genisoimage, "-output", "cloud-init.iso", "-volid", "cidata", "-joliet", "-rock", "user-data", "meta-data"], cwd=cidir, stdin=self._devnull, stdout=self._stdout, stderr=self._stdout) return os.path.join(cidir, "cloud-init.iso") def get_qemu_path(arch, build_path=None): """Fetch the path to the qemu binary.""" # If QEMU environment variable set, it takes precedence if "QEMU" in os.environ: qemu_path = os.environ["QEMU"] elif build_path: qemu_path = os.path.join(build_path, arch + "-softmmu") qemu_path = os.path.join(qemu_path, "qemu-system-" + arch) else: # Default is to use system path for qemu. qemu_path = "qemu-system-" + arch return qemu_path def get_qemu_version(qemu_path): """Get the version number from the current QEMU, and return the major number.""" output = subprocess.check_output([qemu_path, '--version']) version_line = output.decode("utf-8") version_num = re.split(' |\(', version_line)[3].split('.')[0] return int(version_num) def parse_config(config, args): """ Parse yaml config and populate our config structure. The yaml config allows the user to override the defaults for VM parameters. In many cases these defaults can be overridden without rebuilding the VM.""" if args.config: config_file = args.config elif 'QEMU_CONFIG' in os.environ: config_file = os.environ['QEMU_CONFIG'] else: return config if not os.path.exists(config_file): raise Exception("config file {} does not exist".format(config_file)) # We gracefully handle importing the yaml module # since it might not be installed. # If we are here it means the user supplied a .yml file, # so if the yaml module is not installed we will exit with error. try: import yaml except ImportError: print("The python3-yaml package is needed "\ "to support config.yaml files") # Instead of raising an exception we exit to avoid # a raft of messy (expected) errors to stdout. 
exit(1) with open(config_file) as f: yaml_dict = yaml.safe_load(f) if 'qemu-conf' in yaml_dict: config.update(yaml_dict['qemu-conf']) else: raise Exception("config file {} is not valid"\ " missing qemu-conf".format(config_file)) return config def parse_args(vmcls): def get_default_jobs(): if multiprocessing.cpu_count() > 1: if kvm_available(vmcls.arch): return multiprocessing.cpu_count() // 2 elif os.uname().machine == "x86_64" and \ vmcls.arch in ["aarch64", "x86_64", "i386"]: # MTTCG is available on these arches and we can allow # more cores. but only up to a reasonable limit. User # can always override these limits with --jobs. return min(multiprocessing.cpu_count() // 2, 8) else: return 1 parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Utility for provisioning VMs and running builds", epilog="""Remaining arguments are passed to the command. Exit codes: 0 = success, 1 = command line error, 2 = environment initialization failed, 3 = test command failed""") parser.add_argument("--debug", "-D", action="store_true", help="enable debug output") parser.add_argument("--image", "-i", default="%s.img" % vmcls.name, help="image file name") parser.add_argument("--force", "-f", action="store_true", help="force build image even if image exists") parser.add_argument("--jobs", type=int, default=get_default_jobs(), help="number of virtual CPUs") parser.add_argument("--verbose", "-V", action="store_true", help="Pass V=1 to builds within the guest") parser.add_argument("--build-image", "-b", action="store_true", help="build image") parser.add_argument("--build-qemu", help="build QEMU from source in guest") parser.add_argument("--build-target", help="QEMU build target", default="check") parser.add_argument("--build-path", default=None, help="Path of build directory, "\ "for using build tree QEMU binary. ") parser.add_argument("--source-path", default=None, help="Path of source directory, "\ "for finding additional files. 
") parser.add_argument("--interactive", "-I", action="store_true", help="Interactively run command") parser.add_argument("--snapshot", "-s", action="store_true", help="run tests with a snapshot") parser.add_argument("--genisoimage", default="genisoimage", help="iso imaging tool") parser.add_argument("--config", "-c", default=None, help="Provide config yaml for configuration. "\ "See config_example.yaml for example.") parser.add_argument("--efi-aarch64", default="/usr/share/qemu-efi-aarch64/QEMU_EFI.fd", help="Path to efi image for aarch64 VMs.") parser.add_argument("--log-console", action="store_true", help="Log console to file.") parser.add_argument("commands", nargs="*", help="""Remaining commands after -- are passed to command inside the VM""") return parser.parse_args() def main(vmcls, config=None): try: if config == None: config = DEFAULT_CONFIG args = parse_args(vmcls) if not args.commands and not args.build_qemu and not args.build_image: print("Nothing to do?") return 1 config = parse_config(config, args) logging.basicConfig(level=(logging.DEBUG if args.debug else logging.WARN)) vm = vmcls(args, config=config) if args.build_image: if os.path.exists(args.image) and not args.force: sys.stderr.writelines(["Image file exists: %s\n" % args.image, "Use --force option to overwrite\n"]) return 1 return vm.build_image(args.image) if args.build_qemu: vm.add_source_dir(args.build_qemu) cmd = [vm.BUILD_SCRIPT.format( configure_opts = " ".join(args.commands), jobs=int(args.jobs), target=args.build_target, verbose = "V=1" if args.verbose else "")] else: cmd = args.commands img = args.image if args.snapshot: img += ",snapshot=on" vm.boot(img) vm.wait_ssh() except Exception as e: if isinstance(e, SystemExit) and e.code == 0: return 0 sys.stderr.write("Failed to prepare guest environment\n") traceback.print_exc() return 2 exitcode = 0 if vm.ssh(*cmd) != 0: exitcode = 3 if args.interactive: vm.ssh() if not args.snapshot: vm.graceful_shutdown() return exitcode
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/vm/basevm.py
#!/usr/bin/env python3 # # Ubuntu VM testing library # # Copyright 2017 Red Hat Inc. # Copyright 2020 Linaro # # Authors: # Robert Foley <robert.foley@linaro.org> # Originally based on ubuntu.i386 Fam Zheng <famz@redhat.com> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. import os import subprocess import basevm class UbuntuVM(basevm.BaseVM): def __init__(self, args, config=None): self.login_prompt = "ubuntu-{}-guest login:".format(self.arch) basevm.BaseVM.__init__(self, args, config) def build_image(self, img): """Build an Ubuntu VM image. The child class will define the install_cmds to init the VM.""" os_img = self._download_with_cache(self.image_link, sha256sum=self.image_sha256) img_tmp = img + ".tmp" subprocess.check_call(["cp", "-f", os_img, img_tmp]) self.exec_qemu_img("resize", img_tmp, "+50G") ci_img = self.gen_cloud_init_iso() self.boot(img_tmp, extra_args = [ "-device", "VGA", "-cdrom", ci_img, ]) # First command we issue is fix for slow ssh login. self.wait_ssh(wait_root=True, cmd="chmod -x /etc/update-motd.d/*") # Wait for cloud init to finish self.wait_ssh(wait_root=True, cmd="ls /var/lib/cloud/instance/boot-finished") self.ssh_root("touch /etc/cloud/cloud-init.disabled") # Disable auto upgrades. # We want to keep the VM system state stable. self.ssh_root('sed -ie \'s/"1"/"0"/g\' '\ '/etc/apt/apt.conf.d/20auto-upgrades') self.ssh_root("sed -ie s/^#\ deb-src/deb-src/g /etc/apt/sources.list") # If the user chooses not to do the install phase, # then we will jump right to the graceful shutdown if self._config['install_cmds'] != "": # Issue the install commands. # This can be overriden by the user in the config .yml. install_cmds = self._config['install_cmds'].split(',') for cmd in install_cmds: self.ssh_root(cmd) self.graceful_shutdown() os.rename(img_tmp, img) return 0
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/vm/ubuntuvm.py
#!/usr/bin/env python3 # # VM testing aarch64 library # # Copyright 2020 Linaro # # Authors: # Robert Foley <robert.foley@linaro.org> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. # import os import sys import subprocess import basevm from qemu.utils import kvm_available # This is the config needed for current version of QEMU. # This works for both kvm and tcg. CURRENT_CONFIG = { 'cpu' : "max", 'machine' : "virt,gic-version=max", } # The minimum minor version of QEMU we will support with aarch64 VMs is 3. # QEMU versions less than 3 have various issues running these VMs. QEMU_AARCH64_MIN_VERSION = 3 # The DEFAULT_CONFIG will default to a version of # parameters that works for backwards compatibility. DEFAULT_CONFIG = {'kvm' : {'cpu' : "host", 'machine' : "virt,gic-version=host"}, 'tcg' : {'cpu' : "cortex-a57", 'machine' : "virt"}, } def get_config_defaults(vmcls, default_config): """Fetch the configuration defaults for this VM, taking into consideration the defaults for aarch64 first, followed by the defaults for this VM.""" config = default_config config.update(aarch_get_config_defaults(vmcls)) return config def aarch_get_config_defaults(vmcls): """Set the defaults for current version of QEMU.""" config = CURRENT_CONFIG args = basevm.parse_args(vmcls) qemu_path = basevm.get_qemu_path(vmcls.arch, args.build_path) qemu_version = basevm.get_qemu_version(qemu_path) if qemu_version < QEMU_AARCH64_MIN_VERSION: error = "\nThis major version of QEMU {} is to old for aarch64 VMs.\n"\ "The major version must be at least {}.\n"\ "To continue with the current build of QEMU, "\ "please restart with QEMU_LOCAL=1 .\n" print(error.format(qemu_version, QEMU_AARCH64_MIN_VERSION)) exit(1) if qemu_version == QEMU_AARCH64_MIN_VERSION: # We have an older version of QEMU, # set the config values for backwards compatibility. 
if kvm_available('aarch64'): config.update(DEFAULT_CONFIG['kvm']) else: config.update(DEFAULT_CONFIG['tcg']) return config def create_flash_images(flash_dir="./", efi_img=""): """Creates the appropriate pflash files for an aarch64 VM.""" flash0_path = get_flash_path(flash_dir, "flash0") flash1_path = get_flash_path(flash_dir, "flash1") fd_null = open(os.devnull, 'w') subprocess.check_call(["dd", "if=/dev/zero", "of={}".format(flash0_path), "bs=1M", "count=64"], stdout=fd_null, stderr=subprocess.STDOUT) # A reliable way to get the QEMU EFI image is via an installed package or # via the bios included with qemu. if not os.path.exists(efi_img): sys.stderr.write("*** efi argument is invalid ({})\n".format(efi_img)) sys.stderr.write("*** please check --efi-aarch64 argument or "\ "install qemu-efi-aarch64 package\n") exit(3) subprocess.check_call(["dd", "if={}".format(efi_img), "of={}".format(flash0_path), "conv=notrunc"], stdout=fd_null, stderr=subprocess.STDOUT) subprocess.check_call(["dd", "if=/dev/zero", "of={}".format(flash1_path), "bs=1M", "count=64"], stdout=fd_null, stderr=subprocess.STDOUT) fd_null.close() def get_pflash_args(flash_dir="./"): """Returns a string that can be used to boot qemu using the appropriate pflash files for aarch64.""" flash0_path = get_flash_path(flash_dir, "flash0") flash1_path = get_flash_path(flash_dir, "flash1") pflash_args_str = "-drive file={},format=raw,if=pflash "\ "-drive file={},format=raw,if=pflash" pflash_args = pflash_args_str.format(flash0_path, flash1_path) return pflash_args.split(" ") def get_flash_path(flash_dir, name): return os.path.join(flash_dir, "{}.img".format(name))
nvtrust-main
infrastructure/kvm/qemu/qemu_source/tests/vm/aarch64vm.py