python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
import unittest
from . import _test_utils
import brotli
class TestCompress(_test_utils.TestCase):
    """Round-trip tests for the one-shot brotli.compress() API.

    Concrete test methods are generated below by
    _test_utils.generate_test_methods, one per (fixture file, variant)
    combination.
    """

    # Compression parameter combinations exercised by the generated tests.
    VARIANTS = {'quality': (1, 6, 9, 11), 'lgwin': (10, 15, 20, 24)}

    def _check_decompression(self, test_data, **kwargs):
        # brotli.decompress() accepts none of the compression variants
        # (quality/lgwin), so the kwargs forwarded by the generated tests
        # are deliberately discarded here.
        kwargs = {}
        # Write decompression to temp file and verify it matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        original = test_data
        with open(temp_uncompressed, 'wb') as out_file:
            with open(temp_compressed, 'rb') as in_file:
                out_file.write(brotli.decompress(in_file.read(), **kwargs))
        self.assertFilesMatch(temp_uncompressed, original)

    def _compress(self, test_data, **kwargs):
        # Compress the fixture into its temporary '.bro' companion file.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        with open(temp_compressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                out_file.write(brotli.compress(in_file.read(), **kwargs))

    def _test_compress(self, test_data, **kwargs):
        # Template method: generate_test_methods() binds concrete
        # (file, variant) arguments to copies of this method.
        self._compress(test_data, **kwargs)
        self._check_decompression(test_data, **kwargs)


# Attach one 'test_*' method per (fixture, variant) combination.
_test_utils.generate_test_methods(TestCompress, variants=TestCompress.VARIANTS)

if __name__ == '__main__':
    unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/compress_test.py
|
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
import subprocess
import unittest
from . import _test_utils
import brotli
BRO_ARGS = _test_utils.BRO_ARGS
TEST_ENV = _test_utils.TEST_ENV
def _get_original_name(test_data):
return test_data.split('.compressed')[0]
class TestBroDecompress(_test_utils.TestCase):
    """Tests for the `bro` command-line tool's decompression paths.

    Concrete test methods are generated by generate_test_methods() below,
    one per '*.compressed' fixture file, for both file-argument and pipe
    invocation styles.
    """

    def _check_decompression(self, test_data):
        # Verify decompression matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        original = _get_original_name(test_data)
        self.assertFilesMatch(temp_uncompressed, original)

    def _decompress_file(self, test_data):
        # Decompress via -i/-o file arguments.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        args = BRO_ARGS + ['-f', '-d', '-i', test_data, '-o', temp_uncompressed]
        subprocess.check_call(args, env=TEST_ENV)

    def _decompress_pipe(self, test_data):
        # Decompress via stdin/stdout redirection.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        args = BRO_ARGS + ['-d']
        with open(temp_uncompressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                subprocess.check_call(
                    args, stdin=in_file, stdout=out_file, env=TEST_ENV)

    def _test_decompress_file(self, test_data):
        self._decompress_file(test_data)
        self._check_decompression(test_data)

    def _test_decompress_pipe(self, test_data):
        self._decompress_pipe(test_data)
        self._check_decompression(test_data)


_test_utils.generate_test_methods(TestBroDecompress, for_decompression=True)
class TestBroCompress(_test_utils.TestCase):
    """Tests for the `bro` command-line tool's compression paths.

    Concrete test methods are generated by generate_test_methods() below,
    one per (fixture file, quality/lgwin variant), for both file-argument
    and pipe invocation styles.
    """

    # Compression parameter combinations exercised by the generated tests.
    VARIANTS = {'quality': (1, 6, 9, 11), 'lgwin': (10, 15, 20, 24)}

    def _check_decompression(self, test_data, **kwargs):
        # The compression variants are irrelevant to decompression and are
        # intentionally ignored.
        # Write decompression to temp file and verify it matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        original = test_data
        args = BRO_ARGS + ['-f', '-d']
        args.extend(['-i', temp_compressed, '-o', temp_uncompressed])
        subprocess.check_call(args, env=TEST_ENV)
        self.assertFilesMatch(temp_uncompressed, original)

    def _compress_file(self, test_data, **kwargs):
        # Compress via -i/-o file arguments.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        args = BRO_ARGS + ['-f']
        if 'quality' in kwargs:
            args.extend(['-q', str(kwargs['quality'])])
        if 'lgwin' in kwargs:
            args.extend(['--lgwin', str(kwargs['lgwin'])])
        args.extend(['-i', test_data, '-o', temp_compressed])
        subprocess.check_call(args, env=TEST_ENV)

    def _compress_pipe(self, test_data, **kwargs):
        # Compress via stdin/stdout redirection.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        # Fix: copy BRO_ARGS before extending. The previous `args = BRO_ARGS`
        # aliased the module-level list, so every extend() below permanently
        # appended -q/--lgwin flags to BRO_ARGS, leaking options into all
        # subsequent test invocations.
        args = list(BRO_ARGS)
        if 'quality' in kwargs:
            args.extend(['-q', str(kwargs['quality'])])
        if 'lgwin' in kwargs:
            args.extend(['--lgwin', str(kwargs['lgwin'])])
        with open(temp_compressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                subprocess.check_call(
                    args, stdin=in_file, stdout=out_file, env=TEST_ENV)

    def _test_compress_file(self, test_data, **kwargs):
        self._compress_file(test_data, **kwargs)
        self._check_decompression(test_data)

    def _test_compress_pipe(self, test_data, **kwargs):
        self._compress_pipe(test_data, **kwargs)
        self._check_decompression(test_data)


_test_utils.generate_test_methods(
    TestBroCompress, variants=TestBroCompress.VARIANTS)

if __name__ == '__main__':
    unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/bro_test.py
|
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
import unittest
from . import _test_utils
import brotli
def _get_original_name(test_data):
    # Recover the original fixture path from a '*.compressed*' file name.
    return test_data.split('.compressed')[0]


class TestDecompress(_test_utils.TestCase):
    """Tests for the one-shot brotli.decompress() API.

    Per-fixture test methods are generated by generate_test_methods() below.
    """

    def _check_decompression(self, test_data):
        # Verify decompression matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        original = _get_original_name(test_data)
        self.assertFilesMatch(temp_uncompressed, original)

    def _decompress(self, test_data):
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        with open(temp_uncompressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                out_file.write(brotli.decompress(in_file.read()))

    def _test_decompress(self, test_data):
        self._decompress(test_data)
        self._check_decompression(test_data)

    def test_garbage_appended(self):
        # Trailing garbage after a valid stream must raise, not be ignored.
        with self.assertRaises(brotli.error):
            brotli.decompress(brotli.compress(b'a') + b'a')


_test_utils.generate_test_methods(TestDecompress, for_decompression=True)

if __name__ == '__main__':
    unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/decompress_test.py
|
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
import functools
import unittest
from . import _test_utils
import brotli
def _get_original_name(test_data):
    # Recover the original fixture path from a '*.compressed*' file name.
    return test_data.split('.compressed')[0]


class TestDecompressor(_test_utils.TestCase):
    """Tests for the streaming brotli.Decompressor API."""

    # Feed one byte at a time to exercise every streaming boundary.
    CHUNK_SIZE = 1

    def setUp(self):
        self.decompressor = brotli.Decompressor()

    def tearDown(self):
        self.decompressor = None

    def _check_decompression(self, test_data):
        # Verify decompression matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        original = _get_original_name(test_data)
        self.assertFilesMatch(temp_uncompressed, original)

    def _decompress(self, test_data):
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        with open(temp_uncompressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                # iter(read_chunk, b'') yields CHUNK_SIZE-byte reads until EOF.
                read_chunk = functools.partial(in_file.read, self.CHUNK_SIZE)
                for data in iter(read_chunk, b''):
                    out_file.write(self.decompressor.process(data))
        self.assertTrue(self.decompressor.is_finished())

    def _test_decompress(self, test_data):
        self._decompress(test_data)
        self._check_decompression(test_data)

    def test_garbage_appended(self):
        # Bytes past the end of a valid stream must raise.
        with self.assertRaises(brotli.error):
            self.decompressor.process(brotli.compress(b'a') + b'a')

    def test_already_finished(self):
        # Feeding more data after the stream completed must raise.
        self.decompressor.process(brotli.compress(b'a'))
        with self.assertRaises(brotli.error):
            self.decompressor.process(b'a')


_test_utils.generate_test_methods(TestDecompressor, for_decompression=True)

if __name__ == '__main__':
    unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/decompressor_test.py
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/__init__.py
|
|
from __future__ import print_function
import filecmp
import glob
import itertools
import os
import sys
import sysconfig
import tempfile
import unittest
# Root of the brotli python project (two directories above this package).
project_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
# Both may be overridden from the environment (e.g. by an external build).
test_dir = os.getenv("BROTLI_TESTS_PATH")
BRO_ARGS = [os.getenv("BROTLI_WRAPPER")]

# Fallbacks
if test_dir is None:
    test_dir = os.path.join(project_dir, 'tests')
if BRO_ARGS[0] is None:
    # No wrapper binary given: run the in-tree bro.py with this interpreter.
    python_exe = sys.executable or 'python'
    bro_path = os.path.join(project_dir, 'python', 'bro.py')
    BRO_ARGS = [python_exe, bro_path]

# Get the platform/version-specific build folder.
# By default, the distutils build base is in the same location as setup.py.
platform_lib_name = 'lib.{platform}-{version[0]}.{version[1]}'.format(
    platform=sysconfig.get_platform(), version=sys.version_info)
build_dir = os.path.join(project_dir, 'bin', platform_lib_name)

# Prepend the build folder to sys.path and the PYTHONPATH environment variable.
if build_dir not in sys.path:
    sys.path.insert(0, build_dir)
TEST_ENV = os.environ.copy()
if 'PYTHONPATH' not in TEST_ENV:
    TEST_ENV['PYTHONPATH'] = build_dir
else:
    TEST_ENV['PYTHONPATH'] = build_dir + os.pathsep + TEST_ENV['PYTHONPATH']

TESTDATA_DIR = os.path.join(test_dir, 'testdata')
TESTDATA_FILES = [
    'empty',  # Empty file
    '10x10y',  # Small text
    'alice29.txt',  # Large text
    'random_org_10k.bin',  # Small data
    'mapsdatazrh',  # Large data
]
TESTDATA_PATHS = [os.path.join(TESTDATA_DIR, f) for f in TESTDATA_FILES]
TESTDATA_PATHS_FOR_DECOMPRESSION = glob.glob(
    os.path.join(TESTDATA_DIR, '*.compressed'))
# Scratch directory shared by every test in this process; individual files
# inside it are cleaned up by TestCase.tearDown below.
TEMP_DIR = tempfile.mkdtemp()
def get_temp_compressed_name(filename):
    """Return the TEMP_DIR path for the compressed ('.bro') copy of filename."""
    return os.path.join(TEMP_DIR, os.path.basename(filename + '.bro'))


def get_temp_uncompressed_name(filename):
    """Return the TEMP_DIR path for the decompressed ('.unbro') copy of filename."""
    return os.path.join(TEMP_DIR, os.path.basename(filename + '.unbro'))
def bind_method_args(method, *args, **kwargs):
    """Return a method taking only `self`, with the other arguments pre-bound."""
    def bound(self):
        return method(self, *args, **kwargs)
    return bound
def generate_test_methods(test_case_class,
                          for_decompression=False,
                          variants=None):
    # Add test methods for each test data file. This makes identifying problems
    # with specific compression scenarios easier.
    #
    # For every '_test*' method on test_case_class, one real 'test_*' method
    # is attached per (data file, variant combination), with the file path
    # and variant kwargs pre-bound via bind_method_args().
    if for_decompression:
        paths = TESTDATA_PATHS_FOR_DECOMPRESSION
    else:
        paths = TESTDATA_PATHS
    opts = []
    if variants:
        # Cartesian product of all variant values, named like
        # 'quality_1_lgwin_10' and carried as a kwargs dict.
        opts_list = []
        for k, v in variants.items():
            opts_list.append([r for r in itertools.product([k], v)])
        for o in itertools.product(*opts_list):
            opts_name = '_'.join([str(i) for i in itertools.chain(*o)])
            opts_dict = dict(o)
            opts.append([opts_name, opts_dict])
    else:
        # No variants: a single unnamed combination with no extra kwargs.
        opts.append(['', {}])
    for method in [m for m in dir(test_case_class) if m.startswith('_test')]:
        for testdata in paths:
            for (opts_name, opts_dict) in opts:
                f = os.path.splitext(os.path.basename(testdata))[0]
                name = 'test_{method}_{options}_{file}'.format(
                    method=method, options=opts_name, file=f)
                func = bind_method_args(
                    getattr(test_case_class, method), testdata, **opts_dict)
                setattr(test_case_class, name, func)
class TestCase(unittest.TestCase):
    """Base test case that removes per-fixture temp files after each test."""

    def tearDown(self):
        # Best-effort cleanup: a given test may not have created both (or
        # either) temp file, so missing files are ignored.
        for f in TESTDATA_PATHS:
            try:
                os.unlink(get_temp_compressed_name(f))
            except OSError:
                pass
            try:
                os.unlink(get_temp_uncompressed_name(f))
            except OSError:
                pass

    def assertFilesMatch(self, first, second):
        # shallow=False forces a byte-by-byte content comparison rather than
        # an os.stat() comparison.
        self.assertTrue(
            filecmp.cmp(first, second, shallow=False),
            'File {} differs from {}'.format(first, second))
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/_test_utils.py
|
# Copyright 2016 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
import functools
import unittest
from . import _test_utils
import brotli
# Do not inherit from TestCase here to ensure that test methods
# are not run automatically and instead are run as part of a specific
# configuration below.
class _TestCompressor(object):
    """Shared round-trip tests for the streaming brotli.Compressor API.

    Deliberately not a TestCase itself (see comment above); the
    quality-specific subclasses below mix it into _test_utils.TestCase so
    each configuration runs the full generated suite.
    """

    # Chunk size used by the multi-part process()/flush() tests.
    CHUNK_SIZE = 2048

    def tearDown(self):
        self.compressor = None

    def _check_decompression(self, test_data):
        # Write decompression to temp file and verify it matches the original.
        temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        original = test_data
        with open(temp_uncompressed, 'wb') as out_file:
            with open(temp_compressed, 'rb') as in_file:
                out_file.write(brotli.decompress(in_file.read()))
        self.assertFilesMatch(temp_uncompressed, original)

    def _test_single_process(self, test_data):
        # Write single-shot compression to temp file.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        with open(temp_compressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                out_file.write(self.compressor.process(in_file.read()))
            out_file.write(self.compressor.finish())
        self._check_decompression(test_data)

    def _test_multiple_process(self, test_data):
        # Write chunked compression to temp file.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        with open(temp_compressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                read_chunk = functools.partial(in_file.read, self.CHUNK_SIZE)
                for data in iter(read_chunk, b''):
                    out_file.write(self.compressor.process(data))
            out_file.write(self.compressor.finish())
        self._check_decompression(test_data)

    def _test_multiple_process_and_flush(self, test_data):
        # Write chunked and flushed compression to temp file.
        temp_compressed = _test_utils.get_temp_compressed_name(test_data)
        with open(temp_compressed, 'wb') as out_file:
            with open(test_data, 'rb') as in_file:
                read_chunk = functools.partial(in_file.read, self.CHUNK_SIZE)
                for data in iter(read_chunk, b''):
                    out_file.write(self.compressor.process(data))
                    # Flush after every chunk to exercise flush boundaries.
                    out_file.write(self.compressor.flush())
            out_file.write(self.compressor.finish())
        self._check_decompression(test_data)


_test_utils.generate_test_methods(_TestCompressor)


class TestCompressorQuality1(_TestCompressor, _test_utils.TestCase):
    def setUp(self):
        self.compressor = brotli.Compressor(quality=1)


class TestCompressorQuality6(_TestCompressor, _test_utils.TestCase):
    def setUp(self):
        self.compressor = brotli.Compressor(quality=6)


class TestCompressorQuality9(_TestCompressor, _test_utils.TestCase):
    def setUp(self):
        self.compressor = brotli.Compressor(quality=9)


class TestCompressorQuality11(_TestCompressor, _test_utils.TestCase):
    def setUp(self):
        self.compressor = brotli.Compressor(quality=11)


if __name__ == '__main__':
    unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/BrotliCompress/brotli/python/tests/compressor_test.py
|
# @file NmakeSubdirs.py
# This script support parallel build for nmake in windows environment.
# It supports Python2.x and Python3.x both.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
# Import Modules
#
from __future__ import print_function
import argparse
import threading
import time
import os
import subprocess
import multiprocessing
import copy
import sys
__prog__ = 'NmakeSubdirs'
__version__ = '%s Version %s' % (__prog__, '0.10 ')
__copyright__ = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Replace for NmakeSubdirs.bat in windows ,support parallel build for nmake.\n'
cpu_count = multiprocessing.cpu_count()
output_lock = threading.Lock()
def RunCommand(WorkDir=None, *Args, **kwargs):
    """Run a command in WorkDir, capture its output, and echo it to the console.

    Returns (returncode, stdout). Raises RuntimeError when the command exits
    with a nonzero status; the captured output is included in the message.
    Console output is serialized across worker threads via output_lock.
    """
    if WorkDir is None:
        WorkDir = os.curdir
    # Default to capturing a single merged output stream; callers may still
    # override stdout/stderr (Run() streams directly to sys.stdout).
    if "stderr" not in kwargs:
        kwargs["stderr"] = subprocess.STDOUT
    if "stdout" not in kwargs:
        kwargs["stdout"] = subprocess.PIPE
    p = subprocess.Popen(Args, cwd=WorkDir, stderr=kwargs["stderr"], stdout=kwargs["stdout"])
    stdout, stderr = p.communicate()
    message = ""
    if stdout is not None:
        message = stdout.decode(errors='ignore')  # for compatibility in python 2 and 3
    if p.returncode != 0:
        raise RuntimeError("Error while execute command \'{0}\' in direcotry {1}\n{2}".format(" ".join(Args), WorkDir, message))

    # Fix: use `with` so the lock is always released. Previously the first
    # print() was issued between acquire() and release() without any try/
    # finally, so a printing failure would leave output_lock held forever
    # and deadlock every other worker thread.
    with output_lock:
        print("execute command \"{0}\" in directory {1}".format(" ".join(Args), WorkDir))
        try:
            print(message)
        except Exception:
            # Fix: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Echoing the child's output is
            # best-effort (console encoding may reject some characters).
            pass
    return p.returncode, stdout
class TaskUnit(object):
    """A deferred call: a function plus the positional/keyword arguments
    it should eventually be invoked with."""

    def __init__(self, func, args, kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __eq__(self, other):
        # Identity semantics: a TaskUnit only ever equals itself.
        return id(self) == id(other)

    def run(self):
        """Invoke the stored function and return its result."""
        return self.func(*self.args, **self.kwargs)

    def __str__(self):
        # Render as 'func(arg1,arg2,key=value)'; assumes positional args
        # are strings, matching how tasks are queued in this script.
        rendered = list(self.args)
        for key, value in self.kwargs.items():
            rendered.append("{0}={1}".format(key, value))
        return "{0}({1})".format(self.func.__name__, ",".join(rendered))
class ThreadControl(object):
    """Run queued TaskUnits on up to `maxthread` worker threads.

    The first task raising RuntimeError sets self.error/self.errorMsg, which
    makes all workers drain and exit; Schedule() then reports the failure.
    """

    def __init__(self, maxthread):
        self._processNum = maxthread
        self.pending = []   # queued TaskUnits, guarded by pendingLock
        self.running = []   # live worker threads, guarded by runningLock
        self.pendingLock = threading.Lock()
        self.runningLock = threading.Lock()
        self.error = False
        self.errorLock = threading.Lock()
        self.errorMsg = "errorMsg"  # placeholder until a real error occurs

    def addTask(self, func, *args, **kwargs):
        # Only safe before startSchedule(): pending is appended without the
        # lock here.
        self.pending.append(TaskUnit(func, args, kwargs))

    def waitComplete(self):
        # Block until the scheduler thread (and therefore all workers) ends.
        self._schedule.join()

    def startSchedule(self):
        self._schedule = threading.Thread(target=self.Schedule)
        self._schedule.start()

    def Schedule(self):
        # Spawn the worker threads, then poll until every worker has removed
        # itself from self.running.
        for i in range(self._processNum):
            task = threading.Thread(target=self.startTask)
            task.daemon = False
            self.running.append(task)
        self.runningLock.acquire(True)
        for thread in self.running:
            thread.start()
        self.runningLock.release()
        while len(self.running) > 0:
            time.sleep(0.1)
        if self.error:
            print("subprocess not exit successfully")
            print(self.errorMsg)

    def startTask(self):
        # Worker loop: pop and run pending tasks until the queue is empty or
        # any worker has flagged an error.
        while True:
            if self.error:
                break
            self.pendingLock.acquire(True)
            if len(self.pending) == 0:
                self.pendingLock.release()
                break
            task = self.pending.pop(0)
            self.pendingLock.release()
            try:
                task.run()
            except RuntimeError as e:
                # Record only the first failure; later ones just exit.
                if self.error: break
                self.errorLock.acquire(True)
                self.error = True
                self.errorMsg = str(e)
                # NOTE(review): this sleep appears intended to let peer
                # workers observe self.error before this thread exits —
                # best-effort, not a strict synchronization; confirm.
                time.sleep(0.1)
                self.errorLock.release()
                break
        # De-register this worker so Schedule()'s wait loop can finish.
        self.runningLock.acquire(True)
        self.running.remove(threading.currentThread())
        self.runningLock.release()
def Run():
    """Run 'nmake <target>' in each subdirectory, serially or in parallel.

    Relies on the module-level `args` namespace parsed in __main__ below.
    Exits the process with status 1 when any nmake invocation fails.
    """
    curdir = os.path.abspath(os.curdir)
    # A single subdirectory never benefits from threading.
    if len(args.subdirs) == 1:
        args.jobs = 1
    if args.jobs == 1:
        # Sequential mode: stream nmake output straight to our stdout.
        try:
            for dir in args.subdirs:
                RunCommand(os.path.join(curdir, dir), "nmake", args.target, stdout=sys.stdout, stderr=subprocess.STDOUT)
        except RuntimeError:
            exit(1)
    else:
        # Parallel mode: queue one nmake task per subdirectory.
        controller = ThreadControl(args.jobs)
        for dir in args.subdirs:
            controller.addTask(RunCommand, os.path.join(curdir, dir), "nmake", args.target)
        controller.startSchedule()
        controller.waitComplete()
        if controller.error:
            exit(1)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog=__prog__, description=__description__ + __copyright__, conflict_handler='resolve')
    parser.add_argument("target", help="the target for nmake")
    parser.add_argument("subdirs", nargs="+", help="the relative dir path of makefile")
    parser.add_argument("--jobs", type=int, dest="jobs", default=cpu_count, help="thread number")
    parser.add_argument('--version', action='version', version=__version__)
    # `args` is intentionally module-global; Run() and the tasks read it.
    args = parser.parse_args()
    Run()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Source/C/Makefiles/NmakeSubdirs.py
|
## @file
# Unit tests for AutoGen.UniClassObject
#
# Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import unittest
import codecs
import TestTools
from Common.Misc import PathClass
import AutoGen.UniClassObject as BtUni
from Common import EdkLogger
EdkLogger.InitializeForUnitTest()
class Tests(TestTools.BaseToolsTest):
    """Unit tests for AutoGen.UniClassObject's .uni file encoding checks."""

    # Minimal well-formed .uni content used by most tests.
    SampleData = u'''
#langdef en-US "English"
#string STR_A #language en-US "STR_A for en-US"
'''

    def EncodeToFile(self, encoding, string=None):
        """Write `string` (default SampleData) to a temp 'input.uni' file.

        encoding=None means `string` is written as-is (already-encoded
        bytes). Returns a PathClass for the written file.
        """
        if string is None:
            string = self.SampleData
        if encoding is not None:
            data = codecs.encode(string, encoding)
        else:
            data = string
        path = 'input.uni'
        self.WriteTmpFile(path, data)
        return PathClass(self.GetTmpFilePath(path))

    def ErrorFailure(self, error, encoding, shouldPass):
        # Fail with a message like:
        # "<error> should [not] be generated for <encoding> data in a .uni file"
        msg = error + ' should '
        if shouldPass:
            msg += 'not '
        msg += 'be generated for '
        msg += '%s data in a .uni file' % encoding
        self.fail(msg)

    def UnicodeErrorFailure(self, encoding, shouldPass):
        self.ErrorFailure('UnicodeError', encoding, shouldPass)

    def EdkErrorFailure(self, encoding, shouldPass):
        self.ErrorFailure('EdkLogger.FatalError', encoding, shouldPass)

    def CheckFile(self, encoding, shouldPass, string=None):
        """Parse the encoded file and assert the expected pass/fail outcome."""
        path = self.EncodeToFile(encoding, string)
        try:
            BtUni.UniFileClassObject([path])
            if shouldPass:
                return
        except UnicodeError:
            if not shouldPass:
                return
            else:
                self.UnicodeErrorFailure(encoding, shouldPass)
        except EdkLogger.FatalError:
            if not shouldPass:
                return
            else:
                self.EdkErrorFailure(encoding, shouldPass)
        except Exception:
            # Any other exception falls through to the failure report below.
            pass
        self.EdkErrorFailure(encoding, shouldPass)

    def testUtf16InUniFile(self):
        self.CheckFile('utf_16', shouldPass=True)

    def testSupplementaryPlaneUnicodeCharInUtf16File(self):
        #
        # Supplementary Plane characters can exist in UTF-16 files,
        # but they are not valid UCS-2 characters.
        #
        # This test makes sure that BaseTools rejects these characters
        # if seen in a .uni file.
        #
        data = u'''
#langdef en-US "English"
#string STR_A #language en-US "CodePoint (\U00010300) > 0xFFFF"
'''
        self.CheckFile('utf_16', shouldPass=False, string=data)

    def testSurrogatePairUnicodeCharInUtf16File(self):
        #
        # Surrogate Pair code points are used in UTF-16 files to
        # encode the Supplementary Plane characters. But, a Surrogate
        # Pair code point which is not followed by another Surrogate
        # Pair code point might be interpreted as a single code point
        # with the Surrogate Pair code point.
        #
        # This test makes sure that BaseTools rejects these characters
        # if seen in a .uni file.
        #
        data = codecs.BOM_UTF16_LE + b'//\x01\xd8 '
        self.CheckFile(encoding=None, shouldPass=False, string=data)

    def testValidUtf8File(self):
        self.CheckFile(encoding='utf_8', shouldPass=True)

    def testValidUtf8FileWithBom(self):
        #
        # Same test as testValidUtf8File, but add the UTF-8 BOM
        #
        data = codecs.BOM_UTF8 + codecs.encode(self.SampleData, 'utf_8')
        self.CheckFile(encoding=None, shouldPass=True, string=data)

    # Fix: a first definition of test32bitUnicodeCharInUtf8File (identical
    # data, but encoded as 'utf_16') was removed here. It was immediately
    # shadowed by the definition below sharing its name, so it never ran,
    # and it duplicated testSupplementaryPlaneUnicodeCharInUtf16File anyway.
    def test32bitUnicodeCharInUtf8File(self):
        data = u'''
#langdef en-US "English"
#string STR_A #language en-US "CodePoint (\U00010300) > 0xFFFF"
'''
        self.CheckFile('utf_8', shouldPass=False, string=data)

    def test32bitUnicodeCharInUtf8Comment(self):
        data = u'''
// Even in comments, we reject non-UCS-2 chars: \U00010300
#langdef en-US "English"
#string STR_A #language en-US "A"
'''
        self.CheckFile('utf_8', shouldPass=False, string=data)

    def testSurrogatePairUnicodeCharInUtf8File(self):
        #
        # Surrogate Pair code points are used in UTF-16 files to
        # encode the Supplementary Plane characters. In UTF-8, it is
        # trivial to encode these code points, but they are not valid
        # code points for characters, since they are reserved for the
        # UTF-16 Surrogate Pairs.
        #
        # This test makes sure that BaseTools rejects these characters
        # if seen in a .uni file.
        #
        data = b'\xed\xa0\x81'
        self.CheckFile(encoding=None, shouldPass=False, string=data)

    def testSurrogatePairUnicodeCharInUtf8FileWithBom(self):
        #
        # Same test as testSurrogatePairUnicodeCharInUtf8File, but add
        # the UTF-8 BOM
        #
        data = codecs.BOM_UTF8 + b'\xed\xa0\x81'
        self.CheckFile(encoding=None, shouldPass=False, string=data)


TheTestSuite = TestTools.MakeTheTestSuite(locals())

if __name__ == '__main__':
    allTests = TheTestSuite()
    unittest.TextTestRunner().run(allTests)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/CheckUnicodeSourceFiles.py
|
## @file
# Unit tests for checking syntax of Python source code
#
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import unittest
import py_compile
import TestTools
class Tests(TestTools.BaseToolsTest):
    """Byte-compile every BaseTools Python source file to catch syntax errors."""

    def setUp(self):
        TestTools.BaseToolsTest.setUp(self)

    def SingleFileTest(self, filename):
        # py_compile raises (doraise=True) instead of printing on error.
        try:
            py_compile.compile(filename, doraise=True)
        except Exception as e:
            self.fail('syntax error: %s, Error is %s' % (filename, str(e)))


def MakePythonSyntaxCheckTests():
    # Attach one test method per .py file under the Python source tree so a
    # failure identifies the offending file directly.
    def GetAllPythonSourceFiles():
        pythonSourceFiles = []
        for (root, dirs, files) in os.walk(TestTools.PythonSourceDir):
            for filename in files:
                if filename.lower().endswith('.py'):
                    pythonSourceFiles.append(
                        os.path.join(root, filename)
                        )
        return pythonSourceFiles

    def MakeTestName(filename):
        # Derive a unique method name from the file's path relative to the
        # Python source dir, e.g. 'test_AutoGen_GenC'.
        assert filename.lower().endswith('.py')
        name = filename[:-3]
        name = name.replace(TestTools.PythonSourceDir, '')
        name = name.replace(os.path.sep, '_')
        return 'test' + name

    def MakeNewTest(filename):
        test = MakeTestName(filename)
        # The lambda closes over this call's `filename` binding.
        newmethod = lambda self: self.SingleFileTest(filename)
        setattr(
            Tests,
            test,
            newmethod
            )

    for filename in GetAllPythonSourceFiles():
        MakeNewTest(filename)


MakePythonSyntaxCheckTests()
# The factory is only needed at import time; remove it from the namespace.
del MakePythonSyntaxCheckTests

TheTestSuite = TestTools.MakeTheTestSuite(locals())

if __name__ == '__main__':
    allTests = TheTestSuite()
    unittest.TextTestRunner().run(allTests)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/CheckPythonSyntax.py
|
from __future__ import print_function
## @file
# Utility functions and classes for BaseTools unit tests
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import base64
import codecs
import os
import os.path
import random
import shutil
import stat
import subprocess
import sys
import unittest
# Locations within the BaseTools tree, derived from this script's location.
TestsDir = os.path.realpath(os.path.split(sys.argv[0])[0])
BaseToolsDir = os.path.realpath(os.path.join(TestsDir, '..'))
CSourceDir = os.path.join(BaseToolsDir, 'Source', 'C')
PythonSourceDir = os.path.join(BaseToolsDir, 'Source', 'Python')
# Scratch directory created/removed around each test (see BaseToolsTest).
TestTempDir = os.path.join(TestsDir, 'TestTempDir')

if PythonSourceDir not in sys.path:
    #
    # Allow unit tests to import BaseTools python modules. This is very useful
    # for writing unit tests.
    #
    sys.path.append(PythonSourceDir)
def MakeTheTestSuite(localItems):
tests = []
for name, item in localItems.items():
if isinstance(item, type):
if issubclass(item, unittest.TestCase):
tests.append(unittest.TestLoader().loadTestsFromTestCase(item))
elif issubclass(item, unittest.TestSuite):
tests.append(item())
return lambda: unittest.TestSuite(tests)
def GetBaseToolsPaths():
    """Return candidate directories that may hold prebuilt BaseTools binaries.

    On Windows: Bin/<Platform> from sys.platform.title(). Elsewhere: the
    'uname -sm' string (with ' ' and '/' mapped to '-') selects Bin/ and
    BinWrappers/ variants, with the generic PosixLike wrappers as fallback.
    """
    if sys.platform in ('win32', 'win64'):
        return [ os.path.join(BaseToolsDir, 'Bin', sys.platform.title()) ]
    else:
        uname = os.popen('uname -sm').read().strip()
        for char in (' ', '/'):
            uname = uname.replace(char, '-')
        return [
            os.path.join(BaseToolsDir, 'Bin', uname),
            os.path.join(BaseToolsDir, 'BinWrappers', uname),
            os.path.join(BaseToolsDir, 'BinWrappers', 'PosixLike')
        ]


# Computed once at import; searched in order by FindToolBin().
BaseToolsBinPaths = GetBaseToolsPaths()
class BaseToolsTest(unittest.TestCase):
    """Common fixture for BaseTools tests: a scratch directory plus helpers
    for running the prebuilt tools and reading/writing temp files."""

    def cleanOutDir(self, dir):
        # Remove everything inside `dir`, leaving `dir` itself in place.
        for dirItem in os.listdir(dir):
            if dirItem in ('.', '..'): continue
            dirItem = os.path.join(dir, dirItem)
            self.RemoveFileOrDir(dirItem)

    def CleanUpTmpDir(self):
        if os.path.exists(self.testDir):
            self.cleanOutDir(self.testDir)

    def HandleTreeDeleteError(self, function, path, excinfo):
        # shutil.rmtree error callback: clear the read-only bit and retry the
        # failed operation (needed for read-only files, e.g. on Windows).
        # Fix: `stat` was referenced here but never imported, so this path
        # raised NameError instead of recovering; `import stat` added at top.
        os.chmod(path, stat.S_IWRITE)
        function(path)

    def RemoveDir(self, dir):
        shutil.rmtree(dir, False, self.HandleTreeDeleteError)

    def RemoveFileOrDir(self, path):
        """Delete `path` whether it is a file, a directory, or absent."""
        if not os.path.exists(path):
            return
        elif os.path.isdir(path):
            self.RemoveDir(path)
        else:
            os.remove(path)

    def DisplayBinaryData(self, description, data):
        # Dump data as base64 so binary test failures are diagnosable.
        print(description, '(base64 encoded):')
        b64data = base64.b64encode(data)
        print(b64data)

    def DisplayFile(self, fileName):
        sys.stdout.write(self.ReadTmpFile(fileName))
        sys.stdout.flush()

    def FindToolBin(self, toolName):
        """Return the first existing path for toolName among BaseToolsBinPaths."""
        for binPath in BaseToolsBinPaths:
            bin = os.path.join(binPath, toolName)
            if os.path.exists(bin):
                break
        assert os.path.exists(bin)
        return bin

    def RunTool(self, *args, **kwd):
        """Run a BaseTools binary and return its exit status.

        Keyword args: toolName (defaults to self.toolName) and logFile (a
        name under self.testDir that captures stdout+stderr). Without a
        logFile the output is read and discarded so the child cannot block
        on a full pipe.
        """
        toolName = kwd.get('toolName')
        if toolName is None:
            toolName = self.toolName
        logFile = kwd.get('logFile')
        bin = self.FindToolBin(toolName)
        logFileHandle = None
        if logFile is not None:
            logFileHandle = open(os.path.join(self.testDir, logFile), 'w')
            popenOut = logFileHandle
        else:
            popenOut = subprocess.PIPE
        try:
            args = [toolName] + list(args)
            Proc = subprocess.Popen(
                args, executable=bin,
                stdout=popenOut, stderr=subprocess.STDOUT
                )
            if logFileHandle is None:
                Proc.stdout.read()
            return Proc.wait()
        finally:
            # Fix: the log file handle used to be leaked (never closed).
            if logFileHandle is not None:
                logFileHandle.close()

    def GetTmpFilePath(self, fileName):
        return os.path.join(self.testDir, fileName)

    def OpenTmpFile(self, fileName, mode = 'r'):
        return open(os.path.join(self.testDir, fileName), mode)

    def ReadTmpFile(self, fileName):
        # Text-mode read; round-trip tests rely on this matching the
        # text/UTF-8 writes done by WriteTmpFile/GenRandomFileData.
        with open(self.GetTmpFilePath(fileName), 'r') as f:
            return f.read()

    def WriteTmpFile(self, fileName, data):
        # bytes are written verbatim; text is encoded as UTF-8.
        if isinstance(data, bytes):
            with open(self.GetTmpFilePath(fileName), 'wb') as f:
                f.write(data)
        else:
            with codecs.open(self.GetTmpFilePath(fileName), 'w', encoding='utf-8') as f:
                f.write(data)

    def GenRandomFileData(self, fileName, minlen = None, maxlen = None):
        """Fill a temp file with GetRandomString(minlen, maxlen) content."""
        if maxlen is None: maxlen = minlen
        with self.OpenTmpFile(fileName, 'w') as f:
            f.write(self.GetRandomString(minlen, maxlen))

    def GetRandomString(self, minlen = None, maxlen = None):
        # NOTE(review): returns a str of code points 0-255 (not bytes); when
        # written out it is UTF-8 encoded, so the on-disk bytes are not
        # uniformly random — confirm callers only need a textual round trip.
        if minlen is None: minlen = 1024
        if maxlen is None: maxlen = minlen
        return ''.join(
            [chr(random.randint(0, 255))
             for x in range(random.randint(minlen, maxlen))
            ])

    def setUp(self):
        # Save PATH/sys.path so tearDown can restore them exactly.
        self.savedEnvPath = os.environ['PATH']
        self.savedSysPath = sys.path[:]

        # Make the prebuilt tools reachable via PATH for child processes.
        for binPath in BaseToolsBinPaths:
            os.environ['PATH'] = \
                os.path.pathsep.join((os.environ['PATH'], binPath))

        self.testDir = TestTempDir
        if not os.path.exists(self.testDir):
            os.mkdir(self.testDir)
        else:
            self.cleanOutDir(self.testDir)

    def tearDown(self):
        self.RemoveFileOrDir(self.testDir)

        os.environ['PATH'] = self.savedEnvPath
        sys.path = self.savedSysPath
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/TestTools.py
|
## @file
# Test whether PYTHON_COMMAND is available
#
# Copyright (c) 2013 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
# Emit 'TRUE' so the build scripts probing PYTHON_COMMAND can confirm this
# interpreter runs at all.
if __name__ == '__main__':
    print('TRUE')
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/PythonTest.py
|
## @file
# Unit tests for TianoCompress utility
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
import os
import random
import sys
import unittest
import TestTools
class Tests(TestTools.BaseToolsTest):
    """Round-trip tests for the TianoCompress utility."""

    def setUp(self):
        TestTools.BaseToolsTest.setUp(self)
        self.toolName = 'TianoCompress'

    def testHelp(self):
        # --help must be recognized and exit successfully.
        result = self.RunTool('--help', logFile='help')
        #self.DisplayFile('help')
        self.assertTrue(result == 0)

    def compressionTestCycle(self, data):
        """Compress then decompress `data`; assert the round trip is lossless."""
        # Fix: removed an unused local (`path = self.GetTmpFilePath('input')`)
        # that was computed and never read.
        self.WriteTmpFile('input', data)
        result = self.RunTool(
            '-e',
            '-o', self.GetTmpFilePath('output1'),
            self.GetTmpFilePath('input')
            )
        self.assertTrue(result == 0)
        result = self.RunTool(
            '-d',
            '-o', self.GetTmpFilePath('output2'),
            self.GetTmpFilePath('output1')
            )
        self.assertTrue(result == 0)
        start = self.ReadTmpFile('input')
        finish = self.ReadTmpFile('output2')
        startEqualsFinish = start == finish
        if not startEqualsFinish:
            # Dump all three stages so the mismatch is diagnosable.
            print()
            print('Original data did not match decompress(compress(data))')
            self.DisplayBinaryData('original data', start)
            self.DisplayBinaryData('after compression', self.ReadTmpFile('output1'))
            self.DisplayBinaryData('after decompression', finish)
        self.assertTrue(startEqualsFinish)

    def testRandomDataCycles(self):
        for i in range(8):
            data = self.GetRandomString(1024, 2048)
            self.compressionTestCycle(data)
            self.CleanUpTmpDir()


TheTestSuite = TestTools.MakeTheTestSuite(locals())

if __name__ == '__main__':
    allTests = TheTestSuite()
    unittest.TextTestRunner().run(allTests)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/TianoCompress.py
|
## @file
# Routines for generating Pcd Database
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
import unittest
from Common.Misc import RemoveCComments
from Workspace.BuildClassObject import ArrayIndex
class TestRe(unittest.TestCase):
    """Tests for the shared C-comment-removal and array-index regexes."""

    def test_ccomments(self):
        """RemoveCComments strips //- and /* */-style comments while leaving
        plain initializers and (wide) character literals untouched."""
        TestStr1 = """ {0x01,0x02} """
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(TestStr1, RemoveCComments(TestStr1))
        TestStr2 = """ L'TestString' """
        self.assertEqual(TestStr2, RemoveCComments(TestStr2))
        TestStr3 = """ 'TestString' """
        self.assertEqual(TestStr3, RemoveCComments(TestStr3))
        TestStr4 = """
{CODE({
{0x01, {0x02, 0x03, 0x04 }},// Data comment
{0x01, {0x02, 0x03, 0x04 }},// Data comment
})
} /*
This is multiple line comments
The seconde line comment
*/
// This is a comment
"""
        Expect_TestStr4 = """{CODE({
{0x01, {0x02, 0x03, 0x04 }},
{0x01, {0x02, 0x03, 0x04 }},
})
}"""
        self.assertEqual(Expect_TestStr4, RemoveCComments(TestStr4).strip())

    def test_ArrayIndex(self):
        """ArrayIndex matches each bracketed index, including empty ones.

        Renamed from Test_ArrayIndex: unittest only discovers methods whose
        name starts with the (case-sensitive) prefix 'test', so the original
        spelling was silently never executed.
        """
        TestStr1 = """[1]"""
        self.assertEqual(['[1]'], ArrayIndex.findall(TestStr1))
        TestStr2 = """[1][2][0x1][0x01][]"""
        self.assertEqual(['[1]', '[2]', '[0x1]', '[0x01]', '[]'], ArrayIndex.findall(TestStr2))
if __name__ == '__main__':
    # Run the regular-expression tests directly from the command line.
    unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/TestRegularExpression.py
|
## @file
# Unit tests for Python based BaseTools
#
# Copyright (c) 2008 - 2015, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
import unittest
def TheTestSuite():
    """Return a suite aggregating every Python-based BaseTools test module."""
    # Imports stay local so the aggregator only loads test modules on demand.
    import CheckPythonSyntax
    import CheckUnicodeSourceFiles
    modules = (CheckPythonSyntax, CheckUnicodeSourceFiles)
    return unittest.TestSuite(mod.TheTestSuite() for mod in modules)

if __name__ == '__main__':
    unittest.TextTestRunner().run(TheTestSuite())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/PythonToolsTests.py
|
## @file
# Unit tests for C based BaseTools
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
import unittest
import TianoCompress
# C-tool test modules whose suites are aggregated by TheTestSuite().
modules = (TianoCompress,)

def TheTestSuite():
    """Return a suite combining the suites of all modules above."""
    return unittest.TestSuite(mod.TheTestSuite() for mod in modules)

if __name__ == '__main__':
    unittest.TextTestRunner().run(TheTestSuite())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/CToolsTests.py
|
## @file
# Unit tests for BaseTools utilities
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
import unittest
import TestTools
def GetCTestSuite():
    """Return the suite of tests for the C-based BaseTools."""
    import CToolsTests
    return CToolsTests.TheTestSuite()

def GetPythonTestSuite():
    """Return the suite of tests for the Python-based BaseTools."""
    import PythonToolsTests
    return PythonToolsTests.TheTestSuite()

def GetAllTestsSuite():
    """Return a single suite combining the C and Python test suites."""
    return unittest.TestSuite([GetCTestSuite(), GetPythonTestSuite()])

if __name__ == '__main__':
    # Verbose runner so individual test names are visible in CI logs.
    unittest.TextTestRunner(verbosity=2).run(GetAllTestsSuite())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Tests/RunTests.py
|
##
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import json
# The plugin class is only defined when edk2toolext is importable; without
# it this module degrades to exposing just the BuildToolsReport class below.
try:
    from edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin

    class BuildToolsReportGenerator(IUefiBuildPlugin):
        """Build plugin that writes a build-tools version report both
        before and after each build."""

        def do_report(self, thebuilder):
            """Collect aggregated version info and write the report files."""
            try:
                from edk2toolext.environment import version_aggregator
            except ImportError:
                logging.critical("Loading BuildToolsReportGenerator failed, please update your Edk2-PyTool-Extensions")
                return 0

            # Report lives next to the build output; create the folder if needed.
            OutputReport = os.path.join(thebuilder.env.GetValue("BUILD_OUTPUT_BASE"), "BUILD_TOOLS_REPORT")
            OutputReport = os.path.normpath(OutputReport)
            if not os.path.isdir(os.path.dirname(OutputReport)):
                os.makedirs(os.path.dirname(OutputReport))

            Report = BuildToolsReport()
            Report.MakeReport(version_aggregator.GetVersionAggregator().GetAggregatedVersionInformation(), OutputReport=OutputReport)

        def do_pre_build(self, thebuilder):
            # Reporting at pre-build keeps the info available even if the build fails.
            self.do_report(thebuilder)
            return 0

        def do_post_build(self, thebuilder):
            self.do_report(thebuilder)
            return 0

except ImportError:
    pass
class BuildToolsReport(object):
    """Render aggregated build-tool version information as HTML and JSON."""

    # Directory containing this plugin (and its HTML template).
    MY_FOLDER = os.path.dirname(os.path.realpath(__file__))
    VERSION = "1.00"

    def __init__(self):
        pass

    def MakeReport(self, BuildTools, OutputReport="BuildToolsReport"):
        """Write <OutputReport>.html and <OutputReport>.json from *BuildTools*.

        BuildTools: mapping of name -> version-info dict; each value must
        carry a 'type' key, which is used as the sort key.
        OutputReport: output path prefix; '.html' and '.json' are appended.
        """
        logging.info("Writing BuildToolsReports to {0}".format(OutputReport))

        # Only the values are reported; sort by tool type for stable output.
        versions_list = sorted(BuildTools.values(), key=lambda k: k['type'])

        json_dict = {"modules": versions_list,
                     "PluginVersion": BuildToolsReport.VERSION}

        # Context managers ensure the three files are closed even when a
        # write fails part-way (the original leaked handles on exception).
        template_path = os.path.join(BuildToolsReport.MY_FOLDER, "BuildToolsReport_Template.html")
        with open(OutputReport + ".html", "w") as htmlfile, \
                open(OutputReport + ".json", "w") as jsonfile, \
                open(template_path, "r") as template:
            for line in template.readlines():
                if "%TO_BE_FILLED_IN_BY_PYTHON_SCRIPT%" in line:
                    line = line.replace("%TO_BE_FILLED_IN_BY_PYTHON_SCRIPT%", json.dumps(json_dict))
                htmlfile.write(line)
            jsonfile.write(json.dumps(versions_list, indent=4))
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Plugin/BuildToolsReport/BuildToolsReportGenerator.py
|
# @file LinuxGcc5ToolChain.py
# Plugin to configures paths for GCC5 ARM/AARCH64 Toolchain
##
# This plugin works in conjuncture with the tools_def
#
# Copyright (c) Microsoft Corporation
# Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
from edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin
from edk2toolext.environment import shell_environment
class LinuxGcc5ToolChain(IUefiBuildPlugin):
    """Derives the GCC5_<ARCH>_PREFIX shell variables for the ARM, AARCH64,
    RISCV64 and LoongArch64 cross compilers from the corresponding
    GCC5_<ARCH>_INSTALL locations, so tools_def can find the compilers."""

    def do_post_build(self, thebuilder):
        # Nothing to do after the build.
        return 0

    def do_pre_build(self, thebuilder):
        """Set up cross-compiler prefixes for the GCC5 tool chain tag.

        Returns 0 on success, otherwise the first non-zero checker result.
        """
        self.Logger = logging.getLogger("LinuxGcc5ToolChain")
        #
        # GCC5 - The ARM and AARCH64 compilers need their paths set if available
        if thebuilder.env.GetValue("TOOL_CHAIN_TAG") == "GCC5":
            # Start with AARCH64 compiler
            ret = self._check_aarch64()
            if ret != 0:
                self.Logger.critical("Failed in check aarch64")
                return ret
            # Check arm compiler
            ret = self._check_arm()
            if ret != 0:
                self.Logger.critical("Failed in check arm")
                return ret
            # Check RISCV64 compiler
            ret = self._check_riscv64()
            if ret != 0:
                self.Logger.critical("Failed in check riscv64")
                return ret
            # Check LoongArch64 compiler
            ret = self._check_loongarch64()
            if ret != 0:
                self.Logger.critical("Failed in check loongarch64")
                return ret
        return 0

    def _check_arm(self):
        """Derive GCC5_ARM_PREFIX from GCC5_ARM_INSTALL when unset.

        Returns 0 on success (or nothing to do), -2 when the derived prefix
        does not point at a gcc binary.
        """
        # check to see if full path already configured
        if shell_environment.GetEnvironment().get_shell_var("GCC5_ARM_PREFIX") is not None:
            self.Logger.info("GCC5_ARM_PREFIX is already set.")
        else:
            # now check for install dir.  If set then set the Prefix
            install_path = shell_environment.GetEnvironment().get_shell_var("GCC5_ARM_INSTALL")
            if install_path is None:
                return 0
            # make GCC5_ARM_PREFIX to align with tools_def.txt
            prefix = os.path.join(install_path, "bin", "arm-none-linux-gnueabihf-")
            shell_environment.GetEnvironment().set_shell_var("GCC5_ARM_PREFIX", prefix)
        # now confirm it exists
        if not os.path.exists(shell_environment.GetEnvironment().get_shell_var("GCC5_ARM_PREFIX") + "gcc"):
            self.Logger.error("Path for GCC5_ARM_PREFIX toolchain is invalid")
            return -2
        return 0

    def _check_aarch64(self):
        """Derive GCC5_AARCH64_PREFIX from GCC5_AARCH64_INSTALL when unset.

        Returns 0 on success (or nothing to do), -2 when the derived prefix
        does not point at a gcc binary.
        """
        # check to see if full path already configured
        if shell_environment.GetEnvironment().get_shell_var("GCC5_AARCH64_PREFIX") is not None:
            self.Logger.info("GCC5_AARCH64_PREFIX is already set.")
        else:
            # now check for install dir.  If set then set the Prefix
            install_path = shell_environment.GetEnvironment(
            ).get_shell_var("GCC5_AARCH64_INSTALL")
            if install_path is None:
                return 0
            # make GCC5_AARCH64_PREFIX to align with tools_def.txt
            prefix = os.path.join(install_path, "bin", "aarch64-none-linux-gnu-")
            shell_environment.GetEnvironment().set_shell_var("GCC5_AARCH64_PREFIX", prefix)
        # now confirm it exists
        if not os.path.exists(shell_environment.GetEnvironment().get_shell_var("GCC5_AARCH64_PREFIX") + "gcc"):
            self.Logger.error(
                "Path for GCC5_AARCH64_PREFIX toolchain is invalid")
            return -2
        return 0

    def _check_riscv64(self):
        """Derive GCC5_RISCV64_PREFIX from GCC5_RISCV64_INSTALL and point
        LD_LIBRARY_PATH at the toolchain's lib directory.

        Returns 0 on success (or no install dir set), -2 when the derived
        prefix does not point at a gcc binary.
        """
        # now check for install dir.  If set then set the Prefix
        install_path = shell_environment.GetEnvironment(
        ).get_shell_var("GCC5_RISCV64_INSTALL")
        if install_path is None:
            return 0
        # check to see if full path already configured
        if shell_environment.GetEnvironment().get_shell_var("GCC5_RISCV64_PREFIX") is not None:
            self.Logger.info("GCC5_RISCV64_PREFIX is already set.")
        else:
            # make GCC5_RISCV64_PREFIX to align with tools_def.txt
            prefix = os.path.join(install_path, "bin", "riscv64-unknown-elf-")
            shell_environment.GetEnvironment().set_shell_var("GCC5_RISCV64_PREFIX", prefix)
        # now confirm it exists
        if not os.path.exists(shell_environment.GetEnvironment().get_shell_var("GCC5_RISCV64_PREFIX") + "gcc"):
            self.Logger.error(
                "Path for GCC5_RISCV64_PREFIX toolchain is invalid")
            return -2
        # Check if LD_LIBRARY_PATH is set for the libraries of RISC-V GCC toolchain
        # NOTE(review): even when LD_LIBRARY_PATH is already set it is
        # overwritten (not appended to) just below - confirm this is intended.
        if shell_environment.GetEnvironment().get_shell_var("LD_LIBRARY_PATH") is not None:
            self.Logger.info("LD_LIBRARY_PATH is already set.")
        prefix = os.path.join(install_path, "lib")
        shell_environment.GetEnvironment().set_shell_var("LD_LIBRARY_PATH", prefix)
        return 0

    def _check_loongarch64(self):
        """Derive GCC5_LOONGARCH64_PREFIX from GCC5_LOONGARCH64_INSTALL when
        unset.

        Returns 0 on success (or nothing to do), -2 when the derived prefix
        does not point at a gcc binary.
        """
        # check to see if full path already configured
        if shell_environment.GetEnvironment().get_shell_var("GCC5_LOONGARCH64_PREFIX") is not None:
            self.Logger.info("GCC5_LOONGARCH64_PREFIX is already set.")
        else:
            # now check for install dir.  If set then set the Prefix
            install_path = shell_environment.GetEnvironment(
            ).get_shell_var("GCC5_LOONGARCH64_INSTALL")
            if install_path is None:
                return 0
            # make GCC5_LOONGARCH64_PREFIX to align with tools_def.txt
            prefix = os.path.join(install_path, "bin", "loongarch64-unknown-linux-gnu-")
            shell_environment.GetEnvironment().set_shell_var("GCC5_LOONGARCH64_PREFIX", prefix)
        # now confirm it exists
        if not os.path.exists(shell_environment.GetEnvironment().get_shell_var("GCC5_LOONGARCH64_PREFIX") + "gcc"):
            self.Logger.error(
                "Path for GCC5_LOONGARCH64_PREFIX toolchain is invalid")
            return -2
        return 0
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Plugin/LinuxGcc5ToolChain/LinuxGcc5ToolChain.py
|
# @file WindowsVsToolChain.py
# Plugin to configures paths for the VS2017 and VS2019 tool chain
##
# This plugin works in conjuncture with the tools_def
#
# Copyright (c) Microsoft Corporation
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
from edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin
import edk2toollib.windows.locate_tools as locate_tools
from edk2toollib.windows.locate_tools import FindWithVsWhere
from edk2toolext.environment import shell_environment
from edk2toolext.environment import version_aggregator
from edk2toollib.utility_functions import GetHostInfo
class WindowsVsToolChain(IUefiBuildPlugin):
    """Resolves the VS2017/VS2019 install and MSVC tool version, sets the
    VS20xx_PREFIX / VS20xx_HOST shell variables expected by tools_def, and
    imports the MSVC environment variables into the build shell."""

    def do_post_build(self, thebuilder):
        # No post-build work required.
        return 0

    def do_pre_build(self, thebuilder):
        """Configure the VS2017 or VS2019 environment for the build.

        Returns 0 on success, -1 when the install/version cannot be
        determined, -2 when the derived prefix path does not exist.
        """
        self.Logger = logging.getLogger("WindowsVsToolChain")
        # MSVC environment variables to import into the build shell.
        interesting_keys = ["ExtensionSdkDir", "INCLUDE", "LIB", "LIBPATH", "UniversalCRTSdkDir",
                            "UCRTVersion", "WindowsLibPath", "WindowsSdkBinPath", "WindowsSdkDir", "WindowsSdkVerBinPath",
                            "WindowsSDKVersion", "VCToolsInstallDir", "Path"]
        #
        # VS2017 - Follow VS2017 where there is potential for many versions of the tools.
        # If a specific version is required then the user must set both env variables:
        # VS150INSTALLPATH:  base install path on system to VC install dir.  Here you will find the VC folder, etc
        # VS150TOOLVER:      version number for the VC compiler tools
        # VS2017_PREFIX:     path to MSVC compiler folder with trailing slash (can be used instead of two vars above)
        # VS2017_HOST:       set the host architecture to use for host tools, and host libs, etc
        if thebuilder.env.GetValue("TOOL_CHAIN_TAG") == "VS2017":
            # check to see if host is configured
            # HostType for VS2017 should be (defined in tools_def):
            # x86   == 32bit Intel
            # x64   == 64bit Intel
            # arm   == 32bit Arm
            # arm64 == 64bit Arm
            #
            HostType = shell_environment.GetEnvironment().get_shell_var("VS2017_HOST")
            if HostType is not None:
                HostType = HostType.lower()
                self.Logger.info(
                    f"HOST TYPE defined by environment.  Host Type is {HostType}")
            else:
                HostInfo = GetHostInfo()
                if HostInfo.arch == "x86":
                    if HostInfo.bit == "32":
                        HostType = "x86"
                    elif HostInfo.bit == "64":
                        HostType = "x64"
                else:
                    raise NotImplementedError()
            # VS2017_HOST options are not exactly the same as QueryVcVariables. This translates.
            VC_HOST_ARCH_TRANSLATOR = {
                "x86": "x86", "x64": "AMD64", "arm": "not supported", "arm64": "not supported"}
            # check to see if full path already configured
            if shell_environment.GetEnvironment().get_shell_var("VS2017_PREFIX") != None:
                self.Logger.info("VS2017_PREFIX is already set.")
            else:
                install_path = self._get_vs_install_path(
                    "VS2017".lower(), "VS150INSTALLPATH")
                vc_ver = self._get_vc_version(install_path, "VS150TOOLVER")
                if install_path is None or vc_ver is None:
                    self.Logger.error(
                        "Failed to configure environment for VS2017")
                    return -1
                version_aggregator.GetVersionAggregator().ReportVersion(
                    "Visual Studio Install Path", install_path, version_aggregator.VersionTypes.INFO)
                version_aggregator.GetVersionAggregator().ReportVersion(
                    "VC Version", vc_ver, version_aggregator.VersionTypes.TOOL)
                # make VS2017_PREFIX to align with tools_def.txt
                prefix = os.path.join(install_path, "VC",
                                      "Tools", "MSVC", vc_ver)
                prefix = prefix + os.path.sep
                shell_environment.GetEnvironment().set_shell_var("VS2017_PREFIX", prefix)
                shell_environment.GetEnvironment().set_shell_var("VS2017_HOST", HostType)
            shell_env = shell_environment.GetEnvironment()
            # Use the tools lib to determine the correct values for the vars that interest us.
            vs_vars = locate_tools.QueryVcVariables(
                interesting_keys, VC_HOST_ARCH_TRANSLATOR[HostType], vs_version="vs2017")
            for (k, v) in vs_vars.items():
                shell_env.set_shell_var(k, v)
            # now confirm it exists
            if not os.path.exists(shell_environment.GetEnvironment().get_shell_var("VS2017_PREFIX")):
                self.Logger.error("Path for VS2017 toolchain is invalid")
                return -2
        #
        # VS2019 - Follow VS2019 where there is potential for many versions of the tools.
        # If a specific version is required then the user must set both env variables:
        # VS160INSTALLPATH:  base install path on system to VC install dir.  Here you will find the VC folder, etc
        # VS160TOOLVER:      version number for the VC compiler tools
        # VS2019_PREFIX:     path to MSVC compiler folder with trailing slash (can be used instead of two vars above)
        # VS2019_HOST:       set the host architecture to use for host tools, and host libs, etc
        elif thebuilder.env.GetValue("TOOL_CHAIN_TAG") == "VS2019":
            # check to see if host is configured
            # HostType for VS2019 should be (defined in tools_def):
            # x86   == 32bit Intel
            # x64   == 64bit Intel
            # arm   == 32bit Arm
            # arm64 == 64bit Arm
            #
            HostType = shell_environment.GetEnvironment().get_shell_var("VS2019_HOST")
            if HostType is not None:
                HostType = HostType.lower()
                self.Logger.info(
                    f"HOST TYPE defined by environment.  Host Type is {HostType}")
            else:
                HostInfo = GetHostInfo()
                if HostInfo.arch == "x86":
                    if HostInfo.bit == "32":
                        HostType = "x86"
                    elif HostInfo.bit == "64":
                        HostType = "x64"
                else:
                    raise NotImplementedError()
            # VS2019_HOST options are not exactly the same as QueryVcVariables. This translates.
            VC_HOST_ARCH_TRANSLATOR = {
                "x86": "x86", "x64": "AMD64", "arm": "not supported", "arm64": "not supported"}
            # check to see if full path already configured
            if shell_environment.GetEnvironment().get_shell_var("VS2019_PREFIX") != None:
                self.Logger.info("VS2019_PREFIX is already set.")
            else:
                install_path = self._get_vs_install_path(
                    "VS2019".lower(), "VS160INSTALLPATH")
                vc_ver = self._get_vc_version(install_path, "VS160TOOLVER")
                if install_path is None or vc_ver is None:
                    self.Logger.error(
                        "Failed to configure environment for VS2019")
                    return -1
                version_aggregator.GetVersionAggregator().ReportVersion(
                    "Visual Studio Install Path", install_path, version_aggregator.VersionTypes.INFO)
                version_aggregator.GetVersionAggregator().ReportVersion(
                    "VC Version", vc_ver, version_aggregator.VersionTypes.TOOL)
                # make VS2019_PREFIX to align with tools_def.txt
                prefix = os.path.join(install_path, "VC",
                                      "Tools", "MSVC", vc_ver)
                prefix = prefix + os.path.sep
                shell_environment.GetEnvironment().set_shell_var("VS2019_PREFIX", prefix)
                shell_environment.GetEnvironment().set_shell_var("VS2019_HOST", HostType)
            shell_env = shell_environment.GetEnvironment()
            # Use the tools lib to determine the correct values for the vars that interest us.
            vs_vars = locate_tools.QueryVcVariables(
                interesting_keys, VC_HOST_ARCH_TRANSLATOR[HostType], vs_version="vs2019")
            for (k, v) in vs_vars.items():
                shell_env.set_shell_var(k, v)
            # now confirm it exists
            if not os.path.exists(shell_environment.GetEnvironment().get_shell_var("VS2019_PREFIX")):
                self.Logger.error("Path for VS2019 toolchain is invalid")
                return -2
        return 0

    def _get_vs_install_path(self, vs_version, varname):
        """Return the VS install path from shell var *varname*, or locate the
        latest install with vswhere; None when neither works."""
        # check if already specified
        path = None
        if varname is not None:
            path = shell_environment.GetEnvironment().get_shell_var(varname)
        if(path is None):
            # Not specified...find latest
            try:
                path = FindWithVsWhere(vs_version=vs_version)
            except (EnvironmentError, ValueError, RuntimeError) as e:
                self.Logger.error(str(e))
                return None
            if path is not None and os.path.exists(path):
                self.Logger.debug("Found VS instance for %s", vs_version)
            else:
                self.Logger.error(
                    f"VsWhere successfully executed, but could not find VS instance for {vs_version}.")
        return path

    def _get_vc_version(self, path, varname):
        """Return the MSVC tool version from shell var *varname*, or pick one
        from <path>/VC/Tools/MSVC; None/unchanged when unavailable."""
        # check if already specified
        vc_ver = shell_environment.GetEnvironment().get_shell_var(varname)
        if (path is None):
            self.Logger.critical(
                "Failed to find Visual Studio tools.  Might need to check for VS install")
            return vc_ver
        if(vc_ver is None):
            # Not specified...find latest
            p2 = os.path.join(path, "VC", "Tools", "MSVC")
            if not os.path.isdir(p2):
                self.Logger.critical(
                    "Failed to find VC tools.  Might need to check for VS install")
                return vc_ver
            # NOTE(review): os.listdir order is lexicographic, which may not
            # be the newest version for multi-digit components - verify.
            vc_ver = os.listdir(p2)[-1].strip()  # get last in list
            self.Logger.debug("Found VC Tool version is %s" % vc_ver)
        return vc_ver
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Plugin/WindowsVsToolChain/WindowsVsToolChain.py
|
## @file WinRcPath.py
# Plugin to find Windows SDK Resource Compiler rc.exe
##
# This plugin works in conjuncture with the tools_def to support rc.exe
#
# Copyright (c) Microsoft Corporation
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
from edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin
import edk2toollib.windows.locate_tools as locate_tools
from edk2toolext.environment import shell_environment
from edk2toolext.environment import version_aggregator
class WinRcPath(IUefiBuildPlugin):
    """Locates rc.exe in the Windows SDK and publishes its directory as the
    WINSDK_PATH_FOR_RC_EXE shell variable for tools_def to consume."""

    def do_post_build(self, thebuilder):
        # No post-build work required.
        return 0

    def do_pre_build(self, thebuilder):
        """Find rc.exe and record its folder; always returns 0."""
        rc_path = locate_tools.FindToolInWinSdk("rc.exe")
        if rc_path is None:
            # NOTE(review): relies on thebuilder exposing a 'logging'
            # attribute - the stdlib logging module is not imported here.
            thebuilder.logging.warning("Failed to find rc.exe")
            return 0
        rc_dir = os.path.abspath(os.path.dirname(rc_path))
        shell_environment.GetEnvironment().set_shell_var("WINSDK_PATH_FOR_RC_EXE", rc_dir)
        version_aggregator.GetVersionAggregator().ReportVersion("WINSDK_PATH_FOR_RC_EXE", rc_dir, version_aggregator.VersionTypes.INFO)
        return 0
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Plugin/WindowsResourceCompiler/WinRcPath.py
|
# @file HostBasedUnitTestRunner.py
# Plugin to located any host-based unit tests in the output directory and execute them.
##
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import logging
import glob
import stat
import xml.etree.ElementTree
from edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin
from edk2toolext import edk2_logging
import edk2toollib.windows.locate_tools as locate_tools
from edk2toolext.environment import shell_environment
from edk2toollib.utility_functions import RunCmd
from edk2toollib.utility_functions import GetHostInfo
class HostBasedUnitTestRunner(IUefiBuildPlugin):
    """Post-build plugin that discovers and runs host-based unit tests from
    the build output, parses their XML result files, and (optionally)
    generates code-coverage reports."""

    def do_pre_build(self, thebuilder):
        '''
        Run Prebuild
        '''
        return 0

    def do_post_build(self, thebuilder):
        '''
        After a build, will automatically locate and run all host-based unit tests. Logs any
        failures with Warning severity and will return a count of the failures as the return code.
        EXPECTS:
        - Build Var 'CI_BUILD_TYPE' - If not set to 'host_unit_test', will not do anything.
        UPDATES:
        - Shell Var 'CMOCKA_XML_FILE'
        '''
        ci_type = thebuilder.env.GetValue('CI_BUILD_TYPE')
        if ci_type != 'host_unit_test':
            return 0
        shell_env = shell_environment.GetEnvironment()
        logging.log(edk2_logging.get_section_level(),
                    "Run Host based Unit Tests")
        path = thebuilder.env.GetValue("BUILD_OUTPUT_BASE")
        failure_count = 0
        # Set up the reporting type for Cmocka.
        shell_env.set_shell_var('CMOCKA_MESSAGE_OUTPUT', 'xml')
        for arch in thebuilder.env.GetValue("TARGET_ARCH").split():
            logging.log(edk2_logging.get_subsection_level(),
                        "Testing for architecture: " + arch)
            cp = os.path.join(path, arch)
            # If any old results XML files exist, clean them up.
            for old_result in glob.iglob(os.path.join(cp, "*.result.xml")):
                os.remove(old_result)
            # Find and Run any Host Tests
            if GetHostInfo().os.upper() == "LINUX":
                testList = glob.glob(os.path.join(cp, "*Test*"))
                # Iterate over a copy so entries can be removed while looping.
                for a in testList[:]:
                    p = os.path.join(cp, a)
                    # It must be a file
                    if not os.path.isfile(p):
                        testList.remove(a)
                        logging.debug(f"Remove directory file: {p}")
                        continue
                    # It must be executable
                    if os.stat(p).st_mode & (stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH) == 0:
                        testList.remove(a)
                        logging.debug(f"Remove non-executable file: {p}")
                        continue
                    logging.info(f"Test file found: {p}")
            elif GetHostInfo().os.upper() == "WINDOWS":
                testList = glob.glob(os.path.join(cp, "*Test*.exe"))
            else:
                raise NotImplementedError("Unsupported Operating System")
            for test in testList:
                # Configure output name if test uses cmocka.
                shell_env.set_shell_var(
                    'CMOCKA_XML_FILE', test + ".CMOCKA.%g." + arch + ".result.xml")
                # Configure output name if test uses gtest.
                shell_env.set_shell_var(
                    'GTEST_OUTPUT', "xml:" + test + ".GTEST." + arch + ".result.xml")
                # Run the test.
                ret = RunCmd('"' + test + '"', "", workingdir=cp)
                if ret != 0:
                    logging.error("UnitTest Execution Error: " +
                                  os.path.basename(test))
                else:
                    logging.info("UnitTest Completed: " +
                                 os.path.basename(test))
                    file_match_pattern = test + ".*." + arch + ".result.xml"
                    xml_results_list = glob.glob(file_match_pattern)
                    # Walk suite -> case -> result and count 'failure' tags.
                    for xml_result_file in xml_results_list:
                        root = xml.etree.ElementTree.parse(
                            xml_result_file).getroot()
                        for suite in root:
                            for case in suite:
                                for result in case:
                                    if result.tag == 'failure':
                                        logging.warning(
                                            "%s Test Failed" % os.path.basename(test))
                                        logging.warning(
                                            " %s - %s" % (case.attrib['name'], result.text))
                                        failure_count += 1
        if thebuilder.env.GetValue("CODE_COVERAGE") != "FALSE":
            if thebuilder.env.GetValue("TOOL_CHAIN_TAG") == "GCC5":
                self.gen_code_coverage_gcc(thebuilder)
            elif thebuilder.env.GetValue("TOOL_CHAIN_TAG").startswith ("VS"):
                self.gen_code_coverage_msvc(thebuilder)
            else:
                logging.info("Skipping code coverage. Currently, support GCC and MSVC compiler.")
        return failure_count

    def gen_code_coverage_gcc(self, thebuilder):
        """Generate lcov-based coverage data and cobertura XML for GCC builds.

        Returns 0 on success, 1 when any lcov/lcov_cobertura step fails.
        """
        logging.info("Generating UnitTest code coverage")
        buildOutputBase = thebuilder.env.GetValue("BUILD_OUTPUT_BASE")
        workspace = thebuilder.env.GetValue("WORKSPACE")
        # Generate base code coverage for all source files
        ret = RunCmd("lcov", f"--no-external --capture --initial --directory {buildOutputBase} --output-file {buildOutputBase}/cov-base.info --rc lcov_branch_coverage=1")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed to build initial coverage data.")
            return 1
        # Coverage data for tested files only
        ret = RunCmd("lcov", f"--capture --directory {buildOutputBase}/ --output-file {buildOutputBase}/coverage-test.info --rc lcov_branch_coverage=1")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed to build coverage data for tested files.")
            return 1
        # Aggregate all coverage data
        ret = RunCmd("lcov", f"--add-tracefile {buildOutputBase}/cov-base.info --add-tracefile {buildOutputBase}/coverage-test.info --output-file {buildOutputBase}/total-coverage.info --rc lcov_branch_coverage=1")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed to aggregate coverage data.")
            return 1
        # Generate coverage XML
        ret = RunCmd("lcov_cobertura",f"{buildOutputBase}/total-coverage.info -o {buildOutputBase}/compare.xml")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed to generate coverage XML.")
            return 1
        # Filter out auto-generated and test code
        ret = RunCmd("lcov_cobertura",f"{buildOutputBase}/total-coverage.info --excludes ^.*UnitTest\|^.*MU\|^.*Mock\|^.*DEBUG -o {buildOutputBase}/coverage.xml")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed generate filtered coverage XML.")
            return 1
        # Generate all coverage file
        testCoverageList = glob.glob (f"{workspace}/Build/**/total-coverage.info", recursive=True)
        coverageFile = ""
        for testCoverage in testCoverageList:
            coverageFile += " --add-tracefile " + testCoverage
        ret = RunCmd("lcov", f"{coverageFile} --output-file {workspace}/Build/all-coverage.info --rc lcov_branch_coverage=1")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed generate all coverage file.")
            return 1
        # Generate and XML file if requested.for all package
        if os.path.isfile(f"{workspace}/Build/coverage.xml"):
            os.remove(f"{workspace}/Build/coverage.xml")
        ret = RunCmd("lcov_cobertura",f"{workspace}/Build/all-coverage.info --excludes ^.*UnitTest\|^.*MU\|^.*Mock\|^.*DEBUG -o {workspace}/Build/coverage.xml")
        return 0

    def gen_code_coverage_msvc(self, thebuilder):
        """Generate OpenCppCoverage-based cobertura XML for MSVC builds.

        Returns 0 on success, 1 when an OpenCppCoverage invocation fails.
        """
        logging.info("Generating UnitTest code coverage")
        buildOutputBase = thebuilder.env.GetValue("BUILD_OUTPUT_BASE")
        testList = glob.glob(os.path.join(buildOutputBase, "**","*Test*.exe"), recursive=True)
        workspace = thebuilder.env.GetValue("WORKSPACE")
        workspace = (workspace + os.sep) if workspace[-1] != os.sep else workspace
        # Generate coverage file
        coverageFile = ""
        for testFile in testList:
            ret = RunCmd("OpenCppCoverage", f"--source {workspace} --export_type binary:{testFile}.cov -- {testFile}")
            coverageFile += " --input_coverage=" + testFile + ".cov"
            if ret != 0:
                logging.error("UnitTest Coverage: Failed to collect coverage data.")
                return 1
        # Generate and XML file if requested.by each package
        ret = RunCmd("OpenCppCoverage", f"--export_type cobertura:{os.path.join(buildOutputBase, 'coverage.xml')} --working_dir={workspace}Build {coverageFile}")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed to generate cobertura format xml in single package.")
            return 1
        # Generate total report XML file for all package
        testCoverageList = glob.glob(os.path.join(workspace, "Build", "**","*Test*.exe.cov"), recursive=True)
        coverageFile = ""
        for testCoverage in testCoverageList:
            coverageFile += " --input_coverage=" + testCoverage
        ret = RunCmd("OpenCppCoverage", f"--export_type cobertura:{workspace}Build/coverage.xml --working_dir={workspace}Build {coverageFile}")
        if ret != 0:
            logging.error("UnitTest Coverage: Failed to generate cobertura format xml.")
            return 1
        return 0
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Plugin/HostBasedUnitTestRunner/HostBasedUnitTestRunner.py
|
## @file
# Detect unreferenced PCD and GUID/Protocols/PPIs.
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
DetectNotUsedItem
'''
import re
import os
import sys
import argparse
#
# Globals for help information
#
__prog__ = 'DetectNotUsedItem'
__version__ = '%s Version %s' % (__prog__, '0.1')
__copyright__ = 'Copyright (c) 2019, Intel Corporation. All rights reserved.'
__description__ = "Detect unreferenced PCD and GUID/Protocols/PPIs.\n"
# DEC section names whose declarations are candidates for the unused check.
SectionList = ["LibraryClasses", "Guids", "Ppis", "Protocols", "Pcd"]
class PROCESS(object):
    """Find declarations in a DEC file that no DSC/FDF/INF file references.

    Attributes:
      Dec     -- path of the DEC file under inspection.
      InfPath -- list of package directories searched for referencing files.
      Log     -- report lines accumulated by Display() for optional logging.
    """

    def __init__(self, DecPath, InfDirs):
        self.Dec = DecPath
        self.InfPath = InfDirs
        self.Log = []

    def ParserDscFdfInfFile(self):
        """Return the comment-stripped lines of every DSC/FDF/INF file found."""
        AllContentList = []
        for File in self.SearchbyExt([".dsc", ".fdf", ".inf"]):
            AllContentList += self.ParseDscFdfInfContent(File)
        return AllContentList

    # Search File by extension name
    def SearchbyExt(self, ExtList):
        """Return every file under self.InfPath whose name ends with one of
        *ExtList* (a single extension string, or a list of extensions).

        Uses isinstance() instead of type() comparison, and relies on
        str.endswith() accepting a tuple of suffixes so one directory walk
        covers both call styles (and a file is reported at most once even
        if it matches several extensions).
        """
        if isinstance(ExtList, str):
            Exts = (ExtList,)
        else:
            Exts = tuple(ExtList)
        FileList = []
        for path in self.InfPath:
            for root, _, files in os.walk(path, topdown=True, followlinks=False):
                for filename in files:
                    if filename.endswith(Exts):
                        FileList.append(os.path.join(root, filename))
        return FileList

    # Parse DEC file to get Line number and Name
    # return section name, the Item Name and comments line number
    def ParseDecContent(self):
        """Parse self.Dec and return (ItemName, Comments).

        ItemName: {0-based line index: declared name} for non-blank,
        non-comment lines inside interesting sections.
        Comments: {item line index: list of line indices (preceding comment
        lines plus the item line itself) that Clean() drops with the item}.
        """
        SectionRE = re.compile(r'\[(.*)\]')
        Flag = False
        Comments = {}
        Comment_Line = []
        ItemName = {}
        with open(self.Dec, 'r') as F:
            for Index, content in enumerate(F):
                NotComment = not content.strip().startswith("#")
                Section = SectionRE.findall(content)
                if Section and NotComment:
                    Flag = self.IsNeedParseSection(Section[0])
                if Flag:
                    Comment_Line.append(Index)
                if NotComment:
                    if content != "\n" and content != "\r\n":
                        ItemName[Index] = content.split('=')[0].split('|')[0].split('#')[0].strip()
                        Comments[Index] = Comment_Line
                        Comment_Line = []
        return ItemName, Comments

    def IsNeedParseSection(self, SectionName):
        """Return True when *SectionName* mentions an entry of SectionList."""
        for item in SectionList:
            if item in SectionName:
                return True
        return False

    # Parse DSC, FDF, INF File, remove comments, return Lines list
    def ParseDscFdfInfContent(self, File):
        """Return *File*'s lines with comments and blank lines removed and
        surrounding whitespace stripped."""
        with open(File, 'r') as F:
            lines = F.readlines()
        # Iterate backwards so removals do not disturb unvisited indices.
        for Index in range(len(lines) - 1, -1, -1):
            if lines[Index].strip().startswith("#") or lines[Index] == "\n" or lines[Index] == "\r\n":
                lines.remove(lines[Index])
            elif "#" in lines[Index]:
                lines[Index] = lines[Index].split("#")[0].strip()
            else:
                lines[Index] = lines[Index].strip()
        return lines

    def DetectNotUsedItem(self):
        """Cross-reference DEC items against all DSC/FDF/INF content.

        Prints the findings via Display() and returns
        ({line index: unused item name}, comment map for Clean()).
        """
        NotUsedItem = {}
        DecItem, DecComments = self.ParseDecContent()
        InfDscFdfContent = self.ParserDscFdfInfFile()
        for LineNum in list(DecItem.keys()):
            DecItemName = DecItem[LineNum]
            # Match whole identifiers only, to avoid partial-name hits.
            Match_reg = re.compile("(?<![a-zA-Z0-9_-])%s(?![a-zA-Z0-9_-])" % DecItemName)
            MatchFlag = False
            for Line in InfDscFdfContent:
                if Match_reg.search(Line):
                    MatchFlag = True
                    break
            if not MatchFlag:
                NotUsedItem[LineNum] = DecItemName
        self.Display(NotUsedItem)
        return NotUsedItem, DecComments

    def Display(self, UnuseDict):
        """Print unused items (1-based line numbers) and mirror the same
        text into self.Log."""
        print("DEC File:\n%s\n%s%s" % (self.Dec, "{:<15}".format("Line Number"), "{:<0}".format("Unused Item")))
        self.Log.append(
            "DEC File:\n%s\n%s%s\n" % (self.Dec, "{:<15}".format("Line Number"), "{:<0}".format("Unused Item")))
        for num in list(sorted(UnuseDict.keys())):
            ItemName = UnuseDict[num]
            print("%s%s%s" % (" " * 3, "{:<12}".format(num + 1), "{:<1}".format(ItemName)))
            self.Log.append(("%s%s%s\n" % (" " * 3, "{:<12}".format(num + 1), "{:<1}".format(ItemName))))

    def Clean(self, UnUseDict, Comments):
        """Rewrite self.Dec in place, dropping each unused item plus the
        lines recorded for it in *Comments*."""
        removednum = []
        for num in list(UnUseDict.keys()):
            if num in list(Comments.keys()):
                removednum += Comments[num]
        with open(self.Dec, 'r') as Dec:
            lines = Dec.readlines()
        try:
            with open(self.Dec, 'w+') as T:
                for linenum in range(len(lines)):
                    if linenum in removednum:
                        continue
                    else:
                        T.write(lines[linenum])
            print("DEC File has been clean: %s" % (self.Dec))
        except Exception as err:
            print(err)
class Main(object):
    """Driver tying the scan, the optional clean-up and logging together."""

    def mainprocess(self, Dec, Dirs, Isclean, LogPath):
        """Validate *Dirs*, run the detection on *Dec*, optionally clean the
        DEC file, then write the log if requested."""
        for dir in Dirs:
            if not os.path.exists(dir):
                print("Error: Invalid path for '--dirs': %s" % dir)
                sys.exit(1)
        Pa = PROCESS(Dec, Dirs)
        unuse, comment = Pa.DetectNotUsedItem()
        if Isclean:
            Pa.Clean(unuse, comment)
        self.Logging(Pa.Log, LogPath)

    def Logging(self, content, LogPath):
        """Write the collected report lines to *LogPath*, when one is given.

        Failures are reported on stdout, never raised.
        """
        if LogPath:
            try:
                # NOTE(review): the parent directory is only derived/created
                # when LogPath itself is an existing directory, in which case
                # the open() below would fail anyway - verify intent.
                if os.path.isdir(LogPath):
                    FilePath = os.path.dirname(LogPath)
                    if not os.path.exists(FilePath):
                        os.makedirs(FilePath)
                with open(LogPath, 'w+') as log:
                    for line in content:
                        log.write(line)
                print("Log save to file: %s" % LogPath)
            except Exception as e:
                print("Save log Error: %s" % e)
def main():
    """Parse command line arguments and run the unused-item detection.

    Exits with status 1 on any argument error instead of continuing.
    """
    parser = argparse.ArgumentParser(prog=__prog__,
                                     description=__description__ + __copyright__,
                                     conflict_handler='resolve')
    parser.add_argument('-i', '--input', metavar="", dest='InputDec', help="Input DEC file name.")
    parser.add_argument('--dirs', metavar="", action='append', dest='Dirs',
                        help="The package directory. To specify more directories, please repeat this option.")
    parser.add_argument('--clean', action='store_true', default=False, dest='Clean',
                        help="Clean the unreferenced items from DEC file.")
    parser.add_argument('--log', metavar="", dest="Logfile", default=False,
                        help="Put log in specified file as well as on console.")
    options = parser.parse_args()
    if not options.InputDec:
        print("Error: the following argument is required:'-i/--input'.")
        sys.exit(1)
    # BUGFIX: the original only *printed* this error and then kept going,
    # running the detection on a nonexistent or non-DEC input file.
    if not (os.path.exists(options.InputDec) and options.InputDec.endswith(".dec")):
        print("Error: Invalid DEC file input: %s" % options.InputDec)
        sys.exit(1)
    if not options.Dirs:
        print("Error: the following argument is required:'--dirs'.")
        sys.exit(1)
    M = Main()
    M.mainprocess(options.InputDec, options.Dirs, options.Clean, options.Logfile)


if __name__ == '__main__':
    main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/DetectNotUsedItem.py
|
#!/usr/bin/python3
'''
Copyright (c) Apple Inc. 2021
SPDX-License-Identifier: BSD-2-Clause-Patent
Class that abstracts PE/COFF debug info parsing via a Python file like
object. You can port this code into an arbitrary debugger by invoking
the classes and passing in a file like object that abstracts the debugger
reading memory.
If you run this file directly it will parse the passed in PE/COFF files
for debug info:
python3 ./efi_pefcoff.py DxeCore.efi
IA32`<path...>/DxeCore.dll load = 0x00000000
EntryPoint = 0x000030d2 TextAddress = 0x00000240 DataAddress = 0x000042c0
.text 0x00000240 (0x04080) flags:0x60000020
.data 0x000042C0 (0x001C0) flags:0xC0000040
.reloc 0x00004480 (0x00240) flags:0x42000040
Note: PeCoffClass uses virtual addresses and not file offsets.
It needs to work when images are loaded into memory.
as long as virtual address map to file addresses this
code can process binary files.
Note: This file can also contain generic worker functions (like GuidNames)
that abstract debugger agnostic services to the debugger.
This file should never import debugger specific modules.
'''
import sys
import os
import uuid
import struct
import re
from ctypes import c_char, c_uint8, c_uint16, c_uint32, c_uint64, c_void_p
from ctypes import ARRAY, sizeof
from ctypes import Structure, LittleEndianStructure
#
# The empty LittleEndianStructure must have _fields_ assigned prior to use or
# sizeof(). Anything that is size UINTN may need to get adjusted.
#
# The issue is ctypes matches our local machine, not the machine we are
# trying to debug. Call patch_ctypes() passing in the byte width from the
# debugger python to make sure you are in sync.
#
# Splitting out the _field_ from the Structure (LittleEndianStructure) class
# allows it to be patched.
#
class EFI_LOADED_IMAGE_PROTOCOL(LittleEndianStructure):
    # _fields_ is intentionally left unassigned here; patch_ctypes() fills
    # it in so the pointer-sized members match the target being debugged.
    pass


# Field layout for EFI_LOADED_IMAGE_PROTOCOL (UEFI spec). Members declared
# as c_void_p are pointer/UINTN sized on the target and are substituted
# with a fixed-width integer type by patch_ctypes().
EFI_LOADED_IMAGE_PROTOCOL_fields_ = [
    ('Revision', c_uint32),
    ('ParentHandle', c_void_p),
    ('SystemTable', c_void_p),
    ('DeviceHandle', c_void_p),
    ('FilePath', c_void_p),
    ('Reserved', c_void_p),
    ('LoadOptionsSize', c_uint32),
    ('LoadOptions', c_void_p),
    ('ImageBase', c_void_p),
    ('ImageSize', c_uint64),
    ('ImageCodeType', c_uint32),
    ('ImageDataType', c_uint32),
    ('Unload', c_void_p),
]
class EFI_GUID(LittleEndianStructure):
    # 128-bit EFI_GUID: 32/16/16-bit little-endian words plus 8 raw bytes
    # (the standard UEFI in-memory GUID layout).
    _fields_ = [
        ('Data1', c_uint32),
        ('Data2', c_uint16),
        ('Data3', c_uint16),
        ('Data4', ARRAY(c_uint8, 8))
    ]


class EFI_SYSTEM_TABLE_POINTER(LittleEndianStructure):
    # Structure used to locate the EFI System Table in memory via its
    # signature; see EFI_SYSTEM_TABLE_POINTER in the UEFI spec.
    _fields_ = [
        ('Signature', c_uint64),
        ('EfiSystemTableBase', c_uint64),
        ('Crc32', c_uint32)
    ]
class EFI_DEBUG_IMAGE_INFO_NORMAL(LittleEndianStructure):
pass
EFI_DEBUG_IMAGE_INFO_NORMAL_fields_ = [
('ImageInfoType', c_uint32),
('LoadedImageProtocolInstance', c_void_p),
('ImageHandle', c_void_p)
]
class EFI_DEBUG_IMAGE_INFO(LittleEndianStructure):
pass
EFI_DEBUG_IMAGE_INFO_fields_ = [
('NormalImage', c_void_p),
]
class EFI_DEBUG_IMAGE_INFO_TABLE_HEADER(LittleEndianStructure):
pass
EFI_DEBUG_IMAGE_INFO_TABLE_HEADER_fields_ = [
('UpdateStatus', c_uint32),
('TableSize', c_uint32),
('EfiDebugImageInfoTable', c_void_p),
]
class EFI_TABLE_HEADER(LittleEndianStructure):
_fields_ = [
('Signature', c_uint64),
('Revision', c_uint32),
('HeaderSize', c_uint32),
('CRC32', c_uint32),
('Reserved', c_uint32),
]
class EFI_CONFIGURATION_TABLE(LittleEndianStructure):
pass
EFI_CONFIGURATION_TABLE_fields_ = [
('VendorGuid', EFI_GUID),
('VendorTable', c_void_p)
]
class EFI_SYSTEM_TABLE(LittleEndianStructure):
pass
EFI_SYSTEM_TABLE_fields_ = [
('Hdr', EFI_TABLE_HEADER),
('FirmwareVendor', c_void_p),
('FirmwareRevision', c_uint32),
('ConsoleInHandle', c_void_p),
('ConIn', c_void_p),
('ConsoleOutHandle', c_void_p),
('ConOut', c_void_p),
('StandardErrHandle', c_void_p),
('StdErr', c_void_p),
('RuntimeService', c_void_p),
('BootService', c_void_p),
('NumberOfTableEntries', c_void_p),
('ConfigurationTable', c_void_p),
]
class EFI_IMAGE_DATA_DIRECTORY(LittleEndianStructure):
_fields_ = [
('VirtualAddress', c_uint32),
('Size', c_uint32)
]
class EFI_TE_IMAGE_HEADER(LittleEndianStructure):
_fields_ = [
('Signature', ARRAY(c_char, 2)),
('Machine', c_uint16),
('NumberOfSections', c_uint8),
('Subsystem', c_uint8),
('StrippedSize', c_uint16),
('AddressOfEntryPoint', c_uint32),
('BaseOfCode', c_uint32),
('ImageBase', c_uint64),
('DataDirectoryBaseReloc', EFI_IMAGE_DATA_DIRECTORY),
('DataDirectoryDebug', EFI_IMAGE_DATA_DIRECTORY)
]
class EFI_IMAGE_DOS_HEADER(LittleEndianStructure):
_fields_ = [
('e_magic', c_uint16),
('e_cblp', c_uint16),
('e_cp', c_uint16),
('e_crlc', c_uint16),
('e_cparhdr', c_uint16),
('e_minalloc', c_uint16),
('e_maxalloc', c_uint16),
('e_ss', c_uint16),
('e_sp', c_uint16),
('e_csum', c_uint16),
('e_ip', c_uint16),
('e_cs', c_uint16),
('e_lfarlc', c_uint16),
('e_ovno', c_uint16),
('e_res', ARRAY(c_uint16, 4)),
('e_oemid', c_uint16),
('e_oeminfo', c_uint16),
('e_res2', ARRAY(c_uint16, 10)),
('e_lfanew', c_uint16)
]
class EFI_IMAGE_FILE_HEADER(LittleEndianStructure):
_fields_ = [
('Machine', c_uint16),
('NumberOfSections', c_uint16),
('TimeDateStamp', c_uint32),
('PointerToSymbolTable', c_uint32),
('NumberOfSymbols', c_uint32),
('SizeOfOptionalHeader', c_uint16),
('Characteristics', c_uint16)
]
class EFI_IMAGE_OPTIONAL_HEADER32(LittleEndianStructure):
_fields_ = [
('Magic', c_uint16),
('MajorLinkerVersion', c_uint8),
('MinorLinkerVersion', c_uint8),
('SizeOfCode', c_uint32),
('SizeOfInitializedData', c_uint32),
('SizeOfUninitializedData', c_uint32),
('AddressOfEntryPoint', c_uint32),
('BaseOfCode', c_uint32),
('BaseOfData', c_uint32),
('ImageBase', c_uint32),
('SectionAlignment', c_uint32),
('FileAlignment', c_uint32),
('MajorOperatingSystemVersion', c_uint16),
('MinorOperatingSystemVersion', c_uint16),
('MajorImageVersion', c_uint16),
('MinorImageVersion', c_uint16),
('MajorSubsystemVersion', c_uint16),
('MinorSubsystemVersion', c_uint16),
('Win32VersionValue', c_uint32),
('SizeOfImage', c_uint32),
('SizeOfHeaders', c_uint32),
('CheckSum', c_uint32),
('Subsystem', c_uint16),
('DllCharacteristics', c_uint16),
('SizeOfStackReserve', c_uint32),
('SizeOfStackCommit', c_uint32),
('SizeOfHeapReserve', c_uint32),
('SizeOfHeapCommit', c_uint32),
('LoaderFlags', c_uint32),
('NumberOfRvaAndSizes', c_uint32),
('DataDirectory', ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16))
]
class EFI_IMAGE_NT_HEADERS32(LittleEndianStructure):
_fields_ = [
('Signature', c_uint32),
('FileHeader', EFI_IMAGE_FILE_HEADER),
('OptionalHeader', EFI_IMAGE_OPTIONAL_HEADER32)
]
class EFI_IMAGE_OPTIONAL_HEADER64(LittleEndianStructure):
_fields_ = [
('Magic', c_uint16),
('MajorLinkerVersion', c_uint8),
('MinorLinkerVersion', c_uint8),
('SizeOfCode', c_uint32),
('SizeOfInitializedData', c_uint32),
('SizeOfUninitializedData', c_uint32),
('AddressOfEntryPoint', c_uint32),
('BaseOfCode', c_uint32),
('BaseOfData', c_uint32),
('ImageBase', c_uint32),
('SectionAlignment', c_uint32),
('FileAlignment', c_uint32),
('MajorOperatingSystemVersion', c_uint16),
('MinorOperatingSystemVersion', c_uint16),
('MajorImageVersion', c_uint16),
('MinorImageVersion', c_uint16),
('MajorSubsystemVersion', c_uint16),
('MinorSubsystemVersion', c_uint16),
('Win32VersionValue', c_uint32),
('SizeOfImage', c_uint32),
('SizeOfHeaders', c_uint32),
('CheckSum', c_uint32),
('Subsystem', c_uint16),
('DllCharacteristics', c_uint16),
('SizeOfStackReserve', c_uint64),
('SizeOfStackCommit', c_uint64),
('SizeOfHeapReserve', c_uint64),
('SizeOfHeapCommit', c_uint64),
('LoaderFlags', c_uint32),
('NumberOfRvaAndSizes', c_uint32),
('DataDirectory', ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16))
]
class EFI_IMAGE_NT_HEADERS64(LittleEndianStructure):
_fields_ = [
('Signature', c_uint32),
('FileHeader', EFI_IMAGE_FILE_HEADER),
('OptionalHeader', EFI_IMAGE_OPTIONAL_HEADER64)
]
class EFI_IMAGE_DEBUG_DIRECTORY_ENTRY(LittleEndianStructure):
_fields_ = [
('Characteristics', c_uint32),
('TimeDateStamp', c_uint32),
('MajorVersion', c_uint16),
('MinorVersion', c_uint16),
('Type', c_uint32),
('SizeOfData', c_uint32),
('RVA', c_uint32),
('FileOffset', c_uint32),
]
class EFI_IMAGE_SECTION_HEADER(LittleEndianStructure):
_fields_ = [
('Name', ARRAY(c_char, 8)),
('VirtualSize', c_uint32),
('VirtualAddress', c_uint32),
('SizeOfRawData', c_uint32),
('PointerToRawData', c_uint32),
('PointerToRelocations', c_uint32),
('PointerToLinenumbers', c_uint32),
('NumberOfRelocations', c_uint16),
('NumberOfLinenumbers', c_uint16),
('Characteristics', c_uint32),
]
# PE/COFF optional-header magic values distinguishing PE32 from PE32+.
EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b
EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b
# Index of the debug entry in the optional header's DataDirectory array.
DIRECTORY_DEBUG = 6
# Map IMAGE_FILE_HEADER.Machine values to EFI architecture short names.
image_machine_dict = {
    0x014c: "IA32",
    0x0200: "IPF",
    0x0EBC: "EBC",
    0x8664: "X64",
    0x01c2: "ARM",
    0xAA64: "AArch64",
    0x5032: "RISC32",
    0x5064: "RISC64",
    0x5128: "RISCV128",
}
def patch_void_p_to_ctype(patch_type, to_patch):
    '''
    Return a copy of the _fields_ list *to_patch* with every c_void_p
    member replaced by *patch_type* (the fixed-width integer type that
    matches the pointer width of the target being debugged). When
    patch_type is None the list is returned unchanged.
    '''
    if patch_type is None:
        return to_patch
    result = []
    for name, c_type in to_patch:
        # BUGFIX: compare against c_void_p itself and substitute the
        # caller-supplied patch_type. The original compared *metaclasses*
        # (type(c_type) == type(c_void_p)), which matches every simple
        # ctype (c_uint16, c_uint64, ...), and always substituted a
        # hard-coded c_uint32 regardless of patch_type — corrupting every
        # non-pointer scalar field, and truncating pointers on 64-bit
        # targets.
        if c_type is c_void_p:
            result.append((name, patch_type))
        else:
            result.append((name, c_type))
    return result
def patch_ctypes(pointer_width=8):
    '''
    Pass in the pointer width of the system being debugged. If it is not
    the same as c_void_p then patch the _fields_ with the correct type.
    For any ctypes Structure that has a c_void_p this function needs to be
    called prior to use or sizeof() to initialize _fields_.

    :param pointer_width: sizeof(UINTN) on the target, in bytes (4 or 8).
    :raises Exception: for any unsupported pointer width.
    '''
    if sizeof(c_void_p) == pointer_width:
        # Host pointer size already matches the target: no patching needed.
        patch_type = None
    elif pointer_width == 16:
        # 128-bit targets are not supported.
        assert False
    elif pointer_width == 8:
        patch_type = c_uint64
    elif pointer_width == 4:
        patch_type = c_uint32
    else:
        # BUGFIX: "Unkown" -> "Unknown" in the error message.
        raise Exception(f'ERROR: Unknown pointer_width = {pointer_width}')
    # If you add a ctypes Structure class with a c_void_p you need to add
    # it to this list. Note: you should use c_void_p for UINTN values.
    EFI_LOADED_IMAGE_PROTOCOL._fields_ = patch_void_p_to_ctype(
        patch_type, EFI_LOADED_IMAGE_PROTOCOL_fields_)
    EFI_DEBUG_IMAGE_INFO_NORMAL._fields_ = patch_void_p_to_ctype(
        patch_type, EFI_DEBUG_IMAGE_INFO_NORMAL_fields_)
    EFI_DEBUG_IMAGE_INFO._fields_ = patch_void_p_to_ctype(
        patch_type, EFI_DEBUG_IMAGE_INFO_fields_)
    EFI_DEBUG_IMAGE_INFO_TABLE_HEADER._fields_ = patch_void_p_to_ctype(
        patch_type, EFI_DEBUG_IMAGE_INFO_TABLE_HEADER_fields_)
    EFI_CONFIGURATION_TABLE._fields_ = patch_void_p_to_ctype(
        patch_type, EFI_CONFIGURATION_TABLE_fields_)
    EFI_SYSTEM_TABLE._fields_ = patch_void_p_to_ctype(
        patch_type, EFI_SYSTEM_TABLE_fields_)
    # patch up anything else that needs to know pointer_width
    EfiStatusClass(pointer_width)
def ctype_to_str(ctype, indent='', hide_list=()):
    '''
    Render a ctypes object as a human readable string by walking the
    _fields_ of its class.

    :param ctype:     ctypes Structure instance to dump.
    :param indent:    prefix prepended to every output line.
    :param hide_list: container of field names to omit (e.g. ('Reserved',)).
    '''
    # BUGFIX/idiom: the default for hide_list was a mutable list ([]),
    # which is shared across all calls; an immutable tuple avoids that
    # Python pitfall while membership tests behave identically.
    result = ''
    for field in ctype._fields_:
        attr = getattr(ctype, field[0])
        tname = type(attr).__name__
        if field[0] in hide_list:
            continue
        result += indent + f'{field[0]} = '
        if tname == 'EFI_GUID':
            # GUIDs print as their well-known C global name when known.
            result += GuidNames.to_name(GuidNames.to_uuid(attr)) + '\n'
        elif issubclass(type(attr), Structure):
            # Nested structure: recurse with extra indentation.
            result += f'{tname}\n' + \
                ctype_to_str(attr, indent + ' ', hide_list)
        elif isinstance(attr, int):
            result += f'0x{attr:x}\n'
        else:
            result += f'{attr}\n'
    return result
def hexline(addr, data):
hexstr = ''
printable = ''
for i in range(0, len(data)):
hexstr += f'{data[i]:02x} '
printable += chr(data[i]) if data[i] > 0x20 and data[i] < 0x7f else '.'
return f'{addr:04x} {hexstr:48s} |{printable:s}|'
def hexdump(data, indent=''):
    """Return a classic 16-bytes-per-row hex dump of *data*, one
    newline-terminated row per 16-byte slice, each prefixed by indent."""
    buf = data if isinstance(data, bytearray) else bytearray(data)
    rows = [indent + hexline(offset, buf[offset:offset + 16])
            for offset in range(0, len(buf), 16)]
    return ''.join(row + '\n' for row in rows)
class EfiTpl:
    '''Render an EFI task priority level (EFI_TPL) as a readable string.'''

    def __init__(self, tpl):
        self.tpl = tpl

    def __str__(self):
        # Named TPL base values from the UEFI spec: 4 = APPLICATION,
        # 8 = CALLBACK, 16 = NOTIFY, 31 = HIGH_LEVEL; anything between a
        # base and the next level prints as "NAME + offset".
        level = self.tpl
        if level < 4:
            return f'{level:d}'
        if level < 8:
            base, name = 4, "TPL_APPLICATION"
        elif level < 16:
            base, name = 8, "TPL_CALLBACK"
        elif level < 31:
            base, name = 16, "TPL_NOTIFY"
        elif level == 31:
            return "TPL_HIGH_LEVEL"
        else:
            return f'Invalid TPL = {level:d}'
        offset = level - base
        return name + (f' + {offset:d}' if offset > 0 else '')
class EfiBootMode:
    '''
    Decode an EFI_BOOT_MODE value into its human readable PI-spec name.

    Methods
    -----------
    to_str(boot_mode, default)
        return the name for boot_mode, or default when there is no match.
    '''

    EFI_BOOT_MODE_dict = {
        0x00: "BOOT_WITH_FULL_CONFIGURATION",
        0x01: "BOOT_WITH_MINIMAL_CONFIGURATION",
        0x02: "BOOT_ASSUMING_NO_CONFIGURATION_CHANGES",
        0x03: "BOOT_WITH_FULL_CONFIGURATION_PLUS_DIAGNOSTICS",
        0x04: "BOOT_WITH_DEFAULT_SETTINGS",
        0x05: "BOOT_ON_S4_RESUME",
        0x06: "BOOT_ON_S5_RESUME",
        0x07: "BOOT_WITH_MFG_MODE_SETTINGS",
        0x10: "BOOT_ON_S2_RESUME",
        0x11: "BOOT_ON_S3_RESUME",
        0x12: "BOOT_ON_FLASH_UPDATE",
        0x20: "BOOT_IN_RECOVERY_MODE",
    }

    def __init__(self, boot_mode):
        self._boot_mode = boot_mode

    def __str__(self):
        return type(self).to_str(self._boot_mode)

    @classmethod
    def to_str(cls, boot_mode, default=''):
        try:
            return cls.EFI_BOOT_MODE_dict[boot_mode]
        except KeyError:
            return default
class EfiStatusClass:
    '''
    Class to decode EFI_STATUS to a human readable string. You need to
    pass in pointer_width to get the correct value since the EFI_STATUS
    code values are different based on the sizeof UINTN. The default is
    sizeof(UINTN) == 8.

    Attributes
    ----------
    _dict_ : dictionary
        dictionary of EFI_STATUS that has been updated to match
        pointer_width.

    Methods
    -----------
    patch_dictionary(pointer_width)
    to_str(status, default)
    '''
    _dict_ = {}
    # Canonical EFI_STATUS values as laid out for a 32-bit UINTN; the top
    # bits (error 0x80000000, warning-class 0x20000000) are shifted up to
    # bits 61..63 for 64-bit targets by patch_dictionary().
    _EFI_STATUS_UINT32_dict = {
        0: "Success",
        1: "Warning Unknown Glyph",
        2: "Warning Delete Failure",
        3: "Warning Write Failure",
        4: "Warning Buffer Too Small",
        5: "Warning Stale Data",
        # BUGFIX: was misspelled "Warngin File System".
        6: "Warning File System",
        (0x20000000 | 0): "Warning interrupt source pending",
        (0x20000000 | 1): "Warning interrupt source quiesced",
        (0x80000000 | 1): "Load Error",
        (0x80000000 | 2): "Invalid Parameter",
        (0x80000000 | 3): "Unsupported",
        (0x80000000 | 4): "Bad Buffer Size",
        (0x80000000 | 5): "Buffer Too Small",
        (0x80000000 | 6): "Not Ready",
        (0x80000000 | 7): "Device Error",
        (0x80000000 | 8): "Write Protected",
        (0x80000000 | 9): "Out of Resources",
        (0x80000000 | 10): "Volume Corrupt",
        (0x80000000 | 11): "Volume Full",
        (0x80000000 | 12): "No Media",
        (0x80000000 | 13): "Media changed",
        (0x80000000 | 14): "Not Found",
        (0x80000000 | 15): "Access Denied",
        (0x80000000 | 16): "No Response",
        (0x80000000 | 17): "No mapping",
        (0x80000000 | 18): "Time out",
        (0x80000000 | 19): "Not started",
        (0x80000000 | 20): "Already started",
        (0x80000000 | 21): "Aborted",
        (0x80000000 | 22): "ICMP Error",
        (0x80000000 | 23): "TFTP Error",
        (0x80000000 | 24): "Protocol Error",
        (0x80000000 | 25): "Incompatible Version",
        (0x80000000 | 26): "Security Violation",
        (0x80000000 | 27): "CRC Error",
        (0x80000000 | 28): "End of Media",
        (0x80000000 | 31): "End of File",
        (0x80000000 | 32): "Invalid Language",
        (0x80000000 | 33): "Compromised Data",
        (0x80000000 | 35): "HTTP Error",
        (0xA0000000 | 0): "Interrupt Pending",
    }

    def __init__(self, status=None, pointer_width=8):
        self.status = status
        # this will convert to the 64-bit encoding if needed
        self.patch_dictionary(pointer_width)

    def __str__(self):
        return self.to_str(self.status)

    @classmethod
    def to_str(cls, status, default=''):
        '''Return the human readable name for status, or default.'''
        return cls._dict_.get(status, default)

    @classmethod
    def patch_dictionary(cls, pointer_width):
        '''Populate _dict_ with status keys encoded for pointer_width.

        Returns True when the dictionary was (re)built, False when it was
        already populated or pointer_width is unsupported.
        '''
        if cls._dict_:
            # only patch the class variable once
            return False
        if pointer_width == 4:
            # BUGFIX: the original assigned cls._dict (creating a brand
            # new attribute) instead of cls._dict_, and fell through
            # without returning — so 32-bit status decoding never worked.
            cls._dict_ = dict(cls._EFI_STATUS_UINT32_dict)
            return True
        if pointer_width == 8:
            for key, value in cls._EFI_STATUS_UINT32_dict.items():
                # Move the error/warning bits from bits 29..31 to 61..63.
                mask = (key & 0xE0000000) << 32
                new_key = (key & 0x1FFFFFFF) | mask
                cls._dict_[new_key] = value
            return True
        return False
class GuidNames:
    '''
    Class to expose the C names of EFI_GUID's. The _dict_ starts with
    common EFI System Table entry EFI_GUID's. _dict_ can get updated with
    the build generated Guid.xref file if a path to a module is passed
    into add_build_guid_file(). If symbols are loaded for any module
    in the build the path of the build product should imply the
    relative location of that build's Guid.xref file.

    Attributes
    ----------
    _dict_ : dictionary
        dictionary of EFI_GUID (uuid) strings to C global names

    Methods
    -------
    to_uuid(uuid)
        convert a hex UUID string or bytearray to a uuid.UUID
    to_name(uuid)
        convert a UUID string to a C global constant name.
    to_guid(guid_name)
        convert a C global constant EFI_GUID name to uuid hex string.
    is_guid_str(name)
        name is a hex UUID string.
        Example: 49152E77-1ADA-4764-B7A2-7AFEFED95E8B
    to_c_guid(value)
        convert a uuid.UUID or UUID string to a c_guid string
        (see is_c_guid())
    from_c_guid(value)
        convert a C guid string to a hex UUID string.
    is_c_guid(name)
        name is the C initialization value for an EFI_GUID. Example:
        { 0x414e6bdd, 0xe47b, 0x47cc, { 0xb2, 0x44, 0xbb, 0x61,
          0x02, 0x0c, 0xf5, 0x16 }}
    add_build_guid_file(module_path, custom_file):
        assume module_path is an edk2 build product and load the Guid.xref
        file from that build to fill in _dict_. If you know the path and
        file name of a custom Guid.xref you can pass it in as custom_file.
    '''
    _dict_ = {  # Common EFI System Table values
        '05AD34BA-6F02-4214-952E-4DA0398E2BB9':
            'gEfiDxeServicesTableGuid',
        '7739F24C-93D7-11D4-9A3A-0090273FC14D':
            'gEfiHobListGuid',
        '4C19049F-4137-4DD3-9C10-8B97A83FFDFA':
            'gEfiMemoryTypeInformationGuid',
        '49152E77-1ADA-4764-B7A2-7AFEFED95E8B':
            'gEfiDebugImageInfoTableGuid',
        '060CC026-4C0D-4DDA-8F41-595FEF00A502':
            'gMemoryStatusCodeRecordGuid',
        'EB9D2D31-2D88-11D3-9A16-0090273FC14D':
            'gEfiSmbiosTableGuid',
        'EB9D2D30-2D88-11D3-9A16-0090273FC14D':
            'gEfiAcpi10TableGuid',
        '8868E871-E4F1-11D3-BC22-0080C73C8881':
            'gEfiAcpi20TableGuid',
    }
    # Guid.xref files that have already been merged into _dict_.
    guid_files = []

    def __init__(self, uuid=None, pointer_width=8):
        # NOTE: the parameter deliberately shadows the uuid module within
        # this method; it may be a str, bytes-like object, or uuid.UUID.
        self.uuid = None if uuid is None else self.to_uuid(uuid)

    def __str__(self):
        if self.uuid is None:
            # No GUID bound: dump the whole known-name table.
            result = ''
            for key, value in GuidNames._dict_.items():
                result += f'{key}: {value}\n'
        else:
            result = self.to_name(self.uuid)
        return result

    @classmethod
    def to_uuid(cls, obj):
        '''Convert a bytes-like little-endian GUID or a UUID string to
        uuid.UUID, trying the most specific representation first.'''
        try:
            return uuid.UUID(bytes_le=bytes(obj))
        except (ValueError, TypeError):
            try:
                return uuid.UUID(bytes_le=obj)
            except (ValueError, TypeError):
                return uuid.UUID(obj)

    @classmethod
    def to_name(cls, uuid):
        '''Return the C global name for a GUID, or its upper-case hex
        string when unknown.'''
        if not isinstance(uuid, str):
            uuid = str(uuid)
        if cls.is_c_guid(uuid):
            uuid = cls.from_c_guid(uuid)
        return cls._dict_.get(uuid.upper(), uuid.upper())

    @classmethod
    def to_guid(cls, guid_name):
        '''Return the upper-case uuid hex string for a C global name.

        :raises KeyError: when guid_name is not in the dictionary.
        '''
        for key, value in cls._dict_.items():
            if guid_name == value:
                return key.upper()
        # BUGFIX: raise with the name we failed to find. The original
        # raised KeyError(key) — i.e. whichever dictionary key happened to
        # be iterated last — and a NameError on an empty dictionary.
        raise KeyError(guid_name)

    @classmethod
    def is_guid_str(cls, name):
        '''True when name looks like a hex UUID string (4+ dashes).'''
        if not isinstance(name, str):
            return False
        return name.count('-') >= 4

    @classmethod
    def to_c_guid(cls, value):
        '''Render a uuid.UUID or UUID string as a C EFI_GUID initializer.'''
        if isinstance(value, uuid.UUID):
            guid = value
        else:
            guid = uuid.UUID(value)
        (data1, data2, data3,
         data4_0, data4_1, data4_2, data4_3,
         data4_4, data4_5, data4_6, data4_7) = struct.unpack(
            '<IHH8B', guid.bytes_le)
        return (f'{{ 0x{data1:08X}, 0x{data2:04X}, 0x{data3:04X}, '
                f'{{ 0x{data4_0:02X}, 0x{data4_1:02X}, 0x{data4_2:02X}, '
                f'0x{data4_3:02X}, 0x{data4_4:02X}, 0x{data4_5:02X}, '
                f'0x{data4_6:02X}, 0x{data4_7:02X} }} }}')

    @classmethod
    def from_c_guid(cls, value):
        '''Convert a C EFI_GUID initializer string to a hex UUID string;
        returns value unchanged when it does not parse.'''
        try:
            hex = [int(x, 16) for x in re.findall(r"[\w']+", value)]
            return (f'{hex[0]:08X}-{hex[1]:04X}-{hex[2]:04X}'
                    + f'-{hex[3]:02X}{hex[4]:02X}-{hex[5]:02X}{hex[6]:02X}'
                    + f'{hex[7]:02X}{hex[8]:02X}{hex[9]:02X}{hex[10]:02X}')
        except ValueError:
            return value

    @classmethod
    def is_c_guid(cls, name):
        '''True when name looks like a C EFI_GUID initializer.'''
        if not isinstance(name, str):
            return False
        return name.count('{') == 2 and name.count('}') == 2

    @classmethod
    def add_build_guid_file(cls, module_path, custom_file=None):
        '''Merge the Guid.xref produced by an edk2 build into _dict_.

        Each processed xref path is remembered in guid_files so it is only
        parsed once. Returns True when the file was found (or previously
        processed).
        '''
        if custom_file is not None:
            xref = custom_file
        else:
            # module_path will look like:
            # <repo>/Build/OvmfX64/DEBUG_XCODE5/X64/../DxeCore.dll
            # Walk backwards looking for a toolchain like name.
            # Then look for GUID database:
            # Build/OvmfX64//DEBUG_XCODE5/FV/Guid.xref
            # NOTE(review): if no DEBUG_/RELEASE_/NOOPT_ component exists
            # in module_path, build_root is unbound and this raises
            # NameError — behavior kept from the original.
            for i in reversed(module_path.split(os.sep)):
                if (i.startswith('DEBUG_') or
                        i.startswith('RELEASE_') or
                        i.startswith('NOOPT_')):
                    build_root = os.path.join(
                        module_path.rsplit(i, 1)[0], i)
                    break
            xref = os.path.join(build_root, 'FV', 'Guid.xref')
        if xref in cls.guid_files:
            # only process each file one time
            return True
        with open(xref) as f:
            content = f.readlines()
        cls.guid_files.append(xref)
        for lines in content:
            try:
                if cls.is_guid_str(lines):
                    # a regex would be more pedantic
                    words = lines.split()
                    cls._dict_[words[0].upper()] = words[1].strip('\n')
            except ValueError:
                pass
        # Cleanup: the original ended with an unreachable `return False`
        # after the unconditional return inside the with-block.
        return True
class EFI_HOB_GENERIC_HEADER(LittleEndianStructure):
    # Common header at the start of every Hand Off Block: the HOB type,
    # its total length in bytes, and a reserved field.
    _fields_ = [
        ('HobType', c_uint16),
        ('HobLength', c_uint16),
        ('Reserved', c_uint32)
    ]


class EFI_HOB_HANDOFF_INFO_TABLE(LittleEndianStructure):
    # Hand-off information table HOB (HobType 1): memory layout handed
    # from the PEI phase, including the end-of-HOB-list address.
    _fields_ = [
        ('Header', EFI_HOB_GENERIC_HEADER),
        ('Version', c_uint32),
        ('BootMode', c_uint32),
        ('EfiMemoryTop', c_uint64),
        ('EfiMemoryBottom', c_uint64),
        ('EfiFreeMemoryTop', c_uint64),
        ('EfiFreeMemoryBottom', c_uint64),
        ('EfiEndOfHobList', c_uint64),
    ]
class EFI_HOB_MEMORY_ALLOCATION(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
('Name', EFI_GUID),
('MemoryBaseAddress', c_uint64),
('MemoryLength', c_uint64),
('MemoryType', c_uint32),
('Reserved', c_uint32),
]
class EFI_HOB_RESOURCE_DESCRIPTOR(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
('Owner', EFI_GUID),
('ResourceType', c_uint32),
('ResourceAttribute', c_uint32),
('PhysicalStart', c_uint64),
('ResourceLength', c_uint64),
]
class EFI_HOB_GUID_TYPE(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
('Name', EFI_GUID),
]
class EFI_HOB_FIRMWARE_VOLUME(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
('BaseAddress', c_uint64),
('Length', c_uint64),
]
class EFI_HOB_CPU(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
('SizeOfMemorySpace', c_uint8),
('SizeOfIoSpace', c_uint8),
('Reserved', ARRAY(c_uint8, 6)),
]
class EFI_HOB_MEMORY_POOL(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
]
class EFI_HOB_FIRMWARE_VOLUME2(LittleEndianStructure):
_fields_ = [
('Header', EFI_HOB_GENERIC_HEADER),
('BaseAddress', c_uint64),
('Length', c_uint64),
('FvName', EFI_GUID),
('FileName', EFI_GUID)
]
class EFI_HOB_FIRMWARE_VOLUME3(LittleEndianStructure):
_fields_ = [
('HobType', c_uint16),
('HobLength', c_uint16),
('Reserved', c_uint32),
('BaseAddress', c_uint64),
('Length', c_uint64),
('AuthenticationStatus', c_uint32),
('ExtractedFv', c_uint8),
('FvName', EFI_GUID),
('FileName', EFI_GUID),
]
class EFI_HOB_UEFI_CAPSULE(LittleEndianStructure):
_fields_ = [
('HobType', c_uint16),
('HobLength', c_uint16),
('Reserved', c_uint32),
('BaseAddress', c_uint64),
('Length', c_uint64),
]
class EfiHob:
    '''
    Parse EFI Hand Off Blocks (HOBs) based on the edk2 C Structures
    defined above. (The original docstring said "Device Paths" — a
    copy/paste error; this class only walks the HOB list.)
    In the context of this class verbose means hexdump extra data.

    Attributes
    ----------
    Hob : list
        List of HOBs. Each entry contains the name, HOB type, HOB length,
        the ctype struct for the HOB, and any extra data. Class-level, so
        the parsed list is cached and shared across instances.

    Methods
    -----------
    get_hob_by_type(hob_type)
        return string that decodes the HOBs of hob_type. If hob_type is
        None then return all HOBs.
    '''
    Hob = []
    verbose = False

    # Map HobType values to the ctypes structure used to decode them;
    # unknown types fall back to the generic header.
    hob_dict = {
        1: EFI_HOB_HANDOFF_INFO_TABLE,
        2: EFI_HOB_MEMORY_ALLOCATION,
        3: EFI_HOB_RESOURCE_DESCRIPTOR,
        4: EFI_HOB_GUID_TYPE,
        5: EFI_HOB_FIRMWARE_VOLUME,
        6: EFI_HOB_CPU,
        7: EFI_HOB_MEMORY_POOL,
        9: EFI_HOB_FIRMWARE_VOLUME2,
        0xb: EFI_HOB_UEFI_CAPSULE,
        0xc: EFI_HOB_FIRMWARE_VOLUME3,
        0xffff: EFI_HOB_GENERIC_HEADER,
    }

    def __init__(self, file, address=None, verbose=False, count=1000):
        # file: file-like object abstracting target-memory reads.
        # address: HOB list address; when None it is looked up through the
        #   gEfiHobListGuid entry of the EFI configuration table.
        # count: safety bound on the number of HOBs walked.
        self._file = file
        EfiHob.verbose = verbose
        if len(EfiHob.Hob) != 0 and address is None:
            # Already parsed once and no explicit address: reuse the cache.
            return
        if address is not None:
            hob_ptr = address
        else:
            hob_ptr = EfiConfigurationTable(file).GetConfigTable(
                '7739F24C-93D7-11D4-9A3A-0090273FC14D')
        self.read_hobs(hob_ptr)

    @ classmethod
    def __str__(cls):
        # NOTE(review): defined as a classmethod; str(instance) still
        # works but this is unconventional.
        return cls.get_hob_by_type(None)

    @ classmethod
    def get_hob_by_type(cls, hob_type):
        '''Return a decoded string for the cached HOBs of hob_type
        (all HOBs when hob_type is None).'''
        result = ""
        for (Name, HobType, HobLen, chob, extra) in cls.Hob:
            if hob_type is not None:
                if hob_type != HobType:
                    continue
            result += f'Type: {Name:s} (0x{HobType:01x}) Len: 0x{HobLen:03x}\n'
            result += ctype_to_str(chob, ' ', ['Reserved'])
            if cls.verbose:
                if extra is not None:
                    result += hexdump(extra, ' ')
        return result

    def read_hobs(self, hob_ptr, count=1000):
        '''Walk the HOB list starting at hob_ptr, caching up to count
        entries in EfiHob.Hob. Stops at the end-of-list marker.'''
        if hob_ptr is None:
            return
        try:
            for _ in range(count):  # while True
                hdr, _ = self._ctype_read_ex(EFI_HOB_GENERIC_HEADER, hob_ptr)
                if hdr.HobType == 0xffff:
                    # End-of-HOB-list marker.
                    break
                type_str = self.hob_dict.get(
                    hdr.HobType, EFI_HOB_GENERIC_HEADER)
                hob, extra = self._ctype_read_ex(
                    type_str, hob_ptr, hdr.HobLength)
                EfiHob.Hob.append(
                    (type(hob).__name__,
                     hdr.HobType,
                     hdr.HobLength,
                     hob,
                     extra))
                hob_ptr += hdr.HobLength
        except ValueError:
            # Stop quietly on a failed read (e.g. truncated memory image).
            pass

    def _ctype_read_ex(self, ctype_struct, offset=0, rsize=None):
        # Read rsize (default sizeof(ctype_struct)) bytes at offset and
        # return (decoded struct, trailing bytes beyond the struct or
        # None). offset == 0 means "read from the current file position".
        if offset != 0:
            self._file.seek(offset)
        type_size = sizeof(ctype_struct)
        size = rsize if rsize else type_size
        data = self._file.read(size)
        cdata = ctype_struct.from_buffer(bytearray(data))
        if size > type_size:
            return cdata, data[type_size:]
        else:
            return cdata, None
class EFI_DEVICE_PATH(LittleEndianStructure):
    # Generic device path node header: every node starts with Type,
    # SubType, and a 16-bit total node length (UEFI spec).
    _pack_ = 1
    _fields_ = [
        ('Type', c_uint8),
        ('SubType', c_uint8),
        # UINT8 Length[2]
        # Cheat and use c_uint16 since we don't care about alignment
        ('Length', c_uint16)
    ]
class PCI_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('Function', c_uint8),
('Device', c_uint8)
]
class PCCARD_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('FunctionNumber', c_uint8),
]
class MEMMAP_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('StartingAddress', c_uint64),
('EndingAddress', c_uint64),
]
class VENDOR_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('Guid', EFI_GUID),
]
class CONTROLLER_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('ControllerNumber', c_uint32),
]
class BMC_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('InterfaceType', c_uint8),
('BaseAddress', ARRAY(c_uint8, 8)),
]
class BBS_BBS_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('DeviceType', c_uint16),
('StatusFlag', c_uint16)
]
class ACPI_HID_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('HID', c_uint32),
('UID', c_uint32)
]
class ACPI_EXTENDED_HID_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('HID', c_uint32),
('UID', c_uint32),
('CID', c_uint32)
]
class ACPI_ADR_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('ARD', c_uint32)
]
class ACPI_NVDIMM_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('NFITDeviceHandle', c_uint32)
]
class ATAPI_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("PrimarySecondary", c_uint8),
("SlaveMaster", c_uint8),
("Lun", c_uint16)
]
class SCSI_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("Pun", c_uint16),
("Lun", c_uint16)
]
class FIBRECHANNEL_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("Reserved", c_uint32),
("WWN", c_uint64),
("Lun", c_uint64)
]
class F1394_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("Reserved", c_uint32),
("Guid", c_uint64)
]
class USB_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("ParentPortNumber", c_uint8),
("InterfaceNumber", c_uint8),
]
class I2O_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("Tid", c_uint32)
]
class INFINIBAND_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("ResourceFlags", c_uint32),
("PortGid", ARRAY(c_uint8, 16)),
("ServiceId", c_uint64),
("TargetPortId", c_uint64),
("DeviceId", c_uint64)
]
class UART_FLOW_CONTROL_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("Guid", EFI_GUID),
("FlowControlMap", c_uint32)
]
class SAS_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
("Guid", EFI_GUID),
("Reserved", c_uint32),
("SasAddress", c_uint64),
("Lun", c_uint64),
("DeviceTopology", c_uint16),
("RelativeTargetPort", c_uint16)
]
class EFI_MAC_ADDRESS(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("Addr", ARRAY(c_uint8, 32)),
]
class MAC_ADDR_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('MacAddress', EFI_MAC_ADDRESS),
('IfType', c_uint8)
]
class IPv4_ADDRESS(LittleEndianStructure):
_fields_ = [
("Addr", ARRAY(c_uint8, 4)),
]
class IPv4_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('LocalIpAddress', IPv4_ADDRESS),
('RemoteIpAddress', IPv4_ADDRESS),
('LocalPort', c_uint16),
('RemotePort', c_uint16),
('Protocol', c_uint16),
('StaticIpAddress', c_uint8),
('GatewayIpAddress', IPv4_ADDRESS),
('SubnetMask', IPv4_ADDRESS)
]
class IPv6_ADDRESS(LittleEndianStructure):
_fields_ = [
("Addr", ARRAY(c_uint8, 16)),
]
class IPv6_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('LocalIpAddress', IPv6_ADDRESS),
('RemoteIpAddress', IPv6_ADDRESS),
('LocalPort', c_uint16),
('RemotePort', c_uint16),
('Protocol', c_uint16),
('IpAddressOrigin', c_uint8),
('PrefixLength', c_uint8),
('GatewayIpAddress', IPv6_ADDRESS)
]
class UART_DEVICE_PATH(LittleEndianStructure):
_pack_ = 1
_fields_ = [
('Header', EFI_DEVICE_PATH),
('Reserved', c_uint32),
('BaudRate', c_uint64),
('DataBits', c_uint8),
('Parity', c_uint8),
('StopBits', c_uint8)
]
class USB_CLASS_DEVICE_PATH(LittleEndianStructure):
    # USB Class messaging device path node (UEFI spec).
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('VendorId', c_uint16),
        ('ProductId', c_uint16),
        ('DeviceClass', c_uint8),
        # BUGFIX: this field was misspelled 'DeviceCSjblass' in the
        # original; the UEFI field is DeviceSubClass.
        ('DeviceSubClass', c_uint8),
        ('DeviceProtocol', c_uint8),
    ]
# Remaining device path node structures. The (Type, SubType) noted on each
# class comes from device_path_dict below, which maps header values to these
# ctypes. All nodes start with the generic EFI_DEVICE_PATH header and are
# byte-packed (_pack_ = 1) to match the on-target binary layout.
class USB_WWID_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 16
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('InterfaceNumber', c_uint16),
        ('VendorId', c_uint16),
        ('ProductId', c_uint16),
    ]


class DEVICE_LOGICAL_UNIT_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 17
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('Lun', c_uint8)
    ]


class SATA_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 18
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('HBAPortNumber', c_uint16),
        ('PortMultiplierPortNumber', c_uint16),
        ('Lun', c_uint16),
    ]


class ISCSI_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 19
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('NetworkProtocol', c_uint16),
        ('LoginOption', c_uint16),
        ('Lun', c_uint64),
        ('TargetPortalGroupTag', c_uint16),
    ]


class VLAN_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 20
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        # NOTE(review): "VlandId" spelling kept as-is; the field name is part
        # of the public ctypes interface.
        ("VlandId", c_uint16)
    ]


class FIBRECHANNELEX_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 21
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("Reserved", c_uint16),
        ("WWN", ARRAY(c_uint8, 8)),
        ("Lun", ARRAY(c_uint8, 8)),
    ]


class SASEX_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 22
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("SasAddress", ARRAY(c_uint8, 8)),
        ("Lun", ARRAY(c_uint8, 8)),
        ("DeviceTopology", c_uint16),
        ("RelativeTargetPort", c_uint16)
    ]


class NVME_NAMESPACE_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 23
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("NamespaceId", c_uint32),
        ("NamespaceUuid", c_uint64)
    ]


class DNS_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubTypes 24 and 31 (see device_path_dict)
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("IsIPv6", c_uint8),
        ("DnsServerIp", IPv6_ADDRESS)
    ]


class UFS_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 25
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("Pun", c_uint8),
        ("Lun", c_uint8),
    ]


class SD_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 26
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("SlotNumber", c_uint8)
    ]


# Helper struct (not a device path node itself): 48-bit Bluetooth address.
class BLUETOOTH_ADDRESS(LittleEndianStructure):
    _pack_ = 1
    _fields_ = [
        ("Address", ARRAY(c_uint8, 6))
    ]


# Helper struct used by BLUETOOTH_LE_DEVICE_PATH.
class BLUETOOTH_LE_ADDRESS(LittleEndianStructure):
    _pack_ = 1
    _fields_ = [
        ("Format", c_uint8),
        ("Class", c_uint16)
    ]


class BLUETOOTH_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 27
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("BD_ADDR", BLUETOOTH_ADDRESS)
    ]


class WIFI_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 28
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("SSId", ARRAY(c_uint8, 32))
    ]


class EMMC_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 29
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("SlotNumber", c_uint8)
    ]


class BLUETOOTH_LE_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 30
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("BD_ADDR", BLUETOOTH_LE_ADDRESS)
    ]


class NVDIMM_NAMESPACE_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 32
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("Uuid", EFI_GUID)
    ]


class REST_SERVICE_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 33
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("RESTService", c_uint8),
        ("AccessMode", c_uint8)
    ]


class REST_VENDOR_SERVICE_DEVICE_PATH(LittleEndianStructure):
    # Type 3, SubType 34
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ("RESTService", c_uint8),
        ("AccessMode", c_uint8),
        ("Guid", EFI_GUID),
    ]


class HARDDRIVE_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 1
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('PartitionNumber', c_uint32),
        ('PartitionStart', c_uint64),
        ('PartitionSize', c_uint64),
        ('Signature', ARRAY(c_uint8, 16)),
        ('MBRType', c_uint8),
        ('SignatureType', c_uint8)
    ]


class CDROM_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 2
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('BootEntry', c_uint32),
        ('PartitionStart', c_uint64),
        ('PartitionSize', c_uint64)
    ]


class MEDIA_PROTOCOL_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 5
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('Protocol', EFI_GUID)
    ]


class MEDIA_FW_VOL_FILEPATH_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 6
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('FvFileName', EFI_GUID)
    ]


class MEDIA_FW_VOL_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 7
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('FvName', EFI_GUID)
    ]


class MEDIA_RELATIVE_OFFSET_RANGE_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 8
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('Reserved', c_uint32),
        ('StartingOffset', c_uint64),
        ('EndingOffset', c_uint64)
    ]


class MEDIA_RAM_DISK_DEVICE_PATH(LittleEndianStructure):
    # Media device path: Type 4, SubType 9
    _pack_ = 1
    _fields_ = [
        ('Header', EFI_DEVICE_PATH),
        ('StartingAddr', c_uint64),
        ('EndingAddr', c_uint64),
        ('TypeGuid', EFI_GUID),
        ('Instance', c_uint16)
    ]
class EfiDevicePath:
    '''
    Parse EFI Device Paths based on the edk2 C Structures defined above.
    In the context of this class verbose means hexdump extra data.

    Attributes
    ----------
    DevicePath : list
        List of device path instances. Each instance is a list of nodes
        for the given Device Path instance.

    Methods
    -----------
    device_path_node(address)
        return the Device Path ctype hdr, ctype, and any extra data in
        the Device Path node. This is just a single Device Path node,
        not the entire Device Path.
    device_path_node_str(address)
        return the device path node (not the entire Device Path) as a string
    '''

    # NOTE(review): class-level attribute, so parsed instances accumulate
    # here and are shared by every EfiDevicePath object in the process.
    DevicePath = []

    # Maps a (Type, SubType) pair from the generic header to the concrete
    # ctypes structure used to decode that node.
    device_path_dict = {
        # ( Type, SubType ) : Device Path C typedef
        # HARDWARE_DEVICE_PATH
        (1, 1): PCI_DEVICE_PATH,
        (1, 2): PCCARD_DEVICE_PATH,
        (1, 3): MEMMAP_DEVICE_PATH,
        (1, 4): VENDOR_DEVICE_PATH,
        (1, 5): CONTROLLER_DEVICE_PATH,
        (1, 6): BMC_DEVICE_PATH,

        # ACPI_DEVICE_PATH
        (2, 1): ACPI_HID_DEVICE_PATH,
        (2, 2): ACPI_EXTENDED_HID_DEVICE_PATH,
        (2, 3): ACPI_ADR_DEVICE_PATH,
        (2, 4): ACPI_NVDIMM_DEVICE_PATH,

        # MESSAGING_DEVICE_PATH
        (3, 1): ATAPI_DEVICE_PATH,
        (3, 2): SCSI_DEVICE_PATH,
        (3, 3): FIBRECHANNEL_DEVICE_PATH,
        (3, 4): F1394_DEVICE_PATH,
        (3, 5): USB_DEVICE_PATH,
        (3, 6): I2O_DEVICE_PATH,
        (3, 9): INFINIBAND_DEVICE_PATH,
        (3, 10): VENDOR_DEVICE_PATH,
        (3, 11): MAC_ADDR_DEVICE_PATH,
        (3, 12): IPv4_DEVICE_PATH,
        (3, 13): IPv6_DEVICE_PATH,
        (3, 14): UART_DEVICE_PATH,
        (3, 15): USB_CLASS_DEVICE_PATH,
        (3, 16): USB_WWID_DEVICE_PATH,
        (3, 17): DEVICE_LOGICAL_UNIT_DEVICE_PATH,
        (3, 18): SATA_DEVICE_PATH,
        (3, 19): ISCSI_DEVICE_PATH,
        (3, 20): VLAN_DEVICE_PATH,
        (3, 21): FIBRECHANNELEX_DEVICE_PATH,
        (3, 22): SASEX_DEVICE_PATH,
        (3, 23): NVME_NAMESPACE_DEVICE_PATH,
        (3, 24): DNS_DEVICE_PATH,
        (3, 25): UFS_DEVICE_PATH,
        (3, 26): SD_DEVICE_PATH,
        (3, 27): BLUETOOTH_DEVICE_PATH,
        (3, 28): WIFI_DEVICE_PATH,
        (3, 29): EMMC_DEVICE_PATH,
        (3, 30): BLUETOOTH_LE_DEVICE_PATH,
        (3, 31): DNS_DEVICE_PATH,
        (3, 32): NVDIMM_NAMESPACE_DEVICE_PATH,
        (3, 33): REST_SERVICE_DEVICE_PATH,
        (3, 34): REST_VENDOR_SERVICE_DEVICE_PATH,

        # MEDIA_DEVICE_PATH
        (4, 1): HARDDRIVE_DEVICE_PATH,
        (4, 2): CDROM_DEVICE_PATH,
        (4, 3): VENDOR_DEVICE_PATH,
        (4, 4): EFI_DEVICE_PATH,
        (4, 5): MEDIA_PROTOCOL_DEVICE_PATH,
        (4, 6): MEDIA_FW_VOL_FILEPATH_DEVICE_PATH,
        (4, 7): MEDIA_FW_VOL_DEVICE_PATH,
        (4, 8): MEDIA_RELATIVE_OFFSET_RANGE_DEVICE_PATH,
        (4, 9): MEDIA_RAM_DISK_DEVICE_PATH,

        # BBS_DEVICE_PATH
        (5, 1): BBS_BBS_DEVICE_PATH,
    }

    # Vendor GUIDs whose payload has a more specific layout than the
    # generic VENDOR_DEVICE_PATH.
    guid_override_dict = {
        uuid.UUID('37499A9D-542F-4C89-A026-35DA142094E4'):
            UART_FLOW_CONTROL_DEVICE_PATH,
        uuid.UUID('D487DDB4-008B-11D9-AFDC-001083FFCA4D'):
            SAS_DEVICE_PATH,
    }

    def __init__(self, file, ptr=None, verbose=False, count=64):
        '''
        Convert ptr into a list of Device Path nodes. If verbose also hexdump
        extra data.
        '''
        self._file = file
        self._verbose = verbose
        if ptr is None:
            return
        try:
            instance = []
            # count caps the walk so a corrupt Length field cannot loop
            # forever.
            for _ in range(count):  # while True
                hdr, _ = self._ctype_read_ex(EFI_DEVICE_PATH, ptr)
                if hdr.Length < sizeof(EFI_DEVICE_PATH):
                    # Not a valid device path
                    break
                if hdr.Type == 0x7F:    # END_DEVICE_PATH_TYPE
                    self.DevicePath.append(instance)
                    if hdr.SubType == 0xFF:  # END_ENTIRE_DEVICE_PATH_SUBTYPE
                        break
                    if hdr.SubType == 0x01:  # END_INSTANCE_DEVICE_PATH_SUBTYPE
                        # start new device path instance
                        instance = []
                # Fall back to the generic header when (Type, SubType) is
                # not in the lookup table.
                type_str = self.device_path_dict.get(
                    (hdr.Type, hdr.SubType), EFI_DEVICE_PATH)
                node, extra = self._ctype_read_ex(type_str, ptr, hdr.Length)
                if 'VENDOR_DEVICE_PATH' in type(node).__name__:
                    guid_type = self.guid_override_dict.get(
                        GuidNames.to_uuid(node.Guid), None)
                    if guid_type:
                        # use the ctype associated with the GUID
                        node, extra = self._ctype_read_ex(
                            guid_type, ptr, hdr.Length)
                instance.append((type(node).__name__, hdr.Type,
                                 hdr.SubType, hdr.Length, node, extra))
                ptr += hdr.Length
        except ValueError:
            # Propagated by the file-like object on a bad read; keep
            # whatever was parsed so far.
            pass

    def __str__(self):
        '''Return every parsed Device Path instance as a string.'''
        if not self.valid():
            return '<class: EfiDevicePath>'
        result = ""
        for instance in self.DevicePath:
            for (Name, Type, SubType, Length, cnode, extra) in instance:
                result += f'{Name:s} {Type:2d}:{SubType:2d} Len: {Length:3d}\n'
                result += ctype_to_str(cnode, '  ', ['Reserved'])
                if self._verbose:
                    if extra is not None:
                        result += hexdump(extra, '    ')
            result += '\n'
        return result

    def valid(self):
        '''True when at least one Device Path instance was parsed.'''
        return True if self.DevicePath else False

    def device_path_node(self, address):
        '''Read a single Device Path node at address.

        Returns (hdr, cnode, extra) or (None, None, None) when the header
        is invalid or the read fails.
        '''
        try:
            hdr, _ = self._ctype_read_ex(EFI_DEVICE_PATH, address)
            if hdr.Length < sizeof(EFI_DEVICE_PATH):
                return None, None, None
            type_str = self.device_path_dict.get(
                (hdr.Type, hdr.SubType), EFI_DEVICE_PATH)
            cnode, extra = self._ctype_read_ex(type_str, address, hdr.Length)
            return hdr, cnode, extra
        except ValueError:
            return None, None, None

    def device_path_node_str(self, address, verbose=False):
        '''Return a single Device Path node at address as a string.'''
        hdr, cnode, extra = self.device_path_node(address)
        if hdr is None:
            return ''
        cname = type(cnode).__name__
        result = f'{cname:s} {hdr.Type:2d}:{hdr.SubType:2d} '
        result += f'Len: 0x{hdr.Length:03x}\n'
        result += ctype_to_str(cnode, '  ', ['Reserved'])
        if verbose:
            if extra is not None:
                result += hexdump(extra, '    ')
        return result

    def _ctype_read_ex(self, ctype_struct, offset=0, rsize=None):
        '''Read rsize (or sizeof(ctype_struct)) bytes at offset and return
        (ctype instance, bytes beyond the structure or None).'''
        if offset != 0:
            self._file.seek(offset)
        type_size = sizeof(ctype_struct)
        size = rsize if rsize else type_size
        data = self._file.read(size)
        if data is None:
            return None, None
        cdata = ctype_struct.from_buffer(bytearray(data))
        if size > type_size:
            # Any bytes past the fixed structure are variable-length extra
            # data belonging to the node.
            return cdata, data[type_size:]
        else:
            return cdata, None
class EfiConfigurationTable:
    '''
    A class to abstract EFI Configuration Tables from gST->ConfigurationTable
    and gST->NumberOfTableEntries. Pass in the gST pointer from EFI,
    likely you need to look up this address after you have loaded symbols

    Attributes
    ----------
    ConfigurationTableDict : dictionary
        dictionary of EFI Configuration Table entries

    Methods
    -----------
    GetConfigTable(uuid)
        pass in VendorGuid and return VendorTable from EFI System Table
    DebugImageInfo(table)
        return tuple of load address and size of PE/COFF images
    '''

    # NOTE(review): class-level attribute — shared by all instances.
    ConfigurationTableDict = {}

    def __init__(self, file, gST_addr=None):
        self._file = file
        if gST_addr is None:
            # ToDo add code to search for gST via EFI_SYSTEM_TABLE_POINTER
            return
        gST = self._ctype_read(EFI_SYSTEM_TABLE, gST_addr)
        self.read_efi_config_table(gST.NumberOfTableEntries,
                                   gST.ConfigurationTable,
                                   self._ctype_read)

    # NOTE(review): defined as a classmethod, so str() renders the shared
    # class-level dictionary, not per-instance state.
    @ classmethod
    def __str__(cls):
        '''return EFI_CONFIGURATION_TABLE entries as a string'''
        result = ""
        for key, value in cls.ConfigurationTableDict.items():
            result += f'{GuidNames().to_name(key):>37s}: '
            result += f'VendorTable = 0x{value:08x}\n'
        return result

    def _ctype_read(self, ctype_struct, offset=0):
        '''ctype worker function to read data'''
        if offset != 0:
            self._file.seek(offset)
        data = self._file.read(sizeof(ctype_struct))
        return ctype_struct.from_buffer(bytearray(data))

    @ classmethod
    def read_efi_config_table(cls, table_cnt, table_ptr, ctype_read):
        '''Create a dictionary of EFI Configuration table entries'''
        # Read the whole array of entries in one shot, then key the dict by
        # the upper-case VendorGuid string.
        EmptryTables = EFI_CONFIGURATION_TABLE * table_cnt
        Tables = ctype_read(EmptryTables, table_ptr)
        for i in range(table_cnt):
            cls.ConfigurationTableDict[str(GuidNames.to_uuid(
                Tables[i].VendorGuid)).upper()] = Tables[i].VendorTable
        return cls.ConfigurationTableDict

    def GetConfigTable(self, uuid):
        ''' Return VendorTable for VendorGuid (uuid.UUID) or None'''
        # NOTE(review): the 'uuid' parameter shadows the uuid module and is
        # used here as a GUID *string*, not a uuid.UUID instance.
        return self.ConfigurationTableDict.get(uuid.upper())

    def DebugImageInfo(self, table=None):
        '''
        Walk the debug image info table to find the LoadedImage protocols
        for all the loaded PE/COFF images and return a list of load address
        and image size.
        '''
        ImageLoad = []
        if table is None:
            # Default to the debug image info table; the header read below
            # expects an EFI_DEBUG_IMAGE_INFO_TABLE_HEADER at this address.
            table = self.GetConfigTable('49152e77-1ada-4764-b7a2-7afefed95e8b')
        DbgInfoHdr = self._ctype_read(EFI_DEBUG_IMAGE_INFO_TABLE_HEADER, table)
        NormalImageArray = EFI_DEBUG_IMAGE_INFO * DbgInfoHdr.TableSize
        NormalImageArray = self._ctype_read(
            NormalImageArray, DbgInfoHdr.EfiDebugImageInfoTable)
        for i in range(DbgInfoHdr.TableSize):
            ImageInfo = self._ctype_read(
                EFI_DEBUG_IMAGE_INFO_NORMAL, NormalImageArray[i].NormalImage)
            LoadedImage = self._ctype_read(
                EFI_LOADED_IMAGE_PROTOCOL,
                ImageInfo.LoadedImageProtocolInstance)
            ImageLoad.append((LoadedImage.ImageBase, LoadedImage.ImageSize))
        return ImageLoad
class PeTeImage:
    '''
    A class to abstract PE/COFF or TE image processing via passing in a
    Python file like object. If you pass in an address the PE/COFF is parsed,
    if you pass in NULL for an address then you get a class instance you can
    use to search memory for a PE/COFF header given a pc value.

    Attributes
    ----------
    LoadAddress : int
        Load address of the PE/COFF image
    AddressOfEntryPoint : int
        Address of the Entry point of the PE/COFF image
    TextAddress : int
        Start of the PE/COFF text section
    DataAddress : int
        Start of the PE/COFF data section
    CodeViewPdb : str
        File name of the symbols file
    CodeViewUuid : uuid:UUID
        GUID for "RSDS" Debug Directory entry, or Mach-O UUID for "MTOC"

    Methods
    -----------
    pcToPeCoff(address, step, max_range, rom_range)
        Given an address(pc) find the PE/COFF image it is in
    sections_to_str()
        return a string giving info for all the PE/COFF sections
    '''

    def __init__(self, file, address=0):
        self._file = file

        # book keeping, but public
        self.PeHdr = None
        self.TeHdr = None
        self.Machine = None
        self.Subsystem = None
        self.CodeViewSig = None
        self.e_lfanew = 0
        self.NumberOfSections = 0
        self.Sections = None

        # Things debuggers may want to know
        self.LoadAddress = 0 if address is None else address
        self.EndLoadAddress = 0
        self.AddressOfEntryPoint = 0
        self.TextAddress = 0
        self.DataAddress = 0
        self.CodeViewPdb = None
        self.CodeViewUuid = None
        self.TeAdjust = 0

        # PE data directory index -> human readable name
        self.dir_name = {
            0: 'Export Table',
            1: 'Import Table',
            2: 'Resource Table',
            3: 'Exception Table',
            4: 'Certificate Table',
            5: 'Relocation Table',
            6: 'Debug',
            7: 'Architecture',
            8: 'Global Ptr',
            9: 'TLS Table',
            10: 'Load Config Table',
            11: 'Bound Import',
            12: 'IAT',
            13: 'Delay Import Descriptor',
            14: 'CLR Runtime Header',
            15: 'Reserved',
        }

        if address is not None:
            if self.maybe():
                self.parse()

    def __str__(self):
        if self.PeHdr is None and self.TeHdr is None:
            # no PE/COFF header found
            return "<class: PeTeImage>"

        if self.CodeViewPdb:
            pdb = f'{self.Machine}`{self.CodeViewPdb}'
        else:
            pdb = 'No Debug Info:'

        if self.CodeViewUuid:
            guid = f'{self.CodeViewUuid}:'
        else:
            guid = ''

        slide = f'slide = {self.TeAdjust:d} ' if self.TeAdjust != 0 else ' '
        res = guid + f'{pdb} load = 0x{self.LoadAddress:08x} ' + slide
        return res

    def _seek(self, offset):
        """
        seek() relative to start of PE/COFF (TE) image
        """
        self._file.seek(self.LoadAddress + offset)

    def _read_offset(self, size, offset=None):
        """
        read() relative to start of PE/COFF (TE) image
        if offset is not None then seek() before the read
        """
        if offset is not None:
            self._seek(offset)
        return self._file.read(size)

    def _read_ctype(self, ctype_struct, offset=None):
        # Read sizeof(ctype_struct) bytes and overlay the ctype on them.
        data = self._read_offset(sizeof(ctype_struct), offset)
        return ctype_struct.from_buffer(bytearray(data), 0)

    def _unsigned(self, i):
        """return a 32-bit unsigned int (UINT32) """
        return int.from_bytes(i, byteorder='little', signed=False)

    # NOTE(review): mutable default for rom_range — harmless here since it
    # is only read, but a tuple default would be safer.
    def pcToPeCoff(self,
                   address,
                   step=None,
                   max_range=None,
                   rom_range=[0xFE800000, 0xFFFFFFFF]):
        """
        Given an address search backwards for PE/COFF (TE) header
        For DXE 4K is probably OK
        For PEI you might have to search every 4 bytes.
        """
        if step is None:
            step = 0x1000
        if max_range is None:
            max_range = 0x200000
        if address in range(*rom_range):
            # The XIP code in the ROM ends up 4 byte aligned.
            step = 4
            max_range = min(max_range, 0x100000)

        # Align address to page boundary for memory image search.
        address = address & ~(step-1)
        # Search every step backward
        offset_range = list(range(0, min(max_range, address), step))
        for offset in offset_range:
            if self.maybe(address - offset):
                if self.parse():
                    return True
        return False

    def maybe(self, offset=None):
        """Probe to see if this offset is likely a PE/COFF or TE file """
        self.LoadAddress = 0
        # 'MZ' is the PE/COFF DOS stub magic, 'VZ' the TE magic (see parse).
        e_magic = self._read_offset(2, offset)
        header_ok = e_magic == b'MZ' or e_magic == b'VZ'
        if offset is not None and header_ok:
            self.LoadAddress = offset
        return header_ok

    def parse(self):
        """Parse PE/COFF (TE) debug directory entry """
        DosHdr = self._read_ctype(EFI_IMAGE_DOS_HEADER, 0)
        if DosHdr.e_magic == self._unsigned(b'VZ'):
            # TE image
            self.TeHdr = self._read_ctype(EFI_TE_IMAGE_HEADER, 0)

            # TE stripping shifts all RVAs by the stripped header size.
            self.TeAdjust = sizeof(self.TeHdr) - self.TeHdr.StrippedSize
            self.Machine = image_machine_dict.get(self.TeHdr.Machine, None)
            self.Subsystem = self.TeHdr.Subsystem
            self.AddressOfEntryPoint = self.TeHdr.AddressOfEntryPoint

            debug_dir_size = self.TeHdr.DataDirectoryDebug.Size
            debug_dir_offset = (self.TeAdjust +
                                self.TeHdr.DataDirectoryDebug.VirtualAddress)
        else:
            if DosHdr.e_magic == self._unsigned(b'MZ'):
                self.e_lfanew = DosHdr.e_lfanew
            else:
                self.e_lfanew = 0

            # Read as 64-bit first; re-read as 32-bit if the optional
            # header magic says PE32.
            self.PeHdr = self._read_ctype(
                EFI_IMAGE_NT_HEADERS64, self.e_lfanew)
            if self.PeHdr.Signature != self._unsigned(b'PE\0\0'):
                return False

            if self.PeHdr.OptionalHeader.Magic == \
                    EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC:
                self.PeHdr = self._read_ctype(
                    EFI_IMAGE_NT_HEADERS32, self.e_lfanew)

            if self.PeHdr.OptionalHeader.NumberOfRvaAndSizes <= \
                    DIRECTORY_DEBUG:
                return False

            self.Machine = image_machine_dict.get(
                self.PeHdr.FileHeader.Machine, None)
            self.Subsystem = self.PeHdr.OptionalHeader.Subsystem
            self.AddressOfEntryPoint = \
                self.PeHdr.OptionalHeader.AddressOfEntryPoint
            self.TeAdjust = 0

            debug_dir_size = self.PeHdr.OptionalHeader.DataDirectory[
                DIRECTORY_DEBUG].Size
            debug_dir_offset = self.PeHdr.OptionalHeader.DataDirectory[
                DIRECTORY_DEBUG].VirtualAddress

        # Only accept known machine types and EFI subsystems (0 plus the
        # 10/11/12 EFI application/driver subsystem values).
        if self.Machine is None or self.Subsystem not in [0, 10, 11, 12]:
            return False
        self.AddressOfEntryPoint += self.LoadAddress
        self.sections()
        return self.processDebugDirEntry(debug_dir_offset, debug_dir_size)

    def sections(self):
        '''Parse the PE/COFF (TE) section table'''
        if self.Sections is not None:
            return
        elif self.TeHdr is not None:
            self.NumberOfSections = self.TeHdr.NumberOfSections
            offset = sizeof(EFI_TE_IMAGE_HEADER)
        elif self.PeHdr is not None:
            self.NumberOfSections = self.PeHdr.FileHeader.NumberOfSections
            # Section table follows Signature + file header + optional header.
            offset = sizeof(c_uint32) + \
                sizeof(EFI_IMAGE_FILE_HEADER)
            offset += self.PeHdr.FileHeader.SizeOfOptionalHeader
            offset += self.e_lfanew
        else:
            return

        self.Sections = EFI_IMAGE_SECTION_HEADER * self.NumberOfSections
        self.Sections = self._read_ctype(self.Sections, offset)

        for i in range(self.NumberOfSections):
            name = str(self.Sections[i].Name, 'ascii', 'ignore')
            addr = self.Sections[i].VirtualAddress
            addr += self.LoadAddress + self.TeAdjust
            if name == '.text':
                self.TextAddress = addr
            elif name == '.data':
                self.DataAddress = addr
            # Track the highest section end to bound the loaded image.
            end_addr = addr + self.Sections[i].VirtualSize - 1
            if end_addr > self.EndLoadAddress:
                self.EndLoadAddress = end_addr

    def sections_to_str(self):
        # return text summary of sections
        # name virt addr (virt size) flags:Characteristics
        result = ''
        for i in range(self.NumberOfSections):
            name = str(self.Sections[i].Name, 'ascii', 'ignore')
            result += f'{name:8s} '
            result += f'0x{self.Sections[i].VirtualAddress:08X} '
            result += f'(0x{self.Sections[i].VirtualSize:05X}) '
            result += f'flags:0x{self.Sections[i].Characteristics:08X}\n'
        return result

    def directory_to_str(self):
        '''Return text summary of the debug/relocation data directories.'''
        result = ''
        if self.TeHdr:
            debug_size = self.TeHdr.DataDirectoryDebug.Size
            if debug_size > 0:
                debug_offset = (self.TeAdjust
                                + self.TeHdr.DataDirectoryDebug.VirtualAddress)
                # NOTE(review): size prints as decimal after the "0x"
                # prefix (no :X format spec) — see also the relocation line.
                result += f"Debug 0x{debug_offset:08X} 0x{debug_size}\n"
            relocation_size = self.TeHdr.DataDirectoryBaseReloc.Size
            if relocation_size > 0:
                relocation_offset = (
                    self.TeAdjust
                    + self.TeHdr.DataDirectoryBaseReloc.VirtualAddress)
                result += f'Relocation 0x{relocation_offset:08X} '
                result += f' 0x{relocation_size}\n'
        elif self.PeHdr:
            for i in range(self.PeHdr.OptionalHeader.NumberOfRvaAndSizes):
                size = self.PeHdr.OptionalHeader.DataDirectory[i].Size
                if size == 0:
                    continue
                virt_addr = self.PeHdr.OptionalHeader.DataDirectory[
                    i].VirtualAddress
                name = self.dir_name.get(i, '?')
                result += f'{name:s} 0x{virt_addr:08X} 0x{size:X}\n'

        return result

    def processDebugDirEntry(self, virt_address, virt_size):
        """Process PE/COFF Debug Directory Entry"""
        if (virt_address == 0 or
                virt_size < sizeof(EFI_IMAGE_DEBUG_DIRECTORY_ENTRY)):
            return False

        data = bytearray(self._read_offset(virt_size, virt_address))
        for offset in range(0,
                            virt_size,
                            sizeof(EFI_IMAGE_DEBUG_DIRECTORY_ENTRY)):
            DirectoryEntry = EFI_IMAGE_DEBUG_DIRECTORY_ENTRY.from_buffer(
                data[offset:])
            # Type 2 == IMAGE_DEBUG_TYPE_CODEVIEW entries only.
            if DirectoryEntry.Type != 2:
                continue

            entry = self._read_offset(
                DirectoryEntry.SizeOfData, DirectoryEntry.RVA + self.TeAdjust)
            self.CodeViewSig = entry[:4]
            if self.CodeViewSig == b'MTOC':
                self.CodeViewUuid = uuid.UUID(bytes_le=entry[4:4+16])
                PdbOffset = 20
            elif self.CodeViewSig == b'RSDS':
                self.CodeViewUuid = uuid.UUID(bytes_le=entry[4:4+16])
                PdbOffset = 24
            elif self.CodeViewSig == b'NB10':
                PdbOffset = 16
            else:
                continue
            # can't find documentation about Pdb string encoding?
            # guessing utf-8 since that will match file systems in macOS
            # and Linux Windows is UTF-16, or ANSI adjusted for local.
            # We might need a different value for Windows here?
            self.CodeViewPdb = entry[PdbOffset:].split(b'\x00')[
                0].decode('utf-8')
            return True

        return False
def main():
    '''Parse each command-line argument as a PE/COFF file and dump it.'''
    for fname in sys.argv[1:]:
        with open(fname, 'rb') as f:
            image = PeTeImage(f)
            print(image)
            summary = (
                f'EntryPoint = 0x{image.AddressOfEntryPoint:08x} '
                f'TextAddress = 0x{image.TextAddress:08x} '
                f'DataAddress = 0x{image.DataAddress:08x}'
            )
            print(summary)
            print(image.sections_to_str())
            print('Data Directories:')
            print(image.directory_to_str())


if __name__ == "__main__":
    main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/efi_debugging.py
|
## @file
# Retrieves the people to request review from on submission of a commit.
#
# Copyright (c) 2019, Linaro Ltd. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
from collections import defaultdict
from collections import OrderedDict
import argparse
import os
import re
import SetupGit
# One compiled regex per Maintainers.txt tag line ("X:", "F:", "L:", "M:",
# "R:", "S:", "T:", "W:"); each named group matches the tag's value with any
# trailing CR stripped.
EXPRESSIONS = {
    'exclude': re.compile(r'^X:\s*(?P<exclude>.*?)\r*$'),
    'file': re.compile(r'^F:\s*(?P<file>.*?)\r*$'),
    'list': re.compile(r'^L:\s*(?P<list>.*?)\r*$'),
    'maintainer': re.compile(r'^M:\s*(?P<maintainer>.*?)\r*$'),
    'reviewer': re.compile(r'^R:\s*(?P<reviewer>.*?)\r*$'),
    'status': re.compile(r'^S:\s*(?P<status>.*?)\r*$'),
    'tree': re.compile(r'^T:\s*(?P<tree>.*?)\r*$'),
    'webpage': re.compile(r'^W:\s*(?P<webpage>.*?)\r*$')
}
def printsection(section):
    """Print a parsed Maintainers.txt section in a readable form."""
    print('===')
    # Each value is the list of strings collected for that tag.
    for key, items in section.items():
        print('Key: %s' % key)
        for item in items:
            print(' %s' % item)
def pattern_to_regex(pattern):
    """Takes a string containing regular UNIX path wildcards
    and returns a string suitable for matching with regex."""
    # Order matters: escape literal '.' before introducing '.' / '.*'
    # for the '?' and '*' wildcards.
    for literal, regex in (('.', r'\.'), ('?', r'.'), ('*', r'.*')):
        pattern = pattern.replace(literal, regex)

    if pattern.endswith('/'):
        # Directory pattern: match anything below this directory.
        pattern += r'.*'
    elif pattern.endswith('.*'):
        # Trailing wildcard: match entries directly in this directory
        # only, not in subdirectories.
        pattern = pattern[:-2] + r'(?!.*?/.*?)'
    return pattern
def path_in_section(path, section):
    """Returns True or False indicating whether the path is covered by
    the current section."""
    if 'file' not in section:
        return False

    for file_pattern in section['file']:
        if not re.match(pattern_to_regex(file_pattern), path):
            continue
        # The path matched an F: pattern; reject it if an X: pattern
        # also matches.
        for exclude_pattern in section['exclude']:
            if re.match(pattern_to_regex(exclude_pattern), path):
                return False
        return True

    return False
def get_section_maintainers(path, section):
    """Returns a list with email addresses to any M: and R: entries
    matching the provided path in the provided section.

    Returns a (maintainers, lists) tuple; both are empty when the path
    is not covered by the section.
    """
    maintainers = []
    lists = []
    nowarn_status = ['Supported', 'Maintained']

    if path_in_section(path, section):
        for status in section['status']:
            if status not in nowarn_status:
                print('WARNING: Maintained status for "%s" is \'%s\'!' % (path, status))
        for address in section['maintainer'], section['reviewer']:
            # Convert to list if necessary
            if isinstance(address, list):
                maintainers += address
            else:
                # Bug fix: a bare (non-list) M:/R: entry is still a
                # maintainer address; it was previously appended to the
                # mailing-list results by mistake.
                maintainers += [address]
        for address in section['list']:
            # Convert to list if necessary
            if isinstance(address, list):
                lists += address
            else:
                lists += [address]

    return maintainers, lists
def get_maintainers(path, sections, level=0):
    """For 'path', iterates over all sections, returning maintainers
    for matching ones."""
    maintainers = []
    lists = []
    for section in sections:
        section_maintainers, section_lists = get_section_maintainers(path, section)
        maintainers += section_maintainers
        lists += section_lists

    if not maintainers:
        # If no match found, look for match for (nonexistent) file
        # REPO.working_dir/<default>
        print('"%s": no maintainers found, looking for default' % path)
        if level == 0:
            maintainers = get_maintainers('<default>', sections, level=level + 1)
        else:
            print("No <default> maintainers set for project.")
        if not maintainers:
            return None

    return maintainers + lists
def parse_maintainers_line(line):
    """Parse one line of Maintainers.txt, returning any match group and its key."""
    for key, expression in EXPRESSIONS.items():
        match = expression.match(line)
        if match is not None:
            return key, match.group(key)
    # Not a recognized tag line.
    return None, None
def parse_maintainers_file(filename):
    """Parse the Maintainers.txt from top-level of repo and
    return a list containing dictionaries of all sections."""
    with open(filename, 'r') as text:
        line = text.readline()
        sectionlist = []
        # defaultdict(list) so every tag key accumulates a list of values.
        section = defaultdict(list)
        while line:
            key, value = parse_maintainers_line(line)
            if key and value:
                section[key].append(value)

            line = text.readline()
            # If end of section (end of file, or non-tag line encountered)...
            if not key or not value or not line:
                # ...if non-empty, append section to list.
                if section:
                    # copy() so clearing below does not wipe the stored one.
                    sectionlist.append(section.copy())
                    section.clear()

        return sectionlist
def get_modified_files(repo, args):
    """Returns a list of the files modified by the commit specified in 'args'."""
    return repo.commit(args.commit).stats.files
if __name__ == '__main__':
    PARSER = argparse.ArgumentParser(
        description='Retrieves information on who to cc for review on a given commit')
    PARSER.add_argument('commit',
                        action="store",
                        help='git revision to examine (default: HEAD)',
                        nargs='?',
                        default='HEAD')
    PARSER.add_argument('-l', '--lookup',
                        help='Find section matches for path LOOKUP',
                        required=False)
    ARGS = PARSER.parse_args()

    REPO = SetupGit.locate_repo()
    CONFIG_FILE = os.path.join(REPO.working_dir, 'Maintainers.txt')

    SECTIONS = parse_maintainers_file(CONFIG_FILE)

    if ARGS.lookup:
        # Normalize Windows path separators before matching patterns.
        FILES = [ARGS.lookup.replace('\\','/')]
    else:
        FILES = get_modified_files(REPO, ARGS)

    ADDRESSES = []

    for file in FILES:
        print(file)
        addresslist = get_maintainers(file, SECTIONS)
        if addresslist:
            ADDRESSES += addresslist

    # OrderedDict.fromkeys de-duplicates while preserving first-seen order.
    for address in list(OrderedDict.fromkeys(ADDRESSES)):
        # Reduce "Name <email> [extra]" to the "Name <email>" prefix.
        if '<' in address and '>' in address:
            address = address.split('>', 1)[0] + '>'
        print(' %s' % address)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/GetMaintainer.py
|
#!/usr/bin/python3
'''
Copyright (c) Apple Inc. 2021
SPDX-License-Identifier: BSD-2-Clause-Patent
Example usage:
OvmfPkg/build.sh qemu -gdb tcp::9000
lldb -o "gdb-remote localhost:9000" -o "command script import efi_lldb.py"
'''
import optparse
import shlex
import subprocess
import uuid
import sys
import os
from pathlib import Path
from efi_debugging import EfiDevicePath, EfiConfigurationTable, EfiTpl
from efi_debugging import EfiHob, GuidNames, EfiStatusClass, EfiBootMode
from efi_debugging import PeTeImage, patch_ctypes
try:
    # Just try for LLDB in case PYTHONPATH is already correctly setup
    import lldb
except ImportError:
    try:
        # Ask Xcode's lldb where its Python bindings live and retry.
        env = os.environ.copy()
        env['LLDB_DEFAULT_PYTHON_VERSION'] = str(sys.version_info.major)
        lldb_python_path = subprocess.check_output(
            ["xcrun", "lldb", "-P"], env=env).decode("utf-8").strip()
        sys.path.append(lldb_python_path)
        import lldb
    except (ImportError, OSError, subprocess.SubprocessError):
        # Bug fix: the probe fails with ImportError, FileNotFoundError
        # (xcrun missing) or CalledProcessError — never ValueError, which
        # the old handler caught — so this message was unreachable.
        print("Couldn't find LLDB.framework from lldb -P")
        print("PYTHONPATH should match the currently selected lldb")
        sys.exit(-1)
class LldbFileObject(object):
    '''
    Class that fakes out file object to abstract lldb from the generic code.
    For lldb this is memory so we don't have a concept of the end of the file.
    '''

    def __init__(self, process):
        # _exe_ctx is lldb.SBExecutionContext
        self._process = process
        # current seek position in target memory
        self._offset = 0
        # reused for every ReadMemory/WriteMemory call
        self._SBError = lldb.SBError()

    def tell(self):
        return self._offset

    def read(self, size=-1):
        if size == -1:
            # arbitrary default size
            size = 0x1000000
        data = self._process.ReadMemory(self._offset, size, self._SBError)
        if self._SBError.fail:
            raise MemoryError(
                f'lldb could not read memory 0x{size:x} '
                f' bytes from 0x{self._offset:08x}')
        else:
            return data

    def readable(self):
        return True

    def seek(self, offset, whence=0):
        if whence == 0:
            # absolute position
            self._offset = offset
        elif whence == 1:
            # relative to current position
            self._offset += offset
        else:
            # whence == 2 is seek from end; memory has no end
            raise NotImplementedError

    def seekable(self):
        return True

    def write(self, data):
        result = self._process.WriteMemory(self._offset, data, self._SBError)
        if self._SBError.fail:
            raise MemoryError(
                f'lldb could not write memory to 0x{self._offset:08x}')
        return result

    def writable(self):
        return True

    def truncate(self, size=None):
        raise NotImplementedError

    def flush(self):
        raise NotImplementedError

    def fileno(self):
        raise NotImplementedError
class EfiSymbols:
    """
    Class to manage EFI Symbols
    You need to pass file, and exe_ctx to load symbols.
    You can print(EfiSymbols()) to see the currently loaded symbols
    """

    # Class-level state shared by all classmethods:
    # loaded maps LoadAddress -> (PeTeImage, lldb.SBModule).
    loaded = {}
    stride = None
    range = None
    verbose = False

    def __init__(self, target=None):
        # NOTE(review): cls.target / cls._file are only set when a target
        # is passed here; the classmethods below assume that happened.
        if target:
            EfiSymbols.target = target
            EfiSymbols._file = LldbFileObject(target.process)

    @ classmethod
    def __str__(cls):
        return ''.join(f'{pecoff}\n' for (pecoff, _) in cls.loaded.values())

    @ classmethod
    def configure_search(cls, stride, range, verbose=False):
        '''Set the search parameters used by address_to_symbols().'''
        cls.stride = stride
        cls.range = range
        cls.verbose = verbose

    @ classmethod
    def clear(cls):
        cls.loaded = {}

    @ classmethod
    def add_symbols_for_pecoff(cls, pecoff):
        '''Tell lldb the location of the .text and .data sections.'''

        if pecoff.LoadAddress in cls.loaded:
            return 'Already Loaded: '

        # First try to find the module by its debug UUID alone, then by
        # PDB name plus UUID.
        module = cls.target.AddModule(None, None, str(pecoff.CodeViewUuid))
        if not module:
            module = cls.target.AddModule(pecoff.CodeViewPdb,
                                          None,
                                          str(pecoff.CodeViewUuid))
        if module.IsValid():
            SBError = cls.target.SetModuleLoadAddress(
                module, pecoff.LoadAddress + pecoff.TeAdjust)
            if SBError.success:
                cls.loaded[pecoff.LoadAddress] = (pecoff, module)
                return ''

        return 'Symbols NOT FOUND: '

    @ classmethod
    def address_to_symbols(cls, address, reprobe=False):
        '''
        Given an address search backwards for a PE/COFF (or TE) header
        and load symbols. Return a status string.
        '''
        if not isinstance(address, int):
            address = int(address)

        pecoff, _ = cls.address_in_loaded_pecoff(address)
        if not reprobe and pecoff is not None:
            # skip the probe of the remote
            return f'{pecoff} is already loaded'

        pecoff = PeTeImage(cls._file, None)
        if pecoff.pcToPeCoff(address, cls.stride, cls.range):
            res = cls.add_symbols_for_pecoff(pecoff)
            return f'{res}{pecoff}'
        else:
            return f'0x{address:08x} not in a PE/COFF (or TE) image'

    @ classmethod
    def address_in_loaded_pecoff(cls, address):
        '''Return (pecoff, module) covering address, or (None, None).'''
        if not isinstance(address, int):
            address = int(address)

        for (pecoff, module) in cls.loaded.values():
            if (address >= pecoff.LoadAddress and
                    address <= pecoff.EndLoadAddress):
                return pecoff, module

        return None, None

    @ classmethod
    def unload_symbols(cls, address):
        '''Unload the image covering address; return a status string.'''
        pecoff, module = cls.address_in_loaded_pecoff(address)
        if module:
            name = str(module)
            cls.target.ClearModuleLoadAddress(module)
            cls.target.RemoveModule(module)
            del cls.loaded[pecoff.LoadAddress]
            return f'{name:s} was unloaded'
        return f'0x{address:x} was not in a loaded image'
def arg_to_address(frame, arg):
    ''' convert an lldb command arg into a memory address (addr_t)'''

    if arg is None:
        return None

    arg_str = arg if isinstance(arg, str) else str(arg)
    SBValue = frame.EvaluateExpression(arg_str)
    if SBValue.error.fail:
        # Could not evaluate the expression; hand back the raw argument.
        return arg

    if (SBValue.TypeIsPointerType() or
            SBValue.value_type == lldb.eValueTypeRegister or
            SBValue.value_type == lldb.eValueTypeRegisterSet or
            SBValue.value_type == lldb.eValueTypeConstResult):
        try:
            # Pointer-like values: use the value itself as the address.
            addr = SBValue.GetValueAsAddress()
        except ValueError:
            addr = SBValue.unsigned
    else:
        try:
            # Non-pointer lvalue: take the address of the value.
            addr = SBValue.address_of.GetValueAsAddress()
        except ValueError:
            addr = SBValue.address_of.unsigned

    return addr
def arg_to_data(frame, arg):
    '''convert an lldb command arg into a data value (uint32_t/uint64_t)'''
    # Bug fix: the old code evaluated str(str) — the literal text
    # "<class 'str'>" — instead of str(arg), and left arg_str unbound
    # (NameError) when arg was already a string.
    arg_str = arg if isinstance(arg, str) else str(arg)
    SBValue = frame.EvaluateExpression(arg_str)
    return SBValue.unsigned
class EfiDevicePathCommand:
    '''lldb command that decodes and displays EFI device paths.'''

    def create_options(self):
        ''' standard lldb command help/options parser'''
        usage = "usage: %prog [options]"
        # Bug fix: description and short help previously said
        # "EFI Config Tables" — a copy-paste from the table command.
        description = '''Command that can display EFI device paths
        '''
        # Pass add_help_option = False, since this keeps the command in line
        # with lldb commands, and we wire up "help command" to work by
        # providing the long & short help methods below.
        self.parser = optparse.OptionParser(
            description=description,
            prog='devicepath',
            usage=usage,
            add_help_option=False)

        self.parser.add_option(
            '-v',
            '--verbose',
            action='store_true',
            dest='verbose',
            help='hex dump extra data',
            default=False)

        self.parser.add_option(
            '-n',
            '--node',
            action='store_true',
            dest='node',
            help='dump a single device path node',
            default=False)

        self.parser.add_option(
            '-h',
            '--help',
            action='store_true',
            dest='help',
            help='Show help for the command',
            default=False)

    def get_short_help(self):
        '''standard lldb function method'''
        return "Display EFI device paths"

    def get_long_help(self):
        '''standard lldb function method'''
        return self.help_string

    def __init__(self, debugger, internal_dict):
        '''standard lldb function method'''
        self.create_options()
        self.help_string = self.parser.format_help()

    def __call__(self, debugger, command, exe_ctx, result):
        '''standard lldb function method'''
        # Use the Shell Lexer to properly parse up command options just like a
        # shell would
        command_args = shlex.split(command)
        try:
            (options, args) = self.parser.parse_args(command_args)
            # Each positional argument is an address of a device path.
            dev_list = []
            for arg in args:
                dev_list.append(arg_to_address(exe_ctx.frame, arg))
        except ValueError:
            # if you don't handle exceptions, passing an incorrect argument
            # to the OptionParser will cause LLDB to exit (courtesy of
            # OptParse dealing with argument errors by throwing SystemExit)
            result.SetError("option parsing failed")
            return

        if options.help:
            self.parser.print_help()
            return

        file = LldbFileObject(exe_ctx.process)

        for dev_addr in dev_list:
            if options.node:
                # Dump a single node rather than walking the whole path.
                print(EfiDevicePath(file).device_path_node_str(
                    dev_addr, options.verbose))
            else:
                device_path = EfiDevicePath(file, dev_addr, options.verbose)
                if device_path.valid():
                    print(device_path)
class EfiHobCommand:
    '''lldb command that dumps EFI Hand-Off Blocks (HOBs).'''

    def create_options(self):
        ''' standard lldb command help/options parser'''
        usage = "usage: %prog [options]"
        # Bug fix: description read "can EFI dump EFI HOBs" and prog was
        # 'table' — both copy-paste artifacts from the table command.
        description = '''Command that can dump EFI HOBs'''
        # Pass add_help_option = False, since this keeps the command in line
        # with lldb commands, and we wire up "help command" to work by
        # providing the long & short help methods below.
        self.parser = optparse.OptionParser(
            description=description,
            prog='hob',
            usage=usage,
            add_help_option=False)

        self.parser.add_option(
            '-a',
            '--address',
            type="int",
            dest='address',
            help='Parse HOBs from address',
            default=None)

        self.parser.add_option(
            '-t',
            '--type',
            type="int",
            dest='type',
            help='Only dump HOBS of his type',
            default=None)

        self.parser.add_option(
            '-v',
            '--verbose',
            action='store_true',
            dest='verbose',
            help='hex dump extra data',
            default=False)

        self.parser.add_option(
            '-h',
            '--help',
            action='store_true',
            dest='help',
            help='Show help for the command',
            default=False)

    def get_short_help(self):
        '''standard lldb function method'''
        return "Display EFI Hobs"

    def get_long_help(self):
        '''standard lldb function method'''
        return self.help_string

    def __init__(self, debugger, internal_dict):
        '''standard lldb function method'''
        self.create_options()
        self.help_string = self.parser.format_help()

    def __call__(self, debugger, command, exe_ctx, result):
        '''standard lldb function method'''
        # Use the Shell Lexer to properly parse up command options just like a
        # shell would
        command_args = shlex.split(command)
        try:
            (options, _) = self.parser.parse_args(command_args)
        except ValueError:
            # if you don't handle exceptions, passing an incorrect argument
            # to the OptionParser will cause LLDB to exit (courtesy of
            # OptParse dealing with argument errors by throwing SystemExit)
            result.SetError("option parsing failed")
            return

        if options.help:
            self.parser.print_help()
            return

        # The address argument may be an expression; resolve it now.
        address = arg_to_address(exe_ctx.frame, options.address)

        file = LldbFileObject(exe_ctx.process)
        hob = EfiHob(file, address, options.verbose).get_hob_by_type(
            options.type)
        print(hob)
class EfiTableCommand:
    '''lldb command that displays the EFI configuration table.'''

    def create_options(self):
        ''' standard lldb command help/options parser'''
        usage = "usage: %prog [options]"
        description = '''Command that can display EFI Config Tables
        '''
        # add_help_option=False keeps the command in line with lldb
        # commands; "help command" is wired up through the long & short
        # help methods below.
        parser = optparse.OptionParser(
            description=description,
            prog='table',
            usage=usage,
            add_help_option=False)
        parser.add_option(
            '-h',
            '--help',
            action='store_true',
            dest='help',
            help='Show help for the command',
            default=False)
        self.parser = parser

    def get_short_help(self):
        '''standard lldb function method'''
        return "Display EFI Tables"

    def get_long_help(self):
        '''standard lldb function method'''
        return self.help_string

    def __init__(self, debugger, internal_dict):
        '''standard lldb function method'''
        self.create_options()
        self.help_string = self.parser.format_help()

    def __call__(self, debugger, command, exe_ctx, result):
        '''standard lldb function method'''
        # Parse the command line the way a shell would.
        try:
            (options, _) = self.parser.parse_args(shlex.split(command))
        except ValueError:
            # OptParse reports argument errors by raising SystemExit, which
            # would take down LLDB if it escaped unhandled.
            result.SetError("option parsing failed")
            return

        if options.help:
            self.parser.print_help()
            return

        gST = exe_ctx.target.FindFirstGlobalVariable('gST')
        if gST.error.fail:
            print('Error: This command requires symbols for gST to be loaded')
            return

        table = EfiConfigurationTable(
            LldbFileObject(exe_ctx.process), gST.unsigned)
        if table:
            print(table, '\n')
class EfiGuidCommand:
    '''lldb command that lists known EFI GUIDs, looks up a single GUID by
    registry-format string, C-struct initializer, or symbolic name, and can
    generate fresh GUIDs.'''

    def create_options(self):
        ''' standard lldb command help/options parser'''
        usage = "usage: %prog [options]"
        description = '''
            Command that can display all EFI GUID's or give info on a
            specific GUID's
            '''
        self.parser = optparse.OptionParser(
            description=description,
            prog='guid',
            usage=usage,
            add_help_option=False)

        self.parser.add_option(
            '-n',
            '--new',
            action='store_true',
            dest='new',
            help='Generate a new GUID',
            default=False)

        self.parser.add_option(
            '-v',
            '--verbose',
            action='store_true',
            dest='verbose',
            help='Also display GUID C structure values',
            default=False)

        self.parser.add_option(
            '-h',
            '--help',
            action='store_true',
            dest='help',
            help='Show help for the command',
            default=False)

    def get_short_help(self):
        '''standard lldb function method'''
        return "Display EFI GUID's"

    def get_long_help(self):
        '''standard lldb function method'''
        return self.help_string

    def __init__(self, debugger, internal_dict):
        '''standard lldb function method'''
        self.create_options()
        self.help_string = self.parser.format_help()

    def __call__(self, debugger, command, exe_ctx, result):
        '''standard lldb function method'''
        # Use the Shell Lexer to properly parse up command options just like a
        # shell would
        command_args = shlex.split(command)
        try:
            (options, args) = self.parser.parse_args(command_args)
            if len(args) >= 1:
                # guid {  0x414e6bdd, 0xe47b, 0x47cc,
                #        { 0xb2, 0x44, 0xbb, 0x61, 0x02, 0x0c,0xf5, 0x16 }}
                # this generates multiple args
                # Rejoin so a C-struct GUID split by the lexer becomes one
                # string again.  Note: `arg` is only bound when args is
                # non-empty; the len(args) > 0 guard below relies on that.
                arg = ' '.join(args)
        except ValueError:
            # if you don't handle exceptions, passing an incorrect argument
            # to the OptionParser will cause LLDB to exit (courtesy of
            # OptParse dealing with argument errors by throwing SystemExit)
            result.SetError("option parsing failed")
            return

        if options.help:
            self.parser.print_help()
            return

        if options.new:
            # Generate and print a fresh random GUID in both registry and
            # C-struct form, then exit.
            guid = uuid.uuid4()
            print(str(guid).upper())
            print(GuidNames.to_c_guid(guid))
            return

        if len(args) > 0:
            if GuidNames.is_guid_str(arg):
                # guid 05AD34BA-6F02-4214-952E-4DA0398E2BB9
                key = arg.lower()
                name = GuidNames.to_name(key)
            elif GuidNames.is_c_guid(arg):
                # guid {  0x414e6bdd, 0xe47b, 0x47cc,
                #        { 0xb2, 0x44, 0xbb, 0x61, 0x02, 0x0c,0xf5, 0x16 }}
                key = GuidNames.from_c_guid(arg)
                name = GuidNames.to_name(key)
            else:
                # guid gEfiDxeServicesTableGuid
                # Look up by symbolic name; silently ignore unknown names.
                name = arg
                try:
                    key = GuidNames.to_guid(name)
                    name = GuidNames.to_name(key)
                except ValueError:
                    return

            extra = f'{GuidNames.to_c_guid(key)}: ' if options.verbose else ''
            print(f'{key}: {extra}{name}')

        else:
            # No argument: dump the whole registry.  NOTE(review): the
            # attribute really is named `_dict_` (a GuidNames class-level
            # table), not the builtin `__dict__`.
            for key, value in GuidNames._dict_.items():
                if options.verbose:
                    extra = f'{GuidNames.to_c_guid(key)}: '
                else:
                    extra = ''
                print(f'{key}: {extra}{value}')
class EfiSymbolicateCommand(object):
    '''Class to abstract an lldb command'''

    def create_options(self):
        ''' standard lldb command help/options parser'''
        usage = "usage: %prog [options]"
        description = '''Command that can load EFI PE/COFF and TE image
        symbols. If you are having trouble in PEI try adding --pei.
        '''
        # Pass add_help_option = False, since this keeps the command in line
        # with lldb commands, and we wire up "help command" to work by
        # providing the long & short help methods below.
        self.parser = optparse.OptionParser(
            description=description,
            prog='efi_symbols',
            usage=usage,
            add_help_option=False)

        self.parser.add_option(
            '-a',
            '--address',
            type="int",
            dest='address',
            help='Load symbols for image at address',
            default=None)

        self.parser.add_option(
            '-f',
            '--frame',
            action='store_true',
            dest='frame',
            help='Load symbols for current stack frame',
            default=False)

        self.parser.add_option(
            '-p',
            '--pc',
            action='store_true',
            dest='pc',
            help='Load symbols for pc',
            default=False)

        self.parser.add_option(
            '--pei',
            action='store_true',
            dest='pei',
            help='Load symbols for PEI (searches every 4 bytes)',
            default=False)

        self.parser.add_option(
            '-e',
            '--extended',
            action='store_true',
            dest='extended',
            help='Try to load all symbols based on config tables.',
            default=False)

        self.parser.add_option(
            '-r',
            '--range',
            type="long",
            dest='range',
            help='How far to search backward for start of PE/COFF Image',
            default=None)

        self.parser.add_option(
            '-s',
            '--stride',
            type="long",
            dest='stride',
            help='Boundary to search for PE/COFF header',
            default=None)

        self.parser.add_option(
            '-t',
            '--thread',
            action='store_true',
            dest='thread',
            help='Load symbols for the frames of all threads',
            default=False)

        self.parser.add_option(
            '-h',
            '--help',
            action='store_true',
            dest='help',
            help='Show help for the command',
            default=False)

    def get_short_help(self):
        '''standard lldb function method'''
        return (
            "Load symbols based on an address that is part of"
            " a PE/COFF EFI image.")

    def get_long_help(self):
        '''standard lldb function method'''
        return self.help_string

    def __init__(self, debugger, unused):
        '''standard lldb function method'''
        self.create_options()
        self.help_string = self.parser.format_help()

    def lldb_print(self, lldb_str):
        # capture command out like an lldb command
        self.result.PutCString(lldb_str)
        # flush the output right away
        self.result.SetImmediateOutputFile(
            self.exe_ctx.target.debugger.GetOutputFile())

    def __call__(self, debugger, command, exe_ctx, result):
        '''standard lldb function method'''
        # Use the Shell Lexer to properly parse up command options just like a
        # shell would
        command_args = shlex.split(command)
        try:
            (options, _) = self.parser.parse_args(command_args)
        except ValueError:
            # if you don't handle exceptions, passing an incorrect argument
            # to the OptionParser will cause LLDB to exit (courtesy of
            # OptParse dealing with argument errors by throwing SystemExit)
            result.SetError("option parsing failed")
            return

        if options.help:
            self.parser.print_help()
            return

        file = LldbFileObject(exe_ctx.process)
        efi_symbols = EfiSymbols(exe_ctx.target)
        self.result = result
        self.exe_ctx = exe_ctx

        if options.pei:
            # XIP code ends up on a 4 byte boundary.
            options.stride = 4
            options.range = 0x100000
        efi_symbols.configure_search(options.stride, options.range)

        if not options.pc and options.address is None:
            # default to
            options.frame = True

        if options.frame:
            if not exe_ctx.frame.IsValid():
                result.SetError("invalid frame")
                return

            threads = exe_ctx.process.threads if options.thread else [
                exe_ctx.thread]

            for thread in threads:
                for frame in thread:
                    res = efi_symbols.address_to_symbols(frame.pc)
                    self.lldb_print(res)
        else:
            if options.address is not None:
                address = options.address
            elif options.pc:
                try:
                    address = exe_ctx.thread.GetSelectedFrame().pc
                except ValueError:
                    result.SetError("invalid pc")
                    return
            else:
                address = 0

            # Bug fix: `address` is already an int in every branch above;
            # the original called address_to_symbols(address.pc), which
            # raises AttributeError, and printed with print() instead of
            # lldb_print() like the frame path does.
            res = efi_symbols.address_to_symbols(address)
            self.lldb_print(res)

        if options.extended:
            gST = exe_ctx.target.FindFirstGlobalVariable('gST')
            if gST.error.fail:
                print('Error: This command requires symbols to be loaded')
            else:
                table = EfiConfigurationTable(file, gST.unsigned)
                for address, _ in table.DebugImageInfo():
                    res = efi_symbols.address_to_symbols(address)
                    self.lldb_print(res)

        # keep trying module file names until we find a GUID xref file
        for m in exe_ctx.target.modules:
            if GuidNames.add_build_guid_file(str(m.file)):
                break
def CHAR16_TypeSummary(valobj, internal_dict):
    '''
    Display CHAR16 as a String in the debugger.
    Note: utf-8 is returned as that is the value for the debugger.
    '''
    SBError = lldb.SBError()
    Str = ''
    if valobj.TypeIsPointerType():
        if valobj.GetValueAsUnsigned() == 0:
            return "NULL"

        # CHAR16 * max string size 1024
        for i in range(1024):
            Char = valobj.GetPointeeData(i, 1).GetUnsignedInt16(SBError, 0)
            if SBError.fail or Char == 0:
                break
            Str += chr(Char)
        return 'L"' + Str + '"'

    if valobj.num_children == 0:
        # CHAR16
        return "L'" + chr(valobj.unsigned) + "'"

    # CHAR16 []
    for i in range(valobj.num_children):
        Char = valobj.GetChildAtIndex(i).data.GetUnsignedInt16(SBError, 0)
        # Fix: also stop on a read error, matching the pointer branch above
        # and the CHAR8 handler; previously only Char == 0 was checked.
        if SBError.fail or Char == 0:
            break
        Str += chr(Char)
    return 'L"' + Str + '"'
    # (the original also had an unreachable `return Str` here — removed)
def CHAR8_TypeSummary(valobj, internal_dict):
    '''
    Display CHAR8 as a String in the debugger.
    Note: utf-8 is returned as that is the value for the debugger.
    '''
    err = lldb.SBError()
    chars = []

    if valobj.TypeIsPointerType():
        if valobj.GetValueAsUnsigned() == 0:
            return "NULL"
        # CHAR8 * — scan is capped at 1024 characters.
        for offset in range(1024):
            byte = valobj.GetPointeeData(offset, 1).GetUnsignedInt8(err, 0)
            if err.fail or byte == 0:
                break
            chars.append(chr(byte))
        return '"' + ''.join(chars) + '"'

    if valobj.num_children == 0:
        # A single CHAR8 value.
        return "'" + chr(valobj.unsigned) + "'"

    # CHAR8 [] array: stop at NUL or on a failed read.
    for idx in range(valobj.num_children):
        byte = valobj.GetChildAtIndex(idx).data.GetUnsignedInt8(err, 0)
        if err.fail or byte == 0:
            break
        chars.append(chr(byte))
    return '"' + ''.join(chars) + '"'
def EFI_STATUS_TypeSummary(valobj, internal_dict):
    '''Summarize an EFI_STATUS value via its symbolic name.'''
    # A pointer to an EFI_STATUS carries no status value itself.
    if valobj.TypeIsPointerType():
        return ''
    status = EfiStatusClass(valobj.unsigned)
    return str(status)
def EFI_TPL_TypeSummary(valobj, internal_dict):
    '''Summarize an EFI_TPL (task priority level) value.'''
    # Pointers get no summary.
    if valobj.TypeIsPointerType():
        return ''
    return str(EfiTpl(valobj.unsigned))
def EFI_GUID_TypeSummary(valobj, internal_dict):
    '''Summarize an EFI_GUID by its registered name (via GuidNames).'''
    # Pointers get no summary.
    if valobj.TypeIsPointerType():
        return ''
    raw = bytes(valobj.data.uint8)
    return str(GuidNames(raw))
def EFI_BOOT_MODE_TypeSummary(valobj, internal_dict):
    '''Return #define name for EFI_BOOT_MODE'''
    # Fix: the docstring above was previously a stray no-op string
    # expression in the middle of the function body.
    if valobj.TypeIsPointerType():
        return ''
    return str(EfiBootMode(valobj.unsigned))
def lldb_type_formaters(debugger, mod_name):
    '''Teach lldb about EFI types'''

    category = debugger.GetDefaultCategory()
    FormatBool = lldb.SBTypeFormat(lldb.eFormatBoolean)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("BOOLEAN"), FormatBool)

    FormatHex = lldb.SBTypeFormat(lldb.eFormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT64"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT64"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT32"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT32"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT16"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT16"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT8"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT8"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINTN"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INTN"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("CHAR8"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("CHAR16"), FormatHex)

    category.AddTypeFormat(lldb.SBTypeNameSpecifier(
        "EFI_PHYSICAL_ADDRESS"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier(
        "PHYSICAL_ADDRESS"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_LBA"), FormatHex)
    category.AddTypeFormat(
        lldb.SBTypeNameSpecifier("EFI_BOOT_MODE"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier(
        "EFI_FV_FILETYPE"), FormatHex)

    #
    # Smart type printing for EFI
    #
    # Bug fix: the GUID and EFI_TPL commands used '- -python-function'
    # (stray space) instead of '--python-function', which lldb rejects.
    debugger.HandleCommand(
        f'type summary add GUID --python-function '
        f'{mod_name}.EFI_GUID_TypeSummary')
    debugger.HandleCommand(
        f'type summary add EFI_GUID --python-function '
        f'{mod_name}.EFI_GUID_TypeSummary')
    debugger.HandleCommand(
        f'type summary add EFI_STATUS --python-function '
        f'{mod_name}.EFI_STATUS_TypeSummary')
    debugger.HandleCommand(
        f'type summary add EFI_TPL --python-function '
        f'{mod_name}.EFI_TPL_TypeSummary')
    debugger.HandleCommand(
        f'type summary add EFI_BOOT_MODE --python-function '
        f'{mod_name}.EFI_BOOT_MODE_TypeSummary')
    debugger.HandleCommand(
        f'type summary add CHAR16 --python-function '
        f'{mod_name}.CHAR16_TypeSummary')

    # W605 this is the correct escape sequence for the lldb command
    debugger.HandleCommand(
        f'type summary add --regex "CHAR16 \[[0-9]+\]" '  # noqa: W605
        f'--python-function {mod_name}.CHAR16_TypeSummary')

    debugger.HandleCommand(
        f'type summary add CHAR8 --python-function '
        f'{mod_name}.CHAR8_TypeSummary')

    # W605 this is the correct escape sequence for the lldb command
    debugger.HandleCommand(
        f'type summary add --regex "CHAR8 \[[0-9]+\]" '  # noqa: W605
        f'--python-function {mod_name}.CHAR8_TypeSummary')
class LldbWorkaround:
    '''One-shot workaround: stop lldb from intercepting SIGALRM.'''
    # True until the workaround has been applied once.
    needed = True

    @classmethod
    def activate(cls):
        if not cls.needed:
            return
        lldb.debugger.HandleCommand("process handle SIGALRM -n false")
        cls.needed = False
def LoadEmulatorEfiSymbols(frame, bp_loc, internal_dict):
    '''lldb breakpoint callback for the emulator's SecGdbScriptBreak stub.

    The breakpoint must sit on a function with this prototype — lldb looks
    the arguments up by name:

        VOID
        SecGdbScriptBreak (
            char                *FileName,
            int                 FileNameLength,
            long unsigned int   LoadAddress,
            int                 AddSymbolFlag
            )
        {
            return;
        }

    When the emulator loads (or unloads) a PE/COFF image it calls the stub
    with the symbol file name, the name length, the load address, and a
    flag distinguishing load from unload.
    '''
    LldbWorkaround().activate()

    symbols = EfiSymbols(frame.thread.process.target)

    load_address = frame.FindVariable("LoadAddress").unsigned
    if frame.FindVariable("AddSymbolFlag").unsigned == 1:
        status = symbols.address_to_symbols(load_address)
    else:
        status = symbols.unload_symbols(load_address)
    print(status)

    # Returning False makes the breakpoint command continue execution.
    return False
def __lldb_init_module(debugger, internal_dict):
    '''
    This initializer is being run from LLDB in the embedded command interpreter
    '''

    # The module name is the script's file stem; the command registrations
    # below reference classes in this module by that name.
    mod_name = Path(__file__).stem
    lldb_type_formaters(debugger, mod_name)

    # Add any commands contained in this module to LLDB
    debugger.HandleCommand(
        f'command script add -c {mod_name}.EfiSymbolicateCommand efi_symbols')
    debugger.HandleCommand(
        f'command script add -c {mod_name}.EfiGuidCommand guid')
    debugger.HandleCommand(
        f'command script add -c {mod_name}.EfiTableCommand table')
    debugger.HandleCommand(
        f'command script add -c {mod_name}.EfiHobCommand hob')
    debugger.HandleCommand(
        f'command script add -c {mod_name}.EfiDevicePathCommand devicepath')

    print('EFI specific commands have been installed.')

    # patch the ctypes c_void_p values if the debuggers OS and EFI have
    # different ideas on the size of the debug.
    try:
        patch_ctypes(debugger.GetSelectedTarget().addr_size)
    except ValueError:
        # in case the script is imported and the debugger has no target;
        # defaults to sizeof(UINTN) == sizeof(UINT64)
        patch_ctypes()

    try:
        target = debugger.GetSelectedTarget()
        if target.FindFunctions('SecGdbScriptBreak').symbols:
            # Running under the emulator: wire the symbol-loading
            # breakpoint callback onto SecGdbScriptBreak.
            breakpoint = target.BreakpointCreateByName('SecGdbScriptBreak')
            # Set the emulator breakpoints, if we are in the emulator
            cmd = 'breakpoint command add -s python -F '
            cmd += f'efi_lldb.LoadEmulatorEfiSymbols {breakpoint.GetID()}'
            debugger.HandleCommand(cmd)
            print('Type r to run emulator.')
        else:
            raise ValueError("No Emulator Symbols")
    except ValueError:
        # default action when the script is imported (non-emulator target):
        # load symbols for the current frame and show state.
        debugger.HandleCommand("efi_symbols --frame --extended")
        debugger.HandleCommand("register read")
        debugger.HandleCommand("bt all")
if __name__ == '__main__':
    # This module is meant to be imported into lldb via
    # "command script import"; running it directly does nothing.
    pass
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/efi_lldb.py
|
# @file ConvertMasmToNasm.py
# This script assists with conversion of MASM assembly syntax to NASM
#
# Copyright (c) 2007 - 2016, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
#
# Import Modules
#
import argparse
import io
import os.path
import re
import subprocess
import sys
class UnsupportedConversion(Exception):
    # Presumably raised when MASM syntax cannot be translated to NASM —
    # raise sites are not visible in this chunk; confirm against callers.
    pass
class NoSourceFile(Exception):
    # Presumably raised when an expected input source file is missing —
    # raise sites are not visible in this chunk; confirm against callers.
    pass
class UnsupportedArch(Exception):
    # Architectures this converter does not handle; matched against the
    # lower-cased parent-directory name in ConvertAsmFile.ConvertAsmFile().
    unsupported = ('aarch64', 'arm', 'ebc', 'ipf')
class CommonUtils:
    '''Shared state and helpers for the MASM-to-NASM conversion classes:
    command-line handling, git integration, and small string utilities.'''

    # Version and Copyright
    VersionNumber = "0.01"
    __version__ = "%prog Version " + VersionNumber
    __copyright__ = "Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved."
    __usage__ = "%prog [options] source.asm [destination.nasm]"

    def __init__(self, clone=None):
        '''Initialize from the command line, or share state with `clone`
        (an existing CommonUtils) to avoid re-parsing and re-detecting.'''
        if clone is None:
            self.args = self.ProcessCommandLine()
        else:
            self.args = clone.args

        self.unsupportedSyntaxSeen = False
        self.src = self.args.source
        self.keep = self.args.keep
        assert(os.path.exists(self.src))
        self.dirmode = os.path.isdir(self.src)
        srcExt = os.path.splitext(self.src)[1]
        # A .nasm file is already converted; only valid as a directory scan.
        assert (self.dirmode or srcExt != '.nasm')
        self.infmode = not self.dirmode and srcExt == '.inf'
        self.diff = self.args.diff
        self.git = self.args.git
        self.force = self.args.force

        if clone is None:
            self.rootdir = os.getcwd()
            self.DetectGit()
        else:
            self.rootdir = clone.rootdir
            self.gitdir = clone.gitdir
            self.gitemail = clone.gitemail

    def ProcessCommandLine(self):
        '''Parse and return the command-line arguments.'''
        parser = argparse.ArgumentParser(description=self.__copyright__)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + self.VersionNumber)
        parser.add_argument("-q", "--quiet", action="store_true",
                            help="Disable all messages except FATAL ERRORS.")
        parser.add_argument("--git", action="store_true",
                            help="Use git to create commits for each file converted")
        parser.add_argument("--keep", action="append", choices=('asm', 's'),
                            default=[],
                            help="Don't remove files with this extension")
        parser.add_argument("--diff", action="store_true",
                            help="Show diff of conversion")
        parser.add_argument("-f", "--force", action="store_true",
                            help="Force conversion even if unsupported")
        parser.add_argument('source', help='MASM input file')
        parser.add_argument('dest', nargs='?',
                            help='NASM output file (default=input.nasm; - for stdout)')
        return parser.parse_args()

    def RootRelative(self, path):
        '''Return `path` relative to self.rootdir (leading separators
        stripped); unchanged if it is not under rootdir.'''
        result = path
        if result.startswith(self.rootdir):
            result = result[len(self.rootdir):]
            while len(result) > 0 and result[0] in '/\\':
                result = result[1:]
        return result

    def MatchAndSetMo(self, regexp, string):
        '''Anchored match; stores the match object in self.mo and returns
        True on success.'''
        self.mo = regexp.match(string)
        return self.mo is not None

    def SearchAndSetMo(self, regexp, string):
        '''Unanchored search; stores the match object in self.mo and
        returns True on success.'''
        self.mo = regexp.search(string)
        return self.mo is not None

    def ReplacePreserveSpacing(self, string, find, replace):
        '''Replace `find` with `replace`, padding or consuming trailing
        spaces so column alignment of the rest of the line is preserved
        where possible.'''
        if len(find) >= len(replace):
            # Pad the replacement to the original width.
            padded = replace + (' ' * (len(find) - len(replace)))
            return string.replace(find, padded)
        elif find.find(replace) >= 0:
            return string.replace(find, replace)
        else:
            # Replacement is longer: try to absorb trailing spaces after
            # `find`, preferring the longest run first.
            lenDiff = len(replace) - len(find)
            result = string
            for i in range(lenDiff, -1, -1):
                padded = find + (' ' * i)
                result = result.replace(padded, replace)
            return result

    def DetectGit(self):
        '''Walk up from the source path looking for a .git directory; set
        self.gitdir and self.gitemail (None/None when not in a repo).'''
        lastpath = os.path.realpath(self.src)
        self.gitdir = None
        while True:
            path = os.path.split(lastpath)[0]
            if path == lastpath:
                # Reached the filesystem root without finding .git.
                self.gitemail = None
                return
            candidate = os.path.join(path, '.git')
            if os.path.isdir(candidate):
                self.gitdir = candidate
                self.gitemail = self.FormatGitEmailAddress()
                return
            lastpath = path

    def FormatGitEmailAddress(self):
        '''Return 'Name <email>' from git config, quoting the name when it
        contains a comma; empty string when git is not in use.'''
        if not self.git or not self.gitdir:
            return ''

        cmd = ('git', 'config', 'user.name')
        name = self.RunAndCaptureOutput(cmd).strip()
        cmd = ('git', 'config', 'user.email')
        email = self.RunAndCaptureOutput(cmd).strip()
        if name.find(',') >= 0:
            name = '"' + name + '"'
        return name + ' <' + email + '>'

    def RunAndCaptureOutput(self, cmd, checkExitCode=True, pipeIn=None):
        '''Run `cmd`, optionally feeding `pipeIn` to stdin, and return its
        decoded stdout; assert on a non-zero exit when checkExitCode.'''
        if pipeIn:
            subpStdin = subprocess.PIPE
        else:
            subpStdin = None
        # NOTE(review): stderr is not captured (goes to the console), so
        # the `stderr` printed below is always None.
        p = subprocess.Popen(args=cmd, stdout=subprocess.PIPE, stdin=subpStdin)
        (stdout, stderr) = p.communicate(pipeIn)
        if checkExitCode:
            if p.returncode != 0:
                print('command:', ' '.join(cmd))
                print('stdout:', stdout)
                print('stderr:', stderr)
                print('return:', p.returncode)
            assert p.returncode == 0
        return stdout.decode('utf-8', 'ignore')

    def FileUpdated(self, path):
        '''Stage a modified file with git (no-op when git is disabled).'''
        if not self.git or not self.gitdir:
            return

        cmd = ('git', 'add', path)
        self.RunAndCaptureOutput(cmd)

    def FileAdded(self, path):
        '''Stage a newly created file (same as FileUpdated).'''
        self.FileUpdated(path)

    def RemoveFile(self, path):
        '''git-rm a converted source file unless --keep retains its
        extension (no-op when git is disabled).'''
        if not self.git or not self.gitdir:
            return

        if self.ShouldKeepFile(path):
            return

        cmd = ('git', 'rm', path)
        self.RunAndCaptureOutput(cmd)

    def ShouldKeepFile(self, path):
        '''True when the file's extension was listed via --keep.'''
        ext = os.path.splitext(path)[1].lower()
        if ext.startswith('.'):
            ext = ext[1:]
        return ext in self.keep

    def FileConversionFinished(self, pkg, module, src, dst):
        '''Create a git commit describing the conversion of src -> dst
        (no-op when git is disabled).'''
        if not self.git or not self.gitdir:
            return

        if not self.args.quiet:
            print('Committing: Conversion of', dst)

        prefix = ' '.join(filter(lambda a: a, [pkg, module]))
        message = ''
        if self.unsupportedSyntaxSeen:
            message += 'ERROR! '
        message += '%s: Convert %s to NASM\n' % (prefix, src)
        message += '\n'
        message += 'The %s script was used to convert\n' % sys.argv[0]
        message += '%s to %s\n' % (src, dst)
        message += '\n'
        message += 'Contributed-under: TianoCore Contribution Agreement 1.0\n'
        assert(self.gitemail is not None)
        message += 'Signed-off-by: %s\n' % self.gitemail
        message = message.encode('utf-8', 'ignore')

        cmd = ('git', 'commit', '-F', '-')
        self.RunAndCaptureOutput(cmd, pipeIn=message)
class ConvertAsmFile(CommonUtils):
    def __init__(self, src, dst, clone):
        '''Convert src to dst, then record the result with git (stage the
        new file, remove the old) when git integration is enabled.'''
        CommonUtils.__init__(self, clone)
        self.ConvertAsmFile(src, dst)
        self.FileAdded(dst)
        self.RemoveFile(src)
    def ConvertAsmFile(self, inputFile, outputFile=None):
        '''Convert one MASM .asm file to NASM syntax.

        outputFile defaults to inputFile with a .nasm extension; "-" writes
        to stdout (unless --diff is active, which prints a diff instead).
        Raises UnsupportedArch for architectures the converter rejects.
        '''
        self.globals = set()
        self.unsupportedSyntaxSeen = False
        self.inputFilename = inputFile
        if not outputFile:
            outputFile = os.path.splitext(inputFile)[0] + '.nasm'
        self.outputFilename = outputFile

        # The architecture is inferred from the parent directory name
        # (e.g. Ia32/ or X64/).
        fullSrc = os.path.realpath(inputFile)
        srcParentDir = os.path.basename(os.path.split(fullSrc)[0])
        maybeArch = srcParentDir.lower()
        if maybeArch in UnsupportedArch.unsupported:
            raise UnsupportedArch
        self.ia32 = maybeArch == 'ia32'
        self.x64 = maybeArch == 'x64'

        self.inputFileBase = os.path.basename(self.inputFilename)
        self.outputFileBase = os.path.basename(self.outputFilename)
        # The converted output is buffered in memory and flushed at the end.
        self.output = io.BytesIO()
        if not self.args.quiet:
            dirpath, src = os.path.split(self.inputFilename)
            dirpath = self.RootRelative(dirpath)
            dst = os.path.basename(self.outputFilename)
            print('Converting:', dirpath, src, '->', dst)
        lines = io.open(self.inputFilename).readlines()
        self.Convert(lines)
        if self.outputFilename == '-' and not self.diff:
            output_data = self.output.getvalue()
            if sys.version_info >= (3, 0):
                output_data = output_data.decode('utf-8', 'ignore')
            sys.stdout.write(output_data)
            self.output.close()
        else:
            f = io.open(self.outputFilename, 'wb')
            f.write(self.output.getvalue())
            f.close()
            self.output.close()
endOfLineRe = re.compile(r'''
\s* ( ; .* )? \n $
''',
re.VERBOSE | re.MULTILINE
)
begOfLineRe = re.compile(r'''
\s*
''',
re.VERBOSE
)
    def Convert(self, lines):
        '''Translate every input line, writing NASM output to self.output.

        Each line is split into indentation, the assembly text, and the
        trailing comment/newline; TranslateAsm() handles the middle part.
        '''
        self.proc = None
        self.anonLabelCount = -1
        # NOTE(review): `output` is assigned but never used in this method.
        output = self.output
        # Counters used to collapse runs of blank lines in the output.
        self.oldAsmEmptyLineCount = 0
        self.newAsmEmptyLineCount = 0
        for line in lines:
            mo = self.begOfLineRe.search(line)
            assert mo is not None
            self.indent = mo.group()
            lineWithoutBeginning = line[len(self.indent):]
            mo = self.endOfLineRe.search(lineWithoutBeginning)
            if mo is None:
                endOfLine = ''
            else:
                endOfLine = mo.group()
            # The assembly text is what remains between indent and trailer.
            oldAsm = line[len(self.indent):len(line) - len(endOfLine)]
            self.originalLine = line.rstrip()
            if line.strip() == '':
                self.oldAsmEmptyLineCount += 1
            self.TranslateAsm(oldAsm, endOfLine)
            if line.strip() != '':
                self.oldAsmEmptyLineCount = 0
procDeclRe = re.compile(r'''
(?: ASM_PFX \s* [(] \s* )?
([\w@][\w@0-9]*) \s*
[)]? \s+
PROC
(?: \s+ NEAR | FAR )?
(?: \s+ C )?
(?: \s+ (PUBLIC | PRIVATE) )?
(?: \s+ USES ( (?: \s+ \w[\w0-9]* )+ ) )?
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
procEndRe = re.compile(r'''
([\w@][\w@0-9]*) \s+
ENDP
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
varAndTypeSubRe = r' (?: [\w@][\w@0-9]* ) (?: \s* : \s* \w+ )? '
publicRe = re.compile(r'''
PUBLIC \s+
( %s (?: \s* , \s* %s )* )
\s* $
''' % (varAndTypeSubRe, varAndTypeSubRe),
re.VERBOSE | re.IGNORECASE
)
varAndTypeSubRe = re.compile(varAndTypeSubRe, re.VERBOSE | re.IGNORECASE)
macroDeclRe = re.compile(r'''
([\w@][\w@0-9]*) \s+
MACRO
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
sectionDeclRe = re.compile(r'''
([\w@][\w@0-9]*) \s+
( SECTION | ENDS )
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
externRe = re.compile(r'''
EXTE?RN \s+ (?: C \s+ )?
([\w@][\w@0-9]*) \s* : \s* (\w+)
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
externdefRe = re.compile(r'''
EXTERNDEF \s+ (?: C \s+ )?
([\w@][\w@0-9]*) \s* : \s* (\w+)
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
protoRe = re.compile(r'''
([\w@][\w@0-9]*) \s+
PROTO
(?: \s+ .* )?
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
defineDataRe = re.compile(r'''
([\w@][\w@0-9]*) \s+
( db | dw | dd | dq ) \s+
( .*? )
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
equRe = re.compile(r'''
([\w@][\w@0-9]*) \s+ EQU \s+ (\S.*?)
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
ignoreRe = re.compile(r'''
\. (?: const |
mmx |
model |
xmm |
x?list |
[3-6]86p?
) |
page
(?: \s+ .* )?
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
whitespaceRe = re.compile(r'\s+', re.MULTILINE)
    def TranslateAsm(self, oldAsm, endOfLine):
        '''Translate one line of MASM assembly text (already stripped of
        indentation and trailing comment) and emit the NASM equivalent.

        Dispatches on the directive/opcode: each branch computes newAsm and
        emits it alongside the trailing comment.
        '''
        assert(oldAsm.strip() == oldAsm)

        # Comments may mention the file name; point them at the new file.
        endOfLine = endOfLine.replace(self.inputFileBase, self.outputFileBase)

        oldOp = oldAsm.split()
        if len(oldOp) >= 1:
            oldOp = oldOp[0]
        else:
            oldOp = ''

        if oldAsm == '':
            newAsm = oldAsm
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif oldOp in ('#include', ):
            # Pre-processor include lines pass through untouched.
            newAsm = oldAsm
            self.EmitLine(oldAsm + endOfLine)
        elif oldOp.lower() in ('end', 'title', 'text'):
            # Directives with no NASM equivalent: keep only the comment.
            newAsm = ''
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif oldAsm.lower() == '@@:':
            # MASM anonymous label becomes a numbered NASM local label.
            self.anonLabelCount += 1
            self.EmitLine(self.anonLabel(self.anonLabelCount) + ':')
        elif self.MatchAndSetMo(self.ignoreRe, oldAsm):
            newAsm = ''
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif oldAsm.lower() == 'ret':
            # Unwind any USES registers (in reverse) before returning.
            for i in range(len(self.uses) - 1, -1, -1):
                register = self.uses[i]
                self.EmitNewContent('pop     ' + register)
            newAsm = 'ret'
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
            self.uses = tuple()
        elif oldOp.lower() == 'lea':
            newAsm = self.ConvertLea(oldAsm)
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif oldAsm.lower() == 'end':
            newAsm = ''
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
            self.uses = tuple()
        elif self.MatchAndSetMo(self.equRe, oldAsm):
            # EQU constants become %define macros.
            equ = self.mo.group(1)
            newAsm = '%%define %s %s' % (equ, self.mo.group(2))
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.externRe, oldAsm) or \
                self.MatchAndSetMo(self.protoRe, oldAsm):
            # EXTRN/PROTO both translate to a NASM extern, and the symbol
            # is registered for ASM_PFX() wrapping on later lines.
            extern = self.mo.group(1)
            self.NewGlobal(extern)
            newAsm = 'extern ' + extern
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.externdefRe, oldAsm):
            newAsm = ''
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.macroDeclRe, oldAsm):
            # MASM macros are converted to zero-argument NASM macros.
            newAsm = '%%macro %s 0' % self.mo.group(1)
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif oldOp.lower() == 'endm':
            newAsm = r'%endmacro'
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.sectionDeclRe, oldAsm):
            # "<name> SECTION" becomes ".<name>"; "<name> ENDS" is dropped.
            name = self.mo.group(1)
            ty = self.mo.group(2)
            if ty.lower() == 'section':
                newAsm = '.' + name
            else:
                newAsm = ''
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.procDeclRe, oldAsm):
            # PROC: emit a global (unless PRIVATE), a label, and pushes
            # for each USES register.
            proc = self.proc = self.mo.group(1)
            visibility = self.mo.group(2)
            if visibility is None:
                visibility = ''
            else:
                visibility = visibility.lower()
            if visibility != 'private':
                self.NewGlobal(self.proc)
                proc = 'ASM_PFX(' + proc + ')'
                self.EmitNewContent('global ' + proc)
            newAsm = proc + ':'
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
            uses = self.mo.group(3)
            if uses is not None:
                uses = tuple(filter(None, uses.split()))
            else:
                uses = tuple()
            self.uses = uses
            for register in self.uses:
                self.EmitNewContent('    push    ' + register)
        elif self.MatchAndSetMo(self.procEndRe, oldAsm):
            newAsm = ''
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.publicRe, oldAsm):
            # PUBLIC list: strip optional ":type" suffixes, register each
            # symbol, and emit a global per symbol (last one inline).
            publics = re.findall(self.varAndTypeSubRe, self.mo.group(1))
            publics = tuple(map(lambda p: p.split(':')[0].strip(), publics))
            for i in range(len(publics) - 1):
                name = publics[i]
                self.EmitNewContent('global ASM_PFX(%s)' % publics[i])
                self.NewGlobal(name)
            name = publics[-1]
            self.NewGlobal(name)
            newAsm = 'global ASM_PFX(%s)' % name
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        elif self.MatchAndSetMo(self.defineDataRe, oldAsm):
            # Data definition: MASM "?" (uninitialized) becomes 0.
            name = self.mo.group(1)
            ty = self.mo.group(2)
            value = self.mo.group(3)
            if value == '?':
                value = 0
            newAsm = '%s: %s %s' % (name, ty, value)
            newAsm = self.CommonConversions(newAsm)
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
        else:
            newAsm = self.CommonConversions(oldAsm)
            self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
def NewGlobal(self, name):
regex = re.compile(r'(?<![_\w\d])(?<!ASM_PFX\()(' + re.escape(name) +
r')(?![_\w\d])')
self.globals.add(regex)
def ConvertAnonymousLabels(self, oldAsm):
newAsm = oldAsm
anonLabel = self.anonLabel(self.anonLabelCount)
newAsm = newAsm.replace('@b', anonLabel)
newAsm = newAsm.replace('@B', anonLabel)
anonLabel = self.anonLabel(self.anonLabelCount + 1)
newAsm = newAsm.replace('@f', anonLabel)
newAsm = newAsm.replace('@F', anonLabel)
return newAsm
def anonLabel(self, count):
return '.%d' % count
def EmitString(self, string):
self.output.write(string.encode('utf-8', 'ignore'))
def EmitLineWithDiff(self, old, new):
newLine = (self.indent + new).rstrip()
if self.diff:
if old is None:
print('+%s' % newLine)
elif newLine != old:
print('-%s' % old)
print('+%s' % newLine)
else:
print('', newLine)
if newLine != '':
self.newAsmEmptyLineCount = 0
self.EmitString(newLine + '\r\n')
def EmitLine(self, string):
self.EmitLineWithDiff(self.originalLine, string)
def EmitNewContent(self, string):
self.EmitLineWithDiff(None, string)
def EmitAsmReplaceOp(self, oldAsm, oldOp, newOp, endOfLine):
newAsm = oldAsm.replace(oldOp, newOp, 1)
self.EmitAsmWithComment(oldAsm, newAsm, endOfLine)
    # Matches a MASM hex literal such as '0FFh', capturing the digits without
    # leading zeros so it can be rewritten as a NASM '0x..' literal.
    hexNumRe = re.compile(r'0*((?=[\da-f])\d*(?<=\d)[\da-f]*)h', re.IGNORECASE)
    def EmitAsmWithComment(self, oldAsm, newAsm, endOfLine):
        # Wrap every registered global symbol in ASM_PFX(...).
        for glblRe in self.globals:
            newAsm = glblRe.sub(r'ASM_PFX(\1)', newAsm)
        # Convert MASM hex literals ('..h') to NASM syntax ('0x..').
        newAsm = self.hexNumRe.sub(r'0x\1', newAsm)
        newLine = newAsm + endOfLine
        # Emit the line unless the conversion produced an empty line from a
        # non-empty original (i.e. a construct that was dropped entirely).
        emitNewLine = ((newLine.strip() != '') or
                       ((oldAsm + endOfLine).strip() == ''))
        # Collapse runs of consecutive blank output lines to a single one.
        if emitNewLine and newLine.strip() == '':
            self.newAsmEmptyLineCount += 1
            if self.newAsmEmptyLineCount > 1:
                emitNewLine = False
        if emitNewLine:
            self.EmitLine(newLine.rstrip())
        elif self.diff:
            # Line was dropped; show it as a deletion in diff mode.
            print('-%s' % self.originalLine)
leaRe = re.compile(r'''
(lea \s+) ([\w@][\w@0-9]*) \s* , \s* (\S (?:.*\S)?)
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
def ConvertLea(self, oldAsm):
newAsm = oldAsm
if self.MatchAndSetMo(self.leaRe, oldAsm):
lea = self.mo.group(1)
dst = self.mo.group(2)
src = self.mo.group(3)
if src.find('[') < 0:
src = '[' + src + ']'
newAsm = lea + dst + ', ' + src
newAsm = self.CommonConversions(newAsm)
return newAsm
ptrRe = re.compile(r'''
(?<! \S )
([dfq]?word|byte) \s+ (?: ptr ) (\s*)
(?= [[\s] )
''',
re.VERBOSE | re.IGNORECASE
)
def ConvertPtr(self, oldAsm):
newAsm = oldAsm
while self.SearchAndSetMo(self.ptrRe, newAsm):
ty = self.mo.group(1)
if ty.lower() == 'fword':
ty = ''
else:
ty += self.mo.group(2)
newAsm = newAsm[:self.mo.start(0)] + ty + newAsm[self.mo.end(0):]
return newAsm
labelByteRe = re.compile(r'''
(?: \s+ label \s+ (?: [dfq]?word | byte ) )
(?! \S )
''',
re.VERBOSE | re.IGNORECASE
)
def ConvertLabelByte(self, oldAsm):
newAsm = oldAsm
if self.SearchAndSetMo(self.labelByteRe, newAsm):
newAsm = newAsm[:self.mo.start(0)] + ':' + newAsm[self.mo.end(0):]
return newAsm
unaryBitwiseOpRe = re.compile(r'''
( NOT )
(?= \s+ \S )
''',
re.VERBOSE | re.IGNORECASE
)
binaryBitwiseOpRe = re.compile(r'''
( \S \s+ )
( AND | OR | SHL | SHR )
(?= \s+ \S )
''',
re.VERBOSE | re.IGNORECASE
)
bitwiseOpReplacements = {
'not': '~',
'and': '&',
'shl': '<<',
'shr': '>>',
'or': '|',
}
def ConvertBitwiseOp(self, oldAsm):
newAsm = oldAsm
while self.SearchAndSetMo(self.binaryBitwiseOpRe, newAsm):
prefix = self.mo.group(1)
op = self.bitwiseOpReplacements[self.mo.group(2).lower()]
newAsm = newAsm[:self.mo.start(0)] + prefix + op + \
newAsm[self.mo.end(0):]
while self.SearchAndSetMo(self.unaryBitwiseOpRe, newAsm):
op = self.bitwiseOpReplacements[self.mo.group(1).lower()]
newAsm = newAsm[:self.mo.start(0)] + op + newAsm[self.mo.end(0):]
return newAsm
sectionRe = re.compile(r'''
\. ( code |
data
)
(?: \s+ .* )?
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
segmentRe = re.compile(r'''
( code |
data )
(?: \s+ SEGMENT )
(?: \s+ .* )?
\s* $
''',
re.VERBOSE | re.IGNORECASE
)
def ConvertSection(self, oldAsm):
newAsm = oldAsm
if self.MatchAndSetMo(self.sectionRe, newAsm) or \
self.MatchAndSetMo(self.segmentRe, newAsm):
name = self.mo.group(1).lower()
if name == 'code':
if self.x64:
self.EmitLine('DEFAULT REL')
name = 'text'
newAsm = 'SECTION .' + name
return newAsm
fwordRe = re.compile(r'''
(?<! \S )
fword
(?! \S )
''',
re.VERBOSE | re.IGNORECASE
)
def FwordUnsupportedCheck(self, oldAsm):
newAsm = oldAsm
if self.SearchAndSetMo(self.fwordRe, newAsm):
newAsm = self.Unsupported(newAsm, 'fword used')
return newAsm
__common_conversion_routines__ = (
ConvertAnonymousLabels,
ConvertPtr,
FwordUnsupportedCheck,
ConvertBitwiseOp,
ConvertLabelByte,
ConvertSection,
)
def CommonConversions(self, oldAsm):
newAsm = oldAsm
for conv in self.__common_conversion_routines__:
newAsm = conv(self, newAsm)
return newAsm
def Unsupported(self, asm, message=None):
if not self.force:
raise UnsupportedConversion
self.unsupportedSyntaxSeen = True
newAsm = '%error conversion unsupported'
if message:
newAsm += '; ' + message
newAsm += ': ' + asm
return newAsm
class ConvertInfFile(CommonUtils):
    """Converts the MASM sources referenced by one INF file to NASM.

    Scans the INF's source entries for .asm/.s files, converts each to a
    .nasm file, and rewrites the INF's source list in place.
    """

    def __init__(self, inf, clone):
        CommonUtils.__init__(self, clone)
        self.inf = inf
        self.ScanInfAsmFiles()
        if self.infmode:
            self.ConvertInfAsmFiles()

    # Matches a sources-section line naming an .asm or .s file, optionally
    # followed by a build-rule family ('| FAMILY') and/or a '#' comment.
    infSrcRe = re.compile(r'''
                          \s*
                          ( [\w@][\w@0-9/]* \.(asm|s) )
                          \s* (?: \| [^#]* )?
                          \s* (?: \# .* )?
                          $
                          ''',
                          re.VERBOSE | re.IGNORECASE
                          )

    def GetInfAsmFileMapping(self):
        """Return {src: dst} for each convertible source, plus an 'order' list
        preserving the order sources appear in the INF."""
        srcToDst = {'order': []}
        for line in self.lines:
            line = line.rstrip()
            if self.MatchAndSetMo(self.infSrcRe, line):
                src = self.mo.group(1)
                dst = os.path.splitext(src)[0] + '.nasm'
                fullDst = os.path.join(self.dir, dst)
                # Skip sources already mapped or already converted on disk.
                if src not in srcToDst and not os.path.exists(fullDst):
                    srcToDst[src] = dst
                    srcToDst['order'].append(src)
        return srcToDst

    def ScanInfAsmFiles(self):
        """Read the INF; derive module/package names and the src<->dst maps."""
        src = self.inf
        assert os.path.isfile(src)
        f = io.open(src, 'rt')
        self.lines = f.readlines()
        f.close()
        path = os.path.realpath(self.inf)
        (self.dir, inf) = os.path.split(path)
        parent = os.path.normpath(self.dir)
        (lastpath, self.moduleName) = os.path.split(parent)
        # Walk upward until an enclosing '*Pkg' directory or the filesystem root.
        self.packageName = None
        while True:
            lastpath = os.path.normpath(lastpath)
            (parent, basename) = os.path.split(lastpath)
            if parent == lastpath:
                break
            if basename.endswith('Pkg'):
                self.packageName = basename
                break
            lastpath = parent
        self.srcToDst = self.GetInfAsmFileMapping()
        # Invert the map: a .asm and a .s source may share one .nasm output.
        self.dstToSrc = {'order': []}
        for src in self.srcToDst['order']:
            dst = self.srcToDst[src]
            if dst not in self.dstToSrc:
                self.dstToSrc[dst] = [src]
                self.dstToSrc['order'].append(dst)
            else:
                self.dstToSrc[dst].append(src)

    def __len__(self):
        """Number of .nasm destinations this INF maps to."""
        return len(self.dstToSrc['order'])

    def __iter__(self):
        """Iterate over the .nasm destination paths (INF-relative)."""
        return iter(self.dstToSrc['order'])

    def ConvertInfAsmFiles(self):
        """Convert every mapped assembly file, reporting failures at the end."""
        notConverted = []
        unsupportedArchCount = 0
        for dst in self:
            didSomething = False
            try:
                self.UpdateInfAsmFile(dst)
                didSomething = True
            except UnsupportedConversion:
                if not self.args.quiet:
                    print('MASM=>NASM conversion unsupported for', dst)
                notConverted.append(dst)
            except NoSourceFile:
                if not self.args.quiet:
                    # BUGFIX: this previously printed the undefined name
                    # 'reldst', raising NameError instead of the message.
                    print('Source file missing for', dst)
                notConverted.append(dst)
            except UnsupportedArch:
                unsupportedArchCount += 1
            else:
                if didSomething:
                    self.ConversionFinished(dst)
        if len(notConverted) > 0 and not self.args.quiet:
            for dst in notConverted:
                reldst = self.RootRelative(dst)
                print('Unabled to convert', reldst)
        if unsupportedArchCount > 0 and not self.args.quiet:
            print('Skipped', unsupportedArchCount, 'files based on architecture')

    def UpdateInfAsmFile(self, dst, IgnoreMissingAsm=False):
        """Convert dst's .asm source (if needed) and rewrite the INF lines.

        Raises UnsupportedArch, NoSourceFile or UnsupportedConversion on
        failure.  With IgnoreMissingAsm, a missing .asm source is tolerated
        (used when another INF already triggered the conversion).
        """
        infPath = os.path.split(os.path.realpath(self.inf))[0]
        asmSrc = os.path.splitext(dst)[0] + '.asm'
        fullSrc = os.path.join(infPath, asmSrc)
        fullDst = os.path.join(infPath, dst)
        srcParentDir = os.path.basename(os.path.split(fullSrc)[0])
        if srcParentDir.lower() in UnsupportedArch.unsupported:
            raise UnsupportedArch
        elif not os.path.exists(fullSrc):
            if not IgnoreMissingAsm:
                raise NoSourceFile
        else:  # not os.path.exists(fullDst):
            conv = ConvertAsmFile(fullSrc, fullDst, self)
            self.unsupportedSyntaxSeen = conv.unsupportedSyntaxSeen

        fileChanged = False
        recentSources = list()
        i = 0
        while i < len(self.lines):
            line = self.lines[i].rstrip()
            updatedLine = line
            lineChanged = False
            preserveOldSource = False
            for src in self.dstToSrc[dst]:
                assert self.srcToDst[src] == dst
                updatedLine = self.ReplacePreserveSpacing(
                    updatedLine, src, dst)
                lineChanged = updatedLine != line
                if lineChanged:
                    preserveOldSource = self.ShouldKeepFile(src)
                    break
            if lineChanged:
                if preserveOldSource:
                    # Keep the old source line; insert the .nasm line above it.
                    if updatedLine.strip() not in recentSources:
                        self.lines.insert(i, updatedLine + '\n')
                        recentSources.append(updatedLine.strip())
                        i += 1
                        if self.diff:
                            print('+%s' % updatedLine)
                    if self.diff:
                        print('', line)
                else:
                    # Replace the old source line; drop duplicate entries.
                    if self.diff:
                        print('-%s' % line)
                    if updatedLine.strip() in recentSources:
                        self.lines[i] = None
                    else:
                        self.lines[i] = updatedLine + '\n'
                        recentSources.append(updatedLine.strip())
                        if self.diff:
                            print('+%s' % updatedLine)
            else:
                if len(recentSources) > 0:
                    recentSources = list()
                if self.diff:
                    print('', line)
            fileChanged |= lineChanged
            i += 1

        if fileChanged:
            self.lines = list(filter(lambda l: l is not None, self.lines))

        # Remove converted non-.asm sources (.s) that are no longer referenced.
        for src in self.dstToSrc[dst]:
            if not src.endswith('.asm'):
                fullSrc = os.path.join(infPath, src)
                if os.path.exists(fullSrc):
                    self.RemoveFile(fullSrc)

        if fileChanged:
            f = io.open(self.inf, 'w', newline='\r\n')
            f.writelines(self.lines)
            f.close()
            self.FileUpdated(self.inf)

    def ConversionFinished(self, dst):
        """Record a completed conversion for commit-message bookkeeping."""
        asmSrc = os.path.splitext(dst)[0] + '.asm'
        self.FileConversionFinished(
            self.packageName, self.moduleName, asmSrc, dst)
class ConvertInfFiles(CommonUtils):
    """Converts the sources of many INF files, de-duplicating shared outputs.

    Several INFs may reference the same assembly file; conversions are
    grouped by full destination path so each file is converted only once.
    """
    def __init__(self, infs, clone):
        CommonUtils.__init__(self, clone)
        infs = map(lambda i: ConvertInfFile(i, self), infs)
        infs = filter(lambda i: len(i) > 0, infs)
        # Map: full destination path -> list of (inf, INF-relative dst) pairs.
        dstToInfs = {'order': []}
        for inf in infs:
            for dst in inf:
                fulldst = os.path.realpath(os.path.join(inf.dir, dst))
                pair = (inf, dst)
                if fulldst in dstToInfs:
                    dstToInfs[fulldst].append(pair)
                else:
                    dstToInfs['order'].append(fulldst)
                    dstToInfs[fulldst] = [pair]
        notConverted = []
        unsupportedArchCount = 0
        for dst in dstToInfs['order']:
            didSomething = False
            try:
                # Only the first conversion needs the .asm source present;
                # later INFs just update their source lists.
                # NOTE: the except/else clauses below rely on 'inf'/'reldst'
                # retaining their values from the last loop iteration.
                for inf, reldst in dstToInfs[dst]:
                    inf.UpdateInfAsmFile(reldst, IgnoreMissingAsm=didSomething)
                    didSomething = True
            except UnsupportedConversion:
                if not self.args.quiet:
                    print('MASM=>NASM conversion unsupported for', reldst)
                notConverted.append(dst)
            except NoSourceFile:
                if not self.args.quiet:
                    print('Source file missing for', reldst)
                notConverted.append(dst)
            except UnsupportedArch:
                unsupportedArchCount += 1
            else:
                if didSomething:
                    inf.ConversionFinished(reldst)
        if len(notConverted) > 0 and not self.args.quiet:
            for dst in notConverted:
                reldst = self.RootRelative(dst)
                print('Unabled to convert', reldst)
        if unsupportedArchCount > 0 and not self.args.quiet:
            print('Skipped', unsupportedArchCount, 'files based on architecture')
class ConvertDirectories(CommonUtils):
    """Recursively converts every INF file found under the given directories."""

    def __init__(self, paths, clone):
        CommonUtils.__init__(self, clone)
        self.paths = paths
        self.ConvertInfAndAsmFiles()

    def ConvertInfAndAsmFiles(self):
        """Collect all .inf files beneath self.paths and convert them."""
        for path in self.paths:
            assert os.path.exists(path)
        infs = []
        for path in self.paths:
            for root, dirs, files in os.walk(path):
                # Skip version-control metadata directories.
                for scmDir in ('.svn', '.git'):
                    if scmDir in dirs:
                        dirs.remove(scmDir)
                for name in files:
                    if name.lower().endswith('.inf'):
                        infs.append(os.path.realpath(os.path.join(root, name)))
        ConvertInfFiles(infs, self)
class ConvertAsmApp(CommonUtils):
    """Command-line entry point: dispatches to the requested conversion mode."""

    def __init__(self):
        CommonUtils.__init__(self)
        src = self.args.source
        dst = self.args.dest
        if self.infmode:
            ConvertInfFiles((src,), self)
        elif self.dirmode:
            ConvertDirectories((src,), self)
        else:
            # Plain file mode: convert a single .asm source to .nasm.
            # (Was 'elif not self.dirmode', which is always true at this point.)
            ConvertAsmFile(src, dst, self)

ConvertAsmApp()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/ConvertMasmToNasm.py
|
## @file
# Convert .uni string files between UTF-16 and UTF-8 encodings
#
# Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
VersionNumber = '0.1'
__copyright__ = "Copyright (c) 2015, Intel Corporation All rights reserved."
import argparse
import codecs
import os
import sys
class ConvertOneArg:
    """Converts utf-16 to utf-8 for one command line argument.

    This could be a single file, or a directory.
    """

    def __init__(self, utf8, source):
        self.utf8 = utf8
        self.source = source
        self.ok = True
        if not os.path.exists(source):
            self.ok = False
        elif os.path.isdir(source):
            # Convert every .uni file beneath the directory, stopping at the
            # first failure.
            for root, dirs, files in os.walk(source):
                uniFiles = [name for name in files if name.endswith('.uni')]
                for filename in uniFiles:
                    self.ok &= self.convert_one_file(os.path.join(root, filename))
                    if not self.ok:
                        break
                if not self.ok:
                    break
        else:
            self.ok &= self.convert_one_file(source)

    def convert_one_file(self, source):
        """Re-encode *source* in place; returns True on success or no-op."""
        if self.utf8:
            new_enc, old_enc = 'utf-8', 'utf-16'
        else:
            new_enc, old_enc = 'utf-16', 'utf-8'

        # Read the raw file content.
        with open(source, mode='rb') as in_file:
            file_content = in_file.read()

        # A UTF-16 byte order mark means the file is currently utf-16; skip
        # the conversion if the file is already in the target encoding.
        bom = (file_content.startswith(codecs.BOM_UTF16_BE) or
               file_content.startswith(codecs.BOM_UTF16_LE))
        if bom != self.utf8:
            print("%s: already %s" % (source, new_enc))
            return True

        # Decode with the old encoding and re-encode with the new one.
        new_content = file_content.decode(old_enc, 'ignore').encode(new_enc, 'ignore')

        # Write the converted data back to the same path.
        with open(source, mode='wb') as out_file:
            out_file.write(new_content)

        print(source + ": converted, size", len(file_content), '=>', len(new_content))
        return True
class ConvertUniApp:
    """Converts .uni files between utf-16 and utf-8."""

    def __init__(self):
        self.parse_options()
        self.ok = True
        for source in self.args.source:
            self.process_one_arg(source)
        # Shell-style exit status: 0 on success, -1 on any failure.
        self.retval = 0 if self.ok else -1

    def process_one_arg(self, arg):
        """Convert one path argument, accumulating success into self.ok."""
        self.ok &= ConvertOneArg(self.utf8, arg).ok

    def parse_options(self):
        """Parse the command line into self.args and self.utf8."""
        parser = argparse.ArgumentParser(description=__copyright__)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + VersionNumber)
        parser.add_argument('source', nargs='+',
                            help='[uni file | directory]')
        group = parser.add_mutually_exclusive_group()
        group.add_argument("--utf-8",
                           action="store_true",
                           help="Convert from utf-16 to utf-8 [default]")
        group.add_argument("--utf-16",
                           action="store_true",
                           help="Convert from utf-8 to utf-16")
        self.args = parser.parse_args()
        # utf-8 output is the default unless --utf-16 was requested.
        self.utf8 = not self.args.utf_16
# Script entry point: exit status reflects conversion success (0) or failure (-1).
if __name__ == "__main__":
    sys.exit(ConvertUniApp().retval)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/ConvertUni.py
|
## @file
# Get current UTC date and time information and output as ascii code.
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
VersionNumber = '0.1'
import sys
import datetime
import argparse
def Main():
    """Parse command-line flags and print the requested UTC date/time fields.

    Each requested value is printed character-reversed and hex-encoded per
    ASCII character, matching the format expected by the firmware build.
    """
    PARSER = argparse.ArgumentParser(
        description='Retrieves UTC date and time information (output ordering: year, date, time) - Version ' + VersionNumber)
    PARSER.add_argument('--year',
                        action='store_true',
                        help='Return UTC year of now. [Example output (2019): 39313032]')
    PARSER.add_argument('--date',
                        action='store_true',
                        help='Return UTC date MMDD of now. [Example output (7th August): 37303830]')
    PARSER.add_argument('--time',
                        action='store_true',
                        help='Return 24-hour-format UTC time HHMM of now. [Example output (14:25): 35323431]')
    ARGS = PARSER.parse_args()
    if len(sys.argv) == 1:
        print("ERROR: At least one argument is required!\n")
        PARSER.print_help()
        # BUGFIX: previously fell through after printing help; without any
        # flag there is nothing to print, so stop here.
        return

    # NOTE: datetime.utcnow() is deprecated in Python 3.12+; kept as-is for
    # compatibility with the interpreter versions this tree supports.
    today = datetime.datetime.utcnow()
    if ARGS.year:
        # e.g. 2019 -> '9102' -> per-char hex -> '39313032'
        ReversedNumber = str(today.year)[::-1]
        print(''.join(hex(ord(HexString))[2:] for HexString in ReversedNumber))
    if ARGS.date:
        ReversedNumber = str(today.strftime("%m%d"))[::-1]
        print(''.join(hex(ord(HexString))[2:] for HexString in ReversedNumber))
    if ARGS.time:
        ReversedNumber = str(today.strftime("%H%M"))[::-1]
        print(''.join(hex(ord(HexString))[2:] for HexString in ReversedNumber))
# Script entry point.
if __name__ == '__main__':
    Main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/GetUtcDateTime.py
|
##
# Generate symbal for memory profile info.
#
# This tool depends on DIA2Dump.exe (VS) or nm (gcc) to parse debug entry.
#
# Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
from __future__ import print_function
import os
import re
import sys
from optparse import OptionParser
versionNumber = "1.1"
__copyright__ = "Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved."
class Symbols:
    """Maps RVAs within one driver to function/source/line information.

    The table is populated from either 'nm -l' output (GCC toolchains) or
    DIA2Dump output (Visual Studio PDBs).
    """

    def __init__(self):
        # Sorted list of [rva, functionName, lineNumber, sourceFile] records.
        self.listLineAddress = []
        self.pdbName = ""
        # Cache for the current function name (DIA2Dump parsing).
        self.functionName = ""
        # Cache for the current source file name (DIA2Dump parsing).
        self.sourceName = ""

    def getSymbol(self, rva):
        """Return ' (func() - source:line)' for *rva*, or ' (unknown)'.

        Requires parse_debug_file()/parse_pdb_file() to have populated the
        table (self.listLineAddress sorted, self.lineCount set).
        """
        index = 0
        while index + 1 < self.lineCount:
            # Find the record whose [start, next-start) range contains rva.
            if self.listLineAddress[index][0] <= rva and self.listLineAddress[index + 1][0] > rva:
                functionName = self.listLineAddress[index][1]
                lineName = self.listLineAddress[index][2]
                sourceName = self.listLineAddress[index][3]
                if lineName == 0:
                    # No line-number information for this symbol.
                    return " (" + functionName + "() - " + ")"
                else:
                    return " (" + functionName + "() - " + sourceName + ":" + str(lineName) + ")"
            index += 1
        return " (unknown)"

    def parse_debug_file(self, driverName, pdbName):
        """Populate the table by running 'nm -l' on *pdbName* (ELF/DWARF)."""
        # BUGFIX: comparisons in this class previously used the Python 2 only
        # cmp() builtin, which raises NameError on Python 3.
        if pdbName == "":
            return
        self.pdbName = pdbName
        try:
            nmCommand = "nm"
            nmLineOption = "-l"
            print("parsing (debug) - " + pdbName)
            os.system('%s %s %s > nmDump.line.log' % (nmCommand, nmLineOption, pdbName))
        except Exception:
            print('ERROR: nm command not available. Please verify PATH')
            return
        #
        # parse line
        #
        linefile = open("nmDump.line.log")
        reportLines = linefile.readlines()
        linefile.close()
        # Example:
        # 000113ca T AllocatePool c:\home\edk-ii\MdePkg\Library\UefiMemoryAllocationLib\MemoryAllocationLib.c:399
        patchLineFileMatchString = "([0-9a-fA-F]*)\s+[T|D|t|d]\s+(\w+)\s*((?:[a-zA-Z]:)?[\w+\-./_a-zA-Z0-9\\\\]*):?([0-9]*)"
        for reportLine in reportLines:
            match = re.match(patchLineFileMatchString, reportLine)
            if match is not None:
                rva = int(match.group(1), 16)
                functionName = match.group(2)
                sourceName = match.group(3)
                if match.group(4) != "":
                    lineName = int(match.group(4))
                else:
                    lineName = 0
                self.listLineAddress.append([rva, functionName, lineName, sourceName])
        self.lineCount = len(self.listLineAddress)
        self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress: symbolAddress[0])

    def parse_pdb_file(self, driverName, pdbName):
        """Populate the table by running DIA2Dump.exe on a Visual Studio PDB."""
        if pdbName == "":
            return
        self.pdbName = pdbName
        try:
            DIA2DumpCommand = "Dia2Dump.exe"
            DIA2LinesOption = "-l"
            print("parsing (pdb) - " + pdbName)
            os.system('%s %s %s > DIA2Dump.line.log' % (DIA2DumpCommand, DIA2LinesOption, pdbName))
        except Exception:
            print('ERROR: DIA2Dump command not available. Please verify PATH')
            return
        #
        # parse line; example DIA2Dump output:
        # ** GetDebugPrintErrorLevel
        #   line 32 at [0000C790][0001:0000B790], len = 0x3 c:\...\basedebugprinterrorlevellib.c (MD5: ...)
        #   line 36 at [0000C793][0001:0000B793], len = 0x5
        #
        linefile = open("DIA2Dump.line.log")
        reportLines = linefile.readlines()
        linefile.close()
        patchLineFileMatchString = "\s+line ([0-9]+) at \[([0-9a-fA-F]{8})\]\[[0-9a-fA-F]{4}\:[0-9a-fA-F]{8}\], len = 0x[0-9a-fA-F]+\s*([\w+\-\:./_a-zA-Z0-9\\\\]*)\s*"
        patchLineFileMatchStringFunc = "\*\*\s+(\w+)\s*"
        for reportLine in reportLines:
            match = re.match(patchLineFileMatchString, reportLine)
            if match is not None:
                # The source file only appears on the first line of each run;
                # cache it for the following lines.
                if match.group(3) != "":
                    self.sourceName = match.group(3)
                sourceName = self.sourceName
                functionName = self.functionName
                rva = int(match.group(2), 16)
                lineName = int(match.group(1))
                self.listLineAddress.append([rva, functionName, lineName, sourceName])
            else:
                match = re.match(patchLineFileMatchStringFunc, reportLine)
                if match is not None:
                    self.functionName = match.group(1)
        self.lineCount = len(self.listLineAddress)
        self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress: symbolAddress[0])
class SymbolsFile:
    # Container for all parsed symbol data, keyed by driver name.
    def __init__(self):
        # Maps driver name -> Symbols instance parsed from its debug info.
        self.symbolsTable = {}
# Module-level parsing state shared by processLine() and getSymbolName().
symbolsFile = ""
driverName = ""
rvaName = ""
symbolName = ""
def getSymbolName(driverName, rva):
    """Resolve *rva* within *driverName* to a ' (func() - file:line)' suffix.

    Falls back to ' (???)' when the driver is unknown or its symbol data
    failed to parse.
    """
    global symbolsFile
    try:
        symbolList = symbolsFile.symbolsTable[driverName]
        if symbolList is not None:
            return symbolList.getSymbol(rva)
        return " (???)"
    except Exception:
        # Unknown driver, or the symbol table was never populated.
        return " (???)"
def processLine(newline):
    """Annotate one line of MemoryProfileInfo output with symbol information.

    Tracks the current driver ("Driver - ..." lines, which also trigger
    symbol parsing) and appends a resolved symbol name to call-entry lines
    (marked with '<==').  Returns the possibly-annotated line.
    """
    # BUGFIX: replaced the Python 2 only cmp() builtin (NameError on Python 3)
    # with ordinary equality comparisons throughout.
    global driverName
    global rvaName
    driverPrefixLen = len("Driver - ")
    # get driver name
    if newline[0:driverPrefixLen] == "Driver - ":
        driverlineList = newline.split(" ")
        driverName = driverlineList[2]
        # EDKII application output
        pdbMatchString = "Driver - \w* \(Usage - 0x[0-9a-fA-F]+\) \(Pdb - ([:\-.\w\\\\/]*)\)\s*"
        pdbName = ""
        match = re.match(pdbMatchString, newline)
        if match is not None:
            pdbName = match.group(1)
        symbolsFile.symbolsTable[driverName] = Symbols()
        # Dispatch on the debug-file extension: PDB vs. ELF/DWARF debug info.
        if pdbName[-3:] == "pdb":
            symbolsFile.symbolsTable[driverName].parse_pdb_file(driverName, pdbName)
        else:
            symbolsFile.symbolsTable[driverName].parse_debug_file(driverName, pdbName)
    elif newline == "":
        driverName = ""

    # check entry line
    if newline.find("<==") != -1:
        entry_list = newline.split(" ")
        rvaName = entry_list[4]
        symbolName = getSymbolName(driverName, int(rvaName, 16))
    else:
        rvaName = ""
        symbolName = ""

    if rvaName == "":
        return newline
    return newline + symbolName
def myOptionParser():
    """Parse command line options; applies the default output file name."""
    usage = "%prog [--version] [-h] [--help] [-i inputfile [-o outputfile]]"
    parser = OptionParser(usage=usage, description=__copyright__,
                          version="%prog " + str(versionNumber))
    parser.add_option("-i", "--inputfile", dest="inputfilename", type="string",
                      help="The input memory profile info file output from MemoryProfileInfo application in MdeModulePkg")
    parser.add_option("-o", "--outputfile", dest="outputfilename", type="string",
                      help="The output memory profile info file with symbol, MemoryProfileInfoSymbol.txt will be used if it is not specified")
    options, _ = parser.parse_args()
    if options.inputfilename is None:
        parser.error("no input file specified")
    if options.outputfilename is None:
        options.outputfilename = "MemoryProfileInfoSymbol.txt"
    return options
def main():
    """Annotate the memory profile dump with symbols and write the result.

    Returns 1 when either file cannot be opened; None (success) otherwise.
    """
    global symbolsFile
    global Options
    Options = myOptionParser()
    symbolsFile = SymbolsFile()
    try:
        file = open(Options.inputfilename)
    except Exception:
        print("fail to open " + Options.inputfilename)
        return 1
    try:
        newfile = open(Options.outputfilename, "w")
    except Exception:
        print("fail to open " + Options.outputfilename)
        return 1
    try:
        for line in file:
            # Strip the trailing newline, annotate, then re-terminate.
            newfile.write(processLine(line[:-1]) + "\n")
    finally:
        file.close()
        newfile.close()
# Script entry point: exit status is main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/MemoryProfileSymbolGen.py
|
## @file
# Set up the git configuration for contributing to TianoCore projects
#
# Copyright (c) 2019, Linaro Ltd. All rights reserved.<BR>
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
import argparse
import os.path
import re
import sys
try:
import git
except ImportError:
print('Unable to load gitpython module - please install and try again.')
sys.exit(1)
try:
# Try Python 2 'ConfigParser' module first since helpful lib2to3 will
# otherwise automagically load it with the name 'configparser'
import ConfigParser
except ImportError:
# Otherwise, try loading the Python 3 'configparser' under an alias
try:
import configparser as ConfigParser
except ImportError:
print("Unable to load configparser/ConfigParser module - please install and try again!")
sys.exit(1)
# Assumptions: Script is in edk2/BaseTools/Scripts,
#              templates in edk2/BaseTools/Conf
CONFDIR = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                       'Conf')
# Known TianoCore upstreams: 'repo' is fuzzy-matched against the origin URL,
# 'list' is the review mailing list, and 'prefix' (if present) is prepended
# to patch subject lines.
UPSTREAMS = [
    {'name': 'edk2',
     'repo': 'https://github.com/tianocore/edk2.git',
     'list': 'devel@edk2.groups.io'},
    {'name': 'edk2-platforms',
     'repo': 'https://github.com/tianocore/edk2-platforms.git',
     'list': 'devel@edk2.groups.io', 'prefix': 'edk2-platforms'},
    {'name': 'edk2-non-osi',
     'repo': 'https://github.com/tianocore/edk2-non-osi.git',
     'list': 'devel@edk2.groups.io', 'prefix': 'edk2-non-osi'},
    {'name': 'edk2-test',
     'repo': 'https://github.com/tianocore/edk2-test.git',
     'list': 'devel@edk2.groups.io', 'prefix': 'edk2-test'}
    ]
# The minimum version required for all of the below options to work
MIN_GIT_VERSION = (1, 9, 0)
# Set of options to be set identically for all repositories
OPTIONS = [
    {'section': 'am',           'option': 'keepcr',            'value': True},
    {'section': 'am',           'option': 'signoff',           'value': True},
    {'section': 'cherry-pick',  'option': 'signoff',           'value': True},
    {'section': 'color',        'option': 'diff',              'value': True},
    {'section': 'color',        'option': 'grep',              'value': 'auto'},
    {'section': 'commit',       'option': 'signoff',           'value': True},
    {'section': 'core',         'option': 'abbrev',            'value': 12},
    {'section': 'core',         'option': 'attributesFile',
     'value': os.path.join(CONFDIR, 'gitattributes').replace('\\', '/')},
    {'section': 'core',         'option': 'whitespace',        'value': 'cr-at-eol'},
    {'section': 'diff',         'option': 'algorithm',         'value': 'patience'},
    {'section': 'diff',         'option': 'orderFile',
     'value': os.path.join(CONFDIR, 'diff.order').replace('\\', '/')},
    {'section': 'diff',         'option': 'renames',           'value': 'copies'},
    {'section': 'diff',         'option': 'statGraphWidth',    'value': '20'},
    {'section': 'diff "ini"',   'option': 'xfuncname',
     'value': '^\\\\[[A-Za-z0-9_., ]+]'},
    {'section': 'format',       'option': 'coverLetter',       'value': True},
    {'section': 'format',       'option': 'numbered',          'value': True},
    {'section': 'format',       'option': 'signoff',           'value': False},
    {'section': 'log',          'option': 'mailmap',           'value': True},
    {'section': 'notes',        'option': 'rewriteRef',        'value': 'refs/notes/commits'},
    {'section': 'sendemail',    'option': 'chainreplyto',      'value': False},
    {'section': 'sendemail',    'option': 'thread',            'value': True},
    {'section': 'sendemail',    'option': 'transferEncoding',  'value': '8bit'},
    ]
def locate_repo():
    """Opens a Repo object for the current tree, searching upwards in the directory hierarchy."""
    try:
        return git.Repo(path='.', search_parent_directories=True)
    except (git.InvalidGitRepositoryError, git.NoSuchPathError):
        print("It doesn't look like we're inside a git repository - aborting.")
        sys.exit(2)
def fuzzy_match_repo_url(one, other):
    """Compares two repository URLs, ignoring protocol and optional trailing '.git'."""
    # Strip '<protocol>://' and any trailing '.git' from both URLs.
    first = re.match(r'.*://(?P<first>.*?)(\.git)*$', one)
    second = re.match(r'.*://(?P<second>.*?)(\.git)*$', other)
    if not (first and second):
        # At least one URL had no protocol prefix - no basis for a match.
        return False
    return first.group('first') == second.group('second')
def get_upstream(url, name):
    """Extracts the dict for the current repo origin.

    Matches by origin URL or by explicit repo name; exits when unknown.
    """
    match = next((upstream for upstream in UPSTREAMS
                  if (fuzzy_match_repo_url(upstream['repo'], url) or
                      upstream['name'] == name)),
                 None)
    if match is not None:
        return match
    print("Unknown upstream '%s' - aborting!" % url)
    sys.exit(3)
def check_versions():
    """Checks versions of dependencies, exiting if git is too old."""
    version = git.cmd.Git().version_info
    if version < MIN_GIT_VERSION:
        # BUGFIX: the message previously printed the *detected* version
        # (version[0], version[1]) as the requirement; report the minimum.
        print('Need git version %d.%d or later!' %
              (MIN_GIT_VERSION[0], MIN_GIT_VERSION[1]))
        sys.exit(4)
def write_config_value(repo, section, option, data):
    """Set *section*.*option* to *data* in the repository-local git config."""
    with repo.config_writer(config_level='repository') as configwriter:
        configwriter.set_value(section, option, data)
if __name__ == '__main__':
    check_versions()
    PARSER = argparse.ArgumentParser(
        description='Sets up a git repository according to TianoCore rules.')
    PARSER.add_argument('-c', '--check',
                        help='check current config only, printing what would be changed',
                        action='store_true',
                        required=False)
    PARSER.add_argument('-f', '--force',
                        help='overwrite existing settings conflicting with program defaults',
                        action='store_true',
                        required=False)
    # CONSISTENCY FIX: 'edk2-test' is declared in UPSTREAMS but was missing
    # from the accepted -n/--name choices.
    PARSER.add_argument('-n', '--name', type=str, metavar='repo',
                        choices=['edk2', 'edk2-platforms', 'edk2-non-osi',
                                 'edk2-test'],
                        help='set the repo name to configure for, if not '
                             'detected automatically',
                        required=False)
    PARSER.add_argument('-v', '--verbose',
                        help='enable more detailed output',
                        action='store_true',
                        required=False)
    ARGS = PARSER.parse_args()

    REPO = locate_repo()
    if REPO.bare:
        print('Bare repo - please check out an upstream one!')
        sys.exit(6)

    URL = REPO.remotes.origin.url
    UPSTREAM = get_upstream(URL, ARGS.name)
    if not UPSTREAM:
        print("Upstream '%s' unknown, aborting!" % URL)
        sys.exit(7)

    # Set a list email address if our upstream wants it
    if 'list' in UPSTREAM:
        OPTIONS.append({'section': 'sendemail', 'option': 'to',
                        'value': UPSTREAM['list']})
    # Append a subject prefix entry to OPTIONS if our upstream wants it
    if 'prefix' in UPSTREAM:
        OPTIONS.append({'section': 'format', 'option': 'subjectPrefix',
                        'value': "PATCH " + UPSTREAM['prefix']})

    CONFIG = REPO.config_reader(config_level='repository')
    for entry in OPTIONS:
        exists = False
        try:
            # Make sure to read boolean/int settings as real type rather than strings
            if isinstance(entry['value'], bool):
                value = CONFIG.getboolean(entry['section'], entry['option'])
            elif isinstance(entry['value'], int):
                value = CONFIG.getint(entry['section'], entry['option'])
            else:
                value = CONFIG.get(entry['section'], entry['option'])
            exists = True
        # Don't bail out from options not already being set
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass

        if exists:
            if value == entry['value']:
                if ARGS.verbose:
                    print("%s.%s already set (to '%s')" % (entry['section'],
                                                           entry['option'], value))
            else:
                # Conflicting value already present: only overwrite with -f.
                if ARGS.force:
                    write_config_value(REPO, entry['section'], entry['option'], entry['value'])
                else:
                    print("Not overwriting existing %s.%s value:" % (entry['section'],
                                                                     entry['option']))
                    print("  '%s' != '%s'" % (value, entry['value']))
                    print("  add '-f' to command line to force overwriting existing settings")
        else:
            print("%s.%s => '%s'" % (entry['section'], entry['option'], entry['value']))
            if not ARGS.check:
                write_config_value(REPO, entry['section'], entry['option'], entry['value'])
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/SetupGit.py
|
## @file
# Run a makefile as part of a PREBUILD or POSTBUILD action.
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
RunMakefile.py
'''
import os
import sys
import argparse
import subprocess
#
# Globals for help information
#
__prog__ = 'RunMakefile'
__version__ = '%s Version %s' % (__prog__, '1.0')
__copyright__ = 'Copyright (c) 2017, Intel Corporation. All rights reserved.'
__description__ = 'Run a makefile as part of a PREBUILD or POSTBUILD action.\n'
#
# Globals
#
# Parsed command-line arguments (argparse Namespace); assigned in __main__.
gArgs = None
def Log(Message):
    """Print an informational message, but only when --verbose is enabled."""
    if gArgs.Verbose:
        sys.stdout.write(__prog__ + ': ' + Message + '\n')
def Error(Message, ExitValue=1):
    """Print an error message to stderr and terminate with *ExitValue*."""
    sys.stderr.write(__prog__ + ': ERROR: ' + Message + '\n')
    sys.exit(ExitValue)
def RelativePath(target):
    """Return *target* relative to the workspace directory."""
    # NOTE(review): gWorkspace is not defined anywhere in this view; unless it
    # is assigned elsewhere in the script before this is called, this raises
    # NameError - confirm against the full file.
    return os.path.relpath (target, gWorkspace)
def NormalizePath(target):
  """Return *target* as a normalized filesystem path.

  A tuple argument is first joined into a single path with os.path.join;
  anything else is normalized as-is.
  """
  path = os.path.join(*target) if isinstance(target, tuple) else target
  return os.path.normpath(path)
if __name__ == '__main__':
  #
  # Create command line argument parser object
  #
  # FIX: the original passed version=__version__ to ArgumentParser(), which
  # is not a supported keyword in Python 3 argparse and raises TypeError at
  # startup.  The version string is now exposed via a '--version' argument.
  #
  parser = argparse.ArgumentParser (
    prog = __prog__,
    description = __description__ + __copyright__,
    conflict_handler = 'resolve'
  )
  parser.add_argument (
    '--version', action = 'version', version = __version__
  )
  parser.add_argument (
    '-a', '--arch', dest = 'Arch', nargs = '+', action = 'append',
    required = True,
    help = '''ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC,
              which overrides target.txt's TARGET_ARCH definition. To
              specify more archs, please repeat this option.'''
  )
  parser.add_argument (
    '-t', '--tagname', dest = 'ToolChain', required = True,
    help = '''Using the Tool Chain Tagname to build the platform,
              overriding target.txt's TOOL_CHAIN_TAG definition.'''
  )
  parser.add_argument (
    '-p', '--platform', dest = 'PlatformFile', required = True,
    help = '''Build the platform specified by the DSC file name argument,
              overriding target.txt's ACTIVE_PLATFORM definition.'''
  )
  parser.add_argument (
    '-b', '--buildtarget', dest = 'BuildTarget', required = True,
    help = '''Using the TARGET to build the platform, overriding
              target.txt's TARGET definition.'''
  )
  parser.add_argument (
    '--conf=', dest = 'ConfDirectory', required = True,
    help = '''Specify the customized Conf directory.'''
  )
  parser.add_argument (
    '-D', '--define', dest = 'Define', nargs='*', action = 'append',
    help = '''Macro: "Name [= Value]".'''
  )
  parser.add_argument (
    '--makefile', dest = 'Makefile', required = True,
    help = '''Makefile to run passing in arguments as makefile defines.'''
  )
  parser.add_argument (
    '-v', '--verbose', dest = 'Verbose', action = 'store_true',
    help = '''Turn on verbose output with informational messages printed'''
  )

  #
  # Parse command line arguments.  The first recognized build-type keyword
  # among the unparsed arguments selects the makefile target; everything
  # else is forwarded through the EXTRA_FLAGS define.
  #
  gArgs, remaining = parser.parse_known_args()
  gArgs.BuildType = 'all'
  for BuildType in ['all', 'fds', 'genc', 'genmake', 'clean', 'cleanall', 'modules', 'libraries', 'run']:
    if BuildType in remaining:
      gArgs.BuildType = BuildType
      remaining.remove(BuildType)
      break
  gArgs.Remaining = ' '.join(remaining)

  #
  # Start
  #
  Log ('Start')

  #
  # Find makefile in WORKSPACE or PACKAGES_PATH.  The leading '' entry lets
  # an absolute --makefile path match as-is.
  #
  PathList = ['']
  try:
    PathList.append(os.environ['WORKSPACE'])
  except KeyError:
    Error ('WORKSPACE environment variable not set')
  try:
    PathList += os.environ['PACKAGES_PATH'].split(os.pathsep)
  except KeyError:
    # PACKAGES_PATH is optional
    pass
  for Path in PathList:
    Makefile = NormalizePath((Path, gArgs.Makefile))
    if os.path.exists (Makefile):
      break
  if not os.path.exists(Makefile):
    Error ('makefile %s not found' % (gArgs.Makefile))

  #
  # Build command line arguments converting build arguments to makefile defines
  #
  CommandLine = [Makefile]
  CommandLine.append('TARGET_ARCH="%s"' % (' '.join([Item[0] for Item in gArgs.Arch])))
  CommandLine.append('TOOL_CHAIN_TAG="%s"' % (gArgs.ToolChain))
  CommandLine.append('TARGET="%s"' % (gArgs.BuildTarget))
  CommandLine.append('ACTIVE_PLATFORM="%s"' % (gArgs.PlatformFile))
  CommandLine.append('CONF_DIRECTORY="%s"' % (gArgs.ConfDirectory))
  if gArgs.Define:
    for Item in gArgs.Define:
      if '=' not in Item[0]:
        continue
      Item = Item[0].split('=', 1)
      CommandLine.append('%s="%s"' % (Item[0], Item[1]))
  CommandLine.append('EXTRA_FLAGS="%s"' % (gArgs.Remaining))
  CommandLine.append(gArgs.BuildType)
  if sys.platform == "win32":
    CommandLine = 'nmake /f %s' % (' '.join(CommandLine))
  else:
    CommandLine = 'make -f %s' % (' '.join(CommandLine))

  #
  # Run the makefile
  #
  try:
    Process = subprocess.Popen(CommandLine, shell=True)
  except OSError:
    Error ('make command not available. Please verify PATH')
  Process.communicate()

  #
  # Done
  #
  Log ('Done')

  #
  # Return status from running the makefile
  #
  sys.exit(Process.returncode)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/RunMakefile.py
|
# @file FormatDosFiles.py
# This script format the source files to follow dos style.
# It supports Python2.x and Python3.x both.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
# Import Modules
#
from __future__ import print_function
import argparse
import os
import os.path
import re
import sys
import copy
__prog__ = 'FormatDosFiles'
__version__ = '%s Version %s' % (__prog__, '0.10 ')
__copyright__ = 'Copyright (c) 2018-2019, Intel Corporation. All rights reserved.'
__description__ = 'Convert source files to meet the EDKII C Coding Standards Specification.\n'
DEFAULT_EXT_LIST = ['.h', '.c', '.nasm', '.nasmb', '.asm', '.S', '.inf', '.dec', '.dsc', '.fdf', '.uni', '.asl', '.aslc', '.vfr', '.idf', '.txt', '.bat', '.py']
#For working in python2 and python3 environment, re pattern should use binary string, which is bytes type in python3.
#Because in python3,read from file in binary mode will return bytes type,and in python3 bytes type can not be mixed with str type.
def FormatFile(FilePath, Args):
    """Rewrite FilePath in place so it follows DOS text conventions.

    Line endings become CRLF, whitespace before a line ending is stripped,
    tabs are expanded to two spaces, and a final newline is guaranteed.
    The path is echoed unless Args.Quiet is set.  All work is done on bytes
    so the same code runs under Python 2 and Python 3.
    """
    with open(FilePath, 'rb') as in_fd:
        data = in_fd.read()
    # Normalize every bare LF to CRLF (including empty lines).
    data = re.sub(br'([^\r])\n', br'\1\r\n', data)
    data = re.sub(br'^\n', br'\r\n', data, flags=re.MULTILINE)
    # Guarantee the file ends with a newline.
    data = re.sub(br'([^\r\n])$', br'\1\r\n', data)
    # Drop whitespace that immediately precedes a line ending.
    data = re.sub(br'[ \t]+(\r\n)', br'\1', data, flags=re.MULTILINE)
    # Expand each tab to two spaces.
    data = re.sub(b'\t', b'  ', data)
    with open(FilePath, 'wb') as out_fd:
        out_fd.write(data)
    if not Args.Quiet:
        print(FilePath)
def FormatFilesInDir(DirPath, ExtList, Args):
    """Walk DirPath and run FormatFile() on every file whose name ends with
    one of the extensions in ExtList, honoring the Args.Exclude filters
    (bare names, or paths relative to DirPath / absolute).
    """
    FileList = []
    # Remember the walk root: relative --exclude entries are resolved
    # against it below.
    ExcludeDir = DirPath
    for DirPath, DirNames, FileNames in os.walk(DirPath):
        if Args.Exclude:
            # First drop any directory/file whose bare name matches an
            # exclude entry (pruning DirNames in place stops the walk from
            # descending into them).
            DirNames[:] = [d for d in DirNames if d not in Args.Exclude]
            FileNames[:] = [f for f in FileNames if f not in Args.Exclude]
            Continue = False
            # Then handle exclude entries given as paths.
            for Path in Args.Exclude:
                Path = Path.strip('\\').strip('/')
                if not os.path.isdir(Path) and not os.path.isfile(Path):
                    Path = os.path.join(ExcludeDir, Path)
                if os.path.isdir(Path) and Path.endswith(DirPath):
                    # NOTE(review): this asks whether the *exclude* path ends
                    # with the directory currently being walked -- confirm the
                    # intended direction of the endswith comparison.
                    DirNames[:] = []
                    Continue = True
                elif os.path.isfile(Path):
                    FilePaths = FileNames
                    for ItemPath in FilePaths:
                        FilePath = os.path.join(DirPath, ItemPath)
                        if Path.endswith(FilePath):
                            FileNames.remove(ItemPath)
            if Continue:
                continue
        # Collect files that match one of the requested extensions.
        for FileName in [f for f in FileNames if any(f.endswith(ext) for ext in ExtList)]:
            FileList.append(os.path.join(DirPath, FileName))
    for File in FileList:
        FormatFile(File, Args)
if __name__ == "__main__":
    # Build the command line interface.
    cli = argparse.ArgumentParser(prog=__prog__, description=__description__ + __copyright__, conflict_handler = 'resolve')
    cli.add_argument('Path', nargs='+',
                     help='the path for files to be converted.It could be directory or file path.')
    cli.add_argument('--version', action='version', version=__version__)
    cli.add_argument('--append-extensions', dest='AppendExt', nargs='+',
                     help='append file extensions filter to default extensions. (Example: .txt .c .h)')
    cli.add_argument('--override-extensions', dest='OverrideExt', nargs='+',
                     help='override file extensions filter on default extensions. (Example: .txt .c .h)')
    cli.add_argument('-v', '--verbose', dest='Verbose', action='store_true',
                     help='increase output messages')
    cli.add_argument('-q', '--quiet', dest='Quiet', action='store_true',
                     help='reduce output messages')
    cli.add_argument('--debug', dest='Debug', type=int, metavar='[0-9]', choices=range(0, 10), default=0,
                     help='set debug level')
    cli.add_argument('--exclude', dest='Exclude', nargs='+', help="directory name or file name which will be excluded")
    options = cli.parse_args()
    # Work out the effective extension filter: start from the defaults,
    # apply --override-extensions, then merge in --append-extensions.
    extensions = copy.copy(DEFAULT_EXT_LIST)
    if options.OverrideExt is not None:
        extensions = options.OverrideExt
    if options.AppendExt is not None:
        extensions = list(set(extensions + options.AppendExt))
    # Convert each requested path, aborting on the first missing one.
    for target in options.Path:
        if not os.path.exists(target):
            print("not exists path: {0}".format(target))
            sys.exit(1)
        if os.path.isdir(target):
            FormatFilesInDir(target, extensions, options)
        elif os.path.isfile(target):
            FormatFile(target, options)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/FormatDosFiles.py
|
## @file
# Update build revisions of the tools when performing a developer build
#
# This script will modify the C/Include/Common/BuildVersion.h file and the two
# Python scripts, Python/Common/BuildVersion.py and Python/UPT/BuildVersion.py.
# If SVN is available, the tool will obtain the current checked out version of
# the source tree for including the --version commands.
# Copyright (c) 2014 - 2015, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
""" This program will update the BuildVersion.py and BuildVersion.h files used to set a tool's version value """
from __future__ import absolute_import
import os
import shlex
import subprocess
import sys
from argparse import ArgumentParser, SUPPRESS
from tempfile import NamedTemporaryFile
from types import IntType, ListType
SYS_ENV_ERR = "ERROR : %s system environment variable must be set prior to running this tool.\n"
__execname__ = "UpdateBuildVersions.py"
SVN_REVISION = "$LastChangedRevision: 3 $"
SVN_REVISION = SVN_REVISION.replace("$LastChangedRevision:", "").replace("$", "").strip()
__copyright__ = "Copyright (c) 2014, Intel Corporation. All rights reserved."
VERSION_NUMBER = "0.7.0"
__version__ = "Version %s.%s" % (VERSION_NUMBER, SVN_REVISION)
def ParseOptions():
    """
    Parse the command-line options.
    The options for this tool will be passed along to the MkBinPkg tool.
    """
    arg_parser = ArgumentParser(
        usage=("%s [options]" % __execname__),
        description=__copyright__,
        conflict_handler='resolve')
    # Standard Tool Options
    arg_parser.add_argument("--version", action="version",
                            version=__execname__ + " " + __version__)
    arg_parser.add_argument("-s", "--silent", action="store_true",
                            dest="silent",
                            help="All output will be disabled, pass/fail determined by the exit code")
    arg_parser.add_argument("-v", "--verbose", action="store_true",
                            dest="verbose",
                            help="Enable verbose output")
    # Tool specific options
    arg_parser.add_argument("--revert", action="store_true",
                            dest="REVERT", default=False,
                            help="Revert the BuildVersion files only")
    arg_parser.add_argument("--svn-test", action="store_true",
                            dest="TEST_SVN", default=False,
                            help="Test if the svn command is available")
    # Hidden flag used internally to carry the result of CheckSvn().
    arg_parser.add_argument("--svnFlag", action="store_true",
                            dest="HAVE_SVN", default=False,
                            help=SUPPRESS)
    return arg_parser.parse_args()
def ShellCommandResults(CmdLine, Opt):
    """ Execute the command, returning the output content """
    # Output is captured in a temp file rather than a pipe so that partial
    # results survive (and can be pointed at) if the command fails.
    # Returns a list of output lines on success, or a non-zero int error
    # code on failure.
    file_list = NamedTemporaryFile(delete=False)
    filename = file_list.name
    Results = []
    returnValue = 0
    try:
        subprocess.check_call(args=shlex.split(CmdLine), stderr=subprocess.STDOUT, stdout=file_list)
    except subprocess.CalledProcessError as err_val:
        # The command ran but exited with a non-zero status.
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("ERROR : %d : %s\n" % (err_val.returncode, err_val.__str__()))
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = err_val.returncode
    except IOError as err_val:
        (errno, strerror) = err_val.args
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("I/O ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = errno
    except OSError as err_val:
        (errno, strerror) = err_val.args
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("OS ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = errno
    except KeyboardInterrupt:
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = 1
    finally:
        # Flush and close the temp file, read back whatever was captured,
        # then delete the file.
        if not file_list.closed:
            file_list.flush()
            os.fsync(file_list.fileno())
            file_list.close()
        if os.path.exists(filename):
            fd_ = open(filename, 'r')
            Results = fd_.readlines()
            fd_.close()
            os.unlink(filename)
    if returnValue > 0:
        return returnValue
    return Results
def UpdateBuildVersionPython(Rev, UserModified, opts):
    """ This routine will update the BuildVersion.py files in the Python source tree """
    # (Original docstring wrongly said "BuildVersion.h files in the C source
    # tree" -- a copy/paste from UpdateBuildVersionH.)
    for SubDir in ["Common", "UPT"]:
        PyPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir)
        BuildVersionPy = os.path.join(PyPath, "BuildVersion.py")
        fd_ = open(os.path.normpath(BuildVersionPy), 'r')
        contents = fd_.readlines()
        fd_.close()
        if opts.HAVE_SVN is False:
            # No SVN to revert from later: save a backup copy of the
            # original file so CheckOriginals() can restore it.
            BuildVersionOrig = os.path.join(PyPath, "orig_BuildVersion.py")
            fd_ = open (BuildVersionOrig, 'w')
            for line in contents:
                fd_.write(line)
            fd_.flush()
            fd_.close()
        new_content = []
        for line in contents:
            # Rewrite the gBUILD_VERSION assignment with the current
            # revision; all other lines pass through unchanged.
            if line.strip().startswith("gBUILD_VERSION"):
                new_line = "gBUILD_VERSION = \"Developer Build based on Revision: %s\"" % Rev
                if UserModified:
                    new_line = "gBUILD_VERSION = \"Developer Build based on Revision: %s with Modified Sources\"" % Rev
                new_content.append(new_line)
                continue
            new_content.append(line)
        fd_ = open(os.path.normpath(BuildVersionPy), 'w')
        for line in new_content:
            fd_.write(line)
        fd_.close()
def UpdateBuildVersionH(Rev, UserModified, opts):
    """ This routine will update the BuildVersion.h files in the C source tree """
    CPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common")
    BuildVersionH = os.path.join(CPath, "BuildVersion.h")
    fd_ = open(os.path.normpath(BuildVersionH), 'r')
    contents = fd_.readlines()
    fd_.close()
    if opts.HAVE_SVN is False:
        # No SVN to revert from later: save a backup copy of the original
        # file so CheckOriginals() can restore it.
        BuildVersionOrig = os.path.join(CPath, "orig_BuildVersion.h")
        fd_ = open(BuildVersionOrig, 'w')
        for line in contents:
            fd_.write(line)
        fd_.flush()
        fd_.close()
    new_content = []
    for line in contents:
        # Rewrite the #define __BUILD_VERSION line with the current
        # revision; all other lines pass through unchanged.
        if line.strip().startswith("#define"):
            new_line = "#define __BUILD_VERSION \"Developer Build based on Revision: %s\"" % Rev
            if UserModified:
                new_line = "#define __BUILD_VERSION \"Developer Build based on Revision: %s with Modified Sources\"" % \
                    Rev
            new_content.append(new_line)
            continue
        new_content.append(line)
    fd_ = open(os.path.normpath(BuildVersionH), 'w')
    for line in new_content:
        fd_.write(line)
    fd_.close()
def RevertCmd(Filename, Opt):
    """Run 'svn revert' on Filename.

    Failures are reported on stderr (unless Opt.silent) and are not fatal;
    on success the action is echoed when Opt.verbose is set.
    """
    CmdLine = "svn revert %s" % Filename.replace("\\", "/").strip()
    try:
        subprocess.check_output(args=shlex.split(CmdLine))
    except subprocess.CalledProcessError as err_val:
        if not Opt.silent:
            sys.stderr.write("Subprocess ERROR : %s\n" % err_val)
            sys.stderr.flush()
    except IOError as err_val:
        (errno, strerror) = err_val.args
        if not Opt.silent:
            # FIX: use %s for errno -- str(errno) is a string, so the
            # original "%d" conversion raised TypeError whenever this
            # handler actually ran.  This also matches the formatting used
            # by ShellCommandResults().
            sys.stderr.write("I/O ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            sys.stderr.flush()
    except OSError as err_val:
        (errno, strerror) = err_val.args
        if not Opt.silent:
            # Same %d -> %s fix as the IOError handler above.
            sys.stderr.write("OS ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            sys.stderr.flush()
    except KeyboardInterrupt:
        if not Opt.silent:
            sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
            sys.stderr.flush()
    if Opt.verbose:
        sys.stdout.write("Reverted this file: %s\n" % Filename)
        sys.stdout.flush()
def GetSvnRevision(opts):
    """Return (revision, modified) for the BaseTools/Source tree.

    revision is the SVN revision string ('Unknown' when SVN is not
    available, 0 when 'svn info' fails) and modified is True when any
    tracked file has local edits.
    """
    Revision = "Unknown"
    Modified = False
    if opts.HAVE_SVN is False:
        sys.stderr.write("WARNING: the svn command-line tool is not available.\n")
        return (Revision, Modified)
    SrcPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source")
    # Check if there are modified files.
    Cwd = os.getcwd()
    os.chdir(SrcPath)
    StatusCmd = "svn st -v --depth infinity --non-interactive"
    contents = ShellCommandResults(StatusCmd, opts)
    os.chdir(Cwd)
    # ShellCommandResults() returns a list of output lines on success and an
    # int error code on failure.  FIX: isinstance checks against the
    # built-in list/int types replace the Python 2-only
    # types.ListType/types.IntType names, which do not exist on Python 3.
    if isinstance(contents, list):
        for line in contents:
            if line.startswith("M "):
                Modified = True
                break
    # Get the repository revision of BaseTools/Source
    InfoCmd = "svn info %s" % SrcPath.replace("\\", "/").strip()
    Revision = 0
    contents = ShellCommandResults(InfoCmd, opts)
    if isinstance(contents, int):
        return 0, Modified
    for line in contents:
        line = line.strip()
        if line.startswith("Revision:"):
            Revision = line.replace("Revision:", "").strip()
            break
    return (Revision, Modified)
def CheckSvn(opts):
    """
    This routine will return True if an svn --version command succeeds, or False if it fails.
    If it failed, SVN is not available.
    """
    # Probe silently; restore the caller's silent setting afterwards.
    OriginalSilent = opts.silent
    opts.silent = True
    VerCmd = "svn --version"
    contents = ShellCommandResults(VerCmd, opts)
    opts.silent = OriginalSilent
    # An int result is the failing exit status; a list is the tool's output.
    # FIX: isinstance against the built-in int replaces the Python 2-only
    # types.IntType name, which does not exist on Python 3.
    if isinstance(contents, int):
        if opts.verbose:
            sys.stdout.write("SVN does not appear to be available.\n")
            sys.stdout.flush()
        return False
    if opts.verbose:
        sys.stdout.write("Found %s" % contents[0])
        sys.stdout.flush()
    return True
def CopyOrig(Src, Dest, Opt):
    """Overwrite Dest with the content of Src, then delete Src.

    Returns 0 on success, 1 when reading or writing failed (reported on
    stderr unless Opt.silent).
    """
    try:
        with open(Src, 'r') as src_fd:
            saved_lines = src_fd.readlines()
        with open(Dest, 'w') as dest_fd:
            dest_fd.writelines(saved_lines)
    except IOError:
        if not Opt.silent:
            sys.stderr.write("Unable to restore this file: %s\n" % Dest)
            sys.stderr.flush()
        return 1
    os.remove(Src)
    if Opt.verbose:
        sys.stdout.write("Restored this file: %s\n" % Src)
        sys.stdout.flush()
    return 0
def CheckOriginals(Opts):
    """
    If SVN was not available, then the tools may have made copies of the original BuildVersion.* files using
    orig_BuildVersion.* for the name. If they exist, replace the existing BuildVersion.* file with the corresponding
    orig_BuildVersion.* file.
    Returns 0 if this succeeds, or 1 if the copy function fails. It will also return 0 if the orig_BuildVersion.* file
    does not exist.
    """
    # Restore the C header first.
    CPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common")
    BuildVersionH = os.path.join(CPath, "BuildVersion.h")
    OrigBuildVersionH = os.path.join(CPath, "orig_BuildVersion.h")
    if not os.path.exists(OrigBuildVersionH):
        return 0
    if CopyOrig(OrigBuildVersionH, BuildVersionH, Opts):
        return 1
    # Restore the two Python copies.
    # FIX: the original joined "BuildVersion.h" / "orig_BuildVersion.h"
    # here, so the Python trees were probed for the wrong (C header)
    # filenames and were never restored.
    for SubDir in ["Common", "UPT"]:
        PyPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir)
        BuildVersionPy = os.path.join(PyPath, "BuildVersion.py")
        OrigBuildVersionPy = os.path.join(PyPath, "orig_BuildVersion.py")
        if not os.path.exists(OrigBuildVersionPy):
            return 0
        if CopyOrig(OrigBuildVersionPy, BuildVersionPy, Opts):
            return 1
    return 0
def RevertBuildVersionFiles(opts):
    """
    Restore the three generated BuildVersion.* files.

    Without SVN the saved orig_BuildVersion.* copies are restored via
    CheckOriginals(); with SVN each file is reverted with 'svn revert'.
    Returns 0 on success, 1 when restoring a saved copy failed.
    """
    if not opts.HAVE_SVN:
        if CheckOriginals(opts):
            return 1
        return 0
    # SVN is available
    BuildVersionH = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common", "BuildVersion.h")
    RevertCmd(BuildVersionH, opts)
    for SubDir in ["Common", "UPT"]:
        BuildVersionPy = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir, "BuildVersion.py")
        RevertCmd(BuildVersionPy, opts)
    # FIX: the SVN path previously fell off the end and returned None;
    # return 0 for a consistent integer status on both paths.
    return 0
def UpdateRevisionFiles():
    """ Main routine that will update the BuildVersion.py and BuildVersion.h files."""
    # Returns 0 on success, 1 on a bad environment; for --svn-test the
    # return value is the inverted availability flag (0 == svn found).
    options = ParseOptions()
    # Check the working environment
    if "WORKSPACE" not in os.environ.keys():
        sys.stderr.write(SYS_ENV_ERR % 'WORKSPACE')
        return 1
    if 'BASE_TOOLS_PATH' not in os.environ.keys():
        sys.stderr.write(SYS_ENV_ERR % 'BASE_TOOLS_PATH')
        return 1
    if not os.path.exists(os.environ['BASE_TOOLS_PATH']):
        sys.stderr.write("Unable to locate the %s directory." % os.environ['BASE_TOOLS_PATH'])
        return 1
    options.HAVE_SVN = CheckSvn(options)
    if options.TEST_SVN:
        return (not options.HAVE_SVN)
    # done processing the option, now use the option.HAVE_SVN as a flag. True = Have it, False = Don't have it.
    if options.REVERT:
        # Just revert the tools and exit
        RevertBuildVersionFiles(options)
    else:
        # Revert any changes in the BuildVersion.* files before setting them again.
        RevertBuildVersionFiles(options)
        Revision, Modified = GetSvnRevision(options)
        if options.verbose:
            sys.stdout.write("Revision: %s is Modified: %s\n" % (Revision, Modified))
            sys.stdout.flush()
        UpdateBuildVersionH(Revision, Modified, options)
        UpdateBuildVersionPython(Revision, Modified, options)
    return 0
if __name__ == "__main__":
    # Script entry point: exit with UpdateRevisionFiles() status (0 == ok).
    sys.exit(UpdateRevisionFiles())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/UpdateBuildVersions.py
|
## @file
# Check a patch for various format issues
#
# Copyright (c) 2015 - 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (C) 2020, Red Hat, Inc.<BR>
# Copyright (c) 2020, ARM Ltd. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
VersionNumber = '0.1'
__copyright__ = "Copyright (c) 2015 - 2016, Intel Corporation All rights reserved."
import email
import argparse
import os
import re
import subprocess
import sys
import email.header
class Verbose:
    # Output verbosity levels: SILENT suppresses all output, ONELINE prints
    # a single summary line per patch, NORMAL prints full details.
    SILENT, ONELINE, NORMAL = range(3)
    # Module-wide verbosity shared by every checker class.
    level = NORMAL
class EmailAddressCheck:
    """Checks an email address.

    self.ok is True when the address passed every check; failures are
    reported through self.error().
    """
    def __init__(self, email, description):
        self.ok = True
        # FIX: set the description up front.  self.error() formats
        # self.description into every message, but the original assigned it
        # only after the None checks, so a missing email or description
        # raised AttributeError inside error() instead of reporting.
        if description is None:
            self.description = "'unknown'"
        else:
            self.description = "'" + description + "'"
        if email is None:
            self.error('Email address is missing!')
            return
        if description is None:
            self.error('Email description is missing!')
            return
        self.check_email_address(email)
    def error(self, *err):
        # Print the heading only on the first failure; later failures just
        # append their detail lines.
        if self.ok and Verbose.level > Verbose.ONELINE:
            print('The ' + self.description + ' email address is not valid:')
        self.ok = False
        if Verbose.level < Verbose.NORMAL:
            return
        count = 0
        for line in err:
            prefix = (' *', '  ')[count > 0]
            print(prefix, line)
            count += 1
    # Matches "Display Name <addr>"; group(1)=name, group(2)=spacing
    # before '<', group(3)=address.
    email_re1 = re.compile(r'(?:\s*)(.*?)(\s*)<(.+)>\s*$',
                           re.MULTILINE|re.IGNORECASE)
    def check_email_address(self, email):
        email = email.strip()
        mo = self.email_re1.match(email)
        if mo is None:
            self.error("Email format is invalid: " + email.strip())
            return
        name = mo.group(1).strip()
        if name == '':
            self.error("Name is not provided with email address: " +
                       email)
        else:
            quoted = len(name) > 2 and name[0] == '"' and name[-1] == '"'
            if name.find(',') >= 0 and not quoted:
                self.error('Add quotes (") around name with a comma: ' +
                           name)
        if mo.group(2) == '':
            self.error("There should be a space between the name and " +
                       "email address: " + email)
        if mo.group(3).find(' ') >= 0:
            self.error("The email address cannot contain a space: " +
                       mo.group(3))
        if ' via Groups.Io' in name and mo.group(3).endswith('@groups.io'):
            self.error("Email rewritten by lists DMARC / DKIM / SPF: " +
                       email)
class CommitMessageCheck:
    """Checks the contents of a git commit message."""
    def __init__(self, subject, message, author_email):
        self.ok = True
        if subject is None and message is None:
            self.error('Commit message is missing!')
            return
        # Merge commits created by the Mergify bot are exempt from the
        # Signed-off-by requirement below.
        MergifyMerge = False
        if "mergify[bot]@users.noreply.github.com" in author_email:
            if "Merge branch" in subject:
                MergifyMerge = True
        self.subject = subject
        self.msg = message
        # NOTE(review): this echoes the subject unconditionally, regardless
        # of Verbose.level -- it looks like leftover debug output; confirm.
        print (subject)
        self.check_contributed_under()
        if not MergifyMerge:
            self.check_signed_off_by()
        self.check_misc_signatures()
        self.check_overall_format()
        self.report_message_result()
    url = 'https://github.com/tianocore/tianocore.github.io/wiki/Commit-Message-Format'
    def report_message_result(self):
        if Verbose.level < Verbose.NORMAL:
            return
        if self.ok:
            # All checks passed
            # NOTE(review): return_code is assigned but never used or
            # returned by this method.
            return_code = 0
            print('The commit message format passed all checks.')
        else:
            return_code = 1
        if not self.ok:
            print(self.url)
    def error(self, *err):
        # Print the heading only on the first failure; later failures just
        # append their detail lines.
        if self.ok and Verbose.level > Verbose.ONELINE:
            print('The commit message format is not valid:')
        self.ok = False
        if Verbose.level < Verbose.NORMAL:
            return
        count = 0
        for line in err:
            prefix = (' *', '  ')[count > 0]
            print(prefix, line)
            count += 1
    # Find 'contributed-under:' at the start of a line ignoring case and
    # requires ':' to be present. Matches if there is white space before
    # the tag or between the tag and the ':'.
    contributed_under_re = \
        re.compile(r'^\s*contributed-under\s*:', re.MULTILINE|re.IGNORECASE)
    def check_contributed_under(self):
        match = self.contributed_under_re.search(self.msg)
        if match is not None:
            self.error('Contributed-under! (Note: this must be ' +
                       'removed by the code contributor!)')
    @staticmethod
    def make_signature_re(sig, re_input=False):
        # Build a regex matching "<sig>: <value>" at line start; unless
        # re_input is set, '-' in the tag also matches whitespace so that
        # mis-spaced variants (e.g. "Signed off by") are still caught.
        if re_input:
            sub_re = sig
        else:
            sub_re = sig.replace('-', r'[-\s]+')
        re_str = (r'^(?P<tag>' + sub_re +
                  r')(\s*):(\s*)(?P<value>\S.*?)(?:\s*)$')
        try:
            return re.compile(re_str, re.MULTILINE|re.IGNORECASE)
        except Exception:
            print("Tried to compile re:", re_str)
            raise
    # Matches either a "tag: value" signature line or a bracketed
    # "[updater: note]" annotation line.
    sig_block_re = \
        re.compile(r'''^
                        (?: (?P<tag>[^:]+) \s* : \s*
                            (?P<value>\S.*?) )
                        |
                        (?: \[ (?P<updater>[^:]+) \s* : \s*
                               (?P<note>.+?) \s* \] )
                    \s* $''',
                   re.VERBOSE | re.MULTILINE)
    def find_signatures(self, sig):
        # Locate all '<sig>-by:' (or 'Cc:') signatures, validating casing,
        # spacing, and the attached email address.  Returns the raw tuples.
        if not sig.endswith('-by') and sig != 'Cc':
            sig += '-by'
        regex = self.make_signature_re(sig)
        sigs = regex.findall(self.msg)
        bad_case_sigs = filter(lambda m: m[0] != sig, sigs)
        for s in bad_case_sigs:
            self.error("'" +s[0] + "' should be '" + sig + "'")
        for s in sigs:
            if s[1] != '':
                self.error('There should be no spaces between ' + sig +
                           " and the ':'")
            if s[2] != ' ':
                self.error("There should be a space after '" + sig + ":'")
            EmailAddressCheck(s[3], sig)
        return sigs
    def check_signed_off_by(self):
        sob='Signed-off-by'
        if self.msg.find(sob) < 0:
            self.error('Missing Signed-off-by! (Note: this must be ' +
                       'added by the code contributor!)')
            return
        sobs = self.find_signatures('Signed-off')
        if len(sobs) == 0:
            self.error('Invalid Signed-off-by format!')
            return
    # Optional signature tags that are validated when present.
    sig_types = (
        'Reviewed',
        'Reported',
        'Tested',
        'Suggested',
        'Acked',
        'Cc'
        )
    def check_misc_signatures(self):
        for sig in self.sig_types:
            self.find_signatures(sig)
    cve_re = re.compile('CVE-[0-9]{4}-[0-9]{5}[^0-9]')
    def check_overall_format(self):
        lines = self.msg.splitlines()
        # Pick the line-ending style from the first body line, then prepend
        # the subject and a blank line so indices match the full message.
        if len(lines) >= 1 and lines[0].endswith('\r\n'):
            empty_line = '\r\n'
        else:
            empty_line = '\n'
        lines.insert(0, empty_line)
        lines.insert(0, self.subject + empty_line)
        count = len(lines)
        if count <= 0:
            self.error('Empty commit message!')
            return
        if count >= 1 and re.search(self.cve_re, lines[0]):
            #
            # If CVE-xxxx-xxxxx is present in subject line, then limit length of
            # subject line to 92 characters
            #
            if len(lines[0].rstrip()) >= 93:
                self.error(
                    'First line of commit message (subject line) is too long (%d >= 93).' %
                    (len(lines[0].rstrip()))
                    )
        else:
            #
            # If CVE-xxxx-xxxxx is not present in subject line, then limit
            # length of subject line to 75 characters
            #
            if len(lines[0].rstrip()) >= 76:
                self.error(
                    'First line of commit message (subject line) is too long (%d >= 76).' %
                    (len(lines[0].rstrip()))
                    )
        if count >= 1 and len(lines[0].strip()) == 0:
            self.error('First line of commit message (subject line) ' +
                       'is empty.')
        if count >= 2 and lines[1].strip() != '':
            self.error('Second line of commit message should be ' +
                       'empty.')
        for i in range(2, count):
            if (len(lines[i]) >= 76 and
                len(lines[i].split()) > 1 and
                not lines[i].startswith('git-svn-id:') and
                not lines[i].startswith('Reviewed-by') and
                not lines[i].startswith('Acked-by:') and
                not lines[i].startswith('Tested-by:') and
                not lines[i].startswith('Reported-by:') and
                not lines[i].startswith('Suggested-by:') and
                not lines[i].startswith('Signed-off-by:') and
                not lines[i].startswith('Cc:')):
                #
                # Print a warning if body line is longer than 75 characters
                #
                print(
                    'WARNING - Line %d of commit message is too long (%d >= 76).' %
                    (i + 1, len(lines[i]))
                    )
                print(lines[i])
        # Walk backwards from the end to verify the signature block is
        # preceded by an empty line.
        last_sig_line = None
        for i in range(count - 1, 0, -1):
            line = lines[i]
            mo = self.sig_block_re.match(line)
            if mo is None:
                if line.strip() == '':
                    break
                elif last_sig_line is not None:
                    err2 = 'Add empty line before "%s"?' % last_sig_line
                    self.error('The line before the signature block ' +
                               'should be empty', err2)
                else:
                    self.error('The signature block was not found')
                break
            last_sig_line = line.strip()
# Parser states for GitDiffCheck: looking for 'diff --git' (START), between
# the diff header and the first hunk (PRE_PATCH), and inside a hunk (PATCH).
(START, PRE_PATCH, PATCH) = range(3)
class GitDiffCheck:
    """Checks the contents of a git diff."""
    def __init__(self, diff):
        self.ok = True
        self.format_ok = True
        self.lines = diff.splitlines(True)
        self.count = len(self.lines)
        self.line_num = 0
        self.state = START
        # Binary files that the patch adds; reported as a warning.
        self.new_bin = []
        # Drive the state machine; run() must always advance line_num.
        while self.line_num < self.count and self.format_ok:
            line_num = self.line_num
            self.run()
            assert(self.line_num > line_num)
        self.report_message_result()
    def report_message_result(self):
        if Verbose.level < Verbose.NORMAL:
            return
        if self.ok:
            print('The code passed all checks.')
        if self.new_bin:
            print('\nWARNING - The following binary files will be added ' +
                  'into the repository:')
            for binary in self.new_bin:
                print('  ' + binary)
    def run(self):
        # Process one diff line according to the current parser state.
        line = self.lines[self.line_num]
        if self.state in (PRE_PATCH, PATCH):
            if line.startswith('diff --git'):
                self.state = START
        if self.state == PATCH:
            if line.startswith('@@ '):
                self.state = PRE_PATCH
            elif len(line) >= 1 and line[0] not in ' -+' and \
                 not line.startswith('\r\n') and \
                 not line.startswith(r'\ No newline ') and not self.binary:
                # Unexpected text after a hunk: treat the rest as trailer,
                # but complain if another diff follows it.
                for line in self.lines[self.line_num + 1:]:
                    if line.startswith('diff --git'):
                        self.format_error('diff found after end of patch')
                        break
                self.line_num = self.count
                return
        if self.state == START:
            if line.startswith('diff --git'):
                self.state = PRE_PATCH
                self.filename = line[13:].split(' ', 1)[0]
                self.is_newfile = False
                self.force_crlf = True
                self.force_notabs = True
                if self.filename.endswith('.sh') or \
                    self.filename.startswith('BaseTools/BinWrappers/PosixLike/') or \
                    self.filename.startswith('BaseTools/BinPipWrappers/PosixLike/') or \
                    self.filename == 'BaseTools/BuildEnv':
                    #
                    # Do not enforce CR/LF line endings for linux shell scripts.
                    # Some linux shell scripts don't end with the ".sh" extension,
                    # they are identified by their path.
                    #
                    self.force_crlf = False
                if self.filename == '.gitmodules' or \
                   self.filename == 'BaseTools/Conf/diff.order':
                    #
                    # .gitmodules and diff orderfiles are used internally by git
                    # use tabs and LF line endings. Do not enforce no tabs and
                    # do not enforce CR/LF line endings.
                    #
                    self.force_crlf = False
                    self.force_notabs = False
                if os.path.basename(self.filename) == 'GNUmakefile' or \
                   os.path.basename(self.filename) == 'Makefile':
                    # Makefiles require tab indentation.
                    self.force_notabs = False
            elif len(line.rstrip()) != 0:
                self.format_error("didn't find diff command")
            self.line_num += 1
        elif self.state == PRE_PATCH:
            if line.startswith('@@ '):
                self.state = PATCH
                self.binary = False
            elif line.startswith('GIT binary patch') or \
                 line.startswith('Binary files'):
                self.state = PATCH
                self.binary = True
                if self.is_newfile:
                    self.new_bin.append(self.filename)
            elif line.startswith('new file mode 160000'):
                #
                # New submodule. Do not enforce CR/LF line endings
                #
                self.force_crlf = False
            else:
                ok = False
                self.is_newfile = self.newfile_prefix_re.match(line)
                for pfx in self.pre_patch_prefixes:
                    if line.startswith(pfx):
                        ok = True
                if not ok:
                    self.format_error("didn't find diff hunk marker (@@)")
            self.line_num += 1
        elif self.state == PATCH:
            if self.binary:
                pass
            elif line.startswith('-'):
                pass
            elif line.startswith('+'):
                # Only added lines are style-checked.
                self.check_added_line(line[1:])
            elif line.startswith('\r\n'):
                pass
            elif line.startswith(r'\ No newline '):
                pass
            elif not line.startswith(' '):
                self.format_error("unexpected patch line")
            self.line_num += 1
    # Header lines that may legally appear between 'diff --git' and '@@'.
    pre_patch_prefixes = (
        '--- ',
        '+++ ',
        'index ',
        'new file ',
        'deleted file ',
        'old mode ',
        'new mode ',
        'similarity index ',
        'copy from ',
        'copy to ',
        'rename ',
        )
    line_endings = ('\r\n', '\n\r', '\n', '\r')
    # An 'index 0000000..' header marks a newly added file.
    newfile_prefix_re = \
        re.compile(r'''^
                       index\ 0+\.\.
                   ''',
                   re.VERBOSE)
    def added_line_error(self, msg, line):
        # Report a style error for an added line, including file context.
        lines = [ msg ]
        if self.filename is not None:
            lines.append('File: ' + self.filename)
        lines.append('Line: ' + line)
        self.error(*lines)
    # Matches DEBUG ((EFI_D_xxx ...)) uses of the deprecated EFI_D_ levels.
    old_debug_re = \
        re.compile(r'''
                        DEBUG \s* \( \s* \( \s*
                        (?: DEBUG_[A-Z_]+ \s* \| \s*)*
                        EFI_D_ ([A-Z_]+)
                   ''',
                   re.VERBOSE)
    def check_added_line(self, line):
        # Strip the line ending (if any) so content checks see bare text.
        eol = ''
        for an_eol in self.line_endings:
            if line.endswith(an_eol):
                eol = an_eol
                line = line[:-len(eol)]
        stripped = line.rstrip()
        if self.force_crlf and eol != '\r\n' and (line.find('Subproject commit') == -1):
            self.added_line_error('Line ending (%s) is not CRLF' % repr(eol),
                                  line)
        if self.force_notabs and '\t' in line:
            self.added_line_error('Tab character used', line)
        if len(stripped) < len(line):
            self.added_line_error('Trailing whitespace found', line)
        mo = self.old_debug_re.search(line)
        if mo is not None:
            self.added_line_error('EFI_D_' + mo.group(1) + ' was used, '
                                  'but DEBUG_' + mo.group(1) +
                                  ' is now recommended', line)
    split_diff_re = re.compile(r'''
                                   (?P<cmd>
                                       ^ diff \s+ --git \s+ a/.+ \s+ b/.+ $
                                   )
                                   (?P<index>
                                       ^ index \s+ .+ $
                                   )
                               ''',
                               re.IGNORECASE | re.VERBOSE | re.MULTILINE)
    def format_error(self, err):
        # A malformed patch stops the whole scan (format_ok gates the loop).
        self.format_ok = False
        err = 'Patch format error: ' + err
        err2 = 'Line: ' + self.lines[self.line_num].rstrip()
        self.error(err, err2)
    def error(self, *err):
        # Print the heading only on the first failure; later failures just
        # append their detail lines.
        if self.ok and Verbose.level > Verbose.ONELINE:
            print('Code format is not valid:')
        self.ok = False
        if Verbose.level < Verbose.NORMAL:
            return
        count = 0
        for line in err:
            prefix = (' *', '  ')[count > 0]
            print(prefix, line)
            count += 1
class CheckOnePatch:
    """Checks the contents of a git email formatted patch.

    Various checks are performed on both the commit message and the
    patch content.
    """

    def __init__(self, name, patch):
        self.patch = patch
        self.find_patch_pieces()

        # The overall verdict is the AND of the author-email, commit-message
        # and (when a diff is present) diff-content checks.
        email_check = EmailAddressCheck(self.author_email, 'Author')
        email_ok = email_check.ok

        msg_check = CommitMessageCheck(self.commit_subject, self.commit_msg, self.author_email)
        msg_ok = msg_check.ok

        diff_ok = True
        if self.diff is not None:
            diff_check = GitDiffCheck(self.diff)
            diff_ok = diff_check.ok

        self.ok = email_ok and msg_ok and diff_ok

        if Verbose.level == Verbose.ONELINE:
            if self.ok:
                result = 'ok'
            else:
                # NOTE(review): a failing author-email check is not reflected
                # in this one-line summary text, only in self.ok.
                result = list()
                if not msg_ok:
                    result.append('commit message')
                if not diff_ok:
                    result.append('diff content')
                result = 'bad ' + ' and '.join(result)
            print(name, result)

    # First 'diff --git' line; everything from here on is diff content.
    git_diff_re = re.compile(r'''
                                 ^ diff \s+ --git \s+ a/.+ \s+ b/.+ $
                             ''',
                             re.IGNORECASE | re.VERBOSE | re.MULTILINE)

    # Splits the commit message proper from the '---' diff-stat section
    # that 'git format-patch' appends after it.
    stat_re = \
        re.compile(r'''
                       (?P<commit_message> [\s\S\r\n]* )
                       (?P<stat>
                           ^ --- $ [\r\n]+
                           (?: ^ \s+ .+ \s+ \| \s+ \d+ \s+ \+* \-*
                               $ [\r\n]+ )+
                           [\s\S\r\n]+
                       )
                   ''',
                   re.IGNORECASE | re.VERBOSE | re.MULTILINE)

    # Leading bracketed tags on the subject line, e.g. '[PATCH v2 1/3]'.
    subject_prefix_re = \
        re.compile(r'''^
                       \s* (\[
                        [^\[\]]* # Allow all non-brackets
                       \])* \s*
                   ''',
                   re.VERBOSE)

    def find_patch_pieces(self):
        """Split self.patch into diff, stat, commit message and subject."""
        if sys.version_info < (3, 0):
            patch = self.patch.encode('ascii', 'ignore')
        else:
            patch = self.patch

        self.commit_msg = None
        self.stat = None
        self.commit_subject = None
        self.commit_prefix = None
        self.diff = None

        # A bare diff (no mail headers) needs no further splitting.
        if patch.startswith('diff --git'):
            self.diff = patch
            return

        pmail = email.message_from_string(patch)
        parts = list(pmail.walk())
        assert(len(parts) == 1)
        assert(parts[0].get_content_type() == 'text/plain')
        content = parts[0].get_payload(decode=True).decode('utf-8', 'ignore')

        mo = self.git_diff_re.search(content)
        if mo is not None:
            self.diff = content[mo.start():]
            content = content[:mo.start()]

        mo = self.stat_re.search(content)
        if mo is None:
            self.commit_msg = content
        else:
            self.stat = mo.group('stat')
            self.commit_msg = mo.group('commit_message')
        #
        # Parse subject line from email header. The subject line may be
        # composed of multiple parts with different encodings. Decode and
        # combine all the parts to produce a single string with the contents of
        # the decoded subject line.
        #
        parts = email.header.decode_header(pmail.get('subject'))
        subject = ''
        for (part, encoding) in parts:
            if encoding:
                part = part.decode(encoding)
            else:
                try:
                    part = part.decode()
                except:
                    pass
            subject = subject + part

        self.commit_subject = subject.replace('\r\n', '')
        self.commit_subject = self.commit_subject.replace('\n', '')
        self.commit_subject = self.subject_prefix_re.sub('', self.commit_subject, 1)

        self.author_email = pmail['from']
class CheckGitCommits:
    """Reads patches from git based on the specified git revision range.

    The patches are read from git, and then checked.
    """

    def __init__(self, rev_spec, max_count):
        commits = self.read_commit_list_from_git(rev_spec, max_count)
        # With a single commit, keep the user's original spec for nicer output.
        if len(commits) == 1 and Verbose.level > Verbose.ONELINE:
            commits = [ rev_spec ]
        self.ok = True
        blank_line = False
        for commit in commits:
            if Verbose.level > Verbose.ONELINE:
                if blank_line:
                    print()
                else:
                    blank_line = True
                print('Checking git commit:', commit)
            email = self.read_committer_email_address_from_git(commit)
            self.ok &= EmailAddressCheck(email, 'Committer').ok
            patch = self.read_patch_from_git(commit)
            self.ok &= CheckOnePatch(commit, patch).ok
        if not commits:
            print("Couldn't find commit matching: '{}'".format(rev_spec))

    def read_commit_list_from_git(self, rev_spec, max_count):
        # Run git to get the list of abbreviated commit ids for the range.
        cmd = [ 'rev-list', '--abbrev-commit', '--no-walk' ]
        if max_count is not None:
            cmd.append('--max-count=' + str(max_count))
        cmd.append(rev_spec)
        out = self.run_git(*cmd)
        return out.split() if out else []

    def read_patch_from_git(self, commit):
        # Run git to get the commit patch
        return self.run_git('show', '--pretty=email', '--no-textconv',
                            '--no-use-mailmap', commit)

    def read_committer_email_address_from_git(self, commit):
        # Run git to get the committer email
        return self.run_git('show', '--pretty=%cn <%ce>', '--no-patch',
                            '--no-use-mailmap', commit)

    def run_git(self, *args):
        """Run git with *args; return decoded stdout, or None when there is
        no output or the output starts with git's 'fatal' error marker."""
        cmd = [ 'git' ]
        cmd += args
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        Result = p.communicate()
        return Result[0].decode('utf-8', 'ignore') if Result[0] and Result[0].find(b"fatal")!=0 else None
class CheckOnePatchFile:
    """Performs a patch check for a single file.

    stdin is used when the filename is '-'.
    """

    def __init__(self, patch_filename):
        if patch_filename == '-':
            patch = sys.stdin.read()
            patch_filename = 'stdin'
        else:
            # Use a context manager so the file is closed even when read()
            # or decode() raises; the original open/close pair leaked the
            # handle on that path.
            with open(patch_filename, 'rb') as f:
                patch = f.read().decode('utf-8', 'ignore')
        if Verbose.level > Verbose.ONELINE:
            print('Checking patch file:', patch_filename)
        self.ok = CheckOnePatch(patch_filename, patch).ok
class CheckOneArg:
    """Performs a patch check for a single command line argument.

    The argument will be handed off to a file or git-commit based
    checker.
    """

    def __init__(self, param, max_count=None):
        self.ok = True
        # '-' (stdin) and existing paths are patch files; anything else is
        # treated as a git revision specifier.
        if param != '-' and not os.path.exists(param):
            checker = CheckGitCommits(param, max_count)
        else:
            checker = CheckOnePatchFile(param)
        self.ok = checker.ok
class PatchCheckApp:
    """Checks patches based on the command line arguments."""

    def __init__(self):
        self.parse_options()
        patches = self.args.patches
        if len(patches) == 0:
            patches = [ 'HEAD' ]
        self.ok = True
        self.count = None
        for patch in patches:
            self.process_one_arg(patch)
        # A trailing '-N' argument with no revision after it applies to HEAD.
        if self.count is not None:
            self.process_one_arg('HEAD')
        # retval becomes the process exit status: 0 on success, -1 otherwise.
        if self.ok:
            self.retval = 0
        else:
            self.retval = -1

    def process_one_arg(self, arg):
        """Handle one positional argument: '-N' stashes a commit count for
        the next revision argument; anything else is checked directly."""
        if len(arg) >= 2 and arg[0] == '-':
            try:
                self.count = int(arg[1:])
                return
            except ValueError:
                pass
        self.ok &= CheckOneArg(arg, self.count).ok
        self.count = None

    def parse_options(self):
        """Parse command line options into self.args and set verbosity."""
        parser = argparse.ArgumentParser(description=__copyright__)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + VersionNumber)
        parser.add_argument('patches', nargs='*',
                            help='[patch file | git rev list]')
        group = parser.add_mutually_exclusive_group()
        group.add_argument("--oneline",
                           action="store_true",
                           help="Print one result per line")
        group.add_argument("--silent",
                           action="store_true",
                           help="Print nothing")
        self.args = parser.parse_args()
        if self.args.oneline:
            Verbose.level = Verbose.ONELINE
        if self.args.silent:
            Verbose.level = Verbose.SILENT
if __name__ == "__main__":
    # Exit status: 0 when every checked patch passed, -1 otherwise.
    sys.exit(PatchCheckApp().retval)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PatchCheck.py
|
## @file
# Convert a binary file to a VOID* PCD value or DSC file VOID* PCD statement.
#
# Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
BinToPcd
'''
from __future__ import print_function
import sys
import argparse
import re
import xdrlib
#
# Globals for help information
#
__prog__ = 'BinToPcd'
__copyright__ = 'Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.'
__description__ = 'Convert one or more binary files to a VOID* PCD value or DSC file VOID* PCD statement.\n'
if __name__ == '__main__':
def ValidateUnsignedInteger (Argument):
    """argparse type checker: parse Argument as a non-negative integer.

    Accepts any base-prefixed form understood by int(x, 0), e.g. '10',
    '0x10', '0o17'.  Raises argparse.ArgumentTypeError when Argument is
    not an integer or is negative.
    """
    try:
        Value = int (Argument, 0)
    except (TypeError, ValueError):
        # Catch only conversion failures; the original bare 'except:'
        # would also have hidden unrelated bugs (e.g. NameError).
        Message = '{Argument} is not a valid integer value.'.format (Argument = Argument)
        raise argparse.ArgumentTypeError (Message)
    if Value < 0:
        Message = '{Argument} is a negative value.'.format (Argument = Argument)
        raise argparse.ArgumentTypeError (Message)
    return Value
def ValidatePcdName (Argument):
    """argparse type checker for <PcdTokenSpaceGuidCName>.<PcdCName>.

    Each part must be a C identifier (letter or underscore followed by
    letters, digits or underscores).  Returns Argument unchanged when
    valid; raises argparse.ArgumentTypeError otherwise.
    """
    # re.fullmatch replaces the original re.split(...) != ['', ''] trick,
    # and the raw string fixes the invalid '\_' escape sequences that
    # trigger SyntaxWarning on modern Python.
    if re.fullmatch (r'[a-zA-Z_][a-zA-Z0-9_]*\.[a-zA-Z_][a-zA-Z0-9_]*', Argument) is None:
        Message = '{Argument} is not in the form <PcdTokenSpaceGuidCName>.<PcdCName>'.format (Argument = Argument)
        raise argparse.ArgumentTypeError (Message)
    return Argument
def ValidateGuidName (Argument):
    """argparse type checker: Argument must be a valid GUID C name,
    i.e. a single C identifier.  Returns Argument unchanged when valid;
    raises argparse.ArgumentTypeError otherwise.
    """
    # fullmatch with a raw string replaces the original split trick and
    # its invalid '\_' escapes (behavior is unchanged).
    if re.fullmatch (r'[a-zA-Z_][a-zA-Z0-9_]*', Argument) is None:
        Message = '{Argument} is not a valid GUID C name'.format (Argument = Argument)
        raise argparse.ArgumentTypeError (Message)
    return Argument
def ByteArray (Buffer, Xdr = False):
    """Render a list of bytes objects as a C array initializer.

    When Xdr is True each element is encoded using the Variable-Length
    Opaque Data format of RFC 4506 (XDR); otherwise all elements are
    simply concatenated.  Returns a tuple of the '{0x01, 0x02, ...}'
    text and the total encoded length in bytes.
    """
    if Xdr:
        # Length-prefix (and pad) every chunk per RFC 4506.
        Packer = xdrlib.Packer ()
        for Chunk in Buffer:
            Packer.pack_bytes (Chunk)
        Data = bytearray (Packer.get_buffer ())
    else:
        # Plain concatenation of all input chunks.
        Data = bytearray (b''.join (Buffer))
    Body = ', '.join ('0x{0:02X}'.format (Byte) for Byte in Data)
    return '{' + Body + '}', len (Data)
#
# Create command line argument parser object
#
parser = argparse.ArgumentParser (prog = __prog__,
                                  description = __description__ + __copyright__,
                                  conflict_handler = 'resolve')
parser.add_argument ("-i", "--input", dest = 'InputFile', type = argparse.FileType ('rb'), action='append', required = True,
                     help = "Input binary filename. Multiple input files are combined into a single PCD.")
parser.add_argument ("-o", "--output", dest = 'OutputFile', type = argparse.FileType ('w'),
                     help = "Output filename for PCD value or PCD statement")
parser.add_argument ("-p", "--pcd", dest = 'PcdName', type = ValidatePcdName,
                     help = "Name of the PCD in the form <PcdTokenSpaceGuidCName>.<PcdCName>")
parser.add_argument ("-t", "--type", dest = 'PcdType', default = None, choices = ['VPD', 'HII'],
                     help = "PCD statement type (HII or VPD). Default is standard.")
parser.add_argument ("-m", "--max-size", dest = 'MaxSize', type = ValidateUnsignedInteger,
                     help = "Maximum size of the PCD. Ignored with --type HII.")
parser.add_argument ("-f", "--offset", dest = 'Offset', type = ValidateUnsignedInteger,
                     help = "VPD offset if --type is VPD. UEFI Variable offset if --type is HII. Must be 8-byte aligned.")
parser.add_argument ("-n", "--variable-name", dest = 'VariableName',
                     help = "UEFI variable name. Only used with --type HII.")
parser.add_argument ("-g", "--variable-guid", type = ValidateGuidName, dest = 'VariableGuid',
                     help = "UEFI variable GUID C name. Only used with --type HII.")
parser.add_argument ("-x", "--xdr", dest = 'Xdr', action = "store_true",
                     help = "Encode PCD using the Variable-Length Opaque Data format of RFC 4506 External Data Representation Standard (XDR)")
parser.add_argument ("-v", "--verbose", dest = 'Verbose', action = "store_true",
                     help = "Increase output messages")
parser.add_argument ("-q", "--quiet", dest = 'Quiet', action = "store_true",
                     help = "Reduce output messages")
parser.add_argument ("--debug", dest = 'Debug', type = int, metavar = '[0-9]', choices = range (0, 10), default = 0,
                     help = "Set debug level")

#
# Parse command line arguments
#
args = parser.parse_args ()

#
# Read all binary input files
#
Buffer = []
for File in args.InputFile:
    try:
        Buffer.append (File.read ())
        File.close ()
    except:
        # NOTE(review): a bare 'except' also swallows KeyboardInterrupt;
        # catching OSError would be sufficient here -- confirm.
        print ('BinToPcd: error: can not read binary input file {File}'.format (File = File))
        sys.exit (1)

#
# Convert PCD to an encoded string of hex values and determine the size of
# the encoded PCD in bytes.
#
PcdValue, PcdSize = ByteArray (Buffer, args.Xdr)

#
# Convert binary buffer to a DSC file PCD statement
#
if args.PcdName is None:
    #
    # If PcdName is None, then only a PCD value is being requested.
    #
    Pcd = PcdValue
    if args.Verbose:
        print ('BinToPcd: Convert binary file to PCD Value')
elif args.PcdType is None:
    #
    # If --type is neither VPD nor HII, then use PCD statement syntax that is
    # compatible with [PcdsFixedAtBuild], [PcdsPatchableInModule],
    # [PcdsDynamicDefault], and [PcdsDynamicExDefault].
    #
    if args.MaxSize is None:
        #
        # If --max-size is not provided, then do not generate the syntax that
        # includes the maximum size.
        #
        Pcd = ' {Name}|{Value}'.format (Name = args.PcdName, Value = PcdValue)
    elif args.MaxSize < PcdSize:
        print ('BinToPcd: error: argument --max-size is smaller than input file.')
        sys.exit (1)
    else:
        Pcd = ' {Name}|{Value}|VOID*|{Size}'.format (Name = args.PcdName, Value = PcdValue, Size = args.MaxSize)
    if args.Verbose:
        print ('BinToPcd: Convert binary file to PCD statement compatible with PCD sections:')
        print (' [PcdsFixedAtBuild]')
        print (' [PcdsPatchableInModule]')
        print (' [PcdsDynamicDefault]')
        print (' [PcdsDynamicExDefault]')
elif args.PcdType == 'VPD':
    if args.MaxSize is None:
        #
        # If --max-size is not provided, then set maximum size to the size of the
        # binary input file
        #
        args.MaxSize = PcdSize
    if args.MaxSize < PcdSize:
        print ('BinToPcd: error: argument --max-size is smaller than input file.')
        sys.exit (1)
    if args.Offset is None:
        #
        # if --offset is not provided, then set offset field to '*' so build
        # tools will compute offset of PCD in VPD region.
        #
        Pcd = ' {Name}|*|{Size}|{Value}'.format (Name = args.PcdName, Size = args.MaxSize, Value = PcdValue)
    else:
        #
        # --offset value must be 8-byte aligned
        #
        if (args.Offset % 8) != 0:
            print ('BinToPcd: error: argument --offset must be 8-byte aligned.')
            sys.exit (1)
        #
        # Use the --offset value provided.
        #
        Pcd = ' {Name}|{Offset}|{Size}|{Value}'.format (Name = args.PcdName, Offset = args.Offset, Size = args.MaxSize, Value = PcdValue)
    if args.Verbose:
        print ('BinToPcd: Convert binary file to PCD statement compatible with PCD sections')
        print (' [PcdsDynamicVpd]')
        print (' [PcdsDynamicExVpd]')
elif args.PcdType == 'HII':
    if args.VariableGuid is None or args.VariableName is None:
        print ('BinToPcd: error: arguments --variable-guid and --variable-name are required for --type HII.')
        sys.exit (1)
    if args.Offset is None:
        #
        # Use UEFI Variable offset of 0 if --offset is not provided
        #
        args.Offset = 0
    #
    # --offset value must be 8-byte aligned
    #
    if (args.Offset % 8) != 0:
        print ('BinToPcd: error: argument --offset must be 8-byte aligned.')
        sys.exit (1)
    Pcd = ' {Name}|L"{VarName}"|{VarGuid}|{Offset}|{Value}'.format (Name = args.PcdName, VarName = args.VariableName, VarGuid = args.VariableGuid, Offset = args.Offset, Value = PcdValue)
    if args.Verbose:
        print ('BinToPcd: Convert binary file to PCD statement compatible with PCD sections')
        print (' [PcdsDynamicHii]')
        print (' [PcdsDynamicExHii]')

#
# Write PCD value or PCD statement to the output file
#
try:
    args.OutputFile.write (Pcd)
    args.OutputFile.close ()
except:
    #
    # If output file is not specified or it can not be written, then write the
    # PCD value or PCD statement to the console
    #
    print (Pcd)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/BinToPcd.py
|
#!/usr/bin/python
## @file
# Firmware Configuration Editor (FCE) from https://firmware.intel.com/develop
# can parse BIOS image and generate Firmware Configuration file.
# This script bases on Firmware Configuration file, and generate the structure
# PCD setting in DEC/DSC/INF files.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
ConvertFceToStructurePcd
'''
import re
import os
import datetime
import argparse
#
# Globals for help information
#
__prog__ = 'ConvertFceToStructurePcd'
__version__ = '%s Version %s' % (__prog__, '0.1 ')
__copyright__ = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Generate Structure PCD in DEC/DSC/INF based on Firmware Configuration.\n'
# Boilerplate emitted at the top of the generated StructurePcd.dsc file.
dscstatement='''[Defines]
VPD_TOOL_GUID = 8C3D856A-9BE6-468E-850A-24F7A8D38E08
[SkuIds]
0|DEFAULT # The entry: 0|DEFAULT is reserved and always required.
[DefaultStores]
0|STANDARD # UEFI Standard default 0|STANDARD is reserved.
1|MANUFACTURING # UEFI Manufacturing default 1|MANUFACTURING is reserved.
[PcdsDynamicExVpd.common.DEFAULT]
gEfiMdeModulePkgTokenSpaceGuid.PcdNvStoreDefaultValueBuffer|*
'''

# Boilerplate emitted at the top of the generated StructurePcd.dec file.
decstatement = '''[Guids]
gStructPcdTokenSpaceGuid = {0x3f1406f4, 0x2b, 0x487a, {0x8b, 0x69, 0x74, 0x29, 0x1b, 0x36, 0x16, 0xf4}}
[PcdsFixedAtBuild,PcdsPatchableInModule,PcdsDynamic,PcdsDynamicEx]
'''

# Boilerplate emitted at the top of the generated StructurePcd.inf file.
infstatement = '''[Pcd]
'''

# DSC section name and token-space prefix used for every generated PCD.
SECTION='PcdsDynamicHii'
PCD_NAME='gStructPcdTokenSpaceGuid.Pcd'

# Column at which trailing '# comment' text is aligned in the .dsc output.
Max_Pcd_Len = 100

# Diagnostics collected while parsing; reported by the caller at the end.
WARNING=[]
ERRORMSG=[]
class parser_lst(object):
    """Parses build-generated .lst files: extracts struct layouts
    (offset -> member name) and efivarstore declarations."""

    def __init__(self,filelist):
        # Basic types that must not be treated as struct names.
        self._ignore=['BOOLEAN', 'UINT8', 'UINT16', 'UINT32', 'UINT64']
        self.file=filelist
        self.text=self.megre_lst()[0]
        self.content=self.megre_lst()[1]

    def megre_lst(self):
        """Concatenate all .lst files; also keep per-file contents."""
        alltext=''
        content={}
        for file in self.file:
            with open(file,'r') as f:
                read =f.read()
                alltext += read
                content[file]=read
        return alltext,content

    def struct_lst(self):#{struct:lst file}
        """Map each non-basic struct name to the .lst file defining it."""
        structs_file={}
        name_format = re.compile(r'(?<!typedef)\s+struct (\w+) {.*?;', re.S)
        for i in list(self.content.keys()):
            structs= name_format.findall(self.content[i])
            if structs:
                for j in structs:
                    if j not in self._ignore:
                        structs_file[j]=i
            else:
                print("%s"%structs)
        return structs_file

    def struct(self):#struct:{offset:name}
        """Build {struct: {offset: member-name}} from the .lst dump,
        expanding arrays and (via nameISstruct) nested struct members."""
        unit_num = re.compile('(\d+)')
        offset1_re = re.compile('(\d+)\[')
        pcdname_num_re = re.compile('\w+\[(\S+)\]')
        pcdname_re = re.compile('\](.*)\<')
        pcdname2_re = re.compile('(\w+)\[')
        uint_re = re.compile('\<(\S+)\>')
        name_format = re.compile(r'(?<!typedef)\s+struct (\w+) {.*?;', re.S)
        name=name_format.findall(self.text)
        info={}
        unparse=[]
        if name:
            # De-duplicate while preserving first-seen order, then reverse
            # so dependent structs are processed after their members.
            tmp_n = [n for n in name if n not in self._ignore]
            name = list(set(tmp_n))
            name.sort(key = tmp_n.index)
            name.reverse()
            #name=list(set(name).difference(set(self._ignore)))
            for struct in name:
                s_re = re.compile(r'struct %s :(.*?)};'% struct, re.S)
                content = s_re.search(self.text)
                if content:
                    tmp_dict = {}
                    text = content.group().split('+')
                    for line in text[1:]:
                        offset = offset1_re.findall(line)
                        t_name = pcdname_re.findall(line)
                        uint = uint_re.findall(line)
                        if offset and uint:
                            offset = offset[0]
                            uint = uint[0]
                            if t_name:
                                t_name = t_name[0].strip()
                                if (' ' in t_name) or ("=" in t_name) or (";" in t_name) or("\\" in name) or (t_name ==''):
                                    WARNING.append("Warning:Invalid Pcd name '%s' for Offset %s in struct %s" % (t_name,offset, struct))
                                else:
                                    if '[' in t_name:
                                        # Array member: basic element types are
                                        # expanded element by element here;
                                        # struct elements are deferred.
                                        if uint in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
                                            offset = int(offset, 10)
                                            tmp_name = pcdname2_re.findall(t_name)[0] + '[0]'
                                            tmp_dict[offset] = tmp_name
                                            pcdname_num = int(pcdname_num_re.findall(t_name)[0],10)
                                            uint = int(unit_num.findall(uint)[0],10)
                                            bit = uint // 8
                                            for i in range(1, pcdname_num):
                                                offset += bit
                                                tmp_name = pcdname2_re.findall(t_name)[0] + '[%s]' % i
                                                tmp_dict[offset] = tmp_name
                                        else:
                                            tmp_name = pcdname2_re.findall(t_name)[0]
                                            pcdname_num = pcdname_num_re.findall(t_name)[0]
                                            line = [offset,tmp_name,pcdname_num,uint]
                                            line.append(struct)
                                            unparse.append(line)
                                    else:
                                        if uint not in ['UINT8', 'UINT16', 'UINT32', 'UINT64', 'BOOLEAN']:
                                            # Scalar member whose type is itself
                                            # a struct: resolve later.
                                            line = [offset, t_name, 0, uint]
                                            line.append(struct)
                                            unparse.append(line)
                                        else:
                                            offset = int(offset,10)
                                            tmp_dict[offset] = t_name
                    info[struct] = tmp_dict
            if len(unparse) != 0:
                # Second pass: flatten deferred struct-typed members into
                # their parent's offset map.
                for u in unparse:
                    if u[3] in list(info.keys()):
                        unpar = self.nameISstruct(u,info[u[3]])
                        info[u[4]]= dict(list(info[u[4]].items())+list(unpar[u[4]].items()))
        else:
            print("ERROR: No struct name found in %s" % self.file)
            ERRORMSG.append("ERROR: No struct name found in %s" % self.file)
        return info

    def nameISstruct(self,line,key_dict):
        """Expand a struct-typed member (line = [offset, name, count,
        type, parent]) using the member struct's own offset map."""
        dict={}
        dict2={}
        s_re = re.compile(r'struct %s :(.*?)};' % line[3], re.S)
        size_re = re.compile(r'mTotalSize \[(\S+)\]')
        content = s_re.search(self.text)
        if content:
            s_size = size_re.findall(content.group())[0]
        else:
            s_size = '0'
            print("ERROR: Struct %s not define mTotalSize in lst file" %line[3])
            ERRORMSG.append("ERROR: Struct %s not define mTotalSize in lst file" %line[3])
        size = int(line[0], 10)
        if line[2] != 0:
            # Array of structs: repeat the member map once per element,
            # advancing by the struct's total size (hex in the .lst).
            for j in range(0, int(line[2], 10)):
                for k in list(key_dict.keys()):
                    offset = size + k
                    name ='%s.%s' %((line[1]+'[%s]'%j),key_dict[k])
                    dict[offset] = name
                size = int(s_size,16)+size
        elif line[2] == 0:
            for k in list(key_dict.keys()):
                offset = size + k
                name = '%s.%s' % (line[1], key_dict[k])
                dict[offset] = name
        dict2[line[4]] = dict
        return dict2

    def efivarstore_parser(self):
        """Return {variable-name: struct-name} from efivarstore statements."""
        efivarstore_format = re.compile(r'efivarstore.*?;', re.S)
        struct_re = re.compile(r'efivarstore(.*?),',re.S)
        name_re = re.compile(r'name=(\w+)')
        efivarstore_dict={}
        efitxt = efivarstore_format.findall(self.text)
        for i in efitxt:
            struct = struct_re.findall(i.replace(' ',''))
            if struct[0] in self._ignore:
                continue
            name = name_re.findall(i.replace(' ',''))
            if struct and name:
                efivarstore_dict[name[0]]=struct[0]
            else:
                print("ERROR: Can't find Struct or name in lst file, please check have this format:efivarstore XXXX, name=xxxx")
                ERRORMSG.append("ERROR: Can't find Struct or name in lst file, please check have this format:efivarstore XXXX, name=xxxx")
        return efivarstore_dict
class Config(object):
    """Parses an FCE-generated .config file into per-DEFAULT/PLATFORM-ID
    question records (offset, name, guid, value, attribute, comment)."""

    def __init__(self,Config):
        self.config=Config

    # Parse the .config file; returns {id-string: [(offset, name, guid,
    # value, attribute, comment), ...]}.
    def config_parser(self):
        ids_re =re.compile('_ID:(\d+)',re.S)
        id_re= re.compile('\s+')  # NOTE(review): unused -- confirm before removal
        info = []
        info_dict={}
        with open(self.config, 'r') as text:
            read = text.read()
        if 'DEFAULT_ID:' in read:
            # One 'FCEKEY DEFAULT' section per DEFAULT/PLATFORM ID pair.
            all_txt = read.split('FCEKEY DEFAULT')
            for i in all_txt[1:]:
                part = [] #save all infomation for DEFAULT_ID
                str_id=''
                ids = ids_re.findall(i.replace(' ',''))
                for m in ids:
                    str_id +=m+'_'
                str_id=str_id[:-1]
                part.append(ids)
                section = i.split('\nQ') #split with '\nQ ' to get every block
                part +=self.section_parser(section)
                info_dict[str_id] = self.section_parser(section)
                info.append(part)
        else:
            # No explicit IDs: everything belongs to the '0_0' default.
            part = []
            id=('0','0')
            str_id='0_0'
            part.append(id)
            section = read.split('\nQ')
            part +=self.section_parser(section)
            info_dict[str_id] = self.section_parser(section)
            info.append(part)
        return info_dict

    def eval_id(self,id):
        """Turn an id string like '0_1_0_1' into a DSC section header."""
        id = id.split("_")
        default_id=id[0:len(id)//2]
        platform_id=id[len(id)//2:]
        text=''
        for i in range(len(default_id)):
            text +="%s.common.%s.%s,"%(SECTION,self.id_name(platform_id[i],'PLATFORM'),self.id_name(default_id[i],'DEFAULT'))
        return '\n[%s]\n'%text[:-1]

    def id_name(self,ID, flag):
        """Map a numeric ID to its well-known SKU/default-store name, or a
        generated SKUIDn/DEFAULTIDn fallback."""
        platform_dict = {'0': 'DEFAULT'}
        default_dict = {'0': 'STANDARD', '1': 'MANUFACTURING'}
        if flag == "PLATFORM":
            try:
                value = platform_dict[ID]
            except KeyError:
                value = 'SKUID%s' % ID
        elif flag == 'DEFAULT':
            try:
                value = default_dict[ID]
            except KeyError:
                value = 'DEFAULTID%s' % ID
        else:
            value = None
        return value

    def section_parser(self,section):
        """Parse the 'Q ...' question blocks of one section; duplicate
        entries get their distinct comments merged with ' | '."""
        offset_re = re.compile(r'offset=(\w+)')
        name_re = re.compile(r'name=(\S+)')
        guid_re = re.compile(r'guid=(\S+)')
        # help_re = re.compile(r'help = (.*)')
        attribute_re=re.compile(r'attribute=(\w+)')
        value_re = re.compile(r'(//.*)')
        part = []
        part_without_comment = []
        for x in section[1:]:
            line=x.split('\n')[0]
            comment_list = value_re.findall(line) # the string \\... in "Q...." line
            comment_list[0] = comment_list[0].replace('//', '')
            comment_ori = comment_list[0].strip()
            comment = ""
            # Keep printable ASCII; map the UTF-16 (R) sign to '(R)',
            # drop every other non-printable character.
            for each in comment_ori:
                if each != " " and "\x21" > each or each > "\x7E":
                    if bytes(each, 'utf-16') == b'\xff\xfe\xae\x00':
                        each = '(R)'
                    else:
                        each = ""
                comment += each
            line=value_re.sub('',line) #delete \\... in "Q...." line
            list1=line.split(' ')
            value=self.value_parser(list1)
            offset = offset_re.findall(x.replace(' ',''))
            name = name_re.findall(x.replace(' ',''))
            guid = guid_re.findall(x.replace(' ',''))
            attribute =attribute_re.findall(x.replace(' ',''))
            if offset and name and guid and value and attribute:
                # Only NV+BS (0x3) and NV+BS+RT (0x7) variables are kept.
                if attribute[0] in ['0x3','0x7']:
                    offset = int(offset[0], 16)
                    #help = help_re.findall(x)
                    text_without_comment = offset, name[0], guid[0], value, attribute[0]
                    if text_without_comment in part_without_comment:
                        # check if exists same Pcd with different comments, add different comments in one line with "|".
                        dupl_index = part_without_comment.index(text_without_comment)
                        part[dupl_index] = list(part[dupl_index])
                        if comment not in part[dupl_index][-1]:
                            part[dupl_index][-1] += " | " + comment
                        part[dupl_index] = tuple(part[dupl_index])
                    else:
                        text = offset, name[0], guid[0], value, attribute[0], comment
                        part_without_comment.append(text_without_comment)
                        part.append(text)
        return(part)

    def value_parser(self, list1):
        """Decode one question's value: STRING, ORDERED_LIST (split into
        little-endian bytes) or plain numeric."""
        list1 = [t for t in list1 if t != ''] # remove '' form list
        first_num = int(list1[0], 16)
        if list1[first_num + 1] == 'STRING': # parser STRING
            if list1[-1] == '""':
                value = "{0x0, 0x0}"
            else:
                value = 'L%s' % list1[-1]
        elif list1[first_num + 1] == 'ORDERED_LIST': # parser ORDERED_LIST
            value_total = int(list1[first_num + 2])
            list2 = list1[-value_total:]
            tmp = []
            line = ''
            for i in list2:
                if len(i) % 2 == 0 and len(i) != 2:
                    # Multi-byte element: emit bytes least-significant first.
                    for m in range(0, len(i) // 2):
                        tmp.append('0x%02x' % (int('0x%s' % i, 16) >> m * 8 & 0xff))
                else:
                    tmp.append('0x%s' % i)
            for i in tmp:
                line += '%s,' % i
            value = '{%s}' % line[:-1]
        else:
            value = "0x%01x" % int(list1[-1], 16)
        return value
#parser Guid file, get guid name form guid value
class GUID(object):
    """Resolves GUID values to C names using the build's Fv/Guid.xref file."""

    def __init__(self,path):
        self.path = path
        self.guidfile = self.gfile()
        self.guiddict = self.guid_dict()

    def gfile(self):
        """Locate Fv/Guid.xref under the build tree, or exit with an error."""
        for root, dir, file in os.walk(self.path, topdown=True, followlinks=False):
            if 'FV' in dir:
                gfile = os.path.join(root,'Fv','Guid.xref')
                if os.path.isfile(gfile):
                    return gfile
        else:
            # for/else: only reached when the walk finishes without returning.
            print("ERROR: Guid.xref file not found")
            ERRORMSG.append("ERROR: Guid.xref file not found")
            exit()

    def guid_dict(self):
        """Parse Guid.xref into {GUID-value-upper: C-name}."""
        guiddict={}
        with open(self.guidfile,'r') as file:
            lines = file.readlines()
        guidinfo=lines
        for line in guidinfo:
            list=line.strip().split(' ')
            if list:
                if len(list)>1:
                    guiddict[list[0].upper()]=list[1]
                elif list[0] != ''and len(list)==1:
                    print("Error: line %s can't be parser in %s"%(line.strip(),self.guidfile))
                    ERRORMSG.append("Error: line %s can't be parser in %s"%(line.strip(),self.guidfile))
            else:
                # NOTE(review): str.split always yields at least one element,
                # so this branch looks unreachable -- confirm intent.
                print("ERROR: No data in %s" %self.guidfile)
                ERRORMSG.append("ERROR: No data in %s" %self.guidfile)
        return guiddict

    def guid_parser(self,guid):
        """Return the C name for a GUID value, or the value itself when unknown."""
        if guid.upper() in self.guiddict:
            return self.guiddict[guid.upper()]
        else:
            print("ERROR: GUID %s not found in file %s"%(guid, self.guidfile))
            ERRORMSG.append("ERROR: GUID %s not found in file %s"%(guid, self.guidfile))
            return guid
class PATH(object):
    """Walks a build tree pairing DEBUG/*.lst files with their OUTPUT/*.inf
    modules, and extracts package / header-file information from them."""

    def __init__(self,path):
        self.path=path
        self.rootdir=self.get_root_dir()
        self.usefuldir=set()
        self.lstinf = {}
        # Pair every OUTPUT/*.inf with every DEBUG/*.lst of the same module.
        for path in self.rootdir:
            for o_root, o_dir, o_file in os.walk(os.path.join(path, "OUTPUT"), topdown=True, followlinks=False):
                for INF in o_file:
                    if os.path.splitext(INF)[1] == '.inf':
                        for l_root, l_dir, l_file in os.walk(os.path.join(path, "DEBUG"), topdown=True,
                                                             followlinks=False):
                            for LST in l_file:
                                if os.path.splitext(LST)[1] == '.lst':
                                    self.lstinf[os.path.join(l_root, LST)] = os.path.join(o_root, INF)
                                    self.usefuldir.add(path)

    def get_root_dir(self):
        """Return the unique module directories that contain an OUTPUT dir."""
        rootdir=[]
        for root,dir,file in os.walk(self.path,topdown=True,followlinks=False):
            if "OUTPUT" in root:
                updir=root.split("OUTPUT",1)[0]
                rootdir.append(updir)
        rootdir=list(set(rootdir))
        return rootdir

    def lst_inf(self):
        # {lst path: inf path} accessor.
        return self.lstinf

    def package(self):
        """Return {lst path: [Packages] section body} from the paired .inf files."""
        package={}
        package_re=re.compile(r'Packages\.\w+]\n(.*)',re.S)
        for i in list(self.lstinf.values()):
            with open(i,'r') as inf:
                read=inf.read()
            section=read.split('[')
            for j in section:
                p=package_re.findall(j)
                if p:
                    package[i]=p[0].rstrip()
        return package

    def header(self,struct):
        """Return {struct: header path} by following the '#line "...h"'
        markers after the struct's typedef in the .lst files."""
        header={}
        head_re = re.compile('typedef.*} %s;[\n]+(.*)(?:typedef|formset)'%struct,re.M|re.S)
        head_re2 = re.compile(r'#line[\s\d]+"(\S+h)"')
        for i in list(self.lstinf.keys()):
            with open(i,'r') as lst:
                read = lst.read()
            h = head_re.findall(read)
            if h:
                head=head_re2.findall(h[0])
                if head:
                    format = head[0].replace('\\\\','/').replace('\\','/')
                    name =format.split('/')[-1]
                    head = self.headerfileset.get(name)
                    if head:
                        head = head.replace('\\','/')
                        header[struct] = head
        return header

    @property
    def headerfileset(self):
        """{header basename: full path} collected from every deps.txt file."""
        headerset = dict()
        for root,dirs,files in os.walk(self.path):
            for file in files:
                if os.path.basename(file) == 'deps.txt':
                    with open(os.path.join(root,file),"r") as fr:
                        for line in fr.readlines():
                            headerset[os.path.basename(line).strip()] = line.strip()
        return headerset

    def makefile(self,filename):
        """Return filename's path relative to its package, read from the
        DEBUG_DIR line of a module Makefile; None when not found."""
        re_format = re.compile(r'DEBUG_DIR.*(?:\S+Pkg)\\(.*\\%s)'%filename)
        for i in self.usefuldir:
            with open(os.path.join(i,'Makefile'),'r') as make:
                read = make.read()
            dir = re_format.findall(read)
            if dir:
                return dir[0]
        return None
class mainprocess(object):
def __init__(self,InputPath,Config,OutputPath):
self.init = 0xFCD00000
self.inputpath = os.path.abspath(InputPath)
self.outputpath = os.path.abspath(OutputPath)
self.LST = PATH(self.inputpath)
self.lst_dict = self.LST.lst_inf()
self.Config = Config
self.attribute_dict = {'0x3': 'NV, BS', '0x7': 'NV, BS, RT'}
self.guid = GUID(self.inputpath)
self.header={}
def main(self):
conf=Config(self.Config)
config_dict=conf.config_parser() #get {'0_0':[offset,name,guid,value,attribute]...,'1_0':....}
lst=parser_lst(list(self.lst_dict.keys()))
efi_dict=lst.efivarstore_parser() #get {name:struct} form lst file
keys=sorted(config_dict.keys())
all_struct=lst.struct()
stru_lst=lst.struct_lst()
title_list=[]
info_list=[]
header_list=[]
inf_list =[]
for i in stru_lst:
tmp = self.LST.header(i)
self.header.update(tmp)
for id_key in keys:
tmp_id=[id_key] #['0_0',[(struct,[name...]),(struct,[name...])]]
tmp_info={} #{name:struct}
for section in config_dict[id_key]:
c_offset,c_name,c_guid,c_value,c_attribute,c_comment = section
if c_name in efi_dict:
struct = efi_dict[c_name]
title='%s%s|L"%s"|%s|0x00||%s\n'%(PCD_NAME,c_name,c_name,self.guid.guid_parser(c_guid),self.attribute_dict[c_attribute])
if struct in all_struct:
lstfile = stru_lst[struct]
struct_dict=all_struct[struct]
try:
title2 = '%s%s|{0}|%s|0xFCD00000{\n <HeaderFiles>\n %s\n <Packages>\n%s\n}\n' % (PCD_NAME, c_name, struct, self.header[struct], self.LST.package()[self.lst_dict[lstfile]])
except KeyError:
WARNING.append("Warning: No <HeaderFiles> for struct %s"%struct)
title2 = '%s%s|{0}|%s|0xFCD00000{\n <HeaderFiles>\n %s\n <Packages>\n%s\n}\n' % (PCD_NAME, c_name, struct, '', self.LST.package()[self.lst_dict[lstfile]])
header_list.append(title2)
elif struct not in lst._ignore:
struct_dict ={}
print("ERROR: Struct %s can't found in lst file" %struct)
ERRORMSG.append("ERROR: Struct %s can't found in lst file" %struct)
if c_offset in struct_dict:
offset_name=struct_dict[c_offset]
info = "%s%s.%s|%s\n"%(PCD_NAME,c_name,offset_name,c_value)
blank_length = Max_Pcd_Len - len(info)
if blank_length <= 0:
info_comment = "%s%s.%s|%s%s# %s\n"%(PCD_NAME,c_name,offset_name,c_value," ",c_comment)
else:
info_comment = "%s%s.%s|%s%s# %s\n"%(PCD_NAME,c_name,offset_name,c_value,blank_length*" ",c_comment)
inf = "%s%s\n"%(PCD_NAME,c_name)
inf_list.append(inf)
tmp_info[info_comment]=title
else:
print("ERROR: Can't find offset %s with struct name %s"%(c_offset,struct))
ERRORMSG.append("ERROR: Can't find offset %s with name %s"%(c_offset,struct))
else:
print("ERROR: Can't find name %s in lst file"%(c_name))
ERRORMSG.append("ERROR: Can't find name %s in lst file"%(c_name))
tmp_id.append(list(self.reverse_dict(tmp_info).items()))
id,tmp_title_list,tmp_info_list = self.read_list(tmp_id)
title_list +=tmp_title_list
info_list.append(tmp_info_list)
inf_list = self.del_repeat(inf_list)
header_list = self.plus(self.del_repeat(header_list))
title_all=list(set(title_list))
info_list = self.remove_bracket(self.del_repeat(info_list))
for i in range(len(info_list)-1,-1,-1):
if len(info_list[i]) == 0:
info_list.remove(info_list[i])
for i in (inf_list, title_all, header_list):
i.sort()
return keys,title_all,info_list,header_list,inf_list
def correct_sort(self, PcdString):
# sort the Pcd list with two rules:
# First sort through Pcd name;
# Second if the Pcd exists several elements, sort them through index value.
if ("]|") in PcdString:
Pcdname = PcdString.split("[")[0]
Pcdindex = int(PcdString.split("[")[1].split("]")[0])
else:
Pcdname = PcdString.split("|")[0]
Pcdindex = 0
return Pcdname, Pcdindex
def remove_bracket(self,List):
for i in List:
for j in i:
tmp = j.split("|")
if (('L"' in j) and ("[" in j)) or (tmp[1].split("#")[0].strip() == '{0x0, 0x0}'):
tmp[0] = tmp[0][:tmp[0].index('[')]
List[List.index(i)][i.index(j)] = "|".join(tmp)
else:
List[List.index(i)][i.index(j)] = j
for i in List:
if type(i) == type([0,0]):
i.sort(key = lambda x:(self.correct_sort(x)[0], self.correct_sort(x)[1]))
return List
def write_all(self):
    # Generate the three output files (StructurePcd.dec/.dsc/.inf) from
    # the data collected by self.main().  The title block is emitted only
    # once (before the first id section) and, when info has exactly two
    # groups, the first group goes with the first id and the second group
    # with every later id.
    title_flag=1
    info_flag=1
    if not os.path.isdir(self.outputpath):
        os.makedirs(self.outputpath)
    decwrite = write2file(os.path.join(self.outputpath,'StructurePcd.dec'))
    dscwrite = write2file(os.path.join(self.outputpath,'StructurePcd.dsc'))
    infwrite = write2file(os.path.join(self.outputpath, 'StructurePcd.inf'))
    conf = Config(self.Config)
    ids,title,info,header,inf=self.main()
    decwrite.add2file(decstatement)
    decwrite.add2file(header)
    infwrite.add2file(infstatement)
    infwrite.add2file(inf)
    dscwrite.add2file(dscstatement)
    for id in ids:
        # conf.eval_id() renders the section header for this id.
        dscwrite.add2file(conf.eval_id(id))
        if title_flag:
            dscwrite.add2file(title)
            title_flag=0
        if len(info) == 1:
            dscwrite.add2file(info)
        elif len(info) == 2:
            if info_flag:
                dscwrite.add2file(info[0])
                info_flag =0
            else:
                dscwrite.add2file(info[1])
def del_repeat(self, List):
    """Drop duplicate entries.

    For a list of strings, return the unique values (set order).  For a
    list of lists, keep the first sub-list intact and strip from each
    later sub-list every element already present in the sub-lists before
    it.  Empty or single-element input is returned unchanged.
    """
    if len(List) < 2:
        return List
    if type(List[0]) == type('xxx'):
        return list(set(List))
    result = [List[0]]
    for pos in range(1, len(List)):
        seen = []
        for earlier in List[:pos]:
            seen += earlier
        result.append(self.__del(list(set(seen)), List[pos]))
    return result
def __del(self, list1, list2):
    """Return the elements of list2 that are absent from list1
    (duplicates in list2 are collapsed; result order is set order)."""
    return list(set(list2) - set(list1))
def reverse_dict(self, dict):
    """Invert a mapping by grouping keys under their shared value:
    {k: v} becomes {v: [k, ...]} with keys kept in insertion order."""
    inverted = {}
    for key, value in dict.items():
        inverted.setdefault(value, []).append(key)
    return inverted
def read_list(self, list):
    """Split ``[id, [(title, infos), ...]]`` into
    ``(id, [titles...], [flattened infos...])``."""
    titles = []
    infos = []
    for entry in list[1]:
        titles.append(entry[0])
        infos.extend(entry[1])
    return list[0], titles, infos
def plus(self, list):
    """Replace the 0xFCD00000 placeholder in each string entry with the
    next auto-incremented token id; list-typed entries are dropped.
    Note the token keeps the original upper-cased '0X' prefix."""
    patched = []
    for entry in list:
        if type(entry) != type([0]):
            self.init += 1
            token = ("0x%01x" % self.init).upper()
            patched.append(entry.replace('0xFCD00000', token))
    return patched
class write2file(object):
    """Append-mode writer that flattens nested strings/lists/tuples/dicts
    of strings into a single output file.

    Any existing file at Output is deleted on construction.  Only str,
    list, tuple and dict content is supported by add2file().
    """
    def __init__(self, Output):
        self.output = Output
        self.text = ''
        if os.path.exists(self.output):
            os.remove(self.output)

    def add2file(self, content):
        # Reset the accumulator so repeated calls never duplicate text.
        self.text = ''
        with open(self.output, 'a+') as file:
            file.write(self.__gen(content))

    def __gen(self, content):
        # Dispatch on the exact content type.
        if type(content) == type(''):
            return content
        elif type(content) == type([0, 0]) or type(content) == type((0, 0)):
            return self.__readlist(content)
        elif type(content) == type({0: 0}):
            return self.__readdict(content)

    def __readlist(self, list):
        # Depth-first walk appending every string leaf to self.text.
        for item in list:
            if type(item) == type([0, 0]) or type(item) == type((0, 0)):
                self.__readlist(item)
            elif type(item) == type(''):
                self.text += item
        return self.text

    def __readdict(self, dict):
        # A dict is flattened as its (key, value) pairs.
        return self.__readlist(list(dict.items()))
def stamp():
    """Return the current wall-clock time as a datetime object."""
    now = datetime.datetime.now()
    return now
def dtime(start, end, id=None):
    """Print the elapsed time between two datetime stamps.

    With an ``id`` the full delta is printed as ``<id> time:<delta>``;
    without one the delta is printed as ``Total time:<delta>`` with the
    trailing microseconds portion (last 7 characters) stripped.
    """
    # Fix: removed a leftover dead `pass` statement that preceded the
    # print in the `if id:` branch.
    if id:
        print("%s time:%s" % (id, str(end - start)))
    else:
        print("Total time:%s" % str(end - start)[:-7])
def main():
    # Command-line driver: parse arguments, run mainprocess to generate
    # the StructurePcd outputs, then report accumulated module-level
    # WARNING/ERRORMSG entries and the elapsed time.
    start = stamp()
    parser = argparse.ArgumentParser(prog = __prog__,
                                     description = __description__ + __copyright__,
                                     conflict_handler = 'resolve')
    parser.add_argument('-v', '--version', action = 'version',version = __version__, help="show program's version number and exit")
    parser.add_argument('-p', '--path', metavar='PATH', dest='path', help="platform build output directory")
    parser.add_argument('-c', '--config',metavar='FILENAME', dest='config', help="firmware configuration file")
    # NOTE(review): "directoy" typo in user-visible help text below.
    parser.add_argument('-o', '--outputdir', metavar='PATH', dest='output', help="output directoy")
    options = parser.parse_args()
    # All three options are mandatory; each missing one prints its own
    # error message instead of using argparse's required= handling.
    if options.config:
        if options.path:
            if options.output:
                run = mainprocess(options.path, options.config, options.output)
                print("Running...")
                run.write_all()
                # Warnings are de-duplicated and printed to stdout.
                if WARNING:
                    warning = list(set(WARNING))
                    for j in warning:
                        print(j)
                # Errors are de-duplicated and written to ERROR.log.
                if ERRORMSG:
                    ERROR = list(set(ERRORMSG))
                    with open("ERROR.log", 'w+') as error:
                        for i in ERROR:
                            error.write(i + '\n')
                    print("Some error find, error log in ERROR.log")
                print('Finished, Output files in directory %s'%os.path.abspath(options.output))
            else:
                print('Error command, no output path, use -h for help')
        else:
            print('Error command, no build path input, use -h for help')
    else:
        print('Error command, no output file, use -h for help')
    end = stamp()
    dtime(start, end)
# Script entry point.
if __name__ == '__main__':
    main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/ConvertFceToStructurePcd.py
|
##
# Generate symbol for SMI handler profile info.
#
# This tool depends on DIA2Dump.exe (VS) or nm (gcc) to parse debug entry.
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
from __future__ import print_function
import os
import re
import sys
from optparse import OptionParser
from xml.dom.minidom import parse
import xml.dom.minidom
versionNumber = "1.1"
__copyright__ = "Copyright (c) 2016, Intel Corporation. All rights reserved."
class Symbols:
    """Per-driver RVA -> (function, source, line) lookup table.

    The table is built either from ``nm -l`` output (GCC/ELF builds) or
    from ``Dia2Dump.exe -l`` output (MSVC PDB files) and queried with
    getSymbol().
    """
    def __init__(self):
        # Sorted list of [rva, functionName, lineNumber, sourceName].
        self.listLineAddress = []
        # Fix: initialize so getSymbol() is safe before any parse_* call.
        self.lineCount = 0
        self.pdbName = ""
        # Cache for the current function while parsing Dia2Dump output.
        self.functionName = ""
        # Cache for the current source file while parsing Dia2Dump output.
        self.sourceName = ""

    def getSymbol(self, rva):
        """Return [function] or [function, source, line] covering rva,
        or [] when no entry matches.

        NOTE: as in the original lookup, the last table entry can never
        match because the scan needs a following entry as upper bound.
        """
        index = 0
        while index + 1 < self.lineCount:
            if self.listLineAddress[index][0] <= rva and self.listLineAddress[index + 1][0] > rva:
                functionName = self.listLineAddress[index][1]
                lineName = self.listLineAddress[index][2]
                sourceName = self.listLineAddress[index][3]
                if lineName == 0:
                    return [functionName]
                else:
                    return [functionName, sourceName, lineName]
            index += 1
        return []

    def parse_debug_file(self, driverName, pdbName):
        """Populate the table from ``nm -l`` output for a GCC image."""
        # Fix: cmp() does not exist in Python 3 -- compare directly.
        if pdbName == "":
            return
        self.pdbName = pdbName
        try:
            nmCommand = "nm"
            nmLineOption = "-l"
            print("parsing (debug) - " + pdbName)
            os.system('%s %s %s > nmDump.line.log' % (nmCommand, nmLineOption, pdbName))
        except Exception:
            print('ERROR: nm command not available. Please verify PATH')
            return
        #
        # parse line
        #
        with open("nmDump.line.log") as linefile:
            reportLines = linefile.readlines()
        # 000113ca T AllocatePool c:\home\edk-ii\MdePkg\Library\UefiMemoryAllocationLib\MemoryAllocationLib.c:399
        patchLineFileMatchString = r"([0-9a-fA-F]*)\s+[T|D|t|d]\s+(\w+)\s*((?:[a-zA-Z]:)?[\w+\-./_a-zA-Z0-9\\\\]*):?([0-9]*)"
        for reportLine in reportLines:
            match = re.match(patchLineFileMatchString, reportLine)
            if match is not None:
                rva = int(match.group(1), 16)
                functionName = match.group(2)
                sourceName = match.group(3)
                if match.group(4) != "":
                    lineName = int(match.group(4))
                else:
                    lineName = 0
                self.listLineAddress.append([rva, functionName, lineName, sourceName])
        self.lineCount = len(self.listLineAddress)
        self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress: symbolAddress[0])

    def parse_pdb_file(self, driverName, pdbName):
        """Populate the table from Dia2Dump.exe -l output for a PDB."""
        if pdbName == "":
            return
        self.pdbName = pdbName
        try:
            # DIA2DumpCommand = "\"C:\\Program Files (x86)\Microsoft Visual Studio 14.0\\DIA SDK\\Samples\\DIA2Dump\\x64\\Debug\\Dia2Dump.exe\""
            DIA2DumpCommand = "Dia2Dump.exe"
            # DIA2SymbolOption = "-p"
            DIA2LinesOption = "-l"
            print("parsing (pdb) - " + pdbName)
            # os.system ('%s %s %s > DIA2Dump.symbol.log' % (DIA2DumpCommand, DIA2SymbolOption, pdbName))
            os.system('%s %s %s > DIA2Dump.line.log' % (DIA2DumpCommand, DIA2LinesOption, pdbName))
        except Exception:
            print('ERROR: DIA2Dump command not available. Please verify PATH')
            return
        #
        # parse line
        #
        with open("DIA2Dump.line.log") as linefile:
            reportLines = linefile.readlines()
        #   ** GetDebugPrintErrorLevel
        #   line 32 at [0000C790][0001:0000B790], len = 0x3 c:\...\basedebugprinterrorlevellib.c (MD5: ...)
        #   line 36 at [0000C793][0001:0000B793], len = 0x5
        patchLineFileMatchString = r"\s+line ([0-9]+) at \[([0-9a-fA-F]{8})\]\[[0-9a-fA-F]{4}\:[0-9a-fA-F]{8}\], len = 0x[0-9a-fA-F]+\s*([\w+\-\:./_a-zA-Z0-9\\\\]*)\s*"
        patchLineFileMatchStringFunc = r"\*\*\s+(\w+)\s*"
        for reportLine in reportLines:
            match = re.match(patchLineFileMatchString, reportLine)
            if match is not None:
                # A line entry without a file name inherits the cached one.
                if match.group(3) != "":
                    self.sourceName = match.group(3)
                sourceName = self.sourceName
                functionName = self.functionName
                rva = int(match.group(2), 16)
                lineName = int(match.group(1))
                self.listLineAddress.append([rva, functionName, lineName, sourceName])
            else:
                match = re.match(patchLineFileMatchStringFunc, reportLine)
                if match is not None:
                    self.functionName = match.group(1)
        self.lineCount = len(self.listLineAddress)
        self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress: symbolAddress[0])
class SymbolsFile:
    """Container mapping a driver name to its Symbols lookup table."""
    def __init__(self):
        # driverName -> Symbols instance
        self.symbolsTable = {}
# Module-level state shared between main() and getSymbolName();
# symbolsFile is replaced with a SymbolsFile instance in main().
symbolsFile = ""
driverName = ""
rvaName = ""
symbolName = ""
def getSymbolName(driverName, rva):
    """Look up rva in the named driver's symbol table.

    Returns the getSymbol() result, or [] on any failure (unknown
    driver, uninitialized table, lookup error).
    """
    global symbolsFile
    try:
        symbolList = symbolsFile.symbolsTable[driverName]
        return symbolList.getSymbol(rva) if symbolList is not None else []
    except Exception:
        return []
def myOptionParser():
    """Parse the command line; -i is mandatory, -o falls back to
    SmiHandlerProfileInfoSymbol.xml."""
    usage = "%prog [--version] [-h] [--help] [-i inputfile [-o outputfile] [-g guidreffile]]"
    parser = OptionParser(usage=usage, description=__copyright__,
                          version="%prog " + str(versionNumber))
    parser.add_option("-i", "--inputfile", dest="inputfilename", type="string",
                      help="The input memory profile info file output from MemoryProfileInfo application in MdeModulePkg")
    parser.add_option("-o", "--outputfile", dest="outputfilename", type="string",
                      help="The output memory profile info file with symbol, MemoryProfileInfoSymbol.txt will be used if it is not specified")
    parser.add_option("-g", "--guidref", dest="guidreffilename", type="string",
                      help="The input guid ref file output from build")
    (options, args) = parser.parse_args()
    if options.inputfilename is None:
        parser.error("no input file specified")
    if options.outputfilename is None:
        options.outputfilename = "SmiHandlerProfileInfoSymbol.xml"
    return options
# Well-known SMM dispatch / event-group GUID -> C symbol name map, used
# to replace raw HandlerType GUID strings in the XML; extended at
# runtime by genGuidString() from the build's Guid xref file.
dictGuid = {
    '00000000-0000-0000-0000-000000000000':'gZeroGuid',
    '2A571201-4966-47F6-8B86-F31E41F32F10':'gEfiEventLegacyBootGuid',
    '27ABF055-B1B8-4C26-8048-748F37BAA2DF':'gEfiEventExitBootServicesGuid',
    '7CE88FB3-4BD7-4679-87A8-A8D8DEE50D2B':'gEfiEventReadyToBootGuid',
    '02CE967A-DD7E-4FFC-9EE7-810CF0470880':'gEfiEndOfDxeEventGroupGuid',
    '60FF8964-E906-41D0-AFED-F241E974E08E':'gEfiDxeSmmReadyToLockProtocolGuid',
    '18A3C6DC-5EEA-48C8-A1C1-B53389F98999':'gEfiSmmSwDispatch2ProtocolGuid',
    '456D2859-A84B-4E47-A2EE-3276D886997D':'gEfiSmmSxDispatch2ProtocolGuid',
    '4CEC368E-8E8E-4D71-8BE1-958C45FC8A53':'gEfiSmmPeriodicTimerDispatch2ProtocolGuid',
    'EE9B8D90-C5A6-40A2-BDE2-52558D33CCA1':'gEfiSmmUsbDispatch2ProtocolGuid',
    '25566B03-B577-4CBF-958C-ED663EA24380':'gEfiSmmGpiDispatch2ProtocolGuid',
    '7300C4A1-43F2-4017-A51B-C81A7F40585B':'gEfiSmmStandbyButtonDispatch2ProtocolGuid',
    '1B1183FA-1823-46A7-8872-9C578755409D':'gEfiSmmPowerButtonDispatch2ProtocolGuid',
    '58DC368D-7BFA-4E77-ABBC-0E29418DF930':'gEfiSmmIoTrapDispatch2ProtocolGuid',
    }
def genGuidString(guidreffile):
    """Merge 'GUID NAME' lines from the build's guid xref file into the
    module-level dictGuid map; existing entries are never overwritten."""
    for guidLine in guidreffile.readlines():
        parts = guidLine.split(" ")
        if len(parts) == 2:
            guid, guidName = parts
            if guid not in dictGuid:
                dictGuid[guid] = guidName
def createSym(symbolName):
    """Build a <Symbol> DOM element from [function, sourceFile?, line?].

    <SourceFile> and <LineNumber> children are added only when the
    corresponding entries exist in symbolName.
    """
    doc = xml.dom.minidom.Document()
    symbolNode = doc.createElement("Symbol")
    functionNode = doc.createElement("Function")
    functionNode.appendChild(doc.createTextNode(symbolName[0]))
    symbolNode.appendChild(functionNode)
    if len(symbolName) >= 2:
        sourceNode = doc.createElement("SourceFile")
        sourceNode.appendChild(doc.createTextNode(symbolName[1]))
        symbolNode.appendChild(sourceNode)
    if len(symbolName) >= 3:
        lineNode = doc.createElement("LineNumber")
        lineNode.appendChild(doc.createTextNode(str(symbolName[2])))
        symbolNode.appendChild(lineNode)
    return symbolNode
def main():
    """Annotate a SmiHandlerProfile XML dump with symbol information.

    Parses the input XML, optionally merges a GUID xref file, builds a
    symbol table for every module carrying a <Pdb> entry, and attaches a
    <Symbol> child to each handler/caller RVA.  Returns 1 on file
    errors, otherwise None (exit code 0).
    """
    global symbolsFile
    global Options
    Options = myOptionParser()
    symbolsFile = SymbolsFile()
    try:
        DOMTree = xml.dom.minidom.parse(Options.inputfilename)
    except Exception:
        print("fail to open input " + Options.inputfilename)
        return 1
    if Options.guidreffilename is not None:
        try:
            guidreffile = open(Options.guidreffilename)
        except Exception:
            print("fail to open guidref" + Options.guidreffilename)
            return 1
        genGuidString(guidreffile)
        guidreffile.close()
    SmiHandlerProfile = DOMTree.documentElement
    SmiHandlerDatabase = SmiHandlerProfile.getElementsByTagName("SmiHandlerDatabase")
    SmiHandlerCategory = SmiHandlerDatabase[0].getElementsByTagName("SmiHandlerCategory")
    for smiHandlerCategory in SmiHandlerCategory:
        SmiEntry = smiHandlerCategory.getElementsByTagName("SmiEntry")
        for smiEntry in SmiEntry:
            # Replace raw GUID values with their C names where known.
            if smiEntry.hasAttribute("HandlerType"):
                guidValue = smiEntry.getAttribute("HandlerType")
                if guidValue in dictGuid:
                    smiEntry.setAttribute("HandlerType", dictGuid[guidValue])
            SmiHandler = smiEntry.getElementsByTagName("SmiHandler")
            for smiHandler in SmiHandler:
                Module = smiHandler.getElementsByTagName("Module")
                Pdb = Module[0].getElementsByTagName("Pdb")
                if (len(Pdb)) >= 1:
                    driverName = Module[0].getAttribute("Name")
                    pdbName = Pdb[0].childNodes[0].data
                    # The <Pdb> node is consumed (removed from the output).
                    Module[0].removeChild(Pdb[0])
                    symbolsFile.symbolsTable[driverName] = Symbols()
                    # Fix: cmp() does not exist in Python 3 -- compare directly.
                    if pdbName[-3:] == "pdb":
                        symbolsFile.symbolsTable[driverName].parse_pdb_file(driverName, pdbName)
                    else:
                        symbolsFile.symbolsTable[driverName].parse_debug_file(driverName, pdbName)
                # NOTE(review): driverName is only (re)assigned when a <Pdb>
                # entry exists; modules without one reuse the previous value.
                Handler = smiHandler.getElementsByTagName("Handler")
                RVA = Handler[0].getElementsByTagName("RVA")
                print(" Handler RVA: %s" % RVA[0].childNodes[0].data)
                if (len(RVA)) >= 1:
                    rvaName = RVA[0].childNodes[0].data
                    symbolName = getSymbolName(driverName, int(rvaName, 16))
                    if (len(symbolName)) >= 1:
                        SymbolNode = createSym(symbolName)
                        Handler[0].appendChild(SymbolNode)
                Caller = smiHandler.getElementsByTagName("Caller")
                RVA = Caller[0].getElementsByTagName("RVA")
                print(" Caller RVA: %s" % RVA[0].childNodes[0].data)
                if (len(RVA)) >= 1:
                    rvaName = RVA[0].childNodes[0].data
                    symbolName = getSymbolName(driverName, int(rvaName, 16))
                    if (len(symbolName)) >= 1:
                        SymbolNode = createSym(symbolName)
                        Caller[0].appendChild(SymbolNode)
    try:
        # Fix: toprettyxml(encoding=...) returns bytes, so the output file
        # must be opened in binary mode (text mode raised TypeError).
        newfile = open(Options.outputfilename, "wb")
    except Exception:
        print("fail to open output" + Options.outputfilename)
        return 1
    newfile.write(DOMTree.toprettyxml(indent="\t", newl="\n", encoding="utf-8"))
    newfile.close()
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
    sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/SmiHandlerProfileSymbolGen.py
|
#!/usr/bin/python3
'''
Copyright 2021 (c) Apple Inc. All rights reserved.
SPDX-License-Identifier: BSD-2-Clause-Patent
EFI gdb commands based on efi_debugging classes.
Example usage:
OvmfPkg/build.sh qemu -gdb tcp::9000
gdb -ex "target remote localhost:9000" -ex "source efi_gdb.py"
(gdb) help efi
Commands for debugging EFI. efi <cmd>
List of efi subcommands:
efi devicepath -- Display an EFI device path.
efi guid -- Display info about EFI GUID's.
efi hob -- Dump EFI HOBs. Type 'hob -h' for more info.
efi symbols -- Load Symbols for EFI. Type 'efi_symbols -h' for more info.
efi table -- Dump EFI System Tables. Type 'table -h' for more info.
This module is coded against a generic gdb remote serial stub. It should work
with QEMU, JTAG debugger, or a generic EFI gdb remote serial stub.
If you are debugging with QEMU or a JTAG hardware debugger you can insert
a CpuDeadLoop(); in your code, attach with gdb, and then `p Index=1` to
step past. If you have a debug stub in EFI you can use CpuBreakpoint();.
'''
from gdb.printing import RegexpCollectionPrettyPrinter
from gdb.printing import register_pretty_printer
import gdb
import os
import sys
import uuid
import optparse
import shlex
# gdb will not import from the same path as this script.
# so lets fix that for gdb...
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from efi_debugging import PeTeImage, patch_ctypes # noqa: E402
from efi_debugging import EfiHob, GuidNames, EfiStatusClass # noqa: E402
from efi_debugging import EfiBootMode, EfiDevicePath # noqa: E402
from efi_debugging import EfiConfigurationTable, EfiTpl # noqa: E402
class GdbFileObject(object):
    '''Provide a file-like object, backed by gdb inferior memory, as
    required by the efi_debugging parsers.'''

    def __init__(self):
        self.inferior = gdb.selected_inferior()
        self.offset = 0

    def tell(self):
        """Return the current memory offset."""
        return self.offset

    def read(self, size=-1):
        """Read ``size`` bytes of inferior memory at the current offset.

        Raises MemoryError when gdb cannot read the requested range.
        """
        if size == -1:
            # arbitrary default size
            size = 0x1000000
        # Fix: the previous code caught MemoryError, substituted a
        # zero-filled buffer and hit `assert False` -- which under
        # `python -O` silently returned zeros.  Let the error surface
        # with a useful message instead.
        try:
            data = self.inferior.read_memory(self.offset, size)
        except MemoryError:
            raise MemoryError(
                f'gdb could not read memory 0x{size:x}'
                + f' bytes from 0x{self.offset:08x}')
        if len(data) != size:
            raise MemoryError(
                f'gdb could not read memory 0x{size:x}'
                + f' bytes from 0x{self.offset:08x}')
        # convert memoryview object to a bytestring.
        return data.tobytes()

    def readable(self):
        return True

    def seek(self, offset, whence=0):
        """Position the cursor; whence 0 = absolute, 1 = relative.
        Seeking from the end (whence 2) is not supported."""
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        else:
            # whence == 2 is seek from end
            raise NotImplementedError

    def seekable(self):
        return True

    def write(self, data):
        """Write ``data`` to inferior memory at the current offset."""
        self.inferior.write_memory(self.offset, data)
        return len(data)

    def writable(self):
        return True

    def truncate(self, size=None):
        raise NotImplementedError

    def flush(self):
        raise NotImplementedError

    def fileno(self):
        raise NotImplementedError
class EfiSymbols:
    """Class to manage EFI Symbols.

    State is kept on the class (not per instance): `loaded` maps the
    .text address of every probed PE/COFF (or TE) image to its PeTeImage,
    and stride/range/verbose configure how pcToPeCoff scans backward for
    an image header.
    """
    loaded = {}
    stride = None
    range = None
    verbose = False
    def __init__(self, file=None):
        # NOTE: stored on the class, so every instance shares one file object.
        EfiSymbols.file = file if file else GdbFileObject()
    @ classmethod
    def __str__(cls):
        return ''.join(f'{value}\n' for value in cls.loaded.values())
    @ classmethod
    def configure_search(cls, stride, range=None, verbose=False):
        cls.stride = stride
        cls.range = range
        cls.verbose = verbose
    @ classmethod
    def clear(cls):
        # Forget every previously probed image.
        cls.loaded = {}
    @ classmethod
    def add_symbols_for_pecoff(cls, pecoff):
        '''Tell gdb the location of the .text and .data sections
        via add-symbol-file; returns a status prefix string.'''
        if pecoff.TextAddress in cls.loaded:
            return 'Already Loaded: '
        try:
            res = 'Loading Symbols Failed:'
            res = gdb.execute('add-symbol-file ' + pecoff.CodeViewPdb +
                              ' ' + hex(pecoff.TextAddress) +
                              ' -s .data ' + hex(pecoff.DataAddress),
                              False, True)
            cls.loaded[pecoff.TextAddress] = pecoff
            if cls.verbose:
                print(f'\n{res:s}\n')
            return ''
        except gdb.error:
            return res
    @ classmethod
    def address_to_symbols(cls, address, reprobe=False):
        '''
        Given an address search backwards for a PE/COFF (or TE) header
        and load symbols. Return a status string.
        '''
        if not isinstance(address, int):
            address = int(address)
        pecoff = cls.address_in_loaded_pecoff(address)
        if not reprobe and pecoff is not None:
            # skip the probe of the remote
            return f'{pecoff} is already loaded'
        pecoff = PeTeImage(cls.file, None)
        if pecoff.pcToPeCoff(address, cls.stride, cls.range):
            res = cls.add_symbols_for_pecoff(pecoff)
            return f'{res}{pecoff}'
        else:
            return f'0x{address:08x} not in a PE/COFF (or TE) image'
    @ classmethod
    def address_in_loaded_pecoff(cls, address):
        # Linear scan of loaded images; return the one covering address,
        # or None.
        if not isinstance(address, int):
            address = int(address)
        for value in cls.loaded.values():
            if (address >= value.LoadAddress and
                    address <= value.EndLoadAddress):
                return value
        return None
    @ classmethod
    def unload_symbols(cls, address):
        # NOTE(review): if address is not inside a loaded image, pecoff is
        # None and the attribute access below raises AttributeError, which
        # `except gdb.error` does not catch -- confirm callers guard this.
        if not isinstance(address, int):
            address = int(address)
        pecoff = cls.address_in_loaded_pecoff(address)
        try:
            res = 'Unloading Symbols Failed:'
            res = gdb.execute(
                f'remove-symbol-file -a {hex(pecoff.TextAddress):s}',
                False, True)
            del cls.loaded[pecoff.LoadAddress]
            return res
        except gdb.error:
            return res
class CHAR16_PrettyPrinter(object):
    """Render a CHAR16 value as an L'x' literal, using a hex escape for
    control characters below 0x20."""
    def __init__(self, val):
        self.val = val

    def to_string(self):
        code = int(self.val)
        if code < 0x20:
            return f"L'\\x{code:02x}'"
        return f"L'{chr(self.val):s}'"
class EFI_TPL_PrettyPrinter(object):
    """Render an EFI_TPL value through the EfiTpl helper."""
    def __init__(self, val):
        self.val = val

    def to_string(self):
        tpl = EfiTpl(int(self.val))
        return str(tpl)
class EFI_STATUS_PrettyPrinter(object):
    """Render an EFI_STATUS as its symbolic name plus the raw hex value."""
    def __init__(self, val):
        self.val = val

    def to_string(self):
        status = int(self.val)
        name = str(EfiStatusClass(status))
        return f'{name:s} (0x{status:08x})'
class EFI_BOOT_MODE_PrettyPrinter(object):
    """Render an EFI_BOOT_MODE value through the EfiBootMode helper."""
    def __init__(self, val):
        self.val = val

    def to_string(self):
        mode = EfiBootMode(int(self.val))
        return str(mode)
class EFI_GUID_PrettyPrinter(object):
    """Print 'EFI_GUID' as 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'"""
    def __init__(self, val):
        self.val = val

    def to_string(self):
        # Reassemble the GUID text from the four C struct fields; with a
        # byte-like view of the whole struct uuid.UUID could do this.
        data4 = self.val['Data4']
        parts = [
            f"{int(self.val['Data1']):08X}",
            f"{int(self.val['Data2']):04X}",
            f"{int(self.val['Data3']):04X}",
            f"{int(data4[0]):02X}{int(data4[1]):02X}",
            "".join(f"{int(data4[i]):02X}" for i in range(2, 8)),
        ]
        return str(GuidNames("-".join(parts)))
def build_pretty_printer():
    """Collect the EFI pretty-printers under one collection named EFI.

    Disable at runtime with: disable pretty-printer global EFI
    """
    printers = RegexpCollectionPrettyPrinter("EFI")
    # gdb can also dump a CHAR16 string via `x/sh <address>`
    for name, pattern, cls in (
            ('CHAR16', '^CHAR16$', CHAR16_PrettyPrinter),
            ('EFI_BOOT_MODE', '^EFI_BOOT_MODE$', EFI_BOOT_MODE_PrettyPrinter),
            ('EFI_GUID', '^EFI_GUID$', EFI_GUID_PrettyPrinter),
            ('EFI_STATUS', '^EFI_STATUS$', EFI_STATUS_PrettyPrinter),
            ('EFI_TPL', '^EFI_TPL$', EFI_TPL_PrettyPrinter)):
        printers.add_printer(name, pattern, cls)
    return printers
class EfiDevicePathCmd (gdb.Command):
    """Display an EFI device path. Type 'efi devicepath -h' for more info"""
    def __init__(self):
        super(EfiDevicePathCmd, self).__init__(
            "efi devicepath", gdb.COMMAND_NONE)
        self.file = GdbFileObject()
    def create_options(self, arg, from_tty):
        # Build the optparse parser for this subcommand and return the
        # parsed (options, args) for the gdb argument string.
        usage = "usage: %prog [options] [arg]"
        # NOTE(review): description text appears copied from the symbols
        # command; this command dumps device paths, not symbols.
        description = (
            "Command that can load EFI PE/COFF and TE image symbols. ")
        self.parser = optparse.OptionParser(description=description, prog='efi devicepath', usage=usage, add_help_option=False)
        self.parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='hex dump extra data', default=False)
        self.parser.add_option('-n', '--node', action='store_true', dest='node', help='dump a single device path node', default=False)
        self.parser.add_option('-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False)
        return self.parser.parse_args(shlex.split(arg))
    def invoke(self, arg, from_tty):
        '''gdb command to dump EFI device paths'''
        try:
            (options, _) = self.create_options(arg, from_tty)
            if options.help:
                self.parser.print_help()
                return
            # arg is evaluated as a gdb expression to obtain the address.
            dev_addr = int(gdb.parse_and_eval(arg))
        except ValueError:
            print("Invalid argument!")
            return
        if options.node:
            # Dump only the single node at dev_addr.
            print(EfiDevicePath(
                self.file).device_path_node_str(dev_addr,
                                                options.verbose))
        else:
            device_path = EfiDevicePath(self.file, dev_addr, options.verbose)
            if device_path.valid():
                print(device_path)
class EfiGuidCmd (gdb.Command):
    """Display info about EFI GUID's. Type 'efi guid -h' for more info"""

    def __init__(self):
        super(EfiGuidCmd, self).__init__("efi guid",
                                         gdb.COMMAND_NONE,
                                         gdb.COMPLETE_EXPRESSION)
        self.file = GdbFileObject()

    def create_options(self, arg, from_tty):
        """Build the option parser; return (options, args) for `arg`."""
        usage = "usage: %prog [options] [arg]"
        # Fix: the original adjacent string fragments concatenated without
        # separating spaces ("variablesin", "filecan", "showingthe") and
        # misspelled "supports"; the help text now reads correctly.
        description = (
            "Show EFI_GUID values and the C name of the EFI_GUID variables "
            "in the C code. If symbols are loaded the Guid.xref file "
            "can be processed and the complete GUID database can be shown. "
            "This command also supports generating new GUID's, and showing "
            "the value used to initialize the C variable.")
        self.parser = optparse.OptionParser(
            description=description,
            prog='efi guid',
            usage=usage,
            add_help_option=False)
        self.parser.add_option(
            '-n', '--new',
            action='store_true', dest='new',
            help='Generate a new GUID',
            default=False)
        self.parser.add_option(
            '-v', '--verbose',
            action='store_true', dest='verbose',
            help='Also display GUID C structure values',
            default=False)
        self.parser.add_option(
            '-h', '--help',
            action='store_true', dest='help',
            help='Show help for the command',
            default=False)
        return self.parser.parse_args(shlex.split(arg))

    def invoke(self, arg, from_tty):
        '''gdb command to show, look up, or generate EFI GUIDs'''
        try:
            (options, args) = self.create_options(arg, from_tty)
            if options.help:
                self.parser.print_help()
                return
            if len(args) >= 1:
                # A C-style GUID initializer contains spaces and therefore
                # arrives as multiple args; glue them back together.
                guid = ' '.join(args)
        except ValueError:
            print('bad arguments!')
            return
        if options.new:
            guid = uuid.uuid4()
            print(str(guid).upper())
            print(GuidNames.to_c_guid(guid))
            return
        if len(args) > 0:
            if GuidNames.is_guid_str(arg):
                # e.g. guid 05AD34BA-6F02-4214-952E-4DA0398E2BB9
                key = guid.upper()
                name = GuidNames.to_name(key)
            elif GuidNames.is_c_guid(arg):
                # e.g. guid { 0x414e6bdd, 0xe47b, 0x47cc,
                #             { 0xb2, 0x44, 0xbb, 0x61, 0x02, 0x0c,0xf5, 0x16 }}
                key = GuidNames.from_c_guid(arg)
                name = GuidNames.to_name(key)
            else:
                # e.g. guid gEfiDxeServicesTableGuid
                name = guid
                try:
                    key = GuidNames.to_guid(name)
                    name = GuidNames.to_name(key)
                except ValueError:
                    return
            extra = f'{GuidNames.to_c_guid(key)}: ' if options.verbose else ''
            print(f'{key}: {extra}{name}')
        else:
            # No argument: dump the whole known-GUID database.
            for key, value in GuidNames._dict_.items():
                extra = f'{GuidNames.to_c_guid(key)}: ' if options.verbose else ''
                print(f'{key}: {extra}{value}')
class EfiHobCmd (gdb.Command):
    """Dump EFI HOBs. Type 'hob -h' for more info."""
    def __init__(self):
        super(EfiHobCmd, self).__init__("efi hob", gdb.COMMAND_NONE)
        self.file = GdbFileObject()
    def create_options(self, arg, from_tty):
        # Build the optparse parser for this subcommand; returns the
        # parsed (options, args).
        usage = "usage: %prog [options] [arg]"
        # NOTE(review): description text appears copied from the symbols
        # command; this command dumps HOBs.
        description = (
            "Command that can load EFI PE/COFF and TE image symbols. ")
        self.parser = optparse.OptionParser(description=description, prog='efi hob', usage=usage, add_help_option=False)
        self.parser.add_option('-a', '--address', type="int", dest='address', help='Parse HOBs from address', default=None)
        self.parser.add_option('-t', '--type', type="int", dest='type', help='Only dump HOBS of his type', default=None)
        self.parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='hex dump extra data', default=False)
        self.parser.add_option('-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False)
        return self.parser.parse_args(shlex.split(arg))
    def invoke(self, arg, from_tty):
        '''gdb command to dump EFI HOBs'''
        try:
            (options, _) = self.create_options(arg, from_tty)
            if options.help:
                self.parser.print_help()
                return
        except ValueError:
            print('bad arguments!')
            return
        if options.address:
            # NOTE(review): options.address is already an int (optparse
            # type="int") yet is passed to gdb.parse_and_eval, which takes
            # an expression string -- confirm this path behaves as intended.
            try:
                value = gdb.parse_and_eval(options.address)
                address = int(value)
            except ValueError:
                address = None
        else:
            # No address: EfiHob locates the HOB list itself.
            address = None
        hob = EfiHob(self.file,
                     address,
                     options.verbose).get_hob_by_type(options.type)
        print(hob)
class EfiTablesCmd (gdb.Command):
    """Dump EFI System Tables. Type 'table -h' for more info."""
    def __init__(self):
        super(EfiTablesCmd, self).__init__("efi table", gdb.COMMAND_NONE)
        self.file = GdbFileObject()
    def create_options(self, arg, from_tty):
        # Build the optparse parser; only -h/--help is supported.
        usage = "usage: %prog [options] [arg]"
        description = "Dump EFI System Tables. Requires symbols to be loaded"
        self.parser = optparse.OptionParser(description=description, prog='efi table', usage=usage, add_help_option=False)
        self.parser.add_option('-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False)
        return self.parser.parse_args(shlex.split(arg))
    def invoke(self, arg, from_tty):
        '''gdb command to dump EFI System Tables'''
        try:
            (options, _) = self.create_options(arg, from_tty)
            if options.help:
                self.parser.print_help()
                return
        except ValueError:
            print('bad arguments!')
            return
        # The configuration table is reached through the gST symbol.
        gST = gdb.lookup_global_symbol('gST')
        if gST is None:
            print('Error: This command requires symbols for gST to be loaded')
            return
        table = EfiConfigurationTable(
            self.file, int(gST.value(gdb.selected_frame())))
        if table:
            print(table, '\n')
class EfiSymbolsCmd (gdb.Command):
    """Load Symbols for EFI. Type 'efi symbols -h' for more info."""
    def __init__(self):
        super(EfiSymbolsCmd, self).__init__("efi symbols",
                                            gdb.COMMAND_NONE,
                                            gdb.COMPLETE_EXPRESSION)
        self.file = GdbFileObject()
        # Cached address of the EFI System Table once found.
        self.gST = None
        self.efi_symbols = EfiSymbols(self.file)
    def create_options(self, arg, from_tty):
        # Build the optparse parser for this subcommand; returns the
        # parsed (options, args).
        usage = "usage: %prog [options]"
        description = (
            "Command that can load EFI PE/COFF and TE image symbols. "
            "If you are having trouble in PEI try adding --pei. "
            "Given any address search backward for the PE/COFF (or TE header) "
            "and then parse the PE/COFF image to get debug info. "
            "The address can come from the current pc, pc values in the "
            "frame, or an address provided to the command"
            "")
        self.parser = optparse.OptionParser(description=description, prog='efi symbols', usage=usage, add_help_option=False)
        self.parser.add_option('-a', '--address', type="str", dest='address', help='Load symbols for image that contains address', default=None)
        self.parser.add_option('-c', '--clear', action='store_true', dest='clear', help='Clear the cache of loaded images', default=False)
        self.parser.add_option('-f', '--frame', action='store_true', dest='frame', help='Load symbols for current stack frame', default=False)
        self.parser.add_option('-p', '--pc', action='store_true', dest='pc', help='Load symbols for pc', default=False)
        self.parser.add_option('--pei', action='store_true', dest='pei', help='Load symbols for PEI (searches every 4 bytes)', default=False)
        self.parser.add_option('-e', '--extended', action='store_true', dest='extended', help='Try to load all symbols based on config tables', default=False)
        # NOTE(review): type="long" is a Python 2 optparse holdover kept
        # for compatibility -- verify it still parses on the target Python.
        self.parser.add_option('-r', '--range', type="long", dest='range', help='How far to search backward for start of PE/COFF Image', default=None)
        self.parser.add_option('-s', '--stride', type="long", dest='stride', help='Boundary to search for PE/COFF header', default=None)
        self.parser.add_option('-t', '--thread', action='store_true', dest='thread', help='Load symbols for the frames of all threads', default=False)
        self.parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Show more info on symbols loading in gdb', default=False)
        self.parser.add_option('-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False)
        return self.parser.parse_args(shlex.split(arg))
    def save_user_state(self):
        # Remember pagination/thread/frame so invoke() can restore them.
        self.pagination = gdb.parameter("pagination")
        if self.pagination:
            gdb.execute("set pagination off")
        self.user_selected_thread = gdb.selected_thread()
        self.user_selected_frame = gdb.selected_frame()
    def restore_user_state(self):
        self.user_selected_thread.switch()
        self.user_selected_frame.select()
        if self.pagination:
            gdb.execute("set pagination on")
    def canonical_address(self, address):
        '''
        Scrub out 48-bit non canonical addresses
        Raw frames in gdb can have some funky values
        '''
        # Skip lowest 256 bytes to avoid interrupt frames
        if address > 0xFF and address < 0x00007FFFFFFFFFFF:
            return True
        if address >= 0xFFFF800000000000:
            return True
        return False
    def pc_set_for_frames(self):
        '''Return a set for the PC's in the current frame'''
        pc_list = []
        frame = gdb.newest_frame()
        while frame:
            pc = int(frame.read_register('pc'))
            if self.canonical_address(pc):
                pc_list.append(pc)
            frame = frame.older()
        return set(pc_list)
    def invoke(self, arg, from_tty):
        '''gdb command to symbolicate all the frames from all the threads'''
        try:
            (options, _) = self.create_options(arg, from_tty)
            if options.help:
                self.parser.print_help()
                return
        except ValueError:
            print('bad arguments!')
            return
        self.dont_repeat()
        self.save_user_state()
        if options.clear:
            self.efi_symbols.clear()
            return
        if options.pei:
            # XIP code can be 4 byte aligned in the FV
            options.stride = 4
            options.range = 0x100000
        self.efi_symbols.configure_search(options.stride,
                                          options.range,
                                          options.verbose)
        if options.thread:
            thread_list = gdb.selected_inferior().threads()
        else:
            thread_list = (gdb.selected_thread(),)
        address = None
        if options.address:
            value = gdb.parse_and_eval(options.address)
            address = int(value)
        elif options.pc:
            address = gdb.selected_frame().pc()
        if address:
            # Explicit address/pc mode: probe just that one address.
            res = self.efi_symbols.address_to_symbols(address)
            print(res)
        else:
            for thread in thread_list:
                thread.switch()
                # You can not iterate over frames as you load symbols. Loading
                # symbols changes the frames gdb can see due to inlining and
                # boom. So we loop adding symbols for the current frame, and
                # we test to see if new frames have shown up. If new frames
                # show up we process those new frames. Thus 1st pass is the
                # raw frame, and other passes are only new PC values.
                NewPcSet = self.pc_set_for_frames()
                while NewPcSet:
                    PcSet = self.pc_set_for_frames()
                    for pc in NewPcSet:
                        res = self.efi_symbols.address_to_symbols(pc)
                        print(res)
                    NewPcSet = PcSet.symmetric_difference(
                        self.pc_set_for_frames())
        # find the EFI System tables the 1st time
        if self.gST is None:
            gST = gdb.lookup_global_symbol('gST')
            if gST is not None:
                self.gST = int(gST.value(gdb.selected_frame()))
                table = EfiConfigurationTable(self.file, self.gST)
            else:
                table = None
        else:
            table = EfiConfigurationTable(self.file, self.gST)
        if options.extended and table:
            # load symbols from EFI System Table entry
            for address, _ in table.DebugImageInfo():
                res = self.efi_symbols.address_to_symbols(address)
                print(res)
        # sync up the GUID database from the build output
        for m in gdb.objfiles():
            if GuidNames.add_build_guid_file(str(m.filename)):
                break
        self.restore_user_state()
class EfiCmd(gdb.Command):
    """Commands for debugging EFI. efi <cmd>"""

    def __init__(self):
        # Register 'efi' as a prefix command (final True argument) so the
        # EfiSymbolsCmd/EfiTablesCmd/... subcommands can attach under it.
        super(EfiCmd, self).__init__("efi",
                                     gdb.COMMAND_NONE,
                                     gdb.COMPLETE_NONE,
                                     True)

    def invoke(self, arg, from_tty):
        '''default to loading symbols'''
        if any(flag in arg for flag in ('-h', '--help')):
            gdb.execute('help efi')
        else:
            # default to loading all symbols
            gdb.execute('efi symbols --extended')
class LoadEmulatorEfiSymbols(gdb.Breakpoint):
    '''
    breakpoint for EmulatorPkg to load symbols
    Note: make sure SecGdbScriptBreak is not optimized away!
    Also turn off the dlopen() flow like on macOS.
    '''

    def stop(self):
        symbols = EfiSymbols()
        # Emulator adds SizeOfHeaders so we need file alignment to search
        symbols.configure_search(0x20)
        frame = gdb.newest_frame()
        try:
            # gdb was looking at spill address, pre spill :(
            load_address = frame.read_register('rdx')
            add_symbol_flag = frame.read_register('rcx')
        except gdb.error:
            # Fall back to the named locals when registers are unavailable.
            load_address = frame.read_var('LoadAddress')
            add_symbol_flag = frame.read_var('AddSymbolFlag')
        if add_symbol_flag == 1:
            result = symbols.address_to_symbols(load_address)
        else:
            result = symbols.unload_symbols(load_address)
        print(result)
        # keep running
        return False
# Get python backtraces to debug errors in this script
gdb.execute("set python print-stack full")
# tell efi_debugging how to walk data structures with pointers
try:
    pointer_width = gdb.lookup_type('int').pointer().sizeof
except ValueError:
    # No target/type info yet; assume 64-bit pointers.
    pointer_width = 8
patch_ctypes(pointer_width)
register_pretty_printer(None, build_pretty_printer(), replace=True)
# gdb commands that we are adding
# add `efi` prefix gdb command
EfiCmd()
# subcommands for `efi`
EfiSymbolsCmd()
EfiTablesCmd()
EfiHobCmd()
EfiDevicePathCmd()
EfiGuidCmd()
#
# Breakpoint used by the EmulatorPkg SEC to trigger symbol loading.
bp = LoadEmulatorEfiSymbols('SecGdbScriptBreak', internal=True)
if bp.pending:
    # SecGdbScriptBreak unresolved: presumably not the emulator binary.
    try:
        gdb.selected_frame()
        # Not the emulator so do this when you attach
        gdb.execute('efi symbols --frame --extended', True)
        gdb.execute('bt')
        # If you want to skip the above commands comment them out
        pass
    except gdb.error:
        # If you load the script and there is no target ignore the error.
        pass
    else:
        # start the emulator
        gdb.execute('run')
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/efi_gdb.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/__init__.py
|
## @file
# This module provide command line entry for generating package document!
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
import os, sys, logging, traceback, subprocess
from optparse import OptionParser
from plugins.EdkPlugins.edk2.model import baseobject
from plugins.EdkPlugins.edk2.model import doxygengen
# Preprocessor macro sets fed to doxygen, keyed by "<ARCH>_<TOOLCHAIN>" tag
# (plus 'ALL' covering every CPU/compiler combination).
# NOTE(review): "Marco" looks like a typo for "Macro", but the name is part
# of this module's interface, so it is kept as-is.
gArchMarcoDict = {'ALL'      : 'MDE_CPU_IA32 MDE_CPU_X64 MDE_CPU_EBC MDE_CPU_IPF _MSC_EXTENSIONS __GNUC__ __INTEL_COMPILER',
                  'IA32_MSFT': 'MDE_CPU_IA32 _MSC_EXTENSIONS',
                  'IA32_GNU' : 'MDE_CPU_IA32 __GNUC__',
                  'X64_MSFT' : 'MDE_CPU_X64 _MSC_EXTENSIONS ASM_PFX= OPTIONAL= ',
                  'X64_GNU'  : 'MDE_CPU_X64 __GNUC__ ASM_PFX= OPTIONAL= ',
                  'IPF_MSFT' : 'MDE_CPU_IPF _MSC_EXTENSIONS ASM_PFX= OPTIONAL= ',
                  'IPF_GNU'  : 'MDE_CPU_IPF __GNUC__ ASM_PFX= OPTIONAL= ',
                  'EBC_INTEL': 'MDE_CPU_EBC __INTEL_COMPILER ASM_PFX= OPTIONAL= '}
def parseCmdArgs():
    """Parse and validate command-line options for package doc generation.

    Returns a tuple: (workspace, dec file, doxygen path, output path, arch
    tag, document mode, include-only flag, HTML Workshop path). Exits via
    parser.error() when validation fails.
    """
    parser = OptionParser(version="Package Document Generation Tools - Version 0.1")
    parser.add_option('-w', '--workspace', action='store', type='string', dest='WorkspacePath',
                      help='Specify workspace absolute path. For example: c:\\tianocore')
    parser.add_option('-p', '--decfile', action='store', dest='PackagePath',
                      help='Specify the absolute path for package DEC file. For example: c:\\tianocore\\MdePkg\\MdePkg.dec')
    # Fixed: the example path used single backslashes ("\bin"), which Python
    # interpreted as a backspace escape and corrupted the help text.
    parser.add_option('-x', '--doxygen', action='store', dest='DoxygenPath',
                      help='Specify the absolute path of doxygen tools installation. For example: C:\\Program Files\\doxygen\\bin\\doxygen.exe')
    parser.add_option('-o', '--output', action='store', dest='OutputPath',
                      help='Specify the document output path. For example: c:\\docoutput')
    parser.add_option('-a', '--arch', action='store', dest='Arch', choices=list(gArchMarcoDict.keys()),
                      help='Specify the architecture used in preprocess package\'s source. For example: -a IA32_MSFT')
    parser.add_option('-m', '--mode', action='store', dest='DocumentMode', choices=['CHM', 'HTML'],
                      help='Specify the document mode from : CHM or HTML')
    parser.add_option('-i', '--includeonly', action='store_true', dest='IncludeOnly',
                      help='Only generate document for package\'s public interfaces produced by include folder. ')
    parser.add_option('-c', '--htmlworkshop', dest='HtmlWorkshopPath',
                      help='Specify the absolute path for Microsoft HTML Workshop\'s hhc.exe file. For example: C:\\Program Files\\HTML Help Workshop\\hhc.exe')
    (options, args) = parser.parse_args()

    # validate the options
    errors = []
    if options.WorkspacePath is None:
        errors.append('- Please specify workspace path via option -w!')
    elif not os.path.exists(options.WorkspacePath):
        errors.append("- Invalid workspace path %s! The workspace path should be exist in absolute path!" % options.WorkspacePath)

    if options.PackagePath is None:
        errors.append('- Please specify package DEC file path via option -p!')
    elif not os.path.exists(options.PackagePath):
        errors.append("- Invalid package's DEC file path %s! The DEC path should be exist in absolute path!" % options.PackagePath)

    default = "C:\\Program Files\\doxygen\\bin\\doxygen.exe"
    if options.DoxygenPath is None:
        if os.path.exists(default):
            print("Warning: Assume doxygen tool is installed at %s. If not, please specify via -x" % default)
            options.DoxygenPath = default
        else:
            errors.append('- Please specify the path of doxygen tool installation via option -x! or install it in default path %s' % default)
    elif not os.path.exists(options.DoxygenPath):
        errors.append("- Invalid doxygen tool path %s! The doxygen tool path should be exist in absolute path!" % options.DoxygenPath)

    if options.OutputPath is not None:
        if not os.path.exists(options.OutputPath):
            # create output
            try:
                os.makedirs(options.OutputPath)
            except OSError:
                # Narrowed from a bare except: makedirs failures are OSError.
                errors.append('- Fail to create the output directory %s' % options.OutputPath)
    else:
        if options.PackagePath is not None and os.path.exists(options.PackagePath):
            # Default the output next to the DEC file.
            dirpath = os.path.dirname(options.PackagePath)
            default = os.path.join(dirpath, "Document")
            print('Warning: Assume document output at %s. If not, please specify via option -o' % default)
            options.OutputPath = default
            if not os.path.exists(default):
                try:
                    os.makedirs(default)
                except OSError:
                    errors.append('- Fail to create default output directory %s! Please specify document output directory via option -o' % default)
        else:
            errors.append('- Please specify document output path via option -o!')

    if options.Arch is None:
        options.Arch = 'ALL'
        print("Warning: Assume arch is \"ALL\". If not, specify via -a")

    if options.DocumentMode is None:
        options.DocumentMode = "HTML"
        print("Warning: Assume document mode is \"HTML\". If not, specify via -m")

    if options.IncludeOnly is None:
        options.IncludeOnly = False
        print("Warning: Assume generate package document for all package\'s source including public interfaces and implementation libraries and modules.")

    if options.DocumentMode.lower() == 'chm':
        default = "C:\\Program Files\\HTML Help Workshop\\hhc.exe"
        if options.HtmlWorkshopPath is None:
            if os.path.exists(default):
                print('Warning: Assume the installation path of Microsoft HTML Workshop is %s. If not, specify via option -c.' % default)
                options.HtmlWorkshopPath = default
            else:
                errors.append('- Please specify the installation path of Microsoft HTML Workshop via option -c!')
        elif not os.path.exists(options.HtmlWorkshopPath):
            errors.append('- The installation path of Microsoft HTML Workshop %s does not exist. ' % options.HtmlWorkshopPath)

    if len(errors) != 0:
        print('\n')
        parser.error('Fail to start due to following reasons: \n%s' % '\n'.join(errors))
    return (options.WorkspacePath, options.PackagePath, options.DoxygenPath, options.OutputPath,
            options.Arch, options.DocumentMode, options.IncludeOnly, options.HtmlWorkshopPath)
def createPackageObject(wsPath, pkgPath):
    """Load the package model from a DEC file.

    Returns the baseobject.Package instance, or None when loading fails.
    """
    try:
        pkgObj = baseobject.Package(None, wsPath)
        pkgObj.Load(pkgPath)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a load failure.
        logging.getLogger().error('Fail to create package object!')
        return None
    return pkgObj
def callbackLogMessage(msg, level):
    """Logging callback: echo a trimmed message to stdout.

    The level argument is part of the callback signature but is not used.
    """
    text = msg.strip()
    print(text)
def callbackCreateDoxygenProcess(doxPath, configPath):
    """Invoke doxygen on the given config file, echoing the command line.

    On Windows the executable path is quoted to survive embedded spaces.
    """
    template = '"%s" %s' if sys.platform == 'win32' else '%s %s'
    cmd = template % (doxPath, configPath)
    print(cmd)
    subprocess.call(cmd, shell=True)
def DocumentFixup(outPath, arch):
    """Post-process doxygen HTML output under outPath.

    Walks the output tree and applies the page-specific FixPage* rewrites
    for the structures that doxygen renders incorrectly. arch selects the
    fixups ('all' enables the cross-architecture ones).
    """
    # find BASE_LIBRARY_JUMP_BUFFER structure reference page
    print('\n >>> Start fixup document \n')
    for root, dirs, files in os.walk(outPath):
        # Prune SCM metadata folders. Fixed: removing entries from the list
        # being iterated skipped elements; rebuild the slice in place so
        # os.walk still sees the pruning.
        dirs[:] = [d for d in dirs if d.lower() not in ('.svn', '_svn', 'cvs')]
        for file in files:
            if not file.lower().endswith('.html'):
                continue
            # Fixed: os.walk() already yields roots anchored at outPath;
            # joining outPath again duplicated the prefix when outPath was
            # a relative path.
            fullpath = os.path.join(root, file)
            try:
                with open(fullpath, 'r') as f:
                    text = f.read()
            except (OSError, UnicodeDecodeError):
                logging.getLogger().error('\nFail to open file %s\n' % fullpath)
                continue
            if arch.lower() == 'all':
                if text.find('BASE_LIBRARY_JUMP_BUFFER Struct Reference') != -1:
                    FixPageBASE_LIBRARY_JUMP_BUFFER(fullpath, text)
                if text.find('MdePkg/Include/Library/BaseLib.h File Reference') != -1:
                    FixPageBaseLib(fullpath, text)
                if text.find('IA32_IDT_GATE_DESCRIPTOR Union Reference') != -1:
                    FixPageIA32_IDT_GATE_DESCRIPTOR(fullpath, text)
            if text.find('MdePkg/Include/Library/UefiDriverEntryPoint.h File Reference') != -1:
                FixPageUefiDriverEntryPoint(fullpath, text)
            if text.find('MdePkg/Include/Library/UefiApplicationEntryPoint.h File Reference') != -1:
                FixPageUefiApplicationEntryPoint(fullpath, text)
    print(' >>> Finish all document fixing up! \n')
def FixPageBaseLib(path, text):
    """Fix up the doxygen page for BaseLib.h and rewrite it to path.

    Annotates the per-architecture BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT
    definitions and collapses duplicated struct cross-reference rows.
    """
    print(' >>> Fixup BaseLib file page at file %s \n' % path)
    lines = text.split('\n')
    lastBaseJumpIndex = -1
    lastIdtGateDescriptor = -1
    # Scan backwards so del/insert below a position never shifts the
    # indices still to be visited.
    for index in range(len(lines) - 1, -1, -1):
        line = lines[index]
        if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 4 </td>':
            lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 4 [IA32] </td>'
        if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 0x10 </td>':
            lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 0x10 [IPF] </td>'
        if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 8 </td>':
            # Fixed: the replacement text used to say "9", silently
            # corrupting the documented alignment value (it is 8).
            lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 8 [EBC, x64] </td>'
        if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 4') != -1:
            lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 4',
                                                'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 4 [IA32]')
        if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 0x10') != -1:
            lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 0x10',
                                                'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 0x10 [IPF]')
        if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 8') != -1:
            lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 8',
                                                'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a> 8 [x64, EBC]')
        if line.find('>BASE_LIBRARY_JUMP_BUFFER</a>') != -1:
            # Keep only the first (lowest-index) occurrence of the row.
            if lastBaseJumpIndex != -1:
                del lines[lastBaseJumpIndex]
            lastBaseJumpIndex = index
        if line.find('>IA32_IDT_GATE_DESCRIPTOR</a></td>') != -1:
            if lastIdtGateDescriptor != -1:
                del lines[lastIdtGateDescriptor]
            lastIdtGateDescriptor = index
    try:
        # Narrowed from a bare except; 'with' guarantees the handle closes.
        with open(path, 'w') as f:
            f.write('\n'.join(lines))
    except OSError:
        logging.getLogger().error(" <<< Fail to fixup file %s\n" % path)
        return
    print(" <<< Finish to fixup file %s\n" % path)
def FixPageIA32_IDT_GATE_DESCRIPTOR(path, text):
    """Insert per-architecture 'Data Fields' headings into the doxygen page
    for the IA32_IDT_GATE_DESCRIPTOR union, then rewrite it to path."""
    print(' >>> Fixup structure reference IA32_IDT_GATE_DESCRIPTOR at file %s \n' % path)
    lines = text.split('\n')
    x64_heading = '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>'
    ia32_heading = '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>'
    # Walk backwards so inserts never disturb indices yet to be visited.
    for idx in reversed(range(len(lines))):
        stripped = lines[idx].strip()
        if 'struct {</td>' in stripped and '>Uint64</a></td>' in lines[idx - 2]:
            lines.insert(idx, x64_heading)
        if 'struct {</td>' in stripped and 'Data Fields' in lines[idx - 1]:
            lines.insert(idx, ia32_heading)
    try:
        f = open(path, 'w')
        f.write('\n'.join(lines))
        f.close()
    except:
        logging.getLogger().error(" <<< Fail to fixup file %s\n" % path)
        return
    print(" <<< Finish to fixup file %s\n" % path)
def FixPageBASE_LIBRARY_JUMP_BUFFER(path, text):
    # Fix the doxygen struct page for BASE_LIBRARY_JUMP_BUFFER: merge the
    # per-arch description lines into one and insert per-arch "Data Fields"
    # headings, then rewrite the page to 'path'.
    print(' >>> Fixup structure reference BASE_LIBRARY_JUMP_BUFFER at file %s \n' % path)
    lines = text.split('\n')
    # Scanning runs from the END of the file backwards, so bInDetail is True
    # for lines that come AFTER the 'Detailed Description' heading in file
    # order, and flips False once the scan passes that heading.
    bInDetail = True
    bNeedRemove = False
    for index in range(len(lines) - 1, -1, -1):
        line = lines[index]
        if line.find('Detailed Description') != -1:
            bInDetail = False
        if line.startswith('EBC context buffer used by') and lines[index - 1].startswith('x64 context buffer'):
            # Collapse the four per-arch description lines into one line and
            # mark the remaining three for removal below.
            lines[index] = "IA32/IPF/X64/" + line
            bNeedRemove = True
        if line.startswith("x64 context buffer") or line.startswith('IPF context buffer used by') or \
           line.startswith('IA32 context buffer used by'):
            if bNeedRemove:
                # NOTE(review): list.remove() deletes the FIRST equal line,
                # which may not be lines[index] if duplicates exist — TODO
                # confirm the pages never contain duplicate description lines.
                lines.remove(line)
        # Insert per-arch headings before the first field of each arch's
        # section (R0=EBC, Rbx=X64, F2=IPF, Ebx=IA32), unless already present.
        if line.find('>R0</a>') != -1 and not bInDetail:
            if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For EBC</h2></td></tr>':
                lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For EBC</h2></td></tr>')
        if line.find('>Rbx</a>') != -1 and not bInDetail:
            if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>':
                lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>')
        if line.find('>F2</a>') != -1 and not bInDetail:
            if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For IPF</h2></td></tr>':
                lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IPF</h2></td></tr>')
        if line.find('>Ebx</a>') != -1 and not bInDetail:
            if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>':
                lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>')
    try:
        f = open(path, 'w')
        f.write('\n'.join(lines))
        f.close()
    except:
        logging.getLogger().error(" <<< Fail to fixup file %s" % path)
        return
    print(" <<< Finish to fixup file %s\n" % path)
def FixPageUefiDriverEntryPoint(path, text):
    # Trim redundant reference blocks from the doxygen page for
    # UefiDriverEntryPoint.h: for both _ModuleEntryPoint and EfiMain, delete
    # the lines between the first '</dl>' after the signature and the
    # '<p>References <a' line, then rewrite the page to 'path'.
    print(' >>> Fixup file reference MdePkg/Include/Library/UefiDriverEntryPoint.h at file %s \n' % path)
    lines = text.split('\n')
    bInModuleEntry = False   # currently inside _ModuleEntryPoint's section
    bInEfiMain = False       # currently inside EfiMain's section
    ModuleEntryDlCount = 0   # '</dl>' lines seen inside _ModuleEntryPoint
    ModuleEntryDelStart = 0  # first line index to delete (exclusive bound below)
    ModuleEntryDelEnd = 0    # last line index to delete
    EfiMainDlCount = 0
    EfiMainDelStart = 0
    EfiMainDelEnd = 0
    for index in range(len(lines)):
        line = lines[index].strip()
        if line.find('EFI_STATUS</a> EFIAPI _ModuleEntryPoint </td>') != -1:
            bInModuleEntry = True
        if line.find('EFI_STATUS</a> EFIAPI EfiMain </td>') != -1:
            bInEfiMain = True
        if line.startswith('<p>References <a'):
            # End of whichever section we are in; record the deletion window.
            if bInModuleEntry:
                ModuleEntryDelEnd = index - 1
                bInModuleEntry = False
            elif bInEfiMain:
                EfiMainDelEnd = index - 1
                bInEfiMain = False
        if bInModuleEntry:
            if line.startswith('</dl>'):
                ModuleEntryDlCount = ModuleEntryDlCount + 1
                if ModuleEntryDlCount == 1:
                    ModuleEntryDelStart = index + 1
        if bInEfiMain:
            if line.startswith('</dl>'):
                EfiMainDlCount = EfiMainDlCount + 1
                if EfiMainDlCount == 1:
                    EfiMainDelStart = index + 1
    # Delete from the end backwards so earlier indices stay valid.
    if EfiMainDelEnd > EfiMainDelStart:
        for index in range(EfiMainDelEnd, EfiMainDelStart, -1):
            del lines[index]
    if ModuleEntryDelEnd > ModuleEntryDelStart:
        for index in range(ModuleEntryDelEnd, ModuleEntryDelStart, -1):
            del lines[index]
    try:
        f = open(path, 'w')
        f.write('\n'.join(lines))
        f.close()
    except:
        logging.getLogger().error(" <<< Fail to fixup file %s" % path)
        return
    print(" <<< Finish to fixup file %s\n" % path)
def FixPageUefiApplicationEntryPoint(path, text):
    # Same trimming as FixPageUefiDriverEntryPoint, but for the doxygen page
    # of UefiApplicationEntryPoint.h. The two functions are near-identical;
    # they are kept separate to match the existing module layout.
    print(' >>> Fixup file reference MdePkg/Include/Library/UefiApplicationEntryPoint.h at file %s \n' % path)
    lines = text.split('\n')
    bInModuleEntry = False   # currently inside _ModuleEntryPoint's section
    bInEfiMain = False       # currently inside EfiMain's section
    ModuleEntryDlCount = 0
    ModuleEntryDelStart = 0
    ModuleEntryDelEnd = 0
    EfiMainDlCount = 0
    EfiMainDelStart = 0
    EfiMainDelEnd = 0
    for index in range(len(lines)):
        line = lines[index].strip()
        if line.find('EFI_STATUS</a> EFIAPI _ModuleEntryPoint </td>') != -1:
            bInModuleEntry = True
        if line.find('EFI_STATUS</a> EFIAPI EfiMain </td>') != -1:
            bInEfiMain = True
        if line.startswith('<p>References <a'):
            # End of whichever section we are in; record the deletion window.
            if bInModuleEntry:
                ModuleEntryDelEnd = index - 1
                bInModuleEntry = False
            elif bInEfiMain:
                EfiMainDelEnd = index - 1
                bInEfiMain = False
        if bInModuleEntry:
            if line.startswith('</dl>'):
                ModuleEntryDlCount = ModuleEntryDlCount + 1
                if ModuleEntryDlCount == 1:
                    ModuleEntryDelStart = index + 1
        if bInEfiMain:
            if line.startswith('</dl>'):
                EfiMainDlCount = EfiMainDlCount + 1
                if EfiMainDlCount == 1:
                    EfiMainDelStart = index + 1
    # Delete from the end backwards so earlier indices stay valid.
    if EfiMainDelEnd > EfiMainDelStart:
        for index in range(EfiMainDelEnd, EfiMainDelStart, -1):
            del lines[index]
    if ModuleEntryDelEnd > ModuleEntryDelStart:
        for index in range(ModuleEntryDelEnd, ModuleEntryDelStart, -1):
            del lines[index]
    try:
        f = open(path, 'w')
        f.write('\n'.join(lines))
        f.close()
    except:
        logging.getLogger().error(" <<< Fail to fixup file %s" % path)
        return
    print(" <<< Finish to fixup file %s\n" % path)
if __name__ == '__main__':
    # Command-line entry: parse options, build the package model, run
    # doxygen, post-process the HTML, and optionally compile a CHM.
    wspath, pkgpath, doxpath, outpath, archtag, docmode, isinc, hwpath = parseCmdArgs()

    # configure logging system
    logfilepath = os.path.join(outpath, 'log.txt')
    # NOTE(review): logfilepath is computed but never handed to basicConfig,
    # so logging goes to stderr only — confirm whether filename=logfilepath
    # was intended.
    logging.basicConfig(format='%(levelname)-8s %(message)s', level=logging.DEBUG)

    # create package model object firstly
    pkgObj = createPackageObject(wspath, pkgpath)
    if pkgObj is None:
        sys.exit(-1)

    # create doxygen action model
    arch = None
    tooltag = None
    if archtag.lower() != 'all':
        # archtag is "<ARCH>_<TOOLCHAIN>", e.g. "IA32_MSFT".
        arch = archtag.split('_')[0]
        tooltag = archtag.split('_')[1]
    else:
        arch = 'all'
        tooltag = 'all'

    # preprocess package and call doxygen
    try:
        action = doxygengen.PackageDocumentAction(doxpath,
                                                  hwpath,
                                                  outpath,
                                                  pkgObj,
                                                  docmode,
                                                  callbackLogMessage,
                                                  arch,
                                                  tooltag,
                                                  isinc,
                                                  True)
        action.RegisterCallbackDoxygenProcess(callbackCreateDoxygenProcess)
        action.Generate()
    except:
        message = traceback.format_exception(*sys.exc_info())
        logging.getLogger().error('Fail to create doxygen action! \n%s' % ''.join(message))
        sys.exit(-1)

    DocumentFixup(outpath, arch)

    # generate CHM is necessary
    if docmode.lower() == 'chm':
        indexpath = os.path.join(outpath, 'html', 'index.hhp')
        if sys.platform == 'win32':
            cmd = '"%s" %s' % (hwpath, indexpath)
        else:
            cmd = '%s %s' % (hwpath, indexpath)
        subprocess.call(cmd)
        print('\nFinish to generate package document! Please open %s for review' % os.path.join(outpath, 'html', 'index.chm'))
    else:
        print('\nFinish to generate package document! Please open %s for review' % os.path.join(outpath, 'html', 'index.html'))
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/packagedoc_cli.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/__init__.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/__init__.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
import array
import uuid
import re
import os
import logging
import core.pe as pe
def GetLogger():
    """Return the shared logger used for EFI binary parsing messages."""
    logger_name = 'EFI Binary File'
    return logging.getLogger(logger_name)
class EFIBinaryError(Exception):
    """Raised when an EFI firmware image fails structural validation."""

    def __init__(self, message):
        super(EFIBinaryError, self).__init__()
        self._message = message

    def GetMessage(self):
        """Return the human-readable failure description."""
        return self._message
class EfiFd(object):
    """A flash device image: an ordered collection of firmware volumes."""

    # Minimum bytes needed for a volume header to be worth parsing.
    EFI_FV_HEADER_SIZE = 0x48

    def __init__(self):
        self._fvs = []

    def Load(self, fd, size):
        """Parse consecutive firmware volumes from fd until size is reached.

        Each volume is 8-byte aligned within the device image.
        """
        position = fd.tell()
        while position + self.EFI_FV_HEADER_SIZE < size:
            volume = EfiFv(self)
            volume.Load(fd)
            self._fvs.append(volume)
            position = align(position + volume.GetHeader().GetFvLength(), 8)
            fd.seek(position)

    def GetFvs(self):
        """Return the list of parsed EfiFv objects."""
        return self._fvs
class EfiFv(object):
    # Parser for one firmware volume: header, block map, and FFS files.
    # GUID identifying the standard FFS file-system format.
    FILE_SYSTEM_GUID = uuid.UUID('{8c8ce578-8a3d-4f1c-9935-896185c32dd3}')

    def __init__(self, parent=None):
        self._size = 0
        self._filename = None
        self._fvheader = None
        self._blockentries = []
        self._ffs = []
        # following field is for FV in FD
        self._parent = parent
        self._offset = 0
        self._raw = array.array('B')

    def Load(self, fd):
        # Parse one firmware volume starting at fd's current position.
        # Leaves fd positioned at the end of the volume's raw bytes.
        self._offset = fd.tell()
        self._filename = fd.name

        # get file header
        self._fvheader = EfiFirmwareVolumeHeader.Read(fd)
        #self._fvheader.Dump()
        self._size = self._fvheader.GetFvLength()

        if self._fvheader.GetFileSystemGuid() != self.FILE_SYSTEM_GUID:
            # Not FFS-formatted: keep only the raw volume bytes.
            fd.seek(self._offset)
            self._raw.fromfile(fd, self.GetHeader().GetFvLength())
            return

        # read block map
        blockentry = BlockMapEntry.Read(fd)
        self._blockentries.append(blockentry)
        # NOTE(review): the loop re-appends the current entry before reading
        # the next, so the first entry appears twice while the zero
        # terminator is never stored; the length check below only balances
        # because of that duplicate. Confirm GetHeaderRawData() consumers
        # tolerate the duplicated first entry.
        while (blockentry.GetNumberBlocks() != 0 and blockentry.GetLength() != 0):
            self._blockentries.append(blockentry)
            blockentry = BlockMapEntry.Read(fd)

        if self._fvheader.GetSize() + (len(self._blockentries)) * 8 != \
           self._fvheader.GetHeaderLength():
            raise EFIBinaryError("Volume Header length not consistent with block map!")

        # FFS files are 8-byte aligned within the volume.
        index = align(fd.tell(), 8)
        count = 0
        while ((index + EfiFfs.FFS_HEADER_SIZE) < self._size):
            ffs = EfiFfs.Read(fd, self)
            if not isValidGuid(ffs.GetNameGuid()):
                # An all-0xFF name GUID marks erased space: stop parsing.
                break
            self._ffs.append(ffs)
            count += 1
            index = align(fd.tell(), 8)

        # Re-read the whole volume as raw bytes for GetRawData().
        fd.seek(self._offset)
        self._raw.fromfile(fd, self.GetHeader().GetFvLength())

    def GetFfs(self):
        return self._ffs

    def GetHeader(self):
        return self._fvheader

    def GetBlockEntries(self):
        return self._blockentries

    def GetHeaderRawData(self):
        # Header bytes followed by the stored block-map entries.
        ret = []
        ret += self._fvheader.GetRawData()
        for block in self._blockentries:
            ret += block.GetRawData()
        return ret

    def GetOffset(self):
        return 0

    def GetRawData(self):
        return self._raw.tolist()
class BinaryItem(object):
    """Base class for fixed-size binary structures read from a file."""

    def __init__(self, parent=None):
        self._size = 0
        self._arr = array.array('B')
        self._parent = parent

    @classmethod
    def Read(cls, fd, parent=None):
        """Construct an instance and populate it from fd."""
        instance = cls(parent)
        instance.fromfile(fd)
        return instance

    def Load(self, fd):
        """Populate this instance from fd (alias of fromfile)."""
        self.fromfile(fd)

    def GetSize(self):
        """should be implemented by inherited class"""

    def fromfile(self, fd):
        # Read exactly GetSize() bytes into the backing byte array.
        self._arr.fromfile(fd, self.GetSize())

    def GetParent(self):
        return self._parent
class EfiFirmwareVolumeHeader(BinaryItem):
    """Accessor for a 56-byte EFI_FIRMWARE_VOLUME_HEADER."""

    # Attribute bit -> name, in the exact order the names are reported.
    # (There is deliberately no entry for 0x100.)
    _ATTR_BITS = (
        (0x01, 'EFI_FVB2_READ_DISABLED_CAP'),
        (0x02, 'EFI_FVB2_READ_ENABLED_CAP'),
        (0x04, 'EFI_FVB2_READ_STATUS'),
        (0x08, 'EFI_FVB2_WRITE_DISABLED_CAP'),
        (0x10, 'EFI_FVB2_WRITE_ENABLED_CAP'),
        (0x20, 'EFI_FVB2_WRITE_STATUS'),
        (0x40, 'EFI_FVB2_LOCK_CAP'),
        (0x80, 'EFI_FVB2_LOCK_STATUS'),
        (0x200, 'EFI_FVB2_STICKY_WRITE'),
        (0x400, 'EFI_FVB2_MEMORY_MAPPED'),
        (0x800, 'EFI_FVB2_ERASE_POLARITY'),
        (0x1000, 'EFI_FVB2_READ_LOCK_CAP'),
        (0x2000, 'EFI_FVB2_READ_LOCK_STATUS'),
        (0x4000, 'EFI_FVB2_WRITE_LOCK_CAP'),
        (0x8000, 'EFI_FVB2_WRITE_LOCK_STATUS'),
    )

    # Alignment field (bits 16..20 of the attribute word) -> name.
    _ALIGNMENTS = {
        0x00010000: 'EFI_FVB2_ALIGNMENT_2',
        0x00020000: 'EFI_FVB2_ALIGNMENT_4',
        0x00030000: 'EFI_FVB2_ALIGNMENT_8',
        0x00040000: 'EFI_FVB2_ALIGNMENT_16',
        0x00050000: 'EFI_FVB2_ALIGNMENT_32',
        0x00060000: 'EFI_FVB2_ALIGNMENT_64',
        0x00070000: 'EFI_FVB2_ALIGNMENT_128',
        0x00080000: 'EFI_FVB2_ALIGNMENT_256',
        0x00090000: 'EFI_FVB2_ALIGNMENT_512',
        0x000A0000: 'EFI_FVB2_ALIGNMENT_1K',
        0x000B0000: 'EFI_FVB2_ALIGNMENT_2K',
        0x000C0000: 'EFI_FVB2_ALIGNMENT_4K',
        0x000D0000: 'EFI_FVB2_ALIGNMENT_8K',
        0x000E0000: 'EFI_FVB2_ALIGNMENT_16K',
        0x000F0000: 'EFI_FVB2_ALIGNMENT_32K',
        0x00100000: 'EFI_FVB2_ALIGNMENT_64K',
        0x00110000: 'EFI_FVB2_ALIGNMENT_128K',
        0x00120000: 'EFI_FVB2_ALIGNMENT_256K',
        0x00130000: 'EFI_FVB2_ALIGNMENT_512K',
    }

    def GetSize(self):
        return 56

    def GetSigunature(self):
        # Bytes 40..43 hold the '_FVH' signature (name misspelling kept for
        # interface compatibility).
        return ''.join(chr(b) for b in self._arr.tolist()[40:44])

    def GetAttribute(self):
        return list2int(self._arr.tolist()[44:48])

    def GetErasePolarity(self):
        """True when the erase-polarity attribute bit is set."""
        return 'EFI_FVB2_ERASE_POLARITY' in self.GetAttrStrings()

    def GetAttrStrings(self):
        """Decode the attribute word into a list of symbolic names."""
        value = self.GetAttribute()
        names = [name for bit, name in self._ATTR_BITS if value & bit]
        if value == 0:
            names.append('EFI_FVB2_ALIGNMENT_1')
        alignment = self._ALIGNMENTS.get(value & 0x001F0000)
        if alignment is not None:
            names.append(alignment)
        return names

    def GetHeaderLength(self):
        return list2int(self._arr.tolist()[48:50])

    def Dump(self):
        print('Signature: %s' % self.GetSigunature())
        print('Attribute: 0x%X' % self.GetAttribute())
        print('Header Length: 0x%X' % self.GetHeaderLength())
        print('File system Guid: ', self.GetFileSystemGuid())
        print('Revision: 0x%X' % self.GetRevision())
        print('FvLength: 0x%X' % self.GetFvLength())

    def GetFileSystemGuid(self):
        return list2guid(self._arr.tolist()[16:32])

    def GetRevision(self):
        return int(self._arr.tolist()[55])

    def GetFvLength(self):
        return list2int(self._arr.tolist()[32:40])

    def GetRawData(self):
        return self._arr.tolist()
class BlockMapEntry(BinaryItem):
    """Accessor for an 8-byte EFI_FV_BLOCK_MAP_ENTRY (count, length)."""

    def GetSize(self):
        return 8

    def GetNumberBlocks(self):
        raw = self._arr.tolist()
        return list2int(raw[0:4])

    def GetLength(self):
        raw = self._arr.tolist()
        return list2int(raw[4:8])

    def GetRawData(self):
        return self._arr.tolist()

    def __str__(self):
        return '[BlockEntry] Number = 0x%X, length=0x%X' % (self.GetNumberBlocks(), self.GetLength())
class EfiFfs(object):
    # Parser for one FFS file inside a firmware volume.
    FFS_HEADER_SIZE = 24

    def __init__(self, parent=None):
        self._header = None
        # following field is for FFS in FV file.
        self._parent = parent
        self._offset = 0
        self._sections = []

    def Load(self, fd):
        # Parse one FFS file at fd's current (8-byte aligned) position and
        # leave fd positioned at the start of the next FFS file.
        self._offset = align(fd.tell(), 8)

        self._header = EfiFfsHeader.Read(fd, self)

        if not isValidGuid(self.GetNameGuid()):
            # All-0xFF name GUID: erased space, nothing more to parse.
            return

        index = self._offset
        fileend = self._offset + self.GetSize()
        # Parse sections until the file's payload is exhausted.
        while (index + EfiSection.EFI_SECTION_HEADER_SIZE < fileend):
            section = EfiSection(self)
            section.Load(fd)
            if section.GetSize() == 0 and section.GetHeader().GetType() == 0:
                break
            self._sections.append(section)
            index = fd.tell()

        # rebase file pointer to next ffs file
        index = self._offset + self._header.GetFfsSize()
        index = align(index, 8)
        fd.seek(index)

    def GetOffset(self):
        return self._offset

    def GetSize(self):
        return self._header.GetFfsSize()

    @classmethod
    def Read(cls, fd, parent=None):
        # Construct an instance and parse it from fd.
        item = cls(parent)
        item.Load(fd)
        return item

    def GetNameGuid(self):
        return self._header.GetNameGuid()

    def DumpContent(self):
        # NOTE(review): self._content is never assigned anywhere in this
        # class, so calling this method raises AttributeError — confirm
        # whether it was meant to aggregate section contents before use.
        list = self._content.tolist()
        line = []
        count = 0
        for item in list:
            if count < 32:
                line.append('0x%X' % int(item))
                count += 1
            else:
                print(' '.join(line))
                count = 0
                line = []
                line.append('0x%X' % int(item))
                count += 1

    def GetHeader(self):
        return self._header

    def GetParent(self):
        return self._parent

    def GetSections(self):
        return self._sections
class EfiFfsHeader(BinaryItem):
    """Accessor for a 24-byte EFI_FFS_FILE_HEADER."""

    # FFS state bit -> symbolic name.
    ffs_state_map = {0x01: 'EFI_FILE_HEADER_CONSTRUCTION',
                     0x02: 'EFI_FILE_HEADER_VALID',
                     0x04: 'EFI_FILE_DATA_VALID',
                     0x08: 'EFI_FILE_MARKED_FOR_UPDATE',
                     0x10: 'EFI_FILE_DELETED',
                     0x20: 'EFI_FILE_HEADER_INVALID'}

    # FFS file type byte -> symbolic name.
    _FFS_TYPE_NAMES = {
        0x01: 'EFI_FV_FILETYPE_RAW',
        0x02: 'EFI_FV_FILETYPE_FREEFORM',
        0x03: 'EFI_FV_FILETYPE_SECURITY_CORE',
        0x04: 'EFI_FV_FILETYPE_PEI_CORE',
        0x05: 'EFI_FV_FILETYPE_DXE_CORE',
        0x06: 'EFI_FV_FILETYPE_PEIM',
        0x07: 'EFI_FV_FILETYPE_DRIVER',
        0x08: 'EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER',
        0x09: 'EFI_FV_FILETYPE_APPLICATION',
        0x0B: 'EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE',
        0xc0: 'EFI_FV_FILETYPE_OEM_MIN',
        0xdf: 'EFI_FV_FILETYPE_OEM_MAX',
        0xe0: 'EFI_FV_FILETYPE_DEBUG_MIN',
        0xef: 'EFI_FV_FILETYPE_DEBUG_MAX',
        0xf0: 'EFI_FV_FILETYPE_FFS_PAD',
        0xff: 'EFI_FV_FILETYPE_FFS_MAX',
    }

    def GetSize(self):
        return 24

    def GetNameGuid(self):
        return list2guid(self._arr.tolist()[0:16])

    def GetType(self):
        return int(self._arr.tolist()[18])

    def GetTypeString(self):
        return self._FFS_TYPE_NAMES.get(self.GetType(), 'Unknown FFS Type')

    def GetAttributes(self):
        return int(self._arr.tolist()[19])

    def GetFfsSize(self):
        # The size field is 24 bits, little-endian.
        return list2int(self._arr.tolist()[20:23])

    def GetState(self):
        """Return the effective state: the highest set state bit."""
        state = int(self._arr.tolist()[23])
        # With erase polarity set, state bits are stored inverted.
        if self.GetParent().GetParent().GetHeader().GetErasePolarity():
            state = (~state) & 0xFF
        bit = 0x80
        while bit != 0 and (bit & state) == 0:
            bit >>= 1
        return bit

    def GetStateString(self):
        return self.ffs_state_map.get(self.GetState(), 'Unknown Ffs State')

    def Dump(self):
        print("FFS name: ", self.GetNameGuid())
        print("FFS type: ", self.GetType())
        print("FFS attr: 0x%X" % self.GetAttributes())
        print("FFS size: 0x%X" % self.GetFfsSize())
        print("FFS state: 0x%X" % self.GetState())

    def GetRawData(self):
        return self._arr.tolist()
class EfiSection(object):
    # Parser for one section inside an FFS file.
    EFI_SECTION_HEADER_SIZE = 4

    def __init__(self, parent=None):
        self._size = 0
        self._parent = parent
        self._offset = 0
        self._contents = array.array('B')

    def Load(self, fd):
        # Parse one section at fd's current (4-byte aligned) position and
        # leave fd positioned at the start of the next section.
        self._offset = align(fd.tell(), 4)

        self._header = EfiSectionHeader.Read(fd, self)

        if self._header.GetTypeString() == "EFI_SECTION_PE32":
            # Let the PE parser walk the payload (result is discarded here;
            # presumably used for its side effects/validation — TODO confirm).
            pefile = pe.PEFile(self)
            pefile.Load(fd, self.GetContentSize())

        # Re-read the whole section (header + payload) as raw bytes.
        fd.seek(self._offset)
        self._contents.fromfile(fd, self.GetContentSize())

        # rebase file pointer to next section
        index = self._offset + self.GetSize()
        index = align(index, 4)
        fd.seek(index)

    def GetContentSize(self):
        return self.GetSize() - self.EFI_SECTION_HEADER_SIZE

    def GetContent(self):
        return self._contents.tolist()

    def GetSize(self):
        return self._header.GetSectionSize()

    def GetHeader(self):
        return self._header

    def GetSectionOffset(self):
        # File offset of the payload (just past the 4-byte header).
        return self._offset + self.EFI_SECTION_HEADER_SIZE
class EfiSectionHeader(BinaryItem):
    """Accessor for a 4-byte EFI_COMMON_SECTION_HEADER."""

    # Section type byte -> symbolic name.
    section_type_map = {0x01: 'EFI_SECTION_COMPRESSION',
                        0x02: 'EFI_SECTION_GUID_DEFINED',
                        0x10: 'EFI_SECTION_PE32',
                        0x11: 'EFI_SECTION_PIC',
                        0x12: 'EFI_SECTION_TE',
                        0x13: 'EFI_SECTION_DXE_DEPEX',
                        0x14: 'EFI_SECTION_VERSION',
                        0x15: 'EFI_SECTION_USER_INTERFACE',
                        0x16: 'EFI_SECTION_COMPATIBILITY16',
                        0x17: 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE',
                        0x18: 'EFI_SECTION_FREEFORM_SUBTYPE_GUID',
                        0x19: 'EFI_SECTION_RAW',
                        0x1B: 'EFI_SECTION_PEI_DEPEX'}

    def GetSize(self):
        return 4

    def GetSectionSize(self):
        # The size field is 24 bits, little-endian.
        return list2int(self._arr.tolist()[0:3])

    def GetType(self):
        return int(self._arr.tolist()[3])

    def GetTypeString(self):
        return self.section_type_map.get(self.GetType(), 'Unknown Section Type')

    def Dump(self):
        print('size = 0x%X' % self.GetSectionSize())
        print('type = 0x%X' % self.GetType())
# Matches one module line of an FV .map file, e.g.
# "PeiCore (BaseAddress=00000820, EntryPoint=00000920, GUID=52c05b14-...".
# Fixed: the pattern is now a raw string; in a normal literal, sequences
# like '\w' and '\(' are invalid escapes (DeprecationWarning, and an error
# in future Python versions).
rMapEntry = re.compile(r'^(\w+)[ \(\w\)]* \(BaseAddress=([0-9a-fA-F]+), EntryPoint=([0-9a-fA-F]+), GUID=([0-9a-fA-F\-]+)')
class EfiFvMapFile(object):
    """Parser for a firmware-volume .map file: module name/address entries
    keyed by module GUID."""

    def __init__(self):
        self._mapentries = {}

    def Load(self, path):
        """Parse the map file at path. Returns True on success."""
        if not os.path.exists(path):
            return False
        try:
            # Fixed: the handle was leaked when readlines() raised; 'with'
            # guarantees closure. Narrowed the bare except to the errors a
            # text read can produce.
            with open(path, 'r') as file:
                lines = file.readlines()
        except (OSError, UnicodeDecodeError):
            return False

        for line in lines:
            if line[0] != ' ':
                # new entry (continuation lines are indented)
                ret = rMapEntry.match(line)
                if ret is not None:
                    name = ret.groups()[0]
                    baseaddr = int(ret.groups()[1], 16)
                    entry = int(ret.groups()[2], 16)
                    guidstr = '{' + ret.groups()[3] + '}'
                    guid = uuid.UUID(guidstr)
                    self._mapentries[guid] = EfiFvMapFileEntry(name, baseaddr, entry, guid)
        return True

    def GetEntry(self, guid):
        """Return the EfiFvMapFileEntry for guid, or None when absent."""
        return self._mapentries.get(guid)
class EfiFvMapFileEntry(object):
    """One module record parsed from an FV map file.

    Captures the module name, its load base address, its entry-point
    address, and its GUID (see rMapEntry / EfiFvMapFile.Load).
    """
    def __init__(self, name, baseaddr, entry, guid):
        # Module name (first match group of rMapEntry).
        self._name = name
        # Load base address, int parsed from hex.
        self._baseaddr = baseaddr
        # Entry-point address, int parsed from hex.
        self._entry = entry
        # Module GUID (uuid.UUID).  Note: no public accessor is provided.
        self._guid = guid
    def GetName(self):
        return self._name
    def GetBaseAddress(self):
        return self._baseaddr
    def GetEntryPoint(self):
        return self._entry
def list2guid(list):
    """Build a uuid.UUID from 16 raw GUID bytes.

    Per the EFI_GUID layout: the 4-, 2- and 2-byte leading fields are
    stored little-endian, the trailing 8 bytes big-endian.
    """
    def _le(byte_list):
        # Little-endian integer from a byte list (same rule as list2int).
        value = 0
        for b in reversed(byte_list):
            value = (value << 8) | int(b)
        return value

    data1 = _le(list[0:4])
    data2 = _le(list[4:6])
    data3 = _le(list[6:8])

    data4 = 0
    for item in list[8:16]:
        data4 = (data4 << 8) | int(item)

    combined = (data1 << 96) | (data2 << 80) | (data3 << 64) | data4
    return uuid.UUID(int=combined)
def list2int(list):
    """Interpret a list of byte values as a little-endian integer."""
    value = 0
    # Walk from the most-significant (last) byte down to the first.
    for byte in reversed(list):
        value = (value << 8) | int(byte)
    return value
def align(value, alignment):
    """Round *value* up to the next multiple of *alignment*.

    Assumes *alignment* is a power of two (all call sites here use 4).
    """
    # Padding needed to reach the next boundary; zero when already aligned.
    pad = (alignment - value) & (alignment - 1)
    return value + pad
# Sentinel GUID of all 0xFF bytes, used to mark invalid entries.
gInvalidGuid = uuid.UUID(int=0xffffffffffffffffffffffffffffffff)

def isValidGuid(guid):
    """Return True unless *guid* is the all-ones invalid sentinel."""
    return guid != gInvalidGuid
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/efibinary.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/__init__.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
def GetEdkLogger():
    """Return the shared 'edk' logger used by all message helpers."""
    import logging  # local import keeps module load side-effect free
    return logging.getLogger('edk')
class EdkException(Exception):
    """Exception raised for EDK tool failures; logs the message on creation."""

    def __init__(self, message, fName=None, fNo=None):
        # FIX: initialize the Exception base class so str(e), e.args and
        # pickling work; previously str(e) was always the empty string.
        Exception.__init__(self, message)
        self._message = message
        # Log immediately with the optional file name / line number context.
        ErrorMsg(message, fName, fNo)

    def GetMessage(self):
        """Return the failure text prefixed with the EDK failure tag."""
        return '[EDK Failure]: %s' % self._message
def ErrorMsg(mess, fName=None, fNo=None):
    # Emit an error-level record tagged '#ERR#' via the shared 'edk' logger.
    GetEdkLogger().error(NormalMessage('#ERR#', mess, fName, fNo))
def LogMsg(mess, fName=None, fNo=None):
    # Emit an info-level record tagged '@LOG@' via the shared 'edk' logger.
    GetEdkLogger().info(NormalMessage('@LOG@', mess, fName, fNo))
def WarnMsg(mess, fName=None, fNo=None):
    # Emit a warning-level record tagged '!WAR!' via the shared 'edk' logger.
    GetEdkLogger().warning(NormalMessage('!WAR!', mess, fName, fNo))
def NormalMessage(type, mess, fName=None, fNo=None):
    """Format a log record as '<type> <file>(<line>):<mess>'.

    File name slashes are flipped to backslashes.  When only fName is
    given, ' :' separates it from the message; with neither, a single
    space separates tag and message.
    """
    parts = [type]
    if fName is not None:
        parts.append(' %s' % fName.replace('/', '\\'))
        # Line number only makes sense together with a file name.
        parts.append('(%d):' % fNo if fNo is not None else ' :')
    if fName is None and fNo is None:
        parts.append(' ')
    parts.append(mess)
    return ''.join(parts)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/message.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import core.editor
class INIDoc(core.editor.EditorDocument):
    """Editor document model for an INI file."""
    def __init__(self):
        core.editor.EditorDocument.__init__(self)
        # Backing parsed-INI object; None until one is attached
        # (assignment site not visible here -- confirm in the loader).
        self._iniobj = None
class INIView(core.editor.EditorView):
    """Editor view for INI documents; inherits all behavior from EditorView."""
    pass
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/inidocview.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import print_function
from __future__ import absolute_import
import os
from .message import *
class BaseDoxygeItem:
    """Common base for generated doxygen items (pages and sections).

    Holds a name, an anchor tag, an accumulated description string, and
    the generated output lines in mText.
    """

    def __init__(self, name, tag=''):
        self.mName = name
        self.mTag = tag
        self.mDescription = ''
        self.mText = []

    def AddDescription(self, desc):
        # Append to the running description (coerces like '%s%s' did).
        self.mDescription = '{}{}'.format(self.mDescription, desc)

    def __str__(self):
        return '\n'.join(self.mText)

    def Generate(self):
        """This interface needs to be overridden by subclasses."""
class Section(BaseDoxygeItem):
    """A doxygen '\\section' block within a page."""

    def Generate(self):
        """Render the section header and description into mText."""
        if len(self.mTag) != 0:
            header = r' \section %s %s' % (self.mName, self.mTag)
        else:
            header = r' \section %s' % self.mName
        self.mText.append(header)
        self.mText.append(self.mDescription)
        return self.mText
class Page(BaseDoxygeItem):
    """A doxygen page that may hold sections and nested sub-pages."""

    def __init__(self, name, tag=None, isSort=True):
        BaseDoxygeItem.__init__(self, name, tag)
        self.mSubPages = []        # child Page objects
        self.mIsMainPage = False   # True only for the root \mainpage
        self.mSections = []        # Section objects shown on this page
        self.mIsSort = isSort      # sort sub-pages alphabetically in the index

    def GetSubpageCount(self):
        return len(self.mSubPages)

    def AddPage(self, subpage):
        # Returns the added page so callers can keep a handle on it.
        self.mSubPages.append(subpage)
        return subpage

    def AddPages(self, pageArray):
        if pageArray is None:
            return
        for page in pageArray:
            self.AddPage(page)

    def AddSection(self, section):
        # Sections are re-sorted by case-insensitive name on every insert.
        self.mSections.append(section)
        self.mSections.sort(key=lambda x: x.mName.lower())

    def Generate(self):
        """Render this page, its sections, and (recursively) its sub-pages."""
        if self.mIsMainPage:
            self.mText.append('/** \mainpage %s' % self.mName)
            # The main page index keeps insertion order (sorting disabled).
            self.mIsSort = False
        else:
            self.mText.append('/** \page %s %s' % (self.mTag, self.mName))

        if len(self.mDescription) != 0:
            self.mText.append(self.mDescription)
        endIndex = len(self.mText)

        self.mSections.sort(key=lambda x: x.mName.lower())
        for sect in self.mSections:
            self.mText += sect.Generate()

        endIndex = len(self.mText)

        if len(self.mSubPages) != 0:
            # Build a bulleted index of \subpage links, then append each
            # sub-page's own generated text after the list.
            self.mText.insert(endIndex, "<p> \section content_index INDEX")

            endIndex = len(self.mText)
            self.mText.insert(endIndex, '<ul>')
            endIndex += 1

            if self.mIsSort:
                self.mSubPages.sort(key=lambda x: x.mName.lower())
            for page in self.mSubPages:
                self.mText.insert(endIndex, '<li>\subpage %s \"%s\" </li>' % (page.mTag, page.mName))
                endIndex += 1
                self.mText += page.Generate()

            self.mText.insert(endIndex, '</ul>')
            endIndex += 1
        self.mText.insert(endIndex, ' **/')
        return self.mText
class DoxygenFile(Page):
    """Root doxygen page that can be saved to a file on disk."""

    def __init__(self, name, file):
        Page.__init__(self, name)
        self.mFilename = file
        # The root file always renders as the \mainpage.
        self.mIsMainPage = True

    def GetFilename(self):
        # Windows-style path for embedding into the doxygen config.
        return self.mFilename.replace('/', '\\')

    def Save(self):
        """Generate the page text and write it to self.mFilename.

        @return True on success, False if the file could not be written.
        """
        # FIX: local was named 'str' (shadowed the builtin) and the file
        # handle leaked when write() raised; 'with' closes it either way.
        lines = self.Generate()
        try:
            with open(self.mFilename, 'w') as f:
                f.write('\n'.join(lines))
        except IOError:
            ErrorMsg('Fail to write file %s' % self.mFilename)
            return False
        return True
# Doxygen configuration template.  The %(...)s placeholders are filled in
# by DoxygenConfigFile.Generate().
doxygenConfigTemplate = """
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = %(ProjectName)s
PROJECT_NUMBER = %(ProjectVersion)s
OUTPUT_DIRECTORY = %(OutputDir)s
CREATE_SUBDIRS = YES
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF = "The $name class " \\
"The $name widget " \\
"The $name file " \\
is \\
provides \\
specifies \\
contains \\
represents \\
a \\
an \\
the
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = YES
STRIP_FROM_PATH = %(StripPath)s
STRIP_FROM_INC_PATH =
SHORT_NAMES = YES
JAVADOC_AUTOBRIEF = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
DETAILS_AT_TOP = YES
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 1
ALIASES =
OPTIMIZE_OUTPUT_FOR_C = YES
OPTIMIZE_OUTPUT_JAVA = NO
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
DISTRIBUTE_GROUP_DOC = YES
SUBGROUPING = YES
TYPEDEF_HIDES_STRUCT = NO
EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
EXTRACT_STATIC = NO
EXTRACT_LOCAL_CLASSES = NO
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = NO
HIDE_SCOPE_NAMES = NO
SHOW_INCLUDE_FILES = NO
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_BY_SCOPE_NAME = YES
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = NO
SHOW_DIRECTORIES = NO
FILE_VERSION_FILTER =
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = YES
WARN_FORMAT = "$file:$line: $text "
WARN_LOGFILE = %(WarningFile)s
INPUT = %(FileList)s
INPUT_ENCODING = UTF-8
FILE_PATTERNS = %(Pattern)s
RECURSIVE = NO
EXCLUDE = *.svn
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = .svn
EXCLUDE_SYMBOLS =
EXAMPLE_PATH = %(ExamplePath)s
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = NO
IMAGE_PATH =
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = YES
REFERENCES_RELATION = YES
REFERENCES_LINK_SOURCE = NO
USE_HTAGS = NO
VERBATIM_HEADERS = NO
ALPHABETICAL_INDEX = NO
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
GENERATE_HTML = YES
HTML_OUTPUT = html
HTML_FILE_EXTENSION = .html
HTML_HEADER =
HTML_FOOTER =
HTML_STYLESHEET =
HTML_ALIGN_MEMBERS = YES
GENERATE_HTMLHELP = %(WhetherGenerateHtmlHelp)s
HTML_DYNAMIC_SECTIONS = NO
CHM_FILE = index.chm
HHC_LOCATION =
GENERATE_CHI = NO
BINARY_TOC = NO
TOC_EXPAND = NO
DISABLE_INDEX = NO
ENUM_VALUES_PER_LINE = 4
GENERATE_TREEVIEW = %(WhetherGenerateTreeView)s
TREEVIEW_WIDTH = 250
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4wide
EXTRA_PACKAGES =
LATEX_HEADER =
PDF_HYPERLINKS = YES
USE_PDFLATEX = YES
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_LINKS = NO
GENERATE_XML = NO
XML_OUTPUT = xml
XML_SCHEMA =
XML_DTD =
XML_PROGRAMLISTING = YES
GENERATE_AUTOGEN_DEF = NO
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
SEARCH_INCLUDES = YES
INCLUDE_PATH = %(IncludePath)s
INCLUDE_FILE_PATTERNS = *.h
PREDEFINED = %(PreDefined)s
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = NO
TAGFILES =
GENERATE_TAGFILE =
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
PERL_PATH = /usr/bin/perl
CLASS_DIAGRAMS = NO
MSCGEN_PATH =
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = NO
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
TEMPLATE_RELATIONS = NO
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
DOT_PATH =
DOTFILE_DIRS =
DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 1000
DOT_TRANSPARENT = YES
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES
SEARCHENGINE = NO
"""
class DoxygenConfigFile:
    """Builder for a doxygen configuration file.

    Collects source files, include paths, file patterns, and predefined
    macros, then renders them into doxygenConfigTemplate via Generate().
    """

    def __init__(self):
        self.mProjectName = ''
        self.mOutputDir = ''
        self.mFileList = []
        self.mIncludeList = []
        self.mStripPath = ''
        self.mExamplePath = ''
        # File patterns doxygen should scan.  FIX: '.nasm' was missing the
        # '*' wildcard, so NASM sources never matched.
        self.mPattern = ['*.c', '*.h',
                         '*.asm', '*.s', '*.nasm', '*.html', '*.dox']
        self.mMode = 'HTML'            # 'HTML' or 'CHM' output mode
        self.mWarningFile = ''
        self.mPreDefined = []
        self.mProjectVersion = 0.1

    def SetChmMode(self):
        self.mMode = 'CHM'

    def SetHtmlMode(self):
        self.mMode = 'HTML'

    def SetProjectName(self, str):
        self.mProjectName = str

    def SetProjectVersion(self, str):
        self.mProjectVersion = str

    def SetOutputDir(self, str):
        self.mOutputDir = str

    def SetStripPath(self, str):
        self.mStripPath = str

    def SetExamplePath(self, str):
        self.mExamplePath = str

    def SetWarningFilePath(self, str):
        # doxygen expects forward slashes in paths.
        self.mWarningFile = str.replace('\\', '/')

    def FileExists(self, path):
        """Case-insensitive membership test against the collected file list."""
        if path is None:
            return False
        if len(path) == 0:
            return False
        for p in self.mFileList:
            if path.lower() == p.lower():
                return True
        return False

    def AddFile(self, path):
        """Add a source file (slashes normalized, duplicates ignored)."""
        if path is None:
            return
        if len(path) == 0:
            return
        path = path.replace('\\', '/')
        if not self.FileExists(path):
            self.mFileList.append(path)

    def AddIncludePath(self, path):
        """Add an include search path (slashes normalized, duplicates ignored)."""
        path = path.replace('\\', '/')
        if path not in self.mIncludeList:
            self.mIncludeList.append(path)

    def AddPattern(self, pattern):
        self.mPattern.append(pattern)

    def AddPreDefined(self, macro):
        self.mPreDefined.append(macro)

    def Generate(self, path):
        """Render the config template and write it to *path*.

        @return True on success, False on I/O failure.
        """
        files = ' \\\n'.join(self.mFileList)
        includes = ' \\\n'.join(self.mIncludeList)
        patterns = ' \\\n'.join(self.mPattern)
        if self.mMode.lower() == 'html':
            sHtmlHelp = 'NO'
            sTreeView = 'YES'
        else:
            sHtmlHelp = 'YES'
            sTreeView = 'NO'

        text = doxygenConfigTemplate % {'ProjectName': self.mProjectName,
                                        'OutputDir': self.mOutputDir,
                                        'StripPath': self.mStripPath,
                                        'ExamplePath': self.mExamplePath,
                                        'FileList': files,
                                        'Pattern': patterns,
                                        'WhetherGenerateHtmlHelp': sHtmlHelp,
                                        'WhetherGenerateTreeView': sTreeView,
                                        'IncludePath': includes,
                                        'WarningFile': self.mWarningFile,
                                        'PreDefined': ' '.join(self.mPreDefined),
                                        'ProjectVersion': self.mProjectVersion}
        try:
            # FIX: 'with' closes the handle even if write() raises; the old
            # open/write/close sequence leaked it on failure.
            with open(path, 'w') as f:
                f.write(text)
        except IOError:
            ErrorMsg('Fail to generate doxygen config file %s' % path)
            return False
        return True
########################################################################
#                      TEST CODE
########################################################################
if __name__ == '__main__':
    # FIX: the old literal 'm:\tree' contained an embedded TAB character
    # because of the '\t' escape; a raw string gives the intended path.
    df = DoxygenFile('Platform Document', r'm:\tree')
    df.AddPage(Page('Module', 'module'))
    p = df.AddPage(Page('Library', 'library'))
    # FIX: 'desc' was an undefined name (NameError at runtime).
    p.AddDescription('Sample description for the Library page.')
    p.AddPage(Page('PCD', 'pcds'))
    df.Generate()
    print(df)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/doxygen.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import absolute_import
from .message import *
import re
import os
# Matches an INI section header line such as '[Defines]' or
# '[Sources.common, Sources.IA32]'; group 1 captures the comma-separated
# name list.
section_re = re.compile(r'^\[([\w., "]+)\]')
class BaseINIFile(object):
    """Base parser/model for INI-style EDK metadata files (DSC/DEC/INF).

    Instances are interned per normalized path (see __new__), so opening
    the same file twice yields the same object.
    """

    # Cache of already-created INI file objects, keyed by normalized path.
    _objs = {}

    def __new__(cls, *args, **kwargs):
        """Maintain only a single instance of this object per file path.
        @return: instance of this class
        """
        if len(args) == 0: return object.__new__(cls)
        filename = args[0]
        parent = None
        if len(args) > 1:
            parent = args[1]

        key = os.path.normpath(filename)
        if key not in cls._objs.keys():
            cls._objs[key] = object.__new__(cls)

        if parent is not None:
            cls._objs[key].AddParent(parent)

        return cls._objs[key]

    def __init__(self, filename=None, parent=None):
        self._lines = []        # raw file lines (with newlines)
        self._sections = {}     # lower-cased section name -> list of sections
        self._filename = filename
        self._globals = []      # BaseINIGlobalObject entries before any section
        self._isModify = True   # dirty flag; True forces a (re)parse

    def AddParent(self, parent):
        # Register a file that references this one (for modify propagation).
        if parent is None: return
        if not hasattr(self, "_parents"):
            self._parents = []

        if parent in self._parents:
            ErrorMsg("Duplicate parent is found for INI file %s" % self._filename)
            return
        self._parents.append(parent)

    def GetFilename(self):
        return os.path.normpath(self._filename)

    def IsModified(self):
        return self._isModify

    def Modify(self, modify=True, obj=None):
        # Propagate the dirty flag upward to all referencing parents.
        # NOTE(review): self._parents only exists after AddParent(); calling
        # Modify(True) on an orphan file raises AttributeError -- confirm.
        if modify == self._isModify: return
        self._isModify = modify
        if modify:
            for parent in self._parents:
                parent.Modify(True, self)

    def _ReadLines(self, filename):
        #
        # try to open file
        #
        if not os.path.exists(filename):
            return False
        try:
            handle = open(filename, 'r')
            self._lines = handle.readlines()
            handle.close()
        except:
            raise EdkException("Fail to open file %s" % filename)

        return True

    def GetSectionInstance(self, parent, name, isCombined=False):
        # Factory hook; subclasses override to return specialized sections.
        return BaseINISection(parent, name, isCombined)

    def GetSectionByName(self, name):
        """Return all non-private sections whose base name contains *name*."""
        arr = []
        for key in self._sections.keys():
            if '.private' in key:
                continue
            for item in self._sections[key]:
                if item.GetBaseName().lower().find(name.lower()) != -1:
                    arr.append(item)
        return arr

    def GetSectionObjectsByName(self, name):
        """Return the parsed objects of every section matching *name*."""
        arr = []
        sects = self.GetSectionByName(name)
        for sect in sects:
            for obj in sect.GetObjects():
                arr.append(obj)
        return arr

    def Parse(self):
        """Parse the file into sections and global objects.

        @return True on success (also when already up to date), False if
                the file could not be read.
        """
        if not self._isModify: return True
        if not self._ReadLines(self._filename): return False

        sObjs = []          # sections opened by the current '[a, b, ...]' header
        inGlobal = True     # True until the first section header is seen
        # process line
        for index in range(len(self._lines)):
            templine = self._lines[index].strip()
            # skip blank lines, separators and comments
            if len(templine) == 0: continue
            if re.match("^\[=*\]", templine) or re.match("^#", templine) or \
               re.match("\*+/", templine):
                continue

            m = section_re.match(templine)
            if m is not None: # found a section
                inGlobal = False
                # Finish the latest section first
                if len(sObjs) != 0:
                    for sObj in sObjs:
                        sObj._end = index - 1
                        if not sObj.Parse():
                            ErrorMsg("Fail to parse section %s" % sObj.GetBaseName(),
                                     self._filename,
                                     sObj._start)

                # start new section; a combined header '[a, b]' opens one
                # section object per name
                sname_arr = m.groups()[0].split(',')
                sObjs = []
                for name in sname_arr:
                    sObj = self.GetSectionInstance(self, name, (len(sname_arr) > 1))
                    sObj._start = index
                    sObjs.append(sObj)
                    if name.lower() not in self._sections:
                        self._sections[name.lower()] = [sObj]
                    else:
                        self._sections[name.lower()].append(sObj)

            elif inGlobal:  # not in any section yet: a global object
                gObj = BaseINIGlobalObject(self)
                gObj._start = index
                gObj.Parse()
                self._globals.append(gObj)

        # Finish the last section
        if len(sObjs) != 0:
            for sObj in sObjs:
                sObj._end = index
                if not sObj.Parse():
                    ErrorMsg("Fail to parse section %s" % sObj.GetBaseName(),
                             self._filename,
                             sObj._start)

        self._isModify = False
        return True

    def Destroy(self, parent):
        """Drop *parent*'s reference; free the object when none remain."""
        # check referenced parent
        if parent is not None:
            assert parent in self._parents, "when destory ini object, can not found parent reference!"
            self._parents.remove(parent)

        if len(self._parents) != 0: return

        for sects in self._sections.values():
            for sect in sects:
                sect.Destroy()

        # dereference from _objs array
        assert self.GetFilename() in self._objs.keys(), "When destroy ini object, can not find obj reference!"
        assert self in self._objs.values(), "When destroy ini object, can not find obj reference!"
        del self._objs[self.GetFilename()]

        # dereference self
        self.Clear()

    def GetDefine(self, name):
        """Look up a 'name = value' entry in the [Defines] section(s)."""
        sects = self.GetSectionByName('Defines')
        for sect in sects:
            for obj in sect.GetObjects():
                line = obj.GetLineByOffset(obj._start).split('#')[0].strip()
                arr = line.split('=')
                if arr[0].strip().lower() == name.strip().lower():
                    return arr[1].strip()
        return None

    def Clear(self):
        """Drop all parsed state and cached lines."""
        for sects in self._sections.values():
            for sect in sects:
                del sect
        self._sections.clear()
        for gObj in self._globals:
            del gObj
        del self._globals[:]
        del self._lines[:]

    def Reload(self):
        """Clear and re-parse the file from disk."""
        self.Clear()
        ret = self.Parse()
        if ret:
            self._isModify = False
        return ret

    def AddNewSection(self, sectName):
        """Append a new empty '[sectName]' section to the in-memory lines."""
        if sectName.lower() in self._sections.keys():
            # NOTE(review): the '%s' placeholder is never substituted, so
            # the message is logged literally.
            ErrorMsg('Section %s can not be created for conflict with existing section')
            return None

        sectionObj = self.GetSectionInstance(self, sectName)
        sectionObj._start = len(self._lines)
        sectionObj._end = len(self._lines) + 1
        self._lines.append('[%s]\n' % sectName)
        self._lines.append('\n\n')
        # NOTE(review): Parse() stores a *list* per key but this stores a
        # bare object -- GetSectionByName would iterate it incorrectly.
        self._sections[sectName.lower()] = sectionObj
        return sectionObj

    def CopySectionsByName(self, oldDscObj, nameStr):
        """Copy every section matching *nameStr* from another file object."""
        sects = oldDscObj.GetSectionByName(nameStr)
        for sect in sects:
            sectObj = self.AddNewSection(sect.GetName())
            sectObj.Copy(sect)

    def __str__(self):
        return ''.join(self._lines)

    ## Get the file header's comment from a basic INI file.
    #  The file comment has two styles:
    #  1) #/** @file
    #  2) ## @file
    #
    def GetFileHeader(self):
        desc = []
        lineArr = self._lines
        inHeader = False
        for num in range(len(self._lines)):
            line = lineArr[num].strip()
            if not inHeader and (line.startswith("#/**") or line.startswith("##")) and \
               line.find("@file") != -1:
                inHeader = True
                continue
            if inHeader and (line.startswith("#**/") or line.startswith('##')):
                inHeader = False
                break
            if inHeader:
                # strip the leading '#' prefix from each header line
                prefixIndex = line.find('#')
                if prefixIndex == -1:
                    desc.append(line)
                else:
                    desc.append(line[prefixIndex + 1:])
        return '<br>\n'.join(desc)
class BaseINISection(object):
    """One '[name]' section of an INI file; parses its lines into objects."""

    def __init__(self, parent, name, isCombined=False):
        self._parent = parent          # owning BaseINIFile
        self._name = name
        self._isCombined = isCombined  # True when declared as '[a, b]' with siblings
        self._start = 0                # absolute line number of the '[...]' header
        self._end = 0                  # absolute line number of the section's last line
        self._objs = []                # parsed BaseINISectionObject entries

    def __del__(self):
        for obj in self._objs:
            del obj
        del self._objs[:]

    def GetName(self):
        return self._name

    def GetObjects(self):
        return self._objs

    def GetParent(self):
        return self._parent

    def GetStartLinenumber(self):
        return self._start

    def GetEndLinenumber(self):
        return self._end

    def GetLine(self, linenumber):
        return self._parent._lines[linenumber]

    def GetFilename(self):
        return self._parent.GetFilename()

    def GetSectionINIObject(self, parent):
        # Factory hook; subclasses override to return specialized objects.
        return BaseINISectionObject(parent)

    def Parse(self):
        """Parse the section body into objects; '{ ... }' spans several lines."""
        # skip first line in section, it is used by section name
        visit = self._start + 1
        iniObj = None
        while (visit <= self._end):
            line = self.GetLine(visit).strip()
            # skip separators, comments and blank lines
            if re.match("^\[=*\]", line) or re.match("^#", line) or len(line) == 0:
                visit += 1
                continue

            # drop any trailing '#' comment
            line = line.split('#')[0].strip()
            if iniObj is not None:
                # inside a multi-line object: wait for the closing brace
                if line.endswith('}'):
                    iniObj._end = visit - self._start
                    if not iniObj.Parse():
                        ErrorMsg("Fail to parse ini object",
                                 self.GetFilename(),
                                 iniObj.GetStartLinenumber())
                    else:
                        self._objs.append(iniObj)
                    iniObj = None
            else:
                iniObj = self.GetSectionINIObject(self)
                iniObj._start = visit - self._start
                if not line.endswith('{'):
                    # single-line object: finish it immediately
                    iniObj._end = visit - self._start
                    if not iniObj.Parse():
                        ErrorMsg("Fail to parse ini object",
                                 self.GetFilename(),
                                 iniObj.GetStartLinenumber())
                    else:
                        self._objs.append(iniObj)
                    iniObj = None
            visit += 1
        return True

    def Destroy(self):
        for obj in self._objs:
            obj.Destroy()

    def GetBaseName(self):
        return self._name

    def AddLine(self, line):
        # Insert a raw line at the end of this section and grow its range.
        end = self.GetEndLinenumber()
        self._parent._lines.insert(end, line)
        self._end += 1

    def Copy(self, sectObj):
        """Copy all non-comment body lines from another section into this one."""
        index = sectObj.GetStartLinenumber() + 1
        while index < sectObj.GetEndLinenumber():
            line = sectObj.GetLine(index)
            if not line.strip().startswith('#'):
                self.AddLine(line)
            index += 1

    def AddObject(self, obj):
        lines = obj.GenerateLines()
        for line in lines:
            self.AddLine(line)

    def GetComment(self):
        """Collect the '##'-style comment block directly above the section."""
        comments = []
        start = self._start - 1
        bFound = False

        # Walk upward over '#' lines looking for the '##' block opener.
        while (start > 0):
            line = self.GetLine(start).strip()
            if len(line) == 0:
                start -= 1
                continue
            if line.startswith('##'):
                bFound = True
                index = line.rfind('#')
                if (index + 1) < len(line):
                    comments.append(line[index + 1:])
                break
            if line.startswith('#'):
                start -= 1
                continue
            break
        if bFound:
            # Collect the remaining comment lines down to the section header.
            end = start + 1
            while (end < self._start):
                line = self.GetLine(end).strip()
                if len(line) == 0: break
                if not line.startswith('#'): break
                index = line.rfind('#')
                if (index + 1) < len(line):
                    comments.append(line[index + 1:])
                end += 1
        return comments
class BaseINIGlobalObject(object):
    """A top-level INI line that appears before any section header."""

    def __init__(self, parent):
        # FIX: *parent* (the owning BaseINIFile) was silently dropped,
        # which made __str__ unusable; keep the reference.
        self._parent = parent
        self._start = 0     # absolute line number of this object
        self._end = 0

    def Parse(self):
        return True

    def __str__(self):
        # FIX: referenced the undefined global name 'parent' (NameError);
        # use the stored owning file instead.
        return self._parent._lines[self._start]

    def __del__(self):
        pass
class BaseINISectionObject(object):
    """One parsed object (statement) inside an INI section.

    _start/_end are offsets relative to the owning section's header line.
    """

    def __init__(self, parent):
        self._start = 0
        self._end = 0
        self._parent = parent   # owning BaseINISection

    def __del__(self):
        self._parent = None

    def GetParent(self):
        return self._parent

    def GetFilename(self):
        return self.GetParent().GetFilename()

    def GetPackageName(self):
        return self.GetFilename()

    def GetFileObj(self):
        # The section's parent is the owning BaseINIFile.
        return self.GetParent().GetParent()

    def GetStartLinenumber(self):
        # Absolute line number = section start + offset within the section.
        return self.GetParent()._start + self._start

    def GetLineByOffset(self, offset):
        sect_start = self._parent.GetStartLinenumber()
        linenumber = sect_start + offset
        return self._parent.GetLine(linenumber)

    def GetLinenumberByOffset(self, offset):
        return offset + self._parent.GetStartLinenumber()

    def Parse(self):
        # Default: accept the line as-is; subclasses add real parsing.
        return True

    def Destroy(self):
        pass

    def __str__(self):
        return self.GetLineByOffset(self._start).strip()

    def GenerateLines(self):
        # Default serialization; subclasses emit their own representation.
        return ['default setion object string\n']

    def GetComment(self):
        """Collect the '##'-style comment block directly above this object."""
        comments = []
        start = self.GetStartLinenumber() - 1
        bFound = False

        # Walk upward over '#' lines looking for the '##' block opener.
        while (start > 0):
            line = self.GetParent().GetLine(start).strip()
            if len(line) == 0:
                start -= 1
                continue
            if line.startswith('##'):
                bFound = True
                index = line.rfind('#')
                if (index + 1) < len(line):
                    comments.append(line[index + 1:])
                break
            if line.startswith('#'):
                start -= 1
                continue
            break
        if bFound:
            # Collect the remaining comment lines down to this object's line.
            end = start + 1
            while (end <= self.GetStartLinenumber() - 1):
                line = self.GetParent().GetLine(end).strip()
                if len(line) == 0: break
                if not line.startswith('#'): break
                index = line.rfind('#')
                if (index + 1) < len(line):
                    comments.append(line[index + 1:])
                end += 1
        return comments
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/ini.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/__init__.py
|
## @file
#
# This file produce action class to generate doxygen document for edk2 codebase.
# The action classes are shared by GUI and command line tools.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
from plugins.EdkPlugins.basemodel import doxygen
import os
try:
import wx
gInGui = True
except:
gInGui = False
import re
from plugins.EdkPlugins.edk2.model import inf
from plugins.EdkPlugins.edk2.model import dec
from plugins.EdkPlugins.basemodel.message import *
# Directory names skipped when scanning package source trees.
_ignore_dir = ['.svn', '_svn', 'cvs']

# Maps keys of an INF [Defines] section to human-readable labels used in
# the generated documentation (BASE_NAME intentionally excluded).
_inf_key_description_mapping_table = {
    'INF_VERSION':'Version of INF file specification',
    #'BASE_NAME':'Module Name',
    'FILE_GUID':'Module Guid',
    'MODULE_TYPE': 'Module Type',
    'VERSION_STRING': 'Module Version',
    'LIBRARY_CLASS': 'Produced Library Class',
    'EFI_SPECIFICATION_VERSION': 'UEFI Specification Version',
    'PI_SPECIFICATION_VERSION': 'PI Specification Version',
    'ENTRY_POINT': 'Module Entry Point Function',
    'CONSTRUCTOR': 'Library Constructor Function'
}

# Maps keys of a DEC [Defines] section to human-readable labels.
_dec_key_description_mapping_table = {
    'DEC_SPECIFICATION': 'Version of DEC file specification',
    'PACKAGE_GUID': 'Package Guid'
}
class DoxygenAction:
    """This is base class for all doxygen action.
    """

    def __init__(self, doxPath, chmPath, outputPath, projname, mode='html', log=None, verbose=False):
        """Constructor function.
        @param doxPath      absolute path of the doxygen executable
        @param chmPath      absolute path of the CHM help compiler
        @param outputPath   absolute output directory
        @param projname     project name used in titles and file names
        @param mode         'html' or 'chm' output mode
        @param log          optional callable log(message, level) for progress output
        """
        self._doxPath = doxPath
        self._chmPath = chmPath
        self._outputPath = outputPath
        self._projname = projname
        self._configFile = None          # doxygen config file is used by doxygen exe file
        self._indexPageFile = None       # doxygen page file for index page.
        self._log = log
        self._mode = mode
        self._verbose = verbose
        self._doxygenCallback = None     # callable(doxPath, configFilePath) -> bool
        self._chmCallback = None         # callback for the CHM build step

    def Log(self, message, level='info'):
        if self._log is not None:
            self._log(message, level)

    def IsVerbose(self):
        return self._verbose

    def Generate(self):
        """Generate interface called by outer directly"""
        self.Log(">>>>>> Start generate doxygen document for %s... Zzz....\n" % self._projname)

        # create doxygen config file at first
        self._configFile = doxygen.DoxygenConfigFile()
        self._configFile.SetOutputDir(self._outputPath)
        self._configFile.SetWarningFilePath(os.path.join(self._outputPath, 'warning.txt'))
        if self._mode.lower() == 'html':
            self._configFile.SetHtmlMode()
        else:
            self._configFile.SetChmMode()

        self.Log(" >>>>>> Initialize doxygen config file...Zzz...\n")
        self.InitializeConfigFile()

        self.Log(" >>>>>> Generate doxygen index page file...Zzz...\n")
        indexPagePath = self.GenerateIndexPage()
        if indexPagePath is None:
            self.Log("Fail to generate index page!\n", 'error')
            return False
        else:
            self.Log("Success to create doxygen index page file %s \n" % indexPagePath)

        # Add index page doxygen file to file list.
        self._configFile.AddFile(indexPagePath)

        # save config file to output path
        configFilePath = os.path.join(self._outputPath, self._projname + '.doxygen_config')
        self._configFile.Generate(configFilePath)
        self.Log(" <<<<<< Success Save doxygen config file to %s...\n" % configFilePath)

        # launch doxygen tool to generate document
        if self._doxygenCallback is not None:
            self.Log(" >>>>>> Start doxygen process...Zzz...\n")
            if not self._doxygenCallback(self._doxPath, configFilePath):
                return False
        else:
            self.Log("Fail to create doxygen process!", 'error')
            return False

        return True

    def InitializeConfigFile(self):
        """Initialize config setting for doxygen project. It will be invoked after config file
        object is created. Inherited class should implement it.
        """

    def GenerateIndexPage(self):
        """Generate doxygen index page. Inherited class should implement it."""
        return None

    def RegisterCallbackDoxygenProcess(self, callback):
        self._doxygenCallback = callback

    def RegisterCallbackCHMProcess(self, callback):
        self._chmCallback = callback
class PlatformDocumentAction(DoxygenAction):
    """Generate platform doxygen document (not implemented yet)."""
class PackageDocumentAction(DoxygenAction):
"""Generate package reference document"""
    def __init__(self, doxPath, chmPath, outputPath, pObj, mode='html', log=None, arch=None, tooltag=None,
                 macros=[], onlyInclude=False, verbose=False):
        """Document generator for a single package.

        @param pObj         package object providing GetName()/GetFileObj()/GetFilename()
        @param arch         arch filter ('IA32', 'X64', 'IPF', 'EBC') or None for all
        @param tooltag      toolchain tag ('MSFT', 'GNU', 'INTEL') or None for all
        @param macros       extra predefined macros for doxygen preprocessing
                            (NOTE(review): mutable default list is shared across calls)
        @param onlyInclude  document only public include files, skip modules
        """
        DoxygenAction.__init__(self, doxPath, chmPath, outputPath, pObj.GetName(), mode, log, verbose)

        self._pObj = pObj
        self._arch = arch
        self._tooltag = tooltag
        self._macros = macros
        self._onlyIncludeDocument = onlyInclude
def InitializeConfigFile(self):
if self._arch == 'IA32':
self._configFile.AddPreDefined('MDE_CPU_IA32')
elif self._arch == 'X64':
self._configFile.AddPreDefined('MDE_CPU_X64')
elif self._arch == 'IPF':
self._configFile.AddPreDefined('MDE_CPU_IPF')
elif self._arch == 'EBC':
self._configFile.AddPreDefined('MDE_CPU_EBC')
else:
self._arch = None
self._configFile.AddPreDefined('MDE_CPU_IA32')
self._configFile.AddPreDefined('MDE_CPU_X64')
self._configFile.AddPreDefined('MDE_CPU_IPF')
self._configFile.AddPreDefined('MDE_CPU_EBC')
self._configFile.AddPreDefined('MDE_CPU_ARM')
for macro in self._macros:
self._configFile.AddPreDefined(macro)
namestr = self._pObj.GetName()
if self._arch is not None:
namestr += '[%s]' % self._arch
if self._tooltag is not None:
namestr += '[%s]' % self._tooltag
self._configFile.SetProjectName(namestr)
self._configFile.SetStripPath(self._pObj.GetWorkspace())
self._configFile.SetProjectVersion(self._pObj.GetFileObj().GetVersion())
self._configFile.AddPattern('*.decdoxygen')
if self._tooltag.lower() == 'msft':
self._configFile.AddPreDefined('_MSC_EXTENSIONS')
elif self._tooltag.lower() == 'gnu':
self._configFile.AddPreDefined('__GNUC__')
elif self._tooltag.lower() == 'intel':
self._configFile.AddPreDefined('__INTEL_COMPILER')
else:
self._tooltag = None
self._configFile.AddPreDefined('_MSC_EXTENSIONS')
self._configFile.AddPreDefined('__GNUC__')
self._configFile.AddPreDefined('__INTEL_COMPILER')
self._configFile.AddPreDefined('ASM_PFX= ')
self._configFile.AddPreDefined('OPTIONAL= ')
    def GenerateIndexPage(self):
        """Build the package's root doxygen page and save it to disk.

        @return path of the generated index page file, used as the
                doxygen input root.
        """
        fObj = self._pObj.GetFileObj()
        pdObj = doxygen.DoxygenFile('%s Package Document' % self._pObj.GetName(),
                                    '%s.decdoxygen' % self._pObj.GetFilename())
        self._configFile.AddFile(pdObj.GetFilename())

        pdObj.AddDescription(fObj.GetFileHeader())

        # Basic-information table from the DEC [Defines] section.
        defSection = fObj.GetSectionByName('defines')[0]
        baseSection = doxygen.Section('PackageBasicInformation', 'Package Basic Information')
        descr = '<TABLE>'
        for obj in defSection.GetObjects():
            if obj.GetKey() in _dec_key_description_mapping_table.keys():
                descr += '<TR>'
                descr += '<TD><B>%s</B></TD>' % _dec_key_description_mapping_table[obj.GetKey()]
                descr += '<TD>%s</TD>' % obj.GetValue()
                descr += '</TR>'
        descr += '</TABLE><br>'
        baseSection.AddDescription(descr)
        pdObj.AddSection(baseSection)

        # Static "known issues" section.
        knownIssueSection = doxygen.Section('Known_Issue_section', 'Known Issue')
        knownIssueSection.AddDescription('<ul>')
        knownIssueSection.AddDescription('<li> OPTIONAL macro for function parameter can not be dealed with doxygen, so it disapear in this document! </li>')
        knownIssueSection.AddDescription('</ul>')
        pdObj.AddSection(knownIssueSection)

        # Sub-pages for includes, library classes, PCDs, GUIDs, PPIs,
        # protocols and (optionally) modules.
        self.AddAllIncludeFiles(self._pObj, self._configFile)
        pages = self.GenerateIncludesSubPage(self._pObj, self._configFile)
        if len(pages) != 0:
            pdObj.AddPages(pages)
        pages = self.GenerateLibraryClassesSubPage(self._pObj, self._configFile)
        if len(pages) != 0:
            pdObj.AddPages(pages)
        pages = self.GeneratePcdSubPages(self._pObj, self._configFile)
        if len(pages) != 0:
            pdObj.AddPages(pages)
        pages = self.GenerateGuidSubPages(self._pObj, self._configFile)
        if len(pages) != 0:
            pdObj.AddPages(pages)
        pages = self.GeneratePpiSubPages(self._pObj, self._configFile)
        if len(pages) != 0:
            pdObj.AddPages(pages)
        pages = self.GenerateProtocolSubPages(self._pObj, self._configFile)
        if len(pages) != 0:
            pdObj.AddPages(pages)
        if not self._onlyIncludeDocument:
            pdObj.AddPages(self.GenerateModulePages(self._pObj, self._configFile))

        pdObj.Save()
        return pdObj.GetFilename()
def GenerateIncludesSubPage(self, pObj, configFile):
"""Build the 'Public Includes' sub-page tree for the package.

Adds the package's conventional include directories and every
[includes]-declared path to the doxygen include path, then lists the
headers found under 'common'-architecture include folders (one
sub-page per sub-directory).
@param pObj       package object
@param configFile doxygen config file object
@return [root page] or [] when the DEC file declares no [includes].
"""
# by default add following path as include path to config file
pkpath = pObj.GetFileObj().GetPackageRootPath()
configFile.AddIncludePath(os.path.join(pkpath, 'Include'))
configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Library'))
configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Protocol'))
configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Ppi'))
configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Guid'))
configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'IndustryStandard'))
rootArray = []
pageRoot = doxygen.Page("Public Includes", "%s_public_includes" % pObj.GetName())
objs = pObj.GetFileObj().GetSectionObjectsByName('includes')
if len(objs) == 0: return []
for obj in objs:
# Add path to include path
path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath())
configFile.AddIncludePath(path)
# only list common folder's include file
if obj.GetArch().lower() != 'common':
continue
# The bNeedAdd* flags track whether any header was actually listed,
# so empty folders do not produce empty pages.
bNeedAddIncludePage = False
topPage = doxygen.Page(self._ConvertPathToDoxygen(path, pObj), 'public_include_top')
topPage.AddDescription('<ul>\n')
for file in os.listdir(path):
if file.lower() in _ignore_dir: continue
fullpath = os.path.join(path, file)
if os.path.isfile(fullpath):
# Top-level header: register it and link it from the top page.
self.ProcessSourceFileForInclude(fullpath, pObj, configFile)
topPage.AddDescription('<li> \link %s\endlink </li>\n' % self._ConvertPathToDoxygen(fullpath, pObj))
else:
# These well-known sub-folder names are intentionally not
# listed on this page.
if file.lower() in ['library', 'protocol', 'guid', 'ppi', 'ia32', 'x64', 'ipf', 'ebc', 'arm', 'pi', 'uefi', 'aarch64']:
continue
bNeedAddSubPage = False
subpage = doxygen.Page(self._ConvertPathToDoxygen(fullpath, pObj), 'public_include_%s' % file)
subpage.AddDescription('<ul>\n')
for subfile in os.listdir(fullpath):
if subfile.lower() in _ignore_dir: continue
bNeedAddSubPage = True
subfullpath = os.path.join(fullpath, subfile)
self.ProcessSourceFileForInclude(subfullpath, pObj, configFile)
subpage.AddDescription('<li> \link %s \endlink </li>\n' % self._ConvertPathToDoxygen(subfullpath, pObj))
subpage.AddDescription('</ul>\n')
if bNeedAddSubPage:
bNeedAddIncludePage = True
pageRoot.AddPage(subpage)
topPage.AddDescription('</ul>\n')
if bNeedAddIncludePage:
pageRoot.AddPage(topPage)
if pageRoot.GetSubpageCount() != 0:
return [pageRoot]
else:
return []
def GenerateLibraryClassesSubPage(self, pObj, configFile):
"""
Generate sub pages for the package's library classes.

One DEC file may contain several library class sections for
different architectures; with an architecture filter active a flat
list is produced, otherwise pages are grouped per architecture.
@param pObj       package object (provides the DEC file object)
@param configFile doxygen config file object
@return list containing the root page, or [] when no library class
is declared for the selected architecture.
"""
rootArray = []
pageRoot = doxygen.Page("Library Class", "%s_libraryclass" % pObj.GetName())
objs = pObj.GetFileObj().GetSectionObjectsByName('libraryclass', self._arch)
if len(objs) == 0: return []
if self._arch is not None:
# Architecture filter active: one page per class, no grouping.
for obj in objs:
classPage = doxygen.Page(obj.GetClassName(),
"lc_%s" % obj.GetClassName())
comments = obj.GetComment()
if len(comments) != 0:
classPage.AddDescription('<br>\n'.join(comments) + '<br>\n')
pageRoot.AddPage(classPage)
path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile())
path = path[len(pObj.GetWorkspace()) + 1:]
# Without declaration comments, pull the docs from the header.
if len(comments) == 0:
classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile())
section = doxygen.Section('ref', 'Refer to Header File')
section.AddDescription('\link %s\n' % obj.GetHeaderFile())
section.AddDescription(' \endlink<p>\n')
classPage.AddSection(section)
fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile())
self.ProcessSourceFileForInclude(fullPath, pObj, configFile)
else:
# No filter: group class pages under one sub-page per architecture.
archPageDict = {}
for obj in objs:
if obj.GetArch() not in archPageDict.keys():
archPageDict[obj.GetArch()] = doxygen.Page(obj.GetArch(),
'lc_%s' % obj.GetArch())
pageRoot.AddPage(archPageDict[obj.GetArch()])
subArchRoot = archPageDict[obj.GetArch()]
classPage = doxygen.Page(obj.GetClassName(),
"lc_%s" % obj.GetClassName())
comments = obj.GetComment()
if len(comments) != 0:
classPage.AddDescription('<br>\n'.join(comments) + '<br>\n')
subArchRoot.AddPage(classPage)
path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile())
path = path[len(pObj.GetWorkspace()) + 1:]
if len(comments) == 0:
classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile())
section = doxygen.Section('ref', 'Refer to Header File')
section.AddDescription('\link %s\n' % obj.GetHeaderFile())
section.AddDescription(' \endlink<p>\n')
classPage.AddSection(section)
fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile())
self.ProcessSourceFileForInclude(fullPath, pObj, configFile)
rootArray.append(pageRoot)
return rootArray
def ProcessSourceFileForInclude(self, path, pObj, configFile, infObj=None):
    """Register a source file with the doxygen config file.

    The file is verified to exist and be readable, then added to the
    doxygen input file set exactly once.

    @param path        full path of the source file being processed
    @param pObj        package object the file belongs to
    @param configFile  doxygen config file object collecting input files
    @param infObj      optional INF (module) object; kept for interface
                       compatibility with existing callers. It was only
                       used by the recursive '#include' scan that was
                       disabled upstream (see note below).
    """
    if gInGui:
        # Keep the GUI responsive while walking large trees.
        wx.Yield()
    if not os.path.exists(path):
        ErrorMsg('Source file path %s does not exist!' % path)
        return
    # Avoid registering the same file twice.
    if configFile.FileExists(path):
        return
    # Verify the file is readable before handing it to doxygen.
    try:
        with open(path, 'r') as f:
            f.readlines()
    except IOError:
        ErrorMsg('Fail to open file %s' % path)
        return
    configFile.AddFile(path)
    # NOTE(review): the historical code below this point recursively
    # resolved '#include' directives against the package and dependent
    # packages. It was disabled upstream by an unconditional 'return'
    # right after AddFile(), making it unreachable; it has been removed
    # here as dead code. Behavior is unchanged.
def AddAllIncludeFiles(self, pObj, configFile):
    """Register every file under the package's [includes] paths with doxygen.

    Walks each include directory declared in the DEC file, skipping the
    version-control folders listed in _ignore_dir.

    @param pObj        package object providing the DEC file and root path
    @param configFile  doxygen config file object collecting input files
    """
    for incObj in pObj.GetFileObj().GetSectionObjectsByName('includes'):
        incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), incObj.GetPath())
        for root, dirs, files in os.walk(incPath):
            # Prune ignored directories in place so os.walk never descends
            # into them. (The original removed entries while iterating the
            # same list, which silently skipped the entry following each
            # removal, so some ignored directories were still walked.)
            dirs[:] = [d for d in dirs if d.lower() not in _ignore_dir]
            for file in files:
                path = os.path.normpath(os.path.join(root, file))
                # The config file historically stores Windows-style separators.
                configFile.AddFile(path.replace('/', '\\'))
def GeneratePcdSubPages(self, pObj, configFile):
"""
Generate sub pages for the package's PCD definitions.

Pages are grouped by PCD access type; when no architecture filter is
active an extra grouping level per architecture is inserted. Each PCD
page carries its declaration comments plus an information table
(name, token space, token number, data type, default value).
@param pObj package object
@param configFile config file object
@return [root page] or [] when the DEC file declares no PCDs.
"""
rootArray = []
objs = pObj.GetFileObj().GetSectionObjectsByName('pcd')
if len(objs) == 0:
return []
pcdRootPage = doxygen.Page('PCD', 'pcd_root_page')
typeRootPageDict = {}
typeArchRootPageDict = {}
for obj in objs:
# One sub-tree per PCD access type (FixedAtBuild, FeatureFlag, ...).
if obj.GetPcdType() not in typeRootPageDict.keys():
typeRootPageDict[obj.GetPcdType()] = doxygen.Page(obj.GetPcdType(), 'pcd_%s_root_page' % obj.GetPcdType())
pcdRootPage.AddPage(typeRootPageDict[obj.GetPcdType()])
typeRoot = typeRootPageDict[obj.GetPcdType()]
if self._arch is not None:
# Architecture filter active: attach the PCD page directly under
# its type sub-tree.
pcdPage = doxygen.Page('%s' % obj.GetPcdName(),
'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1]))
pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n')
section = doxygen.Section('PCDinformation', 'PCD Information')
desc = '<TABLE>'
desc += '<TR>'
desc += '<TD><CAPTION>Name</CAPTION></TD>'
desc += '<TD><CAPTION>Token Space</CAPTION></TD>'
desc += '<TD><CAPTION>Token number</CAPTION></TD>'
desc += '<TD><CAPTION>Data Type</CAPTION></TD>'
desc += '<TD><CAPTION>Default Value</CAPTION></TD>'
desc += '</TR>'
desc += '<TR>'
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1]
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0]
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken()
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType()
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue()
desc += '</TR>'
desc += '</TABLE>'
section.AddDescription(desc)
pcdPage.AddSection(section)
typeRoot.AddPage(pcdPage)
else:
# No filter: insert a per-architecture level under the type root.
# NOTE(review): the page/table construction below intentionally
# mirrors the branch above; kept byte-identical.
keystr = obj.GetPcdType() + obj.GetArch()
if keystr not in typeArchRootPageDict.keys():
typeArchRootPage = doxygen.Page(obj.GetArch(), 'pcd_%s_%s_root_page' % (obj.GetPcdType(), obj.GetArch()))
typeArchRootPageDict[keystr] = typeArchRootPage
typeRoot.AddPage(typeArchRootPage)
typeArchRoot = typeArchRootPageDict[keystr]
pcdPage = doxygen.Page('%s' % obj.GetPcdName(),
'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1]))
pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n')
section = doxygen.Section('PCDinformation', 'PCD Information')
desc = '<TABLE>'
desc += '<TR>'
desc += '<TD><CAPTION>Name</CAPTION></TD>'
desc += '<TD><CAPTION>Token Space</CAPTION></TD>'
desc += '<TD><CAPTION>Token number</CAPTION></TD>'
desc += '<TD><CAPTION>Data Type</CAPTION></TD>'
desc += '<TD><CAPTION>Default Value</CAPTION></TD>'
desc += '</TR>'
desc += '<TR>'
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1]
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0]
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken()
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType()
desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue()
desc += '</TR>'
desc += '</TABLE>'
section.AddDescription(desc)
pcdPage.AddSection(section)
typeArchRoot.AddPage(pcdPage)
return [pcdRootPage]
def _GenerateGuidSubPage(self, pObj, obj, configFile):
"""Build one doxygen page for a single GUID declaration.

The page holds the declaration comments (if any), a name/value table,
and -- when a declaring header can be located -- a link to that
header file.
@param pObj       package object
@param obj        GUID section object from the DEC file
@param configFile doxygen config file object
@return the new doxygen page object.
"""
guidPage = doxygen.Page('%s' % obj.GetName(),
'guid_%s_%s' % (obj.GetArch(), obj.GetName()))
comments = obj.GetComment()
if len(comments) != 0:
guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>')
section = doxygen.Section('BasicGuidInfo', 'GUID Information')
desc = '<TABLE>'
desc += '<TR>'
desc += '<TD><CAPTION>GUID\'s Guid Name</CAPTION></TD><TD><CAPTION>GUID\'s Guid</CAPTION></TD>'
desc += '</TR>'
desc += '<TR>'
desc += '<TD>%s</TD>' % obj.GetName()
desc += '<TD>%s</TD>' % obj.GetGuid()
desc += '</TR>'
desc += '</TABLE>'
section.AddDescription(desc)
guidPage.AddSection(section)
refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile)
if refFile:
relPath = refFile[len(pObj.GetWorkspace()) + 1:]
# Without declaration comments, reuse the header's own documentation.
if len(comments) == 0:
guidPage.AddDescription(' \\copydoc %s <br>' % relPath)
section = doxygen.Section('ref', 'Refer to Header File')
section.AddDescription('\link %s\n' % relPath)
section.AddDescription('\endlink\n')
self.ProcessSourceFileForInclude(refFile, pObj, configFile)
guidPage.AddSection(section)
return guidPage
def GenerateGuidSubPages(self, pObj, configFile):
"""
Generate sub pages for the package's GUID definitions.

With an architecture filter active the pages are attached directly to
the root; otherwise they are grouped per architecture.
@param pObj package object
@param configFile doxygen config file object
@return [root page] or [] when the DEC file declares no GUIDs.
"""
pageRoot = doxygen.Page('GUID', 'guid_root_page')
objs = pObj.GetFileObj().GetSectionObjectsByName('guids', self._arch)
if len(objs) == 0: return []
if self._arch is not None:
for obj in objs:
pageRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile))
else:
# No filter: lazily create one sub-root per architecture.
guidArchRootPageDict = {}
for obj in objs:
if obj.GetArch() not in guidArchRootPageDict.keys():
guidArchRoot = doxygen.Page(obj.GetArch(), 'guid_arch_root_%s' % obj.GetArch())
pageRoot.AddPage(guidArchRoot)
guidArchRootPageDict[obj.GetArch()] = guidArchRoot
guidArchRoot = guidArchRootPageDict[obj.GetArch()]
guidArchRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile))
return [pageRoot]
def _GeneratePpiSubPage(self, pObj, obj, configFile):
"""Build one doxygen page for a single PPI declaration.

Same layout as _GenerateGuidSubPage: declaration comments, a
name/GUID table, and a link to the declaring header when found.
@param pObj       package object
@param obj        PPI section object from the DEC file
@param configFile doxygen config file object
@return the new doxygen page object.
"""
guidPage = doxygen.Page(obj.GetName(), 'ppi_page_%s' % obj.GetName())
comments = obj.GetComment()
if len(comments) != 0:
guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>')
section = doxygen.Section('BasicPpiInfo', 'PPI Information')
desc = '<TABLE>'
desc += '<TR>'
desc += '<TD><CAPTION>PPI\'s Guid Name</CAPTION></TD><TD><CAPTION>PPI\'s Guid</CAPTION></TD>'
desc += '</TR>'
desc += '<TR>'
desc += '<TD>%s</TD>' % obj.GetName()
desc += '<TD>%s</TD>' % obj.GetGuid()
desc += '</TR>'
desc += '</TABLE>'
section.AddDescription(desc)
guidPage.AddSection(section)
refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile)
if refFile:
relPath = refFile[len(pObj.GetWorkspace()) + 1:]
# Without declaration comments, reuse the header's own documentation.
if len(comments) == 0:
guidPage.AddDescription(' \\copydoc %s <br>' % relPath)
section = doxygen.Section('ref', 'Refer to Header File')
section.AddDescription('\link %s\n' % relPath)
section.AddDescription('\endlink\n')
self.ProcessSourceFileForInclude(refFile, pObj, configFile)
guidPage.AddSection(section)
return guidPage
def GeneratePpiSubPages(self, pObj, configFile):
"""
Generate sub pages for the package's PPI definitions.

With an architecture filter active the pages are attached directly to
the root; otherwise they are grouped per architecture.
@param pObj package object
@param configFile doxygen config file object
@return [root page] or [] when the DEC file declares no PPIs.
"""
pageRoot = doxygen.Page('PPI', 'ppi_root_page')
objs = pObj.GetFileObj().GetSectionObjectsByName('ppis', self._arch)
if len(objs) == 0: return []
if self._arch is not None:
for obj in objs:
pageRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile))
else:
# No filter: lazily create one sub-root per architecture.
guidArchRootPageDict = {}
for obj in objs:
if obj.GetArch() not in guidArchRootPageDict.keys():
guidArchRoot = doxygen.Page(obj.GetArch(), 'ppi_arch_root_%s' % obj.GetArch())
pageRoot.AddPage(guidArchRoot)
guidArchRootPageDict[obj.GetArch()] = guidArchRoot
guidArchRoot = guidArchRootPageDict[obj.GetArch()]
guidArchRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile))
return [pageRoot]
def _GenerateProtocolSubPage(self, pObj, obj, configFile):
"""Build one doxygen page for a single protocol declaration.

Same layout as _GenerateGuidSubPage: declaration comments, a
name/GUID table, and a link to the declaring header when found.
@param pObj       package object
@param obj        protocol section object from the DEC file
@param configFile doxygen config file object
@return the new doxygen page object.
"""
guidPage = doxygen.Page(obj.GetName(), 'protocol_page_%s' % obj.GetName())
comments = obj.GetComment()
if len(comments) != 0:
guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>')
section = doxygen.Section('BasicProtocolInfo', 'PROTOCOL Information')
desc = '<TABLE>'
desc += '<TR>'
desc += '<TD><CAPTION>PROTOCOL\'s Guid Name</CAPTION></TD><TD><CAPTION>PROTOCOL\'s Guid</CAPTION></TD>'
desc += '</TR>'
desc += '<TR>'
desc += '<TD>%s</TD>' % obj.GetName()
desc += '<TD>%s</TD>' % obj.GetGuid()
desc += '</TR>'
desc += '</TABLE>'
section.AddDescription(desc)
guidPage.AddSection(section)
refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile)
if refFile:
relPath = refFile[len(pObj.GetWorkspace()) + 1:]
# Without declaration comments, reuse the header's own documentation.
if len(comments) == 0:
guidPage.AddDescription(' \\copydoc %s <br>' % relPath)
section = doxygen.Section('ref', 'Refer to Header File')
section.AddDescription('\link %s\n' % relPath)
section.AddDescription('\endlink\n')
self.ProcessSourceFileForInclude(refFile, pObj, configFile)
guidPage.AddSection(section)
return guidPage
def GenerateProtocolSubPages(self, pObj, configFile):
"""
Generate sub pages for the package's protocol definitions.

With an architecture filter active the pages are attached directly to
the root; otherwise they are grouped per architecture.
@param pObj package object
@param configFile doxygen config file object
@return [root page] or [] when the DEC file declares no protocols.
"""
pageRoot = doxygen.Page('PROTOCOL', 'protocol_root_page')
objs = pObj.GetFileObj().GetSectionObjectsByName('protocols', self._arch)
if len(objs) == 0: return []
if self._arch is not None:
for obj in objs:
pageRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile))
else:
# No filter: lazily create one sub-root per architecture.
guidArchRootPageDict = {}
for obj in objs:
if obj.GetArch() not in guidArchRootPageDict.keys():
guidArchRoot = doxygen.Page(obj.GetArch(), 'protocol_arch_root_%s' % obj.GetArch())
pageRoot.AddPage(guidArchRoot)
guidArchRootPageDict[obj.GetArch()] = guidArchRoot
guidArchRoot = guidArchRootPageDict[obj.GetArch()]
guidArchRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile))
return [pageRoot]
def FindHeaderFileForGuid(self, pObj, name, configFile):
    """Find the header file declaring a GUID/PPI/Protocol C name.

    Searches C header files under <PackageRoot>/Include (or the package
    root when no Include directory exists) for a line containing both
    *name* and the keyword 'extern'.

    @param pObj        package object
    @param name        GUID/PPI/Protocol C name to look for
    @param configFile  doxygen config file object (not modified here)
    @return full path of the declaring header with '/' separators, or
            None when no declaration is found.
    """
    startPath = pObj.GetFileObj().GetPackageRootPath()
    incPath = os.path.join(startPath, 'Include').replace('\\', '/')
    # If <PackageRoot>/Include exists, narrow the search to it.
    if os.path.exists(incPath):
        startPath = incPath
    for root, dirs, files in os.walk(startPath):
        # Prune ignored directories in place so os.walk never descends
        # into them. (The original removed entries while iterating the
        # same list, which skipped the entry following each removal.)
        dirs[:] = [d for d in dirs if d.lower() not in _ignore_dir]
        for file in files:
            fPath = os.path.join(root, file)
            if not IsCHeaderFile(fPath):
                continue
            try:
                # Context manager guarantees the handle is closed even
                # when readlines() raises mid-way.
                with open(fPath, 'r') as f:
                    lines = f.readlines()
            except IOError:
                self.Log('Fail to open file %s\n' % fPath)
                continue
            for line in lines:
                if line.find(name) != -1 and line.find('extern') != -1:
                    return fPath.replace('\\', '/')
    return None
def GetPackageModuleList(self, pObj):
    """Collect all module INF file paths under the package root.

    @param pObj package object
    @return list of INF full paths ('/' separators), or None when the
            package root path does not exist.
    """
    mArray = []
    packPath = pObj.GetFileObj().GetPackageRootPath()
    # Bug fix: the original tested the *function object* os.path.exists
    # ('if not os.path.exists:'), which is always truthy, so the guard
    # never fired; the path must actually be checked.
    if not os.path.exists(packPath):
        return None
    for root, dirs, files in os.walk(packPath):
        # Prune ignored directories in place so os.walk never descends
        # into them (the original's remove-while-iterating skipped the
        # entry following each removal).
        dirs[:] = [d for d in dirs if d.lower() not in _ignore_dir]
        for file in files:
            if CheckPathPostfix(file, 'inf'):
                fPath = os.path.join(root, file).replace('\\', '/')
                mArray.append(fPath)
    return mArray
def GenerateModulePages(self, pObj, configFile):
    """Generate doxygen pages for all modules/libraries under the package.

    Parses every INF found by GetPackageModuleList() and groups the
    resulting pages under a 'Libraries' root (INFs that produce a
    library class) and a 'Modules' root (everything else).

    @param pObj        package object
    @param configFile  doxygen config file object
    @return list of root pages (possibly empty).
    """
    # GetPackageModuleList() returns None when the package root is
    # missing; treat that the same as an empty module list.
    infList = self.GetPackageModuleList(pObj) or []
    rootPages = []
    libObjs = []
    modObjs = []
    for infpath in infList:
        infObj = inf.INFFile(infpath)
        if not infObj:
            # Bug fix: the original formatted the imported 'inf' module
            # object into the message instead of the INF file path.
            self.Log('Fail create INF object for %s' % infpath)
            continue
        if not infObj.Parse():
            self.Log('Fail to load INF file %s' % infpath)
            continue
        # An INF producing a library class is documented as a library.
        if infObj.GetProduceLibraryClass() is not None:
            libObjs.append(infObj)
        else:
            modObjs.append(infObj)
    if len(libObjs) != 0:
        libRootPage = doxygen.Page('Libraries', 'lib_root_page')
        rootPages.append(libRootPage)
        for libInf in libObjs:
            libRootPage.AddPage(self.GenerateModulePage(pObj, libInf, configFile, True))
    if len(modObjs) != 0:
        modRootPage = doxygen.Page('Modules', 'module_root_page')
        rootPages.append(modRootPage)
        for modInf in modObjs:
            modRootPage.AddPage(self.GenerateModulePage(pObj, modInf, configFile, False))
    return rootPages
def GenerateModulePage(self, pObj, infObj, configFile, isLib):
"""
Generate the doxygen page for one module or library.

Builds, in order: a basic-information table from the INF [Defines]
section, then tables for used PCDs, protocols, PPIs and GUIDs (each
resolved to its declaring dependent package), the produced/consumed
library classes, the source file list, and the dependency expression.
@param pObj       package object owning the module
@param infObj     INF file object for the module/library
@param configFile doxygen config file object
@param isLib      True when the INF produces a library class
@return the module's doxygen page object.
"""
workspace = pObj.GetWorkspace()
# Parse all dependent packages once; they are searched repeatedly
# below to resolve PCD/protocol/PPI/GUID/library-class declarations.
refDecObjs = []
for obj in infObj.GetSectionObjectsByName('packages'):
decObj = dec.DECFile(os.path.join(workspace, obj.GetPath()))
if not decObj:
ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName())
continue
if not decObj.Parse():
ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName())
continue
refDecObjs.append(decObj)
modPage = doxygen.Page('%s' % infObj.GetBaseName(),
'module_%s' % infObj.GetBaseName())
modPage.AddDescription(infObj.GetFileHeader())
basicInfSection = doxygen.Section('BasicModuleInformation', 'Basic Module Information')
desc = "<TABLE>"
for obj in infObj.GetSectionObjectsByName('defines'):
key = obj.GetKey()
value = obj.GetValue()
if key not in _inf_key_description_mapping_table.keys(): continue
# LIBRARY_CLASS may carry '<class>|<supported module types>'.
if key == 'LIBRARY_CLASS' and value.find('|') != -1:
clsname, types = value.split('|')
desc += '<TR>'
desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key]
desc += '<TD>%s</TD>' % clsname
desc += '</TR>'
desc += '<TR>'
desc += '<TD><B>Supported Module Types</B></TD>'
desc += '<TD>%s</TD>' % types
desc += '</TR>'
else:
desc += '<TR>'
desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key]
if key == 'EFI_SPECIFICATION_VERSION' and value == '0x00020000':
value = '2.0'
desc += '<TD>%s</TD>' % value
desc += '</TR>'
desc += '</TABLE>'
basicInfSection.AddDescription(desc)
modPage.AddSection(basicInfSection)
# PCD section: one row per PCD, resolved to its declaring package.
data = []
for obj in infObj.GetSectionObjectsByName('pcd', self._arch):
data.append(obj.GetPcdName().strip())
if len(data) != 0:
s = doxygen.Section('Pcds', 'Pcds')
desc = "<TABLE>"
desc += '<TR><TD><B>PCD Name</B></TD><TD><B>TokenSpace</B></TD><TD><B>Package</B></TD></TR>'
for item in data:
desc += '<TR>'
desc += '<TD>%s</TD>' % item.split('.')[1]
desc += '<TD>%s</TD>' % item.split('.')[0]
pkgbasename = self.SearchPcdPackage(item, workspace, refDecObjs)
desc += '<TD>%s</TD>' % pkgbasename
desc += '</TR>'
desc += "</TABLE>"
s.AddDescription(desc)
modPage.AddSection(s)
# Add protocol section
#sects = infObj.GetSectionByString('protocol')
data = []
#for sect in sects:
for obj in infObj.GetSectionObjectsByName('protocol', self._arch):
data.append(obj.GetName().strip())
if len(data) != 0:
s = doxygen.Section('Protocols', 'Protocols')
desc = "<TABLE>"
desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>'
for item in data:
desc += '<TR>'
desc += '<TD>%s</TD>' % item
pkgbasename = self.SearchProtocolPackage(item, workspace, refDecObjs)
desc += '<TD>%s</TD>' % pkgbasename
desc += '</TR>'
desc += "</TABLE>"
s.AddDescription(desc)
modPage.AddSection(s)
# Add ppi section
#sects = infObj.GetSectionByString('ppi')
data = []
#for sect in sects:
for obj in infObj.GetSectionObjectsByName('ppi', self._arch):
data.append(obj.GetName().strip())
if len(data) != 0:
s = doxygen.Section('Ppis', 'Ppis')
desc = "<TABLE>"
desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>'
for item in data:
desc += '<TR>'
desc += '<TD>%s</TD>' % item
pkgbasename = self.SearchPpiPackage(item, workspace, refDecObjs)
desc += '<TD>%s</TD>' % pkgbasename
desc += '</TR>'
desc += "</TABLE>"
s.AddDescription(desc)
modPage.AddSection(s)
# Add guid section
#sects = infObj.GetSectionByString('guid')
data = []
#for sect in sects:
for obj in infObj.GetSectionObjectsByName('guid', self._arch):
data.append(obj.GetName().strip())
if len(data) != 0:
s = doxygen.Section('Guids', 'Guids')
desc = "<TABLE>"
desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>'
for item in data:
desc += '<TR>'
desc += '<TD>%s</TD>' % item
pkgbasename = self.SearchGuidPackage(item, workspace, refDecObjs)
desc += '<TD>%s</TD>' % pkgbasename
desc += '</TR>'
desc += "</TABLE>"
s.AddDescription(desc)
modPage.AddSection(s)
# Library class table: the produced class (for libraries) plus every
# consumed class, each resolved to its declaring package and header.
section = doxygen.Section('LibraryClasses', 'Library Classes')
desc = "<TABLE>"
desc += '<TR><TD><B>Name</B></TD><TD><B>Type</B></TD><TD><B>Package</B></TD><TD><B>Header File</B></TD></TR>'
if isLib:
desc += '<TR>'
desc += '<TD>%s</TD>' % infObj.GetProduceLibraryClass()
desc += '<TD>Produce</TD>'
try:
pkgname, hPath = self.SearchLibraryClassHeaderFile(infObj.GetProduceLibraryClass(),
workspace,
refDecObjs)
except:
self.Log ('fail to get package header file for lib class %s' % infObj.GetProduceLibraryClass())
pkgname = 'NULL'
hPath = 'NULL'
desc += '<TD>%s</TD>' % pkgname
if hPath != "NULL":
#desc += '<TD>\link %s \endlink</TD>' % hPath
desc += '<TD>%s</TD>' % hPath
else:
desc += '<TD>%s</TD>' % hPath
desc += '</TR>'
for lcObj in infObj.GetSectionObjectsByName('libraryclasses', self._arch):
desc += '<TR>'
desc += '<TD>%s</TD>' % lcObj.GetClass()
retarr = self.SearchLibraryClassHeaderFile(lcObj.GetClass(),
workspace,
refDecObjs)
if retarr is not None:
pkgname, hPath = retarr
else:
self.Log('Fail find the library class %s definition from module %s dependent package!' % (lcObj.GetClass(), infObj.GetFilename()), 'error')
pkgname = 'NULL'
hPath = 'NULL'
desc += '<TD>Consume</TD>'
desc += '<TD>%s</TD>' % pkgname
desc += '<TD>%s</TD>' % hPath
desc += '</TR>'
desc += "</TABLE>"
section.AddDescription(desc)
modPage.AddSection(section)
# Source file list; .uni/.s/.asm/.nasm files cannot be parsed by
# doxygen directly and are first converted to a .dox stub.
section = doxygen.Section('SourceFiles', 'Source Files')
section.AddDescription('<ul>\n')
for obj in infObj.GetSourceObjects(self._arch, self._tooltag):
sPath = infObj.GetModuleRootPath()
sPath = os.path.join(sPath, obj.GetSourcePath()).replace('\\', '/').strip()
if sPath.lower().endswith('.uni') or sPath.lower().endswith('.s') or sPath.lower().endswith('.asm') or sPath.lower().endswith('.nasm'):
newPath = self.TranslateUniFile(sPath)
configFile.AddFile(newPath)
newPath = newPath[len(pObj.GetWorkspace()) + 1:]
section.AddDescription('<li> \link %s \endlink </li>' % newPath)
else:
self.ProcessSourceFileForInclude(sPath, pObj, configFile, infObj)
sPath = sPath[len(pObj.GetWorkspace()) + 1:]
section.AddDescription('<li>\link %s \endlink </li>' % sPath)
section.AddDescription('</ul>\n')
modPage.AddSection(section)
# Dependency expression ([depex]) section, if present.
#sects = infObj.GetSectionByString('depex')
data = []
#for sect in sects:
for obj in infObj.GetSectionObjectsByName('depex'):
data.append(str(obj))
if len(data) != 0:
s = doxygen.Section('DependentSection', 'Module Dependencies')
s.AddDescription('<br>'.join(data))
modPage.AddSection(s)
return modPage
def TranslateUniFile(self, path):
    """Convert a .uni/.s/.asm source file into a doxygen '.dox' stub.

    Each content line is wrapped into a '/** @file ... **/' comment block
    with '<br>' line breaks; lines containing '@file' or '*/' are
    dropped, and a leading comment-marker token is stripped from lines
    starting with '/'.

    @param path source file full path
    @return path of the generated '<path>.dox' file, or None when the
            source file cannot be opened.
    """
    newpath = path + '.dox'
    try:
        with open(path, 'r') as src:
            text = src.read()
    except (IOError, OSError):
        return None
    output = '/** @file \n'
    # Bug fix: the original split on a literal '\r\n', which never occurs
    # in Python 3 text mode (universal newlines translate it to '\n'),
    # so every file collapsed into one line that was then discarded by
    # the '@file'/'*/' filters. splitlines() handles all conventions.
    for line in text.splitlines():
        if '@file' in line or '*/' in line:
            continue
        line = line.strip()
        if line.startswith('/'):
            # Drop the leading comment marker token, keep the rest.
            tokens = line.split(' ')
            if len(tokens) <= 1:
                continue
            line = ' '.join(tokens[1:])
        output += '%s<br>\n' % line
    output += '**/'
    # open('w') truncates, so the explicit os.remove() of the old
    # version is unnecessary.
    with open(newpath, 'w') as dst:
        dst.write(output)
    return newpath
def SearchPcdPackage(self, pcdname, workspace, decObjs):
    """Return the base name of the first DEC package declaring *pcdname*.

    @param pcdname   fully qualified PCD name ('TokenSpace.PcdName')
    @param workspace workspace root path (unused, kept for interface)
    @param decObjs   DEC package objects to search
    @return the declaring package's base name, or None when not found.
    """
    for package in decObjs:
        declared = package.GetSectionObjectsByName('pcd')
        if any(entry.GetPcdName() == pcdname for entry in declared):
            return package.GetBaseName()
    return None
def SearchProtocolPackage(self, protname, workspace, decObjs):
    """Return the base name of the first DEC package declaring protocol
    *protname*, or None when no dependent package declares it.
    """
    for package in decObjs:
        declared = package.GetSectionObjectsByName('protocol')
        if any(entry.GetName() == protname for entry in declared):
            return package.GetBaseName()
    return None
def SearchPpiPackage(self, ppiname, workspace, decObjs):
    """Return the base name of the first DEC package declaring PPI
    *ppiname*, or None when no dependent package declares it.
    """
    for package in decObjs:
        declared = package.GetSectionObjectsByName('ppi')
        if any(entry.GetName() == ppiname for entry in declared):
            return package.GetBaseName()
    return None
def SearchGuidPackage(self, guidname, workspace, decObjs):
    """Return the base name of the first DEC package declaring GUID
    *guidname*, or None when no dependent package declares it.
    """
    for package in decObjs:
        declared = package.GetSectionObjectsByName('guid')
        if any(entry.GetName() == guidname for entry in declared):
            return package.GetBaseName()
    return None
def SearchLibraryClassHeaderFile(self, className, workspace, decObjs):
    """Locate the package and header file declaring a library class.

    @param className library class name to resolve
    @param workspace workspace root path, used to relativize the header path
    @param decObjs   DEC package objects to search
    @return (package base name, workspace-relative header path with '/'
            separators), or None when the class is not declared.
    """
    for package in decObjs:
        for candidate in package.GetSectionObjectsByName('libraryclasses'):
            if candidate.GetClassName().strip() != className:
                continue
            headerPath = os.path.join(package.GetPackageRootPath(),
                                      candidate.GetHeaderFile().strip())
            relative = headerPath[len(workspace) + 1:]
            return package.GetBaseName(), relative.replace('\\', '/')
    return None
def _ConvertPathToDoxygen(self, path, pObj):
    """Return *path* relative to the package's workspace root, with '/'
    separators, as expected by the generated doxygen markup.
    """
    workspaceRoot = pObj.GetWorkspace()
    relative = path[len(workspaceRoot) + 1:]
    return relative.replace('\\', '/')
def IsCHeaderFile(path):
"""Return True when *path* names a C header file ('.h', case-insensitive)."""
return CheckPathPostfix(path, 'h')
def CheckPathPostfix(path, str):
    """Return True when *path* has the file extension *str* (case-insensitive).

    A path with no '.' at all has no extension and yields False.
    """
    head, sep, ext = path.rpartition('.')
    return sep == '.' and ext.lower() == str.lower()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/doxygengen_spec.py
|
## @file
#
# This file produces action classes to generate doxygen documentation for the edk2 codebase.
# The action classes are shared by the GUI and the command line tools.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
"""This file produce action class to generate doxygen document for edk2 codebase.
The action classes are shared by GUI and command line tools.
"""
from plugins.EdkPlugins.basemodel import doxygen
import os
# Doxygen generation can run inside the wxPython GUI or from the command
# line; gInGui records which environment we are in so that long-running
# loops can call wx.Yield() to keep the GUI responsive.
try:
    import wx
    gInGui = True
except Exception:
    # Any wx import failure (missing package, headless display) means
    # command-line mode. Catch Exception rather than a bare 'except:'
    # so SystemExit and KeyboardInterrupt still propagate.
    gInGui = False
import re
from plugins.EdkPlugins.edk2.model import inf
from plugins.EdkPlugins.edk2.model import dec
from plugins.EdkPlugins.basemodel.message import *
# Directory names (lower-case) skipped when walking package trees.
_ignore_dir = ['.svn', '_svn', 'cvs']
# Maps INF [Defines] keys to the human-readable labels shown in the
# generated "Basic Module Information" table; unknown keys are omitted.
_inf_key_description_mapping_table = {
'INF_VERSION':'Version of INF file specification',
#'BASE_NAME':'Module Name',
'FILE_GUID':'Module Guid',
'MODULE_TYPE': 'Module Type',
'VERSION_STRING': 'Module Version',
'LIBRARY_CLASS': 'Produced Library Class',
'EFI_SPECIFICATION_VERSION': 'UEFI Specification Version',
'PI_SPECIFICATION_VERSION': 'PI Specification Version',
'ENTRY_POINT': 'Module Entry Point Function',
'CONSTRUCTOR': 'Library Constructor Function'
}
# Maps DEC [Defines] keys to labels for the package information table.
_dec_key_description_mapping_table = {
'DEC_SPECIFICATION': 'Version of DEC file specification',
'PACKAGE_GUID': 'Package Guid'
}
class DoxygenAction:
"""This is base class for all doxygen action.
"""
def __init__(self, doxPath, chmPath, outputPath, projname, mode='html', log=None, verbose=False):
"""Constructor function.
@param doxPath the obosolution path of doxygen execute file.
@param outputPath the obosolution output path.
@param log log function for output message
"""
self._doxPath = doxPath
self._chmPath = chmPath
self._outputPath = outputPath
self._projname = projname
self._configFile = None # doxygen config file is used by doxygen exe file
self._indexPageFile = None # doxygen page file for index page.
self._log = log
self._mode = mode
self._verbose = verbose
self._doxygenCallback = None
self._chmCallback = None
def Log(self, message, level='info'):
if self._log is not None:
self._log(message, level)
def IsVerbose(self):
return self._verbose
def Generate(self):
"""Generate interface called by outer directly"""
self.Log(">>>>>> Start generate doxygen document for %s... Zzz....\n" % self._projname)
# create doxygen config file at first
self._configFile = doxygen.DoxygenConfigFile()
self._configFile.SetOutputDir(self._outputPath)
self._configFile.SetWarningFilePath(os.path.join(self._outputPath, 'warning.txt'))
if self._mode.lower() == 'html':
self._configFile.SetHtmlMode()
else:
self._configFile.SetChmMode()
self.Log(" >>>>>> Initialize doxygen config file...Zzz...\n")
self.InitializeConfigFile()
self.Log(" >>>>>> Generate doxygen index page file...Zzz...\n")
indexPagePath = self.GenerateIndexPage()
if indexPagePath is None:
self.Log("Fail to generate index page!\n", 'error')
return False
else:
self.Log("Success to create doxygen index page file %s \n" % indexPagePath)
# Add index page doxygen file to file list.
self._configFile.AddFile(indexPagePath)
# save config file to output path
configFilePath = os.path.join(self._outputPath, self._projname + '.doxygen_config')
self._configFile.Generate(configFilePath)
self.Log(" <<<<<< Success Save doxygen config file to %s...\n" % configFilePath)
# launch doxygen tool to generate document
if self._doxygenCallback is not None:
self.Log(" >>>>>> Start doxygen process...Zzz...\n")
if not self._doxygenCallback(self._doxPath, configFilePath):
return False
else:
self.Log("Fail to create doxygen process!", 'error')
return False
return True
def InitializeConfigFile(self):
"""Initialize config setting for doxygen project. It will be invoked after config file
object is created. Inherited class should implement it.
"""
def GenerateIndexPage(self):
"""Generate doxygen index page. Inherited class should implement it."""
return None
def RegisterCallbackDoxygenProcess(self, callback):
    """Register the callable used later by Generate() to launch the external
    doxygen process."""
    self._doxygenCallback = callback
def RegisterCallbackCHMProcess(self, callback):
    """Register the callable used to launch the external CHM compiler."""
    self._chmCallback = callback
class PlatformDocumentAction(DoxygenAction):
    """Generate platform doxygen document, will be implement at future."""
    # NOTE(review): placeholder subclass -- no platform-specific behavior is
    # implemented yet; it inherits everything from DoxygenAction unchanged.
class PackageDocumentAction(DoxygenAction):
"""Generate package reference document"""
def __init__(self, doxPath, chmPath, outputPath, pObj, mode='html', log=None, arch=None, tooltag=None,
             onlyInclude=False, verbose=False):
    """Create a package document action.

    @param doxPath      path handed to the doxygen launch callback
    @param chmPath      presumably the CHM compiler path, stored by the base
                        class -- TODO confirm against DoxygenAction.__init__
    @param outputPath   directory that receives the generated document
    @param pObj         package object whose name/DEC content is documented
    @param mode         'html' or 'chm' output mode
    @param log          optional logging callback accepting (message, level)
    @param arch         architecture filter (e.g. 'IA32'); None means all
    @param tooltag      toolchain tag ('msft', 'gnu', 'intel'); None means all
    @param onlyInclude  if True, module pages are skipped (includes only)
    @param verbose      enable verbose progress logging
    """
    DoxygenAction.__init__(self, doxPath, chmPath, outputPath, pObj.GetName(), mode, log, verbose)
    self._pObj = pObj                        # package object under documentation
    self._arch = arch                        # arch filter, or None
    self._tooltag = tooltag                  # toolchain filter, or None
    self._onlyIncludeDocument = onlyInclude  # skip module pages when True
def InitializeConfigFile(self):
    """Populate self._configFile with package specific settings.

    Adds CPU and toolchain pre-defines matching the requested arch/tooltag
    (or all of them when no filter is given), sets the project name,
    version and strip path, and registers the '*.decdoxygen' input pattern.
    """
    # Architecture pre-defines: one macro for a known arch, otherwise
    # define them all and clear the filter.
    if self._arch == 'IA32':
        self._configFile.AddPreDefined('MDE_CPU_IA32')
    elif self._arch == 'X64':
        self._configFile.AddPreDefined('MDE_CPU_X64')
    elif self._arch == 'IPF':
        self._configFile.AddPreDefined('MDE_CPU_IPF')
    elif self._arch == 'EBC':
        self._configFile.AddPreDefined('MDE_CPU_EBC')
    else:
        self._arch = None
        self._configFile.AddPreDefined('MDE_CPU_IA32')
        self._configFile.AddPreDefined('MDE_CPU_X64')
        self._configFile.AddPreDefined('MDE_CPU_IPF')
        self._configFile.AddPreDefined('MDE_CPU_EBC')
        self._configFile.AddPreDefined('MDE_CPU_ARM')
    namestr = self._pObj.GetName()
    if self._arch is not None:
        namestr += '[%s]' % self._arch
    if self._tooltag is not None:
        namestr += '[%s]' % self._tooltag
    self._configFile.SetProjectName(namestr)
    self._configFile.SetStripPath(self._pObj.GetWorkspace())
    self._configFile.SetProjectVersion(self._pObj.GetFileObj().GetVersion())
    self._configFile.AddPattern('*.decdoxygen')
    # Toolchain pre-defines.  BUGFIX: the original unconditionally called
    # self._tooltag.lower(), which raised AttributeError when no tooltag was
    # supplied; treat None like an unknown tag and define all macros.
    tag = self._tooltag.lower() if self._tooltag is not None else None
    if tag == 'msft':
        self._configFile.AddPreDefined('_MSC_EXTENSIONS')
    elif tag == 'gnu':
        self._configFile.AddPreDefined('__GNUC__')
    elif tag == 'intel':
        self._configFile.AddPreDefined('__INTEL_COMPILER')
    else:
        self._tooltag = None
        self._configFile.AddPreDefined('_MSC_EXTENSIONS')
        self._configFile.AddPreDefined('__GNUC__')
        self._configFile.AddPreDefined('__INTEL_COMPILER')
    self._configFile.AddPreDefined('ASM_PFX= ')
    self._configFile.AddPreDefined('OPTIONAL= ')
def GenerateIndexPage(self):
    """Build the package-level doxygen index page (<name>.decdoxygen).

    The page carries the DEC file header, a basic-information table built
    from the [Defines] section, a known-issue note, and sub pages for
    includes, library classes, PCDs, GUIDs, PPIs, protocols and (unless
    only-include mode is on) all modules found under the package.

    @return path of the generated index page file
    """
    fObj = self._pObj.GetFileObj()
    pdObj = doxygen.DoxygenFile('%s Package Document' % self._pObj.GetName(),
                                '%s.decdoxygen' % self._pObj.GetFilename())
    self._configFile.AddFile(pdObj.GetFilename())
    pdObj.AddDescription(fObj.GetFileHeader())
    # Basic information table from the first [Defines] section.
    defSection = fObj.GetSectionByName('defines')[0]
    baseSection = doxygen.Section('PackageBasicInformation', 'Package Basic Information')
    descr = '<TABLE>'
    for obj in defSection.GetObjects():
        # Only keys with a human readable description are shown.
        if obj.GetKey() in _dec_key_description_mapping_table.keys():
            descr += '<TR>'
            descr += '<TD><B>%s</B></TD>' % _dec_key_description_mapping_table[obj.GetKey()]
            descr += '<TD>%s</TD>' % obj.GetValue()
            descr += '</TR>'
    descr += '</TABLE><br>'
    baseSection.AddDescription(descr)
    pdObj.AddSection(baseSection)
    knownIssueSection = doxygen.Section('Known_Issue_section', 'Known Issue')
    knownIssueSection.AddDescription('<ul>')
    knownIssueSection.AddDescription('<li> OPTIONAL macro for function parameter can not be dealed with doxygen, so it disapear in this document! </li>')
    knownIssueSection.AddDescription('</ul>')
    pdObj.AddSection(knownIssueSection)
    self.AddAllIncludeFiles(self._pObj, self._configFile)
    # Each sub-page generator returns [] when its DEC section is absent.
    pages = self.GenerateIncludesSubPage(self._pObj, self._configFile)
    if len(pages) != 0:
        pdObj.AddPages(pages)
    pages = self.GenerateLibraryClassesSubPage(self._pObj, self._configFile)
    if len(pages) != 0:
        pdObj.AddPages(pages)
    pages = self.GeneratePcdSubPages(self._pObj, self._configFile)
    if len(pages) != 0:
        pdObj.AddPages(pages)
    pages = self.GenerateGuidSubPages(self._pObj, self._configFile)
    if len(pages) != 0:
        pdObj.AddPages(pages)
    pages = self.GeneratePpiSubPages(self._pObj, self._configFile)
    if len(pages) != 0:
        pdObj.AddPages(pages)
    pages = self.GenerateProtocolSubPages(self._pObj, self._configFile)
    if len(pages) != 0:
        pdObj.AddPages(pages)
    if not self._onlyIncludeDocument:
        pdObj.AddPages(self.GenerateModulePages(self._pObj, self._configFile))
    pdObj.Save()
    return pdObj.GetFilename()
def GenerateIncludesSubPage(self, pObj, configFile):
    """Build the 'Public Includes' page tree for the package.

    Registers the standard Include/* sub directories as doxygen include
    paths, then walks every [Includes] path (common arch only), producing
    a page per directory plus links for every contained header.

    @param pObj       package object
    @param configFile doxygen config file object
    @return [root page] or [] when nothing was generated
    """
    # by default add following path as include path to config file
    pkpath = pObj.GetFileObj().GetPackageRootPath()
    configFile.AddIncludePath(os.path.join(pkpath, 'Include'))
    configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Library'))
    configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Protocol'))
    configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Ppi'))
    configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Guid'))
    configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'IndustryStandard'))
    rootArray = []
    pageRoot = doxygen.Page("Public Includes", "%s_public_includes" % pObj.GetName())
    objs = pObj.GetFileObj().GetSectionObjectsByName('includes')
    if len(objs) == 0: return []
    for obj in objs:
        # Add path to include path
        path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath())
        configFile.AddIncludePath(path)
        # only list common folder's include file
        if obj.GetArch().lower() != 'common':
            continue
        bNeedAddIncludePage = False
        # NOTE(review): the doxygen page id 'public_include_top' is reused
        # for every [Includes] entry -- ids may collide when a DEC lists
        # several common include paths; confirm against doxygen output.
        topPage = doxygen.Page(self._ConvertPathToDoxygen(path, pObj), 'public_include_top')
        topPage.AddDescription('<ul>\n')
        for file in os.listdir(path):
            if file.lower() in _ignore_dir: continue
            fullpath = os.path.join(path, file)
            if os.path.isfile(fullpath):
                # Plain header directly under the include path.
                self.ProcessSourceFileForInclude(fullpath, pObj, configFile)
                topPage.AddDescription('<li> \link %s\endlink </li>\n' % self._ConvertPathToDoxygen(fullpath, pObj))
            else:
                # Sub directory: well-known arch/standard dirs are skipped.
                if file.lower() in ['library', 'protocol', 'guid', 'ppi', 'ia32', 'x64', 'ipf', 'ebc', 'arm', 'pi', 'uefi', 'aarch64']:
                    continue
                bNeedAddSubPage = False
                subpage = doxygen.Page(self._ConvertPathToDoxygen(fullpath, pObj), 'public_include_%s' % file)
                subpage.AddDescription('<ul>\n')
                for subfile in os.listdir(fullpath):
                    if subfile.lower() in _ignore_dir: continue
                    bNeedAddSubPage = True
                    subfullpath = os.path.join(fullpath, subfile)
                    self.ProcessSourceFileForInclude(subfullpath, pObj, configFile)
                    subpage.AddDescription('<li> \link %s \endlink </li>\n' % self._ConvertPathToDoxygen(subfullpath, pObj))
                subpage.AddDescription('</ul>\n')
                if bNeedAddSubPage:
                    bNeedAddIncludePage = True
                    pageRoot.AddPage(subpage)
        topPage.AddDescription('</ul>\n')
        if bNeedAddIncludePage:
            pageRoot.AddPage(topPage)
    if pageRoot.GetSubpageCount() != 0:
        return [pageRoot]
    else:
        return []
def GenerateLibraryClassesSubPage(self, pObj, configFile):
    """
    Generate sub page for library class for package.
    One DEC file maybe contains many library class sections
    for different architecture.
    @param pObj       package object
    @param configFile doxygen config file object
    @return [root page] or [] when the package declares no library class
    """
    rootArray = []
    pageRoot = doxygen.Page("Library Class", "%s_libraryclass" % pObj.GetName())
    objs = pObj.GetFileObj().GetSectionObjectsByName('libraryclass', self._arch)
    if len(objs) == 0:
        return []
    if self._arch is not None:
        # Arch filter active: hang every class page directly off the root.
        for obj in objs:
            pageRoot.AddPage(self._GenerateLibraryClassPage(pObj, obj, configFile))
    else:
        # No filter: group class pages under one intermediate page per arch.
        archPageDict = {}
        for obj in objs:
            if obj.GetArch() not in archPageDict.keys():
                archPageDict[obj.GetArch()] = doxygen.Page(obj.GetArch(),
                                                           'lc_%s' % obj.GetArch())
                pageRoot.AddPage(archPageDict[obj.GetArch()])
            subArchRoot = archPageDict[obj.GetArch()]
            subArchRoot.AddPage(self._GenerateLibraryClassPage(pObj, obj, configFile))
    rootArray.append(pageRoot)
    return rootArray

def _GenerateLibraryClassPage(self, pObj, obj, configFile):
    """Build the doxygen page for a single library-class object and register
    its declaration header with the config file.  Factored out of the two
    identical branches of GenerateLibraryClassesSubPage."""
    classPage = doxygen.Page(obj.GetClassName(),
                             "lc_%s" % obj.GetClassName())
    comments = obj.GetComment()
    if len(comments) != 0:
        classPage.AddDescription('<br>\n'.join(comments) + '<br>\n')
    # Without DEC comments, pull the header's own doc comment in.
    if len(comments) == 0:
        classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile())
    section = doxygen.Section('ref', 'Refer to Header File')
    section.AddDescription('\link %s\n' % obj.GetHeaderFile())
    section.AddDescription(' \endlink<p>\n')
    classPage.AddSection(section)
    fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile())
    self.ProcessSourceFileForInclude(fullPath, pObj, configFile)
    return classPage
def ProcessSourceFileForInclude(self, path, pObj, configFile, infObj=None):
    """
    Recursively register *path* and every header it #includes with the
    doxygen config file.

    @param path       the analysing file full path
    @param pObj       package object
    @param configFile doxygen config file.
    @param infObj     optional INF file object; when given, its dependent
                      packages' include paths are searched as well.
    """
    if gInGui:
        wx.Yield()
    if not os.path.exists(path):
        ErrorMsg('Source file path %s does not exist!' % path)
        return
    # Already registered earlier in this run -- stop the recursion.
    if configFile.FileExists(path):
        return
    try:
        with open(path, 'r') as f:
            lines = f.readlines()
    except UnicodeDecodeError:
        # Non-text (or non-UTF-8) file: silently skip it.
        return
    except IOError:
        ErrorMsg('Fail to open file %s' % path)
        return
    configFile.AddFile(path)
    for line in lines:
        stripped = line.strip()
        if len(stripped) == 0:
            continue
        # Skip obvious comment lines.
        if stripped[:2] in ['##', '//', '/*', '*/']:
            continue
        # Only lines of the exact form: #include <File.h> / #include "File.h"
        if not re.match(r"^#\s*include\s+[<\"]([\\/\w.]+)[>\"]$", stripped.lower()):
            continue
        mo = re.match(r"^[#\w\s]+[<\"]([\\/\w.]+)[>\"]$", stripped)
        filePath = mo.groups()[0]
        if filePath is None or len(filePath) == 0:
            continue
        # find header file in module's path firstly.
        fullPath = None
        if os.path.exists(os.path.join(os.path.dirname(path), filePath)):
            # Find the file in current directory
            fullPath = os.path.join(os.path.dirname(path), filePath).replace('\\', '/')
        else:
            # find in dependent package's include path
            incObjs = pObj.GetFileObj().GetSectionObjectsByName('includes')
            for incObj in incObjs:
                incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), incObj.GetPath()).strip()
                incPath = os.path.realpath(os.path.join(incPath, filePath))
                if os.path.exists(incPath):
                    fullPath = incPath
                    break
            if infObj is not None:
                pkgInfObjs = infObj.GetSectionObjectsByName('packages')
                for obj in pkgInfObjs:
                    decObj = dec.DECFile(os.path.join(pObj.GetWorkspace(), obj.GetPath()))
                    if not decObj:
                        ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName())
                        continue
                    if not decObj.Parse():
                        ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName())
                        continue
                    incObjs = decObj.GetSectionObjectsByName('includes')
                    for incObj in incObjs:
                        incPath = os.path.join(decObj.GetPackageRootPath(), incObj.GetPath()).replace('\\', '/')
                        if os.path.exists(os.path.join(incPath, filePath)):
                            fullPath = os.path.join(os.path.join(incPath, filePath))
                            break
                    if fullPath is not None:
                        break
        # BUGFIX: the original only returned when verbose logging was on;
        # with verbose off, an unresolved header fell through and crashed
        # calling .replace() on fullPath while it was still None.
        if fullPath is None:
            if self.IsVerbose():
                self.Log('Can not resolve header file %s for file %s in package %s\n' % (filePath, path, pObj.GetFileObj().GetFilename()), 'error')
            return
        fullPath = fullPath.replace('\\', '/')
        if self.IsVerbose():
            self.Log('Preprocessing: Add include file %s for file %s\n' % (fullPath, path))
        self.ProcessSourceFileForInclude(fullPath, pObj, configFile, infObj)
def AddAllIncludeFiles(self, pObj, configFile):
    """Register every file found under each of the package's [Includes]
    paths with the doxygen config file.

    @param pObj       package object
    @param configFile doxygen config file object
    """
    objs = pObj.GetFileObj().GetSectionObjectsByName('includes')
    for obj in objs:
        incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath())
        for root, dirs, files in os.walk(incPath):
            # BUGFIX: pruning via dirs.remove() while iterating `dirs`
            # skips the entry following each removal; rebuild the list in
            # place instead (os.walk honours in-place modification).
            dirs[:] = [d for d in dirs if d.lower() not in _ignore_dir]
            for file in files:
                path = os.path.normpath(os.path.join(root, file))
                configFile.AddFile(path.replace('/', '\\'))
def GeneratePcdSubPages(self, pObj, configFile):
    """
    Generate sub pages for package's PCD definition.
    @param pObj        package object
    @param configFile  config file object
    @return [root page] or [] when the package declares no PCD
    """
    objs = pObj.GetFileObj().GetSectionObjectsByName('pcd')
    if len(objs) == 0:
        return []
    pcdRootPage = doxygen.Page('PCD', 'pcd_root_page')
    typeRootPageDict = {}
    typeArchRootPageDict = {}
    for obj in objs:
        # One sub tree per PCD type.
        if obj.GetPcdType() not in typeRootPageDict.keys():
            typeRootPageDict[obj.GetPcdType()] = doxygen.Page(obj.GetPcdType(), 'pcd_%s_root_page' % obj.GetPcdType())
            pcdRootPage.AddPage(typeRootPageDict[obj.GetPcdType()])
        typeRoot = typeRootPageDict[obj.GetPcdType()]
        if self._arch is not None:
            # Arch filter active: PCD pages hang directly off the type page.
            typeRoot.AddPage(self._GeneratePcdPage(obj))
        else:
            # No filter: add an intermediate per-arch page under the type.
            keystr = obj.GetPcdType() + obj.GetArch()
            if keystr not in typeArchRootPageDict.keys():
                typeArchRootPage = doxygen.Page(obj.GetArch(), 'pcd_%s_%s_root_page' % (obj.GetPcdType(), obj.GetArch()))
                typeArchRootPageDict[keystr] = typeArchRootPage
                typeRoot.AddPage(typeArchRootPage)
            typeArchRoot = typeArchRootPageDict[keystr]
            typeArchRoot.AddPage(self._GeneratePcdPage(obj))
    return [pcdRootPage]

def _GeneratePcdPage(self, obj):
    """Build the doxygen page (comment + information table) for one PCD
    object.  Factored out of the two identical branches of
    GeneratePcdSubPages."""
    pcdPage = doxygen.Page('%s' % obj.GetPcdName(),
                           'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1]))
    pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n')
    section = doxygen.Section('PCDinformation', 'PCD Information')
    desc = '<TABLE>'
    desc += '<TR>'
    desc += '<TD><CAPTION>Name</CAPTION></TD>'
    desc += '<TD><CAPTION>Token Space</CAPTION></TD>'
    desc += '<TD><CAPTION>Token number</CAPTION></TD>'
    desc += '<TD><CAPTION>Data Type</CAPTION></TD>'
    desc += '<TD><CAPTION>Default Value</CAPTION></TD>'
    desc += '</TR>'
    desc += '<TR>'
    desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1]
    desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0]
    desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken()
    desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType()
    desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue()
    desc += '</TR>'
    desc += '</TABLE>'
    section.AddDescription(desc)
    pcdPage.AddSection(section)
    return pcdPage
def _GenerateGuidSubPage(self, pObj, obj, configFile):
    """Build the doxygen page for one GUID declaration: an information
    table (GUID name + value) plus a link to the declaring header when one
    can be located.

    @param pObj       package object
    @param obj        GUID section object from the DEC file
    @param configFile doxygen config file object
    @return the doxygen page object
    """
    guidPage = doxygen.Page('%s' % obj.GetName(),
                            'guid_%s_%s' % (obj.GetArch(), obj.GetName()))
    comments = obj.GetComment()
    if len(comments) != 0:
        guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>')
    section = doxygen.Section('BasicGuidInfo', 'GUID Information')
    desc = '<TABLE>'
    desc += '<TR>'
    desc += '<TD><CAPTION>GUID\'s Guid Name</CAPTION></TD><TD><CAPTION>GUID\'s Guid</CAPTION></TD>'
    desc += '</TR>'
    desc += '<TR>'
    desc += '<TD>%s</TD>' % obj.GetName()
    desc += '<TD>%s</TD>' % obj.GetGuid()
    desc += '</TR>'
    desc += '</TABLE>'
    section.AddDescription(desc)
    guidPage.AddSection(section)
    refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile)
    if refFile:
        relPath = refFile[len(pObj.GetWorkspace()) + 1:]
        # Without DEC comments, pull the header's own doc comment in.
        if len(comments) == 0:
            guidPage.AddDescription(' \\copydoc %s <br>' % relPath)
        section = doxygen.Section('ref', 'Refer to Header File')
        section.AddDescription('\link %s\n' % relPath)
        section.AddDescription('\endlink\n')
        self.ProcessSourceFileForInclude(refFile, pObj, configFile)
        guidPage.AddSection(section)
    return guidPage
def GenerateGuidSubPages(self, pObj, configFile):
    """
    Generate sub pages for package's GUID definition.
    @param pObj       package object
    @param configFile doxygen config file object
    @return [root page] or [] when the package declares no GUID
    """
    pageRoot = doxygen.Page('GUID', 'guid_root_page')
    objs = pObj.GetFileObj().GetSectionObjectsByName('guids', self._arch)
    if len(objs) == 0: return []
    if self._arch is not None:
        # Arch filter active: one flat list of GUID pages.
        for obj in objs:
            pageRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile))
    else:
        # No filter: group GUID pages under one page per architecture.
        guidArchRootPageDict = {}
        for obj in objs:
            if obj.GetArch() not in guidArchRootPageDict.keys():
                guidArchRoot = doxygen.Page(obj.GetArch(), 'guid_arch_root_%s' % obj.GetArch())
                pageRoot.AddPage(guidArchRoot)
                guidArchRootPageDict[obj.GetArch()] = guidArchRoot
            guidArchRoot = guidArchRootPageDict[obj.GetArch()]
            guidArchRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile))
    return [pageRoot]
def _GeneratePpiSubPage(self, pObj, obj, configFile):
    """Build the doxygen page for one PPI declaration: an information
    table (PPI GUID name + value) plus a link to the declaring header when
    one can be located.

    @param pObj       package object
    @param obj        PPI section object from the DEC file
    @param configFile doxygen config file object
    @return the doxygen page object
    """
    guidPage = doxygen.Page(obj.GetName(), 'ppi_page_%s' % obj.GetName())
    comments = obj.GetComment()
    if len(comments) != 0:
        guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>')
    section = doxygen.Section('BasicPpiInfo', 'PPI Information')
    desc = '<TABLE>'
    desc += '<TR>'
    desc += '<TD><CAPTION>PPI\'s Guid Name</CAPTION></TD><TD><CAPTION>PPI\'s Guid</CAPTION></TD>'
    desc += '</TR>'
    desc += '<TR>'
    desc += '<TD>%s</TD>' % obj.GetName()
    desc += '<TD>%s</TD>' % obj.GetGuid()
    desc += '</TR>'
    desc += '</TABLE>'
    section.AddDescription(desc)
    guidPage.AddSection(section)
    refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile)
    if refFile:
        relPath = refFile[len(pObj.GetWorkspace()) + 1:]
        # Without DEC comments, pull the header's own doc comment in.
        if len(comments) == 0:
            guidPage.AddDescription(' \\copydoc %s <br>' % relPath)
        section = doxygen.Section('ref', 'Refer to Header File')
        section.AddDescription('\link %s\n' % relPath)
        section.AddDescription('\endlink\n')
        self.ProcessSourceFileForInclude(refFile, pObj, configFile)
        guidPage.AddSection(section)
    return guidPage
def GeneratePpiSubPages(self, pObj, configFile):
    """
    Generate sub pages for package's PPI definition.
    @param pObj       package object
    @param configFile doxygen config file object
    @return [root page] or [] when the package declares no PPI
    """
    pageRoot = doxygen.Page('PPI', 'ppi_root_page')
    objs = pObj.GetFileObj().GetSectionObjectsByName('ppis', self._arch)
    if len(objs) == 0: return []
    if self._arch is not None:
        # Arch filter active: one flat list of PPI pages.
        for obj in objs:
            pageRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile))
    else:
        # No filter: group PPI pages under one page per architecture.
        guidArchRootPageDict = {}
        for obj in objs:
            if obj.GetArch() not in guidArchRootPageDict.keys():
                guidArchRoot = doxygen.Page(obj.GetArch(), 'ppi_arch_root_%s' % obj.GetArch())
                pageRoot.AddPage(guidArchRoot)
                guidArchRootPageDict[obj.GetArch()] = guidArchRoot
            guidArchRoot = guidArchRootPageDict[obj.GetArch()]
            guidArchRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile))
    return [pageRoot]
def _GenerateProtocolSubPage(self, pObj, obj, configFile):
    """Build the doxygen page for one protocol declaration: an information
    table (protocol GUID name + value) plus a link to the declaring header
    when one can be located.

    @param pObj       package object
    @param obj        protocol section object from the DEC file
    @param configFile doxygen config file object
    @return the doxygen page object
    """
    guidPage = doxygen.Page(obj.GetName(), 'protocol_page_%s' % obj.GetName())
    comments = obj.GetComment()
    if len(comments) != 0:
        guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>')
    section = doxygen.Section('BasicProtocolInfo', 'PROTOCOL Information')
    desc = '<TABLE>'
    desc += '<TR>'
    desc += '<TD><CAPTION>PROTOCOL\'s Guid Name</CAPTION></TD><TD><CAPTION>PROTOCOL\'s Guid</CAPTION></TD>'
    desc += '</TR>'
    desc += '<TR>'
    desc += '<TD>%s</TD>' % obj.GetName()
    desc += '<TD>%s</TD>' % obj.GetGuid()
    desc += '</TR>'
    desc += '</TABLE>'
    section.AddDescription(desc)
    guidPage.AddSection(section)
    refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile)
    if refFile:
        relPath = refFile[len(pObj.GetWorkspace()) + 1:]
        # Without DEC comments, pull the header's own doc comment in.
        if len(comments) == 0:
            guidPage.AddDescription(' \\copydoc %s <br>' % relPath)
        section = doxygen.Section('ref', 'Refer to Header File')
        section.AddDescription('\link %s\n' % relPath)
        section.AddDescription('\endlink\n')
        self.ProcessSourceFileForInclude(refFile, pObj, configFile)
        guidPage.AddSection(section)
    return guidPage
def GenerateProtocolSubPages(self, pObj, configFile):
    """
    Generate sub pages for package's protocol definition.
    @param pObj       package object
    @param configFile doxygen config file object
    @return [root page] or [] when the package declares no protocol
    """
    pageRoot = doxygen.Page('PROTOCOL', 'protocol_root_page')
    objs = pObj.GetFileObj().GetSectionObjectsByName('protocols', self._arch)
    if len(objs) == 0: return []
    if self._arch is not None:
        # Arch filter active: one flat list of protocol pages.
        for obj in objs:
            pageRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile))
    else:
        # No filter: group protocol pages under one page per architecture.
        guidArchRootPageDict = {}
        for obj in objs:
            if obj.GetArch() not in guidArchRootPageDict.keys():
                guidArchRoot = doxygen.Page(obj.GetArch(), 'protocol_arch_root_%s' % obj.GetArch())
                pageRoot.AddPage(guidArchRoot)
                guidArchRootPageDict[obj.GetArch()] = guidArchRoot
            guidArchRoot = guidArchRootPageDict[obj.GetArch()]
            guidArchRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile))
    return [pageRoot]
def FindHeaderFileForGuid(self, pObj, name, configFile):
    """
    Find the declaration header file for a GUID/PPI/Protocol.
    @param pObj       package object
    @param name       guid/ppi/protocol's name
    @param configFile config file object
    @return full path of header file and None if not found.
    """
    startPath = pObj.GetFileObj().GetPackageRootPath()
    incPath = os.path.join(startPath, 'Include').replace('\\', '/')
    # if <PackagePath>/include exist, then search header under it.
    if os.path.exists(incPath):
        startPath = incPath
    for root, dirs, files in os.walk(startPath):
        # BUGFIX: removing from `dirs` while iterating it skips the entry
        # after each removal; rebuild the list in place instead (os.walk
        # honours in-place pruning).
        dirs[:] = [d for d in dirs if d.lower() not in _ignore_dir]
        for file in files:
            fPath = os.path.join(root, file)
            if not IsCHeaderFile(fPath):
                continue
            try:
                # 'with' guarantees the handle is closed even on error;
                # UnicodeDecodeError covers non-UTF-8 headers, consistent
                # with ProcessSourceFileForInclude.
                with open(fPath, 'r') as f:
                    lines = f.readlines()
            except (IOError, UnicodeDecodeError):
                self.Log('Fail to open file %s\n' % fPath)
                continue
            # A header that mentions the name together with 'extern' is
            # taken as the declaring header.
            for line in lines:
                if line.find(name) != -1 and \
                   line.find('extern') != -1:
                    return fPath.replace('\\', '/')
    return None
def GetPackageModuleList(self, pObj):
    """
    Get all module's INF path under package's root path
    @param pObj package object
    @return array of INF full paths, or None if the root path is missing
    """
    mArray = []
    packPath = pObj.GetFileObj().GetPackageRootPath()
    # BUGFIX: the original tested the function object `os.path.exists`
    # (always truthy) instead of calling it, so the guard never fired.
    if not os.path.exists(packPath):
        return None
    for root, dirs, files in os.walk(packPath):
        # BUGFIX: prune ignored dirs without mutating the list being
        # iterated (the original dirs.remove() skipped entries).
        dirs[:] = [d for d in dirs if d.lower() not in _ignore_dir]
        for file in files:
            if CheckPathPostfix(file, 'inf'):
                fPath = os.path.join(root, file).replace('\\', '/')
                mArray.append(fPath)
    return mArray
def GenerateModulePages(self, pObj, configFile):
    """
    Generate sub pages for the package's modules found under the package
    root directory.
    @param pObj       package object
    @param configFile doxygen config file object
    @return list of root pages ('Libraries' and/or 'Modules')
    """
    infList = self.GetPackageModuleList(pObj)
    rootPages = []
    libObjs = []
    modObjs = []
    for infpath in infList:
        infObj = inf.INFFile(infpath)
        if not infObj:
            # BUGFIX: the original formatted the `inf` *module* object into
            # these messages instead of the INF file path.
            self.Log('Fail create INF object for %s' % infpath)
            continue
        if not infObj.Parse():
            self.Log('Fail to load INF file %s' % infpath)
            continue
        # A module that produces a library class is listed as a library.
        if infObj.GetProduceLibraryClass() is not None:
            libObjs.append(infObj)
        else:
            modObjs.append(infObj)
    if len(libObjs) != 0:
        libRootPage = doxygen.Page('Libraries', 'lib_root_page')
        rootPages.append(libRootPage)
        for libInf in libObjs:
            libRootPage.AddPage(self.GenerateModulePage(pObj, libInf, configFile, True))
    if len(modObjs) != 0:
        modRootPage = doxygen.Page('Modules', 'module_root_page')
        rootPages.append(modRootPage)
        for modInf in modObjs:
            modRootPage.AddPage(self.GenerateModulePage(pObj, modInf, configFile, False))
    return rootPages
def GenerateModulePage(self, pObj, infObj, configFile, isLib):
    """
    Generate page for a module/library.
    @param pObj       package object
    @param infObj     INF file object for module/library
    @param configFile doxygen config file object
    @param isLib      Whether this module is library
    @return the module's doxygen page object
    """
    workspace = pObj.GetWorkspace()
    # Parse every dependent package's DEC so PCD/GUID/... owners can be found.
    refDecObjs = []
    for obj in infObj.GetSectionObjectsByName('packages'):
        decObj = dec.DECFile(os.path.join(workspace, obj.GetPath()))
        if not decObj:
            ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName())
            continue
        if not decObj.Parse():
            ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName())
            continue
        refDecObjs.append(decObj)
    modPage = doxygen.Page('%s' % infObj.GetBaseName(),
                           'module_%s' % infObj.GetBaseName())
    modPage.AddDescription(infObj.GetFileHeader())
    # Basic module information table from the [Defines] section.
    basicInfSection = doxygen.Section('BasicModuleInformation', 'Basic Module Information')
    desc = "<TABLE>"
    for obj in infObj.GetSectionObjectsByName('defines'):
        key = obj.GetKey()
        value = obj.GetValue()
        if key not in _inf_key_description_mapping_table.keys(): continue
        if key == 'LIBRARY_CLASS' and value.find('|') != -1:
            # 'Class|TYPE1 TYPE2' is rendered as two table rows.
            clsname, types = value.split('|')
            desc += '<TR>'
            desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key]
            desc += '<TD>%s</TD>' % clsname
            desc += '</TR>'
            desc += '<TR>'
            desc += '<TD><B>Supported Module Types</B></TD>'
            desc += '<TD>%s</TD>' % types
            desc += '</TR>'
        else:
            desc += '<TR>'
            desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key]
            if key == 'EFI_SPECIFICATION_VERSION' and value == '0x00020000':
                value = '2.0'
            desc += '<TD>%s</TD>' % value
            desc += '</TR>'
    desc += '</TABLE>'
    basicInfSection.AddDescription(desc)
    modPage.AddSection(basicInfSection)
    # Add PCD section
    data = []
    for obj in infObj.GetSectionObjectsByName('pcd', self._arch):
        data.append(obj.GetPcdName().strip())
    if len(data) != 0:
        s = doxygen.Section('Pcds', 'Pcds')
        desc = "<TABLE>"
        desc += '<TR><TD><B>PCD Name</B></TD><TD><B>TokenSpace</B></TD><TD><B>Package</B></TD></TR>'
        for item in data:
            desc += '<TR>'
            desc += '<TD>%s</TD>' % item.split('.')[1]
            desc += '<TD>%s</TD>' % item.split('.')[0]
            pkgbasename = self.SearchPcdPackage(item, workspace, refDecObjs)
            desc += '<TD>%s</TD>' % pkgbasename
            desc += '</TR>'
        desc += "</TABLE>"
        s.AddDescription(desc)
        modPage.AddSection(s)
    # Add protocol section
    data = []
    for obj in infObj.GetSectionObjectsByName('protocol', self._arch):
        data.append(obj.GetName().strip())
    if len(data) != 0:
        s = doxygen.Section('Protocols', 'Protocols')
        desc = "<TABLE>"
        desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>'
        for item in data:
            desc += '<TR>'
            desc += '<TD>%s</TD>' % item
            pkgbasename = self.SearchProtocolPackage(item, workspace, refDecObjs)
            desc += '<TD>%s</TD>' % pkgbasename
            desc += '</TR>'
        desc += "</TABLE>"
        s.AddDescription(desc)
        modPage.AddSection(s)
    # Add ppi section
    data = []
    for obj in infObj.GetSectionObjectsByName('ppi', self._arch):
        data.append(obj.GetName().strip())
    if len(data) != 0:
        s = doxygen.Section('Ppis', 'Ppis')
        desc = "<TABLE>"
        desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>'
        for item in data:
            desc += '<TR>'
            desc += '<TD>%s</TD>' % item
            pkgbasename = self.SearchPpiPackage(item, workspace, refDecObjs)
            desc += '<TD>%s</TD>' % pkgbasename
            desc += '</TR>'
        desc += "</TABLE>"
        s.AddDescription(desc)
        modPage.AddSection(s)
    # Add guid section
    data = []
    for obj in infObj.GetSectionObjectsByName('guid', self._arch):
        data.append(obj.GetName().strip())
    if len(data) != 0:
        s = doxygen.Section('Guids', 'Guids')
        desc = "<TABLE>"
        desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>'
        for item in data:
            desc += '<TR>'
            desc += '<TD>%s</TD>' % item
            pkgbasename = self.SearchGuidPackage(item, workspace, refDecObjs)
            desc += '<TD>%s</TD>' % pkgbasename
            desc += '</TR>'
        desc += "</TABLE>"
        s.AddDescription(desc)
        modPage.AddSection(s)
    # Library classes: the produced class (for libraries) plus all consumed ones.
    section = doxygen.Section('LibraryClasses', 'Library Classes')
    desc = "<TABLE>"
    desc += '<TR><TD><B>Name</B></TD><TD><B>Type</B></TD><TD><B>Package</B></TD><TD><B>Header File</B></TD></TR>'
    if isLib:
        desc += '<TR>'
        desc += '<TD>%s</TD>' % infObj.GetProduceLibraryClass()
        desc += '<TD>Produce</TD>'
        try:
            pkgname, hPath = self.SearchLibraryClassHeaderFile(infObj.GetProduceLibraryClass(),
                                                               workspace,
                                                               refDecObjs)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  (SearchLibraryClassHeaderFile
            # returns None on a miss, which makes the unpacking raise.)
            self.Log ('fail to get package header file for lib class %s' % infObj.GetProduceLibraryClass())
            pkgname = 'NULL'
            hPath = 'NULL'
        desc += '<TD>%s</TD>' % pkgname
        if hPath != "NULL":
            desc += '<TD>\link %s \endlink</TD>' % hPath
        else:
            desc += '<TD>%s</TD>' % hPath
        desc += '</TR>'
    for lcObj in infObj.GetSectionObjectsByName('libraryclasses', self._arch):
        desc += '<TR>'
        desc += '<TD>%s</TD>' % lcObj.GetClass()
        retarr = self.SearchLibraryClassHeaderFile(lcObj.GetClass(),
                                                   workspace,
                                                   refDecObjs)
        if retarr is not None:
            pkgname, hPath = retarr
        else:
            self.Log('Fail find the library class %s definition from module %s dependent package!' % (lcObj.GetClass(), infObj.GetFilename()), 'error')
            pkgname = 'NULL'
            hPath = 'NULL'
        desc += '<TD>Consume</TD>'
        desc += '<TD>%s</TD>' % pkgname
        desc += '<TD>\link %s \endlink</TD>' % hPath
        desc += '</TR>'
    desc += "</TABLE>"
    section.AddDescription(desc)
    modPage.AddSection(section)
    # Source files: UNI/assembly files doxygen cannot parse are converted
    # to .dox first; everything else is scanned for #include dependencies.
    section = doxygen.Section('SourceFiles', 'Source Files')
    section.AddDescription('<ul>\n')
    for obj in infObj.GetSourceObjects(self._arch, self._tooltag):
        sPath = infObj.GetModuleRootPath()
        sPath = os.path.join(sPath, obj.GetSourcePath()).replace('\\', '/').strip()
        if sPath.lower().endswith('.uni') or sPath.lower().endswith('.s') or sPath.lower().endswith('.asm') or sPath.lower().endswith('.nasm'):
            newPath = self.TranslateUniFile(sPath)
            configFile.AddFile(newPath)
            newPath = newPath[len(pObj.GetWorkspace()) + 1:]
            section.AddDescription('<li> \link %s \endlink </li>' % newPath)
        else:
            self.ProcessSourceFileForInclude(sPath, pObj, configFile, infObj)
            sPath = sPath[len(pObj.GetWorkspace()) + 1:]
            section.AddDescription('<li>\link %s \endlink </li>' % sPath)
    section.AddDescription('</ul>\n')
    modPage.AddSection(section)
    # Module dependency expressions.
    data = []
    for obj in infObj.GetSectionObjectsByName('depex'):
        data.append(str(obj))
    if len(data) != 0:
        s = doxygen.Section('DependentSection', 'Module Dependencies')
        s.AddDescription('<br>'.join(data))
        modPage.AddSection(s)
    return modPage
def TranslateUniFile(self, path):
    """
    Convert a UNI/assembly source file into a '<path>.dox' file doxygen can
    parse: each content line becomes 'line<br>' inside a single
    '/** @file ... **/' comment block.

    @param path source file full path
    @return path of the generated .dox file, or None if the source file
            could not be opened.
    """
    newpath = path + '.dox'
    try:
        with open(path, 'r') as src:
            t = src.read()
    except (IOError, OSError):
        return None
    output = '/** @file \n'
    # BUGFIX: the file is read in text mode, so line endings are already
    # normalized to '\n'; the original split on '\r\n' and therefore saw
    # the whole file as one single "line".
    for line in t.splitlines():
        if line.find('@file') != -1:
            continue
        if line.find('*/') != -1:
            continue
        line = line.strip()
        if line.startswith('/'):
            # Drop the leading comment marker, keep the comment text.
            parts = line.split(' ')
            if len(parts) > 1:
                line = ' '.join(parts[1:])
            else:
                continue
        output += '%s<br>\n' % line
    output += '**/'
    if os.path.exists(newpath):
        os.remove(newpath)
    with open(newpath, "w") as dst:
        dst.write(output)
    return newpath
def SearchPcdPackage(self, pcdname, workspace, decObjs):
    """Return the base name of the dependent package (DEC) that declares
    *pcdname*, or None when no package declares it."""
    for candidate in decObjs:
        declared = (entry.GetPcdName() for entry in candidate.GetSectionObjectsByName('pcd'))
        if pcdname in declared:
            return candidate.GetBaseName()
    return None
def SearchProtocolPackage(self, protname, workspace, decObjs):
    """Return the base name of the dependent package (DEC) that declares
    protocol *protname*, or None when not found."""
    for candidate in decObjs:
        declared = (entry.GetName() for entry in candidate.GetSectionObjectsByName('protocol'))
        if protname in declared:
            return candidate.GetBaseName()
    return None
def SearchPpiPackage(self, ppiname, workspace, decObjs):
    """Return the base name of the dependent package (DEC) that declares
    PPI *ppiname*, or None when not found."""
    for candidate in decObjs:
        declared = (entry.GetName() for entry in candidate.GetSectionObjectsByName('ppi'))
        if ppiname in declared:
            return candidate.GetBaseName()
    return None
def SearchGuidPackage(self, guidname, workspace, decObjs):
    """Return the base name of the dependent package (DEC) that declares
    GUID *guidname*, or None when not found."""
    for candidate in decObjs:
        declared = (entry.GetName() for entry in candidate.GetSectionObjectsByName('guid'))
        if guidname in declared:
            return candidate.GetBaseName()
    return None
def SearchLibraryClassHeaderFile(self, className, workspace, decObjs):
    """Locate the declaration header of library class *className* among the
    dependent packages.

    @return tuple (package base name, workspace-relative header path with
            forward slashes), or None when the class is not declared.
    """
    for candidate in decObjs:
        for clsObj in candidate.GetSectionObjectsByName('libraryclasses'):
            if clsObj.GetClassName().strip() != className:
                continue
            header = os.path.join(candidate.GetPackageRootPath(), clsObj.GetHeaderFile().strip())
            relative = header[len(workspace) + 1:]
            return candidate.GetBaseName(), relative.replace('\\', '/')
    return None
def _ConvertPathToDoxygen(self, path, pObj):
pRootPath = pObj.GetWorkspace()
path = path[len(pRootPath) + 1:]
return path.replace('\\', '/')
def IsCHeaderFile(path):
    """Return True when *path* names a C header file ('.h', case-insensitive)."""
    return CheckPathPostfix(path, 'h')
def CheckPathPostfix(path, str):
    """Return True if the extension of *path* (text after the final '.')
    equals *str*, compared case-insensitively.

    The parameter name 'str' shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    dot = path.rfind('.')
    return dot != -1 and path[dot + 1:].lower() == str.lower()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/doxygengen.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/__init__.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from plugins.EdkPlugins.basemodel import ini
import re, os
from plugins.EdkPlugins.basemodel.message import *
class DECFile(ini.BaseINIFile):
    """Model of an EDK2 package declaration (.dec) file."""

    def GetSectionInstance(self, parent, name, isCombined=False):
        """Factory hook: give the INI parser DEC-specific section objects."""
        return DECSection(parent, name, isCombined)

    def GetComponents(self):
        """Return the [Components] sections of this file."""
        return self.GetSectionByName('Components')

    def GetPackageRootPath(self):
        """Return the directory that contains this .dec file."""
        return os.path.dirname(self.GetFilename()).strip()

    def GetBaseName(self):
        """Return the PACKAGE_NAME define, stripped."""
        return self.GetDefine("PACKAGE_NAME").strip()

    def GetVersion(self):
        """Return the PACKAGE_VERSION define, stripped."""
        return self.GetDefine("PACKAGE_VERSION").strip()

    def GetSectionObjectsByName(self, name, arch=None):
        """Collect the objects of every section named *name* whose
        architecture matches *arch* (None matches everything)."""
        arr = []
        sects = self.GetSectionByName(name)
        for sect in sects:
            # skip unmatched archtecture content
            if not sect.IsArchMatch(arch):
                continue
            for obj in sect.GetObjects():
                arr.append(obj)
        return arr
class DECSection(ini.BaseINISection):
    """A section ('[type.arch]') of a DEC file."""

    def GetSectionINIObject(self, parent):
        """Factory hook: map the section type to the object class used to
        parse each line of the section.

        Substring matches come first so e.g. every Pcds* flavour yields a
        DECPcdObject; exact matches follow for the remaining types.
        """
        type = self.GetType()
        if type.lower().find('defines') != -1:
            return DECDefineSectionObject(self)
        if type.lower().find('includes') != -1:
            return DECIncludeObject(self)
        if type.lower().find('pcd') != -1:
            return DECPcdObject(self)
        if type.lower() == 'libraryclasses':
            return DECLibraryClassObject(self)
        if type.lower() == 'guids':
            return DECGuidObject(self)
        if type.lower() == 'ppis':
            return DECPpiObject(self)
        if type.lower() == 'protocols':
            return DECProtocolObject(self)
        return DECSectionObject(self)

    def GetType(self):
        """Return the part of the section name before the first '.'."""
        arr = self._name.split('.')
        return arr[0].strip()

    def GetArch(self):
        """Return the architecture suffix of the section name, or 'common'."""
        arr = self._name.split('.')
        if len(arr) == 1:
            return 'common'
        return arr[1]

    def IsArchMatch(self, arch):
        """True if this section applies to *arch* (None or 'common' match all)."""
        if arch is None or self.GetArch() == 'common':
            return True
        if self.GetArch().lower() != arch.lower():
            return False
        return True
class DECSectionObject(ini.BaseINISectionObject):
    """Base class for a single parsed line inside a DEC section."""

    def GetArch(self):
        """Return the architecture of the owning section."""
        return self.GetParent().GetArch()
class DECDefineSectionObject(DECSectionObject):
    """A 'KEY = VALUE' entry in the [Defines] section of a DEC file."""

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._key = None
        self._value = None

    def Parse(self):
        """Parse 'key = value' from this object's single line.

        Returns True on success, False (after reporting) on a malformed line.
        """
        assert (self._start == self._end), 'The object in define section must be in single line'
        line = self.GetLineByOffset(self._start).strip()
        # Drop any trailing comment before splitting on '='.
        line = line.split('#')[0]
        arr = line.split('=')
        if len(arr) != 2:
            ErrorMsg('Invalid define section object',
                     self.GetFilename(),
                     self.GetParent().GetName()
                     )
            return False
        self._key = arr[0].strip()
        self._value = arr[1].strip()
        return True

    def GetKey(self):
        """Return the define's key (None before a successful Parse)."""
        return self._key

    def GetValue(self):
        """Return the define's value (None before a successful Parse)."""
        return self._value
class DECGuidObject(DECSectionObject):
    """A 'Name = GUID' declaration in a DEC [Guids] section.

    The class attribute _objs maps each GUID name to the list of live
    instances declaring it, so declarations can be located globally.
    """
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None
        # Bug fix: initialized here so GetGuid() cannot raise
        # AttributeError when called before Parse() has run.
        self._guid = None

    def Parse(self):
        """Parse 'name = guid' from this object's line and register self.

        partition() tolerates a missing '=' (guid becomes ''), where the
        original split('=')[1] raised IndexError on a malformed line.
        """
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        name, _, guid = line.partition('=')
        self._name = name.strip()
        self._guid = guid.strip()
        # Register this instance in the global name -> objects map.
        DECGuidObject._objs.setdefault(self._name, []).append(self)
        return True

    def GetName(self):
        """Return the GUID's symbolic name."""
        return self._name

    def GetGuid(self):
        """Return the GUID value string."""
        return self._guid

    def Destroy(self):
        """Unregister this instance; drop the name bucket when it empties."""
        bucket = DECGuidObject._objs[self._name]
        bucket.remove(self)
        if not bucket:
            del DECGuidObject._objs[self._name]

    @staticmethod
    def GetObjectDict():
        """Return the global name -> instances registry."""
        return DECGuidObject._objs
class DECPpiObject(DECSectionObject):
    """A 'Name = GUID' declaration in a DEC [Ppis] section.

    The class attribute _objs maps each PPI name to the list of live
    instances declaring it.
    """
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None
        # Bug fix: initialized here so GetGuid() cannot raise
        # AttributeError when called before Parse() has run.
        self._guid = None

    def Parse(self):
        """Parse 'name = guid' from this object's line and register self.

        partition() tolerates a missing '=' (guid becomes ''), where the
        original split('=')[1] raised IndexError on a malformed line.
        """
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        name, _, guid = line.partition('=')
        self._name = name.strip()
        self._guid = guid.strip()
        # Register this instance in the global name -> objects map.
        DECPpiObject._objs.setdefault(self._name, []).append(self)
        return True

    def GetName(self):
        """Return the PPI's symbolic name."""
        return self._name

    def GetGuid(self):
        """Return the PPI's GUID value string."""
        return self._guid

    def Destroy(self):
        """Unregister this instance; drop the name bucket when it empties."""
        bucket = DECPpiObject._objs[self._name]
        bucket.remove(self)
        if not bucket:
            del DECPpiObject._objs[self._name]

    @staticmethod
    def GetObjectDict():
        """Return the global name -> instances registry."""
        return DECPpiObject._objs
class DECProtocolObject(DECSectionObject):
    """A 'Name = GUID' declaration in a DEC [Protocols] section.

    The class attribute _objs maps each protocol name to the list of live
    instances declaring it.
    """
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None
        # Bug fix: initialized here so GetGuid() cannot raise
        # AttributeError when called before Parse() has run.
        self._guid = None

    def Parse(self):
        """Parse 'name = guid' from this object's line and register self.

        partition() tolerates a missing '=' (guid becomes ''), where the
        original split('=')[1] raised IndexError on a malformed line.
        """
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        name, _, guid = line.partition('=')
        self._name = name.strip()
        self._guid = guid.strip()
        # Register this instance in the global name -> objects map.
        DECProtocolObject._objs.setdefault(self._name, []).append(self)
        return True

    def GetName(self):
        """Return the protocol's symbolic name."""
        return self._name

    def GetGuid(self):
        """Return the protocol's GUID value string."""
        return self._guid

    def Destroy(self):
        """Unregister this instance; drop the name bucket when it empties."""
        bucket = DECProtocolObject._objs[self._name]
        bucket.remove(self)
        if not bucket:
            del DECProtocolObject._objs[self._name]

    @staticmethod
    def GetObjectDict():
        """Return the global name -> instances registry."""
        return DECProtocolObject._objs
class DECLibraryClassObject(DECSectionObject):
    """A 'ClassName|IncludePath' declaration in a DEC [LibraryClasses]
    section.

    The class attribute _objs maps each class name to the list of live
    instances declaring it.
    """
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self.mClassName = None
        self.mHeaderFile = None

    def Parse(self):
        """Parse 'class|header' from this object's line and register self.

        partition() tolerates a malformed line (missing '|' leaves the
        header empty), where the original tuple-unpacking split('|')
        raised ValueError.  Fields are deliberately left unstripped, as
        in the original, since callers strip on use.
        """
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self.mClassName, _, self.mHeaderFile = line.partition('|')
        # Register this instance in the global class-name -> objects map.
        DECLibraryClassObject._objs.setdefault(self.mClassName, []).append(self)
        return True

    def GetClassName(self):
        """Return the library class name field."""
        return self.mClassName

    def GetName(self):
        """Return the class name (alias of GetClassName)."""
        return self.mClassName

    def GetHeaderFile(self):
        """Return the package-relative header path field."""
        return self.mHeaderFile

    def Destroy(self):
        """Unregister this instance; drop the name bucket when it empties."""
        bucket = DECLibraryClassObject._objs[self.mClassName]
        bucket.remove(self)
        if not bucket:
            del DECLibraryClassObject._objs[self.mClassName]

    @staticmethod
    def GetObjectDict():
        """Return the global class-name -> instances registry."""
        return DECLibraryClassObject._objs
class DECIncludeObject(DECSectionObject):
    """An include-path entry in a DEC [Includes] section."""

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)

    def GetPath(self):
        """Return the include path with trailing comment and whitespace removed."""
        raw = self.GetLineByOffset(self._start)
        return raw.split('#')[0].strip()
class DECPcdObject(DECSectionObject):
    """A 'TokenSpace.PcdName|Default|DataType|Token' declaration in a DEC
    Pcds* section."""
    # Global map: full PCD name -> list of live instances declaring it.
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self.mPcdName = None
        self.mPcdDefaultValue = None
        self.mPcdDataType = None
        self.mPcdToken = None

    def Parse(self):
        """Parse the four '|'-separated PCD fields and register self.

        NOTE(review): a line without exactly four fields raises ValueError
        here — confirm malformed DECs are rejected upstream.
        """
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        (self.mPcdName, self.mPcdDefaultValue, self.mPcdDataType, self.mPcdToken) = line.split('|')
        objdict = DECPcdObject._objs
        if self.mPcdName not in objdict.keys():
            objdict[self.mPcdName] = [self]
        else:
            objdict[self.mPcdName].append(self)
        return True

    def Destroy(self):
        """Unregister this instance; drop the name bucket when it empties."""
        objdict = DECPcdObject._objs
        objdict[self.mPcdName].remove(self)
        if len(objdict[self.mPcdName]) == 0:
            del objdict[self.mPcdName]

    def GetPcdType(self):
        """Return the PCD access type (the owning section's type)."""
        return self.GetParent().GetType()

    def GetPcdName(self):
        """Return the full 'TokenSpace.Name' string."""
        return self.mPcdName

    def GetPcdValue(self):
        """Return the default-value field."""
        return self.mPcdDefaultValue

    def GetPcdDataType(self):
        """Return the data-type field."""
        return self.mPcdDataType

    def GetPcdToken(self):
        """Return the token-number field."""
        return self.mPcdToken

    def GetName(self):
        """Return the name part after the token-space prefix."""
        return self.GetPcdName().split('.')[1]

    @staticmethod
    def GetObjectDict():
        """Return the global name -> instances registry."""
        return DECPcdObject._objs
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/dec.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from plugins.EdkPlugins.basemodel import ini
import re, os
from plugins.EdkPlugins.basemodel.message import *
class DSCFile(ini.BaseINIFile):
    """Model of an EDK2 platform description (.dsc) file."""

    def GetSectionInstance(self, parent, name, isCombined=False):
        """Factory hook: give the INI parser DSC-specific section objects."""
        return DSCSection(parent, name, isCombined)

    def GetComponents(self):
        """Return all component objects from the [Components] sections."""
        return self.GetSectionObjectsByName('Components')
class DSCSection(ini.BaseINISection):
    """A section ('[type.arch.moduletype]') of a DSC file."""

    def GetSectionINIObject(self, parent):
        """Factory hook: map the section type to the object class used to
        parse each line of the section."""
        type = self.GetType()
        if type.lower() == 'components':
            return DSCComponentObject(self)
        if type.lower() == 'libraryclasses':
            return DSCLibraryClassObject(self)
        if type.lower() == 'defines':
            return ini.BaseINISectionObject(self)
        # All PCD section flavours share a single line-object class.
        if type.lower() == 'pcdsfeatureflag' or \
           type.lower() == 'pcdsfixedatbuild' or \
           type.lower() == 'pcdspatchableinmodule' or\
           type.lower() == 'pcdsdynamicdefault' or \
           type.lower() == 'pcdsdynamicex' or \
           type.lower() == 'pcdsdynamichii' or \
           type.lower() == 'pcdsdynamicvpd':
            return DSCPcdObject(self)
        return DSCSectionObject(self)

    def GetType(self):
        """Return the part of the section name before the first '.'."""
        arr = self._name.split('.')
        return arr[0].strip()

    def GetArch(self):
        """Return the architecture qualifier, or 'common'."""
        arr = self._name.split('.')
        if len(arr) == 1:
            return 'common'
        return arr[1]

    def GetModuleType(self):
        """Return the module-type qualifier, or 'common'."""
        arr = self._name.split('.')
        if len(arr) < 3:
            return 'common'
        return arr[2]
class DSCSectionObject(ini.BaseINISectionObject):
    """Base class for a single parsed line inside a DSC section."""

    def GetArch(self):
        """Return the architecture of the owning section."""
        return self.GetParent().GetArch()
class DSCPcdObject(DSCSectionObject):
    """A PCD setting ('PcdName|Value') in a DSC Pcds* section."""

    def __init__(self, parent):
        ini.BaseINISectionObject.__init__(self, parent)
        self._name = None
        # Bug fix: initialized here so GetPcdValue() cannot raise
        # AttributeError when called before Parse() has run.
        self._value = None

    def Parse(self):
        """Parse 'name|value' from this object's line; always returns True.

        The line is split once (the original split it twice); a line
        without a '|' now yields value None instead of raising IndexError.
        """
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        parts = line.split('|')
        self._name = parts[0]
        self._value = parts[1] if len(parts) > 1 else None
        return True

    def GetPcdName(self):
        """Return the full PCD name (left of '|')."""
        return self._name

    def GetPcdType(self):
        """Return the PCD access type (the owning section's type)."""
        return self.GetParent().GetType()

    def GetPcdValue(self):
        """Return the value field (right of '|'), or None."""
        return self._value
class DSCLibraryClassObject(DSCSectionObject):
    """A 'ClassName|InstancePath' mapping in a DSC [LibraryClasses] section."""

    def __init__(self, parent):
        ini.BaseINISectionObject.__init__(self, parent)

    def GetClass(self):
        """Return the library class name (left of '|')."""
        line = self.GetLineByOffset(self._start)
        return line.split('#')[0].split('|')[0].strip()

    def GetInstance(self):
        """Return the implementing INF path (right of '|')."""
        line = self.GetLineByOffset(self._start)
        return line.split('#')[0].split('|')[1].strip()

    def GetArch(self):
        """Return the owning section's architecture qualifier."""
        return self.GetParent().GetArch()

    def GetModuleType(self):
        """Return the owning section's module-type qualifier."""
        return self.GetParent().GetModuleType()
class DSCComponentObject(DSCSectionObject):
    """A component entry (module INF reference, optionally followed by a
    '{ ... }' override block) in a DSC [Components] section."""

    def __init__(self, parent):
        ini.BaseINISectionObject.__init__(self, parent)
        self._OveridePcds = {}
        self._OverideLibraries = {}
        self._Filename = ''

    def __del__(self):
        # Bug fix: the original cleared _OverideLibraries twice and never
        # cleared _OveridePcds.
        self._OverideLibraries.clear()
        self._OveridePcds.clear()
        ini.BaseINISectionObject.__del__(self)

    def AddOverideLib(self, libclass, libinstPath):
        """Record a library-class override; the first entry for a class wins."""
        if libclass not in self._OverideLibraries:
            self._OverideLibraries[libclass] = libinstPath

    def AddOveridePcd(self, name, type, value=None):
        """Record a PCD override under its PCD section *type*."""
        self._OveridePcds.setdefault(type, []).append((name, value))

    def GetOverideLibs(self):
        """Return the libclass -> instance-path override map."""
        return self._OverideLibraries

    def GetArch(self):
        """Return the owning section's architecture."""
        return self.GetParent().GetArch()

    def GetOveridePcds(self):
        """Return the pcd-type -> [(name, value)] override map."""
        return self._OveridePcds

    def GetFilename(self):
        """Return the component INF path from the entry's first line."""
        return self.GetLineByOffset(self._start).split('#')[0].split('{')[0].strip()

    def SetFilename(self, fName):
        """Set the INF path used when regenerating this entry."""
        self._Filename = fName

    def Parse(self):
        """Parse the optional '{ <LibraryClasses> ... }' override block."""
        if (self._start < self._end):
            #
            # The first line is inf path and could be ignored
            # The end line is '}' and could be ignored
            #
            curr = self._start + 1
            end = self._end - 1
            OverideName = ''
            while (curr <= end):
                line = self.GetLineByOffset(curr).strip()
                if len(line) > 0 and line[0] != '#':
                    line = line.split('#')[0].strip()
                    if line[0] == '<':
                        # A sub-section marker such as '<LibraryClasses>'.
                        OverideName = line[1:len(line) - 1]
                    elif OverideName.lower() == 'libraryclasses':
                        arr = line.split('|')
                        self._OverideLibraries[arr[0].strip()] = arr[1].strip()
                    elif OverideName.lower() == 'pcds':
                        # Bug fix: the base accessor is GetFilename; the
                        # original called the misspelled GetFileName and
                        # would have raised AttributeError on this path.
                        ErrorMsg('EDES does not support PCD overide',
                                 self.GetFilename(),
                                 self.GetParent().GetLinenumberByOffset(curr))
                curr = curr + 1
        return True

    def GenerateLines(self):
        """Render this component back into DSC text lines."""
        lines = []
        hasLib = len(self._OverideLibraries) != 0
        hasPcd = len(self._OveridePcds) != 0
        if not (hasLib or hasPcd):
            # No override block: a single plain INF line.
            lines.append((' %s \n' % self._Filename))
            return lines
        lines.append((' %s {\n' % self._Filename))
        if hasLib:
            lines.append(' <LibraryClasses>\n')
            for libKey in self._OverideLibraries.keys():
                lines.append(' %s|%s\n' % (libKey, self._OverideLibraries[libKey]))
        if hasPcd:
            for key in self._OveridePcds.keys():
                lines.append(' <%s>\n' % key)
                for name, value in self._OveridePcds[key]:
                    if value is not None:
                        lines.append(' %s|%s\n' % (name, value))
                    else:
                        lines.append(' %s\n' % name)
        lines.append(' }\n')
        return lines
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/dsc.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from plugins.EdkPlugins.basemodel import ini
import re, os
from plugins.EdkPlugins.basemodel.message import *
class INFFile(ini.BaseINIFile):
    """Model of an EDK2 module information (.inf) file."""
    # Global map: produced library class name -> list of INFFile instances.
    _libobjs = {}

    def GetSectionInstance(self, parent, name, isCombined=False):
        """Factory hook: give the INI parser INF-specific section objects."""
        return INFSection(parent, name, isCombined)

    def GetProduceLibraryClass(self):
        """Return the library class this module produces, or None."""
        obj = self.GetDefine("LIBRARY_CLASS")
        if obj is None: return None
        return obj.split('|')[0].strip()

    def GetSectionObjectsByName(self, name, arch=None):
        """Collect the objects of every section named *name* whose
        architecture matches *arch* (None matches everything)."""
        arr = []
        sects = self.GetSectionByName(name)
        for sect in sects:
            # skip unmatched archtecture content
            if not sect.IsArchMatch(arch):
                continue
            for obj in sect.GetObjects():
                arr.append(obj)
        return arr

    def GetSourceObjects(self, arch=None, tool=None):
        """Collect [Sources] entries matching *arch* and toolchain family
        *tool* (either may be None to match all)."""
        arr = []
        sects = self.GetSectionByName('sources')
        for sect in sects:
            # skip unmatched archtecture content
            if not sect.IsArchMatch(arch):
                continue
            for obj in sect.GetObjects():
                if not obj.IsMatchFamily(tool):
                    continue
                arr.append(obj)
        return arr

    def Parse(self):
        """Parse the file, then register self under its produced library class."""
        if not ini.BaseINIFile.Parse(self):
            return False
        classname = self.GetProduceLibraryClass()
        if classname is not None:
            libobjdict = INFFile._libobjs
            if classname in libobjdict:
                if self not in libobjdict[classname]:
                    libobjdict[classname].append(self)
            else:
                libobjdict[classname] = [self]
        return True

    def GetBaseName(self):
        """Return the BASE_NAME define, stripped."""
        return self.GetDefine("BASE_NAME").strip()

    def GetModuleRootPath(self):
        """Return the directory containing this INF file."""
        return os.path.dirname(self.GetFilename())

    def Clear(self):
        """Unregister from the library-class registry and clear parsed state."""
        classname = self.GetProduceLibraryClass()
        if classname is not None:
            libobjdict = INFFile._libobjs
            libobjdict[classname].remove(self)
            if len(libobjdict[classname]) == 0:
                del libobjdict[classname]
        ini.BaseINIFile.Clear(self)
class INFSection(ini.BaseINISection):
    """A section ('[type.arch]') of an INF file."""

    def GetSectionINIObject(self, parent):
        """Factory hook: map the section type to the object class used to
        parse each line of the section."""
        type = self.GetType()
        if type.lower() == 'libraryclasses':
            return INFLibraryClassObject(self)
        if type.lower() == 'sources':
            return INFSourceObject(self)
        # Substring match so every Pcds* flavour yields a pcd object.
        if type.lower().find('pcd') != -1:
            return INFPcdObject(self)
        if type.lower() == 'packages':
            return INFDependentPackageObject(self)
        # GUIDs, protocols and PPIs all share the same 'name[|...]' shape.
        if type.lower() in ['guids', 'protocols', 'ppis']:
            return INFGuidObject(self)
        if type.lower() == 'defines':
            return INFDefineSectionObject(self)
        return INFSectionObject(self)

    def GetType(self):
        """Return the part of the section name before the first '.'."""
        arr = self._name.split('.')
        return arr[0].strip()

    def GetArch(self):
        """Return the architecture suffix of the section name, or 'common'."""
        arr = self._name.split('.')
        if len(arr) == 1:
            return 'common'
        return arr[1]

    def IsArchMatch(self, arch):
        """True if this section applies to *arch* (None or 'common' match all)."""
        if arch is None or self.GetArch() == 'common':
            return True
        if self.GetArch().lower() != arch.lower():
            return False
        return True
class INFSectionObject(ini.BaseINISectionObject):
    """Base class for a single parsed line inside an INF section."""

    def GetArch(self):
        """Return the architecture of the owning section."""
        return self.GetParent().GetArch()
class INFDefineSectionObject(INFSectionObject):
    """A 'KEY = VALUE' entry in the [Defines] section of an INF file."""

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self._key = None
        self._value = None

    def Parse(self):
        """Parse 'key = value' from this object's single line.

        Returns True on success, False (after reporting) on a malformed line.
        """
        assert (self._start == self._end), 'The object in define section must be in single line'
        line = self.GetLineByOffset(self._start).strip()
        # Drop any trailing comment before splitting on '='.
        line = line.split('#')[0]
        arr = line.split('=')
        if len(arr) != 2:
            ErrorMsg('Invalid define section object',
                     self.GetFilename(),
                     self._start
                     )
            return False
        self._key = arr[0].strip()
        self._value = arr[1].strip()
        return True

    def GetKey(self):
        """Return the define's key (None before a successful Parse)."""
        return self._key

    def GetValue(self):
        """Return the define's value (None before a successful Parse)."""
        return self._value
class INFLibraryClassObject(INFSectionObject):
    """A consumed library class entry in an INF [LibraryClasses] section."""
    # Global map: class name -> list of live instances consuming it.
    _objs = {}

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self._classname = None

    def GetClass(self):
        """Return the consumed library class name."""
        return self._classname

    def Parse(self):
        """Read the class name from this object's line and register self."""
        self._classname = self.GetLineByOffset(self._start).split('#')[0].strip()
        objdict = INFLibraryClassObject._objs
        if self._classname in objdict:
            objdict[self._classname].append(self)
        else:
            objdict[self._classname] = [self]
        return True

    def Destroy(self):
        """Unregister this instance; drop the bucket when it empties."""
        objdict = INFLibraryClassObject._objs
        objdict[self._classname].remove(self)
        if len(objdict[self._classname]) == 0:
            del objdict[self._classname]

    def GetName(self):
        """Return the class name (alias of GetClass)."""
        return self._classname

    @staticmethod
    def GetObjectDict():
        """Return the global class-name -> instances registry."""
        return INFLibraryClassObject._objs
class INFDependentPackageObject(INFSectionObject):
    """A package dependency entry in an INF [Packages] section."""

    def GetPath(self):
        """Return the DEC path with trailing comment and whitespace removed."""
        return self.GetLineByOffset(self._start).split('#')[0].strip()
class INFSourceObject(INFSectionObject):
    """A source-file entry ('path[|family[|tagname[|toolcode[|featurepcd]]]]')
    in an INF [Sources] section."""
    # Global map: source base filename -> list of live instances.
    _objs = {}

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self.mSourcename = None
        self.mToolCode = None
        self.mFamily = None
        self.mTagName = None
        self.mFeaturePcd = None
        self.mFilename = None

    def GetSourcePath(self):
        """Return the path exactly as written in the INF (module-relative)."""
        return self.mSourcename

    def GetSourceFullPath(self):
        """Return the normalized path of the source relative to the INF's dir."""
        path = os.path.dirname(self.GetFilename())
        path = os.path.join(path, self.GetSourcePath())
        return os.path.normpath(path)

    def GetToolCode(self):
        """Return the tool-code field, or None."""
        return self.mToolCode

    def GetFamily(self):
        """Return the toolchain-family field, or None."""
        return self.mFamily

    def GetTagName(self):
        """Return the tag-name field, or None."""
        return self.mTagName

    def GetFeaturePcd(self):
        """Return the feature-PCD field, or None."""
        return self.mFeaturePcd

    def Parse(self):
        """Split the '|'-separated fields and register self by base filename."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        arr = line.split('|')
        self.mSourcename = arr[0].strip()
        if len(arr) >= 2:
            self.mFamily = arr[1].strip()
        if len(arr) >= 3:
            self.mTagName = arr[2].strip()
        if len(arr) >= 4:
            self.mToolCode = arr[3].strip()
        if len(arr) >= 5:
            self.mFeaturePcd = arr[4].strip()
        self.mFilename = os.path.basename(self.GetSourceFullPath())
        objdict = INFSourceObject._objs
        if self.mFilename not in objdict:
            objdict[self.mFilename] = [self]
        else:
            objdict[self.mFilename].append(self)
        return True

    def GetName(self):
        """Return the base filename of the source."""
        return self.mFilename

    def Destroy(self):
        """Unregister this instance; drop the bucket when it empties."""
        objdict = INFSourceObject._objs
        objdict[self.mFilename].remove(self)
        if len(objdict[self.mFilename]) == 0:
            del objdict[self.mFilename]

    def IsMatchFamily(self, family):
        """True if this source applies when building with toolchain *family*.

        With an explicit family field the comparison is direct; otherwise
        extension-based rules apply ('.S' only for gcc, '.s' only for
        ipf/common arch, '.asm' only for msft/intel families).
        """
        if family is None:
            return True
        if self.mFamily is not None:
            if family.strip().lower() == self.mFamily.lower():
                return True
            else:
                return False
        else:
            fname = self.GetSourcePath()
            if fname.endswith('.S') and family.lower() != 'gcc':
                return False
            if fname.endswith('.s') and (self.GetArch().lower() != 'ipf' and self.GetArch().lower() != 'common'):
                return False
            if fname.lower().endswith('.asm') and (family.lower() != 'msft' and family.lower() != 'intel'):
                return False
        return True

    @staticmethod
    def GetObjectDict():
        """Return the global filename -> instances registry."""
        return INFSourceObject._objs
class INFPcdObject(INFSectionObject):
    """A PCD usage ('TokenSpace.Name[|Value]') in an INF Pcds* section."""
    # Global map: short PCD name -> list of live instances.
    _objs = {}

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self.mPcdType = None
        self.mDefaultValue = None
        self.mPcdName = None

    @staticmethod
    def GetObjectDict():
        """Return the global name -> instances registry."""
        return INFPcdObject._objs

    def Parse(self):
        """Parse the name and optional default value; register by short name."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        arr = line.split('|')
        self.mPcdName = arr[0].strip()
        if len(arr) >= 2:
            self.mDefaultValue = arr[1].strip()
        objdict = INFPcdObject._objs
        if self.GetName() in objdict:
            if self not in objdict[self.GetName()]:
                objdict[self.GetName()].append(self)
        else:
            objdict[self.GetName()] = [self]
        return True

    def GetPcdName(self):
        """Return the full 'TokenSpace.Name' string."""
        return self.mPcdName

    def GetPcdType(self):
        """Return the PCD access type (the owning section's type)."""
        return self.GetParent().GetType()

    def GetName(self):
        """Return the name part after the token-space prefix.

        NOTE(review): assumes mPcdName always contains a '.'; a bare name
        would raise IndexError here — confirm inputs are validated upstream.
        """
        return self.mPcdName.split('.')[1]

    def Destroy(self):
        """Unregister this instance; drop the bucket when it empties."""
        objdict = INFPcdObject._objs
        objdict[self.GetName()].remove(self)
        if len(objdict[self.GetName()]) == 0:
            del objdict[self.GetName()]
class INFGuidObject(INFSectionObject):
    """A GUID/Protocol/PPI usage line inside an INF section."""

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        """Extract the bare name, ignoring a trailing comment and any
        '|...' qualifier; always returns True."""
        raw = self.GetLineByOffset(self._start)
        self._name = raw.strip().split('#')[0].split("|")[0].strip()
        return True

    def GetName(self):
        """Return the parsed name (None before Parse)."""
        return self._name
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/inf.py
|
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
from plugins.EdkPlugins.basemodel import ini
from plugins.EdkPlugins.edk2.model import dsc
from plugins.EdkPlugins.edk2.model import inf
from plugins.EdkPlugins.edk2.model import dec
import os
from plugins.EdkPlugins.basemodel.message import *
class SurfaceObject(object):
    """Base class for loaded workspace files (platforms, modules, packages).

    The class attribute _objs is a per-subclass registry mapping the
    workspace-relative filename to the list of live instances; freshly
    constructed, not-yet-loaded instances are parked under the key "None".
    """
    _objs = {}

    def __new__(cls, *args, **kwargs):
        """Maintain only a single instance of this object
        @return: instance of this class
        """
        obj = object.__new__(cls)
        if "None" not in cls._objs:
            cls._objs["None"] = []
        cls._objs["None"].append(obj)
        return obj

    def __init__(self, parent, workspace):
        self._parent = parent
        self._fileObj = None
        self._workspace = workspace
        self._isModify = False
        self._modifiedObjs = []

    def __del__(self):
        pass

    def Destroy(self):
        """Tear down the file object and remove self from the registry."""
        key = self.GetRelativeFilename()
        self.GetFileObj().Destroy(self)
        del self._fileObj
        # dereference self from _objs arrary
        assert key in self._objs, "when destory, object is not in obj list"
        assert self in self._objs[key], "when destory, object is not in obj list"
        self._objs[key].remove(self)
        if len(self._objs[key]) == 0:
            del self._objs[key]

    def GetParent(self):
        """Return the owning object (platform, workspace, ...)."""
        return self._parent

    def GetWorkspace(self):
        """Return the workspace root path."""
        return self._workspace

    def GetFileObjectClass(self):
        """Overridden by subclasses to pick the concrete file parser class."""
        return ini.BaseINIFile

    def GetFilename(self):
        """Return the full path of the underlying file."""
        return self.GetFileObj().GetFilename()

    def GetFileObj(self):
        """Return the parsed file object (None before Load)."""
        return self._fileObj

    def GetRelativeFilename(self):
        """Return the filename with the workspace prefix stripped."""
        fullPath = self.GetFilename()
        return fullPath[len(self._workspace) + 1:]

    def Load(self, relativePath):
        """Parse the file at workspace-relative *relativePath*.

        On success self moves from the registry's "None" bucket to the
        bucket keyed by its relative path; returns True/False.
        """
        # if has been loaded, directly return
        if self._fileObj is not None: return True
        relativePath = os.path.normpath(relativePath)
        fullPath = os.path.join(self._workspace, relativePath)
        fullPath = os.path.normpath(fullPath)
        if not os.path.exists(fullPath):
            ErrorMsg("file does not exist!", fullPath)
            return False
        self._fileObj = self.GetFileObjectClass()(fullPath, self)
        if not self._fileObj.Parse():
            ErrorMsg("Fail to parse file!", fullPath)
            return False
        # remove self from None list to list with filename as key
        cls = self.__class__
        if self not in cls._objs["None"]:
            ErrorMsg("Sufrace object does not be create into None list")
        cls._objs["None"].remove(self)
        if relativePath not in cls._objs:
            cls._objs[relativePath] = []
        cls._objs[relativePath].append(self)
        return True

    def Reload(self, force=False):
        """Re-parse the underlying file; *force* bypasses the modified check."""
        ret = True
        # whether require must be update
        if force:
            ret = self.GetFileObj().Reload(True)
        else:
            if self.IsModified():
                if self.GetFileObj().IsModified():
                    ret = self.GetFileObj().Reload()
        return ret

    def Modify(self, modify=True, modifiedObj=None):
        """Set the modified flag, propagating it up the parent chain."""
        if modify:
            #LogMsg("%s is modified, modified object is %s" % (self.GetFilename(), modifiedObj))
            # Avoid re-propagating when the file object already marked us.
            if issubclass(modifiedObj.__class__, ini.BaseINIFile) and self._isModify:
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            self._isModify = modify

    def IsModified(self):
        """Return the modified flag."""
        return self._isModify

    def GetModifiedObjs(self):
        """Return the list of objects recorded as modified."""
        return self._modifiedObjs

    def FilterObjsByArch(self, objs, arch):
        """Return the subset of *objs* whose arch is 'common' or equals *arch*."""
        arr = []
        for obj in objs:
            if obj.GetArch().lower() == 'common':
                arr.append(obj)
                continue
            if obj.GetArch().lower() == arch.lower():
                arr.append(obj)
                continue
        return arr
class Platform(SurfaceObject):
    """A platform (.dsc file) plus the modules and packages it references."""

    def __init__(self, parent, workspace):
        SurfaceObject.__init__(self, parent, workspace)
        self._modules = []
        self._packages = []

    def Destroy(self):
        """Destroy all owned modules, then self."""
        for module in self._modules:
            module.Destroy()
        del self._modules[:]
        del self._packages[:]
        SurfaceObject.Destroy(self)

    def GetName(self):
        """Return the PLATFORM_NAME define."""
        return self.GetFileObj().GetDefine("PLATFORM_NAME")

    def GetFileObjectClass(self):
        """The platform file is parsed as a DSC file."""
        return dsc.DSCFile

    def GetModuleCount(self):
        """Return the number of [Components] entries in the DSC."""
        if self.GetFileObj() is None:
            ErrorMsg("Fail to get module count because DSC file has not been load!")
            # Bug fix: the original fell through and dereferenced None.
            return 0
        return len(self.GetFileObj().GetComponents())

    def GetSupportArchs(self):
        """Return the '|'-separated SUPPORTED_ARCHITECTURES list."""
        return self.GetFileObj().GetDefine("SUPPORTED_ARCHITECTURES").strip().split('#')[0].split('|')

    def LoadModules(self, precallback=None, postcallback=None):
        """Instantiate a Module for every component, per supported arch.

        *precallback*/ *postcallback* are invoked around each module load
        when supplied.
        """
        for obj in self.GetFileObj().GetComponents():
            mFilename = obj.GetFilename()
            if precallback is not None:
                precallback(self, mFilename)
            arch = obj.GetArch()
            if arch.lower() == 'common':
                archarr = self.GetSupportArchs()
            else:
                archarr = [arch]
            for arch in archarr:
                module = Module(self, self.GetWorkspace())
                if module.Load(mFilename, arch, obj.GetOveridePcds(), obj.GetOverideLibs()):
                    self._modules.append(module)
                    if postcallback is not None:
                        postcallback(self, module)
                else:
                    del module
                    ErrorMsg("Fail to load module %s" % mFilename)

    def GetModules(self):
        """Return the list of loaded Module objects."""
        return self._modules

    def GetLibraryPath(self, classname, arch, type):
        """Resolve the INF instance path for *classname* under (arch, type).

        'common' entries match any arch/module type; returns None (after
        reporting) when no entry matches.
        """
        objs = self.GetFileObj().GetSectionObjectsByName("libraryclasses")
        for obj in objs:
            if classname.lower() != obj.GetClass().lower():
                continue
            if obj.GetArch().lower() != 'common' and \
               obj.GetArch().lower() != arch.lower():
                continue
            if obj.GetModuleType().lower() != 'common' and \
               obj.GetModuleType().lower() != type.lower():
                continue
            return obj.GetInstance()
        ErrorMsg("Fail to get library class %s [%s][%s] from platform %s" % (classname, arch, type, self.GetFilename()))
        return None

    def GetPackage(self, path):
        """Fetch a package via the parent workspace, caching it locally."""
        package = self.GetParent().GetPackage(path)
        if package not in self._packages:
            self._packages.append(package)
        return package

    def GetPcdBuildObjs(self, name, arch=None):
        """Return DSC pcd objects named *name*, optionally filtered by *arch*."""
        arr = []
        objs = self.GetFileObj().GetSectionObjectsByName('pcds')
        for obj in objs:
            if obj.GetPcdName().lower() == name.lower():
                arr.append(obj)
        if arch is not None:
            arr = self.FilterObjsByArch(arr, arch)
        return arr

    def Reload(self, callback=None):
        """Reload the platform DSC; recreate or reload modules as needed."""
        # do not care force paramter for platform object
        isFileChanged = self.GetFileObj().IsModified()
        ret = SurfaceObject.Reload(self, False)
        if not ret: return False
        if isFileChanged:
            # destroy all modules and reload them again
            for obj in self._modules:
                obj.Destroy()
            del self._modules[:]
            del self._packages[:]
            self.LoadModules(callback)
        else:
            for obj in self._modules:
                # Bug fix: callback defaults to None and must be guarded
                # before being invoked.
                if callback is not None:
                    callback(self, obj.GetFilename())
                obj.Reload()
        self.Modify(False)
        return True

    def Modify(self, modify=True, modifiedObj=None):
        """Propagate the modify flag; clearing it requires both the DSC
        file and every module to be clean."""
        if modify:
            #LogMsg("%s is modified, modified object is %s" % (self.GetFilename(), modifiedObj))
            if issubclass(modifiedObj.__class__, ini.BaseINIFile) and self._isModify:
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            if self.GetFileObj().IsModified():
                return
            for obj in self._modules:
                if obj.IsModified():
                    return
            self._isModify = modify
            self.GetParent().Modify(modify, self)

    def GetModuleObject(self, relativePath, arch):
        """Find a loaded module by relative INF path and architecture
        ('common' matches the first module with that path)."""
        path = os.path.normpath(relativePath)
        for obj in self._modules:
            if obj.GetRelativeFilename() == path:
                if arch.lower() == 'common':
                    return obj
                if obj.GetArch() == arch:
                    return obj
        return None

    def GenerateFullReferenceDsc(self):
        """Build a new DSC mirroring this platform with every module's
        resolved library instances and PCD settings written explicitly
        into per-component override blocks."""
        oldDsc = self.GetFileObj()
        newDsc = dsc.DSCFile()
        newDsc.CopySectionsByName(oldDsc, 'defines')
        newDsc.CopySectionsByName(oldDsc, 'SkuIds')
        #
        # Dynamic common section should also be copied
        #
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicDefault')
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicHii')
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicVpd')
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicEx')
        sects = oldDsc.GetSectionByName('Components')
        for oldSect in sects:
            newSect = newDsc.AddNewSection(oldSect.GetName())
            for oldComObj in oldSect.GetObjects():
                module = self.GetModuleObject(oldComObj.GetFilename(), oldSect.GetArch())
                if module is None: continue
                newComObj = dsc.DSCComponentObject(newSect)
                newComObj.SetFilename(oldComObj.GetFilename())
                # add all library instance for override section
                libdict = module.GetLibraries()
                for libclass in libdict.keys():
                    if libdict[libclass] is not None:
                        newComObj.AddOverideLib(libclass, libdict[libclass].GetRelativeFilename().replace('\\', '/'))
                # add all pcds for override section
                pcddict = module.GetPcds()
                for pcd in pcddict.values():
                    buildPcd = pcd.GetBuildObj()
                    buildType = buildPcd.GetPcdType()
                    buildValue = None
                    # All dynamic flavours collapse to a single PcdsDynamic
                    # section and carry no explicit value.
                    if buildType.lower() == 'pcdsdynamichii' or \
                       buildType.lower() == 'pcdsdynamicvpd' or \
                       buildType.lower() == 'pcdsdynamicdefault':
                        buildType = 'PcdsDynamic'
                    if buildType != 'PcdsDynamic':
                        buildValue = buildPcd.GetPcdValue()
                    newComObj.AddOveridePcd(buildPcd.GetPcdName(),
                                            buildType,
                                            buildValue)
                newSect.AddObject(newComObj)
        return newDsc
class Module(SurfaceObject):
def __init__(self, parent, workspace):
SurfaceObject.__init__(self, parent, workspace)
self._arch = 'common'
self._parent = parent
self._overidePcds = {}
self._overideLibs = {}
self._libs = {}
self._pcds = {}
self._ppis = []
self._protocols = []
self._depexs = []
self._guids = []
self._packages = []
def Destroy(self):
for lib in self._libs.values():
if lib is not None:
lib.Destroy()
self._libs.clear()
for pcd in self._pcds.values():
pcd.Destroy()
self._pcds.clear()
for ppi in self._ppis:
ppi.DeRef(self)
del self._ppis[:]
for protocol in self._protocols:
if protocol is not None:
protocol.DeRef(self)
del self._protocols[:]
for guid in self._guids:
if guid is not None:
guid.DeRef(self)
del self._guids[:]
del self._packages[:]
del self._depexs[:]
SurfaceObject.Destroy(self)
def GetFileObjectClass(self):
return inf.INFFile
def GetLibraries(self):
return self._libs
def Load(self, filename, arch='common', overidePcds=None, overideLibs=None):
if not SurfaceObject.Load(self, filename):
return False
self._arch = arch
if overidePcds is not None:
self._overideLibs = overideLibs
if overideLibs is not None:
self._overidePcds = overidePcds
self._SearchLibraries()
self._SearchPackage()
self._SearchSurfaceItems()
return True
def GetArch(self):
return self._arch
def GetModuleName(self):
return self.GetFileObj().GetDefine("BASE_NAME")
def GetModuleType(self):
return self.GetFileObj().GetDefine("MODULE_TYPE")
def GetPlatform(self):
return self.GetParent()
def GetModuleObj(self):
return self
def GetPcds(self):
pcds = self._pcds.copy()
for lib in self._libs.values():
if lib is None: continue
for name in lib._pcds.keys():
pcds[name] = lib._pcds[name]
return pcds
def GetPpis(self):
ppis = []
ppis += self._ppis
for lib in self._libs.values():
if lib is None: continue
ppis += lib._ppis
return ppis
def GetProtocols(self):
pros = []
pros = self._protocols
for lib in self._libs.values():
if lib is None: continue
pros += lib._protocols
return pros
def GetGuids(self):
guids = []
guids += self._guids
for lib in self._libs.values():
if lib is None: continue
guids += lib._guids
return guids
def GetDepexs(self):
deps = []
deps += self._depexs
for lib in self._libs.values():
if lib is None: continue
deps += lib._depexs
return deps
def IsLibrary(self):
return self.GetFileObj().GetDefine("LIBRARY_CLASS") is not None
def GetLibraryInstance(self, classname, arch, type):
if classname not in self._libs.keys():
# find in overide lib firstly
if classname in self._overideLibs.keys():
self._libs[classname] = Library(self, self.GetWorkspace())
self._libs[classname].Load(self._overideLibs[classname])
return self._libs[classname]
parent = self.GetParent()
if issubclass(parent.__class__, Platform):
path = parent.GetLibraryPath(classname, arch, type)
if path is None:
ErrorMsg('Fail to get library instance for %s' % classname, self.GetFilename())
return None
self._libs[classname] = Library(self, self.GetWorkspace())
if not self._libs[classname].Load(path, self.GetArch()):
self._libs[classname] = None
else:
self._libs[classname] = parent.GetLibraryInstance(classname, arch, type)
return self._libs[classname]
def GetSourceObjs(self):
return self.GetFileObj().GetSectionObjectsByName('source')
def _SearchLibraries(self):
objs = self.GetFileObj().GetSectionObjectsByName('libraryclasses')
arch = self.GetArch()
type = self.GetModuleType()
for obj in objs:
if obj.GetArch().lower() != 'common' and \
obj.GetArch().lower() not in self.GetPlatform().GetSupportArchs():
continue
classname = obj.GetClass()
instance = self.GetLibraryInstance(classname, arch, type)
if not self.IsLibrary() and instance is not None:
instance._isInherit = False
if classname not in self._libs.keys():
self._libs[classname] = instance
    def _SearchSurfaceItems(self):
        """Scan the module INF and register its PCDs, PPIs, protocols,
        depex expressions and GUIDs (filtered to this module's arch)."""
        # get surface item from self's inf
        pcds = []
        ppis = []
        pros = []
        deps = []
        guids = []
        if self.GetFileObj() is not None:
            pcds = self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('pcd'),
                                         self.GetArch())
            for pcd in pcds:
                # First declaration of a PCD name wins; later ones are ignored.
                if pcd.GetPcdName() not in self._pcds.keys():
                    pcdItem = PcdItem(pcd.GetPcdName(), self, pcd)
                    self._pcds[pcd.GetPcdName()] = ModulePcd(self,
                                                             pcd.GetPcdName(),
                                                             pcd,
                                                             pcdItem)
            ppis += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('ppis'),
                                          self.GetArch())
            for ppi in ppis:
                item = PpiItem(ppi.GetName(), self, ppi)
                if item not in self._ppis:
                    self._ppis.append(item)
            pros += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('protocols'),
                                          self.GetArch())
            for pro in pros:
                item = ProtocolItem(pro.GetName(), self, pro)
                if item not in self._protocols:
                    self._protocols.append(item)
            deps += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('depex'),
                                          self.GetArch())
            for dep in deps:
                # Depex entries are not deduplicated: order and repetition
                # are part of the dependency expression.
                item = DepexItem(self, dep)
                self._depexs.append(item)
            guids += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('guids'),
                                           self.GetArch())
            for guid in guids:
                item = GuidItem(guid.GetName(), self, guid)
                if item not in self._guids:
                    self._guids.append(item)
def _SearchPackage(self):
objs = self.GetFileObj().GetSectionObjectsByName('packages')
for obj in objs:
package = self.GetPlatform().GetPackage(obj.GetPath())
if package is not None:
self._packages.append(package)
def GetPackages(self):
return self._packages
def GetPcdObjects(self):
if self.GetFileObj() is None:
return []
return self.GetFileObj().GetSectionObjectsByName('pcd')
    def GetLibraryClassHeaderFilePath(self):
        """Locate the header declared for this library's LIBRARY_CLASS.

        Searches every consumed package and returns the absolute path of the
        first matching declaration, or None when this module produces no
        library class or no package declares it.
        """
        lcname = self.GetFileObj().GetProduceLibraryClass()
        if lcname is None: return None
        pkgs = self.GetPackages()
        for package in pkgs:
            path = package.GetLibraryClassHeaderPathByName(lcname)
            if path is not None:
                # DEC header paths are relative to the package root directory.
                return os.path.realpath(os.path.join(package.GetFileObj().GetPackageRootPath(), path))
        return None
    def Reload(self, force=False, callback=None):
        """Reload the INF and rebuild libraries/packages/surface items.

        force:    reload even when the underlying file is unmodified.
        callback: optional progress callable invoked as callback(self, msg).
        Returns True on success (or when nothing needed reloading).
        """
        if callback is not None:
            callback(self, "Starting reload...")
        # Reload the backing file first; bail out when that fails.
        ret = SurfaceObject.Reload(self, force)
        if not ret: return False
        # Fast path: nothing to do when the file is unchanged.
        if not force and not self.IsModified():
            return True
        # Tear down all previously resolved state before re-scanning.
        for lib in self._libs.values():
            if lib is not None:
                lib.Destroy()
        self._libs.clear()
        for pcd in self._pcds.values():
            pcd.Destroy()
        self._pcds.clear()
        for ppi in self._ppis:
            ppi.DeRef(self)
        del self._ppis[:]
        for protocol in self._protocols:
            protocol.DeRef(self)
        del self._protocols[:]
        for guid in self._guids:
            guid.DeRef(self)
        del self._guids[:]
        del self._packages[:]
        del self._depexs[:]
        if callback is not None:
            callback(self, "Searching libraries...")
        self._SearchLibraries()
        if callback is not None:
            callback(self, "Searching packages...")
        self._SearchPackage()
        if callback is not None:
            callback(self, "Searching surface items...")
        self._SearchSurfaceItems()
        # Mark clean again now that caches reflect the file contents.
        self.Modify(False)
        return True
    def Modify(self, modify=True, modifiedObj=None):
        """Propagate a dirty/clean state change up to the parent.

        modify:      True marks this module dirty, False marks it clean.
        modifiedObj: object that triggered the change; used to suppress
                     redundant propagation for INI-file edits.
        """
        if modify:
            #LogMsg("%s is modified, modified object is %s" % (self.GetFilename(), modifiedObj))
            # An already-dirty module does not re-propagate plain file edits.
            if issubclass(modifiedObj.__class__, ini.BaseINIFile) and self._isModify:
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            # Only clear the flag once the underlying file itself is clean.
            if self.GetFileObj().IsModified():
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
class Library(Module):
    """A library-class instance resolved for a consuming module.

    Delegates module-type / platform / arch queries to the module that
    linked it, because a library builds in its consumer's context.
    """
    def __init__(self, parent, workspace):
        Module.__init__(self, parent, workspace)
        # Cleared by Module._SearchLibraries when linked directly by a driver.
        self._isInherit = True
    def IsInherit(self):
        return self._isInherit
    def GetModuleType(self):
        # A library builds with the module type of its consumer.
        return self.GetParent().GetModuleType()
    def GetPlatform(self):
        # parent is the consuming Module; its parent is the Platform.
        return self.GetParent().GetParent()
    def GetModuleObj(self):
        return self.GetParent()
    def GetArch(self):
        return self.GetParent().GetArch()
    def Destroy(self):
        # Drop cached sub-libraries and PCDs before the base-class teardown.
        self._libs.clear()
        self._pcds.clear()
        SurfaceObject.Destroy(self)
class Package(SurfaceObject):
    """A DEC-backed package: owns the PCD/GUID/PPI/protocol declaration
    items that modules resolve their surface items against."""
    def __init__(self, parent, workspace):
        SurfaceObject.__init__(self, parent, workspace)
        # name -> declaration item maps, populated by Load()
        self._pcds = {}
        self._guids = {}
        self._protocols = {}
        self._ppis = {}
    def GetPcds(self):
        """Return the name -> PcdItem mapping (the live dict, not a copy)."""
        return self._pcds
    def GetPpis(self):
        return list(self._ppis.values())
    def GetProtocols(self):
        return list(self._protocols.values())
    def GetGuids(self):
        return list(self._guids.values())
    def Destroy(self):
        """Destroy all owned declaration items, then the base object.

        Cleanup fix: the original cleared ``self._pcds`` twice; the
        redundant second clear has been removed.
        """
        for pcd in self._pcds.values():
            if pcd is not None:
                pcd.Destroy()
        for guid in self._guids.values():
            if guid is not None:
                guid.Destroy()
        for protocol in self._protocols.values():
            if protocol is not None:
                protocol.Destroy()
        for ppi in self._ppis.values():
            if ppi is not None:
                ppi.Destroy()
        self._pcds.clear()
        self._guids.clear()
        self._protocols.clear()
        self._ppis.clear()
        SurfaceObject.Destroy(self)
    def Load(self, relativePath):
        """Load the DEC file and intern all declared surface items.

        Returns False when the underlying file failed to load.
        """
        ret = SurfaceObject.Load(self, relativePath)
        if not ret: return False
        pcds = self.GetFileObj().GetSectionObjectsByName('pcds')
        for pcd in pcds:
            if pcd.GetPcdName() in self._pcds.keys():
                # A PCD may legitimately appear under several PCD-type
                # sections; collect the extra declarations on the item.
                if self._pcds[pcd.GetPcdName()] is not None:
                    self._pcds[pcd.GetPcdName()].AddDecObj(pcd)
            else:
                self._pcds[pcd.GetPcdName()] = PcdItem(pcd.GetPcdName(), self, pcd)
        guids = self.GetFileObj().GetSectionObjectsByName('guids')
        for guid in guids:
            if guid.GetName() not in self._guids.keys():
                self._guids[guid.GetName()] = GuidItem(guid.GetName(), self, guid)
            else:
                WarnMsg("Duplicate definition for %s" % guid.GetName())
        ppis = self.GetFileObj().GetSectionObjectsByName('ppis')
        for ppi in ppis:
            if ppi.GetName() not in self._ppis.keys():
                self._ppis[ppi.GetName()] = PpiItem(ppi.GetName(), self, ppi)
            else:
                WarnMsg("Duplicate definition for %s" % ppi.GetName())
        protocols = self.GetFileObj().GetSectionObjectsByName('protocols')
        for protocol in protocols:
            if protocol.GetName() not in self._protocols.keys():
                self._protocols[protocol.GetName()] = ProtocolItem(protocol.GetName(), self, protocol)
            else:
                WarnMsg("Duplicate definition for %s" % protocol.GetName())
        return True
    def GetFileObjectClass(self):
        return dec.DECFile
    def GetName(self):
        return self.GetFileObj().GetDefine("PACKAGE_NAME")
    def GetPcdDefineObjs(self, name=None):
        """Return DEC pcd objects, optionally filtered by case-insensitive name."""
        arr = []
        objs = self.GetFileObj().GetSectionObjectsByName('pcds')
        if name is None: return objs
        for obj in objs:
            if obj.GetPcdName().lower() == name.lower():
                arr.append(obj)
        return arr
    def GetLibraryClassObjs(self):
        return self.GetFileObj().GetSectionObjectsByName('libraryclasses')
    def Modify(self, modify=True, modifiedObj=None):
        """Propagate a dirty/clean state change up to the parent."""
        if modify:
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            # Only clear the flag once the underlying file itself is clean.
            if self.GetFileObj().IsModified():
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
    def GetLibraryClassHeaderPathByName(self, clsname):
        """Return the declared header path for a library class, or None."""
        objs = self.GetLibraryClassObjs()
        for obj in objs:
            if obj.GetClassName() == clsname:
                return obj.GetHeaderFile()
        return None
class DepexItem(object):
    """One [Depex] expression owned by a module's INF."""
    def __init__(self, parent, infObj):
        self._parent = parent
        self._infObj = infObj
    def GetDepexString(self):
        """Render the backing INF depex object as its string form."""
        return str(self._infObj)
    def GetInfObject(self):
        """Return the raw INF section object backing this depex."""
        return self._infObj
class ModulePcd(object):
    """A PCD as consumed by one module: joins the INF usage, the package
    (DEC) declaration item and the optional platform (DSC) override."""
    # INF usage type -> DSC/DEC section PCD type
    _type_mapping = {'FeaturePcd': 'PcdsFeatureFlag',
                     'FixedPcd': 'PcdsFixedAtBuild',
                     'PatchPcd': 'PcdsPatchableInModule'}
    def __init__(self, parent, name, infObj, pcdItem):
        assert issubclass(parent.__class__, Module), "Module's PCD's parent must be module!"
        assert pcdItem is not None, 'Pcd %s does not in some package!' % name
        self._name = name
        self._parent = parent
        self._pcdItem = pcdItem
        self._infObj = infObj
    def GetName(self):
        return self._name
    def GetParent(self):
        # BUGFIX: previously returned self._name (copy/paste error), so
        # callers asking for the owning module got the PCD name string.
        return self._parent
    def GetArch(self):
        return self._parent.GetArch()
    def Destroy(self):
        # Drop our reference on the shared package item and release the INF obj.
        self._pcdItem.DeRef(self._parent)
        self._infObj = None
    def GetBuildObj(self):
        """Return the effective build-time PCD object.

        Platform DSC values win when present; otherwise the package DEC
        default whose PCD type matches the INF usage type is returned.
        Returns None (after ErrorMsg) on any type mismatch.
        """
        platformInfos = self._parent.GetPlatform().GetPcdBuildObjs(self._name, self.GetArch())
        modulePcdType = self._infObj.GetPcdType()
        # if platform does not give the pcd's value, get the default from the package
        if len(platformInfos) == 0:
            if modulePcdType.lower() == 'pcd':
                return self._pcdItem.GetDecObject()
            else:
                for obj in self._pcdItem.GetDecObjects():
                    if modulePcdType not in self._type_mapping.keys():
                        ErrorMsg("Invalid PCD type %s" % modulePcdType)
                        return None
                    if self._type_mapping[modulePcdType] == obj.GetPcdType():
                        return obj
                # BUGFIX: the original format string had two placeholders but
                # only one value, raising TypeError instead of reporting.
                ErrorMsg('Module PCD type %s is not in valid range in package!' % \
                         modulePcdType)
        else:
            if modulePcdType.lower() == 'pcd':
                if len(platformInfos) > 1:
                    WarnMsg("Find more than one value for PCD %s in platform %s" % \
                            (self._name, self._parent.GetPlatform().GetFilename()))
                return platformInfos[0]
            else:
                for obj in platformInfos:
                    if modulePcdType not in self._type_mapping.keys():
                        ErrorMsg("Invalid PCD type %s" % modulePcdType)
                        return None
                    if self._type_mapping[modulePcdType] == obj.GetPcdType():
                        return obj
                ErrorMsg('Can not find value for pcd %s in pcd type %s' % \
                         (self._name, modulePcdType))
        return None
class SurfaceItem(object):
    """Base class for PCD/GUID/PPI/protocol declaration items.

    Instances are interned per item name in the class-level ``_objs`` map:
    a Package parent registers (declares) the item, while a Module parent
    looks up the existing declaration and records itself as a referrer.
    """
    # name -> canonical instance.  NOTE(review): declared on the base class,
    # so subclasses that do not define their own ``_objs`` share this map --
    # confirm whether cross-kind name collisions are intended.
    _objs = {}
    def __new__(cls, *args, **kwargs):
        """Maintain only a single instance of this object
        @return: instance of this class
        """
        name = args[0]
        parent = args[1]
        fileObj = args[2]
        if issubclass(parent.__class__, Package):
            # Declaration site: a name must not be declared twice.
            if name in cls._objs.keys():
                ErrorMsg("%s item is duplicated defined in packages: %s and %s" %
                         (name, parent.GetFilename(), cls._objs[name].GetParent().GetFilename()))
                return None
            obj = object.__new__(cls)
            cls._objs[name] = obj
            return obj
        elif issubclass(parent.__class__, Module):
            # Reference site: the item must already have been declared.
            if name not in cls._objs.keys():
                ErrorMsg("%s item does not defined in any package! It is used by module %s" % \
                         (name, parent.GetFilename()))
                return None
            return cls._objs[name]
        return None
    def __init__(self, name, parent, fileObj):
        # __init__ runs for both outcomes of __new__: a fresh object
        # (Package parent) is initialized, while an interned object
        # (Module parent) only records the additional reference.
        if issubclass(parent.__class__, Package):
            self._name = name
            self._parent = parent
            self._decObj = [fileObj]
            self._refMods = {}
        else:
            self.RefModule(parent, fileObj)
    @classmethod
    def GetObjectDict(cls):
        return cls._objs
    def GetParent(self):
        return self._parent
    def GetReference(self):
        return self._refMods
    def RefModule(self, mObj, infObj):
        # Idempotent: each module is recorded at most once.
        if mObj in self._refMods.keys():
            return
        self._refMods[mObj] = infObj
    def DeRef(self, mObj):
        if mObj not in self._refMods.keys():
            WarnMsg("%s is not referenced by module %s" % (self._name, mObj.GetFilename()))
            return
        del self._refMods[mObj]
    def Destroy(self):
        # Remove this item from the intern map so it can be re-created later.
        self._refMods.clear()
        cls = self.__class__
        del cls._objs[self._name]
    def GetName(self):
        return self._name
    def GetDecObject(self):
        # The first (primary) DEC declaration.
        return self._decObj[0]
    def GetDecObjects(self):
        return self._decObj
class PcdItem(SurfaceItem):
    """A PCD declaration; one PCD may be declared with several PCD types
    inside a single DEC file."""
    def AddDecObj(self, fileObj):
        """Attach an additional DEC declaration of this PCD.

        Rejects declarations from a different package and exact duplicates
        (same PCD type and arch within the same package).
        """
        for decObj in self._decObj:
            if decObj.GetFilename() != fileObj.GetFilename():
                ErrorMsg("Pcd %s defined in more than one packages : %s and %s" % \
                         (self._name, decObj.GetFilename(), fileObj.GetFilename()))
                return
            # BUGFIX: the original compared decObj.GetArch().lower() against
            # fileObj.GetArch() without lowering the right-hand side, so
            # duplicates differing only in arch case slipped through.
            if decObj.GetPcdType() == fileObj.GetPcdType() and \
               decObj.GetArch().lower() == fileObj.GetArch().lower():
                ErrorMsg("Pcd %s is duplicated defined in pcd type %s in package %s" % \
                         (self._name, decObj.GetPcdType(), decObj.GetFilename()))
                return
        self._decObj.append(fileObj)
    def GetValidPcdType(self):
        """Return the distinct PCD type names this PCD is declared with."""
        types = []
        for obj in self._decObj:
            # BUGFIX: ``types += obj.GetPcdType()`` extended the list with the
            # *characters* of the type string; append the string itself, and
            # only once per distinct type.
            if obj.GetPcdType() not in types:
                types.append(obj.GetPcdType())
        return types
class GuidItem(SurfaceItem):
    """A GUID declaration item; all behavior comes from SurfaceItem."""
    pass
class PpiItem(SurfaceItem):
    """A PPI declaration item; all behavior comes from SurfaceItem."""
    pass
class ProtocolItem(SurfaceItem):
    """A protocol declaration item; all behavior comes from SurfaceItem."""
    pass
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/baseobject.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.
        ArchCsv: csv string containing all architectures to build
        '''
        # Bhyve ships a single X64-only platform description, so the
        # requested architecture CSV is intentionally ignored.
        return "Bhyve/BhyveX64.dsc"
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/BhyveBuild.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.
        ArchCsv: csv string containing all architectures to build
        '''
        # Intel TDX ships a single X64-only platform description, so the
        # requested architecture CSV is intentionally ignored.
        return "IntelTdx/IntelTdxX64.dsc"
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/IntelTdxBuild.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.
        ArchCsv: csv string containing all architectures to build
        '''
        # Cloud Hypervisor ships a single X64-only platform description,
        # so the requested architecture CSV is intentionally ignored.
        return "CloudHv/CloudHvX64.dsc"
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/CloudHvBuild.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.
        ArchCsv: csv string containing all architectures to build
        '''
        # Microvm ships a single X64-only platform description, so the
        # requested architecture CSV is intentionally ignored.
        return "Microvm/MicrovmX64.dsc"
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/MicrovmBuild.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
import subprocess
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.
        ArchCsv: csv string containing all architectures to build
        '''
        # AmdSev ships a single X64-only platform description, so the
        # requested architecture CSV is intentionally ignored.
        return "AmdSev/AmdSevX64.dsc"
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
# hack alert -- create dummy grub.efi
# The AmdSev DSC expects a pre-built grub image; CI does not build grub,
# so an empty placeholder is touched to satisfy the build.  NOTE: uses the
# external 'touch'/'ls' commands, so this only runs on POSIX hosts.
subprocess.run(['touch', 'OvmfPkg/AmdSev/Grub/grub.efi'])
subprocess.run(['ls', '-l', '--sort=time', 'OvmfPkg/AmdSev/Grub'])
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/AmdSevBuild.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import io
from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toollib.utility_functions import RunCmd
# ####################################################################################### #
# Configuration for Update & Setup #
# ####################################################################################### #
class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager):
    """Stuart update/setup/PR-eval settings for the OVMF platform."""
    def GetPackagesSupported(self):
        ''' return iterable of edk2 packages supported by this build.
        These should be edk2 workspace relative paths '''
        return CommonPlatform.PackagesSupported
    def GetArchitecturesSupported(self):
        ''' return iterable of edk2 architectures supported by this build '''
        return CommonPlatform.ArchSupported
    def GetTargetsSupported(self):
        ''' return iterable of edk2 target tags supported by this build '''
        return CommonPlatform.TargetsSupported
    def GetRequiredSubmodules(self):
        ''' return iterable containing RequiredSubmodule objects.
        If no RequiredSubmodules return an empty iterable
        '''
        rs = []
        # intentionally declare this one with recursive false to avoid overhead
        rs.append(RequiredSubmodule(
            "CryptoPkg/Library/OpensslLib/openssl", False))
        # To avoid maintenance of this file for every new submodule
        # lets just parse the .gitmodules and add each if not already in list.
        # The GetRequiredSubmodules is designed to allow a build to optimize
        # the desired submodules but it isn't necessary for this repository.
        result = io.StringIO()
        ret = RunCmd("git", "config --file .gitmodules --get-regexp path", workingdir=self.GetWorkspaceRoot(), outstream=result)
        # Cmd output is expected to look like:
        # submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl
        # submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
        if ret == 0:
            for line in result.getvalue().splitlines():
                _, _, path = line.partition(" ")
                if path is not None:
                    if path not in [x.path for x in rs]:
                        rs.append(RequiredSubmodule(path, True))  # add it with recursive since we don't know
        return rs
    def SetArchitectures(self, list_of_requested_architectures):
        ''' Confirm the requests architecture list is valid and configure SettingsManager
        to run only the requested architectures.
        Raise Exception if a list_of_requested_architectures is not supported
        '''
        unsupported = set(list_of_requested_architectures) - set(self.GetArchitecturesSupported())
        if(len(unsupported) > 0):
            errorString = ( "Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical( errorString )
            raise Exception( errorString )
        self.ActualArchitectures = list_of_requested_architectures
    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot
    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes
    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        ''' Filter other cases that this package should be built
        based on changed files. This should cover things that can't
        be detected as dependencies. '''
        build_these_packages = []
        possible_packages = potentialPackagesList.copy()
        for f in changedFilesList:
            # BaseTools files that might change the build
            if "BaseTools" in f:
                # BUGFIX: os.path.splitext() returns a (root, ext) tuple, so
                # comparing the tuple itself against [".txt", ".md"] was
                # always True and doc-only BaseTools changes rebuilt
                # everything.  Compare the extension component instead.
                if os.path.splitext(f)[1] not in [".txt", ".md"]:
                    build_these_packages = possible_packages
                    break
            # if the azure pipeline platform template file changed
            if "platform-build-run-steps.yml" in f:
                build_these_packages = possible_packages
                break
        return build_these_packages
    def GetPlatformDscAndConfig(self) -> tuple:
        ''' If a platform desires to provide its DSC then Policy 4 will evaluate if
        any of the changes will be built in the dsc.
        The tuple should be (<workspace relative path to dsc file>, <input dictionary of dsc key value pairs>)
        '''
        dsc = CommonPlatform.GetDscName(",".join(self.ActualArchitectures))
        return (f"OvmfPkg/{dsc}", {})
# ####################################################################################### #
# Actual Configuration for Platform Build #
# ####################################################################################### #
class PlatformBuilder( UefiBuilder, BuildSettingsManager):
    """Build-time settings for OVMF plus the QEMU boot-test hook."""
    def __init__(self):
        UefiBuilder.__init__(self)
    def AddCommandLineOptions(self, parserObj):
        ''' Add command line options to the argparser '''
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="IA32,X64",
            help="Optional - CSV of architecture to build. IA32 will use IA32 for Pei & Dxe. "
            "X64 will use X64 for both PEI and DXE. IA32,X64 will use IA32 for PEI and "
            "X64 for DXE. default is IA32,X64")
    def RetrieveCommandLineOptions(self, args):
        '''  Retrieve command line options from the argparser '''
        # TARGET_ARCH is a space-separated upper-case list derived from the CSV.
        shell_environment.GetBuildVars().SetValue("TARGET_ARCH"," ".join(args.build_arch.upper().split(",")), "From CmdLine")
        dsc = CommonPlatform.GetDscName(args.build_arch)
        shell_environment.GetBuildVars().SetValue("ACTIVE_PLATFORM", f"OvmfPkg/{dsc}", "From CmdLine")
    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot
    def GetPackagesPath(self):
        ''' Return a list of workspace relative paths that should be mapped as edk2 PackagesPath '''
        return ()
    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes
    def GetName(self):
        ''' Get the name of the repo, platform, or product being build '''
        ''' Used for naming the log file, among others '''
        # check the startup nsh flag and if set then rename the log file.
        # this helps in CI so we don't overwrite the build log since running
        # uses the stuart_build command.
        if(shell_environment.GetBuildVars().GetValue("MAKE_STARTUP_NSH", "FALSE") == "TRUE"):
            return "OvmfPkg_With_Run"
        return "OvmfPkg"
    def GetLoggingLevel(self, loggerType):
        ''' Get the logging level for a given type
        base == lowest logging level supported
        con == Screen logging
        txt == plain text file logging
        md == markdown file logging
        '''
        return logging.DEBUG
    def SetPlatformEnv(self):
        # Seed default build variables; later SetValue calls may override them.
        logging.debug("PlatformBuilder SetPlatformEnv")
        self.env.SetValue("PRODUCT_NAME", "OVMF", "Platform Hardcoded")
        self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false")
        self.env.SetValue("QEMU_HEADLESS", "FALSE", "Default to false")
        self.env.SetValue("QEMU_CPUHP_QUIRK", "FALSE", "Default to false")
        return 0
    def PlatformPreBuild(self):
        return 0
    def PlatformPostBuild(self):
        return 0
    def FlashRomImage(self):
        """Boot the freshly built firmware under QEMU as a smoke test."""
        VirtualDrive = os.path.join(self.env.GetValue("BUILD_OUTPUT_BASE"), "VirtualDrive")
        os.makedirs(VirtualDrive, exist_ok=True)
        OutputPath_FV = os.path.join(self.env.GetValue("BUILD_OUTPUT_BASE"), "FV")
        if (self.env.GetValue("QEMU_SKIP") and
            self.env.GetValue("QEMU_SKIP").upper() == "TRUE"):
            logging.info("skipping qemu boot test")
            return 0
        #
        # QEMU must be on the path
        #
        cmd = "qemu-system-x86_64"
        args = "-debugcon stdio" # write messages to stdio
        args += " -global isa-debugcon.iobase=0x402" # debug messages out thru virtual io port
        args += " -net none" # turn off network
        args += f" -drive file=fat:rw:{VirtualDrive},format=raw,media=disk" # Mount disk with startup.nsh
        if (self.env.GetValue("QEMU_HEADLESS").upper() == "TRUE"):
            args += " -display none" # no graphics
        if (self.env.GetBuildValue("SMM_REQUIRE") == "1"):
            # SMM builds need split CODE/VARS pflash images and a q35 machine.
            args += " -machine q35,smm=on" #,accel=(tcg|kvm)"
            #args += " -m ..."
            #args += " -smp ..."
            args += " -global driver=cfi.pflash01,property=secure,value=on"
            args += " -drive if=pflash,format=raw,unit=0,file=" + os.path.join(OutputPath_FV, "OVMF_CODE.fd") + ",readonly=on"
            args += " -drive if=pflash,format=raw,unit=1,file=" + os.path.join(OutputPath_FV, "OVMF_VARS.fd")
        else:
            args += " -pflash " + os.path.join(OutputPath_FV, "OVMF.fd") # path to firmware
        ###
        ### NOTE This is a temporary workaround to allow platform CI to cope with
        ###      a QEMU bug in the CPU hotplug code.  Once the CI environment has
        ###      been updated to carry a fixed version of QEMU, this can be
        ###      removed again
        ###
        ### Bugzilla: https://bugzilla.tianocore.org/show_bug.cgi?id=4250
        ###
        if (self.env.GetValue("QEMU_CPUHP_QUIRK").upper() == "TRUE"):
            args += " -fw_cfg name=opt/org.tianocore/X-Cpuhp-Bugcheck-Override,string=yes"
        if (self.env.GetValue("MAKE_STARTUP_NSH").upper() == "TRUE"):
            # Drop a startup.nsh that prints a marker and powers the VM off.
            f = open(os.path.join(VirtualDrive, "startup.nsh"), "w")
            f.write("BOOT SUCCESS !!! \n")
            ## add commands here
            f.write("reset -s\n")
            f.close()
        ret = RunCmd(cmd, args)
        if ret == 0xc0000005:
            #for some reason getting a c0000005 on successful return
            return 0
        return ret
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/PlatformBuildLib.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.
        ArchCsv: csv string containing all architectures to build
        '''
        # Xen ships a single platform description, so the requested
        # architecture CSV is intentionally ignored.
        return "OvmfXen.dsc"
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/XenBuild.py
|
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    '''Common, static settings shared by all stuart phases for this platform.'''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("IA32", "X64")
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        '''Map the requested architecture CSV onto the matching OVMF DSC name.
        ArchCsv: csv string containing all architectures to build
        '''
        requested = ArchCsv.upper().split(",")
        parts = ["OvmfPkg"]
        if "IA32" in requested:
            parts.append("Ia32")
        if "X64" in requested:
            parts.append("X64")
        parts.append(".dsc")
        return "".join(parts)
# Inject this platform's configuration into the shared build library
# (PlatformBuildLib reads CommonPlatform at module scope).
import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/OvmfPkg/PlatformCI/PlatformBuild.py
|
## @file
# Automate the process of building the various reset vector types
#
# Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import subprocess
import sys
PAGE_TABLE_2M = 'PageTable2M'
PAGE_TABLE_1G = 'PageTable1G'
FILE_FORMAT = '.raw'
ALL_RAW_FORMAT = '*' + FILE_FORMAT
IA32 = 'IA32'
X64 = 'X64'
# Pre-Define a Macros for Page Table
PAGE_TABLES = {
PAGE_TABLE_2M : "PAGE_TABLE_2M",
PAGE_TABLE_1G : "PAGE_TABLE_1G"
}
def RunCommand(commandLine):
    """Run an external command (sequence of args) and return its exit code."""
    return subprocess.call(commandLine)
# Check for all raw binaries and delete them
for root, dirs, files in os.walk('Bin'):
    for file in files:
        if file.endswith(FILE_FORMAT):
            os.remove(os.path.join(root, file))

# Assemble and fix up every (arch, debug-type, page-table) reset vector.
for arch in ('ia32', 'x64'):
    for debugType in (None, 'port80', 'serial'):
        for pageTable in PAGE_TABLES.keys():
            ret = True
            if arch.lower() == X64.lower():
                directory = os.path.join('Bin', X64, pageTable)
            else:
                directory = os.path.join('Bin', IA32)

            # output raw binary name with arch type
            fileName = 'ResetVector' + '.' + arch
            if debugType is not None:
                fileName += '.' + debugType
            fileName += FILE_FORMAT
            output = os.path.join(directory, fileName)

            # if the directory not exists then create it
            if not os.path.isdir(directory):
                os.makedirs(directory)

            # Prepare the command to execute the nasmb
            commandLine = (
                'nasm',
                '-D', 'ARCH_%s' % arch.upper(),
                '-D', 'DEBUG_%s' % str(debugType).upper(),
                '-D', PAGE_TABLES[pageTable].upper(),
                '-o', output,
                'Vtf0.nasmb',
                )
            print(f"Command : {' '.join(commandLine)}")

            try:
                ret = RunCommand(commandLine)
            except FileNotFoundError:
                print("NASM not found")
            except Exception as e:
                # BUGFIX: a bare ``except: pass`` swallowed every error
                # (including KeyboardInterrupt); report what happened and
                # let ``ret`` stay truthy so the failure path below runs.
                print(f"nasm invocation failed: {e}")
            if ret != 0:
                print(f"something went wrong while executing {commandLine[-1]}")
                # BUGFIX: ``sys.exit()`` exits with status 0, hiding the
                # failure from the caller; exit non-zero instead.
                sys.exit(1)
            print('\tASM\t' + output)

            commandLine = (
                'python',
                'Tools/FixupForRawSection.py',
                output,
                )
            print('\tFIXUP\t' + output)
            ret = RunCommand(commandLine)
            if ret != 0: sys.exit(ret)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/UefiCpuPkg/ResetVector/Vtf0/Build.py
|
## @file
# Apply fixup to VTF binary image for FFS Raw section
#
# Copyright (c) 2008 - 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import sys
filename = sys.argv[1]
d = open(sys.argv[1], 'rb').read()
c = ((len(d) + 4 + 7) & ~7) - 4
if c > len(d):
c -= len(d)
f = open(sys.argv[1], 'wb')
f.write(b'\x90' * c)
f.write(d)
f.close()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/UefiCpuPkg/ResetVector/Vtf0/Tools/FixupForRawSection.py
|
#
# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
import struct

import firmware_volume
import build_report
import system_table
# Reload external classes
# ('reload' is a Python 2 builtin here; it refreshes these helper modules so
# edits to them take effect when this script is re-run inside the same DS-5
# scripting session.)
reload(firmware_volume)
reload(build_report)
reload(system_table)
def readMem32(executionContext, address):
    """Read a little-endian 32-bit word from target memory at *address*.

    Uses the DS-5 memory service of *executionContext*; the final argument
    of read() selects a 32-bit access width on the target.
    """
    raw = executionContext.getMemoryService().read(address, 4, 32)
    (value,) = struct.unpack('<I', raw)
    return value
def dump_fv(ec, fv_base, fv_size):
    # Print every FFS file, each of its sections, and (when available) the
    # debug symbol file path of each section, for the main firmware volume.
    # NOTE(review): the fv_base/fv_size parameters are ignored - the FV range
    # is re-read from the PCDs of a global 'build' report object that is not
    # defined in this module; confirm the intended source of the FV range.
    fv = firmware_volume.FirmwareVolume(ec,
        int(build.PCDs['gArmTokenSpaceGuid']['PcdFvBaseAddress'][0],16),
        int(build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0],16))
    ffs = fv.get_next_ffs()
    while ffs != None:
        print "# %s" % ffs
        section = ffs.get_next_section()
        while section != None:
            print "\t%s" % section
            try:
                # Sections without debug info raise; skip them silently.
                print "\t\t- %s" % section.get_debug_filepath()
            except Exception:
                pass
            section = ffs.get_next_section(section)
        ffs = fv.get_next_ffs(ffs)
def dump_system_table(ec, mem_base, mem_size):
    """Locate the EFI System Table in [mem_base, mem_base+mem_size) and dump
    its Debug Info Table."""
    table = system_table.SystemTable(ec, mem_base, mem_size)
    dbg_table_base = table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
    dbg_table = system_table.DebugInfoTable(ec, dbg_table_base)
    dbg_table.dump()
def load_symbol_from_file(ec, filename, address, verbose = False):
    # Register the symbol file 'filename', loaded at 'address', with the
    # debugger's image service; retries once after unloading in case the
    # symbols were already registered.
    if verbose:
        print "Add symbols of %s at 0x%x" % (filename, address)
    try:
        ec.getImageService().addSymbols(filename, address)
    except:
        try:
            # We could get an exception if the symbols are already loaded
            ec.getImageService().unloadSymbols(filename)
            ec.getImageService().addSymbols(filename, address)
        except:
            # Best effort: a missing/unreadable symbol file only warns.
            print "Warning: not possible to load symbols from %s at 0x%x" % (filename, address)
def is_aarch64(ec):
    """Heuristically detect an AArch64 execution context.

    The 'X0' register only exists on AArch64; reading it raises on AArch32.
    """
    try:
        # Try to access a Aarch64 specific register
        ec.getRegisterService().getValue('X0')
        return True
    except:
        return False
class ArmPlatform:
    """Value object describing a platform memory layout.

    Attributes:
        sysmembase: base address of system memory (or None if unknown).
        sysmemsize: size of system memory in bytes (or None if unknown).
        fvs:        collection of firmware-volume (base, size) entries.
    """
    def __init__(self, sysmembase=None, sysmemsize=None, fvs=None):
        self.sysmembase = sysmembase
        self.sysmemsize = sysmemsize
        # Fix: the original used a mutable default argument (fvs={}), which
        # is shared across every instance created with the default. Keep the
        # same default value ({}) but create a fresh object per instance.
        self.fvs = {} if fvs is None else fvs
class ArmPlatformDebugger:
    """Drives UEFI symbol loading for an ARM target under the DS-5 debugger.

    The platform memory layout (system memory window plus firmware volumes)
    is taken either from an EDK2 build report or from explicit
    (type, base, size) region tuples.
    """

    # Lazily-created wrapper around the EFI System Table (set once DRAM is
    # accessible).
    system_table = None
    # Cache of FV base address -> firmware_volume.FirmwareVolume wrapper.
    # NOTE(review): both attributes are class-level, so they are shared by
    # every ArmPlatformDebugger instance - confirm that is intended.
    firmware_volumes = {}

    REGION_TYPE_SYSMEM = 1
    REGION_TYPE_ROM = 2
    REGION_TYPE_FV = 3

    def __init__(self, ec, report_log, regions, verbose = False):
        self.ec = ec
        self.verbose = verbose
        fvs = []
        sysmem_base = None
        sysmem_size = None
        if report_log and os.path.isfile(report_log):
            # Preferred path: read the layout from the EDK2 build report PCDs.
            try:
                self.build = build_report.BuildReport(report_log)
            except IOError:
                raise IOError(2, 'Report \'%s\' is not valid' % report_log)
            # Generate list of supported Firmware Volumes
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0],16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvBaseAddress'][0],16),int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0],16)))
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdSecureFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvSize'][0],16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvBaseAddress'][0],16),int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvSize'][0],16)))
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdHypFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvSize'][0],16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvBaseAddress'][0],16),int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvSize'][0],16)))
            sysmem_base = int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSystemMemoryBase'][0],16)
            sysmem_size = int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSystemMemorySize'][0],16)
        else:
            # Fallback path: layout supplied as explicit region tuples.
            for region in regions:
                if region[0] == ArmPlatformDebugger.REGION_TYPE_SYSMEM:
                    sysmem_base = region[1]
                    sysmem_size = region[2]
                elif region[0] == ArmPlatformDebugger.REGION_TYPE_FV:
                    fvs.append((region[1],region[2]))
                elif region[0] == ArmPlatformDebugger.REGION_TYPE_ROM:
                    # Scan the ROM window for FV header signatures every 4MB.
                    # NOTE(review): 'FirmwareVolume' is not defined in this
                    # module (the class lives in the firmware_volume module),
                    # so this branch would raise NameError as written -
                    # confirm it should be firmware_volume.FirmwareVolume.
                    for base in xrange(region[1], region[1] + region[2], 0x400000):
                        signature = struct.unpack("cccc", self.ec.getMemoryService().read(base, 4, 32))
                        if signature == FirmwareVolume.CONST_FV_SIGNATURE:
                            fvs.append((base,0))
                else:
                    print "Region type '%d' Not Supported" % region[0]
        self.platform = ArmPlatform(sysmem_base, sysmem_size, fvs)

    def in_sysmem(self, addr):
        # True when 'addr' lies inside the known system-memory window.
        return (self.platform.sysmembase is not None) and (self.platform.sysmembase <= addr) and (addr < self.platform.sysmembase + self.platform.sysmemsize)

    def in_fv(self, addr):
        # True when 'addr' lies inside any known firmware volume.
        return (self.get_fv_at(addr) != None)

    def get_fv_at(self, addr):
        # Return the (base, size) tuple of the FV containing 'addr', or None.
        for fv in self.platform.fvs:
            if (fv[0] <= addr) and (addr < fv[0] + fv[1]):
                return fv
        return None

    def load_current_symbols(self):
        # Load symbols for every frame of the current call stack, choosing
        # the lookup mechanism (FV scan vs Debug Info Table) from where the
        # current PC lives.
        pc = int(self.ec.getRegisterService().getValue('PC')) & 0xFFFFFFFF
        if self.in_fv(pc):
            debug_infos = []
            (fv_base, fv_size) = self.get_fv_at(pc)
            if self.firmware_volumes.has_key(fv_base) == False:
                self.firmware_volumes[fv_base] = firmware_volume.FirmwareVolume(self.ec, fv_base, fv_size)
            stack_frame = self.ec.getTopLevelStackFrame()
            info = self.firmware_volumes[fv_base].load_symbols_at(int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF, self.verbose)
            debug_infos.append(info)
            while stack_frame.next() is not None:
                stack_frame = stack_frame.next()
                # Stack frame attached to 'PC'
                pc = int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF
                # Check if the symbols for this stack frame have already been loaded
                found = False
                for debug_info in debug_infos:
                    if (pc >= debug_info[0]) and (pc < debug_info[0] + debug_info[1]):
                        found = True
                if found == False:
                    info = self.firmware_volumes[fv_base].load_symbols_at(pc)
                    debug_infos.append(info)
                #self.firmware_volumes[fv_base].load_symbols_at(pc)
        elif self.in_sysmem(pc):
            debug_infos = []
            if self.system_table is None:
                # Find the System Table
                self.system_table = system_table.SystemTable(self.ec, self.platform.sysmembase, self.platform.sysmemsize)
                # Find the Debug Info Table
                debug_info_table_base = self.system_table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
                self.debug_info_table = system_table.DebugInfoTable(self.ec, debug_info_table_base)
            stack_frame = self.ec.getTopLevelStackFrame()
            info = self.debug_info_table.load_symbols_at(int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF, self.verbose)
            debug_infos.append(info)
            while stack_frame.next() is not None:
                stack_frame = stack_frame.next()
                # Stack frame attached to 'PC'
                pc = int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF
                # Check if the symbols for this stack frame have already been loaded
                found = False
                for debug_info in debug_infos:
                    if (pc >= debug_info[0]) and (pc < debug_info[0] + debug_info[1]):
                        found = True
                if found == False:
                    try:
                        info = self.debug_info_table.load_symbols_at(pc)
                        debug_infos.append(info)
                    except:
                        pass
                #self.debug_info_table.load_symbols_at(pc)
        else:
            raise Exception('ArmPlatformDebugger', "Not supported region")

    def load_all_symbols(self):
        # Load the symbols of every module in the FVs and, when DRAM is up,
        # every module registered in the Debug Info Table.
        # Load all the XIP symbols attached to the Firmware Volume
        for (fv_base, fv_size) in self.platform.fvs:
            if self.firmware_volumes.has_key(fv_base) == False:
                self.firmware_volumes[fv_base] = firmware_volume.FirmwareVolume(self.ec, fv_base, fv_size)
            self.firmware_volumes[fv_base].load_all_symbols(self.verbose)
        try:
            # Load all symbols of module loaded into System Memory
            if self.system_table is None:
                # Find the System Table
                self.system_table = system_table.SystemTable(self.ec, self.platform.sysmembase, self.platform.sysmemsize)
                # Find the Debug Info Table
                debug_info_table_base = self.system_table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
                self.debug_info_table = system_table.DebugInfoTable(self.ec, debug_info_table_base)
            self.debug_info_table.load_all_symbols(self.verbose)
        except:
            # Debugger exception could be excepted if DRAM has not been initialized or if we have not started to run from DRAM yet
            print "Note: no symbols have been found in System Memory (possible cause: the UEFI permanent memory has not been installed yet)"
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/edk2_debugger.py
|
#
# Copyright (c) 2021, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from arm_ds.debugger_v1 import DebugException
import subprocess, os, edk2_debugger, re
def get_module_name(line):
    """Return the module base name (file name without its extension) from a
    console 'add-symbol-file' line; the path is the second field."""
    module_path = line.rsplit(' ')[1]
    file_name = os.path.basename(module_path)
    stem, _ext = os.path.splitext(file_name)
    return stem
def get_module_path(line):
    """Return the on-disk module path (second space-separated field) from a
    console 'add-symbol-file' line."""
    fields = line.rsplit(' ')
    return fields[1]
def get_module_entrypoint(list, module_name):
    """Return module_name's entry-point address string from the console
    'EntryPoint=...' lines in *list*, or None when the module is absent.

    A whole-word regex match guards against one module name being a
    substring of another.
    """
    matches = [entry for entry in list
               if module_name in entry and re.search(r'\b' + module_name + r'\b', entry)]
    if not matches:
        # Module was not loaded using DxeDispatcher or PeiDispatcher. It is a
        # SEC module; symbols for these are loaded from the FV, not from the
        # console log.
        return None
    # Fifth space-separated field is 'EntryPoint=0x...'; keep the address.
    entrypoint_token = matches[0].rsplit(' ')[4]
    return entrypoint_token.rsplit('=')[1]
def load_symbol_from_console(ec, console_file, objdump, verbose):
    # Parse a captured UEFI console log and load debugger symbols for every
    # module it reports, deriving each module's load address from its
    # console 'EntryPoint=' value and the _ModuleEntryPoint offset that
    # objdump reports inside the image.
    if objdump is None:
        print "Error: A path to objdump tool is not specified, but -i parameter is provided"
    elif not os.path.exists(objdump):
        print "Error: Provided path to objdump is invalid: %s" % objdump
    elif not os.path.exists(console_file):
        print "Error: UEFI console file is not found: %s" % console_file
    else:
        full_list = open(console_file).read().splitlines()
        # Lines carrying runtime entry-point addresses ('... EntryPoint=0x...').
        efi_list = [i for i in full_list if "EntryPoint=" in i]
        # dict.fromkeys() is used here purely to de-duplicate the lines.
        full_list = dict.fromkeys(full_list)
        full_list = [i for i in full_list if "add-symbol-file" in i]
        module_dict = {}
        for line in full_list:
            name = get_module_name(line)
            # name -> (symbol file path, entry-point string or None)
            module_dict[name] = (get_module_path(line), get_module_entrypoint(efi_list, name))
        for module in module_dict:
            entrypoint_addr = module_dict[module][1]
            if entrypoint_addr is not None:
                path = module_dict[module][0]
                if not os.path.exists(path):
                    print "Module not found: " + path + ". Skipping..."
                    continue
                # Find _ModuleEntryPoint's offset inside the image, then
                # load address = runtime entry point - entry-point offset.
                sp = subprocess.Popen([objdump,'-S', path], stdout = subprocess.PIPE)
                objdump_out = sp.stdout.readlines()
                entrypoint_record = [i for i in objdump_out if "<_ModuleEntryPoint>" in i]
                entrypoint_offset = entrypoint_record[0].split(' ')[0]
                load_addr = int(entrypoint_addr, 16) - int(entrypoint_offset, 16)
                edk2_debugger.load_symbol_from_file(ec, path, load_addr, verbose)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/console_loader.py
|
#
# Copyright (c) 2011-2021, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
from console_loader import load_symbol_from_console
import re, sys, getopt
import edk2_debugger
# Reload external classes
# ('reload' is a Python 2 builtin; refreshes edk2_debugger when this script
# is re-run inside the same DS-5 scripting session.)
reload(edk2_debugger)
def usage():
    # Print the command-line help for this DS-5 symbol-loading script.
    print "-v,--verbose"
    print "-a,--all: Load all symbols"
    print "-l,--report=: Filename for the EDK2 report log"
    print "-m,--sysmem=(base,size): System Memory region"
    print "-f,--fv=(base,size): Firmware region"
    print "-r,--rom=(base,size): ROM region"
    print "-i,--input=: Filename for the EDK2 console output"
    print "-o,--objdump=: Path to the objdump tool"
# Script options (populated from the command line below).
verbose = False
load_all = False
report_file = None
input_file = None
objdump = None
# List of (region_type, base, size) tuples for ArmPlatformDebugger.
regions = []

# NOTE(review): the short-option string declares 'r:' but not 'l', even
# though '-l,--report' is handled below - confirm which spelling is intended.
opts,args = getopt.getopt(sys.argv[1:], "hvar:i:o:vm:vr:vf:v", ["help","verbose","all","report=","sysmem=","rom=","fv=","input=","objdump="])
if (opts is None) or (not opts):
    # No arguments: fall back to the default build-report location.
    report_file = '../../../report.log'
else:
    # '(base,size)' tuples for memory regions; bare value for base-only.
    region_reg = re.compile("\((.*),(.*)\)")
    base_reg = re.compile("(.*)")
    for o,a in opts:
        region_type = None
        regex = None
        m = None
        if o in ("-h","--help"):
            usage()
            sys.exit()
        elif o in ("-v","--verbose"):
            verbose = True
        elif o in ("-a","--all"):
            load_all = True
        elif o in ("-l","--report"):
            report_file = a
        elif o in ("-m","--sysmem"):
            region_type = edk2_debugger.ArmPlatformDebugger.REGION_TYPE_SYSMEM
            regex = region_reg
        elif o in ("-f","--fv"):
            region_type = edk2_debugger.ArmPlatformDebugger.REGION_TYPE_FV
            regex = region_reg
        elif o in ("-r","--rom"):
            region_type = edk2_debugger.ArmPlatformDebugger.REGION_TYPE_ROM
            regex = region_reg
        elif o in ("-i","--input"):
            input_file = a
        elif o in ("-o", "--objdump"):
            objdump = a
        else:
            assert False, "Unhandled option (%s)" % o
        # Region-style options: parse '(base,size)' (int(x, 0) accepts 0x...).
        if region_type:
            m = regex.match(a)
            if m:
                if regex.groups == 1:
                    regions.append((region_type,int(m.group(1),0),0))
                else:
                    regions.append((region_type,int(m.group(1),0),int(m.group(2),0)))
            else:
                if regex.groups == 1:
                    raise Exception('cmd_load_symbols', "Expect a base address")
                else:
                    raise Exception('cmd_load_symbols', "Expect a region format as (base,size)")
# Debugger object for accessing the debugger
debugger = Debugger()

# Initialisation commands
ec = debugger.getCurrentExecutionContext()
ec.getExecutionService().stop()
# in case the execution context reference is out of date
ec = debugger.getCurrentExecutionContext()

try:
    armplatform_debugger = edk2_debugger.ArmPlatformDebugger(ec, report_file, regions, verbose)
    if load_all:
        armplatform_debugger.load_all_symbols()
    else:
        armplatform_debugger.load_current_symbols()
# Python 2 'except E, v' syntax; errors are reported but not fatal so the
# console-log fallback below still runs.
except IOError, (ErrorNumber, ErrorMessage):
    print "Error: %s" % ErrorMessage
except Exception, (ErrorClass, ErrorMessage):
    print "Error(%s): %s" % (ErrorClass, ErrorMessage)
except DebugException, de:
    print "DebugError: %s" % (de.getMessage())

# Optionally augment with symbols parsed from a captured UEFI console log.
if input_file:
    load_symbol_from_console(ec, input_file, objdump, verbose)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/cmd_load_symbols.py
|
#!/usr/bin/python
#
# Copyright (c) 2014, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import getopt
import operator
import os
import pickle
import sys
from sys import argv
from cStringIO import StringIO
# module name -> {'start': addr, 'end': addr} text-section range
modules = {}
# function name -> module name -> {'start','end','cycles','count'}
functions = {}
# (addr & ~0xFFF) -> function name -> module name -> entry; page-keyed
# lookup table built later to speed up <Unknown>-sample attribution
functions_addr = {}
def usage():
    # Print the command-line help for this trace-profiling script.
    print "-t,--trace: Location of the Trace file"
    print "-s,--symbols: Location of the symbols and modules"
def get_address_from_string(address):
    """Convert a DS-5 address string, optionally prefixed with 'S:', 'N:',
    'EL2:' or 'EL1:', into an integer.

    Mirrors the original chained strip() calls exactly: each strips that
    *set* of characters from both ends, applied in this order.
    """
    for marker in ("S:", "N:", "EL2:", "EL1:"):
        address = address.strip(marker)
    return int(address, 16)
def get_module_from_addr(modules, addr):
    """Return the name of the module whose address range contains *addr*,
    or None if no module matches.

    The range test is inclusive at both ends, matching the stored
    'start'/'end' convention.
    """
    for name, info in modules.items():
        if info['start'] <= addr <= info['end']:
            return name
    return None
def add_cycles_to_function(functions, func_name, addr, cycles):
    # Attribute 'cycles' trace cycles to the function containing 'addr'.
    # Returns the (function, module) key pair that was credited, or None.
    # Keeps a one-entry cache in function attributes (prev_*) because trace
    # samples usually hit the same function repeatedly.
    if func_name != "<Unknown>":
        # Check if we are still in the previous function
        if add_cycles_to_function.prev_func_name == func_name:
            add_cycles_to_function.prev_entry['cycles'] += cycles
            return (add_cycles_to_function.prev_func_name, add_cycles_to_function.prev_module_name)
        if func_name in functions.keys():
            for module_name, module_value in functions[func_name].iteritems():
                if (module_value['start'] <= addr) and (addr < module_value['end']):
                    module_value['cycles'] += cycles
                    add_cycles_to_function.prev_func_name = func_name
                    add_cycles_to_function.prev_module_name = module_name
                    add_cycles_to_function.prev_entry = module_value
                    return (func_name, module_name)
                elif (module_value['end'] == 0):
                    # end == 0 marks an entry created by the workaround
                    # below; accept it for any address.
                    module_value['cycles'] += cycles
                    add_cycles_to_function.prev_func_name = func_name
                    add_cycles_to_function.prev_module_name = module_name
                    add_cycles_to_function.prev_entry = module_value
                    return (func_name, module_name)
        # Workaround to fix the 'info func' limitation that does not expose the 'static' function
        module_name = get_module_from_addr(modules, addr)
        functions[func_name] = {}
        functions[func_name][module_name] = {}
        functions[func_name][module_name]['start'] = 0
        functions[func_name][module_name]['end'] = 0
        functions[func_name][module_name]['cycles'] = cycles
        functions[func_name][module_name]['count'] = 0
        add_cycles_to_function.prev_func_name = func_name
        add_cycles_to_function.prev_module_name = module_name
        add_cycles_to_function.prev_entry = functions[func_name][module_name]
        return (func_name, module_name)
    else:
        # Check if we are still in the previous function
        if (add_cycles_to_function.prev_entry is not None) and (add_cycles_to_function.prev_entry['start'] <= addr) and (addr < add_cycles_to_function.prev_entry['end']):
            add_cycles_to_function.prev_entry['cycles'] += cycles
            return (add_cycles_to_function.prev_func_name, add_cycles_to_function.prev_module_name)
        # Generate the key for the given address
        key = addr & ~0x0FFF
        if key not in functions_addr.keys():
            # Page not covered by any known function: credit the global
            # 'Unknown' bucket.
            if 'Unknown' not in functions.keys():
                functions['Unknown'] = {}
            if 'Unknown' not in functions['Unknown'].keys():
                functions['Unknown']['Unknown'] = {}
                functions['Unknown']['Unknown']['cycles'] = 0
                functions['Unknown']['Unknown']['count'] = 0
            functions['Unknown']['Unknown']['cycles'] += cycles
            add_cycles_to_function.prev_func_name = None
            return None
        for func_key, module in functions_addr[key].iteritems():
            for module_key, module_value in module.iteritems():
                if (module_value['start'] <= addr) and (addr < module_value['end']):
                    module_value['cycles'] += cycles
                    # In case of <Unknown> we prefer to fallback on the direct search
                    add_cycles_to_function.prev_func_name = func_key
                    add_cycles_to_function.prev_module_name = module_key
                    add_cycles_to_function.prev_entry = module_value
                    return (func_key, module_key)
        print "Warning: Function %s @ 0x%x not found" % (func_name, addr)
        add_cycles_to_function.prev_func_name = None
        return None

# Static variables for the previous function (one-entry attribution cache)
add_cycles_to_function.prev_func_name = None
add_cycles_to_function.prev_entry = None
def trace_read():
    # Read one line from the module-global 'trace' file, accumulating the
    # number of bytes consumed in 'trace_process' for progress reporting.
    global trace_process
    line = trace.readline()
    trace_process += len(line)
    return line
#
# Parse arguments
#
trace_name = None
symbols_file = None

opts,args = getopt.getopt(sys.argv[1:], "ht:vs:v", ["help","trace=","symbols="])
if (opts is None) or (not opts):
    usage()
    sys.exit()
for o,a in opts:
    if o in ("-h","--help"):
        usage()
        sys.exit()
    elif o in ("-t","--trace"):
        trace_name = a
    elif o in ("-s","--symbols"):
        symbols_file = a
    else:
        assert False, "Unhandled option (%s)" % o
#
# We try first to see if we run the script from DS-5
#
# Inside DS-5: harvest module/function address ranges from the debugger's
# 'info file' / 'info func' output (and optionally cache them to a pickle).
# Outside DS-5: the import of arm_ds fails and the except path loads the
# ranges from a previously-saved pickle instead.
try:
    from arm_ds.debugger_v1 import Debugger
    from arm_ds.debugger_v1 import DebugException

    # Debugger object for accessing the debugger
    debugger = Debugger()

    # Initialisation commands
    ec = debugger.getExecutionContext(0)
    ec.getExecutionService().stop()
    ec.getExecutionService().waitForStop()
    # in case the execution context reference is out of date
    ec = debugger.getExecutionContext(0)

    #
    # Get the module name and their memory range
    #
    info_file = ec.executeDSCommand("info file")
    info_file_str = StringIO(info_file)

    line = info_file_str.readline().strip('\n')
    while line != '':
        if ("Symbols from" in line):
            # Get the module name from the line 'Symbols from "/home/...."'
            module_name = line.split("\"")[1].split("/")[-1]
            modules[module_name] = {}
            # Look for the text section
            line = info_file_str.readline().strip('\n')
            while (line != '') and ("Symbols from" not in line):
                if ("ER_RO" in line):
                    modules[module_name]['start'] = get_address_from_string(line.split()[0])
                    modules[module_name]['end'] = get_address_from_string(line.split()[2])
                    line = info_file_str.readline().strip('\n')
                    break;
                if (".text" in line):
                    modules[module_name]['start'] = get_address_from_string(line.split()[0])
                    modules[module_name]['end'] = get_address_from_string(line.split()[2])
                    line = info_file_str.readline().strip('\n')
                    break;
                line = info_file_str.readline().strip('\n')
        line = info_file_str.readline().strip('\n')

    #
    # Get the function name and their memory range
    #
    info_func = ec.executeDSCommand("info func")
    info_func_str = StringIO(info_func)

    # Skip the first line 'Low-level symbols ...'
    line = info_func_str.readline().strip('\n')
    func_prev = None
    while line != '':
        # We ignore all the functions after 'Functions in'
        if ("Functions in " in line):
            line = info_func_str.readline().strip('\n')
            while line != '':
                line = info_func_str.readline().strip('\n')
            line = info_func_str.readline().strip('\n')
            continue
        if ("Low-level symbols" in line):
            # We need to fixup the last function of the module
            if func_prev is not None:
                func_prev['end'] = modules[module_name]['end']
                func_prev = None
            line = info_func_str.readline().strip('\n')
            continue
        func_name = line.split()[1]
        func_start = get_address_from_string(line.split()[0])
        module_name = get_module_from_addr(modules, func_start)
        if func_name not in functions.keys():
            functions[func_name] = {}
        functions[func_name][module_name] = {}
        functions[func_name][module_name]['start'] = func_start
        functions[func_name][module_name]['cycles'] = 0
        functions[func_name][module_name]['count'] = 0
        # Set the end address of the previous function
        # (a function's end is the next function's start)
        if func_prev is not None:
            func_prev['end'] = func_start
        func_prev = functions[func_name][module_name]
        line = info_func_str.readline().strip('\n')
    # Fixup the last function
    func_prev['end'] = modules[module_name]['end']

    if symbols_file is not None:
        pickle.dump((modules, functions), open(symbols_file, "w"))
except:
    # Not running under DS-5 (or harvesting failed): use the cached symbols.
    if symbols_file is None:
        print "Error: Symbols file is required when run out of ARM DS-5"
        sys.exit()
    (modules, functions) = pickle.load(open(symbols_file, "r"))
#
# Build optimized table for the <Unknown> functions
#
# Index every function entry by the 4KB page of its start address so
# <Unknown> trace samples can be resolved without a full linear search.
functions_addr = {}
for func_key, module in functions.iteritems():
    for module_key, module_value in module.iteritems():
        key = module_value['start'] & ~0x0FFF
        if key not in functions_addr.keys():
            functions_addr[key] = {}
        if func_key not in functions_addr[key].keys():
            functions_addr[key][func_key] = {}
        functions_addr[key][func_key][module_key] = module_value
#
# Process the trace file
#
if trace_name is None:
    sys.exit()

trace = open(trace_name, "r")
trace_size = os.path.getsize(trace_name)
trace_process = 0

# Get the column names from the first line
columns = trace_read().split()
column_addr = columns.index('Address')
column_cycles = columns.index('Cycles')
column_function = columns.index('Function')

line = trace_read()
i = 0
prev_callee = None
while line:
    try:
        func_name = line.split('\t')[column_function].strip()
        address = get_address_from_string(line.split('\t')[column_addr])
        cycles = int(line.split('\t')[column_cycles])
        callee = add_cycles_to_function(functions, func_name, address, cycles)
        # A change of attributed (function, module) means the previous
        # callee completed one invocation: bump its call count.
        if (prev_callee != None) and (prev_callee != callee):
            functions[prev_callee[0]][prev_callee[1]]['count'] += 1
        prev_callee = callee
    except ValueError:
        # Malformed trace rows (non-numeric cycles/address) are skipped.
        pass
    line = trace_read()
    # Progress indicator every million processed lines.
    if ((i % 1000000) == 0) and (i != 0):
        percent = (trace_process * 100.00) / trace_size
        print "Processing file ... (%.2f %%)" % (percent)
    i = i + 1

# Fixup the last callee
functions[prev_callee[0]][prev_callee[1]]['count'] += 1
#
# Process results
#
# Aggregate cycle/call counts per (module, function) and per function name
# across modules, then print the top-20 of each ranking.
functions_cycles = {}
all_functions_cycles = {}
total_cycles = 0

for func_key, module in functions.iteritems():
    for module_key, module_value in module.iteritems():
        key = "%s/%s" % (module_key, func_key)
        functions_cycles[key] = (module_value['cycles'], module_value['count'])
        total_cycles += module_value['cycles']
        if func_key not in all_functions_cycles.keys():
            all_functions_cycles[func_key] = (module_value['cycles'], module_value['count'])
        else:
            # Element-wise sum of the (cycles, count) tuples.
            all_functions_cycles[func_key] = tuple(map(sum, zip(all_functions_cycles[func_key], (module_value['cycles'], module_value['count']))))

sorted_functions_cycles = sorted(functions_cycles.iteritems(), key=operator.itemgetter(1), reverse = True)
sorted_all_functions_cycles = sorted(all_functions_cycles.items(), key=operator.itemgetter(1), reverse = True)

print
print "----"
for (key,value) in sorted_functions_cycles[:20]:
    if value[0] != 0:
        print "%s (cycles: %d - %d%%, count: %d)" % (key, value[0], (value[0] * 100) / total_cycles, value[1])
    else:
        # Entries are sorted, so the first zero ends the useful output.
        break;
print "----"
for (key,value) in sorted_all_functions_cycles[:20]:
    if value[0] != 0:
        print "%s (cycles: %d - %d%%, count: %d)" % (key, value[0], (value[0] * 100) / total_cycles, value[1])
    else:
        break;
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/profile.py
|
#
# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from arm_ds.debugger_v1 import DebugException
import struct
import string
import edk2_debugger
class EfiFileSection(object):
    """One section of an FFS file, read from target memory.

    Wraps the 4-byte EFI common section header located at *base*; all
    accesses go through the DS-5 memory service of *ec*.
    """
    EFI_SECTION_PE32 = 0x10
    EFI_SECTION_PIC = 0x11
    EFI_SECTION_TE = 0x12

    EFI_IMAGE_DEBUG_TYPE_CODEVIEW = 0x2

    SIZEOF_EFI_FFS_FILE_HEADER = 0x28

    def __init__(self, ec, base):
        # base: address of the section header in target memory.
        self.base = base
        self.ec = ec

    def __str__(self):
        return "FileSection(type:0x%X, size:0x%x)" % (self.get_type(), self.get_size())

    def get_base(self):
        return self.base

    def get_type(self):
        # Section type is the byte at offset 3 of the common section header.
        return struct.unpack("B", self.ec.getMemoryService().read(self.base + 0x3, 1, 8))[0]

    def get_size(self):
        # 24-bit section size stored in the first 3 bytes of the header.
        return (struct.unpack("<I", self.ec.getMemoryService().read(self.base, 4, 32))[0] & 0x00ffffff)

    def get_debug_filepath(self):
        """Return the debug symbol file path embedded in this section's image.

        Raises for section types that carry no executable image.
        """
        # Fix: the original called EfiSectionTE(self, ec, ...), passing this
        # object as the execution context and relying on an undefined global
        # 'ec'. EfiSectionTE/EfiSectionPE32 take (ec, base) - pass self.ec.
        # The image data starts after the 4-byte common section header.
        section_type = self.get_type()
        if section_type == EfiFileSection.EFI_SECTION_TE:
            section = EfiSectionTE(self.ec, self.base + 0x4)
        elif section_type == EfiFileSection.EFI_SECTION_PE32:
            section = EfiSectionPE32(self.ec, self.base + 0x4)
        else:
            raise Exception("EfiFileSection", "No debug section")
        return section.get_debug_filepath()
class EfiSectionTE:
    # Debug-path extraction for a TE (Terse Executable) image. TE images
    # have most of the PE header stripped, so RVAs read from the headers
    # must be rebased by the stripped size.
    SIZEOF_EFI_TE_IMAGE_HEADER = 0x28
    EFI_TE_IMAGE_SIGNATURE = ('V','Z')

    def __init__(self, ec, base_te):
        self.ec = ec
        self.base_te = int(base_te)
        # struct.unpack("cc", ...) yields a 2-tuple of single characters
        # (Python 2), so it compares equal to the ('V','Z') constant.
        te_sig = struct.unpack("cc", self.ec.getMemoryService().read(self.base_te, 2, 32))
        if te_sig != EfiSectionTE.EFI_TE_IMAGE_SIGNATURE:
            raise Exception("EfiFileSectionTE","TE Signature incorrect")

    def get_debug_filepath(self):
        # StrippedSize field at offset 6 of the TE header.
        stripped_size = struct.unpack("<H", self.ec.getMemoryService().read(self.base_te + 0x6, 2, 32))[0]
        stripped_size -= EfiSectionTE.SIZEOF_EFI_TE_IMAGE_HEADER

        debug_dir_entry_rva = self.ec.getMemoryService().readMemory32(self.base_te + 0x20)
        if debug_dir_entry_rva == 0:
            raise Exception("EfiFileSectionTE","No debug directory for image")
        debug_dir_entry_rva -= stripped_size

        debug_type = self.ec.getMemoryService().readMemory32(self.base_te + debug_dir_entry_rva + 0xC)
        if (debug_type != 0xdf) and (debug_type != EfiFileSection.EFI_IMAGE_DEBUG_TYPE_CODEVIEW):
            raise Exception("EfiFileSectionTE","Debug type is not dwarf")

        debug_rva = self.ec.getMemoryService().readMemory32(self.base_te + debug_dir_entry_rva + 0x14)
        debug_rva -= stripped_size

        # NOTE(review): dwarf_sig is a 4-tuple from struct.unpack, so the
        # comparison against the integer 0x66727764 can never be equal; only
        # the NB10 tuple comparison can match here. Confirm the intended
        # encoding of the 'dwrf' signature check.
        dwarf_sig = struct.unpack("cccc", self.ec.getMemoryService().read(self.base_te + debug_rva, 4, 32))
        if (dwarf_sig != 0x66727764) and (dwarf_sig != FirmwareFile.CONST_NB10_SIGNATURE):
            raise Exception("EfiFileSectionTE","Dwarf debug signature not found")

        if dwarf_sig == 0x66727764:
            filename = self.base_te + debug_rva + 0xc
        else:
            filename = self.base_te + debug_rva + 0x10
        # Read up to 400 bytes and cut at the first NUL terminator.
        filename = struct.unpack("400s", self.ec.getMemoryService().read(filename, 400, 32))[0]
        return filename[0:string.find(filename,'\0')]

    def get_debug_elfbase(self):
        # The in-memory image is the stripped file, so the original link
        # base is the TE base minus the stripped header size.
        stripped_size = struct.unpack("<H", self.ec.getMemoryService().read(self.base_te + 0x6, 2, 32))[0]
        stripped_size -= EfiSectionTE.SIZEOF_EFI_TE_IMAGE_HEADER
        return self.base_te - stripped_size
class EfiSectionPE32:
    # Debug-path extraction for a PE32 image section.
    def __init__(self, ec, base_pe32):
        self.ec = ec
        self.base_pe32 = base_pe32

    def get_debug_filepath(self):
        # Offset from dos hdr to PE file hdr
        file_header_offset = self.ec.getMemoryService().readMemory32(self.base_pe32 + 0x3C)

        # Offset to debug dir in PE hdrs
        debug_dir_entry_rva = self.ec.getMemoryService().readMemory32(self.base_pe32 + file_header_offset + 0xA8)
        if debug_dir_entry_rva == 0:
            raise Exception("EfiFileSectionPE32","No Debug Directory")

        debug_type = self.ec.getMemoryService().readMemory32(self.base_pe32 + debug_dir_entry_rva + 0xC)
        if (debug_type != 0xdf) and (debug_type != EfiFileSection.EFI_IMAGE_DEBUG_TYPE_CODEVIEW):
            raise Exception("EfiFileSectionPE32","Debug type is not dwarf")

        debug_rva = self.ec.getMemoryService().readMemory32(self.base_pe32 + debug_dir_entry_rva + 0x14)

        # NOTE(review): the read address is passed through str() here (unlike
        # the TE variant) and dwarf_sig is a tuple compared against an int -
        # only the NB10 tuple comparison can match. Confirm intent.
        dwarf_sig = struct.unpack("cccc", self.ec.getMemoryService().read(str(self.base_pe32 + debug_rva), 4, 32))
        if (dwarf_sig != 0x66727764) and (dwarf_sig != FirmwareFile.CONST_NB10_SIGNATURE):
            raise Exception("EfiFileSectionPE32","Dwarf debug signature not found")

        if dwarf_sig == 0x66727764:
            filename = self.base_pe32 + debug_rva + 0xc
        else:
            filename = self.base_pe32 + debug_rva + 0x10
        # Read up to 400 bytes and cut at the first NUL terminator.
        filename = struct.unpack("400s", self.ec.getMemoryService().read(str(filename), 400, 32))[0]
        return filename[0:string.find(filename,'\0')]

    def get_debug_elfbase(self):
        # PE32 images are loaded whole, so the image base is the ELF base.
        return self.base_pe32
class EfiSectionPE64:
    # Debug-path extraction for a PE32+ (64-bit) image section; identical to
    # EfiSectionPE32 except for the debug-directory offset (0xB8) inside
    # EFI_IMAGE_NT_HEADERS64.
    def __init__(self, ec, base_pe64):
        self.ec = ec
        self.base_pe64 = base_pe64

    def get_debug_filepath(self):
        # Offset from dos hdr to PE file hdr (EFI_IMAGE_NT_HEADERS64)
        file_header_offset = self.ec.getMemoryService().readMemory32(self.base_pe64 + 0x3C)

        # Offset to debug dir in PE hdrs
        debug_dir_entry_rva = self.ec.getMemoryService().readMemory32(self.base_pe64 + file_header_offset + 0xB8)
        if debug_dir_entry_rva == 0:
            raise Exception("EfiFileSectionPE64","No Debug Directory")

        debug_type = self.ec.getMemoryService().readMemory32(self.base_pe64 + debug_dir_entry_rva + 0xC)
        if (debug_type != 0xdf) and (debug_type != EfiFileSection.EFI_IMAGE_DEBUG_TYPE_CODEVIEW):
            raise Exception("EfiFileSectionPE64","Debug type is not dwarf")

        debug_rva = self.ec.getMemoryService().readMemory32(self.base_pe64 + debug_dir_entry_rva + 0x14)

        # NOTE(review): same tuple-vs-int signature comparison caveat as the
        # PE32/TE variants - only the NB10 tuple comparison can match.
        dwarf_sig = struct.unpack("cccc", self.ec.getMemoryService().read(str(self.base_pe64 + debug_rva), 4, 32))
        if (dwarf_sig != 0x66727764) and (dwarf_sig != FirmwareFile.CONST_NB10_SIGNATURE):
            raise Exception("EfiFileSectionPE64","Dwarf debug signature not found")

        if dwarf_sig == 0x66727764:
            filename = self.base_pe64 + debug_rva + 0xc
        else:
            filename = self.base_pe64 + debug_rva + 0x10
        # Read up to 400 bytes and cut at the first NUL terminator.
        filename = struct.unpack("400s", self.ec.getMemoryService().read(str(filename), 400, 32))[0]
        return filename[0:string.find(filename,'\0')]

    def get_debug_elfbase(self):
        return self.base_pe64
class FirmwareFile:
    # One FFS file inside a FirmwareVolume, read from target memory through
    # the DS-5 memory service.
    EFI_FV_FILETYPE_RAW = 0x01
    EFI_FV_FILETYPE_FREEFORM = 0x02
    EFI_FV_FILETYPE_SECURITY_CORE = 0x03
    EFI_FV_FILETYPE_PEI_CORE = 0x04
    EFI_FV_FILETYPE_DXE_CORE = 0x05
    EFI_FV_FILETYPE_PEIM = 0x06
    EFI_FV_FILETYPE_DRIVER = 0x07
    EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x08
    EFI_FV_FILETYPE_APPLICATION = 0x09
    EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0x0B
    EFI_FV_FILETYPE_FFS_MIN = 0xF0

    CONST_NB10_SIGNATURE = ('N','B','1','0')

    def __init__(self, fv, base, ec):
        # fv: owning FirmwareVolume (needed for erase-polarity handling).
        self.fv = fv
        self.base = base
        self.ec = ec

    def __str__(self):
        return "FFS(state:0x%x, type:0x%X, size:0x%x)" % (self.get_state(), self.get_type(), self.get_size())

    def get_base(self):
        return self.base

    def get_size(self):
        # 24-bit file size at header offset 0x14.
        size = (self.ec.getMemoryService().readMemory32(self.base + 0x14) & 0x00ffffff)
        # Occupied size is the size considering the alignment
        return size + ((0x8 - (size & 0x7)) & 0x7)

    def get_type(self):
        return self.ec.getMemoryService().readMemory8(self.base + 0x12)

    def get_state(self):
        # The FFS state byte encodes progress bits; normalise for the FV's
        # erase polarity, then isolate the highest set bit.
        state = self.ec.getMemoryService().readMemory8(self.base + 0x17)
        polarity = self.fv.get_polarity()
        if polarity:
            state = ~state
        highest_bit = 0x80;
        while (highest_bit != 0) and ((highest_bit & state) == 0):
            highest_bit >>= 1
        return highest_bit

    def get_next_section(self, section=None):
        # Iterate the sections of this file: pass None for the first section,
        # or the previous EfiFileSection for the next one. Returns None past
        # the end of the file.
        if section == None:
            if self.get_type() != FirmwareFile.EFI_FV_FILETYPE_FFS_MIN:
                # First section starts right after the FFS file header.
                section_base = self.get_base() + 0x18;
            else:
                return None
        else:
            section_base = int(section.get_base() + section.get_size())
        # Align to next 4 byte boundary
        if (section_base & 0x3) != 0:
            section_base = section_base + 0x4 - (section_base & 0x3)
        if section_base < self.get_base() + self.get_size():
            return EfiFileSection(self.ec, section_base)
        else:
            return None
class FirmwareVolume:
    # Wrapper around a firmware volume in target memory: validates the FV
    # header, iterates the contained FFS files, and loads debug symbols for
    # the executable sections it finds.
    CONST_FV_SIGNATURE = ('_','F','V','H')
    EFI_FVB2_ERASE_POLARITY = 0x800

    # Cached (base, size, section_type) tuples of executable sections.
    # NOTE(review): class-level attribute; get_debug_info() rebinds it per
    # instance on first use, but the empty default is shared.
    DebugInfos = []

    def __init__(self, ec, fv_base, fv_size):
        self.ec = ec
        self.fv_base = fv_base
        self.fv_size = fv_size
        try:
            # '_FVH' signature lives at offset 0x28 of the FV header.
            signature = struct.unpack("cccc", self.ec.getMemoryService().read(fv_base + 0x28, 4, 32))
        except DebugException:
            raise Exception("FirmwareVolume", "Not possible to access the defined firmware volume at [0x%X,0x%X]. Could be the used build report does not correspond to your current debugging context." % (int(fv_base),int(fv_base+fv_size)))
        if signature != FirmwareVolume.CONST_FV_SIGNATURE:
            raise Exception("FirmwareVolume", "This is not a valid firmware volume")

    def get_size(self):
        return self.ec.getMemoryService().readMemory32(self.fv_base + 0x20)

    def get_attributes(self):
        return self.ec.getMemoryService().readMemory32(self.fv_base + 0x2C)

    def get_polarity(self):
        # 1 when erased flash bits read as 1 (EFI_FVB2_ERASE_POLARITY set).
        attributes = self.get_attributes()
        if attributes & FirmwareVolume.EFI_FVB2_ERASE_POLARITY:
            return 1
        else:
            return 0

    def get_next_ffs(self, ffs=None):
        # Iterate the FFS files of this volume: None for the first file, or
        # the previous FirmwareFile for the next. Returns None past the end.
        if ffs == None:
            # Get the offset of the first FFS file from the FV header
            ffs_base = self.fv_base + self.ec.getMemoryService().readMemory16(self.fv_base + 0x30)
        else:
            # Goto the next FFS file
            ffs_base = int(ffs.get_base() + ffs.get_size())
        # Align to next 8 byte boundary
        if (ffs_base & 0x7) != 0:
            ffs_base = ffs_base + 0x8 - (ffs_base & 0x7)
        if ffs_base < self.fv_base + self.get_size():
            return FirmwareFile(self, ffs_base, self.ec)
        else:
            return None

    def get_debug_info(self):
        # Walk every section of every FFS file and remember the TE/PE32
        # (executable) sections for symbol loading.
        self.DebugInfos = []
        ffs = self.get_next_ffs()
        while ffs != None:
            section = ffs.get_next_section()
            while section != None:
                type = section.get_type()
                if (type == EfiFileSection.EFI_SECTION_TE) or (type == EfiFileSection.EFI_SECTION_PE32):
                    self.DebugInfos.append((section.get_base(), section.get_size(), section.get_type()))
                section = ffs.get_next_section(section)
            ffs = self.get_next_ffs(ffs)

    def load_symbols_at(self, addr, verbose = False):
        # Load the symbols of the executable section containing 'addr' and
        # return its (base, size, type) tuple; implicitly returns None when
        # no section contains 'addr'.
        if self.DebugInfos == []:
            self.get_debug_info()
        for debug_info in self.DebugInfos:
            if (addr >= debug_info[0]) and (addr < debug_info[0] + debug_info[1]):
                # Image data starts after the 4-byte common section header.
                if debug_info[2] == EfiFileSection.EFI_SECTION_TE:
                    section = EfiSectionTE(self.ec, debug_info[0] + 0x4)
                elif debug_info[2] == EfiFileSection.EFI_SECTION_PE32:
                    section = EfiSectionPE32(self.ec, debug_info[0] + 0x4)
                else:
                    raise Exception('FirmwareVolume','Section Type not supported')
                try:
                    edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose)
                except Exception, (ErrorClass, ErrorMessage):
                    if verbose:
                        print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage)
                return debug_info

    def load_all_symbols(self, verbose = False):
        # Load the symbols of every executable section in the volume;
        # sections without usable debug info are skipped.
        if self.DebugInfos == []:
            self.get_debug_info()
        for debug_info in self.DebugInfos:
            if debug_info[2] == EfiFileSection.EFI_SECTION_TE:
                section = EfiSectionTE(self.ec, debug_info[0] + 0x4)
            elif debug_info[2] == EfiFileSection.EFI_SECTION_PE32:
                section = EfiSectionPE32(self.ec, debug_info[0] + 0x4)
            else:
                continue
            try:
                edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose)
            except Exception, (ErrorClass, ErrorMessage):
                if verbose:
                    print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/firmware_volume.py
|
#
# Copyright (c) 2011-2012, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import re
class BuildReport:
    """Parser for an edk2 build report log (Python 2 — uses file.xreadlines).

    Currently only the PCD section is extracted; the other section parsers
    are placeholders that consume nothing.
    """

    # Parsed PCDs: {guid_section_name: {pcd_name: (value, type)}}.
    PCDs = {}

    def parse_platform_summary(self, file):
        pass

    def parse_pcd_report(self, report_file):
        """Parse the 'Platform Configuration Database Report' section.

        Reads until the next '<====>' section delimiter.  Lines starting
        with a gFooGuid token open a new GUID group; PCD value lines are
        matched against `pcd_reg` and recorded under the current GUID.
        """
        # Groups: 2 = PCD name, 5 = type, 6 = value.
        pcd_reg = re.compile(" (\*P|\*F|\*M| ) (\w+)(\ +)\: (.*) \((\w+)\) = (.*)\n")

        for line in report_file.xreadlines():
            stripped_line = line.strip()
            if re.match("\<=+\>", stripped_line):
                # Section delimiter reached: PCD report is finished.
                return
            elif re.match("g.*Guid", stripped_line):
                guid = stripped_line
                self.PCDs[guid] = {}
            else:
                # NOTE(review): assumes a GUID line always precedes the first
                # PCD line — otherwise `guid` would be unbound here; confirm
                # against the report format.
                m = pcd_reg.match(line)
                if m:
                    self.PCDs[guid][m.group(2)] = (m.group(6).strip(),m.group(5))

    def parse_firmware_device(self, file):
        pass

    def parse_module_summary(self, file):
        #print "Module Summary"
        pass

    # Section title -> unbound parser method, dispatched from __init__.
    CONST_SECTION_HEADERS = [('Platform Summary', parse_platform_summary),
                             ('Platform Configuration Database Report',parse_pcd_report),
                             ('Firmware Device (FD)',parse_firmware_device),
                             ('Module Summary',parse_module_summary)]

    def __init__(self, filename = 'report.log'):
        """Open `filename` and dispatch each recognized section to its parser."""
        report_file = open(filename, 'r')
        for line in report_file.xreadlines():
            for section_header in BuildReport.CONST_SECTION_HEADERS:
                if line.strip() == section_header[0]:
                    section_header[1](self, report_file)
        #print self.PCDs
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/build_report.py
|
#
# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from arm_ds.debugger_v1 import DebugException
import struct
import edk2_debugger
import firmware_volume
class DebugInfoTable:
    """Reader for the UEFI EFI_DEBUG_IMAGE_INFO_TABLE (Python 2 / DS-5).

    Resolves loaded-image base/size pairs from the debug image info table so
    that their symbol files can be loaded into the debugger.  All structure
    offsets below follow the UEFI spec layouts for 32-bit vs AArch64 targets.
    """

    CONST_DEBUG_INFO_TABLE_GUID = ( 0x49152E77L, 0x47641ADAL, 0xFE7AA2B7L, 0x8B5ED9FEL)

    # Cached (image_base, image_size) tuples, filled by get_debug_info().
    DebugInfos = []

    def __init__(self, ec, debug_info_table_header_offset):
        self.ec = ec
        self.base = debug_info_table_header_offset

    def get_debug_info(self):
        """Walk the debug image info table, caching (base, size) per image."""
        # Get the information from EFI_DEBUG_IMAGE_INFO_TABLE_HEADER
        count = self.ec.getMemoryService().readMemory32(self.base + 0x4)
        if edk2_debugger.is_aarch64(self.ec):
            debug_info_table_base = self.ec.getMemoryService().readMemory64(self.base + 0x8)
        else:
            debug_info_table_base = self.ec.getMemoryService().readMemory32(self.base + 0x8)

        self.DebugInfos = []

        for i in range(0, count):
            # Get the address of the structure EFI_DEBUG_IMAGE_INFO
            if edk2_debugger.is_aarch64(self.ec):
                debug_info = self.ec.getMemoryService().readMemory64(debug_info_table_base + (i * 8))
            else:
                debug_info = self.ec.getMemoryService().readMemory32(debug_info_table_base + (i * 4))

            if debug_info:
                debug_info_type = self.ec.getMemoryService().readMemory32(debug_info)
                # Normal Debug Info Type (EFI_DEBUG_IMAGE_INFO_TYPE_NORMAL)
                if debug_info_type == 1:
                    if edk2_debugger.is_aarch64(self.ec):
                        # Get the base address of the structure EFI_LOADED_IMAGE_PROTOCOL
                        loaded_image_protocol = self.ec.getMemoryService().readMemory64(debug_info + 0x8)

                        image_base = self.ec.getMemoryService().readMemory64(loaded_image_protocol + 0x40)
                        image_size = self.ec.getMemoryService().readMemory32(loaded_image_protocol + 0x48)
                    else:
                        # Get the base address of the structure EFI_LOADED_IMAGE_PROTOCOL
                        loaded_image_protocol = self.ec.getMemoryService().readMemory32(debug_info + 0x4)

                        image_base = self.ec.getMemoryService().readMemory32(loaded_image_protocol + 0x20)
                        image_size = self.ec.getMemoryService().readMemory32(loaded_image_protocol + 0x28)

                    self.DebugInfos.append((image_base,image_size))

    # Return (base, size)
    def load_symbols_at(self, addr, verbose = False):
        """Load symbols for the image containing `addr`; return its (base, size).

        Raises:
            Exception: when no loaded image covers `addr`.
        """
        if self.DebugInfos == []:
            self.get_debug_info()

        found = False
        for debug_info in self.DebugInfos:
            if (addr >= debug_info[0]) and (addr < debug_info[0] + debug_info[1]):
                if edk2_debugger.is_aarch64(self.ec):
                    section = firmware_volume.EfiSectionPE64(self.ec, debug_info[0])
                else:
                    section = firmware_volume.EfiSectionPE32(self.ec, debug_info[0])

                try:
                    edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose)
                except Exception, (ErrorClass, ErrorMessage):
                    if verbose:
                        print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage)

                found = True
                return debug_info

        if found == False:
            raise Exception('DebugInfoTable','No symbol found at 0x%x' % addr)

    def load_all_symbols(self, verbose = False):
        """Load the symbol file of every image listed in the table."""
        if self.DebugInfos == []:
            self.get_debug_info()

        for debug_info in self.DebugInfos:
            if edk2_debugger.is_aarch64(self.ec):
                section = firmware_volume.EfiSectionPE64(self.ec, debug_info[0])
            else:
                section = firmware_volume.EfiSectionPE32(self.ec, debug_info[0])

            try:
                edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose)
            except Exception, (ErrorClass, ErrorMessage):
                if verbose:
                    print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage)

    def dump(self):
        """Print the debug file path of every image in the table."""
        self.get_debug_info()
        for debug_info in self.DebugInfos:
            base_pe32 = debug_info[0]
            if edk2_debugger.is_aarch64(self.ec):
                section = firmware_volume.EfiSectionPE64(self.ec, base_pe32)
            else:
                section = firmware_volume.EfiSectionPE32(self.ec, base_pe32)
            print section.get_debug_filepath()
class SystemTable:
    """Locator for the EFI System Table in target memory (Python 2 / DS-5).

    Scans RAM for the 'IBI SYST' EFI_SYSTEM_TABLE_POINTER signature, then
    exposes lookup of configuration-table entries by GUID.
    """

    # EFI_SYSTEM_TABLE signature: 'IBI SYST'.
    CONST_ST_SIGNATURE = ('I','B','I',' ','S','Y','S','T')

    def __init__(self, ec, membase, memsize):
        """Search [membase, membase+memsize) for the system table pointer.

        Raises:
            Exception: when memory is inaccessible or the signature is not found.
        """
        self.membase = membase
        self.memsize = memsize
        self.ec = ec

        found = False

        # Start from the top of the memory
        offset = self.membase + self.memsize
        # Align to highest 4MB boundary
        offset = offset & ~0x3FFFFF
        # We should not have a System Table at the top of the System Memory
        offset = offset - 0x400000

        # Start at top and look on 4MB boundaries for system table ptr structure
        while offset > self.membase:
            try:
                signature = struct.unpack("cccccccc", self.ec.getMemoryService().read(str(offset), 8, 32))
            except DebugException:
                raise Exception('SystemTable','Fail to access System Memory. Ensure all the memory in the region [0x%x;0x%X] is accessible.' % (membase,membase+memsize))
            if signature == SystemTable.CONST_ST_SIGNATURE:
                found = True
                # EFI_SYSTEM_TABLE_POINTER.EfiSystemTableBase at offset 0x8.
                if edk2_debugger.is_aarch64(self.ec):
                    self.system_table_base = self.ec.getMemoryService().readMemory64(offset + 0x8)
                else:
                    self.system_table_base = self.ec.getMemoryService().readMemory32(offset + 0x8)
                break
            offset = offset - 0x400000

        if not found:
            raise Exception('SystemTable','System Table not found in System Memory [0x%x;0x%X]' % (membase,membase+memsize))

    def get_configuration_table(self, conf_table_guid):
        """Return the VendorTable pointer for `conf_table_guid`.

        `conf_table_guid` is a 4-tuple of 32-bit words matching the
        struct.unpack("<IIII") decoding of the stored GUID.

        Raises:
            Exception: when no configuration table entry matches.
        """
        # Field offsets differ between the 32-bit and AArch64 EFI_SYSTEM_TABLE.
        if edk2_debugger.is_aarch64(self.ec):
            # Number of configuration Table entry
            conf_table_entry_count = self.ec.getMemoryService().readMemory32(self.system_table_base + 0x68)

            # Get location of the Configuration Table entries
            conf_table_offset = self.ec.getMemoryService().readMemory64(self.system_table_base + 0x70)
        else:
            # Number of configuration Table entry
            conf_table_entry_count = self.ec.getMemoryService().readMemory32(self.system_table_base + 0x40)

            # Get location of the Configuration Table entries
            conf_table_offset = self.ec.getMemoryService().readMemory32(self.system_table_base + 0x44)

        for i in range(0, conf_table_entry_count):
            # EFI_CONFIGURATION_TABLE entry: 0x18 bytes on AArch64, 0x14 on 32-bit.
            if edk2_debugger.is_aarch64(self.ec):
                offset = conf_table_offset + (i * 0x18)
            else:
                offset = conf_table_offset + (i * 0x14)
            guid = struct.unpack("<IIII", self.ec.getMemoryService().read(str(offset), 16, 32))
            if guid == conf_table_guid:
                # VendorTable pointer follows the 16-byte GUID.
                if edk2_debugger.is_aarch64(self.ec):
                    return self.ec.getMemoryService().readMemory64(offset + 0x10)
                else:
                    return self.ec.getMemoryService().readMemory32(offset + 0x10)

        raise Exception('SystemTable','Configuration Table not found')
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/ArmPlatformPkg/Scripts/Ds5/system_table.py
|
## @file
# This file contains the script to build UniversalPayload
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import argparse
import subprocess
import os
import shutil
import sys
from ctypes import *
sys.dont_write_bytecode = True
class UPLD_INFO_HEADER(LittleEndianStructure):
    """Binary layout of the Universal Payload information header.

    This packed little-endian structure is serialized into the
    ``.upld_info`` ELF section of the payload image; the defaults set in
    ``__init__`` identify an Intel-produced UEFI payload.
    """
    _pack_ = 1
    _fields_ = [
        ('Identifier',   c_char * 4),    # 'PLDH' magic
        ('HeaderLength', c_uint32),      # total size of this header
        ('SpecRevision', c_uint16),      # universal payload spec revision
        ('Reserved',     c_uint16),
        ('Revision',     c_uint32),      # payload image revision
        ('Attribute',    c_uint32),      # bit 0 set for DEBUG builds
        ('Capability',   c_uint32),
        ('ProducerId',   c_char * 16),
        ('ImageId',      c_char * 16),
        ]

    def __init__(self):
        self.Identifier   = b'PLDH'
        self.HeaderLength = sizeof(UPLD_INFO_HEADER)
        self.SpecRevision = 0x0009
        self.Revision     = 0x00010105
        self.ProducerId   = b'INTEL'
        self.ImageId      = b'UEFI'
def RunCommand(cmd):
    """Run *cmd* through the shell in $WORKSPACE, echoing its output live.

    stdout and stderr are merged and streamed line by line to the console.

    Raises:
        Exception: when the command exits with a nonzero status.
    """
    print(cmd)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, cwd=os.environ['WORKSPACE'])
    # Stream output as it is produced instead of buffering it all.
    for raw_line in iter(proc.stdout.readline, b''):
        print(raw_line.strip().decode(errors='ignore'))
    proc.communicate()
    if proc.returncode != 0:
        print("- Failed - error happened when run command: %s"%cmd)
        raise Exception("ERROR: when run command: %s"%cmd)
def BuildUniversalPayload(Args, MacroList):
    """Build the Universal Payload ELF image.

    Runs the edk2 ``build`` tool twice (payload FVs with the user toolchain,
    then the ELF entry module with CLANGDWARF), writes the ``.upld_info``
    header blob, and uses llvm-objcopy to embed the info and FV blobs as
    sections of the final ``UniversalPayload.elf``.

    Args:
        Args:      parsed argparse namespace (Target, ToolChain, Arch,
                   ImageId, Quiet, pcd).
        MacroList: {NAME: value} build defines passed as ``-D`` options.
    """
    BuildTarget = Args.Target
    ToolChain = Args.ToolChain
    Quiet = "--quiet" if Args.Quiet else ""
    # The entry module is always linked as ELF with the CLANGDWARF toolchain.
    ElfToolChain = 'CLANGDWARF'

    BuildDir = os.path.join(os.environ['WORKSPACE'], os.path.normpath("Build/UefiPayloadPkgX64"))
    if Args.Arch == 'X64':
        BuildArch = "X64"
        ObjCopyFlag = "elf64-x86-64"
        EntryOutputDir = os.path.join(BuildDir, "{}_{}".format (BuildTarget, ElfToolChain), os.path.normpath("X64/UefiPayloadPkg/UefiPayloadEntry/UniversalPayloadEntry/DEBUG/UniversalPayloadEntry.dll"))
    else:
        # 32-bit entry with 64-bit payload ("-a IA32 -a X64" mixed build).
        BuildArch = "IA32 -a X64"
        ObjCopyFlag = "elf32-i386"
        EntryOutputDir = os.path.join(BuildDir, "{}_{}".format (BuildTarget, ElfToolChain), os.path.normpath("IA32/UefiPayloadPkg/UefiPayloadEntry/UniversalPayloadEntry/DEBUG/UniversalPayloadEntry.dll"))

    EntryModuleInf = os.path.normpath("UefiPayloadPkg/UefiPayloadEntry/UniversalPayloadEntry.inf")
    DscPath = os.path.normpath("UefiPayloadPkg/UefiPayloadPkg.dsc")
    DxeFvOutputDir = os.path.join(BuildDir, "{}_{}".format (BuildTarget, ToolChain), os.path.normpath("FV/DXEFV.Fv"))
    BdsFvOutputDir = os.path.join(BuildDir, "{}_{}".format (BuildTarget, ToolChain), os.path.normpath("FV/BDSFV.Fv"))
    PayloadReportPath = os.path.join(BuildDir, "UefiUniversalPayload.txt")
    ModuleReportPath = os.path.join(BuildDir, "UefiUniversalPayloadEntry.txt")
    UpldInfoFile = os.path.join(BuildDir, "UniversalPayloadInfo.bin")

    if "CLANG_BIN" in os.environ:
        LlvmObjcopyPath = os.path.join(os.environ["CLANG_BIN"], "llvm-objcopy")
    else:
        LlvmObjcopyPath = "llvm-objcopy"
    try:
        # Probe for llvm-objcopy before doing any expensive build work.
        RunCommand('"%s" --version'%LlvmObjcopyPath)
    except:
        print("- Failed - Please check if LLVM is installed or if CLANG_BIN is set correctly")
        sys.exit(1)

    Pcds = ""
    if (Args.pcd != None):
        for PcdItem in Args.pcd:
            Pcds += " --pcd {}".format (PcdItem)

    Defines = ""
    for key in MacroList:
        Defines +=" -D {0}={1}".format(key, MacroList[key])

    #
    # Building DXE core and DXE drivers as DXEFV.
    #
    BuildPayload = "build -p {} -b {} -a X64 -t {} -y {} {}".format (DscPath, BuildTarget, ToolChain, PayloadReportPath, Quiet)
    BuildPayload += Pcds
    BuildPayload += Defines
    RunCommand(BuildPayload)
    #
    # Building Universal Payload entry.
    #
    BuildModule = "build -p {} -b {} -a {} -m {} -t {} -y {} {}".format (DscPath, BuildTarget, BuildArch, EntryModuleInf, ElfToolChain, ModuleReportPath, Quiet)
    BuildModule += Pcds
    BuildModule += Defines
    RunCommand(BuildModule)

    #
    # Buid Universal Payload Information Section ".upld_info"
    #
    upld_info_hdr = UPLD_INFO_HEADER()
    # ImageId is a fixed 16-byte field; longer IDs are silently truncated.
    upld_info_hdr.ImageId = Args.ImageId.encode()[:16]
    upld_info_hdr.Attribute |= 1 if BuildTarget == "DEBUG" else 0
    fp = open(UpldInfoFile, 'wb')
    fp.write(bytearray(upld_info_hdr))
    fp.close()

    #
    # Copy the DXEFV as a section in elf format Universal Payload entry.
    #
    # Strip any stale payload sections first so repeated builds are idempotent.
    remove_section = '"{}" -I {} -O {} --remove-section .upld_info --remove-section .upld.uefi_fv --remove-section .upld.bds_fv {}'.format (
                       LlvmObjcopyPath,
                       ObjCopyFlag,
                       ObjCopyFlag,
                       EntryOutputDir
                       )
    add_section    = '"{}" -I {} -O {} --add-section .upld_info={} --add-section .upld.uefi_fv={} --add-section .upld.bds_fv={} {}'.format (
                       LlvmObjcopyPath,
                       ObjCopyFlag,
                       ObjCopyFlag,
                       UpldInfoFile,
                       DxeFvOutputDir,
                       BdsFvOutputDir,
                       EntryOutputDir
                       )
    set_section    = '"{}" -I {} -O {} --set-section-alignment .upld_info=4 --set-section-alignment .upld.uefi_fv=16 --set-section-alignment .upld.bds_fv=16 {}'.format (
                       LlvmObjcopyPath,
                       ObjCopyFlag,
                       ObjCopyFlag,
                       EntryOutputDir
                       )
    RunCommand(remove_section)
    RunCommand(add_section)
    RunCommand(set_section)

    shutil.copy (EntryOutputDir, os.path.join(BuildDir, 'UniversalPayload.elf'))
def main():
    """Command-line entry point: parse options, then build the payload."""
    parser = argparse.ArgumentParser(description='For building Universal Payload')
    parser.add_argument('-t', '--ToolChain')
    parser.add_argument('-b', '--Target', default='DEBUG')
    parser.add_argument('-a', '--Arch', choices=['IA32', 'X64'], help='Specify the ARCH for payload entry module. Default build X64 image.', default ='X64')
    parser.add_argument("-D", "--Macro", action="append", default=["UNIVERSAL_PAYLOAD=TRUE"])
    parser.add_argument('-i', '--ImageId', type=str, help='Specify payload ID (16 bytes maximal).', default ='UEFI')
    parser.add_argument('-q', '--Quiet', action='store_true', help='Disable all build messages except FATAL ERRORS.')
    parser.add_argument("-p", "--pcd", action="append")
    args = parser.parse_args()

    # Turn the NAME=value -D options into a {NAME: value} macro table.
    macro_table = {}
    for definition in (args.Macro or []):
        if definition.count('=') != 1:
            print("Unknown variable passed in: %s"%definition)
            raise Exception("ERROR: Unknown variable passed in: %s"%definition)
        name, _, value = definition.strip().partition('=')
        macro_table[name.upper()] = value

    BuildUniversalPayload(args, macro_table)
    print ("Successfully build Universal Payload")
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/UefiPayloadPkg/UniversalPayloadBuild.py
|
# -*- coding: utf-8 -*-
#
# Jansson documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 5 21:47:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The local "ext" directory holds the custom refcounting extension.
sys.path.insert(0, os.path.abspath('ext'))

# -- General configuration -----------------------------------------------------

# Minimum Sphinx version required to build these docs.
needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['refcounting']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Jansson'
copyright = u'2009-2020, Petri Lehtinen'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.13.1'
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
# Jansson documents a C API, so bare `name` references resolve as C functions.
default_role = 'c:func'
primary_domain = 'c'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Janssondoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Jansson.tex', u'Jansson Documentation',
   u'Petri Lehtinen', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'jansson', u'Jansson Documentation',
     [u'Petri Lehtinen'], 1)
]
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/RedfishPkg/Library/JsonLib/jansson/doc/conf.py
|
"""
refcounting
~~~~~~~~~~~
Reference count annotations for C API functions. Has the same
result as the sphinx.ext.refcounting extension but works for all
functions regardless of the signature, and the reference counting
information is written inline with the documentation instead of a
separate file.
Adds a new directive "refcounting". The directive has no content
and one required positional parameter:: "new" or "borrow".
Example:
.. cfunction:: json_t *json_object(void)
.. refcounting:: new
<description of the json_object function>
:copyright: Copyright (c) 2009-2016 Petri Lehtinen <petri@digip.org>
:license: MIT, see LICENSE for details.
"""
from docutils import nodes
class refcounting(nodes.emphasis): pass
def visit(self, node):
    # Generic writers (latex/text/man) render refcounting nodes as emphasis.
    self.visit_emphasis(node)

def depart(self, node):
    self.depart_emphasis(node)

def html_visit(self, node):
    # HTML output additionally tags the <em> with class="refcount" so the
    # annotation can be styled by the theme.
    self.body.append(self.starttag(node, 'em', '', CLASS='refcount'))

def html_depart(self, node):
    self.body.append('</em>')
def refcounting_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """Implement the ``refcounting`` directive.

    The single required argument must be ``new`` or ``borrow``; the
    directive expands to a single refcounting node carrying the matching
    annotation text.

    Raises:
        ValueError: if the argument is neither ``new`` nor ``borrow``.
    """
    if arguments[0] == 'borrow':
        text = 'Return value: Borrowed reference.'
    elif arguments[0] == 'new':
        text = 'Return value: New reference.'
    else:
        # Bug fix: the original raised ``Error``, an undefined name, so a bad
        # argument crashed with NameError instead of a meaningful message.
        raise ValueError('Valid arguments: new, borrow')
    return [refcounting(text, text)]
def setup(app):
    """Sphinx extension entry point: register the node and the directive."""
    # Per-writer (visit, depart) render functions for the refcounting node.
    app.add_node(refcounting,
                 html=(html_visit, html_depart),
                 latex=(visit, depart),
                 text=(visit, depart),
                 man=(visit, depart))
    # Directive takes exactly one required argument and no content.
    app.add_directive('refcounting', refcounting_directive, 0, (1, 0, 0))
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/RedfishPkg/Library/JsonLib/jansson/doc/ext/refcounting.py
|
#!/usr/bin/python
#
# Copyright 2014 Apple Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import lldb
import os
import uuid
import string
import optparse
import shlex
# GUID-string -> C name lookup table; populated elsewhere at runtime.
guid_dict = {}


def EFI_GUID_TypeSummary (valobj,internal_dict):
    """lldb type summary for EFI_GUID: return its C name if known, else ''.

    The GUID is formatted from the four EFI_GUID members
    (UINT32 Data1, UINT16 Data2, UINT16 Data3, UINT8 Data4[8]) and looked
    up in guid_dict by its upper-cased string form.
    """
    err = lldb.SBError()
    words = [valobj.GetChildMemberWithName(field).GetValueAsUnsigned(0)
             for field in ('Data1', 'Data2', 'Data3')]
    parts = ["%x-%x-%x-" % tuple(words)]
    data4 = valobj.GetChildMemberWithName('Data4')
    for idx in range(data4.num_children):
        # Dash between the 2-byte and 6-byte halves of Data4.
        if idx == 2:
            parts.append('-')
        parts.append("%02x" % data4.GetChildAtIndex(idx).data.GetUnsignedInt8(err, 0))
    return guid_dict.get(''.join(parts).upper(), '')
# Map EFI_STATUS numeric codes to human-readable names.  Error codes have the
# high bit set; both the 64-bit (0x8000000000000000) and 32-bit (0x80000000)
# encodings are listed so the summary works regardless of target word size.
EFI_STATUS_Dict = {
    (0x8000000000000000 | 1): "Load Error",
    (0x8000000000000000 | 2): "Invalid Parameter",
    (0x8000000000000000 | 3): "Unsupported",
    (0x8000000000000000 | 4): "Bad Buffer Size",
    (0x8000000000000000 | 5): "Buffer Too Small",
    (0x8000000000000000 | 6): "Not Ready",
    (0x8000000000000000 | 7): "Device Error",
    (0x8000000000000000 | 8): "Write Protected",
    (0x8000000000000000 | 9): "Out of Resources",
    (0x8000000000000000 | 10): "Volume Corrupt",
    (0x8000000000000000 | 11): "Volume Full",
    (0x8000000000000000 | 12): "No Media",
    (0x8000000000000000 | 13): "Media changed",
    (0x8000000000000000 | 14): "Not Found",
    (0x8000000000000000 | 15): "Access Denied",
    (0x8000000000000000 | 16): "No Response",
    (0x8000000000000000 | 17): "No mapping",
    (0x8000000000000000 | 18): "Time out",
    (0x8000000000000000 | 19): "Not started",
    (0x8000000000000000 | 20): "Already started",
    (0x8000000000000000 | 21): "Aborted",
    (0x8000000000000000 | 22): "ICMP Error",
    (0x8000000000000000 | 23): "TFTP Error",
    (0x8000000000000000 | 24): "Protocol Error",

    # Success and warning codes (high bit clear).
    0 : "Success",
    1 : "Warning Unknown Glyph",
    2 : "Warning Delete Failure",
    3 : "Warning Write Failure",
    4 : "Warning Buffer Too Small",

    # 32-bit encodings of the same error codes.
    (0x80000000 | 1): "Load Error",
    (0x80000000 | 2): "Invalid Parameter",
    (0x80000000 | 3): "Unsupported",
    (0x80000000 | 4): "Bad Buffer Size",
    (0x80000000 | 5): "Buffer Too Small",
    (0x80000000 | 6): "Not Ready",
    (0x80000000 | 7): "Device Error",
    (0x80000000 | 8): "Write Protected",
    (0x80000000 | 9): "Out of Resources",
    (0x80000000 | 10): "Volume Corrupt",
    (0x80000000 | 11): "Volume Full",
    (0x80000000 | 12): "No Media",
    (0x80000000 | 13): "Media changed",
    (0x80000000 | 14): "Not Found",
    (0x80000000 | 15): "Access Denied",
    (0x80000000 | 16): "No Response",
    (0x80000000 | 17): "No mapping",
    (0x80000000 | 18): "Time out",
    (0x80000000 | 19): "Not started",
    (0x80000000 | 20): "Already started",
    (0x80000000 | 21): "Aborted",
    (0x80000000 | 22): "ICMP Error",
    (0x80000000 | 23): "TFTP Error",
    (0x80000000 | 24): "Protocol Error",
    }
def EFI_STATUS_TypeSummary (valobj,internal_dict):
    """lldb summary for EFI_STATUS: the status name, or '' when unknown."""
    return EFI_STATUS_Dict.get(valobj.GetValueAsUnsigned(0), '')
def EFI_TPL_TypeSummary (valobj,internal_dict):
    """lldb summary for EFI_TPL (UEFI task priority level).

    Decodes the numeric level into its symbolic name plus the offset from
    that level's base value: TPL_APPLICATION = 4, TPL_CALLBACK = 8,
    TPL_NOTIFY = 16, TPL_HIGH_LEVEL = 31.  Pointers are not decoded.
    """
    if valobj.TypeIsPointerType():
        return ""

    Tpl = valobj.GetValueAsUnsigned(0)
    if Tpl < 4:
        # Below TPL_APPLICATION: not a named level, show the raw value.
        Str = "%d" % Tpl
    elif Tpl == 6:
        Str = "TPL_DRIVER (Obsolete Concept in edk2)"
    elif Tpl < 8:
        Str = "TPL_APPLICATION"
        if Tpl - 4 > 0:
            Str += " + " + "%d" % (Tpl - 4)
    elif Tpl < 16:
        Str = "TPL_CALLBACK"
        # Bug fix: offset is relative to TPL_CALLBACK (8), not
        # TPL_APPLICATION (4) as the original copy-paste printed.
        if Tpl - 8 > 0:
            Str += " + " + "%d" % (Tpl - 8)
    elif Tpl < 31:
        Str = "TPL_NOTIFY"
        # Bug fix: offset is relative to TPL_NOTIFY (16).
        if Tpl - 16 > 0:
            Str += " + " + "%d" % (Tpl - 16)
    elif Tpl == 31:
        Str = "TPL_HIGH_LEVEL"
    else:
        Str = "Invalid TPL"
    return Str
def CHAR16_TypeSummary (valobj,internal_dict):
    """Display EFI CHAR16 ('unsigned short') values as L"..." strings.

    Handles three shapes: CHAR16* (NUL-terminated, capped at 1024 chars),
    a single CHAR16 scalar, and a CHAR16[] array.
    NOTE: Python 2 only — relies on unichr() and str/bytes encode semantics.
    """
    SBError = lldb.SBError()
    Str = ''
    if valobj.TypeIsPointerType():
        if valobj.GetValueAsUnsigned () == 0:
            return "NULL"

        # CHAR16 * max string size 1024
        for i in range (1024):
            Char = valobj.GetPointeeData(i,1).GetUnsignedInt16(SBError, 0)
            if SBError.fail or Char == 0:
                break
            Str += unichr (Char)
        Str = 'L"' + Str + '"'
        return Str.encode ('utf-8', 'replace')

    if valobj.num_children == 0:
        # CHAR16 scalar: only printable characters get a summary.
        if chr (valobj.unsigned) in string.printable:
            Str = "L'" + unichr (valobj.unsigned) + "'"
            return Str.encode ('utf-8', 'replace')
    else:
        # CHAR16 [] — stop at the first NUL.
        for i in range (valobj.num_children):
            Char = valobj.GetChildAtIndex(i).data.GetUnsignedInt16(SBError, 0)
            if Char == 0:
                break
            Str += unichr (Char)
        Str = 'L"' + Str + '"'
        return Str.encode ('utf-8', 'replace')

    return Str
def CHAR8_TypeSummary (valobj,internal_dict):
    """Display EFI CHAR8 ('signed char') values as "..." strings.

    Mirrors CHAR16_TypeSummary for 8-bit characters: handles CHAR8*,
    a single CHAR8 scalar, and CHAR8[] arrays.  Python 2 only.

    unichr() is used as a junk string can produce an error message like this:
    UnicodeEncodeError: 'ascii' codec can't encode character u'\x90' in position 1: ordinal not in range(128)
    """
    SBError = lldb.SBError()
    Str = ''
    if valobj.TypeIsPointerType():
        if valobj.GetValueAsUnsigned () == 0:
            return "NULL"

        # CHAR8 * max string size 1024
        for i in range (1024):
            Char = valobj.GetPointeeData(i,1).GetUnsignedInt8(SBError, 0)
            if SBError.fail or Char == 0:
                break
            Str += unichr (Char)
        Str = '"' + Str + '"'
        return Str.encode ('utf-8', 'replace')

    if valobj.num_children == 0:
        # CHAR8 scalar: only printable characters get a summary.
        if chr (valobj.unsigned) in string.printable:
            Str = '"' + unichr (valobj.unsigned) + '"'
            return Str.encode ('utf-8', 'replace')
    else:
        # CHAR8 [] — stop at the first NUL.
        for i in range (valobj.num_children):
            Char = valobj.GetChildAtIndex(i).data.GetUnsignedInt8(SBError, 0)
            if Char == 0:
                break
            Str += unichr (Char)
        Str = '"' + Str + '"'
        return Str.encode ('utf-8', 'replace')

    return Str
# Map (Type, SubType) of an EFI_DEVICE_PATH_PROTOCOL node header to the name
# of the C struct describing that node, used to build 'expr' hints below.
device_path_dict = {
    # Hardware (0x01)
    (0x01, 0x01): "PCI_DEVICE_PATH",
    (0x01, 0x02): "PCCARD_DEVICE_PATH",
    (0x01, 0x03): "MEMMAP_DEVICE_PATH",
    (0x01, 0x04): "VENDOR_DEVICE_PATH",
    (0x01, 0x05): "CONTROLLER_DEVICE_PATH",
    # ACPI (0x02)
    (0x02, 0x01): "ACPI_HID_DEVICE_PATH",
    (0x02, 0x02): "ACPI_EXTENDED_HID_DEVICE_PATH",
    (0x02, 0x03): "ACPI_ADR_DEVICE_PATH",
    # Messaging (0x03)
    (0x03, 0x01): "ATAPI_DEVICE_PATH",
    (0x03, 0x12): "SATA_DEVICE_PATH",
    (0x03, 0x02): "SCSI_DEVICE_PATH",
    (0x03, 0x03): "FIBRECHANNEL_DEVICE_PATH",
    (0x03, 0x04): "F1394_DEVICE_PATH",
    (0x03, 0x05): "USB_DEVICE_PATH",
    (0x03, 0x0f): "USB_CLASS_DEVICE_PATH",
    (0x03, 0x10): "FW_SBP2_UNIT_LUN_DEVICE_PATH",
    (0x03, 0x11): "DEVICE_LOGICAL_UNIT_DEVICE_PATH",
    (0x03, 0x06): "I2O_DEVICE_PATH",
    (0x03, 0x0b): "MAC_ADDR_DEVICE_PATH",
    (0x03, 0x0c): "IPv4_DEVICE_PATH",
    (0x03, 0x09): "INFINIBAND_DEVICE_PATH",
    (0x03, 0x0e): "UART_DEVICE_PATH",
    (0x03, 0x0a): "VENDOR_DEVICE_PATH",
    (0x03, 0x13): "ISCSI_DEVICE_PATH",
    # Media (0x04)
    (0x04, 0x01): "HARDDRIVE_DEVICE_PATH",
    (0x04, 0x02): "CDROM_DEVICE_PATH",
    (0x04, 0x03): "VENDOR_DEVICE_PATH",
    (0x04, 0x04): "FILEPATH_DEVICE_PATH",
    (0x04, 0x05): "MEDIA_PROTOCOL_DEVICE_PATH",
    # BIOS Boot Specification (0x05) and End-of-path (0x7F/0xFF)
    (0x05, 0x01): "BBS_BBS_DEVICE_PATH",
    (0x7F, 0xFF): "EFI_DEVICE_PATH_PROTOCOL",
    (0xFF, 0xFF): "EFI_DEVICE_PATH_PROTOCOL",
    }
def EFI_DEVICE_PATH_PROTOCOL_TypeSummary (valobj,internal_dict):
    """lldb summary for an EFI_DEVICE_PATH_PROTOCOL node.

    Pretty-prints the node header (Type, SubType, Length) with the matching
    struct name from device_path_dict and an 'expr' hint for dumping the
    full node and for walking to the next node in the path.
    """
    if valobj.TypeIsPointerType():
        # EFI_DEVICE_PATH_PROTOCOL *
        return ""
    Str = ""
    if valobj.num_children == 3:
        # EFI_DEVICE_PATH_PROTOCOL
        Type    = valobj.GetChildMemberWithName('Type').unsigned
        SubType = valobj.GetChildMemberWithName('SubType').unsigned
        if (Type, SubType) in device_path_dict:
            TypeStr = device_path_dict[Type, SubType]
        else:
            TypeStr = ""

        LenLow  = valobj.GetChildMemberWithName('Length').GetChildAtIndex(0).unsigned
        LenHigh = valobj.GetChildMemberWithName('Length').GetChildAtIndex(1).unsigned
        # Bug fix: Length is a little-endian UINT8[2]; the high byte
        # contributes LenHigh * 256 and must be shifted LEFT by 8.  The
        # original right-shift always yielded 0, truncating every node
        # length to its low byte (and mis-computing NextNode below).
        Len = LenLow + (LenHigh << 8)

        Address = long ("%d" % valobj.addr)
        if (Address == lldb.LLDB_INVALID_ADDRESS):
            # Need to research this, it seems to be the nested struct case
            ExprStr = ""
        elif (Type & 0x7f == 0x7f):
            ExprStr = "End Device Path" if SubType == 0xff else "End This Instance"
        else:
            ExprStr = "expr *(%s *)0x%08x" % (TypeStr, Address)

        Str = " {\n"
        Str += " (UINT8) Type = 0x%02x // %s\n" % (Type, "END" if (Type & 0x7f == 0x7f) else "")
        Str += " (UINT8) SubType = 0x%02x // %s\n" % (SubType, ExprStr)
        Str += " (UINT8 [2]) Length = { // 0x%04x (%d) bytes\n" % (Len, Len)
        Str += " (UINT8) [0] = 0x%02x\n" % LenLow
        Str += " (UINT8) [1] = 0x%02x\n" % LenHigh
        Str += " }\n"

        if (Type & 0x7f == 0x7f) and (SubType == 0xff):
            # End-of-device-path: nothing follows.
            pass
        elif ExprStr != "":
            NextNode = Address + Len
            Str += "// Next node 'expr *(EFI_DEVICE_PATH_PROTOCOL *)0x%08x'\n" % NextNode

    return Str
def TypePrintFormating(debugger):
    """Install default value formats and type summaries for EFI types.

    lldb defaults to decimal display; EFI types read better in hex, so a
    hex format is registered for every basic EFI integer type, followed by
    Python summary providers for the richer types.
    """
    category = debugger.GetDefaultCategory()
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("BOOLEAN"),
                           lldb.SBTypeFormat(lldb.eFormatBoolean))
    # One shared hex formatter for every integer-like EFI type.
    hex_format = lldb.SBTypeFormat(lldb.eFormatHex)
    for type_name in ("UINT64", "INT64", "UINT32", "INT32",
                      "UINT16", "INT16", "UINT8", "INT8",
                      "UINTN", "INTN", "CHAR8", "CHAR16",
                      "EFI_PHYSICAL_ADDRESS", "PHYSICAL_ADDRESS",
                      "EFI_STATUS", "EFI_TPL", "EFI_LBA",
                      "EFI_BOOT_MODE", "EFI_FV_FILETYPE"):
        category.AddTypeFormat(lldb.SBTypeNameSpecifier(type_name), hex_format)
    # Smart type printing for EFI: route each type through the summary
    # functions defined in this module.
    for command in (
            "type summary add EFI_GUID --python-function lldbefi.EFI_GUID_TypeSummary",
            "type summary add EFI_STATUS --python-function lldbefi.EFI_STATUS_TypeSummary",
            "type summary add EFI_TPL --python-function lldbefi.EFI_TPL_TypeSummary",
            "type summary add EFI_DEVICE_PATH_PROTOCOL --python-function lldbefi.EFI_DEVICE_PATH_PROTOCOL_TypeSummary",
            "type summary add CHAR16 --python-function lldbefi.CHAR16_TypeSummary",
            'type summary add --regex "CHAR16 \[[0-9]+\]" --python-function lldbefi.CHAR16_TypeSummary',
            "type summary add CHAR8 --python-function lldbefi.CHAR8_TypeSummary",
            'type summary add --regex "CHAR8 \[[0-9]+\]" --python-function lldbefi.CHAR8_TypeSummary'):
        debugger.HandleCommand(command)
    debugger.HandleCommand(
        'setting set frame-format "frame #${frame.index}: ${frame.pc}'
        '{ ${module.file.basename}{:${function.name}()${function.pc-offset}}}'
        '{ at ${line.file.fullpath}:${line.number}}\n"'
    )
# One-shot guard consumed by LoadEmulatorEfiSymbols(): on the first
# SecGdbScriptBreak hit it disables lldb's SIGALRM stop notifications
# (the EFI timer tick) and then flips this flag off.
gEmulatorBreakWorkaroundNeeded = True
def LoadEmulatorEfiSymbols(frame, bp_loc , internal_dict):
    """lldb breakpoint callback: sync symbols with the EFI emulator.

    Assumes the breakpoint is on a function with the same prototype as
    SecGdbScriptBreak(); the argument names matter because lldb looks
    them up by name:

        VOID SecGdbScriptBreak (char *FileName, int FileNameLength,
                                long unsigned int LoadAddress,
                                int AddSymbolFlag) { return; }

    When the emulator loads a PE/COFF image it calls the stub with the
    symbol-file name, its length, the load address, and a flag saying
    whether this is a load (1) or unload operation.  Returns False so the
    breakpoint command auto-continues.
    """
    global gEmulatorBreakWorkaroundNeeded
    if gEmulatorBreakWorkaroundNeeded:
        # turn off lldb debug prints on SIGALRM (EFI timer tick)
        frame.thread.process.target.debugger.HandleCommand("process handle SIGALRM -n false")
        gEmulatorBreakWorkaroundNeeded = False
    # Convert C string to Python string
    Error = lldb.SBError()
    FileNamePtr = frame.FindVariable ("FileName").GetValueAsUnsigned()
    FileNameLen = frame.FindVariable ("FileNameLength").GetValueAsUnsigned()
    FileName = frame.thread.process.ReadCStringFromMemory (FileNamePtr, FileNameLen, Error)
    if not Error.Success():
        print("!ReadCStringFromMemory() did not find a %d byte C string at %x" % (FileNameLen, FileNamePtr))
        # make breakpoint command continue
        return False
    debugger = frame.thread.process.target.debugger
    if frame.FindVariable ("AddSymbolFlag").GetValueAsUnsigned() == 1:
        # 0x240 backs the slide up over the PE/COFF headers so sections land
        # where the loader placed them -- TODO confirm against the emulator's
        # image layout.
        LoadAddress = frame.FindVariable ("LoadAddress").GetValueAsUnsigned() - 0x240
        debugger.HandleCommand ("target modules add %s" % FileName)
        # Echo the exact command we are about to run (the old message said
        # '--slid 0x%x %s', which did not match the executed command).
        print("target modules load --slide 0x%x --file %s" % (LoadAddress, FileName))
        debugger.HandleCommand ("target modules load --slide 0x%x --file %s" % (LoadAddress, FileName))
    else:
        # Unload: find the module by full path or basename and drop it.
        target = debugger.GetSelectedTarget()
        for SBModule in target.module_iter():
            ModuleName = SBModule.GetFileSpec().GetDirectory() + '/'
            ModuleName += SBModule.GetFileSpec().GetFilename()
            if FileName == ModuleName or FileName == SBModule.GetFileSpec().GetFilename():
                target.ClearModuleLoadAddress (SBModule)
                if not target.RemoveModule (SBModule):
                    print("!lldb.target.RemoveModule (%s) FAILED" % SBModule)
    # make breakpoint command continue
    return False
def GuidToCStructStr (guid, Name=False):
    """Render a GUID as a C initializer string.

    Accepts either a 16-byte bytearray in EFI (bytes_le) layout or a
    registry-format GUID string, and returns e.g.
    { 0xB402621F, 0xA940, 0x1E4A, { 0x86, 0x6B, 0x4D, 0xC9, 0x16, 0x2B, 0x34, 0x7C } }

    NOTE(review): the Name flag is documented elsewhere as "look up the
    CName in the GUID dictionary" but is currently unused -- kept for
    interface compatibility.
    """
    if not isinstance (guid, bytearray):
        # Parse the string form and take the little-endian byte layout.
        guid = bytearray (uuid.UUID(guid).bytes_le)
    data1 = "%02.2X%02.2X%02.2X%02.2X" % (guid[3], guid[2], guid[1], guid[0])
    data2 = "%02.2X%02.2X" % (guid[5], guid[4])
    data3 = "%02.2X%02.2X" % (guid[7], guid[6])
    data4 = ", ".join("0x%02.2X" % octet for octet in guid[8:16])
    return "{ 0x%s, 0x%s, 0x%s, { %s } }" % (data1, data2, data3, data4)
def ParseGuidString(GuidStr):
    """Validate a GUID and normalize it to upper-case registry form.

    Accepts either form:
        ParseGuidString("49152E77-1ADA-4764-B7A2-7AFEFED95E8B")
        ParseGuidString("{ 0xBA24B391, 0x73FD, 0xC54C, { 0x9E, 0xAF, 0x0C, 0xA7, 0x8A, 0x35, 0x46, 0xD1 } }")

    Returns "" when GuidStr is not recognized as a GUID.
    """
    if "{" in GuidStr :
        # convert C form "{ 0xBA24B391, 0x73FD, 0xC54C, { 0x9E, 0xAF, 0x0C, 0xA7, 0x8A, 0x35, 0x46, 0xD1 } }"
        # to string form BA24B391-73FD-C54C-9EAF-0CA78A3546D1
        # make a list of Hex numbers like: ['0xBA24B391', '0x73FD', '0xC54C', '0x9E', '0xAF', '0x0C', '0xA7', '0x8A', '0x35', '0x46', '0xD1']
        Hex = ''.join(x for x in GuidStr if x not in '{,}').split()
        Str = "%08X-%04X-%04X-%02.2X%02.2X-%02.2X%02.2X%02.2X%02.2X%02.2X%02.2X" % \
            (int(Hex[0], 0), int(Hex[1], 0), int(Hex[2], 0), int(Hex[3], 0), int(Hex[4], 0), \
            int(Hex[5], 0), int(Hex[6], 0), int(Hex[7], 0), int(Hex[8], 0), int(Hex[9], 0), int(Hex[10], 0))
    elif GuidStr.count('-') == 4:
        # validate "49152E77-1ADA-4764-B7A2-7AFEFED95E8B" form by
        # round-tripping through uuid.UUID.
        Check = "%s" % str(uuid.UUID(GuidStr)).upper()
        if GuidStr.upper() == Check:
            Str = GuidStr.upper()
        else:
            # Fix: this branch assigned 'Ste' (typo), leaving Str unbound
            # and raising UnboundLocalError at the return below.
            Str = ""
    else:
        Str = ""
    return Str
def create_guid_options():
usage = "usage: %prog [data]"
description='''lookup EFI_GUID by CName, C struct, or GUID string and print out all three.
'''
parser = optparse.OptionParser(description=description, prog='guid',usage=usage)
return parser
def efi_guid_command(debugger, command, result, dict):
    """LLDB 'guid' command.

    Looks up an EFI_GUID given as a CName, a C struct initializer, or a
    registry-format string, and prints all three representations.  With no
    argument, dumps the entire GUID dictionary.
    """
    # Use the Shell Lexer to properly parse up command options just like a
    # shell would
    command_args = shlex.split(command)
    parser = create_guid_options()
    try:
        (options, args) = parser.parse_args(command_args)
        if len(args) >= 1:
            if args[0] == "{":
                # caller forgot to quote the string"
                # mark arg[0] a string containing all args[n]
                args[0] = ' '.join(args)
            GuidStr = ParseGuidString (args[0])
            if GuidStr == "":
                # not a GUID string: reverse-lookup args[0] as a CName.
                # dict.items() -- iteritems() was Python 2 only and raised
                # AttributeError under the Python 3 lldb interpreter.
                GuidStr = [Key for Key, Value in guid_dict.items() if Value == args[0]][0]
            GuidStr = GuidStr.upper()
    except:
        # if you don't handle exceptions, passing an incorrect argument to the OptionParser will cause LLDB to exit
        # (courtesy of OptParse dealing with argument errors by throwing SystemExit)
        result.SetError ("option parsing failed")
        return
    if len(args) >= 1:
        if GuidStr in guid_dict:
            print("%s = %s" % (guid_dict[GuidStr], GuidStr))
            print("%s = %s" % (guid_dict[GuidStr], GuidToCStructStr (GuidStr)))
        else:
            print(GuidStr)
    else:
        # dump entire dictionary, aligned on the longest CName
        # (iteritems() -> values()/items() for Python 3)
        width = max(len(v) for v in guid_dict.values())
        for value in sorted(guid_dict, key=guid_dict.get):
            print('%-*s %s %s' % (width, guid_dict[value], value, GuidToCStructStr(value)))
    return
#
########## Code that runs when this script is imported into LLDB ###########
#
def __lldb_init_module (debugger, internal_dict):
    # This initializer is being run from LLDB in the embedded command interpreter
    # Make the options so we can generate the help text for the new LLDB
    # command line command prior to registering it with LLDB below
    #
    # Populates the module-level guid_dict (assumed to be initialized
    # elsewhere in this module -- TODO confirm), installs the EFI type
    # formatters, registers the 'guid' command, and arms the emulator
    # symbol-loading breakpoint when SecGdbScriptBreak is present.
    global guid_dict
    # Source Guid.xref file if we can find it: expected beside the build
    # output, at <cwd>/../FV/Guid.xref, one "GUID CName" pair per line.
    inputfile = os.getcwd()
    inputfile += os.sep + os.pardir + os.sep + 'FV' + os.sep + 'Guid.xref'
    with open(inputfile) as f:
        for line in f:
            data = line.split(' ')
            if len(data) >= 2:
                guid_dict[data[0].upper()] = data[1].strip('\n')
    # init EFI specific type formatters
    TypePrintFormating (debugger)
    # add guid command
    parser = create_guid_options()
    efi_guid_command.__doc__ = parser.format_help()
    debugger.HandleCommand('command script add -f lldbefi.efi_guid_command guid')
    Target = debugger.GetTargetAtIndex(0)
    if Target:
        Breakpoint = Target.BreakpointCreateByName('SecGdbScriptBreak')
        if Breakpoint.GetNumLocations() == 1:
            # Set the emulator breakpoints, if we are in the emulator
            debugger.HandleCommand("breakpoint command add -s python -F lldbefi.LoadEmulatorEfiSymbols {id}".format(id=Breakpoint.GetID()))
            print('Type r to run emulator. SecLldbScriptBreak armed. EFI modules should now get source level debugging in the emulator.')
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/EmulatorPkg/Unix/lldbefi.py
|
# @file
# Script to Build EmulatorPkg UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import io
from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toollib.utility_functions import RunCmd
from edk2toollib.utility_functions import GetHostInfo
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform. Define static data here and use
    for the different parts of stuart
    '''
    # edk2 packages this platform builds (workspace-relative names)
    PackagesSupported = ("EmulatorPkg",)
    # architectures the emulator firmware can target
    ArchSupported = ("X64", "IA32")
    # supported build target profiles
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    # stuart extension scopes active for this platform
    Scopes = ('emulatorpkg', 'edk2-build')
    # workspace root: two directories above this file (EmulatorPkg/PlatformCI)
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))
# ####################################################################################### #
# Configuration for Update & Setup #
# ####################################################################################### #
class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager):
    """Stuart settings for the EmulatorPkg update/setup/PR-eval invocables."""

    def GetPackagesSupported(self):
        ''' return iterable of edk2 packages supported by this build.
        These should be edk2 workspace relative paths '''
        return CommonPlatform.PackagesSupported

    def GetArchitecturesSupported(self):
        ''' return iterable of edk2 architectures supported by this build '''
        return CommonPlatform.ArchSupported

    def GetTargetsSupported(self):
        ''' return iterable of edk2 target tags supported by this build '''
        return CommonPlatform.TargetsSupported

    def GetRequiredSubmodules(self):
        ''' return iterable containing RequiredSubmodule objects.
        If no RequiredSubmodules return an empty iterable
        '''
        rs = []
        # intentionally declare this one with recursive false to avoid overhead
        rs.append(RequiredSubmodule(
            "CryptoPkg/Library/OpensslLib/openssl", False))
        # To avoid maintenance of this file for every new submodule
        # lets just parse the .gitmodules and add each if not already in list.
        # The GetRequiredSubmodules is designed to allow a build to optimize
        # the desired submodules but it isn't necessary for this repository.
        result = io.StringIO()
        ret = RunCmd("git", "config --file .gitmodules --get-regexp path", workingdir=self.GetWorkspaceRoot(), outstream=result)
        # Cmd output is expected to look like:
        # submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl
        # submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
        if ret == 0:
            for line in result.getvalue().splitlines():
                _, _, path = line.partition(" ")
                # str.partition never returns None -- an empty string means
                # the line had no path component, so test truthiness.
                if path:
                    if path not in [x.path for x in rs]:
                        rs.append(RequiredSubmodule(path, True))  # add it with recursive since we don't know
        return rs

    def SetArchitectures(self, list_of_requested_architectures):
        ''' Confirm the requests architecture list is valid and configure SettingsManager
        to run only the requested architectures.

        Raise Exception if a list_of_requested_architectures is not supported
        '''
        unsupported = set(list_of_requested_architectures) - \
            set(self.GetArchitecturesSupported())
        if(len(unsupported) > 0):
            errorString = (
                "Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical(errorString)
            raise Exception(errorString)
        self.ActualArchitectures = list_of_requested_architectures

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes

    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        ''' Filter other cases that this package should be built
        based on changed files. This should cover things that can't
        be detected as dependencies. '''
        build_these_packages = []
        possible_packages = potentialPackagesList.copy()
        for f in changedFilesList:
            # BaseTools files that might change the build
            if "BaseTools" in f:
                # os.path.splitext returns a (root, ext) tuple; compare the
                # extension only.  (The original compared the whole tuple,
                # which never matched, so every BaseTools change -- even
                # .txt/.md -- triggered a build.)
                if os.path.splitext(f)[1] not in [".txt", ".md"]:
                    build_these_packages = possible_packages
                    break
            # if the azure pipeline platform template file changed
            if "platform-build-run-steps.yml" in f:
                build_these_packages = possible_packages
                break
        return build_these_packages

    def GetPlatformDscAndConfig(self) -> tuple:
        ''' If a platform desires to provide its DSC then Policy 4 will evaluate if
        any of the changes will be built in the dsc.

        The tuple should be (<workspace relative path to dsc file>, <input dictionary of dsc key value pairs>)
        '''
        return (os.path.join("EmulatorPkg", "EmulatorPkg.dsc"), {})
# ####################################################################################### #
# Actual Configuration for Platform Build #
# ####################################################################################### #
class PlatformBuilder(UefiBuilder, BuildSettingsManager):
    """Stuart builder for EmulatorPkg: configures, builds, and runs the
    host emulator firmware."""

    def __init__(self):
        UefiBuilder.__init__(self)

    def AddCommandLineOptions(self, parserObj):
        ''' Add command line options to the argparser '''
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="X64",
                               help="Optional - architecture to build. IA32 will use IA32 for Pei & Dxe. "
                               "X64 will use X64 for both PEI and DXE.")

    def RetrieveCommandLineOptions(self, args):
        ''' Retrieve command line options from the argparser '''
        shell_environment.GetBuildVars().SetValue(
            "TARGET_ARCH", args.build_arch.upper(), "From CmdLine")
        shell_environment.GetBuildVars().SetValue(
            "ACTIVE_PLATFORM", "EmulatorPkg/EmulatorPkg.dsc", "From CmdLine")

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetPackagesPath(self):
        ''' Return a list of workspace relative paths that should be mapped as edk2 PackagesPath '''
        return ()

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes

    def GetName(self):
        ''' Get the name of the repo, platform, or product being build.
        Used for naming the log file, among others. '''
        # check the startup nsh flag and if set then rename the log file.
        # this helps in CI so we don't overwrite the build log since running
        # uses the stuart_build command.
        if(shell_environment.GetBuildVars().GetValue("MAKE_STARTUP_NSH", "FALSE") == "TRUE"):
            return "EmulatorPkg_With_Run"
        return "EmulatorPkg"

    def GetLoggingLevel(self, loggerType):
        ''' Get the logging level for a given type
        base == lowest logging level supported
        con  == Screen logging
        txt  == plain text file logging
        md   == markdown file logging
        '''
        return logging.DEBUG

    def SetPlatformEnv(self):
        ''' Seed the build environment with EmulatorPkg defaults. '''
        logging.debug("PlatformBuilder SetPlatformEnv")
        self.env.SetValue("PRODUCT_NAME", "EmulatorPkg", "Platform Hardcoded")
        self.env.SetValue("TOOL_CHAIN_TAG", "VS2019", "Default Toolchain")
        # Add support for using the correct Platform Headers, tools, and Libs based on emulator architecture
        # requested to be built when building VS2019 or VS2017
        if self.env.GetValue("TOOL_CHAIN_TAG") == "VS2019" or self.env.GetValue("TOOL_CHAIN_TAG") == "VS2017":
            key = self.env.GetValue("TOOL_CHAIN_TAG") + "_HOST"
            if self.env.GetValue("TARGET_ARCH") == "IA32":
                shell_environment.ShellEnvironment().set_shell_var(key, "x86")
            elif self.env.GetValue("TARGET_ARCH") == "X64":
                shell_environment.ShellEnvironment().set_shell_var(key, "x64")
        # Add support for using the correct Platform Headers, tools, and Libs based on emulator architecture
        # requested to be built when building on linux.
        if GetHostInfo().os.upper() == "LINUX":
            self.ConfigureLinuxDLinkPath()
        if GetHostInfo().os.upper() == "WINDOWS":
            self.env.SetValue("BLD_*_WIN_HOST_BUILD", "TRUE",
                              "Trigger Windows host build")
        self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false")
        # I don't see what this does but it is in build.sh
        key = "BLD_*_BUILD_" + self.env.GetValue("TARGET_ARCH")
        self.env.SetValue(key, "TRUE", "match script in build.sh")
        return 0

    def PlatformPreBuild(self):
        return 0

    def PlatformPostBuild(self):
        return 0

    def FlashRomImage(self):
        ''' Use the FlashRom Function to run the emulator. This gives an easy stuart command line to
        activate the emulator. '''
        OutputPath = os.path.join(self.env.GetValue(
            "BUILD_OUTPUT_BASE"), self.env.GetValue("TARGET_ARCH"))
        if (self.env.GetValue("MAKE_STARTUP_NSH") == "TRUE"):
            # 'with' guarantees the script is flushed and closed before the
            # emulator is launched (the original leaked the handle on error).
            with open(os.path.join(OutputPath, "startup.nsh"), "w") as f:
                f.write("BOOT SUCCESS !!! \n")
                # add commands here
                f.write("reset\n")
        if GetHostInfo().os.upper() == "WINDOWS":
            cmd = "WinHost.exe"
        elif GetHostInfo().os.upper() == "LINUX":
            cmd = "./Host"
        else:
            logging.critical("Unsupported Host")
            return -1
        return RunCmd(cmd, "", workingdir=OutputPath)

    def ConfigureLinuxDLinkPath(self):
        '''
        logic copied from build.sh to setup the correct libraries
        '''
        if self.env.GetValue("TARGET_ARCH") == "IA32":
            # Each runtime object is a separate search item.  (These were
            # accidentally fused into "libdl.so.2 crt1.o" / "crti.o crtn.o",
            # so those files could never be found.)
            LIB_NAMES = ["ld-linux.so.2", "libdl.so.2",
                         "crt1.o", "crti.o", "crtn.o"]
            LIB_SEARCH_PATHS = ["/usr/lib/i386-linux-gnu",
                                "/usr/lib32", "/lib32", "/usr/lib", "/lib"]
        elif self.env.GetValue("TARGET_ARCH") == "X64":
            LIB_NAMES = ["ld-linux-x86-64.so.2",
                         "libdl.so.2", "crt1.o", "crti.o", "crtn.o"]
            LIB_SEARCH_PATHS = ["/usr/lib/x86_64-linux-gnu",
                                "/usr/lib64", "/lib64", "/usr/lib", "/lib"]
        else:
            # Unknown architecture: nothing to search for.  (Previously this
            # fell through with LIB_NAMES unbound and raised NameError.)
            LIB_NAMES = []
            LIB_SEARCH_PATHS = []
        HOST_DLINK_PATHS = ""
        for lname in LIB_NAMES:
            logging.debug(f"Looking for {lname}")
            for dname in LIB_SEARCH_PATHS:
                logging.debug(f"In {dname}")
                if os.path.isfile(os.path.join(dname, lname)):
                    logging.debug(f"Found {lname} in {dname}")
                    HOST_DLINK_PATHS += os.path.join(dname, lname) + os.pathsep
                    break
        HOST_DLINK_PATHS = HOST_DLINK_PATHS.rstrip(os.pathsep)
        logging.critical(f"Setting HOST_DLINK_PATHS to {HOST_DLINK_PATHS}")
        shell_environment.ShellEnvironment().set_shell_var(
            "HOST_DLINK_PATHS", HOST_DLINK_PATHS)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/EmulatorPkg/PlatformCI/PlatformBuild.py
|
## @ SplitFspBin.py
#
# Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import sys
import uuid
import copy
import struct
import argparse
from ctypes import *
from functools import reduce
"""
This utility supports some operations for Intel FSP 1.x/2.x image.
It supports:
- Display FSP 1.x/2.x information header
- Split FSP 2.x image into individual FSP-T/M/S/O component
- Rebase FSP 1.x/2.x components to a different base address
- Generate FSP 1.x/2.x mapping C header file
"""
# C comment banner emitted at the top of generated FSP mapping header files.
CopyRightHeaderFile = """/*
*
* Automatically generated file; DO NOT EDIT.
* FSP mapping file
*
*/
"""
class c_uint24(Structure):
    """Little-Endian 24-bit Unsigned Integer"""
    # Packed 3-byte field so the struct layouts below match the on-disk
    # FFS/section headers exactly.
    _pack_ = 1
    _fields_ = [('Data', (c_uint8 * 3))]
    def __init__(self, val=0):
        self.set_value(val)
    def __str__(self, indent=0):
        # NOTE(review): nonstandard extra 'indent' parameter on __str__;
        # plain str() never passes it, so it is effectively unused.
        return '0x%.6x' % self.value
    def __int__(self):
        return self.get_value()
    def set_value(self, val):
        # Val2Bytes/Bytes2Val are defined later in this module; that is fine
        # because they are only looked up at call time, after import.
        self.Data[0:3] = Val2Bytes(val, 3)
    def get_value(self):
        return Bytes2Val(self.Data[0:3])
    # Expose the 24-bit quantity as an int-like .value attribute.
    value = property(get_value, set_value)
# ctypes mirrors of the PI-specification firmware volume / FFS file layouts.
# Field names and order must match the on-disk format exactly; do not reorder.

class EFI_FIRMWARE_VOLUME_HEADER(Structure):
    # Header at the start of every firmware volume.
    _fields_ = [
        ('ZeroVector',           ARRAY(c_uint8, 16)),
        ('FileSystemGuid',       ARRAY(c_uint8, 16)),
        ('FvLength',             c_uint64),
        ('Signature',            ARRAY(c_char, 4)),   # always b'_FVH'
        ('Attributes',           c_uint32),
        ('HeaderLength',         c_uint16),
        ('Checksum',             c_uint16),
        ('ExtHeaderOffset',      c_uint16),           # 0 when no ext header
        ('Reserved',             c_uint8),
        ('Revision',             c_uint8)
        ]

class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure):
    # Optional extended header holding the FV name GUID.
    _fields_ = [
        ('FvName',               ARRAY(c_uint8, 16)),
        ('ExtHeaderSize',        c_uint32)
        ]

class EFI_FFS_INTEGRITY_CHECK(Structure):
    # Split header/file checksum bytes of an FFS file.
    _fields_ = [
        ('Header',               c_uint8),
        ('File',                 c_uint8)
        ]

class EFI_FFS_FILE_HEADER(Structure):
    # Header of a single FFS file inside a firmware volume.
    _fields_ = [
        ('Name',                 ARRAY(c_uint8, 16)),  # file GUID; all 0xFF = padding
        ('IntegrityCheck',       EFI_FFS_INTEGRITY_CHECK),
        ('Type',                 c_uint8),
        ('Attributes',           c_uint8),
        ('Size',                 c_uint24),            # total file size incl. header
        ('State',                c_uint8)
        ]

class EFI_COMMON_SECTION_HEADER(Structure):
    # Header of a section inside an FFS file.
    _fields_ = [
        ('Size',                 c_uint24),            # total section size incl. header
        ('Type',                 c_uint8)
        ]
# ctypes mirrors of the Intel FSP 1.x/2.x header layouts (FSP EAS).
# Field order mirrors the binary format; do not reorder.

class FSP_COMMON_HEADER(Structure):
    # Minimal prefix shared by all FSP header structures.
    _fields_ = [
        ('Signature',            ARRAY(c_char, 4)),
        ('HeaderLength',         c_uint32)
        ]

class FSP_INFORMATION_HEADER(Structure):
    # 'FSPH' information header embedded in each FSP component.
    _fields_ = [
        ('Signature',            ARRAY(c_char, 4)),
        ('HeaderLength',         c_uint32),
        ('Reserved1',            c_uint16),
        ('SpecVersion',          c_uint8),
        ('HeaderRevision',       c_uint8),
        ('ImageRevision',        c_uint32),
        ('ImageId',              ARRAY(c_char, 8)),
        ('ImageSize',            c_uint32),
        ('ImageBase',            c_uint32),   # link-time base; rewritten on rebase
        ('ImageAttribute',       c_uint16),
        ('ComponentAttribute',   c_uint16),   # bits 12-15 encode component type (T/M/S/O...)
        ('CfgRegionOffset',      c_uint32),
        ('CfgRegionSize',        c_uint32),
        ('Reserved2',            c_uint32),
        ('TempRamInitEntryOffset',     c_uint32),
        ('Reserved3',                  c_uint32),
        ('NotifyPhaseEntryOffset',     c_uint32),
        ('FspMemoryInitEntryOffset',   c_uint32),
        ('TempRamExitEntryOffset',     c_uint32),
        ('FspSiliconInitEntryOffset',  c_uint32),
        ('FspMultiPhaseSiInitEntryOffset', c_uint32),
        ('ExtendedImageRevision',  c_uint16),
        ('Reserved4',              c_uint16),
        ('FspMultiPhaseMemInitEntryOffset', c_uint32),
        ('FspSmmInitEntryOffset',  c_uint32)
        ]

class FSP_PATCH_TABLE(Structure):
    # 'FSPP' patch table header; PatchEntryNum c_uint32 entries follow it.
    _fields_ = [
        ('Signature',            ARRAY(c_char, 4)),
        ('HeaderLength',         c_uint16),
        ('HeaderRevision',       c_uint8),
        ('Reserved',             c_uint8),
        ('PatchEntryNum',        c_uint32)
        ]
# ctypes mirrors of the PE/COFF and TE image headers used when rebasing
# the executables contained in the FSP firmware volumes.

class EFI_IMAGE_DATA_DIRECTORY(Structure):
    # One (RVA, size) entry of the optional-header data directory.
    _fields_ = [
        ('VirtualAddress',       c_uint32),
        ('Size',                 c_uint32)
        ]

class EFI_TE_IMAGE_HEADER(Structure):
    # Terse Executable header ('VZ'): a stripped-down PE header.
    _fields_ = [
        ('Signature',            ARRAY(c_char, 2)),
        ('Machine',              c_uint16),
        ('NumberOfSections',     c_uint8),
        ('Subsystem',            c_uint8),
        ('StrippedSize',         c_uint16),   # bytes removed relative to the full PE header
        ('AddressOfEntryPoint',  c_uint32),
        ('BaseOfCode',           c_uint32),
        ('ImageBase',            c_uint64),
        ('DataDirectoryBaseReloc',  EFI_IMAGE_DATA_DIRECTORY),
        ('DataDirectoryDebug',      EFI_IMAGE_DATA_DIRECTORY)
        ]

class EFI_IMAGE_DOS_HEADER(Structure):
    # Legacy DOS 'MZ' stub header; e_lfanew points at the NT headers.
    _fields_ = [
        ('e_magic',              c_uint16),
        ('e_cblp',               c_uint16),
        ('e_cp',                 c_uint16),
        ('e_crlc',               c_uint16),
        ('e_cparhdr',            c_uint16),
        ('e_minalloc',           c_uint16),
        ('e_maxalloc',           c_uint16),
        ('e_ss',                 c_uint16),
        ('e_sp',                 c_uint16),
        ('e_csum',               c_uint16),
        ('e_ip',                 c_uint16),
        ('e_cs',                 c_uint16),
        ('e_lfarlc',             c_uint16),
        ('e_ovno',               c_uint16),
        ('e_res',                ARRAY(c_uint16, 4)),
        ('e_oemid',              c_uint16),
        ('e_oeminfo',            c_uint16),
        ('e_res2',               ARRAY(c_uint16, 10)),
        ('e_lfanew',             c_uint16)
        ]

class EFI_IMAGE_FILE_HEADER(Structure):
    # COFF file header following the PE signature.
    _fields_ = [
        ('Machine',              c_uint16),
        ('NumberOfSections',     c_uint16),
        ('TimeDateStamp',        c_uint32),
        ('PointerToSymbolTable', c_uint32),
        ('NumberOfSymbols',      c_uint32),
        ('SizeOfOptionalHeader', c_uint16),
        ('Characteristics',      c_uint16)
        ]

class PE_RELOC_BLOCK_HEADER(Structure):
    # Header of one base-relocation block (.reloc section).
    _fields_ = [
        ('PageRVA',              c_uint32),
        ('BlockSize',            c_uint32)
        ]
class EFI_IMAGE_OPTIONAL_HEADER32(Structure):
    # PE32 optional header (Magic 0x10B): 32-bit ImageBase and sizes.
    _fields_ = [
        ('Magic',                c_uint16),
        ('MajorLinkerVersion',   c_uint8),
        ('MinorLinkerVersion',   c_uint8),
        ('SizeOfCode',           c_uint32),
        ('SizeOfInitializedData',   c_uint32),
        ('SizeOfUninitializedData', c_uint32),
        ('AddressOfEntryPoint',  c_uint32),
        ('BaseOfCode',           c_uint32),
        ('BaseOfData',           c_uint32),
        ('ImageBase',            c_uint32),
        ('SectionAlignment',     c_uint32),
        ('FileAlignment',        c_uint32),
        ('MajorOperatingSystemVersion', c_uint16),
        ('MinorOperatingSystemVersion', c_uint16),
        ('MajorImageVersion',    c_uint16),
        ('MinorImageVersion',    c_uint16),
        ('MajorSubsystemVersion', c_uint16),
        ('MinorSubsystemVersion', c_uint16),
        ('Win32VersionValue',    c_uint32),
        ('SizeOfImage',          c_uint32),
        ('SizeOfHeaders',        c_uint32),
        ('CheckSum'     ,        c_uint32),
        ('Subsystem',            c_uint16),
        ('DllCharacteristics',   c_uint16),
        ('SizeOfStackReserve',   c_uint32),
        ('SizeOfStackCommit' ,   c_uint32),
        ('SizeOfHeapReserve',    c_uint32),
        ('SizeOfHeapCommit' ,    c_uint32),
        ('LoaderFlags'     ,     c_uint32),
        ('NumberOfRvaAndSizes',  c_uint32),
        ('DataDirectory',        ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16))
        ]

class EFI_IMAGE_OPTIONAL_HEADER32_PLUS(Structure):
    # PE32+ optional header (Magic 0x20B): 64-bit ImageBase/stack/heap,
    # no BaseOfData field.
    _fields_ = [
        ('Magic',                c_uint16),
        ('MajorLinkerVersion',   c_uint8),
        ('MinorLinkerVersion',   c_uint8),
        ('SizeOfCode',           c_uint32),
        ('SizeOfInitializedData',   c_uint32),
        ('SizeOfUninitializedData', c_uint32),
        ('AddressOfEntryPoint',  c_uint32),
        ('BaseOfCode',           c_uint32),
        ('ImageBase',            c_uint64),
        ('SectionAlignment',     c_uint32),
        ('FileAlignment',        c_uint32),
        ('MajorOperatingSystemVersion', c_uint16),
        ('MinorOperatingSystemVersion', c_uint16),
        ('MajorImageVersion',    c_uint16),
        ('MinorImageVersion',    c_uint16),
        ('MajorSubsystemVersion', c_uint16),
        ('MinorSubsystemVersion', c_uint16),
        ('Win32VersionValue',    c_uint32),
        ('SizeOfImage',          c_uint32),
        ('SizeOfHeaders',        c_uint32),
        ('CheckSum'     ,        c_uint32),
        ('Subsystem',            c_uint16),
        ('DllCharacteristics',   c_uint16),
        ('SizeOfStackReserve',   c_uint64),
        ('SizeOfStackCommit' ,   c_uint64),
        ('SizeOfHeapReserve',    c_uint64),
        ('SizeOfHeapCommit' ,    c_uint64),
        ('LoaderFlags'     ,     c_uint32),
        ('NumberOfRvaAndSizes',  c_uint32),
        ('DataDirectory',        ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16))
        ]

class EFI_IMAGE_OPTIONAL_HEADER(Union):
    # Overlay of the two optional-header variants; select by Magic value.
    _fields_ = [
        ('PeOptHdr',             EFI_IMAGE_OPTIONAL_HEADER32),
        ('PePlusOptHdr',         EFI_IMAGE_OPTIONAL_HEADER32_PLUS)
        ]

class EFI_IMAGE_NT_HEADERS32(Structure):
    # 'PE\0\0' signature + COFF header + optional header union.
    _fields_ = [
        ('Signature',            c_uint32),
        ('FileHeader',           EFI_IMAGE_FILE_HEADER),
        ('OptionalHeader',       EFI_IMAGE_OPTIONAL_HEADER)
        ]
class EFI_IMAGE_DIRECTORY_ENTRY:
    # Indices into the PE optional-header DataDirectory array.
    EXPORT                     = 0
    IMPORT                     = 1
    RESOURCE                   = 2
    EXCEPTION                  = 3
    SECURITY                   = 4
    BASERELOC                  = 5
    DEBUG                      = 6
    COPYRIGHT                  = 7
    GLOBALPTR                  = 8
    TLS                        = 9
    LOAD_CONFIG                = 10

class EFI_FV_FILETYPE:
    # FFS file Type values (PI spec).
    ALL                        = 0x00
    RAW                        = 0x01
    FREEFORM                   = 0x02
    SECURITY_CORE              = 0x03
    PEI_CORE                   = 0x04
    DXE_CORE                   = 0x05
    PEIM                       = 0x06
    DRIVER                     = 0x07
    COMBINED_PEIM_DRIVER       = 0x08
    APPLICATION                = 0x09
    SMM                        = 0x0a
    FIRMWARE_VOLUME_IMAGE      = 0x0b
    COMBINED_SMM_DXE           = 0x0c
    SMM_CORE                   = 0x0d
    OEM_MIN                    = 0xc0
    OEM_MAX                    = 0xdf
    DEBUG_MIN                  = 0xe0
    DEBUG_MAX                  = 0xef
    FFS_MIN                    = 0xf0
    FFS_MAX                    = 0xff
    FFS_PAD                    = 0xf0

class EFI_SECTION_TYPE:
    """Enumeration of all valid firmware file section types."""
    ALL                        = 0x00
    COMPRESSION                = 0x01
    GUID_DEFINED               = 0x02
    DISPOSABLE                 = 0x03
    PE32                       = 0x10
    PIC                        = 0x11
    TE                         = 0x12
    DXE_DEPEX                  = 0x13
    VERSION                    = 0x14
    USER_INTERFACE             = 0x15
    COMPATIBILITY16            = 0x16
    FIRMWARE_VOLUME_IMAGE      = 0x17
    FREEFORM_SUBTYPE_GUID      = 0x18
    RAW                        = 0x19
    PEI_DEPEX                  = 0x1b
    SMM_DEPEX                  = 0x1c
def AlignPtr (offset, alignment = 8):
    """Round *offset* up to the next multiple of *alignment* (a power of 2)."""
    mask = alignment - 1
    return (offset + mask) & ~mask
def Bytes2Val (bytes):
    """Interpret a little-endian byte sequence as an unsigned integer."""
    # Fold from the most-significant (last) byte down; raises on an empty
    # sequence, matching the original reduce-over-slice behavior.
    return reduce(lambda acc, octet: (acc << 8) | octet, reversed(bytes))
def Val2Bytes (value, blen):
    """Return *value* as a list of *blen* little-endian bytes."""
    out = []
    for _ in range(blen):
        out.append(value & 0xff)
        value >>= 8
    return out
def IsIntegerType (val):
    """Return True when *val* is exactly an int (or long on Python 2).

    Uses an exact type check, so bool and float are rejected.
    """
    if sys.version_info[0] < 3:
        return type(val) in (int, long)
    return type(val) is int
def IsStrType (val):
    """Return True when *val* is the 'raw byte string' type for this
    Python: str on Python 2, bytes on Python 3."""
    raw_type = str if sys.version_info[0] < 3 else bytes
    return type(val) is raw_type
def HandleNameStr (val):
    """Render a fixed-width byte-string field as "0xHEX ('text')"."""
    # Little-endian fold of the bytes into one integer (inlined helper).
    num = 0
    for octet in reversed(bytearray(val)):
        num = (num << 8) | octet
    if sys.version_info[0] < 3:
        text = val
    else:
        text = str(val, 'utf-8')
    return "0x%X ('%s')" % (num, text)
def OutputStruct (obj, indent = 0, plen = 0):
    # Render a ctypes structure as indented "field = value" text.
    # plen caps how many bytes of the structure are printed (0 = all);
    # indent > 0 marks a recursive call for a nested structure.
    if indent:
        body = ''
    else:
        # header line ("<Name>:") only for the outermost call
        body = (' ' * indent + '<%s>:\n') % obj.__class__.__name__
    if plen == 0:
        plen = sizeof(obj)
    max_key_len = 26
    pstr = (' ' * (indent + 1) + '{0:<%d} = {1}\n') % max_key_len
    for field in obj._fields_:
        key = field[0]
        val = getattr(obj, key)
        rep = ''
        if not isinstance(val, c_uint24) and isinstance(val, Structure):
            # nested structure: recurse one indent level deeper
            body += pstr.format(key, val.__class__.__name__)
            body += OutputStruct (val, indent + 1)
            plen -= sizeof(val)
        else:
            if IsStrType (val):
                rep = HandleNameStr (val)
            elif IsIntegerType (val):
                if (key == 'ImageRevision'):
                    # Remember the revision parts; the ExtendedImageRevision
                    # branch below relies on 'ImageRevision' having been seen
                    # earlier in the same call (true for the field order of
                    # FSP_INFORMATION_HEADER -- NameError otherwise).
                    FspImageRevisionMajor = ((val >> 24) & 0xFF)
                    FspImageRevisionMinor = ((val >> 16) & 0xFF)
                    FspImageRevisionRevision = ((val >> 8) & 0xFF)
                    FspImageRevisionBuildNumber = (val & 0xFF)
                    rep = '0x%08X' % val
                elif (key == 'ExtendedImageRevision'):
                    # FSP 2.x: extends Revision/BuildNumber to 16 bits each.
                    FspImageRevisionRevision |= (val & 0xFF00)
                    FspImageRevisionBuildNumber |= ((val << 8) & 0xFF00)
                    rep = "0x%04X ('%02X.%02X.%04X.%04X')" % (val, FspImageRevisionMajor, FspImageRevisionMinor, FspImageRevisionRevision, FspImageRevisionBuildNumber)
                elif field[1] == c_uint64:
                    rep = '0x%016X' % val
                elif field[1] == c_uint32:
                    rep = '0x%08X' % val
                elif field[1] == c_uint16:
                    rep = '0x%04X' % val
                elif field[1] == c_uint8:
                    rep = '0x%02X' % val
                else:
                    rep = '0x%X' % val
            elif isinstance(val, c_uint24):
                rep = '0x%X' % val.get_value()
            elif 'c_ubyte_Array' in str(type(val)):
                if sizeof(val) == 16:
                    # 16-byte arrays are shown as GUID strings
                    if sys.version_info[0] < 3:
                        rep = str(bytearray(val))
                    else:
                        rep = bytes(val)
                    rep = str(uuid.UUID(bytes_le = rep)).upper()
                else:
                    res = ['0x%02X'%i for i in bytearray(val)]
                    rep = '[%s]' % (','.join(res))
            else:
                rep = str(val)
            plen -= sizeof(field[1])
            body += pstr.format(key, rep)
        if plen <= 0:
            break
    return body
class Section:
    # A single FFS section (header + payload) and its byte offset within
    # the enclosing FFS file.
    def __init__(self, offset, secdata):
        # secdata must be a mutable buffer (bytearray) for from_buffer().
        self.SecHdr   = EFI_COMMON_SECTION_HEADER.from_buffer (secdata, 0)
        self.SecData  = secdata[0:int(self.SecHdr.Size)]
        self.Offset   = offset
class FirmwareFile:
    """One FFS file within a firmware volume: header, raw data, and the
    parsed list of contained sections."""

    def __init__(self, offset, filedata):
        # filedata must be a mutable buffer (bytearray) for from_buffer().
        self.FfsHdr   = EFI_FFS_FILE_HEADER.from_buffer (filedata, 0)
        self.FfsData  = filedata[0:int(self.FfsHdr.Size)]
        self.Offset   = offset
        self.SecList  = []

    def ParseFfs(self):
        """Populate self.SecList with the sections contained in this file."""
        ffssize = len(self.FfsData)
        offset  = sizeof(self.FfsHdr)
        # Skip section parsing for padding files (Name GUID of all 0xFF).
        # Fix: the original compared the ctypes c_uint8 array directly to a
        # str ("'\xff' * 16"), which is never equal, so padding files were
        # parsed as if they contained sections.
        if bytes(bytearray(self.FfsHdr.Name)) != b'\xff' * 16:
            while offset < (ffssize - sizeof (EFI_COMMON_SECTION_HEADER)):
                sechdr = EFI_COMMON_SECTION_HEADER.from_buffer (self.FfsData, offset)
                sec = Section (offset, self.FfsData[offset:offset + int(sechdr.Size)])
                self.SecList.append(sec)
                offset += int(sechdr.Size)
                # sections are 4-byte aligned within the file
                offset  = AlignPtr(offset, 4)
class FirmwareVolume:
    """One firmware volume: header, optional extended header, raw data,
    and the parsed list of FFS files."""

    def __init__(self, offset, fvdata):
        # fvdata must be a mutable buffer (bytearray) for from_buffer().
        self.FvHdr    = EFI_FIRMWARE_VOLUME_HEADER.from_buffer (fvdata, 0)
        self.FvData   = fvdata[0 : self.FvHdr.FvLength]
        self.Offset   = offset
        if self.FvHdr.ExtHeaderOffset > 0:
            self.FvExtHdr = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer (self.FvData, self.FvHdr.ExtHeaderOffset)
        else:
            self.FvExtHdr = None
        self.FfsList  = []

    def ParseFv(self):
        """Populate self.FfsList by walking the FFS files in this volume."""
        fvsize = len(self.FvData)
        if self.FvExtHdr:
            offset = self.FvHdr.ExtHeaderOffset + self.FvExtHdr.ExtHeaderSize
        else:
            offset = self.FvHdr.HeaderLength
        # FFS files are 8-byte aligned within the volume.
        offset = AlignPtr(offset)
        while offset < (fvsize - sizeof (EFI_FFS_FILE_HEADER)):
            ffshdr = EFI_FFS_FILE_HEADER.from_buffer (self.FvData, offset)
            # All-0xFF name and size mark the free space at the end of the
            # volume.  Fix: the original compared the ctypes c_uint8 array
            # directly to a str, which is never equal, so the terminator was
            # never recognized and free space was misparsed as a file.
            if (bytes(bytearray(ffshdr.Name)) == b'\xff' * 16) and (int(ffshdr.Size) == 0xFFFFFF):
                offset = fvsize
            else:
                ffs = FirmwareFile (offset, self.FvData[offset:offset + int(ffshdr.Size)])
                ffs.ParseFfs()
                self.FfsList.append(ffs)
                offset += int(ffshdr.Size)
                offset = AlignPtr(offset)
class FspImage:
    """One FSP component (T/M/S/...) and its patch-table bookkeeping."""

    def __init__(self, offset, fih, fihoff, patch):
        self.Fih = fih
        self.FihOffset = fihoff
        self.Offset = offset
        self.FvIdxList = []
        # Component type letter is selected by bits 12..15 of ComponentAttribute.
        self.Type = "XTMSIXXXOXXXXXXX"[(fih.ComponentAttribute >> 12) & 0x0F]
        # NOTE: the caller's list is aliased (not copied), matching original
        # behavior; the image-base field (FIH offset + 0x1C) is always patched.
        self.PatchList = patch
        self.PatchList.append(fihoff + 0x1C)

    def AppendFv(self, FvIdx):
        """Record that firmware volume *FvIdx* belongs to this component."""
        self.FvIdxList.append(FvIdx)

    def Patch(self, delta, fdbin):
        """Apply the FSP patch table to *fdbin*, adding *delta* to each entry.

        Returns (count, applied): entries seen vs. entries actually patched,
        excluding the internally appended base-address entry.
        """
        count = 0
        applied = 0
        # Fix: the loop index from enumerate() was never used - iterate directly.
        for patch in self.PatchList:
            ptype = (patch >> 24) & 0x0F
            if ptype not in [0x00, 0x0F]:
                raise Exception('ERROR: Invalid patch type %d !' % ptype)
            if patch & 0x80000000:
                # Negative (end-relative) offset within the image.
                patch = self.Fih.ImageSize - (0x1000000 - (patch & 0xFFFFFF))
            else:
                patch = patch & 0xFFFFFF
            if (patch < self.Fih.ImageSize) and (patch + sizeof(c_uint32) <= self.Fih.ImageSize):
                offset = patch + self.Offset
                value = Bytes2Val(fdbin[offset:offset + sizeof(c_uint32)])
                value += delta
                fdbin[offset:offset + sizeof(c_uint32)] = Val2Bytes(value, sizeof(c_uint32))
                applied += 1
            count += 1
        # Don't count the FSP base address patch entry appended at the end
        if count != 0:
            count -= 1
            applied -= 1
        return (count, applied)
class FirmwareDevice:
    """A flash-device image file: its FVs and the FSP components found inside."""

    def __init__(self, offset, fdfile):
        self.FvList = []
        self.FspList = []
        self.FdFile = fdfile
        # NOTE: the 'offset' argument is ignored and Offset is always 0,
        # matching the original behavior.
        self.Offset = 0
        with open(self.FdFile, 'rb') as fdh:
            self.FdData = bytearray(fdh.read())

    def ParseFd(self):
        """Split the FD data into consecutive firmware volumes."""
        cur = 0
        fdsize = len(self.FdData)
        self.FvList = []
        while cur < (fdsize - sizeof(EFI_FIRMWARE_VOLUME_HEADER)):
            fvh = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(self.FdData, cur)
            if b'_FVH' != fvh.Signature:
                raise Exception("ERROR: Invalid FV header !")
            fv = FirmwareVolume(cur, self.FdData[cur:cur + fvh.FvLength])
            fv.ParseFv()
            self.FvList.append(fv)
            cur += fv.FvHdr.FvLength

    def CheckFsp(self):
        """Verify every FSP component shares the same ImageId and revision."""
        if len(self.FspList) == 0:
            return
        first = None
        for fsp in self.FspList:
            if not first:
                first = fsp.Fih
            else:
                cur = fsp.Fih
                if (cur.ImageId != first.ImageId) or (cur.ImageRevision != first.ImageRevision):
                    raise Exception("ERROR: Inconsistent FSP ImageId or ImageRevision detected !")

    def ParseFsp(self):
        """Locate FSP information headers and group FVs into FSP components."""
        flen = 0
        for idx, fv in enumerate(self.FvList):
            # Check if this FV contains FSP header
            if flen == 0:
                if len(fv.FfsList) == 0:
                    continue
                ffs = fv.FfsList[0]
                if len(ffs.SecList) == 0:
                    continue
                sec = ffs.SecList[0]
                if sec.SecHdr.Type != EFI_SECTION_TYPE.RAW:
                    continue
                fihoffset = ffs.Offset + sec.Offset + sizeof(sec.SecHdr)
                fspoffset = fv.Offset
                offset = fspoffset + fihoffset
                fih = FSP_INFORMATION_HEADER.from_buffer(self.FdData, offset)
                if b'FSPH' != fih.Signature:
                    continue
                # Scan the headers following FIH until the patch table (FSPP).
                offset += fih.HeaderLength
                offset = AlignPtr(offset, 4)
                plist = []
                while True:
                    fch = FSP_COMMON_HEADER.from_buffer(self.FdData, offset)
                    if b'FSPP' != fch.Signature:
                        offset += fch.HeaderLength
                        offset = AlignPtr(offset, 4)
                    else:
                        fspp = FSP_PATCH_TABLE.from_buffer(self.FdData, offset)
                        offset += sizeof(fspp)
                        pdata = (c_uint32 * fspp.PatchEntryNum).from_buffer(self.FdData, offset)
                        plist = list(pdata)
                        break
                fsp = FspImage(fspoffset, fih, fihoffset, plist)
                fsp.AppendFv(idx)
                self.FspList.append(fsp)
                # Remaining bytes of this component beyond the current FV.
                flen = fsp.Fih.ImageSize - fv.FvHdr.FvLength
            else:
                # This FV is a continuation of the current FSP component.
                fsp.AppendFv(idx)
                flen -= fv.FvHdr.FvLength
                if flen < 0:
                    raise Exception("ERROR: Incorrect FV size in image !")
        self.CheckFsp()
class PeTeImage:
    """Parser and rebaser for a TE or PE32/PE32+ image embedded in the FD."""

    def __init__(self, offset, data):
        self.Offset = offset  # Fix: was assigned twice in the original.
        self.Data = data
        self.RelocList = []
        tehdr = EFI_TE_IMAGE_HEADER.from_buffer(data, 0)
        if tehdr.Signature == b'VZ':  # TE image
            self.TeHdr = tehdr
        elif tehdr.Signature == b'MZ':  # PE image
            self.TeHdr = None
            self.DosHdr = EFI_IMAGE_DOS_HEADER.from_buffer(data, 0)
            self.PeHdr = EFI_IMAGE_NT_HEADERS32.from_buffer(data, self.DosHdr.e_lfanew)
            if self.PeHdr.Signature != 0x4550:
                raise Exception("ERROR: Invalid PE32 header !")
            if self.PeHdr.OptionalHeader.PeOptHdr.Magic == 0x10b:  # PE32 image
                if self.PeHdr.FileHeader.SizeOfOptionalHeader < EFI_IMAGE_OPTIONAL_HEADER32.DataDirectory.offset:
                    raise Exception("ERROR: Unsupported PE32 image !")
                if self.PeHdr.OptionalHeader.PeOptHdr.NumberOfRvaAndSizes <= EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC:
                    raise Exception("ERROR: No relocation information available !")
            elif self.PeHdr.OptionalHeader.PeOptHdr.Magic == 0x20b:  # PE32+ image
                if self.PeHdr.FileHeader.SizeOfOptionalHeader < EFI_IMAGE_OPTIONAL_HEADER32_PLUS.DataDirectory.offset:
                    raise Exception("ERROR: Unsupported PE32+ image !")
                if self.PeHdr.OptionalHeader.PePlusOptHdr.NumberOfRvaAndSizes <= EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC:
                    raise Exception("ERROR: No relocation information available !")
            else:
                raise Exception("ERROR: Invalid PE32 optional header !")

    def IsTeImage(self):
        """Return True when this image is a stripped TE image."""
        return self.TeHdr is not None

    def ParseReloc(self):
        """Collect (type, offset) pairs from the base relocation directory."""
        if self.IsTeImage():
            rsize = self.TeHdr.DataDirectoryBaseReloc.Size
            roffset = sizeof(self.TeHdr) - self.TeHdr.StrippedSize + self.TeHdr.DataDirectoryBaseReloc.VirtualAddress
        else:
            # Fix: select the optional-header view by the PE magic instead of
            # reading the PE32 view first and overwriting it for PE32+.
            if self.PeHdr.OptionalHeader.PePlusOptHdr.Magic == 0x20b:  # PE32+ image
                opthdr = self.PeHdr.OptionalHeader.PePlusOptHdr
            else:  # PE32 image
                opthdr = self.PeHdr.OptionalHeader.PeOptHdr
            rsize = opthdr.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].Size
            roffset = opthdr.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].VirtualAddress
        # Fix: removed unused local 'alignment'.
        offset = roffset
        while offset < roffset + rsize:
            offset = AlignPtr(offset, 4)
            blkhdr = PE_RELOC_BLOCK_HEADER.from_buffer(self.Data, offset)
            offset += sizeof(blkhdr)
            # Read relocation type,offset pairs
            rlen = blkhdr.BlockSize - sizeof(PE_RELOC_BLOCK_HEADER)
            rnum = int(rlen / sizeof(c_uint16))
            rdata = (c_uint16 * rnum).from_buffer(self.Data, offset)
            for each in rdata:
                roff = each & 0xfff
                rtype = each >> 12
                if rtype == 0:  # IMAGE_REL_BASED_ABSOLUTE:
                    continue
                if ((rtype != 3) and (rtype != 10)):  # IMAGE_REL_BASED_HIGHLOW and IMAGE_REL_BASED_DIR64
                    raise Exception("ERROR: Unsupported relocation type %d!" % rtype)
                # Calculate the offset of the relocation
                aoff = blkhdr.PageRVA + roff
                if self.IsTeImage():
                    aoff += sizeof(self.TeHdr) - self.TeHdr.StrippedSize
                self.RelocList.append((rtype, aoff))
            offset += sizeof(rdata)

    def Rebase(self, delta, fdbin):
        """Apply *delta* to every relocation in *fdbin*; return the patch count."""
        count = 0
        if delta == 0:
            return count
        for (rtype, roff) in self.RelocList:
            if rtype == 3:  # IMAGE_REL_BASED_HIGHLOW
                offset = roff + self.Offset
                value = Bytes2Val(fdbin[offset:offset + sizeof(c_uint32)])
                value += delta
                fdbin[offset:offset + sizeof(c_uint32)] = Val2Bytes(value, sizeof(c_uint32))
                count += 1
            elif rtype == 10:  # IMAGE_REL_BASED_DIR64
                offset = roff + self.Offset
                value = Bytes2Val(fdbin[offset:offset + sizeof(c_uint64)])
                value += delta
                fdbin[offset:offset + sizeof(c_uint64)] = Val2Bytes(value, sizeof(c_uint64))
                count += 1
            else:
                raise Exception('ERROR: Unknown relocation type %d !' % rtype)
        # Also fix up the ImageBase field in the image header itself.
        if self.IsTeImage():
            offset = self.Offset + EFI_TE_IMAGE_HEADER.ImageBase.offset
            size = EFI_TE_IMAGE_HEADER.ImageBase.size
        else:
            offset = self.Offset + self.DosHdr.e_lfanew
            offset += EFI_IMAGE_NT_HEADERS32.OptionalHeader.offset
            if self.PeHdr.OptionalHeader.PePlusOptHdr.Magic == 0x20b:  # PE32+ image
                offset += EFI_IMAGE_OPTIONAL_HEADER32_PLUS.ImageBase.offset
                size = EFI_IMAGE_OPTIONAL_HEADER32_PLUS.ImageBase.size
            else:
                offset += EFI_IMAGE_OPTIONAL_HEADER32.ImageBase.offset
                size = EFI_IMAGE_OPTIONAL_HEADER32.ImageBase.size
        value = Bytes2Val(fdbin[offset:offset + size]) + delta
        fdbin[offset:offset + size] = Val2Bytes(value, size)
        return count
def ShowFspInfo (fspfile):
    """Print the FV layout and per-component FSP information of *fspfile*."""
    fd = FirmwareDevice(0, fspfile)
    fd.ParseFd ()
    fd.ParseFsp ()
    print ("\nFound the following %d Firmware Volumes in FSP binary:" % (len(fd.FvList)))
    for idx, fv in enumerate(fd.FvList):
        # Fix: an FV without an extended header has FvExtHdr == None; the old
        # code dereferenced it unconditionally and raised AttributeError.
        name = fv.FvExtHdr.FvName if fv.FvExtHdr else None
        if name:
            if sys.version_info[0] < 3:
                name = str(bytearray(name))
            else:
                name = bytes(name)
        else:
            # Fix: the fallback is now bytes so uuid.UUID(bytes_le=...) also
            # works on Python 3 (the old '\xff'*16 str raised TypeError there).
            name = b'\xff' * 16
        guid = uuid.UUID(bytes_le = name)
        print ("FV%d:" % idx)
        print (" GUID : %s" % str(guid).upper())
        print (" Offset : 0x%08X" % fv.Offset)
        print (" Length : 0x%08X" % fv.FvHdr.FvLength)
    print ("\n")
    for fsp in fd.FspList:
        fvlist = map(lambda x : 'FV%d' % x, fsp.FvIdxList)
        print ("FSP_%s contains %s" % (fsp.Type, ','.join(fvlist)))
        print ("%s" % (OutputStruct(fsp.Fih, 0, fsp.Fih.HeaderLength)))
def GenFspHdr (fspfile, outdir, hfile):
    """Generate a C header with base/offset/length defines for each FSP component."""
    fd = FirmwareDevice(0, fspfile)
    fd.ParseFd ()
    fd.ParseFsp ()
    if not hfile:
        # Default the header name to the FSP binary's base name.
        hfile = os.path.splitext(os.path.basename(fspfile))[0] + '.h'
    fspname, ext = os.path.splitext(os.path.basename(hfile))
    filename = os.path.join(outdir, fspname + ext)
    with open(filename, 'w') as hfsp:
        hfsp.write ('%s\n\n' % CopyRightHeaderFile)
        firstfv = True
        for fsp in fd.FspList:
            fih = fsp.Fih
            if firstfv:
                # Image ID/revision are shared across components - emit once.
                if sys.version_info[0] < 3:
                    hfsp.write("#define FSP_IMAGE_ID 0x%016X /* '%s' */\n" % (Bytes2Val(bytearray(fih.ImageId)), fih.ImageId))
                else:
                    hfsp.write("#define FSP_IMAGE_ID 0x%016X /* '%s' */\n" % (Bytes2Val(bytearray(fih.ImageId)), str (fih.ImageId, 'utf-8')))
                hfsp.write("#define FSP_IMAGE_REV 0x%08X \n\n" % fih.ImageRevision)
                firstfv = False
            fv = fd.FvList[fsp.FvIdxList[0]]
            hfsp.write ('#define FSP%s_BASE 0x%08X\n' % (fsp.Type, fih.ImageBase))
            hfsp.write ('#define FSP%s_OFFSET 0x%08X\n' % (fsp.Type, fv.Offset))
            hfsp.write ('#define FSP%s_LENGTH 0x%08X\n\n' % (fsp.Type, fih.ImageSize))
def SplitFspBin (fspfile, outdir, nametemplate):
    """Split a combined FSP binary into one file per FSP component."""
    fd = FirmwareDevice(0, fspfile)
    fd.ParseFd ()
    fd.ParseFsp ()
    for fsp in fd.FspList:
        if fsp.Fih.HeaderRevision < 3:
            raise Exception("ERROR: FSP 1.x is not supported by the split command !")
        # Fix: removed unused local 'ftype' (fsp.Type is used directly below).
        if not nametemplate:
            nametemplate = fspfile
        fspname, ext = os.path.splitext(os.path.basename(nametemplate))
        filename = os.path.join(outdir, fspname + '_' + fsp.Type + ext)
        print ("Create FSP component file '%s'" % filename)
        # A component file is the concatenation of all FVs of that component.
        with open(filename, 'wb') as hfsp:
            for fvidx in fsp.FvIdxList:
                hfsp.write(fd.FvList[fvidx].FvData)
def RebaseFspBin (FspBinary, FspComponent, FspBase, OutputDir, OutputFile):
    """Rebase the requested FSP component(s) to new base addresses.

    FspComponent is a list of component letters ('t','m','s',...); FspBase is
    a matching list of new base addresses (decimal or 0x-prefixed strings).
    """
    fd = FirmwareDevice(0, FspBinary)
    fd.ParseFd ()
    fd.ParseFsp ()
    numcomp = len(FspComponent)
    baselist = FspBase
    if numcomp != len(baselist):
        print ("ERROR: Required number of base does not match number of FSP component !")
        return
    newfspbin = fd.FdData[:]
    for idx, fspcomp in enumerate(FspComponent):
        found = False
        for fsp in fd.FspList:
            # Is this FSP 1.x single binary?
            if fsp.Fih.HeaderRevision < 3:
                found = True
                ftype = 'X'
                break
            ftype = fsp.Type.lower()
            if ftype == fspcomp:
                found = True
                break
        if not found:
            print ("ERROR: Could not find FSP_%c component to rebase !" % fspcomp.upper())
            return
        fspbase = baselist[idx]
        if fspbase.startswith('0x'):
            newbase = int(fspbase, 16)
        else:
            newbase = int(fspbase)
        oldbase = fsp.Fih.ImageBase
        delta = newbase - oldbase
        print ("Rebase FSP-%c from 0x%08X to 0x%08X:" % (ftype.upper(),oldbase,newbase))
        # Collect every TE/PE32 image inside this component's FVs.
        imglist = []
        for fvidx in fsp.FvIdxList:
            fv = fd.FvList[fvidx]
            for ffs in fv.FfsList:
                for sec in ffs.SecList:
                    if sec.SecHdr.Type in [EFI_SECTION_TYPE.TE, EFI_SECTION_TYPE.PE32]:  # TE or PE32
                        offset = fd.Offset + fv.Offset + ffs.Offset + sec.Offset + sizeof(sec.SecHdr)
                        imglist.append ((offset, len(sec.SecData) - sizeof(sec.SecHdr)))
        fcount = 0
        pcount = 0
        for (offset, length) in imglist:
            img = PeTeImage(offset, fd.FdData[offset:offset + length])
            img.ParseReloc()
            pcount += img.Rebase(delta, newfspbin)
            fcount += 1
        print (" Patched %d entries in %d TE/PE32 images." % (pcount, fcount))
        (count, applied) = fsp.Patch(delta, newfspbin)
        print (" Patched %d entries using FSP patch table." % applied)
        if count != applied:
            print (" %d invalid entries are ignored !" % (count - applied))
    if OutputFile == '':
        filename = os.path.basename(FspBinary)
        base, ext = os.path.splitext(filename)
        OutputFile = base + "_%08X" % newbase + ext
    fspname, ext = os.path.splitext(os.path.basename(OutputFile))
    filename = os.path.join(OutputDir, fspname + ext)
    # Fix: write via a dedicated handle instead of rebinding 'fd', which
    # shadowed the FirmwareDevice instance created above.
    with open(filename, "wb") as fout:
        fout.write(newfspbin)
def main ():
    """Command-line entry point: rebase / split / genhdr / info for FSP binaries."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='commands', dest="which")

    parser_rebase = subparsers.add_parser('rebase', help='rebase a FSP into a new base address')
    parser_rebase.set_defaults(which='rebase')
    parser_rebase.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)
    parser_rebase.add_argument('-c', '--fspcomp', choices=['t','m','s','o','i'], nargs='+', dest='FspComponent', type=str, help='FSP component to rebase', default = "['t']", required = True)
    # Fix: the '-b' help text was a copy/paste of the '-n' output-file text.
    parser_rebase.add_argument('-b', '--newbase', dest='FspBase', nargs='+', type=str, help='New FSP base address(es) to rebase to', default = '', required = True)
    parser_rebase.add_argument('-o', '--outdir' , dest='OutputDir', type=str, help='Output directory path', default = '.')
    parser_rebase.add_argument('-n', '--outfile', dest='OutputFile', type=str, help='Rebased FSP binary file name', default = '')

    parser_split = subparsers.add_parser('split', help='split a FSP into multiple components')
    parser_split.set_defaults(which='split')
    parser_split.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)
    parser_split.add_argument('-o', '--outdir' , dest='OutputDir', type=str, help='Output directory path', default = '.')
    parser_split.add_argument('-n', '--nametpl', dest='NameTemplate', type=str, help='Output name template', default = '')

    parser_genhdr = subparsers.add_parser('genhdr', help='generate a header file for FSP binary')
    parser_genhdr.set_defaults(which='genhdr')
    parser_genhdr.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)
    parser_genhdr.add_argument('-o', '--outdir' , dest='OutputDir', type=str, help='Output directory path', default = '.')
    parser_genhdr.add_argument('-n', '--hfile', dest='HFileName', type=str, help='Output header file name', default = '')

    parser_info = subparsers.add_parser('info', help='display FSP information')
    parser_info.set_defaults(which='info')
    parser_info.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)

    args = parser.parse_args()
    if args.which in ['rebase', 'split', 'genhdr', 'info']:
        if not os.path.exists(args.FspBinary):
            raise Exception ("ERROR: Could not locate FSP binary file '%s' !" % args.FspBinary)
        if hasattr(args, 'OutputDir') and not os.path.exists(args.OutputDir):
            raise Exception ("ERROR: Invalid output directory '%s' !" % args.OutputDir)

    if args.which == 'rebase':
        RebaseFspBin (args.FspBinary, args.FspComponent, args.FspBase, args.OutputDir, args.OutputFile)
    elif args.which == 'split':
        SplitFspBin (args.FspBinary, args.OutputDir, args.NameTemplate)
    elif args.which == 'genhdr':
        GenFspHdr (args.FspBinary, args.OutputDir, args.HFileName)
    elif args.which == 'info':
        ShowFspInfo (args.FspBinary)
    else:
        parser.print_help()
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/SplitFspBin.py
|
#!/usr/bin/env python
## @ FspDscBsf2Yaml.py
# This script convert DSC or BSF format file into YAML format
#
# Copyright(c) 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
from collections import OrderedDict
from datetime import date
from FspGenCfgData import CFspBsf2Dsc, CGenCfgData
# File-header template: formatted with (file-kind string, 4-digit year).
__copyright_tmp__ = """## @file
#
# Slim Bootloader CFGDATA %s File.
#
# Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
"""
class CFspDsc2Yaml():
    """Convert FSP DSC CFGDATA descriptions into YAML format."""

    def __init__(self):
        self._Hdr_key_list = ['EMBED', 'STRUCT']
        self._Bsf_key_list = ['NAME', 'HELP', 'TYPE', 'PAGE', 'PAGES',
                              'OPTION', 'CONDITION', 'ORDER', 'MARKER',
                              'SUBT', 'FIELD', 'FIND']
        self.gen_cfg_data = None
        # Matches "Name | Offset | Length | Value" style CFGDATA lines.
        self.cfg_reg_exp = re.compile(
            "^([_a-zA-Z0-9$\\(\\)]+)\\s*\\|\\s*(0x[0-9A-F]+|\\*)"
            "\\s*\\|\\s*(\\d+|0x[0-9a-fA-F]+)\\s*\\|\\s*(.+)")
        # Matches "KEY:{...}" groups for !BSF and !HDR directives.
        self.bsf_reg_exp = re.compile("(%s):{(.+?)}(?:$|\\s+)"
                                      % '|'.join(self._Bsf_key_list))
        self.hdr_reg_exp = re.compile("(%s):{(.+?)}"
                                      % '|'.join(self._Hdr_key_list))
        self.prefix = ''
        self.unused_idx = 0
        self.offset = 0
        self.base_offset = 0
def load_config_data_from_dsc(self, file_name):
"""
Load and parse a DSC CFGDATA file.
"""
gen_cfg_data = CGenCfgData('FSP')
if file_name.endswith('.dsc'):
if gen_cfg_data.ParseDscFile(file_name) != 0:
raise Exception('DSC file parsing error !')
if gen_cfg_data.CreateVarDict() != 0:
raise Exception('DSC variable creation error !')
else:
raise Exception('Unsupported file "%s" !' % file_name)
gen_cfg_data.UpdateDefaultValue()
self.gen_cfg_data = gen_cfg_data
def print_dsc_line(self):
"""
Debug function to print all DSC lines.
"""
for line in self.gen_cfg_data._DscLines:
print(line)
def format_value(self, field, text, indent=''):
"""
Format a CFGDATA item into YAML format.
"""
if (not text.startswith('!expand')) and (': ' in text):
tgt = ':' if field == 'option' else '- '
text = text.replace(': ', tgt)
lines = text.splitlines()
if len(lines) == 1 and field != 'help':
return text
else:
return '>\n ' + '\n '.join(
[indent + i.lstrip() for i in lines])
def reformat_pages(self, val):
# Convert XXX:YYY into XXX::YYY format for page definition
parts = val.split(',')
if len(parts) <= 1:
return val
new_val = []
for each in parts:
nodes = each.split(':')
if len(nodes) == 2:
each = '%s::%s' % (nodes[0], nodes[1])
new_val.append(each)
ret = ','.join(new_val)
return ret
def reformat_struct_value(self, utype, val):
# Convert DSC UINT16/32/64 array into new format by
# adding prefix 0:0[WDQ] to provide hint to the array format
if utype in ['UINT16', 'UINT32', 'UINT64']:
if val and val[0] == '{' and val[-1] == '}':
if utype == 'UINT16':
unit = 'W'
elif utype == 'UINT32':
unit = 'D'
else:
unit = 'Q'
val = '{ 0:0%s, %s }' % (unit, val[1:-1])
return val
def process_config(self, cfg):
if 'page' in cfg:
cfg['page'] = self.reformat_pages(cfg['page'])
if 'struct' in cfg:
cfg['value'] = self.reformat_struct_value(
cfg['struct'], cfg['value'])
def parse_dsc_line(self, dsc_line, config_dict, init_dict, include):
"""
Parse one DSC line and update config_dict / init_dict accordingly.

Returns True when the line contributed to the current config item
(a PCD value line, an !include action, or a !BSF directive that
completes an item), False when more lines are needed or the line is
not recognized.  init_dict is cleared on entry and may receive a
'padding' entry when a PCD offset gap is detected.
"""
init_dict.clear()
# Case 1: a PCD value line "gTokenSpace.Name | Offset | Length | Value".
match = re.match('g(CfgData|\\w+FspPkgTokenSpaceGuid)\\.(.+)',
dsc_line)
if match:
match = self.cfg_reg_exp.match(match.group(2))
if not match:
return False
config_dict['cname'] = self.prefix + match.group(1)
value = match.group(4).strip()
length = match.group(3).strip()
config_dict['length'] = length
config_dict['value'] = value
if match.group(2) == '*':
self.offset += int(length, 0)
else:
# Track offsets explicitly; a gap produces a padding request.
org_offset = int(match.group(2), 0)
if org_offset == 0:
self.base_offset = self.offset
offset = org_offset + self.base_offset
if self.offset != offset:
if offset > self.offset:
init_dict['padding'] = offset - self.offset
self.offset = offset + int(length, 0)
return True
# Case 2: an "# !< include" / "# !> " directive.
match = re.match("^\\s*#\\s+!([<>])\\s+include\\s+(.+)", dsc_line)
if match and len(config_dict) == 0:
# !include should not be inside a config field
# if so, do not convert include into YAML
init_dict = dict(config_dict)
config_dict.clear()
config_dict['cname'] = '$ACTION'
if match.group(1) == '<':
config_dict['include'] = match.group(2)
else:
config_dict['include'] = ''
return True
# Case 3: a "# !BSF ..." or "# !HDR ..." directive.
match = re.match("^\\s*#\\s+(!BSF|!HDR)\\s+(.+)", dsc_line)
if not match:
return False
remaining = match.group(2)
if match.group(1) == '!BSF':
result = self.bsf_reg_exp.findall(remaining)
if not result:
return False
for each in result:
key = each[0].lower()
val = each[1]
if key == 'field':
# Bit-field definition: "Name:Width".
name = each[1]
if ':' not in name:
raise Exception('Incorrect bit field format !')
parts = name.split(':')
config_dict['length'] = parts[1]
config_dict['cname'] = '@' + parts[0]
return True
elif key in ['pages', 'page', 'find']:
# Page/find directives become standalone $ACTION entries.
init_dict = dict(config_dict)
config_dict.clear()
config_dict['cname'] = '$ACTION'
if key == 'find':
config_dict['find'] = val
else:
config_dict['page'] = val
return True
elif key == 'subt':
# Template expansion: "NAME_TMPL:arg1:arg2..." -> !expand node.
config_dict.clear()
parts = each[1].split(':')
tmp_name = parts[0][:-5]
if tmp_name == 'CFGHDR':
# '_$FFF_' is a placeholder replaced later with the real tag.
cfg_tag = '_$FFF_'
sval = '!expand { %s_TMPL : [ ' % \
tmp_name + '%s, %s, ' % (parts[1], cfg_tag) + \
', '.join(parts[2:]) + ' ] }'
else:
sval = '!expand { %s_TMPL : [ ' % \
tmp_name + ', '.join(parts[1:]) + ' ] }'
config_dict.clear()
config_dict['cname'] = tmp_name
config_dict['expand'] = sval
return True
else:
# Plain attribute; a leading '+' continues the previous value.
if key in ['name', 'help', 'option'] and \
val.startswith('+'):
val = config_dict[key] + '\n' + val[1:]
if val.strip() == '':
val = "''"
config_dict[key] = val
else:
# !HDR directives: EMBED (struct start/end) and STRUCT (type).
match = self.hdr_reg_exp.match(remaining)
if not match:
return False
key = match.group(1)
remaining = match.group(2)
if key == 'EMBED':
parts = remaining.split(':')
names = parts[0].split(',')
if parts[-1] == 'END':
prefix = '>'
else:
prefix = '<'
skip = False
if parts[1].startswith('TAG_'):
tag_txt = '%s:%s' % (names[0], parts[1])
else:
tag_txt = names[0]
if parts[2] in ['START', 'END']:
if names[0] == 'PCIE_RP_PIN_CTRL[]':
skip = True
else:
tag_txt = '%s:%s' % (names[0], parts[1])
if not skip:
config_dict.clear()
config_dict['cname'] = prefix + tag_txt
return True
if key == 'STRUCT':
text = remaining.strip()
config_dict[key.lower()] = text
return False
def process_template_lines(self, lines):
"""
Process a line in DSC template section.
"""
template_name = ''
bsf_temp_dict = OrderedDict()
temp_file_dict = OrderedDict()
include_file = ['.']
for line in lines:
match = re.match("^\\s*#\\s+!([<>])\\s+include\\s+(.+)", line)
if match:
if match.group(1) == '<':
include_file.append(match.group(2))
else:
include_file.pop()
match = re.match(
"^\\s*#\\s+(!BSF)\\s+DEFT:{(.+?):(START|END)}", line)
if match:
if match.group(3) == 'START' and not template_name:
template_name = match.group(2).strip()
temp_file_dict[template_name] = list(include_file)
bsf_temp_dict[template_name] = []
if match.group(3) == 'END' and \
(template_name == match.group(2).strip()) and \
template_name:
template_name = ''
else:
if template_name:
bsf_temp_dict[template_name].append(line)
return bsf_temp_dict, temp_file_dict
def process_option_lines(self, lines):
"""
Process the DSC config-section lines into a list of config dicts.

Consecutive lines are folded into one config_dict by parse_dsc_line;
padding entries, bit-field virtual structs and CFGHDR/CFG_DATA
reordering are handled here.
"""
cfgs = []
struct_end = False
config_dict = dict()
init_dict = dict()
include = ['']
for line in lines:
ret = self.parse_dsc_line(line, config_dict, init_dict, include)
if ret:
# Offset gap detected by the parser: emit an unused-space filler.
if 'padding' in init_dict:
num = init_dict['padding']
init_dict.clear()
padding_dict = {}
cfgs.append(padding_dict)
padding_dict['cname'] = 'UnusedUpdSpace%d' % \
self.unused_idx
padding_dict['length'] = '0x%x' % num
padding_dict['value'] = '{ 0 }'
self.unused_idx += 1
if cfgs and cfgs[-1]['cname'][0] != '@' and \
config_dict['cname'][0] == '@':
# it is a bit field, mark the previous one as virtual
cname = cfgs[-1]['cname']
new_cfg = dict(cfgs[-1])
new_cfg['cname'] = '@$STRUCT'
cfgs[-1].clear()
cfgs[-1]['cname'] = cname
cfgs.append(new_cfg)
if cfgs and cfgs[-1]['cname'] == 'CFGHDR' and \
config_dict['cname'][0] == '<':
# swap CfgHeader and the CFG_DATA order
if ':' in config_dict['cname']:
# replace the real TAG for CFG_DATA
cfgs[-1]['expand'] = cfgs[-1]['expand'].replace(
'_$FFF_', '0x%s' %
config_dict['cname'].split(':')[1][4:])
cfgs.insert(-1, config_dict)
else:
self.process_config(config_dict)
if struct_end:
# Keep the struct-terminator entry ordered before this one.
struct_end = False
cfgs.insert(-1, config_dict)
else:
cfgs.append(config_dict)
if config_dict['cname'][0] == '>':
struct_end = True
config_dict = dict(init_dict)
return cfgs
def variable_fixup(self, each):
"""
Fix up some variable definitions for SBL.
"""
key = each
val = self.gen_cfg_data._MacroDict[each]
return key, val
def template_fixup(self, tmp_name, tmp_list):
"""
Fix up some special config templates for SBL
"""
return
def config_fixup(self, cfg_list):
"""
Fix up some special config items for SBL.
"""
# Insert FSPT_UPD/FSPM_UPD/FSPS_UPD tag so as to create C strcture
idxs = []
for idx, cfg in enumerate(cfg_list):
if cfg['cname'].startswith('<FSP_UPD_HEADER'):
idxs.append(idx)
if len(idxs) != 3:
return
# Handle insert backwards so that the index does not change in the loop
fsp_comp = 'SMT'
idx_comp = 0
for idx in idxs[::-1]:
# Add current FSP?_UPD start tag
cfgfig_dict = {}
cfgfig_dict['cname'] = '<FSP%s_UPD' % fsp_comp[idx_comp]
cfg_list.insert(idx, cfgfig_dict)
if idx_comp < 2:
# Add previous FSP?_UPD end tag
cfgfig_dict = {}
cfgfig_dict['cname'] = '>FSP%s_UPD' % fsp_comp[idx_comp + 1]
cfg_list.insert(idx, cfgfig_dict)
idx_comp += 1
# Add final FSPS_UPD end tag
cfgfig_dict = {}
cfgfig_dict['cname'] = '>FSP%s_UPD' % fsp_comp[0]
cfg_list.append(cfgfig_dict)
return
def get_section_range(self, section_name):
"""
Extract line number range from config file for a given section name.
"""
start = -1
end = -1
for idx, line in enumerate(self.gen_cfg_data._DscLines):
if start < 0 and line.startswith('[%s]' % section_name):
start = idx
elif start >= 0 and line.startswith('['):
end = idx
break
if start == -1:
start = 0
if end == -1:
end = len(self.gen_cfg_data._DscLines)
return start, end
def normalize_file_name(self, file, is_temp=False):
"""
Normalize file name convention so that it is consistent.
"""
if file.endswith('.dsc'):
file = file[:-4] + '.yaml'
dir_name = os.path.dirname(file)
base_name = os.path.basename(file)
if is_temp:
if 'Template_' not in file:
base_name = base_name.replace('Template', 'Template_')
else:
if 'CfgData_' not in file:
base_name = base_name.replace('CfgData', 'CfgData_')
if dir_name:
path = dir_name + '/' + base_name
else:
path = base_name
return path
def output_variable(self):
"""
Output variable block into a line list.
"""
lines = []
for each in self.gen_cfg_data._MacroDict:
key, value = self.variable_fixup(each)
lines.append('%-30s : %s' % (key, value))
return lines
def output_template(self):
"""
Output template block into a line list.
"""
self.offset = 0
self.base_offset = 0
start, end = self.get_section_range('PcdsDynamicVpd.Tmp')
bsf_temp_dict, temp_file_dict = self.process_template_lines(
self.gen_cfg_data._DscLines[start:end])
template_dict = dict()
lines = []
file_lines = {}
last_file = '.'
file_lines[last_file] = []
for tmp_name in temp_file_dict:
temp_file_dict[tmp_name][-1] = self.normalize_file_name(
temp_file_dict[tmp_name][-1], True)
if len(temp_file_dict[tmp_name]) > 1:
temp_file_dict[tmp_name][-2] = self.normalize_file_name(
temp_file_dict[tmp_name][-2], True)
for tmp_name in bsf_temp_dict:
file = temp_file_dict[tmp_name][-1]
if last_file != file and len(temp_file_dict[tmp_name]) > 1:
inc_file = temp_file_dict[tmp_name][-2]
file_lines[inc_file].extend(
['', '- !include %s' % temp_file_dict[tmp_name][-1], ''])
last_file = file
if file not in file_lines:
file_lines[file] = []
lines = file_lines[file]
text = bsf_temp_dict[tmp_name]
tmp_list = self.process_option_lines(text)
self.template_fixup(tmp_name, tmp_list)
template_dict[tmp_name] = tmp_list
lines.append('%s: >' % tmp_name)
lines.extend(self.output_dict(tmp_list, False)['.'])
lines.append('\n')
return file_lines
def output_config(self):
"""
Output config block into a line list.
"""
self.offset = 0
self.base_offset = 0
start, end = self.get_section_range('PcdsDynamicVpd.Upd')
cfgs = self.process_option_lines(
self.gen_cfg_data._DscLines[start:end])
self.config_fixup(cfgs)
file_lines = self.output_dict(cfgs, True)
return file_lines
def output_dict(self, cfgs, is_configs):
"""
Render a list of config dicts into YAML text lines, grouped per file.

Returns a dict mapping output file name ('.' for the main stream) to
its list of lines.  'level' tracks struct nesting ('<' opens, '>'
closes); '@' marks bit-field members.
"""
file_lines = {}
level = 0
file = '.'
for each in cfgs:
# Skip zero-length, non-bit-field entries entirely.
if 'length' in each:
if not each['length'].endswith('b') and int(each['length'],
0) == 0:
continue
# An $ACTION include switches the current output file.
if 'include' in each:
if each['include']:
each['include'] = self.normalize_file_name(
each['include'])
file_lines[file].extend(
['', '- !include %s' % each['include'], ''])
file = each['include']
else:
file = '.'
continue
if file not in file_lines:
file_lines[file] = []
lines = file_lines[file]
name = each['cname']
prefix = name[0]
if prefix == '<':
level += 1
# Indentation is two spaces per nesting level; plain items get two more.
padding = ' ' * level
if prefix not in '<>@':
padding += ' '
else:
name = name[1:]
if prefix == '@':
padding += ' '
if ':' in name:
parts = name.split(':')
name = parts[0]
padding = padding[2:] if is_configs else padding
if prefix != '>':
if 'expand' in each:
lines.append('%s- %s' % (padding, each['expand']))
else:
lines.append('%s- %-12s :' % (padding, name))
for field in each:
if field in ['cname', 'expand', 'include']:
continue
value_str = self.format_value(
field, each[field], padding + ' ' * 16)
full_line = ' %s %-12s : %s' % (padding, field, value_str)
lines.extend(full_line.splitlines())
if prefix == '>':
level -= 1
if level == 0:
lines.append('')
return file_lines
def bsf_to_dsc(bsf_file, dsc_file):
    """Convert a BSF file into an intermediate DSC file on disk."""
    converter = CFspBsf2Dsc(bsf_file)
    dsc_lines = converter.get_dsc_lines()
    with open(dsc_file, 'w') as fout:
        fout.write('\n'.join(dsc_lines))
    return
def dsc_to_yaml(dsc_file, yaml_file):
    """Convert an intermediate DSC file into the final YAML file(s)."""
    dsc2yaml = CFspDsc2Yaml()
    dsc2yaml.load_config_data_from_dsc(dsc_file)
    cfgs = {}
    for cfg in ['Template', 'Option']:
        if cfg == 'Template':
            file_lines = dsc2yaml.output_template()
        else:
            file_lines = dsc2yaml.output_config()
        for file in file_lines:
            lines = file_lines[file]
            if file == '.':
                # Main stream: kept in memory and written into yaml_file below.
                cfgs[cfg] = lines
            else:
                if ('/' in file or '\\' in file):
                    continue
                file = os.path.basename(file)
                # NOTE(review): dirname of a basename is always '', so split
                # files land in the current directory - confirm whether the
                # two lines above were intended in the opposite order.
                out_dir = os.path.dirname(file)
                with open(os.path.join(out_dir, file), 'w') as fo:
                    fo.write(__copyright_tmp__ % (
                        cfg, date.today().year) + '\n\n')
                    for line in lines:
                        fo.write(line + '\n')

    variables = dsc2yaml.output_variable()
    with open(yaml_file, 'w') as fo:
        fo.write(__copyright_tmp__ % ('Default', date.today().year))
        if len(variables) > 0:
            fo.write('\n\nvariable:\n')
            for line in variables:
                fo.write(' ' + line + '\n')
        fo.write('\n\ntemplate:\n')
        for line in cfgs['Template']:
            fo.write(' ' + line + '\n')
        fo.write('\n\nconfigs:\n')
        for line in cfgs['Option']:
            fo.write(' ' + line + '\n')
def get_fsp_name_from_path(bsf_file):
    """Derive the FSP name from a path containing a '*FspBinPkg' component."""
    for part in bsf_file.split(os.sep):
        if part.endswith('FspBinPkg'):
            # Strip the trailing 'FspBinPkg' (9 characters).
            return part[:-9]
    raise Exception('Could not get FSP name from file path!')
def usage():
    """Print command-line usage for FspDscBsf2Yaml."""
    print('\n'.join([
        "FspDscBsf2Yaml Version 0.10",
        "Usage:",
        " FspDscBsf2Yaml BsfFile|DscFile YamlFile"
    ]))
def main():
    """Command-line entry: convert a BSF or DSC file into YAML."""
    if len(sys.argv) < 3:
        usage()
        return 1
    bsf_file = sys.argv[1]
    yaml_file = sys.argv[2]
    if os.path.isdir(yaml_file):
        # Only an output directory was given - derive the YAML file name.
        yaml_file = os.path.join(
            yaml_file, get_fsp_name_from_path(bsf_file) + '.yaml')
    if bsf_file.endswith('.dsc'):
        # Already DSC input: no BSF pre-conversion needed.
        dsc_file = bsf_file
        bsf_file = ''
    else:
        # Go through an intermediate DSC conversion first.
        dsc_file = os.path.splitext(yaml_file)[0] + '.dsc'
        bsf_to_dsc(bsf_file, dsc_file)
    dsc_to_yaml(dsc_file, yaml_file)
    print("'%s' was created successfully!" % yaml_file)
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/FspDscBsf2Yaml.py
|
## @ PatchFv.py
#
# Copyright (c) 2014 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
#
# Read data from file
#
# param [in] binfile Binary file
# param [in] offset Offset (a value with bit31 set is relative to file end)
# param [in] len Length in bytes (name kept for caller compatibility)
#
# retval value Little-endian integer assembled from the bytes read
#
def readDataFromFile (binfile, offset, len=1):
    # NOTE: 'len' shadows the builtin but is kept so existing keyword callers
    # (readDataFromFile(f, o, len=4)) keep working.
    fsize = os.path.getsize(binfile)
    offval = offset & 0xFFFFFFFF
    if (offval & 0x80000000):
        # Negative 32-bit offset: count back from the end of the file.
        offval = fsize - (0xFFFFFFFF - offval + 1)
    # Fix: use a context manager so the handle is closed even on error, and
    # do not shadow the builtin 'bytearray' with a local list.
    with open(binfile, "r+b") as fd:
        fd.seek(offval)
        if sys.version_info[0] < 3:
            data = [ord(b) for b in fd.read(len)]
        else:
            data = [b for b in fd.read(len)]
    # Assemble a little-endian value.
    value = 0
    idx = len - 1
    while idx >= 0:
        value = value << 8 | data[idx]
        idx = idx - 1
    return value
#
# Check FSP header is valid or not
#
# param [in] binfile Binary file
#
# retval boolean True: valid; False: invalid
#
def IsFspHeaderValid (binfile):
    """Check the first 0x200 bytes of *binfile* for required FSP markers."""
    with open(binfile, "rb") as fd:
        bindat = fd.read(0x200)  # only read first 0x200 bytes
    # Look for 'FSPH', 'FSPP' and 'FSPE' markers; 0 means "not found"
    # (a marker at offset 0 would also read as missing - pre-existing quirk).
    HeaderList = [b'FSPH', b'FSPP', b'FSPE']
    OffsetList = []
    for marker in HeaderList:
        OffsetList.append(bindat.index(marker) if marker in bindat else 0)
    if not OffsetList[0] or not OffsetList[1]:
        # 'FSPH' and 'FSPP' are mandatory for every FSP revision.
        return False
    # Revision byte lives at offset 0x0B inside the FSP information header.
    if sys.version_info[0] < 3:
        Revision = ord(bindat[OffsetList[0] + 0x0B])
    else:
        Revision = bindat[OffsetList[0] + 0x0B]
    #
    # if revision is bigger than 1, it means it is FSP v1.1 or greater revision, which must contain 'FSPE'.
    #
    if Revision > 1 and not OffsetList[2]:
        return False
    return True
#
# Patch data in file
#
# param [in] binfile Binary file
# param [in] offset Offset
# param [in] value Patch value
# param [in] len Length
#
# retval len Length
#
def patchDataInFile (binfile, offset, value, len=1):
    """Write 'value' into 'binfile' at 'offset' as 'len' little-endian bytes.

    param [in] binfile  Binary file to patch in place
    param [in] offset   File offset; a "negative" 32-bit value (bit 31 set)
                        is interpreted as a distance back from end of file
    param [in] value    Integer value to write (extra high bits are dropped)
    param [in] len      Number of bytes to write (default 1)

    retval len          Number of bytes written
    """
    fsize = os.path.getsize(binfile)
    offval = offset & 0xFFFFFFFF
    if (offval & 0x80000000):
        # Translate a negative 32-bit offset into a distance from end of file.
        offval = fsize - (0xFFFFFFFF - offval + 1)
    # Split the value into 'len' little-endian bytes.
    byte_list = [(value >> (8 * shift)) & 0xFF for shift in range(len)]
    fd = open(binfile, "r+b")
    fd.seek(offval)
    if sys.version_info[0] < 3:
        fd.write("".join(chr(b) for b in byte_list))
    else:
        fd.write(bytes(byte_list))
    fd.close()
    return len
class Symbols:
def __init__(self):
self.dictSymbolAddress = {}
self.dictGuidNameXref = {}
self.dictFfsOffset = {}
self.dictVariable = {}
self.dictModBase = {}
self.fdFile = None
self.string = ""
self.fdBase = 0xFFFFFFFF
self.fdSize = 0
self.index = 0
self.fvList = []
self.parenthesisOpenSet = '([{<'
self.parenthesisCloseSet = ')]}>'
#
# Get FD file
#
# retval self.fdFile Retrieve FD file
#
def getFdFile (self):
return self.fdFile
#
# Get FD size
#
# retval self.fdSize Retrieve the size of FD file
#
def getFdSize (self):
return self.fdSize
    def parseFvInfFile (self, infFile):
        """Collect name/offset/size/base info for one FV from its .inf file.

        param [in] infFile  FV .inf file produced by the build

        retval 0            FV info appended to self.fvList successfully
        """
        fvInfo = {}
        # The generated .Fv binary sits next to the .inf file.
        fvFile = infFile[0:-4] + ".Fv"
        fvInfo['Name'] = os.path.splitext(os.path.basename(infFile))[0]
        fvInfo['Offset'] = self.getFvOffsetInFd(fvFile)
        # FvLength field lives at offset 0x20 of the FV header.
        fvInfo['Size'] = readDataFromFile (fvFile, 0x20, 4)
        fdIn = open(infFile, "r")
        rptLines = fdIn.readlines()
        fdIn.close()
        fvInfo['Base'] = 0
        # Scan for "EFI_BASE_ADDRESS = 0x...." to get the FV link base.
        for rptLine in rptLines:
            match = re.match("^EFI_BASE_ADDRESS\s*=\s*(0x[a-fA-F0-9]+)", rptLine)
            if match:
                fvInfo['Base'] = int(match.group(1), 16)
                break
        self.fvList.append(dict(fvInfo))
        return 0
#
# Create dictionaries
#
# param [in] fvDir FV's directory
# param [in] fvNames All FV's names
#
# retval 0 Created dictionaries successfully
#
    def createDicts (self, fvDir, fvNames):
        """Build all lookup dictionaries from the build output in 'fvDir'.

        param [in] fvDir    FV build output directory
        param [in] fvNames  Colon-separated FV base names; the last entry
                            names the FD file

        retval 0            Created dictionaries successfully
        """
        #
        # If the fvDir is not a directory, then raise an exception
        #
        if not os.path.isdir(fvDir):
            raise Exception ("'%s' is not a valid directory!" % fvDir)
        #
        # If the Guid.xref is not existing in fvDir, then raise an exception
        #
        xrefFile = os.path.join(fvDir, "Guid.xref")
        if not os.path.exists(xrefFile):
            raise Exception("Cannot open GUID Xref file '%s'!" % xrefFile)
        #
        # Add GUID reference to dictionary
        #
        self.dictGuidNameXref = {}
        self.parseGuidXrefFile(xrefFile)
        #
        # Split up each FV from fvNames and get the fdBase
        #
        fvList = fvNames.split(":")
        fdBase = fvList.pop()
        if len(fvList) == 0:
            fvList.append(fdBase)
        #
        # If the FD file is not existing, then raise an exception
        #
        fdFile = os.path.join(fvDir, fdBase.strip() + ".fd")
        if not os.path.exists(fdFile):
            raise Exception("Cannot open FD file '%s'!" % fdFile)
        #
        # Get the size of the FD file
        #
        self.fdFile = fdFile
        self.fdSize = os.path.getsize(fdFile)
        #
        # If the INF file, which is the first element of fvList, is not existing, then raise an exception
        #
        infFile = os.path.join(fvDir, fvList[0].strip()) + ".inf"
        if not os.path.exists(infFile):
            raise Exception("Cannot open INF file '%s'!" % infFile)
        #
        # Parse INF file in order to get fdBase and then assign those values to dictVariable
        #
        self.parseInfFile(infFile)
        self.dictVariable = {}
        self.dictVariable["FDSIZE"] = self.fdSize
        self.dictVariable["FDBASE"] = self.fdBase
        #
        # Collect information from FV MAP file and FV TXT file then
        # put them into dictionaries
        #
        self.fvList = []
        self.dictSymbolAddress = {}
        self.dictFfsOffset = {}
        for file in fvList:
            #
            # If the .Fv.map file is not existing, then raise an exception.
            # Otherwise, parse FV MAP file
            #
            fvFile = os.path.join(fvDir, file.strip()) + ".Fv"
            mapFile = fvFile + ".map"
            if not os.path.exists(mapFile):
                raise Exception("Cannot open MAP file '%s'!" % mapFile)
            infFile = fvFile[0:-3] + ".inf"
            self.parseFvInfFile(infFile)
            self.parseFvMapFile(mapFile)
            #
            # If the .Fv.txt file is not existing, then raise an exception.
            # Otherwise, parse FV TXT file
            #
            fvTxtFile = fvFile + ".txt"
            if not os.path.exists(fvTxtFile):
                raise Exception("Cannot open FV TXT file '%s'!" % fvTxtFile)
            self.parseFvTxtFile(fvTxtFile)
        # Expose each FV's link base as a _BASE_<FvName>_ variable.
        for fv in self.fvList:
            self.dictVariable['_BASE_%s_' % fv['Name']] = fv['Base']
        #
        # Search all MAP files in FFS directory if it exists then parse MOD MAP file
        #
        ffsDir = os.path.join(fvDir, "Ffs")
        if (os.path.isdir(ffsDir)):
            for item in os.listdir(ffsDir):
                # FFS dir entries are "<36-char GUID><ModuleName>"; skip others.
                if len(item) <= 0x24:
                    continue
                mapFile =os.path.join(ffsDir, item, "%s.map" % item[0:0x24])
                if not os.path.exists(mapFile):
                    continue
                self.parseModMapFile(item[0x24:], mapFile)
        return 0
#
# Get FV offset in FD file
#
# param [in] fvFile FV file
#
# retval offset Got FV offset successfully
#
def getFvOffsetInFd(self, fvFile):
#
# Check if the first 0x70 bytes of fvFile can be found in fdFile
#
fvHandle = open(fvFile, "r+b")
fdHandle = open(self.fdFile, "r+b")
offset = fdHandle.read().find(fvHandle.read(0x70))
fvHandle.close()
fdHandle.close()
if offset == -1:
raise Exception("Could not locate FV file %s in FD!" % fvFile)
return offset
#
# Parse INF file
#
# param [in] infFile INF file
#
# retval 0 Parsed INF file successfully
#
def parseInfFile(self, infFile):
#
# Get FV offset and search EFI_BASE_ADDRESS in the FD file
# then assign the value of EFI_BASE_ADDRESS to fdBase
#
fvOffset = self.getFvOffsetInFd(infFile[0:-4] + ".Fv")
fdIn = open(infFile, "r")
rptLine = fdIn.readline()
self.fdBase = 0xFFFFFFFF
while (rptLine != "" ):
#EFI_BASE_ADDRESS = 0xFFFDF400
match = re.match("^EFI_BASE_ADDRESS\s*=\s*(0x[a-fA-F0-9]+)", rptLine)
if match is not None:
self.fdBase = int(match.group(1), 16) - fvOffset
break
rptLine = fdIn.readline()
fdIn.close()
if self.fdBase == 0xFFFFFFFF:
raise Exception("Could not find EFI_BASE_ADDRESS in INF file!" % infFile)
return 0
#
# Parse FV TXT file
#
# param [in] fvTxtFile .Fv.txt file
#
# retval 0 Parsed FV TXT file successfully
#
    def parseFvTxtFile(self, fvTxtFile):
        """Record FFS GUID -> FD offset entries from a .Fv.txt report.

        param [in] fvTxtFile  .Fv.txt file

        retval 0              Parsed FV TXT file successfully
        """
        # FV name is the file base name without the trailing ".Fv.txt".
        fvName = os.path.basename(fvTxtFile)[0:-7].upper()
        #
        # Get information from .Fv.txt in order to create a dictionary
        # For example,
        # self.dictFfsOffset[912740BE-2284-4734-B971-84B027353F0C] = 0x000D4078
        #
        fvOffset = self.getFvOffsetInFd(fvTxtFile[0:-4])
        fdIn = open(fvTxtFile, "r")
        rptLine = fdIn.readline()
        while (rptLine != "" ):
            match = re.match("(0x[a-fA-F0-9]+)\s([0-9a-fA-F\-]+)", rptLine)
            if match is not None:
                if match.group(2) in self.dictFfsOffset:
                    # Same GUID seen in another FV already: qualify the key
                    # with this FV's name to keep both entries.
                    self.dictFfsOffset[fvName + ':' + match.group(2)] = "0x%08X" % (int(match.group(1), 16) + fvOffset)
                else:
                    self.dictFfsOffset[match.group(2)] = "0x%08X" % (int(match.group(1), 16) + fvOffset)
            rptLine = fdIn.readline()
        fdIn.close()
        return 0
#
# Parse FV MAP file
#
# param [in] mapFile .Fv.map file
#
# retval 0 Parsed FV MAP file successfully
#
    def parseFvMapFile(self, mapFile):
        """Populate dictModBase and dictSymbolAddress from a .Fv.map file.

        param [in] mapFile  .Fv.map file

        retval 0            Parsed FV MAP file successfully
        """
        #
        # Get information from .Fv.map in order to create dictionaries
        # For example,
        # self.dictModBase[FspSecCore:BASE]  = 4294592776 (0xfffa4908)
        # self.dictModBase[FspSecCore:ENTRY] = 4294606552 (0xfffa7ed8)
        # self.dictModBase[FspSecCore:TEXT]  = 4294593080 (0xfffa4a38)
        # self.dictModBase[FspSecCore:DATA]  = 4294612280 (0xfffa9538)
        # self.dictSymbolAddress[FspSecCore:_SecStartup] = 0x00fffa4a38
        #
        fdIn = open(mapFile, "r")
        rptLine = fdIn.readline()
        modName = ""
        foundModHdr = False
        while (rptLine != "" ):
            if rptLine[0] != ' ':
                # Unindented lines are module headers (two accepted formats).
                #DxeIpl (Fixed Flash Address, BaseAddress=0x00fffb4310, EntryPoint=0x00fffb4958,Type=PE)
                match = re.match("([_a-zA-Z0-9\-]+)\s\(.+BaseAddress=(0x[0-9a-fA-F]+),\s+EntryPoint=(0x[0-9a-fA-F]+),\s*Type=\w+\)", rptLine)
                if match is None:
                    #DxeIpl (Fixed Flash Address, BaseAddress=0x00fffb4310, EntryPoint=0x00fffb4958)
                    match = re.match("([_a-zA-Z0-9\-]+)\s\(.+BaseAddress=(0x[0-9a-fA-F]+),\s+EntryPoint=(0x[0-9a-fA-F]+)\)", rptLine)
                if match is not None:
                    foundModHdr = True
                    modName = match.group(1)
                    if len(modName) == 36:
                        # A 36-char name is a GUID; translate via Guid.xref.
                        modName = self.dictGuidNameXref[modName.upper()]
                    self.dictModBase['%s:BASE'  % modName] = int (match.group(2), 16)
                    self.dictModBase['%s:ENTRY' % modName] = int (match.group(3), 16)
                #(GUID=86D70125-BAA3-4296-A62F-602BEBBB9081 .textbaseaddress=0x00fffb4398 .databaseaddress=0x00fffb4178)
                match = re.match("\(GUID=([A-Z0-9\-]+)\s+\.textbaseaddress=(0x[0-9a-fA-F]+)\s+\.databaseaddress=(0x[0-9a-fA-F]+)\)", rptLine)
                if match is not None:
                    if foundModHdr:
                        foundModHdr = False
                    else:
                        modName = match.group(1)
                        if len(modName) == 36:
                            modName = self.dictGuidNameXref[modName.upper()]
                    self.dictModBase['%s:TEXT' % modName] = int (match.group(2), 16)
                    self.dictModBase['%s:DATA' % modName] = int (match.group(3), 16)
            else:
                # Indented lines are symbol entries of the current module.
                #  0x00fff8016c                __ModuleEntryPoint
                foundModHdr = False
                match = re.match("^\s+(0x[a-z0-9]+)\s+([_a-zA-Z0-9]+)", rptLine)
                if match is not None:
                    self.dictSymbolAddress["%s:%s"%(modName, match.group(2))] = match.group(1)
            rptLine = fdIn.readline()
        fdIn.close()
        return 0
#
# Parse MOD MAP file
#
# param [in] moduleName Module name
# param [in] mapFile .Fv.map file
#
# retval 0 Parsed MOD MAP file successfully
# retval 1 There is no moduleEntryPoint in modSymbols
# retval 2 There is no offset for moduleEntryPoint in modSymbols
#
    def parseModMapFile(self, moduleName, mapFile):
        """Add a module's symbols from its linker MAP file (GCC or MSFT).

        param [in] moduleName  Module name
        param [in] mapFile     Module .map file

        retval 0  Parsed MOD MAP file successfully
        retval 1  There is no moduleEntryPoint in modSymbols
        retval 2  There is no offset for moduleEntryPoint in modSymbols
        """
        #
        # Get information from mapFile by moduleName in order to create a dictionary
        # For example,
        # self.dictSymbolAddress[FspSecCore:___guard_fids_count] = 0x00fffa4778
        #
        modSymbols = {}
        fdIn = open(mapFile, "r")
        reportLines = fdIn.readlines()
        fdIn.close()
        moduleEntryPoint = "__ModuleEntryPoint"
        reportLine = reportLines[0]
        # The first line distinguishes GCC (ld) maps from MSFT (link) maps.
        if reportLine.strip().find("Archive member included") != -1:
            #GCC
            # 0x0000000000001d55                IoRead8
            patchMapFileMatchString = "\s+(0x[0-9a-fA-F]{16})\s+([^\s][^0x][_a-zA-Z0-9\-]+)\s"
            matchKeyGroupIndex = 2
            matchSymbolGroupIndex = 1
            prefix = '_'
        else:
            #MSFT
            #0003:00000190       _gComBase                  00007a50     SerialPo
            patchMapFileMatchString = "^\s[0-9a-fA-F]{4}:[0-9a-fA-F]{8}\s+(\w+)\s+([0-9a-fA-F]{8,16}\s+)"
            matchKeyGroupIndex = 1
            matchSymbolGroupIndex = 2
            prefix = ''
        for reportLine in reportLines:
            match = re.match(patchMapFileMatchString, reportLine)
            if match is not None:
                modSymbols[prefix + match.group(matchKeyGroupIndex)] = match.group(matchSymbolGroupIndex)
        # Handle extra module patchable PCD variable in Linux map since it might have different format
        # .data._gPcd_BinaryPatch_PcdVpdBaseAddress
        #        0x0000000000003714        0x4 /tmp/ccmytayk.ltrans1.ltrans.o
        handleNext = False
        if matchSymbolGroupIndex == 1:
            # GCC only: the PCD name and its address are on consecutive lines.
            for reportLine in reportLines:
                if handleNext:
                    handleNext = False
                    pcdName = match.group(1)
                    match = re.match("\s+(0x[0-9a-fA-F]{16})\s+", reportLine)
                    if match is not None:
                        modSymbols[prefix + pcdName] = match.group(1)
                else:
                    match = re.match("^\s\.data\.(_gPcd_BinaryPatch[_a-zA-Z0-9\-]+)", reportLine)
                    if match is not None:
                        handleNext = True
                        continue
        if not moduleEntryPoint in modSymbols:
            if matchSymbolGroupIndex == 2:
                # MSFT maps may use a single-underscore entry point name.
                if not '_ModuleEntryPoint' in modSymbols:
                    return 1
                else:
                    moduleEntryPoint = "_ModuleEntryPoint"
            else:
                return 1
        modEntry = '%s:%s' % (moduleName,moduleEntryPoint)
        if not modEntry in self.dictSymbolAddress:
            modKey = '%s:ENTRY' % moduleName
            if modKey in self.dictModBase:
                baseOffset = self.dictModBase['%s:ENTRY' % moduleName] - int(modSymbols[moduleEntryPoint], 16)
            else :
                return 2
        else:
            # Rebase every symbol so that the entry point matches the FV map.
            baseOffset = int(self.dictSymbolAddress[modEntry], 16) - int(modSymbols[moduleEntryPoint], 16)
        for symbol in modSymbols:
            fullSym = "%s:%s" % (moduleName, symbol)
            if not fullSym in self.dictSymbolAddress:
                self.dictSymbolAddress[fullSym] = "0x00%08x" % (baseOffset+ int(modSymbols[symbol], 16))
        return 0
#
# Parse Guid.xref file
#
# param [in] xrefFile the full directory of Guid.xref file
#
# retval 0 Parsed Guid.xref file successfully
#
def parseGuidXrefFile(self, xrefFile):
#
# Get information from Guid.xref in order to create a GuidNameXref dictionary
# The dictGuidNameXref, for example, will be like
# dictGuidNameXref [1BA0062E-C779-4582-8566-336AE8F78F09] = FspSecCore
#
fdIn = open(xrefFile, "r")
rptLine = fdIn.readline()
while (rptLine != "" ):
match = re.match("([0-9a-fA-F\-]+)\s([_a-zA-Z0-9]+)", rptLine)
if match is not None:
self.dictGuidNameXref[match.group(1).upper()] = match.group(2)
rptLine = fdIn.readline()
fdIn.close()
return 0
#
# Get current character
#
# retval self.string[self.index]
# retval '' Exception
#
def getCurr(self):
try:
return self.string[self.index]
except Exception:
return ''
#
# Check to see if it is last index
#
# retval self.index
#
def isLast(self):
return self.index == len(self.string)
#
# Move to next index
#
def moveNext(self):
self.index += 1
#
# Skip space
#
def skipSpace(self):
while not self.isLast():
if self.getCurr() in ' \t':
self.moveNext()
else:
return
#
# Parse value
#
# retval value
#
    def parseValue(self):
        """Parse a terminal token: number, variable, MOD:OFFSET or GUID:OFFSET.

        retval value  Integer value of the parsed token
        """
        self.skipSpace()
        var = ''
        # Accumulate all identifier/number/':'/'-' characters of the token.
        while not self.isLast():
            char = self.getCurr()
            if char.lower() in '_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789:-':
                var += char
                self.moveNext()
            else:
                break
        if ':' in var:
            partList = var.split(':')
            lenList = len(partList)
            if lenList != 2 and lenList != 3:
                raise Exception("Unrecognized expression %s" % var)
            modName = partList[lenList-2]
            modOff = partList[lenList-1]
            if ('-' not in modName) and (modOff[0] in '0123456789'):
                # MOD: OFFSET — translate the module name to its GUID first.
                var = self.getModGuid(modName) + ":" + modOff
            if '-' in var: # GUID:OFFSET
                value = self.getGuidOff(var)
            else:
                value = self.getSymbols(var)
            # Remember that a symbol/GUID was used so evaluate() can treat
            # the final result as an address.
            self.synUsed = True
        else:
            if var[0] in '0123456789':
                value = self.getNumber(var)
            else:
                value = self.getVariable(var)
        return int(value)
#
# Parse single operation
#
# retval ~self.parseBrace() or self.parseValue()
#
def parseSingleOp(self):
self.skipSpace()
char = self.getCurr()
if char == '~':
self.moveNext()
return ~self.parseBrace()
else:
return self.parseValue()
#
# Parse symbol of Brace([, {, <)
#
# retval value or self.parseSingleOp()
#
    def parseBrace(self):
        """Parse a bracketed sub-expression.

        '(x)' groups only; '[x]' reads the FD content at x; '{x}' converts x
        to an address; '<x>' converts x to an offset.

        retval value or self.parseSingleOp() when no opening bracket
        """
        self.skipSpace()
        char = self.getCurr()
        # Bracket kind is the index into parenthesisOpenSet: ( [ { <
        parenthesisType = self.parenthesisOpenSet.find(char)
        if parenthesisType >= 0:
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            # The closing character must match the opening bracket kind.
            if self.getCurr() != self.parenthesisCloseSet[parenthesisType]:
                raise Exception("No closing brace")
            self.moveNext()
            if parenthesisType == 1: # [ : Get content
                value = self.getContent(value)
            elif parenthesisType == 2: # { : To address
                value = self.toAddress(value)
            elif parenthesisType == 3: # < : To offset
                value = self.toOffset(value)
            return value
        else:
            return self.parseSingleOp()
#
# Parse symbol of Multiplier(*)
#
# retval value or self.parseSingleOp()
#
def parseMul(self):
values = [self.parseBrace()]
while True:
self.skipSpace()
char = self.getCurr()
if char == '*':
self.moveNext()
values.append(self.parseBrace())
else:
break
value = 1
for each in values:
value *= each
return value
#
# Parse symbol of And(&) and Or(|)
#
# retval value
#
def parseAndOr(self):
value = self.parseMul()
op = None
while True:
self.skipSpace()
char = self.getCurr()
if char == '&':
self.moveNext()
value &= self.parseMul()
elif char == '|':
div_index = self.index
self.moveNext()
value |= self.parseMul()
else:
break
return value
#
# Parse symbol of Add(+) and Minus(-)
#
# retval sum(values)
#
def parseAddMinus(self):
values = [self.parseAndOr()]
while True:
self.skipSpace()
char = self.getCurr()
if char == '+':
self.moveNext()
values.append(self.parseAndOr())
elif char == '-':
self.moveNext()
values.append(-1 * self.parseAndOr())
else:
break
return sum(values)
#
# Parse expression
#
# retval self.parseAddMinus()
#
def parseExpr(self):
return self.parseAddMinus()
#
# Get result
#
# retval value
#
def getResult(self):
value = self.parseExpr()
self.skipSpace()
if not self.isLast():
raise Exception("Unexpected character found '%s'" % self.getCurr())
return value
#
# Get module GUID
#
# retval value
#
def getModGuid(self, var):
guid = (guid for guid,name in self.dictGuidNameXref.items() if name==var)
try:
value = guid.next()
except Exception:
raise Exception("Unknown module name %s !" % var)
return value
#
# Get variable
#
# retval value
#
def getVariable(self, var):
value = self.dictVariable.get(var, None)
if value == None:
raise Exception("Unrecognized variable '%s'" % var)
return value
#
# Get number
#
# retval value
#
def getNumber(self, var):
var = var.strip()
if var.startswith('0x'): # HEX
value = int(var, 16)
else:
value = int(var, 10)
return value
#
# Get content
#
# param [in] value
#
# retval value
#
    def getContent(self, value):
        """Read a 32-bit little-endian DWORD from the FD at address/offset 'value'.

        param [in] value  Address or offset (normalized via toOffset)

        retval            DWORD read from the FD file
        """
        return readDataFromFile (self.fdFile, self.toOffset(value), 4)
#
# Change value to address
#
# param [in] value
#
# retval value
#
def toAddress(self, value):
if value < self.fdSize:
value = value + self.fdBase
return value
#
# Change value to offset
#
# param [in] value
#
# retval value
#
def toOffset(self, value):
offset = None
for fvInfo in self.fvList:
if (value >= fvInfo['Base']) and (value < fvInfo['Base'] + fvInfo['Size']):
offset = value - fvInfo['Base'] + fvInfo['Offset']
if not offset:
if (value >= self.fdBase) and (value < self.fdBase + self.fdSize):
offset = value - self.fdBase
else:
offset = value
if offset >= self.fdSize:
raise Exception("Invalid file offset 0x%08x !" % value)
return offset
#
# Get GUID offset
#
# param [in] value
#
# retval value
#
def getGuidOff(self, value):
# GUID:Offset
symbolName = value.split(':')
if len(symbolName) == 3:
fvName = symbolName[0].upper()
keyName = '%s:%s' % (fvName, symbolName[1])
offStr = symbolName[2]
elif len(symbolName) == 2:
keyName = symbolName[0]
offStr = symbolName[1]
if keyName in self.dictFfsOffset:
value = (int(self.dictFfsOffset[keyName], 16) + int(offStr, 16)) & 0xFFFFFFFF
else:
raise Exception("Unknown GUID %s !" % value)
return value
#
# Get symbols
#
# param [in] value
#
# retval ret
#
def getSymbols(self, value):
if value in self.dictSymbolAddress:
# Module:Function
ret = int (self.dictSymbolAddress[value], 16)
else:
raise Exception("Unknown symbol %s !" % value)
return ret
#
# Evaluate symbols
#
# param [in] expression
# param [in] isOffset
#
# retval value & 0xFFFFFFFF
#
    def evaluate(self, expression, isOffset):
        """Evaluate an expression string; optionally coerce it to an FD offset.

        param [in] expression  Expression text to evaluate
        param [in] isOffset    True to convert the result into a file offset

        retval value & 0xFFFFFFFF
        """
        self.index = 0
        # parseValue() sets synUsed when a symbol/GUID appears in the
        # expression, meaning the result is an address rather than an offset.
        self.synUsed = False
        self.string = expression
        value = self.getResult()
        if isOffset:
            if self.synUsed:
                # Consider it as an address first
                value = self.toOffset(value)
            if value & 0x80000000:
                # Consider it as a negative offset next
                offset = (~value & 0xFFFFFFFF) + 1
                if offset < self.fdSize:
                    value = self.fdSize - offset
            if value >= self.fdSize:
                raise Exception("Invalid offset expression !")
        return value & 0xFFFFFFFF
#
# Print out the usage
#
def Usage():
    """Print the tool version and command-line usage."""
    print ("PatchFv Version 0.50")
    print ("Usage: \n\tPatchFv FvBuildDir [FvFileBaseNames:]FdFileBaseNameToPatch \"Offset, Value\"")
def main():
    """Entry point: patch an FD file according to command-line expressions.

    argv[1] is the FV build directory, argv[2] the colon-separated FV/FD
    base names, and each following argument is one patch item of the form
    "offset, value[, @comment][, $command]".

    retval 0 on success, 1 on usage/patch error, 2 on dictionary error
    """
    #
    # Parse the options and args
    #
    symTables = Symbols()
    #
    # If the arguments are less than 4, then return an error.
    #
    if len(sys.argv) < 4:
        Usage()
        return 1
    #
    # If it fails to create dictionaries, then return an error.
    #
    if symTables.createDicts(sys.argv[1], sys.argv[2]) != 0:
        print ("ERROR: Failed to create symbol dictionary!!")
        return 2
    #
    # Get FD file and size
    #
    fdFile = symTables.getFdFile()
    fdSize = symTables.getFdSize()
    try:
        #
        # Check to see if FSP header is valid
        #
        ret = IsFspHeaderValid(fdFile)
        if ret == False:
            raise Exception ("The FSP header is not valid. Stop patching FD.")
        comment = ""
        for fvFile in  sys.argv[3:]:
            #
            # Check to see if it has enough arguments
            #
            items = fvFile.split(",")
            if len (items) < 2:
                raise Exception("Expect more arguments for '%s'!" % fvFile)
            comment = ""
            command = ""
            params  = []
            # Classify each comma-separated item: @comment, $command, or
            # an expression (first expression is the offset, rest are values).
            for item in items:
                item = item.strip()
                if item.startswith("@"):
                    comment = item[1:]
                elif item.startswith("$"):
                    command = item[1:]
                else:
                    if len(params) == 0:
                        isOffset = True
                    else :
                        isOffset = False
                    #
                    # Parse symbols then append it to params
                    #
                    params.append (symTables.evaluate(item, isOffset))
            #
            # Patch a new value into FD file if it is not a command
            #
            if command == "":
                # Patch a DWORD
                if len (params) == 2:
                    offset   = params[0]
                    value    = params[1]
                    oldvalue = readDataFromFile(fdFile, offset, 4)
                    ret = patchDataInFile (fdFile, offset, value, 4) - 4
                else:
                    raise Exception ("Patch command needs 2 parameters !")
                if ret:
                    raise Exception ("Patch failed for offset 0x%08X" % offset)
                else:
                    print ("Patched offset 0x%08X:[%08X] with value 0x%08X  # %s" % (offset, oldvalue, value, comment))
            elif command == "COPY":
                #
                # Copy binary block from source to destination
                #
                if len (params) == 3:
                    src  = symTables.toOffset(params[0])
                    dest = symTables.toOffset(params[1])
                    clen = symTables.toOffset(params[2])
                    if (dest + clen <= fdSize) and (src + clen <= fdSize):
                        oldvalue = readDataFromFile(fdFile, src, clen)
                        ret = patchDataInFile (fdFile, dest, oldvalue, clen) - clen
                    else:
                        raise Exception ("Copy command OFFSET or LENGTH parameter is invalid !")
                else:
                    raise Exception ("Copy command needs 3 parameters !")
                if ret:
                    raise Exception ("Copy failed from offset 0x%08X to offset 0x%08X!" % (src, dest))
                else :
                    print ("Copied %d bytes from offset 0x%08X ~ offset 0x%08X  # %s" % (clen, src, dest, comment))
            else:
                raise Exception ("Unknown command %s!" % command)
        return 0
    except Exception as ex:
        print ("ERROR: %s" % ex)
        return 1
if __name__ == '__main__':
sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/PatchFv.py
|
## @ GenCfgOpt.py
#
# Copyright (c) 2014 - 2022, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import struct
from datetime import date
from functools import reduce
# Generated file copyright header
__copyright_txt__ = """## @file
#
# THIS IS AUTO-GENERATED FILE BY BUILD TOOLS AND PLEASE DO NOT MAKE MODIFICATION.
#
# This file lists all VPD informations for a platform collected by build.exe.
#
# Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
__copyright_bsf__ = """/** @file
Boot Setting File for Platform Configuration.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
This file is automatically generated. Please do NOT modify !!!
**/
"""
__copyright_h__ = """/** @file
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
This file is automatically generated. Please do NOT modify !!!
**/
"""
BuildOptionPcd = []
class CLogicalExpression:
    """Recursive-descent evaluator for DSC conditional expressions
    (NOT/AND/OR/XOR, comparisons, parentheses).

    Values travel through the parser as strings; numeric results are
    canonicalized to decimal text and only converted to int at the end.
    """
    def __init__(self):
        self.index = 0
        self.string = ''
    def errExit(self, err = ''):
        """Report the parse position (with a caret) and abort via SystemExit."""
        print ("ERROR: Express parsing for:")
        print ("       %s" % self.string)
        print ("       %s^" % (' ' * self.index))
        if err:
            print ("INFO : %s" % err)
        raise SystemExit
    def getNonNumber (self, n1, n2):
        """Return whichever of n1/n2 is not a decimal number, or None if both are."""
        if not n1.isdigit():
            return n1
        if not n2.isdigit():
            return n2
        return None
    def getCurr(self, lens = 1):
        """Return 'lens' characters at the cursor; -1 means rest of string."""
        try:
            if lens == -1:
                return self.string[self.index :]
            else:
                if self.index + lens > len(self.string):
                    lens = len(self.string) - self.index
                return self.string[self.index : self.index + lens]
        except Exception:
            return ''
    def isLast(self):
        """True when the cursor reached the end of the string."""
        return self.index == len(self.string)
    def moveNext(self, len = 1):
        """Advance the cursor by 'len' characters."""
        self.index += len
    def skipSpace(self):
        """Skip spaces and tabs at the cursor."""
        while not self.isLast():
            if self.getCurr() in ' \t':
                self.moveNext()
            else:
                return
    def normNumber (self, val):
        """Normalize any value to a boolean."""
        return True if val else False
    def getNumber(self, var):
        """Parse hex/decimal text to int; None when the text is not numeric."""
        var = var.strip()
        if re.match('^0x[a-fA-F0-9]+$', var):
            value = int(var, 16)
        elif re.match('^[+-]?\d+$', var):
            value = int(var, 10)
        else:
            value = None
        return value
    def parseValue(self):
        """Read one identifier/number token; numbers become decimal strings."""
        self.skipSpace()
        var = ''
        while not self.isLast():
            char = self.getCurr()
            if re.match('^[\w.]', char):
                var += char
                self.moveNext()
            else:
                break
        val = self.getNumber(var)
        if val is None:
            value = var
        else:
            value = "%d" % val
        return value
    def parseSingleOp(self):
        """Parse 'NOT expr' or fall through to a plain value."""
        self.skipSpace()
        if re.match('^NOT\W', self.getCurr(-1)):
            self.moveNext(3)
            op  = self.parseBrace()
            val = self.getNumber (op)
            if val is None:
                self.errExit ("'%s' is not a number" % op)
            return "%d" % (not self.normNumber(int(op)))
        else:
            return self.parseValue()
    def parseBrace(self):
        """Parse '(expr)' or fall through to a single operation."""
        self.skipSpace()
        char = self.getCurr()
        if char == '(':
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            if self.getCurr() != ')':
                self.errExit ("Expecting closing brace or operator")
            self.moveNext()
            return value
        else:
            value = self.parseSingleOp()
            return value
    def parseCompare(self):
        """Parse <, >, <=, >=, ==, != comparisons.

        NOTE(review): numeric comparisons are evaluated via eval() on
        parser-produced text — assumed safe because the operands have been
        canonicalized to decimal strings by parseValue().
        """
        value = self.parseBrace()
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char in ['<', '>']:
                self.moveNext()
                next = self.getCurr()
                if next == '=':
                    op = char + next
                    self.moveNext()
                else:
                    op = char
                result = self.parseBrace()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval (value + op + result))
                else:
                    self.errExit ("'%s' is not a valid number for comparision" % test)
            elif char in ['=', '!']:
                op = self.getCurr(2)
                if op in ['==', '!=']:
                    self.moveNext(2)
                    result = self.parseBrace()
                    test = self.getNonNumber(result, value)
                    if test is None:
                        value = "%d" % self.normNumber((eval (value + op + result)))
                    else:
                        # Non-numeric operands: compare as quoted strings.
                        value = "%d" % self.normNumber(eval ("'" + value + "'" + op + "'" + result + "'"))
                else:
                    break
            else:
                break
        return value
    def parseAnd(self):
        """Parse 'expr AND expr' chains (bitwise & on 0/1 values)."""
        value = self.parseCompare()
        while True:
            self.skipSpace()
            if re.match('^AND\W', self.getCurr(-1)):
                self.moveNext(3)
                result = self.parseCompare()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(int(value) & int(result))
                else:
                    self.errExit ("'%s' is not a valid op number for AND" % test)
            else:
                break
        return value
    def parseOrXor(self):
        """Parse 'expr OR expr' / 'expr XOR expr' chains."""
        value  = self.parseAnd()
        op     = None
        while True:
            self.skipSpace()
            op = None
            if re.match('^XOR\W', self.getCurr(-1)):
                self.moveNext(3)
                op = '^'
            elif re.match('^OR\W', self.getCurr(-1)):
                self.moveNext(2)
                op = '|'
            else:
                break
            if op:
                result = self.parseAnd()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval (value + op + result))
                else:
                    self.errExit ("'%s' is not a valid op number for XOR/OR" % test)
        return value
    def parseExpr(self):
        """Top-level grammar entry."""
        return self.parseOrXor()
    def getResult(self):
        """Evaluate the whole string and return an int; reject trailing junk."""
        value = self.parseExpr()
        self.skipSpace()
        if not self.isLast():
            self.errExit ("Unexpected character found '%s'" % self.getCurr())
        test = self.getNumber(value)
        if test is None:
            self.errExit ("Result '%s' is not a number" % value)
        return int(value)
    def evaluateExpress (self, Expr):
        """Evaluate 'Expr' and return True/False."""
        self.index  = 0
        self.string = Expr
        if self.getResult():
            Result = True
        else:
            Result = False
        return Result
class CGenCfgOpt:
    def __init__(self, Mode = ''):
        """Initialize the config generator; 'Mode' selects an output flavor."""
        self.Debug = False
        self.Error = ''
        self.Mode = Mode
        # BSF template snippets emitted verbatim into generated files.
        self._GlobalDataDef = """
GlobalDataDef
    SKUID = 0, "DEFAULT"
EndGlobalData
"""
        self._BuidinOptionTxt = """
List &EN_DIS
    Selection 0x1 , "Enabled"
    Selection 0x0 , "Disabled"
EndList
"""
        # Recognized BSF and header annotation keywords.
        self._BsfKeyList = ['FIND','NAME','HELP','TYPE','PAGE', 'PAGES', 'BLOCK', 'OPTION','CONDITION','ORDER', 'MARKER', 'SUBT']
        self._HdrKeyList = ['HEADER','STRUCT', 'EMBED', 'COMMENT']
        self._BuidinOption = {'$EN_DIS' : 'EN_DIS'}
        # Lookup tables filled in while parsing the DSC file.
        self._MacroDict = {}
        self._VarDict = {}
        self._PcdsDict = {}
        self._CfgBlkDict = {}
        self._CfgPageDict = {}
        self._BsfTempDict = {}
        self._CfgItemList = []
        self._DscLines = []
        self._DscFile = ''
        self._MapVer = 0
        self._DscTime = 0
    def ParseMacros (self, MacroDefStr):
        """Parse '-D NAME[=VALUE]' style definitions into _MacroDict.

        param [in] MacroDefStr  Argument list fragments, e.g.
                                ['-DABC=1', '-D', 'CFG_DEBUG=1']

        retval 0 when at least one macro was parsed, 1 otherwise
        """
        # ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
        self._MacroDict = {}
        IsExpression = False
        for Macro in MacroDefStr:
            if Macro.startswith('-D'):
                IsExpression = True
                if len(Macro) > 2:
                    # '-DNAME=VALUE' form: the definition is in this token.
                    Macro = Macro[2:]
                else :
                    # Bare '-D': the definition is the next token.
                    continue
            if IsExpression:
                IsExpression = False
                Match = re.match("(\w+)=(.+)", Macro)
                if Match:
                    self._MacroDict[Match.group(1)] = Match.group(2)
                else:
                    # Valueless macro, defined as the empty string.
                    Match = re.match("(\w+)", Macro)
                    if Match:
                        self._MacroDict[Match.group(1)] = ''
        if len(self._MacroDict) == 0:
            Error = 1
        else:
            Error = 0
        if self.Debug:
            print ("INFO : Macro dictionary:")
            for Each in self._MacroDict:
                print ("       $(%s) = [ %s ]" % (Each , self._MacroDict[Each]))
        return Error
def EvaulateIfdef (self, Macro):
Result = Macro in self._MacroDict
if self.Debug:
print ("INFO : Eval Ifdef [%s] : %s" % (Macro, Result))
return Result
def ExpandMacros (self, Input, Preserve = False):
Line = Input
Match = re.findall("\$\(\w+\)", Input)
if Match:
for Each in Match:
Variable = Each[2:-1]
if Variable in self._MacroDict:
Line = Line.replace(Each, self._MacroDict[Variable])
else:
if self.Debug:
print ("WARN : %s is not defined" % Each)
if not Preserve:
Line = Line.replace(Each, Each[2:-1])
return Line
def ExpandPcds (self, Input):
Line = Input
Match = re.findall("(\w+\.\w+)", Input)
if Match:
for PcdName in Match:
if PcdName in self._PcdsDict:
Line = Line.replace(PcdName, self._PcdsDict[PcdName])
else:
if self.Debug:
print ("WARN : %s is not defined" % PcdName)
return Line
    def EvaluateExpress (self, Expr):
        """Expand PCDs/macros in 'Expr', then evaluate it as a logical expression.

        param [in] Expr  Expression text from the DSC file

        retval True/False result of the expression
        """
        ExpExpr = self.ExpandPcds(Expr)
        ExpExpr = self.ExpandMacros(ExpExpr)
        LogExpr = CLogicalExpression()
        Result = LogExpr.evaluateExpress (ExpExpr)
        if self.Debug:
            print ("INFO : Eval Express [%s] : %s" % (Expr, Result))
        return Result
    def ValueToByteArray (self, ValueStr, Length):
        """Convert a value string (or {FILE:...} reference) to exactly 'Length' bytes.

        param [in] ValueStr  Value text from the DSC file
        param [in] Length    Required byte length

        retval bytearray of length 'Length' (zero-padded when short)

        raises Exception when the value does not fit into 'Length' bytes.
        """
        Match = re.match("\{\s*FILE:(.+)\}", ValueStr)
        if Match:
            # {FILE:name[,name...]}: concatenate the raw content of the files,
            # resolved relative to the DSC file's directory.
            FileList = Match.group(1).split(',')
            Result = bytearray()
            for File in FileList:
                File = File.strip()
                BinPath = os.path.join(os.path.dirname(self._DscFile), File)
                Result.extend(bytearray(open(BinPath, 'rb').read()))
        else:
            try:
                Result = bytearray(self.ValueToList(ValueStr, Length))
            except ValueError as e:
                raise Exception ("Bytes in '%s' must be in range 0~255 !" % ValueStr)
        if len(Result) < Length:
            # Zero-pad short values up to the requested length.
            Result.extend(b'\x00' * (Length - len(Result)))
        elif len(Result) > Length:
            raise Exception ("Value '%s' is too big to fit into %d bytes !" % (ValueStr, Length))
        return Result[:Length]
    def ValueToList (self, ValueStr, Length):
        """Convert a DSC value string into a list of byte values.

        Supports '{...}' lists (with 'value:Nb' bit-field elements and quoted
        string elements), quoted strings, and plain expressions.

        NOTE(review): Val2Bytes and Str2Bytes are helpers defined elsewhere
        in this file (outside this chunk).
        """
        if ValueStr[0] == '{':
            Result = []
            BinList = ValueStr[1:-1].split(',')
            InBitField = False
            LastInBitField = False
            Value = 0
            BitLen = 0
            for Element in BinList:
                InBitField = False
                Each = Element.strip()
                if len(Each) == 0:
                    pass
                else:
                    if Each[0] in ['"', "'"]:
                        # Quoted string element: expand to its UTF-8 bytes.
                        Result.extend(list(bytearray(Each[1:-1], 'utf-8')))
                    elif ':' in Each:
                        # 'value:Nb' bit-field element; accumulate bits until
                        # the bit-field run ends.
                        Match = re.match("(.+):(\d+)b", Each)
                        if Match is None:
                            raise Exception("Invald value list format '%s' !" % Each)
                        InBitField = True
                        CurrentBitLen = int(Match.group(2))
                        CurrentValue = ((self.EvaluateExpress(Match.group(1)) & (1<<CurrentBitLen) - 1)) << BitLen
                    else:
                        Result.append(self.EvaluateExpress(Each.strip()))
                if InBitField:
                    Value += CurrentValue
                    BitLen += CurrentBitLen
                if LastInBitField and ((not InBitField) or (Element == BinList[-1])):
                    # Flush the accumulated bit-field once it ends; it must
                    # cover a whole number of bytes.
                    if BitLen % 8 != 0:
                        raise Exception("Invald bit field length!")
                    Result.extend(Val2Bytes(Value, BitLen // 8))
                    Value = 0
                    BitLen = 0
                LastInBitField = InBitField
        elif ValueStr.startswith("'") and ValueStr.endswith("'"):
            Result = Str2Bytes (ValueStr, Length)
        elif ValueStr.startswith('"') and ValueStr.endswith('"'):
            Result = Str2Bytes (ValueStr, Length)
        else:
            Result = Val2Bytes (self.EvaluateExpress(ValueStr), Length)
        return Result
def FormatListValue(self, ConfigDict):
Struct = ConfigDict['struct']
if Struct not in ['UINT8','UINT16','UINT32','UINT64']:
return
dataarray = []
binlist = ConfigDict['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
dataarray.append(value)
unit = int(Struct[4:]) / 8
if int(ConfigDict['length']) != unit * len(dataarray):
raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname'])
bytearray = []
for each in dataarray:
value = each
for loop in range(int(unit)):
bytearray.append("0x%02X" % (value & 0xFF))
value = value >> 8
newvalue = '{' + ','.join(bytearray) + '}'
ConfigDict['value'] = newvalue
return ""
    def ParseDscFile (self, DscFile, FvDir):
        """Parse a DSC file (or a pre-split list of DSC lines) and populate
        the config item list and related dictionaries.

        DscFile : path to the DSC file, or a list of already-read lines.
        FvDir   : output FV directory, recorded in self._FvDir.

        Populates self._CfgItemList, self._CfgPageDict, self._CfgBlkDict,
        self._BsfTempDict, self._MacroDict and self._PcdsDict, and tracks
        the newest modification time of all contributing DSC files in
        self._DscTime.  Returns 0 on success (Error is never set non-zero
        in this body; failures raise or SystemExit instead).
        """
        Hardcode = False
        AutoAlign = False
        self._CfgItemList = []
        self._CfgPageDict = {}
        self._CfgBlkDict = {}
        self._DscFile = DscFile
        self._FvDir = FvDir
        self._DscLines = []
        self._BsfTempDict = {}
        # Initial DSC time is parent DSC time.
        self._DscTime = os.path.getmtime(DscFile)
        CfgDict = {}
        IsDefSect = False
        IsPcdSect = False
        IsUpdSect = False
        IsVpdSect = False
        IsTmpSect = False
        TemplateName = ''
        IfStack = []
        ElifStack = []
        Error = 0
        ConfigDict = {}
        if type(DscFile) is list:
            # it is DSC lines already
            DscLines = DscFile
            self._DscFile = '.'
        else:
            DscFd = open(DscFile, "r")
            DscLines = DscFd.readlines()
            DscFd.close()
            self._DscFile = DscFile
        SkipLines = 0
        MaxAlign = 32 #Default align to 32, but if there are 64 bit unit, align to 64
        SizeAlign = 0 #record the struct max align
        Base = 0 #Starting offset of sub-structure.
        # Consume the line stream; '!include' pushes included lines onto
        # the front of DscLines so they are processed in place.
        while len(DscLines):
            DscLine = DscLines.pop(0).strip()
            if SkipLines == 0:
                self._DscLines.append (DscLine)
            else:
                SkipLines = SkipLines - 1
            if len(DscLine) == 0:
                continue
            Handle = False
            Match = re.match("^\[(.+)\]", DscLine)
            if Match is not None:
                # Section header: reset all section flags, then set the one
                # matching this header.
                IsDefSect = False
                IsPcdSect = False
                IsVpdSect = False
                IsUpdSect = False
                IsTmpSect = False
                SectionName = Match.group(1).lower()
                if SectionName == "Defines".lower():
                    IsDefSect = True
                if (SectionName == "PcdsFeatureFlag".lower() or SectionName == "PcdsFixedAtBuild".lower()):
                    IsPcdSect = True
                elif SectionName == "PcdsDynamicVpd.Tmp".lower():
                    IsTmpSect = True
                elif SectionName == "PcdsDynamicVpd.Upd".lower():
                    # Entering the UPD section: start a fresh config state.
                    ConfigDict = {}
                    ConfigDict['header'] = 'ON'
                    ConfigDict['region'] = 'UPD'
                    ConfigDict['order'] = -1
                    ConfigDict['page'] = ''
                    ConfigDict['name'] = ''
                    ConfigDict['find'] = ''
                    ConfigDict['marker'] = ''
                    ConfigDict['struct'] = ''
                    ConfigDict['embed'] = ''
                    ConfigDict['comment'] = ''
                    ConfigDict['subreg'] = []
                    ConfigDict['condition'] = ''
                    ConfigDict['option'] = ''
                    IsUpdSect = True
                    Offset = 0
            else:
                if IsDefSect or IsPcdSect or IsUpdSect or IsVpdSect or IsTmpSect:
                    # Conditional/include directives: !if, !ifdef, !ifndef,
                    # !elseif, !else, !endif, !include.
                    Match = False if DscLine[0] != '!' else True
                    if Match:
                        Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif|include)\s*(.+)?$", DscLine.split("#")[0])
                    Keyword = Match.group(1) if Match else ''
                    Remaining = Match.group(2) if Match else ''
                    Remaining = '' if Remaining is None else Remaining.strip()
                    if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'] and not Remaining:
                        raise Exception ("ERROR: Expression is expected after '!if' or !elseif' for line '%s'" % DscLine)
                    if Keyword == 'else':
                        if IfStack:
                            IfStack[-1] = not IfStack[-1]
                        else:
                            raise Exception ("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
                    elif Keyword == 'endif':
                        if IfStack:
                            IfStack.pop()
                            # Pop any extra frames pushed by !elseif levels.
                            Level = ElifStack.pop()
                            if Level > 0:
                                del IfStack[-Level:]
                        else:
                            raise Exception ("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
                    elif Keyword == 'ifdef' or Keyword == 'ifndef':
                        Result = self.EvaulateIfdef (Remaining)
                        if Keyword == 'ifndef':
                            Result = not Result
                        IfStack.append(Result)
                        ElifStack.append(0)
                    elif Keyword == 'if' or Keyword == 'elseif':
                        Result = self.EvaluateExpress(Remaining)
                        if Keyword == "if":
                            ElifStack.append(0)
                            IfStack.append(Result)
                        else:   #elseif
                            if IfStack:
                                IfStack[-1] = not IfStack[-1]
                                IfStack.append(Result)
                                ElifStack[-1] = ElifStack[-1] + 1
                            else:
                                raise Exception ("ERROR: No paired '!if' found for '!elif' for line '%s'" % DscLine)
                    else:
                        # Not a conditional directive: the line is handled
                        # only if every enclosing !if evaluated true.
                        if IfStack:
                            Handle = reduce(lambda x,y: x and y, IfStack)
                        else:
                            Handle = True
                        if Handle:
                            Match = re.match("!include\s+(.+)", DscLine)
                            if Match:
                                IncludeFilePath = Match.group(1)
                                IncludeFilePath = self.ExpandMacros(IncludeFilePath)
                                PackagesPath = os.getenv("PACKAGES_PATH")
                                if PackagesPath:
                                    for PackagePath in PackagesPath.split(os.pathsep):
                                        IncludeFilePathAbs = os.path.join(os.path.normpath(PackagePath), os.path.normpath(IncludeFilePath))
                                        if os.path.exists(IncludeFilePathAbs):
                                            IncludeDsc = open(IncludeFilePathAbs, "r")
                                            break
                                else:
                                    IncludeDsc = open(IncludeFilePath, "r")
                                if IncludeDsc == None:
                                    print("ERROR: Cannot open file '%s'" % IncludeFilePath)
                                    raise SystemExit
                                # Update DscTime when newer DSC time found.
                                CurrentDscTime = os.path.getmtime(os.path.realpath(IncludeDsc.name))
                                if CurrentDscTime > self._DscTime:
                                    self._DscTime = CurrentDscTime
                                NewDscLines = IncludeDsc.readlines()
                                IncludeDsc.close()
                                # Splice the included lines in front of the
                                # remaining stream.
                                DscLines = NewDscLines + DscLines
                                del self._DscLines[-1]
                                Offset = 0
                            else:
                                if DscLine.startswith('!'):
                                    print("ERROR: Unrecognized directive for line '%s'" % DscLine)
                                    raise SystemExit
                if not Handle:
                    del self._DscLines[-1]
                    continue
                if IsDefSect:
                    #DEFINE UPD_TOOL_GUID = 8C3D856A-9BE6-468E-850A-24F7A8D38E09
                    #DEFINE FSP_T_UPD_TOOL_GUID = 34686CA3-34F9-4901-B82A-BA630F0714C6
                    #DEFINE FSP_M_UPD_TOOL_GUID = 39A250DB-E465-4DD1-A2AC-E2BD3C0E2385
                    #DEFINE FSP_S_UPD_TOOL_GUID = CAE3605B-5B34-4C85-B3D7-27D54273C40F
                    Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*(.+)", DscLine)
                    if Match:
                        self._MacroDict[Match.group(1)] = self.ExpandMacros(Match.group(2))
                        if self.Debug:
                            print ("INFO : DEFINE %s = [ %s ]" % (Match.group(1), self.ExpandMacros(Match.group(2))))
                elif IsPcdSect:
                    #gSiPkgTokenSpaceGuid.PcdTxtEnable|FALSE
                    #gSiPkgTokenSpaceGuid.PcdOverclockEnable|TRUE
                    Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine)
                    if Match:
                        self._PcdsDict[Match.group(1)] = Match.group(2)
                        if self.Debug:
                            print ("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2)))
                        # Command-line PCD overrides take precedence.
                        i = 0
                        while i < len(BuildOptionPcd):
                            Match = re.match("\s*([\w\.]+)\s*\=\s*(\w+)", BuildOptionPcd[i])
                            if Match:
                                self._PcdsDict[Match.group(1)] = Match.group(2)
                            i += 1
                elif IsTmpSect:
                    # !BSF DEFT:{GPIO_TMPL:START}
                    Match = re.match("^\s*#\s+(!BSF)\s+DEFT:{(.+?):(START|END)}", DscLine)
                    if Match:
                        if Match.group(3) == 'START' and not TemplateName:
                            TemplateName = Match.group(2).strip()
                            self._BsfTempDict[TemplateName] = []
                        if Match.group(3) == 'END' and (TemplateName == Match.group(2).strip()) and TemplateName:
                            TemplateName = ''
                    else:
                        if TemplateName:
                            Match = re.match("^!include\s*(.+)?$", DscLine)
                            if Match:
                                continue
                            self._BsfTempDict[TemplateName].append(DscLine)
                else:
                    # BSF/HDR annotation comments attached to the next item.
                    Match = re.match("^\s*#\s+(!BSF|@Bsf|!HDR)\s+(.+)", DscLine)
                    if Match:
                        Remaining = Match.group(2)
                        if Match.group(1) == '!BSF' or Match.group(1) == '@Bsf':
                            Match = re.match("(?:^|.+\s+)PAGES:{(.+?)}", Remaining)
                            if Match:
                                # !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"}
                                PageList = Match.group(1).split(',')
                                for Page in PageList:
                                    Page = Page.strip()
                                    Match = re.match("(\w+):\"(.+)\"", Page)
                                    if Match != None:
                                        self._CfgPageDict[Match.group(1)] = Match.group(2)
                            Match = re.match("(?:^|.+\s+)BLOCK:{NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*}", Remaining)
                            if Match:
                                self._CfgBlkDict['name'] = Match.group(1)
                                self._CfgBlkDict['ver'] = Match.group(2)
                            for Key in self._BsfKeyList:
                                Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
                                if Match:
                                    if Key in ['NAME', 'HELP', 'OPTION'] and Match.group(1).startswith('+'):
                                        # A leading '+' appends to the
                                        # previous value instead of replacing.
                                        ConfigDict[Key.lower()] += Match.group(1)[1:]
                                    else:
                                        ConfigDict[Key.lower()] = Match.group(1)
                        else:
                            for Key in self._HdrKeyList:
                                Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
                                if Match:
                                    ConfigDict[Key.lower()] = Match.group(1)
                    Match = re.match("^\s*#\s+@Prompt\s+(.+)", DscLine)
                    if Match:
                        ConfigDict['name'] = Match.group(1)
                    Match = re.match("^\s*#\s*@ValidList\s*(.+)\s*\|\s*(.+)\s*\|\s*(.+)\s*", DscLine)
                    if Match:
                        if Match.group(2).strip() in self._BuidinOption:
                            ConfigDict['option'] = Match.group(2).strip()
                        else:
                            # Build a 'value:label, value:label' option string.
                            OptionValueList = Match.group(2).split(',')
                            OptionStringList = Match.group(3).split(',')
                            Index = 0
                            for Option in OptionValueList:
                                Option = Option.strip()
                                ConfigDict['option'] = ConfigDict['option'] + str(Option) + ':' + OptionStringList[Index].strip()
                                Index += 1
                                if Index in range(len(OptionValueList)):
                                    ConfigDict['option'] += ', '
                        ConfigDict['type'] = "Combo"
                    Match = re.match("^\s*#\s*@ValidRange\s*(.+)\s*\|\s*(.+)\s*-\s*(.+)\s*", DscLine)
                    if Match:
                        if "0x" in Match.group(2) or "0x" in Match.group(3):
                            ConfigDict['type'] = "EditNum, HEX, (%s,%s)" % (Match.group(2), Match.group(3))
                        else:
                            ConfigDict['type'] = "EditNum, DEC, (%s,%s)" % (Match.group(2), Match.group(3))
                    Match = re.match("^\s*##\s+(.+)", DscLine)
                    if Match:
                        ConfigDict['help'] = Match.group(1)
                    # Check VPD/UPD
                    if IsUpdSect:
                        Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)",DscLine)
                    else:
                        Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+)(?:\s*\|\s*(.+))?", DscLine)
                    if Match:
                        ConfigDict['space'] = Match.group(1)
                        ConfigDict['cname'] = Match.group(2)
                        if Match.group(3) != '*':
                            # Explicit offset given ('hardcode' mode).
                            Hardcode = True
                            Offset = int (Match.group(3), 16)
                        else:
                            AutoAlign = True
                        if Hardcode and AutoAlign:
                            print("Hardcode and auto-align mixed mode is not supported by GenCfgOpt")
                            raise SystemExit
                        ConfigDict['offset'] = Offset
                        if ConfigDict['order'] == -1:
                            ConfigDict['order'] = ConfigDict['offset'] << 8
                        else:
                            (Major, Minor) = ConfigDict['order'].split('.')
                            ConfigDict['order'] = (int (Major, 16) << 8 ) + int (Minor, 16)
                        if IsUpdSect:
                            Value = Match.group(5).strip()
                            if Match.group(4).startswith("0x"):
                                Length = int (Match.group(4), 16)
                            else :
                                Length = int (Match.group(4))
                            Offset += Length
                        else:
                            Value = Match.group(4)
                            if Value is None:
                                Value = ''
                            Value = Value.strip()
                            if '|' in Value:
                                Match = re.match("^.+\s*\|\s*(.+)", Value)
                                if Match:
                                    Value = Match.group(1)
                            Length = -1
                        ConfigDict['length'] = Length
                        Match = re.match("\$\((\w+)\)", Value)
                        if Match:
                            if Match.group(1) in self._MacroDict:
                                Value = self._MacroDict[Match.group(1)]
                        ConfigDict['value'] = Value
                        if (len(Value) > 0)  and (Value[0] == '{'):
                            Value = self.FormatListValue(ConfigDict)
                        if ConfigDict['name'] == '':
                            # Clear BSF specific items
                            ConfigDict['bsfname'] = ''
                            ConfigDict['help'] = ''
                            ConfigDict['type'] = ''
                            ConfigDict['option'] = ''
                        if IsUpdSect and AutoAlign:
                            # Auto-align mode: align offsets to the item's
                            # natural unit and pad struct boundaries.
                            ItemLength = int(ConfigDict['length'])
                            ItemOffset = int(ConfigDict['offset'])
                            ItemStruct = ConfigDict['struct']
                            Unit = 1
                            if ItemLength in [1, 2, 4, 8] and not ConfigDict['value'].startswith('{'):
                                Unit = ItemLength
                                # If there are 64 bit unit, align to 64
                                if Unit == 8:
                                    MaxAlign = 64
                                    SizeAlign = 8
                            if ItemStruct != '':
                                UnitDict = {'UINT8':1, 'UINT16':2, 'UINT32':4, 'UINT64':8}
                                if ItemStruct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
                                    Unit = UnitDict[ItemStruct]
                                    # If there are 64 bit unit, align to 64
                                    if Unit == 8:
                                        MaxAlign = 64
                                    SizeAlign = max(SizeAlign, Unit)
                            if (ConfigDict['embed'].find(':START') != -1):
                                Base = ItemOffset
                            SubOffset = ItemOffset - Base
                            SubRemainder = SubOffset % Unit
                            if SubRemainder:
                                Diff = Unit - SubRemainder
                                Offset = Offset + Diff
                                ItemOffset = ItemOffset + Diff
                            if (ConfigDict['embed'].find(':END') != -1):
                                Remainder = Offset % (MaxAlign/8)   # MaxAlign is either 32 or 64
                                if Remainder:
                                    Diff = int((MaxAlign/8) - Remainder)
                                    Offset = Offset + Diff
                                    ItemOffset = ItemOffset + Diff
                                MaxAlign = 32   # Reset to default 32 align when struct end
                            if (ConfigDict['cname'] == 'UpdTerminator'):
                                # ItemLength is the size of UpdTerminator
                                # Itemlength might be 16, 32, or 64
                                # Struct align to 64 if UpdTerminator
                                # or struct size is 64 bit, else align to 32
                                Remainder = Offset % max(ItemLength/8, 4, SizeAlign)
                                Offset = Offset + ItemLength
                                if Remainder:
                                    Diff = int(max(ItemLength/8, 4, SizeAlign) - Remainder)
                                    ItemOffset = ItemOffset + Diff
                            ConfigDict['offset'] = ItemOffset
                        self._CfgItemList.append(ConfigDict.copy())
                        # Reset per-item fields for the next config item.
                        ConfigDict['name'] = ''
                        ConfigDict['find'] = ''
                        ConfigDict['struct'] = ''
                        ConfigDict['embed'] = ''
                        ConfigDict['comment'] = ''
                        ConfigDict['marker'] = ''
                        ConfigDict['order'] = -1
                        ConfigDict['subreg'] = []
                        ConfigDict['option'] = ''
                    else:
                        # It could be a virtual item as below
                        # !BSF FIELD:{SerialDebugPortAddress0:1}
                        # or
                        # @Bsf FIELD:{SerialDebugPortAddress0:1b}
                        Match = re.match("^\s*#\s+(!BSF|@Bsf)\s+FIELD:{(.+):(\d+)([Bb])?}", DscLine)
                        if Match:
                            SubCfgDict = ConfigDict.copy()
                            if (Match.group(4) == None) or (Match.group(4) == 'B'):
                                UnitBitLen = 8
                            elif Match.group(4) == 'b':
                                UnitBitLen = 1
                            else:
                                print("ERROR: Invalide BSF FIELD length for line '%s'" % DscLine)
                                raise SystemExit
                            SubCfgDict['cname'] = Match.group(2)
                            SubCfgDict['bitlength'] = int (Match.group(3)) * UnitBitLen
                            if SubCfgDict['bitlength'] > 0:
                                # Attach this bit field to the last real item.
                                LastItem = self._CfgItemList[-1]
                                if len(LastItem['subreg']) == 0:
                                    SubOffset = 0
                                else:
                                    SubOffset = LastItem['subreg'][-1]['bitoffset'] + LastItem['subreg'][-1]['bitlength']
                                SubCfgDict['bitoffset'] = SubOffset
                                LastItem['subreg'].append (SubCfgDict.copy())
                            ConfigDict['name'] = ''
        return Error
def GetBsfBitFields (self, subitem, bytes):
start = subitem['bitoffset']
end = start + subitem['bitlength']
bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1])
bitsvalue = bitsvalue[::-1]
bitslen = len(bitsvalue)
if start > bitslen or end > bitslen:
raise Exception ("Invalid bits offset [%d,%d] %d for %s" % (start, end, bitslen, subitem['name']))
return '0x%X' % (int(bitsvalue[start:end][::-1], 2))
def UpdateSubRegionDefaultValue (self):
Error = 0
for Item in self._CfgItemList:
if len(Item['subreg']) == 0:
continue
bytearray = []
if Item['value'][0] == '{':
binlist = Item['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
bytearray.append(value)
else:
if Item['value'].startswith('0x'):
value = int(Item['value'], 16)
else:
value = int(Item['value'])
idx = 0
while idx < Item['length']:
bytearray.append(value & 0xFF)
value = value >> 8
idx = idx + 1
for SubItem in Item['subreg']:
valuestr = self.GetBsfBitFields(SubItem, bytearray)
SubItem['value'] = valuestr
return Error
def NoDscFileChange (self, OutPutFile):
NoFileChange = True
if not os.path.exists(OutPutFile):
NoFileChange = False
else:
OutputTime = os.path.getmtime(OutPutFile)
if self._DscTime > OutputTime:
NoFileChange = False
return NoFileChange
    def CreateSplitUpdTxt (self, UpdTxtFile):
        """Generate one UPD text file per FSP component (T/M/S/I).

        For each component GUID macro defined in the DSC, writes a
        '<guid>.txt' file in the FV directory listing every UPD item in
        that component's signature range, inserting 'UnusedUpdSpace'
        filler lines for gaps.  Returns 0 on success, 1 on a missing
        required GUID, or 256 when the DSC is unchanged (skip).

        NOTE(review): the UpdTxtFile parameter is reset to '' on every loop
        iteration, so the passed-in value is never used — presumably
        intentional (the name is always derived from the GUID); confirm.
        """
        GuidList = ['FSP_T_UPD_TOOL_GUID','FSP_M_UPD_TOOL_GUID','FSP_S_UPD_TOOL_GUID','FSP_I_UPD_TOOL_GUID']
        SignatureList = ['0x545F', '0x4D5F','0x535F','0x495F']        # _T, _M, _S and _I signature for FSPT, FSPM, FSPS, FSPI
        for Index in range(len(GuidList)):
            UpdTxtFile = ''
            FvDir = self._FvDir
            if GuidList[Index] not in self._MacroDict:
                # NOTE(review): NoFSPI is a local here and never read in this
                # method; CreateHeaderFile references a bare NoFSPI name —
                # verify a module-level NoFSPI exists, otherwise that use
                # would raise NameError.
                NoFSPI = False
                if GuidList[Index] == 'FSP_I_UPD_TOOL_GUID':
                    NoFSPI = True
                    continue
                else:
                    self.Error = "%s definition is missing in DSC file" % (GuidList[Index])
                    return 1
            if UpdTxtFile == '':
                UpdTxtFile = os.path.join(FvDir, self._MacroDict[GuidList[Index]] + '.txt')
            if (self.NoDscFileChange (UpdTxtFile)):
                # DSC has not been modified yet
                # So don't have to re-generate other files
                self.Error = 'No DSC file change, skip to create UPD TXT file'
                return 256
            TxtFd = open(UpdTxtFile, "w")
            TxtFd.write("%s\n" % (__copyright_txt__ % date.today().year))
            NextOffset = 0
            SpaceIdx = 0
            StartAddr = 0
            EndAddr = 0
            Default = 'DEFAULT|'
            InRange = False
            # First pass: locate this component's [Signature, UpdTerminator]
            # address range.
            for Item in self._CfgItemList:
                if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
                    StartAddr = Item['offset']
                    NextOffset = StartAddr
                    InRange = True
                if Item['cname'] == 'UpdTerminator' and InRange == True:
                    EndAddr = Item['offset']
                    InRange = False
            InRange = False
            # Second pass: emit every UPD item inside the range, padding gaps.
            for Item in self._CfgItemList:
                if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
                    InRange = True
                if InRange != True:
                    continue
                if Item['cname'] == 'UpdTerminator':
                    InRange = False
                if Item['region'] != 'UPD':
                    continue
                Offset = Item['offset']
                if StartAddr > Offset or EndAddr < Offset:
                    continue
                if NextOffset < Offset:
                    # insert one line
                    TxtFd.write("%s.UnusedUpdSpace%d|%s0x%04X|0x%04X|{0}\n" % (Item['space'], SpaceIdx, Default, NextOffset - StartAddr, Offset - NextOffset))
                    SpaceIdx = SpaceIdx + 1
                NextOffset = Offset + Item['length']
                TxtFd.write("%s.%s|%s0x%04X|%s|%s\n" % (Item['space'],Item['cname'],Default,Item['offset'] - StartAddr,Item['length'],Item['value']))
            TxtFd.close()
        return 0
    def CreateVarDict (self):
        """Build self._VarDict from the parsed config items.

        Records '_LENGTH_' (total UPD size), per-struct '_START_<name>_',
        '_END_<name>_' and '_LENGTH_<name>_' variables from EMBED markers,
        '_TAG_<name>_' for TAG_xxx embeds, and '_OFFSET_<marker>_' for
        items carrying a marker.  Assumes END markers follow their START
        (the END branch reads '_START_<name>_').  Always returns 0.
        """
        Error = 0
        self._VarDict = {}
        if len(self._CfgItemList) > 0:
            Item = self._CfgItemList[-1]
            self._VarDict['_LENGTH_'] = '%d' % (Item['offset'] + Item['length'])
        for Item in self._CfgItemList:
            Embed = Item['embed']
            Match = re.match("^(\w+):(\w+):(START|END)", Embed)
            if Match:
                StructName = Match.group(1)
                VarName = '_%s_%s_' % (Match.group(3), StructName)
                if Match.group(3) == 'END':
                    # END marker: compute the struct length from its START.
                    self._VarDict[VarName] = Item['offset'] + Item['length']
                    self._VarDict['_LENGTH_%s_' % StructName] = \
                        self._VarDict['_END_%s_' % StructName] - self._VarDict['_START_%s_' % StructName]
                    if Match.group(2).startswith('TAG_'):
                        if (self.Mode != 'FSP') and (self._VarDict['_LENGTH_%s_' % StructName] % 4):
                            raise Exception("Size of structure '%s' is %d, not DWORD aligned !" % (StructName, self._VarDict['_LENGTH_%s_' % StructName]))
                        self._VarDict['_TAG_%s_' % StructName] = int (Match.group(2)[4:], 16) & 0xFFF
                else:
                    self._VarDict[VarName] = Item['offset']
            if Item['marker']:
                self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = Item['offset']
        return Error
def UpdateBsfBitUnit (self, Item):
BitTotal = 0
BitOffset = 0
StartIdx = 0
Unit = None
UnitDec = {1:'BYTE', 2:'WORD', 4:'DWORD', 8:'QWORD'}
for Idx, SubItem in enumerate(Item['subreg']):
if Unit is None:
Unit = SubItem['bitunit']
BitLength = SubItem['bitlength']
BitTotal += BitLength
BitOffset += BitLength
if BitOffset > 64 or BitOffset > Unit * 8:
break
if BitOffset == Unit * 8:
for SubIdx in range (StartIdx, Idx + 1):
Item['subreg'][SubIdx]['bitunit'] = Unit
BitOffset = 0
StartIdx = Idx + 1
Unit = None
if BitOffset > 0:
raise Exception ("Bit fields cannot fit into %s for '%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname']))
ExpectedTotal = Item['length'] * 8
if Item['length'] * 8 != BitTotal:
raise Exception ("Bit fields total length (%d) does not match length (%d) of '%s' !" % (BitTotal, ExpectedTotal, Item['cname']))
    def UpdateDefaultValue (self):
        """Normalize the default 'value' of every config item.

        Items without sub-registers get list/string values canonicalized via
        FormatListValue, and non-numeric scalars evaluated via
        EvaluateExpress.  Items with sub-registers have each bit-field value
        extracted from the parent bytes and their bit units validated.
        Always returns 0.
        """
        Error = 0
        for Idx, Item in enumerate(self._CfgItemList):
            if len(Item['subreg']) == 0:
                Value = Item['value']
                if (len(Value) > 0) and (Value[0] == '{' or Value[0] == "'" or Value[0] == '"'):
                    # {XXX} or 'XXX' strings
                    self.FormatListValue(self._CfgItemList[Idx])
                else:
                    Match = re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value)
                    if not Match:
                        # Not a plain literal: evaluate as an expression.
                        NumValue = self.EvaluateExpress (Value)
                        Item['value'] = '0x%X' % NumValue
            else:
                ValArray = self.ValueToByteArray (Item['value'], Item['length'])
                for SubItem in Item['subreg']:
                    SubItem['value'] = self.GetBsfBitFields(SubItem, ValArray)
                self.UpdateBsfBitUnit (Item)
        return Error
    def ProcessMultilines (self, String, MaxCharLength):
        """Split String into indented output lines for BSF/header comments.

        Literal '\\n' sequences inside String always force a break; strings
        longer than MaxCharLength are additionally wrapped at spaces near
        the limit.  Each emitted line is left-stripped and prefixed with
        two spaces, terminated with a newline.
        """
        Multilines = ''
        StringLength = len(String)
        CurrentStringStart = 0
        StringOffset = 0
        BreakLineDict = []
        if len(String) <= MaxCharLength:
            # Short string: only break at embedded literal '\n' markers.
            while (StringOffset < StringLength):
                if StringOffset >= 1:
                    if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
                        BreakLineDict.append (StringOffset + 1)
                StringOffset += 1
            if BreakLineDict != []:
                for Each in BreakLineDict:
                    Multilines += "  %s\n" % String[CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
                if StringLength - CurrentStringStart > 0:
                    Multilines += "  %s\n" % String[CurrentStringStart:].lstrip()
            else:
                Multilines = "  %s\n" % String
        else:
            # Long string: wrap at spaces once MaxCharLength is reached,
            # still honoring embedded literal '\n' markers.
            NewLineStart = 0
            NewLineCount = 0
            FoundSpaceChar = False
            while (StringOffset < StringLength):
                if StringOffset >= 1:
                    if NewLineCount >= MaxCharLength - 1:
                        if String[StringOffset] == ' ' and StringLength - StringOffset > 10:
                            BreakLineDict.append (NewLineStart + NewLineCount)
                            NewLineStart = NewLineStart + NewLineCount
                            NewLineCount = 0
                            FoundSpaceChar = True
                        elif StringOffset == StringLength - 1 and FoundSpaceChar == False:
                            BreakLineDict.append (0)
                    if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
                        BreakLineDict.append (StringOffset + 1)
                        NewLineStart = StringOffset + 1
                        NewLineCount = 0
                StringOffset += 1
                NewLineCount += 1
            if BreakLineDict != []:
                BreakLineDict.sort ()
                for Each in BreakLineDict:
                    if Each > 0:
                        Multilines += "  %s\n" % String[CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
                if StringLength - CurrentStringStart > 0:
                    Multilines += "  %s\n" % String[CurrentStringStart:].lstrip()
        return Multilines
def CreateField (self, Item, Name, Length, Offset, Struct, BsfName, Help, Option, BitsLength = None):
PosName = 28
PosComment = 30
NameLine=''
HelpLine=''
OptionLine=''
if Length == 0 and Name == 'Dummy':
return '\n'
IsArray = False
if Length in [1,2,4,8]:
Type = "UINT%d" % (Length * 8)
if Name.startswith("UnusedUpdSpace") and Length != 1:
IsArray = True
Type = "UINT8"
else:
IsArray = True
Type = "UINT8"
if Item and Item['value'].startswith('{'):
Type = "UINT8"
IsArray = True
if Struct != '':
Type = Struct
if Struct in ['UINT8','UINT16','UINT32','UINT64']:
IsArray = True
Unit = int(Type[4:]) / 8
Length = Length / Unit
else:
IsArray = False
if IsArray:
Name = Name + '[%d]' % Length
if len(Type) < PosName:
Space1 = PosName - len(Type)
else:
Space1 = 1
if BsfName != '':
NameLine=" - %s\n" % BsfName
else:
NameLine="\n"
if Help != '':
HelpLine = self.ProcessMultilines (Help, 80)
if Option != '':
OptionLine = self.ProcessMultilines (Option, 80)
if Offset is None:
OffsetStr = '????'
else:
OffsetStr = '0x%04X' % Offset
if BitsLength is None:
BitsLength = ''
else:
BitsLength = ' : %d' % BitsLength
return "\n/** Offset %s%s%s%s**/\n %s%s%s%s;\n" % (OffsetStr, NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name, BitsLength)
def PostProcessBody (self, TextBody):
NewTextBody = []
OldTextBody = []
IncludeLine = False
StructName = ''
VariableName = ''
IsUpdHdrDefined = False
IsUpdHeader = False
for Line in TextBody:
SplitToLines = Line.splitlines()
MatchComment = re.match("^/\*\sCOMMENT:(\w+):([\w|\W|\s]+)\s\*/\s([\s\S]*)", SplitToLines[0])
if MatchComment:
if MatchComment.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if IsUpdHdrDefined != True or IsUpdHeader != True:
CommentLine = " " + MatchComment.group(2) + "\n"
NewTextBody.append("/**" + CommentLine + "**/\n")
Line = Line[(len(SplitToLines[0]) + 1):]
Match = re.match("^/\*\sEMBED_STRUCT:(\w+):(\w+):(START|END)\s\*/\s([\s\S]*)", Line)
if Match:
Line = Match.group(4)
if Match.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if Match and Match.group(3) == 'START':
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('typedef struct {\n')
StructName = Match.group(1)
VariableName = Match.group(2)
MatchOffset = re.search('/\*\*\sOffset\s0x([a-fA-F0-9]+)', Line)
if MatchOffset:
Offset = int(MatchOffset.group(1), 16)
else:
Offset = None
Line
IncludeLine = True
OldTextBody.append (self.CreateField (None, VariableName, 0, Offset, StructName, '', '', ''))
if IncludeLine:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append (Line)
else:
OldTextBody.append (Line)
if Match and Match.group(3) == 'END':
if (StructName != Match.group(1)) or (VariableName != Match.group(2)):
print ("Unmatched struct name '%s' and '%s' !" % (StructName, Match.group(1)))
else:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('} %s;\n\n' % StructName)
IsUpdHdrDefined = True
IncludeLine = False
NewTextBody.extend(OldTextBody)
return NewTextBody
def WriteLinesWithoutTailingSpace (self, HeaderFd, Line):
TxtBody2 = Line.splitlines(True)
for Line2 in TxtBody2:
Line2 = Line2.rstrip()
Line2 += '\n'
HeaderFd.write (Line2)
return 0
    def CreateHeaderFile (self, InputHeaderFile):
        """Generate FspUpd.h plus the per-component Fsp[TMSI]Upd.h headers
        in the FV directory from the parsed config items.

        InputHeaderFile, when given, supplies extra lines copied verbatim
        into each component header between !EXPORT BEGIN/END markers.
        Returns 0 on success, 256 when the DSC is unchanged (skip), 4 on a
        malformed EMBED marker, 6 when InputHeaderFile is missing.
        """
        FvDir = self._FvDir
        HeaderFileName = 'FspUpd.h'
        HeaderFile = os.path.join(FvDir, HeaderFileName)
        # Check if header needs to be recreated
        if (self.NoDscFileChange (HeaderFile)):
            # DSC has not been modified yet
            # So don't have to re-generate other files
            self.Error = 'No DSC file change, skip to create UPD header file'
            return 256
        TxtBody = []
        # Emit the FSPx_UPD_SIGNATURE defines from the 8-byte Signature items.
        for Item in self._CfgItemList:
            if str(Item['cname']) == 'Signature' and Item['length'] == 8:
                Value = int(Item['value'], 16)
                Chars = []
                while Value != 0x0:
                    Chars.append(chr(Value & 0xFF))
                    Value = Value >> 8
                SignatureStr = ''.join(Chars)
                # Signature will be _T / _M / _S / _I for FSPT / FSPM / FSPS /FSPI accordingly
                if '_T' in SignatureStr[6:6+2]:
                    TxtBody.append("#define FSPT_UPD_SIGNATURE               %s        /* '%s' */\n\n" % (Item['value'], SignatureStr))
                elif '_M' in SignatureStr[6:6+2]:
                    TxtBody.append("#define FSPM_UPD_SIGNATURE               %s        /* '%s' */\n\n" % (Item['value'], SignatureStr))
                elif '_S' in SignatureStr[6:6+2]:
                    TxtBody.append("#define FSPS_UPD_SIGNATURE               %s        /* '%s' */\n\n" % (Item['value'], SignatureStr))
                elif '_I' in SignatureStr[6:6+2]:
                    # NOTE(review): NoFSPI is not defined in this method;
                    # presumably a module-level flag set elsewhere (see
                    # CreateSplitUpdTxt) — verify, otherwise this line
                    # raises NameError when an FSPI signature is present.
                    if NoFSPI == False:
                        TxtBody.append("#define FSPI_UPD_SIGNATURE               %s        /* '%s' */\n\n" % (Item['value'], SignatureStr))
            TxtBody.append("\n")
        # Build the typedef bodies for each UPD structure.
        for Region in ['UPD']:
            UpdOffsetTable = []
            UpdSignature = ['0x545F', '0x4D5F', '0x535F', '0x495F'] #['_T', '_M', '_S', '_I'] signature for FSPT, FSPM, FSPS, FSPI
            UpdStructure = ['FSPT_UPD', 'FSPM_UPD', 'FSPS_UPD', 'FSPI_UPD']
            for Item in self._CfgItemList:
                if Item["cname"] == 'Signature' and Item["value"][0:6] in UpdSignature:
                    Item["offset"] = 0 # re-initialize offset to 0 when new UPD structure starting
                    UpdOffsetTable.append (Item["offset"])
            for UpdIdx in range(len(UpdOffsetTable)):
                CommentLine = ""
                # Locate the comment (or synthesize one) for this structure.
                for Item in self._CfgItemList:
                    if Item["comment"] != '' and Item["offset"] >= UpdOffsetTable[UpdIdx]:
                        MatchComment = re.match("^(U|V)PD_DATA_REGION:([\w|\W|\s]+)", Item["comment"])
                        if MatchComment and MatchComment.group(1) == Region[0]:
                            CommentLine = " " + MatchComment.group(2) + "\n"
                            TxtBody.append("/**" + CommentLine + "**/\n")
                    elif Item["offset"] >= UpdOffsetTable[UpdIdx] and Item["comment"] == '':
                        Match = re.match("^FSP([\w|\W|\s])_UPD", UpdStructure[UpdIdx])
                        if Match:
                            TxtBody.append("/** Fsp " + Match.group(1) + " UPD Configuration\n**/\n")
                TxtBody.append("typedef struct {\n")
                NextOffset = 0
                SpaceIdx = 0
                Offset = 0
                LastVisible = True
                ResvOffset = 0
                ResvIdx = 0
                LineBuffer = []
                InRange = False
                # Emit members for items in this structure's signature range,
                # inserting Unused/Reserved filler for gaps and hidden runs.
                for Item in self._CfgItemList:
                    if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == UpdSignature[UpdIdx] or Region[0] == 'V':
                        InRange = True
                    if InRange != True:
                        continue
                    if Item['cname'] == 'UpdTerminator':
                        InRange = False
                    if Item['region'] != Region:
                        continue
                    if Item["offset"] < UpdOffsetTable[UpdIdx]:
                        continue
                    NextVisible = LastVisible
                    if LastVisible and (Item['header'] == 'OFF'):
                        NextVisible = False
                        ResvOffset = Item['offset']
                    elif (not LastVisible) and Item['header'] == 'ON':
                        NextVisible = True
                        Name = "Reserved" + Region[0] + "pdSpace%d" % ResvIdx
                        ResvIdx = ResvIdx + 1
                        TxtBody.append(self.CreateField (Item, Name, Item["offset"] - ResvOffset, ResvOffset, '', '', '', ''))
                    if Offset < Item["offset"]:
                        if LastVisible:
                            Name = "Unused" + Region[0] + "pdSpace%d" % SpaceIdx
                            LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', ''))
                        SpaceIdx = SpaceIdx + 1
                        Offset = Item["offset"]
                    LastVisible = NextVisible
                    Offset = Offset + Item["length"]
                    if LastVisible:
                        for Each in LineBuffer:
                            TxtBody.append (Each)
                        LineBuffer = []
                        Comment = Item["comment"]
                        Embed = Item["embed"].upper()
                        if Embed.endswith(':START') or Embed.endswith(':END'):
                            if not Comment == '' and Embed.endswith(':START'):
                                Marker = '/* COMMENT:%s */ \n' % Item["comment"]
                                Marker = Marker + '/* EMBED_STRUCT:%s */ ' % Item["embed"]
                            else:
                                Marker = '/* EMBED_STRUCT:%s */ ' % Item["embed"]
                        else:
                            if Embed == '':
                                Marker = ''
                            else:
                                self.Error = "Invalid embedded structure format '%s'!\n" % Item["embed"]
                                return 4
                        Line = Marker + self.CreateField (Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option'])
                        TxtBody.append(Line)
                    if Item['cname'] == 'UpdTerminator':
                        break
                TxtBody.append("} " + UpdStructure[UpdIdx] + ";\n\n")
        # Handle the embedded data structure
        TxtBody = self.PostProcessBody (TxtBody)
        HeaderTFileName = 'FsptUpd.h'
        HeaderMFileName = 'FspmUpd.h'
        HeaderSFileName = 'FspsUpd.h'
        HeaderIFileName = 'FspiUpd.h'
        UpdRegionCheck = ['FSPT', 'FSPM', 'FSPS', 'FSPI'] # FSPX_UPD_REGION
        UpdConfigCheck = ['FSP_T', 'FSP_M', 'FSP_S', 'FSP_I'] # FSP_X_CONFIG, FSP_X_TEST_CONFIG, FSP_X_RESTRICTED_CONFIG
        UpdSignatureCheck = ['FSPT_UPD_SIGNATURE', 'FSPM_UPD_SIGNATURE', 'FSPS_UPD_SIGNATURE', 'FSPI_UPD_SIGNATURE']
        ExcludedSpecificUpd = ['FSPT_ARCH_UPD', 'FSPM_ARCH_UPD', 'FSPS_ARCH_UPD', 'FSPI_ARCH_UPD']
        ExcludedSpecificUpd1 = ['FSPT_ARCH2_UPD', 'FSPM_ARCH2_UPD', 'FSPS_ARCH2_UPD']
        IncLines = []
        if InputHeaderFile != '':
            if not os.path.exists(InputHeaderFile):
                self.Error = "Input header file '%s' does not exist" % InputHeaderFile
                return 6
            InFd = open(InputHeaderFile, "r")
            IncLines = InFd.readlines()
            InFd.close()
        # Write the per-component headers, copying matching !EXPORT blocks
        # and the component's own structs out of TxtBody.
        for item in range(len(UpdRegionCheck)):
            if UpdRegionCheck[item] == 'FSPT':
                HeaderFd = open(os.path.join(FvDir, HeaderTFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderTFileName))
            elif UpdRegionCheck[item] == 'FSPM':
                HeaderFd = open(os.path.join(FvDir, HeaderMFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderMFileName))
            elif UpdRegionCheck[item] == 'FSPS':
                HeaderFd = open(os.path.join(FvDir, HeaderSFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderSFileName))
            elif UpdRegionCheck[item] == 'FSPI':
                HeaderFd = open(os.path.join(FvDir, HeaderIFileName), "w")
                FileBase = os.path.basename(os.path.join(FvDir, HeaderIFileName))
            FileName = FileBase.replace(".", "_").upper()
            HeaderFd.write("%s\n"   % (__copyright_h__ % date.today().year))
            HeaderFd.write("#ifndef __%s__\n" % FileName)
            HeaderFd.write("#define __%s__\n\n" % FileName)
            HeaderFd.write("#include <%s>\n\n" % HeaderFileName)
            HeaderFd.write("#pragma pack(1)\n\n")
            Export = False
            for Line in IncLines:
                Match = re.search ("!EXPORT\s+([A-Z]+)\s+EXTERNAL_BOOTLOADER_STRUCT_(BEGIN|END)\s+", Line)
                if Match:
                    if Match.group(2) == "BEGIN" and Match.group(1) == UpdRegionCheck[item]:
                        Export = True
                        continue
                    else:
                        Export = False
                        continue
                if Export:
                    HeaderFd.write(Line)
            HeaderFd.write("\n")
            Index = 0
            StartIndex = 0
            EndIndex = 0
            StructStart = []
            StructStartWithComment = []
            StructEnd = []
            # First pass: find the [start, end] line ranges of this
            # component's structs in TxtBody.
            for Line in TxtBody:
                Index += 1
                Match = re.match("(typedef struct {)", Line)
                if Match:
                    StartIndex = Index - 1
                Match = re.match("}\s([_A-Z0-9]+);", Line)
                if Match and (UpdRegionCheck[item] in Match.group(1) or UpdConfigCheck[item] in Match.group(1)) and (ExcludedSpecificUpd[item] not in Match.group(1)) and (ExcludedSpecificUpd1[item] not in Match.group(1)):
                    EndIndex = Index
                    StructStart.append(StartIndex)
                    StructEnd.append(EndIndex)
            Index = 0
            # Second pass: include a leading comment line when present.
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index == StructStart[Item]:
                        Match = re.match("^(/\*\*\s*)", Line)
                        if Match:
                            StructStartWithComment.append(StructStart[Item])
                        else:
                            StructStartWithComment.append(StructStart[Item] + 1)
            Index = 0
            # Third pass: write out the selected ranges.
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
                        self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
            HeaderFd.write("#pragma pack()\n\n")
            HeaderFd.write("#endif\n")
            HeaderFd.close()
        # Write the shared FspUpd.h with the signature defines.
        HeaderFd = open(HeaderFile, "w")
        FileBase = os.path.basename(HeaderFile)
        FileName = FileBase.replace(".", "_").upper()
        HeaderFd.write("%s\n"   % (__copyright_h__ % date.today().year))
        HeaderFd.write("#ifndef __%s__\n" % FileName)
        HeaderFd.write("#define __%s__\n\n" % FileName)
        HeaderFd.write("#include <FspEas.h>\n\n")
        HeaderFd.write("#pragma pack(1)\n\n")
        for item in range(len(UpdRegionCheck)):
            Index = 0
            StartIndex = 0
            EndIndex = 0
            StructStart = []
            StructStartWithComment = []
            StructEnd = []
            for Line in TxtBody:
                Index += 1
                Match = re.match("(typedef struct {)", Line)
                if Match:
                    StartIndex = Index - 1
                Match = re.match("#define\s([_A-Z0-9]+)\s*", Line)
                # NOTE(review): both sides of this 'or' are identical —
                # presumably one side was meant to check a different list;
                # verify against upstream intent.
                if Match and (UpdSignatureCheck[item] in Match.group(1) or UpdSignatureCheck[item] in Match.group(1)):
                    StructStart.append(Index - 1)
                    StructEnd.append(Index)
            Index = 0
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index == StructStart[Item]:
                        Match = re.match("^(/\*\*\s*)", Line)
                        if Match:
                            StructStartWithComment.append(StructStart[Item])
                        else:
                            StructStartWithComment.append(StructStart[Item] + 1)
            Index = 0
            for Line in TxtBody:
                Index += 1
                for Item in range(len(StructStart)):
                    if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
                        self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
        HeaderFd.write("#pragma pack()\n\n")
        HeaderFd.write("#endif\n")
        HeaderFd.close()
        return 0
def WriteBsfStruct (self, BsfFd, Item):
    """Emit one StructDef entry for Item into the BSF file.

    Returns a list of (value, text) option tuples when Item is a Combo
    whose option string is not a built-in option set; otherwise an
    empty list.
    """
    LogExpr = CLogicalExpression()
    # Items typed "None" fall back to the default FSP token space.
    Space = "gPlatformFspPkgTokenSpaceGuid" if Item['type'] == "None" else Item['space']
    Line = " $%s_%s" % (Space, Item['cname'])
    Match = re.match("\s*\{([x0-9a-fA-F,\s]+)\}\s*", Item['value'])
    DefaultValue = Match.group(1).strip() if Match else Item['value'].strip()
    Pad = ' ' * (64 - len(Line))
    if 'bitlength' in Item:
        BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, Pad, Item['bitlength'], DefaultValue))
    else:
        BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, Pad, Item['length'], DefaultValue))
    TmpList = []
    if Item['type'] == "Combo" and Item['option'] not in self._BuidinOption:
        # Custom "val:text" option list: validate each selection index.
        for Option in Item['option'].split(','):
            Option = Option.strip()
            (OpVal, OpStr) = Option.split(':')
            if LogExpr.getNumber (OpVal) is None:
                raise Exception("Selection Index '%s' is not a number" % OpVal)
            TmpList.append((OpVal, OpStr))
    return TmpList
def WriteBsfOption (self, BsfFd, Item):
    """Emit the BSF page widget (Combo/EditNum/EditText/Table) for Item.

    WriteHelp controls what trails the widget: 0 = nothing, 1 = the help
    lines, 2 = help lines plus the valid-range note captured from the
    EditNum type string.
    """
    PcdName = '%s_%s' % (Item['space'], Item['cname'])
    WriteHelp = 0
    Match = None                 # holds the EditNum range match for WriteHelp == 2
    ItemType = Item['type']
    if ItemType == "Combo":
        if Item['option'] in self._BuidinOption:
            Options = self._BuidinOption[Item['option']]
        else:
            Options = PcdName
        BsfFd.write(' %s $%s, "%s", &%s,\n' % (ItemType, PcdName, Item['name'], Options))
        WriteHelp = 1
    elif ItemType.startswith("EditNum"):
        Match = re.match("EditNum\s*,\s*(HEX|DEC)\s*,\s*\((\d+|0x[0-9A-Fa-f]+)\s*,\s*(\d+|0x[0-9A-Fa-f]+)\)", ItemType)
        if Match:
            BsfFd.write(' EditNum $%s, "%s", %s,\n' % (PcdName, Item['name'], Match.group(1)))
            WriteHelp = 2
    elif ItemType.startswith("EditText"):
        BsfFd.write(' %s $%s, "%s",\n' % (ItemType, PcdName, Item['name']))
        WriteHelp = 1
    elif ItemType == "Table":
        Columns = Item['option'].split(',')
        if len(Columns) != 0:
            BsfFd.write(' %s $%s "%s",' % (ItemType, PcdName, Item['name']))
            for Col in Columns:
                Fmt = Col.split(':')
                if len(Fmt) != 3:
                    raise Exception("Column format '%s' is invalid !" % Fmt)
                try:
                    Dtype = int(Fmt[1].strip())
                except:
                    raise Exception("Column size '%s' is invalid !" % Fmt[1])
                BsfFd.write('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip()))
            BsfFd.write(',\n')
            WriteHelp = 1
    if WriteHelp > 0:
        # First help line gets the "Help" keyword; continuation lines do not.
        for Idx, HelpLine in enumerate(Item['help'].split('\\n\\r')):
            if Idx == 0:
                BsfFd.write(' Help "%s"\n' % (HelpLine))
            else:
                BsfFd.write(' "%s"\n' % (HelpLine))
        if WriteHelp == 2:
            BsfFd.write(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3)))
def GenerateBsfFile (self, BsfFile):
    """Write the UPD Boot Setting File (BSF).

    Emits: copyright banner, global data, a StructDef section describing
    every config item's layout (with Skip padding between items), the
    built-in option lists, custom option lists gathered from the items,
    an InfoBlock, and one Page section per configured page.

    Returns 0 on success, 1 for an invalid output path, 256 when the DSC
    is unchanged and regeneration is skipped.
    """
    if BsfFile == '':
        self.Error = "BSF output file '%s' is invalid" % BsfFile
        return 1
    if (self.NoDscFileChange (BsfFile)):
        # DSC has not been modified yet
        # So don't have to re-generate other files
        self.Error = 'No DSC file change, skip to create UPD BSF file'
        return 256
    Error = 0
    OptionDict = {}
    BsfFd = open(BsfFile, "w")
    BsfFd.write("%s\n" % (__copyright_bsf__ % date.today().year))
    BsfFd.write("%s\n" % self._GlobalDataDef)
    BsfFd.write("StructDef\n")
    # NextOffset tracks the end of the previously emitted item so gaps
    # can be filled with explicit "Skip" directives.
    NextOffset = -1
    for Item in self._CfgItemList:
        if Item['find'] != '':
            BsfFd.write('\n Find "%s"\n' % Item['find'])
            NextOffset = Item['offset'] + Item['length']
        if Item['name'] != '':
            if NextOffset != Item['offset']:
                BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset))
            if len(Item['subreg']) > 0:
                # Item is a bit-field container: walk sub-registers in
                # bit units, then pad the remainder out to the item end.
                NextOffset = Item['offset']
                BitsOffset = NextOffset * 8
                for SubItem in Item['subreg']:
                    BitsOffset += SubItem['bitlength']
                    if SubItem['name'] == '':
                        if 'bitlength' in SubItem:
                            BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength']))
                        else:
                            BsfFd.write(" Skip %d bytes\n" % (SubItem['length']))
                    else:
                        Options = self.WriteBsfStruct(BsfFd, SubItem)
                        if len(Options) > 0:
                            OptionDict[SubItem['space']+'_'+SubItem['cname']] = Options
                NextBitsOffset = (Item['offset'] + Item['length']) * 8
                if NextBitsOffset > BitsOffset:
                    BitsGap = NextBitsOffset - BitsOffset
                    BitsRemain = BitsGap % 8
                    if BitsRemain:
                        BsfFd.write(" Skip %d bits\n" % BitsRemain)
                        BitsGap -= BitsRemain
                    BytesRemain = int(BitsGap / 8)
                    if BytesRemain:
                        BsfFd.write(" Skip %d bytes\n" % BytesRemain)
                NextOffset = Item['offset'] + Item['length']
            else:
                NextOffset = Item['offset'] + Item['length']
                Options = self.WriteBsfStruct(BsfFd, Item)
                if len(Options) > 0:
                    OptionDict[Item['space']+'_'+Item['cname']] = Options
    BsfFd.write("\nEndStruct\n\n")
    BsfFd.write("%s" % self._BuidinOptionTxt)
    # Custom selection lists collected from WriteBsfStruct above.
    for Each in OptionDict:
        BsfFd.write("List &%s\n" % Each)
        for Item in OptionDict[Each]:
            BsfFd.write(' Selection %s , "%s"\n' % (Item[0], Item[1]))
        BsfFd.write("EndList\n\n")
    BsfFd.write("BeginInfoBlock\n")
    BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
    BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
    BsfFd.write("EndInfoBlock\n\n")
    for Each in self._CfgPageDict:
        BsfFd.write('Page "%s"\n' % self._CfgPageDict[Each])
        BsfItems = []
        for Item in self._CfgItemList:
            if Item['name'] != '':
                if Item['page'] != Each:
                    continue
                if len(Item['subreg']) > 0:
                    for SubItem in Item['subreg']:
                        if SubItem['name'] != '':
                            BsfItems.append(SubItem)
                else:
                    BsfItems.append(Item)
        # Widgets appear in their configured display order, not layout order.
        BsfItems.sort(key=lambda x: x['order'])
        for Item in BsfItems:
            self.WriteBsfOption (BsfFd, Item)
        BsfFd.write("EndPage\n\n")
    BsfFd.close()
    return Error
def Usage():
    """Print the command-line usage summary for the GenCfgOpt tool."""
    for Line in ("GenCfgOpt Version 0.59",
                 "Usage:",
                 " GenCfgOpt UPDTXT PlatformDscFile BuildFvDir [-D Macros]",
                 " GenCfgOpt HEADER PlatformDscFile BuildFvDir InputHFile [-D Macros]",
                 " GenCfgOpt GENBSF PlatformDscFile BuildFvDir BsfOutFile [-D Macros]"):
        print (Line)
def Main():
    """Command-line entry point for GenCfgOpt.

    argv layout: <command> <PlatformDscFile> <BuildFvDir> [OutFile]
    [-D macros...] [--pcd name=value ...]. Commands are UPDTXT, HEADER
    and GENBSF. Returns 0 on success, a non-zero error code otherwise;
    a sub-command result of 256 means "no DSC change, skipped".
    """
    #
    # Parse the options and args
    #
    i = 1
    GenCfgOpt = CGenCfgOpt()
    # Collect "--pcd name=value" pairs; the value token is consumed here.
    while i < len(sys.argv):
        if sys.argv[i].strip().lower() == "--pcd":
            BuildOptionPcd.append(sys.argv[i+1])
            i += 1
        i += 1
    argc = len(sys.argv)
    if argc < 4:
        Usage()
        return 1
    else:
        DscFile = sys.argv[2]
        if not os.path.exists(DscFile):
            print ("ERROR: Cannot open DSC file '%s' !" % DscFile)
            return 2
        OutFile = ''
        if argc > 4:
            # argv[4] is either the output file or the first "-D" macro.
            if sys.argv[4][0] == '-':
                Start = 4
            else:
                OutFile = sys.argv[4]
                Start = 5
            if argc > Start:
                if GenCfgOpt.ParseMacros(sys.argv[Start:]) != 0:
                    print ("ERROR: Macro parsing failed !")
                    return 3
        FvDir = sys.argv[3]
        if not os.path.exists(FvDir):
            os.makedirs(FvDir)
        if GenCfgOpt.ParseDscFile(DscFile, FvDir) != 0:
            print ("ERROR: %s !" % GenCfgOpt.Error)
            return 5
        if GenCfgOpt.UpdateSubRegionDefaultValue() != 0:
            print ("ERROR: %s !" % GenCfgOpt.Error)
            return 7
        if sys.argv[1] == "UPDTXT":
            Ret = GenCfgOpt.CreateSplitUpdTxt(OutFile)
            if Ret != 0:
                # No change is detected
                if Ret == 256:
                    print ("INFO: %s !" % (GenCfgOpt.Error))
                else :
                    print ("ERROR: %s !" % (GenCfgOpt.Error))
            return Ret
        elif sys.argv[1] == "HEADER":
            Ret = GenCfgOpt.CreateHeaderFile(OutFile)
            if Ret != 0:
                # No change is detected
                if Ret == 256:
                    print ("INFO: %s !" % (GenCfgOpt.Error))
                else :
                    print ("ERROR: %s !" % (GenCfgOpt.Error))
                    return 8
            return Ret
        elif sys.argv[1] == "GENBSF":
            Ret = GenCfgOpt.GenerateBsfFile(OutFile)
            if Ret != 0:
                # No change is detected
                if Ret == 256:
                    print ("INFO: %s !" % (GenCfgOpt.Error))
                else :
                    print ("ERROR: %s !" % (GenCfgOpt.Error))
                    return 9
            return Ret
        else:
            if argc < 5:
                Usage()
                return 1
            print ("ERROR: Unknown command '%s' !" % sys.argv[1])
            Usage()
            return 1
        # NOTE(review): every branch above returns, so the two trailing
        # "return 0" statements are unreachable; kept for byte fidelity.
        return 0
    return 0
# Script entry point: the process exit status is Main()'s return code.
if __name__ == '__main__':
    sys.exit(Main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/GenCfgOpt.py
|
# @ GenCfgData.py
#
# Copyright (c) 2014 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import marshal
from functools import reduce
from datetime import date
# Generated file copyright header
# Template banner for generated files; GetCopyrightHeader() substitutes
# the file-kind description (%s) and the current year (%4d).
__copyright_tmp__ = """/** @file
Configuration %s File.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
This file is automatically generated. Please do NOT modify !!!
**/
"""
# Header emitted at the top of a DSC generated from a BSF
# (see CFspBsf2Dsc.generate_dsc); %04d is the current year.
__copyright_dsc__ = """## @file
#
# Copyright (c) %04d, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
[PcdsDynamicVpd.Upd]
#
# Global definitions in BSF
# !BSF BLOCK:{NAME:"FSP UPD Configuration", VER:"0.1"}
#
"""
def Bytes2Val(Bytes):
    """Convert a little-endian sequence of byte values to an integer.

    Fix: the original ``reduce`` had no initializer and raised
    ``TypeError`` for an empty sequence; ``int.from_bytes`` returns 0
    for empty input and is the idiomatic conversion.
    """
    return int.from_bytes(bytes(Bytes), 'little')
def Bytes2Str(Bytes):
    """Render a byte sequence as a C-style '{ 0xAA, 0xBB }' initializer."""
    HexParts = ['0x%02X' % Byte for Byte in Bytes]
    return '{ %s }' % ', '.join(HexParts)
def Str2Bytes(Value, Blen):
    """Strip the surrounding quotes from Value and return its UTF-8
    bytes as a bytearray, NUL-padded to at least Blen bytes.
    Values longer than Blen are returned unchanged (no truncation)."""
    Buffer = bytearray(Value[1:-1], 'utf-8')  # Excluding quotes
    Shortfall = Blen - len(Buffer)
    if Shortfall > 0:
        Buffer += b'\x00' * Shortfall
    return Buffer
def Val2Bytes(Value, Blen):
    """Split Value into a list of Blen little-endian byte values,
    silently discarding bits above Blen * 8."""
    Result = []
    for _ in range(Blen):
        Result.append(Value & 0xff)
        Value >>= 8
    return Result
def Array2Val(ValStr):
    """Parse a little-endian byte-array string such as '{ 0x12, 0x34 }'
    (optionally quoted) into a single integer."""
    Text = ValStr.strip()
    # Peel one layer of braces, then one layer of single quotes.
    if Text.startswith('{'):
        Text = Text[1:]
    if Text.endswith('}'):
        Text = Text[:-1]
    if Text.startswith("'"):
        Text = Text[1:]
    if Text.endswith("'"):
        Text = Text[:-1]
    Value = 0
    # The first element is the least-significant byte, so fold from the end.
    for Token in reversed(Text.split(',')):
        Token = Token.strip()
        Base = 16 if Token.startswith('0x') else 10
        Value = (Value << 8) | int(Token, Base)
    return Value
def GetCopyrightHeader(FileType, AllowModify=False):
    """Return the generated-file banner for FileType ('bsf', 'dsc',
    'dlt', 'inc' or 'h'), comment-prefixed for hash-comment formats.

    When AllowModify is True the "do NOT modify" line is dropped so the
    generated file is marked as editable.
    """
    FileDescription = {
        'bsf': 'Boot Setting',
        'dsc': 'Definition',
        'dlt': 'Delta',
        'inc': 'C Binary Blob',
        'h': 'C Struct Header'
    }
    if FileType in ['bsf', 'dsc', 'dlt']:
        CommentChar = '#'
    else:
        CommentChar = ''
    Lines = __copyright_tmp__.split('\n')
    if AllowModify:
        Lines = [Line for Line in Lines if 'Please do NOT modify' not in Line]
    # The template ends with '\n', so split() yields a trailing empty
    # element; the [:-1] + '\n' trick drops the comment char (or newline)
    # appended to it and normalizes the header to end with one newline.
    CopyrightHdr = '\n'.join('%s%s' % (
        CommentChar, Line) for Line in Lines)[:-1] + '\n'
    return CopyrightHdr % (FileDescription[FileType], date.today().year)
class CLogicalExpression:
    """Recursive-descent evaluator for DSC/BSF logical expressions.

    Supports NOT/AND/OR/XOR keywords, the comparison operators
    <, >, <=, >=, ==, !=, parentheses, and decimal/hex literals.
    Operands are carried between parse levels as decimal strings.

    NOTE(review): parseCompare builds expression strings and passes them
    to eval(); expressions originate from the DSC/BSF files, so this is
    safe only for trusted input — do not feed it untrusted text.
    """
    def __init__(self):
        self.index = 0      # current parse position within self.string
        self.string = ''    # expression text being parsed

    def errExit(self, err=''):
        # Print the expression with a caret under the failure position,
        # then abort via SystemExit.
        print("ERROR: Express parsing for:")
        print(" %s" % self.string)
        print(" %s^" % (' ' * self.index))
        if err:
            print("INFO : %s" % err)
        raise SystemExit

    def getNonNumber(self, n1, n2):
        # Return whichever operand is not a plain digit string, or None
        # when both are numeric (used to detect string comparisons).
        if not n1.isdigit():
            return n1
        if not n2.isdigit():
            return n2
        return None

    def getCurr(self, lens=1):
        # Peek `lens` characters at the cursor; -1 means "rest of string".
        try:
            if lens == -1:
                return self.string[self.index:]
            else:
                if self.index + lens > len(self.string):
                    lens = len(self.string) - self.index
                return self.string[self.index: self.index + lens]
        except Exception:
            return ''

    def isLast(self):
        # True when the cursor has consumed the whole expression.
        return self.index == len(self.string)

    def moveNext(self, len=1):
        # Advance the cursor. NOTE(review): parameter name shadows the
        # builtin len(); kept as-is for byte fidelity.
        self.index += len

    def skipSpace(self):
        # Skip spaces and tabs at the cursor.
        while not self.isLast():
            if self.getCurr() in ' \t':
                self.moveNext()
            else:
                return

    def normNumber(self, val):
        # Normalize any value to a strict boolean.
        return True if val else False

    def getNumber(self, var):
        # Parse a hex (0x..) or signed decimal literal; None if neither.
        var = var.strip()
        if re.match('^0x[a-fA-F0-9]+$', var):
            value = int(var, 16)
        elif re.match('^[+-]?\\d+$', var):
            value = int(var, 10)
        else:
            value = None
        return value

    def parseValue(self):
        # Consume an identifier or literal; numeric values are
        # normalized to their decimal string form.
        self.skipSpace()
        var = ''
        while not self.isLast():
            char = self.getCurr()
            if re.match('^[\\w.]', char):
                var += char
                self.moveNext()
            else:
                break
        val = self.getNumber(var)
        if val is None:
            value = var
        else:
            value = "%d" % val
        return value

    def parseSingleOp(self):
        # Handle the unary NOT keyword, otherwise parse a value.
        self.skipSpace()
        if re.match('^NOT\\W', self.getCurr(-1)):
            self.moveNext(3)
            op = self.parseBrace()
            val = self.getNumber(op)
            if val is None:
                self.errExit("'%s' is not a number" % op)
            return "%d" % (not self.normNumber(int(op)))
        else:
            return self.parseValue()

    def parseBrace(self):
        # Parenthesized sub-expression, or a single operand.
        self.skipSpace()
        char = self.getCurr()
        if char == '(':
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            if self.getCurr() != ')':
                self.errExit("Expecting closing brace or operator")
            self.moveNext()
            return value
        else:
            value = self.parseSingleOp()
            return value

    def parseCompare(self):
        # Left-associative chain of <, >, <=, >=, ==, != comparisons.
        # ==/!= also work on non-numeric operands (string comparison).
        value = self.parseBrace()
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char in ['<', '>']:
                self.moveNext()
                next = self.getCurr()
                if next == '=':
                    op = char + next
                    self.moveNext()
                else:
                    op = char
                result = self.parseBrace()
                test = self.getNonNumber(result, value)
                if test is None:
                    # Both operands numeric: evaluate via eval() on the
                    # assembled "<lhs><op><rhs>" string (trusted input).
                    value = "%d" % self.normNumber(eval(value + op + result))
                else:
                    self.errExit("'%s' is not a valid number for comparision"
                                 % test)
            elif char in ['=', '!']:
                op = self.getCurr(2)
                if op in ['==', '!=']:
                    self.moveNext(2)
                    result = self.parseBrace()
                    test = self.getNonNumber(result, value)
                    if test is None:
                        value = "%d" % self.normNumber((eval(value + op
                                                             + result)))
                    else:
                        # Non-numeric side: compare as quoted strings.
                        value = "%d" % self.normNumber(eval("'" + value +
                                                            "'" + op + "'" +
                                                            result + "'"))
                else:
                    break
            else:
                break
        return value

    def parseAnd(self):
        # Left-associative chain of AND terms (bitwise & on 0/1 values).
        value = self.parseCompare()
        while True:
            self.skipSpace()
            if re.match('^AND\\W', self.getCurr(-1)):
                self.moveNext(3)
                result = self.parseCompare()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(int(value) & int(result))
                else:
                    self.errExit("'%s' is not a valid op number for AND" %
                                 test)
            else:
                break
        return value

    def parseOrXor(self):
        # Lowest-precedence level: OR and XOR chains.
        value = self.parseAnd()
        op = None
        while True:
            self.skipSpace()
            op = None
            if re.match('^XOR\\W', self.getCurr(-1)):
                self.moveNext(3)
                op = '^'
            elif re.match('^OR\\W', self.getCurr(-1)):
                self.moveNext(2)
                op = '|'
            else:
                break
            if op:
                result = self.parseAnd()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval(value + op + result))
                else:
                    self.errExit("'%s' is not a valid op number for XOR/OR" %
                                 test)
        return value

    def parseExpr(self):
        # Grammar entry point.
        return self.parseOrXor()

    def getResult(self):
        # Parse the whole string and return the numeric result;
        # any leftover text or non-numeric result is a fatal error.
        value = self.parseExpr()
        self.skipSpace()
        if not self.isLast():
            self.errExit("Unexpected character found '%s'" % self.getCurr())
        test = self.getNumber(value)
        if test is None:
            self.errExit("Result '%s' is not a number" % value)
        return int(value)

    def evaluateExpress(self, Expr):
        # Public API: evaluate Expr and collapse the result to a bool.
        self.index = 0
        self.string = Expr
        if self.getResult():
            Result = True
        else:
            Result = False
        return Result
class CFspBsf2Dsc:
    """Convert an FSP BSF (Boot Setting File) into DSC source lines.

    parse_bsf() scans the BSF text into a flat list of config-item
    dictionaries; generate_dsc() renders that list back as annotated
    DSC lines (and optionally writes them to a file).

    Fix: removed a stray debug print() of bit-field accumulator values
    that the original generate_dsc leaked to stdout on every bit-field.
    """
    def __init__(self, bsf_file):
        # Parse immediately; the item list is the instance's only state.
        self.cfg_list = CFspBsf2Dsc.parse_bsf(bsf_file)

    def get_dsc_lines(self):
        """Return the generated DSC lines without writing a file."""
        return CFspBsf2Dsc.generate_dsc(self.cfg_list)

    def save_dsc(self, dsc_file):
        """Generate the DSC lines and also write them to dsc_file."""
        return CFspBsf2Dsc.generate_dsc(self.cfg_list, dsc_file)

    @staticmethod
    def parse_bsf(bsf_file):
        """Parse bsf_file into a list of config-item dicts.

        Each dict carries cname/length/value/type/isbit plus BSF
        metadata (find, embed, page, option, instance). "Find"
        signatures become their own Signature items bracketing an
        embedded region; page widgets (Combo/EditNum) and their help
        and valid-range lines are merged into the matching items.
        """
        fd = open(bsf_file, 'r')
        bsf_txt = fd.read()
        fd.close()

        # Locate every 'Find "sig"' marker and the line it applies from.
        find_list = []
        regex = re.compile(r'\s+Find\s+"(.*?)"(.*?)^\s+(\$(.*?)|Skip)\s+',
                           re.S | re.MULTILINE)
        for match in regex.finditer(bsf_txt):
            find = match.group(1)
            name = match.group(3)
            line = bsf_txt[:match.end()].count("\n")
            find_list.append((name, find, line))

        idx = 0
        count = 0
        prefix = ''
        chk_dict = {}
        cfg_list = []
        cfg_temp = {'find': '', 'cname': '', 'length': 0, 'value': '0',
                    'type': 'Reserved', 'isbit': False,
                    'embed': '', 'page': '', 'option': '', 'instance': 0}
        # One match per StructDef line: '$Name N bits|bytes [$_DEFAULT_ = v]'
        # or 'Skip N bits|bytes'.
        regex = re.compile(
            r'^\s+(\$(.*?)|Skip)\s+(\d+)\s+(bits|bytes)(\s+\$_DEFAULT_\s'
            r'+=\s+(.+?))?$', re.S |
            re.MULTILINE)
        for match in regex.finditer(bsf_txt):
            dlen = int(match.group(3))
            if match.group(1) == 'Skip':
                # Anonymous padding becomes a synthetic BsfSkip item.
                key = 'gPlatformFspPkgTokenSpaceGuid_BsfSkip%d' % idx
                val = ', '.join(['%02X' % ord(i) for i in '\x00' * dlen])
                idx += 1
                option = '$SKIP'
            else:
                key = match.group(2)
                val = match.group(6)
                option = ''
            is_bit = True if match.group(4) == 'bits' else False

            cfg_item = dict(cfg_temp)
            line = bsf_txt[:match.end()].count("\n")
            finds = [i for i in find_list if line >= i[2]]
            if len(finds) > 0:
                # A Find marker precedes this item: emit the signature
                # item that opens the embedded region.
                prefix = finds[0][1]
                cfg_item['embed'] = '%s:TAG_%03X:START' % \
                    (prefix, ord(prefix[-1]))
                cfg_item['find'] = prefix
                cfg_item['cname'] = 'Signature'
                cfg_item['length'] = len(finds[0][1])
                str2byte = Str2Bytes("'" + finds[0][1] + "'",
                                     len(finds[0][1]))
                cfg_item['value'] = '0x%X' % Bytes2Val(str2byte)
                cfg_list.append(dict(cfg_item))
                cfg_item = dict(cfg_temp)
                find_list.pop(0)
                count = 0

            cfg_item['cname'] = key
            cfg_item['length'] = dlen
            cfg_item['value'] = val
            cfg_item['option'] = option
            cfg_item['isbit'] = is_bit
            # Track repeated names so widgets can be matched by instance.
            if key not in chk_dict.keys():
                chk_dict[key] = 0
            else:
                chk_dict[key] += 1
            cfg_item['instance'] = chk_dict[key]
            cfg_list.append(cfg_item)
            count += 1

        if prefix:
            # Close the last embedded region with a Dummy END marker.
            cfg_item = dict(cfg_temp)
            cfg_item['cname'] = 'Dummy'
            cfg_item['embed'] = '%s:%03X:END' % (prefix, ord(prefix[-1]))
            cfg_list.append(cfg_item)

        # Collect 'List &Name ... EndList' selection option sets.
        option_dict = {}
        selreg = re.compile(
            r'\s+Selection\s*(.+?)\s*,\s*"(.*?)"$', re.S |
            re.MULTILINE)
        regex = re.compile(
            r'^List\s&(.+?)$(.+?)^EndList$', re.S | re.MULTILINE)
        for match in regex.finditer(bsf_txt):
            key = match.group(1)
            option_dict[key] = []
            for select in selreg.finditer(match.group(2)):
                option_dict[key].append(
                    (int(select.group(1), 0), select.group(2)))

        # Walk 'Page ... EndPage' sections and attach widget metadata
        # (type, prompt, options, help, valid range) to the items.
        chk_dict = {}
        pagereg = re.compile(
            r'^Page\s"(.*?)"$(.+?)^EndPage$', re.S | re.MULTILINE)
        for match in pagereg.finditer(bsf_txt):
            page = match.group(1)
            for line in match.group(2).splitlines():
                match = re.match(
                    r'\s+(Combo|EditNum)\s\$(.+?),\s"(.*?)",\s(.+?),$', line)
                if match:
                    cname = match.group(2)
                    if cname not in chk_dict.keys():
                        chk_dict[cname] = 0
                    else:
                        chk_dict[cname] += 1
                    instance = chk_dict[cname]
                    cfg_idxs = [i for i, j in enumerate(cfg_list)
                                if j['cname'] == cname and
                                j['instance'] == instance]
                    if len(cfg_idxs) != 1:
                        raise Exception(
                            "Multiple CFG item '%s' found !" % cname)
                    cfg_item = cfg_list[cfg_idxs[0]]
                    cfg_item['page'] = page
                    cfg_item['type'] = match.group(1)
                    cfg_item['prompt'] = match.group(3)
                    cfg_item['range'] = None
                    if cfg_item['type'] == 'Combo':
                        cfg_item['option'] = option_dict[match.group(4)[1:]]
                    elif cfg_item['type'] == 'EditNum':
                        cfg_item['option'] = match.group(4)
                match = re.match(r'\s+ Help\s"(.*?)"$', line)
                if match:
                    cfg_item['help'] = match.group(1)
                match = re.match(r'\s+"Valid\srange:\s(.*)"$', line)
                if match:
                    parts = match.group(1).split()
                    cfg_item['option'] = (
                        (int(parts[0], 0), int(parts[2], 0),
                         cfg_item['option']))

        return cfg_list

    @staticmethod
    def generate_dsc(option_list, dsc_file=None):
        """Render option_list as annotated DSC lines.

        Consecutive bit-typed items are packed into BIT_FIELD_DATA
        structs; other items become 'gCfgData.Name | * | size | default'
        lines with !BSF/!HDR annotation comments. Writes dsc_file when
        given and always returns the line list.
        """
        dsc_lines = []
        header = '%s' % (__copyright_dsc__ % date.today().year)
        dsc_lines.extend(header.splitlines())

        # Declare every referenced page up front.
        pages = []
        for cfg_item in option_list:
            if cfg_item['page'] and (cfg_item['page'] not in pages):
                pages.append(cfg_item['page'])
        page_id = 0
        for page in pages:
            dsc_lines.append(' # !BSF PAGES:{PG%02X::"%s"}' % (page_id, page))
            page_id += 1
        dsc_lines.append('')

        # First pass: find runs of bit items and pre-compute each run's
        # (start, end, total bits, packed value).
        last_page = ''
        is_bit = False
        dlen = 0
        dval = 0
        bit_fields = []
        for idx, option in enumerate(option_list):
            if not is_bit and option['isbit']:
                is_bit = True
                dlen = 0
                dval = 0
                idxs = idx
            if is_bit and not option['isbit']:
                is_bit = False
                if dlen % 8 != 0:
                    raise Exception("Bit fields are not aligned at "
                                    "byte boundary !")
                bit_fields.append((idxs, idx, dlen, dval))
            if is_bit:
                blen = option['length']
                bval = int(option['value'], 0)
                dval = dval + ((bval & ((1 << blen) - 1)) << dlen)
                # (debug print of dlen/blen/bval/dval removed here)
                dlen += blen

        # Second pass: emit each item, opening a BIT_FIELD_DATA struct
        # wherever a bit-field run starts.
        struct_idx = 0
        for idx, option in enumerate(option_list):
            dsc_lines.append('')
            default = option['value']
            pos = option['cname'].find('_')
            name = option['cname'][pos + 1:]

            for start_idx, end_idx, bits_len, bits_val in bit_fields:
                if idx == start_idx:
                    val_str = Bytes2Str(Val2Bytes(bits_val, bits_len // 8))
                    dsc_lines.append(' # !HDR STRUCT:{BIT_FIELD_DATA_%d}'
                                     % struct_idx)
                    dsc_lines.append(' # !BSF NAME:{BIT_FIELD_STRUCT}')
                    dsc_lines.append(' gCfgData.BitFiledStruct%d '
                                     ' | * | 0x%04X | %s' %
                                     (struct_idx, bits_len // 8, val_str))
                    dsc_lines.append('')
                    struct_idx += 1

            if option['find']:
                dsc_lines.append(' # !BSF FIND:{%s}' % option['find'])
                dsc_lines.append('')
            if option['instance'] > 0:
                name = name + '_%s' % option['instance']
            if option['embed']:
                dsc_lines.append(' # !HDR EMBED:{%s}' % option['embed'])
            if option['type'] == 'Reserved':
                dsc_lines.append(' # !BSF NAME:{Reserved} TYPE:{Reserved}')
                if option['option'] == '$SKIP':
                    dsc_lines.append(' # !BSF OPTION:{$SKIP}')
            else:
                prompt = option['prompt']
                if last_page != option['page']:
                    last_page = option['page']
                    dsc_lines.append(' # !BSF PAGE:{PG%02X}' %
                                     (pages.index(option['page'])))
                if option['type'] == 'Combo':
                    dsc_lines.append(' # !BSF NAME:{%s} TYPE:{%s}' %
                                     (prompt, option['type']))
                    ops = []
                    for val, text in option['option']:
                        ops.append('0x%x:%s' % (val, text))
                    dsc_lines.append(' # !BSF OPTION:{%s}' % (', '.join(ops)))
                elif option['type'] == 'EditNum':
                    cfg_len = option['length']
                    if ',' in default and cfg_len > 8:
                        # Long comma-list defaults render better as a
                        # Table widget (capped at 16 HEX byte columns).
                        dsc_lines.append(' # !BSF NAME:{%s} TYPE:{Table}' %
                                         (prompt))
                        if cfg_len > 16:
                            cfg_len = 16
                        ops = []
                        for i in range(cfg_len):
                            ops.append('%X:1:HEX' % i)
                        dsc_lines.append(' # !BSF OPTION:{%s}' %
                                         (', '.join(ops)))
                    else:
                        dsc_lines.append(
                            ' # !BSF NAME:{%s} TYPE:{%s, %s, (0x%X, 0x%X)}' %
                            (prompt, option['type'], option['option'][2],
                             option['option'][0], option['option'][1]))
                dsc_lines.append(' # !BSF HELP:{%s}' % option['help'])

            if ',' in default:
                default = '{%s}' % default
            if option['isbit']:
                dsc_lines.append(' # !BSF FIELD:{%s:%db}'
                                 % (name, option['length']))
            else:
                dsc_lines.append(' gCfgData.%-30s | * | 0x%04X | %s' %
                                 (name, option['length'], default))

        if dsc_file:
            fd = open(dsc_file, 'w')
            fd.write('\n'.join(dsc_lines))
            fd.close()

        return dsc_lines
class CGenCfgData:
def __init__(self, Mode=''):
    """Initialize an empty CGenCfgData state; Mode is an optional
    tool-mode string kept as-is ('' means default)."""
    self.Debug = False              # verbose diagnostics toggle
    self.Error = ''                 # last error message for callers
    self.ReleaseMode = True
    self.Mode = Mode
    # Boilerplate BSF global-data section emitted verbatim.
    self._GlobalDataDef = """
GlobalDataDef
SKUID = 0, "DEFAULT"
EndGlobalData
"""
    # Built-in enable/disable selection list emitted verbatim.
    self._BuidinOptionTxt = """
List &EN_DIS
Selection 0x1 , "Enabled"
Selection 0x0 , "Disabled"
EndList
"""
    # Recognized C scalar types for struct-typed values.
    self._StructType = ['UINT8', 'UINT16', 'UINT32', 'UINT64']
    # Annotation keys accepted in '# !BSF KEY:{...}' comments.
    self._BsfKeyList = ['FIND', 'NAME', 'HELP', 'TYPE', 'PAGE', 'PAGES',
                        'BLOCK', 'OPTION', 'CONDITION', 'ORDER', 'MARKER',
                        'SUBT']
    # Annotation keys accepted in '# !HDR KEY:{...}' comments.
    self._HdrKeyList = ['HEADER', 'STRUCT', 'EMBED', 'COMMENT']
    # Shorthand option sets usable without a custom List definition.
    self._BuidinOption = {'$EN_DIS': 'EN_DIS'}
    self._MacroDict = {}            # -D macros from the command line
    self._VarDict = {}
    self._PcdsDict = {}             # PCD name -> value substitutions
    self._CfgBlkDict = {}
    self._CfgPageDict = {}
    self._CfgOptsDict = {}
    self._BsfTempDict = {}
    self._CfgItemList = []          # parsed config items, in layout order
    self._DscLines = []
    self._DscFile = ''
    self._CfgPageTree = {}          # nested page hierarchy for the BSF UI
    self._MapVer = 0
    self._MinCfgTagId = 0x100
def ParseMacros(self, MacroDefStr):
    """Populate self._MacroDict from '-D NAME[=VALUE]' style arguments.

    Accepts both fused ('-DABC=1') and split ('-D', 'ABC=1') forms.
    Returns 0 when at least one macro was parsed, 1 otherwise.
    """
    # ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
    self._MacroDict = {}
    Pending = False
    for Token in MacroDefStr:
        if Token.startswith('-D'):
            Pending = True
            if len(Token) <= 2:
                continue        # bare '-D': the macro is the next token
            Token = Token[2:]
        if not Pending:
            continue            # stray token without a preceding -D
        Pending = False
        Match = re.match("(\\w+)=(.+)", Token)
        if Match:
            self._MacroDict[Match.group(1)] = Match.group(2)
        else:
            Match = re.match("(\\w+)", Token)
            if Match:
                self._MacroDict[Match.group(1)] = ''
    Error = 0 if len(self._MacroDict) else 1
    if self.Debug:
        print("INFO : Macro dictionary:")
        for Each in self._MacroDict:
            print(" $(%s) = [ %s ]" % (Each,
                                       self._MacroDict[Each]))
    return Error
def EvaulateIfdef(self, Macro):
    """Return True when Macro was defined on the command line
    (backs the DSC !ifdef directive). Name typo kept for callers."""
    Defined = Macro in self._MacroDict
    if self.Debug:
        print("INFO : Eval Ifdef [%s] : %s" % (Macro, Defined))
    return Defined
def ExpandMacros(self, Input, Preserve=False):
    """Replace each $(NAME) occurrence in Input with its macro value.

    Unknown macros collapse to their bare NAME unless Preserve is True,
    in which case the $(NAME) text is left in place.
    """
    Line = Input
    for Token in re.findall("\\$\\(\\w+\\)", Input):
        Name = Token[2:-1]
        if Name in self._MacroDict:
            Line = Line.replace(Token, self._MacroDict[Name])
            continue
        if self.Debug:
            print("WARN : %s is not defined" % Token)
        if not Preserve:
            Line = Line.replace(Token, Name)
    return Line
def ExpandPcds(self, Input):
    """Substitute known PCD references (Token.Name form) in Input
    with their values; unknown PCDs are left untouched."""
    Line = Input
    for PcdName in re.findall("(\\w+\\.\\w+)", Input):
        if PcdName in self._PcdsDict:
            Line = Line.replace(PcdName, self._PcdsDict[PcdName])
        elif self.Debug:
            print("WARN : %s is not defined" % PcdName)
    return Line
def EvaluateExpress(self, Expr):
    """Expand PCD references and macros in Expr, then evaluate it
    with the CLogicalExpression parser and return the result."""
    Expanded = self.ExpandMacros(self.ExpandPcds(Expr))
    Result = CLogicalExpression().evaluateExpress(Expanded)
    if self.Debug:
        print("INFO : Eval Express [%s] : %s" % (Expr, Result))
    return Result
def ValueToByteArray(self, ValueStr, Length):
    """Convert ValueStr into a bytearray of exactly Length bytes.

    A '{ FILE:a.bin,b.bin }' value concatenates binary files resolved
    relative to the DSC directory; any other form goes through
    ValueToList. Short results are NUL-padded; oversized results raise.
    """
    Match = re.match("\\{\\s*FILE:(.+)\\}", ValueStr)
    if Match:
        Result = bytearray()
        for File in Match.group(1).split(','):
            BinPath = os.path.join(os.path.dirname(self._DscFile),
                                   File.strip())
            Result.extend(bytearray(open(BinPath, 'rb').read()))
    else:
        try:
            Result = bytearray(self.ValueToList(ValueStr, Length))
        except ValueError:
            raise Exception("Bytes in '%s' must be in range 0~255 !" %
                            ValueStr)
    Shortfall = Length - len(Result)
    if Shortfall > 0:
        Result.extend(b'\x00' * Shortfall)
    elif Shortfall < 0:
        raise Exception("Value '%s' is too big to fit into %d bytes !" %
                        (ValueStr, Length))
    return Result[:Length]
def ValueToList(self, ValueStr, Length):
    """Convert a DSC value string into a list of byte values.

    Supported forms: a '{ ... }' byte list whose elements may be
    quoted strings, 'expr:Nb' bit-field chunks, or expressions; a
    single-/double-quoted string (NUL-padded to Length); or a plain
    expression rendered as Length little-endian bytes.
    """
    if ValueStr[0] == '{':
        Result = []
        BinList = ValueStr[1:-1].split(',')
        # Consecutive 'expr:Nb' elements accumulate into Value/BitLen
        # and are flushed to bytes when the bit-field run ends.
        InBitField = False
        LastInBitField = False
        Value = 0
        BitLen = 0
        for Element in BinList:
            InBitField = False
            Each = Element.strip()
            if len(Each) == 0:
                pass
            else:
                if Each[0] in ['"', "'"]:
                    # Quoted chunk: expand to its UTF-8 bytes.
                    Result.extend(list(bytearray(Each[1:-1], 'utf-8')))
                elif ':' in Each:
                    Match = re.match("(.+):(\\d+)b", Each)
                    if Match is None:
                        raise Exception("Invald value list format '%s' !"
                                        % Each)
                    InBitField = True
                    CurrentBitLen = int(Match.group(2))
                    # Mask the value to its declared width, then shift it
                    # into position after the bits accumulated so far.
                    CurrentValue = ((self.EvaluateExpress(Match.group(1))
                                     & (1 << CurrentBitLen) - 1)) << BitLen
                else:
                    Result.append(self.EvaluateExpress(Each.strip()))
            if InBitField:
                Value += CurrentValue
                BitLen += CurrentBitLen
            # Flush when a bit-field run ends (or at the last element).
            # NOTE(review): a run consisting of a single bit-field as the
            # final element never sets LastInBitField and appears not to
            # be flushed — confirm against callers.
            if LastInBitField and ((not InBitField) or (Element ==
                                                        BinList[-1])):
                if BitLen % 8 != 0:
                    raise Exception("Invald bit field length!")
                Result.extend(Val2Bytes(Value, BitLen // 8))
                Value = 0
                BitLen = 0
            LastInBitField = InBitField
    elif ValueStr.startswith("'") and ValueStr.endswith("'"):
        Result = Str2Bytes(ValueStr, Length)
    elif ValueStr.startswith('"') and ValueStr.endswith('"'):
        Result = Str2Bytes(ValueStr, Length)
    else:
        Result = Val2Bytes(self.EvaluateExpress(ValueStr), Length)
    return Result
def FormatDeltaValue(self, ConfigDict):
    """Return ConfigDict's value formatted for a delta (.dlt) file.

    Quoted strings are stripped of NUL padding; values with a scalar
    struct type (UINT8/16/32/64) are re-rendered as a '{ ... }' array
    of that element width.
    """
    ValStr = ConfigDict['value']
    if ValStr[0] == "'":
        # Remove padding \x00 in the value string
        ValStr = "'%s'" % ValStr[1:-1].rstrip('\x00')
    Struct = ConfigDict['struct']
    if Struct in self._StructType:
        # Format the array using its struct type
        Unit = int(Struct[4:]) // 8
        Value = Array2Val(ConfigDict['value'])
        Loop = ConfigDict['length'] // Unit
        Values = []
        # Peel Unit-byte little-endian elements off the packed value.
        for Each in range(Loop):
            Values.append(Value & ((1 << (Unit * 8)) - 1))
            Value = Value >> (Unit * 8)
        ValStr = '{ ' + ', '.join([('0x%%0%dX' % (Unit * 2)) %
                                   x for x in Values]) + ' }'
    return ValStr
def FormatListValue(self, ConfigDict):
    """Normalize a struct-typed value in place into a '{0xAA,...}'
    byte-array string; no-op for non-scalar struct types.

    If the declared length does not equal element-size * element-count,
    the value is reinterpreted as a plain byte array (Unit = 1).
    """
    Struct = ConfigDict['struct']
    if Struct not in self._StructType:
        return
    DataList = self.ValueToList(ConfigDict['value'], ConfigDict['length'])
    Unit = int(Struct[4:]) // 8
    if int(ConfigDict['length']) != Unit * len(DataList):
        # Fallback to byte array
        Unit = 1
        if int(ConfigDict['length']) != len(DataList):
            raise Exception("Array size is not proper for '%s' !" %
                            ConfigDict['cname'])
    ByteArray = []
    # Serialize each element as Unit little-endian bytes.
    for Value in DataList:
        for Loop in range(Unit):
            ByteArray.append("0x%02X" % (Value & 0xFF))
            Value = Value >> 8
    NewValue = '{' + ','.join(ByteArray) + '}'
    ConfigDict['value'] = NewValue
    return ""
def GetOrderNumber(self, Offset, Order, BitOff=0):
    """Compose the numeric display-order key for a config item.

    Order may be -1 (derive the major part from Offset), an explicit
    integer key, or a hex 'Major.Minor' string. The low byte always
    carries BitOff.
    """
    if not isinstance(Order, int):
        (MajorStr, MinorStr) = Order.split('.')
        Key = (int(MajorStr, 16) << 16) + ((int(MinorStr, 16) & 0xFF) << 8)
    elif Order == -1:
        Key = Offset << 16
    else:
        Key = Order
    return Key + (BitOff & 0xFF)
def SubtituteLine(self, Line, Args):
    """Expand macros in Line (preserving unknown ones) and substitute
    positional $(1)..$(n) parameters from the colon-separated Args.
    Field 0 of Args is the template name and is never substituted."""
    Fields = Args.strip().split(':')
    Result = self.ExpandMacros(Line, True)
    # Replace highest index first so $(1) never clobbers part of $(10).
    for Idx in reversed(range(1, len(Fields))):
        Result = Result.replace('$(%d)' % Idx, Fields[Idx].strip())
    return Result
def CfgDuplicationCheck(self, CfgDict, Name):
    """Debug-only duplicate-name check: record Name in CfgDict on first
    sight, warn on repeats. 'Dummy' placeholder items are ignored.

    Fix: the duplicate warning printed CfgDict['cname'] — 'cname' is not
    a key of this name-tracking dict, so every duplicate raised KeyError
    instead of warning. Report the duplicated Name itself.
    """
    if not self.Debug:
        return
    if Name == 'Dummy':
        return
    if Name not in CfgDict:
        CfgDict[Name] = 1
    else:
        print("WARNING: Duplicated item found '%s' !" % Name)
def AddBsfChildPage(self, Child, Parent='root'):
    """Attach Child as a sub-page of Parent in the BSF page tree.

    Each tree node is a single-key dict {page_name: [child_nodes]}.
    Returns True when Parent was found (preorder, first match wins),
    False otherwise.
    """
    # Iterative preorder traversal; children are pushed reversed so the
    # pop order matches the recursive left-to-right search.
    Stack = [self._CfgPageTree]
    while Stack:
        Node = Stack.pop()
        Key = next(iter(Node))
        if Key == Parent:
            Node[Key].append({Child: []})
            return True
        Stack.extend(reversed(Node[Key]))
    return False
def ParseDscFile(self, DscFile):
    """Parse a DSC file (path or pre-read list of lines) into config state.

    Populates self._CfgItemList, self._CfgPageDict, self._CfgBlkDict,
    self._BsfTempDict and self._CfgPageTree.  Handles '!' directives
    (!if/!ifdef/!else/!endif/!include), BSF/HDR annotation comments,
    templates and 'gCfgData.Name | offset | length | value' items.
    Returns 0 on success; hard errors are raised as exceptions.
    """
    self._DscLines = []
    self._CfgItemList = []
    self._CfgPageDict = {}
    self._CfgBlkDict = {}
    self._BsfTempDict = {}
    self._CfgPageTree = {'root': []}
    CfgDict = {}
    # Recognized DSC section names (lower-cased for comparison).
    SectionNameList = ["Defines".lower(), "PcdsFeatureFlag".lower(),
                       "PcdsDynamicVpd.Tmp".lower(),
                       "PcdsDynamicVpd.Upd".lower()]
    IsDefSect = False
    IsPcdSect = False
    IsUpdSect = False
    IsTmpSect = False
    TemplateName = ''
    IfStack = []    # truth value of each nested !if level
    ElifStack = []  # extra levels pushed by !elseif per !if
    Error = 0
    ConfigDict = {}
    if type(DscFile) is list:
        # it is DSC lines already
        DscLines = DscFile
        self._DscFile = '.'
    else:
        DscFd = open(DscFile, "r")
        DscLines = DscFd.readlines()
        DscFd.close()
        self._DscFile = DscFile
    BsfRegExp = re.compile("(%s):{(.+?)}(?:$|\\s+)" % '|'.
                           join(self._BsfKeyList))
    HdrRegExp = re.compile("(%s):{(.+?)}" % '|'.join(self._HdrKeyList))
    CfgRegExp = re.compile("^([_a-zA-Z0-9]+)\\s*\\|\\s*\
(0x[0-9A-F]+|\\*)\\s*\\|\\s*(\\d+|0x[0-9a-fA-F]+)\\s*\\|\\s*(.+)")
    TksRegExp = re.compile("^(g[_a-zA-Z0-9]+\\.)(.+)")
    SkipLines = 0
    while len(DscLines):
        DscLine = DscLines.pop(0).strip()
        if SkipLines == 0:
            self._DscLines.append(DscLine)
        else:
            SkipLines = SkipLines - 1
        if len(DscLine) == 0:
            continue
        Handle = False
        Match = re.match("^\\[(.+)\\]", DscLine)
        if Match is not None:
            # Section header line: switch the active section flags.
            IsDefSect = False
            IsPcdSect = False
            IsUpdSect = False
            IsTmpSect = False
            SectionName = Match.group(1).lower()
            if SectionName == SectionNameList[0]:
                IsDefSect = True
            if SectionName == SectionNameList[1]:
                IsPcdSect = True
            elif SectionName == SectionNameList[2]:
                IsTmpSect = True
            elif SectionName == SectionNameList[3]:
                # Entering the UPD section: reset the per-item state.
                ConfigDict = {
                    'header': 'ON',
                    'page': '',
                    'name': '',
                    'find': '',
                    'struct': '',
                    'embed': '',
                    'marker': '',
                    'option': '',
                    'comment': '',
                    'condition': '',
                    'order': -1,
                    'subreg': []
                }
                IsUpdSect = True
                Offset = 0
        else:
            if IsDefSect or IsPcdSect or IsUpdSect or IsTmpSect:
                # Handle '!' preprocessor-style directives first.
                Match = False if DscLine[0] != '!' else True
                if Match:
                    Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif\
|include)\\s*(.+)?$", DscLine.split("#")[0])
                Keyword = Match.group(1) if Match else ''
                Remaining = Match.group(2) if Match else ''
                Remaining = '' if Remaining is None else Remaining.strip()
                if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'
                               ] and not Remaining:
                    raise Exception("ERROR: Expression is expected after \
'!if' or !elseif' for line '%s'" % DscLine)
                if Keyword == 'else':
                    if IfStack:
                        IfStack[-1] = not IfStack[-1]
                    else:
                        raise Exception("ERROR: No paired '!if' found for \
'!else' for line '%s'" % DscLine)
                elif Keyword == 'endif':
                    if IfStack:
                        IfStack.pop()
                        Level = ElifStack.pop()
                        if Level > 0:
                            # Drop the extra levels !elseif pushed.
                            del IfStack[-Level:]
                    else:
                        raise Exception("ERROR: No paired '!if' found for \
'!endif' for line '%s'" % DscLine)
                elif Keyword == 'ifdef' or Keyword == 'ifndef':
                    Result = self.EvaulateIfdef(Remaining)
                    if Keyword == 'ifndef':
                        Result = not Result
                    IfStack.append(Result)
                    ElifStack.append(0)
                elif Keyword == 'if' or Keyword == 'elseif':
                    Result = self.EvaluateExpress(Remaining)
                    if Keyword == "if":
                        ElifStack.append(0)
                        IfStack.append(Result)
                    else:  # elseif
                        if IfStack:
                            IfStack[-1] = not IfStack[-1]
                            IfStack.append(Result)
                            ElifStack[-1] = ElifStack[-1] + 1
                        else:
                            raise Exception("ERROR: No paired '!if' found for \
'!elif' for line '%s'" % DscLine)
                else:
                    # Plain content line (or '!include'): visibility is
                    # the AND of all enclosing !if results.
                    if IfStack:
                        Handle = reduce(lambda x, y: x and y, IfStack)
                    else:
                        Handle = True
                    if Handle:
                        if Keyword == 'include':
                            Remaining = self.ExpandMacros(Remaining)
                            # Relative to DSC filepath
                            IncludeFilePath = os.path.join(
                                os.path.dirname(self._DscFile), Remaining)
                            if not os.path.exists(IncludeFilePath):
                                # Relative to repository to find
                                # dsc in common platform
                                IncludeFilePath = os.path.join(
                                    os.path.dirname(self._DscFile), "..",
                                    Remaining)
                            try:
                                IncludeDsc = open(IncludeFilePath, "r")
                            except Exception:
                                raise Exception("ERROR: Cannot open \
file '%s'." % IncludeFilePath)
                            NewDscLines = IncludeDsc.readlines()
                            IncludeDsc.close()
                            # Splice the included content in front of the
                            # remaining lines.
                            DscLines = NewDscLines + DscLines
                            del self._DscLines[-1]
                        else:
                            if DscLine.startswith('!'):
                                raise Exception("ERROR: Unrecoginized \
directive for line '%s'" % DscLine)
                if not Handle:
                    # Line is disabled by !if: drop it from the record.
                    del self._DscLines[-1]
                    continue
            if IsDefSect:
                Match = re.match("^\\s*(?:DEFINE\\s+)*(\\w+)\\s*=\\s*(.+)",
                                 DscLine)
                if Match:
                    self._MacroDict[Match.group(1)] = Match.group(2)
                    if self.Debug:
                        print("INFO : DEFINE %s = [ %s ]" % (Match.group(1),
                                                             Match.group(2)))
            elif IsPcdSect:
                Match = re.match("^\\s*([\\w\\.]+)\\s*\\|\\s*(\\w+)", DscLine)
                if Match:
                    self._PcdsDict[Match.group(1)] = Match.group(2)
                    if self.Debug:
                        print("INFO : PCD %s = [ %s ]" % (Match.group(1),
                                                          Match.group(2)))
            elif IsTmpSect:
                # !BSF DEFT:{GPIO_TMPL:START}
                Match = re.match("^\\s*#\\s+(!BSF)\\s+DEFT:{(.+?):\
(START|END)}", DscLine)
                if Match:
                    if Match.group(3) == 'START' and not TemplateName:
                        TemplateName = Match.group(2).strip()
                        self._BsfTempDict[TemplateName] = []
                    if Match.group(3) == 'END' and (
                            TemplateName == Match.group(2).strip()
                            ) and TemplateName:
                        TemplateName = ''
                else:
                    if TemplateName:
                        Match = re.match("^!include\\s*(.+)?$", DscLine)
                        if Match:
                            continue
                        self._BsfTempDict[TemplateName].append(DscLine)
            else:
                # UPD section: BSF/HDR annotations then item definitions.
                Match = re.match("^\\s*#\\s+(!BSF|!HDR)\\s+(.+)", DscLine)
                if Match:
                    Remaining = Match.group(2)
                    if Match.group(1) == '!BSF':
                        Result = BsfRegExp.findall(Remaining)
                        if Result:
                            for Each in Result:
                                Key = Each[0]
                                Remaining = Each[1]
                                if Key == 'BLOCK':
                                    Match = re.match(
                                        "NAME:\"(.+)\"\\s*,\\s*\
VER:\"(.+)\"\\s*", Remaining)
                                    if Match:
                                        self._CfgBlkDict['name'] = \
                                            Match.group(1)
                                        self._CfgBlkDict['ver'] = \
                                            Match.group(2)
                                elif Key == 'SUBT':
                                    # GPIO_TMPL:1:2:3
                                    Remaining = Remaining.strip()
                                    Match = re.match("(\\w+)\\s*:",
                                                     Remaining)
                                    if Match:
                                        TemplateName = Match.group(1)
                                        # Expand the template in place by
                                        # pushing substituted lines back.
                                        for Line in self._BsfTempDict[
                                                TemplateName][::-1]:
                                            NewLine = self.SubtituteLine(
                                                Line, Remaining)
                                            DscLines.insert(0, NewLine)
                                            SkipLines += 1
                                elif Key == 'PAGES':
                                    # !BSF PAGES:{HSW:"Haswell System Agent",
                                    # LPT:"Lynx Point PCH"}
                                    PageList = Remaining.split(',')
                                    for Page in PageList:
                                        Page = Page.strip()
                                        Match = re.match('(\\w+):\
(\\w*:)?\\"(.+)\\"', Page)
                                        if Match:
                                            PageName = Match.group(1)
                                            ParentName = Match.group(2)
                                            if not ParentName or \
                                                    ParentName == ':':
                                                ParentName = 'root'
                                            else:
                                                ParentName = ParentName[:-1]
                                            if not self.AddBsfChildPage(
                                                    PageName, ParentName):
                                                raise Exception("Cannot find \
parent page '%s'!" % ParentName)
                                            self._CfgPageDict[
                                                PageName] = Match.group(3)
                                        else:
                                            raise Exception("Invalid page \
definitions '%s'!" % Page)
                                elif Key in ['NAME', 'HELP', 'OPTION'
                                             ] and Remaining.startswith('+'):
                                    # Allow certain options to be extended
                                    # to multiple lines
                                    ConfigDict[Key.lower()] += Remaining[1:]
                                else:
                                    if Key == 'NAME':
                                        Remaining = Remaining.strip()
                                    elif Key == 'CONDITION':
                                        Remaining = self.ExpandMacros(
                                            Remaining.strip())
                                    ConfigDict[Key.lower()] = Remaining
                    else:
                        Match = HdrRegExp.match(Remaining)
                        if Match:
                            Key = Match.group(1)
                            Remaining = Match.group(2)
                            if Key == 'EMBED':
                                # EMBED marker becomes a zero-length
                                # 'Dummy' placeholder item.
                                Parts = Remaining.split(':')
                                Names = Parts[0].split(',')
                                DummyDict = ConfigDict.copy()
                                if len(Names) > 1:
                                    Remaining = Names[0] + ':' + ':'.join(
                                        Parts[1:])
                                    DummyDict['struct'] = Names[1]
                                else:
                                    DummyDict['struct'] = Names[0]
                                DummyDict['cname'] = 'Dummy'
                                DummyDict['name'] = ''
                                DummyDict['embed'] = Remaining
                                DummyDict['offset'] = Offset
                                DummyDict['length'] = 0
                                DummyDict['value'] = '0'
                                DummyDict['type'] = 'Reserved'
                                DummyDict['help'] = ''
                                DummyDict['subreg'] = []
                                self._CfgItemList.append(DummyDict)
                            else:
                                ConfigDict[Key.lower()] = Remaining
                # Check CFG line
                # gCfgData.VariableName | * | 0x01 | 0x1
                Clear = False
                Match = TksRegExp.match(DscLine)
                if Match:
                    DscLine = 'gCfgData.%s' % Match.group(2)
                if DscLine.startswith('gCfgData.'):
                    Match = CfgRegExp.match(DscLine[9:])
                else:
                    Match = None
                if Match:
                    ConfigDict['space'] = 'gCfgData'
                    ConfigDict['cname'] = Match.group(1)
                    if Match.group(2) != '*':
                        # Explicit offset; '*' keeps the running offset.
                        Offset = int(Match.group(2), 16)
                    ConfigDict['offset'] = Offset
                    ConfigDict['order'] = self.GetOrderNumber(
                        ConfigDict['offset'], ConfigDict['order'])
                    Value = Match.group(4).strip()
                    if Match.group(3).startswith("0x"):
                        Length = int(Match.group(3), 16)
                    else:
                        Length = int(Match.group(3))
                    Offset += Length
                    ConfigDict['length'] = Length
                    Match = re.match("\\$\\((\\w+)\\)", Value)
                    if Match:
                        if Match.group(1) in self._MacroDict:
                            Value = self._MacroDict[Match.group(1)]
                    ConfigDict['value'] = Value
                    if re.match("\\{\\s*FILE:(.+)\\}", Value):
                        # Expand embedded binary file
                        ValArray = self.ValueToByteArray(
                            ConfigDict['value'], ConfigDict['length'])
                        NewValue = Bytes2Str(ValArray)
                        self._DscLines[-1] = re.sub(r'(.*)(\{\s*FILE:.+\})',
                                                    r'\1 %s' % NewValue,
                                                    self._DscLines[-1])
                        ConfigDict['value'] = NewValue
                    if ConfigDict['name'] == '':
                        # Clear BSF specific items
                        ConfigDict['bsfname'] = ''
                        ConfigDict['help'] = ''
                        ConfigDict['type'] = ''
                        ConfigDict['option'] = ''
                    self.CfgDuplicationCheck(CfgDict, ConfigDict['cname'])
                    self._CfgItemList.append(ConfigDict.copy())
                    Clear = True
                else:
                    # It could be a virtual item as below
                    # !BSF FIELD:{SerialDebugPortAddress0:1}
                    # or
                    # @Bsf FIELD:{SerialDebugPortAddress0:1b}
                    Match = re.match(r"^\s*#\s+(!BSF)\s+FIELD:{(.+)}",
                                     DscLine)
                    if Match:
                        BitFieldTxt = Match.group(2)
                        Match = re.match("(.+):(\\d+)b([BWDQ])?",
                                         BitFieldTxt)
                        if not Match:
                            raise Exception("Incorrect bit field \
format '%s' !" % BitFieldTxt)
                        UnitBitLen = 1
                        SubCfgDict = ConfigDict.copy()
                        SubCfgDict['cname'] = Match.group(1)
                        SubCfgDict['bitlength'] = int(
                            Match.group(2)) * UnitBitLen
                        if SubCfgDict['bitlength'] > 0:
                            # Attach the bit field to the last real item.
                            LastItem = self._CfgItemList[-1]
                            if len(LastItem['subreg']) == 0:
                                SubOffset = 0
                            else:
                                SubOffset = \
                                    LastItem['subreg'][-1]['bitoffset'] \
                                    + LastItem['subreg'][-1]['bitlength']
                            if Match.group(3) == 'B':
                                SubCfgDict['bitunit'] = 1
                            elif Match.group(3) == 'W':
                                SubCfgDict['bitunit'] = 2
                            elif Match.group(3) == 'Q':
                                SubCfgDict['bitunit'] = 8
                            else:
                                SubCfgDict['bitunit'] = 4
                            SubCfgDict['bitoffset'] = SubOffset
                            SubCfgDict['order'] = self.GetOrderNumber(
                                SubCfgDict['offset'], SubCfgDict['order'],
                                SubOffset)
                            SubCfgDict['value'] = ''
                            SubCfgDict['cname'] = '%s_%s' % (
                                LastItem['cname'], Match.group(1))
                            self.CfgDuplicationCheck(CfgDict,
                                                     SubCfgDict['cname'])
                            LastItem['subreg'].append(SubCfgDict.copy())
                            Clear = True
                if Clear:
                    # Reset per-item fields for the next CFG entry.
                    ConfigDict['name'] = ''
                    ConfigDict['find'] = ''
                    ConfigDict['struct'] = ''
                    ConfigDict['embed'] = ''
                    ConfigDict['marker'] = ''
                    ConfigDict['comment'] = ''
                    ConfigDict['order'] = -1
                    ConfigDict['subreg'] = []
                    ConfigDict['option'] = ''
                    ConfigDict['condition'] = ''
    return Error
def GetBsfBitFields(self, subitem, bytes):
    """Extract a bit-field value (as a hex string) from a little-endian
    byte array, using subitem's 'bitoffset' and 'bitlength'."""
    lo = subitem['bitoffset']
    hi = lo + subitem['bitlength']
    # Build an LSB-first bit string for the whole array.
    bit_str = ''.join('{0:08b}'.format(b) for b in bytes[::-1])[::-1]
    total = len(bit_str)
    if lo > total or hi > total:
        raise Exception("Invalid bits offset [%d,%d] %d for %s" %
                        (lo, hi, total, subitem['name']))
    # Slice the field, flip back to MSB-first, and parse as binary.
    return '0x%X' % int(bit_str[lo:hi][::-1], 2)
def UpdateBsfBitFields(self, SubItem, NewValue, ValueArray):
    """Write NewValue into SubItem's bit-field inside ValueArray (in place),
    preserving all other bits and the array length."""
    Lo = SubItem['bitoffset']
    Hi = Lo + SubItem['bitlength']
    ByteLen = len(ValueArray)
    # LSB-first bit string of the current array contents.
    Bits = ''.join('{0:08b}'.format(B) for B in ValueArray[::-1])[::-1]
    NumBits = len(Bits)
    if Lo > NumBits or Hi > NumBits:
        raise Exception("Invalid bits offset [%d,%d] %d for %s" %
                        (Lo, Hi, NumBits, SubItem['name']))
    # Splice in the new field bits (also LSB-first).
    Field = '{0:0{1}b}'.format(NewValue, SubItem['bitlength'])[::-1]
    Bits = Bits[:Lo] + Field + Bits[Hi:]
    # Convert back to little-endian bytes of the original length.
    ValueArray[:] = bytearray.fromhex(
        '{0:0{1}x}'.format(int(Bits[::-1], 2), ByteLen * 2))[::-1]
def CreateVarDict(self):
    """Build self._VarDict mapping struct markers to offsets, lengths,
    tags and item markers; '_LENGTH_' holds the total CFGDATA size."""
    Error = 0
    self._VarDict = {}
    if self._CfgItemList:
        Last = self._CfgItemList[-1]
        # Overall length is the end of the last item.
        self._VarDict['_LENGTH_'] = '%d' % (Last['offset'] + Last['length'])
    for Item in self._CfgItemList:
        Match = re.match("^(\\w+):(\\w+):(START|END)", Item['embed'])
        if Match:
            StructName = Match.group(1)
            TagStr = Match.group(2)
            Edge = Match.group(3)
            Key = '_%s_%s_' % (Edge, StructName)
            if Edge == 'END':
                self._VarDict[Key] = Item['offset'] + Item['length']
                StructLen = (self._VarDict['_END_%s_' % StructName] -
                             self._VarDict['_START_%s_' % StructName])
                self._VarDict['_LENGTH_%s_' % StructName] = StructLen
                if TagStr.startswith('TAG_'):
                    # Tagged structs must be DWORD aligned outside FSP mode.
                    if (self.Mode != 'FSP') and (StructLen % 4):
                        raise Exception("Size of structure '%s' is %d, \
not DWORD aligned !" % (StructName,
                        self._VarDict['_LENGTH_%s_' % StructName]))
                    self._VarDict['_TAG_%s_' % StructName] = int(
                        TagStr[4:], 16) & 0xFFF
            else:
                self._VarDict[Key] = Item['offset']
        if Item['marker']:
            self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = \
                Item['offset']
    return Error
def UpdateBsfBitUnit(self, Item):
    """Assign a common access-unit size to each run of bit fields in
    Item['subreg'] and validate that the fields exactly fill the item.

    Fields are grouped into consecutive runs; each run must exactly fill
    one unit (BYTE/WORD/DWORD/QWORD), and every field of a completed run
    gets that run's unit.  Raises when a run overflows its unit or when
    the total bit count differs from Item['length'] * 8.
    """
    BitTotal = 0
    BitOffset = 0
    StartIdx = 0
    Unit = None
    UnitDec = {1: 'BYTE', 2: 'WORD', 4: 'DWORD', 8: 'QWORD'}
    for Idx, SubItem in enumerate(Item['subreg']):
        if Unit is None:
            # First field of a new run fixes the candidate unit size.
            Unit = SubItem['bitunit']
        BitLength = SubItem['bitlength']
        BitTotal += BitLength
        BitOffset += BitLength
        if BitOffset > 64 or BitOffset > Unit * 8:
            # Run overflowed its unit; the check below raises.
            break
        if BitOffset == Unit * 8:
            # Run exactly filled the unit: commit it to all its fields.
            for SubIdx in range(StartIdx, Idx + 1):
                Item['subreg'][SubIdx]['bitunit'] = Unit
            BitOffset = 0
            StartIdx = Idx + 1
            Unit = None
    if BitOffset > 0:
        # SubItem still names the field that broke the run here.
        raise Exception("Bit fields cannot fit into %s for \
'%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname']))
    ExpectedTotal = Item['length'] * 8
    if Item['length'] * 8 != BitTotal:
        raise Exception("Bit fields total length (%d) does not match \
length (%d) of '%s' !" % (BitTotal, ExpectedTotal, Item['cname']))
def UpdateDefaultValue(self):
    """Normalize every item's default value.

    Items with bit fields get each sub-field default derived from the
    item bytes; array/string values are canonicalized; non-numeric
    scalar values are evaluated as expressions into hex.
    """
    Error = 0
    for Index, Item in enumerate(self._CfgItemList):
        if Item['subreg']:
            # Bit-field item: compute each sub-field value from the raw
            # bytes, then fix up the bit-field unit sizes.
            RawBytes = self.ValueToByteArray(Item['value'], Item['length'])
            for SubItem in Item['subreg']:
                SubItem['value'] = self.GetBsfBitFields(SubItem, RawBytes)
            self.UpdateBsfBitUnit(Item)
            continue
        Value = Item['value']
        if Value and Value[0] in ('{', "'", '"'):
            # {XXX} or 'XXX' strings
            self.FormatListValue(self._CfgItemList[Index])
        elif not re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value):
            # Not a plain number: evaluate the expression once.
            Item['value'] = '0x%X' % self.EvaluateExpress(Value)
    return Error
@staticmethod
def ExpandIncludeFiles(FilePath, CurDir=''):
    """Read FilePath and recursively expand '!include' directives.

    Returns a list of (line, source_file, line_number) tuples so later
    error messages can point at the originating file.  Missing includes
    are retried relative to the repository root before failing.
    """
    if CurDir == '':
        CurDir = os.path.dirname(FilePath)
        FilePath = os.path.basename(FilePath)
    SrcPath = os.path.join(CurDir, FilePath)
    with open(SrcPath, "r") as Fin:
        RawLines = Fin.readlines()
    Expanded = []
    for Num, Text in enumerate(RawLines):
        IncMatch = re.match("^!include\\s*(.+)?$", Text)
        if not IncMatch:
            Expanded.append((Text, SrcPath, Num))
            continue
        IncPath = IncMatch.group(1)
        Candidate = os.path.join(CurDir, IncPath)
        FirstTry = Candidate
        if not os.path.exists(Candidate):
            # Fall back to the repository root relative to this script.
            CurDir = os.path.join(os.path.dirname(
                os.path.realpath(__file__)), "..", "..")
            Candidate = os.path.join(CurDir, IncPath)
        if not os.path.exists(Candidate):
            raise Exception("ERROR: Cannot open include file '%s'." %
                            FirstTry)
        else:
            Expanded.append(('# Included from file: %s\n' % IncPath,
                             Candidate, 0))
            Expanded.append(('# %s\n' % ('=' * 80), Candidate, 0))
            Expanded.extend(CGenCfgData.ExpandIncludeFiles
                            (IncPath, CurDir))
    return Expanded
def OverrideDefaultValue(self, DltFile):
    """Apply overrides from a delta (.dlt) file to the config items.

    Each non-comment line has the form 'STRUCT.Name[.BitField] | value'.
    The item is searched only inside the named struct's START..END span.
    Outside FSP mode, PLATFORMID_CFG_DATA.PlatformId must be overridden.
    Returns 0; all failures are raised as exceptions.
    """
    Error = 0
    DltLines = CGenCfgData.ExpandIncludeFiles(DltFile)
    PlatformId = None
    for Line, FilePath, LineNum in DltLines:
        Line = Line.strip()
        if not Line or Line.startswith('#'):
            continue
        Match = re.match("\\s*(\\w+)\\.(\\w+)(\\.\\w+)?\\s*\\|\\s*(.+)",
                         Line)
        if not Match:
            raise Exception("Unrecognized line '%s' (File:'%s' Line:%d) !"
                            % (Line, FilePath, LineNum + 1))
        Found = False
        InScope = False
        # Scan items, but only match cnames inside the named struct span.
        for Idx, Item in enumerate(self._CfgItemList):
            if not InScope:
                if not (Item['embed'].endswith(':START') and
                        Item['embed'].startswith(Match.group(1))):
                    continue
                InScope = True
            if Item['cname'] == Match.group(2):
                Found = True
                break
            if Item['embed'].endswith(':END') and \
               Item['embed'].startswith(Match.group(1)):
                break
        Name = '%s.%s' % (Match.group(1), Match.group(2))
        if not Found:
            ErrItem = Match.group(2) if InScope else Match.group(1)
            raise Exception("Invalid configuration '%s' in '%s' \
(File:'%s' Line:%d) !" % (ErrItem, Name, FilePath, LineNum + 1))
        ValueStr = Match.group(4).strip()
        if Match.group(3) is not None:
            # This is a subregion item
            BitField = Match.group(3)[1:]
            Found = False
            if len(Item['subreg']) > 0:
                for SubItem in Item['subreg']:
                    if SubItem['cname'] == '%s_%s' % \
                            (Item['cname'], BitField):
                        Found = True
                        break
            if not Found:
                raise Exception("Invalid configuration bit field \
'%s' in '%s.%s' (File:'%s' Line:%d) !" % (BitField, Name, BitField,
                                          FilePath, LineNum + 1))
            try:
                Value = int(ValueStr, 16) if ValueStr.startswith('0x') \
                    else int(ValueStr, 10)
            except Exception:
                raise Exception("Invalid value '%s' for bit field '%s.%s' \
(File:'%s' Line:%d) !" % (ValueStr, Name, BitField, FilePath, LineNum + 1))
            if Value >= 2 ** SubItem['bitlength']:
                raise Exception("Invalid configuration bit field value \
'%s' for '%s.%s' (File:'%s' Line:%d) !" % (Value, Name, BitField,
                                           FilePath, LineNum + 1))
            # Patch the bit field into the item's byte value, then write
            # it back in the item's original representation.
            ValArray = self.ValueToByteArray(Item['value'], Item['length'])
            self.UpdateBsfBitFields(SubItem, Value, ValArray)
            if Item['value'].startswith('{'):
                Item['value'] = '{' + ', '.join('0x%02X' % i
                                                for i in ValArray) + '}'
            else:
                BitsValue = ''.join('{0:08b}'.format(i)
                                    for i in ValArray[::-1])
                Item['value'] = '0x%X' % (int(BitsValue, 2))
        else:
            if Item['value'].startswith('{') and \
                    not ValueStr.startswith('{'):
                raise Exception("Data array required for '%s' \
(File:'%s' Line:%d) !" % (Name, FilePath, LineNum + 1))
            Item['value'] = ValueStr
            if Name == 'PLATFORMID_CFG_DATA.PlatformId':
                PlatformId = ValueStr
    if (PlatformId is None) and (self.Mode != 'FSP'):
        raise Exception("PLATFORMID_CFG_DATA.PlatformId is missing \
in file '%s' !" % (DltFile))
    return Error
def ProcessMultilines(self, String, MaxCharLength):
    """Wrap String into indented lines no longer than MaxCharLength.

    Explicit '\\n' escape sequences in the text always force a break;
    longer strings are additionally broken at spaces once a line reaches
    the limit.  Returns the wrapped text, each line prefixed with a
    space and terminated with a newline.
    """
    Multilines = ''
    StringLength = len(String)
    CurrentStringStart = 0
    StringOffset = 0
    # List (despite the name) of break positions within String.
    BreakLineDict = []
    if len(String) <= MaxCharLength:
        # Short string: only explicit '\n' escapes force breaks.
        while (StringOffset < StringLength):
            if StringOffset >= 1:
                if String[StringOffset - 1] == '\\' and \
                        String[StringOffset] == 'n':
                    BreakLineDict.append(StringOffset + 1)
            StringOffset += 1
        if BreakLineDict != []:
            for Each in BreakLineDict:
                Multilines += " %s\n" % String[CurrentStringStart:Each].\
                    lstrip()
                CurrentStringStart = Each
            if StringLength - CurrentStringStart > 0:
                Multilines += " %s\n" % String[CurrentStringStart:].\
                    lstrip()
        else:
            Multilines = " %s\n" % String
    else:
        # Long string: break at spaces near the limit and at '\n'.
        NewLineStart = 0
        NewLineCount = 0
        FoundSpaceChar = False
        while(StringOffset < StringLength):
            if StringOffset >= 1:
                if NewLineCount >= MaxCharLength - 1:
                    # Prefer a space break, but only when enough text
                    # remains to justify a new line.
                    if String[StringOffset] == ' ' and \
                            StringLength - StringOffset > 10:
                        BreakLineDict.append(NewLineStart + NewLineCount)
                        NewLineStart = NewLineStart + NewLineCount
                        NewLineCount = 0
                        FoundSpaceChar = True
                    elif StringOffset == StringLength - 1 \
                            and FoundSpaceChar is False:
                        BreakLineDict.append(0)
                if String[StringOffset - 1] == '\\' and \
                        String[StringOffset] == 'n':
                    BreakLineDict.append(StringOffset + 1)
                    NewLineStart = StringOffset + 1
                    NewLineCount = 0
            StringOffset += 1
            NewLineCount += 1
        if BreakLineDict != []:
            BreakLineDict.sort()
            for Each in BreakLineDict:
                if Each > 0:
                    Multilines += " %s\n" % String[
                        CurrentStringStart:Each].lstrip()
                    CurrentStringStart = Each
            if StringLength - CurrentStringStart > 0:
                Multilines += " %s\n" % String[CurrentStringStart:].\
                    lstrip()
    return Multilines
def CreateField(self, Item, Name, Length, Offset, Struct,
                BsfName, Help, Option, BitsLength=None):
    """Render one C structure member (with its doc comment) as text.

    Length selects the base UINTn type (non-power sizes become a UINT8
    array); Struct overrides the type, and a UINTn Struct turns the
    member into an array of that element.  BsfName/Help/Option populate
    the comment block; BitsLength emits a ':N' bit-field suffix.
    Zero-length 'Dummy' placeholders render as a blank line.
    """
    PosName = 28
    NameLine = ''
    HelpLine = ''
    OptionLine = ''
    if Length == 0 and Name == 'Dummy':
        return '\n'
    IsArray = False
    if Length in [1, 2, 4, 8]:
        Type = "UINT%d" % (Length * 8)
    else:
        IsArray = True
        Type = "UINT8"
    if Item and Item['value'].startswith('{'):
        # Array-style default values always render as a byte array.
        Type = "UINT8"
        IsArray = True
    if Struct != '':
        Type = Struct
        if Struct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
            IsArray = True
            Unit = int(Struct[4:]) // 8
            # Bug fix: use integer division so the array count stays an
            # int under Python 3 (true division yields a float).
            Length = Length // Unit
        else:
            IsArray = False
    if IsArray:
        Name = Name + '[%d]' % Length
    # Pad the type so member names line up in a column.
    if len(Type) < PosName:
        Space1 = PosName - len(Type)
    else:
        Space1 = 1
    if BsfName != '':
        NameLine = " %s\n" % BsfName
    else:
        NameLine = "\n"
    if Help != '':
        HelpLine = self.ProcessMultilines(Help, 80)
    if Option != '':
        OptionLine = self.ProcessMultilines(Option, 80)
    if BitsLength is None:
        BitsLength = ''
    else:
        BitsLength = ' : %d' % BitsLength
    return "\n/** %s%s%s**/\n  %s%s%s%s;\n" % \
        (NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name,
         BitsLength)
def SplitTextBody(self, TextBody):
    """Split generated header text into (common, platform) line lists.

    Lines between the _COMMON_STRUCT_START_/_END_ marker comments go to
    the common list (with the markers stripped); everything else stays
    in the platform list.
    """
    StartTag = '{ /* _COMMON_STRUCT_START_ */'
    EndTag = '; /* _COMMON_STRUCT_END_ */'
    CommonLines = []
    PlatformLines = []
    InCommon = False
    for Line in TextBody:
        if Line.strip().endswith(StartTag):
            # Drop the marker comment but keep the opening brace.
            Line = Line.replace(StartTag[1:], '')
            InCommon = True
        if Line.strip().endswith(EndTag):
            Line = Line.replace(EndTag[1:], '')
            if InCommon:
                CommonLines.append(Line)
                InCommon = False
                continue
        if InCommon:
            CommonLines.append(Line)
        else:
            PlatformLines.append(Line)
    return CommonLines, PlatformLines
def GetStructArrayInfo(self, Input):
    """Split a struct declaration like 'NAME[N]' into (NAME, N).

    Plain names yield a count of 0; an empty bracket (auto-sized array)
    is flagged with the sentinel count 1000.
    """
    Parts = Input.split('[')
    BaseName = Parts[0]
    if len(Parts) == 1:
        return BaseName, 0
    Digits = ''.join(Ch for Ch in Parts[-1] if Ch.isdigit())
    return BaseName, int(Digits) if Digits else 1000
def PostProcessBody(self, TextBody, IncludeEmbedOnly=True):
    """Lift EMBED_STRUCT-marked spans out of TextBody into typedefs.

    Level counts EMBED nesting; level-1 spans are collected into their
    own 'typedef struct' blocks, while the outer text keeps a single
    member (or member array) referencing the struct.  Deeper nesting
    sets EmbedFound and is handled by a recursive second pass with
    IncludeEmbedOnly=False.  '#define' lines are hoisted to the front.
    """
    NewTextBody = []
    OldTextBody = []
    IncTextBody = []
    StructBody = []
    IncludeLine = False
    EmbedFound = False
    StructName = ''
    ArrayVarName = ''
    VariableName = ''
    Count = 0
    Level = 0
    IsCommonStruct = False
    for Line in TextBody:
        if Line.startswith('#define '):
            IncTextBody.append(Line)
            continue
        if not Line.startswith('/* EMBED_STRUCT:'):
            Match = False
        else:
            Match = re.match("^/\\*\\sEMBED_STRUCT:([\\w\\[\\]\\*]+):\
([\\w\\[\\]\\*]+):(\\w+):(START|END)([\\s\\d]+)\\*/([\\s\\S]*)", Line)
        if Match:
            ArrayMarker = Match.group(5)
            if Match.group(4) == 'END':
                Level -= 1
                if Level == 0:
                    Line = Match.group(6)
            else:  # 'START'
                Level += 1
                if Level == 1:
                    # Top-level embed: strip the marker from the line.
                    Line = Match.group(6)
                else:
                    # Nested embed: defer to the recursive pass.
                    EmbedFound = True
                TagStr = Match.group(3)
                if TagStr.startswith('TAG_'):
                    try:
                        TagVal = int(TagStr[4:], 16)
                    except Exception:
                        TagVal = -1
                    # Tags below _MinCfgTagId belong to the common set.
                    if (TagVal >= 0) and (TagVal < self._MinCfgTagId):
                        IsCommonStruct = True
                if Level == 1:
                    if IsCommonStruct:
                        Suffix = ' /* _COMMON_STRUCT_START_ */'
                    else:
                        Suffix = ''
                    StructBody = ['typedef struct {%s' % Suffix]
                    StructName = Match.group(1)
                    StructType = Match.group(2)
                    VariableName = Match.group(3)
                    MatchOffset = re.search('/\\*\\*\\sOffset\\s0x\
([a-fA-F0-9]+)', Line)
                    if MatchOffset:
                        Offset = int(MatchOffset.group(1), 16)
                    else:
                        Offset = None
                    IncludeLine = True
                    ModifiedStructType = StructType.rstrip()
                    if ModifiedStructType.endswith(']'):
                        Idx = ModifiedStructType.index('[')
                        if ArrayMarker != ' ':
                            # Auto array size
                            OldTextBody.append('')
                            ArrayVarName = VariableName
                            if int(ArrayMarker) == 1000:
                                Count = 1
                            else:
                                Count = int(ArrayMarker) + 1000
                        else:
                            # Continuation of the same struct array.
                            if Count < 1000:
                                Count += 1
                        VariableTemp = ArrayVarName + '[%d]' % (
                            Count if Count < 1000 else Count - 1000)
                        OldTextBody[-1] = self.CreateField(
                            None, VariableTemp, 0, Offset,
                            ModifiedStructType[:Idx], '',
                            'Structure Array', '')
                    else:
                        ArrayVarName = ''
                        OldTextBody.append(self.CreateField(
                            None, VariableName, 0, Offset,
                            ModifiedStructType, '', '', ''))
        if IncludeLine:
            StructBody.append(Line)
        else:
            OldTextBody.append(Line)
        if Match and Match.group(4) == 'END':
            if Level == 0:
                if (StructType != Match.group(2)) or \
                   (VariableName != Match.group(3)):
                    print("Unmatched struct name '%s' and '%s' !" %
                          (StructName, Match.group(2)))
                else:
                    if IsCommonStruct:
                        Suffix = ' /* _COMMON_STRUCT_END_ */'
                    else:
                        Suffix = ''
                    Line = '} %s;%s\n\n\n' % (StructName, Suffix)
                    StructBody.append(Line)
                    # Avoid emitting the same typedef twice.
                    if (Line not in NewTextBody) and \
                       (Line not in OldTextBody):
                        NewTextBody.extend(StructBody)
                IncludeLine = False
                IsCommonStruct = False
    if not IncludeEmbedOnly:
        NewTextBody.extend(OldTextBody)
    if EmbedFound:
        # Second pass resolves the structs that were nested deeper.
        NewTextBody = self.PostProcessBody(NewTextBody, False)
    NewTextBody = IncTextBody + NewTextBody
    return NewTextBody
def WriteHeaderFile(self, TxtBody, FileName, Type='h'):
    """Write TxtBody to FileName wrapped in copyright/guard boilerplate.

    The include-guard name is derived from the file name (CamelCase
    split into UPPER_SNAKE).  The file is rewritten only when the new
    content differs, preserving timestamps for incremental builds.
    """
    GuardName = os.path.basename(FileName).replace('.', '_')
    GuardName = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', GuardName)
    GuardName = re.sub('([a-z0-9])([A-Z])', r'\1_\2', GuardName).upper()
    Lines = ["%s\n" % GetCopyrightHeader(Type),
             "#ifndef __%s__\n" % GuardName,
             "#define __%s__\n\n" % GuardName]
    if Type == 'h':
        Lines.append("#pragma pack(1)\n\n")
    Lines.extend(TxtBody)
    if Type == 'h':
        Lines.append("#pragma pack()\n\n")
    Lines.append("#endif\n")
    NewTxt = ''.join(Lines)
    # Don't rewrite if the contents are the same.
    Create = True
    if os.path.exists(FileName):
        with open(FileName, "r") as Fin:
            if Fin.read() == NewTxt:
                Create = False
    if Create:
        with open(FileName, "w") as Fout:
            Fout.write(NewTxt)
def CreateHeaderFile(self, HdrFileName, ComHdrFileName=''):
    """Generate the CFGDATA C header(s) from self._CfgItemList.

    Emits one big 'typedef struct' with all visible items (hidden spans
    become ReservedUpdSpace members, gaps become UnusedUpdSpace), marks
    embedded structs with EMBED_STRUCT comments for PostProcessBody,
    and writes TAG #defines.  Common-tag content goes to ComHdrFileName
    when given.  Returns 0 on success, 4 on a bad embed format.
    """
    LastStruct = ''
    SpaceIdx = 0
    Offset = 0
    FieldIdx = 0
    LastFieldIdx = 0
    ResvOffset = 0
    ResvIdx = 0
    TxtBody = []
    LineBuffer = []
    CfgTags = []
    LastVisible = True
    TxtBody.append("typedef struct {\n")
    for Item in self._CfgItemList:
        # Search for CFGDATA tags
        Embed = Item["embed"].upper()
        if Embed.endswith(':START'):
            Match = re.match(r'(\w+)_CFG_DATA:TAG_([0-9A-F]+):START',
                             Embed)
            if Match:
                TagName = Match.group(1)
                TagId = int(Match.group(2), 16)
                CfgTags.append((TagId, TagName))
        # Only process visible items
        NextVisible = LastVisible
        if LastVisible and (Item['header'] == 'OFF'):
            # Entering a hidden span: remember where it starts.
            NextVisible = False
            ResvOffset = Item['offset']
        elif (not LastVisible) and Item['header'] == 'ON':
            # Leaving a hidden span: emit it as one reserved member.
            NextVisible = True
            Name = "ReservedUpdSpace%d" % ResvIdx
            ResvIdx = ResvIdx + 1
            TxtBody.append(self.CreateField(
                Item, Name, Item["offset"] - ResvOffset,
                ResvOffset, '', '', '', ''))
            FieldIdx += 1
        if Offset < Item["offset"]:
            if LastVisible:
                # Fill the layout gap with an unused-space member.
                Name = "UnusedUpdSpace%d" % SpaceIdx
                LineBuffer.append(self.CreateField
                                  (Item, Name, Item["offset"] -
                                   Offset, Offset, '', '', '', ''))
                FieldIdx += 1
                SpaceIdx = SpaceIdx + 1
            Offset = Item["offset"]
        LastVisible = NextVisible
        Offset = Offset + Item["length"]
        if LastVisible:
            for Each in LineBuffer:
                TxtBody.append(Each)
            LineBuffer = []
            Embed = Item["embed"].upper()
            if Embed.endswith(':START') or Embed.endswith(':END'):
                # EMBED_STRUCT: StructName :
                # ItemName : VariableName : START|END
                Name, ArrayNum = self.GetStructArrayInfo(Item["struct"])
                Remaining = Item["embed"]
                # Consecutive fields of the same struct continue an
                # array (marker ' '); otherwise record the array size.
                if (LastFieldIdx + 1 == FieldIdx) and (LastStruct == Name):
                    ArrayMarker = ' '
                else:
                    ArrayMarker = '%d' % ArrayNum
                LastFieldIdx = FieldIdx
                LastStruct = Name
                Marker = '/* EMBED_STRUCT:%s:%s%s*/ ' % (Name, Remaining,
                                                         ArrayMarker)
                # if Embed.endswith(':START') and Comment != '':
                #     Marker = '/* COMMENT:%s */ \n' % Item["comment"] + Marker
            else:
                if Embed == '':
                    Marker = ''
                else:
                    self.Error = "Invalid embedded structure \
format '%s'!\n" % Item["embed"]
                    return 4
            # Generate bit fields for structure
            if len(Item['subreg']) > 0 and Item["struct"]:
                StructType = Item["struct"]
                StructName, ArrayNum = self.GetStructArrayInfo(StructType)
                if (LastFieldIdx + 1 == FieldIdx) and \
                        (LastStruct == Item["struct"]):
                    ArrayMarker = ' '
                else:
                    ArrayMarker = '%d' % ArrayNum
                TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:START%s*/\n' %
                               (StructName, StructType, Item["cname"],
                                ArrayMarker))
                for SubItem in Item['subreg']:
                    Name = SubItem["cname"]
                    if Name.startswith(Item["cname"]):
                        # Strip the parent prefix from the field name.
                        Name = Name[len(Item["cname"]) + 1:]
                    Line = self.CreateField(
                        SubItem, Name, SubItem["bitunit"],
                        SubItem["offset"], SubItem['struct'],
                        SubItem['name'], SubItem['help'],
                        SubItem['option'], SubItem['bitlength'])
                    TxtBody.append(Line)
                TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:END%s*/\n' %
                               (StructName, StructType, Item["cname"],
                                ArrayMarker))
                LastFieldIdx = FieldIdx
                LastStruct = Item["struct"]
                FieldIdx += 1
            else:
                FieldIdx += 1
                Line = Marker + self.CreateField(
                    Item, Item["cname"], Item["length"], Item["offset"],
                    Item['struct'], Item['name'], Item['help'],
                    Item['option'])
                TxtBody.append(Line)
    TxtBody.append("}\n\n")
    # Handle the embedded data structure
    TxtBody = self.PostProcessBody(TxtBody)
    ComBody, TxtBody = self.SplitTextBody(TxtBody)
    # Prepare TAG defines
    PltTagDefTxt = ['\n']
    ComTagDefTxt = ['\n']
    for TagId, TagName in sorted(CfgTags):
        TagLine = '#define %-30s 0x%03X\n' % ('CDATA_%s_TAG' %
                                              TagName, TagId)
        if TagId < self._MinCfgTagId:
            # TAG ID < 0x100, it is a generic TAG
            ComTagDefTxt.append(TagLine)
        else:
            PltTagDefTxt.append(TagLine)
    PltTagDefTxt.append('\n\n')
    ComTagDefTxt.append('\n\n')
    # Write file back
    self.WriteHeaderFile(PltTagDefTxt + TxtBody, HdrFileName)
    if ComHdrFileName:
        self.WriteHeaderFile(ComTagDefTxt + ComBody, ComHdrFileName)
    return 0
def UpdateConfigItemValue(self, Item, ValueStr):
    """Update Item['value'] from ValueStr while preserving the original
    representation (quoted string, byte array, hex or decimal)."""
    AsArray = Item['value'].startswith('{')
    AsString = Item['value'].startswith("'")
    RawBytes = self.ValueToByteArray(ValueStr, Item['length'])
    if AsString:
        Item['value'] = "'%s'" % RawBytes.decode("utf-8")
    elif AsArray:
        Item['value'] = Bytes2Str(RawBytes)
    else:
        # Scalar: keep hex when the old value was hex, decimal otherwise.
        Fmt = '0x%X' if Item['value'].startswith('0x') else '%d'
        Item['value'] = Fmt % Bytes2Val(RawBytes)
def LoadDefaultFromBinaryArray(self, BinDat, IgnoreFind=False):
    """Load item default values from a binary blob.

    Items with a 'find' marker re-anchor the read position at that
    marker inside BinDat (the marker must occur exactly once); other
    items are read relative to the last anchor.  Finishes by running
    UpdateDefaultValue() to refresh derived bit-field values.
    """
    FindOff = 0
    StartOff = 0
    for Item in self._CfgItemList:
        if Item['length'] == 0:
            continue
        if not IgnoreFind and Item['find']:
            FindBin = Item['find'].encode()
            Offset = BinDat.find(FindBin)
            if Offset >= 0:
                # Reject ambiguous markers that occur more than once.
                TestOff = BinDat[Offset+len(FindBin):].find(FindBin)
                if TestOff >= 0:
                    raise Exception('Multiple match found for "%s" !' %
                                    Item['find'])
                FindOff = Offset + len(FindBin)
                StartOff = Item['offset']
            else:
                raise Exception('Could not find "%s" !' % Item['find'])
        if Item['offset'] + Item['length'] > len(BinDat):
            raise Exception('Mismatching format between DSC \
and BIN files !')
        # Translate the DSC offset into a file offset via the anchor.
        Offset = FindOff + (Item['offset'] - StartOff)
        ValStr = Bytes2Str(BinDat[Offset: Offset + Item['length']])
        self.UpdateConfigItemValue(Item, ValStr)
    self.UpdateDefaultValue()
def PatchBinaryArray(self, BinDat):
    """Patch BinDat (a mutable byte buffer) with current item values.

    Items with a 'find' marker start a new patch region anchored right
    after that marker (which must occur exactly once); Reserved/$SKIP
    items keep the existing file bytes.  Patches that would run past
    the end of BinDat are silently skipped.  Returns BinDat.
    """
    FileOff = 0
    Offset = 0
    FindOff = 0
    PatchList = []
    CfgBin = bytearray()
    for Item in self._CfgItemList:
        if Item['length'] == 0:
            continue
        if Item['find']:
            # Flush the pending patch region and re-anchor at the marker.
            if len(CfgBin) > 0:
                PatchList.append((FileOff, CfgBin))
            FindBin = Item['find'].encode()
            FileOff = BinDat.find(FindBin)
            if FileOff < 0:
                raise Exception('Could not find "%s" !' % Item['find'])
            else:
                TestOff = BinDat[FileOff+len(FindBin):].find(FindBin)
                if TestOff >= 0:
                    raise Exception('Multiple match found for "%s" !' %
                                    Item['find'])
            FileOff += len(FindBin)
            Offset = Item['offset']
            FindOff = Offset
            CfgBin = bytearray()
        if Item['offset'] > Offset:
            # Zero-fill the layout gap before this item.
            Gap = Item['offset'] - Offset
            CfgBin.extend(b'\x00' * Gap)
        if Item['type'] == 'Reserved' and Item['option'] == '$SKIP':
            # keep old data
            # NOTE(review): NewOff uses the pre-gap Offset rather than
            # Item['offset']; confirm intended when a gap precedes a
            # $SKIP item.
            NewOff = FileOff + (Offset - FindOff)
            FileData = bytearray(BinDat[NewOff: NewOff + Item['length']])
            CfgBin.extend(FileData)
        else:
            CfgBin.extend(self.ValueToByteArray(Item['value'],
                                                Item['length']))
        Offset = Item['offset'] + Item['length']
    if len(CfgBin) > 0:
        PatchList.append((FileOff, CfgBin))
    for FileOff, CfgBin in PatchList:
        Length = len(CfgBin)
        if FileOff + Length < len(BinDat):
            BinDat[FileOff:FileOff+Length] = CfgBin[:]
    return BinDat
def GenerateBinaryArray(self):
    """Serialize all config items into one bytearray, zero-filling any
    gaps between item offsets."""
    Position = 0
    Output = bytearray()
    for Item in self._CfgItemList:
        Gap = Item['offset'] - Position
        if Gap > 0:
            # Pad up to the item's declared offset.
            Output.extend(b'\x00' * Gap)
        Output.extend(self.ValueToByteArray(Item['value'], Item['length']))
        Position = Item['offset'] + Item['length']
    return Output
def GenerateBinary(self, BinFileName):
    """Write the serialized CFGDATA blob to BinFileName.  Returns 0."""
    with open(BinFileName, "wb") as BinFile:
        BinFile.write(self.GenerateBinaryArray())
    return 0
def GenerateDataIncFile(self, DatIncFileName, BinFile=None):
    """Generate a C include file embedding the CFGDATA blob as a byte
    array named mConfigDataBlob.

    The blob (from BinFile when given, otherwise the freshly serialized
    item list) is prefixed with a fixed 16-byte GUID so it can be
    located inside a larger image later.  Returns 0.
    """
    # Put a prefix GUID before CFGDATA so that it can be located later on.
    # Bug fix: this literal was previously split with a backslash line
    # continuation that broke the '\x64' escape into the literal text
    # 'x64', corrupting the prefix (18 bytes instead of 16).
    Prefix = (b'\xa7\xbd\x7f\x73\x20\x1e\x46\xd6'
              b'\xbe\x8f\x64\x12\x05\x8d\x0a\xa8')
    if BinFile:
        with open(BinFile, 'rb') as Fin:
            BinDat = Prefix + bytearray(Fin.read())
    else:
        BinDat = Prefix + self.GenerateBinaryArray()
    TxtLines = []
    TxtLines.append("UINT8 mConfigDataBlob[%d] = {\n" % len(BinDat))
    Count = 0
    Line = ['  ']
    for Each in BinDat:
        Line.append('0x%02X, ' % Each)
        Count = Count + 1
        # Flush a row every 16 bytes.
        if (Count & 0x0F) == 0:
            Line.append('\n')
            TxtLines.append(''.join(Line))
            Line = ['  ']
    if len(Line) > 1:
        TxtLines.append(''.join(Line) + '\n')
    TxtLines.append("};\n\n")
    self.WriteHeaderFile(TxtLines, DatIncFileName, 'inc')
    return 0
def CheckCfgData(self):
    """Verify that no cname is duplicated among items and bit fields.

    Returns 0 on success; on duplication sets self.Error and returns -1.
    A few well-known placeholder names are exempt from registration.
    """
    def Register(Entry, Seen):
        # Return the offending entry when its name was already seen.
        Name = Entry['cname']
        if Name in Seen:
            return Entry
        if Name not in ['Dummy', 'Reserved', 'CfgHeader', 'CondValue']:
            Seen.append(Name)
        return None
    Seen = []
    Offender = None
    for Item in self._CfgItemList:
        Offender = Register(Item, Seen)
        if Offender is None:
            for SubItem in Item['subreg']:
                Offender = Register(SubItem, Seen)
                if Offender:
                    break
        if Offender:
            break
    if Offender:
        self.Error = "Duplicated CFGDATA '%s' found !\n" % \
            Offender['cname']
        return -1
    return 0
    def PrintData(self):
        """Dump every non-empty config item (and its bit-field
        sub-items) to stdout for debugging."""
        for Item in self._CfgItemList:
            # Zero-length items carry no data worth printing.
            if not Item['length']:
                continue
            print("%-10s @Offset:0x%04X Len:%3d Val:%s" %
                  (Item['cname'], Item['offset'], Item['length'],
                   Item['value']))
            for SubItem in Item['subreg']:
                print(" %-20s BitOff:0x%04X BitLen:%-3d Val:%s" %
                      (SubItem['cname'], SubItem['bitoffset'],
                       SubItem['bitlength'], SubItem['value']))
def FormatArrayValue(self, Input, Length):
Dat = self.ValueToByteArray(Input, Length)
return ','.join('0x%02X' % Each for Each in Dat)
def GetItemOptionList(self, Item):
TmpList = []
if Item['type'] == "Combo":
if not Item['option'] in self._BuidinOption:
OptList = Item['option'].split(',')
for Option in OptList:
Option = Option.strip()
try:
(OpVal, OpStr) = Option.split(':')
except Exception:
raise Exception("Invalide option format '%s' !" %
Option)
TmpList.append((OpVal, OpStr))
return TmpList
    def WriteBsfStruct(self, BsfFd, Item):
        """Write one StructDef line for *Item* into the BSF file.

        Emits '$space_cname  N bits|bytes $_DEFAULT_ = value' (padded to
        a fixed column) and returns the item's Combo option list so the
        caller can collect shared option tables.
        """
        if Item['type'] == "None":
            # Untyped items fall back to the FSP package token space.
            Space = "gPlatformFspPkgTokenSpaceGuid"
        else:
            Space = Item['space']
        Line = " $%s_%s" % (Space, Item['cname'])
        # A '{ ... }' value is rendered as a hex byte list; any other
        # value is used verbatim.
        Match = re.match("\\s*(\\{.+\\})\\s*", Item['value'])
        if Match:
            DefaultValue = self.FormatArrayValue(Match.group(1).strip(),
                                                 Item['length'])
        else:
            DefaultValue = Item['value'].strip()
        if 'bitlength' in Item:
            if Item['bitlength']:
                BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" %
                            (Line, ' ' * (64 - len(Line)), Item['bitlength'],
                             DefaultValue))
        else:
            if Item['length']:
                BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" %
                            (Line, ' ' * (64 - len(Line)), Item['length'],
                             DefaultValue))
        return self.GetItemOptionList(Item)
def GetBsfOption(self, OptionName):
if OptionName in self._CfgOptsDict:
return self._CfgOptsDict[OptionName]
else:
return OptionName
    def WriteBsfOption(self, BsfFd, Item):
        """Write the BSF page widget (plus help / condition lines) for
        one config item.

        WriteHelp: 0 = no help emitted, 1 = help text only,
        2 = help text followed by the EditNum valid-range line.
        Lines are buffered in BsfLines so '#' conditions can be inserted
        before, and '@#' conditions appended after, the widget.
        """
        PcdName = Item['space'] + '_' + Item['cname']
        WriteHelp = 0
        BsfLines = []
        if Item['type'] == "Combo":
            if Item['option'] in self._BuidinOption:
                Options = self._BuidinOption[Item['option']]
            else:
                Options = self.GetBsfOption(PcdName)
            BsfLines.append(' %s $%s, "%s", &%s,\n' % (
                Item['type'], PcdName, Item['name'], Options))
            WriteHelp = 1
        elif Item['type'].startswith("EditNum"):
            # Type looks like 'EditNum, HEX, (min, max)'.
            Match = re.match("EditNum\\s*,\\s*(HEX|DEC)\\s*,\\s*\\(\
(\\d+|0x[0-9A-Fa-f]+)\\s*,\\s*(\\d+|0x[0-9A-Fa-f]+)\\)", Item['type'])
            if Match:
                BsfLines.append(' EditNum $%s, "%s", %s,\n' % (
                    PcdName, Item['name'], Match.group(1)))
                WriteHelp = 2
        elif Item['type'].startswith("EditText"):
            BsfLines.append(' %s $%s, "%s",\n' % (Item['type'], PcdName,
                                                  Item['name']))
            WriteHelp = 1
        elif Item['type'] == "Table":
            # Option string holds 'name:size:format' column triples.
            Columns = Item['option'].split(',')
            if len(Columns) != 0:
                BsfLines.append(' %s $%s "%s",' % (Item['type'], PcdName,
                                                   Item['name']))
                for Col in Columns:
                    Fmt = Col.split(':')
                    if len(Fmt) != 3:
                        raise Exception("Column format '%s' is invalid !" %
                                        Fmt)
                    try:
                        Dtype = int(Fmt[1].strip())
                    except Exception:
                        raise Exception("Column size '%s' is invalid !" %
                                        Fmt[1])
                    BsfLines.append('\n Column "%s", %d bytes, %s' %
                                    (Fmt[0].strip(), Dtype, Fmt[2].strip()))
                BsfLines.append(',\n')
                WriteHelp = 1
        if WriteHelp > 0:
            # Help text uses literal '\n\r' as a line separator.
            HelpLines = Item['help'].split('\\n\\r')
            FirstLine = True
            for HelpLine in HelpLines:
                if FirstLine:
                    FirstLine = False
                    BsfLines.append(' Help "%s"\n' % (HelpLine))
                else:
                    BsfLines.append(' "%s"\n' % (HelpLine))
            if WriteHelp == 2:
                BsfLines.append(' "Valid range: %s ~ %s"\n' %
                                (Match.group(2), Match.group(3)))
        if len(Item['condition']) > 4:
            CondList = Item['condition'].split(',')
            Idx = 0
            for Cond in CondList:
                Cond = Cond.strip()
                if Cond.startswith('#'):
                    # '#' conditions go before the widget, in order.
                    BsfLines.insert(Idx, Cond + '\n')
                    Idx += 1
                elif Cond.startswith('@#'):
                    # '@#' conditions follow the widget (leading '@' dropped).
                    BsfLines.append(Cond[1:] + '\n')
        for Line in BsfLines:
            BsfFd.write(Line)
    def WriteBsfPages(self, PageTree, BsfFd):
        """Recursively emit Page/EndPage sections from the page tree,
        writing each item (or named bit-field sub-item) that belongs to
        the page, ordered by its 'order' key."""
        BsfFd.write('\n')
        # Each tree node is a single-key dict: {page_id: [child, ...]}.
        Key = next(iter(PageTree))
        for Page in PageTree[Key]:
            PageName = next(iter(Page))
            BsfFd.write('Page "%s"\n' % self._CfgPageDict[PageName])
            if len(PageTree[Key]):
                # Emit nested sub-pages before this page's own items.
                self.WriteBsfPages(Page, BsfFd)
            BsfItems = []
            for Item in self._CfgItemList:
                if Item['name'] != '':
                    if Item['page'] != PageName:
                        continue
                    if len(Item['subreg']) > 0:
                        # Bit-field parents contribute their named fields.
                        for SubItem in Item['subreg']:
                            if SubItem['name'] != '':
                                BsfItems.append(SubItem)
                    else:
                        BsfItems.append(Item)
            BsfItems.sort(key=lambda x: x['order'])
            for Item in BsfItems:
                self.WriteBsfOption(BsfFd, Item)
            BsfFd.write("EndPage\n\n")
    def GenerateBsfFile(self, BsfFile):
        """Generate the complete BSF file: StructDef section, built-in
        and collected option lists, info block and page tree.

        Returns 0 on success, 1 when BsfFile is empty (self.Error set).
        """
        if BsfFile == '':
            self.Error = "BSF output file '%s' is invalid" % BsfFile
            return 1
        Error = 0
        OptionDict = {}
        BsfFd = open(BsfFile, "w")
        BsfFd.write("%s\n" % GetCopyrightHeader('bsf'))
        BsfFd.write("%s\n" % self._GlobalDataDef)
        BsfFd.write("StructDef\n")
        # NextOffset tracks the byte position after the last emitted
        # field so gaps can be skipped explicitly.
        NextOffset = -1
        for Item in self._CfgItemList:
            if Item['find'] != '':
                # 'find' anchors re-sync the struct to a marker string.
                BsfFd.write('\n Find "%s"\n' % Item['find'])
                NextOffset = Item['offset'] + Item['length']
            if Item['name'] != '':
                if NextOffset != Item['offset']:
                    BsfFd.write(" Skip %d bytes\n" %
                                (Item['offset'] - NextOffset))
                if len(Item['subreg']) > 0:
                    # Bit-field parent: walk sub-items in bit units.
                    NextOffset = Item['offset']
                    BitsOffset = NextOffset * 8
                    for SubItem in Item['subreg']:
                        BitsOffset += SubItem['bitlength']
                        if SubItem['name'] == '':
                            # Anonymous filler bits/bytes are skipped.
                            if 'bitlength' in SubItem:
                                BsfFd.write(" Skip %d bits\n" %
                                            (SubItem['bitlength']))
                            else:
                                BsfFd.write(" Skip %d bytes\n" %
                                            (SubItem['length']))
                        else:
                            Options = self.WriteBsfStruct(BsfFd, SubItem)
                            if len(Options) > 0:
                                OptionDict[SubItem
                                           ['space']+'_'+SubItem
                                           ['cname']] = Options
                    # Pad out any bits left between the last sub-item
                    # and the end of the parent item.
                    NextBitsOffset = (Item['offset'] + Item['length']) * 8
                    if NextBitsOffset > BitsOffset:
                        BitsGap = NextBitsOffset - BitsOffset
                        BitsRemain = BitsGap % 8
                        if BitsRemain:
                            BsfFd.write(" Skip %d bits\n" % BitsRemain)
                            BitsGap -= BitsRemain
                        BytesRemain = BitsGap // 8
                        if BytesRemain:
                            BsfFd.write(" Skip %d bytes\n" %
                                        BytesRemain)
                    NextOffset = Item['offset'] + Item['length']
                else:
                    NextOffset = Item['offset'] + Item['length']
                    Options = self.WriteBsfStruct(BsfFd, Item)
                    if len(Options) > 0:
                        OptionDict[Item['space']+'_'+Item['cname']] = Options
        BsfFd.write("\nEndStruct\n\n")
        BsfFd.write("%s" % self._BuidinOptionTxt)
        # De-duplicate identical option tables: only the first gets a
        # List block; later ones reference it via _CfgOptsDict.
        NameList = []
        OptionList = []
        for Each in sorted(OptionDict):
            if OptionDict[Each] not in OptionList:
                NameList.append(Each)
                OptionList.append(OptionDict[Each])
                BsfFd.write("List &%s\n" % Each)
                for Item in OptionDict[Each]:
                    BsfFd.write(' Selection %s , "%s"\n' %
                                (self.EvaluateExpress(Item[0]), Item[1]))
                BsfFd.write("EndList\n\n")
            else:
                # Item has idential options as other item
                # Try to reuse the previous options instead
                Idx = OptionList.index(OptionDict[Each])
                self._CfgOptsDict[Each] = NameList[Idx]
        BsfFd.write("BeginInfoBlock\n")
        BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
        BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
        BsfFd.write("EndInfoBlock\n\n")
        self.WriteBsfPages(self._CfgPageTree, BsfFd)
        BsfFd.close()
        return Error
def WriteDeltaLine(self, OutLines, Name, ValStr, IsArray):
if IsArray:
Output = '%s | { %s }' % (Name, ValStr)
else:
Output = '%s | 0x%X' % (Name, Array2Val(ValStr))
OutLines.append(Output)
def WriteDeltaFile(self, OutFile, PlatformId, OutLines):
DltFd = open(OutFile, "w")
DltFd.write("%s\n" % GetCopyrightHeader('dlt', True))
if PlatformId is not None:
DltFd.write('#\n')
DltFd.write('# Delta configuration values \
for platform ID 0x%04X\n' % PlatformId)
DltFd.write('#\n\n')
for Line in OutLines:
DltFd.write('%s\n' % Line)
DltFd.close()
def GenerateDeltaFile(self, OutFile, AbsfFile):
# Parse ABSF Build in dict
if not os.path.exists(AbsfFile):
Lines = []
else:
with open(AbsfFile) as Fin:
Lines = Fin.readlines()
AbsfBuiltValDict = {}
Process = False
for Line in Lines:
Line = Line.strip()
if Line.startswith('StructDef'):
Process = True
if Line.startswith('EndStruct'):
break
if not Process:
continue
Match = re.match('\\s*\\$gCfgData_(\\w+)\\s+\
(\\d+)\\s+(bits|bytes)\\s+\\$_AS_BUILT_\\s+=\\s+(.+)\\$', Line)
if Match:
if Match.group(1) not in AbsfBuiltValDict:
AbsfBuiltValDict[Match.group(1)] = Match.group(4).strip()
else:
raise Exception("Duplicated configuration \
name '%s' found !", Match.group(1))
# Match config item in DSC
PlatformId = None
OutLines = []
TagName = ''
Level = 0
for Item in self._CfgItemList:
Name = None
if Level == 0 and Item['embed'].endswith(':START'):
TagName = Item['embed'].split(':')[0]
Level += 1
if Item['cname'] in AbsfBuiltValDict:
ValStr = AbsfBuiltValDict[Item['cname']]
Name = '%s.%s' % (TagName, Item['cname'])
if not Item['subreg'] and Item['value'].startswith('{'):
Value = Array2Val(Item['value'])
IsArray = True
else:
Value = int(Item['value'], 16)
IsArray = False
AbsfVal = Array2Val(ValStr)
if AbsfVal != Value:
if 'PLATFORMID_CFG_DATA.PlatformId' == Name:
PlatformId = AbsfVal
self.WriteDeltaLine(OutLines, Name, ValStr, IsArray)
else:
if 'PLATFORMID_CFG_DATA.PlatformId' == Name:
raise Exception("'PlatformId' has the \
same value as DSC default !")
if Item['subreg']:
for SubItem in Item['subreg']:
if SubItem['cname'] in AbsfBuiltValDict:
ValStr = AbsfBuiltValDict[SubItem['cname']]
if Array2Val(ValStr) == int(SubItem['value'], 16):
continue
Name = '%s.%s.%s' % (TagName, Item['cname'],
SubItem['cname'])
self.WriteDeltaLine(OutLines, Name, ValStr, False)
if Item['embed'].endswith(':END'):
Level -= 1
if PlatformId is None and Lines:
raise Exception("'PlatformId' configuration \
is missing in ABSF file!")
else:
PlatformId = 0
self.WriteDeltaFile(OutFile, PlatformId, Lines)
return 0
def GenerateDscFile(self, OutFile):
DscFd = open(OutFile, "w")
for Line in self._DscLines:
DscFd.write(Line + '\n')
DscFd.close()
return 0
def Usage():
    """Print the command line help text for all GenCfgData sub-commands.

    Note: the backslash line continuations below are inside the string
    literals, so each wrapped pair renders as a single output line.
    """
    print('\n'.join([
        "GenCfgData Version 0.01",
        "Usage:",
        " GenCfgData GENINC BinFile \
IncOutFile [-D Macros]",
        " GenCfgData GENPKL DscFile \
PklOutFile [-D Macros]",
        " GenCfgData GENINC DscFile[;DltFile] \
IncOutFile [-D Macros]",
        " GenCfgData GENBIN DscFile[;DltFile] \
BinOutFile [-D Macros]",
        " GenCfgData GENBSF DscFile[;DltFile] \
BsfOutFile [-D Macros]",
        " GenCfgData GENDLT DscFile[;AbsfFile] \
DltOutFile [-D Macros]",
        " GenCfgData GENDSC DscFile \
DscOutFile [-D Macros]",
        " GenCfgData GENHDR DscFile[;DltFile] \
HdrOutFile[;ComHdrOutFile] [-D Macros]"
    ]))
def Main():
    """Command line entry point.

    Dispatches the GENINC / GENPKL / GENBIN / GENBSF / GENDLT / GENDSC /
    GENHDR sub-commands.  Returns 0 on success, 1 on bad usage; all
    other failures raise Exception with the CGenCfgData error text.
    """
    #
    # Parse the options and args
    #
    argc = len(sys.argv)
    if argc < 4:
        Usage()
        return 1
    GenCfgData = CGenCfgData()
    Command = sys.argv[1].upper()
    OutFile = sys.argv[3]
    # NOTE(review): macros are only parsed when argc > 5, i.e. at least
    # two extra tokens after the output file -- confirm a single-token
    # macro argument is intentionally ignored.
    if argc > 5 and GenCfgData.ParseMacros(sys.argv[4:]) != 0:
        raise Exception("ERROR: Macro parsing failed !")
    # Second argument may be 'DscFile' or 'DscFile;DltFile'.
    FileList = sys.argv[2].split(';')
    if len(FileList) == 2:
        DscFile = FileList[0]
        DltFile = FileList[1]
    elif len(FileList) == 1:
        DscFile = FileList[0]
        DltFile = ''
    else:
        raise Exception("ERROR: Invalid parameter '%s' !" % sys.argv[2])
    if Command == "GENDLT" and DscFile.endswith('.dlt'):
        # It needs to expand an existing DLT file
        DltFile = DscFile
        Lines = CGenCfgData.ExpandIncludeFiles(DltFile)
        OutTxt = ''.join([x[0] for x in Lines])
        OutFile = open(OutFile, "w")
        OutFile.write(OutTxt)
        OutFile.close()
        return 0
    if not os.path.exists(DscFile):
        raise Exception("ERROR: Cannot open file '%s' !" % DscFile)
    CfgBinFile = ''
    if DltFile:
        if not os.path.exists(DltFile):
            raise Exception("ERROR: Cannot open file '%s' !" % DltFile)
        if Command == "GENDLT":
            # For GENDLT the second file is actually the ABSF input.
            CfgBinFile = DltFile
            DltFile = ''
    BinFile = ''
    if (DscFile.lower().endswith('.bin')) and (Command == "GENINC"):
        # It is binary file
        BinFile = DscFile
        DscFile = ''
    if BinFile:
        if GenCfgData.GenerateDataIncFile(OutFile, BinFile) != 0:
            raise Exception(GenCfgData.Error)
        return 0
    if DscFile.lower().endswith('.pkl'):
        # Restore a previously pickled parser state instead of re-parsing.
        with open(DscFile, "rb") as PklFile:
            GenCfgData.__dict__ = marshal.load(PklFile)
    else:
        if GenCfgData.ParseDscFile(DscFile) != 0:
            raise Exception(GenCfgData.Error)
        # if GenCfgData.CheckCfgData() != 0:
        # raise Exception(GenCfgData.Error)
        if GenCfgData.CreateVarDict() != 0:
            raise Exception(GenCfgData.Error)
    if Command == 'GENPKL':
        with open(OutFile, "wb") as PklFile:
            marshal.dump(GenCfgData.__dict__, PklFile)
        return 0
    if DltFile and Command in ['GENHDR', 'GENBIN', 'GENINC', 'GENBSF']:
        if GenCfgData.OverrideDefaultValue(DltFile) != 0:
            raise Exception(GenCfgData.Error)
    if GenCfgData.UpdateDefaultValue() != 0:
        raise Exception(GenCfgData.Error)
    # GenCfgData.PrintData ()
    if sys.argv[1] == "GENBIN":
        if GenCfgData.GenerateBinary(OutFile) != 0:
            raise Exception(GenCfgData.Error)
    elif sys.argv[1] == "GENHDR":
        # Output may be 'BrdHdr' or 'BrdHdr;ComHdr'.
        OutFiles = OutFile.split(';')
        BrdOutFile = OutFiles[0].strip()
        if len(OutFiles) > 1:
            ComOutFile = OutFiles[1].strip()
        else:
            ComOutFile = ''
        if GenCfgData.CreateHeaderFile(BrdOutFile, ComOutFile) != 0:
            raise Exception(GenCfgData.Error)
    elif sys.argv[1] == "GENBSF":
        if GenCfgData.GenerateBsfFile(OutFile) != 0:
            raise Exception(GenCfgData.Error)
    elif sys.argv[1] == "GENINC":
        if GenCfgData.GenerateDataIncFile(OutFile) != 0:
            raise Exception(GenCfgData.Error)
    elif sys.argv[1] == "GENDLT":
        if GenCfgData.GenerateDeltaFile(OutFile, CfgBinFile) != 0:
            raise Exception(GenCfgData.Error)
    elif sys.argv[1] == "GENDSC":
        if GenCfgData.GenerateDscFile(OutFile) != 0:
            raise Exception(GenCfgData.Error)
    else:
        raise Exception("Unsuported command '%s' !" % Command)
    return 0
if __name__ == '__main__':
sys.exit(Main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/FspGenCfgData.py
|
# @file
# Split a file into two pieces at the request offset.
#
# Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
import unittest
import tempfile
import os
import shutil
import struct as st
import filecmp
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import FspDscBsf2Yaml
YamlHeaderLineLength = 10
HdrFileHeaderLineLength = 32
BsfFileHeaderLineLength = 19
def GenFileWithoutHdr(inputfile, numLineToStrip):
    """Copy *inputfile* minus its first *numLineToStrip* lines.

    The stripped copy is written to 'no-header-<inputfile>' in the
    current directory; that file name is returned.
    """
    # Context managers close the handles even on error (the original
    # left both files unclosed on exception paths).
    with open(inputfile, "r") as yaml_file:
        lines = yaml_file.readlines()
    noHdrOutputFileName = "no-header-" + inputfile
    with open(noHdrOutputFileName, "w") as stripped_file:
        # writelines replaces the manual per-line loop.
        stripped_file.writelines(lines[numLineToStrip:])
    return noHdrOutputFileName
class TestFspScripts(unittest.TestCase):
    """Golden-file tests for the FSP config-generation tools.

    Each test runs a generator script against QemuFspPkg.dsc, strips the
    volatile (date-stamped) header from the produced file, and compares
    the remainder with a checked-in expected copy.
    """
    def test_generateFspHeader_fromDsc(self):
        # Generate HEADER.
        # BUGFIX: raw strings keep the Windows path backslash from being
        # parsed as an invalid escape sequence ('\G' raises a
        # DeprecationWarning); the runtime value is unchanged.
        cmd = '{} {} HEADER {} {} {}'.format(
            'python',
            r'..\GenCfgOpt.py',
            'QemuFspPkg.dsc',
            '.',
            "")
        os.system(cmd)
        noHdrOutputFileName = GenFileWithoutHdr("FspUpd.h", HdrFileHeaderLineLength)
        self.assertTrue(filecmp.cmp(noHdrOutputFileName,
                                    'ExpectedFspUpd.h'))
    def test_generateFspsHeader_fromDsc(self):
        noHdrOutputFileName = GenFileWithoutHdr("FspsUpd.h", HdrFileHeaderLineLength)
        self.assertTrue(filecmp.cmp(noHdrOutputFileName,
                                    'ExpectedFspsUpd.h'))
    def test_generateFsptHeader_fromDsc(self):
        noHdrOutputFileName = GenFileWithoutHdr("FsptUpd.h", HdrFileHeaderLineLength)
        self.assertTrue(filecmp.cmp(noHdrOutputFileName,
                                    'ExpectedFsptUpd.h'))
    def test_generateFspmHeader_fromDsc(self):
        noHdrOutputFileName = GenFileWithoutHdr("FspmUpd.h", HdrFileHeaderLineLength)
        self.assertTrue(filecmp.cmp(noHdrOutputFileName,
                                    'ExpectedFspmUpd.h'))
    def test_generateBsf_fromDsc(self):
        # Generate BSF
        cmd = '{} {} GENBSF {} {} {}'.format(
            'python',
            r'..\GenCfgOpt.py',
            'QemuFspPkg.dsc',
            '.',
            "Output.bsf")
        os.system(cmd)
        noHdrOutputFileName = GenFileWithoutHdr("Output.bsf", BsfFileHeaderLineLength)
        self.assertTrue(filecmp.cmp(noHdrOutputFileName,
                                    'ExpectedOutput.bsf'))
    def test_generateYaml_fromDsc(self):
        # Generate YAML
        cmd = '{} {} {} {}'.format(
            'python',
            r'..\FspDscBsf2Yaml.py',
            'QemuFspPkg.dsc',
            "Output.yaml")
        os.system(cmd)
        noHdrOutputFileName = GenFileWithoutHdr("Output.yaml", YamlHeaderLineLength)
        self.assertTrue(filecmp.cmp(noHdrOutputFileName,
                                    'ExpectedOutput.yaml'))
if __name__ == '__main__':
unittest.main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/Tests/test_yaml.py
|
#!/usr/bin/env python
# @ SingleSign.py
# Single signing script
#
# Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import sys
import re
import shutil
import subprocess
SIGNING_KEY = {
# Key Id | Key File Name start |
# =================================================================
# KEY_ID_MASTER is used for signing Slimboot Key Hash Manifest \
# container (KEYH Component)
"KEY_ID_MASTER_RSA2048": "MasterTestKey_Priv_RSA2048.pem",
"KEY_ID_MASTER_RSA3072": "MasterTestKey_Priv_RSA3072.pem",
# KEY_ID_CFGDATA is used for signing external Config data blob)
"KEY_ID_CFGDATA_RSA2048": "ConfigTestKey_Priv_RSA2048.pem",
"KEY_ID_CFGDATA_RSA3072": "ConfigTestKey_Priv_RSA3072.pem",
# KEY_ID_FIRMWAREUPDATE is used for signing capsule firmware update image)
"KEY_ID_FIRMWAREUPDATE_RSA2048": "FirmwareUpdateTestKey_Priv_RSA2048.pem",
"KEY_ID_FIRMWAREUPDATE_RSA3072": "FirmwareUpdateTestKey_Priv_RSA3072.pem",
# KEY_ID_CONTAINER is used for signing container header with mono signature
"KEY_ID_CONTAINER_RSA2048": "ContainerTestKey_Priv_RSA2048.pem",
"KEY_ID_CONTAINER_RSA3072": "ContainerTestKey_Priv_RSA3072.pem",
# CONTAINER_COMP1_KEY_ID is used for signing container components
"KEY_ID_CONTAINER_COMP_RSA2048": "ContainerCompTestKey_Priv_RSA2048.pem",
"KEY_ID_CONTAINER_COMP_RSA3072": "ContainerCompTestKey_Priv_RSA3072.pem",
# KEY_ID_OS1_PUBLIC, KEY_ID_OS2_PUBLIC is used for referencing \
# Boot OS public keys
"KEY_ID_OS1_PUBLIC_RSA2048": "OS1_TestKey_Pub_RSA2048.pem",
"KEY_ID_OS1_PUBLIC_RSA3072": "OS1_TestKey_Pub_RSA3072.pem",
"KEY_ID_OS2_PUBLIC_RSA2048": "OS2_TestKey_Pub_RSA2048.pem",
"KEY_ID_OS2_PUBLIC_RSA3072": "OS2_TestKey_Pub_RSA3072.pem",
}
MESSAGE_SBL_KEY_DIR = """!!! PRE-REQUISITE: Path to SBL_KEY_DIR has.
to be set with SBL KEYS DIRECTORY !!! \n!!! Generate keys.
using GenerateKeys.py available in BootloaderCorePkg/Tools.
directory !!! \n !!! Run $python.
BootloaderCorePkg/Tools/GenerateKeys.py -k $PATH_TO_SBL_KEY_DIR !!!\n
!!! Set SBL_KEY_DIR environ with path to SBL KEYS DIR !!!\n"
!!! Windows $set SBL_KEY_DIR=$PATH_TO_SBL_KEY_DIR !!!\n
!!! Linux $export SBL_KEY_DIR=$PATH_TO_SBL_KEY_DIR !!!\n"""
def get_openssl_path():
if os.name == 'nt':
if 'OPENSSL_PATH' not in os.environ:
openssl_dir = "C:\\Openssl\\bin\\"
if os.path.exists(openssl_dir):
os.environ['OPENSSL_PATH'] = openssl_dir
else:
os.environ['OPENSSL_PATH'] = "C:\\Openssl\\"
if 'OPENSSL_CONF' not in os.environ:
openssl_cfg = "C:\\Openssl\\openssl.cfg"
if os.path.exists(openssl_cfg):
os.environ['OPENSSL_CONF'] = openssl_cfg
openssl = os.path.join(
os.environ.get('OPENSSL_PATH', ''),
'openssl.exe')
else:
# Get openssl path for Linux cases
openssl = shutil.which('openssl')
return openssl
def run_process(arg_list, print_cmd=False, capture_out=False):
    """Run a command; return captured stdout ('' unless capture_out).

    On failure: exits the interpreter when subprocess reported a bad
    status, or re-raises the exception subprocess threw.
    """
    sys.stdout.flush()
    if print_cmd:
        print(' '.join(arg_list))
    exc = None
    status = 0
    output = ''
    try:
        if capture_out:
            output = subprocess.check_output(arg_list).decode()
        else:
            status = subprocess.call(arg_list)
    except Exception as ex:
        status, exc = 1, ex
    if status:
        if not print_cmd:
            print('Error in running process:\n %s' % ' '.join(arg_list))
        if exc is None:
            sys.exit(1)
        raise exc
    return output
def check_file_pem_format(priv_key):
    """Return True when the key file name carries a .pem extension."""
    return os.path.splitext(os.path.basename(priv_key))[1] == ".pem"
def get_key_id(priv_key):
    """Return the basename when it is a KEY_ID* style name, else None."""
    key_name = os.path.basename(priv_key)
    return key_name if key_name.startswith('KEY_ID') else None
def get_sbl_key_dir():
    """Return the key-store directory from SBL_KEY_DIR.

    Raises Exception (with setup guidance) when the variable is unset
    or points at a non-existent path.
    """
    sbl_key_dir = os.environ.get('SBL_KEY_DIR')
    if sbl_key_dir is None:
        exception_string = "ERROR: SBL_KEY_DIR is not defined." \
            " Set SBL_KEY_DIR with SBL Keys directory!!\n"
        raise Exception(exception_string + MESSAGE_SBL_KEY_DIR)
    if not os.path.exists(sbl_key_dir):
        raise Exception("ERROR:SBL_KEY_DIR set " + sbl_key_dir
                        + " is not valid."
                        " Set the correct SBL_KEY_DIR path !!\n"
                        + MESSAGE_SBL_KEY_DIR)
    return sbl_key_dir
def get_key_from_store(in_key):
    """Resolve *in_key* (existing path, KEY_ID name, or .pem file name)
    to a key file path inside the SBL key store.

    Raises Exception when the key id is unknown, the name is not valid,
    or the resolved file does not exist in the store.
    """
    # A directly existing path wins over any key-store lookup.
    if os.path.exists(in_key):
        return in_key
    # Get Slimboot key dir path
    sbl_key_dir = get_sbl_key_dir()
    # Extract if in_key is a KEY_ID style name
    priv_key = get_key_id(in_key)
    if priv_key is not None:
        if priv_key in SIGNING_KEY:
            # Generate key file name from key id
            priv_key_file = SIGNING_KEY[priv_key]
        else:
            # BUGFIX: message previously read "KEY_ID<id>is not found
            # is not found ..." (duplicated phrase, missing spacing).
            raise Exception("KEY_ID '%s' is not found in supported "
                            "KEY IDs!!" % priv_key)
    elif check_file_pem_format(in_key):
        # File name was provided directly in pem format.
        priv_key_file = in_key
    else:
        raise Exception('key provided %s is not valid!' % in_key)
    # Construct the full path inside the key store.
    priv_key = os.path.join(sbl_key_dir, priv_key_file)
    # Check that the key constructed from the KEY ID exists on disk.
    if not os.path.isfile(priv_key):
        # BUGFIX: added the spaces/formatting the original message lacked.
        raise Exception("!!! ERROR: Key file corresponding to '%s' does "
                        "not exist in SBL key directory at '%s' !!!\n%s"
                        % (in_key, sbl_key_dir, MESSAGE_SBL_KEY_DIR))
    return priv_key
#
# Sign an file using openssl
#
# priv_key [Input] Key Id or Path to Private key
# hash_type [Input] Signing hash
# sign_scheme[Input] Sign/padding scheme
# in_file [Input] Input file to be signed
# out_file [Input/Output] Signed data file
#
def single_sign_file(priv_key, hash_type, sign_scheme, in_file, out_file):
    """Sign *in_file* with openssl and write the signature to *out_file*.

    priv_key    Key Id or path to the private key
    hash_type   one of SHA2_256 / SHA2_384 / SHA2_512
    sign_scheme RSA_PKCS1 or RSA_PSS
    in_file     input file to be signed
    out_file    output signature file (the intermediate hash is kept in
                '<out_file>.hash' / '<out_file>.hash.tmp')
    """
    _hash_type_string = {
        "SHA2_256": 'sha256',
        "SHA2_384": 'sha384',
        "SHA2_512": 'sha512',
    }
    _hash_digest_Size = {
        # Hash_string : Hash_Size
        "SHA2_256": 32,
        "SHA2_384": 48,
        "SHA2_512": 64,
        "SM3_256": 32,
    }
    _sign_scheme_string = {
        "RSA_PKCS1": 'pkcs1',
        "RSA_PSS": 'pss',
    }
    priv_key = get_key_from_store(priv_key)
    # Temporary files to store hash generated
    hash_file_tmp = out_file + '.hash.tmp'
    hash_file = out_file + '.hash'
    # Generate hash using openssl dgst in hex format
    cmdargs = [get_openssl_path(),
               'dgst',
               '-' + '%s' % _hash_type_string[hash_type],
               '-out', '%s' % hash_file_tmp, '%s' % in_file]
    run_process(cmdargs)
    # Extract hash from the dgst output ('... = <hex>') and convert it
    # to raw bytes for pkeyutl.  Context managers close the handles the
    # original code leaked.
    with open(hash_file_tmp, 'r') as fin:
        hashdata = fin.read()
    try:
        hashdata = hashdata.rsplit('=', 1)[1].strip()
    except Exception:
        raise Exception('Hash Data not found for signing!')
    if len(hashdata) != (_hash_digest_Size[hash_type] * 2):
        # BUGFIX: previous message read 'size do match with for hash type'.
        raise Exception('Hash Data size does not match the hash type!')
    hashdata_bytes = bytearray.fromhex(hashdata)
    with open(hash_file, 'wb') as fout:
        fout.write(hashdata_bytes)
    # BUGFIX: corrected 'Singing' typo.
    print("Key used for Signing %s !!" % priv_key)
    # sign using Openssl pkeyutl
    cmdargs = [get_openssl_path(),
               'pkeyutl', '-sign', '-in', '%s' % hash_file,
               '-inkey', '%s' % priv_key, '-out',
               '%s' % out_file, '-pkeyopt',
               'digest:%s' % _hash_type_string[hash_type],
               '-pkeyopt', 'rsa_padding_mode:%s' %
               _sign_scheme_string[sign_scheme]]
    run_process(cmdargs)
    return
#
# Extract public key using openssl
#
# in_key [Input] Private key or public key in pem format
# pub_key_file [Input/Output] Public Key to a file
#
# return keydata (mod, exp) in bin format
#
def single_sign_gen_pub_key(in_key, pub_key_file=None):
    """Extract the RSA public key (modulus + exponent) via openssl.

    in_key:       private or public key in PEM format (path or KEY_ID).
    pub_key_file: optional path to also write the PEM public key to.
    Returns the key data as bytes: big-endian modulus followed by a
    4-byte big-endian exponent.
    """
    in_key = get_key_from_store(in_key)
    # Expect key to be in PEM format
    is_prv_key = False
    cmdline = [get_openssl_path(), 'rsa', '-pubout', '-text', '-noout',
               '-in', '%s' % in_key]
    # Check if it is public key or private key
    text = open(in_key, 'r').read()
    if '-BEGIN RSA PRIVATE KEY-' in text:
        is_prv_key = True
    elif '-BEGIN PUBLIC KEY-' in text:
        cmdline.extend(['-pubin'])
    else:
        raise Exception('Unknown key format "%s" !' % in_key)
    if pub_key_file:
        cmdline.extend(['-out', '%s' % pub_key_file])
        capture = False
    else:
        capture = True
    output = run_process(cmdline, capture_out=capture)
    if not capture:
        # When openssl wrote to a file, read the text back for parsing.
        output = text = open(pub_key_file, 'r').read()
    # Flatten the openssl text dump so the regexes below can span lines.
    data = output.replace('\r', '')
    data = data.replace('\n', '')
    data = data.replace(' ', '')
    # Extract the modulus (openssl labels differ between private and
    # public key dumps).
    if is_prv_key:
        match = re.search('modulus(.*)publicExponent:\\s+(\\d+)\\s+', data)
    else:
        match = re.search('Modulus(?:.*?):(.*)Exponent:\\s+(\\d+)\\s+', data)
    if not match:
        raise Exception('Public key not found!')
    modulus = match.group(1).replace(':', '')
    exponent = int(match.group(2))
    mod = bytearray.fromhex(modulus)
    # Remove the '00' from the front if the MSB is 1
    mod = mod[1:] if mod[0] == 0 and (mod[1] & 0x80) else mod
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/ConfigEditor/SingleSign.py
|
# @ GenYamlCfg.py
#
# Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
import os
import sys
import re
import marshal
import string
import operator as op
import ast
import tkinter.messagebox as messagebox
import tkinter
from datetime import date
from collections import OrderedDict
from CommonUtility import value_to_bytearray, value_to_bytes, \
bytes_to_value, get_bits_from_bytes, set_bits_to_bytes
# Generated file copyright header
__copyright_tmp__ = """/** @file
Platform Configuration %s File.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
This file is automatically generated. Please do NOT modify !!!
**/
"""
def get_copyright_header(file_type, allow_modify=False):
    """Build the generated-file banner for the given output type.

    file_type selects the description line ('yaml', 'dlt', 'inc', 'h');
    yaml/dlt banners are '#'-commented.  When allow_modify is True the
    'do NOT modify' warning line is dropped.
    """
    descriptions = {
        'yaml': 'Boot Setting',
        'dlt': 'Delta',
        'inc': 'C Binary Blob',
        'h': 'C Struct Header'
    }
    prefix = '#' if file_type in ('yaml', 'dlt') else ''
    tmpl_lines = __copyright_tmp__.split('\n')
    if allow_modify:
        tmpl_lines = [ln for ln in tmpl_lines
                      if 'Please do NOT modify' not in ln]
    banner = '\n'.join('%s%s' % (prefix, ln) for ln in tmpl_lines)
    # Drop the final trailing character and normalize to one newline.
    banner = banner[:-1] + '\n'
    return banner % (descriptions[file_type], date.today().year)
def check_quote(text):
    """Return True when *text* starts and ends with the same quote
    character (single or double)."""
    head, tail = text[0], text[-1]
    if head == tail and head in ("'", '"'):
        return True
    return False
def strip_quote(text):
    """Strip a single layer of matching quotes around *text* (after
    trimming outer whitespace); otherwise return *text* unchanged."""
    trimmed = text.strip()
    # Inlined quote check (was check_quote) -- same semantics.
    head, tail = trimmed[0], trimmed[-1]
    if head == tail and head in ("'", '"'):
        return trimmed[1:-1]
    return text
def strip_delimiter(text, delim):
    """Drop one surrounding delimiter pair (e.g. '{...}') if present;
    unmatched or empty input is returned unchanged."""
    trimmed = text.strip()
    if trimmed and trimmed[0] == delim[0] and trimmed[-1] == delim[-1]:
        return trimmed[1:-1]
    return text
def bytes_to_bracket_str(bytes):
    """Format a byte sequence as a C-style '{ 0x.., 0x.. }' string.

    NOTE: the parameter name shadows the builtin 'bytes'; kept for
    backward compatibility with keyword callers.
    """
    body = ', '.join('0x%02x' % each for each in bytes)
    return '{ %s }' % body
def array_str_to_value(val_str):
    """Convert '{ 0x01, 0x02 }' style text to a little-endian integer
    (first listed byte is least significant)."""
    body = strip_quote(strip_delimiter(val_str.strip(), '{}'))
    value = 0
    for token in reversed(body.split(',')):
        value = (value << 8) | int(token.strip(), 0)
    return value
def write_lines(lines, file):
    """Write (text, ...) tuples back to *file*; only the first tuple
    element (the raw line text) is written."""
    with open(file, "w") as out:
        out.write(''.join(entry[0] for entry in lines))
def read_lines(file):
    """Read all lines from *file*.

    When the given path does not exist, fall back to its basename in
    the current directory before opening.
    """
    if not os.path.exists(file):
        fallback = os.path.basename(file)
        if os.path.exists(fallback):
            file = fallback
    with open(file, 'r') as src:
        return src.readlines()
def expand_file_value(path, value_str):
    """Expand a '{ FILE: name[, name...] }' value into the concatenated
    bytes of the referenced files (resolved relative to *path*).

    Returns an empty bytearray when *value_str* has no FILE: pattern.
    """
    result = bytearray()
    match = re.match(r"\{\s*FILE:(.+)\}", value_str)
    if match:
        file_list = match.group(1).split(',')
        for file in file_list:
            file = file.strip()
            bin_path = os.path.join(path, file)
            # 'with' closes each handle (previously leaked).
            with open(bin_path, 'rb') as fin:
                result.extend(bytearray(fin.read()))
        # BUGFIX: removed leftover debug "print('\n\n result ', result)"
        # that polluted stdout of every caller.
    return result
class ExpressionEval(ast.NodeVisitor):
    """Safe evaluator for simple config expressions.

    Parses the expression with ast and walks the tree, supporting
    arithmetic/bitwise/comparison operators, boolean and/or, a
    ternary(cond, a, b) helper, and offset()/length() lookups that are
    delegated to a caller-provided variable resolver.  Anything else
    raises ValueError.
    """
    # Map AST operator node types to their concrete implementations.
    operators = {
        ast.Add: op.add,
        ast.Sub: op.sub,
        ast.Mult: op.mul,
        ast.Div: op.floordiv,
        ast.Mod: op.mod,
        ast.Eq: op.eq,
        ast.NotEq: op.ne,
        ast.Gt: op.gt,
        ast.Lt: op.lt,
        ast.GtE: op.ge,
        ast.LtE: op.le,
        ast.BitXor: op.xor,
        ast.BitAnd: op.and_,
        ast.BitOr: op.or_,
        ast.Invert: op.invert,
        ast.USub: op.neg
    }

    def __init__(self):
        self._debug = False
        self._expression = ''
        self._namespace = {}
        self._get_variable = None

    def eval(self, expr, vars={}):
        """Evaluate *expr*.  *vars* is either a dict namespace or a
        callable variable resolver.  (The mutable default is never
        mutated here, so it is safe in practice.)"""
        self._expression = expr
        if type(vars) is dict:
            self._namespace = vars
            self._get_variable = None
        else:
            self._namespace = {}
            self._get_variable = vars
        node = ast.parse(self._expression, mode='eval')
        result = self.visit(node.body)
        if self._debug:
            print('EVAL [ %s ] = %s' % (expr, str(result)))
        return result

    def visit_Name(self, node):
        """Resolve an identifier via the resolver or the namespace."""
        if self._get_variable is not None:
            return self._get_variable(node.id)
        else:
            return self._namespace[node.id]

    def visit_Num(self, node):
        """Numeric literal."""
        return node.n

    def visit_NameConstant(self, node):
        """True / False / None literal."""
        return node.value

    def visit_BoolOp(self, node):
        """Short-circuit 'and' / 'or'; result normalized to a bool."""
        result = False
        if isinstance(node.op, ast.And):
            for value in node.values:
                result = self.visit(value)
                if not result:
                    break
        elif isinstance(node.op, ast.Or):
            for value in node.values:
                result = self.visit(value)
                if result:
                    break
        return True if result else False

    def visit_UnaryOp(self, node):
        """Unary '-' and '~'."""
        val = self.visit(node.operand)
        return ExpressionEval.operators[type(node.op)](val)

    def visit_BinOp(self, node):
        """Binary arithmetic / bitwise operators."""
        lhs = self.visit(node.left)
        rhs = self.visit(node.right)
        return ExpressionEval.operators[type(node.op)](lhs, rhs)

    def visit_Compare(self, node):
        """Chained comparisons (a < b <= c), short-circuiting."""
        right = self.visit(node.left)
        result = True
        for operation, comp in zip(node.ops, node.comparators):
            if not result:
                break
            left = right
            right = self.visit(comp)
            result = ExpressionEval.operators[type(operation)](left, right)
        return result

    def visit_Call(self, node):
        """Allowed calls: ternary(cond, a, b), offset(name), length(name)."""
        if node.func.id in ['ternary']:
            condition = self.visit(node.args[0])
            val_true = self.visit(node.args[1])
            val_false = self.visit(node.args[2])
            return val_true if condition else val_false
        elif node.func.id in ['offset', 'length']:
            if self._get_variable is not None:
                return self._get_variable(node.args[0].s, node.func.id)
        else:
            raise ValueError("Unsupported function: " + repr(node))

    def generic_visit(self, node):
        """Reject any AST node type not explicitly handled above."""
        raise ValueError("malformed node or string: " + repr(node))
class CFG_YAML():
TEMPLATE = 'template'
CONFIGS = 'configs'
VARIABLE = 'variable'
    def __init__(self):
        # When True, consumed raw lines are recorded in full_lines.
        self.log_line = False
        # Whether !expand template directives are honored.
        self.allow_template = False
        # Parsed configuration tree (OrderedDict) and template subtree.
        self.cfg_tree = None
        self.tmp_tree = None
        # Variable and DEFINE substitution dictionaries.
        self.var_dict = None
        self.def_dict = {}
        # Base directory used to resolve !include paths.
        self.yaml_path = ''
        # Pending input lines (consumed front-first) and the raw log.
        self.lines = []
        self.full_lines = []
        self.index = 0
        # '!expand { TMPL : [args] }' directive matcher.
        self.re_expand = re.compile(
            r'(.+:\s+|\s*\-\s*)!expand\s+\{\s*(\w+_TMPL)\s*:\s*\[(.+)]\s*\}')
        # '!include path' directive matcher.
        self.re_include = re.compile(r'(.+:\s+|\s*\-\s*)!include\s+(.+)')
@staticmethod
def count_indent(line):
return next((i for i, c in enumerate(line) if not c.isspace()),
len(line))
@staticmethod
def substitue_args(text, arg_dict):
for arg in arg_dict:
text = text.replace('$' + arg, arg_dict[arg])
return text
    @staticmethod
    def dprint(*args):
        """Debug print hook; intentionally a no-op in production."""
        pass
    def process_include(self, line, insert=True):
        """Expand an '!include file' directive.

        The included file's lines are re-indented to match the including
        position and, when *insert* is True, prepended to the pending
        parse queue.  Returns the adjusted lines.  A '>' marker at the
        start of the included content means "append at the same line".
        """
        match = self.re_include.match(line)
        if not match:
            raise Exception("Invalid !include format '%s' !" % line.strip())
        prefix = match.group(1)
        include = match.group(2)
        if prefix.strip() == '-':
            # List-item include stays at the same indent level.
            prefix = ''
            adjust = 0
        else:
            # Mapping-key include nests two spaces deeper.
            adjust = 2
        include = strip_quote(include)
        request = CFG_YAML.count_indent(line) + adjust
        if self.log_line:
            # remove the include line itself
            del self.full_lines[-1]
        inc_path = os.path.join(self.yaml_path, include)
        if not os.path.exists(inc_path):
            # try relative path to project root
            try_path = os.path.join(os.path.dirname(os.path.realpath(__file__)
                                                    ), "../..", include)
            if os.path.exists(try_path):
                inc_path = try_path
            else:
                raise Exception("ERROR: Cannot open file '%s'." % inc_path)
        lines = read_lines(inc_path)
        current = 0
        same_line = False
        # Find the first meaningful line to establish the base indent.
        for idx, each in enumerate(lines):
            start = each.lstrip()
            if start == '' or start[0] == '#':
                continue
            if start[0] == '>':
                # append the content directly at the same line
                same_line = True
            start = idx
            current = CFG_YAML.count_indent(each)
            break
        lines = lines[start+1:] if same_line else lines[start:]
        leading = ''
        if same_line:
            request = len(prefix)
            leading = '>'
        # Re-anchor every included line at the requested indentation.
        lines = [prefix + '%s\n' % leading] + [' ' * request +
                                               i[current:] for i in lines]
        if insert:
            self.lines = lines + self.lines
        return lines
def process_expand(self, line):
match = self.re_expand.match(line)
if not match:
raise Exception("Invalid !expand format '%s' !" % line.strip())
lines = []
prefix = match.group(1)
temp_name = match.group(2)
args = match.group(3)
if prefix.strip() == '-':
indent = 0
else:
indent = 2
lines = self.process_expand_template(temp_name, prefix, args, indent)
self.lines = lines + self.lines
    def process_expand_template(self, temp_name, prefix, args, indent=2):
        """Instantiate template *temp_name*: substitute file-level defines
        and the positional '$(n)' arguments, then re-indent the text
        relative to *prefix*.  Returns the resulting list of lines."""
        # expand text with arg substitution
        if temp_name not in self.tmp_tree:
            raise Exception("Could not find template '%s' !" % temp_name)
        parts = args.split(',')
        parts = [i.strip() for i in parts]
        num = len(parts)
        # positional args are referenced as $(1), $(2), ... in the template
        arg_dict = dict(zip(['(%d)' % (i + 1) for i in range(num)], parts))
        str_data = self.tmp_tree[temp_name]
        text = DefTemplate(str_data).safe_substitute(self.def_dict)
        text = CFG_YAML.substitue_args(text, arg_dict)
        target = CFG_YAML.count_indent(prefix) + indent
        current = CFG_YAML.count_indent(text)
        padding = target * ' '
        if indent == 0:
            leading = []
        else:
            leading = [prefix + '\n']
        text = leading + [(padding + i + '\n')[current:]
                          for i in text.splitlines()]
        return text
def load_file(self, yaml_file):
self.index = 0
self.lines = read_lines(yaml_file)
def peek_line(self):
if len(self.lines) == 0:
return None
else:
return self.lines[0]
def put_line(self, line):
self.lines.insert(0, line)
if self.log_line:
del self.full_lines[-1]
def get_line(self):
if len(self.lines) == 0:
return None
else:
line = self.lines.pop(0)
if self.log_line:
self.full_lines.append(line.rstrip())
return line
    def get_multiple_line(self, indent):
        """Consume and return (concatenated) the block of lines indented
        deeper than *indent* — i.e. the body of a 'key: >' multi-line
        value.  Blank lines inside the block are consumed but not kept."""
        text = ''
        newind = indent + 1
        while True:
            line = self.peek_line()
            if line is None:
                break
            sline = line.strip()
            if sline != '':
                newind = CFG_YAML.count_indent(line)
                if newind <= indent:
                    # back at (or above) the parent level: block is done
                    break
            self.get_line()
            if sline != '':
                text = text + line
        return text
def traverse_cfg_tree(self, handler):
def _traverse_cfg_tree(root, level=0):
# config structure
for key in root:
if type(root[key]) is OrderedDict:
level += 1
handler(key, root[key], level)
_traverse_cfg_tree(root[key], level)
level -= 1
_traverse_cfg_tree(self.cfg_tree)
def count(self):
def _count(name, cfgs, level):
num[0] += 1
num = [0]
self.traverse_cfg_tree(_count)
return num[0]
    def parse(self, parent_name='', curr=None, level=0):
        """Recursively parse the queued YAML lines into nested OrderedDicts.

        Handles '- ' list markers, 'key: value' pairs, 'key: >' multi-line
        values, and the custom '!include' / '!expand' directives.  Returns
        the OrderedDict built for the current indentation level; recursion
        happens whenever a deeper indent is seen.
        """
        child = None
        last_indent = None
        key = ''
        # tracks keys already seen at this level to detect duplicates
        temp_chk = {}
        while True:
            line = self.get_line()
            if line is None:
                break
            curr_line = line.strip()
            if curr_line == '' or curr_line[0] == '#':
                continue
            indent = CFG_YAML.count_indent(line)
            if last_indent is None:
                last_indent = indent
            if indent != last_indent:
                # outside of current block, put the line back to queue
                self.put_line(' ' * indent + curr_line)
            if curr_line.endswith(': >'):
                # multiline marker
                old_count = len(self.full_lines)
                line = self.get_multiple_line(indent)
                if self.log_line and not self.allow_template \
                        and '!include ' in line:
                    # expand include in template
                    new_lines = []
                    lines = line.splitlines()
                    for idx, each in enumerate(lines):
                        if '!include ' in each:
                            new_line = ''.join(self.process_include(each,
                                               False))
                            new_lines.append(new_line)
                        else:
                            new_lines.append(each)
                    self.full_lines = self.full_lines[:old_count] + new_lines
                curr_line = curr_line + line
            if indent > last_indent:
                # child nodes
                if child is None:
                    raise Exception('Unexpected format at line: %s'
                                    % (curr_line))
                level += 1
                self.parse(key, child, level)
                level -= 1
                line = self.peek_line()
                if line is not None:
                    curr_line = line.strip()
                    indent = CFG_YAML.count_indent(line)
                    if indent >= last_indent:
                        # consume the line
                        self.get_line()
                else:
                    # end of file
                    indent = -1
            if curr is None:
                curr = OrderedDict()
            if indent < last_indent:
                # dedent: this level is complete
                return curr
            marker1 = curr_line[0]
            marker2 = curr_line[-1]
            start = 1 if marker1 == '-' else 0
            pos = curr_line.find(': ')
            if pos > 0:
                # 'key: value' form
                child = None
                key = curr_line[start:pos].strip()
                if curr_line[pos + 2] == '>':
                    curr[key] = curr_line[pos + 3:]
                else:
                    # XXXX: !include / !expand
                    if '!include ' in curr_line:
                        self.process_include(line)
                    elif '!expand ' in curr_line:
                        if self.allow_template and not self.log_line:
                            self.process_expand(line)
                    else:
                        value_str = curr_line[pos + 2:].strip()
                        curr[key] = value_str
                        if self.log_line and value_str[0] == '{':
                            # expand {FILE: xxxx} format in the log line
                            if value_str[1:].rstrip().startswith('FILE:'):
                                value_bytes = expand_file_value(
                                    self.yaml_path, value_str)
                                value_str = bytes_to_bracket_str(value_bytes)
                                self.full_lines[-1] = line[
                                    :indent] + curr_line[:pos + 2] + value_str
            elif marker2 == ':':
                # 'key:' form introduces a nested block
                child = OrderedDict()
                key = curr_line[start:-1].strip()
                if key == '$ACTION':
                    # special virtual nodes, rename to ensure unique key
                    key = '$ACTION_%04X' % self.index
                    self.index += 1
                if key in curr:
                    if key not in temp_chk:
                        # check for duplicated keys at same level
                        temp_chk[key] = 1
                    else:
                        raise Exception("Duplicated item '%s:%s' found !"
                                        % (parent_name, key))
                curr[key] = child
                if self.var_dict is None and key == CFG_YAML.VARIABLE:
                    self.var_dict = child
                if self.tmp_tree is None and key == CFG_YAML.TEMPLATE:
                    self.tmp_tree = child
                if self.var_dict:
                    for each in self.var_dict:
                        txt = self.var_dict[each]
                        if type(txt) is str:
                            self.def_dict['(%s)' % each] = txt
                if self.tmp_tree and key == CFG_YAML.CONFIGS:
                    # apply template for the main configs
                    self.allow_template = True
            else:
                child = None
                # - !include cfg_opt.yaml
                if '!include ' in curr_line:
                    self.process_include(line)
        return curr
def load_yaml(self, opt_file):
self.var_dict = None
self.yaml_path = os.path.dirname(opt_file)
self.load_file(opt_file)
yaml_tree = self.parse()
self.tmp_tree = yaml_tree[CFG_YAML.TEMPLATE]
self.cfg_tree = yaml_tree[CFG_YAML.CONFIGS]
return self.cfg_tree
def expand_yaml(self, opt_file):
self.log_line = True
self.load_yaml(opt_file)
self.log_line = False
text = '\n'.join(self.full_lines)
self.full_lines = []
return text
class DefTemplate(string.Template):
    """string.Template variant whose '$' placeholders may additionally be
    parenthesized upper-case names, e.g. '$(CFG_DEBUG)' as well as
    '$CFG_DEBUG'."""
    idpattern = '\\([_A-Z][_A-Z0-9]*\\)|[_A-Z][_A-Z0-9]*'
class CGenYamlCfg:
    """In-memory model of a YAML-described configuration: flattens the
    tree produced by CFG_YAML into items and converts between value
    strings, binary blobs, and delta (.dlt) files."""
    # key under which a branch node stores its computed offset/length
    STRUCT = '$STRUCT'
    # length-suffix letter -> width in bits (e.g. '2W' = 2 * 16 bits)
    bits_width = {'b': 1, 'B': 8, 'W': 16, 'D': 32, 'Q': 64}
    # built-in Combo option sets
    builtin_option = {'$EN_DIS': [('0', 'Disable'), ('1', 'Enable')]}
    # NOTE(review): referenced by code outside this view — confirm usage
    exclude_struct = ['FSP_UPD_HEADER', 'FSPT_ARCH_UPD',
                      'FSPM_ARCH_UPD', 'FSPS_ARCH_UPD',
                      'GPIO_GPP_*', 'GPIO_CFG_DATA',
                      'GpioConfPad*', 'GpioPinConfig',
                      'BOOT_OPTION*', 'PLATFORMID_CFG_DATA', '\\w+_Half[01]']
    include_tag = ['GPIO_CFG_DATA']
    # attributes a config item may legally carry in the YAML source
    keyword_set = set(['name', 'type', 'option', 'help', 'length',
                       'value', 'order', 'struct', 'condition'])
    def __init__(self):
        """Create an empty generator; initialize() resets the remaining
        per-configuration state."""
        self._mode = ''          # e.g. 'FSP' once detect_fsp() succeeds
        self._debug = False      # enables extra prints (parse_macros)
        self._macro_dict = {}    # -D macros parsed from the command line
        self.binseg_dict = {}    # segment name -> position in the binary
        self.initialize()
    def initialize(self):
        """Reset all parse/build state to a pristine configuration."""
        self._old_bin = None     # last binary loaded via load_default_from_bin
        self._cfg_tree = {}      # nested OrderedDict config tree
        self._tmp_tree = {}      # template section of the YAML
        self._cfg_list = []      # flattened list of config items
        self._cfg_page = {'root': {'title': '', 'child': []}}
        self._cur_page = ''      # page assigned to newly added items
        self._var_dict = {}      # name -> value variables
        self._def_dict = {}      # '$(NAME)' substitution dictionary
        self._yaml_path = ''     # directory of the loaded YAML file
@staticmethod
def deep_convert_dict(layer):
# convert OrderedDict to list + dict
new_list = layer
if isinstance(layer, OrderedDict):
new_list = list(layer.items())
for idx, pair in enumerate(new_list):
new_node = CGenYamlCfg.deep_convert_dict(pair[1])
new_list[idx] = dict({pair[0]: new_node})
return new_list
@staticmethod
def deep_convert_list(layer):
if isinstance(layer, list):
od = OrderedDict({})
for each in layer:
if isinstance(each, dict):
key = next(iter(each))
od[key] = CGenYamlCfg.deep_convert_list(each[key])
return od
else:
return layer
    @staticmethod
    def expand_include_files(file_path, cur_dir=''):
        """Read *file_path* and recursively expand '!include' lines.

        Returns a list of (line, source_file, line_number) tuples so that
        later error messages can point back at the originating file.
        NOTE(review): cur_dir is reassigned inside the loop when the
        project-root fallback is taken, which changes the base directory
        for subsequent includes in the same file — confirm intended.
        """
        if cur_dir == '':
            cur_dir = os.path.dirname(file_path)
            file_path = os.path.basename(file_path)
        input_file_path = os.path.join(cur_dir, file_path)
        file = open(input_file_path, "r")
        lines = file.readlines()
        file.close()
        new_lines = []
        for line_num, line in enumerate(lines):
            match = re.match("^!include\\s*(.+)?$", line.strip())
            if match:
                inc_path = match.group(1)
                tmp_path = os.path.join(cur_dir, inc_path)
                org_path = tmp_path
                if not os.path.exists(tmp_path):
                    # fall back to two levels above this script
                    cur_dir = os.path.join(os.path.dirname
                                           (os.path.realpath(__file__)
                                            ), "..", "..")
                    tmp_path = os.path.join(cur_dir, inc_path)
                if not os.path.exists(tmp_path):
                    raise Exception("ERROR: Cannot open include\
 file '%s'." % org_path)
                else:
                    new_lines.append(('# Included from file: %s\n' % inc_path,
                                      tmp_path, 0))
                    new_lines.append(('# %s\n' % ('=' * 80), tmp_path, 0))
                    new_lines.extend(CGenYamlCfg.expand_include_files
                                     (inc_path, cur_dir))
            else:
                new_lines.append((line, input_file_path, line_num))
        return new_lines
@staticmethod
def format_struct_field_name(input, count=0):
name = ''
cap = True
if '_' in input:
input = input.lower()
for each in input:
if each == '_':
cap = True
continue
elif cap:
each = each.upper()
cap = False
name = name + each
if count > 1:
name = '%s[%d]' % (name, count)
return name
    def get_mode(self):
        # Current operating mode string ('' by default, 'FSP' once detected)
        return self._mode
    def set_mode(self, mode):
        # Set the operating mode string (e.g. 'FSP' from detect_fsp())
        self._mode = mode
    def get_last_error(self):
        # Interface-compatibility stub: errors are raised, never stored
        return ''
def get_variable(self, var, attr='value'):
if var in self._var_dict:
var = self._var_dict[var]
return var
item = self.locate_cfg_item(var, False)
if item is None:
raise ValueError("Cannot find variable '%s' !" % var)
if item:
if 'indx' in item:
item = self.get_item_by_index(item['indx'])
if attr == 'offset':
var = item['offset']
elif attr == 'length':
var = item['length']
elif attr == 'value':
var = self.get_cfg_item_value(item)
else:
raise ValueError("Unsupported variable attribute '%s' !" %
attr)
return var
    def eval(self, expr):
        """Evaluate expression *expr*, first replacing $(VAR) / $VAR
        references with their hex values, then delegating to
        ExpressionEval."""
        def _handler(pattern):
            # group(1) matches the $(VAR) form, group(2) the bare $VAR form
            if pattern.group(1):
                target = 1
            else:
                target = 2
            result = self.get_variable(pattern.group(target))
            if result is None:
                raise ValueError('Unknown variable $(%s) !' %
                                 pattern.group(target))
            return hex(result)
        expr_eval = ExpressionEval()
        if '$' in expr:
            # replace known variable first
            expr = re.sub(r'\$\(([_a-zA-Z][\w\.]*)\)|\$([_a-zA-Z][\w\.]*)',
                          _handler, expr)
        return expr_eval.eval(expr, self.get_variable)
def parse_macros(self, macro_def_str):
# ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
self._macro_dict = {}
is_expression = False
for macro in macro_def_str:
if macro.startswith('-D'):
is_expression = True
if len(macro) > 2:
macro = macro[2:]
else:
continue
if is_expression:
is_expression = False
match = re.match("(\\w+)=(.+)", macro)
if match:
self._macro_dict[match.group(1)] = match.group(2)
else:
match = re.match("(\\w+)", macro)
if match:
self._macro_dict[match.group(1)] = ''
if len(self._macro_dict) == 0:
error = 1
else:
error = 0
if self._debug:
print("INFO : Macro dictionary:")
for each in self._macro_dict:
print(" $(%s) = [ %s ]"
% (each, self._macro_dict[each]))
return error
def get_cfg_list(self, page_id=None):
if page_id is None:
# return full list
return self._cfg_list
else:
# build a new list for items under a page ID
cfgs = [i for i in self._cfg_list if i['cname'] and
(i['page'] == page_id)]
return cfgs
    def get_cfg_page(self):
        # Page tree: {'root': {'title': ..., 'child': [...]}}
        return self._cfg_page
    def get_cfg_item_length(self, item):
        # Item length is stored in bits
        return item['length']
def get_cfg_item_value(self, item, array=False):
value_str = item['value']
length = item['length']
return self.get_value(value_str, length, array)
    def format_value_to_str(self, value, bit_length, old_value=''):
        """Render integer *value* as text, mimicking the textual style of
        *old_value* (quoted string, '{..}' byte list, '0x' hex, or plain
        decimal when no old form is given)."""
        # value is always int
        length = (bit_length + 7) // 8
        fmt = ''
        if old_value.startswith('0x'):
            fmt = '0x'
        elif old_value and (old_value[0] in ['"', "'", '{']):
            fmt = old_value[0]
        else:
            fmt = ''
        bvalue = value_to_bytearray(value, length)
        if fmt in ['"', "'"]:
            # quoted string: strip trailing NUL padding
            svalue = bvalue.rstrip(b'\x00').decode()
            value_str = fmt + svalue + fmt
        elif fmt == "{":
            value_str = '{ ' + ', '.join(['0x%02x' % i for i in bvalue]) + ' }'
        elif fmt == '0x':
            hex_len = length * 2
            # keep a zero-padded width only if the old value was padded
            if len(old_value) == hex_len + 2:
                fstr = '0x%%0%dx' % hex_len
            else:
                fstr = '0x%x'
            value_str = fstr % value
        else:
            if length <= 2:
                value_str = '%d' % value
            elif length <= 8:
                value_str = '0x%x' % value
            else:
                value_str = '{ ' + ', '.join(['0x%02x' % i for i in
                                              bvalue]) + ' }'
        return value_str
def reformat_value_str(self, value_str, bit_length, old_value=None):
value = self.parse_value(value_str, bit_length, False)
if old_value is None:
old_value = value_str
new_value = self.format_value_to_str(value, bit_length, old_value)
return new_value
    def get_value(self, value_str, bit_length, array=True):
        """Decode a value string into a bytearray (array=True) or a
        little-endian integer.

        Quoted strings become their UTF-8 bytes; '{..}' lists and plain
        comma-separated numbers are packed LSB-first.
        """
        value_str = value_str.strip()
        if value_str[0] == "'" and value_str[-1] == "'" or \
           value_str[0] == '"' and value_str[-1] == '"':
            value_str = value_str[1:-1]
            bvalue = bytearray(value_str.encode())
            if len(bvalue) == 0:
                # an empty quoted string still occupies one NUL byte
                bvalue = bytearray(b'\x00')
            if array:
                return bvalue
            else:
                return bytes_to_value(bvalue)
        else:
            if value_str[0] in '{':
                value_str = value_str[1:-1].strip()
            value = 0
            # accumulate bytes most-significant-first by reversing the list
            for each in value_str.split(',')[::-1]:
                each = each.strip()
                value = (value << 8) | int(each, 0)
            if array:
                length = (bit_length + 7) // 8
                return value_to_bytearray(value, length)
            else:
                return value
def parse_value(self, value_str, bit_length, array=True):
length = (bit_length + 7) // 8
if check_quote(value_str):
value_str = bytes_to_bracket_str(value_str[1:-1].encode())
elif (',' in value_str) and (value_str[0] != '{'):
value_str = '{ %s }' % value_str
if value_str[0] == '{':
result = expand_file_value(self._yaml_path, value_str)
if len(result) == 0:
bin_list = value_str[1:-1].split(',')
value = 0
bit_len = 0
unit_len = 1
for idx, element in enumerate(bin_list):
each = element.strip()
if len(each) == 0:
continue
in_bit_field = False
if each[0] in "'" + '"':
each_value = bytearray(each[1:-1], 'utf-8')
elif ':' in each:
match = re.match("^(.+):(\\d+)([b|B|W|D|Q])$", each)
if match is None:
raise SystemExit("Exception: Invald value\
list format '%s' !" % each)
if match.group(1) == '0' and match.group(2) == '0':
unit_len = CGenYamlCfg.bits_width[match.group(3)
] // 8
cur_bit_len = int(match.group(2)
) * CGenYamlCfg.bits_width[
match.group(3)]
value += ((self.eval(match.group(1)) & (
1 << cur_bit_len) - 1)) << bit_len
bit_len += cur_bit_len
each_value = bytearray()
if idx + 1 < len(bin_list):
in_bit_field = True
else:
try:
each_value = value_to_bytearray(
self.eval(each.strip()), unit_len)
except Exception:
raise SystemExit("Exception: Value %d cannot \
fit into %s bytes !" % (each, unit_len))
if not in_bit_field:
if bit_len > 0:
if bit_len % 8 != 0:
raise SystemExit("Exception: Invalid bit \
field alignment '%s' !" % value_str)
result.extend(value_to_bytes(value, bit_len // 8))
value = 0
bit_len = 0
result.extend(each_value)
elif check_quote(value_str):
result = bytearray(value_str[1:-1], 'utf-8') # Excluding quotes
else:
result = value_to_bytearray(self.eval(value_str), length)
if len(result) < length:
result.extend(b'\x00' * (length - len(result)))
elif len(result) > length:
raise SystemExit("Exception: Value '%s' is too big to fit \
into %d bytes !" % (value_str, length))
if array:
return result
else:
return bytes_to_value(result)
return result
    def get_cfg_item_options(self, item):
        """Return the (value, text) option pairs for a 'Combo' item.

        Options come either from the built-in sets (e.g. '$EN_DIS') or
        from the item's own ';'- or ', '-separated 'val:text' list.
        Non-Combo items yield an empty list.
        """
        tmp_list = []
        if item['type'] == "Combo":
            if item['option'] in CGenYamlCfg.builtin_option:
                for op_val, op_str in CGenYamlCfg.builtin_option[item['option'
                                                                      ]]:
                    tmp_list.append((op_val, op_str))
            else:
                # ';' takes precedence as the separator over ', '
                if item['option'].find(';') != -1:
                    opt_list = item['option'].split(';')
                else:
                    opt_list = re.split(', ', item['option'])
                for option in opt_list:
                    option = option.strip()
                    try:
                        if option.find(':') != -1:
                            (op_val, op_str) = option.split(':')
                        else:
                            # value doubles as display text
                            op_val = option
                            op_str = option
                    except Exception:
                        raise SystemExit("Exception: Invalid \
option format '%s' !" % option)
                    tmp_list.append((op_val, op_str))
        return tmp_list
def get_page_title(self, page_id, top=None):
if top is None:
top = self.get_cfg_page()['root']
for node in top['child']:
page_key = next(iter(node))
if page_id == page_key:
return node[page_key]['title']
else:
result = self.get_page_title(page_id, node[page_key])
if result is not None:
return result
return None
def print_pages(self, top=None, level=0):
if top is None:
top = self.get_cfg_page()['root']
for node in top['child']:
page_id = next(iter(node))
print('%s%s: %s' % (' ' * level, page_id, node[page_id]['title']))
level += 1
self.print_pages(node[page_id], level)
level -= 1
    def get_item_by_index(self, index):
        # Items are indexed by their position in the flattened list
        return self._cfg_list[index]
def get_item_by_path(self, path):
node = self.locate_cfg_item(path)
if node:
return self.get_item_by_index(node['indx'])
else:
return None
    def locate_cfg_path(self, item):
        """Return the list of keys leading to *item* (identity match) in
        the config tree, or None when it is not present."""
        def _locate_cfg_path(root, level=0):
            # config structure
            if item is root:
                return path
            for key in root:
                if type(root[key]) is OrderedDict:
                    level += 1
                    path.append(key)
                    ret = _locate_cfg_path(root[key], level)
                    if ret:
                        return ret
                    # backtrack: this subtree did not contain the item
                    path.pop()
            return None
        path = []
        return _locate_cfg_path(self._cfg_tree)
def locate_cfg_item(self, path, allow_exp=True):
def _locate_cfg_item(root, path, level=0):
if len(path) == level:
return root
next_root = root.get(path[level], None)
if next_root is None:
if allow_exp:
raise Exception('Not a valid CFG config option path: %s' %
'.'.join(path[:level+1]))
else:
return None
return _locate_cfg_item(next_root, path, level + 1)
path_nodes = path.split('.')
return _locate_cfg_item(self._cfg_tree, path_nodes)
def traverse_cfg_tree(self, handler, top=None):
def _traverse_cfg_tree(root, level=0):
# config structure
for key in root:
if type(root[key]) is OrderedDict:
level += 1
handler(key, root[key], level)
_traverse_cfg_tree(root[key], level)
level -= 1
if top is None:
top = self._cfg_tree
_traverse_cfg_tree(top)
    def print_cfgs(self, root=None, short=True, print_level=256):
        """Print every config item as 'offset:length(bits)  name : value'.

        :param root:        unused; traversal always covers the full tree
        :param short:       elide values longer than 40 characters
        :param print_level: suppress items deeper than this tree level
        """
        def _print_cfgs(name, cfgs, level):

            if 'indx' in cfgs:
                act_cfg = self.get_item_by_index(cfgs['indx'])
            else:
                offset = 0
                length = 0
                value = ''
                if CGenYamlCfg.STRUCT in cfgs:
                    cfg = cfgs[CGenYamlCfg.STRUCT]
                    offset = int(cfg['offset'])
                    length = int(cfg['length'])
                    if 'value' in cfg:
                        value = cfg['value']
                if length == 0:
                    return
                act_cfg = dict({'value': value, 'offset': offset,
                                'length': length})
            value = act_cfg['value']
            bit_len = act_cfg['length']
            offset = (act_cfg['offset'] + 7) // 8
            if value != '':
                try:
                    value = self.reformat_value_str(act_cfg['value'],
                                                    act_cfg['length'])
                except Exception:
                    value = act_cfg['value']
            length = bit_len // 8
            # NOTE(review): "'' * 4" always evaluates to '' — possibly
            # "' ' * 4" (blank padding) was intended here; confirm.
            bit_len = '(%db)' % bit_len if bit_len % 8 else '' * 4
            if level <= print_level:
                if short and len(value) > 40:
                    value = '%s ... %s' % (value[:20], value[-20:])
                print('%04X:%04X%-6s %s%s : %s' % (offset, length, bit_len,
                                                   '  ' * level, name, value))
        self.traverse_cfg_tree(_print_cfgs)
    def build_var_dict(self):
        """Populate self._var_dict with _LENGTH_<name>_ / _OFFSET_<name>_
        byte values for the top two tree levels, plus the overall
        _LENGTH_.  Returns 0."""
        def _build_var_dict(name, cfgs, level):
            if level <= 2:
                if CGenYamlCfg.STRUCT in cfgs:
                    struct_info = cfgs[CGenYamlCfg.STRUCT]
                    # offsets/lengths are tracked in bits; expose bytes
                    self._var_dict['_LENGTH_%s_' % name] = struct_info[
                        'length'] // 8
                    self._var_dict['_OFFSET_%s_' % name] = struct_info[
                        'offset'] // 8
        self._var_dict = {}
        self.traverse_cfg_tree(_build_var_dict)
        self._var_dict['_LENGTH_'] = self._cfg_tree[CGenYamlCfg.STRUCT][
            'length'] // 8
        return 0
def add_cfg_page(self, child, parent, title=''):
def _add_cfg_page(cfg_page, child, parent):
key = next(iter(cfg_page))
if parent == key:
cfg_page[key]['child'].append({child: {'title': title,
'child': []}})
return True
else:
result = False
for each in cfg_page[key]['child']:
if _add_cfg_page(each, child, parent):
result = True
break
return result
return _add_cfg_page(self._cfg_page, child, parent)
    def set_cur_page(self, page_str):
        """Switch the 'current page' used for subsequently added items.

        *page_str* may hold several comma-separated entries; each entry is
        either an existing page id, or 'id:parent:"Title"' which defines a
        new page under *parent* ('' meaning root) before selecting it.
        """
        if not page_str:
            return
        if ',' in page_str:
            page_list = page_str.split(',')
        else:
            page_list = [page_str]
        for page_str in page_list:
            parts = page_str.split(':')
            if len(parts) in [1, 3]:
                page = parts[0].strip()
                if len(parts) == 3:
                    # it is a new page definition, add it into tree
                    parent = parts[1] if parts[1] else 'root'
                    parent = parent.strip()
                    if parts[2][0] == '"' and parts[2][-1] == '"':
                        parts[2] = parts[2][1:-1]
                    if not self.add_cfg_page(page, parent, parts[2]):
                        raise SystemExit("Error: Cannot find parent page \
'%s'!" % parent)
            else:
                raise SystemExit("Error: Invalid page format '%s' !"
                                 % page_str)
            self._cur_page = page
def extend_variable(self, line):
# replace all variables
if line == '':
return line
loop = 2
while loop > 0:
line_after = DefTemplate(line).safe_substitute(self._def_dict)
if line == line_after:
break
loop -= 1
line = line_after
return line_after
def reformat_number_per_type(self, itype, value):
if check_quote(value) or value.startswith('{'):
return value
parts = itype.split(',')
if len(parts) > 3 and parts[0] == 'EditNum':
num_fmt = parts[1].strip()
else:
num_fmt = ''
if num_fmt == 'HEX' and not value.startswith('0x'):
value = '0x%X' % int(value, 10)
elif num_fmt == 'DEC' and value.startswith('0x'):
value = '%d' % int(value, 16)
return value
    def add_cfg_item(self, name, item, offset, path):
        """Validate one leaf YAML node and append it to self._cfg_list.

        Returns the item length in bits (0 for virtual '$' nodes).
        Raises on unknown attributes, malformed length strings,
        misaligned bit offsets, or invalid identifiers.
        """
        self.set_cur_page(item.get('page', ''))
        if name[0] == '$':
            # skip all virtual node
            return 0
        if not set(item).issubset(CGenYamlCfg.keyword_set):
            for each in list(item):
                if each not in CGenYamlCfg.keyword_set:
                    raise Exception("Invalid attribute '%s' for '%s'!" %
                                    (each, '.'.join(path)))
        length = item.get('length', 0)
        if type(length) is str:
            # string lengths use bit-width suffixes, e.g. '2W' = 32 bits
            match = re.match("^(\\d+)([b|B|W|D|Q])([B|W|D|Q]?)\\s*$", length)
            if match:
                unit_len = CGenYamlCfg.bits_width[match.group(2)]
                length = int(match.group(1), 10) * unit_len
            else:
                try:
                    length = int(length, 0) * 8
                except Exception:
                    raise Exception("Invalid length field '%s' for '%s' !" %
                                    (length, '.'.join(path)))
                if offset % 8 > 0:
                    raise Exception("Invalid alignment for field '%s' for \
'%s' !" % (name, '.'.join(path)))
        else:
            # define is length in bytes
            length = length * 8
        if not name.isidentifier():
            raise Exception("Invalid config name '%s' for '%s' !" %
                            (name, '.'.join(path)))
        itype = str(item.get('type', 'Reserved'))
        value = str(item.get('value', ''))
        if value:
            if not (check_quote(value) or value.startswith('{')):
                if ',' in value:
                    value = '{ %s }' % value
                else:
                    value = self.reformat_number_per_type(itype, value)
        help = str(item.get('help', ''))
        if '\n' in help:
            help = ' '.join([i.strip() for i in help.splitlines()])
        option = str(item.get('option', ''))
        if '\n' in option:
            option = ' '.join([i.strip() for i in option.splitlines()])
        # extend variables for value and condition
        condition = str(item.get('condition', ''))
        if condition:
            condition = self.extend_variable(condition)
        value = self.extend_variable(value)
        order = str(item.get('order', ''))
        if order:
            if '.' in order:
                # only the hex major part participates in ordering
                (major, minor) = order.split('.')
                order = int(major, 16)
            else:
                order = int(order, 16)
        else:
            order = offset
        cfg_item = dict()
        cfg_item['length'] = length
        cfg_item['offset'] = offset
        cfg_item['value'] = value
        cfg_item['type'] = itype
        cfg_item['cname'] = str(name)
        cfg_item['name'] = str(item.get('name', ''))
        cfg_item['help'] = help
        cfg_item['option'] = option
        cfg_item['page'] = self._cur_page
        cfg_item['order'] = order
        cfg_item['path'] = '.'.join(path)
        cfg_item['condition'] = condition
        if 'struct' in item:
            cfg_item['struct'] = item['struct']
        self._cfg_list.append(cfg_item)
        item['indx'] = len(self._cfg_list) - 1
        # remove used info for reducing pkl size
        item.pop('option', None)
        item.pop('condition', None)
        item.pop('help', None)
        item.pop('name', None)
        item.pop('page', None)
        return length
def build_cfg_list(self, cfg_name='', top=None, path=[],
info={'offset': 0}):
if top is None:
top = self._cfg_tree
info.clear()
info = {'offset': 0}
start = info['offset']
is_leaf = True
for key in top:
path.append(key)
if type(top[key]) is OrderedDict:
is_leaf = False
self.build_cfg_list(key, top[key], path, info)
path.pop()
if is_leaf:
length = self.add_cfg_item(cfg_name, top, info['offset'], path)
info['offset'] += length
elif cfg_name == '' or (cfg_name and cfg_name[0] != '$'):
# check first element for struct
first = next(iter(top))
struct_str = CGenYamlCfg.STRUCT
if first != struct_str:
struct_node = OrderedDict({})
top[struct_str] = struct_node
top.move_to_end(struct_str, False)
else:
struct_node = top[struct_str]
struct_node['offset'] = start
struct_node['length'] = info['offset'] - start
if struct_node['length'] % 8 != 0:
raise SystemExit("Error: Bits length not aligned for %s !" %
str(path))
    def get_field_value(self, top=None):
        """Serialize the structure at *top* (default: the whole tree) into
        a bytearray by packing each item's value at its bit offset."""
        def _get_field_value(name, cfgs, level):
            if 'indx' in cfgs:
                act_cfg = self.get_item_by_index(cfgs['indx'])
                if act_cfg['length'] == 0:
                    return
                value = self.get_value(act_cfg['value'], act_cfg['length'],
                                       False)
                # offsets are relative to the enclosing structure
                set_bits_to_bytes(result, act_cfg['offset'] -
                                  struct_info['offset'], act_cfg['length'],
                                  value)
        if top is None:
            top = self._cfg_tree
        struct_info = top[CGenYamlCfg.STRUCT]
        result = bytearray((struct_info['length'] + 7) // 8)
        self.traverse_cfg_tree(_get_field_value, top)
        return result
data_diff = ''
    def find_data_difference(self, act_val, act_cfg):
        """Append a human-readable diff between the binary value
        (*act_val*) and the config-file value of one item to
        self.data_diff.

        NOTE(review): self.available_fv is indexed up to [2] whenever it
        holds more than one entry; with exactly two entries this raises
        IndexError — confirm the expected FV counts.
        """
        # checks for any difference between BSF and Binary file
        config_val = ''
        if act_val != act_cfg['value']:
            if 'DEC' in act_cfg['type']:
                bsf_val = '0x%x' % int(act_val)
                if bsf_val != act_cfg['value']:
                    config_val = bsf_val
                else:
                    config_val = ''
            else:
                config_val = act_val
            available_fv1 = 'none'
            available_fv2 = 'none'
            if self.detect_fsp():
                if len(self.available_fv) >= 1:
                    if len(self.available_fv) > 1:
                        available_fv1 = self.available_fv[1]
                        if self.available_fv[2]:
                            available_fv2 = self.available_fv[2]
                    else:
                        available_fv1 = self.available_fv[1]
            if act_cfg['length'] == 16:
                config_val = int(config_val, 16)
                config_val = '0x%x' % config_val
                act_cfg['value'] = int(
                    act_cfg['value'], 16)
                act_cfg['value'] = '0x%x' % \
                    act_cfg['value']
        if config_val:
            string = ('.' + act_cfg['cname'])
            if (act_cfg['path'].endswith(self.available_fv[0] + string)
                or act_cfg['path'].endswith(available_fv1 + string)
                or act_cfg['path'].endswith(available_fv2 + string)) \
                    and 'BsfSkip' not in act_cfg['cname'] \
                    and 'Reserved' not in act_cfg['name']:
                if act_cfg['option'] != '':
                    if act_cfg['length'] == 8:
                        config_val = int(config_val, 16)
                        config_val = '0x%x' % config_val
                        act_cfg['value'] = int(
                            act_cfg['value'], 16)
                        act_cfg['value'] = '0x%x' % \
                            act_cfg['value']
                    option = act_cfg['option']
                    cfg_val = ''
                    bin_val = ''
                    for i in option.split(','):
                        if act_cfg['value'] in i:
                            bin_val = i
                        elif config_val in i:
                            cfg_val = i
                    if cfg_val != '' and bin_val != '':
                        self.data_diff += '\n\nBinary: ' \
                            + act_cfg['name'] \
                            + ': ' + bin_val.replace(' ', '') \
                            + '\nConfig file: ' \
                            + act_cfg['name'] + ': ' \
                            + cfg_val.replace(' ', '') + '\n'
                else:
                    self.data_diff += '\n\nBinary: ' \
                        + act_cfg['name'] + ': ' + act_cfg['value'] \
                        + '\nConfig file: ' + act_cfg['name'] \
                        + ': ' + config_val + '\n'
    def set_field_value(self, top, value_bytes, force=False):
        """Distribute *value_bytes* into the item(s) under *top*.

        A single item node is written directly; for a structure node every
        contained item whose value is empty (or all items when force=True)
        is refreshed from the byte blob, recording any differences via
        find_data_difference().
        """
        def _set_field_value(name, cfgs, level):
            if 'indx' not in cfgs:
                return
            act_cfg = self.get_item_by_index(cfgs['indx'])
            actual_offset = act_cfg['offset'] - struct_info['offset']
            if force or act_cfg['value'] == '':
                value = get_bits_from_bytes(full_bytes,
                                            actual_offset,
                                            act_cfg['length'])
                act_val = act_cfg['value']
                if act_val == '':
                    act_val = '%d' % value
                act_val = self.reformat_number_per_type(act_cfg
                                                        ['type'],
                                                        act_val)
                act_cfg['value'] = self.format_value_to_str(
                    value, act_cfg['length'], act_val)
                self.find_data_difference(act_val, act_cfg)
        if 'indx' in top:
            # it is config option
            value = bytes_to_value(value_bytes)
            act_cfg = self.get_item_by_index(top['indx'])
            act_cfg['value'] = self.format_value_to_str(
                value, act_cfg['length'], act_cfg['value'])
        else:
            # it is structure
            struct_info = top[CGenYamlCfg.STRUCT]
            length = struct_info['length'] // 8
            full_bytes = bytearray(value_bytes[:length])
            if len(full_bytes) < length:
                # zero-pad a short input blob up to the structure length
                full_bytes.extend(bytearray(length - len(value_bytes)))
            self.traverse_cfg_tree(_set_field_value, top)
    def update_def_value(self):
        """Normalize every item's default value string, then push
        structure-level '{...}' default blobs down into their items."""
        def _update_def_value(name, cfgs, level):
            if 'indx' in cfgs:
                act_cfg = self.get_item_by_index(cfgs['indx'])
                if act_cfg['value'] != '' and act_cfg['length'] > 0:
                    try:
                        act_cfg['value'] = self.reformat_value_str(
                            act_cfg['value'], act_cfg['length'])
                    except Exception:
                        raise Exception("Invalid value expression '%s' \
for '%s' !" % (act_cfg['value'], act_cfg['path']))
            else:
                if CGenYamlCfg.STRUCT in cfgs and 'value' in \
                        cfgs[CGenYamlCfg.STRUCT]:
                    # structure carries a combined default; expand it
                    curr = cfgs[CGenYamlCfg.STRUCT]
                    value_bytes = self.get_value(curr['value'],
                                                 curr['length'], True)
                    self.set_field_value(cfgs, value_bytes)
        self.traverse_cfg_tree(_update_def_value, self._cfg_tree)
def evaluate_condition(self, item):
expr = item['condition']
result = self.parse_value(expr, 1, False)
return result
def detect_fsp(self):
cfg_segs = self.get_cfg_segment()
if len(cfg_segs) == 3:
fsp = True
for idx, seg in enumerate(cfg_segs):
if not seg[0].endswith('UPD_%s' % 'TMS'[idx]):
fsp = False
break
else:
fsp = False
if fsp:
self.set_mode('FSP')
return fsp
    def get_cfg_segment(self):
        """Split the flattened config into (name, offset, length) byte
        segments, delimited by $ACTION nodes carrying a 'find' signature;
        a single unnamed segment covers everything when no signatures
        exist."""
        def _get_cfg_segment(name, cfgs, level):
            if 'indx' not in cfgs:
                if name.startswith('$ACTION_'):
                    if 'find' in cfgs:
                        # remember the signature for the next real item
                        find[0] = cfgs['find']
            else:
                if find[0]:
                    act_cfg = self.get_item_by_index(cfgs['indx'])
                    segments.append([find[0], act_cfg['offset'] // 8, 0])
                    find[0] = ''
            return
        find = ['']
        segments = []
        self.traverse_cfg_tree(_get_cfg_segment, self._cfg_tree)
        cfg_len = self._cfg_tree[CGenYamlCfg.STRUCT]['length'] // 8
        if len(segments) == 0:
            segments.append(['', 0, cfg_len])
        # sentinel entry so lengths can be computed pairwise below
        segments.append(['', cfg_len, 0])
        cfg_segs = []
        for idx, each in enumerate(segments[:-1]):
            cfg_segs.append((each[0], each[1],
                             segments[idx+1][1] - each[1]))
        return cfg_segs
def get_bin_segment(self, bin_data):
cfg_segs = self.get_cfg_segment()
bin_segs = []
for seg in cfg_segs:
key = seg[0].encode()
if key == 0:
bin_segs.append([seg[0], 0, len(bin_data)])
break
pos = bin_data.find(key)
if pos >= 0:
# ensure no other match for the key
next_pos = bin_data.find(key, pos + len(seg[0]))
if next_pos >= 0:
if key == b'$SKLFSP$' or key == b'$BSWFSP$':
string = ('Warning: Multiple matches for %s in '
'binary!\n\nA workaround applied to such '
'FSP 1.x binary to use second'
' match instead of first match!' % key)
messagebox.showwarning('Warning!', string)
pos = next_pos
else:
print("Warning: Multiple matches for '%s' "
"in binary, the 1st instance will be used !"
% seg[0])
bin_segs.append([seg[0], pos, seg[2]])
self.binseg_dict[seg[0]] = pos
else:
bin_segs.append([seg[0], -1, seg[2]])
self.binseg_dict[seg[0]] = -1
continue
return bin_segs
available_fv = []
missing_fv = []
def extract_cfg_from_bin(self, bin_data):
# get cfg bin length
cfg_bins = bytearray()
bin_segs = self.get_bin_segment(bin_data)
Dummy_offset = 0
for each in bin_segs:
if each[1] != -1:
cfg_bins.extend(bin_data[each[1]:each[1] + each[2]])
self.available_fv.append(each[0])
else:
self.missing_fv.append(each[0])
string = each[0] + ' is not availabe.'
messagebox.showinfo('', string)
cfg_bins.extend(bytearray(each[2]))
Dummy_offset += each[2]
return cfg_bins
def save_current_to_bin(self):
cfg_bins = self.generate_binary_array()
if self._old_bin is None:
return cfg_bins
bin_data = bytearray(self._old_bin)
bin_segs = self.get_bin_segment(self._old_bin)
cfg_off = 0
for each in bin_segs:
length = each[2]
if each[1] != -1:
bin_data[each[1]:each[1] + length] = cfg_bins[cfg_off:
cfg_off
+ length]
cfg_off += length
else:
cfg_off += length
print('Patched the loaded binary successfully !')
return bin_data
    def show_data_difference(self, data_diff):
        # Displays if any data difference detected in BSF and Binary file
        """Open a scrollable tkinter window showing *data_diff*, the
        accumulated config-file vs binary mismatch report."""
        pop_up_text = 'There are differences in Config file and binary '\
            'data detected!\n'
        pop_up_text += data_diff
        window = tkinter.Tk()
        window.title("Data Difference")
        window.resizable(1, 1)
        # Window Size
        window.geometry("800x400")
        frame = tkinter.Frame(window, height=800, width=700)
        frame.pack(side=tkinter.BOTTOM)
        # Vertical (y) Scroll Bar
        scroll = tkinter.Scrollbar(window)
        scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
        text = tkinter.Text(window, wrap=tkinter.NONE,
                            yscrollcommand=scroll.set,
                            width=700, height=400)
        text.insert(tkinter.INSERT, pop_up_text)
        text.pack()
        # Configure the scrollbars
        scroll.config(command=text.yview)
        exit_button = tkinter.Button(
            window, text="Close", command=window.destroy)
        exit_button.pack(in_=frame, side=tkinter.RIGHT, padx=20, pady=10)
def load_default_from_bin(self, bin_data):
self._old_bin = bin_data
cfg_bins = self.extract_cfg_from_bin(bin_data)
self.set_field_value(self._cfg_tree, cfg_bins, True)
if self.data_diff:
self.show_data_difference(self.data_diff)
return cfg_bins
def generate_binary_array(self, path=''):
if path == '':
top = None
else:
top = self.locate_cfg_item(path)
if not top:
raise Exception("Invalid configuration path '%s' !"
% path)
return self.get_field_value(top)
def generate_binary(self, bin_file_name, path=''):
bin_file = open(bin_file_name, "wb")
bin_file.write(self.generate_binary_array(path))
bin_file.close()
return 0
def write_delta_file(self, out_file, platform_id, out_lines):
dlt_fd = open(out_file, "w")
dlt_fd.write("%s\n" % get_copyright_header('dlt', True))
if platform_id is not None:
dlt_fd.write('#\n')
dlt_fd.write('# Delta configuration values for '
'platform ID 0x%04X\n'
% platform_id)
dlt_fd.write('#\n\n')
for line in out_lines:
dlt_fd.write('%s\n' % line)
dlt_fd.close()
def override_default_value(self, dlt_file):
    """Apply a .dlt delta file on top of the current default values.

    Each non-comment line has the form '<cfg.path> | <value>'.  Raises on
    malformed lines or unknown paths, and when the file does not set
    PLATFORMID_CFG_DATA.PlatformId.
    """
    error = 0
    dlt_lines = CGenYamlCfg.expand_include_files(dlt_file)

    platform_id = None
    for line, file_path, line_num in dlt_lines:
        line = line.strip()
        # Skip blank lines and '#' comments.
        if not line or line.startswith('#'):
            continue
        match = re.match("\\s*([\\w\\.]+)\\s*\\|\\s*(.+)", line)
        if not match:
            raise Exception("Unrecognized line '%s' "
                            "(File:'%s' Line:%d) !"
                            % (line, file_path, line_num + 1))

        path = match.group(1)
        value_str = match.group(2)
        top = self.locate_cfg_item(path)
        if not top:
            raise Exception(
                "Invalid configuration '%s' (File:'%s' Line:%d) !" %
                (path, file_path, line_num + 1))
        if 'indx' in top:
            # Leaf item: bit length comes from the flat item list.
            act_cfg = self.get_item_by_index(top['indx'])
            bit_len = act_cfg['length']
        else:
            # Structure node: length recorded in its $STRUCT metadata.
            struct_info = top[CGenYamlCfg.STRUCT]
            bit_len = struct_info['length']
        value_bytes = self.parse_value(value_str, bit_len)
        self.set_field_value(top, value_bytes, True)

        if path == 'PLATFORMID_CFG_DATA.PlatformId':
            platform_id = value_str

    if platform_id is None:
        raise Exception(
            "PLATFORMID_CFG_DATA.PlatformId is missing "
            "in file '%s' !" %
            (dlt_file))

    return error
def generate_delta_file_from_bin(self, delta_file, old_data,
                                 new_data, full=False):
    """Write a delta file containing items that differ between two binaries.

    full=True emits every non-reserved item regardless of difference.
    For non-FSP mode a PlatformId line is forced to the top of the file.
    """
    new_data = self.load_default_from_bin(new_data)
    lines = []
    platform_id = None
    def_platform_id = 0

    for item in self._cfg_list:
        if not full and (item['type'] in ['Reserved']):
            continue
        old_val = get_bits_from_bytes(old_data, item['offset'],
                                      item['length'])
        new_val = get_bits_from_bytes(new_data, item['offset'],
                                      item['length'])

        full_name = item['path']
        if 'PLATFORMID_CFG_DATA.PlatformId' == full_name:
            def_platform_id = old_val

        if new_val != old_val or full:
            val_str = self.reformat_value_str(item['value'],
                                              item['length'])
            text = '%-40s | %s' % (full_name, val_str)
            lines.append(text)

    if self.get_mode() != 'FSP':
        if platform_id is None or def_platform_id == platform_id:
            platform_id = def_platform_id
            print("WARNING: 'PlatformId' configuration is "
                  "same as default %d!" % platform_id)

        lines.insert(0, '%-40s | %s\n\n' %
                     ('PLATFORMID_CFG_DATA.PlatformId',
                      '0x%04X' % platform_id))
    else:
        # FSP delta files carry no platform id banner.
        platform_id = None

    self.write_delta_file(delta_file, platform_id, lines)
    return 0
def generate_delta_file(self, delta_file, bin_file, bin_file2, full=False):
    """Produce a delta file from one or two binary files.

    With only bin_file, the baseline is the YAML defaults; with bin_file2,
    bin_file is the baseline and bin_file2 supplies the new values.
    """
    def _read_cfg(name):
        # Extract the CFGDATA region from a binary file on disk.
        with open(name, 'rb') as fd:
            return self.extract_cfg_from_bin(bytearray(fd.read()))

    new_data = _read_cfg(bin_file)
    if bin_file2 == '':
        old_data = self.generate_binary_array()
    else:
        old_data = new_data
        new_data = _read_cfg(bin_file2)
    return self.generate_delta_file_from_bin(delta_file,
                                             old_data, new_data, full)
def prepare_marshal(self, is_save):
    """Convert the config tree to/from a marshal-friendly representation.

    is_save=True before marshal.dump (ordered dict -> list); False after
    marshal.load to restore the ordered-dict form.
    """
    if is_save:
        # Ordered dict is not marshallable, convert to list
        self._cfg_tree = CGenYamlCfg.deep_convert_dict(self._cfg_tree)
    else:
        # Revert it back
        self._cfg_tree = CGenYamlCfg.deep_convert_list(self._cfg_tree)
def generate_yml_file(self, in_file, out_file):
    """Expand all include directives in a YAML file and write the result."""
    expanded = CFG_YAML().expand_yaml(in_file)
    with open(out_file, "w") as yml_fd:
        yml_fd.write(expanded)
    return 0
def write_cfg_header_file(self, hdr_file_name, tag_mode,
                          tag_dict, struct_list):
    """Emit a C header with CFGDATA tag #defines and struct typedefs.

    tag_mode bit7 flags FSP mode (adds FspUpd.h include); the low bits
    select the tag range: 0 keeps tags below 0x100, 1 keeps the rest.
    """
    lines = []
    lines.append('\n\n')
    if self.get_mode() == 'FSP':
        lines.append('#include <FspUpd.h>\n')
    # Strip the FSP flag bit before range filtering.
    tag_mode = tag_mode & 0x7F
    tag_list = sorted(list(tag_dict.items()), key=lambda x: x[1])
    for tagname, tagval in tag_list:
        if (tag_mode == 0 and tagval >= 0x100) or \
                (tag_mode == 1 and tagval < 0x100):
            continue
        lines.append('#define %-30s 0x%03X\n' % (
            'CDATA_%s_TAG' % tagname[:-9], tagval))
    lines.append('\n\n')

    name_dict = {}
    new_dict = {}
    for each in struct_list:
        if (tag_mode == 0 and each['tag'] >= 0x100) or \
                (tag_mode == 1 and each['tag'] < 0x100):
            continue
        new_dict[each['name']] = (each['alias'], each['count'])
        # Emit each alias only once even when several names share it.
        if each['alias'] not in name_dict:
            name_dict[each['alias']] = 1
            lines.extend(self.create_struct(each['alias'],
                                            each['node'], new_dict))

    lines.append('#pragma pack()\n\n')
    self.write_header_file(lines, hdr_file_name)
def write_header_file(self, txt_body, file_name, type='h'):
    """Write an include-guarded header file, skipping the write when the
    contents are unchanged (avoids needless incremental rebuilds).

    txt_body:  list of text lines forming the body.
    file_name: output path; its basename becomes the guard macro
               (CamelCase converted to UPPER_SNAKE_CASE).
    type:      'h' wraps the body in '#pragma pack(1)/pack()'; other
               values (e.g. 'inc') omit the pragmas.

    Fix: file handles are now managed with context managers so they are
    closed even when read/write raises; the content is joined once.
    """
    file_name_def = os.path.basename(file_name).replace('.', '_')
    file_name_def = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', file_name_def)
    file_name_def = re.sub('([a-z0-9])([A-Z])', r'\1_\2',
                           file_name_def).upper()
    lines = []
    lines.append("%s\n" % get_copyright_header(type))
    lines.append("#ifndef __%s__\n" % file_name_def)
    lines.append("#define __%s__\n\n" % file_name_def)
    if type == 'h':
        lines.append("#pragma pack(1)\n\n")
    lines.extend(txt_body)
    if type == 'h':
        lines.append("#pragma pack()\n\n")
    lines.append("#endif\n")

    new_txt = ''.join(lines)
    # Don't rewrite if the contents are the same
    if os.path.exists(file_name):
        with open(file_name, "r") as hdr_file:
            if hdr_file.read() == new_txt:
                return
    with open(file_name, "w") as hdr_file:
        hdr_file.write(new_txt)
def generate_data_inc_file(self, dat_inc_file_name, bin_file=None):
    """Emit a C '.inc' file declaring UINT8 mConfigDataBlob[] with the
    CFGDATA bytes (from bin_file when given, else from the config tree)."""
    # Put a prefix GUID before CFGDATA so that it can be located later on
    # (16-byte marker; literal restored from a line-continuation artifact).
    prefix = b'\xa7\xbd\x7f\x73\x20\x1e\x46\xd6' \
             b'\xbe\x8f\x64\x12\x05\x8d\x0a\xa8'
    if bin_file:
        fin = open(bin_file, 'rb')
        bin_dat = prefix + bytearray(fin.read())
        fin.close()
    else:
        bin_dat = prefix + self.generate_binary_array()

    file_name = os.path.basename(dat_inc_file_name).upper()
    file_name = file_name.replace('.', '_')
    txt_lines = []

    txt_lines.append("UINT8 mConfigDataBlob[%d] = {\n" % len(bin_dat))
    count = 0
    line = [' ']
    # Emit 16 bytes per output line.
    for each in bin_dat:
        line.append('0x%02X, ' % each)
        count = count + 1
        if (count & 0x0F) == 0:
            line.append('\n')
            txt_lines.append(''.join(line))
            line = [' ']
    if len(line) > 1:
        txt_lines.append(''.join(line) + '\n')

    txt_lines.append("};\n\n")
    self.write_header_file(txt_lines, dat_inc_file_name, 'inc')

    return 0
def get_struct_array_info(self, input):
    """Parse a struct reference of the form 'Name[N]:var'.

    Returns (name, array_num, var): array_num is 0 when no '[' is
    present, and 1000 when the brackets contain no digits.
    """
    pieces = input.split(':')
    var = pieces[1] if len(pieces) > 1 else ''
    base = pieces[0]
    chunks = base.split('[')
    name = chunks[0]
    if len(chunks) == 1:
        return name, 0, var
    digits = ''.join(ch for ch in chunks[-1] if ch.isdigit())
    return name, int(digits) if digits else 1000, var
def process_multilines(self, string, max_char_length):
    """Wrap 'string' into newline-terminated, space-prefixed lines.

    Lines break at literal '\\n' escape sequences and, for strings longer
    than max_char_length, also at spaces near the width limit.
    """
    multilines = ''
    string_length = len(string)
    current_string_start = 0
    string_offset = 0
    break_line_dict = []
    if len(string) <= max_char_length:
        # Short string: split only at embedded '\n' escapes.
        while (string_offset < string_length):
            if string_offset >= 1:
                if string[string_offset - 1] == '\\' and string[
                        string_offset] == 'n':
                    break_line_dict.append(string_offset + 1)
            string_offset += 1
        if break_line_dict != []:
            for each in break_line_dict:
                multilines += " %s\n" % string[
                    current_string_start:each].lstrip()
                current_string_start = each
            if string_length - current_string_start > 0:
                multilines += " %s\n" % string[
                    current_string_start:].lstrip()
        else:
            multilines = " %s\n" % string
    else:
        # Long string: also break at a space once the line fills up
        # (but keep at least ~10 chars on the last fragment).
        new_line_start = 0
        new_line_count = 0
        found_space_char = False
        while (string_offset < string_length):
            if string_offset >= 1:
                if new_line_count >= max_char_length - 1:
                    if string[string_offset] == ' ' and \
                            string_length - string_offset > 10:
                        break_line_dict.append(new_line_start
                                               + new_line_count)
                        new_line_start = new_line_start + new_line_count
                        new_line_count = 0
                        found_space_char = True
                elif string_offset == string_length - 1 and \
                        found_space_char is False:
                    # No break point found at all: sentinel entry.
                    break_line_dict.append(0)
                if string[string_offset - 1] == '\\' and string[
                        string_offset] == 'n':
                    break_line_dict.append(string_offset + 1)
                    new_line_start = string_offset + 1
                    new_line_count = 0
            string_offset += 1
            new_line_count += 1
        if break_line_dict != []:
            break_line_dict.sort()
            for each in break_line_dict:
                if each > 0:
                    multilines += " %s\n" % string[
                        current_string_start:each].lstrip()
                    current_string_start = each
            if string_length - current_string_start > 0:
                multilines += " %s\n" % \
                    string[current_string_start:].lstrip()
    return multilines
def create_field(self, item, name, length, offset, struct,
                 bsf_name, help, option, bits_length=None):
    """Format one C struct member declaration with an offset comment.

    length is in bytes; for typed arrays it is rescaled to an element
    count.  bits_length, when given, appends a ':N' bit-field suffix.
    Returns '\\n' alone for zero-length dummy/bit fields.
    """
    pos_name = 28
    name_line = ''
    # help_line = ''
    # option_line = ''

    if length == 0 and name == 'dummy':
        return '\n'

    if bits_length == 0:
        return '\n'

    is_array = False
    if length in [1, 2, 4, 8]:
        type = "UINT%d" % (length * 8)
    else:
        is_array = True
        type = "UINT8"

    if item and item['value'].startswith('{'):
        type = "UINT8"
        is_array = True

    if struct != '':
        # Leading '*' in the struct name become pointer stars on the field.
        struct_base = struct.rstrip('*')
        name = '*' * (len(struct) - len(struct_base)) + name
        struct = struct_base
        type = struct
        if struct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
            is_array = True
            unit = int(type[4:]) // 8
            # NOTE(review): true division keeps 'length' a float here;
            # the '%d' below truncates — presumably lengths are always
            # exact multiples of the unit.  Confirm before changing.
            length = length / unit
        else:
            is_array = False

    if is_array:
        name = name + '[%d]' % length

    if len(type) < pos_name:
        space1 = pos_name - len(type)
    else:
        space1 = 1

    if bsf_name != '':
        name_line = " %s\n" % bsf_name
    else:
        name_line = "N/A\n"

    # if help != '':
    #     help_line = self.process_multilines(help, 80)
    # if option != '':
    #     option_line = self.process_multilines(option, 80)

    if offset is None:
        offset_str = '????'
    else:
        offset_str = '0x%04X' % offset

    if bits_length is None:
        bits_length = ''
    else:
        bits_length = ' : %d' % bits_length

    # return "\n/** %s%s%s**/\n %s%s%s%s;\n" % (name_line, help_line,
    # option_line, type, ' ' * space1, name, bits_length)
    return "\n /* Offset %s: %s */\n %s%s%s%s;\n" % (
        offset_str, name_line.strip(), type, ' ' * space1,
        name, bits_length)
def create_struct(self, cname, top, struct_dict):
    """Build the C 'typedef struct { ... } cname;' text for config node 'top'.

    struct_dict maps field names to (alias, count) pairs used to fold
    repeated members into arrays.  Returns the typedef as a line list.
    """
    index = 0
    last = ''
    lines = []
    off_base = -1

    # NOTE(review): struct_dict values built by write_cfg_header_file are
    # 2-tuples, so [2] would raise IndexError if 'cname' were ever a key —
    # confirm whether this early-out path is reachable.
    if cname in struct_dict:
        if struct_dict[cname][2]:
            return []

    lines.append('\ntypedef struct {\n')
    for field in top:
        # '$'-prefixed keys are metadata ($STRUCT etc.), not members.
        if field[0] == '$':
            continue

        index += 1

        t_item = top[field]
        if 'indx' not in t_item:
            # Nested structure member (no flat-list index).
            if CGenYamlCfg.STRUCT not in top[field]:
                continue

            if struct_dict[field][1] == 0:
                continue

            append = True
            struct_info = top[field][CGenYamlCfg.STRUCT]

            if 'struct' in struct_info:
                struct, array_num, var = self.get_struct_array_info(
                    struct_info['struct'])
                if array_num > 0:
                    # Consecutive members of the same struct collapse
                    # into one array field.
                    if last == struct:
                        append = False
                    last = struct
                    if var == '':
                        var = field
                    field = CGenYamlCfg.format_struct_field_name(
                        var, struct_dict[field][1])
            else:
                struct = struct_dict[field][0]
                field = CGenYamlCfg.format_struct_field_name(
                    field, struct_dict[field][1])

            if append:
                offset = t_item['$STRUCT']['offset'] // 8
                if off_base == -1:
                    off_base = offset
                line = self.create_field(None, field, 0, 0, struct,
                                         '', '', '')
                lines.append(' %s' % line)
                last = struct
            continue

        # Leaf member backed by the flat item list.
        item = self.get_item_by_index(t_item['indx'])
        if item['cname'] == 'CfgHeader' and index == 1 or \
                (item['cname'] == 'CondValue' and index == 2):
            # Skip the implicit CFGDATA header/condition members.
            continue

        bit_length = None
        length = (item['length'] + 7) // 8
        match = re.match("^(\\d+)([b|B|W|D|Q])([B|W|D|Q]?)",
                         t_item['length'])
        if match and match.group(2) == 'b':
            # 'Nb[B|W|D|Q]' bit-field syntax; storage defaults to 4 bytes.
            bit_length = int(match.group(1))
            if match.group(3) != '':
                length = CGenYamlCfg.bits_width[match.group(3)] // 8
            else:
                length = 4
        offset = item['offset'] // 8
        if off_base == -1:
            off_base = offset
        struct = item.get('struct', '')
        name = field
        prompt = item['name']
        help = item['help']
        option = item['option']
        line = self.create_field(item, name, length, offset, struct,
                                 prompt, help, option, bit_length)
        lines.append(' %s' % line)
        last = struct

    lines.append('\n} %s;\n\n' % cname)

    return lines
def write_fsp_sig_header_file(self, hdr_file_name):
    """Write FspUpd.h defining FSP[T/M/S]_UPD_SIGNATURE from the UPD data."""
    with open(hdr_file_name, 'w') as hdr_fd:
        hdr_fd.write("%s\n" % get_copyright_header('h'))
        hdr_fd.write("#ifndef __FSPUPD_H__\n"
                     "#define __FSPUPD_H__\n\n"
                     "#include <FspEas.h>\n\n"
                     "#pragma pack(1)\n\n")
        sig_defs = []
        for fsp_comp in 'TMS':
            top = self.locate_cfg_item('FSP%s_UPD' % fsp_comp)
            if not top:
                raise Exception('Could not find FSP UPD definition !')
            bins = self.get_field_value(top)
            # First 8 bytes of each UPD region are its ASCII signature.
            sig_defs.append("#define FSP%s_UPD_SIGNATURE"
                            " 0x%016X /* '%s' */\n\n"
                            % (fsp_comp, bytes_to_value(bins[:8]),
                               bins[:8].decode()))
        hdr_fd.write(''.join(sig_defs))
        hdr_fd.write("#pragma pack()\n\n"
                     "#endif\n")
def create_header_file(self, hdr_file_name, com_hdr_file_name='', path=''):
    """Generate the platform (and optional common) C config header files.

    path '' walks the whole tree; path 'FSP_SIG' instead emits FspUpd.h.
    """
    def _build_header_struct(name, cfgs, level):
        # Tree-walk callback: collect tag ids and per-node struct info.
        if CGenYamlCfg.STRUCT in cfgs:
            if 'CfgHeader' in cfgs:
                # collect CFGDATA TAG IDs
                cfghdr = self.get_item_by_index(cfgs['CfgHeader']['indx'])
                tag_val = array_str_to_value(cfghdr['value']) >> 20
                tag_dict[name] = tag_val
                if level == 1:
                    tag_curr[0] = tag_val
            struct_dict[name] = (level, tag_curr[0], cfgs)

    if path == 'FSP_SIG':
        self.write_fsp_sig_header_file(hdr_file_name)
        return
    tag_curr = [0]
    tag_dict = {}
    struct_dict = {}

    if path == '':
        top = None
    else:
        top = self.locate_cfg_item(path)
        if not top:
            raise Exception("Invalid configuration path '%s' !" % path)
    _build_header_struct(path, top, 0)
    self.traverse_cfg_tree(_build_header_struct, top)

    # hdr_mode 2 when no level-1 tag was seen, else 1; bit7 flags UPD mode.
    if tag_curr[0] == 0:
        hdr_mode = 2
    else:
        hdr_mode = 1

    if re.match('FSP[TMS]_UPD', path):
        hdr_mode |= 0x80

    # filter out the items to be built for tags and structures
    struct_list = []
    for each in struct_dict:
        match = False
        for check in CGenYamlCfg.exclude_struct:
            if re.match(check, each):
                match = True
                if each in tag_dict:
                    if each not in CGenYamlCfg.include_tag:
                        del tag_dict[each]
                break
        if not match:
            struct_list.append({'name': each, 'alias': '', 'count': 0,
                                'level': struct_dict[each][0],
                                'tag': struct_dict[each][1],
                                'node': struct_dict[each][2]})

    # sort by level so that the bottom level struct
    # will be build first to satisfy dependencies
    struct_list = sorted(struct_list, key=lambda x: x['level'],
                         reverse=True)

    # Convert XXX_[0-9]+ to XXX as an array hint
    for each in struct_list:
        cfgs = each['node']
        if 'struct' in cfgs['$STRUCT']:
            each['alias'], array_num, var = self.get_struct_array_info(
                cfgs['$STRUCT']['struct'])
        else:
            match = re.match('(\\w+)(_\\d+)', each['name'])
            if match:
                each['alias'] = match.group(1)
            else:
                each['alias'] = each['name']

    # count items for array build
    for idx, each in enumerate(struct_list):
        if idx > 0:
            last_struct = struct_list[idx-1]['node']['$STRUCT']
            curr_struct = each['node']['$STRUCT']
            # Same alias, same size, contiguous offsets: extend the run.
            if struct_list[idx-1]['alias'] == each['alias'] and \
                    curr_struct['length'] == last_struct['length'] and \
                    curr_struct['offset'] == last_struct['offset'] + \
                    last_struct['length']:
                for idx2 in range(idx-1, -1, -1):
                    if struct_list[idx2]['count'] > 0:
                        struct_list[idx2]['count'] += 1
                        break
                continue
        each['count'] = 1

    # generate common header
    if com_hdr_file_name:
        self.write_cfg_header_file(com_hdr_file_name, 0, tag_dict,
                                   struct_list)

    # generate platform header
    self.write_cfg_header_file(hdr_file_name, hdr_mode, tag_dict,
                               struct_list)
    return 0
def load_yaml(self, cfg_file):
    """Load a YAML config file and rebuild all internal lookup structures."""
    cfg_yaml = CFG_YAML()
    self.initialize()
    self._cfg_tree = cfg_yaml.load_yaml(cfg_file)
    self._def_dict = cfg_yaml.def_dict
    # Remember the YAML directory for resolving relative includes later.
    self._yaml_path = os.path.dirname(cfg_file)
    self.build_cfg_list()
    self.build_var_dict()
    self.update_def_value()
    return 0
def usage():
    """Print the command-line help for the GenYamlCfg tool."""
    for text in (
            "GenYamlCfg Version 0.50",
            "Usage:",
            " GenYamlCfg GENINC BinFile IncOutFile  [-D Macros]",
            " GenYamlCfg GENPKL YamlFile PklOutFile  [-D Macros]",
            " GenYamlCfg GENBIN YamlFile[;DltFile] BinOutFile  [-D Macros]",
            " GenYamlCfg GENDLT YamlFile[;BinFile] DltOutFile  [-D Macros]",
            " GenYamlCfg GENYML YamlFile YamlOutFile [-D Macros]",
            " GenYamlCfg GENHDR YamlFile HdrOutFile  [-D Macros]"):
        print(text)
def main():
    """Command-line entry point.

    Dispatches on sys.argv[1]: GENINC / GENPKL / GENBIN / GENDLT /
    GENYML / GENHDR / DEBUG.  Returns 0 on success, 1 on short usage.
    """
    # Parse the options and args
    argc = len(sys.argv)
    if argc < 4:
        usage()
        return 1

    gen_cfg_data = CGenYamlCfg()
    command = sys.argv[1].upper()
    out_file = sys.argv[3]

    if argc >= 5 and gen_cfg_data.parse_macros(sys.argv[4:]) != 0:
        raise Exception("ERROR: Macro parsing failed !")

    # argv[2] is 'YamlFile[;DltFile[;BinFile]]'.
    file_list = sys.argv[2].split(';')
    if len(file_list) >= 2:
        yml_file = file_list[0]
        dlt_file = file_list[1]
    elif len(file_list) == 1:
        yml_file = file_list[0]
        dlt_file = ''
    else:
        raise Exception("ERROR: Invalid parameter '%s' !" % sys.argv[2])
    yml_scope = ''
    # 'file@scope' restricts generation to a config subtree.
    if '@' in yml_file:
        parts = yml_file.split('@')
        yml_file = parts[0]
        yml_scope = parts[1]

    if command == "GENDLT" and yml_file.endswith('.dlt'):
        # It needs to expand an existing DLT file
        dlt_file = yml_file
        lines = gen_cfg_data.expand_include_files(dlt_file)
        write_lines(lines, out_file)
        return 0

    if command == "GENYML":
        if not yml_file.lower().endswith('.yaml'):
            raise Exception('Only YAML file is supported !')
        gen_cfg_data.generate_yml_file(yml_file, out_file)
        return 0

    bin_file = ''
    if (yml_file.lower().endswith('.bin')) and (command == "GENINC"):
        # It is binary file
        bin_file = yml_file
        yml_file = ''

    if bin_file:
        gen_cfg_data.generate_data_inc_file(out_file, bin_file)
        return 0

    cfg_bin_file = ''
    cfg_bin_file2 = ''
    if dlt_file:
        if command == "GENDLT":
            # For GENDLT the second path is actually a binary, not a DLT.
            cfg_bin_file = dlt_file
            dlt_file = ''
            if len(file_list) >= 3:
                cfg_bin_file2 = file_list[2]

    if yml_file.lower().endswith('.pkl'):
        # Load a previously pickled (marshalled) configuration database.
        with open(yml_file, "rb") as pkl_file:
            gen_cfg_data.__dict__ = marshal.load(pkl_file)
        gen_cfg_data.prepare_marshal(False)

        # Override macro definition again for Pickle file
        if argc >= 5:
            gen_cfg_data.parse_macros(sys.argv[4:])
    else:
        gen_cfg_data.load_yaml(yml_file)
        if command == 'GENPKL':
            gen_cfg_data.prepare_marshal(True)
            with open(out_file, "wb") as pkl_file:
                marshal.dump(gen_cfg_data.__dict__, pkl_file)
            json_file = os.path.splitext(out_file)[0] + '.json'
            fo = open(json_file, 'w')
            path_list = []
            cfgs = {'_cfg_page': gen_cfg_data._cfg_page,
                    '_cfg_list': gen_cfg_data._cfg_list,
                    '_path_list': path_list}

            # optimize to reduce size
            path = None
            for each in cfgs['_cfg_list']:
                new_path = each['path'][:-len(each['cname'])-1]
                if path != new_path:
                    path = new_path
                    each['path'] = path
                    path_list.append(path)
                else:
                    del each['path']
                if each['order'] == each['offset']:
                    del each['order']
                del each['offset']

                # value is just used to indicate display type
                value = each['value']
                if value.startswith('0x'):
                    hex_len = ((each['length'] + 7) // 8) * 2
                    if len(value) == hex_len:
                        value = 'x%d' % hex_len
                    else:
                        value = 'x'
                    each['value'] = value
                elif value and value[0] in ['"', "'", '{']:
                    each['value'] = value[0]
                else:
                    del each['value']

            fo.write(repr(cfgs))
            fo.close()
            return 0

    if dlt_file:
        gen_cfg_data.override_default_value(dlt_file)

    gen_cfg_data.detect_fsp()

    if command == "GENBIN":
        if len(file_list) == 3:
            old_data = gen_cfg_data.generate_binary_array()
            fi = open(file_list[2], 'rb')
            new_data = bytearray(fi.read())
            fi.close()
            if len(new_data) != len(old_data):
                raise Exception("Binary file '%s' length does not match, \
ignored !" % file_list[2])
            else:
                gen_cfg_data.load_default_from_bin(new_data)
                gen_cfg_data.override_default_value(dlt_file)

        gen_cfg_data.generate_binary(out_file, yml_scope)

    elif command == "GENDLT":
        full = True if 'FULL' in gen_cfg_data._macro_dict else False
        gen_cfg_data.generate_delta_file(out_file, cfg_bin_file,
                                         cfg_bin_file2, full)

    elif command == "GENHDR":
        # 'BoardHdr[;CommonHdr]' output specification.
        out_files = out_file.split(';')
        brd_out_file = out_files[0].strip()
        if len(out_files) > 1:
            com_out_file = out_files[1].strip()
        else:
            com_out_file = ''
        gen_cfg_data.create_header_file(brd_out_file, com_out_file, yml_scope)

    elif command == "GENINC":
        gen_cfg_data.generate_data_inc_file(out_file)

    elif command == "DEBUG":
        gen_cfg_data.print_cfgs()

    else:
        raise Exception("Unsuported command '%s' !" % command)

    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/ConfigEditor/GenYamlCfg.py
|
#!/usr/bin/env python
# @ CommonUtility.py
# Common utility script
#
# Copyright (c) 2016 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import sys
import shutil
import subprocess
import string
from ctypes import ARRAY, c_char, c_uint16, c_uint32, \
c_uint8, Structure, sizeof
from importlib.machinery import SourceFileLoader
from SingleSign import single_sign_gen_pub_key
# Key types defined should match with cryptolib.h
PUB_KEY_TYPE = {
    "RSA": 1,
    "ECC": 2,
    "DSA": 3,
    }

# Signing type schemes defined should match with cryptolib.h
SIGN_TYPE_SCHEME = {
    "RSA_PKCS1": 1,
    "RSA_PSS": 2,
    "ECC": 3,
    "DSA": 4,
    }

# Hash values defined should match with cryptolib.h
HASH_TYPE_VALUE = {
    "SHA2_256": 1,
    "SHA2_384": 2,
    "SHA2_512": 3,
    "SM3_256": 4,
    }

# Reverse map of HASH_TYPE_VALUE: numeric hash id -> name string.
HASH_VAL_STRING = dict(map(reversed, HASH_TYPE_VALUE.items()))

# Auth-type ids; RSA entries alias the hash ids of their digest size.
AUTH_TYPE_HASH_VALUE = {
    "SHA2_256": 1,
    "SHA2_384": 2,
    "SHA2_512": 3,
    "SM3_256": 4,
    "RSA2048SHA256": 1,
    "RSA3072SHA384": 2,
    }

# Digest length in bytes for each supported hash algorithm.
HASH_DIGEST_SIZE = {
    "SHA2_256": 32,
    "SHA2_384": 48,
    "SHA2_512": 64,
    "SM3_256": 32,
    }
class PUB_KEY_HDR (Structure):
    # Packed binary header preceding public-key bytes (matches cryptolib.h).
    _pack_ = 1
    _fields_ = [
        ('Identifier', ARRAY(c_char, 4)),  # signature ('P', 'U', 'B', 'K')
        ('KeySize', c_uint16),             # Length of Public Key
        ('KeyType', c_uint8),              # RSA or ECC
        ('Reserved', ARRAY(c_uint8, 1)),
        ('KeyData', ARRAY(c_uint8, 0)),    # variable-length key data follows
        ]

    def __init__(self):
        self.Identifier = b'PUBK'
class SIGNATURE_HDR (Structure):
    # Packed binary header preceding a raw signature (matches cryptolib.h).
    _fields_ are documented inline below.
    _pack_ = 1
    _fields_ = [
        ('Identifier', ARRAY(c_char, 4)),  # signature 'SIGN'
        ('SigSize', c_uint16),             # length of the signature blob
        ('SigType', c_uint8),              # a SIGN_TYPE_SCHEME value
        ('HashAlg', c_uint8),              # a HASH_TYPE_VALUE value
        ('Signature', ARRAY(c_uint8, 0)),  # variable-length signature follows
        ]

    def __init__(self):
        self.Identifier = b'SIGN'
class LZ_HEADER(Structure):
    # Packed header prepended to compressed blobs produced by compress().
    _pack_ = 1
    _fields_ = [
        ('signature', ARRAY(c_char, 4)),  # b'LZDM' / b'LZ4 ' / b'LZMA'
        ('compressed_len', c_uint32),     # payload length after this header
        ('length', c_uint32),             # original (uncompressed) length
        ('version', c_uint16),
        ('svn', c_uint8),                 # security version number
        ('attribute', c_uint8)
        ]
    # Map of header signature -> compression algorithm name.
    _compress_alg = {
        b'LZDM': 'Dummy',
        b'LZ4 ': 'Lz4',
        b'LZMA': 'Lzma',
        }
def print_bytes(data, indent=0, offset=0, show_ascii=False):
    """Hex-dump 'data' to stdout, 16 bytes per line.

    indent:     number of spaces prefixed to each line.
    offset:     value added to the printed address column.
    show_ascii: append a printable-character rendering ('.' for others).

    Fix: removed a stray no-op 'bytes_per_line' expression statement left
    over from a broken line split after the format-string assignment.
    """
    bytes_per_line = 16
    printable = ' ' + string.ascii_letters + string.digits + string.punctuation
    str_fmt = '{:s}{:04x}: {:%ds} {:s}' % (bytes_per_line * 3)
    data_array = bytearray(data)
    for idx in range(0, len(data_array), bytes_per_line):
        hex_str = ' '.join(
            '%02X' % val for val in data_array[idx:idx + bytes_per_line])
        asc_str = ''.join('%c' % (val if (chr(val) in printable) else '.')
                          for val in data_array[idx:idx + bytes_per_line])
        print(str_fmt.format(
            indent * ' ',
            offset + idx, hex_str,
            ' ' + asc_str if show_ascii else ''))
def get_bits_from_bytes(bytes, start, length):
    """Extract 'length' bits beginning at bit 'start' from little-endian
    byte data; a zero length yields 0."""
    if length == 0:
        return 0
    first = start // 8
    last = (start + length - 1) // 8
    word = int.from_bytes(bytes[first:last + 1], 'little')
    return (word >> (start & 7)) & ((1 << length) - 1)
def set_bits_to_bytes(bytes, start, length, bvalue):
    """Write the low 'length' bits of 'bvalue' at bit offset 'start' into
    the mutable little-endian byte buffer 'bytes' (no-op when length is 0)."""
    if length == 0:
        return
    first = start // 8
    last = (start + length - 1) // 8
    shift = start & 7
    mask = (1 << length) - 1
    word = int.from_bytes(bytes[first:last + 1], 'little')
    word = (word & ~(mask << shift)) | ((bvalue & mask) << shift)
    bytes[first:last + 1] = bytearray(
        word.to_bytes(last + 1 - first, 'little'))
def value_to_bytes(value, length):
    """Little-endian encode integer 'value' into exactly 'length' bytes."""
    return int.to_bytes(value, length, 'little')
def bytes_to_value(bytes):
    """Decode a little-endian byte sequence into an unsigned integer."""
    return int.from_bytes(bytes, byteorder='little')
def value_to_bytearray(value, length):
    """Little-endian encode 'value' into a mutable bytearray of 'length' bytes.

    Fix: removed the dead commented-out duplicate definition (and its stray
    duplicated return line) that followed this function.
    """
    return bytearray(value.to_bytes(length, 'little'))
def get_aligned_value(value, alignment=4):
    """Round 'value' up to the next multiple of 'alignment'.

    'alignment' must be a power of two; anything else raises.
    """
    if alignment != (1 << (alignment.bit_length() - 1)):
        raise Exception(
            'Alignment (0x%x) should to be power of 2 !' % alignment)
    return (value + alignment - 1) & ~(alignment - 1)
def get_padding_length(data_len, alignment=4):
    """Number of pad bytes needed to bring 'data_len' up to 'alignment'."""
    return get_aligned_value(data_len, alignment) - data_len
def get_file_data(file, mode='rb'):
    """Read and return the entire contents of 'file' (binary by default).

    Fix: use a context manager so the handle is closed deterministically
    instead of leaking until garbage collection.
    """
    with open(file, mode) as fd:
        return fd.read()
def gen_file_from_object(file, object):
    """Write binary 'object' to 'file'.

    Fix: use a context manager so the handle is flushed and closed
    deterministically instead of leaking until garbage collection.
    """
    with open(file, 'wb') as fd:
        fd.write(object)
def gen_file_with_size(file, size):
    """Create 'file' filled with 'size' 0xFF bytes.

    Fix: use a context manager so the handle is flushed and closed
    deterministically instead of leaking until garbage collection.
    """
    with open(file, 'wb') as fd:
        fd.write(b'\xFF' * size)
def check_files_exist(base_name_list, dir='', ext=''):
    """Return True when every '<dir>/<base><ext>' path exists (True for [])."""
    return all(os.path.exists(os.path.join(dir, base + ext))
               for base in base_name_list)
def load_source(name, filepath):
    """Import the Python file at 'filepath' as module 'name' and return it.

    Fix: SourceFileLoader.load_module() is deprecated and removed in
    Python 3.12; use the importlib spec/exec_module API instead.  The
    module is registered in sys.modules, matching load_module semantics.
    """
    import importlib.util
    loader = SourceFileLoader(name, filepath)
    spec = importlib.util.spec_from_loader(name, loader)
    mod = importlib.util.module_from_spec(spec)
    sys.modules[name] = mod
    loader.exec_module(mod)
    return mod
def get_openssl_path():
    """Return the path of the openssl executable.

    On Windows, defaults OPENSSL_PATH/OPENSSL_CONF to the conventional
    C:\\Openssl install locations when not already set in the environment.
    On other OSes the binary is located via PATH (may return None when
    openssl is not installed).
    """
    if os.name == 'nt':
        if 'OPENSSL_PATH' not in os.environ:
            openssl_dir = "C:\\Openssl\\bin\\"
            if os.path.exists(openssl_dir):
                os.environ['OPENSSL_PATH'] = openssl_dir
            else:
                os.environ['OPENSSL_PATH'] = "C:\\Openssl\\"
        if 'OPENSSL_CONF' not in os.environ:
            openssl_cfg = "C:\\Openssl\\openssl.cfg"
            if os.path.exists(openssl_cfg):
                os.environ['OPENSSL_CONF'] = openssl_cfg
        openssl = os.path.join(
            os.environ.get('OPENSSL_PATH', ''),
            'openssl.exe')
    else:
        # Get openssl path for Linux cases
        openssl = shutil.which('openssl')

    return openssl
def run_process(arg_list, print_cmd=False, capture_out=False):
    """Run an external command given as an argv list.

    print_cmd:   echo the command line before running.
    capture_out: return the decoded stdout instead of streaming it.
    On failure the command line is printed (unless already echoed), then
    the original exception is re-raised, or sys.exit(1) is called when
    the command merely returned a non-zero status.  Returns captured
    output, or '' when not capturing.
    """
    sys.stdout.flush()
    # Windows: allow callers to omit the '.exe' suffix.
    if os.name == 'nt' and os.path.splitext(arg_list[0])[1] == '' and \
            os.path.exists(arg_list[0] + '.exe'):
        arg_list[0] += '.exe'
    if print_cmd:
        print(' '.join(arg_list))

    exc = None
    result = 0
    output = ''
    try:
        if capture_out:
            output = subprocess.check_output(arg_list).decode()
        else:
            result = subprocess.call(arg_list)
    except Exception as ex:
        result = 1
        exc = ex

    if result:
        if not print_cmd:
            print('Error in running process:\n %s' % ' '.join(arg_list))
        if exc is None:
            sys.exit(1)
        else:
            raise exc

    return output
# Adjust hash type algorithm based on Public key file
def adjust_hash_type(pub_key_file):
    """Pick the hash matching the RSA key width; None for unknown key types."""
    return {'RSA2048': 'SHA2_256',
            'RSA3072': 'SHA2_384'}.get(get_key_type(pub_key_file))
def rsa_sign_file(
        priv_key, pub_key, hash_type, sign_scheme,
        in_file, out_file, inc_dat=False, inc_key=False):
    """Wrap an already-generated raw signature in out_file with a
    SIGNATURE_HDR, optionally prepending the signed data and appending
    the PUBK-wrapped public key, then rewrite out_file.

    inc_dat: prepend the contents of in_file.
    inc_key: append the public key derived from priv_key (also written
             to pub_key when that path is given).
    """
    bins = bytearray()
    if inc_dat:
        bins.extend(get_file_data(in_file))

    # out_file is expected to already hold the raw signature bytes.
    out_data = get_file_data(out_file)
    sign = SIGNATURE_HDR()
    sign.SigSize = len(out_data)
    sign.SigType = SIGN_TYPE_SCHEME[sign_scheme]
    sign.HashAlg = HASH_TYPE_VALUE[hash_type]

    bins.extend(bytearray(sign) + out_data)
    if inc_key:
        key = gen_pub_key(priv_key, pub_key)
        bins.extend(key)

    # NOTE(review): the header always adds bytes, so this length check
    # effectively always rewrites out_file — confirm the intent.
    if len(bins) != len(out_data):
        gen_file_from_object(out_file, bins)
def get_key_type(in_key):
    """Return a key-type string such as 'RSA2048' for a key file or key id.

    Accepts a PUBK-wrapped public-key binary, or anything gen_pub_key()
    can turn into one (e.g. a private key file or a signing key id).
    """
    # Check in_key is file or key Id
    if not os.path.exists(in_key):
        key = bytearray(gen_pub_key(in_key))
    else:
        # Check for public key in binary format.
        key = bytearray(get_file_data(in_key))
    pub_key_hdr = PUB_KEY_HDR.from_buffer(key)
    if pub_key_hdr.Identifier != b'PUBK':
        # Not already wrapped: derive the public key from the input.
        pub_key = gen_pub_key(in_key)
        pub_key_hdr = PUB_KEY_HDR.from_buffer(pub_key)
    key_type = next(
        (key for key,
            value in PUB_KEY_TYPE.items() if value == pub_key_hdr.KeyType))
    # NOTE(review): KeySize appears to include 4 extra non-modulus bytes
    # (hence the '- 4') — confirm against the signer's key layout.
    return '%s%d' % (key_type, (pub_key_hdr.KeySize - 4) * 8)
def get_auth_hash_type(key_type, sign_scheme):
    """Map (key_type, sign_scheme) to (auth_type, hash_type).

    Unknown combinations yield ('', '').
    """
    known = {
        ('RSA2048', 'RSA_PKCS1'): ('RSA2048_PKCS1_SHA2_256', 'SHA2_256'),
        ('RSA3072', 'RSA_PKCS1'): ('RSA3072_PKCS1_SHA2_384', 'SHA2_384'),
        ('RSA2048', 'RSA_PSS'): ('RSA2048_PSS_SHA2_256', 'SHA2_256'),
        ('RSA3072', 'RSA_PSS'): ('RSA3072_PSS_SHA2_384', 'SHA2_384'),
    }
    auth_type, hash_type = known.get((key_type, sign_scheme), ('', ''))
    return auth_type, hash_type
# Delegates key extraction to SingleSign.single_sign_gen_pub_key().
def gen_pub_key(in_key, pub_key=None):
    """Return the public key for 'in_key' wrapped in a PUB_KEY_HDR.

    in_key:  private key file or signing key id understood by
             single_sign_gen_pub_key().
    pub_key: optional output path; when given the wrapped key is also
             written to that file.
    """
    keydata = single_sign_gen_pub_key(in_key, pub_key)
    publickey = PUB_KEY_HDR()
    publickey.KeySize = len(keydata)
    # NOTE(review): KeyType is hard-coded to RSA regardless of the key —
    # confirm ECC/DSA keys are never routed through here.
    publickey.KeyType = PUB_KEY_TYPE['RSA']
    key = bytearray(publickey) + keydata
    if pub_key:
        gen_file_from_object(pub_key, key)
    return key
def decompress(in_file, out_file, tool_dir=''):
    """Decompress an LZ_HEADER-prefixed file into 'out_file'.

    Supports 'LZDM' (stored), 'LZ4 ' and 'LZMA' payloads.  External
    <alg>Compress tools are looked up in 'tool_dir'; for LZ4 the python
    lz4 module is used as a fallback when the tool is unavailable.
    """
    if not os.path.isfile(in_file):
        raise Exception("Invalid input file '%s' !" % in_file)

    # Remove the Lz Header
    fi = open(in_file, 'rb')
    di = bytearray(fi.read())
    fi.close()

    lz_hdr = LZ_HEADER.from_buffer(di)
    offset = sizeof(lz_hdr)
    if lz_hdr.signature == b"LZDM" or lz_hdr.compressed_len == 0:
        # Stored (uncompressed) payload: copy it out verbatim.
        fo = open(out_file, 'wb')
        fo.write(di[offset:offset + lz_hdr.compressed_len])
        fo.close()
        return

    temp = os.path.splitext(out_file)[0] + '.tmp'
    if lz_hdr.signature == b"LZMA":
        alg = "Lzma"
    elif lz_hdr.signature == b"LZ4 ":
        alg = "Lz4"
    else:
        raise Exception("Unsupported compression '%s' !" % lz_hdr.signature)

    # Write the raw compressed payload to a temp file for the tool.
    fo = open(temp, 'wb')
    fo.write(di[offset:offset + lz_hdr.compressed_len])
    fo.close()

    compress_tool = "%sCompress" % alg
    if alg == "Lz4":
        try:
            cmdline = [
                os.path.join(tool_dir, compress_tool),
                "-d",
                "-o", out_file,
                temp]
            run_process(cmdline, False, True)
        except Exception:
            msg_string = "Could not find/use CompressLz4 tool, " \
                "trying with python lz4..."
            print(msg_string)
            try:
                import lz4.block
                if lz4.VERSION != '3.1.1':
                    msg_string = "Recommended lz4 module version " \
                        "is '3.1.1'," + lz4.VERSION \
                        + " is currently installed."
                    print(msg_string)
            except ImportError:
                msg_string = "Could not import lz4, use " \
                    "'python -m pip install lz4==3.1.1' " \
                    "to install it."
                print(msg_string)
                exit(1)
            decompress_data = lz4.block.decompress(get_file_data(temp))
            with open(out_file, "wb") as lz4bin:
                lz4bin.write(decompress_data)
    else:
        cmdline = [
            os.path.join(tool_dir, compress_tool),
            "-d",
            "-o", out_file,
            temp]
        run_process(cmdline, False, True)
    os.remove(temp)
def compress(in_file, alg, svn=0, out_path='', tool_dir=''):
    """Compress 'in_file' with 'alg' and prepend an LZ_HEADER.

    alg: 'Lzma', 'Tiano', 'Lz4' or 'Dummy' (store).  svn is recorded in
    the header.  out_path may be a directory or a file path; defaults to
    '<in_file>.lz'.  Returns the output file path.
    """
    if not os.path.isfile(in_file):
        raise Exception("Invalid input file '%s' !" % in_file)

    basename, ext = os.path.splitext(os.path.basename(in_file))
    if out_path:
        if os.path.isdir(out_path):
            out_file = os.path.join(out_path, basename + '.lz')
        else:
            out_file = os.path.join(out_path)
    else:
        out_file = os.path.splitext(in_file)[0] + '.lz'

    if alg == "Lzma":
        sig = "LZMA"
    elif alg == "Tiano":
        sig = "LZUF"
    elif alg == "Lz4":
        sig = "LZ4 "
    elif alg == "Dummy":
        sig = "LZDM"
    else:
        raise Exception("Unsupported compression '%s' !" % alg)

    in_len = os.path.getsize(in_file)
    if in_len > 0:
        compress_tool = "%sCompress" % alg
        if sig == "LZDM":
            # Store: copy the input verbatim as the 'compressed' payload.
            shutil.copy(in_file, out_file)
            compress_data = get_file_data(out_file)
        elif sig == "LZ4 ":
            try:
                cmdline = [
                    os.path.join(tool_dir, compress_tool),
                    "-e",
                    "-o", out_file,
                    in_file]
                run_process(cmdline, False, True)
                compress_data = get_file_data(out_file)
            except Exception:
                msg_string = "Could not find/use CompressLz4 tool, " \
                    "trying with python lz4..."
                print(msg_string)
                try:
                    import lz4.block
                    if lz4.VERSION != '3.1.1':
                        msg_string = "Recommended lz4 module version " \
                            "is '3.1.1', " + lz4.VERSION \
                            + " is currently installed."
                        print(msg_string)
                except ImportError:
                    msg_string = "Could not import lz4, use " \
                        "'python -m pip install lz4==3.1.1' " \
                        "to install it."
                    print(msg_string)
                    exit(1)
                compress_data = lz4.block.compress(
                    get_file_data(in_file),
                    mode='high_compression')
        elif sig == "LZMA":
            cmdline = [
                os.path.join(tool_dir, compress_tool),
                "-e",
                "-o", out_file,
                in_file]
            run_process(cmdline, False, True)
            compress_data = get_file_data(out_file)
        # NOTE(review): 'Tiano' (LZUF) has no branch here, so a non-empty
        # input would hit an unbound 'compress_data' below — confirm the
        # Tiano path is unused before relying on it.
    else:
        compress_data = bytearray()

    lz_hdr = LZ_HEADER()
    lz_hdr.signature = sig.encode()
    lz_hdr.svn = svn
    lz_hdr.compressed_len = len(compress_data)
    lz_hdr.length = os.path.getsize(in_file)
    data = bytearray()
    data.extend(lz_hdr)
    data.extend(compress_data)
    gen_file_from_object(out_file, data)

    return out_file
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/ConfigEditor/CommonUtility.py
|
# @ ConfigEditor.py
#
# Copyright(c) 2018 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import sys
import marshal
import tkinter
import tkinter.ttk as ttk
import tkinter.messagebox as messagebox
import tkinter.filedialog as filedialog
from pickle import FALSE, TRUE
from pathlib import Path
from GenYamlCfg import CGenYamlCfg, bytes_to_value, \
bytes_to_bracket_str, value_to_bytes, array_str_to_value
from ctypes import sizeof, Structure, ARRAY, c_uint8, c_uint64, c_char, \
c_uint32, c_uint16
from functools import reduce
sys.path.insert(0, '..')
from FspDscBsf2Yaml import bsf_to_dsc, dsc_to_yaml # noqa
sys.dont_write_bytecode = True
class create_tool_tip(object):
    '''
    create a tooltip for a given widget
    '''
    # True while a tooltip window is being shown (shared flag).
    in_progress = False

    def __init__(self, widget, text=''):
        self.top_win = None
        self.widget = widget
        self.text = text
        self.widget.bind("<Enter>", self.enter)
        self.widget.bind("<Leave>", self.leave)

    def enter(self, event=None):
        """Show the tooltip near the cursor on mouse enter."""
        if self.in_progress:
            return
        if self.widget.winfo_class() == 'Treeview':
            # Only show help when cursor is on row header.
            rowid = self.widget.identify_row(event.y)
            if rowid != '':
                return
        else:
            x, y, cx, cy = self.widget.bbox("insert")

        cursor = self.widget.winfo_pointerxy()
        x = self.widget.winfo_rootx() + 35
        y = self.widget.winfo_rooty() + 20
        # Nudge the window down when the cursor would overlap it.
        if cursor[1] > y and cursor[1] < y + 20:
            y += 20

        # creates a toplevel window
        self.top_win = tkinter.Toplevel(self.widget)
        # Leaves only the label and removes the app window
        self.top_win.wm_overrideredirect(True)
        self.top_win.wm_geometry("+%d+%d" % (x, y))
        label = tkinter.Message(self.top_win,
                                text=self.text,
                                justify='left',
                                background='bisque',
                                relief='solid',
                                borderwidth=1,
                                font=("times", "10", "normal"))
        label.pack(ipadx=1)
        self.in_progress = True

    def leave(self, event=None):
        """Destroy the tooltip window on mouse leave."""
        if self.top_win:
            self.top_win.destroy()
            self.in_progress = False
class validating_entry(tkinter.Entry):
    """Hex-only in-place cell editor overlaid on a custom_table cell."""

    def __init__(self, master, **kw):
        tkinter.Entry.__init__(*(self, master), **kw)
        self.parent = master
        # Cell text as it was when editing started (restored on <Escape>).
        self.old_value = ''
        # Last text that passed validation.
        self.last_value = ''
        self.variable = tkinter.StringVar()
        # Re-validate after every keystroke.
        self.variable.trace("w", self.callback)
        self.config(textvariable=self.variable)
        self.config({"background": "#c0c0c0"})
        self.bind("<Return>", self.move_next)
        self.bind("<Tab>", self.move_next)
        self.bind("<Escape>", self.cancel)
        for each in ['BackSpace', 'Delete']:
            self.bind("<%s>" % each, self.ignore)
        self.display(None)

    def ignore(self, even):
        # Swallow BackSpace/Delete so the cell keeps its fixed hex width.
        return "break"

    def move_next(self, event):
        """Commit the current cell and move the editor to the next cell."""
        if self.row < 0:
            return
        row, col = self.row, self.col
        txt, row_id, col_id = self.parent.get_next_cell(row, col)
        self.display(txt, row_id, col_id)
        return "break"

    def cancel(self, event):
        """Restore the original cell value and hide the editor."""
        self.variable.set(self.old_value)
        self.display(None)

    def display(self, txt, row_id='', col_id=''):
        """Show the editor over cell (row_id, col_id); hide if txt is None."""
        if txt is None:
            self.row = -1
            self.col = -1
            self.place_forget()
        else:
            # Treeview ids are 1-based: row ids look like 'I<hex>',
            # column ids like '#<n>'.
            row = int('0x' + row_id[1:], 0) - 1
            col = int(col_id[1:]) - 1
            self.row = row
            self.col = col
            self.old_value = txt
            self.last_value = txt
            x, y, width, height = self.parent.bbox(row_id, col)
            self.place(x=x, y=y, w=width)
            self.variable.set(txt)
            self.focus_set()
            self.icursor(0)

    def callback(self, *Args):
        """StringVar trace: validate the text and push it into the table."""
        cur_val = self.variable.get()
        new_val = self.validate(cur_val)
        if new_val is not None and self.row >= 0:
            self.last_value = new_val
            self.parent.set_cell(self.row, self.col, new_val)
        # Snap back to the last accepted value on invalid input.
        self.variable.set(self.last_value)

    def validate(self, value):
        """Return the normalized hex string, or None if *value* is invalid."""
        if len(value) > 0:
            try:
                int(value, 16)
            except Exception:
                return None
        # Normalize the cell format
        self.update()
        cell_width = self.winfo_width()
        max_len = custom_table.to_byte_length(cell_width) * 2
        cur_pos = self.index("insert")
        if cur_pos == max_len + 1:
            # Typed past the end: keep the most recently entered digits.
            value = value[-max_len:]
        else:
            value = value[:max_len]
        if value == '':
            value = '0'
        fmt = '%%0%dX' % max_len
        return fmt % int(value, 16)
class custom_table(ttk.Treeview):
    """Hex-dump style editable table built on a ttk.Treeview.

    Column 0 is a 'LOAD' button column; the remaining columns show the
    binary content split according to *col_hdr* ('name:bytelen' strings).
    """
    _Padding = 20      # extra pixels per cell beyond the hex digits
    _Char_width = 6    # approximate pixel width of one hex character

    def __init__(self, parent, col_hdr, bins):
        cols = len(col_hdr)
        col_byte_len = []
        for col in range(cols):  # Columns
            # Each header entry is 'title:bytelen'.
            col_byte_len.append(int(col_hdr[col].split(':')[1]))
        byte_len = sum(col_byte_len)
        # Number of rows needed to display all of *bins* (round up).
        rows = (len(bins) + byte_len - 1) // byte_len
        self.rows = rows
        self.cols = cols
        self.col_byte_len = col_byte_len
        self.col_hdr = col_hdr
        self.size = len(bins)
        self.last_dir = ''
        style = ttk.Style()
        style.configure("Custom.Treeview.Heading",
                        font=('calibri', 10, 'bold'),
                        foreground="blue")
        ttk.Treeview.__init__(self, parent, height=rows,
                              columns=[''] + col_hdr, show='headings',
                              style="Custom.Treeview",
                              selectmode='none')
        self.bind("<Button-1>", self.click)
        self.bind("<FocusOut>", self.focus_out)
        # Shared in-place cell editor.
        self.entry = validating_entry(self, width=4, justify=tkinter.CENTER)
        self.heading(0, text='LOAD')
        self.column(0, width=60, stretch=0, anchor=tkinter.CENTER)
        for col in range(cols):  # Columns
            text = col_hdr[col].split(':')[0]
            byte_len = int(col_hdr[col].split(':')[1])
            self.heading(col+1, text=text)
            self.column(col+1, width=self.to_cell_width(byte_len),
                        stretch=0, anchor=tkinter.CENTER)
        idx = 0
        for row in range(rows):  # Rows
            # NOTE(review): 'text' is computed but never used; kept as-is.
            text = '%04X' % (row * len(col_hdr))
            vals = ['%04X:' % (cols * row)]
            for col in range(cols):  # Columns
                if idx >= len(bins):
                    break
                byte_len = int(col_hdr[col].split(':')[1])
                value = bytes_to_value(bins[idx:idx+byte_len])
                hex = ("%%0%dX" % (byte_len * 2)) % value
                vals.append(hex)
                idx += byte_len
            self.insert('', 'end', values=tuple(vals))
            if idx >= len(bins):
                break

    @staticmethod
    def to_cell_width(byte_len):
        # Pixel width needed to show byte_len bytes as hex digits.
        return byte_len * 2 * custom_table._Char_width + custom_table._Padding

    @staticmethod
    def to_byte_length(cell_width):
        # Inverse of to_cell_width().
        return(cell_width - custom_table._Padding) \
            // (2 * custom_table._Char_width)

    def focus_out(self, event):
        # Hide the cell editor when the table loses focus.
        self.entry.display(None)

    def refresh_bin(self, bins):
        """Reload every visible cell from the byte buffer *bins*."""
        if not bins:
            return
        # Reload binary into widget
        bin_len = len(bins)
        for row in range(self.rows):
            iid = self.get_children()[row]
            for col in range(self.cols):
                # Absolute byte offset of this cell within the buffer.
                idx = row * sum(self.col_byte_len) + \
                    sum(self.col_byte_len[:col])
                byte_len = self.col_byte_len[col]
                if idx + byte_len <= self.size:
                    byte_len = int(self.col_hdr[col].split(':')[1])
                    if idx + byte_len > bin_len:
                        # Buffer shorter than table: pad with zeros.
                        val = 0
                    else:
                        val = bytes_to_value(bins[idx:idx+byte_len])
                    hex_val = ("%%0%dX" % (byte_len * 2)) % val
                    self.set(iid, col + 1, hex_val)

    def get_cell(self, row, col):
        """Return the text of cell (row, col)."""
        iid = self.get_children()[row]
        txt = self.item(iid, 'values')[col]
        return txt

    def get_next_cell(self, row, col):
        """Return (text, row_id, col_id) of the cell after (row, col)."""
        rows = self.get_children()
        col += 1
        if col > self.cols:
            col = 1
            row += 1
        cnt = row * sum(self.col_byte_len) + sum(self.col_byte_len[:col])
        if cnt > self.size:
            # Reached the last cell, so roll back to beginning
            row = 0
            col = 1
        txt = self.get_cell(row, col)
        row_id = rows[row]
        col_id = '#%d' % (col + 1)
        return(txt, row_id, col_id)

    def set_cell(self, row, col, val):
        """Write *val* into cell (row, col)."""
        iid = self.get_children()[row]
        self.set(iid, col, val)

    def load_bin(self):
        """Ask for a .bin file; return its bytes padded/cut to self.size."""
        # Load binary from file
        path = filedialog.askopenfilename(
            initialdir=self.last_dir,
            title="Load binary file",
            filetypes=(("Binary files", "*.bin"), (
                "binary files", "*.bin")))
        if path:
            self.last_dir = os.path.dirname(path)
            fd = open(path, 'rb')
            bins = bytearray(fd.read())[:self.size]
            fd.close()
            # Zero-pad short files up to the table size.
            bins.extend(b'\x00' * (self.size - len(bins)))
            return bins
        return None

    def click(self, event):
        """Mouse handler: LOAD button, or open the in-place cell editor."""
        row_id = self.identify_row(event.y)
        col_id = self.identify_column(event.x)
        if row_id == '' and col_id == '#1':
            # Clicked on "LOAD" cell
            bins = self.load_bin()
            self.refresh_bin(bins)
            return
        if col_id == '#1':
            # Clicked on column 1(Offset column)
            return
        item = self.identify('item', event.x, event.y)
        if not item or not col_id:
            # Not clicked on valid cell
            return
        # Clicked cell
        row = int('0x' + row_id[1:], 0) - 1
        col = int(col_id[1:]) - 1
        if row * self.cols + col > self.size:
            return
        vals = self.item(item, 'values')
        if col < len(vals):
            txt = self.item(item, 'values')[col]
            self.entry.display(txt, row_id, col_id)

    def get(self):
        """Serialize the table contents back into a bytearray."""
        bins = bytearray()
        row_ids = self.get_children()
        for row_id in row_ids:
            row = int('0x' + row_id[1:], 0) - 1
            for col in range(self.cols):
                idx = row * sum(self.col_byte_len) + \
                    sum(self.col_byte_len[:col])
                byte_len = self.col_byte_len[col]
                if idx + byte_len > self.size:
                    break
                hex = self.item(row_id, 'values')[col + 1]
                # Mask to the cell's byte width before serializing.
                values = value_to_bytes(int(hex, 16)
                                        & ((1 << byte_len * 8) - 1), byte_len)
                bins.extend(values)
        return bins
class c_uint24(Structure):
    """Little-Endian 24-bit Unsigned Integer.

    Self-contained: uses int.to_bytes/int.from_bytes instead of the
    module-level Val2Bytes/Bytes2Val helpers, which are defined further
    down in this file (a fragile forward dependency).  Out-of-range
    values are masked to 24 bits, matching the old Val2Bytes behavior.
    """
    _pack_ = 1
    _fields_ = [('Data', (c_uint8 * 3))]

    def __init__(self, val=0):
        self.set_value(val)

    def __str__(self, indent=0):
        return '0x%.6x' % self.value

    def __int__(self):
        return self.get_value()

    def set_value(self, val):
        # Mask to 24 bits, then store as three little-endian bytes.
        self.Data[0:3] = list((val & 0xFFFFFF).to_bytes(3, 'little'))

    def get_value(self):
        # Reassemble the integer from the little-endian byte array.
        return int.from_bytes(bytes(self.Data[0:3]), 'little')

    value = property(get_value, set_value)
class EFI_FIRMWARE_VOLUME_HEADER(Structure):
    """Firmware volume header; Signature is b'_FVH' (checked in ParseFd)."""
    _fields_ = [
        ('ZeroVector', ARRAY(c_uint8, 16)),
        ('FileSystemGuid', ARRAY(c_uint8, 16)),
        ('FvLength', c_uint64),
        ('Signature', ARRAY(c_char, 4)),        # b'_FVH'
        ('Attributes', c_uint32),
        ('HeaderLength', c_uint16),
        ('Checksum', c_uint16),
        ('ExtHeaderOffset', c_uint16),          # 0 when no extended header
        ('Reserved', c_uint8),
        ('Revision', c_uint8)
    ]
class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure):
    """Optional FV extended header, located at FvHdr.ExtHeaderOffset."""
    _fields_ = [
        ('FvName', ARRAY(c_uint8, 16)),
        ('ExtHeaderSize', c_uint32)
    ]
class EFI_FFS_INTEGRITY_CHECK(Structure):
    """Header/file checksum pair embedded in an FFS file header."""
    _fields_ = [
        ('Header', c_uint8),
        ('File', c_uint8)
    ]
class EFI_FFS_FILE_HEADER(Structure):
    """FFS file header; Name of all 0xFF marks a padding/free-space file."""
    _fields_ = [
        ('Name', ARRAY(c_uint8, 16)),           # file GUID as raw bytes
        ('IntegrityCheck', EFI_FFS_INTEGRITY_CHECK),
        ('Type', c_uint8),
        ('Attributes', c_uint8),
        ('Size', c_uint24),                     # 24-bit total file size
        ('State', c_uint8)
    ]
class EFI_COMMON_SECTION_HEADER(Structure):
    """Common header that prefixes every FFS section."""
    _fields_ = [
        ('Size', c_uint24),                     # 24-bit total section size
        ('Type', c_uint8)                       # see EFI_SECTION_TYPE
    ]
class EFI_SECTION_TYPE:
    """Enumeration of all valid firmware file section types."""
    # Encapsulation section types
    ALL = 0x00
    COMPRESSION = 0x01
    GUID_DEFINED = 0x02
    DISPOSABLE = 0x03
    # Leaf section types
    PE32 = 0x10
    PIC = 0x11
    TE = 0x12
    DXE_DEPEX = 0x13
    VERSION = 0x14
    USER_INTERFACE = 0x15
    COMPATIBILITY16 = 0x16
    FIRMWARE_VOLUME_IMAGE = 0x17
    FREEFORM_SUBTYPE_GUID = 0x18
    RAW = 0x19                      # used to locate the FSP info header
    PEI_DEPEX = 0x1b
    SMM_DEPEX = 0x1c
class FSP_COMMON_HEADER(Structure):
    """Generic signature+length prefix shared by FSP sub-tables."""
    _fields_ = [
        ('Signature', ARRAY(c_char, 4)),
        ('HeaderLength', c_uint32)
    ]
class FSP_INFORMATION_HEADER(Structure):
    """FSP information header; Signature is b'FSPH' (checked in ParseFsp)."""
    _fields_ = [
        ('Signature', ARRAY(c_char, 4)),        # b'FSPH'
        ('HeaderLength', c_uint32),
        ('Reserved1', c_uint16),
        ('SpecVersion', c_uint8),
        ('HeaderRevision', c_uint8),            # >= 6 adds ExtendedImageRevision
        ('ImageRevision', c_uint32),
        ('ImageId', ARRAY(c_char, 8)),
        ('ImageSize', c_uint32),
        ('ImageBase', c_uint32),                # patched at offset 0x1C on rebase
        ('ImageAttribute', c_uint16),
        ('ComponentAttribute', c_uint16),       # bits 12..15 select T/M/S/O type
        ('CfgRegionOffset', c_uint32),
        ('CfgRegionSize', c_uint32),
        ('Reserved2', c_uint32),
        ('TempRamInitEntryOffset', c_uint32),
        ('Reserved3', c_uint32),
        ('NotifyPhaseEntryOffset', c_uint32),
        ('FspMemoryInitEntryOffset', c_uint32),
        ('TempRamExitEntryOffset', c_uint32),
        ('FspSiliconInitEntryOffset', c_uint32),
        ('FspMultiPhaseSiInitEntryOffset', c_uint32),
        ('ExtendedImageRevision', c_uint16),
        ('Reserved4', c_uint16)
    ]
class FSP_EXTENDED_HEADER(Structure):
    """FSP extended header, located right after the information header."""
    _fields_ = [
        ('Signature', ARRAY(c_char, 4)),
        ('HeaderLength', c_uint32),
        ('Revision', c_uint8),
        ('Reserved', c_uint8),
        ('FspProducerId', ARRAY(c_char, 6)),
        ('FspProducerRevision', c_uint32),
        ('FspProducerDataSize', c_uint32)
    ]
class FSP_PATCH_TABLE(Structure):
    """'FSPP' patch table header; PatchEntryNum c_uint32 entries follow."""
    _fields_ = [
        ('Signature', ARRAY(c_char, 4)),        # b'FSPP'
        ('HeaderLength', c_uint16),
        ('HeaderRevision', c_uint8),
        ('Reserved', c_uint8),
        ('PatchEntryNum', c_uint32)
    ]
class Section:
    """One FFS section: common header plus the raw section bytes."""

    def __init__(self, offset, secdata):
        # offset:  section offset relative to the start of its FFS file
        # secdata: buffer starting at the section header; only the first
        #          SecHdr.Size bytes belong to this section
        self.SecHdr = EFI_COMMON_SECTION_HEADER.from_buffer(secdata, 0)
        self.SecData = secdata[0:int(self.SecHdr.Size)]
        self.Offset = offset
def AlignPtr(offset, alignment=8):
    """Round *offset* up to the next multiple of *alignment*.

    *alignment* must be a power of two (2, 4 and 8 are used in this file).
    """
    mask = alignment - 1
    return (offset + mask) & ~mask
def Bytes2Val(bytes):
    """Convert a little-endian sequence of byte values to an integer.

    Accepts any iterable of ints (list, bytes, ctypes array slice).
    Returns 0 for an empty sequence; the previous ``reduce``-based
    implementation raised ``TypeError`` in that case.
    """
    value = 0
    for shift, byte in enumerate(bytes):
        value |= byte << (8 * shift)
    return value
def Val2Bytes(value, blen):
    """Split *value* into a list of *blen* little-endian byte values."""
    result = []
    for _ in range(blen):
        result.append(value & 0xff)
        value >>= 8
    return result
class FirmwareFile:
    """One FFS file carved out of a firmware volume."""

    def __init__(self, offset, filedata):
        # offset:   file offset relative to the firmware volume start
        # filedata: buffer holding at least the whole FFS file
        self.FfsHdr = EFI_FFS_FILE_HEADER.from_buffer(filedata, 0)
        self.FfsData = filedata[0:int(self.FfsHdr.Size)]
        self.Offset = offset
        self.SecList = []

    def ParseFfs(self):
        """Split the file payload into its section list (self.SecList)."""
        ffssize = len(self.FfsData)
        offset = sizeof(self.FfsHdr)
        # Fix: Name is a ctypes c_uint8 array, so comparing it against a
        # *str* was always unequal and padding files were parsed as if
        # they contained sections.  Compare raw bytes instead so files
        # whose name is all 0xFF (padding) are skipped.
        if bytes(self.FfsHdr.Name) != b'\xff' * 16:
            while offset < (ffssize - sizeof(EFI_COMMON_SECTION_HEADER)):
                sechdr = EFI_COMMON_SECTION_HEADER.from_buffer(
                    self.FfsData, offset)
                sec = Section(
                    offset, self.FfsData[offset:offset + int(sechdr.Size)])
                self.SecList.append(sec)
                offset += int(sechdr.Size)
                # Sections are 4-byte aligned within an FFS file.
                offset = AlignPtr(offset, 4)
class FirmwareVolume:
    """One firmware volume (FV) carved out of the flash image."""

    def __init__(self, offset, fvdata):
        # offset: FV offset within the full flash image
        # fvdata: buffer holding at least the whole FV
        self.FvHdr = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(fvdata, 0)
        self.FvData = fvdata[0: self.FvHdr.FvLength]
        self.Offset = offset
        if self.FvHdr.ExtHeaderOffset > 0:
            self.FvExtHdr = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer(
                self.FvData, self.FvHdr.ExtHeaderOffset)
        else:
            self.FvExtHdr = None
        self.FfsList = []

    def ParseFv(self):
        """Split the FV payload into its FFS file list (self.FfsList)."""
        fvsize = len(self.FvData)
        if self.FvExtHdr:
            offset = self.FvHdr.ExtHeaderOffset + self.FvExtHdr.ExtHeaderSize
        else:
            offset = self.FvHdr.HeaderLength
        offset = AlignPtr(offset)
        while offset < (fvsize - sizeof(EFI_FFS_FILE_HEADER)):
            ffshdr = EFI_FFS_FILE_HEADER.from_buffer(self.FvData, offset)
            # Fix: Name is a ctypes c_uint8 array; comparing it to a *str*
            # was always False, so the free-space terminator (name all
            # 0xFF, size 0xFFFFFF) was never detected.  Compare bytes.
            if (bytes(ffshdr.Name) == b'\xff' * 16) and \
                    (int(ffshdr.Size) == 0xFFFFFF):
                # Reached the free space at the end of the FV.
                offset = fvsize
            else:
                ffs = FirmwareFile(
                    offset, self.FvData[offset:offset + int(ffshdr.Size)])
                ffs.ParseFfs()
                self.FfsList.append(ffs)
                offset += int(ffshdr.Size)
                # FFS files are 8-byte aligned within the FV.
                offset = AlignPtr(offset)
class FspImage:
    """One FSP component (T/M/S/O) discovered inside the firmware device."""

    def __init__(self, offset, fih, fihoff, patch):
        # offset: FSP base offset within the full flash image
        # fih:    parsed FSP_INFORMATION_HEADER
        # fihoff: offset of that header relative to the FSP base
        # patch:  relocation patch entries from the 'FSPP' table
        self.Fih = fih
        self.FihOffset = fihoff
        self.Offset = offset
        self.FvIdxList = []
        # Component type letter from bits 12..15 of ComponentAttribute
        # (T=TempRam, M=Memory, S=Silicon, O=OEM, X=invalid).
        self.Type = "XTMSXXXXOXXXXXXX"[(fih.ComponentAttribute >> 12) & 0x0F]
        self.PatchList = patch
        # Always patch ImageBase (offset 0x1C inside the FSP info header).
        self.PatchList.append(fihoff + 0x1C)

    def AppendFv(self, FvIdx):
        # Record the index of a firmware volume belonging to this FSP.
        self.FvIdxList.append(FvIdx)

    def Patch(self, delta, fdbin):
        """Rebase this FSP by *delta*, patching *fdbin* in place.

        Returns (count, applied): total patch entries processed and how
        many were actually applied, both excluding the implicit
        ImageBase entry appended in __init__.
        """
        count = 0
        applied = 0
        for idx, patch in enumerate(self.PatchList):
            # Upper nibble of byte 3 encodes the patch type.
            ptype = (patch >> 24) & 0x0F
            if ptype not in [0x00, 0x0F]:
                raise Exception('ERROR: Invalid patch type %d !' % ptype)
            if patch & 0x80000000:
                # Tail-relative (negative) patch location.
                patch = self.Fih.ImageSize - (0x1000000 - (patch & 0xFFFFFF))
            else:
                patch = patch & 0xFFFFFF
            if (patch < self.Fih.ImageSize) and \
                    (patch + sizeof(c_uint32) <= self.Fih.ImageSize):
                offset = patch + self.Offset
                # Read-modify-write a 32-bit little-endian value in place.
                value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint32)])
                value += delta
                fdbin[offset:offset+sizeof(c_uint32)] = Val2Bytes(
                    value, sizeof(c_uint32))
                applied += 1
            count += 1
        # Don't count the FSP base address patch entry appended at the end
        if count != 0:
            count -= 1
            applied -= 1
        return (count, applied)
class FirmwareDevice:
    """Parses a full flash image (FD) into FVs, FFS files and FSP images,
    and can render a human-readable FSP header report in a Tk window."""

    def __init__(self, offset, FdData):
        self.FvList = []
        self.FspList = []
        self.FspExtList = []
        self.FihList = []
        self.BuildList = []
        self.OutputText = ""
        # NOTE(review): the 'offset' argument is ignored and Offset is
        # hard-coded to 0 — confirm whether any caller relies on it.
        self.Offset = 0
        self.FdData = FdData

    def ParseFd(self):
        """Walk the FD and collect every firmware volume into self.FvList."""
        offset = 0
        fdsize = len(self.FdData)
        self.FvList = []
        while offset < (fdsize - sizeof(EFI_FIRMWARE_VOLUME_HEADER)):
            fvh = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(self.FdData, offset)
            if b'_FVH' != fvh.Signature:
                raise Exception("ERROR: Invalid FV header !")
            fv = FirmwareVolume(
                offset, self.FdData[offset:offset + fvh.FvLength])
            fv.ParseFv()
            self.FvList.append(fv)
            offset += fv.FvHdr.FvLength

    def CheckFsp(self):
        """Verify all FSP components share one ImageId/ImageRevision."""
        if len(self.FspList) == 0:
            return
        fih = None
        for fsp in self.FspList:
            if not fih:
                fih = fsp.Fih
            else:
                newfih = fsp.Fih
                if (newfih.ImageId != fih.ImageId) or \
                        (newfih.ImageRevision != fih.ImageRevision):
                    raise Exception(
                        "ERROR: Inconsistent FSP ImageId or "
                        "ImageRevision detected !")

    def ParseFsp(self):
        """Find FSP info headers in the FVs and build self.FspList.

        An FSP component may span several FVs; 'flen' tracks how many
        bytes of the current component are still unaccounted for.
        """
        flen = 0
        for idx, fv in enumerate(self.FvList):
            # Check if this FV contains FSP header
            if flen == 0:
                if len(fv.FfsList) == 0:
                    continue
                ffs = fv.FfsList[0]
                if len(ffs.SecList) == 0:
                    continue
                sec = ffs.SecList[0]
                if sec.SecHdr.Type != EFI_SECTION_TYPE.RAW:
                    continue
                # The info header lives in the first RAW section of the
                # first FFS file of the FV.
                fihoffset = ffs.Offset + sec.Offset + sizeof(sec.SecHdr)
                fspoffset = fv.Offset
                offset = fspoffset + fihoffset
                fih = FSP_INFORMATION_HEADER.from_buffer(self.FdData, offset)
                self.FihList.append(fih)
                if b'FSPH' != fih.Signature:
                    continue
                offset += fih.HeaderLength
                offset = AlignPtr(offset, 2)
                Extfih = FSP_EXTENDED_HEADER.from_buffer(self.FdData, offset)
                self.FspExtList.append(Extfih)
                offset = AlignPtr(offset, 4)
                plist = []
                # Scan the following sub-tables for the 'FSPP' patch table.
                while True:
                    fch = FSP_COMMON_HEADER.from_buffer(self.FdData, offset)
                    if b'FSPP' != fch.Signature:
                        offset += fch.HeaderLength
                        offset = AlignPtr(offset, 4)
                    else:
                        fspp = FSP_PATCH_TABLE.from_buffer(
                            self.FdData, offset)
                        offset += sizeof(fspp)
                        # Build description string starts 32 bytes past the
                        # patch table header and ends at the first 0xFF.
                        start_offset = offset + 32
                        end_offset = offset + 32
                        while True:
                            end_offset += 1
                            if(self.FdData[
                                    end_offset: end_offset + 1] == b'\xff'):
                                break
                        self.BuildList.append(
                            self.FdData[start_offset:end_offset])
                        pdata = (c_uint32 * fspp.PatchEntryNum).from_buffer(
                            self.FdData, offset)
                        plist = list(pdata)
                        break
                fsp = FspImage(fspoffset, fih, fihoffset, plist)
                fsp.AppendFv(idx)
                self.FspList.append(fsp)
                # Remaining bytes of this component beyond the current FV.
                flen = fsp.Fih.ImageSize - fv.FvHdr.FvLength
            else:
                # This FV is a continuation of the current FSP component.
                fsp.AppendFv(idx)
                flen -= fv.FvHdr.FvLength
                if flen < 0:
                    raise Exception("ERROR: Incorrect FV size in image !")
        self.CheckFsp()

    def IsIntegerType(self, val):
        """Return True when *val* is an integer (handles Python 2 long)."""
        if sys.version_info[0] < 3:
            if type(val) in (int, long):  # noqa: F821
                return True
        else:
            if type(val) is int:
                return True
        return False

    def ConvertRevisionString(self, obj):
        """Format the combined image revision string for *obj*.

        Relies on ImageRevision appearing before ExtendedImageRevision in
        obj._fields_: the Major/Minor/Revision/BuildNumber locals are set
        by the ImageRevision branch and extended by the
        ExtendedImageRevision branch before being formatted.
        """
        for field in obj._fields_:
            key = field[0]
            val = getattr(obj, key)
            rep = ''
            if self.IsIntegerType(val):
                if (key == 'ImageRevision'):
                    FspImageRevisionMajor = ((val >> 24) & 0xFF)
                    FspImageRevisionMinor = ((val >> 16) & 0xFF)
                    FspImageRevisionRevision = ((val >> 8) & 0xFF)
                    FspImageRevisionBuildNumber = (val & 0xFF)
                    rep = '0x%08X' % val
                elif (key == 'ExtendedImageRevision'):
                    # Extended revision widens Revision/BuildNumber to 16 bit.
                    FspImageRevisionRevision |= (val & 0xFF00)
                    FspImageRevisionBuildNumber |= ((val << 8) & 0xFF00)
                    rep = "0x%04X ('%02X.%02X.%04X.%04X')" % (val, FspImageRevisionMajor, FspImageRevisionMinor, FspImageRevisionRevision, FspImageRevisionBuildNumber)
                    return rep
        return rep

    def OutputFsp(self):
        """Show a Tk window listing every parsed FSP header in detail."""
        def copy_text_to_clipboard():
            window.clipboard_clear()
            window.clipboard_append(self.OutputText)
        window = tkinter.Tk()
        window.title("Fsp Headers")
        window.resizable(0, 0)
        # Window Size
        window.geometry("300x400+350+150")
        frame = tkinter.Frame(window)
        frame.pack(side=tkinter.BOTTOM)
        # Vertical (y) Scroll Bar
        scroll = tkinter.Scrollbar(window)
        scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
        text = tkinter.Text(window,
                            wrap=tkinter.NONE, yscrollcommand=scroll.set)
        i = 0
        self.OutputText = self.OutputText + "Fsp Header Details \n\n"
        while i < len(self.FihList):
            try:
                # self.OutputText += str(self.BuildList[i].decode()) + "\n"
                self.OutputText += str(self.BuildList[i]) + "\n"
            except Exception:
                self.OutputText += "No description found\n"
            self.OutputText += "FSP Header :\n "
            self.OutputText += "Signature : " + \
                str(self.FihList[i].Signature.decode('utf-8')) + "\n "
            self.OutputText += "Header Length : " + \
                str(hex(self.FihList[i].HeaderLength)) + "\n "
            self.OutputText += "Reserved1 : " + \
                str(hex(self.FihList[i].Reserved1)) + "\n "
            self.OutputText += "Header Revision : " + \
                str(hex(self.FihList[i].HeaderRevision)) + "\n "
            self.OutputText += "Spec Version : " + \
                str(hex(self.FihList[i].SpecVersion)) + "\n "
            self.OutputText += "Image Revision : " + \
                str(hex(self.FihList[i].ImageRevision)) + "\n "
            self.OutputText += "Image Id : " + \
                str(self.FihList[i].ImageId.decode('utf-8')) + "\n "
            self.OutputText += "Image Size : " + \
                str(hex(self.FihList[i].ImageSize)) + "\n "
            self.OutputText += "Image Base : " + \
                str(hex(self.FihList[i].ImageBase)) + "\n "
            self.OutputText += "Image Attribute : " + \
                str(hex(self.FihList[i].ImageAttribute)) + "\n "
            self.OutputText += "Component Attribute : " + \
                str(hex(self.FihList[i].ComponentAttribute)) + "\n "
            self.OutputText += "Cfg Region Offset : " + \
                str(hex(self.FihList[i].CfgRegionOffset)) + "\n "
            self.OutputText += "Cfg Region Size : " + \
                str(hex(self.FihList[i].CfgRegionSize)) + "\n "
            self.OutputText += "Reserved2 : " + \
                str(hex(self.FihList[i].Reserved2)) + "\n "
            self.OutputText += "Temp Ram Init Entry : " + \
                str(hex(self.FihList[i].TempRamInitEntryOffset)) + "\n "
            self.OutputText += "Reserved3 : " + \
                str(hex(self.FihList[i].Reserved3)) + "\n "
            self.OutputText += "Notify Phase Entry : " + \
                str(hex(self.FihList[i].NotifyPhaseEntryOffset)) + "\n "
            self.OutputText += "Fsp Memory Init Entry : " + \
                str(hex(self.FihList[i].FspMemoryInitEntryOffset)) + "\n "
            self.OutputText += "Temp Ram Exit Entry : " + \
                str(hex(self.FihList[i].TempRamExitEntryOffset)) + "\n "
            self.OutputText += "Fsp Silicon Init Entry : " + \
                str(hex(self.FihList[i].FspSiliconInitEntryOffset)) + "\n "
            self.OutputText += "Fsp Multi Phase Si Init Entry : " + \
                str(hex(self.FihList[i].FspMultiPhaseSiInitEntryOffset)) + "\n "
            # display ExtendedImageRevision & Reserved4 if HeaderRevision >= 6
            # NOTE(review): the flag ends up reflecting the *last* header in
            # FihList rather than header i — looks like a latent bug; kept
            # as-is here since this is a documentation-only pass.
            for fsp in self.FihList:
                if fsp.HeaderRevision >= 6:
                    Display_ExtndImgRev = TRUE
                else:
                    Display_ExtndImgRev = FALSE
                    self.OutputText += "\n"
            if Display_ExtndImgRev == TRUE:
                self.OutputText += "ExtendedImageRevision : " + \
                    str(self.ConvertRevisionString(self.FihList[i])) + "\n "
                self.OutputText += "Reserved4 : " + \
                    str(hex(self.FihList[i].Reserved4)) + "\n\n"
            self.OutputText += "FSP Extended Header:\n "
            self.OutputText += "Signature : " + \
                str(self.FspExtList[i].Signature.decode('utf-8')) + "\n "
            self.OutputText += "Header Length : " + \
                str(hex(self.FspExtList[i].HeaderLength)) + "\n "
            self.OutputText += "Header Revision : " + \
                str(hex(self.FspExtList[i].Revision)) + "\n "
            self.OutputText += "Fsp Producer Id : " + \
                str(self.FspExtList[i].FspProducerId.decode('utf-8')) + "\n "
            self.OutputText += "FspProducerRevision : " + \
                str(hex(self.FspExtList[i].FspProducerRevision)) + "\n\n"
            i += 1
        text.insert(tkinter.INSERT, self.OutputText)
        text.pack()
        # Configure the scrollbars
        scroll.config(command=text.yview)
        copy_button = tkinter.Button(
            window, text="Copy to Clipboard", command=copy_text_to_clipboard)
        copy_button.pack(in_=frame, side=tkinter.LEFT, padx=20, pady=10)
        exit_button = tkinter.Button(
            window, text="Close", command=window.destroy)
        exit_button.pack(in_=frame, side=tkinter.RIGHT, padx=20, pady=10)
        window.mainloop()
class state:
    """Tiny mutable boolean flag shared between GUI event callbacks."""

    def __init__(self):
        # Start in the inactive state.
        self.state = False

    def set(self, value):
        """Store a new flag value."""
        self.state = value

    def get(self):
        """Return the current flag value."""
        return self.state
class application(tkinter.Frame):
def __init__(self, master=None):
    """Build the main window: search bar, page tree, config pane, menu.

    Also honors optional command-line arguments: argv[1] is a YAML/PKL
    config file, argv[2] an optional .dlt or .bin to apply on top.
    """
    root = master
    self.debug = True
    self.mode = 'FSP'
    self.last_dir = '.'
    self.page_id = ''
    self.page_list = {}
    self.conf_list = {}
    self.cfg_page_dict = {}
    self.cfg_data_obj = None
    self.org_cfg_data_bin = None
    # Track which pane the mouse is over so wheel events go to one pane.
    self.in_left = state()
    self.in_right = state()
    self.search_text = ''
    # Check if current directory contains a file with a .yaml extension
    # if not default self.last_dir to a Platform directory where it is
    # easier to locate *BoardPkg\CfgData\*Def.yaml files
    self.last_dir = '.'
    if not any(fname.endswith('.yaml') for fname in os.listdir('.')):
        platform_path = Path(os.path.realpath(__file__)).parents[2].\
            joinpath('Platform')
        if platform_path.exists():
            self.last_dir = platform_path
    tkinter.Frame.__init__(self, master, borderwidth=2)
    # Menu labels, indexed below when wiring up the File menu.
    self.menu_string = [
        'Save Config Data to Binary', 'Load Config Data from Binary',
        'Show Binary Information',
        'Load Config Changes from Delta File',
        'Save Config Changes to Delta File',
        'Save Full Config Data to Delta File',
        'Open Config BSF file'
    ]
    root.geometry("1200x800")
    # Search string
    fram = tkinter.Frame(root)
    # adding label to search box
    tkinter.Label(fram, text='Text to find:').pack(side=tkinter.LEFT)
    # adding of single line text box
    self.edit = tkinter.Entry(fram, width=30)
    # positioning of text box
    self.edit.pack(
        side=tkinter.LEFT, fill=tkinter.BOTH, expand=1, padx=(4, 4))
    # setting focus
    self.edit.focus_set()
    # adding of search button
    butt = tkinter.Button(fram, text='Search', relief=tkinter.GROOVE,
                          command=self.search_bar)
    butt.pack(side=tkinter.RIGHT, padx=(4, 4))
    fram.pack(side=tkinter.TOP, anchor=tkinter.SE)
    # Horizontal split: page tree on the left, config page on the right.
    paned = ttk.Panedwindow(root, orient=tkinter.HORIZONTAL)
    paned.pack(fill=tkinter.BOTH, expand=True, padx=(4, 4))
    status = tkinter.Label(master, text="", bd=1, relief=tkinter.SUNKEN,
                           anchor=tkinter.W)
    status.pack(side=tkinter.BOTTOM, fill=tkinter.X)
    frame_left = ttk.Frame(paned, height=800, relief="groove")
    self.left = ttk.Treeview(frame_left, show="tree")
    # Set up tree HScroller
    pady = (10, 10)
    self.tree_scroll = ttk.Scrollbar(frame_left,
                                     orient="vertical",
                                     command=self.left.yview)
    self.left.configure(yscrollcommand=self.tree_scroll.set)
    self.left.bind("<<TreeviewSelect>>", self.on_config_page_select_change)
    self.left.bind("<Enter>", lambda e: self.in_left.set(True))
    self.left.bind("<Leave>", lambda e: self.in_left.set(False))
    self.left.bind("<MouseWheel>", self.on_tree_scroll)
    self.left.pack(side='left',
                   fill=tkinter.BOTH,
                   expand=True,
                   padx=(5, 0),
                   pady=pady)
    self.tree_scroll.pack(side='right', fill=tkinter.Y,
                          pady=pady, padx=(0, 5))
    frame_right = ttk.Frame(paned, relief="groove")
    self.frame_right = frame_right
    # The right pane is a canvas hosting a grid frame so it can scroll.
    self.conf_canvas = tkinter.Canvas(frame_right, highlightthickness=0)
    self.page_scroll = ttk.Scrollbar(frame_right,
                                     orient="vertical",
                                     command=self.conf_canvas.yview)
    self.right_grid = ttk.Frame(self.conf_canvas)
    self.conf_canvas.configure(yscrollcommand=self.page_scroll.set)
    self.conf_canvas.pack(side='left',
                          fill=tkinter.BOTH,
                          expand=True,
                          pady=pady,
                          padx=(5, 0))
    self.page_scroll.pack(side='right', fill=tkinter.Y,
                          pady=pady, padx=(0, 5))
    self.conf_canvas.create_window(0, 0, window=self.right_grid,
                                   anchor='nw')
    self.conf_canvas.bind('<Enter>', lambda e: self.in_right.set(True))
    self.conf_canvas.bind('<Leave>', lambda e: self.in_right.set(False))
    self.conf_canvas.bind("<Configure>", self.on_canvas_configure)
    self.conf_canvas.bind_all("<MouseWheel>", self.on_page_scroll)
    paned.add(frame_left, weight=2)
    paned.add(frame_right, weight=10)
    style = ttk.Style()
    style.layout("Treeview", [('Treeview.treearea', {'sticky': 'nswe'})])
    menubar = tkinter.Menu(root)
    file_menu = tkinter.Menu(menubar, tearoff=0)
    file_menu.add_command(label="Open Config YAML file",
                          command=self.load_from_yaml)
    file_menu.add_command(label=self.menu_string[6],
                          command=self.load_from_bsf_file)
    file_menu.add_command(label=self.menu_string[2],
                          command=self.load_from_fd)
    # The following entries stay disabled until a config file is loaded.
    file_menu.add_command(label=self.menu_string[0],
                          command=self.save_to_bin,
                          state='disabled')
    file_menu.add_command(label=self.menu_string[1],
                          command=self.load_from_bin,
                          state='disabled')
    file_menu.add_command(label=self.menu_string[3],
                          command=self.load_from_delta,
                          state='disabled')
    file_menu.add_command(label=self.menu_string[4],
                          command=self.save_to_delta,
                          state='disabled')
    file_menu.add_command(label=self.menu_string[5],
                          command=self.save_full_to_delta,
                          state='disabled')
    file_menu.add_command(label="About", command=self.about)
    menubar.add_cascade(label="File", menu=file_menu)
    self.file_menu = file_menu
    root.config(menu=menubar)
    if len(sys.argv) > 1:
        path = sys.argv[1]
        if not path.endswith('.yaml') and not path.endswith('.pkl'):
            messagebox.showerror('LOADING ERROR',
                                 "Unsupported file '%s' !" % path)
            return
        else:
            self.load_cfg_file(path)
    if len(sys.argv) > 2:
        path = sys.argv[2]
        if path.endswith('.dlt'):
            self.load_delta_file(path)
        elif path.endswith('.bin'):
            self.load_bin_file(path)
        else:
            messagebox.showerror('LOADING ERROR',
                                 "Unsupported file '%s' !" % path)
            return
def search_bar(self):
    """Re-filter the currently displayed page using the search box text."""
    # get data from text box
    self.search_text = self.edit.get()
    # Clear the page and update it according to search value
    self.refresh_config_data_page()
def set_object_name(self, widget, name):
self.conf_list[id(widget)] = name
def get_object_name(self, widget):
if id(widget) in self.conf_list:
return self.conf_list[id(widget)]
else:
return None
def limit_entry_size(self, variable, limit):
    """Clamp the text held by tkinter *variable* to *limit* characters."""
    current = variable.get()
    if len(current) > limit:
        variable.set(current[:limit])
def on_canvas_configure(self, event):
    """Keep the config grid column as wide as the canvas on resize."""
    self.right_grid.grid_columnconfigure(0, minsize=event.width)
def on_tree_scroll(self, event):
    """Redirect wheel events over the tree to the right pane when apt."""
    if not self.in_left.get() and self.in_right.get():
        # This prevents scroll event from being handled by both left and
        # right frame at the same time.
        self.on_page_scroll(event)
        return 'break'
def on_page_scroll(self, event):
    """Scroll the config canvas on mouse wheel (delta in units of 120)."""
    if self.in_right.get():
        # Only scroll when it is in active area
        # NOTE(review): 'min'/'max' shadow the builtins; kept as-is.
        min, max = self.page_scroll.get()
        if not((min == 0.0) and (max == 1.0)):
            self.conf_canvas.yview_scroll(-1 * int(event.delta / 120),
                                          'units')
def update_visibility_for_widget(self, widget, args):
    """Show/hide/gray *widget* based on its item's FV segment, condition
    and the current search filter.  Returns the resulting visibility."""
    visible = True
    item = self.get_config_data_item_from_widget(widget, True)
    if item is None:
        return visible
    elif not item:
        return visible
    if self.cfg_data_obj.binseg_dict:
        # Hide widgets whose backing binary segment is absent (-1).
        str_split = item['path'].split('.')
        if str_split[-2] not in CGenYamlCfg.available_fv and \
                str_split[-2] not in CGenYamlCfg.missing_fv:
            if self.cfg_data_obj.binseg_dict[str_split[-3]] == -1:
                visible = False
                widget.grid_remove()
                return visible
        else:
            if self.cfg_data_obj.binseg_dict[str_split[-2]] == -1:
                visible = False
                widget.grid_remove()
                return visible
    # Condition result: 0 = hide, 2 = gray out, anything else = show.
    result = 1
    if item['condition']:
        result = self.evaluate_condition(item)
    if result == 2:
        # Gray
        widget.configure(state='disabled')
    elif result == 0:
        # Hide
        visible = False
        widget.grid_remove()
    else:
        # Show
        widget.grid()
        widget.configure(state='normal')
    # Apply the search-box filter last (case-insensitive substring).
    if visible and self.search_text != '':
        name = item['name']
        if name.lower().find(self.search_text.lower()) == -1:
            visible = False
            widget.grid_remove()
    return visible
def update_widgets_visibility_on_page(self):
    """Re-evaluate visibility for every widget on the current page."""
    self.walk_widgets_in_layout(self.right_grid,
                                self.update_visibility_for_widget)
def combo_select_changed(self, event):
    """Combo box change: store the new value, then refresh visibility."""
    self.update_config_data_from_widget(event.widget, None)
    self.update_widgets_visibility_on_page()
def edit_num_finished(self, event):
    """Numeric entry lost focus: range-check the text and commit it.

    Invalid or out-of-range input is silently discarded and the widget
    is refreshed from the item's stored value.
    """
    widget = event.widget
    item = self.get_config_data_item_from_widget(widget)
    if not item:
        return
    parts = item['type'].split(',')
    if len(parts) > 3:
        # Type string carries '(min, max)' bounds in parts 2 and 3.
        # NOTE(review): 'min'/'max' shadow the builtins; kept as-is.
        min = parts[2].lstrip()[1:]
        max = parts[3].rstrip()[:-1]
        min_val = array_str_to_value(min)
        max_val = array_str_to_value(max)
        text = widget.get()
        if ',' in text:
            # Comma-separated byte list: wrap it in braces for parsing.
            text = '{ %s }' % text
        try:
            value = array_str_to_value(text)
            if value < min_val or value > max_val:
                raise Exception('Invalid input!')
            self.set_config_item_value(item, text)
        except Exception:
            # Out-of-range/unparsable input: keep the previous value.
            pass
        text = item['value'].strip('{').strip('}').strip()
        widget.delete(0, tkinter.END)
        widget.insert(0, text)
    self.update_widgets_visibility_on_page()
def update_page_scroll_bar(self):
    """Resize the canvas scroll region to fit the rebuilt page."""
    # Update scrollbar
    self.frame_right.update()
    self.conf_canvas.config(scrollregion=self.conf_canvas.bbox("all"))
def on_config_page_select_change(self, event):
    """Tree selection changed: save the old page, build the new one."""
    self.update_config_data_on_page()
    sel = self.left.selection()
    if len(sel) > 0:
        page_id = sel[0]
        self.build_config_data_page(page_id)
        self.update_widgets_visibility_on_page()
        self.update_page_scroll_bar()
def walk_widgets_in_layout(self, parent, callback_function, args=None):
    """Invoke callback_function(widget, args) on each child of *parent*."""
    for widget in parent.winfo_children():
        callback_function(widget, args)
def clear_widgets_inLayout(self, parent=None):
    """Destroy every widget under *parent* (default: the config grid)."""
    if parent is None:
        parent = self.right_grid
    for widget in parent.winfo_children():
        widget.destroy()
    parent.grid_forget()
    # Drop the widget-id -> item-name map; the widgets are gone.
    self.conf_list.clear()
def build_config_page_tree(self, cfg_page, parent):
    """Recursively populate the left Treeview from the page hierarchy."""
    for page in cfg_page['child']:
        page_id = next(iter(page))
        # Put CFG items into related page list
        self.page_list[page_id] = self.cfg_data_obj.get_cfg_list(page_id)
        self.page_list[page_id].sort(key=lambda x: x['order'])
        page_name = self.cfg_data_obj.get_page_title(page_id)
        child = self.left.insert(
            parent, 'end',
            iid=page_id, text=page_name,
            value=0)
        if len(page[page_id]) > 0:
            # Descend into sub-pages.
            self.build_config_page_tree(page[page_id], child)
def is_config_data_loaded(self):
return True if len(self.page_list) else False
def set_current_config_page(self, page_id):
    """Remember which page is currently shown in the right pane."""
    self.page_id = page_id
def get_current_config_page(self):
    """Return the id of the page currently shown in the right pane."""
    return self.page_id
def get_current_config_data(self):
page_id = self.get_current_config_page()
if page_id in self.page_list:
return self.page_list[page_id]
else:
return []
# NOTE(review): class-level mutable dict, shared across instances.
# Apparently filled elsewhere (add_config_item, not in view) and cleared
# after the warning dialog in build_config_data_page — confirm usage.
invalid_values = {}
def build_config_data_page(self, page_id):
    """Populate the right pane with the widgets of page *page_id*.

    Also pops a warning dialog if any invalid options/values were
    recorded while the items were being added.
    """
    self.clear_widgets_inLayout()
    self.set_current_config_page(page_id)
    disp_list = []
    for item in self.get_current_config_data():
        disp_list.append(item)
    row = 0
    # Items are laid out two grid rows apart, in 'order' sequence.
    disp_list.sort(key=lambda x: x['order'])
    for item in disp_list:
        self.add_config_item(item, row)
        row += 2
    if self.invalid_values:
        # Fix: corrected typo 'contails' -> 'contains' in user message.
        string = 'The following contains invalid options/values \n\n'
        for i in self.invalid_values:
            string += i + ": " + str(self.invalid_values[i]) + "\n"
        reply = messagebox.showwarning('Warning!', string)
        if reply == 'ok':
            self.invalid_values.clear()
# Class-level default; overwritten per instance by load_config_data().
fsp_version = ''
def load_config_data(self, file_name):
    """Build a CGenYamlCfg from a .pkl or .yaml file and detect the FSP
    version ('2.X' or '1.X'); raises on unsupported/broken files."""
    gen_cfg_data = CGenYamlCfg()
    if file_name.endswith('.pkl'):
        with open(file_name, "rb") as pkl_file:
            # NOTE(review): marshal.load is not robust against
            # corrupted or untrusted .pkl input files.
            gen_cfg_data.__dict__ = marshal.load(pkl_file)
            gen_cfg_data.prepare_marshal(False)
    elif file_name.endswith('.yaml'):
        if gen_cfg_data.load_yaml(file_name) != 0:
            raise Exception(gen_cfg_data.get_last_error())
    else:
        raise Exception('Unsupported file "%s" !' % file_name)
    # checking fsp version
    if gen_cfg_data.detect_fsp():
        self.fsp_version = '2.X'
    else:
        self.fsp_version = '1.X'
    return gen_cfg_data
def about(self):
    """Show the About dialog with a centered version banner."""
    msg = 'Configuration Editor\n--------------------------------\n \
Version 0.8\n2021'
    lines = msg.split('\n')
    width = 30
    text = []
    for line in lines:
        # Center every line within a fixed-width banner.
        text.append(line.center(width, ' '))
    messagebox.showinfo('Config Editor', '\n'.join(text))
def update_last_dir(self, path):
self.last_dir = os.path.dirname(path)
def get_open_file_name(self, ftype):
    """Ask the user for a file of type *ftype* ('dlt'/'bin'/'yaml'/'bsf').

    Returns the chosen path (and updates last_dir), or None when the
    dialog or the confirmation question is cancelled.
    """
    if self.is_config_data_loaded():
        # Loading a BIN over live config data needs confirmation.
        if ftype == 'dlt':
            question = ''
        elif ftype == 'bin':
            question = 'All configuration will be reloaded from BIN file, \
continue ?'
        elif ftype == 'yaml':
            question = ''
        elif ftype == 'bsf':
            question = ''
        else:
            raise Exception('Unsupported file type !')
        if question:
            reply = messagebox.askquestion('', question, icon='warning')
            if reply == 'no':
                return None
    if ftype == 'yaml':
        if self.mode == 'FSP':
            file_type = 'YAML'
            file_ext = 'yaml'
        else:
            file_type = 'YAML or PKL'
            file_ext = 'pkl *.yaml'
    else:
        file_type = ftype.upper()
        file_ext = ftype
    path = filedialog.askopenfilename(
        initialdir=self.last_dir,
        title="Load file",
        filetypes=(("%s files" % file_type, "*.%s" % file_ext), (
            "all files", "*.*")))
    if path:
        self.update_last_dir(path)
        return path
    else:
        return None
def load_from_delta(self):
    """Menu action: pick a .dlt file and apply it on top of the config."""
    path = self.get_open_file_name('dlt')
    if not path:
        return
    self.load_delta_file(path)
def load_delta_file(self, path):
    """Apply a .dlt override on top of the pristine binary, then refresh.

    The config is first reset to the original binary so deltas never stack.
    Errors are reported in a dialog instead of propagating.
    """
    self.reload_config_data_from_bin(self.org_cfg_data_bin)
    try:
        self.cfg_data_obj.override_default_value(path)
    except Exception as e:
        messagebox.showerror('LOADING ERROR', str(e))
        return
    self.update_last_dir(path)
    self.refresh_config_data_page()
def load_from_bin(self):
    """Prompt for a firmware binary and reload config values from it."""
    # NOTE(review): filetypes is a set literal here; tkinter accepts any
    # iterable of (label, pattern) tuples.
    path = filedialog.askopenfilename(
        initialdir=self.last_dir,
        title="Load file",
        filetypes={("Binaries", "*.fv *.fd *.bin *.rom")})
    if not path:
        return
    self.load_bin_file(path)
def load_bin_file(self, path):
    """Load a configuration binary file and re-populate the pages from it.

    Shows an error dialog (and leaves current data untouched) when the file
    is too small for the loaded YAML layout or when reloading fails.
    """
    with open(path, 'rb') as fd:
        bin_data = bytearray(fd.read())
    if len(bin_data) < len(self.org_cfg_data_bin):
        # Bug fix: showerror(title, message) was previously called with the
        # message text in the title slot and no message body at all.
        messagebox.showerror(
            'LOADING ERROR',
            'Binary file size is smaller than what YAML requires !')
        return
    try:
        self.reload_config_data_from_bin(bin_data)
    except Exception as e:
        messagebox.showerror('LOADING ERROR', str(e))
        return
def load_from_bsf_file(self):
    """Prompt for a BSF file and convert/load it."""
    bsf_path = self.get_open_file_name('bsf')
    if bsf_path:
        self.load_bsf_file(bsf_path)
def load_bsf_file(self, path):
    """Convert a BSF file to DSC, then to YAML, and load the YAML config."""
    base = os.path.splitext(path)[0]
    dsc_file = base + '.dsc'
    yaml_file = base + '.yaml'
    bsf_to_dsc(path, dsc_file)
    dsc_to_yaml(dsc_file, yaml_file)
    self.load_cfg_file(yaml_file)
def load_from_fd(self):
    """Prompt for an FD image and extract/dump the FSP binaries inside it."""
    path = filedialog.askopenfilename(
        initialdir=self.last_dir,
        title="Load file",
        filetypes={("Binaries", "*.fv *.fd *.bin *.rom")})
    if not path:
        return
    self.load_fd_file(path)
def load_fd_file(self, path):
    """Parse a firmware-device image and write out its FSP components."""
    with open(path, 'rb') as image_file:
        raw = bytearray(image_file.read())
    # Use a distinct name for the device object (the original shadowed the
    # file handle name).
    device = FirmwareDevice(0, raw)
    device.ParseFd()
    device.ParseFsp()
    device.OutputFsp()
def load_cfg_file(self, path):
    """Load a YAML/PKL config, rebuild the page tree, optionally read a BIN.

    Returns 0 on completion.
    """
    # Save current values in widget and clear database
    self.clear_widgets_inLayout()
    self.left.delete(*self.left.get_children())
    self.cfg_data_obj = self.load_config_data(path)
    self.update_last_dir(path)
    # Keep the pristine binary so deltas are computed against it later.
    self.org_cfg_data_bin = self.cfg_data_obj.generate_binary_array()
    self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'],
                                '')
    msg_string = 'Click YES if it is FULL FSP '\
        + self.fsp_version + ' Binary'
    reply = messagebox.askquestion('Form', msg_string)
    if reply == 'yes':
        self.load_from_bin()
    # Re-enable file-menu entries that require loaded config data.
    for menu in self.menu_string:
        self.file_menu.entryconfig(menu, state="normal")
    return 0
def load_from_yaml(self):
    """Prompt for a YAML (or cached PKL) config file and load it."""
    cfg_path = self.get_open_file_name('yaml')
    if cfg_path:
        self.load_cfg_file(cfg_path)
def get_save_file_name(self, extension):
    """Ask for a save path with *extension*; remember its directory.

    Returns the chosen path, or None when the dialog is cancelled.
    """
    path = filedialog.asksaveasfilename(
        initialdir=self.last_dir,
        title="Save file",
        defaultextension=extension)
    if not path:
        return None
    self.last_dir = os.path.dirname(path)
    return path
def save_delta_file(self, full=False):
    """Write the current edits as a .dlt delta against the original binary.

    full: when True, emit every field instead of only the changed ones.
    """
    path = self.get_save_file_name(".dlt")
    if not path:
        return
    # Flush pending widget edits into the database first.
    self.update_config_data_on_page()
    new_data = self.cfg_data_obj.generate_binary_array()
    self.cfg_data_obj.generate_delta_file_from_bin(path,
                                                   self.org_cfg_data_bin,
                                                   new_data, full)
def save_to_delta(self):
    """Save only the changed fields as a .dlt file."""
    self.save_delta_file()
def save_full_to_delta(self):
    """Save every field (full dump) as a .dlt file."""
    self.save_delta_file(True)
def save_to_bin(self):
    """Flush page edits and write the current configuration binary."""
    path = self.get_save_file_name(".bin")
    if not path:
        return
    self.update_config_data_on_page()
    bins = self.cfg_data_obj.save_current_to_bin()
    with open(path, 'wb') as fd:
        fd.write(bins)
def refresh_config_data_page(self):
    """Rebuild the right-hand widget pane for the current page selection."""
    self.clear_widgets_inLayout()
    self.on_config_page_select_change(None)
def set_config_data_page(self):
    """Count displayable items per page and drop pages with nothing to show.

    An item counts only when its firmware-volume segment appears present
    (binseg_dict entry != -1) — NOTE(review): confirm binseg_dict
    semantics against CGenYamlCfg.
    """
    page_id_list = []
    for idx, page in enumerate(
            self.cfg_data_obj._cfg_page['root']['child']):
        page_id_list.append(list(page.keys())[0])
        page_list = self.cfg_data_obj.get_cfg_list(page_id_list[idx])
        self.cfg_page_dict[page_id_list[idx]] = 0
        for item in page_list:
            str_split = item['path'].split('.')
            # Path depth differs depending on whether the segment name is
            # the second- or third-to-last component.
            if str_split[-2] not in CGenYamlCfg.available_fv and \
                    str_split[-2] not in CGenYamlCfg.missing_fv:
                if self.cfg_data_obj.binseg_dict[str_split[-3]] != -1:
                    self.cfg_page_dict[page_id_list[idx]] += 1
            else:
                if self.cfg_data_obj.binseg_dict[str_split[-2]] != -1:
                    self.cfg_page_dict[page_id_list[idx]] += 1
    removed_page = 0
    for idx, id in enumerate(page_id_list):
        if self.cfg_page_dict[id] == 0:
            # Adjust for entries already deleted so indices stay valid.
            del self.cfg_data_obj._cfg_page['root']['child'][idx-removed_page]  # noqa: E501
            removed_page += 1
def reload_config_data_from_bin(self, bin_dat):
    """Load defaults from *bin_dat*, prune empty pages, and redraw the UI."""
    self.cfg_data_obj.load_default_from_bin(bin_dat)
    self.set_config_data_page()
    self.left.delete(*self.left.get_children())
    self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'],
                                '')
    self.refresh_config_data_page()
def set_config_item_value(self, item, value_str):
    """Normalize *value_str* per the item's type and store it in item['value'].

    Formatting errors keep the previous value and only print a warning.
    """
    itype = item['type'].split(',')[0]
    if itype == "Table":
        new_value = value_str
    elif itype == "EditText":
        # Clamp to the field's byte length (bit length rounded up).
        length = (self.cfg_data_obj.get_cfg_item_length(item) + 7) // 8
        new_value = value_str[:length]
        if item['value'].startswith("'"):
            # Preserve the quoted-string form used by the database.
            new_value = "'%s'" % new_value
    else:
        try:
            new_value = self.cfg_data_obj.reformat_value_str(
                value_str,
                self.cfg_data_obj.get_cfg_item_length(item),
                item['value'])
        except Exception:
            print("WARNING: Failed to format value string '%s' for '%s' !"
                  % (value_str, item['path']))
            new_value = item['value']
    if item['value'] != new_value:
        if self.debug:
            print('Update %s from %s to %s !'
                  % (item['cname'], item['value'], new_value))
        item['value'] = new_value
def get_config_data_item_from_widget(self, widget, label=False):
    """Map a widget (or, with label=True, its label) to its config item.

    Returns None for unnamed widgets, empty pages, or labels when
    label=False.
    """
    name = self.get_object_name(widget)
    if not name or not len(self.page_list):
        return None
    if name.startswith('LABEL_'):
        if not label:
            return None
        path = name[6:]
    else:
        path = name
    return self.cfg_data_obj.get_item_by_path(path)
def update_config_data_from_widget(self, widget, args):
    """Read a widget's current value back into its config item.

    Used as the callback for walk_widgets_in_layout; *args* is unused here.
    Raises Exception when a named non-label widget has no matching item.
    """
    item = self.get_config_data_item_from_widget(widget)
    if item is None:
        return
    elif not item:
        if isinstance(widget, tkinter.Label):
            return
        raise Exception('Failed to find "%s" !' %
                        self.get_object_name(widget))
    itype = item['type'].split(',')[0]
    if itype == "Combo":
        opt_list = self.cfg_data_obj.get_cfg_item_options(item)
        tmp_list = [opt[0] for opt in opt_list]
        idx = widget.current()
        if idx != -1:
            # Store the option's value string, not its display text.
            self.set_config_item_value(item, tmp_list[idx])
    elif itype in ["EditNum", "EditText"]:
        self.set_config_item_value(item, widget.get())
    elif itype in ["Table"]:
        new_value = bytes_to_bracket_str(widget.get())
        self.set_config_item_value(item, new_value)
def evaluate_condition(self, item):
    """Evaluate an item's visibility condition; default to visible on error."""
    try:
        return self.cfg_data_obj.evaluate_condition(item)
    except Exception:
        print("WARNING: Condition '%s' is invalid for '%s' !"
              % (item['condition'], item['path']))
        return 1
def add_config_item(self, item, row):
    """Create the label plus edit widget for one config item at grid *row*.

    Widget kind follows item['type']: Combo, EditNum/EditText, or Table.
    Invalid options/types are recorded in self.invalid_values for the
    summary warning dialog.
    """
    parent = self.right_grid
    name = tkinter.Label(parent, text=item['name'], anchor="w")
    parts = item['type'].split(',')
    itype = parts[0]
    widget = None
    if itype == "Combo":
        # Build
        opt_list = self.cfg_data_obj.get_cfg_item_options(item)
        current_value = self.cfg_data_obj.get_cfg_item_value(item, False)
        option_list = []
        current = None
        for idx, option in enumerate(opt_list):
            option_str = option[0]
            try:
                option_value = self.cfg_data_obj.get_value(
                    option_str,
                    len(option_str), False)
            except Exception:
                option_value = 0
                print('WARNING: Option "%s" has invalid format for "%s" !'
                      % (option_str, item['path']))
            if option_value == current_value:
                current = idx
            option_list.append(option[1])
        widget = ttk.Combobox(parent, value=option_list, state="readonly")
        widget.bind("<<ComboboxSelected>>", self.combo_select_changed)
        # Prevent accidental value changes from mouse-wheel scrolling.
        widget.unbind_class("TCombobox", "<MouseWheel>")
        if current is None:
            print('WARNING: Value "%s" is an invalid option for "%s" !' %
                  (current_value, item['path']))
            self.invalid_values[item['path']] = current_value
        else:
            widget.current(current)
    elif itype in ["EditNum", "EditText"]:
        txt_val = tkinter.StringVar()
        widget = tkinter.Entry(parent, textvariable=txt_val)
        value = item['value'].strip("'")
        if itype in ["EditText"]:
            # Live-limit text entries to the field's byte capacity.
            txt_val.trace(
                'w',
                lambda *args: self.limit_entry_size
                (txt_val, (self.cfg_data_obj.get_cfg_item_length(item)
                           + 7) // 8))
        elif itype in ["EditNum"]:
            value = item['value'].strip("{").strip("}").strip()
            widget.bind("<FocusOut>", self.edit_num_finished)
        txt_val.set(value)
    elif itype in ["Table"]:
        bins = self.cfg_data_obj.get_cfg_item_value(item, True)
        col_hdr = item['option'].split(',')
        widget = custom_table(parent, col_hdr, bins)
    else:
        if itype and itype not in ["Reserved"]:
            print("WARNING: Type '%s' is invalid for '%s' !" %
                  (itype, item['path']))
            self.invalid_values[item['path']] = itype
    if widget:
        create_tool_tip(widget, item['help'])
        self.set_object_name(name, 'LABEL_' + item['path'])
        self.set_object_name(widget, item['path'])
        name.grid(row=row, column=0, padx=10, pady=5, sticky="nsew")
        widget.grid(row=row + 1, rowspan=1, column=0,
                    padx=10, pady=5, sticky="nsew")
def update_config_data_on_page(self):
    """Write every visible widget's current value back into the database."""
    self.walk_widgets_in_layout(self.right_grid,
                                self.update_config_data_from_widget)
if __name__ == '__main__':
    # Launch the Tk main loop hosting the Config Editor application.
    root = tkinter.Tk()
    app = application(master=root)
    root.title("Config Editor")
    root.mainloop()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/Tools/ConfigEditor/ConfigEditor.py
|
## @file
# Automate the process of building the various reset vector types
#
# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import glob
import os
import subprocess
import sys
def RunCommand(commandLine):
    """Run *commandLine* (a sequence of argv items) and return its exit code."""
    #print ' '.join(commandLine)
    return subprocess.call(commandLine)
# Remove stale outputs from previous builds.
for filename in glob.glob(os.path.join('Bin', '*.raw')):
    os.remove(filename)

# Only the IA32 reset vector (no debug variant) is built here.
arch = 'ia32'
debugType = None

output = os.path.join('Bin', 'ResetVec')
output += '.' + arch
if debugType is not None:
    output += '.' + debugType
output += '.raw'

# Assemble the reset vector, selecting arch/debug paths via macros.
commandLine = (
    'nasm',
    '-D', 'ARCH_%s' % arch.upper(),
    '-D', 'DEBUG_%s' % str(debugType).upper(),
    '-o', output,
    'ResetVectorCode.asm',
    )
ret = RunCommand(commandLine)
print '\tASM\t' + output  # NOTE: Python 2 print statement
if ret != 0: sys.exit(ret)

# Pad/fix up the raw binary so it can be used as an FFS raw section.
commandLine = (
    'python',
    'Tools/FixupForRawSection.py',
    output,
    )
print '\tFIXUP\t' + output
ret = RunCommand(commandLine)
if ret != 0: sys.exit(ret)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/FspSecCore/Vtf0/Build.py
|
## @file
# Apply fixup to VTF binary image for FFS Raw section
#
# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import sys
# This script is Python 2 only (long literals, str written to binary files).
filename = sys.argv[1]

if filename.lower().find('ia32') >= 0:
    # IA32: prepend NOP (0x90) padding so the total size plus the 4-byte
    # FFS section header rounds up to an 8-byte boundary.
    d = open(sys.argv[1], 'rb').read()
    c = ((len(d) + 4 + 7) & ~7) - 4
    if c > len(d):
        c -= len(d)
    f = open(sys.argv[1], 'wb')
    f.write('\x90' * c)
    f.write(d)
    f.close()
else:
    # X64: build identity-mapped 4 GiB page tables (2 MiB pages) placed
    # directly below the code at the top of the 4 GiB address space.
    from struct import pack

    PAGE_PRESENT = 0x01
    PAGE_READ_WRITE = 0x02
    PAGE_USER_SUPERVISOR = 0x04
    PAGE_WRITE_THROUGH = 0x08
    PAGE_CACHE_DISABLE = 0x010
    PAGE_ACCESSED = 0x020
    PAGE_DIRTY = 0x040
    PAGE_PAT = 0x080
    PAGE_GLOBAL = 0x0100
    PAGE_2M_MBO = 0x080
    PAGE_2M_PAT = 0x01000

    def NopAlign4k(s):
        # Prepend NOPs so len(result) is a multiple of 4 KiB.
        c = ((len(s) + 0xfff) & ~0xfff) - len(s)
        return ('\x90' * c) + s

    def PageDirectoryEntries4GbOf2MbPages(baseAddress):
        # 0x800 PDEs of 2 MiB pages cover the full 4 GiB identity map.
        s = ''
        for i in range(0x800):
            i = (
                baseAddress + long(i << 21) +
                PAGE_2M_MBO +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_DIRTY +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def PageDirectoryPointerTable4GbOf2MbPages(pdeBase):
        # Only the first 4 PDPT entries point at real PDE pages.
        s = ''
        for i in range(0x200):
            i = (
                pdeBase +
                (min(i, 3) << 12) +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def PageMapLevel4Table4GbOf2MbPages(pdptBase):
        # Every PML4 entry points at the single PDPT page.
        s = ''
        for i in range(0x200):
            i = (
                pdptBase +
                (min(i, 0) << 12) +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def First4GbPageEntries(topAddress):
        # Tables are laid out just below topAddress: PML4, PDPT, then PDEs.
        PDE = PageDirectoryEntries4GbOf2MbPages(0L)
        pml4tBase = topAddress - 0x1000
        pdptBase = pml4tBase - 0x1000
        pdeBase = pdptBase - len(PDE)
        PDPT = PageDirectoryPointerTable4GbOf2MbPages(pdeBase)
        PML4T = PageMapLevel4Table4GbOf2MbPages(pdptBase)
        return PDE + PDPT + PML4T

    def AlignAndAddPageTables():
        # Rewrite the file as: 4 NOPs + page tables + 4k-aligned code.
        d = open(sys.argv[1], 'rb').read()
        code = NopAlign4k(d)
        topAddress = 0x100000000 - len(code)
        d = ('\x90' * 4) + First4GbPageEntries(topAddress) + code
        f = open(sys.argv[1], 'wb')
        f.write(d)
        f.close()

    AlignAndAddPageTables()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/IntelFsp2Pkg/FspSecCore/Vtf0/Tools/FixupForRawSection.py
|
# -*- coding: utf-8 -*-
# dict_conv.py (Python3 script)
import sys
# Target encodings: a literal "\x00" escape is emitted before each char for
# UTF-16BE and after it for UTF-16LE.
ENC_UTF16_BE = 1
ENC_UTF16_LE = 2


def add_char(enc, s, c):
    """Return *s* with char *c* appended, padded with a "\\x00" escape
    on the side required by *enc*."""
    if enc == ENC_UTF16_BE:
        s = s + "\\x00" + c
    elif enc == ENC_UTF16_LE:
        s = s + c + "\\x00"
    else:
        s = s + c
    return s
def conv(enc, s):
    """Convert the body of a quoted dictionary string to escaped UTF-16 form.

    s is the raw text between the quotes; only the escapes \\\\ and \\" are
    accepted and passed through.  Raises ValueError on any other escape.
    """
    n = len(s)
    r = ""
    i = 0
    while i < n:
        c = s[i]
        if c == '\\':
            c = s[i+1]
            if c == '\\' or c == '"':
                r = add_char(enc, r, "\\" + c)
                i += 2
                continue
            else:
                # Bug fix: `raise("...")` raised a bare string, which is a
                # TypeError on Python 3; raise a real exception type.
                raise ValueError("Unknown escape {0}".format(s))
        r = add_char(enc, r, c)
        i += 1
    return r
def main(enc):
    """Filter stdin: pass comment lines through, convert quoted strings.

    Every non-comment, non-blank line must be a double-quoted string;
    anything else raises ValueError.
    """
    print("# This file was generated by dict_conv.py.")
    for line in sys.stdin:
        s = line.strip()
        if not s:
            # Robustness fix: a blank line previously crashed on s[0].
            continue
        if s[0] == '#':
            print(s)
            continue
        if s[0] == '"' and s[-1] == '"':
            s = conv(enc, s[1:-1])
            print("\"{0}\"".format(s))
        else:
            # Bug fix: `raise("...")` raised a bare string (TypeError on
            # Python 3); raise a real exception type instead.
            raise ValueError("Invalid format {0}".format(s))
def usage(argv):
    """Abort with a usage message naming the required encoding argument."""
    message = "Usage: python {0} utf16_be/utf16_le".format(argv[0])
    raise RuntimeError(message)
if __name__ == "__main__":
    # Select the target encoding from the single command-line argument.
    argv = sys.argv
    argc = len(argv)
    if argc >= 2:
        s = argv[1]
        if s == 'utf16_be':
            enc = ENC_UTF16_BE
        elif s == 'utf16_le':
            enc = ENC_UTF16_LE
        else:
            usage(argv)
    else:
        usage(argv)
    main(enc)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/harnesses/dict_conv.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# gperf_fold_key_conv.py
# Copyright (c) 2016-2018 K.Kosako
import sys
import re
# Patterns matching pieces of gperf-generated C source that must be rewritten
# so the lookup operates on OnigCodePoint arrays instead of C strings.
# NOTE(review): patterns are non-raw strings; '\s' etc. rely on Python
# passing unknown escapes through unchanged.
REG_LINE_GPERF = re.compile('#line .+gperf"')
REG_HASH_FUNC = re.compile('hash\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+size_t\s+len\s*\)')
REG_STR_AT = re.compile('str\[(\d+)\]')
REG_RETURN_TYPE = re.compile('^const\s+short\s+int\s*\*')
REG_FOLD_KEY = re.compile('unicode_fold(\d)_key\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+size_t\s+len\)')
REG_ENTRY = re.compile('\{".*?",\s*(-?\d+)\s*\}')
REG_IF_LEN = re.compile('\s*if\s*\(\s*len\s*<=\s*MAX_WORD_LENGTH.+')
REG_GET_HASH = re.compile('(?:register\s+)?(?:unsigned\s+)?int\s+key\s*=\s*hash\s*\(str,\s*len\);')
REG_GET_CODE = re.compile('(?:register\s+)?const\s+char\s*\*\s*s\s*=\s*wordlist\[key\]\.name;')
REG_CODE_CHECK = re.compile('if\s*\(\*str\s*==\s*\*s\s*&&\s*!strncmp.+\)')
REG_RETURN_WL = re.compile('return\s+&wordlist\[key\];')
REG_RETURN_0 = re.compile('return 0;')
def parse_line(s, key_len):
    """Rewrite one line of gperf output for codepoint-array lookup.

    Tries each substitution in turn and returns the first rewritten form;
    returns the line unchanged when no pattern matches.
    """
    s = s.rstrip()
    r = re.sub(REG_LINE_GPERF, '', s)
    if r != s: return r
    r = re.sub(REG_HASH_FUNC, 'hash(OnigCodePoint codes[])', s)
    if r != s: return r
    r = re.sub(REG_STR_AT, 'onig_codes_byte_at(codes, \\1)', s)
    if r != s: return r
    r = re.sub(REG_RETURN_TYPE, 'int', s)
    if r != s: return r
    r = re.sub(REG_FOLD_KEY, 'unicode_fold\\1_key(OnigCodePoint codes[])', s)
    if r != s: return r
    r = re.sub(REG_ENTRY, '\\1', s)
    if r != s: return r
    r = re.sub(REG_IF_LEN, '', s)
    if r != s: return r
    r = re.sub(REG_GET_HASH, 'int key = hash(codes);', s)
    if r != s: return r
    r = re.sub(REG_GET_CODE, 'int index = wordlist[key];', s)
    if r != s: return r
    r = re.sub(REG_CODE_CHECK,
               'if (index >= 0 && onig_codes_cmp(codes, OnigUnicodeFolds%d + index, %d) == 0)' % (key_len, key_len), s)
    if r != s: return r
    r = re.sub(REG_RETURN_WL, 'return index;', s)
    if r != s: return r
    r = re.sub(REG_RETURN_0, 'return -1;', s)
    if r != s: return r
    return s
def parse_file(f, key_len):
    """Filter an entire gperf output stream line by line (Python 2 prints)."""
    print "/* This file was converted by gperf_fold_key_conv.py\n from gperf output file. */"
    while True:
        line = f.readline()
        if not line:
            break
        s = parse_line(line, key_len)
        print s
# main
argv = sys.argv
argc = len(argv)  # NOTE(review): computed but unused beyond this line
# Fold-key length (1..3) passed as the sole command-line argument.
key_len = int(argv[1])
parse_file(sys.stdin, key_len)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/src/gperf_fold_key_conv.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# make_unicode_property_data.py
# Copyright (c) 2016-2019 K.Kosako
import sys
import re
POSIX_LIST = [
'NEWLINE', 'Alpha', 'Blank', 'Cntrl', 'Digit', 'Graph', 'Lower',
'Print', 'Punct', 'Space', 'Upper', 'XDigit', 'Word', 'Alnum', 'ASCII'
]
MAX_CODE_POINT = 0x10ffff
GRAPHEME_CLUSTER_BREAK_NAME_PREFIX = 'Grapheme_Cluster_Break_'
UD_FIRST_REG = re.compile("<.+,\s*First>")
UD_LAST_REG = re.compile("<.+,\s*Last>")
PR_TOTAL_REG = re.compile("#\s*Total\s+(?:code\s+points|elements):")
PR_LINE_REG = re.compile("([0-9A-Fa-f]+)(?:..([0-9A-Fa-f]+))?\s*;\s*(\w+)")
PA_LINE_REG = re.compile("(\w+)\s*;\s*(\w+)")
PVA_LINE_REG = re.compile("(sc|gc)\s*;\s*(\w+)\s*;\s*(\w+)(?:\s*;\s*(\w+))?")
BL_LINE_REG = re.compile("([0-9A-Fa-f]+)\.\.([0-9A-Fa-f]+)\s*;\s*(.*)")
UNICODE_VERSION_REG = re.compile("#\s*.*-(\d+)\.(\d+)\.(\d+)\.txt")
EMOJI_VERSION_REG = re.compile("(?i)#\s*Version:\s*(\d+)\.(\d+)")
VERSION_INFO = [-1, -1, -1]
EMOJI_VERSION_INFO = [-1, -1]
DIC = { }
KDIC = { }
PropIndex = { }
PROPERTY_NAME_MAX_LEN = 0
PROPS = None
def normalize_prop_name(name):
    """Lower-case *name* and drop spaces/underscores (loose matching form)."""
    return re.sub(r'[ _]', '', name).lower()
def fix_block_name(name):
    """Turn a Unicode block name into an 'In_*' identifier."""
    return 'In_' + re.sub(r'[- ]+', '_', name)
def print_ranges(ranges):
    """Dump each (start, end) range and the count (Python 2 prints)."""
    for (start, end) in ranges:
        print "0x%06x, 0x%06x" % (start, end)
    print len(ranges)
def print_prop_and_index(prop, i):
    """Emit one 'name, index' gperf entry and record it in PropIndex."""
    print "%-35s %3d" % (prop + ',', i)
    PropIndex[prop] = i
# Cache of already-emitted range data so identical properties can alias.
PRINT_CACHE = { }

def print_property(prop, data, desc):
    """Emit the CR_<prop> code-range table, aliasing duplicates via #define."""
    print ''
    print "/* PROPERTY: '%s': %s */" % (prop, desc)
    prev_prop = dic_find_by_value(PRINT_CACHE, data)
    if prev_prop is not None:
        # Identical data already emitted — alias instead of duplicating.
        print "#define CR_%s CR_%s" % (prop, prev_prop)
    else:
        PRINT_CACHE[prop] = data
        print "static const OnigCodePoint"
        print "CR_%s[] = { %d," % (prop, len(data))
        for (start, end) in data:
            print "0x%04x, 0x%04x," % (start, end)
        print "}; /* END of CR_%s */" % prop
def dic_find_by_value(dic, v):
    """Return the first key in *dic* whose value equals *v*, else None."""
    for key in dic:
        if dic[key] == v:
            return key
    return None
def make_reverse_dic(dic):
    """Invert *dic* into a value -> list-of-keys mapping."""
    rev = {}
    for key, val in dic.items():
        rev.setdefault(val, []).append(key)
    return rev
def normalize_ranges(in_ranges, sort=False):
    """Merge adjacent/overlapping (start, end) ranges into a minimal list.

    sort: sort the input first (required when it is not already ordered).
    Fix: the previous-end tracker was initialized to None and compared with
    `None >= int`, which raises TypeError on Python 3; guard the first
    iteration explicitly (behavior on Python 2 is unchanged, since there
    None compares less than any int).
    """
    if sort:
        ranges = sorted(in_ranges)
    else:
        ranges = in_ranges
    r = []
    prev = None
    for (start, end) in ranges:
        if prev is not None and prev >= start - 1:
            # Adjacent or overlapping: extend the last emitted range.
            (pstart, pend) = r.pop()
            end = max(pend, end)
            start = pstart
        r.append((start, end))
        prev = end
    return r
def inverse_ranges(in_ranges):
    """Complement a sorted, disjoint range list within [0, MAX_CODE_POINT]."""
    result = []
    next_start = 0x000000
    for (start, end) in in_ranges:
        if next_start < start:
            result.append((next_start, start - 1))
        next_start = end + 1
    if next_start < MAX_CODE_POINT:
        result.append((next_start, MAX_CODE_POINT))
    return result
def add_ranges(r1, r2):
    """Union of two range lists, merged and sorted."""
    return normalize_ranges(r1 + r2, True)
def sub_one_range(one_range, rs):
    """Subtract the sorted range list *rs* from the single range *one_range*.

    Returns the list of (start, end) pieces of one_range not covered by rs.
    """
    result = []
    (lo, hi) = one_range
    for (s2, e2) in rs:
        if lo <= s2 <= hi:
            # Subtrahend starts inside the remaining span.
            if s2 > lo:
                result.append((lo, s2 - 1))
            if e2 >= hi:
                return result
            lo = e2 + 1
        elif s2 < lo <= e2:
            # Subtrahend overlaps the left edge of the remaining span.
            if e2 >= hi:
                return result
            lo = e2 + 1
    result.append((lo, hi))
    return result
def sub_ranges(r1, r2):
    """Subtract range list *r2* from each range of *r1*."""
    result = []
    for rng in r1:
        result.extend(sub_one_range(rng, r2))
    return result
def add_ranges_in_dic(dic):
    """Union of every range list stored in *dic*, merged and sorted."""
    r = []
    for k, v in dic.items():
        r = r + v
    return normalize_ranges(r, True)
def normalize_ranges_in_dic(dic, sort=False):
    """Normalize every range list in *dic* in place."""
    for key in dic:
        dic[key] = normalize_ranges(dic[key], sort)
def merge_dic(to_dic, from_dic):
    """Merge *from_dic* into *to_dic*, warning on stderr about key collisions.

    Colliding keys are still overwritten by from_dic's values.
    """
    to_keys = to_dic.keys()
    from_keys = from_dic.keys()
    common = list(set(to_keys) & set(from_keys))
    if len(common) != 0:
        print >> sys.stderr, "merge_dic: collision: %s" % sorted(common)
    to_dic.update(from_dic)
def merge_props(to_props, from_props):
    """Append *from_props* to *to_props*, warning on stderr about duplicates."""
    common = list(set(to_props) & set(from_props))
    if len(common) != 0:
        print >> sys.stderr, "merge_props: collision: %s" % sorted(common)
    to_props.extend(from_props)
def add_range_into_dic(dic, name, start, end):
    """Append the (start, end) range to dic[name], creating the list lazily."""
    dic.setdefault(name, []).append((start, end))
def list_sub(a, b):
    """Set-difference of two lists (order of the result is unspecified)."""
    return list(set(a) - set(b))
def parse_unicode_data_file(f):
dic = { }
assigned = []
for line in f:
s = line.strip()
if len(s) == 0:
continue
if s[0] == '#':
continue
a = s.split(';')
code = int(a[0], 16)
desc = a[1]
prop = a[2]
if UD_FIRST_REG.match(desc) is not None:
start = code
end = None
elif UD_LAST_REG.match(desc) is not None:
end = code
else:
start = end = code
if end is not None:
assigned.append((start, end))
add_range_into_dic(dic, prop, start, end)
if len(prop) == 2:
add_range_into_dic(dic, prop[0:1], start, end)
normalize_ranges_in_dic(dic)
return dic, assigned
def parse_properties(path, klass, prop_prefix = None, version_reg = None):
version_match = None
with open(path, 'r') as f:
dic = { }
prop = None
props = []
for line in f:
s = line.strip()
if len(s) == 0:
continue
if s[0] == '#' and version_reg is not None and version_match is None:
version_match = version_reg.match(s)
if version_match is not None:
continue
m = PR_LINE_REG.match(s)
if m:
prop = m.group(3)
if prop_prefix is not None:
prop = prop_prefix + prop
if m.group(2):
start = int(m.group(1), 16)
end = int(m.group(2), 16)
add_range_into_dic(dic, prop, start, end)
else:
start = int(m.group(1), 16)
add_range_into_dic(dic, prop, start, start)
elif PR_TOTAL_REG.match(s) is not None:
KDIC[prop] = klass
props.append(prop)
normalize_ranges_in_dic(dic)
return (dic, props, version_match)
def parse_property_aliases(path):
a = { }
with open(path, 'r') as f:
for line in f:
s = line.strip()
if len(s) == 0:
continue
m = PA_LINE_REG.match(s)
if not(m):
continue
if m.group(1) == m.group(2):
continue
a[m.group(1)] = m.group(2)
return a
def parse_property_value_aliases(path):
a = { }
with open(path, 'r') as f:
for line in f:
s = line.strip()
if len(s) == 0:
continue
m = PVA_LINE_REG.match(s)
if not(m):
continue
cat = m.group(1)
x2 = m.group(2)
x3 = m.group(3)
x4 = m.group(4)
if cat == 'sc':
if x2 != x3:
a[x2] = x3
if x4 and x4 != x3:
a[x4] = x3
else:
if x2 != x3:
a[x3] = x2
if x4 and x4 != x2:
a[x4] = x2
return a
def parse_blocks(path):
dic = { }
blocks = []
with open(path, 'r') as f:
for line in f:
s = line.strip()
if len(s) == 0:
continue
m = BL_LINE_REG.match(s)
if not(m):
continue
start = int(m.group(1), 16)
end = int(m.group(2), 16)
block = fix_block_name(m.group(3))
add_range_into_dic(dic, block, start, end)
blocks.append(block)
noblock = fix_block_name('No_Block')
dic[noblock] = inverse_ranges(add_ranges_in_dic(dic))
blocks.append(noblock)
return dic, blocks
def add_primitive_props(assigned):
DIC['Assigned'] = normalize_ranges(assigned)
DIC['Any'] = [(0x000000, 0x10ffff)]
DIC['ASCII'] = [(0x000000, 0x00007f)]
DIC['NEWLINE'] = [(0x00000a, 0x00000a)]
DIC['Cn'] = inverse_ranges(DIC['Assigned'])
DIC['C'].extend(DIC['Cn'])
DIC['C'] = normalize_ranges(DIC['C'], True)
d = []
d.extend(DIC['Ll'])
d.extend(DIC['Lt'])
d.extend(DIC['Lu'])
DIC['LC'] = normalize_ranges(d, True)
def add_posix_props(dic):
alnum = []
alnum.extend(dic['Alphabetic'])
alnum.extend(dic['Nd']) # Nd == Decimal_Number
alnum = normalize_ranges(alnum, True)
blank = [(0x0009, 0x0009)]
blank.extend(dic['Zs']) # Zs == Space_Separator
blank = normalize_ranges(blank, True)
word = []
word.extend(dic['Alphabetic'])
word.extend(dic['M']) # M == Mark
word.extend(dic['Nd'])
word.extend(dic['Pc']) # Pc == Connector_Punctuation
word = normalize_ranges(word, True)
graph = sub_ranges(dic['Any'], dic['White_Space'])
graph = sub_ranges(graph, dic['Cc'])
graph = sub_ranges(graph, dic['Cs']) # Cs == Surrogate
graph = sub_ranges(graph, dic['Cn']) # Cn == Unassigned
graph = normalize_ranges(graph, True)
p = []
p.extend(graph)
p.extend(dic['Zs'])
p = normalize_ranges(p, True)
dic['Alpha'] = dic['Alphabetic']
dic['Upper'] = dic['Uppercase']
dic['Lower'] = dic['Lowercase']
dic['Punct'] = dic['P'] # P == Punctuation
dic['Digit'] = dic['Nd']
dic['XDigit'] = [(0x0030, 0x0039), (0x0041, 0x0046), (0x0061, 0x0066)]
dic['Alnum'] = alnum
dic['Space'] = dic['White_Space']
dic['Blank'] = blank
dic['Cntrl'] = dic['Cc']
dic['Word'] = word
dic['Graph'] = graph
dic['Print'] = p
def set_max_prop_name(name):
global PROPERTY_NAME_MAX_LEN
n = len(name)
if n > PROPERTY_NAME_MAX_LEN:
PROPERTY_NAME_MAX_LEN = n
def entry_prop_name(name, index):
set_max_prop_name(name)
if OUTPUT_LIST_MODE and index >= len(POSIX_LIST):
print >> UPF, "%3d: %s" % (index, name)
def entry_and_print_prop_and_index(name, index):
entry_prop_name(name, index)
nname = normalize_prop_name(name)
print_prop_and_index(nname, index)
def parse_and_merge_properties(path, klass, prop_prefix = None, version_reg = None):
dic, props, ver_m = parse_properties(path, klass, prop_prefix, version_reg)
merge_dic(DIC, dic)
merge_props(PROPS, props)
return dic, props, ver_m
### main ###
argv = sys.argv
argc = len(argv)
POSIX_ONLY = False
INCLUDE_GRAPHEME_CLUSTER_DATA = False
for i in range(1, argc):
arg = argv[i]
if arg == '-posix':
POSIX_ONLY = True
elif arg == '-gc':
INCLUDE_GRAPHEME_CLUSTER_DATA = True
else:
print >> sys.stderr, "Invalid argument: %s" % arg
OUTPUT_LIST_MODE = not(POSIX_ONLY)
with open('UnicodeData.txt', 'r') as f:
dic, assigned = parse_unicode_data_file(f)
DIC = dic
add_primitive_props(assigned)
PROPS = DIC.keys()
PROPS = list_sub(PROPS, POSIX_LIST)
_, _, ver_m = parse_and_merge_properties('DerivedCoreProperties.txt', 'Derived Property', None, UNICODE_VERSION_REG)
if ver_m is not None:
VERSION_INFO[0] = int(ver_m.group(1))
VERSION_INFO[1] = int(ver_m.group(2))
VERSION_INFO[2] = int(ver_m.group(3))
dic, props, _ = parse_and_merge_properties('Scripts.txt', 'Script')
DIC['Unknown'] = inverse_ranges(add_ranges_in_dic(dic))
parse_and_merge_properties('PropList.txt', 'Binary Property')
_, _, ver_m = parse_and_merge_properties('emoji-data.txt', 'Emoji Property', None, EMOJI_VERSION_REG)
if ver_m is not None:
EMOJI_VERSION_INFO[0] = int(ver_m.group(1))
EMOJI_VERSION_INFO[1] = int(ver_m.group(2))
PROPS.append('Unknown')
KDIC['Unknown'] = 'Script'
ALIASES = parse_property_aliases('PropertyAliases.txt')
a = parse_property_value_aliases('PropertyValueAliases.txt')
merge_dic(ALIASES, a)
dic, BLOCKS = parse_blocks('Blocks.txt')
merge_dic(DIC, dic)
if INCLUDE_GRAPHEME_CLUSTER_DATA:
dic, props, _ = parse_properties('GraphemeBreakProperty.txt',
'GraphemeBreak Property',
GRAPHEME_CLUSTER_BREAK_NAME_PREFIX)
merge_dic(DIC, dic)
merge_props(PROPS, props)
#prop = GRAPHEME_CLUSTER_BREAK_NAME_PREFIX + 'Other'
#DIC[prop] = inverse_ranges(add_ranges_in_dic(dic))
#PROPS.append(prop)
#KDIC[prop] = 'GrapemeBreak Property'
add_posix_props(DIC)
PROPS = sorted(PROPS)
s = '''%{
/* Generated by make_unicode_property_data.py. */
'''
print s
for prop in POSIX_LIST:
print_property(prop, DIC[prop], "POSIX [[:%s:]]" % prop)
print ''
if not(POSIX_ONLY):
for prop in PROPS:
klass = KDIC.get(prop, None)
if klass is None:
n = len(prop)
if n == 1:
klass = 'Major Category'
elif n == 2:
klass = 'General Category'
else:
klass = '-'
print_property(prop, DIC[prop], klass)
for block in BLOCKS:
print_property(block, DIC[block], 'Block')
print ''
print "static const OnigCodePoint*\nconst CodeRanges[] = {"
for prop in POSIX_LIST:
print " CR_%s," % prop
if not(POSIX_ONLY):
for prop in PROPS:
print " CR_%s," % prop
for prop in BLOCKS:
print " CR_%s," % prop
s = '''};
#define pool_offset(s) offsetof(struct unicode_prop_name_pool_t, unicode_prop_name_pool_str##s)
%}
struct PoolPropertyNameCtype {
short int name;
short int ctype;
};
%%
'''
sys.stdout.write(s)
if OUTPUT_LIST_MODE:
UPF = open("UNICODE_PROPERTIES", "w")
if VERSION_INFO[0] < 0:
raise RuntimeError("Unicode Version is not found")
if EMOJI_VERSION_INFO[0] < 0:
raise RuntimeError("Emoji Version is not found")
print >> UPF, "Unicode Properties (Unicode Version: %d.%d.%d, Emoji: %d.%d)" % (VERSION_INFO[0], VERSION_INFO[1], VERSION_INFO[2], EMOJI_VERSION_INFO[0], EMOJI_VERSION_INFO[1])
print >> UPF, ''
index = -1
for prop in POSIX_LIST:
index += 1
entry_and_print_prop_and_index(prop, index)
if not(POSIX_ONLY):
for prop in PROPS:
index += 1
entry_and_print_prop_and_index(prop, index)
NALIASES = map(lambda (k,v):(normalize_prop_name(k), k, v), ALIASES.items())
NALIASES = sorted(NALIASES)
for (nk, k, v) in NALIASES:
nv = normalize_prop_name(v)
if PropIndex.get(nk, None) is not None:
print >> sys.stderr, "ALIASES: already exists: %s => %s" % (k, v)
continue
aindex = PropIndex.get(nv, None)
if aindex is None:
#print >> sys.stderr, "ALIASES: value is not exist: %s => %s" % (k, v)
continue
entry_prop_name(k, aindex)
print_prop_and_index(nk, aindex)
for name in BLOCKS:
index += 1
entry_and_print_prop_and_index(name, index)
print '%%'
print ''
if not(POSIX_ONLY):
if VERSION_INFO[0] < 0:
raise RuntimeError("Unicode Version is not found")
if EMOJI_VERSION_INFO[0] < 0:
raise RuntimeError("Emoji Version is not found")
print "#define UNICODE_PROPERTY_VERSION %02d%02d%02d" % (VERSION_INFO[0], VERSION_INFO[1], VERSION_INFO[2])
print "#define UNICODE_EMOJI_VERSION %02d%02d" % (EMOJI_VERSION_INFO[0], EMOJI_VERSION_INFO[1])
print ''
print "#define PROPERTY_NAME_MAX_SIZE %d" % (PROPERTY_NAME_MAX_LEN + 10)
print "#define CODE_RANGES_NUM %d" % (index + 1)
index_props = make_reverse_dic(PropIndex)
print ''
for i in range(index + 1):
for p in index_props[i]:
print "#define PROP_INDEX_%s %d" % (p.upper(), i)
if OUTPUT_LIST_MODE:
UPF.close()
sys.exit(0)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/src/make_unicode_property_data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# make_unicode_fold_data.py
# Copyright (c) 2016-2020 K.Kosako
import sys
import re
SOURCE_FILE = 'CaseFolding.txt'
GPERF_UNFOLD_KEY_FILE = 'unicode_unfold_key.gperf'
GPERF_FOLD_KEY_FILES = ['unicode_fold1_key.gperf', 'unicode_fold2_key.gperf', 'unicode_fold3_key.gperf']
DataName = 'OnigUnicodeFolds'
ENCODING = 'utf-8'
LINE_REG = re.compile("([0-9A-F]{1,6}); (.); ([0-9A-F]{1,6})(?: ([0-9A-F]{1,6}))?(?: ([0-9A-F]{1,6}))?;(?:\s*#\s*)(.*)")
VERSION_REG = re.compile("#.*-(\d+)\.(\d+)\.(\d+)\.txt")
VERSION_INFO = [-1, -1, -1]
FOLDS = {}
TURKISH_FOLDS = {}
LOCALE_FOLDS = {}
UNFOLDS = {}
TURKISH_UNFOLDS = {}
LOCALE_UNFOLDS = {}
COPYRIGHT = '''
/*-
* Copyright (c) 2017-2020 K.Kosako
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
'''.strip()
class Entry:
def __init__(self, fold):
self.fold = fold
self.unfolds = []
self.fold_len = len(fold)
self.index = -1
self.comment = None
def fold_key(fold):
sfold = map(lambda i: "%06x" % i, fold)
return ':'.join(sfold)
def form16(x, size):
form = "0x%06x" if x > 0xffff else "0x%04x"
s = form % x
rem = size - len(s)
if rem > 0:
s = ' ' * rem + s
return s
def form3bytes(x):
x0 = x & 0xff
x1 = (x>>8) & 0xff
x2 = (x>>16) & 0xff
return "\\x%02x\\x%02x\\x%02x" % (x2, x1, x0)
def enc_len(code, encode):
u = unichr(code)
s = u.encode(encode)
return len(s)
def check_version_info(s):
m = VERSION_REG.match(s)
if m is not None:
VERSION_INFO[0] = int(m.group(1))
VERSION_INFO[1] = int(m.group(2))
VERSION_INFO[2] = int(m.group(3))
def parse_line(s):
if len(s) == 0:
return False
if s[0] == '#':
if VERSION_INFO[0] < 0:
check_version_info(s)
return False
m = LINE_REG.match(s)
if m is None:
print >> sys.stderr, s.encode(ENCODING)
sys.exit(-1)
s_unfold = m.group(1)
s_type = m.group(2)
s_fold = m.group(3)
comment = m.group(6)
if s_type == 'S':
return False;
unfold = int(s_unfold, 16)
f1 = int(s_fold, 16)
fold = [f1]
if m.group(4) is not None:
f2 = int(m.group(4), 16)
fold.append(f2)
if m.group(5) is not None:
f3 = int(m.group(5), 16)
fold.append(f3)
if s_type == 'T':
dic = TURKISH_FOLDS
undic = TURKISH_UNFOLDS
else:
dic = FOLDS
undic = UNFOLDS
key = fold_key(fold)
e = dic.get(key, None)
if e is None:
e = Entry(fold)
e.comment = comment
dic[key] = e
e.unfolds.append(unfold)
if undic.get(unfold, None) is not None:
print >> sys.stderr, ("unfold dup: 0x%04x %s\n" % (unfold, s_type))
undic[unfold] = e
return True
def parse_file(f):
  """Feed every stripped line of the CaseFolding data file to parse_line()."""
  for raw in f:
    parse_line(raw.strip())
def make_locale():
  # Move Turkish-specific ('T') folds that collide with default folds
  # out of FOLDS/UNFOLDS into the LOCALE_* tables.
  for unfold, te in TURKISH_UNFOLDS.items():
    e = UNFOLDS.get(unfold, None)
    if e is None:
      continue
    fkey = fold_key(e.fold)
    if len(e.unfolds) == 1:
      # This unfold was the entry's only one: drop the whole fold entry.
      del FOLDS[fkey]
    else:
      e.unfolds.remove(unfold)
    # Re-home the unfold in a fresh locale-only entry for the same fold.
    e = Entry(e.fold)
    e.unfolds.append(unfold)
    LOCALE_FOLDS[fkey] = e
    LOCALE_UNFOLDS[unfold] = e
    del UNFOLDS[unfold]
def output_typedef(f):
  # Emit the OnigCodePoint typedef (call site is commented out in main;
  # regenc.h already provides the type).
  s = """\
typedef unsigned long OnigCodePoint;
"""
  print >> f, s
def divide_by_fold_len(d):
  """Split dict entries into three key-sorted lists by Entry.fold_len.

  Returns (len-1 items, len-2 items, len-3 items); each element is a
  list of (key, Entry) pairs sorted by key.
  """
  def select(n):
    pairs = [kv for kv in d.items() if kv[1].fold_len == n]
    return sorted(pairs, key=lambda kv: kv[0])
  return (select(1), select(2), select(3))
def output_comment(f, s):
  """Write s to f as an inline C block comment (leading space, no newline)."""
  f.write(" /* " + s + " */")
def output_data_n1(f, n, fn, c, out_comment):
  # Emit one row per entry of fold length n; `c` is the running index
  # into the flat C array.  Entry layout: n fold codes, unfold count,
  # unfold codes.  Returns the index just past the last emitted entry.
  for k, e in fn:
    e.index = c
    if out_comment and n > 1 and e.comment is not None:
      # Multi-code entries: comment goes on its own line above the row.
      output_comment(f, e.comment)
      print >> f, ''
    f.write(' ')
    f.write("/*%4d*/ " % c)
    for i in range(0, n):
      s = form16(e.fold[i], 8)
      f.write(" %s," % s)
    usize = len(e.unfolds)
    f.write(" %d," % usize)
    for u in e.unfolds:
      s = form16(u, 8)
      f.write(" %s," % s)
    if out_comment and n == 1 and e.comment is not None:
      # Single-code entries: comment trails the row, clipped to fit.
      if len(e.comment) < 35:
        s = e.comment
      else:
        s = e.comment[0:33] + '..'
      output_comment(f, s)
    f.write("\n")
    c += n + 1 + usize
  return c
def output_data_n(f, name, n, fn, lfn, out_comment):
  # Emit the C array for fold length n: normal entries first, then the
  # locale-only (Turkish) entries, with #define end-index markers for
  # both boundaries (preprocessor lines are legal inside initializers).
  print >> f, "OnigCodePoint %s%d[] = {" % (name, n)
  c = 0
  c = output_data_n1(f, n, fn, c, out_comment)
  print >> f, "#define FOLDS%d_NORMAL_END_INDEX %d" % (n, c)
  print >> f, " /* ----- LOCALE ----- */"
  c = output_data_n1(f, n, lfn, c, out_comment)
  print >> f, "#define FOLDS%d_END_INDEX %d" % (n, c)
  print >> f, "};"
def output_fold_data(f, name, out_comment):
  # Emit all three C fold arrays (fold lengths 1..3), each combining the
  # normal and locale tables.
  f1, f2, f3 = divide_by_fold_len(FOLDS)
  lf1, lf2, lf3 = divide_by_fold_len(LOCALE_FOLDS)
  output_data_n(f, name, 1, f1, lf1, out_comment)
  print >> f, ''
  output_data_n(f, name, 2, f2, lf2, out_comment)
  print >> f, ''
  output_data_n(f, name, 3, f3, lf3, out_comment)
  print >> f, ''
def output_macros(f, name):
  """Emit the C accessor macros for the generated fold tables.

  Table layout per entry: fold code(s), unfold count, then the unfold
  codes; each macro derives a field location from an entry start index.
  (Call site in output_fold_source is currently commented out.)
  """
  lines = [
    "#define FOLDS1_FOLD(i) (%s1 + (i))" % name,
    "#define FOLDS2_FOLD(i) (%s2 + (i))" % name,
    "#define FOLDS3_FOLD(i) (%s3 + (i))" % name,
    "#define FOLDS1_UNFOLDS_NUM(i) %s1[(i)+1]" % name,
    "#define FOLDS2_UNFOLDS_NUM(i) %s2[(i)+2]" % name,
    "#define FOLDS3_UNFOLDS_NUM(i) %s3[(i)+3]" % name,
    "#define FOLDS1_UNFOLDS(i) (%s1 + (i) + 2)" % name,
    "#define FOLDS2_UNFOLDS(i) (%s2 + (i) + 3)" % name,
    "#define FOLDS3_UNFOLDS(i) (%s3 + (i) + 4)" % name,
    "#define FOLDS1_NEXT_INDEX(i) ((i) + 2 + %s1[(i)+1])" % name,
    # BUG FIX: the 2- and 3-code next-index macros must read the unfold
    # count from their own table (...2 / ...3), not from table 1 as the
    # original copy-pasted "%s1" did.
    "#define FOLDS2_NEXT_INDEX(i) ((i) + 3 + %s2[(i)+2])" % name,
    "#define FOLDS3_NEXT_INDEX(i) ((i) + 4 + %s3[(i)+3])" % name,
  ]
  for line in lines:
    f.write(line + "\n")
def output_fold_source(f, out_comment):
  # Emit the complete C source for the fold tables to stream f.
  print >> f, "/* This file was generated by make_unicode_fold_data.py. */"
  print >> f, COPYRIGHT
  print >> f, "\n"
  print >> f, '#include "regenc.h"'
  print >> f, ''
  if VERSION_INFO[0] < 0:
    raise RuntimeError("Version is not found")
  # NOTE(review): the next two prints go to stdout rather than f; this
  # only works because main passes f=sys.stdout -- confirm before reusing
  # this function with a real file object.
  print "#define UNICODE_CASEFOLD_VERSION %02d%02d%02d" % (VERSION_INFO[0], VERSION_INFO[1], VERSION_INFO[2])
  print ''
  #output_macros(f, DataName)
  print >> f, ''
  #output_typedef(f)
  output_fold_data(f, DataName, out_comment)
def output_gperf_unfold_key(f):
  # Write the gperf input that maps a 3-byte code-point key to its
  # (index, fold_len) in the fold tables; locale entries are included.
  head = "%{\n/* This gperf source file was generated by make_unicode_fold_data.py */\n\n" + COPYRIGHT + """\
#include "regint.h"
%}
struct ByUnfoldKey {
  OnigCodePoint code;
  short int index;
  short int fold_len;
};
%%
"""
  f.write(head)
  UNFOLDS.update(LOCALE_UNFOLDS)  # NOTE: mutates the global table
  l = UNFOLDS.items()
  sl = sorted(l, key=lambda (k,e):(e.fold_len, e.index))
  for k, e in sl:
    f.write('"%s", /*0x%04x*/ %4d, %d\n' %
            (form3bytes(k), k, e.index, e.fold_len))
  print >> f, '%%'
def output_gperf_fold_key(f, key_len):
  # Write the gperf input mapping a fold key (key_len concatenated
  # 3-byte codes) to its entry index in the matching folds table.
  head = "%{\n/* This gperf source file was generated by make_unicode_fold_data.py */\n\n" + COPYRIGHT + """\
#include "regint.h"
%}
short int
%%
"""
  f.write(head)
  l = FOLDS.items()
  l = filter(lambda (k,e):e.fold_len == key_len, l)
  sl = sorted(l, key=lambda (k,e):e.index)
  for k, e in sl:
    skey = ''.join(map(lambda i: form3bytes(i), e.fold))
    f.write('"%s", %4d\n' % (skey, e.index))
  print >> f, '%%'
def output_gperf_source():
  # Write all gperf inputs: the unfold-key lookup plus one fold-key file
  # per fold length.  Locale entries are merged into the global tables
  # as a side effect (here and inside output_gperf_unfold_key).
  with open(GPERF_UNFOLD_KEY_FILE, 'w') as f:
    output_gperf_unfold_key(f)
  FOLDS.update(LOCALE_FOLDS)
  for i in range(1, 4):
    with open(GPERF_FOLD_KEY_FILES[i-1], 'w') as f:
      output_gperf_fold_key(f, i)
def unfolds_byte_length_check(encode):
  # Diagnostic: warn (stderr) when an unfold's encoded byte length
  # exceeds that of its fold under the given codec.
  l = UNFOLDS.items()
  sl = sorted(l, key=lambda (k,e):(e.fold_len, e.index))
  for unfold, e in sl:
    key_len = enc_len(unfold, encode)
    fold_len = sum(map(lambda c: enc_len(c, encode), e.fold))
    if key_len > fold_len:
      sfolds = ' '.join(map(lambda c: "0x%06x" % c, e.fold))
      s = "%s byte length: %d > %d: 0x%06x => %s" % (encode, key_len, fold_len, unfold, sfolds)
      print >> sys.stderr, s
def double_fold_check():
  # Diagnostic: report (stderr) any code that folds to a sequence whose
  # member itself appears as an unfold -- folding would not be idempotent.
  l = UNFOLDS.items()
  sl = sorted(l, key=lambda (k,e):(e.fold_len, e.index))
  for unfold, e in sl:
    for f in e.fold:
      #print >> sys.stderr, ("check 0x%06x" % f)
      e2 = UNFOLDS.get(f)
      if e2 is not None:
        s = "double folds: 0x%06x => %s, 0x%06x => %s" % (unfold, e.fold, f, e2.fold)
        print >> sys.stderr, s
def unfold_is_multi_code_folds_head_check():
  # Diagnostic: report (stderr) unfold codes that also appear as the
  # first code of a 2- or 3-code fold (ambiguous match starts).
  l = UNFOLDS.items()
  l2 = filter(lambda (k,e):e.fold_len == 2, l)
  l3 = filter(lambda (k,e):e.fold_len == 3, l)
  sl = sorted(l, key=lambda (k,e):(e.fold_len, e.index))
  for unfold, _ in sl:
    for k, e in l2:
      if e.fold[0] == unfold:
        s = "unfold 0x%06x is multi-code fold head in %s" % (unfold, e.fold)
        print >> sys.stderr, s
    for k, e in l3:
      if e.fold[0] == unfold:
        s = "unfold 0x%06x is multi-code fold head in %s" % (unfold, e.fold)
        print >> sys.stderr, s
def make_one_folds(l):
  """Map each single-code fold to the list of codes that fold to it.

  l is an iterable of (unfold, Entry) pairs; only entries whose fold is
  one code point participate.
  """
  one = {}
  for unfold, entry in l:
    if entry.fold_len == 1:
      one.setdefault(entry.fold[0], []).append(unfold)
  return one
def make_foldn_heads(l, fold_len, one_folds):
  """Index folds of the given length by their first code point.

  Returns {head_code: (Entry, unfolds-of-head or None)}; when several
  entries share a head code, the last one seen wins.
  """
  heads = {}
  for _unfold, entry in l:
    if entry.fold_len == fold_len:
      head = entry.fold[0]
      heads[head] = (entry, one_folds.get(head))
  return heads
def fold2_expansion_num(e, one_folds):
  """Count case-variant expansions of a 2-code fold entry.

  Sum of the entry's unfolds plus every combination of the two fold
  positions' alternatives (each code itself plus its 1:1 unfolds).
  """
  combos = 1
  for cp in e.fold:
    alts = one_folds.get(cp)
    combos *= 1 + (len(alts) if alts is not None else 0)
  return len(e.unfolds) + combos
def fold3_expansion_num(e, one_folds):
  """Count case-variant expansions of a 3-code fold entry.

  Same scheme as fold2_expansion_num, over three fold positions.
  """
  combos = 1
  for cp in e.fold:
    alts = one_folds.get(cp)
    combos *= 1 + (len(alts) if alts is not None else 0)
  return len(e.unfolds) + combos
def get_all_folds_expansion_num(x, one_folds, fold2_heads, fold3_heads):
  # Worst-case number of case-variant expansions reachable from code x
  # (looked up in the global UNFOLDS table).
  e = UNFOLDS[x]
  n = 0
  if e.fold_len == 1:
    n1 = len(e.unfolds) + 1 # +1: fold
    fx = e.fold[0]
    # A 1-code fold may also start a 2- or 3-code fold sequence; take
    # the largest of the three expansion counts.
    r = fold2_heads.get(fx)
    n2 = n3 = 0
    if r is not None:
      e2, _ = r
      n2 = fold2_expansion_num(e2, one_folds)
    r = fold3_heads.get(fx)
    if r is not None:
      e3, _ = r
      n3 = fold3_expansion_num(e3, one_folds)
    n = max(n1, n2, n3)
  elif e.fold_len == 2:
    n = fold2_expansion_num(e, one_folds)
  elif e.fold_len == 3:
    n = fold3_expansion_num(e, one_folds)
  else:
    raise RuntimeError("Invalid fold_len %d" % (e.fold_len))
  return n
def get_all_folds_expansion_max_num():
  # Scan every unfold and return (max_count, code) for the code point
  # with the largest case-expansion count (diagnostic; the call in
  # main is commented out).
  l = UNFOLDS.items()
  one_folds = make_one_folds(l)
  fold2_heads = make_foldn_heads(l, 2, one_folds)
  fold3_heads = make_foldn_heads(l, 3, one_folds)
  sl = sorted(l, key=lambda (k,e):(e.fold_len, e.index))
  nmax = 0
  max_unfold = None
  for unfold, e in sl:
    n = get_all_folds_expansion_num(unfold, one_folds, fold2_heads, fold3_heads)
    if nmax < n:
      nmax = n
      max_unfold = unfold
  return (nmax, max_unfold)
## main ##
# Parse CaseFolding.txt, split out Turkish locale folds, then emit the
# C table source to stdout and the gperf inputs to their own files.
with open(SOURCE_FILE, 'r') as f:
  parse_file(f)
make_locale()
out_comment = True
output_fold_source(sys.stdout, out_comment)
output_gperf_source()
#unfolds_byte_length_check('utf-8')
#unfolds_byte_length_check('utf-16')
# Sanity checks report to stderr and do not stop generation.
double_fold_check()
unfold_is_multi_code_folds_head_check()
#max_num, max_code = get_all_folds_expansion_max_num()
#max_num -= 1 # remove self
#print >> sys.stderr, "max expansion: 0x%06x: %d" % (max_code, max_num)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/src/make_unicode_fold_data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# make_unicode_egcb_data.py
# Copyright (c) 2017-2019 K.Kosako
import sys
import re
MAX_CODE_POINT = 0x10ffff
# UCD parsing patterns: "# Total ..." summary comments, range lines,
# alias lines, value-alias lines, block lines, and the "-X.Y.Z.txt"
# version comment.
PR_TOTAL_REG = re.compile("#\s*Total\s+(?:code\s+points|elements):")
PR_LINE_REG = re.compile("([0-9A-Fa-f]+)(?:..([0-9A-Fa-f]+))?\s*;\s*(\w+)")
PA_LINE_REG = re.compile("(\w+)\s*;\s*(\w+)")
PVA_LINE_REG = re.compile("(sc|gc)\s*;\s*(\w+)\s*;\s*(\w+)(?:\s*;\s*(\w+))?")
BL_LINE_REG = re.compile("([0-9A-Fa-f]+)\.\.([0-9A-Fa-f]+)\s*;\s*(.*)")
VERSION_REG = re.compile("#\s*.*-(\d+)\.(\d+)\.(\d+)\.txt")
VERSION_INFO = [-1, -1, -1]   # UCD (major, minor, patch); -1 = unknown
DIC = { }                     # property name -> list of (start, end)
PROPS = []                    # property names in input order
PropIndex = { }               # property name -> emitted index
def check_version_info(s):
  # Capture the UCD version from a "# ...-X.Y.Z.txt" comment line into
  # the VERSION_INFO module global (caller guards so it fills once).
  m = VERSION_REG.match(s)
  if m is not None:
    VERSION_INFO[0] = int(m.group(1))
    VERSION_INFO[1] = int(m.group(2))
    VERSION_INFO[2] = int(m.group(3))
def print_ranges(ranges):
  # Debug helper: dump (start, end) pairs as hex to stdout.
  for (start, end) in ranges:
    print "0x%06x, 0x%06x" % (start, end)
def print_prop_and_index(prop, i):
  # Emit "<prop>, <index>" and record the assignment in PropIndex.
  print "%-35s %3d" % (prop + ',', i)
  PropIndex[prop] = i
def dic_find_by_value(dic, v):
  """Reverse lookup: return the first key whose value equals v, else None."""
  for key in dic:
    if dic[key] == v:
      return key
  return None
def normalize_ranges(in_ranges, sort=False):
  """Merge adjacent or overlapping (start, end) ranges.

  Ranges must be ascending by start unless sort=True.  Two ranges merge
  when the next starts at or before the previous end + 1.

  Fix: the first-iteration sentinel is now an explicit None check; the
  original relied on Python 2's `None >= int` ordering, which raises
  TypeError under Python 3.
  """
  ranges = sorted(in_ranges) if sort else in_ranges
  merged = []
  prev_end = None
  for start, end in ranges:
    if prev_end is not None and prev_end >= start - 1:
      # Extend the previously emitted range instead of adding a new one.
      pstart, pend = merged.pop()
      start, end = pstart, max(pend, end)
    merged.append((start, end))
    prev_end = end
  return merged
def inverse_ranges(in_ranges):
  """Complement of a normalized range list over [0, MAX_CODE_POINT].

  in_ranges must be sorted and non-overlapping.
  """
  r = []
  prev = 0x000000
  for (start, end) in in_ranges:
    if prev < start:
      r.append((prev, start - 1))
    prev = end + 1
  # BUG FIX: use <= so the single code point MAX_CODE_POINT survives
  # when the last input range ends exactly at MAX_CODE_POINT - 1; the
  # original '<' silently dropped it.
  if prev <= MAX_CODE_POINT:
    r.append((prev, MAX_CODE_POINT))
  return r
def add_ranges(r1, r2):
  """Union of two range lists, sorted and merged."""
  return normalize_ranges(r1 + r2, True)
def sub_one_range(one_range, rs):
  # Subtract every range in rs (assumed normalized: sorted, disjoint)
  # from the single range one_range; return the surviving sub-ranges
  # in ascending order.
  r = []
  (s1, e1) = one_range
  n = len(rs)
  for i in range(0, n):
    (s2, e2) = rs[i]
    if s2 >= s1 and s2 <= e1:      # subtrahend starts inside [s1, e1]
      if s2 > s1:
        r.append((s1, s2 - 1))     # keep the uncovered prefix
      if e2 >= e1:
        return r                   # remainder fully covered
      s1 = e2 + 1                  # continue past the hole
    elif s2 < s1 and e2 >= s1:     # subtrahend overlaps from the left
      if e2 < e1:
        s1 = e2 + 1
      else:
        return r
  r.append((s1, e1))               # tail that no subtrahend reached
  return r
def sub_ranges(r1, r2):
  """Subtract range list r2 from range list r1, piece by piece."""
  result = []
  for piece in r1:
    result.extend(sub_one_range(piece, r2))
  return result
def add_ranges_in_dic(dic):
  """Union every range list stored as a dict value into one merged list."""
  combined = []
  for ranges in dic.values():
    combined = combined + ranges
  return normalize_ranges(combined, True)
def normalize_ranges_in_dic(dic, sort=False):
  """Normalize every range list stored in dic, replacing values in place."""
  for key in list(dic):
    dic[key] = normalize_ranges(dic[key], sort)
def merge_dic(to_dic, from_dic):
  # Merge from_dic into to_dic; colliding keys are reported to stderr
  # and then overwritten with from_dic's values.
  to_keys = to_dic.keys()
  from_keys = from_dic.keys()
  common = list(set(to_keys) & set(from_keys))
  if len(common) != 0:
    print >> sys.stderr, "merge_dic: collision: %s" % sorted(common)
  to_dic.update(from_dic)
def merge_props(to_props, from_props):
  # Append from_props onto to_props; duplicates are reported to stderr
  # but still appended.
  common = list(set(to_props) & set(from_props))
  if len(common) != 0:
    print >> sys.stderr, "merge_props: collision: %s" % sorted(common)
  to_props.extend(from_props)
def add_range_into_dic(dic, name, start, end):
  """Append the (start, end) range to dic[name], creating the list if absent."""
  dic.setdefault(name, []).append((start, end))
def list_sub(a, b):
  """Elements of a not present in b (set difference; order unspecified)."""
  return list(set(a) - set(b))
def parse_properties(path):
  # Parse a UCD property file into ({prop: [(start, end), ...]}, props).
  # A property reaches `props` only when its trailing "# Total code
  # points:" summary comment is seen.
  with open(path, 'r') as f:
    dic = { }
    prop = None
    props = []
    for line in f:
      s = line.strip()
      if len(s) == 0:
        continue
      if s[0] == '#':
        if VERSION_INFO[0] < 0:
          check_version_info(s)
        # NOTE: deliberately no 'continue' -- the "# Total ..." comment
        # line must still reach the PR_TOTAL_REG check below.
      m = PR_LINE_REG.match(s)
      if m:
        prop = m.group(3)
        if m.group(2):
          # "XXXX..YYYY ; Prop" range form
          start = int(m.group(1), 16)
          end = int(m.group(2), 16)
          add_range_into_dic(dic, prop, start, end)
        else:
          # single code point form
          start = int(m.group(1), 16)
          add_range_into_dic(dic, prop, start, start)
      elif PR_TOTAL_REG.match(s) is not None:
        props.append(prop)
  normalize_ranges_in_dic(dic)
  return (dic, props)
### main ###
argv = sys.argv
argc = len(argv)   # NOTE: argv/argc are unused; the input file name is fixed
dic, props = parse_properties('GraphemeBreakProperty.txt')
merge_dic(DIC, dic)
merge_props(PROPS, props)
PROPS = sorted(PROPS)
print '/* unicode_egcb_data.c: Generated by make_unicode_egcb_data.py. */'
COPYRIGHT = '''
/*-
* Copyright (c) 2017-2019 K.Kosako
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
'''.strip()
print COPYRIGHT
print ''
if VERSION_INFO[0] < 0:
  raise RuntimeError("Version is not found")
# Version stamp gleaned from the UCD file header during parsing.
print "#define GRAPHEME_BREAK_PROPERTY_VERSION %02d%02d%02d" % (VERSION_INFO[0], VERSION_INFO[1], VERSION_INFO[2])
print ''
# Flatten DIC into one (start, end, prop) list ordered by start.
ranges = []
for prop in PROPS:
  rs = DIC[prop]
  for (start, end) in rs:
    ranges.append((start, end, prop))
ranges = sorted(ranges, key=lambda x: x[0])
# Verify the flattened ranges are strictly ascending / non-overlapping.
prev = -1
for (start, end, prop) in ranges:
  if prev >= start:
    raise ValueError("{2}:{0} - {1} range overlap prev value {3}".format(start, end, prop, prev))
  # BUG FIX: advance the high-water mark; the original never updated
  # `prev`, so it stayed -1 and the overlap check could never fire.
  prev = end
# Emit the property-name legend, then the C range table.
print '/*'
for prop in PROPS:
  print "%s" % prop
print '*/'
print ''
num_ranges = len(ranges)
print "static int EGCB_RANGE_NUM = %d;" % num_ranges
print 'static EGCB_RANGE_TYPE EGCB_RANGES[] = {'
for i, (start, end, prop) in enumerate(ranges):
  if i == num_ranges - 1:
    comma = ''   # no trailing comma after the last initializer
  else:
    comma = ','
  type_name = 'EGCB_' + prop
  print " {0x%06x, 0x%06x, %s }%s" % (start, end, type_name, comma)
print '};'
sys.exit(0)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/src/make_unicode_egcb_data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# make_unicode_wb_data.py
# Copyright (c) 2019 K.Kosako
import sys
import re
MAX_CODE_POINT = 0x10ffff
# UCD parsing patterns: "# Total ..." summary comments, range lines,
# alias lines, value-alias lines, block lines, and the "-X.Y.Z.txt"
# version comment.
PR_TOTAL_REG = re.compile("#\s*Total\s+(?:code\s+points|elements):")
PR_LINE_REG = re.compile("([0-9A-Fa-f]+)(?:..([0-9A-Fa-f]+))?\s*;\s*(\w+)")
PA_LINE_REG = re.compile("(\w+)\s*;\s*(\w+)")
PVA_LINE_REG = re.compile("(sc|gc)\s*;\s*(\w+)\s*;\s*(\w+)(?:\s*;\s*(\w+))?")
BL_LINE_REG = re.compile("([0-9A-Fa-f]+)\.\.([0-9A-Fa-f]+)\s*;\s*(.*)")
VERSION_REG = re.compile("#\s*.*-(\d+)\.(\d+)\.(\d+)\.txt")
VERSION_INFO = [-1, -1, -1]   # UCD (major, minor, patch); -1 = unknown
DIC = { }                     # property name -> list of (start, end)
PROPS = []                    # property names in input order
PropIndex = { }               # property name -> emitted index
def check_version_info(s):
  # Capture the UCD version from a "# ...-X.Y.Z.txt" comment line into
  # the VERSION_INFO module global (caller guards so it fills once).
  m = VERSION_REG.match(s)
  if m is not None:
    VERSION_INFO[0] = int(m.group(1))
    VERSION_INFO[1] = int(m.group(2))
    VERSION_INFO[2] = int(m.group(3))
def print_ranges(ranges):
  # Debug helper: dump (start, end) pairs as hex to stdout.
  for (start, end) in ranges:
    print "0x%06x, 0x%06x" % (start, end)
def print_prop_and_index(prop, i):
  # Emit "<prop>, <index>" and record the assignment in PropIndex.
  print "%-35s %3d" % (prop + ',', i)
  PropIndex[prop] = i
def dic_find_by_value(dic, v):
  """Reverse lookup: return the first key whose value equals v, else None."""
  for key in dic:
    if dic[key] == v:
      return key
  return None
def normalize_ranges(in_ranges, sort=False):
  """Merge adjacent or overlapping (start, end) ranges.

  Ranges must be ascending by start unless sort=True.  Two ranges merge
  when the next starts at or before the previous end + 1.

  Fix: the first-iteration sentinel is now an explicit None check; the
  original relied on Python 2's `None >= int` ordering, which raises
  TypeError under Python 3.
  """
  ranges = sorted(in_ranges) if sort else in_ranges
  merged = []
  prev_end = None
  for start, end in ranges:
    if prev_end is not None and prev_end >= start - 1:
      # Extend the previously emitted range instead of adding a new one.
      pstart, pend = merged.pop()
      start, end = pstart, max(pend, end)
    merged.append((start, end))
    prev_end = end
  return merged
def inverse_ranges(in_ranges):
  """Complement of a normalized range list over [0, MAX_CODE_POINT].

  in_ranges must be sorted and non-overlapping.
  """
  r = []
  prev = 0x000000
  for (start, end) in in_ranges:
    if prev < start:
      r.append((prev, start - 1))
    prev = end + 1
  # BUG FIX: use <= so the single code point MAX_CODE_POINT survives
  # when the last input range ends exactly at MAX_CODE_POINT - 1; the
  # original '<' silently dropped it.
  if prev <= MAX_CODE_POINT:
    r.append((prev, MAX_CODE_POINT))
  return r
def add_ranges(r1, r2):
  """Union of two range lists, sorted and merged."""
  return normalize_ranges(r1 + r2, True)
def sub_one_range(one_range, rs):
  # Subtract every range in rs (assumed normalized: sorted, disjoint)
  # from the single range one_range; return the surviving sub-ranges
  # in ascending order.
  r = []
  (s1, e1) = one_range
  n = len(rs)
  for i in range(0, n):
    (s2, e2) = rs[i]
    if s2 >= s1 and s2 <= e1:      # subtrahend starts inside [s1, e1]
      if s2 > s1:
        r.append((s1, s2 - 1))     # keep the uncovered prefix
      if e2 >= e1:
        return r                   # remainder fully covered
      s1 = e2 + 1                  # continue past the hole
    elif s2 < s1 and e2 >= s1:     # subtrahend overlaps from the left
      if e2 < e1:
        s1 = e2 + 1
      else:
        return r
  r.append((s1, e1))               # tail that no subtrahend reached
  return r
def sub_ranges(r1, r2):
  """Subtract range list r2 from range list r1, piece by piece."""
  result = []
  for piece in r1:
    result.extend(sub_one_range(piece, r2))
  return result
def add_ranges_in_dic(dic):
  """Union every range list stored as a dict value into one merged list."""
  combined = []
  for ranges in dic.values():
    combined = combined + ranges
  return normalize_ranges(combined, True)
def normalize_ranges_in_dic(dic, sort=False):
  """Normalize every range list stored in dic, replacing values in place."""
  for key in list(dic):
    dic[key] = normalize_ranges(dic[key], sort)
def merge_dic(to_dic, from_dic):
  # Merge from_dic into to_dic; colliding keys are reported to stderr
  # and then overwritten with from_dic's values.
  to_keys = to_dic.keys()
  from_keys = from_dic.keys()
  common = list(set(to_keys) & set(from_keys))
  if len(common) != 0:
    print >> sys.stderr, "merge_dic: collision: %s" % sorted(common)
  to_dic.update(from_dic)
def merge_props(to_props, from_props):
  # Append from_props onto to_props; duplicates are reported to stderr
  # but still appended.
  common = list(set(to_props) & set(from_props))
  if len(common) != 0:
    print >> sys.stderr, "merge_props: collision: %s" % sorted(common)
  to_props.extend(from_props)
def add_range_into_dic(dic, name, start, end):
  """Append the (start, end) range to dic[name], creating the list if absent."""
  dic.setdefault(name, []).append((start, end))
def list_sub(a, b):
  """Elements of a not present in b (set difference; order unspecified)."""
  return list(set(a) - set(b))
def parse_properties(path):
  # Parse a UCD property file into ({prop: [(start, end), ...]}, props).
  # A property reaches `props` only when its trailing "# Total code
  # points:" summary comment is seen.
  with open(path, 'r') as f:
    dic = { }
    prop = None
    props = []
    for line in f:
      s = line.strip()
      if len(s) == 0:
        continue
      if s[0] == '#':
        if VERSION_INFO[0] < 0:
          check_version_info(s)
        # NOTE: deliberately no 'continue' -- the "# Total ..." comment
        # line must still reach the PR_TOTAL_REG check below.
      m = PR_LINE_REG.match(s)
      if m:
        prop = m.group(3)
        if m.group(2):
          # "XXXX..YYYY ; Prop" range form
          start = int(m.group(1), 16)
          end = int(m.group(2), 16)
          add_range_into_dic(dic, prop, start, end)
        else:
          # single code point form
          start = int(m.group(1), 16)
          add_range_into_dic(dic, prop, start, start)
      elif PR_TOTAL_REG.match(s) is not None:
        props.append(prop)
  normalize_ranges_in_dic(dic)
  return (dic, props)
### main ###
argv = sys.argv
argc = len(argv)   # NOTE: argv/argc are unused; the input file name is fixed
dic, props = parse_properties('WordBreakProperty.txt')
merge_dic(DIC, dic)
merge_props(PROPS, props)
PROPS = sorted(PROPS)
print '/* unicode_wb_data.c: Generated by make_unicode_wb_data.py. */'
COPYRIGHT = '''
/*-
* Copyright (c) 2019 K.Kosako
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
'''.strip()
print COPYRIGHT
print ''
if VERSION_INFO[0] < 0:
  raise RuntimeError("Version is not found.")
# Version stamp gleaned from the UCD file header during parsing.
print "#define WORD_BREAK_PROPERTY_VERSION %02d%02d%02d" % (VERSION_INFO[0], VERSION_INFO[1], VERSION_INFO[2])
print ''
# Flatten DIC into one (start, end, prop) list ordered by start.
ranges = []
for prop in PROPS:
  rs = DIC[prop]
  for (start, end) in rs:
    ranges.append((start, end, prop))
ranges = sorted(ranges, key=lambda x: x[0])
# Verify the flattened ranges are strictly ascending / non-overlapping.
prev = -1
for (start, end, prop) in ranges:
  if prev >= start:
    raise ValueError("{2}:{0} - {1} range overlap prev value {3}".format(start, end, prop, prev))
  # BUG FIX: advance the high-water mark; the original never updated
  # `prev`, so it stayed -1 and the overlap check could never fire.
  prev = end
# Emit the property-name legend, then the C range table.
print '/*'
for prop in PROPS:
  print "%s" % prop
print '*/'
print ''
num_ranges = len(ranges)
print "static int WB_RANGE_NUM = %d;" % num_ranges
print 'static WB_RANGE_TYPE WB_RANGES[] = {'
for i, (start, end, prop) in enumerate(ranges):
  if i == num_ranges - 1:
    comma = ''   # no trailing comma after the last initializer
  else:
    comma = ','
  type_name = 'WB_' + prop
  print " {0x%06x, 0x%06x, %s }%s" % (start, end, type_name, comma)
print '};'
sys.exit(0)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/src/make_unicode_wb_data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# gperf_unfold_key_conv.py
# Copyright (c) 2016-2018 K.Kosako
import sys
import re
# Patterns matching the gperf-generated C constructs that must be
# rewritten from string-keyed to code-point-keyed lookups.
REG_LINE_GPERF = re.compile('#line .+gperf"')
REG_HASH_FUNC = re.compile('hash\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+size_t\s+len\s*\)')
REG_STR_AT = re.compile('str\[(\d+)\]')
REG_UNFOLD_KEY = re.compile('onigenc_unicode_unfold_key\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+size_t\s+len\)')
REG_ENTRY = re.compile('\{".+?",\s*/\*(.+?)\*/\s*(-?\d+),\s*(\d)\}')
REG_EMPTY_ENTRY = re.compile('\{"",\s*(-?\d+),\s*(\d)\}')
REG_IF_LEN = re.compile('\s*if\s*\(\s*len\s*<=\s*MAX_WORD_LENGTH.+')
REG_GET_HASH = re.compile('(?:register\s+)?(?:unsigned\s+)?int\s+key\s*=\s*hash\s*\(str,\s*len\);')
REG_GET_CODE = re.compile('(?:register\s+)?const\s+char\s*\*\s*s\s*=\s*wordlist\[key\]\.name;')
REG_CODE_CHECK = re.compile('if\s*\(\*str\s*==\s*\*s\s*&&\s*!strncmp.+\)')
def parse_line(s):
  """Apply the first matching gperf-output rewrite rule to one line.

  Rules are tried in order; the first substitution that changes the
  line wins.  Lines matching no rule are returned unchanged (rstripped).
  """
  s = s.rstrip()
  rules = (
    (REG_LINE_GPERF, ''),
    (REG_HASH_FUNC, 'hash(OnigCodePoint codes[])'),
    (REG_STR_AT, 'onig_codes_byte_at(codes, \\1)'),
    (REG_UNFOLD_KEY, 'onigenc_unicode_unfold_key(OnigCodePoint code)'),
    (REG_ENTRY, '{\\1, \\2, \\3}'),
    (REG_EMPTY_ENTRY, '{0xffffffff, \\1, \\2}'),
    (REG_IF_LEN, ''),
    (REG_GET_HASH, 'int key = hash(&code);'),
    (REG_GET_CODE, 'OnigCodePoint gcode = wordlist[key].code;'),
    (REG_CODE_CHECK, 'if (code == gcode && wordlist[key].index >= 0)'),
  )
  for pattern, repl in rules:
    rewritten = re.sub(pattern, repl, s)
    if rewritten != s:
      return rewritten
  return s
def parse_file(f):
  # Convert every line of gperf output read from f, echoing the result
  # to stdout behind a provenance banner.
  print "/* This file was converted by gperf_unfold_key_conv.py\n from gperf output file. */"
  line = f.readline()
  while line:
    s = parse_line(line)
    print s
    line = f.readline()
# main
# Filter: gperf output on stdin -> converted C source on stdout.
parse_file(sys.stdin)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Universal/RegularExpressionDxe/oniguruma/src/gperf_unfold_key_conv.py
|
# Copyright 2015 The Brotli Authors. All rights reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
import os
import platform
import re
import unittest
try:
from setuptools import Extension
from setuptools import setup
except:
from distutils.core import Extension
from distutils.core import setup
from distutils.command.build_ext import build_ext
from distutils import errors
from distutils import dep_util
from distutils import log
# Absolute path of the directory containing this setup.py (repo root).
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
def get_version():
    """ Return BROTLI_VERSION string as defined in 'common/version.h' file. """
    version_file_path = os.path.join(CURR_DIR, 'c', 'common', 'version.h')
    version = 0
    with open(version_file_path, 'r') as f:
        for line in f:
            m = re.match(r'#define\sBROTLI_VERSION\s+0x([0-9a-fA-F]+)', line)
            if m:
                version = int(m.group(1), 16)
    if version == 0:
        # Macro not found: report "no version" rather than failing setup.
        return ''
    # Semantic version is calculated as (MAJOR << 24) | (MINOR << 12) | PATCH.
    major = version >> 24
    minor = (version >> 12) & 0xFFF
    patch = version & 0xFFF
    return '{0}.{1}.{2}'.format(major, minor, patch)
def get_test_suite():
    """Discover the bundled unit tests (python/*_test.py) for `test_suite`."""
    loader = unittest.TestLoader()
    return loader.discover('python', pattern='*_test.py')
class BuildExt(build_ext):
    # build_ext subclass that compiles an extension's C and C++ sources
    # in separate passes, so C++-only flags (/EHsc) and platform macros
    # can be applied without leaking into the C compilation.

    def get_source_files(self):
        # Also report header dependencies so they reach sdist/metadata.
        filenames = build_ext.get_source_files(self)
        for ext in self.extensions:
            filenames.extend(ext.depends)
        return filenames

    def build_extension(self, ext):
        if ext.sources is None or not isinstance(ext.sources, (list, tuple)):
            raise errors.DistutilsSetupError(
                "in 'ext_modules' option (extension '%s'), "
                "'sources' must be present and must be "
                "a list of source filenames" % ext.name)
        ext_path = self.get_ext_fullpath(ext.name)
        # Rebuild only when a source or header is newer than the output.
        depends = ext.sources + ext.depends
        if not (self.force or dep_util.newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        # Split sources by language so each pass gets suitable flags.
        c_sources = []
        cxx_sources = []
        for source in ext.sources:
            if source.endswith('.c'):
                c_sources.append(source)
            else:
                cxx_sources.append(source)
        extra_args = ext.extra_compile_args or []
        objects = []
        for lang, sources in (('c', c_sources), ('c++', cxx_sources)):
            if lang == 'c++':
                if self.compiler.compiler_type == 'msvc':
                    # MSVC needs explicit C++ exception handling semantics.
                    extra_args.append('/EHsc')
            macros = ext.define_macros[:]
            if platform.system() == 'Darwin':
                macros.append(('OS_MACOSX', '1'))
            elif self.compiler.compiler_type == 'mingw32':
                # On Windows Python 2.7, pyconfig.h defines "hypot" as "_hypot",
                # This clashes with GCC's cmath, and causes compilation errors when
                # building under MinGW: http://bugs.python.org/issue11566
                macros.append(('_hypot', 'hypot'))
            for undef in ext.undef_macros:
                # A 1-tuple tells distutils to undefine the macro.
                macros.append((undef,))
            objs = self.compiler.compile(
                sources,
                output_dir=self.build_temp,
                macros=macros,
                include_dirs=ext.include_dirs,
                debug=self.debug,
                extra_postargs=extra_args,
                depends=ext.depends)
            objects.extend(objs)
        self._built_objects = objects[:]
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        # when using GCC on Windows, we statically link libgcc and libstdc++,
        # so that we don't need to package extra DLLs
        if self.compiler.compiler_type == 'mingw32':
            extra_args.extend(['-static-libgcc', '-static-libstdc++'])
        ext_path = self.get_ext_fullpath(ext.name)
        # Detect target language, if not provided
        language = ext.language or self.compiler.detect_language(sources)
        self.compiler.link_shared_object(
            objects,
            ext_path,
            libraries=self.get_libraries(ext),
            library_dirs=ext.library_dirs,
            runtime_library_dirs=ext.runtime_library_dirs,
            extra_postargs=extra_args,
            export_symbols=self.get_export_symbols(ext),
            debug=self.debug,
            build_temp=self.build_temp,
            target_lang=language)
# Package metadata.
NAME = 'Brotli'
VERSION = get_version()   # derived from c/common/version.h
URL = 'https://github.com/google/brotli'
DESCRIPTION = 'Python bindings for the Brotli compression library'
AUTHOR = 'The Brotli Authors'
LICENSE = 'MIT'
PLATFORMS = ['Posix', 'MacOS X', 'Windows']
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: C',
    'Programming Language :: C++',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Unix Shell',
    'Topic :: Software Development :: Libraries',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: System :: Archiving',
    'Topic :: System :: Archiving :: Compression',
    'Topic :: Text Processing :: Fonts',
    'Topic :: Utilities',
]
PACKAGE_DIR = {'': 'python'}
PY_MODULES = ['brotli']
# The extension bundles the vendored Brotli C sources (common, decoder,
# encoder) with the C++ binding; `depends` lists headers so rebuilds
# trigger on header changes (see BuildExt.get_source_files).
EXT_MODULES = [
    Extension(
        '_brotli',
        sources=[
            'python/_brotli.cc',
            'c/common/constants.c',
            'c/common/context.c',
            'c/common/dictionary.c',
            'c/common/platform.c',
            'c/common/shared_dictionary.c',
            'c/common/transform.c',
            'c/dec/bit_reader.c',
            'c/dec/decode.c',
            'c/dec/huffman.c',
            'c/dec/state.c',
            'c/enc/backward_references.c',
            'c/enc/backward_references_hq.c',
            'c/enc/bit_cost.c',
            'c/enc/block_splitter.c',
            'c/enc/brotli_bit_stream.c',
            'c/enc/cluster.c',
            'c/enc/command.c',
            'c/enc/compound_dictionary.c',
            'c/enc/compress_fragment.c',
            'c/enc/compress_fragment_two_pass.c',
            'c/enc/dictionary_hash.c',
            'c/enc/encode.c',
            'c/enc/encoder_dict.c',
            'c/enc/entropy_encode.c',
            'c/enc/fast_log.c',
            'c/enc/histogram.c',
            'c/enc/literal_cost.c',
            'c/enc/memory.c',
            'c/enc/metablock.c',
            'c/enc/static_dict.c',
            'c/enc/utf8_util.c',
        ],
        depends=[
            'c/common/constants.h',
            'c/common/context.h',
            'c/common/dictionary.h',
            'c/common/platform.h',
            'c/common/shared_dictionary_internal.h',
            'c/common/transform.h',
            'c/common/version.h',
            'c/dec/bit_reader.h',
            'c/dec/huffman.h',
            'c/dec/prefix.h',
            'c/dec/state.h',
            'c/enc/backward_references.h',
            'c/enc/backward_references_hq.h',
            'c/enc/backward_references_inc.h',
            'c/enc/bit_cost.h',
            'c/enc/bit_cost_inc.h',
            'c/enc/block_encoder_inc.h',
            'c/enc/block_splitter.h',
            'c/enc/block_splitter_inc.h',
            'c/enc/brotli_bit_stream.h',
            'c/enc/cluster.h',
            'c/enc/cluster_inc.h',
            'c/enc/command.h',
            'c/enc/compound_dictionary.h',
            'c/enc/compress_fragment.h',
            'c/enc/compress_fragment_two_pass.h',
            'c/enc/dictionary_hash.h',
            'c/enc/encoder_dict.h',
            'c/enc/entropy_encode.h',
            'c/enc/entropy_encode_static.h',
            'c/enc/fast_log.h',
            'c/enc/find_match_length.h',
            'c/enc/hash.h',
            'c/enc/hash_composite_inc.h',
            'c/enc/hash_forgetful_chain_inc.h',
            'c/enc/hash_longest_match64_inc.h',
            'c/enc/hash_longest_match_inc.h',
            'c/enc/hash_longest_match_quickly_inc.h',
            'c/enc/hash_rolling_inc.h',
            'c/enc/hash_to_binary_tree_inc.h',
            'c/enc/histogram.h',
            'c/enc/histogram_inc.h',
            'c/enc/literal_cost.h',
            'c/enc/memory.h',
            'c/enc/metablock.h',
            'c/enc/metablock_inc.h',
            'c/enc/params.h',
            'c/enc/prefix.h',
            'c/enc/quality.h',
            'c/enc/ringbuffer.h',
            'c/enc/static_dict.h',
            'c/enc/static_dict_lut.h',
            'c/enc/utf8_util.h',
            'c/enc/write_bits.h',
        ],
        include_dirs=[
            'c/include',
        ],
        language='c++'),
]
TEST_SUITE = 'setup.get_test_suite'
CMD_CLASS = {
    'build_ext': BuildExt,   # use the C/C++-splitting builder above
}
setup(
    name=NAME,
    description=DESCRIPTION,
    version=VERSION,
    url=URL,
    author=AUTHOR,
    license=LICENSE,
    platforms=PLATFORMS,
    classifiers=CLASSIFIERS,
    package_dir=PACKAGE_DIR,
    py_modules=PY_MODULES,
    ext_modules=EXT_MODULES,
    test_suite=TEST_SUITE,
    cmdclass=CMD_CLASS)
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/setup.py
|
#! /usr/bin/env python
"""Compression/decompression utility using the Brotli algorithm."""
from __future__ import print_function
import argparse
import sys
import os
import platform
import brotli
# default values of encoder parameters
# (installed as argparse defaults via parser.set_defaults in main())
DEFAULT_PARAMS = {
    'mode': brotli.MODE_GENERIC,
    'quality': 11,
    'lgwin': 22,
    'lgblock': 0,
}
def get_binary_stdio(stream):
    """ Return the specified standard input, output or errors stream as a
    'raw' buffer object suitable for reading/writing binary data from/to it.
    """
    assert stream in ['stdin', 'stdout', 'stderr'], 'invalid stream name'
    stdio = getattr(sys, stream)
    if sys.version_info[0] >= 3:
        # Python 3: the text wrapper exposes its binary layer as .buffer;
        # fall back to the interpreter's original stream if the current
        # one was replaced by an object without it.
        if hasattr(stdio, 'buffer'):
            return stdio.buffer
        orig_stdio = getattr(sys, '__%s__' % stream)
        return orig_stdio.buffer
    # Python 2: streams are already byte-oriented, but Windows defaults
    # them to text mode and must be switched to binary.
    if sys.platform == 'win32':
        runtime = platform.python_implementation()
        if runtime == 'PyPy':
            # the msvcrt trick doesn't work in pypy, so I use fdopen
            mode = 'rb' if stream == 'stdin' else 'wb'
            stdio = os.fdopen(stdio.fileno(), mode, 0)
        else:
            # this works with CPython -- untested on other implementations
            import msvcrt
            msvcrt.setmode(stdio.fileno(), os.O_BINARY)
    return stdio
def _make_arg_parser():
    """Build and return the ArgumentParser for the bro command line."""
    parser = argparse.ArgumentParser(
        prog=os.path.basename(__file__), description=__doc__)
    parser.add_argument(
        '--version', action='version', version=brotli.__version__)
    parser.add_argument(
        '-i',
        '--input',
        metavar='FILE',
        type=str,
        dest='infile',
        help='Input file',
        default=None)
    parser.add_argument(
        '-o',
        '--output',
        metavar='FILE',
        type=str,
        dest='outfile',
        help='Output file',
        default=None)
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='Overwrite existing output file',
        default=False)
    parser.add_argument(
        '-d',
        '--decompress',
        action='store_true',
        help='Decompress input file',
        default=False)
    params = parser.add_argument_group('optional encoder parameters')
    params.add_argument(
        '-m',
        '--mode',
        metavar='MODE',
        type=int,
        choices=[0, 1, 2],
        help='The compression mode can be 0 for generic input, '
        '1 for UTF-8 encoded text, or 2 for WOFF 2.0 font data. '
        'Defaults to 0.')
    params.add_argument(
        '-q',
        '--quality',
        metavar='QUALITY',
        type=int,
        choices=list(range(0, 12)),
        help='Controls the compression-speed vs compression-density '
        'tradeoff. The higher the quality, the slower the '
        'compression. Range is 0 to 11. Defaults to 11.')
    params.add_argument(
        '--lgwin',
        metavar='LGWIN',
        type=int,
        choices=list(range(10, 25)),
        help='Base 2 logarithm of the sliding window size. Range is '
        '10 to 24. Defaults to 22.')
    params.add_argument(
        '--lgblock',
        metavar='LGBLOCK',
        type=int,
        choices=[0] + list(range(16, 25)),
        help='Base 2 logarithm of the maximum input block size. '
        'Range is 16 to 24. If set to 0, the value will be set based '
        'on the quality. Defaults to 0.')
    # set default values using global DEFAULT_PARAMS dictionary
    parser.set_defaults(**DEFAULT_PARAMS)
    return parser


def main(args=None):
    """Compress or decompress data per the command-line options.

    Args:
        args: optional list of argument strings; defaults to sys.argv[1:].

    Reads from --input (or binary stdin) and writes to --output (or binary
    stdout). Exits with status 1 on a brotli error.
    """
    parser = _make_arg_parser()
    options = parser.parse_args(args=args)

    # Read the whole input up front: the one-shot brotli API works on bytes.
    if options.infile:
        if not os.path.isfile(options.infile):
            parser.error('file "%s" not found' % options.infile)
        with open(options.infile, 'rb') as infile:
            data = infile.read()
    else:
        if sys.stdin.isatty():
            # interactive console, just quit
            parser.error('no input')
        data = get_binary_stdio('stdin').read()

    if options.outfile:
        if os.path.isfile(options.outfile) and not options.force:
            parser.error('output file exists')
        outfile = open(options.outfile, 'wb')
        close_outfile = True   # we own this handle and must close it
    else:
        outfile = get_binary_stdio('stdout')
        close_outfile = False  # never close the interpreter's stdout buffer

    try:
        if options.decompress:
            data = brotli.decompress(data)
        else:
            data = brotli.compress(
                data,
                mode=options.mode,
                quality=options.quality,
                lgwin=options.lgwin,
                lgblock=options.lgblock)
        outfile.write(data)
    except brotli.error as e:
        parser.exit(1,
                    'bro: error: %s: %s' % (e, options.infile or 'sys.stdin'))
    finally:
        # Close the output file even when brotli fails (parser.exit raises
        # SystemExit); the original code leaked the handle on that path and
        # also closed the shared stdout buffer when no --output was given.
        if close_outfile:
            outfile.close()


if __name__ == '__main__':
    main()
|
nvtrust-main
|
infrastructure/kvm/ovmf/ovmf_source/MdeModulePkg/Library/BrotliCustomDecompressLib/brotli/python/bro.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.