text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```python
#!/usr/bin/env python3
#
"""
Tests for jobserver.py classes' methods
"""
import functools
import mock
import os
import pytest
import sys
from contextlib import nullcontext
from errno import ENOENT
from selectors import EVENT_READ
# Job server only works on Linux for now.
pytestmark = pytest.mark.skipif(sys.platform != 'linux', reason='JobServer only works on Linux.')
if sys.platform == 'linux':
from twisterlib.jobserver import GNUMakeJobClient, GNUMakeJobServer, JobClient, JobHandle
from fcntl import F_GETFL
def test_jobhandle(capfd):
    """A JobHandle used as a context manager calls its function with the
    stored positional and keyword arguments on exit, even on early return."""
    def printer(a, b, c=None, d=None):
        print(f'{a}, {b}, {c}, {d}')

    def run_and_exit():
        # Returning from inside the `with` still triggers __exit__.
        with JobHandle(printer, 1, 2, c='three', d=4):
            return

    run_and_exit()
    captured_out, captured_err = capfd.readouterr()
    sys.stdout.write(captured_out)
    sys.stderr.write(captured_err)
    assert '1, 2, three, 4' in captured_out
def test_jobclient_get_job():
    """A plain JobClient hands out JobHandles with no release callback."""
    client = JobClient()
    handle = client.get_job()
    assert isinstance(handle, JobHandle)
    assert handle.release_func is None


def test_jobclient_env():
    """The base JobClient contributes no environment variables."""
    assert JobClient.env() == {}


def test_jobclient_pass_fds():
    """The base JobClient passes no file descriptors to subprocesses."""
    assert JobClient.pass_fds() == []
TESTDATA_1 = [
    ({}, {'env': {'k': 'v'}, 'pass_fds': []}),
    ({'env': {}, 'pass_fds': ['fd']}, {'env': {}, 'pass_fds': ['fd']}),
]

@pytest.mark.parametrize(
    'kwargs, expected_kwargs',
    TESTDATA_1,
    ids=['no values', 'preexisting values']
)
def test_jobclient_popen(kwargs, expected_kwargs):
    """JobClient.popen should fill in env/pass_fds defaults but never
    override values supplied explicitly by the caller."""
    client = JobClient()
    cmd = ['cmd', 'and', 'some', 'args']
    process = mock.Mock()
    popen_mock = mock.Mock(return_value=process)

    with mock.patch('subprocess.Popen', popen_mock), \
         mock.patch('os.environ', {'k': 'v'}):
        result = client.popen(cmd, **kwargs)

    popen_mock.assert_called_once_with(cmd, **expected_kwargs)
    assert result == process
TESTDATA_2 = [
    (False, 0),
    (True, 0),
    (False, 4),
    (True, 16),
]
@pytest.mark.parametrize(
    'inheritable, internal_jobs',
    TESTDATA_2,
    ids=['no inheritable, no internal', 'inheritable, no internal',
         'no inheritable, internal', 'inheritable, internal']
)
def test_gnumakejobclient_dunders(inheritable, internal_jobs):
    """Exercise GNUMakeJobClient.__init__/__del__ with and without an
    inheritable jobserver pipe and with/without internal job tokens.

    All os-level calls are mocked; the client is created and dropped inside
    deleter() so that __del__ runs while the patches are still in effect.
    """
    inherit_read_fd = mock.Mock()
    inherit_write_fd = mock.Mock()
    inheritable_pipe = (inherit_read_fd, inherit_write_fd) if inheritable else \
        None
    internal_read_fd = mock.Mock()
    internal_write_fd = mock.Mock()
    def mock_pipe():
        # Stand-in for os.pipe() returning the internal pipe's fds.
        return (internal_read_fd, internal_write_fd)
    close_mock = mock.Mock()
    write_mock = mock.Mock()
    set_blocking_mock = mock.Mock()
    selector_mock = mock.Mock()
    def deleter():
        jobs = mock.Mock()
        makeflags = mock.Mock()
        gmjc = GNUMakeJobClient(
            inheritable_pipe,
            jobs,
            internal_jobs=internal_jobs,
            makeflags=makeflags
        )
        assert gmjc.jobs == jobs
        if internal_jobs:
            # One b'+' token per internal job is written to the internal pipe.
            write_mock.assert_called_once_with(internal_write_fd,
                                               b'+' * internal_jobs)
            set_blocking_mock.assert_any_call(internal_read_fd, False)
            selector_mock().register.assert_any_call(internal_read_fd,
                                                     EVENT_READ,
                                                     internal_write_fd)
        if inheritable:
            set_blocking_mock.assert_any_call(inherit_read_fd, False)
            selector_mock().register.assert_any_call(inherit_read_fd,
                                                     EVENT_READ,
                                                     inherit_write_fd)
    with mock.patch('os.close', close_mock), \
         mock.patch('os.write', write_mock), \
         mock.patch('os.set_blocking', set_blocking_mock), \
         mock.patch('os.pipe', mock_pipe), \
         mock.patch('selectors.DefaultSelector', selector_mock):
        deleter()
    # __del__ must have closed whichever pipe ends were in use.
    if internal_jobs:
        close_mock.assert_any_call(internal_read_fd)
        close_mock.assert_any_call(internal_write_fd)
    if inheritable:
        close_mock.assert_any_call(inherit_read_fd)
        close_mock.assert_any_call(inherit_write_fd)
# Each entry: (env, jobs, fcntl_ok_per_pipe, expected_logs, exit_code,
#              expected_args, expected_kwargs) for from_environ().
TESTDATA_3 = [
    (
        {'MAKEFLAGS': '-j1'},
        0,
        (False, False),
        ['Running in sequential mode (-j1)'],
        None,
        [None, 1],
        {'internal_jobs': 1, 'makeflags': '-j1'}
    ),
    (
        {'MAKEFLAGS': 'n--jobserver-auth=0,1'},
        1,
        (True, True),
        [
            '-jN forced on command line; ignoring GNU make jobserver',
            'MAKEFLAGS contained dry-run flag'
        ],
        0,
        None,
        None
    ),
    (
        {'MAKEFLAGS': '--jobserver-auth=0,1'},
        0,
        (True, True),
        ['using GNU make jobserver'],
        None,
        [[0, 1], 0],
        {'internal_jobs': 1, 'makeflags': '--jobserver-auth=0,1'}
    ),
    (
        {'MAKEFLAGS': '--jobserver-auth=123,321'},
        0,
        (False, False),
        ['No file descriptors; ignoring GNU make jobserver'],
        None,
        [None, 0],
        {'internal_jobs': 1, 'makeflags': '--jobserver-auth=123,321'}
    ),
    (
        {'MAKEFLAGS': '--jobserver-auth=0,1'},
        0,
        (False, True),
        # Plain strings: these had pointless f-prefixes (no placeholders).
        ['FD 0 is not readable (flags=2); ignoring GNU make jobserver'],
        None,
        [None, 0],
        {'internal_jobs': 1, 'makeflags': '--jobserver-auth=0,1'}
    ),
    (
        {'MAKEFLAGS': '--jobserver-auth=0,1'},
        0,
        (True, False),
        ['FD 1 is not writable (flags=2); ignoring GNU make jobserver'],
        None,
        [None, 0],
        {'internal_jobs': 1, 'makeflags': '--jobserver-auth=0,1'}
    ),
    (None, 0, (False, False), [], None, None, None),
]
@pytest.mark.parametrize(
    'env, jobs, fcntl_ok_per_pipe, expected_logs,' \
    ' exit_code, expected_args, expected_kwargs',
    TESTDATA_3,
    ids=['env, no jobserver-auth', 'env, jobs, dry run', 'env, no jobs',
         'env, no jobs, oserror', 'env, no jobs, wrong read pipe',
         'env, no jobs, wrong write pipe', 'environ, no makeflags']
)
def test_gnumakejobclient_from_environ(
    caplog,
    env,
    jobs,
    fcntl_ok_per_pipe,
    expected_logs,
    exit_code,
    expected_args,
    expected_kwargs
):
    """from_environ() should parse MAKEFLAGS, validate the advertised
    jobserver pipe FDs, and log why the jobserver is (or is not) used.
    """
    def mock_fcntl(fd, flag):
        # FDs 0/1 report readable/writable flags per the testcase;
        # any other FD behaves as nonexistent.
        if flag == F_GETFL:
            if fd == 0:
                if fcntl_ok_per_pipe[0]:
                    return os.O_RDONLY
                else:
                    return 2
            elif fd == 1:
                if fcntl_ok_per_pipe[1]:
                    return os.O_WRONLY
                else:
                    return 2
        raise OSError(ENOENT, 'dummy error')
    gmjc_init_mock = mock.Mock(return_value=None)
    # The last context manager is either pytest.raises(SystemExit) (when
    # an exit is expected) or nullcontext(); `se` is an ExceptionInfo in
    # the former case and None in the latter.
    with mock.patch('fcntl.fcntl', mock_fcntl), \
         mock.patch('os.close', mock.Mock()), \
         mock.patch('twisterlib.jobserver.GNUMakeJobClient.__init__',
                    gmjc_init_mock), \
         pytest.raises(SystemExit) if exit_code is not None else \
         nullcontext() as se:
        gmjc = GNUMakeJobClient.from_environ(env=env, jobs=jobs)
        # As patching __del__ is hard to do, we'll instead
        # cover possible exceptions and mock os calls
        if gmjc:
            gmjc._inheritable_pipe = getattr(gmjc, '_inheritable_pipe', None)
        if gmjc:
            gmjc._internal_pipe = getattr(gmjc, '_internal_pipe', None)
    assert all([log in caplog.text for log in expected_logs])
    if se:
        assert str(se.value) == str(exit_code)
        return
    if expected_args is None and expected_kwargs is None:
        assert gmjc is None
    else:
        gmjc_init_mock.assert_called_once_with(*expected_args,
                                               **expected_kwargs)
def test_gnumakejobclient_get_job():
    """Exercise get_job(): reading the first selected FD (0) raises
    BlockingIOError and drops that selector entry; the token b'?' is then
    read from FD 1, and releasing the JobHandle writes the token back to
    that entry's data value (0).
    """
    inherit_read_fd = mock.Mock()
    inherit_write_fd = mock.Mock()
    inheritable_pipe = (inherit_read_fd, inherit_write_fd)
    internal_read_fd = mock.Mock()
    internal_write_fd = mock.Mock()
    def mock_pipe():
        return (internal_read_fd, internal_write_fd)
    # Two fake selector keys; `selected` shrinks when fd 0 fails to read.
    selected = [[mock.Mock(fd=0, data=1)], [mock.Mock(fd=1, data=0)]]
    def mock_select():
        nonlocal selected
        return selected
    def mock_read(fd, length):
        nonlocal selected
        if fd == 0:
            selected = selected[1:]
            raise BlockingIOError
        return b'?' * length
    close_mock = mock.Mock()
    write_mock = mock.Mock()
    set_blocking_mock = mock.Mock()
    selector_mock = mock.Mock()
    selector_mock().select = mock.Mock(side_effect=mock_select)
    def deleter():
        jobs = mock.Mock()
        gmjc = GNUMakeJobClient(
            inheritable_pipe,
            jobs
        )
        with mock.patch('os.read', side_effect=mock_read):
            job = gmjc.get_job()
            with job:
                # The release function must write the token back to fd 0.
                expected_func = functools.partial(os.write, 0, b'?')
                assert job.release_func.func == expected_func.func
                assert job.release_func.args == expected_func.args
                assert job.release_func.keywords == expected_func.keywords
    with mock.patch('os.close', close_mock), \
         mock.patch('os.write', write_mock), \
         mock.patch('os.set_blocking', set_blocking_mock), \
         mock.patch('os.pipe', mock_pipe), \
         mock.patch('selectors.DefaultSelector', selector_mock):
        deleter()
    write_mock.assert_any_call(0, b'?')
TESTDATA_4 = [
    ('dummy makeflags', mock.ANY, mock.ANY, {'MAKEFLAGS': 'dummy makeflags'}),
    (None, 0, False, {'MAKEFLAGS': ''}),
    (None, 1, True, {'MAKEFLAGS': ' -j1'}),
    (None, 2, True, {'MAKEFLAGS': ' -j2 --jobserver-auth=0,1'}),
    (None, 0, True, {'MAKEFLAGS': ' --jobserver-auth=0,1'}),
]

@pytest.mark.parametrize(
    'makeflags, jobs, use_inheritable_pipe, expected_makeflags',
    TESTDATA_4,
    ids=['preexisting makeflags', 'no jobs, no pipe', 'one job',
         ' multiple jobs', 'no jobs']
)
def test_gnumakejobclient_env(
    makeflags,
    jobs,
    use_inheritable_pipe,
    expected_makeflags
):
    """env() should rebuild MAKEFLAGS from the job count and jobserver
    pipe, or pass a preexisting makeflags string through unchanged."""
    pipe = (0, 1) if use_inheritable_pipe else None
    selector_mock = mock.Mock()
    captured = {}

    def build_and_query():
        # Construct inside a helper so the client is garbage-collected
        # while __del__ is still patched out.
        client = GNUMakeJobClient(None, None)
        client.jobs = jobs
        client._makeflags = makeflags
        client._inheritable_pipe = pipe
        captured['env'] = client.env()

    with mock.patch.object(GNUMakeJobClient, '__del__', mock.Mock()), \
         mock.patch('selectors.DefaultSelector', selector_mock):
        build_and_query()

    assert captured['env'] == expected_makeflags
TESTDATA_5 = [
    (2, False, []),
    (1, True, []),
    (2, True, (0, 1)),
    (0, True, (0, 1)),
]

@pytest.mark.parametrize(
    'jobs, use_inheritable_pipe, expected_fds',
    TESTDATA_5,
    ids=['no pipe', 'one job', ' multiple jobs', 'no jobs']
)
def test_gnumakejobclient_pass_fds(jobs, use_inheritable_pipe, expected_fds):
    """pass_fds() should expose the inheritable pipe FDs only when more
    than one concurrent job is possible."""
    pipe = (0, 1) if use_inheritable_pipe else None
    selector_mock = mock.Mock()
    captured = {}

    def build_and_query():
        # Created inside a helper so __del__ runs while still patched out.
        client = GNUMakeJobClient(None, None)
        client.jobs = jobs
        client._inheritable_pipe = pipe
        captured['fds'] = client.pass_fds()

    with mock.patch('twisterlib.jobserver.GNUMakeJobClient.__del__',
                    mock.Mock()), \
         mock.patch('selectors.DefaultSelector', selector_mock):
        build_and_query()

    assert captured['fds'] == expected_fds
TESTDATA_6 = [
    (0, 8),
    (32, 16),
    (4, 4),
]
@pytest.mark.parametrize(
    'jobs, expected_jobs',
    TESTDATA_6,
    ids=['no jobs', 'too many jobs', 'valid jobs']
)
def test_gnumakejobserver(jobs, expected_jobs):
    """GNUMakeJobServer should clamp the job count: default to cpu_count
    when unset and cap it so the b'+' tokens fit in one PIPE_BUF write.
    """
    def mock_init(self, p, j):
        # Minimal stand-in for GNUMakeJobClient.__init__.
        self._inheritable_pipe = p
        self._internal_pipe = None
        self.jobs = j
    pipe = (0, 1)
    cpu_count = 8
    pipe_buf = 16
    selector_mock = mock.Mock()
    write_mock = mock.Mock()
    del_mock = mock.Mock()
    def deleter():
        # Created and dropped here so __del__ fires under the patches.
        GNUMakeJobServer(jobs=jobs)
    with mock.patch.object(GNUMakeJobClient, '__del__', del_mock), \
         mock.patch.object(GNUMakeJobClient, '__init__', mock_init), \
         mock.patch('os.pipe', return_value=pipe), \
         mock.patch('os.write', write_mock), \
         mock.patch('multiprocessing.cpu_count', return_value=cpu_count), \
         mock.patch('select.PIPE_BUF', pipe_buf), \
         mock.patch('selectors.DefaultSelector', selector_mock):
        deleter()
    write_mock.assert_called_once_with(pipe[1], b'+' * expected_jobs)
``` | /content/code_sandbox/scripts/tests/twister/test_jobserver.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,148 |
```python
#!/usr/bin/env python3
#
"""
Tests for testinstance class
"""
import mmap
import mock
import os
import pytest
import sys
from contextlib import nullcontext
ZEPHYR_BASE = os.getenv('ZEPHYR_BASE')
sys.path.insert(0, os.path.join(ZEPHYR_BASE, 'scripts', 'pylib', 'twister'))
from twisterlib.statuses import TwisterStatus
from twisterlib.testsuite import (
_find_src_dir_path,
_get_search_area_boundary,
find_c_files_in,
scan_file,
scan_testsuite_path,
ScanPathResult,
TestCase,
TestSuite
)
from twisterlib.error import TwisterException, TwisterRuntimeError
TESTDATA_1 = [
    (
        ScanPathResult(
            ['a', 'b'],
            'Found a test that does not start with test_',
            False,
            False,
            True,
            ['feature_a', 'feature_b']
        ),
        ScanPathResult(
            ['a', 'b'],
            'Found a test that does not start with test_',
            False,
            False,
            True,
            ['feature_a', 'feature_b']
        ),
        True
    ),
    # (
    #     ScanPathResult(),
    #     ScanPathResult(),
    #     True
    # ),
    (
        ScanPathResult(
            ['a', 'b'],
            'Found a test that does not start with test_',
            False,
            False,
            True,
            ['feature_a', 'feature_b']
        ),
        'I am not a ScanPathResult.',
        False
    ),
    # (
    #     ScanPathResult(
    #         ['a', 'b'],
    #         'Found a test that does not start with test_',
    #         False,
    #         False,
    #         True,
    #         ['feature_a', 'feature_b']
    #     ),
    #     ScanPathResult(),
    #     False
    # ),
]

@pytest.mark.parametrize(
    'original, provided, expected',
    TESTDATA_1,
    ids=[
        'identical',
        # 'empties',
        'wrong type',
        # 'different with empty'
    ]
)
def test_scanpathresults_dunders(original, provided, expected):
    """ScanPathResult equality: a field-for-field copy compares equal,
    while an object of another type does not."""
    assert (original == provided) == expected
# (test_file, expected ScanPathResult) pairs: each file under the
# test_data testsuites tree exercises a different scan_file() code path.
TESTDATA_2 = [
    (
        os.path.join('testsuites', 'tests', 'test_ztest.c'),
        ScanPathResult(
            warnings=None,
            matches=[
                'a',
                'c',
                'unit_a',
                'newline',
                'test_test_aa',
                'user',
                'last'
            ],
            has_registered_test_suites=False,
            has_run_registered_test_suites=False,
            has_test_main=False,
            ztest_suite_names = ['test_api']
        )
    ),
    (
        os.path.join('testsuites', 'tests', 'test_a', 'test_ztest_error.c'),
        ScanPathResult(
            warnings='Found a test that does not start with test_',
            matches=['1a', '1c', '2a', '2b'],
            has_registered_test_suites=False,
            has_run_registered_test_suites=False,
            has_test_main=True,
            ztest_suite_names = ['feature1', 'feature2']
        )
    ),
    (
        os.path.join('testsuites', 'tests', 'test_a', 'test_ztest_error_1.c'),
        ScanPathResult(
            warnings='found invalid #ifdef, #endif in ztest_test_suite()',
            matches=['unit_1a', 'unit_1b', 'Unit_1c'],
            has_registered_test_suites=False,
            has_run_registered_test_suites=False,
            has_test_main=False,
            ztest_suite_names = ['feature3']
        )
    ),
    (
        os.path.join(
            'testsuites',
            'tests',
            'test_d',
            'test_ztest_error_register_test_suite.c'
        ),
        ScanPathResult(
            warnings=None,
            matches=['unit_1a', 'unit_1b'],
            has_registered_test_suites=True,
            has_run_registered_test_suites=False,
            has_test_main=False,
            ztest_suite_names = ['feature4']
        )
    ),
    (
        os.path.join(
            'testsuites',
            'tests',
            'test_e',
            'test_ztest_new_suite.c'
        ),
        ScanPathResult(
            warnings=None,
            matches=['1a', '1b'],
            has_registered_test_suites=False,
            has_run_registered_test_suites=True,
            has_test_main=False,
            ztest_suite_names = ['feature5']
        )
    ),
    # (
    #     os.path.join(
    #         'testsuites',
    #         'tests',
    #         'test_e',
    #         'test_ztest_no_suite.c'
    #     ),
    #     ScanPathResult(
    #         warnings=None,
    #         matches=None,
    #         has_registered_test_suites=False,
    #         has_run_registered_test_suites=False,
    #         has_test_main=False,
    #         ztest_suite_names = []
    #     )
    # ),
]
@pytest.mark.parametrize(
    'test_file, expected',
    TESTDATA_2,
    ids=[
        'valid',
        'test not starting with test_',
        'invalid ifdef with test_main',
        'registered testsuite',
        'new testsuite with registered run',
        # 'empty testsuite'
    ]
)
def test_scan_file(test_data, test_file, class_env, expected: ScanPathResult):
    """
    Testing scan_file method with different
    ztest files for warnings and results
    """
    scanned: ScanPathResult = scan_file(os.path.join(test_data, test_file))
    assert scanned == expected
# Generate testcases depending on available mmap attributes
TESTIDS_3 = []
TESTDATA_3 = []
try:
    TESTDATA_3.append(
        (
            'nt',
            {'access': mmap.ACCESS_READ}
        )
    )
    TESTIDS_3.append('windows')
except AttributeError:
    pass
try:
    TESTDATA_3.append(
        (
            'posix',
            {
                'flags': mmap.MAP_PRIVATE,
                'prot': mmap.PROT_READ,
                'offset': 0
            }
        )
    )
    TESTIDS_3.append('linux')
except AttributeError:
    pass


@pytest.mark.parametrize(
    'os_name, expected',
    TESTDATA_3,
    ids=TESTIDS_3
)
def test_scan_file_mmap(os_name, expected):
    """scan_file should pass the platform-appropriate keyword arguments to
    mmap.mmap (ACCESS_READ on Windows; MAP_PRIVATE/PROT_READ on POSIX).

    mmap.mmap is replaced with an argument-checking mock, and
    contextlib.closing raises TestException to skip the rest of scan_file
    once the mmap call has been verified.
    """
    class TestException(Exception):
        pass

    def assert_mmap(*args, **kwargs):
        assert expected.items() <= kwargs.items()

    # We do this to skip the rest of scan_file
    def raise_exception(*args, **kwargs):
        raise TestException('')

    # pytest.raises replaces the original try/except/assert-False pattern:
    # the test now fails unless TestException is actually raised.
    with mock.patch('mmap.mmap', mock.Mock(side_effect=assert_mmap)), \
         mock.patch('builtins.open', mock.mock_open(read_data='dummy data')), \
         mock.patch('os.name', os_name), \
         mock.patch('contextlib.closing', raise_exception), \
         pytest.raises(TestException):
        scan_file('dummy/path')
TESTDATA_4 = [
    (
        ZEPHYR_BASE,
        '.',
        'test_c',
        'Tests should reference the category and subsystem' \
        ' with a dot as a separator.'
    ),
    (
        os.path.join(ZEPHYR_BASE, 'scripts', 'tests'),
        '.',
        '',
        'Tests should reference the category and subsystem' \
        ' with a dot as a separator.'),
]


@pytest.mark.parametrize(
    'testsuite_root, workdir, name, exception',
    TESTDATA_4
)
def test_get_unique_exception(testsuite_root, workdir, name, exception):
    """
    Test to check if tests reference the category and subsystem
    with a dot as a separator
    """
    # The previous version asserted on the TestSuite return value inside
    # the pytest.raises block; that assert was unreachable because the
    # constructor raises first. Assert on the exception message instead.
    with pytest.raises(TwisterException) as exc_info:
        TestSuite(testsuite_root, workdir, name)
    assert exception in str(exc_info.value)
TEST_DATA_REL_PATH = os.path.join(
    'scripts',
    'tests',
    'twister',
    'test_data',
    'testsuites'
)

TESTDATA_5 = [
    (
        os.path.join(ZEPHYR_BASE, TEST_DATA_REL_PATH),
        os.path.join(ZEPHYR_BASE, TEST_DATA_REL_PATH, 'tests', 'test_a'),
        os.path.join(
            os.sep,
            TEST_DATA_REL_PATH,
            'tests',
            'test_a',
            'test_a.check_1'
        ),
        os.path.join(
            os.sep,
            TEST_DATA_REL_PATH,
            'tests',
            'test_a',
            'test_a.check_1'
        ),
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        'test_a.check_1'
    ),
    (
        ZEPHYR_BASE,
        os.path.join(
            ZEPHYR_BASE,
            TEST_DATA_REL_PATH,
            'test_b'
        ),
        os.path.join(os.sep, TEST_DATA_REL_PATH, 'test_b', 'test_b.check_1'),
        os.path.join(os.sep, TEST_DATA_REL_PATH, 'test_b', 'test_b.check_1')
    ),
    (
        os.path.join(ZEPHYR_BASE, 'scripts', 'tests'),
        os.path.join(ZEPHYR_BASE, 'scripts', 'tests'),
        'test_b.check_1',
        os.path.join('scripts', 'tests', 'test_b.check_1')
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1.check_2',
        'test_a.check_1.check_2'
    ),
]

@pytest.mark.parametrize(
    'testsuite_root, suite_path, name, expected',
    TESTDATA_5
)
def test_get_unique(testsuite_root, suite_path, name, expected):
    """Check that TestSuite derives the expected unique name from the
    testsuite root, the suite path and the given name."""
    assert TestSuite(testsuite_root, suite_path, name).name == expected
# (search_area, suite_regex_matches, is_registered_test_suite, expected)
# cases; `expected` is either the (start, end) tuple or ValueError.
TESTDATA_6 = [
    (
        b'/* dummy */\r\n ztest_run_test_suite(feature)',
        [
            mock.Mock(
                start=mock.Mock(return_value=0),
                end=mock.Mock(return_value=0)
            )
        ],
        False,
        (0, 13)
    ),
    (
        b'ztest_register_test_suite(featureX, NULL, ztest_unit_test(test_a));',
        [
            mock.Mock(
                start=mock.Mock(return_value=0),
                end=mock.Mock(return_value=26)
            )
        ],
        True,
        (26, 67)
    ),
    (
        b'dummy text',
        [
            mock.Mock(
                start=mock.Mock(return_value=0),
                end=mock.Mock(return_value=0)
            )
        ],
        False,
        ValueError
    )
]
@pytest.mark.parametrize(
    'search_area, suite_regex_matches, is_registered_test_suite, expected',
    TESTDATA_6,
    ids=['run suite', 'registered suite', 'error']
)
def test_get_search_area_boundary(
    search_area,
    suite_regex_matches,
    is_registered_test_suite,
    expected
):
    """_get_search_area_boundary() should return the (start, end) span to
    scan for testcases, or raise ValueError when the run-suite marker is
    missing from the search area.
    """
    with pytest.raises(expected) if \
         isinstance(expected, type) and issubclass(expected, Exception) \
         else nullcontext() as exception:
        result = _get_search_area_boundary(
            search_area,
            suite_regex_matches,
            is_registered_test_suite
        )
    if exception:
        assert str(exception.value) == 'can\'t find ztest_run_test_suite'
        return
    assert result == expected
TESTDATA_7 = [
    (True, [os.path.join('', 'home', 'user', 'dummy_path', 'dummy.c'),
            os.path.join('', 'home', 'user', 'dummy_path', 'dummy.cpp')]),
    (False, [])
]
@pytest.mark.parametrize(
    'isdir, expected',
    TESTDATA_7,
    ids=['valid', 'not a directory']
)
def test_find_c_files_in(isdir, expected):
    """find_c_files_in() should glob C/C++ sources (base dir and subdirs)
    under the given path and restore the original working directory.
    """
    old_dir = os.path.join('', 'home', 'user', 'dummy_base_dir')
    new_path = os.path.join('', 'home', 'user', 'dummy_path')
    cur_dir = old_dir
    def mock_chdir(path, *args, **kwargs):
        nonlocal cur_dir
        cur_dir = path
    # We simulate such a structure:
    # <new_path>
    #   dummy.c
    #   wrong_dummy.h
    #   dummy_dir
    #     dummy.cpp
    #     wrong_dummy.hpp
    # <old_dir>
    #   wrong_dummy.c
    new_path_base = ['dummy.c', 'wrong_dummy.h']
    new_path_subs = ['dummy.cpp', 'wrong_dummy.hpp']
    old_dir_base = ['wrong_dummy.c']
    def format_tester(fmt):
        # Recognise the two glob patterns find_c_files_in uses and split
        # off the requested file extension.
        formats = [
            {'name': 'subdirs', 'fmt': '**/*.'},
            {'name': 'base', 'fmt': '*.'}
        ]
        for format in formats:
            if fmt.startswith(format['fmt']):
                return format['name'], fmt[len(format['fmt']):]
        raise ValueError('This test wasn\'t designed for those globs.'
                         ' Please fix the test before PR!')
    def mock_glob(fmt, *args, **kwargs):
        # Serve file lists depending on the mocked current directory.
        from_where, extension = format_tester(fmt)
        if cur_dir == old_dir:
            if from_where == 'subdirs':
                return []
            elif from_where == 'base':
                return list(filter(lambda fn: fn.endswith(extension),
                                   old_dir_base))
            else:
                return []
        if cur_dir == new_path:
            if from_where == 'subdirs':
                return list(filter(lambda fn: fn.endswith(extension),
                                   new_path_subs))
            elif from_where == 'base':
                return list(filter(lambda fn: fn.endswith(extension),
                                   new_path_base))
            else:
                return []
        raise ValueError('This test wasn\'t designed for those dirs.'
                         'Please fix the test before PR!')
    with mock.patch('os.path.isdir', return_value=isdir), \
         mock.patch('os.getcwd', return_value=cur_dir), \
         mock.patch('glob.glob', mock_glob), \
         mock.patch('os.chdir', side_effect=mock_chdir) as chdir_mock:
        filenames = find_c_files_in(new_path)
    assert sorted(filenames) == sorted(expected)
    # The last chdir (if any) must return to the original directory.
    assert chdir_mock.call_args is None or \
           chdir_mock.call_args == mock.call(old_dir)
# (testsuite_path, testsuite_glob, src_dir_glob, sizes, scanpathresults,
#  expected_logs, expected_exception, expected) cases for
# scan_testsuite_path(); per-file scan results may be exception types.
TESTDATA_8 = [
    (
        os.path.join('dummy', 'path'),
        ['testsuite_file_1', 'testsuite_file_2'],
        ['src_dir_file_1', 'src_dir_file_2', 'src_dir_file_3'],
        {'src_dir_file_1': 1000, 'src_dir_file_2': 2000, 'src_dir_file_3': 0},
        {
            'testsuite_file_1': ScanPathResult(
                matches = ['test_a', 'b'],
                warnings = 'dummy warning',
                has_registered_test_suites = True,
                has_run_registered_test_suites = True,
                has_test_main = True,
                ztest_suite_names = ['feature_a']
            ),
            'testsuite_file_2': ValueError,
            'src_dir_file_1': ScanPathResult(
                matches = ['test_b', 'a'],
                warnings = None,
                has_registered_test_suites = True,
                has_run_registered_test_suites = True,
                has_test_main = True,
                ztest_suite_names = ['feature_b']
            ),
            'src_dir_file_2': ValueError,
            'src_dir_file_3': ValueError,
        },
        [
            'testsuite_file_2: can\'t find: dummy exception',
            'testsuite_file_1: dummy warning',
            'src_dir_file_2: error parsing source file: dummy exception',
        ],
        None,
        (['a', 'b', 'test_a', 'test_b'], ['feature_a', 'feature_b'])
    ),
    (
        os.path.join('dummy', 'path'),
        [],
        ['src_dir_file'],
        {'src_dir_file': 1000},
        {
            'src_dir_file': ScanPathResult(
                matches = ['test_b', 'a'],
                warnings = None,
                has_registered_test_suites = True,
                has_run_registered_test_suites = False,
                has_test_main = True,
                ztest_suite_names = ['feature_b']
            ),
        },
        [
            'Found call to \'ztest_register_test_suite()\'' \
            ' but no call to \'ztest_run_registered_test_suites()\''
        ],
        TwisterRuntimeError(
            'Found call to \'ztest_register_test_suite()\'' \
            ' but no call to \'ztest_run_registered_test_suites()\''
        ),
        None
    ),
    (
        os.path.join('dummy', 'path'),
        [],
        ['src_dir_file'],
        {'src_dir_file': 100},
        {
            'src_dir_file': ScanPathResult(
                matches = ['test_b', 'a'],
                warnings = 'dummy warning',
                has_registered_test_suites = True,
                has_run_registered_test_suites = True,
                has_test_main = True,
                ztest_suite_names = ['feature_b']
            ),
        },
        ['src_dir_file: dummy warning'],
        TwisterRuntimeError('src_dir_file: dummy warning'),
        None
    ),
]
@pytest.mark.parametrize(
    'testsuite_path, testsuite_glob, src_dir_glob, sizes, scanpathresults,' \
    ' expected_logs, expected_exception, expected',
    TESTDATA_8,
    ids=[
        'valid',
        'warning in src dir',
        'register with run error',
    ]
)
def test_scan_testsuite_path(
    caplog,
    testsuite_path,
    testsuite_glob,
    src_dir_glob,
    sizes,
    scanpathresults,
    expected_logs,
    expected_exception,
    expected
):
    """scan_testsuite_path() should aggregate subcases and ztest suite
    names from the testsuite dir and its src dir, log per-file warnings,
    and raise TwisterRuntimeError on fatal inconsistencies.
    """
    src_dir_path = os.path.join(testsuite_path, 'src')
    def mock_fsdp(path, *args, **kwargs):
        # Pretend the source dir is always <testsuite_path>/src.
        return src_dir_path
    def mock_find(path, *args, **kwargs):
        if path == src_dir_path:
            return src_dir_glob
        elif path == testsuite_path:
            return testsuite_glob
        else:
            return []
    def mock_sf(filename, *args, **kwargs):
        # Either return the prepared ScanPathResult or raise the prepared
        # exception type for this filename.
        if isinstance(scanpathresults[filename], type) and \
           issubclass(scanpathresults[filename], Exception):
            raise scanpathresults[filename]('dummy exception')
        return scanpathresults[filename]
    def mock_stat(filename, *args, **kwargs):
        result = mock.Mock()
        type(result).st_size = sizes[filename]
        return result
    with mock.patch('twisterlib.testsuite._find_src_dir_path', mock_fsdp), \
         mock.patch('twisterlib.testsuite.find_c_files_in', mock_find), \
         mock.patch('twisterlib.testsuite.scan_file', mock_sf), \
         mock.patch('os.stat', mock_stat), \
         pytest.raises(type(expected_exception)) if \
         expected_exception else nullcontext() as exception:
        result = scan_testsuite_path(testsuite_path)
    # Whitespace is normalised before matching the expected log fragments.
    assert all(
        [expected_log in " ".join(caplog.text.split()) \
         for expected_log in expected_logs]
    )
    if expected_exception:
        assert str(expected_exception) == str(exception.value)
        return
    assert len(result[0]) == len(expected[0])
    assert all(
        [expected_subcase in result[0] for expected_subcase in expected[0]]
    )
    assert len(result[1]) == len(expected[1])
    assert all(
        [expected_subcase in result[1] for expected_subcase in expected[1]]
    )
TESTDATA_9 = [
    ('dummy/path', 'dummy/path/src', 'dummy/path/src'),
    ('dummy/path', 'dummy/src', 'dummy/src'),
    ('dummy/path', 'another/path', '')
]

@pytest.mark.parametrize(
    'test_dir_path, isdir_path, expected',
    TESTDATA_9,
    ids=['src here', 'src in parent', 'no path']
)
def test_find_src_dir_path(test_dir_path, isdir_path, expected):
    """_find_src_dir_path should find the 'src' dir next to or above the
    test dir, returning an empty string when none exists."""
    def pretend_isdir(path, *args, **kwargs):
        # Only the configured path counts as an existing directory.
        return os.path.normpath(path) == isdir_path

    with mock.patch('os.path.isdir', pretend_isdir):
        found = _find_src_dir_path(test_dir_path)

    assert os.path.normpath(found) == expected or found == expected
# NOTE(review): this re-definition is identical to the TEST_DATA_REL_PATH
# assigned earlier in this file and looks removable — confirm and clean up.
TEST_DATA_REL_PATH = os.path.join(
    'scripts',
    'tests',
    'twister',
    'test_data',
    'testsuites'
)
TESTDATA_10 = [
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {
            'testcases': ['testcase1', 'testcase2']
        },
        ['subcase1', 'subcase2'],
        ['testsuite_a', 'testsuite_b'],
        [
            ('test_a.check_1.testcase1', False),
            ('test_a.check_1.testcase2', False)
        ],
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {},
        ['subcase_repeat', 'subcase_repeat', 'subcase_alone'],
        ['testsuite_a'],
        [
            ('test_a.check_1.subcase_repeat', False),
            ('test_a.check_1.subcase_alone', False)
        ],
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {},
        [],
        ['testsuite_a', 'testsuite_a'],
        [
            ('test_a.check_1', True)
        ],
    ),
]

@pytest.mark.parametrize(
    'testsuite_root, suite_path, name, data,' \
    ' parsed_subcases, suite_names, expected',
    TESTDATA_10,
    ids=['data', 'subcases', 'empty']
)
def test_testsuite_add_subcases(
    testsuite_root,
    suite_path,
    name,
    data,
    parsed_subcases,
    suite_names,
    expected
):
    """add_subcases() should register one TestCase per distinct subcase,
    or a single freeform case when no subcases are given."""
    suite = TestSuite(testsuite_root, suite_path, name)
    suite.add_subcases(data, parsed_subcases, suite_names)

    assert sorted(suite.ztest_suite_names) == sorted(suite_names)
    assert len(suite.testcases) == len(expected)
    # Every registered testcase must match one expected (name, freeform)
    # pair.
    for case in suite.testcases:
        assert any(
            exp_name == case.name and exp_freeform == case.freeform
            for exp_name, exp_freeform in expected
        )
TESTDATA_11 = [
    # (
    #     ZEPHYR_BASE,
    #     ZEPHYR_BASE,
    #     'test_a.check_1',
    #     {
    #         'testcases': ['testcase1', 'testcase2']
    #     },
    #     [],
    # ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        {
            'testcases': ['testcase1', 'testcase2'],
            'harness': 'console',
            'harness_config': { 'dummy': 'config' }
        },
        [
            ('harness', 'console'),
            ('harness_config', { 'dummy': 'config' })
        ],
    ),
    # (
    #     ZEPHYR_BASE,
    #     ZEPHYR_BASE,
    #     'test_a.check_1',
    #     {
    #         'harness': 'console'
    #     },
    #     Exception,
    # )
]
@pytest.mark.parametrize(
    'testsuite_root, suite_path, name, data, expected',
    TESTDATA_11,
    ids=[
        # 'no harness',
        'proper harness',
        # 'harness error'
    ]
)
def test_testsuite_load(
    testsuite_root,
    suite_path,
    name,
    data,
    expected
):
    """TestSuite.load() should copy the given data onto suite attributes;
    a console harness without a configuration is expected to raise.
    """
    suite = TestSuite(testsuite_root, suite_path, name)
    # `expected` is either an (attribute, value) list or an exception type.
    with pytest.raises(expected) if \
         isinstance(expected, type) and issubclass(expected, Exception) \
         else nullcontext() as exception:
        suite.load(data)
    if exception:
        assert str(exception.value) == 'Harness config error: console harness' \
                                       ' defined without a configuration.'
        return
    for attr_name, value in expected:
        assert getattr(suite, attr_name) == value
def test_testcase_dunders():
    """Exercise TestCase __lt__, __str__ and __repr__."""
    lesser = TestCase(name='A lesser name')
    greater = TestCase(name='a greater name')
    greater.status = TwisterStatus.FAIL

    assert lesser < greater
    assert str(greater) == 'a greater name'
    expected_repr = f'<TestCase a greater name with {str(TwisterStatus.FAIL)}>'
    assert repr(greater) == expected_repr
# Renamed from a second `TESTDATA_11` definition, which shadowed the table
# used by test_testsuite_load() above.
TESTDATA_12 = [
    (
        ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites',
        ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites/tests/test_a',
        'test_a.check_1',
        'test_a.check_1'
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1',
        'test_a.check_1'
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE + '/scripts/tests/twister/test_data/testsuites/test_b',
        'test_b.check_1',
        'test_b.check_1'
    ),
    (
        os.path.join(ZEPHYR_BASE, 'scripts/tests'),
        os.path.join(ZEPHYR_BASE, 'scripts/tests'),
        'test_b.check_1',
        'test_b.check_1'
    ),
    (
        ZEPHYR_BASE,
        ZEPHYR_BASE,
        'test_a.check_1.check_2',
        'test_a.check_1.check_2'
    ),
]


@pytest.mark.parametrize("testsuite_root, suite_path, name, expected", TESTDATA_12)
def test_get_no_detailed_test_id(testsuite_root, suite_path, name, expected):
    '''Test to check if the name without path is given for each testsuite'''
    suite = TestSuite(testsuite_root, suite_path, name, detailed_test_id=False)
    # Leftover debug print removed; assert directly on the derived name.
    assert suite.name == expected
``` | /content/code_sandbox/scripts/tests/twister/test_testsuite.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,697 |
```python
#!/usr/bin/env python3
#
'''
This test file contains testsuites for testsuite.py module of twister
'''
import sys
import os
import mock
import pytest
from contextlib import nullcontext
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib.statuses import TwisterStatus
from twisterlib.testplan import TestPlan, change_skip_to_error_if_integration
from twisterlib.testinstance import TestInstance
from twisterlib.testsuite import TestSuite
from twisterlib.platform import Platform
from twisterlib.quarantine import Quarantine
from twisterlib.error import TwisterRuntimeError
def test_testplan_add_testsuites_short(class_testplan):
    """ Testing add_testcase function of Testsuite class in twister """
    # Part 1: the discovered testsuites must match the expected set.
    class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml'
    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
    class_testplan.add_testsuites()

    tests_rel_dir = 'scripts/tests/twister/test_data/testsuites/tests/'
    expected_testsuites = ['test_b.check_1',
                           'test_b.check_2',
                           'test_c.check_1',
                           'test_c.check_2',
                           'test_a.check_1',
                           'test_a.check_2',
                           'test_d.check_1',
                           'test_e.check_1',
                           'sample_test.app',
                           'test_config.main']
    discovered = [os.path.basename(os.path.normpath(key))
                  for key in sorted(class_testplan.testsuites.keys())]
    assert sorted(discovered) == sorted(expected_testsuites)

    # Part 2: suite names are as expected and all values are TestSuites.
    suite = class_testplan.testsuites.get(tests_rel_dir + 'test_a/test_a.check_1')
    assert suite.name == tests_rel_dir + 'test_a/test_a.check_1'
    assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values())
@pytest.mark.parametrize("board_root_dir", [("board_config_file_not_exist"), ("board_config")])
def test_add_configurations_short(test_data, class_env, board_root_dir):
    """ Testing add_configurations function of TestPlan class in Twister
    Test : Asserting on default platforms list
    """
    class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
    plan = TestPlan(class_env)
    plan.parse_configuration(config_file=class_env.test_config)
    # add_configurations() runs in both parametrized cases; only the
    # assertion differs.
    plan.add_configurations()
    if board_root_dir == "board_config":
        assert sorted(plan.default_platforms) == sorted(['demo_board_1', 'demo_board_3'])
    elif board_root_dir == "board_config_file_not_exist":
        assert sorted(plan.default_platforms) != sorted(['demo_board_1'])
def test_get_all_testsuites_short(class_testplan, all_testsuites_dict):
    """ Testing get_all_testsuites function of TestPlan class in Twister """
    class_testplan.testsuites = all_testsuites_dict
    expected_tests = ['sample_test.app', 'test_a.check_1.1a',
                      'test_a.check_1.1c',
                      'test_a.check_1.2a', 'test_a.check_1.2b',
                      'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a',
                      'test_a.check_1.unit_1b', 'test_a.check_2.1a',
                      'test_a.check_2.1c', 'test_a.check_2.2a',
                      'test_a.check_2.2b', 'test_a.check_2.Unit_1c',
                      'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b',
                      'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
                      'test_c.check_2', 'test_d.check_1.unit_1a',
                      'test_d.check_1.unit_1b',
                      'test_e.check_1.1a', 'test_e.check_1.1b',
                      'test_config.main']
    assert sorted(class_testplan.get_all_tests()) == sorted(expected_tests)
def test_get_platforms_short(class_testplan, platforms_list):
    """TestPlan.get_platform() returns the Platform object matching a name."""
    class_testplan.platforms = platforms_list
    found = class_testplan.get_platform("demo_board_1")
    assert isinstance(found, Platform)
    assert found.name == "demo_board_1"
# Rows: (testsuite attribute, its value, platform attribute, its value,
#        expected filter reason) driving test_apply_filters_part1.
TESTDATA_PART1 = [
    ("toolchain_allow", ['gcc'], None, None, "Not in testsuite toolchain allow list"),
    ("platform_allow", ['demo_board_1'], None, None, "Not in testsuite platform allow list"),
    ("toolchain_exclude", ['zephyr'], None, None, "In test case toolchain exclude"),
    ("platform_exclude", ['demo_board_2'], None, None, "In test case platform exclude"),
    ("arch_exclude", ['x86'], None, None, "In test case arch exclude"),
    ("arch_allow", ['arm'], None, None, "Not in test case arch allow list"),
    ("skip", True, None, None, "Skip filter"),
    ("tags", set(['sensor', 'bluetooth']), "ignore_tags", ['bluetooth'], "Excluded tags per platform (exclude_tags)"),
    ("min_flash", "2024", "flash", "1024", "Not enough FLASH"),
    ("min_ram", "500", "ram", "256", "Not enough RAM"),
    ("None", "None", "env", ['BSIM_OUT_PATH', 'demo_env'], "Environment (BSIM_OUT_PATH, demo_env) not satisfied"),
    # build_on_all with the platform excluded on the command line.
    ("build_on_all", True, None, None, "Platform is excluded on command line."),
    # build_on_all combined with an unknown --level value.
    ("build_on_all", True, "level", "foobar", "Unknown test level 'foobar'"),
    (None, None, "supported_toolchains", ['gcc', 'xcc', 'xt-clang'], "Not supported by the toolchain"),
]
@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
                         TESTDATA_PART1)
def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
                             tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
    """ Testing apply_filters function of TestPlan class in Twister
    Part 1: Response of apply_filters function have
    appropriate values according to the filters
    """
    plan = class_testplan
    # Smoke-run apply_filters() with nothing configured for the no-attribute case.
    if tc_attribute is None and plat_attribute is None:
        plan.apply_filters()
    plan.platforms = platforms_list
    plan.platform_names = [p.name for p in platforms_list]
    plan.testsuites = all_testsuites_dict
    # Set the platform-side attribute under test on every platform.
    for plat in plan.platforms:
        if plat_attribute == "ignore_tags":
            plat.ignore_tags = plat_value
        if plat_attribute == "flash":
            plat.flash = plat_value
        if plat_attribute == "ram":
            plat.ram = plat_value
        if plat_attribute == "env":
            plat.env = plat_value
            plat.env_satisfied = False
        if plat_attribute == "supported_toolchains":
            plat.supported_toolchains = plat_value
    # Set the testsuite-side attribute under test on every suite.
    for _, testcase in plan.testsuites.items():
        if tc_attribute == "toolchain_allow":
            testcase.toolchain_allow = tc_value
        if tc_attribute == "platform_allow":
            testcase.platform_allow = tc_value
        if tc_attribute == "toolchain_exclude":
            testcase.toolchain_exclude = tc_value
        if tc_attribute == "platform_exclude":
            testcase.platform_exclude = tc_value
        if tc_attribute == "arch_exclude":
            testcase.arch_exclude = tc_value
        if tc_attribute == "arch_allow":
            testcase.arch_allow = tc_value
        if tc_attribute == "skip":
            testcase.skip = tc_value
        if tc_attribute == "tags":
            testcase.tags = tc_value
        if tc_attribute == "min_flash":
            testcase.min_flash = tc_value
        if tc_attribute == "min_ram":
            testcase.min_ram = tc_value
    if plat_attribute == "level":
        plan.options.level = plat_value
    # Different filter scenarios need different apply_filters() arguments.
    if tc_attribute == "build_on_all":
        for _, testcase in plan.testsuites.items():
            testcase.build_on_all = tc_value
        plan.apply_filters(exclude_platform=['demo_board_1'])
    elif plat_attribute == "supported_toolchains":
        plan.apply_filters(force_toolchain=False,
                           exclude_platform=['demo_board_1'],
                           platform=['demo_board_2'])
    elif tc_attribute is None and plat_attribute is None:
        plan.apply_filters()
    else:
        plan.apply_filters(exclude_platform=['demo_board_1'],
                           platform=['demo_board_2'])
    # Every filtered instance must carry the expected discard reason.
    filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values()))
    for d in filtered_instances:
        assert d.reason == expected_discards
# Rows: (extra apply_filters() keyword, its value, expected filter reason)
# for test_apply_filters_part2.
TESTDATA_PART2 = [
    ("runnable", "True", "Not runnable on device"),
    ("exclude_tag", ['test_a'], "Command line testsuite exclude filter"),
    ("run_individual_tests", ['scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'], "TestSuite name filter"),
    ("arch", ['arm_test'], "Command line testsuite arch filter"),
    ("tag", ['test_d'], "Command line testsuite tag filter")
]
@pytest.mark.parametrize("extra_filter, extra_filter_value, expected_discards", TESTDATA_PART2)
def test_apply_filters_part2(class_testplan, all_testsuites_dict,
                             platforms_list, extra_filter, extra_filter_value, expected_discards):
    """Part 2 of the apply_filters() tests: each extra command-line style
    filter must mark instances with the matching discard reason.
    """
    class_testplan.platforms = platforms_list
    class_testplan.platform_names = [plat.name for plat in platforms_list]
    class_testplan.testsuites = all_testsuites_dict
    filter_kwargs = {
        extra_filter: extra_filter_value,
        "exclude_platform": ['demo_board_1'],
        "platform": ['demo_board_2'],
    }
    class_testplan.apply_filters(**filter_kwargs)
    # Every filtered instance must carry the reason tied to the extra filter.
    for instance in class_testplan.instances.values():
        if instance.status == TwisterStatus.FILTER:
            assert instance.reason == expected_discards
# Rows: (testsuite min_flash, platform flash, testsuite min_ram, platform ram)
# edge cases for test_apply_filters_part3 — none may trigger filtering.
TESTDATA_PART3 = [
    (20, 20, -1, 0),
    (-2, -1, 10, 20),
    (0, 0, 0, 0)
]
@pytest.mark.parametrize("tc_min_flash, plat_flash, tc_min_ram, plat_ram",
                         TESTDATA_PART3)
def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list,
                             tc_min_flash, plat_flash, tc_min_ram, plat_ram):
    """Part 3 of the apply_filters() tests: boundary RAM/flash combinations
    (equal, negative, zero) must never cause an instance to be filtered.
    """
    class_testplan.platforms = platforms_list
    class_testplan.platform_names = [platform.name for platform in platforms_list]
    class_testplan.testsuites = all_testsuites_dict
    for platform in class_testplan.platforms:
        platform.flash = plat_flash
        platform.ram = plat_ram
    for suite in class_testplan.testsuites.values():
        suite.min_ram = tc_min_ram
        suite.min_flash = tc_min_flash
    class_testplan.apply_filters(exclude_platform=['demo_board_1'],
                                 platform=['demo_board_2'])
    # No instance may have been discarded by the RAM/flash checks.
    assert all(
        instance.status != TwisterStatus.FILTER
        for instance in class_testplan.instances.values()
    )
def test_add_instances_short(tmp_path, class_env, all_testsuites_dict, platforms_list):
    """TestPlan.add_instances() keys instances as '<platform>/<suite name>',
    stores TestInstance objects as values, and preserves insertion order.
    """
    class_env.outdir = tmp_path
    plan = TestPlan(class_env)
    plan.platforms = platforms_list
    platform = plan.get_platform("demo_board_2")
    instances = [
        TestInstance(testsuite, platform, class_env.outdir)
        for testsuite in all_testsuites_dict.values()
    ]
    plan.add_instances(instances)
    expected_keys = [platform.name + '/' + name for name in all_testsuites_dict]
    assert list(plan.instances.keys()) == expected_keys
    assert all(isinstance(value, TestInstance) for value in plan.instances.values())
    assert list(plan.instances.values()) == instances
# Expected quarantined instances (instance name -> quarantine comment) for
# each quarantine YAML fixture used by test_quarantine_short.
QUARANTINE_BASIC = {
    'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'a1 on board_1 and board_3'
}

QUARANTINE_WITH_REGEXP = {
    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'a2 and c2 on x86',
    'demo_board_1/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all test_d',
    'demo_board_2/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'a2 and c2 on x86'
}

# platform.yaml quarantines every suite on demo_board_3.
QUARANTINE_PLATFORM = {
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_1' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_2' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_1' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_e/test_e.check_1' : 'all on board_3',
    'demo_board_3/scripts/tests/twister/test_data/testsuites/tests/test_config/test_config.main' : 'all on board_3'
}

# Union of two quarantine files loaded together.
QUARANTINE_MULTIFILES = {
    **QUARANTINE_BASIC,
    **QUARANTINE_WITH_REGEXP
}
@pytest.mark.parametrize(
    ("quarantine_files, quarantine_verify, expected_val"),
    [
        (['basic.yaml'], False, QUARANTINE_BASIC),
        (['with_regexp.yaml'], False, QUARANTINE_WITH_REGEXP),
        (['with_regexp.yaml'], True, QUARANTINE_WITH_REGEXP),
        (['platform.yaml'], False, QUARANTINE_PLATFORM),
        (['basic.yaml', 'with_regexp.yaml'], False, QUARANTINE_MULTIFILES),
        (['empty.yaml'], False, {})
    ],
    ids=[
        'basic',
        'with_regexp',
        'quarantine_verify',
        'platform',
        'multifiles',
        'empty'
    ])
def test_quarantine_short(class_testplan, platforms_list, test_data,
                          quarantine_files, quarantine_verify, expected_val):
    """ Testing quarantine feature in Twister
    """
    class_testplan.options.all = True
    class_testplan.platforms = platforms_list
    class_testplan.platform_names = [p.name for p in platforms_list]
    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
    class_testplan.add_testsuites()
    # Resolve the quarantine fixture files relative to the test data dir.
    quarantine_list = [
        os.path.join(test_data, 'quarantines', quarantine_file) for quarantine_file in quarantine_files
    ]
    class_testplan.quarantine = Quarantine(quarantine_list)
    class_testplan.options.quarantine_verify = quarantine_verify
    class_testplan.apply_filters()
    for testname, instance in class_testplan.instances.items():
        if quarantine_verify:
            # --quarantine-verify: only quarantined tests run, others filtered.
            if testname in expected_val:
                assert instance.status == TwisterStatus.NONE
            else:
                assert instance.status == TwisterStatus.FILTER
                assert instance.reason == "Not under quarantine"
        else:
            # Normal mode: quarantined tests are filtered with their comment.
            if testname in expected_val:
                assert instance.status == TwisterStatus.FILTER
                assert instance.reason == "Quarantine: " + expected_val[testname]
            else:
                assert instance.status == TwisterStatus.NONE
# Rows: (suite path under tests/, required snippet names,
#        expected number of filtered instances or None,
#        expected filter reason or None) for test_required_snippets_short.
TESTDATA_PART4 = [
    (os.path.join('test_d', 'test_d.check_1'), ['dummy'],
     None, 'Snippet not supported'),
    (os.path.join('test_c', 'test_c.check_1'), ['cdc-acm-console'],
     0, None),
    (os.path.join('test_d', 'test_d.check_1'), ['dummy', 'cdc-acm-console'],
     2, 'Snippet not supported'),
]
@pytest.mark.parametrize(
    'testpath, required_snippets, expected_filtered_len, expected_filtered_reason',
    TESTDATA_PART4,
    ids=['app', 'global', 'multiple']
)
def test_required_snippets_short(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    testpath,
    required_snippets,
    expected_filtered_len,
    expected_filtered_reason
):
    """Testing required_snippets handling of TestPlan class in Twister.

    A single testsuite is given a list of required snippets; after
    apply_filters() the number of filtered instances and their discard
    reason must match the expectations from TESTDATA_PART4.
    """
    plan = class_testplan
    testpath = os.path.join('scripts', 'tests', 'twister', 'test_data',
                            'testsuites', 'tests', testpath)
    testsuite = class_testplan.testsuites.get(testpath)
    plan.platforms = platforms_list
    plan.platform_names = [p.name for p in platforms_list]
    # Restrict the plan to the single suite under test.
    plan.testsuites = {testpath: testsuite}
    for _, testcase in plan.testsuites.items():
        testcase.exclude_platform = []
        testcase.required_snippets = required_snippets
        testcase.build_on_all = True
    plan.apply_filters()
    filtered_instances = list(
        filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values())
    )
    if expected_filtered_len is not None:
        assert len(filtered_instances) == expected_filtered_len
    if expected_filtered_reason is not None:
        for d in filtered_instances:
            assert d.reason == expected_filtered_reason
def test_testplan_get_level():
    """get_level() returns the first registered level matching a name,
    None for unknown names, and None again once the level is removed."""
    testplan = TestPlan(env=mock.Mock())
    lvl1 = mock.Mock()
    lvl1.name = 'a lvl'
    lvl2 = mock.Mock()
    lvl2.name = 'a lvl'  # duplicate name — lvl1 must still win
    lvl3 = mock.Mock()
    lvl3.name = 'other lvl'
    testplan.levels.append(lvl1)
    testplan.levels.append(lvl2)
    testplan.levels.append(lvl3)
    name = 'a lvl'
    res = testplan.get_level(name)
    assert res == lvl1
    # Looking up the same name again must be stable.
    res = testplan.get_level(name)
    assert res == lvl1
    # A level that was never registered is not found.
    lvl_missed = mock.Mock()
    lvl_missed.name = 'missed lvl'
    res = testplan.get_level('missed_lvl')
    assert res is None
    # After removing both matching levels the name resolves to None.
    testplan.levels.remove(lvl1)
    testplan.levels.remove(lvl2)
    res = testplan.get_level(name)
    assert res is None
TESTDATA_1 = [
('', {}),
(
"""\
levels:
- name: lvl1
adds:
- sc1
- sc2
inherits: []
- name: lvl2
adds:
- sc1-1
- sc1-2
inherits: [lvl1]
""",
{
'lvl1': ['sc1', 'sc2'],
'lvl2': ['sc1-1', 'sc1-2', 'sc1', 'sc2']
}
),
]
@pytest.mark.parametrize(
    'config_yaml, expected_scenarios',
    TESTDATA_1,
    ids=['no config', 'valid config']
)
def test_testplan_parse_configuration(tmp_path, config_yaml, expected_scenarios):
    """parse_configuration() builds levels from the YAML config file;
    a missing config file raises TwisterRuntimeError."""
    testplan = TestPlan(env=mock.Mock())
    testplan.scenarios = ['sc1', 'sc1-1', 'sc1-2', 'sc2']
    config_path = tmp_path / 'config_file.yaml'
    if config_yaml:
        config_path.write_text(config_yaml)
    expectation = nullcontext() if config_yaml else pytest.raises(TwisterRuntimeError)
    with expectation:
        testplan.parse_configuration(config_path)
    if not testplan.levels:
        assert expected_scenarios == {}
    for level in testplan.levels:
        assert sorted(level.scenarios) == sorted(expected_scenarios[level.name])
# Rows: (--sub-test values, expected stdout fragments, expect error)
# for test_testplan_find_subtests.
TESTDATA_2 = [
    ([], [], False),
    (['ts1.tc3'], [], True),
    (['ts2.tc2'], ['- ts2'], False),
]
@pytest.mark.parametrize(
    'sub_tests, expected_outs, expect_error',
    TESTDATA_2,
    ids=['no subtests', 'subtests not found', 'valid subtests']
)
def test_testplan_find_subtests(
    capfd,
    sub_tests,
    expected_outs,
    expect_error
):
    """find_subtests() maps --sub-test names to their owning testsuites;
    an unknown subtest name raises TwisterRuntimeError."""
    testplan = TestPlan(env=mock.Mock())
    testplan.options = mock.Mock(sub_test=sub_tests)
    testplan.run_individual_testsuite = []
    # Two fake suites: ts1 with two testcases, ts2 with three.
    testplan.testsuites = {
        'ts1': mock.Mock(
            testcases=[
                mock.Mock(),
                mock.Mock(),
            ]
        ),
        'ts2': mock.Mock(
            testcases=[
                mock.Mock(),
                mock.Mock(),
                mock.Mock(),
            ]
        )
    }
    # Mock(name=...) would set the mock's own name, so assign .name after.
    testplan.testsuites['ts1'].name = 'ts1'
    testplan.testsuites['ts1'].testcases[0].name = 'ts1.tc1'
    testplan.testsuites['ts1'].testcases[1].name = 'ts1.tc2'
    testplan.testsuites['ts2'].name = 'ts2'
    testplan.testsuites['ts2'].testcases[0].name = 'ts2.tc1'
    testplan.testsuites['ts2'].testcases[1].name = 'ts2.tc2'
    testplan.testsuites['ts2'].testcases[2].name = 'ts2.tc3'
    with pytest.raises(TwisterRuntimeError) if expect_error else nullcontext():
        testplan.find_subtests()
    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stdout.write(err)
    assert all([printout in out for printout in expected_outs])
TESTDATA_3 = [
(0, 0, [], False, [], TwisterRuntimeError, []),
(1, 1, [], False, [], TwisterRuntimeError, []),
(1, 0, [], True, [], TwisterRuntimeError, ['No quarantine list given to be verified']),
# (1, 0, ['qfile.yaml'], False, ['# empty'], None, ['Quarantine file qfile.yaml is empty']),
(1, 0, ['qfile.yaml'], False, ['- platforms:\n - demo_board_3\n comment: "board_3"'], None, []),
]
@pytest.mark.parametrize(
    'added_testsuite_count, load_errors, ql, qv, ql_data, exception, expected_logs',
    TESTDATA_3,
    ids=['no tests', 'load errors', 'quarantine verify without quarantine list',
#         'empty quarantine file',
         'valid quarantine file']
)
def test_testplan_discover(
    tmp_path,
    caplog,
    added_testsuite_count,
    load_errors,
    ql,
    qv,
    ql_data,
    exception,
    expected_logs
):
    """discover() fails on zero suites, load errors, or quarantine_verify
    without a quarantine list; a valid quarantine file is accepted."""
    # Materialize any quarantine fixture files under tmp_path.
    for qf, data in zip(ql, ql_data):
        tmp_qf = tmp_path / qf
        tmp_qf.write_text(data)
    testplan = TestPlan(env=mock.Mock())
    testplan.options = mock.Mock(
        test='ts1',
        quarantine_list=[tmp_path / qf for qf in ql],
        quarantine_verify=qv,
    )
    testplan.testsuites = {
        'ts1': mock.Mock(id=1),
        'ts2': mock.Mock(id=2),
    }
    testplan.run_individual_testsuite = 'ts0'
    testplan.load_errors = load_errors
    # Stub out the collaborators discover() calls internally.
    testplan.add_testsuites = mock.Mock(return_value=added_testsuite_count)
    testplan.find_subtests = mock.Mock()
    testplan.report_duplicates = mock.Mock()
    testplan.parse_configuration = mock.Mock()
    testplan.add_configurations = mock.Mock()
    with pytest.raises(exception) if exception else nullcontext():
        testplan.discover()
    # The --test option must be forwarded as the testsuite filter.
    testplan.add_testsuites.assert_called_once_with(testsuite_filter='ts1')
    assert all([log in caplog.text for log in expected_logs])
# Rows: (report_suffix, only_failed, load_tests file, test_only, subset,
#        expected exception, expected selected platforms,
#        expected generate_subset() args) for test_testplan_load.
TESTDATA_4 = [
    (None, None, None, None, '00',
     TwisterRuntimeError, [], []),
    (None, True, None, None, '6/4',
     TwisterRuntimeError, set(['t-p3', 't-p4', 't-p1', 't-p2']), []),
    (None, None, 'load_tests.json', None, '0/4',
     TwisterRuntimeError, set(['lt-p1', 'lt-p3', 'lt-p4', 'lt-p2']), []),
    ('suffix', None, None, True, '2/4',
     None, set(['ts-p4', 'ts-p2', 'ts-p3']), [2, 4]),
]
@pytest.mark.parametrize(
    'report_suffix, only_failed, load_tests, test_only, subset,' \
    ' exception, expected_selected_platforms, expected_generate_subset_args',
    TESTDATA_4,
    ids=['apply_filters only', 'only failed', 'load tests', 'test only']
)
def test_testplan_load(
    tmp_path,
    report_suffix,
    only_failed,
    load_tests,
    test_only,
    subset,
    exception,
    expected_selected_platforms,
    expected_generate_subset_args
):
    """Exercise TestPlan.load() report-driven selection paths.

    Three twister report files are written to tmp_path, each describing the
    same two suites (ts1, ts2) on four platforms that differ only by a name
    prefix: 't-p*' (twister.json), 'ts-p*' (twister_suffix.json) and
    'lt-p*' (load_tests.json).  Depending on the options, load() must pick
    the right report, honour the platform include/exclude lists, and call
    generate_subset() only when a subset was requested.
    """
    # Single JSON template for all three reports; only the platform name
    # prefix differs.  Doubled braces are literal braces for str.format().
    report_template = """\
{{
    "testsuites": [
        {{"name": "ts1", "platform": "{prefix}1", "testcases": []}},
        {{"name": "ts1", "platform": "{prefix}2", "testcases": []}},
        {{"name": "ts2", "platform": "{prefix}3", "testcases": []}},
        {{"name": "ts2", "platform": "{prefix}4", "testcases": []}}
    ]
}}
"""
    (tmp_path / 'twister.json').write_text(report_template.format(prefix='t-p'))
    (tmp_path / 'twister_suffix.json').write_text(report_template.format(prefix='ts-p'))
    (tmp_path / 'load_tests.json').write_text(report_template.format(prefix='lt-p'))
    testplan = TestPlan(env=mock.Mock(outdir=tmp_path))
    testplan.testsuites = {
        'ts1': mock.Mock(testcases=[], extra_configs=[]),
        'ts2': mock.Mock(testcases=[], extra_configs=[]),
    }
    # Mock(name=...) would set the mock's own name, so assign .name after.
    testplan.testsuites['ts1'].name = 'ts1'
    testplan.testsuites['ts2'].name = 'ts2'
    testplan.options = mock.Mock(
        report_summary=None,
        outdir=tmp_path,
        report_suffix=report_suffix,
        only_failed=only_failed,
        load_tests=tmp_path / load_tests if load_tests else None,
        test_only=test_only,
        exclude_platform=['t-p0', 't-p1',
                          'ts-p0', 'ts-p1',
                          'lt-p0', 'lt-p1'],
        platform=['t-p1', 't-p2', 't-p3', 't-p4',
                  'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
                  'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4'],
        subset=subset
    )
    # Twelve platform mocks, one per report platform, each with matching
    # name and normalized_name.
    testplan.platforms = []
    for platform_name in ['t-p1', 't-p2', 't-p3', 't-p4',
                          'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
                          'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4']:
        platform_mock = mock.Mock(normalized_name=platform_name)
        platform_mock.name = platform_name
        testplan.platforms.append(platform_mock)
    testplan.generate_subset = mock.Mock()
    testplan.apply_filters = mock.Mock()
    with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \
         pytest.raises(exception) if exception else nullcontext():
        testplan.load()
    assert testplan.selected_platforms == expected_selected_platforms
    if expected_generate_subset_args:
        testplan.generate_subset.assert_called_once_with(*expected_generate_subset_args)
    else:
        testplan.generate_subset.assert_not_called()
# Rows: (device_testing, shuffle, seed, subset number, number of sets,
#        expected instance keys after generate_subset()).
TESTDATA_5 = [
    (False, False, None, 1, 2,
     ['plat1/testA', 'plat1/testB', 'plat1/testC',
      'plat3/testA', 'plat3/testB', 'plat3/testC']),
    (False, False, None, 1, 5,
     ['plat1/testA',
      'plat3/testA', 'plat3/testB', 'plat3/testC']),
    (False, False, None, 2, 2,
     ['plat2/testA', 'plat2/testB']),
    (True, False, None, 1, 2,
     ['plat1/testA', 'plat2/testA', 'plat1/testB',
      'plat3/testA', 'plat3/testB', 'plat3/testC']),
    (True, False, None, 2, 2,
     ['plat2/testB', 'plat1/testC']),
    # Shuffled orders below are deterministic for seed 123.
    (True, True, 123, 1, 2,
     ['plat2/testA', 'plat2/testB', 'plat1/testC',
      'plat3/testB', 'plat3/testA', 'plat3/testC']),
    (True, True, 123, 2, 2,
     ['plat1/testB', 'plat1/testA']),
]
@pytest.mark.parametrize(
    'device_testing, shuffle, seed, subset, sets, expected_subset',
    TESTDATA_5,
    ids=['subset 1', 'subset 1 out of 5', 'subset 2',
         'device testing, subset 1', 'device testing, subset 2',
         'device testing, shuffle with seed, subset 1',
         'device testing, shuffle with seed, subset 2']
)
def test_testplan_generate_subset(
    device_testing,
    shuffle,
    seed,
    subset,
    sets,
    expected_subset
):
    """generate_subset() trims self.instances down to the requested subset,
    keeping skipped/errored instances and honouring shuffle options."""
    testplan = TestPlan(env=mock.Mock())
    testplan.options = mock.Mock(
        device_testing=device_testing,
        shuffle_tests=shuffle,
        shuffle_tests_seed=seed
    )
    instance_statuses = {
        'plat1/testA': TwisterStatus.NONE,
        'plat1/testB': TwisterStatus.NONE,
        'plat1/testC': TwisterStatus.NONE,
        'plat2/testA': TwisterStatus.NONE,
        'plat2/testB': TwisterStatus.NONE,
        'plat3/testA': TwisterStatus.SKIP,
        'plat3/testB': TwisterStatus.SKIP,
        'plat3/testC': TwisterStatus.ERROR,
    }
    testplan.instances = {
        name: mock.Mock(status=status)
        for name, status in instance_statuses.items()
    }
    testplan.generate_subset(subset, sets)
    assert list(testplan.instances.keys()) == expected_subset
def test_testplan_handle_modules():
    """handle_modules() records the names of all parsed modules."""
    testplan = TestPlan(env=mock.Mock())
    fake_modules = [
        mock.Mock(meta={'name': module_name}) for module_name in ('name1', 'name2')
    ]
    with mock.patch('twisterlib.testplan.parse_modules', return_value=fake_modules):
        testplan.handle_modules()
    assert testplan.modules == ['name1', 'name2']
# Rows: (test_tree, list_tests, list_tags, expected return code,
#        expected reporting method) — test_tree wins over list_tests,
#        which wins over list_tags; no flag returns 1.
TESTDATA_6 = [
    (True, False, False, 0, 'report_test_tree'),
    (True, True, False, 0, 'report_test_tree'),
    (True, False, True, 0, 'report_test_tree'),
    (True, True, True, 0, 'report_test_tree'),
    (False, True, False, 0, 'report_test_list'),
    (False, True, True, 0, 'report_test_list'),
    (False, False, True, 0, 'report_tag_list'),
    (False, False, False, 1, None),
]
@pytest.mark.parametrize(
    'test_tree, list_tests, list_tags, expected_res, expected_method',
    TESTDATA_6,
    ids=['test tree', 'test tree + test list', 'test tree + tag list',
         'test tree + test list + tag list', 'test list',
         'test list + tag list', 'tag list', 'no report']
)
def test_testplan_report(
    test_tree,
    list_tests,
    list_tags,
    expected_res,
    expected_method
):
    """report() dispatches to at most one reporting method, chosen by the
    option flags, and returns 0 on success or 1 when nothing was reported."""
    reporter_names = ('report_test_tree', 'report_test_list', 'report_tag_list')
    testplan = TestPlan(env=mock.Mock())
    for reporter_name in reporter_names:
        setattr(testplan, reporter_name, mock.Mock())
    testplan.options = mock.Mock(
        test_tree=test_tree,
        list_tests=list_tests,
        list_tags=list_tags,
    )
    assert testplan.report() == expected_res
    # Exactly the expected reporter ran; all others stayed untouched.
    for reporter_name in reporter_names:
        reporter = getattr(testplan, reporter_name)
        if reporter_name == expected_method:
            reporter.assert_called_once()
        else:
            reporter.assert_not_called()
TESTDATA_7 = [
(
[
mock.Mock(
yamlfile='a.yaml',
scenarios=['scenario1', 'scenario2']
),
mock.Mock(
yamlfile='b.yaml',
scenarios=['scenario1']
)
],
TwisterRuntimeError,
'Duplicated test scenarios found:\n' \
'- scenario1 found in:\n' \
' - a.yaml\n' \
' - b.yaml\n',
[]
),
(
[
mock.Mock(
yamlfile='a.yaml',
scenarios=['scenario.a.1', 'scenario.a.2']
),
mock.Mock(
yamlfile='b.yaml',
scenarios=['scenario.b.1']
)
],
None,
None,
['No duplicates found.']
),
]
@pytest.mark.parametrize(
    'testsuites, expected_error, error_msg, expected_logs',
    TESTDATA_7,
    ids=['a duplicate', 'no duplicates']
)
def test_testplan_report_duplicates(
    capfd,
    caplog,
    testsuites,
    expected_error,
    error_msg,
    expected_logs
):
    """report_duplicates() raises TwisterRuntimeError listing every scenario
    declared by more than one yaml file; otherwise it logs a success line."""
    # Fake get_testsuite(): all suites whose scenario list contains the name.
    def mock_get(name):
        return list(filter(lambda x: name in x.scenarios, testsuites))
    testplan = TestPlan(env=mock.Mock())
    testplan.scenarios = [scenario for testsuite in testsuites \
                          for scenario in testsuite.scenarios]
    testplan.get_testsuite = mock.Mock(side_effect=mock_get)
    with pytest.raises(expected_error) if expected_error is not None else \
            nullcontext() as err:
        testplan.report_duplicates()
    if expected_error:
        # err is the pytest ExceptionInfo; compare the exact message.
        assert str(err._excinfo[1]) == error_msg
    assert all([log in caplog.text for log in expected_logs])
def test_testplan_report_tag_list(capfd):
    """report_tag_list() prints every tag used by any registered testsuite."""
    testplan = TestPlan(env=mock.Mock())
    suite_tags = {
        'testsuite0': {'tag1', 'tag2'},
        'testsuite1': {'tag1', 'tag2', 'tag3'},
        'testsuite2': {'tag1', 'tag3'},
        'testsuite3': {'tag'},
    }
    testplan.testsuites = {
        suite_name: mock.Mock(tags=tags) for suite_name, tags in suite_tags.items()
    }
    testplan.report_tag_list()
    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)
    for tag in ('tag', 'tag1', 'tag2', 'tag3'):
        assert '- ' + tag in out
def test_testplan_report_test_tree(capfd):
    """report_test_tree() renders tests grouped into a Samples/Tests tree."""
    testplan = TestPlan(env=mock.Mock())
    testplan.get_tests_list = mock.Mock(
        return_value=['1.dummy.case.1', '1.dummy.case.2',
                      '2.dummy.case.1', '2.dummy.case.2',
                      '3.dummy.case.1', '3.dummy.case.2',
                      '4.dummy.case.1', '4.dummy.case.2',
                      '5.dummy.case.1', '5.dummy.case.2',
                      'sample.group1.case1', 'sample.group1.case2',
                      'sample.group2.case', 'sample.group3.case1',
                      'sample.group3.case2', 'sample.group3.case3']
    )
    testplan.report_test_tree()
    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)
    # NOTE(review): the expected tree is whitespace-sensitive — the exact
    # per-level indent must match report_test_tree()'s renderer output.
    expected = """
Testsuite
    Samples
        group1
            sample.group1.case1
            sample.group1.case2
        group2
            sample.group2.case
        group3
            sample.group3.case1
            sample.group3.case2
            sample.group3.case3
    Tests
        1
            dummy
                1.dummy.case.1
                1.dummy.case.2
        2
            dummy
                2.dummy.case.1
                2.dummy.case.2
        3
            dummy
                3.dummy.case.1
                3.dummy.case.2
        4
            dummy
                4.dummy.case.1
                4.dummy.case.2
        5
            dummy
                5.dummy.case.1
                5.dummy.case.2
"""
    # Drop the leading newline introduced by the triple-quoted literal.
    expected = expected[1:]
    assert expected in out
def test_testplan_report_test_list(capfd):
    """report_test_list() prints a sorted, de-duplicated test list plus a
    total count, regardless of the input ordering."""
    testplan = TestPlan(env=mock.Mock())
    # Deliberately unsorted input; the report must come out sorted.
    testplan.get_tests_list = mock.Mock(
        return_value=['4.dummy.case.1', '4.dummy.case.2',
                      '3.dummy.case.2', '2.dummy.case.2',
                      '1.dummy.case.1', '1.dummy.case.2',
                      '3.dummy.case.1', '2.dummy.case.1',
                      '5.dummy.case.1', '5.dummy.case.2']
    )
    testplan.report_test_list()
    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)
    assert ' - 1.dummy.case.1\n' \
           ' - 1.dummy.case.2\n' \
           ' - 2.dummy.case.1\n' \
           ' - 2.dummy.case.2\n' \
           ' - 3.dummy.case.1\n' \
           ' - 3.dummy.case.2\n' \
           ' - 4.dummy.case.1\n' \
           ' - 4.dummy.case.2\n' \
           ' - 5.dummy.case.1\n' \
           ' - 5.dummy.case.2\n' \
           '10 total.' in out
def test_testplan_info(capfd):
    """TestPlan.info() echoes its message to stdout with a trailing newline."""
    TestPlan.info('dummy text')
    captured_out, captured_err = capfd.readouterr()
    sys.stdout.write(captured_out)
    sys.stderr.write(captured_err)
    assert 'dummy text\n' in captured_out
# Rows: (override_default_platforms, create a duplicate board yaml,
#        expected discovered platform names or None,
#        expected default platforms or None).
TESTDATA_8 = [
    (False, False, ['p1e2', 'p2', 'p3', 'p3@B'], ['p2']),
    (False, True, None, None),
    (True, False, ['p1e2', 'p2', 'p3', 'p3@B'], ['p3']),
]
def test_testplan_add_configurations(
tmp_path,
override_default_platforms,
create_duplicate,
expected_platform_names,
expected_defaults
):
# tmp_path
# boards <- board root
# x86
# p1
# | p1e1.yaml
# | p1e2.yaml
# p2
# p2.yaml
# p2-1.yaml <- duplicate
# p2-2.yaml <- load error
# arm
# p3
# p3.yaml
# p3_B.conf
tmp_board_root_dir = tmp_path / 'boards'
tmp_board_root_dir.mkdir()
tmp_arch1_dir = tmp_board_root_dir / 'x86'
tmp_arch1_dir.mkdir()
tmp_p1_dir = tmp_arch1_dir / 'p1'
tmp_p1_dir.mkdir()
p1e1_bs_yaml = """\
boards:
- name: ple1
vendor: zephyr
socs:
- name: unit_testing
- name: ple2
vendor: zephyr
socs:
- name: unit_testing
"""
p1e1_yamlfile = tmp_p1_dir / 'board.yml'
p1e1_yamlfile.write_text(p1e1_bs_yaml)
p1e1_yaml = """\
identifier: p1e1
name: Platform 1 Edition 1
type: native
arch: x86
vendor: vendor1
toolchain:
- zephyr
twister: False
"""
p1e1_yamlfile = tmp_p1_dir / 'p1e1.yaml'
p1e1_yamlfile.write_text(p1e1_yaml)
p1e2_yaml = """\
identifier: p1e2
name: Platform 1 Edition 2
type: native
arch: x86
vendor: vendor1
toolchain:
- zephyr
"""
p1e2_yamlfile = tmp_p1_dir / 'p1e2.yaml'
p1e2_yamlfile.write_text(p1e2_yaml)
tmp_p2_dir = tmp_arch1_dir / 'p2'
tmp_p2_dir.mkdir()
p2_bs_yaml = """\
boards:
- name: p2
vendor: zephyr
socs:
- name: unit_testing
- name: p2_2
vendor: zephyr
socs:
- name: unit_testing
"""
p2_yamlfile = tmp_p2_dir / 'board.yml'
p2_yamlfile.write_text(p2_bs_yaml)
p2_yaml = """\
identifier: p2
name: Platform 2
type: sim
arch: x86
vendor: vendor2
toolchain:
- zephyr
testing:
default: True
"""
p2_yamlfile = tmp_p2_dir / 'p2.yaml'
p2_yamlfile.write_text(p2_yaml)
if create_duplicate:
p2_yamlfile = tmp_p2_dir / 'p2-1.yaml'
p2_yamlfile.write_text(p2_yaml)
p2_2_yaml = """\
testing:
#@%!#!#^#@%@:1.0
identifier: p2_2
name: Platform 2 2
type: sim
arch: x86
vendor: vendor2
toolchain:
- zephyr
"""
p2_2_yamlfile = tmp_p2_dir / 'p2-2.yaml'
p2_2_yamlfile.write_text(p2_2_yaml)
tmp_arch2_dir = tmp_board_root_dir / 'arm'
tmp_arch2_dir.mkdir()
tmp_p3_dir = tmp_arch2_dir / 'p3'
tmp_p3_dir.mkdir()
p3_bs_yaml = """\
boards:
- name: p3
vendor: zephyr
socs:
- name: unit_testing
"""
p3_yamlfile = tmp_p3_dir / 'board.yml'
p3_yamlfile.write_text(p3_bs_yaml)
p3_yaml = """\
identifier: p3
name: Platform 3
type: unit
arch: arm
vendor: vendor3
toolchain:
- zephyr
"""
p3_yamlfile = tmp_p3_dir / 'p3.yaml'
p3_yamlfile.write_text(p3_yaml)
p3_yamlfile = tmp_p3_dir / 'p3_B.conf'
p3_yamlfile.write_text('')
env = mock.Mock(board_roots=[tmp_board_root_dir])
testplan = TestPlan(env=env)
testplan.test_config = {
'platforms': {
'override_default_platforms': override_default_platforms,
'default_platforms': ['p3', 'p1e1']
}
}
with pytest.raises(Exception) if create_duplicate else nullcontext():
testplan.add_configurations()
if expected_defaults is not None:
assert sorted(expected_defaults) == sorted(testplan.default_platforms)
if expected_platform_names is not None:
assert sorted(expected_platform_names) == sorted(testplan.platform_names)
def test_testplan_get_all_tests():
    """get_all_tests() returns every testcase name across all testsuites."""
    def named_case(case_name):
        # Mock(name=...) would set the mock's own name, so assign afterwards.
        case = mock.Mock()
        case.name = case_name
        return case
    testplan = TestPlan(env=mock.Mock())
    suite_one = mock.Mock(testcases=[named_case('tc1'), named_case('tc2')])
    suite_two = mock.Mock(
        testcases=[named_case('tc3'), named_case('tc4'), named_case('tc5')]
    )
    testplan.testsuites = {'ts1': suite_one, 'ts2': suite_two}
    assert sorted(testplan.get_all_tests()) == ['tc1', 'tc2', 'tc3', 'tc4', 'tc5']
# Rows: (testsuite filter, also scan the alternate config root,
#        expected number of suites added) for test_testplan_add_testsuites.
TESTDATA_9 = [
    ([], False, 7),
    ([], True, 5),
    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], False, 3),
    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], True, 0),
]
@pytest.mark.parametrize(
    'testsuite_filter, use_alt_root, expected_suite_count',
    TESTDATA_9,
    ids=['no testsuite filter', 'no testsuite filter, alt root',
         'testsuite filter', 'testsuite filter, alt root']
)
def test_testplan_add_testsuites(tmp_path, testsuite_filter, use_alt_root, expected_suite_count):
    """add_testsuites() walks the test roots for testcase.yaml/sample.yaml
    files, skips unparsable ones, honours the testsuite filter, and uses the
    alternate config root when one is configured."""
    # tmp_path
    # ├ tests  <- test root
    # │ ├ good_test
    # │ │ └ testcase.yaml
    # │ ├ wrong_test
    # │ │ └ testcase.yaml
    # │ ├ good_sample
    # │ │ └ sample.yaml
    # │ └ others
    # │   └ other.txt
    # └ other_tests  <- alternate test root
    #   └ good_test
    #     └ testcase.yaml
    tmp_test_root_dir = tmp_path / 'tests'
    tmp_test_root_dir.mkdir()
    tmp_good_test_dir = tmp_test_root_dir / 'good_test'
    tmp_good_test_dir.mkdir()
    testcase_yaml_1 = """\
tests:
  dummy.common.1:
    build_on_all: true
  dummy.common.2:
    build_on_all: true
  dummy.common.3:
    build_on_all: true
  dummy.special:
    build_on_all: false
"""
    testfile_1 = tmp_good_test_dir / 'testcase.yaml'
    testfile_1.write_text(testcase_yaml_1)
    # Deliberately broken yaml — this suite must be skipped with a load error.
    tmp_bad_test_dir = tmp_test_root_dir / 'wrong_test'
    tmp_bad_test_dir.mkdir()
    testcase_yaml_2 = """\
tests:
  wrong:
    yaml: {]}
"""
    testfile_2 = tmp_bad_test_dir / 'testcase.yaml'
    testfile_2.write_text(testcase_yaml_2)
    tmp_good_sample_dir = tmp_test_root_dir / 'good_sample'
    tmp_good_sample_dir.mkdir()
    samplecase_yaml_1 = """\
tests:
  sample.dummy.common.1:
    tags:
      - samples
  sample.dummy.common.2:
    tags:
      - samples
  sample.dummy.special.1:
    tags:
      - samples
"""
    samplefile_1 = tmp_good_sample_dir / 'sample.yaml'
    samplefile_1.write_text(samplecase_yaml_1)
    # Non-yaml content must simply be ignored.
    tmp_other_dir = tmp_test_root_dir / 'others'
    tmp_other_dir.mkdir()
    _ = tmp_other_dir / 'other.txt'
    tmp_alt_test_root_dir = tmp_path / 'other_tests'
    tmp_alt_test_root_dir.mkdir()
    tmp_alt_good_test_dir = tmp_alt_test_root_dir / 'good_test'
    tmp_alt_good_test_dir.mkdir()
    testcase_yaml_3 = """\
tests:
  dummy.alt.1:
    build_on_all: true
  dummy.alt.2:
    build_on_all: true
"""
    testfile_3 = tmp_alt_good_test_dir / 'testcase.yaml'
    testfile_3.write_text(testcase_yaml_3)
    env = mock.Mock(
        test_roots=[tmp_test_root_dir],
        alt_config_root=[tmp_alt_test_root_dir] if use_alt_root else []
    )
    testplan = TestPlan(env=env)
    res = testplan.add_testsuites(testsuite_filter)
    assert res == expected_suite_count
def test_testplan_str():
    """The string representation of a TestPlan is its name."""
    plan = TestPlan(env=mock.Mock())
    plan.name = 'my name'
    assert plan.__str__() == 'my name'
# (platform name to look up, whether a match is expected)
TESTDATA_10 = [
    ('a platform', True),
    ('other platform', False),
]
@pytest.mark.parametrize(
    'name, expect_found',
    TESTDATA_10,
    ids=['platform exists', 'no platform']
)
def test_testplan_get_platform(name, expect_found):
    """get_platform() returns the matching platform, or None if absent."""
    testplan = TestPlan(env=mock.Mock())

    platforms = []
    for platform_name in ('some platform', 'a platform'):
        platform = mock.Mock()
        platform.name = platform_name
        platforms.append(platform)
    testplan.platforms = platforms

    res = testplan.get_platform(name)

    if expect_found:
        assert res.name == name
    else:
        assert res is None
# (device_testing flag, expected check_runnable filter string)
TESTDATA_11 = [
    (True, 'runnable'),
    (False, 'buildable'),
]
@pytest.mark.parametrize(
    'device_testing, expected_tfilter',
    TESTDATA_11,
    ids=['device testing', 'no device testing']
)
def test_testplan_load_from_file(caplog, device_testing, expected_tfilter):
    """load_from_file() rebuilds test instances from a serialized report,
    restoring statuses, metrics, retries and testcase details for the
    platforms in the filter list."""
    def get_platform(name):
        # Lightweight platform stand-in used via get_platform side_effect.
        p = mock.Mock()
        p.name = name
        p.normalized_name = name
        return p
    ts1tc1 = mock.Mock()
    ts1tc1.name = 'TS1.tc1'
    ts1 = mock.Mock(testcases=[ts1tc1])
    ts1.name = 'TestSuite 1'
    ts2 = mock.Mock(testcases=[])
    ts2.name = 'TestSuite 2'
    ts3tc1 = mock.Mock()
    ts3tc1.name = 'TS3.tc1'
    ts3tc2 = mock.Mock()
    ts3tc2.name = 'TS3.tc2'
    ts3 = mock.Mock(testcases=[ts3tc1, ts3tc2])
    ts3.name = 'TestSuite 3'
    ts4tc1 = mock.Mock()
    ts4tc1.name = 'TS4.tc1'
    ts4 = mock.Mock(testcases=[ts4tc1])
    ts4.name = 'TestSuite 4'
    ts5 = mock.Mock(testcases=[])
    ts5.name = 'TestSuite 5'
    testplan = TestPlan(env=mock.Mock(outdir=os.path.join('out', 'dir')))
    testplan.options = mock.Mock(device_testing=device_testing, test_only=True, report_summary=None)
    testplan.testsuites = {
        'TestSuite 1': ts1,
        'TestSuite 2': ts2,
        'TestSuite 3': ts3,
        'TestSuite 4': ts4,
        'TestSuite 5': ts5
    }
    testplan.get_platform = mock.Mock(side_effect=get_platform)
    # Serialized twister report (the file load_from_file() reads).
    # NOTE(review): "TS3.tc2" also appears inside TestSuite 4's testcases —
    # presumably intentional fixture noise; verify against load_from_file().
    testplan_data = """\
{
    "testsuites": [
        {
            "name": "TestSuite 1",
            "platform": "Platform 1",
            "run_id": 1,
            "execution_time": 60.00,
            "used_ram": 4096,
            "available_ram": 12278,
            "used_rom": 1024,
            "available_rom": 1047552,
            "status": "passed",
            "reason": "OK",
            "testcases": [
                {
                    "identifier": "TS1.tc1",
                    "status": "passed",
                    "reason": "passed",
                    "execution_time": 60.00,
                    "log": ""
                }
            ]
        },
        {
            "name": "TestSuite 2",
            "platform": "Platform 1"
        },
        {
            "name": "TestSuite 3",
            "platform": "Platform 1",
            "run_id": 1,
            "execution_time": 360.00,
            "used_ram": 4096,
            "available_ram": 12278,
            "used_rom": 1024,
            "available_rom": 1047552,
            "status": "error",
            "reason": "File Not Found Error",
            "testcases": [
                {
                    "identifier": "TS3.tc1",
                    "status": "error",
                    "reason": "File Not Found Error.",
                    "execution_time": 360.00,
                    "log": "[ERROR]: File 'dummy.yaml' not found!\\nClosing..."
                },
                {
                    "identifier": "TS3.tc2"
                }
            ]
        },
        {
            "name": "TestSuite 4",
            "platform": "Platform 1",
            "execution_time": 360.00,
            "used_ram": 4096,
            "available_ram": 12278,
            "used_rom": 1024,
            "available_rom": 1047552,
            "status": "skipped",
            "reason": "Not in requested test list.",
            "testcases": [
                {
                    "identifier": "TS4.tc1",
                    "status": "skipped",
                    "reason": "Not in requested test list.",
                    "execution_time": 360.00,
                    "log": "[INFO] Parsing..."
                },
                {
                    "identifier": "TS3.tc2"
                }
            ]
        },
        {
            "name": "TestSuite 5",
            "platform": "Platform 2"
        }
    ]
}
"""
    # Only Platform 1 suites should be loaded; TestSuite 5 is filtered out.
    filter_platform = ['Platform 1']
    check_runnable_mock = mock.Mock(return_value=True)
    with mock.patch('builtins.open', mock.mock_open(read_data=testplan_data)), \
         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', check_runnable_mock), \
         mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()):
        testplan.load_from_file('dummy.yaml', filter_platform)
    # Expected instance state reconstructed from the report above.
    expected_instances = {
        'Platform 1/TestSuite 1': {
            'metrics': {
                'handler_time': 60.0,
                'used_ram': 4096,
                'used_rom': 1024,
                'available_ram': 12278,
                'available_rom': 1047552
            },
            'retries': 0,
            'testcases': {
                'TS1.tc1': {
                    'status': TwisterStatus.PASS,
                    'reason': None,
                    'duration': 60.0,
                    'output': ''
                }
            }
        },
        'Platform 1/TestSuite 2': {
            'metrics': {
                'handler_time': 0,
                'used_ram': 0,
                'used_rom': 0,
                'available_ram': 0,
                'available_rom': 0
            },
            'retries': 0,
            'testcases': []
        },
        'Platform 1/TestSuite 3': {
            'metrics': {
                'handler_time': 360.0,
                'used_ram': 4096,
                'used_rom': 1024,
                'available_ram': 12278,
                'available_rom': 1047552
            },
            'retries': 1,
            'testcases': {
                'TS3.tc1': {
                    'status': TwisterStatus.ERROR,
                    'reason': None,
                    'duration': 360.0,
                    'output': '[ERROR]: File \'dummy.yaml\' not found!\nClosing...'
                },
                'TS3.tc2': {
                    'status': TwisterStatus.NONE,
                    'reason': None,
                    'duration': 0,
                    'output': ''
                }
            }
        },
        'Platform 1/TestSuite 4': {
            'metrics': {
                'handler_time': 360.0,
                'used_ram': 4096,
                'used_rom': 1024,
                'available_ram': 12278,
                'available_rom': 1047552
            },
            'retries': 0,
            'testcases': {
                'TS4.tc1': {
                    'status': TwisterStatus.SKIP,
                    'reason': 'Not in requested test list.',
                    'duration': 360.0,
                    'output': '[INFO] Parsing...'
                }
            }
        },
    }
    for n, i in testplan.instances.items():
        assert expected_instances[n]['metrics'] == i.metrics
        assert expected_instances[n]['retries'] == i.retries
        for t in i.testcases:
            assert expected_instances[n]['testcases'][str(t)]['status'] == t.status
            assert expected_instances[n]['testcases'][str(t)]['reason'] == t.reason
            assert expected_instances[n]['testcases'][str(t)]['duration'] == t.duration
            assert expected_instances[n]['testcases'][str(t)]['output'] == t.output
    # The runnable/buildable filter must follow the device_testing flag.
    check_runnable_mock.assert_called_with(mock.ANY, expected_tfilter, mock.ANY, mock.ANY)
    expected_logs = [
        'loading TestSuite 1...',
        'loading TestSuite 2...',
        'loading TestSuite 3...',
        'loading TestSuite 4...',
    ]
    assert all([log in caplog.text for log in expected_logs])
def test_testplan_add_instances():
    """add_instances() should index the given instances by their name."""
    testplan = TestPlan(env=mock.Mock())

    first = mock.Mock()
    first.name = 'instance 1'
    second = mock.Mock()
    second.name = 'instance 2'

    testplan.add_instances([first, second])

    assert testplan.instances == {'instance 1': first, 'instance 2': second}
def test_testplan_get_testsuite():
    """get_testsuite() should return every suite that contains a testcase
    with the requested identifier."""
    testplan = TestPlan(env=mock.Mock())
    testplan.testsuites = {
        'testsuite0': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
        'testsuite1': mock.Mock(testcases=[mock.Mock()]),
        'testsuite2': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
        'testsuite3': mock.Mock(testcases=[])
    }
    # Mock's constructor treats `name` specially, so assign names afterwards.
    testplan.testsuites['testsuite0'].testcases[0].name = 'testcase name 0'
    testplan.testsuites['testsuite0'].testcases[1].name = 'testcase name 1'
    testplan.testsuites['testsuite1'].testcases[0].name = 'sample id'
    testplan.testsuites['testsuite2'].testcases[0].name = 'dummy id'
    testplan.testsuites['testsuite2'].testcases[1].name = 'sample id'
    # Renamed from `id` to avoid shadowing the builtin.
    testcase_id = 'sample id'
    res = testplan.get_testsuite(testcase_id)
    assert len(res) == 2
    assert testplan.testsuites['testsuite1'] in res
    assert testplan.testsuites['testsuite2'] in res
def test_testplan_verify_platforms_existence(caplog):
    """An unrecognized platform name should log an error and exit with 2."""
    testplan = TestPlan(env=mock.Mock())
    testplan.platform_names = ['a platform', 'other platform']
    platform_names = ['other platform', 'some platform']
    log_info = 'PLATFORM ERROR'
    with pytest.raises(SystemExit) as se:
        testplan.verify_platforms_existence(platform_names, log_info)
    assert str(se.value) == '2'
    # The original assert was a bare string literal (always truthy) —
    # actually verify the message was logged.
    assert 'PLATFORM ERROR - unrecognized platform - some platform' in caplog.text
# Whether the `twister_links` directory already exists.
TESTDATA_12 = [
    (True),
    (False)
]
@pytest.mark.parametrize(
    'exists',
    TESTDATA_12,
    ids=['links dir exists', 'links dir does not exist']
)
def test_testplan_create_build_dir_links(exists):
    """create_build_dir_links() links every non-skipped instance and only
    creates the links directory when it is missing."""
    outdir = os.path.join('out', 'dir')
    instances_linked = []
    def mock_link(links_dir_path, instance):
        # Record linked instances and verify the links directory path.
        assert links_dir_path == os.path.join(outdir, 'twister_links')
        instances_linked.append(instance)
    instances = {
        'inst0': mock.Mock(status=TwisterStatus.PASS),
        'inst1': mock.Mock(status=TwisterStatus.SKIP),
        'inst2': mock.Mock(status=TwisterStatus.ERROR),
    }
    # The skipped instance (inst1) must not be linked.
    expected_instances = [instances['inst0'], instances['inst2']]
    testplan = TestPlan(env=mock.Mock(outdir=outdir))
    testplan._create_build_dir_link = mock.Mock(side_effect=mock_link)
    testplan.instances = instances
    with mock.patch('os.path.exists', return_value=exists), \
         mock.patch('os.mkdir', mock.Mock()) as mkdir_mock:
        testplan.create_build_dir_links()
    if not exists:
        mkdir_mock.assert_called_once()
    assert expected_instances == instances_linked
# os.name values to patch in: 'nt' (Windows junction) vs anything else (symlink).
TESTDATA_13 = [
    ('nt'),
    ('Linux')
]
@pytest.mark.parametrize(
    'os_name',
    TESTDATA_13,
)
def test_testplan_create_build_dir_link(os_name):
    """_create_build_dir_link() symlinks on POSIX and shells out to
    `mklink /J` on Windows, then rewrites instance.build_dir to the link
    path and increments the link counter."""
    def mock_makedirs(path, exist_ok=False):
        assert exist_ok
        assert path == instance_build_dir
    def mock_symlink(source, target):
        # POSIX branch: os.symlink(build_dir, link).
        assert source == instance_build_dir
        assert target == os.path.join('links', 'path', 'test_0')
    def mock_call(cmd, shell=False):
        # Windows branch: junction created through the shell.
        assert shell
        assert cmd == ['mklink', '/J', os.path.join('links', 'path', 'test_0'),
                       instance_build_dir]
    def mock_join(*paths):
        # os.name is patched above, so choose the separator it implies.
        slash = "\\" if os.name == 'nt' else "/"
        return slash.join(paths)
    with mock.patch('os.name', os_name), \
         mock.patch('os.symlink', side_effect=mock_symlink), \
         mock.patch('os.makedirs', side_effect=mock_makedirs), \
         mock.patch('subprocess.call', side_effect=mock_call), \
         mock.patch('os.path.join', side_effect=mock_join):
        testplan = TestPlan(env=mock.Mock())
        links_dir_path = os.path.join('links', 'path')
        instance_build_dir = os.path.join('some', 'far', 'off', 'build', 'dir')
        instance = mock.Mock(build_dir=instance_build_dir)
        testplan._create_build_dir_link(links_dir_path, instance)
        assert instance.build_dir == os.path.join('links', 'path', 'test_0')
        assert testplan.link_dir_counter == 1
# (platform_name, reason, filters, expected_status, expected_reason)
TESTDATA_14 = [
    ('bad platform', 'dummy reason', [],
     'dummy status', 'dummy reason'),
    ('good platform', 'quarantined', [],
     TwisterStatus.ERROR, 'quarantined but is one of the integration platforms'),
    ('good platform', 'dummy reason', [{'type': 'command line filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'Skip filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'platform key filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'Toolchain filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'Module filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'testsuite filter'}],
     TwisterStatus.ERROR, 'dummy reason but is one of the integration platforms'),
]
@pytest.mark.parametrize(
    'platform_name, reason, filters,' \
    ' expected_status, expected_reason',
    TESTDATA_14,
    ids=['wrong platform', 'quarantined', 'command line filtered',
         'skip filtered', 'platform key filtered', 'toolchain filtered',
         'module filtered', 'skip to error change']
)
def test_change_skip_to_error_if_integration(
    platform_name,
    reason,
    filters,
    expected_status,
    expected_reason
):
    """A skip on an integration platform is escalated to an error unless it
    came from one of the allowed filter types."""
    options = mock.Mock()

    platform = mock.Mock()
    platform.name = platform_name

    suite = mock.Mock(integration_platforms=['good platform', 'a platform'])
    instance = mock.Mock(
        testsuite=suite,
        platform=platform,
        filters=filters,
        status='dummy status',
        reason=reason
    )

    change_skip_to_error_if_integration(options, instance)

    assert instance.status == expected_status
    assert instance.reason == expected_reason
``` | /content/code_sandbox/scripts/tests/twister/test_testplan.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 15,144 |
```python
#!/usr/bin/env python3
#
"""
Tests for config_parser.py
"""
import os
import pytest
import mock
import scl
from twisterlib.config_parser import TwisterConfigParser, extract_fields_from_arg_list, ConfigurationError
from contextlib import nullcontext
def test_extract_single_field_from_string_argument():
    """Only the requested field is extracted; the rest passes through."""
    extracted_fields, other_fields = extract_fields_from_arg_list(
        {"FIELD1"}, "FIELD1=value1 FIELD2=value2 FIELD3=value3")

    assert extracted_fields == {"FIELD1": ["value1"]}
    assert other_fields == "FIELD2=value2 FIELD3=value3"
def test_no_fields_to_extract():
    """With no target fields, the argument list is returned untouched."""
    extracted, leftover = extract_fields_from_arg_list(set(), "arg1 arg2 arg3")

    assert extracted == {}
    assert leftover == "arg1 arg2 arg3"
def test_missing_fields():
    """Target fields absent from the arguments map to empty lists."""
    targets = {"CONF_FILE", "OVERLAY_CONFIG", "DTC_OVERLAY_FILE"}

    extracted, leftover = extract_fields_from_arg_list(targets, "arg1 arg2 arg3")

    assert extracted == {field: [] for field in targets}
    assert leftover == "arg1 arg2 arg3"
def test_load_yaml_with_extra_args_and_retrieve_scenario_data(zephyr_base):
    """Scenario data is parsed per the testsuite schema: tags become a set,
    extra_args is split on whitespace, and `common` is stored separately."""
    filename = "test_data.yaml"
    yaml_data = '''
tests:
  scenario1:
    tags: ['tag1', 'tag2']
    extra_args: '--CONF_FILE=file1.conf --OVERLAY_CONFIG=config1.conf'
    filter: 'filter1'
common:
  filter: 'filter2'
'''
    loaded_schema = scl.yaml_load(
        os.path.join(zephyr_base, 'scripts', 'schemas','twister', 'testsuite-schema.yaml')
    )
    # Feed the YAML through a mocked open() so no real file is needed.
    with mock.patch('builtins.open', mock.mock_open(read_data=yaml_data)):
        parser = TwisterConfigParser(filename, loaded_schema)
        parser.load()
    scenario_data = parser.get_scenario('scenario1')
    scenario_common = parser.common
    assert scenario_data['tags'] == {'tag1', 'tag2'}
    assert scenario_data['extra_args'] == ['--CONF_FILE=file1.conf', '--OVERLAY_CONFIG=config1.conf']
    assert scenario_common == {'filter': 'filter2'}
def test_default_values(zephyr_base):
    """Fields omitted from the YAML should take the schema defaults.

    The original assertion compared ``expected_scenario_data.items()``
    with itself (always true) and never retrieved the parsed scenario;
    compare against the actual scenario data instead.
    """
    filename = "test_data.yaml"
    yaml_data = '''
tests:
  scenario1:
    tags: 'tag1'
    extra_args: ''
'''
    loaded_schema = scl.yaml_load(
        os.path.join(zephyr_base, 'scripts', 'schemas', 'twister','testsuite-schema.yaml')
    )
    with mock.patch('builtins.open', mock.mock_open(read_data=yaml_data)):
        parser = TwisterConfigParser(filename, loaded_schema)
        parser.load()
    scenario_data = parser.get_scenario('scenario1')
    expected_scenario_data = {
        'type': 'integration',
        'extra_args': [],
        'extra_configs': [],
        'extra_conf_files': [],
        'extra_overlay_confs': [],
        'extra_dtc_overlay_files': [],
        'required_snippets': [],
        'build_only': False,
        'build_on_all': False,
        'skip': False,
        'slow': False,
        'timeout': 60,
        'min_ram': 8,
        'modules': [],
        'depends_on': set(),
        'min_flash': 32,
        'arch_allow': set(),
        'arch_exclude': set(),
        'extra_sections': [],
        'integration_platforms': [],
        'ignore_faults': False,
        'ignore_qemu_crash': False,
        'testcases': [],
        'platform_type': [],
        'platform_exclude': set(),
        'platform_allow': set(),
        'platform_key': [],
        'toolchain_exclude': set(),
        'toolchain_allow': set(),
        'filter': '',
        'levels': [],
        'harness': 'test',
        'harness_config': {},
        'seed': 0,
        'sysbuild': False
    }
    # Every default must appear in the parsed scenario (subset check on
    # dict items).
    assert expected_scenario_data.items() <= scenario_data.items()
@pytest.mark.parametrize(
    'value, typestr, expected',
    [
        (' hello ', 'str', 'hello'),
        ('3.14', 'float', 3.14),
        ('10', 'int', 10),
        ('True', 'bool', 'True'),  # do-nothing cast
        ('key: val', 'map', 'key: val'),  # do-nothing cast
        ('test', 'int', ValueError),
        ('test', 'unknown', ConfigurationError),
        ('1 2 2 3', 'list', ['1', '2', '2','3']),
        ('1 2 2 3', 'set', {'1', '2', '3'})
    ],
    ids=['str to str', 'str to float', 'str to int', 'str to bool', 'str to map',
         'invalid', 'to unknown', "to list", "to set"]
)
def test_cast_value(zephyr_base, value, typestr, expected):
    """_cast_value() converts strings per the type string, raising on
    invalid input or an unknown type."""
    loaded_schema = scl.yaml_load(
        os.path.join(zephyr_base, 'scripts', 'schemas', 'twister','testsuite-schema.yaml')
    )
    parser = TwisterConfigParser("config.yaml", loaded_schema)
    # When `expected` is an exception class, assert that it is raised;
    # otherwise run under a null context and compare the cast result.
    with pytest.raises(expected) if \
            isinstance(expected, type) and issubclass(expected, Exception) else nullcontext():
        result = parser._cast_value(value, typestr)
        assert result == expected
def test_load_invalid_test_config_yaml(zephyr_base):
    """YAML that does not match the config schema must fail to load."""
    bad_yaml = '''
gibberish data
'''
    schema = scl.yaml_load(
        os.path.join(zephyr_base, 'scripts', 'schemas','twister', 'test-config-schema.yaml')
    )
    with mock.patch('builtins.open', mock.mock_open(read_data=bad_yaml)):
        parser = TwisterConfigParser("test_data.yaml", schema)
        with pytest.raises(Exception):
            parser.load()
def test_load_yaml_with_no_scenario_data(zephyr_base):
    """Requesting a scenario that was never defined raises KeyError.

    The original test also asserted on the (never-assigned) return value
    after the raising call — that code was unreachable and is removed.
    """
    filename = "test_data.yaml"
    yaml_data = '''
tests:
common:
  extra_args: '--CONF_FILE=file2.conf --OVERLAY_CONFIG=config2.conf'
'''
    loaded_schema = scl.yaml_load(
        os.path.join(zephyr_base, 'scripts', 'schemas','twister', 'testsuite-schema.yaml')
    )
    with mock.patch('builtins.open', mock.mock_open(read_data=yaml_data)):
        parser = TwisterConfigParser(filename, loaded_schema)
        parser.load()
        with pytest.raises(KeyError):
            parser.get_scenario('scenario1')
``` | /content/code_sandbox/scripts/tests/twister/test_config_parser.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,454 |
```yaml
testing:
ignore_tags:
- correct_schema_1
- correct_schema_2
``` | /content/code_sandbox/scripts/tests/twister/test_data/platform_correct_schema.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 21 |
```yaml
test:
testing.incorrect_schema:
tag: demo incorrect_tag
platform_exclude: demo_board_1
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuite_incorrect_schema.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 25 |
```yaml
test:
ignore_tags:
- incorrect_schema_1
- incorrect_schema_2
``` | /content/code_sandbox/scripts/tests/twister/test_data/platform_incorrect_schema.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 21 |
```python
#!/usr/bin/env python3
#
"""
Tests for the error classes
"""
import os
import pytest
from pathlib import Path
from twisterlib.error import ConfigurationError
def test_configurationerror():
    """ConfigurationError's message is '<cfile>: <message>'."""
    expected_err = f'{os.path.join("some", "path")}: dummy message'

    with pytest.raises(ConfigurationError, match=expected_err):
        raise ConfigurationError(Path('some') / 'path', 'dummy message')
``` | /content/code_sandbox/scripts/tests/twister/test_errors.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 101 |
```yaml
tests:
testing.correct_schema:
tags: demo_correct correct_tags
platform_exclude: demo_board_1
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuite_correct_schema.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 25 |
```yaml
- scenarios:
- test_a.check_1
platforms:
- demo_board_1
- demo_board_3
comment: "a1 on board_1 and board_3"
``` | /content/code_sandbox/scripts/tests/twister/test_data/quarantines/basic.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 44 |
```yaml
- scenarios:
- test_(a|c).check_2
architectures:
- x86.*
comment: "a2 and c2 on x86"
- scenarios:
- test_d.*
comment: "all test_d"
``` | /content/code_sandbox/scripts/tests/twister/test_data/quarantines/with_regexp.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 53 |
```yaml
platforms:
override_default_platforms: false
increased_platform_scope: true
levels:
- name: smoke
description: >
A plan to be used verifying basic zephyr features on hardware.
adds:
- kernel.threads.*
- kernel.timer.behavior
- arch.interrupt
- boards.*
- name: acceptance
description: >
More coverage
adds:
- kernel.*
- arch.interrupt
- boards.*
``` | /content/code_sandbox/scripts/tests/twister/test_data/test_config.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 103 |
```yaml
- platforms:
- demo_board_3
comment: "all on board_3"
``` | /content/code_sandbox/scripts/tests/twister/test_data/quarantines/platform.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 21 |
```yaml
identifier: demo_board_1
name: DEMO_Board_1
type: mcu
arch: arm
toolchain:
- zephyr
- gnuarmemb
- xtools
ram: 256
flash: 1024
supported:
- supported_board_2
- supported_board_3
testing:
default: true
``` | /content/code_sandbox/scripts/tests/twister/test_data/boards/1_level/2_level/board_config_1.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 81 |
```yaml
# empty quarantine file
``` | /content/code_sandbox/scripts/tests/twister/test_data/quarantines/empty.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
boards:
- name: demo_board_1
vendor: zephyr
socs:
- name: unit_testing
- name: demo_board_2
vendor: zephyr
socs:
- name: unit_testing
- name: demo_board_3
vendor: zephyr
socs:
- name: unit_testing
``` | /content/code_sandbox/scripts/tests/twister/test_data/boards/1_level/2_level/board.yml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 84 |
```yaml
identifier: demo_board_2
name: DEMO_Board_2
type: mcu
arch: x86
toolchain:
- zephyr
- gnuarmemb
- xtools
ram: 256
flash: 1024
supported:
- supported_board_1
- supported_board_3
``` | /content/code_sandbox/scripts/tests/twister/test_data/boards/1_level/2_level/board_config_2.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 75 |
```yaml
identifier: demo_board_3
name: DEMO_Board_3
type: mcu
arch: arm
toolchain:
- zephyr
- gnuarmemb
- xtools
ram: 256
flash: 1024
supported:
- supported_board_1
- supported_board_2
testing:
default: true
``` | /content/code_sandbox/scripts/tests/twister/test_data/boards/1_level/2_level/board_config_3.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 81 |
```python
from twisterlib.mixins import DisablePyTestCollectionMixin
class TestClassToIgnore(DisablePyTestCollectionMixin):
    """Fixture: the mixin must prevent pytest from collecting this class.

    The failing assert below is intentional — it only fires if pytest
    wrongly collects this class as a test.
    """
    def test_to_ignore(self):
        assert False
``` | /content/code_sandbox/scripts/tests/twister/test_data/mixins/test_to_ignore.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 37 |
```c
/*
*
*/
ztest_test_suite(test_api,
ztest_1cpu_unit_test(test_a) /* comment! */,
/* comment */ztest_1cpu_unit_test(test_b),
ztest_1cpu_unit_test(test_c),
ztest_unit_test(test_unit_a), ztest_unit_test(test_unit_b),
ztest_1cpu_unit_test(
test_newline),
ztest_1cpu_unit_test(test_test_test_aa),
ztest_user_unit_test(test_user),
ztest_1cpu_unit_test(test_last));
ztest_run_test_suite(test_api);
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_ztest.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 122 |
```yaml
tests:
test_e.check_1:
tags: test_e
build_only: true
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_e/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 22 |
```c
/*
*
*/
ZTEST_SUITE(feature5, NULL, NULL, NULL, NULL, NULL);
ZTEST(feature5, test_1a);
ZTEST(feature5, test_1b);
ztest_run_registered_test_suites(feature4);
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_e/test_ztest_new_suite.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 51 |
```c
/*
*
*/
/* I'm empty. */
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_e/test_ztest_no_suite.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10 |
```yaml
tests:
test_a.check_1:
tags: test_a
build_only: true
test_a.check_2:
extra_args: CONF_FILE="test.conf"
tags: test_a
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_a/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 45 |
```yaml
tests:
test_c.check_1:
tags: test_c
test_c.check_2:
extra_args: CONF_FILE="test.conf"
tags: test_c
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_c/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 39 |
```c
/*
*
*/
static void test_1b(void)
{
ztest_test_skip();
}
void test_main(void)
{
#ifdef TEST_feature1
ztest_test_suite(feature1,
ztest_unit_test(1a), ztest_unit_test(test_1b),
ztest_unit_test(test_1c)
);
#endif
#ifdef TEST_feature2
ztest_test_suite(feature2,
ztest_unit_test(test_2a),
ztest_unit_test(test_2b)
);
ztest_run_test_suite(feature2);
#endif
}
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_a/test_ztest_error.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 122 |
```c
/*
*
*/
ztest_test_suite(feature3,
ztest_unit_test(test_unit_1a),
#ifdef CONFIG_WHATEVER
ztest_unit_test(test_unit_1b),
#endif
ztest_unit_test(test_Unit_1c)
);
ztest_run_test_suite(feature3);
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_a/test_ztest_error_1.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```yaml
tests:
test_b.check_1:
min_ram: 32
tags: test_b
test_b.check_2:
min_ram: 32
extra_args: CONF_FILE="test.conf"
tags: test_b
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_b/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 53 |
```yaml
common:
extra_args: >
CONF_FILE=conf1;conf2 DTC_OVERLAY_FILE=overlay1;overlay2
OVERLAY_CONFIG=oc1.conf UNRELATED1=abc
extra_conf_files:
- "conf3"
- "conf4"
extra_overlay_confs:
- "oc2.conf"
extra_dtc_overlay_files:
- "overlay3"
- "overlay4"
tests:
test_config.main:
extra_args: >
CONF_FILE=conf5;conf6 DTC_OVERLAY_FILE=overlay5;overlay6
OVERLAY_CONFIG=oc3.conf UNRELATED2=xyz
extra_conf_files:
- "conf7"
- "conf8"
extra_overlay_confs:
- "oc4.conf"
extra_dtc_overlay_files:
- "overlay7"
- "overlay8"
extra_configs:
- CONFIG_FOO=y
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_config/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 199 |
```yaml
tests:
test_d.check_1:
tags: test_d
build_only: true
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_d/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 22 |
```yaml
name: dummy
boards:
demo_board_2:
append:
EXTRA_CONF_FILE: dummy.conf
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_d/snippets/dummy/snippet.yml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 24 |
```unknown
CONFIG_BOOT_BANNER=n
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_d/snippets/dummy/dummy.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7 |
```c
/*
*
*/
ztest_register_test_suite(feature4, NULL,
ztest_unit_test(test_unit_1a),
ztest_unit_test(test_unit_1b));
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/tests/test_d/test_ztest_error_register_test_suite.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 36 |
```yaml
sample:
name: Sample Library
tests:
sample_test.app:
tags: sample_tag
harness: console
harness_config:
type: multi_line
regex:
- "Hello World!"
- "Sample says: Hello World!"
``` | /content/code_sandbox/scripts/tests/twister/test_data/testsuites/samples/test_app/test_sample_app.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 55 |
```shell
#!/bin/sh
#
# Run checkpatch.pl over the core Zephyr directories and summarize the
# error/warning types found.
#
# usage: do_checkpatch.sh <report-file>
#   $1 - report file; also writes $1_error.types and $1_warning.types
#        containing the unique types, and prints per-type counts.
#
# Note: "$1" is quoted throughout so report paths containing spaces work.
[ -f "$1" ] && rm "$1"
[ -f "$1_error.types" ] && rm "$1_error.types"
[ -f "$1_warning.types" ] && rm "$1_warning.types"
dirs_to_check="arch drivers include kernel lib"
files=$(for d in ${dirs_to_check}; do find $d/ -type f -name '*.[ch]'; done)
for i in $files; do
	${ZEPHYR_BASE}/scripts/checkpatch.pl --mailback --no-tree -f --emacs --summary-file --show-types --ignore BRACES,PRINTK_WITHOUT_KERN_LEVEL,SPLIT_STRING --max-line-length=100 $i >> "$1"
done
# Collect the unique error/warning types seen in the report...
grep ERROR: "$1" |cut -d : -f 3,4 |sort -u > "$1_error.types"
grep WARNING: "$1" |cut -d : -f 3,4 |sort -u > "$1_warning.types"
# ...then print how many times each type occurred.
for i in `cat "$1_error.types"`; do
	echo -n $i ' '; grep $i "$1" | wc -l
done
for i in `cat "$1_warning.types"`; do
	echo -n $i ' '; grep $i "$1" | wc -l
done
``` | /content/code_sandbox/scripts/checkpatch/do_checkpatch.sh | shell | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 265 |
```unknown
#!/usr/bin/env bash
#
#
#
exe_name=$(basename $0)
# outputs the date and time in pre-set formats
# default format is: 20150114-181112
# usage: timestamp [-a] [-d] [-u] [-s] [-S]
# where: -a changes default to: 2015-01-14-18-11-56
#        -d changes default to: 20150114
#        -u changes default to: 20150114_181201
#        -s changes default to: 20150114-1812.04
#        -S changes default to: 20150114-1812
# Some switches can be mixed and matched, eg. -Sa gives 2015-01-14-18-13

# Default strftime components; the option flags below override them.
date_format="%Y%m%d"
time_format="%H%M"
seconds_format="%S"
seconds_separator=""
date_time_separator="-"

function usage {
	printf "usage: %s [-a] [-d] [-u] [-s] [-S]\n" $exe_name >&2
}

function fail {
	usage
	exit -1
}

# Parse command-line flags; only sets the mode variables read below.
function get_opts {
	declare -r optstr="adusSh"
	while getopts ${optstr} opt; do
		case ${opt} in
			a) all_separated=1 ;;
			d) date_only=1 ;;
			u) date_time_separator="_" ;;
			s) seconds_separator="." ;;
			S) no_seconds=1 ;;
			h) usage; exit 0 ;;
			*) fail ;;
		esac
	done
}

get_opts $@

# -a: fully dash-separated output.
if [ x${all_separated} == x1 ]; then
	date_format="%Y-%m-%d"
	time_format="%H-%M"
	seconds_separator="-"
fi

# -d: date only — drop the whole time portion.
if [ x${date_only} == x1 ]; then
	date_time_separator=""
	time_format=""
	seconds_format=""
	seconds_separator=""
fi

# -S: keep the time but drop seconds.
if [ x${no_seconds} == x1 ]; then
	seconds_format=""
	seconds_separator=""
fi

# Assemble the final strftime string and emit it once.
output_date=${date_format}${date_time_separator}
output_time=${time_format}${seconds_separator}${seconds_format}
output=$(date +${output_date}${output_time})
echo ${output}
``` | /content/code_sandbox/scripts/checkpatch/timestamp | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 468 |
```python
from __future__ import annotations
import pytest
import textwrap
from unittest import mock
from pathlib import Path
from twisterlib.harness import Pytest
from twisterlib.testsuite import TestSuite
from twisterlib.testinstance import TestInstance
from twisterlib.platform import Platform
@pytest.fixture
def testinstance() -> TestInstance:
    """Build a TestInstance for 'samples/hello' with a fully mocked handler."""
    suite = TestSuite('.', 'samples/hello', 'unit.test')
    suite.harness_config = {}
    suite.ignore_faults = False
    suite.sysbuild = False

    instance = TestInstance(suite, Platform(), 'outdir')

    handler = mock.Mock()
    handler.options = mock.Mock()
    handler.options.verbose = 1
    handler.options.fixture = ['fixture1:option1', 'fixture2']
    handler.options.pytest_args = None
    handler.type_str = 'native'
    instance.handler = handler
    return instance
@pytest.mark.parametrize('device_type', ['native', 'qemu'])
def test_pytest_command(testinstance: TestInstance, device_type):
    """The generated command must include source dir, build dir, report path,
    device type and every configured twister fixture."""
    harness = Pytest()
    harness.configure(testinstance)
    testinstance.handler.type_str = device_type

    expected_args = [
        'pytest',
        'samples/hello/pytest',
        f'--build-dir={testinstance.build_dir}',
        f'--junit-xml={testinstance.build_dir}/report.xml',
        f'--device-type={device_type}',
        '--twister-fixture=fixture1:option1',
        '--twister-fixture=fixture2',
    ]

    generated = harness.generate_command()
    for arg in expected_args:
        assert arg in generated
def test_pytest_command_dut_scope(testinstance: TestInstance):
    """A configured pytest_dut_scope must surface as a --dut-scope argument."""
    dut_scope = 'session'
    testinstance.testsuite.harness_config['pytest_dut_scope'] = dut_scope
    harness = Pytest()
    harness.configure(testinstance)
    assert f'--dut-scope={dut_scope}' in harness.generate_command()
def test_pytest_command_extra_args(testinstance: TestInstance):
    """Extra pytest args from the harness config are passed through verbatim."""
    extra_args = ['-k test1', '-m mark1']
    testinstance.testsuite.harness_config['pytest_args'] = extra_args
    harness = Pytest()
    harness.configure(testinstance)
    generated = harness.generate_command()
    assert all(arg in generated for arg in extra_args)
def test_pytest_command_extra_args_in_options(testinstance: TestInstance):
    """Command-line pytest args must be merged with the YAML-configured ones."""
    from_yaml = '--extra-option'
    from_cmd = ['-k', 'test_from_cmd']
    testinstance.testsuite.harness_config['pytest_args'] = [from_yaml]
    testinstance.handler.options.pytest_args = from_cmd

    harness = Pytest()
    harness.configure(testinstance)
    generated = harness.generate_command()

    for arg in (*from_cmd, from_yaml):
        assert arg in generated
# Each case maps a raw 'pytest_root' list from the harness config to the
# fully-resolved paths expected on the generated pytest command line.
# Resolution covers: paths relative to the suite dir, '..' traversal,
# absolute paths, '~' (HOME), environment variables, and pytest
# '::test'/'[param]' selectors appended to a file path.
@pytest.mark.parametrize(
    ('pytest_root', 'expected'),
    [
        (
            ['pytest/test_shell_help.py'],
            ['samples/hello/pytest/test_shell_help.py']
        ),
        (
            ['pytest/test_shell_help.py', 'pytest/test_shell_version.py', 'test_dir'],
            ['samples/hello/pytest/test_shell_help.py',
             'samples/hello/pytest/test_shell_version.py',
             'samples/hello/test_dir']
        ),
        (
            ['../shell/pytest/test_shell.py'],
            ['samples/shell/pytest/test_shell.py']
        ),
        (
            ['/tmp/test_temp.py'],
            ['/tmp/test_temp.py']
        ),
        (
            ['~/tmp/test_temp.py'],
            ['/home/joe/tmp/test_temp.py']
        ),
        (
            ['$ZEPHYR_BASE/samples/subsys/testsuite/pytest/shell/pytest'],
            ['/zephyr_base/samples/subsys/testsuite/pytest/shell/pytest']
        ),
        (
            ['pytest/test_shell_help.py::test_A', 'pytest/test_shell_help.py::test_B'],
            ['samples/hello/pytest/test_shell_help.py::test_A',
             'samples/hello/pytest/test_shell_help.py::test_B']
        ),
        (
            ['pytest/test_shell_help.py::test_A[param_a]'],
            ['samples/hello/pytest/test_shell_help.py::test_A[param_a]']
        )
    ],
    ids=[
        'one_file',
        'more_files',
        'relative_path',
        # NOTE(review): 'absollute_path' is a typo ('absolute_path') but it is
        # a runtime test id; kept as-is so test selection names stay stable.
        'absollute_path',
        'user_dir',
        'with_env_var',
        'subtests',
        'subtest_with_param'
    ]
)
def test_pytest_handle_source_list(testinstance: TestInstance, monkeypatch, pytest_root, expected):
    """Check that every pytest_root entry is resolved to the expected path on
    the generated command line (HOME and ZEPHYR_BASE are pinned for the test)."""
    monkeypatch.setenv('ZEPHYR_BASE', '/zephyr_base')
    monkeypatch.setenv('HOME', '/home/joe')
    testinstance.testsuite.harness_config['pytest_root'] = pytest_root
    pytest_harness = Pytest()
    pytest_harness.configure(testinstance)
    command = pytest_harness.generate_command()
    for pytest_src in expected:
        assert pytest_src in command
def test_if_report_is_parsed(pytester, testinstance: TestInstance):
    """A junit report from an all-passing run must yield 'passed' everywhere."""
    content = textwrap.dedent("""
        def test_1():
            pass
        def test_2():
            pass
    """)
    test_file = pytester.path / 'test_valid.py'
    test_file.write_text(content)
    report_file = Path('report.xml')
    result = pytester.runpytest(
        str(test_file),
        f'--junit-xml={str(report_file)}'
    )
    result.assert_outcomes(passed=2)
    assert report_file.is_file()

    harness = Pytest()
    harness.configure(testinstance)
    harness.report_file = report_file
    harness._update_test_status()

    assert harness.status == "passed"
    assert testinstance.status == "passed"
    assert len(testinstance.testcases) == 2
    assert all(tc.status == "passed" for tc in testinstance.testcases)
def test_if_report_with_error(pytester, testinstance: TestInstance):
    """Raised exceptions and failed asserts must both be reported as 'failed'."""
    content = textwrap.dedent("""
        def test_exp():
            raise Exception('Test error')
        def test_err():
            assert False
    """)
    test_file = pytester.path / 'test_error.py'
    test_file.write_text(content)
    report_file = pytester.path / 'report.xml'
    result = pytester.runpytest(
        str(test_file),
        f'--junit-xml={str(report_file)}'
    )
    result.assert_outcomes(failed=2)
    assert report_file.is_file()

    harness = Pytest()
    harness.configure(testinstance)
    harness.report_file = report_file
    harness._update_test_status()

    assert harness.status == "failed"
    assert testinstance.status == "failed"
    assert len(testinstance.testcases) == 2
    for tc in testinstance.testcases:
        # Every failed case must carry its captured output and a reason.
        assert tc.status == "failed"
        assert tc.output
        assert tc.reason
    assert testinstance.reason
    assert '2/2' in testinstance.reason
def test_if_report_with_skip(pytester, testinstance: TestInstance):
    """Statically and dynamically skipped tests must both end up 'skipped'."""
    content = textwrap.dedent("""
        import pytest
        @pytest.mark.skip('Test skipped')
        def test_skip_1():
            pass
        def test_skip_2():
            pytest.skip('Skipped on runtime')
    """)
    test_file = pytester.path / 'test_skip.py'
    test_file.write_text(content)
    report_file = pytester.path / 'report.xml'
    result = pytester.runpytest(
        str(test_file),
        f'--junit-xml={str(report_file)}'
    )
    result.assert_outcomes(skipped=2)
    assert report_file.is_file()

    harness = Pytest()
    harness.configure(testinstance)
    harness.report_file = report_file
    harness._update_test_status()

    assert harness.status == "skipped"
    assert testinstance.status == "skipped"
    assert len(testinstance.testcases) == 2
    assert all(tc.status == "skipped" for tc in testinstance.testcases)
def test_if_report_with_filter(pytester, testinstance: TestInstance):
    """Only the test selected with -k may appear in the parsed report."""
    content = textwrap.dedent("""
        import pytest
        def test_A():
            pass
        def test_B():
            pass
    """)
    test_file = pytester.path / 'test_filter.py'
    test_file.write_text(content)
    report_file = pytester.path / 'report.xml'
    result = pytester.runpytest(
        str(test_file),
        '-k', 'test_B',
        f'--junit-xml={str(report_file)}'
    )
    result.assert_outcomes(passed=1)
    assert report_file.is_file()

    harness = Pytest()
    harness.configure(testinstance)
    harness.report_file = report_file
    harness._update_test_status()

    assert harness.status == "passed"
    assert testinstance.status == "passed"
    assert len(testinstance.testcases) == 1
def test_if_report_with_no_collected(pytester, testinstance: TestInstance):
    """When the filter deselects everything, the suite must end up 'skipped'."""
    content = textwrap.dedent("""
        import pytest
        def test_A():
            pass
    """)
    test_file = pytester.path / 'test_filter.py'
    test_file.write_text(content)
    report_file = pytester.path / 'report.xml'
    result = pytester.runpytest(
        str(test_file),
        '-k', 'test_B',
        f'--junit-xml={str(report_file)}'
    )
    result.assert_outcomes(passed=0)
    assert report_file.is_file()

    harness = Pytest()
    harness.configure(testinstance)
    harness.report_file = report_file
    harness._update_test_status()

    assert harness.status == "skipped"
    assert testinstance.status == "skipped"
``` | /content/code_sandbox/scripts/tests/twister/pytest_integration/test_harness_pytest.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,178 |
```shell
#!/usr/bin/env bash
#
#
#

# check the last n patches from the current branch for errors
# usage: maintainer-checkpatch.bash [(-n <num commits>) | (-c <commit>)] [-s]
# where: -n <num commits> selects the last n commits (default: 1)
#        -c <commit> selects the "since" commit
#        -s asks for a summary instead of details
#
# -c and -n are mutually exclusive

exe_name=$(basename "$0")

checkpatch_switches="\
	--patch \
	--no-tree \
	--show-types \
	--max-line-length=100 \
"
ignore_list=BRACES,PRINTK_WITHOUT_KERN_LEVEL,SPLIT_STRING,FILE_PATH_CHANGES,GERRIT_CHANGE_ID

timestamp_bin=${ZEPHYR_BASE}/scripts/checkpatch/timestamp
timestamp="${timestamp_bin} -u"
checkpatch_bin=${ZEPHYR_BASE}/scripts/checkpatch.pl
checkpatch="${checkpatch_bin} ${checkpatch_switches} --ignore ${ignore_list}"

ts=$(${timestamp})
outdir=/tmp/${exe_name}-${ts}

declare num_commits=1
declare summary=n
declare since_commit=""

function usage {
	printf "usage: %s [(-n <num commits>) | (-c <commit>)] [-s]\n" "${exe_name}" >&2
}

function fail {
	usage
	# POSIX exit statuses are 0-255; 'exit -1' is undefined, use 1.
	exit 1
}

function format_patch_fail {
	printf "'git format-patch' failed\n"
	exit 1
}

# Abort early unless all helper tools can be resolved.
function verify_needed {
	needed="\
		${timestamp_bin} \
		${checkpatch_bin} \
	"
	for i in ${needed}; do
		type "$i" &>/dev/null
		if [ $? != 0 ]; then
			printf "need '%s' but not found in PATH\n" "$i" >&2
			exit 1
		fi
	done
}

function get_opts {
	declare -r optstr="n:c:sh"
	while getopts ${optstr} opt; do
		case ${opt} in
			n) num_commits=${OPTARG} ;;
			c) since_commit=${OPTARG} ;;
			s) summary=y ;;
			h) usage; exit 0 ;;
			*) fail ;;
		esac
	done

	# -n and -c are mutually exclusive.
	if [ "${num_commits}" != 1 -a "x${since_commit}" != x ]; then
		fail
	fi
}

verify_needed
# Quote "$@" so arguments survive word splitting.
get_opts "$@"

if [ "x${since_commit}" != x ]; then
	since=${since_commit}
else
	since=HEAD~${num_commits}
fi

git format-patch ${since} -o "${outdir}" 2>/dev/null >/dev/null
[ $? = 0 ] || format_patch_fail

# Iterate the patch files with a glob instead of parsing 'ls' output, and
# print the name through a "%s" format so '%' in file names cannot be
# interpreted as a printf directive.
for i in "${outdir}"/*; do
	printf "\n%s\n" "$(basename "${i}")"
	if [ ${summary} = y ]; then
		${checkpatch} "$i" | grep "total:"
	else
		${checkpatch} "$i"
	fi
done

rm -rf "${outdir}"
``` | /content/code_sandbox/scripts/checkpatch/maintainer-checkpatch.bash | shell | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 650 |
```unknown
_cpu_arch_t
k_mem_partition_attr_t
k_timepoint_t
mbedtls_pk_context
z_arch_esf_t
pinctrl_soc_pin_t
io_rw_32
\b[a-zA-Z_][a-zA-Z0-9_]*TypeDef
Pwm
FILE
``` | /content/code_sandbox/scripts/checkpatch/typedefsfile | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 60 |
```shell
#!/usr/bin/env bash
#
#
#
# crawls the source tree to find out the amount of checkpatch issues
# and optionally update scripts/known_checkpatch_issues
# usage: check_known_checkpatch_issues.sh [-u]
# where: -u updates the known_checkpatch_issues db and commits it
#        -q is the quiet mode (don't display the diff on stdout)
exe_name=$(basename $0)

# Helper scripts this one depends on (resolved via ZEPHYR_BASE).
do_checkpatch_bin=${ZEPHYR_BASE}/scripts/checkpatch/do_checkpatch.sh
timestamp_bin=${ZEPHYR_BASE}/scripts/checkpatch/timestamp

declare update=n
declare quiet=n

function usage {
	printf "usage: %s [-u][-q]\n" ${exe_name} >&2
}

# Print usage and abort.
# NOTE(review): 'exit -1' is undefined by POSIX (bash maps it to 255);
# consider 'exit 1'.
function fail {
	usage
	exit -1
}

# Abort unless every required helper can be resolved.
function verify_needed {
	needed="\
		${do_checkpatch_bin} \
		${timestamp_bin} \
	"
	for i in ${needed}; do
		type $i &>/dev/null
		if [ $? != 0 ]; then
			printf "need '%s' but not found in PATH\n" $i >&2
			exit -1
		fi
	done
}

# Parse -u (update the database), -q (quiet) and -h (help).
function get_opts {
	declare -r optstr="quh"
	while getopts ${optstr} opt; do
		case ${opt} in
			u) update=y ;;
			q) quiet=y ;;
			h) usage; exit 0 ;;
			*) fail ;;
		esac
	done
}

verify_needed
get_opts $@

do_checkpatch=${do_checkpatch_bin}
timestamp="${timestamp_bin} -u"

# Build a unique suffix (uid + pid + timestamp) for temporary files so
# concurrent runs do not clobber each other.
ts=$(${timestamp})
uid=$(id -u)
pid=$$
suffix=${uid}-${pid}-${ts}

checkpatch_results=/tmp/checkpatch.results-${suffix}
known_checkpatch_issues=${ZEPHYR_BASE}/scripts/known_checkpatch_issues
checkpatch_issues=/tmp/checkpatch_issues-${suffix}

git_log_params="\
	--abbrev=8 \
	--abbrev-commit \
"

# Record the current HEAD first, then append fresh checkpatch statistics.
commit_id_str=$(git log ${git_log_params} HEAD | head -n 1)
echo ${commit_id_str} > ${checkpatch_issues}
${do_checkpatch} ${checkpatch_results} >> ${checkpatch_issues}

# Diff the fresh results against the known-issues database.
diff_file=/tmp/checkpatch.results.diff-${suffix}
diff -u ${known_checkpatch_issues} ${checkpatch_issues} > ${diff_file}
if [ ${quiet} = n ]; then
	cat ${diff_file}
fi

# find all lines that starts with '+' but not '+commit' or '+++ diff'
# Four parallel arrays: error-type name and its count, once for removed
# ('-') diff lines and once for added ('+') lines, with the diff headers
# and commit lines filtered out.
minuses_err_str=(\
	$(cat ${diff_file} | \
	grep -v -E "^\-\-\-" | grep -v -E "^\-commit " | grep -E "^\-" | \
	awk '{print $1}' | cut -d\- -f 2-) \
)
minuses_num_err=(\
	$(cat ${diff_file} | \
	grep -v -E "^\-\-\-" | grep -v -E "^\-commit " | grep -E "^\-" | \
	awk '{print $2}') \
)
plusses_err_str=(\
	$(cat ${diff_file} | \
	grep -v -E "^\+\+\+" | grep -v -E "^\+commit " | grep -E "^\+" | \
	awk '{print $1}' | cut -d\+ -f 2-) \
)
plusses_num_err=(\
	$(cat ${diff_file} | \
	grep -v -E "^\+\+\+" | grep -v -E "^\+commit " | grep -E "^\+" | \
	awk '{print $2}') \
)

exit_code=0
declare -i num_plusses=${#plusses_num_err[@]}
declare -i num_minuses=${#minuses_num_err[@]}
declare -i test_num=${num_plusses}
# For every added ('+') error line, look for a removed ('-') line with the
# same error type. A higher count than before, or no match at all, means
# a new checkpatch issue was introduced -> exit non-zero.
while [ ${test_num} -gt 0 ]; do
	test_num+=-1
	match=n
	declare -i i=${num_minuses}
	while [ $i -gt 0 ]; do
		i+=-1
		if [ ${plusses_err_str[${test_num}]} = ${minuses_err_str[$i]} ]; then
			n_minus=${minuses_num_err[$i]}
			n_plus=${plusses_num_err[${test_num}]}
			if [ ${n_plus} -gt ${n_minus} ]; then
				# Count for this error type increased.
				exit_code=1
				break 2
			fi
			match=y
			break 1
		fi
	done
	if [ ${match} = n ]; then
		# there was no match for the plus line, so that is a new error
		exit_code=1
		break 1
	fi
done

# With -u, replace the database with the fresh results and commit it.
if [ ${update} = y ]; then
	msg="known_checkpatch_issues: updating to ${commit_id_str}"
	cp ${checkpatch_issues} ${known_checkpatch_issues}
	git add ${known_checkpatch_issues}
	git commit -m "${msg}"
fi

exit ${exit_code}
``` | /content/code_sandbox/scripts/checkpatch/check_known_checkpatch_issues.sh | shell | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,057 |
```yaml
# This file contains information on what files are associated with which
# twister tag.
#
# File format
# ###########
#
# "tag" (the quotes are only needed for titles with special characters,
# like colons):
# files:
# List of paths and/or glob patterns giving the files in the tag,
# relative to the root directory.
#
# If a path or glob pattern ends in a '/', it matches all files within
# the given directory or directories. Otherwise, an exact match is
# required.
#
# Paths to directories should always have a trailing '/'.
#
# files-regex:
# List of regular expressions applied to paths to determine if they
# belong to the tag. The regular expression may match anywhere within
# the path, but can be anchored with ^ and $ as usual.
#
# Can be combined with a 'files' key.
#
# Note: Prefer plain 'files' patterns where possible. get_maintainer.py
# will check that they match some file, but won't check regexes
# (because it might be slow).
#
# files-exclude:
# Like 'files', but any matching files will be excluded from the tag.
#
# files-regex-exclude:
# Like 'files-regex', but any matching files will be excluded from the
# tag.
#
# All tags must have a 'files' and/or 'files-regex' key.
# 1. Avoid putting include/ in entries as any include/ change we want
# to get test coverage as broad as possible.
# 2. Keep tag entries sorted alphabetically
bluetooth:
files:
- drivers/bluetooth/
- subsys/bluetooth/
- subsys/net/l2/bluetooth/
net:
files:
- subsys/net/
- include/zephyr/net/
- drivers/wifi/
- drivers/net/
- drivers/ethernet/
- drivers/ieee802154/
- drivers/ptp_clock/
test_framework:
files:
- subsys/testsuite/
- samples/subsys/testsuite/
- tests/subsys/testsuite/
- tests/ztest/
cmsis_dsp:
files:
- modules/Kconfig.cmsis_dsp
# we have no means of telling which files changed in a module, so assume any
# change to west.yml is updating something we need to re-test for.
- west.yml
mcumgr:
files:
- subsys/mgmt/mcumgr/
- tests/subsys/mgmt/mcumgr/
- samples/subsys/mgmt/mcumgr/
- include/zephyr/mgmt/mcumgr/
kernel:
files:
- kernel/
- arch/
posix:
files:
- lib/posix/
# cbprintf:
# files:
# - lib/os/cbprintf*
# - lib/posix
``` | /content/code_sandbox/scripts/ci/tags.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 628 |
```python
#!/usr/bin/env python3
#
#
"""Check minimal libc error numbers against newlib.
This script loads the errno.h included in Zephyr's minimal libc and checks its
contents against the SDK's newlib errno.h. This is done to ensure that both C
libraries are aligned at all times.
"""
import os
from pathlib import Path
import re
import sys
def parse_errno(path):
    """Parse an errno.h header and return a list of (name, value) tuples.

    Matches lines of the form ``#define ENAME 123``. The name pattern
    allows digits after the first letter so entries such as ``E2BIG``
    are included (the previous ``[A-Z]+`` pattern silently skipped any
    errno name containing a digit).
    """
    r = re.compile(r'^\s*#define\s+([A-Z][A-Z0-9]*)\s+([0-9]+)')
    errnos = []
    with open(path, 'r') as f:
        for line in f:
            m = r.match(line)
            if m:
                errnos.append(m.groups())
    return errnos
def main():
    """Compare minimal libc's errno.h against the SDK newlib errno.h.

    Exits with status 1 (message on stderr) if a required environment
    variable is missing, or if any minimal-libc errno entry is absent
    from newlib or mapped to a different value.
    """
    minimal = Path("lib/libc/minimal/include/errno.h")
    newlib = Path("arm-zephyr-eabi/arm-zephyr-eabi/include/sys/errno.h")
    try:
        # str / Path works via Path.__rtruediv__.
        minimal = os.environ['ZEPHYR_BASE'] / minimal
        newlib = os.environ['ZEPHYR_SDK_INSTALL_DIR'] / newlib
    except KeyError as e:
        print(f'Environment variable missing: {e}', file=sys.stderr)
        sys.exit(1)

    minimal_defs = parse_errno(minimal)
    # Build a name -> value map once (keeping the first occurrence, like
    # the original next(filter(...)) lookup) instead of rescanning the
    # newlib list for every minimal entry (was O(n^2)).
    newlib_defs = {}
    for name, value in parse_errno(newlib):
        newlib_defs.setdefault(name, value)

    for name, value in minimal_defs:
        if newlib_defs.get(name) != value:
            print('Invalid entry in errno.h:', file=sys.stderr)
            print(f'{name} (with value {value})', file=sys.stderr)
            sys.exit(1)
    print('errno.h validated correctly')
# Run the validation when executed as a script.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/ci/errno.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 393 |
```python
#!/usr/bin/env python3
# This script upload test ci results to the zephyr ES instance for reporting and analysis.
# see path_to_url
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
import sys
import os
import json
import argparse
def gendata(f, index, run_date=None, run_id=None, run_attempt=None):
    """Yield Elasticsearch bulk actions for every testsuite in report *f*.

    Each suite is annotated with the report-wide environment block
    (optionally stamped with run_date/run_id/run_attempt) and with
    component/sub_component fields derived from the suite name.
    """
    with open(f, "r") as j:
        data = json.load(j)

    # The environment block is shared by all suites; stamp it once.
    env = data['environment']
    if run_date:
        env['run_date'] = run_date
    if run_id:
        env['run_id'] = run_id
    if run_attempt:
        env['run_attempt'] = run_attempt

    for t in data['testsuites']:
        # Component/sub-component are encoded in the last path element of
        # the suite name, e.g. "tests/foo/bar.group.case" -> bar / group.
        parts = t['name'].split("/")[-1].split(".")
        t['environment'] = env
        t['component'] = parts[0]
        # Suite names without a '.' used to raise IndexError; use an empty
        # sub-component for them instead.
        t['sub_component'] = parts[1] if len(parts) > 1 else ''
        yield {
            "_index": index,
            "_source": t
        }
def main():
    """Upload twister JSON reports to Elasticsearch (or create the index).

    With --dry-run the documents are printed instead of uploaded; with
    --create-index only the index (with its settings/mappings) is created.
    """
    args = parse_args()

    # --index is required, but keep the historical fallback name.
    if args.index:
        index_name = args.index
    else:
        index_name = 'tests-zephyr-1'

    settings = {
        "index": {
            "number_of_shards": 4
        }
    }
    mappings = {
        "properties": {
            "execution_time": {"type": "float"},
            "retries": {"type": "integer"},
            "testcases.execution_time": {"type": "float"},
        }
    }

    if args.dry_run:
        # Print the would-be documents for EVERY input file. Previously only
        # the generator for the last file was consumed, so earlier files
        # were silently skipped in dry runs.
        for f in args.files:
            for doc in gendata(f, index_name, args.run_date, args.run_id, args.run_attempt):
                print(doc)
        sys.exit(0)

    es = Elasticsearch(
        [os.environ['ELASTICSEARCH_SERVER']],
        api_key=os.environ['ELASTICSEARCH_KEY'],
        verify_certs=False
    )

    if args.create_index:
        es.indices.create(index=index_name, mappings=mappings, settings=settings)
    else:
        if args.run_date:
            print(f"Setting run date from command line: {args.run_date}")
        for f in args.files:
            bulk(es, gendata(f, index_name, args.run_date, args.run_id, args.run_attempt))
def parse_args():
    """Describe and parse the uploader's command-line interface."""
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('-y', '--dry-run', action="store_true", help='Dry run.')
    parser.add_argument('-c', '--create-index', action="store_true", help='Create index.')
    parser.add_argument('-i', '--index', help='index to push to.', required=True)
    parser.add_argument('-r', '--run-date', help='Run date in ISO format', required=False)
    parser.add_argument('--run-id', required=False,
                        help="unique run-id (e.g. from github.run_id context)")
    parser.add_argument('--run-attempt', required=False,
                        help="unique run attempt number (e.g. from github.run_attempt context)")
    parser.add_argument('files', metavar='FILE', nargs='+', help='file with test data.')
    return parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/scripts/ci/upload_test_results_es.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 723 |
```python
#!/usr/bin/env python3
import os
import json
import argparse
from opensearchpy import OpenSearch
from opensearchpy.helpers import bulk
host = "dashboards.staging.zephyrproject.io"
port = 443
def main():
    """Bulk-upload twister JSON reports into the staging OpenSearch instance."""
    args = parse_args()

    # Prefer credentials from the command line; fall back to the environment.
    if args.user and args.password:
        auth = (args.user, args.password)
    else:
        auth = (os.environ['OPENSEARCH_USER'], os.environ['OPENSEARCH_PASS'])

    client = OpenSearch(
        hosts=[{'host': host, 'port': port}],
        http_auth=auth,
        use_ssl=True,
        verify_certs=False,
        ssl_assert_hostname=False,
        ssl_show_warn=False,
    )

    index_name = args.index
    for f in args.files:
        with open(f, "r") as j:
            data = json.load(j)
            # One document per testsuite, keyed by its run_id and annotated
            # with the report-wide environment block.
            actions = []
            for suite in data['testsuites']:
                suite['environment'] = data['environment']
                actions.append({
                    "_index": index_name,
                    "_id": suite['run_id'],
                    "_source": suite,
                })
            bulk(client, actions)
def parse_args():
    """Describe and parse the command-line options."""
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('-u', '--user', help='username')
    parser.add_argument('-p', '--password', help='password')
    parser.add_argument('-i', '--index', help='index to push to.', required=True)
    parser.add_argument('files', metavar='FILE', nargs='+', help='file with test data.')
    return parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
``` | /content/code_sandbox/scripts/ci/upload_test_results.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 357 |
```python
#!/usr/bin/env python3
"""
Syntax of file:
[
{
"version": "<commit>",
"date": "<date>",
        "weekly": False,
},
]
"""
import json
import argparse
import urllib.request
import os
import tempfile
from git import Git
from datetime import datetime
VERSIONS_FILE = "versions.json"
def parse_args():
    """Define and parse the command-line options."""
    cli = argparse.ArgumentParser(
        description="Manage versions to be tested.", allow_abbrev=False)
    cli.add_argument('-l', '--list', action="store_true",
                     help="List all published versions")
    cli.add_argument('-u', '--update',
                     help="Update versions file from tree.")
    cli.add_argument('-L', '--latest', action="store_true",
                     help="Get latest published version")
    cli.add_argument('-w', '--weekly', action="store_true",
                     help="Mark as weekly")
    cli.add_argument('-W', '--list-weekly', action="store_true",
                     help="List weekly commits")
    cli.add_argument('-v', '--verbose', action="store_true",
                     help="Verbose output")
    return cli.parse_args()
def get_versions():
    """Return the list of published versions.

    Reads the local ``versions.json`` when it exists; otherwise downloads
    the published copy into a temporary file and parses that. The previous
    implementation always parsed the temporary file, so an existing local
    ``versions.json`` was ignored and an empty temp file was handed to
    ``json.load``.
    """
    if os.path.exists('versions.json'):
        with open('versions.json', "r") as fp:
            return json.load(fp)

    # NOTE(review): placeholder URL (sanitized upstream); the original line
    # was also missing its closing quote.
    url = 'path_to_url'
    with tempfile.NamedTemporaryFile() as fo:
        urllib.request.urlretrieve(url, fo.name)
        with open(fo.name, "r") as fp:
            return json.load(fp)
def handle_compat(item):
    """Normalize a versions-file entry to the current dict schema.

    Older files stored bare version strings; wrap those in a dict with
    default 'weekly'/'date' fields. Dict entries pass through unchanged.
    """
    if isinstance(item, str):
        return {'version': item, 'weekly': False, 'date': None}
    return item
def show_versions(weekly=False):
    """Print published versions; with *weekly*, only the weekly-marked ones."""
    for entry in get_versions():
        item = handle_compat(entry)
        is_weekly = item.get('weekly', False)
        if weekly and not is_weekly:
            continue
        if not args.verbose:
            print(f"{item['version']}")
            continue
        # Verbose: add publication date and the weekly marker when present.
        wstr = "(marked for weekly testing)" if is_weekly else ""
        datestr = ""
        if item.get('date'):
            pdate = datetime.strptime(item['date'], '%Y-%m-%dT%H:%M:%S.%f')
            date = pdate.strftime("%b %d %Y %H:%M:%S")
            datestr = f"published on {date}"
        print(f"- {item['version']} {datestr} {wstr}")
def show_latest():
    """Print the most recently published version (last file entry)."""
    item = handle_compat(get_versions()[-1])
    ver = item.get("version")
    date = item.get("date", False)
    datestr = f"published on {date}" if date else ""
    if args.verbose:
        print(f"Latest version is {ver} {datestr}")
        if item.get('weekly'):
            print("This version is marked for weekly testing.")
    else:
        print(f"{ver}")
def update(git_tree, is_weekly=False):
    """Append the current tree's describe-version to the versions file.

    Versions published on a Monday are automatically marked weekly. The
    file is rewritten only when there is existing data and the version is
    not already present.
    """
    version = Git(git_tree).describe("--abbrev=12")
    today = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')

    if not is_weekly and datetime.today().strftime('%A') == 'Monday':
        is_weekly = True

    data = get_versions()
    published = any(
        (isinstance(item, dict) and item.get('version') == version) or item == version
        for item in data
    )
    if published:
        print("version already published")
    else:
        print(f"New version {version}, adding to file...")

    if data and not published:
        with open(VERSIONS_FILE, "w") as versions:
            data.append({'version': version, 'date': today, 'weekly': is_weekly})
            json.dump(data, versions)
def main():
    """Dispatch to the requested action based on the CLI flags."""
    global args
    args = parse_args()
    if args.update:
        update(args.update, args.weekly)
        return
    if args.list or args.list_weekly:
        show_versions(weekly=args.list_weekly)
        return
    if args.latest:
        show_latest()
        return
    print("You did not specify any options")
main()
``` | /content/code_sandbox/scripts/ci/version_mgr.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 976 |
```python
#!/usr/bin/env python3
import argparse
import collections
from email.utils import parseaddr
import json
import logging
import os
from pathlib import Path
import re
import subprocess
import sys
import tempfile
import traceback
import shlex
import shutil
import textwrap
import unidiff
from yamllint import config, linter
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error, Failure
import magic
from west.manifest import Manifest
from west.manifest import ManifestProject
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
from get_maintainer import Maintainers, MaintainersError
import list_boards
import list_hardware
logger = None
def git(*args, cwd=None, ignore_non_zero=False):
    """Run a git command and return its rstrip()ed stdout.

    Called like git("diff"). Unless *ignore_non_zero* is set, a non-zero
    exit status or any stderr output aborts via err() (which raises
    SystemExit). *cwd* is the working directory (default: current).
    """
    git_cmd = ("git",) + args
    try:
        cp = subprocess.run(git_cmd, capture_output=True, cwd=cwd)
    except OSError as e:
        err(f"failed to run '{cmd2str(git_cmd)}': {e}")

    stdout = cp.stdout.decode("utf-8")
    if not ignore_non_zero and (cp.returncode or cp.stderr):
        err(f"'{cmd2str(git_cmd)}' exited with status {cp.returncode} and/or "
            f"wrote to stderr.\n"
            f"==stdout==\n"
            f"{stdout}\n"
            f"==stderr==\n"
            f"{cp.stderr.decode('utf-8')}\n")
    return stdout.rstrip()
def get_shas(refspec):
    """
    Returns the list of Git SHAs for 'refspec'.

    A refspec containing '.' (a range such as rev1..rev2) expands to every
    commit in the range; any other refspec yields only that single commit.
    """
    max_count = -1 if "." in refspec else 1
    return git('rev-list', f'--max-count={max_count}', refspec).split()
def get_files(filter=None, paths=None):
    """Return the files touched in COMMIT_RANGE, optionally restricted by a
    git --diff-filter value and/or a path list. Entries that are not regular
    files (e.g. submodule directories) are dropped."""
    filter_args = (f'--diff-filter={filter}',) if filter else ()
    path_args = ('--', *paths) if paths else ()
    out = git('diff', '--name-only', *filter_args, COMMIT_RANGE, *path_args)
    return [file for file in out.splitlines()
            if os.path.isfile(os.path.join(GIT_TOP, file))]
class FmtdFailure(Failure):
    """A Failure whose fields are kept in a standardized, formatted form."""

    def __init__(self, severity, title, file, line=None, col=None, desc=""):
        self.severity = severity
        self.title = title
        self.file = file
        self.line = line
        self.col = col
        self.desc = desc

        # Long, multi-line text for the report body.
        txt = f'\n{title}' + (f':{desc}' if desc else '') + f'\nFile:{file}'
        if line:
            txt += f'\nLine:{line}'
        if col:
            txt += f'\nColumn:{col}'

        # Short one-line message: "<file>[:<line>] <desc-or-title>".
        msg = f'{file}' + (f':{line}' if line else '') + f' {desc or title}'

        super().__init__(msg, severity.lower())
        self.text = txt
class ComplianceTest:
    """
    Base class for tests. Subclasses should provide a run() method and set
    these class variables:

    name:
        Test name

    doc:
        Link to documentation related to what's being tested

    path_hint:
        The path the test runs itself in. Purely informative; it is shown in
        the message printed when the test runs.

        Two magic strings may be used instead of a real path:
        - "<zephyr-base>": the ZEPHYR_BASE environment variable or, when that
          is missing, the calculated base of the zephyr tree
        - "<git-top>": the top-level repository directory. The magic string
          avoids running 'git' to find the top-level directory before main()
          runs (class variable assignments execute when the 'class ...'
          statement runs), so errors are not swallowed and main() can report
          them to GitHub
    """

    def __init__(self):
        self.case = TestCase(type(self).name, "Guidelines")
        # Failure can be subclassed, but the subclass is lost when the result
        # is restored from the element tree, so formatted failures are
        # tracked separately here.
        self.fmtd_failures = []

    def _result(self, res, text):
        # Attach the stripped text to the result and record it on the case.
        res.text = text.rstrip()
        self.case.result += [res]

    def error(self, text, msg=None, type_="error"):
        """
        Signals a problem with running the test, with message 'msg'.

        Raises an exception internally, so no 'return' is needed after
        calling error().
        """
        self._result(Error(msg or f'{type(self).name} error', type_), text)
        raise EndTest

    def skip(self, text, msg=None, type_="skip"):
        """
        Signals that the test should be skipped, with message 'msg'.

        Raises an exception internally, so no 'return' is needed after
        calling skip().
        """
        self._result(Skipped(msg or f'{type(self).name} skipped', type_), text)
        raise EndTest

    def failure(self, text, msg=None, type_="failure"):
        """
        Signals that the test failed, with message 'msg'. May be called
        several times within the same test to report multiple failures.
        """
        self._result(Failure(msg or f'{type(self).name} issues', type_), text)

    def fmtd_failure(self, severity, title, file, line=None, col=None, desc=""):
        """
        Signals that the test failed, storing the information in a formatted,
        standardized manner. May be called several times within the same test
        to report multiple failures.
        """
        fail = FmtdFailure(severity, title, file, line, col, desc)
        self._result(fail, fail.text)
        self.fmtd_failures.append(fail)
class EndTest(Exception):
    """
    Raised by ComplianceTest.error()/skip() to end the test.

    Tests may also raise EndTest directly to abort immediately, e.g. from
    within a nested function call.
    """
class CheckPatch(ComplianceTest):
    """
    Runs checkpatch and reports found issues
    """
    name = "Checkpatch"
    doc = "See path_to_url#coding-style for more details."
    path_hint = "<git-top>"

    def run(self):
        checkpatch = os.path.join(ZEPHYR_BASE, 'scripts', 'checkpatch.pl')
        if not os.path.exists(checkpatch):
            self.skip(f'{checkpatch} not found')

        # Pipe the diff for the commit range straight into checkpatch.
        diff = subprocess.Popen(('git', 'diff', '--no-ext-diff', COMMIT_RANGE),
                                stdout=subprocess.PIPE,
                                cwd=GIT_TOP)
        try:
            # No shell=True: combining shell=True with an argument sequence
            # runs only the first element through the shell, so the
            # '--mailback --no-tree -' options were never passed to
            # checkpatch.pl on POSIX systems.
            subprocess.run((checkpatch, '--mailback', '--no-tree', '-'),
                           check=True,
                           stdin=diff.stdout,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           cwd=GIT_TOP)
        except subprocess.CalledProcessError as ex:
            output = ex.output.decode("utf-8")
            # Match "<file>:<line>: <SEVERITY>:<type>:<message>" followed by
            # the "#<n>: FILE: <path>:<line>:" location line.
            regex = r'^\s*\S+:(\d+):\s*(ERROR|WARNING):(.+?):(.+)(?:\n|\r\n?)+' \
                    r'^\s*#(\d+):\s*FILE:\s*(.+):(\d+):'
            matches = re.findall(regex, output, re.MULTILINE)
            for m in matches:
                self.fmtd_failure(m[1].lower(), m[2], m[5], m[6], col=None,
                                  desc=m[3])

            # If the regex has not matched anything, report the whole output
            # as a single failure so nothing is silently dropped.
            if len(matches) == 0:
                self.failure(output)
class BoardYmlCheck(ComplianceTest):
    """
    Check the board.yml files
    """
    name = "BoardYml"
    doc = "Check the board.yml file format"
    path_hint = "<zephyr-base>"

    def check_board_file(self, file, vendor_prefixes):
        """Validate a single board file against the known vendor prefixes."""
        with open(file) as fp:
            for line_num, line in enumerate(fp.readlines(), start=1):
                if "vendor:" in line:
                    # maxsplit=1: a value containing another ':' previously
                    # produced three parts and made the two-name unpacking
                    # raise ValueError.
                    _, vnd = line.strip().split(":", 1)
                    vnd = vnd.strip()
                    if vnd not in vendor_prefixes:
                        desc = f"invalid vendor: {vnd}"
                        self.fmtd_failure("error", "BoardYml", file, line_num,
                                          desc=desc)

    def run(self):
        # "others" is always an accepted vendor.
        vendor_prefixes = ["others"]
        with open(os.path.join(ZEPHYR_BASE, "dts", "bindings", "vendor-prefixes.txt")) as fp:
            for line in fp.readlines():
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                # maxsplit=1: a description containing a tab previously made
                # the two-name unpacking raise ValueError.
                vendor, _ = line.split("\t", 1)
                vendor_prefixes.append(vendor)

        path = Path(ZEPHYR_BASE)
        for file in path.glob("**/board.yml"):
            self.check_board_file(file, vendor_prefixes)
class ClangFormatCheck(ComplianceTest):
    """
    Check if clang-format reports any issues
    """
    name = "ClangFormat"
    doc = "See path_to_url#clang-format for more details."
    path_hint = "<git-top>"

    def run(self):
        for file in get_files():
            # clang-format only applies to C sources and headers.
            if Path(file).suffix not in ('.c', '.h'):
                continue

            # Pipe this file's diff over the commit range into
            # clang-format-diff.py; it exits non-zero when it has
            # reformatting suggestions.
            diff = subprocess.Popen(('git', 'diff', '-U0', '--no-color',
                                     COMMIT_RANGE, '--', file),
                                    stdout=subprocess.PIPE,
                                    cwd=GIT_TOP)
            try:
                subprocess.run(('clang-format-diff.py', '-p1'),
                               check=True,
                               stdin=diff.stdout,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               cwd=GIT_TOP)
            except subprocess.CalledProcessError as ex:
                patchset = unidiff.PatchSet.from_string(ex.output,
                                                        encoding="utf-8")
                for patch in patchset:
                    for hunk in patch:
                        # Strip the before and after context
                        msg = "".join(str(diff_line) for diff_line in hunk[3:-3])

                        # show the hunk at the last line
                        self.fmtd_failure("notice",
                                          "You may want to run clang-format on this change",
                                          file,
                                          line=hunk.source_start + hunk.source_length - 3,
                                          desc=f'\r\n{msg}')
class DevicetreeBindingsCheck(ComplianceTest):
    """
    Checks if we are introducing any unwanted properties in Devicetree Bindings.
    """
    name = "DevicetreeBindings"
    doc = "See path_to_url for more details."
    path_hint = "<zephyr-base>"

    def run(self, full=True):
        """
        Run all devicetree binding checks on the added/modified bindings.

        full: unused; kept for signature compatibility with the other checks.
        """
        for dts_binding in self.parse_dt_bindings():
            self.required_false_check(dts_binding)

    def parse_dt_bindings(self):
        """
        Returns a list of dts/bindings/**/*.yaml files
        """
        # Comprehension instead of the manual append loop.
        return [file_name for file_name in get_files(filter="d")
                if 'dts/bindings/' in file_name and file_name.endswith('.yaml')]

    def required_false_check(self, dts_binding):
        """Flag redundant 'required: false' lines in a binding file."""
        with open(dts_binding) as file:
            # enumerate() replaces the hand-maintained line_number counter.
            for line_number, line in enumerate(file, start=1):
                if 'required: false' in line:
                    self.fmtd_failure(
                        'warning', 'Devicetree Bindings', dts_binding,
                        line_number, col=None,
                        desc="'required: false' is redundant, please remove")
class KconfigCheck(ComplianceTest):
    """
    Checks if we are introducing any new warnings/errors with Kconfig,
    for example using undefined Kconfig variables.
    """
    name = "Kconfig"
    doc = "See path_to_url for more details."
    path_hint = "<zephyr-base>"
    def run(self, full=True, no_modules=False, filename="Kconfig", hwm=None):
        """
        Parse the Kconfig tree once, then run every Kconfig-based check on it.

        full: when True, also scan non-Kconfig files for references to
              undefined symbols (the most expensive check)
        no_modules: when True, skip Zephyr module processing entirely
        filename: top-level Kconfig file to parse
        hwm: hardware model selector, forwarded to parse_kconfig()
        """
        self.no_modules = no_modules

        kconf = self.parse_kconfig(filename=filename, hwm=hwm)

        self.check_top_menu_not_too_long(kconf)
        self.check_no_pointless_menuconfigs(kconf)
        self.check_no_undef_within_kconfig(kconf)
        self.check_no_redefined_in_defconfig(kconf)
        self.check_no_enable_in_boolean_prompt(kconf)
        self.check_soc_name_sync(kconf)
        if full:
            self.check_no_undef_outside_kconfig(kconf)
    def get_modules(self, modules_file, settings_file):
        """
        Get a list of modules and put them in a file that is parsed by
        Kconfig

        This is needed to complete Kconfig sanity tests.

        modules_file: path the generated Kconfig snippet is written to
        settings_file: path zephyr_module.py writes its settings output to
        """
        if self.no_modules:
            # Module processing disabled: emit a placeholder so that Kconfig
            # can still 'source' the file.
            with open(modules_file, 'w') as fp_module_file:
                fp_module_file.write("# Empty\n")
            return

        # Invoke the script directly using the Python executable since this is
        # not a module nor a pip-installed Python utility
        zephyr_module_path = os.path.join(ZEPHYR_BASE, "scripts",
                                          "zephyr_module.py")
        cmd = [sys.executable, zephyr_module_path,
               '--kconfig-out', modules_file, '--settings-out', settings_file]
        try:
            subprocess.run(cmd, check=True, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            self.error(ex.output.decode("utf-8"))

        # In-tree modules: every directory under modules/ that carries a
        # Kconfig file. These are prepended to the generated file below.
        modules_dir = ZEPHYR_BASE + '/modules'
        modules = [name for name in os.listdir(modules_dir) if
                   os.path.exists(os.path.join(modules_dir, name, 'Kconfig'))]

        with open(modules_file, 'r') as fp_module_file:
            content = fp_module_file.read()

        # Rewrite the file: one ZEPHYR_<MODULE>_KCONFIG variable per in-tree
        # module, followed by the original zephyr_module.py output.
        with open(modules_file, 'w') as fp_module_file:
            for module in modules:
                fp_module_file.write("ZEPHYR_{}_KCONFIG = {}\n".format(
                    re.sub('[^a-zA-Z0-9]', '_', module).upper(),
                    modules_dir + '/' + module + '/Kconfig'
                ))
            fp_module_file.write(content)
def get_module_setting_root(self, root, settings_file):
"""
Parse the Zephyr module generated settings file given by 'settings_file'
and return all root settings defined by 'root'.
"""
# Invoke the script directly using the Python executable since this is
# not a module nor a pip-installed Python utility
root_paths = []
if os.path.exists(settings_file):
with open(settings_file, 'r') as fp_setting_file:
content = fp_setting_file.read()
lines = content.strip().split('\n')
for line in lines:
root = root.upper()
if line.startswith(f'"{root}_ROOT":'):
_, root_path = line.split(":", 1)
root_paths.append(Path(root_path.strip('"')))
return root_paths
    def get_kconfig_dts(self, kconfig_dts_file, settings_file):
        """
        Generate the Kconfig.dts using dts/bindings as the source.

        This is needed to complete Kconfig compliance tests.

        kconfig_dts_file: path the generated Kconfig file is written to
        settings_file: module settings file used to find extra binding roots
        """
        # Invoke the script directly using the Python executable since this is
        # not a module nor a pip-installed Python utility
        zephyr_drv_kconfig_path = os.path.join(ZEPHYR_BASE, "scripts", "dts",
                                               "gen_driver_kconfig_dts.py")
        binding_paths = []
        binding_paths.append(os.path.join(ZEPHYR_BASE, "dts", "bindings"))

        # Include dts/bindings directories contributed by Zephyr modules too.
        dts_root_paths = self.get_module_setting_root('dts', settings_file)
        for p in dts_root_paths:
            binding_paths.append(p / "dts" / "bindings")

        cmd = [sys.executable, zephyr_drv_kconfig_path,
               '--kconfig-out', kconfig_dts_file, '--bindings-dirs']
        for binding_path in binding_paths:
            cmd.append(binding_path)
        try:
            subprocess.run(cmd, check=True, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            self.error(ex.output.decode("utf-8"))
    def get_v1_model_syms(self, kconfig_v1_file, kconfig_v1_syms_file):
        """
        Generate a symbol define Kconfig file.
        This function creates a file with all Kconfig symbol definitions from
        old boards model so that those symbols will not appear as undefined
        symbols in hardware model v2.

        This is needed to complete Kconfig compliance tests.
        """
        os.environ['HWM_SCHEME'] = 'v1'
        # 'kconfiglib' is global
        # pylint: disable=undefined-variable

        try:
            kconf_v1 = kconfiglib.Kconfig(filename=kconfig_v1_file, warn=False)
        except kconfiglib.KconfigError as e:
            self.failure(str(e))
            raise EndTest

        with open(kconfig_v1_syms_file, 'w') as fp_kconfig_v1_syms_file:
            for s in kconf_v1.defined_syms:
                if s.type != kconfiglib.UNKNOWN:
                    # Re-declare each symbol with just its type so v2 parsing
                    # treats it as defined.
                    fp_kconfig_v1_syms_file.write('config ' + s.name)
                    fp_kconfig_v1_syms_file.write('\n\t' + kconfiglib.TYPE_TO_STR[s.type])
                    fp_kconfig_v1_syms_file.write('\n\n')
    def get_v2_model(self, kconfig_dir, settings_file):
        """
        Get lists of v2 boards and SoCs and put them in a file that is parsed by
        Kconfig

        This is needed to complete Kconfig sanity tests.
        """
        os.environ['HWM_SCHEME'] = 'v2'

        # --- Boards: generated Kconfig, Kconfig.boards, Kconfig.defconfig ---
        kconfig_file = os.path.join(kconfig_dir, 'boards', 'Kconfig')
        kconfig_boards_file = os.path.join(kconfig_dir, 'boards', 'Kconfig.boards')
        kconfig_defconfig_file = os.path.join(kconfig_dir, 'boards', 'Kconfig.defconfig')

        # Roots are ZEPHYR_BASE plus any module-provided board/soc roots.
        board_roots = self.get_module_setting_root('board', settings_file)
        board_roots.insert(0, Path(ZEPHYR_BASE))
        soc_roots = self.get_module_setting_root('soc', settings_file)
        soc_roots.insert(0, Path(ZEPHYR_BASE))
        root_args = argparse.Namespace(**{'board_roots': board_roots,
                                          'soc_roots': soc_roots, 'board': None})
        v2_boards = list_boards.find_v2_boards(root_args)

        with open(kconfig_defconfig_file, 'w') as fp:
            for board in v2_boards:
                fp.write('osource "' + (Path(board.dir) / 'Kconfig.defconfig').as_posix() + '"\n')

        with open(kconfig_boards_file, 'w') as fp:
            for board in v2_boards:
                # One BOARD_<NAME> symbol per board, plus one
                # BOARD_<NAME>_<QUALIFIER> symbol per qualifier.
                board_str = 'BOARD_' + re.sub(r"[^a-zA-Z0-9_]", "_", board.name).upper()
                fp.write('config ' + board_str + '\n')
                fp.write('\t bool\n')
                for qualifier in list_boards.board_v2_qualifiers(board):
                    board_str = ('BOARD_' + board.name + '_' +
                                 re.sub(r"[^a-zA-Z0-9_]", "_", qualifier)).upper()
                    fp.write('config ' + board_str + '\n')
                    fp.write('\t bool\n')
                fp.write(
                    'source "' + (Path(board.dir) / ('Kconfig.' + board.name)).as_posix() + '"\n\n'
                )

        with open(kconfig_file, 'w') as fp:
            fp.write(
                'osource "' + (Path(kconfig_dir) / 'boards' / 'Kconfig.syms.v1').as_posix() + '"\n'
            )
            for board in v2_boards:
                fp.write('osource "' + (Path(board.dir) / 'Kconfig').as_posix() + '"\n')

        # --- SoCs: generated Kconfig.defconfig, Kconfig.soc, Kconfig ---
        kconfig_defconfig_file = os.path.join(kconfig_dir, 'soc', 'Kconfig.defconfig')
        kconfig_soc_file = os.path.join(kconfig_dir, 'soc', 'Kconfig.soc')
        kconfig_file = os.path.join(kconfig_dir, 'soc', 'Kconfig')

        root_args = argparse.Namespace(**{'soc_roots': [Path(ZEPHYR_BASE)]})
        v2_systems = list_hardware.find_v2_systems(root_args)

        soc_folders = {soc.folder for soc in v2_systems.get_socs()}
        with open(kconfig_defconfig_file, 'w') as fp:
            for folder in soc_folders:
                fp.write('osource "' + (Path(folder) / 'Kconfig.defconfig').as_posix() + '"\n')

        with open(kconfig_soc_file, 'w') as fp:
            for folder in soc_folders:
                fp.write('source "' + (Path(folder) / 'Kconfig.soc').as_posix() + '"\n')

        with open(kconfig_file, 'w') as fp:
            for folder in soc_folders:
                fp.write('source "' + (Path(folder) / 'Kconfig').as_posix() + '"\n')

        # --- Archs: generated arch/Kconfig ---
        kconfig_file = os.path.join(kconfig_dir, 'arch', 'Kconfig')

        root_args = argparse.Namespace(**{'arch_roots': [Path(ZEPHYR_BASE)], 'arch': None})
        v2_archs = list_hardware.find_v2_archs(root_args)

        with open(kconfig_file, 'w') as fp:
            for arch in v2_archs['archs']:
                fp.write('source "' + (Path(arch['path']) / 'Kconfig').as_posix() + '"\n')
    def parse_kconfig(self, filename="Kconfig", hwm=None):
        """
        Returns a kconfiglib.Kconfig object for the Kconfig files. We reuse
        this object for all tests to avoid having to reparse for each test.

        filename: top-level Kconfig file to parse
        hwm: hardware model selector; appears unused by this implementation
             and accepted only for subclass compatibility — TODO confirm
        """
        # Put the Kconfiglib path first to make sure no local Kconfiglib version is
        # used
        kconfig_path = os.path.join(ZEPHYR_BASE, "scripts", "kconfig")
        if not os.path.exists(kconfig_path):
            self.error(kconfig_path + " not found")

        # Temporary directory for all generated Kconfig input; removed in the
        # 'finally' below.
        kconfiglib_dir = tempfile.mkdtemp(prefix="kconfiglib_")

        sys.path.insert(0, kconfig_path)
        # Import globally so that e.g. kconfiglib.Symbol can be referenced in
        # tests
        global kconfiglib
        import kconfiglib

        # Look up Kconfig files relative to ZEPHYR_BASE
        os.environ["srctree"] = ZEPHYR_BASE

        # Parse the entire Kconfig tree, to make sure we see all symbols
        os.environ["SOC_DIR"] = "soc/"
        os.environ["ARCH_DIR"] = "arch/"
        os.environ["BOARD"] = "boards"
        os.environ["ARCH"] = "*"
        os.environ["KCONFIG_BINARY_DIR"] = kconfiglib_dir
        os.environ['DEVICETREE_CONF'] = "dummy"
        os.environ['TOOLCHAIN_HAS_NEWLIB'] = "y"

        # Older name for DEVICETREE_CONF, for compatibility with older Zephyr
        # versions that don't have the renaming
        os.environ["GENERATED_DTS_BOARD_CONF"] = "dummy"

        # For multi repo support
        self.get_modules(os.path.join(kconfiglib_dir, "Kconfig.modules"),
                         os.path.join(kconfiglib_dir, "settings_file.txt"))
        # For Kconfig.dts support
        self.get_kconfig_dts(os.path.join(kconfiglib_dir, "Kconfig.dts"),
                             os.path.join(kconfiglib_dir, "settings_file.txt"))

        # To make compliance work with old hw model and HWMv2 simultaneously.
        kconfiglib_boards_dir = os.path.join(kconfiglib_dir, 'boards')
        os.makedirs(kconfiglib_boards_dir, exist_ok=True)
        os.makedirs(os.path.join(kconfiglib_dir, 'soc'), exist_ok=True)
        os.makedirs(os.path.join(kconfiglib_dir, 'arch'), exist_ok=True)

        os.environ["BOARD_DIR"] = kconfiglib_boards_dir
        self.get_v2_model(kconfiglib_dir, os.path.join(kconfiglib_dir, "settings_file.txt"))

        # Tells Kconfiglib to generate warnings for all references to undefined
        # symbols within Kconfig files
        os.environ["KCONFIG_WARN_UNDEF"] = "y"

        try:
            # Note this will both print warnings to stderr _and_ return
            # them: so some warnings might get printed
            # twice. "warn_to_stderr=False" could unfortunately cause
            # some (other) warnings to never be printed.
            return kconfiglib.Kconfig(filename=filename)
        except kconfiglib.KconfigError as e:
            self.failure(str(e))
            raise EndTest
        finally:
            # Clean up the temporary directory
            shutil.rmtree(kconfiglib_dir)
def get_logging_syms(self, kconf):
# Returns a set() with the names of the Kconfig symbols generated with
# logging template in samples/tests folders. The Kconfig symbols doesn't
# include `CONFIG_` and for each module declared there is one symbol
# per suffix created.
suffixes = [
"_LOG_LEVEL",
"_LOG_LEVEL_DBG",
"_LOG_LEVEL_ERR",
"_LOG_LEVEL_INF",
"_LOG_LEVEL_WRN",
"_LOG_LEVEL_OFF",
"_LOG_LEVEL_INHERIT",
"_LOG_LEVEL_DEFAULT",
]
# Warning: Needs to work with both --perl-regexp and the 're' module.
regex = r"^\s*(?:module\s*=\s*)([A-Z0-9_]+)\s*(?:#|$)"
# Grep samples/ and tests/ for symbol definitions
grep_stdout = git("grep", "-I", "-h", "--perl-regexp", regex, "--",
":samples", ":tests", cwd=ZEPHYR_BASE)
names = re.findall(regex, grep_stdout, re.MULTILINE)
kconf_syms = []
for name in names:
for suffix in suffixes:
kconf_syms.append(f"{name}{suffix}")
return set(kconf_syms)
    def get_defined_syms(self, kconf):
        """Return the names of all defined Kconfig symbols and choices."""
        # Returns a set() with the names of all defined Kconfig symbols (with no
        # 'CONFIG_' prefix). This is complicated by samples and tests defining
        # their own Kconfig trees. For those, just grep for 'config FOO' to find
        # definitions. Doing it "properly" with Kconfiglib is still useful for
        # the main tree, because some symbols are defined using preprocessor
        # macros.

        # Warning: Needs to work with both --perl-regexp and the 're' module.
        # (?:...) is a non-capturing group.
        regex = r"^\s*(?:menu)?config\s*([A-Z0-9_]+)\s*(?:#|$)"

        # Grep samples/ and tests/ for symbol definitions
        grep_stdout = git("grep", "-I", "-h", "--perl-regexp", regex, "--",
                          ":samples", ":tests", cwd=ZEPHYR_BASE)

        # Generate combined list of configs and choices from the main Kconfig tree.
        kconf_syms = kconf.unique_defined_syms + kconf.unique_choices

        # Symbols from the main Kconfig tree + grepped definitions from samples
        # and tests
        return set(
            [sym.name for sym in kconf_syms]
            + re.findall(regex, grep_stdout, re.MULTILINE)
        ).union(self.get_logging_syms(kconf))
def check_top_menu_not_too_long(self, kconf):
"""
Checks that there aren't too many items in the top-level menu (which
might be a sign that stuff accidentally got added there)
"""
max_top_items = 50
n_top_items = 0
node = kconf.top_node.list
while node:
# Only count items with prompts. Other items will never be
# shown in the menuconfig (outside show-all mode).
if node.prompt:
n_top_items += 1
node = node.next
if n_top_items > max_top_items:
self.failure(f"""
Expected no more than {max_top_items} potentially visible items (items with
prompts) in the top-level Kconfig menu, found {n_top_items} items. If you're
deliberately adding new entries, then bump the 'max_top_items' variable in
{__file__}.""")
def check_no_redefined_in_defconfig(self, kconf):
# Checks that no symbols are (re)defined in defconfigs.
for node in kconf.node_iter():
# 'kconfiglib' is global
# pylint: disable=undefined-variable
if "defconfig" in node.filename and (node.prompt or node.help):
name = (node.item.name if node.item not in
(kconfiglib.MENU, kconfiglib.COMMENT) else str(node))
self.failure(f"""
Kconfig node '{name}' found with prompt or help in {node.filename}.
Options must not be defined in defconfig files.
""")
continue
def check_no_enable_in_boolean_prompt(self, kconf):
# Checks that boolean's prompt does not start with "Enable...".
for node in kconf.node_iter():
# skip Kconfig nodes not in-tree (will present an absolute path)
if os.path.isabs(node.filename):
continue
# 'kconfiglib' is global
# pylint: disable=undefined-variable
# only process boolean symbols with a prompt
if (not isinstance(node.item, kconfiglib.Symbol) or
node.item.type != kconfiglib.BOOL or
not node.prompt or
not node.prompt[0]):
continue
if re.match(r"^[Ee]nable.*", node.prompt[0]):
self.failure(f"""
Boolean option '{node.item.name}' prompt must not start with 'Enable...'. Please
check Kconfig guidelines.
""")
continue
def check_no_pointless_menuconfigs(self, kconf):
# Checks that there are no pointless 'menuconfig' symbols without
# children in the Kconfig files
bad_mconfs = []
for node in kconf.node_iter():
# 'kconfiglib' is global
# pylint: disable=undefined-variable
# Avoid flagging empty regular menus and choices, in case people do
# something with 'osource' (could happen for 'menuconfig' symbols
# too, though it's less likely)
if node.is_menuconfig and not node.list and \
isinstance(node.item, kconfiglib.Symbol):
bad_mconfs.append(node)
if bad_mconfs:
self.failure("""\
Found pointless 'menuconfig' symbols without children. Use regular 'config'
symbols instead. See
path_to_url#menuconfig-symbols.
""" + "\n".join(f"{node.item.name:35} {node.filename}:{node.linenr}"
for node in bad_mconfs))
def check_no_undef_within_kconfig(self, kconf):
"""
Checks that there are no references to undefined Kconfig symbols within
the Kconfig files
"""
undef_ref_warnings = "\n\n\n".join(warning for warning in kconf.warnings
if "undefined symbol" in warning)
if undef_ref_warnings:
self.failure(f"Undefined Kconfig symbols:\n\n {undef_ref_warnings}")
def check_soc_name_sync(self, kconf):
root_args = argparse.Namespace(**{'soc_roots': [Path(ZEPHYR_BASE)]})
v2_systems = list_hardware.find_v2_systems(root_args)
soc_names = {soc.name for soc in v2_systems.get_socs()}
soc_kconfig_names = set()
for node in kconf.node_iter():
# 'kconfiglib' is global
# pylint: disable=undefined-variable
if isinstance(node.item, kconfiglib.Symbol) and node.item.name == "SOC":
n = node.item
for d in n.defaults:
soc_kconfig_names.add(d[0].name)
soc_name_warnings = []
for name in soc_names:
if name not in soc_kconfig_names:
soc_name_warnings.append(f"soc name: {name} not found in CONFIG_SOC defaults.")
if soc_name_warnings:
soc_name_warning_str = '\n'.join(soc_name_warnings)
self.failure(f'''
Missing SoC names or CONFIG_SOC vs soc.yml out of sync:
{soc_name_warning_str}
''')
    def check_no_undef_outside_kconfig(self, kconf):
        """
        Checks that there are no references to undefined Kconfig symbols
        outside Kconfig files (any CONFIG_FOO where no FOO symbol exists)
        """
        # Grep for symbol references.
        #
        # Example output line for a reference to CONFIG_FOO at line 17 of
        # foo/bar.c:
        #
        #   foo/bar.c<null>17<null>#ifdef CONFIG_FOO
        #
        # 'git grep --only-matching' would get rid of the surrounding context
        # ('#ifdef '), but it was added fairly recently (second half of 2018),
        # so we extract the references from each line ourselves instead.
        #
        # The regex uses word boundaries (\b) to isolate the reference, and
        # negative lookahead to automatically whitelist the following:
        #
        # - ##, for token pasting (CONFIG_FOO_##X)
        #
        # - $, e.g. for CMake variable expansion (CONFIG_FOO_${VAR})
        #
        # - @, e.g. for CMakes's configure_file() (CONFIG_FOO_@VAR@)
        #
        # - {, e.g. for Python scripts ("CONFIG_FOO_{}_BAR".format(...)")
        #
        # - *, meant for comments like '#endif /* CONFIG_FOO_* */

        defined_syms = self.get_defined_syms(kconf)

        # Maps each undefined symbol to a list <filename>:<linenr> strings
        undef_to_locs = collections.defaultdict(list)

        # Warning: Needs to work with both --perl-regexp and the 're' module
        regex = r"\bCONFIG_[A-Z0-9_]+\b(?!\s*##|[$@{(.*])"

        # Skip doc/releases and doc/security/vulnerabilities.rst, which often
        # reference removed symbols
        grep_stdout = git("grep", "--line-number", "-I", "--null",
                          "--perl-regexp", regex, "--", ":!/doc/releases",
                          ":!/doc/security/vulnerabilities.rst",
                          cwd=Path(GIT_TOP))

        # splitlines() supports various line terminators
        for grep_line in grep_stdout.splitlines():
            path, lineno, line = grep_line.split("\0")

            # Extract symbol references (might be more than one) within the
            # line
            for sym_name in re.findall(regex, line):
                sym_name = sym_name[7:]  # Strip CONFIG_
                # A FOO_MODULE reference is fine when FOO itself is defined
                # (Kconfig's 'modules' convention).
                if sym_name not in defined_syms and \
                   sym_name not in self.UNDEF_KCONFIG_ALLOWLIST and \
                   not (sym_name.endswith("_MODULE") and sym_name[:-7] in defined_syms):
                    undef_to_locs[sym_name].append(f"{path}:{lineno}")

        if not undef_to_locs:
            return

        # String that describes all referenced but undefined Kconfig symbols,
        # in alphabetical order, along with the locations where they're
        # referenced. Example:
        #
        #   CONFIG_ALSO_MISSING    arch/xtensa/core/fatal.c:273
        #   CONFIG_MISSING         arch/xtensa/core/fatal.c:264, subsys/fb/cfb.c:20
        undef_desc = "\n".join(f"CONFIG_{sym_name:35} {', '.join(locs)}"
                               for sym_name, locs in sorted(undef_to_locs.items()))

        self.failure(f"""
Found references to undefined Kconfig symbols. If any of these are false
positives, then add them to UNDEF_KCONFIG_ALLOWLIST in {__file__}.
If the reference is for a comment like /* CONFIG_FOO_* */ (or
/* CONFIG_FOO_*_... */), then please use exactly that form (with the '*'). The
CI check knows not to flag it.
More generally, a reference followed by $, @, {{, (, ., *, or ## will never be
flagged.
{undef_desc}""")
# Many of these are symbols used as examples. Note that the list is sorted
# alphabetically, and skips the CONFIG_ prefix.
UNDEF_KCONFIG_ALLOWLIST = {
"ALSO_MISSING",
"APP_LINK_WITH_",
"APP_LOG_LEVEL", # Application log level is not detected correctly as
# the option is defined using a template, so it can't
# be grepped
"APP_LOG_LEVEL_DBG",
"ARMCLANG_STD_LIBC", # The ARMCLANG_STD_LIBC is defined in the
# toolchain Kconfig which is sourced based on
# Zephyr toolchain variant and therefore not
# visible to compliance.
"BOARD_", # Used as regex in scripts/utils/board_v1_to_v2.py
"BOOT_DIRECT_XIP", # Used in sysbuild for MCUboot configuration
"BOOT_DIRECT_XIP_REVERT", # Used in sysbuild for MCUboot configuration
"BOOT_FIRMWARE_LOADER", # Used in sysbuild for MCUboot configuration
"BOOT_RAM_LOAD", # Used in sysbuild for MCUboot configuration
"BOOT_SWAP_USING_MOVE", # Used in sysbuild for MCUboot configuration
"BOOT_SWAP_USING_SCRATCH", # Used in sysbuild for MCUboot configuration
"BOOT_ENCRYPTION_KEY_FILE", # Used in sysbuild
"BOOT_ENCRYPT_IMAGE", # Used in sysbuild
"BINDESC_", # Used in documentation as a prefix
"BOOT_UPGRADE_ONLY", # Used in example adjusting MCUboot config, but
# symbol is defined in MCUboot itself.
"BOOT_SERIAL_BOOT_MODE", # Used in (sysbuild-based) test/
# documentation
"BOOT_SERIAL_CDC_ACM", # Used in (sysbuild-based) test
"BOOT_SERIAL_ENTRANCE_GPIO", # Used in (sysbuild-based) test
"BOOT_SERIAL_IMG_GRP_HASH", # Used in documentation
"BOOT_SHARE_DATA", # Used in Kconfig text
"BOOT_SHARE_DATA_BOOTINFO", # Used in (sysbuild-based) test
"BOOT_SHARE_BACKEND_RETENTION", # Used in Kconfig text
"BOOT_SIGNATURE_KEY_FILE", # MCUboot setting used by sysbuild
"BOOT_SIGNATURE_TYPE_ECDSA_P256", # MCUboot setting used by sysbuild
"BOOT_SIGNATURE_TYPE_ED25519", # MCUboot setting used by sysbuild
"BOOT_SIGNATURE_TYPE_NONE", # MCUboot setting used by sysbuild
"BOOT_SIGNATURE_TYPE_RSA", # MCUboot setting used by sysbuild
"BOOT_VALIDATE_SLOT0", # Used in (sysbuild-based) test
"BOOT_WATCHDOG_FEED", # Used in (sysbuild-based) test
"CDC_ACM_PORT_NAME_",
"CHRE", # Optional module
"CHRE_LOG_LEVEL_DBG", # Optional module
"CLOCK_STM32_SYSCLK_SRC_",
"CMU",
"COMPILER_RT_RTLIB",
"BT_6LOWPAN", # Defined in Linux, mentioned in docs
"CMD_CACHE", # Defined in U-Boot, mentioned in docs
"CRC", # Used in TI CC13x2 / CC26x2 SDK comment
"DEEP_SLEEP", # #defined by RV32M1 in ext/
"DESCRIPTION",
"ERR",
"ESP_DIF_LIBRARY", # Referenced in CMake comment
"EXPERIMENTAL",
"FFT", # Used as an example in cmake/extensions.cmake
"FLAG", # Used as an example
"FOO",
"FOO_LOG_LEVEL",
"FOO_SETTING_1",
"FOO_SETTING_2",
"HEAP_MEM_POOL_ADD_SIZE_", # Used as an option matching prefix
"LSM6DSO_INT_PIN",
"LIBGCC_RTLIB",
"LLVM_USE_LD", # Both LLVM_USE_* are in cmake/toolchain/llvm/Kconfig
"LLVM_USE_LLD", # which are only included if LLVM is selected but
# not other toolchains. Compliance check would complain,
# for example, if you are using GCC.
"MCUBOOT_LOG_LEVEL_WRN", # Used in example adjusting MCUboot
# config,
"MCUBOOT_LOG_LEVEL_INF",
"MCUBOOT_DOWNGRADE_PREVENTION", # but symbols are defined in MCUboot
# itself.
"MCUBOOT_ACTION_HOOKS", # Used in (sysbuild-based) test
"MCUBOOT_CLEANUP_ARM_CORE", # Used in (sysbuild-based) test
"MCUBOOT_SERIAL", # Used in (sysbuild-based) test/
# documentation
"MCUMGR_GRP_EXAMPLE_OTHER_HOOK", # Used in documentation
"MISSING",
"MODULES",
"MYFEATURE",
"MY_DRIVER_0",
"NORMAL_SLEEP", # #defined by RV32M1 in ext/
"NRF_WIFI_FW_BIN", # Directly passed from CMakeLists.txt
"OPT",
"OPT_0",
"PEDO_THS_MIN",
"PSA_H", # This is used in config-psa.h as guard for the header file
"REG1",
"REG2",
"RIMAGE_SIGNING_SCHEMA", # Optional module
"LOG_BACKEND_MOCK_OUTPUT_DEFAULT", #Referenced in tests/subsys/logging/log_syst
"LOG_BACKEND_MOCK_OUTPUT_SYST", #Referenced in testcase.yaml of log_syst test
"SEL",
"SHIFT",
"SINGLE_APPLICATION_SLOT", # Used in sysbuild for MCUboot configuration
"SOC_SERIES_", # Used as regex in scripts/utils/board_v1_to_v2.py
"SOC_WATCH", # Issue 13749
"SOME_BOOL",
"SOME_INT",
"SOME_OTHER_BOOL",
"SOME_STRING",
"SRAM2", # Referenced in a comment in samples/application_development
"STACK_SIZE", # Used as an example in the Kconfig docs
"STD_CPP", # Referenced in CMake comment
"TEST1",
"TOOLCHAIN_ARCMWDT_SUPPORTS_THREAD_LOCAL_STORAGE", # The symbol is defined in the toolchain
# Kconfig which is sourced based on Zephyr
# toolchain variant and therefore not visible
# to compliance.
"TYPE_BOOLEAN",
"USB_CONSOLE",
"USE_STDC_",
"WHATEVER",
"EXTRA_FIRMWARE_DIR", # Linux, in boards/xtensa/intel_adsp_cavs25/doc
"HUGETLBFS", # Linux, in boards/xtensa/intel_adsp_cavs25/doc
"MODVERSIONS", # Linux, in boards/xtensa/intel_adsp_cavs25/doc
"SECURITY_LOADPIN", # Linux, in boards/xtensa/intel_adsp_cavs25/doc
"ZEPHYR_TRY_MASS_ERASE", # MCUBoot setting described in sysbuild
# documentation
"ZTEST_FAIL_TEST_", # regex in tests/ztest/fail/CMakeLists.txt
"SUIT_MPI_GENERATE", # Used by nRF runners to program provisioning data, based on build configuration
"SUIT_MPI_APP_AREA_PATH", # Used by nRF runners to program provisioning data, based on build configuration
"SUIT_MPI_RAD_AREA_PATH", # Used by nRF runners to program provisioning data, based on build configuration
}
class KconfigBasicCheck(KconfigCheck):
    """
    Checks if we are introducing any new warnings/errors with Kconfig,
    for example using undefined Kconfig variables.
    This runs the basic Kconfig test, which is checking only for undefined
    references inside the Kconfig tree.
    """
    name = "KconfigBasic"
    doc = "See path_to_url for more details."
    path_hint = "<zephyr-base>"

    def run(self):
        # full=False skips the expensive scan of non-Kconfig files for
        # undefined symbol references.
        super().run(full=False)
class KconfigBasicNoModulesCheck(KconfigCheck):
    """
    Checks if we are introducing any new warnings/errors with Kconfig when no
    modules are available. Catches symbols used in the main repository but
    defined only in a module.
    """
    name = "KconfigBasicNoModules"
    doc = "See path_to_url for more details."
    path_hint = "<zephyr-base>"

    def run(self):
        # no_modules=True makes get_modules() emit an empty module file, so
        # module-only symbols show up as undefined.
        super().run(full=False, no_modules=True)
class KconfigHWMv2Check(KconfigCheck, ComplianceTest):
    """
    This runs the Kconfig test for board and SoC v2 scheme.
    This check ensures that all symbols inside the v2 scheme is also defined
    within the same tree.
    This ensures the board and SoC trees are fully self-contained and reusable.
    """
    name = "KconfigHWMv2"
    doc = "See path_to_url for more details."

    def run(self):
        # Use dedicated Kconfig board / soc v2 scheme file.
        # This file sources only v2 scheme tree.
        kconfig_file = os.path.join(os.path.dirname(__file__), "Kconfig.board.v2")
        super().run(full=False, hwm="v2", filename=kconfig_file)
class Nits(ComplianceTest):
    """
    Checks various nits in added/modified files. Doesn't check stuff that's
    already covered by e.g. checkpatch.pl and pylint.
    """
    name = "Nits"
    doc = "See path_to_url#coding-style for more details."
    path_hint = "<git-top>"

    def run(self):
        # Loop through added/modified files
        for fname in get_files(filter="d"):
            if "Kconfig" in fname:
                self.check_kconfig_header(fname)
                self.check_redundant_zephyr_source(fname)

            if fname.startswith("dts/bindings/"):
                self.check_redundant_document_separator(fname)

            # Any text-ish file gets the generic whitespace/newline checks.
            if fname.endswith((".c", ".conf", ".cpp", ".dts", ".overlay",
                               ".h", ".ld", ".py", ".rst", ".txt", ".yaml",
                               ".yml")) or \
               "Kconfig" in fname or \
               "defconfig" in fname or \
               fname == "README":
                self.check_source_file(fname)

    def check_kconfig_header(self, fname):
        """Check for a spammy copy-pasted Kconfig header format."""
        with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
            contents = f.read()

        # 'Kconfig - yada yada' has a copy-pasted redundant filename at the
        # top. This probably means all of the header was copy-pasted.
        if re.match(r"\s*#\s*(K|k)config[\w.-]*\s*-", contents):
            self.failure(f"""
Please use this format for the header in '{fname}' (see
path_to_url#header-comments-and-other-nits):
# <Overview of symbols defined in the file, preferably in plain English>
(Blank line)
(Blank line)
(Kconfig definitions)
Skip the "Kconfig - " part of the first line, since it's clear that the comment
is about Kconfig from context. The "# Kconfig - " is what triggers this
failure.
""")

    def check_redundant_zephyr_source(self, fname):
        """Check for 'source "$(ZEPHYR_BASE)/Kconfig[.zephyr]"', which can
        be simplified to 'source "Kconfig[.zephyr]"'."""
        with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
            # Look for e.g. rsource as well, for completeness
            match = re.search(
                r'^\s*(?:o|r|or)?source\s*"\$\(?ZEPHYR_BASE\)?/(Kconfig(?:\.zephyr)?)"',
                f.read(), re.MULTILINE)

            if match:
                self.failure("""
Redundant 'source "$(ZEPHYR_BASE)/{0}" in '{1}'. Just do 'source "{0}"'
instead. The $srctree environment variable already points to the Zephyr root,
and all 'source's are relative to it.""".format(match.group(1), fname))

    def check_redundant_document_separator(self, fname):
        """Check for redundant '...' document separators in bindings."""
        with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
            if re.search(r"^\.\.\.", f.read(), re.MULTILINE):
                self.failure(f"""\
Redundant '...' document separator in {fname}. Binding YAML files are never
concatenated together, so no document separators are needed.""")

    def check_source_file(self, fname):
        """Generic nits related to various source files."""
        with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
            contents = f.read()

        if not contents.endswith("\n"):
            self.failure(f"Missing newline at end of '{fname}'. Check your text "
                         f"editor settings.")

        if contents.startswith("\n"):
            self.failure(f"Please remove blank lines at start of '{fname}'")

        # A file ending in "\n\n" has at least one fully blank trailing line.
        if contents.endswith("\n\n"):
            self.failure(f"Please remove blank lines at end of '{fname}'")
class GitDiffCheck(ComplianceTest):
    """
    Checks for conflict markers or whitespace errors with git diff --check
    """
    name = "GitDiffCheck"
    doc = "Git conflict markers and whitespace errors are not allowed in added changes"
    path_hint = "<git-top>"

    def run(self):
        offending_lines = []
        # Filter the useful findings out of the diff output with a regex,
        # because `--check` is mutually exclusive with `--name-only` and `-s`.
        finding_re = re.compile(r"\S+\: .*\.")

        for shaidx in get_shas(COMMIT_RANGE):
            # `git diff --check` sets its exit status to the number of
            # offending lines, so a non-zero status is expected and ignored.
            diff = git("diff", f"{shaidx}^!", "--check", ignore_non_zero=True)

            offending_lines.extend(f"{shaidx}: {x}"
                                   for x in finding_re.findall(diff))

        if offending_lines:
            self.failure("\n".join(offending_lines))
class GitLint(ComplianceTest):
    """
    Runs gitlint on the commits and finds issues with style and syntax
    """
    name = "Gitlint"
    doc = "See path_to_url#commit-guidelines for more details"
    path_hint = "<git-top>"

    def run(self):
        # gitlint looks for its .gitlint configuration only in the current
        # directory, hence cwd=GIT_TOP (and shell=True to match its CLI use).
        cmd = 'gitlint --commits ' + COMMIT_RANGE
        try:
            subprocess.run(cmd,
                           check=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           shell=True, cwd=GIT_TOP)
        except subprocess.CalledProcessError as ex:
            # gitlint prints its findings on stdout/stderr; report them all.
            self.failure(ex.output.decode("utf-8"))
class PyLint(ComplianceTest):
    """
    Runs pylint on all .py files, with a limited set of checks enabled. The
    configuration is in the pylintrc file.
    """
    name = "Pylint"
    doc = "See path_to_url for more details"
    path_hint = "<git-top>"

    # Maps the first letter of a pylint message id to a report severity.
    _SEVERITY = {'F': 'error', 'E': 'error',
                 'W': 'warning', 'C': 'warning', 'R': 'warning',
                 'I': 'warning'}

    def run(self):
        # Path to pylint configuration file
        pylintrc = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                "pylintrc"))

        # Path to additional pylint check scripts
        check_script_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                        "../pylint/checkers"))

        # List of files added/modified by the commit(s).
        files = get_files(filter="d")

        # Filter out everything but Python files. Keep filenames
        # relative (to GIT_TOP) to stay farther from any command line
        # limit.
        py_files = filter_py(GIT_TOP, files)
        if not py_files:
            return

        # Make the custom checker plugins importable by pylint.
        python_environment = os.environ.copy()
        if "PYTHONPATH" in python_environment:
            python_environment["PYTHONPATH"] = check_script_dir + ":" + \
                                               python_environment["PYTHONPATH"]
        else:
            python_environment["PYTHONPATH"] = check_script_dir

        pylintcmd = ["pylint", "--output-format=json2", "--rcfile=" + pylintrc,
                     "--load-plugins=argparse-checker"] + py_files
        logger.info(cmd2str(pylintcmd))

        try:
            subprocess.run(pylintcmd,
                           check=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           cwd=GIT_TOP,
                           env=python_environment)
        except subprocess.CalledProcessError as ex:
            output = ex.output.decode("utf-8")
            messages = json.loads(output)['messages']
            for m in messages:
                severity = self._SEVERITY.get(m['messageId'][0], 'unknown')
                self.fmtd_failure(severity, m['messageId'], m['path'],
                                  m['line'], col=str(m['column']), desc=m['message']
                                  + f" ({m['symbol']})")

            if not messages:
                # If there are no specific messages add the whole output as a failure
                self.failure(output)
def filter_py(root, fnames):
    # PyLint check helper. Picks out the Python scripts among 'fnames'
    # (paths relative to directory 'root').
    #
    # Detection goes beyond the .py suffix: the python-magic library (a
    # frontend to libmagic, which also powers 'file') is consulted so that
    # extensionless Python scripts are caught as well.
    def is_python(fname):
        if fname.endswith(".py"):
            return True
        mime = magic.from_file(os.path.join(root, fname), mime=True)
        return mime == "text/x-python"
    return [fname for fname in fnames if is_python(fname)]
class Identity(ComplianceTest):
    """
    Checks if Emails of author and signed-off messages are consistent.
    """
    name = "Identity"
    doc = "See path_to_url#commit-guidelines for more details"
    # git rev-list and git log don't depend on the current (sub)directory
    # unless explicited
    path_hint = "<git-top>"
    def run(self):
        for shaidx in get_shas(COMMIT_RANGE):
            commit = git("log", "--decorate=short", "-n 1", shaidx)
            signed = []
            author = ""
            sha = ""
            parsed_addr = None
            # Scan the commit message for the SHA, the Author: line and
            # every Signed-off-by: trailer (case-insensitive).
            for line in commit.split("\n"):
                match = re.search(r"^commit\s([^\s]*)", line)
                if match:
                    sha = match.group(1)
                match = re.search(r"^Author:\s(.*)", line)
                if match:
                    author = match.group(1)
                    parsed_addr = parseaddr(author)
                match = re.search(r"signed-off-by:\s(.*)", line, re.IGNORECASE)
                if match:
                    signed.append(match.group(1))
            error1 = f"{sha}: author email ({author}) needs to match one of " \
                     f"the signed-off-by entries."
            error2 = f"{sha}: author email ({author}) does not follow the " \
                     f"syntax: First Last <email>."
            error3 = f"{sha}: author email ({author}) must be a real email " \
                     f"and cannot end in @users.noreply.github.com"
            failure = None
            # Rule 1: the author must appear among the sign-offs.
            if author not in signed:
                failure = error1
            # Rule 2: the author field must parse as "First Last <email>"
            # (at least two space-separated name words); otherwise append
            # that error to any rule-1 failure.
            if not parsed_addr or len(parsed_addr[0].split(" ")) < 2:
                if not failure:
                    failure = error2
                else:
                    failure = failure + "\n" + error2
            # Rule 3 (only when the address parsed): no GitHub noreply
            # addresses. Note this overwrites rather than appends.
            elif parsed_addr[1].endswith("@users.noreply.github.com"):
                failure = error3
            if failure:
                self.failure(failure)
class BinaryFiles(ComplianceTest):
    """
    Check that the diff contains no binary files.
    """
    name = "BinaryFiles"
    doc = "No binary files allowed."
    path_hint = "<git-top>"
    def run(self):
        # Binary content is tolerated only under these directories and
        # only for these image formats.
        allow_dirs = ("doc/", "boards/", "samples/")
        # svg files are always detected as binary, see .gitattributes
        allow_exts = (".jpg", ".jpeg", ".png", ".svg", ".webp")
        numstat = git("diff", "--numstat", "--diff-filter=A", COMMIT_RANGE)
        for stat in numstat.splitlines():
            added, deleted, fname = stat.split("\t")
            # 'git diff --numstat' reports "-" for both counts on binaries.
            if not (added == "-" and deleted == "-"):
                continue
            if fname.startswith(allow_dirs) and fname.endswith(allow_exts):
                continue
            self.failure(f"Binary file not allowed: {fname}")
class ImageSize(ComplianceTest):
    """
    Check that any added image is limited in size.
    """
    name = "ImageSize"
    doc = "Check the size of image files."
    path_hint = "<git-top>"
    def run(self):
        # Generic cap for images, with a tighter cap for board images.
        DEFAULT_LIMIT = 250 << 10
        BOARD_LIMIT = 100 << 10
        for file in get_files(filter="d"):
            full_path = os.path.join(GIT_TOP, file)
            # libmagic decides whether the file is an image at all.
            if not magic.from_file(full_path, mime=True).startswith("image/"):
                continue
            limit = BOARD_LIMIT if file.startswith("boards/") else DEFAULT_LIMIT
            if os.path.getsize(full_path) > limit:
                self.failure(f"Image file too large: {file} reduce size to "
                             f"less than {limit >> 10}kB")
class MaintainersFormat(ComplianceTest):
    """
    Check that MAINTAINERS file parses correctly.
    """
    name = "MaintainersFormat"
    doc = "Check that MAINTAINERS file parses correctly."
    path_hint = "<git-top>"
    def run(self):
        # Either spelling of the maintainers file may be present; try to
        # parse whichever exists and report parse errors as failures.
        for file in ("MAINTAINERS.yml", "MAINTAINERS.yaml"):
            if not os.path.exists(file):
                continue
            try:
                Maintainers(file)
            except MaintainersError as ex:
                self.failure(f"Error parsing {file}: {ex}")
class ModulesMaintainers(ComplianceTest):
    """
    Check that all modules have a MAINTAINERS entry.
    """
    name = "ModulesMaintainers"
    doc = "Check that all modules have a MAINTAINERS entry."
    path_hint = "<git-top>"
    def run(self):
        # The west manifest is always loaded; without a maintainers file
        # there is nothing to cross-check, so bail out quietly.
        manifest = Manifest.from_file()
        maintainers_file = None
        for candidate in ("MAINTAINERS.yml", "MAINTAINERS.yaml"):
            if os.path.exists(candidate):
                maintainers_file = candidate
                break
        if not maintainers_file:
            return
        maintainers = Maintainers(maintainers_file)
        # Every active, non-manifest west project needs a matching
        # "West project: <name>" area entry.
        for project in manifest.get_projects([]):
            if not manifest.is_active(project):
                continue
            if isinstance(project, ManifestProject):
                continue
            area = f"West project: {project.name}"
            if area not in maintainers.areas:
                self.failure(f"Missing {maintainers_file} entry for: \"{area}\"")
class YAMLLint(ComplianceTest):
    """
    YAMLLint
    """
    name = "YAMLLint"
    doc = "Check YAML files with YAMLLint."
    path_hint = "<git-top>"
    def run(self):
        config_file = os.path.join(ZEPHYR_BASE, ".yamllint")
        for file in get_files(filter="d"):
            if Path(file).suffix not in ['.yaml', '.yml']:
                continue
            # A fresh config is built per file because the rules below are
            # mutated differently depending on which file is checked.
            yaml_config = config.YamlLintConfig(file=config_file)
            if file.startswith(".github/"):
                # Tweak few rules for workflow files.
                yaml_config.rules["line-length"] = False
                yaml_config.rules["truthy"]["allowed-values"].extend(['on', 'off'])
            elif file == ".codecov.yml":
                yaml_config.rules["truthy"]["allowed-values"].extend(['yes', 'no'])
            # Every problem yamllint reports becomes a formatted warning.
            with open(file, 'r') as fp:
                for p in linter.run(fp, yaml_config):
                    self.fmtd_failure('warning', f'YAMLLint ({p.rule})', file,
                                      p.line, col=p.column, desc=p.desc)
class KeepSorted(ComplianceTest):
    """
    Check for blocks of code or config that should be kept sorted.
    """
    name = "KeepSorted"
    doc = "Check for blocks of code or config that should be kept sorted."
    path_hint = "<git-top>"
    # Files opt in by surrounding a region with "<MARKER>-start" and
    # "<MARKER>-stop" marker lines.
    MARKER = "zephyr-keep-sorted"
    def block_is_sorted(self, block_data):
        # Returns True when the block's entries are in sorted order.
        # Indented continuation lines are folded into the preceding entry
        # before comparing.
        lines = []
        for line in textwrap.dedent(block_data).splitlines():
            if len(lines) > 0 and line.startswith((" ", "\t")):
                # Fold back indented lines
                lines[-1] += line.strip()
            else:
                lines.append(line.strip())
        if lines != sorted(lines):
            return False
        return True
    def check_file(self, file, fp):
        # Scans one file for marker-delimited blocks and reports blocks
        # that are nested, unterminated, unopened, or unsorted.
        # Non-text files (per libmagic) are skipped entirely.
        mime_type = magic.from_file(file, mime=True)
        if not mime_type.startswith("text/"):
            return
        block_data = ""
        in_block = False
        start_marker = f"{self.MARKER}-start"
        stop_marker = f"{self.MARKER}-stop"
        start_line = None
        stop_line = None
        for line_num, line in enumerate(fp.readlines(), start=1):
            if start_marker in line:
                if in_block:
                    desc = f"nested {start_marker}"
                    self.fmtd_failure("error", "KeepSorted", file, line_num,
                                      desc=desc)
                in_block = True
                block_data = ""
                start_line = line_num + 1
            elif stop_marker in line:
                if not in_block:
                    desc = f"{stop_marker} without {start_marker}"
                    self.fmtd_failure("error", "KeepSorted", file, line_num,
                                      desc=desc)
                in_block = False
                stop_line = line_num - 1
                if not self.block_is_sorted(block_data):
                    # Suggest the exact 'ex' command that sorts the block
                    # in place (case-insensitively).
                    desc = (f"sorted block is not sorted, sort by running: " +
                            f"\"ex -s -c '{start_line},{stop_line} sort i|x' {file}\"")
                    self.fmtd_failure("error", "KeepSorted", file, line_num,
                                      desc=desc)
            elif not line.strip() or line.startswith("#"):
                # Ignore comments and blank lines
                continue
            elif in_block:
                block_data += line
        if in_block:
            self.failure(f"unterminated {start_marker} in {file}")
    def run(self):
        for file in get_files(filter="d"):
            with open(file, "r") as fp:
                self.check_file(file, fp)
def init_logs(cli_arg):
    # Set up the module-wide 'logger' (the root logger) with a console
    # handler. The level comes from 'cli_arg' when given, otherwise from
    # the LOG_LEVEL environment variable, defaulting to WARN.
    global logger
    fallback_level = os.environ.get('LOG_LEVEL', "WARN")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(levelname)-8s: %(message)s'))
    logger = logging.getLogger('')
    logger.addHandler(handler)
    logger.setLevel(cli_arg or fallback_level)
    logger.info("Log init completed, level=%s",
                logging.getLevelName(logger.getEffectiveLevel()))
def inheritors(klass):
    # Returns the set of all direct and indirect subclasses of 'klass'
    # (breadth-first walk over __subclasses__).
    found = set()
    pending = [klass]
    while pending:
        for sub in pending.pop().__subclasses__():
            if sub not in found:
                found.add(sub)
                pending.append(sub)
    return found
def annotate(res):
    """
    path_to_url#about-workflow-commands
    """
    # GitHub workflow commands treat '%', CR and LF specially, so escape
    # them to keep multi-line messages intact.
    msg = res.message.replace('%', '%25').replace('\n', '%0A').replace('\r', '%0D')
    location = f'file={res.file}'
    if res.line:
        location += f',line={res.line}'
    if res.col:
        location += f',col={res.col}'
    print(f'::{res.severity} {location},title={res.title}::{msg}')
def resolve_path_hint(hint):
    # Expand the symbolic placeholders used in 'path_hint' attributes;
    # any other value is already a concrete path and passes through.
    if hint == "<zephyr-base>":
        return ZEPHYR_BASE
    if hint == "<git-top>":
        return GIT_TOP
    return hint
def parse_args(argv):
    # Builds the command-line interface and parses 'argv' (a list of
    # arguments, typically sys.argv[1:]). Returns the argparse Namespace.
    default_range = 'HEAD~1..HEAD'
    parser = argparse.ArgumentParser(
        description="Check for coding style and documentation warnings.", allow_abbrev=False)
    parser.add_argument('-c', '--commits', default=default_range,
                        help=f'''Commit range in the form: a..[b], default is
                        {default_range}''')
    parser.add_argument('-o', '--output', default="compliance.xml",
                        help='''Name of outfile in JUnit format,
                        default is ./compliance.xml''')
    parser.add_argument('-n', '--no-case-output', action="store_true",
                        help="Do not store the individual test case output.")
    parser.add_argument('-l', '--list', action="store_true",
                        help="List all checks and exit")
    parser.add_argument("-v", "--loglevel", choices=['DEBUG', 'INFO', 'WARNING',
                                                     'ERROR', 'CRITICAL'],
                        help="python logging level")
    parser.add_argument('-m', '--module', action="append", default=[],
                        help="Checks to run. All checks by default. (case " \
                        "insensitive)")
    parser.add_argument('-e', '--exclude-module', action="append", default=[],
                        help="Do not run the specified checks (case " \
                        "insensitive)")
    parser.add_argument('-j', '--previous-run', default=None,
                        help='''Pre-load JUnit results in XML format
                        from a previous run and combine with new results.''')
    parser.add_argument('--annotate', action="store_true",
                        help="Print GitHub Actions-compatible annotations.")
    return parser.parse_args(argv)
def _main(args):
    # The "real" main(), which is wrapped to catch exceptions and report them
    # to GitHub. Returns the number of test failures.
    global ZEPHYR_BASE
    ZEPHYR_BASE = os.environ.get('ZEPHYR_BASE')
    if not ZEPHYR_BASE:
        # Let the user run this script as ./scripts/ci/check_compliance.py without
        # making them set ZEPHYR_BASE.
        ZEPHYR_BASE = str(Path(__file__).resolve().parents[2])
        # Propagate this decision to child processes.
        os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE
    # The absolute path of the top-level git directory. Initialize it here so
    # that issues running Git can be reported to GitHub.
    global GIT_TOP
    GIT_TOP = git("rev-parse", "--show-toplevel")
    # The commit range passed in --commit, e.g. "HEAD~3"
    global COMMIT_RANGE
    COMMIT_RANGE = args.commits
    init_logs(args.loglevel)
    logger.info(f'Running tests on commit range {COMMIT_RANGE}')
    if args.list:
        for testcase in inheritors(ComplianceTest):
            print(testcase.name)
        return 0
    # Load saved test results from an earlier run, if requested
    if args.previous_run:
        if not os.path.exists(args.previous_run):
            # This probably means that an earlier pass had an internal error
            # (the script is currently run multiple times by the ci-pipelines
            # repo). Since that earlier pass might've posted an error to
            # GitHub, avoid generating a GitHub comment here, by avoiding
            # sys.exit() (which gets caught in main()).
            print(f"error: '{args.previous_run}' not found",
                  file=sys.stderr)
            return 1
        logging.info(f"Loading previous results from {args.previous_run}")
        # Only the first test suite in the loaded file is reused.
        for loaded_suite in JUnitXml.fromfile(args.previous_run):
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")
    # Normalize the check names for case-insensitive matching below.
    included = list(map(lambda x: x.lower(), args.module))
    excluded = list(map(lambda x: x.lower(), args.exclude_module))
    for testcase in inheritors(ComplianceTest):
        # "Modules" and "testcases" are the same thing. Better flags would have
        # been --tests and --exclude-tests or the like, but it's awkward to
        # change now.
        if included and testcase.name.lower() not in included:
            continue
        if testcase.name.lower() in excluded:
            print("Skipping " + testcase.name)
            continue
        test = testcase()
        try:
            print(f"Running {test.name:16} tests in "
                  f"{resolve_path_hint(test.path_hint)} ...")
            test.run()
        except EndTest:
            pass
        # Annotate if required
        if args.annotate:
            for res in test.fmtd_failures:
                annotate(res)
        suite.add_testcase(test.case)
    # Write the combined results (previous + this run) as JUnit XML.
    if args.output:
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.update_statistics()
        xml.write(args.output, pretty=True)
    failed_cases = []
    name2doc = {testcase.name: testcase.doc
                for testcase in inheritors(ComplianceTest)}
    for case in suite:
        if case.result:
            if case.is_skipped:
                logging.warning(f"Skipped {case.name}")
            else:
                failed_cases.append(case)
        else:
            # Some checks like codeowners can produce no .result
            logging.info(f"No JUnit result for {case.name}")
    n_fails = len(failed_cases)
    if n_fails:
        print(f"{n_fails} checks failed")
        for case in failed_cases:
            for res in case.result:
                errmsg = res.text.strip()
                logging.error(f"Test {case.name} failed: \n{errmsg}")
            if args.no_case_output:
                continue
            # Also dump each failed check's doc string and messages into
            # a per-check text file.
            with open(f"{case.name}.txt", "w") as f:
                docs = name2doc.get(case.name)
                f.write(f"{docs}\n")
                for res in case.result:
                    errmsg = res.text.strip()
                    f.write(f'\n {errmsg}')
    if args.output:
        print(f"\nComplete results in {args.output}")
    return n_fails
def main(argv=None):
    # Top-level entry point: parse arguments, verify lxml is available,
    # run the checks, and exit with the number of failures as the status.
    args = parse_args(argv)
    try:
        # pylint: disable=unused-import
        from lxml import etree
    except ImportError:
        print("\nERROR: Python module lxml not installed, unable to proceed")
        print("See path_to_url")
        # NOTE(review): 'return 1' here leaves the process exit status 0
        # when run via __main__ (which ignores the return value);
        # sys.exit(1) may be intended — confirm.
        return 1
    try:
        n_fails = _main(args)
    except BaseException:
        # Catch BaseException instead of Exception to include stuff like
        # SystemExit (raised by sys.exit())
        print(f"Python exception in `{__file__}`:\n\n"
              f"```\n{traceback.format_exc()}\n```")
        raise
    sys.exit(n_fails)
def cmd2str(cmd):
    # Renders the argument list 'cmd' as a single shell-quoted command
    # line, suitable for log output and error messages.
    quoted = [shlex.quote(word) for word in cmd]
    return " ".join(quoted)
def err(msg):
    # Exits the process with an error message, prefixed by the program
    # name when sys.argv[0] is non-empty.
    prefix = sys.argv[0]
    if prefix:
        prefix = f"{prefix}: "
    sys.exit(f"{prefix} error: {msg}")
# Script entry point; only the real arguments are forwarded.
if __name__ == "__main__":
    main(sys.argv[1:])
``` | /content/code_sandbox/scripts/ci/check_compliance.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 15,803 |
```unknown
# pylint configuration for the PyLint check in check_compliance.py.
#
# To run pylint manually with this configuration from the Zephyr repo, do
#
# pylint3 --rcfile=ci-tools/scripts/pylintrc <Python file>
#
# This command will check all scripts:
#
# pylint3 --rcfile=ci-tools/scripts/pylintrc $(git ls-files '*.py')
[MASTER]
# Use multiple processes
jobs=0
# Do not pickle collected data for comparisons
persistent=no
[REPORTS]
# Only show messages, not full report
reports=no
# Disable score
score=no
[MESSAGES CONTROL]
# Only enable specific (hopefully) uncontroversial warnings. Use
# 'pylint3 --list-msgs' to list messages and their IDs.
#
# These might be nice to check too, but currently trigger false positives:
#
# no-member
# arguments-differ
# redefine-in-handler
# abstract-method
#
# These might be too controversial:
#
# no-else-return
# consider-using-get
# redefined-builtin
#
# These tell you to use logger.warning("foo %d bar", 3) instead of e.g.
# logger.warning("foo {} bar".format(3)), but it's not a clear win in all
# cases. f-strings would be nicer too, and it's easier to convert from format()
# to those.
#
# logging-not-lazy
# logging-format-interpolation
# logging-fstring-interpolation
disable=all
# Identifiers are in the same order as in 'pylint3 --list-msgs'. Entire
# message "types" (~= severities) like F(atal), E(error),... are listed
# first.
enable=
F, # atal
empty-docstring,
unneeded-not,
singleton-comparison,
misplaced-comparison-constant,
unidiomatic-typecheck,
consider-using-enumerate,
consider-iterating-dictionary,
bad-classmethod-argument,
bad-mcs-method-argument,
bad-mcs-classmethod-argument,
single-string-used-for-slots,
trailing-newlines,
trailing-whitespace,
missing-final-newline,
superfluous-parens,
mixed-line-endings,
unexpected-line-ending-format,
invalid-characters-in-docstring,
useless-import-alias,
len-as-condition,
syntax-error,
init-is-generator,
return-in-init,
function-redefined,
not-in-loop,
return-outside-function,
yield-outside-function,
nonexistent-operator,
duplicate-argument-name,
abstract-class-instantiated,
bad-reversed-sequence,
too-many-star-expressions,
invalid-star-assignment-target,
star-needs-assignment-target,
nonlocal-and-global,
continue-in-finally,
nonlocal-without-binding,
misplaced-format-function,
method-hidden,
access-member-before-definition,
no-method-argument,
no-self-argument,
invalid-slots-object,
assigning-non-slot,
invalid-slots,
inherit-non-class,
inconsistent-mro,
duplicate-bases,
non-iterator-returned,
unexpected-special-method-signature,
invalid-length-returned,
relative-beyond-top-level,
used-before-assignment,
undefined-variable,
undefined-all-variable,
invalid-all-object,
no-name-in-module,
unpacking-non-sequence,
bad-except-order,
raising-bad-type,
bad-exception-context,
misplaced-bare-raise,
raising-non-exception,
notimplemented-raised,
catching-non-exception,
bad-super-call,
not-callable,
assignment-from-no-return,
no-value-for-parameter,
too-many-function-args,
unexpected-keyword-arg,
redundant-keyword-arg,
missing-kwoa,
invalid-sequence-index,
invalid-slice-index,
assignment-from-none,
not-context-manager,
invalid-unary-operand-type,
unsupported-binary-operation,
repeated-keyword,
not-an-iterable,
not-a-mapping,
unsupported-membership-test,
unsubscriptable-object,
unsupported-assignment-operation,
unsupported-delete-operation,
invalid-metaclass,
unhashable-dict-key,
logging-unsupported-format,
logging-format-truncated,
logging-too-many-args,
logging-too-few-args,
bad-format-character,
truncated-format-string,
mixed-format-string,
format-needs-mapping,
missing-format-string-key,
too-many-format-args,
too-few-format-args,
bad-string-format-type,
bad-str-strip-call,
invalid-envvar-value,
yield-inside-async-function,
not-async-context-manager,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
literal-comparison,
comparison-with-itself,
no-self-use,
no-classmethod-decorator,
no-staticmethod-decorator,
cyclic-import,
duplicate-code,
consider-merging-isinstance,
simplifiable-if-statement,
redefined-argument-from-local,
trailing-comma-tuple,
stop-iteration-return,
useless-return,
consider-swap-variables,
consider-using-join,
consider-using-in,
chained-comparison,
consider-using-dict-comprehension,
consider-using-set-comprehension,
simplifiable-if-expression,
unreachable,
pointless-statement,
pointless-string-statement,
expression-not-assigned,
unnecessary-pass,
unnecessary-lambda,
duplicate-key,
assign-to-new-keyword,
useless-else-on-loop,
confusing-with-statement,
using-constant-test,
comparison-with-callable,
lost-exception,
assert-on-tuple,
bad-staticmethod-argument,
super-init-not-called,
non-parent-init-called,
useless-super-delegation,
unnecessary-semicolon,
bad-indentation,
mixed-indentation,
deprecated-module,
reimported,
import-self,
misplaced-future,
global-variable-not-assigned,
unused-import,
unused-variable,
undefined-loop-variable,
unbalanced-tuple-unpacking,
possibly-unused-variable,
self-cls-assignment,
bare-except,
duplicate-except,
try-except-raise,
binary-op-exception,
raising-format-tuple,
wrong-exception-operation,
keyword-arg-before-vararg,
bad-format-string-key,
unused-format-string-key,
bad-format-string,
unused-format-string-argument,
format-combined-specification,
missing-format-attribute,
invalid-format-index,
anomalous-backslash-in-string,
anomalous-unicode-escape-in-string,
bad-open-mode,
redundant-unittest-assert,
deprecated-method,
bad-thread-instantiation,
shallow-copy-environ,
invalid-envvar-default,
deprecated-string-function,
deprecated-str-translate-call,
deprecated-itertools-function,
deprecated-types-field,
# Custom Zephyr check scripts
zephyr-arg-parse,
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=10
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=yes
``` | /content/code_sandbox/scripts/ci/pylintrc | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,578 |
```python
#!/usr/bin/env python3
import os
import sh
import argparse
import re
from unidiff import PatchSet
# This check cannot run outside a configured Zephyr environment.
if "ZEPHYR_BASE" not in os.environ:
    exit("$ZEPHYR_BASE environment variable undefined.")
RESERVED_NAMES_SCRIPT = "/scripts/coccinelle/reserved_names.cocci"
# Coccinelle semantic patches run against each modified C file
# (paths relative to ZEPHYR_BASE).
coccinelle_scripts = [RESERVED_NAMES_SCRIPT,
                      "/scripts/coccinelle/same_identifier.cocci",
                      #"/scripts/coccinelle/identifier_length.cocci",
                      ]
# Paths (regexes) exempt from the reserved-names patch.
coccinelle_reserved_names_exclude_regex = [
    r"lib/libc/.*",
    r"lib/posix/.*",
    r"include/zephyr/posix/.*",
]
def parse_coccinelle(contents: str, violations: dict):
    # Collects Coccinelle report lines of the form "file.c:LINE:COLS: msg"
    # into 'violations', keyed by "file.c:LINE" with a list of messages.
    reg = re.compile("([a-zA-Z0-9_/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
    for line in contents.split("\n"):
        match = reg.match(line)
        if not match:
            continue
        violations.setdefault(match.group(1), []).append(match.group(3))
def parse_args(argv=None):
    """Parse command-line options for the Coccinelle compliance check.

    Args:
        argv: Optional argument list (mainly for testing); defaults to
            sys.argv[1:], so existing callers are unaffected.

    Returns:
        The parsed argparse.Namespace.
    """
    # Fixed typo in the user-visible description ("Cocccinelle").
    parser = argparse.ArgumentParser(
        description="Check commits against Coccinelle rules", allow_abbrev=False)
    parser.add_argument('-r', "--repository", required=False,
                        help="Path to repository")
    parser.add_argument('-c', '--commits', default=None,
                        help="Commit range in the form: a..b")
    parser.add_argument("-o", "--output", required=False,
                        help="Print violation into a file")
    return parser.parse_args(argv)
def main():
    """Run the Coccinelle scripts over the C files touched by the commit
    range and count violations that land on added lines.

    Returns the number of such violations (used as the exit status).
    """
    args = parse_args()
    if not args.commits:
        exit("missing commit range")
    if args.repository is None:
        repository_path = os.environ['ZEPHYR_BASE']
    else:
        repository_path = args.repository
    sh_special_args = {
        '_tty_out': False,
        '_cwd': repository_path
    }
    # pylint does not like the 'sh' library
    # pylint: disable=too-many-function-args,unexpected-keyword-arg
    commit = sh.git("diff", args.commits, **sh_special_args)
    patch_set = PatchSet(commit)
    zephyr_base = os.getenv("ZEPHYR_BASE")
    violations = {}
    numViolations = 0
    for f in patch_set:
        # Only check C sources/headers that still exist in the tree
        # (deleted files appear in the diff too).
        if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
            continue
        for script in coccinelle_scripts:
            skip_reserved_names = False
            if script == RESERVED_NAMES_SCRIPT:
                # The reserved-names patch does not apply to the excluded
                # C-library namespaces.
                for path in coccinelle_reserved_names_exclude_regex:
                    if re.match(path, f.path):
                        skip_reserved_names = True
                        break
            if skip_reserved_names:
                continue
            script_path =zephyr_base + "/" + script
            print(f"Running {script} on {f.path}")
            try:
                cocci = sh.coccicheck(
                    "--mode=report",
                    "--cocci=" +
                    script_path,
                    f.path,
                    _timeout=10,
                    **sh_special_args)
                parse_coccinelle(cocci, violations)
            except sh.TimeoutException:
                print("we timed out waiting, skipping...")
        # Count only violations located on lines added by this diff;
        # pre-existing ones are ignored.
        for hunk in f:
            for line in hunk:
                if line.is_added:
                    violation = "{}:{}".format(f.path, line.target_line_no)
                    if violation in violations:
                        numViolations += 1
                        if args.output:
                            with open(args.output, "a+") as fp:
                                fp.write("{}:{}\n".format(
                                    violation, "\t\n".join(
                                        violations[violation])))
                        else:
                            print(
                                "{}:{}".format(
                                    violation, "\t\n".join(
                                        violations[violation])))
    return numViolations
# Entry point: the number of violations becomes the exit status.
if __name__ == "__main__":
    ret = main()
    exit(ret)
``` | /content/code_sandbox/scripts/ci/guideline_check.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 874 |
```unknown
# Kconfig top-level ci for compliance testing Kconfig tree for boards / SoC v2 scheme.
#
#
mainmenu "Zephyr board / SoC v2 Configuration"
source "boards/Kconfig.v2"
source "soc/Kconfig.v2"
``` | /content/code_sandbox/scripts/ci/Kconfig.board.v2 | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```python
#!/usr/bin/env python3
# Script that operates on a merged PR and sends data to elasticsearch for
# further inspections using the PR dashboard at
# path_to_url
import sys
import os
from github import Github
import argparse
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from datetime import timedelta
import pprint
date_format = '%Y-%m-%d %H:%M:%S'
def parse_args(argv=None) -> argparse.Namespace:
    """Parse command-line options for the PR-statistics upload.

    Args:
        argv: Optional argument list (mainly for testing); defaults to
            sys.argv[1:], so existing callers are unaffected.

    Returns:
        The parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False)
    parser.add_argument('--pull-request', help='pull request number', type=int)
    parser.add_argument('--range', help='execute based on a date range, for example 2023-01-01..2023-01-05')
    parser.add_argument('--repo', help='github repo', default='zephyrproject-rtos/zephyr')
    parser.add_argument('--es-index', help='Elasticsearch index')
    parser.add_argument('-y','--dry-run', action="store_true", help='dry run, do not upload data')
    return parser.parse_args(argv)
def gendata(data, index):
    # Wrap each record in the action envelope expected by
    # elasticsearch.helpers.bulk: target index plus the document itself.
    for entry in data:
        yield {"_index": index, "_source": entry}
def process_pr(pr):
    """Extract review-process metrics for one merged PR into a flat dict
    suitable for indexing into Elasticsearch (see gendata())."""
    reviews = pr.get_reviews()
    print(f'#{pr.number}: {pr.title} - {pr.comments} Comments, reviews: {reviews.totalCount}, {len(pr.assignees)} Assignees (Updated {pr.updated_at})')
    assignee_reviews = 0
    prj = {}
    assignees = []
    labels = []
    for label in pr.labels:
        labels.append(label.name)
    reviewers = set()
    for review in reviews:
        # get list of all approved reviews
        if review.user and review.state == 'APPROVED':
            reviewers.add(review.user.login)
    for assignee in pr.assignees:
        # list assignees for later checks
        assignees.append(assignee.login)
        if assignee.login in reviewers:
            assignee_reviews += 1
    # 'review_rule': was the PR approved or merged by one of its assignees?
    if assignee_reviews > 0 or pr.merged_by.login in assignees:
        # in case of assignee reviews or if PR was merged by an assignee
        prj['review_rule'] = "yes"
    elif not pr.assignees or \
        (pr.user.login in assignees and len(assignees) == 1) or \
        ('Trivial' in labels or 'Hotfix' in labels):
        # in case where no assignees set or if submitter is the only assignee
        # or in case of trivial or hotfixes
        prj['review_rule'] = "na"
    else:
        # everything else
        prj['review_rule'] = "no"
    created = pr.created_at
    # if a PR was made ready for review from draft, calculate based on when it
    # was moved out of draft.
    for event in pr.get_issue_events():
        if event.event == 'ready_for_review':
            created = event.created_at
    # calculate time the PR was in review, hours and business days.
    delta = pr.closed_at - created
    deltah = delta.total_seconds() / 3600
    prj['hours_open'] = deltah
    dates = (created + timedelta(idx + 1) for idx in range((pr.closed_at - created).days))
    # Get number of business days per the guidelines, we need at least 2.
    business_days = sum(1 for day in dates if day.weekday() < 5)
    prj['business_days_open'] = business_days
    # 'time_rule': hotfixes are exempt, trivial PRs need >= 4 hours,
    # everything else needs >= 2 business days.
    trivial = 'Trivial' in labels
    hotfix = 'Hotfix' in labels
    min_review_time_rule = "no"
    if hotfix or (trivial and deltah >= 4) or business_days >= 2:
        min_review_time_rule = "yes"
    prj['time_rule'] = min_review_time_rule
    # This is all data we get easily through the Github API and serves as the basis
    # for displaying some trends and metrics.
    # Data can be extended in the future if we find more information that
    # is useful through the API
    prj['nr'] = pr.number
    prj['url'] = pr.url
    prj['title'] = pr.title
    prj['comments'] = pr.comments
    prj['reviews'] = reviews.totalCount
    prj['assignees'] = assignees
    prj['updated'] = pr.updated_at.strftime("%Y-%m-%d %H:%M:%S")
    prj['created'] = pr.created_at.strftime("%Y-%m-%d %H:%M:%S")
    prj['closed'] = pr.closed_at.strftime("%Y-%m-%d %H:%M:%S")
    prj['merged_by'] = pr.merged_by.login
    prj['submitted_by'] = pr.user.login
    prj['changed_files'] = pr.changed_files
    prj['additions'] = pr.additions
    prj['deletions'] = pr.deletions
    prj['commits'] = pr.commits
    # The branch we are targeting. main vs release branches.
    prj['base'] = pr.base.ref
    # list all reviewers
    prj['reviewers'] = list(reviewers)
    prj['labels'] = labels
    return prj
def main():
    # Collects data for a single PR (--pull-request) or for all PRs merged
    # in a date range (--range), then uploads it to Elasticsearch unless
    # --dry-run is given.
    args = parse_args()
    token = os.environ.get('GITHUB_TOKEN')
    if not token:
        sys.exit('Github token not set in environment, please set the '
                 'GITHUB_TOKEN environment variable and retry.')
    gh = Github(token)
    json_list = []
    gh_repo = gh.get_repo(args.repo)
    if args.pull_request:
        pr = gh_repo.get_pull(args.pull_request)
        prj = process_pr(pr)
        json_list.append(prj)
    elif args.range:
        # Search PRs merged against 'main' within the given date range.
        query = f'repo:{args.repo} merged:{args.range} is:pr is:closed sort:updated-desc base:main'
        prs = gh.search_issues(query=f'{query}')
        for _pr in prs:
            pr = gh_repo.get_pull(_pr.number)
            prj = process_pr(pr)
            json_list.append(prj)
    if json_list and not args.dry_run:
        # Send data over to elasticsearch.
        es = Elasticsearch(
            [os.environ['ELASTICSEARCH_SERVER']],
            api_key=os.environ['ELASTICSEARCH_KEY'],
            verify_certs=False
        )
        try:
            # The target index comes from --es-index or the environment.
            if args.es_index:
                index = args.es_index
            else:
                index = os.environ['PR_STAT_ES_INDEX']
            bulk(es, gendata(json_list, index))
        except KeyError as e:
            print(f"Error: {e} not set.")
            print(json_list)
    if args.dry_run:
        pprint.pprint(json_list)
# Script entry point when executed directly.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/ci/stats/merged_prs.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,499 |
```python
#!/usr/bin/env python3
# A script to generate twister options based on modified files.
import re, os
import argparse
import yaml
import fnmatch
import subprocess
import json
import logging
import sys
import glob
from pathlib import Path
from git import Repo
from west.manifest import Manifest
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
if "ZEPHYR_BASE" not in os.environ:
    exit("$ZEPHYR_BASE environment variable undefined.")
# These are globally used variables. They are assigned in __main__ and are
# visible in further methods; however, pylint complains that it doesn't
# recognize them when used (used-before-assignment).
zephyr_base = Path(os.environ['ZEPHYR_BASE'])
repository_path = zephyr_base
repo_to_scan = Repo(zephyr_base)
args = None
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# Silence pykwalify's logger (50 == CRITICAL).
logging.getLogger("pykwalify.core").setLevel(50)
# list_boards lives under $ZEPHYR_BASE/scripts, which is not on sys.path
# by default.
sys.path.append(os.path.join(zephyr_base, 'scripts'))
import list_boards
def _get_match_fn(globs, regexes):
# Constructs a single regex that tests for matches against the globs in
# 'globs' and the regexes in 'regexes'. Parts are joined with '|' (OR).
# Returns the search() method of the compiled regex.
#
# Returns None if there are neither globs nor regexes, which should be
# interpreted as no match.
if not (globs or regexes):
return None
regex = ""
if globs:
glob_regexes = []
for glob in globs:
# Construct a regex equivalent to the glob
glob_regex = glob.replace(".", "\\.").replace("*", "[^/]*") \
.replace("?", "[^/]")
if not glob.endswith("/"):
# Require a full match for globs that don't end in /
glob_regex += "$"
glob_regexes.append(glob_regex)
# The glob regexes must anchor to the beginning of the path, since we
# return search(). (?:) is a non-capturing group.
regex += "^(?:{})".format("|".join(glob_regexes))
if regexes:
if regex:
regex += "|"
regex += "|".join(regexes)
return re.compile(regex).search
class Tag:
    """
    Represents an entry for a tag in tags.yaml.
    These attributes are available:
    name:
        List of GitHub labels for the area. Empty if the area has no 'labels'
        key.
    description:
        Text from 'description' key, or None if the area has no 'description'
        key
    """
    def _contains(self, path):
        # True when 'path' matches this tag's file patterns and is not
        # excluded by them. Either match function may be None, meaning
        # "no patterns" (no match / no exclusion respectively).
        matched = self._match_fn and self._match_fn(path)
        excluded = self._exclude_match_fn and self._exclude_match_fn(path)
        return matched and not excluded
    def __repr__(self):
        return f"<Tag {self.name}>"
class Filters:
    """Derive a (partial) twister test plan from a set of modified files.

    Each find_*() method tries to map some of the modified files to a
    testing "scope" (modules, tags, tests, architectures, boards) and runs
    twister with the matching filter options via get_plan(). Files that end
    up with no matching scope cause find_excludes() to request a full run.
    Collected testsuites accumulate in self.all_tests.
    """

    def __init__(self, modified_files, ignore_path, alt_tags, testsuite_root,
                 pull_request=False, platforms=None, detailed_test_id=True,
                 quarantine_list=None, tc_roots_th=20):
        # Fix: 'platforms' previously defaulted to a mutable [] shared
        # between instances; use None as the sentinel instead.
        self.modified_files = modified_files
        self.testsuite_root = testsuite_root
        self.resolved_files = []      # files for which a scope was found
        self.twister_options = []
        self.full_twister = False     # set when a full run is required
        self.all_tests = []           # testsuites collected from twister
        self.tag_options = []
        self.pull_request = pull_request
        self.platforms = platforms if platforms is not None else []
        self.detailed_test_id = detailed_test_id
        self.ignore_path = ignore_path
        self.tag_cfg_file = alt_tags
        self.quarantine_list = quarantine_list
        self.tc_roots_th = tc_roots_th

    def process(self):
        """Run all scope-detection passes in order."""
        self.find_modules()
        self.find_tags()
        self.find_tests()
        if not self.platforms:
            # Only infer arch/board scopes when no explicit platform
            # filter was requested.
            self.find_archs()
            self.find_boards()
        self.find_excludes()

    def get_plan(self, options, integration=False, use_testsuite_root=True):
        """Invoke twister with 'options' in dry-run mode and collect the
        generated partial test plan into self.all_tests."""
        fname = "_test_plan_partial.json"
        cmd = [f"{zephyr_base}/scripts/twister", "-c"] + options + ["--save-tests", fname]
        if not self.detailed_test_id:
            cmd += ["--no-detailed-test-id"]
        if self.testsuite_root and use_testsuite_root:
            for root in self.testsuite_root:
                cmd += ["-T", root]
        if integration:
            cmd.append("--integration")
        if self.quarantine_list:
            for q in self.quarantine_list:
                cmd += ["--quarantine-list", q]

        logging.info(" ".join(cmd))
        _ = subprocess.call(cmd)
        try:
            with open(fname, newline='') as jsonfile:
                json_data = json.load(jsonfile)
            suites = json_data.get("testsuites", [])
            self.all_tests.extend(suites)
        finally:
            # Always remove the temporary plan file, even if parsing failed.
            if os.path.exists(fname):
                os.remove(fname)

    def find_modules(self):
        """Detect west manifest changes and schedule tests for the
        added/updated/removed projects."""
        if 'west.yml' in self.modified_files and args.commits is not None:
            print("Manifest file 'west.yml' changed")
            print("=========")
            # args.commits is a range like "a..b"; [:-2] strips the trailing
            # ".." to get the base revision — TODO confirm this assumption
            # holds for all accepted --commits forms.
            old_manifest_content = repo_to_scan.git.show(f"{args.commits[:-2]}:west.yml")
            with open("west_old.yml", "w") as manifest:
                manifest.write(old_manifest_content)
            old_manifest = Manifest.from_file("west_old.yml")
            new_manifest = Manifest.from_file("west.yml")
            old_projs = set((p.name, p.revision) for p in old_manifest.projects)
            new_projs = set((p.name, p.revision) for p in new_manifest.projects)
            logging.debug(f'old_projs: {old_projs}')
            logging.debug(f'new_projs: {new_projs}')
            # Removed projects
            rprojs = set(filter(lambda p: p[0] not in list(p[0] for p in new_projs),
                                old_projs - new_projs))
            # Updated projects
            uprojs = set(filter(lambda p: p[0] in list(p[0] for p in old_projs),
                                new_projs - old_projs))
            # Added projects
            aprojs = new_projs - old_projs - uprojs
            # All projs
            projs = rprojs | uprojs | aprojs
            projs_names = [name for name, rev in projs]

            logging.info(f'rprojs: {rprojs}')
            logging.info(f'uprojs: {uprojs}')
            logging.info(f'aprojs: {aprojs}')
            logging.info(f'project: {projs_names}')

            _options = []
            for p in projs_names:
                _options.extend(["-t", p])
            if self.platforms:
                for platform in self.platforms:
                    _options.extend(["-p", platform])
            self.get_plan(_options, True)

    def find_archs(self):
        """Map files under arch/<arch>/ (or include/zephyr/arch/<arch>/)
        to per-architecture twister filters."""
        # we match both arch/<arch>/* and include/zephyr/arch/<arch> and skip common.
        archs = set()

        for f in self.modified_files:
            p = re.match(r"^arch\/([^/]+)\/", f)
            if not p:
                p = re.match(r"^include\/zephyr\/arch\/([^/]+)\/", f)
            if p:
                if p.group(1) != 'common':
                    archs.add(p.group(1))
                    # Modified file is treated as resolved, since a matching scope was found
                    self.resolved_files.append(f)

        _options = []
        for arch in archs:
            _options.extend(["-a", arch])

        if _options:
            logging.info('Potential architecture filters...')
            if self.platforms:
                for platform in self.platforms:
                    _options.extend(["-p", platform])
                self.get_plan(_options, True)
            else:
                self.get_plan(_options, True)

    def find_boards(self):
        """Map changed files under boards/ to known board identifiers and
        schedule per-board twister runs."""
        changed_boards = set()
        matched_boards = {}
        resolved_files = []

        for file in self.modified_files:
            if file.endswith(".rst") or file.endswith(".png") or file.endswith(".jpg"):
                continue
            if file.startswith("boards/"):
                changed_boards.add(file)
                resolved_files.append(file)

        roots = [zephyr_base]
        if repository_path != zephyr_base:
            roots.append(repository_path)

        # Look for boards in monitored repositories
        lb_args = argparse.Namespace(**{'arch_roots': roots, 'board_roots': roots,
                                        'board': None, 'soc_roots': roots,
                                        'board_dir': None})
        known_boards = list_boards.find_v2_boards(lb_args)

        for changed in changed_boards:
            for board in known_boards:
                c = (zephyr_base / changed).resolve()
                if c.is_relative_to(board.dir.resolve()):
                    for file in glob.glob(os.path.join(board.dir, f"{board.name}*.yaml")):
                        with open(file, 'r') as f:
                            b = yaml.load(f.read(), Loader=SafeLoader)
                            matched_boards[b['identifier']] = board

        logging.info(f"found boards: {','.join(matched_boards.keys())}")
        # If modified file is caught by "find_boards" workflow (change in "boards" dir AND
        # board recognized) it means a proper testing scope for this file was found and
        # this file can be removed from further consideration
        for _, board in matched_boards.items():
            self.resolved_files.extend(
                list(filter(lambda f: str(board.dir.relative_to(zephyr_base)) in f,
                            resolved_files)))

        _options = []
        if len(matched_boards) > 20:
            logging.warning(f"{len(matched_boards)} boards changed, this looks like a global change, skipping test handling, revert to default.")
            self.full_twister = True
            return

        for board in matched_boards:
            _options.extend(["-p", board])

        if _options:
            logging.info('Potential board filters...')
            self.get_plan(_options)

    def find_tests(self):
        """Walk up from each modified file looking for a testcase.yaml /
        sample.yaml scope and schedule the matching test roots."""
        tests = set()
        for f in self.modified_files:
            if f.endswith(".rst"):
                continue
            d = os.path.dirname(f)
            scope_found = False
            while not scope_found and d:
                head, tail = os.path.split(d)
                if os.path.exists(os.path.join(d, "testcase.yaml")) or \
                        os.path.exists(os.path.join(d, "sample.yaml")):
                    tests.add(d)
                    # Modified file is treated as resolved, since a matching scope was found
                    self.resolved_files.append(f)
                    scope_found = True
                elif tail == "common":
                    # Look for yamls in directories collocated with common.
                    # Fix: the loop variable used to be named 'yaml', shadowing
                    # the yaml module within this function.
                    yamls_found = list(glob.iglob(head + '/**/testcase.yaml', recursive=True))
                    yamls_found.extend(glob.iglob(head + '/**/sample.yaml', recursive=True))
                    if yamls_found:
                        for yaml_path in yamls_found:
                            tests.add(os.path.dirname(yaml_path))
                        self.resolved_files.append(f)
                        scope_found = True
                    else:
                        d = os.path.dirname(d)
                else:
                    d = os.path.dirname(d)

        _options = []
        for t in tests:
            _options.extend(["-T", t])

        if len(tests) > self.tc_roots_th:
            logging.warning(f"{len(tests)} tests changed, this looks like a global change, skipping test handling, revert to default")
            self.full_twister = True
            return

        if _options:
            logging.info(f'Potential test filters...({len(tests)} changed...)')
            if self.platforms:
                for platform in self.platforms:
                    _options.extend(["-p", platform])
            self.get_plan(_options, use_testsuite_root=False)

    def find_tags(self):
        """Compute '-e <tag>' exclusions for tags whose file sets contain
        none of the modified files."""
        with open(self.tag_cfg_file, 'r') as ymlfile:
            tags_config = yaml.safe_load(ymlfile)

        tags = {}
        for t, x in tags_config.items():
            tag = Tag()
            tag.exclude = True
            tag.name = t
            # tag._match_fn(path) tests if the path matches files and/or
            # files-regex
            tag._match_fn = _get_match_fn(x.get("files"), x.get("files-regex"))
            # Like tag._match_fn(path), but for files-exclude and
            # files-regex-exclude
            tag._exclude_match_fn = \
                _get_match_fn(x.get("files-exclude"), x.get("files-regex-exclude"))
            tags[tag.name] = tag

        for f in self.modified_files:
            for t in tags.values():
                if t._contains(f):
                    t.exclude = False

        exclude_tags = set()
        for t in tags.values():
            if t.exclude:
                exclude_tags.add(t.name)

        for tag in exclude_tags:
            self.tag_options.extend(["-e", tag])

        if exclude_tags:
            logging.info(f'Potential tag based filters: {exclude_tags}')

    def find_excludes(self, skip=None):
        """Decide whether a full twister run is needed: any modified file
        that is neither resolved to a scope nor ignored triggers one.

        'skip' is kept for backward compatibility and is currently unused
        (it previously defaulted to a mutable []).
        """
        with open(self.ignore_path, "r") as twister_ignore:
            ignores = twister_ignore.read().splitlines()
        ignores = filter(lambda x: not x.startswith("#"), ignores)

        found = set()
        files_not_resolved = list(filter(lambda x: x not in self.resolved_files,
                                         self.modified_files))
        for pattern in ignores:
            if pattern:
                found.update(fnmatch.filter(files_not_resolved, pattern))

        logging.debug(found)
        logging.debug(files_not_resolved)

        # Full twister run can be ordered by detecting great number of tests/boards changed
        # or if not all modified files were resolved (corresponding scope found)
        self.full_twister = self.full_twister or sorted(files_not_resolved) != sorted(found)

        if self.full_twister:
            _options = []
            logging.info('Need to run full or partial twister...')
            if self.platforms:
                for platform in self.platforms:
                    _options.extend(["-p", platform])
                _options.extend(self.tag_options)
                self.get_plan(_options)
            else:
                _options.extend(self.tag_options)
                self.get_plan(_options, True)
        else:
            logging.info('No twister needed or partial twister run only...')
def parse_args():
    # Parses and returns this script's command-line arguments.
    # NOTE: default paths for --ignore-path/--alt-tags use the module-level
    # 'zephyr_base' global.
    parser = argparse.ArgumentParser(
                description="Generate twister argument files based on modified file",
                allow_abbrev=False)
    parser.add_argument('-c', '--commits', default=None,
            help="Commit range in the form: a..b")
    parser.add_argument('-m', '--modified-files', default=None,
            help="File with information about changed/deleted/added files.")
    parser.add_argument('-o', '--output-file', default="testplan.json",
            help="JSON file with the test plan to be passed to twister")
    parser.add_argument('-P', '--pull-request', action="store_true",
            help="This is a pull request")
    parser.add_argument('-p', '--platform', action="append",
            help="Limit this for a platform or a list of platforms.")
    parser.add_argument('-t', '--tests_per_builder', default=700, type=int,
            help="Number of tests per builder")
    parser.add_argument('-n', '--default-matrix', default=10, type=int,
            help="Number of tests per builder")
    parser.add_argument('--testcase-roots-threshold', default=20, type=int,
            help="Threshold value for number of modified testcase roots, up to which an optimized scope is still applied."
                 "When exceeded, full scope will be triggered")
    parser.add_argument('--detailed-test-id', action='store_true',
            help="Include paths to tests' locations in tests' names.")
    parser.add_argument("--no-detailed-test-id", dest='detailed_test_id', action="store_false",
            help="Don't put paths into tests' names.")
    parser.add_argument('-r', '--repo-to-scan', default=None,
            help="Repo to scan")
    parser.add_argument('--ignore-path',
            default=os.path.join(zephyr_base, 'scripts', 'ci', 'twister_ignore.txt'),
            help="Path to a text file with patterns of files to be matched against changed files")
    parser.add_argument('--alt-tags',
            default=os.path.join(zephyr_base, 'scripts', 'ci', 'tags.yaml'),
            help="Path to a file describing relations between directories and tags")
    parser.add_argument(
            "-T", "--testsuite-root", action="append", default=[],
            help="Base directory to recursively search for test cases. All "
                 "testcase.yaml files under here will be processed. May be "
                 "called multiple times. Defaults to the 'samples/' and "
                 "'tests/' directories at the base of the Zephyr tree.")
    parser.add_argument(
            "--quarantine-list", action="append", metavar="FILENAME",
            help="Load list of test scenarios under quarantine. The entries in "
                 "the file need to correspond to the test scenarios names as in "
                 "corresponding tests .yaml files. These scenarios "
                 "will be skipped with quarantine as the reason.")

    # Include paths in names by default.
    parser.set_defaults(detailed_test_id=True)

    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    files = []
    errors = 0

    if args.repo_to_scan:
        repository_path = Path(args.repo_to_scan)
        repo_to_scan = Repo(repository_path)

    # Collect the list of modified files: either from a git commit range or
    # from a JSON file supplied by CI.
    if args.commits:
        commit = repo_to_scan.git.diff("--name-only", args.commits)
        files = commit.split("\n")
    elif args.modified_files:
        with open(args.modified_files, "r") as fp:
            files = json.load(fp)

    if files:
        print("Changed files:\n=========")
        print("\n".join(files))
        print("=========")

    f = Filters(files, args.ignore_path, args.alt_tags, args.testsuite_root,
                args.pull_request, args.platform, args.detailed_test_id, args.quarantine_list,
                args.testcase_roots_threshold)
    f.process()

    # remove dupes and filtered cases
    dup_free = []
    dup_free_set = set()
    logging.info(f'Total tests gathered: {len(f.all_tests)}')
    for ts in f.all_tests:
        if ts.get('status') == 'filtered':
            continue
        n = ts.get("name")
        a = ts.get("arch")
        p = ts.get("platform")
        if ts.get('status') == 'error':
            logging.info(f"Error found: {n} on {p} ({ts.get('reason')})")
            errors += 1
        # Deduplicate by (name, arch, platform).
        if (n, a, p,) not in dup_free_set:
            dup_free.append(ts)
            dup_free_set.add((n, a, p,))

    logging.info(f'Total tests to be run: {len(dup_free)}')
    # Write CI sizing hints consumed by the build pipeline.
    with open(".testplan", "w") as tp:
        total_tests = len(dup_free)
        if total_tests and total_tests < args.tests_per_builder:
            nodes = 1
        else:
            nodes = round(total_tests / args.tests_per_builder)
        tp.write(f"TWISTER_TESTS={total_tests}\n")
        tp.write(f"TWISTER_NODES={nodes}\n")
        tp.write(f"TWISTER_FULL={f.full_twister}\n")
    logging.info(f'Total nodes to launch: {nodes}')

    # NOTE(review): 'header' is not referenced anywhere below in this file.
    header = ['test', 'arch', 'platform', 'status', 'extra_args', 'handler',
              'handler_time', 'used_ram', 'used_rom']

    # write plan
    if dup_free:
        data = {}
        data['testsuites'] = dup_free
        with open(args.output_file, 'w', newline='') as json_file:
            json.dump(data, json_file, indent=4, separators=(',',':'))

    # Exit status is the number of 'error' testsuites found.
    sys.exit(errors)
``` | /content/code_sandbox/scripts/ci/test_plan.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,323 |
```python
#!/usr/bin/env python3
'''
This script uses edtlib and the devicetree data in the build directory
to generate a CMake file which contains devicetree data.
That data can then be used in the rest of the build system.
The generated CMake file looks like this:
add_custom_target(devicetree_target)
set_target_properties(devicetree_target PROPERTIES
"DT_PROP|/soc|compatible" "vnd,soc;")
...
It defines a special CMake target, and saves various values in the
devicetree as CMake target properties.
Be careful:
"Property" here can refer to a CMake target property or a
DTS property. DTS property values are stored inside
CMake target properties, along with other devicetree data.
The build system includes this generated file early on, so
devicetree values can be used at CMake processing time.
Access is not done directly, but with Zephyr CMake extension APIs,
like this:
# sets 'compat' to "vnd,soc" in CMake
dt_prop(compat PATH "/soc" PROPERTY compatible INDEX 0)
This is analogous to how DTS values are encoded as C macros,
which can be read in source code using C APIs like
DT_PROP(node_id, foo) from devicetree.h.
'''
import argparse
import os
import pickle
import sys
from collections import defaultdict
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'python-devicetree',
'src'))
def parse_args():
    """Parse and return this script's command-line arguments."""
    ap = argparse.ArgumentParser(allow_abbrev=False)
    for flag, desc in (
            ("--cmake-out", "path to write the CMake property file"),
            ("--edt-pickle", "path to read the pickled edtlib.EDT object from")):
        ap.add_argument(flag, required=True, help=desc)
    return ap.parse_args()
def main():
    """Load the pickled edtlib.EDT object and write the devicetree CMake
    property file (one set_target_properties() line per property)."""
    args = parse_args()

    with open(args.edt_pickle, 'rb') as f:
        edt = pickle.load(f)

    # In what looks like an undocumented implementation detail, CMake
    # target properties are stored in a C++ standard library map whose
    # keys and values are each arbitrary strings, so we can use whatever
    # we want as target property names.
    #
    # We use '|' as a field separator below because it's not a valid
    # character in DTS node paths or property names, which lets us store
    # the "real" paths and property names without the
    # lowercase-and-underscores conversion we have to do in C.
    #
    # If CMake adds restrictions on target property names later, we can
    # just tweak the generated file to use a more restrictive property
    # encoding, perhaps reusing the same style documented in macros.bnf
    # for C macros.
    cmake_props = []

    # /chosen nodes. (Iterate items() instead of re-indexing the mapping.)
    for name, node in edt.chosen_nodes.items():
        cmake_props.append(f'"DT_CHOSEN|{name}" "{node.path}"')

    # The separate loop over edt.nodes here is meant to keep
    # all of the alias-related properties in one place.
    for node in edt.nodes:
        for alias in node.aliases:
            cmake_props.append(f'"DT_ALIAS|{alias}" "{node.path}"')

    compatible2paths = defaultdict(list)
    for node in edt.nodes:
        cmake_props.append(f'"DT_NODE|{node.path}" TRUE')

        for label in node.labels:
            cmake_props.append(f'"DT_NODELABEL|{label}" "{node.path}"')

        for item in node.props:
            prop = node.props[item]
            # We currently do not support phandles for edt -> cmake conversion.
            if "phandle" in prop.type:
                continue

            if "array" in prop.type:
                # Convert array to a ';'-separated CMake list
                # (trailing ';' kept for compatibility with existing users).
                cmake_value = ''.join(f'{val};' for val in prop.val)
            else:
                cmake_value = prop.val

            # Encode node's property 'item' as a CMake target property
            # with a name like 'DT_PROP|<path>|<property>'.
            cmake_props.append(f'"DT_PROP|{node.path}|{item}" "{cmake_value}"')

            if item == 'compatible':
                # compatibles is always an array
                for comp in prop.val:
                    compatible2paths[comp].append(node.path)

        if node.regs is not None:
            cmake_props.append(f'"DT_REG|{node.path}|NUM" "{len(node.regs)}"')
            # 'NONE' marks a missing address/size; trailing ';' kept as before.
            cmake_addr = ''.join(
                'NONE;' if reg.addr is None else f'{hex(reg.addr)};'
                for reg in node.regs)
            cmake_size = ''.join(
                'NONE;' if reg.size is None else f'{hex(reg.size)};'
                for reg in node.regs)
            cmake_props.append(f'"DT_REG|{node.path}|ADDR" "{cmake_addr}"')
            cmake_props.append(f'"DT_REG|{node.path}|SIZE" "{cmake_size}"')

    # One DT_COMP property per compatible, value is the ';'-joined node paths
    # (no trailing ';', matching the previous strip-last-char behavior).
    for comp, paths in compatible2paths.items():
        cmake_props.append(f'"DT_COMP|{comp}" "{";".join(paths)}"')

    with open(args.cmake_out, "w", encoding="utf-8") as cmake_file:
        print('add_custom_target(devicetree_target)', file=cmake_file)
        print(file=cmake_file)
        for prop in cmake_props:
            print(
                f'set_target_properties(devicetree_target PROPERTIES {prop})',
                file=cmake_file
            )
# Script entry point.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/dts/gen_dts_cmake.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,336 |
```python
#!/usr/bin/env python3
#
#
import argparse
import os
import sys
import re
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'python-devicetree',
'src'))
def binding_paths(bindings_dirs):
    """Return the paths of all binding files (.yaml/.yml) found by
    recursively walking every directory in 'bindings_dirs'."""
    found = []
    for bindings_dir in bindings_dirs:
        for root, _, filenames in os.walk(bindings_dir):
            found.extend(os.path.join(root, name)
                         for name in filenames
                         if name.endswith((".yaml", ".yml")))
    return found
def parse_args():
    """Parse and return this script's command-line arguments."""
    ap = argparse.ArgumentParser(allow_abbrev=False)
    ap.add_argument("--kconfig-out", required=True,
                    help="path to write the Kconfig file")
    ap.add_argument("--bindings-dirs", nargs='+', required=True,
                    help="directory with bindings in YAML format, "
                         "we allow multiple")
    return ap.parse_args()
def printfile(s):
    # Print 's' to the output Kconfig file. 'kconfig_file' is a module-level
    # global bound inside main() while the output file is open.
    print(s, file=kconfig_file)
def str2ident(s):
    """Convert 's' to a form suitable for (part of) an identifier:
    uppercase it and map '-', ',', '.', '@', '/' and '+' to '_'."""
    table = str.maketrans("-,.@/+", "______")
    return s.upper().translate(table)
def compat2kconfig(compat):
    """Emit the Kconfig snippet for one 'compatible' string: a DT_COMPAT_*
    variable plus a DT_HAS_*_ENABLED symbol reflecting whether any enabled
    node has that compatible.

    Writes via printfile() to the global output file.
    """
    compat_ident = str2ident(compat)

    # Fix: dropped the pointless f-string prefixes on literals with no
    # placeholders.
    printfile('')
    printfile(f'DT_COMPAT_{compat_ident} := {compat}')
    printfile('')
    printfile(f'config DT_HAS_{compat_ident}_ENABLED')
    printfile(f'\tdef_bool $(dt_compat_enabled,$(DT_COMPAT_{compat_ident}))')
def main():
    # Collect every 'compatible' string from the binding files, then write
    # the generated Kconfig. 'kconfig_file' is global so printfile() /
    # compat2kconfig() can write to it.
    global kconfig_file

    args = parse_args()

    compat_list = []
    for binding_path in binding_paths(args.bindings_dirs):
        with open(binding_path, encoding="utf-8") as f:
            contents = f.read()

        try:
            # Parsed PyYAML output (Python lists/dictionaries/strings/etc.,
            # representing the file)
            raw = yaml.load(contents, Loader=SafeLoader)
        except yaml.YAMLError as e:
            # Invalid YAML in a bindings dir is only a warning, not fatal.
            print(f"WARNING: '{binding_path}' appears in binding "
                  f"directories but isn't valid YAML: {e}")
            continue
        # Bindings without a 'compatible' key (e.g. include fragments)
        # don't produce Kconfig entries.
        if raw is None or 'compatible' not in raw:
            continue
        compat_list.append(raw['compatible'])

    # Remove any duplicates and sort the list
    compat_list = sorted(set(compat_list))

    with open(args.kconfig_out, "w", encoding="utf-8") as kconfig_file:
        printfile(f'# Generated devicetree Kconfig')
        printfile(f'#')
        for c in compat_list:
            compat2kconfig(c)
# Script entry point.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/dts/gen_driver_kconfig_dts.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 646 |
```python
import yaml
import ijson
import json
import re
import argparse
import xlsxwriter
class Json_report:
    # Accumulated per-component test-plan structure; populated by
    # parse_testplan(). NOTE(review): these are class-level (shared)
    # attributes, not per-instance — confirm single-instance usage.
    json_object = {
        "components":[]
    }
    # Platform name fragments treated as simulation targets (substring
    # match against a testsuite's platform string).
    simulators = [
        'unit_testing',
        'native',
        'qemu',
        'mps2/an385'
    ]
    # Final report structure produced by generate_json_report().
    report_json = {}
    def __init__(self):
        # Drives the whole report generation from the command line:
        # parse the test plan, load MAINTAINERS, merge in coverage data and
        # write the output in the requested format(s).
        # NOTE(review): parse_args() is not defined in this part of the file;
        # presumably a module-level helper — confirm.
        args = parse_args()
        self.parse_testplan(args.testplan)
        self.maintainers_file = self.get_maintainers_file( args.maintainers)
        self.report_json = self.generate_json_report( args.coverage)
        if args.format == "json":
            self.save_json_report( args.output, self.report_json)
        elif args.format == "xlsx":
            self.generate_xlsx_report(self.report_json, args.output)
        elif args.format == "all":
            self.save_json_report( args.output, self.report_json)
            self.generate_xlsx_report(self.report_json, args.output)
        else:
            print("Format incorrect")
def get_maintainers_file(self, maintainers):
maintainers_file = ""
with open(maintainers, 'r') as file:
maintainers_file = yaml.safe_load(file)
file.close()
return maintainers_file
    def parse_testplan(self, testplan_path):
        # Streams the twister testplan JSON and builds self.json_object:
        # components -> sub_components -> test_suites -> test_cases.
        # Component / sub-component names are derived from the first two
        # dot-separated fields of the testcase identifier. A suite's
        # 'status' is 'sim_only', 'hw_only' or 'mixed' depending on which
        # platforms (see self.simulators) it was planned for.
        # Only testcases with status None (not yet run/filtered) are kept.
        with open(testplan_path, 'r') as file:
            parser = ijson.items(file, 'testsuites')
            for element in parser:
                for testsuite in element:
                    for testcase in testsuite['testcases']:
                        if testcase['status'] is None:
                            testcase_name = testcase['identifier']
                            # Component = identifier up to the first '.'.
                            component_name = testcase_name[:testcase_name.find('.')]
                            component = {
                                "name": component_name,
                                "sub_components":[],
                                "files":[]
                            }
                            features = self.json_object['components']
                            known_component_flag = False
                            for item in features:
                                if component_name == item['name']:
                                    component = item
                                    known_component_flag = True
                                    break
                            # Sub-component = second dot-separated field.
                            sub_component_name = testcase_name[testcase_name.find('.'):]
                            sub_component_name = sub_component_name[1:]
                            if sub_component_name.find(".") > 0:
                                sub_component_name = sub_component_name[:sub_component_name.find(".")]
                            if known_component_flag is False:
                                # New component: build the whole chain.
                                sub_component = {
                                    "name":sub_component_name,
                                    "test_suites":[]
                                }
                                test_suite = {
                                    "name":testsuite['name'],
                                    "path":testsuite['path'],
                                    "platforms":[],
                                    "runnable": testsuite['runnable'],
                                    "status":"",
                                    "test_cases":[]
                                }
                                test_case = {
                                    "name":testcase_name
                                }
                                if any(platform in testsuite['platform'] for platform in self.simulators):
                                    if test_suite['status'] == "":
                                        test_suite['status'] = 'sim_only'
                                    if test_suite['status'] == 'hw_only':
                                        test_suite['status'] = 'mixed'
                                else:
                                    if test_suite['status'] == "":
                                        test_suite['status'] = 'hw_only'
                                    if test_suite['status'] == 'sim_only':
                                        test_suite['status'] = 'mixed'
                                test_suite['test_cases'].append(test_case)
                                test_suite['platforms'].append(testsuite['platform'])
                                sub_component["test_suites"].append(test_suite)
                                component['sub_components'].append(sub_component)
                                self.json_object['components'].append(component)
                            else:
                                # Known component: find or create sub-component.
                                sub_component = {}
                                sub_components = component['sub_components']
                                known_sub_component_flag = False
                                for i_sub_component in sub_components:
                                    if sub_component_name == i_sub_component['name']:
                                        sub_component = i_sub_component
                                        known_sub_component_flag = True
                                        break
                                if known_sub_component_flag is False:
                                    sub_component = {
                                        "name":sub_component_name,
                                        "test_suites":[]
                                    }
                                    test_suite = {
                                        "name":testsuite['name'],
                                        "path":testsuite['path'],
                                        "platforms":[],
                                        "runnable": testsuite['runnable'],
                                        "status":"",
                                        "test_cases":[]
                                    }
                                    test_case = {
                                        "name": testcase_name
                                    }
                                    if any(platform in testsuite['platform'] for platform in self.simulators):
                                        if test_suite['status'] == "":
                                            test_suite['status'] = 'sim_only'
                                        if test_suite['status'] == 'hw_only':
                                            test_suite['status'] = 'mixed'
                                    else:
                                        if test_suite['status'] == "":
                                            test_suite['status'] = 'hw_only'
                                        if test_suite['status'] == 'sim_only':
                                            test_suite['status'] = 'mixed'
                                    test_suite['test_cases'].append(test_case)
                                    test_suite['platforms'].append(testsuite['platform'])
                                    sub_component["test_suites"].append(test_suite)
                                    component['sub_components'].append(sub_component)
                                else:
                                    # Known sub-component: find or create suite.
                                    test_suite = {}
                                    test_suites = sub_component['test_suites']
                                    known_testsuite_flag = False
                                    for i_testsuite in test_suites:
                                        if testsuite['name'] == i_testsuite['name']:
                                            test_suite = i_testsuite
                                            known_testsuite_flag = True
                                            break
                                    if known_testsuite_flag is False:
                                        test_suite = {
                                            "name":testsuite['name'],
                                            "path":testsuite['path'],
                                            "platforms":[],
                                            "runnable": testsuite['runnable'],
                                            "status":"",
                                            "test_cases":[]
                                        }
                                        test_case = {
                                            "name": testcase_name
                                        }
                                        if any(platform in testsuite['platform'] for platform in self.simulators):
                                            if test_suite['status'] == "":
                                                test_suite['status'] = 'sim_only'
                                            if test_suite['status'] == 'hw_only':
                                                test_suite['status'] = 'mixed'
                                        else:
                                            if test_suite['status'] == "":
                                                test_suite['status'] = 'hw_only'
                                            if test_suite['status'] == 'sim_only':
                                                test_suite['status'] = 'mixed'
                                        test_suite['test_cases'].append(test_case)
                                        test_suite['platforms'].append(testsuite['platform'])
                                        sub_component["test_suites"].append(test_suite)
                                    else:
                                        # Known suite: update status and
                                        # deduplicate the testcase entry.
                                        if any(platform in testsuite['platform'] for platform in self.simulators):
                                            if test_suite['status'] == "":
                                                test_suite['status'] = 'sim_only'
                                            if test_suite['status'] == 'hw_only':
                                                test_suite['status'] = 'mixed'
                                        else:
                                            if test_suite['status'] == "":
                                                test_suite['status'] = 'hw_only'
                                            if test_suite['status'] == 'sim_only':
                                                test_suite['status'] = 'mixed'
                                        test_case = {}
                                        test_cases = test_suite['test_cases']
                                        known_testcase_flag = False
                                        for i_testcase in test_cases:
                                            if testcase_name == i_testcase['name']:
                                                test_case = i_testcase
                                                known_testcase_flag = True
                                                break
                                        if known_testcase_flag is False:
                                            test_case = {
                                                "name":testcase_name
                                            }
                                            test_suite['test_cases'].append(test_case)
        # Redundant: the 'with' block above already closed the file.
        file.close()
def get_files_from_maintainers_file(self, component_name):
files_path = []
for item in self.maintainers_file:
_found_flag = False
try:
tests = self.maintainers_file[item].get('tests', [])
for i_test in tests:
if component_name in i_test:
_found_flag = True
if _found_flag is True:
for path in self.maintainers_file[item]['files']:
path = path.replace('*','.*')
files_path.append(path)
except TypeError:
print("ERROR: Fail while parsing MAINTAINERS file at %s", component_name)
return files_path
    def generate_json_report(self, coverage):
        # Merge the coverage JSON (streamed with ijson) into the component
        # tree built by parse_testplan(): for every component, find the
        # covered files whose paths match the MAINTAINERS patterns for that
        # component and attach per-file coverage plus covered/uncovered
        # function lists. Components with no MAINTAINERS patterns get an
        # explanatory Comment instead of files.
        output_json = {
            "components":[]
        }
        with open(coverage, 'r') as file:
            parser = ijson.items(file, 'files')
            for element in parser:
                for i_json_component in self.json_object['components']:
                    json_component = {}
                    json_component["name"]=i_json_component["name"]
                    json_component["sub_components"] = i_json_component["sub_components"]
                    json_component["Comment"] = ""
                    files_path = []
                    files_path = self.get_files_from_maintainers_file(i_json_component["name"])
                    json_files = []
                    if len(files_path) != 0:
                        for i_file in files_path:
                            for i_covered_file in element:
                                # Substring-style regex match of the pattern
                                # against the covered file's path.
                                x = re.search(('.*'+i_file+'.*'), i_covered_file['file'])
                                if x:
                                    file_name = i_covered_file['file'][i_covered_file['file'].rfind('/')+1:]
                                    file_path = i_covered_file['file']
                                    file_coverage, file_lines, file_hit = self._calculate_coverage_of_file(i_covered_file)
                                    json_file = {
                                        "Name":file_name,
                                        "Path":file_path,
                                        "Lines": file_lines,
                                        "Hit":file_hit,
                                        "Coverage": file_coverage,
                                        "Covered_Functions": [],
                                        "Uncovered_Functions": []
                                    }
                                    # Split functions by execution count.
                                    for i_fun in i_covered_file['functions']:
                                        if i_fun['execution_count'] != 0:
                                            json_covered_funciton ={
                                                "Name":i_fun['name']
                                            }
                                            json_file['Covered_Functions'].append(json_covered_funciton)
                                    for i_fun in i_covered_file['functions']:
                                        if i_fun['execution_count'] == 0:
                                            json_uncovered_funciton ={
                                                "Name":i_fun['name']
                                            }
                                            json_file['Uncovered_Functions'].append(json_uncovered_funciton)
                                    # Avoid duplicate entries for a path that
                                    # matched more than one pattern.
                                    comp_exists = [x for x in json_files if x['Path'] == json_file['Path']]
                                    if not comp_exists:
                                        json_files.append(json_file)
                        json_component['files']=json_files
                        output_json['components'].append(json_component)
                    else:
                        json_component["files"] = []
                        json_component["Comment"] = "Missed in maintainers.yml file."
                        output_json['components'].append(json_component)
        return output_json
def _calculate_coverage_of_file(self, file):
tracked_lines = len(file['lines'])
covered_lines = 0
for line in file['lines']:
if line['count'] != 0:
covered_lines += 1
return ((covered_lines/tracked_lines)*100), tracked_lines, covered_lines
def save_json_report(self, output_path, json_object):
json_object = json.dumps(json_object, indent=4)
with open(output_path+'.json', "w") as outfile:
outfile.write(json_object)
def _find_char(self, path, str, n):
sep = path.split(str, n)
if len(sep) <= n:
return -1
return len(path) - len(sep[-1]) - len(str)
def _component_calculate_stats(self, json_component):
testsuites_count = 0
runnable_count = 0
build_only_count = 0
sim_only_count = 0
hw_only_count = 0
mixed_count = 0
for i_sub_component in json_component['sub_components']:
for i_testsuit in i_sub_component['test_suites']:
testsuites_count += 1
if i_testsuit['runnable'] is True:
runnable_count += 1
else:
build_only_count += 1
if i_testsuit['status'] == "hw_only":
hw_only_count += 1
elif i_testsuit['status'] == "sim_only":
sim_only_count += 1
else:
mixed_count += 1
return testsuites_count, runnable_count, build_only_count, sim_only_count, hw_only_count, mixed_count
    def _xlsx_generate_summary_page(self, workbook, json_report):
        # Build the 'Summary' worksheet: one row per component with suite
        # counts, aggregated coverage and function totals, plus a data-bar
        # on the coverage column.
        # formats
        header_format = workbook.add_format(
            {
                "bold": True,
                "fg_color": "#538DD5",
                "color":"white"
            }
        )
        cell_format = workbook.add_format(
            {
                'valign': 'vcenter'
            }
        )
        #generate summary page
        worksheet = workbook.add_worksheet('Summary')
        row = 0
        col = 0
        worksheet.write(row,col,"Components",header_format)
        worksheet.write(row,col+1,"TestSuites",header_format)
        worksheet.write(row,col+2,"Runnable",header_format)
        worksheet.write(row,col+3,"Build only",header_format)
        worksheet.write(row,col+4,"Simulators only",header_format)
        worksheet.write(row,col+5,"Hardware only",header_format)
        worksheet.write(row,col+6,"Mixed",header_format)
        worksheet.write(row,col+7,"Coverage [%]",header_format)
        worksheet.write(row,col+8,"Total Functions",header_format)
        worksheet.write(row,col+9,"Uncovered Functions",header_format)
        worksheet.write(row,col+10,"Comment",header_format)
        row = 1
        col = 0
        for item in json_report['components']:
            worksheet.write(row, col, item['name'],cell_format)
            testsuites,runnable,build_only,sim_only,hw_only, mixed= self._component_calculate_stats(item)
            worksheet.write(row,col+1,testsuites,cell_format)
            worksheet.write(row,col+2,runnable,cell_format)
            worksheet.write(row,col+3,build_only,cell_format)
            worksheet.write(row,col+4,sim_only,cell_format)
            worksheet.write(row,col+5,hw_only,cell_format)
            worksheet.write(row,col+6,mixed,cell_format)
            # Aggregate line/function coverage over the component's files.
            lines = 0
            hit = 0
            coverage = 0.0
            total_funs = 0
            uncovered_funs = 0
            for i_file in item['files']:
                lines += i_file['Lines']
                hit += i_file['Hit']
                total_funs += (len(i_file['Covered_Functions'])+len(i_file['Uncovered_Functions']))
                uncovered_funs += len(i_file['Uncovered_Functions'])
            if lines != 0:
                coverage = (hit/lines)*100
            worksheet.write_number(row,col+7,coverage,workbook.add_format({'num_format':'#,##0.00'}))
            worksheet.write_number(row,col+8,total_funs)
            worksheet.write_number(row,col+9,uncovered_funs)
            worksheet.write(row,col+10,item["Comment"],cell_format)
            row += 1
            col = 0
        # Data-bar visualization for the coverage column (0-100%).
        worksheet.conditional_format(1,col+7,row,col+7, {'type': 'data_bar',
                                                         'min_value': 0,
                                                         'max_value': 100,
                                                         'bar_color': '#3fd927',
                                                         'bar_solid': True,
                                                         })
        worksheet.autofit()
        worksheet.set_default_row(15)
def generate_xlsx_report(self, json_report, output):
    # Render 'json_report' as a multi-sheet workbook at '<output>.xlsx'.
    #
    # Produces a "Summary" sheet first (via _xlsx_generate_summary_page),
    # then one sheet per component containing: a per-file coverage table,
    # an "Uncovered Functions" table, and a per-sub-component test-suite
    # breakdown.
    #
    # NOTE(review): indentation reconstructed from a whitespace-stripped
    # dump; statement order is unchanged — confirm nesting against upstream.
    self.report_book = xlsxwriter.Workbook(output+".xlsx")
    # Shared cell formats: blue/white header, merged-range header, and a
    # vertically-centered default cell.
    header_format = self.report_book.add_format(
        {
            "bold": True,
            "fg_color": "#538DD5",
            "color":"white"
        }
    )
    # Create a format to use in the merged range.
    merge_format = self.report_book.add_format(
        {
            "bold": 1,
            "align": "center",
            "valign": "vcenter",
            "fg_color": "#538DD5",
            "color":"white"
        }
    )
    cell_format = self.report_book.add_format(
        {
            'valign': 'vcenter'
        }
    )
    self._xlsx_generate_summary_page(self.report_book, self.report_json)
    row = 0
    col = 0
    for item in json_report['components']:
        # One worksheet per component, named after the component.
        worksheet = self.report_book.add_worksheet(item['name'])
        row = 0
        col = 0
        # Per-file coverage table header.
        worksheet.write(row,col,"File Name",header_format)
        worksheet.write(row,col+1,"File Path",header_format)
        worksheet.write(row,col+2,"Coverage [%]",header_format)
        worksheet.write(row,col+3,"Lines",header_format)
        worksheet.write(row,col+4,"Hits",header_format)
        worksheet.write(row,col+5,"Diff",header_format)
        row += 1
        col = 0
        for i_file in item['files']:
            # Base name: text after the last '/'.
            worksheet.write(row,col,i_file['Path'][i_file['Path'].rfind('/')+1:],cell_format)
            # Path with the first three '/'-separated components stripped.
            worksheet.write(row,col+1,i_file["Path"][(self._find_char(i_file["Path"],'/',3)+1):],cell_format)
            worksheet.write_number(row,col+2,i_file["Coverage"],self.report_book.add_format({'num_format':'#,##0.00'}))
            worksheet.write(row,col+3,i_file["Lines"],cell_format)
            worksheet.write(row,col+4,i_file["Hit"],cell_format)
            # Diff = instrumented lines that were not hit.
            worksheet.write(row,col+5,i_file["Lines"]-i_file["Hit"],cell_format)
            row += 1
            col = 0
        row += 1
        col = 0
        # Data bar over the coverage column for all file rows.
        worksheet.conditional_format(1,col+2,row,col+2, {'type': 'data_bar',
                                                         'min_value': 0,
                                                         'max_value': 100,
                                                         'bar_color': '#3fd927',
                                                         'bar_solid': True,
                                                         })
        worksheet.merge_range(row,col,row,col+2, "Uncovered Functions", merge_format)
        row += 1
        worksheet.write(row,col,'Function Name',header_format)
        worksheet.write(row,col+1,'Implementation File',header_format)
        worksheet.write(row,col+2,'Comment',header_format)
        row += 1
        col = 0
        for i_file in item['files']:
            for i_uncov_fun in i_file['Uncovered_Functions']:
                worksheet.write(row,col,i_uncov_fun["Name"],cell_format)
                worksheet.write(row,col+1,i_file["Path"][self._find_char(i_file["Path"],'/',3)+1:],cell_format)
                # The "Comment" column is intentionally left blank here.
                row += 1
                col = 0
        row += 1
        col = 0
        # Sub-component test-suite statistics table.
        worksheet.write(row,col,"Components",header_format)
        worksheet.write(row,col+1,"Sub-Components",header_format)
        worksheet.write(row,col+2,"TestSuites",header_format)
        worksheet.write(row,col+3,"Runnable",header_format)
        worksheet.write(row,col+4,"Build only",header_format)
        worksheet.write(row,col+5,"Simulation only",header_format)
        worksheet.write(row,col+6,"Hardware only",header_format)
        worksheet.write(row,col+7,"Mixed",header_format)
        row += 1
        col = 0
        worksheet.write(row,col,item['name'],cell_format)
        for i_sub_component in item['sub_components']:
            testsuites_count = 0
            runnable_count = 0
            build_only_count = 0
            sim_only_count = 0
            hw_only_count = 0
            mixed_count = 0
            worksheet.write(row,col+1,i_sub_component['name'],cell_format)
            for i_testsuit in i_sub_component['test_suites']:
                testsuites_count += 1
                # Runnable vs. build-only partition.
                if i_testsuit['runnable'] is True:
                    runnable_count += 1
                else:
                    build_only_count += 1
                # Platform-class partition; assumed independent of the
                # 'runnable' partition above — TODO confirm nesting.
                if i_testsuit['status'] == "hw_only":
                    hw_only_count += 1
                elif i_testsuit['status'] == "sim_only":
                    sim_only_count += 1
                else:
                    mixed_count += 1
            worksheet.write(row,col+2,testsuites_count,cell_format)
            worksheet.write(row,col+3,runnable_count,cell_format)
            worksheet.write(row,col+4,build_only_count,cell_format)
            worksheet.write(row,col+5,sim_only_count,cell_format)
            worksheet.write(row,col+6,hw_only_count,cell_format)
            worksheet.write(row,col+7,mixed_count,cell_format)
            row += 1
            col = 0
        worksheet.autofit()
        worksheet.set_default_row(15)
    self.report_book.close()
def parse_args():
    """Parse and return the command-line arguments for the coverage tool.

    All options are required.  ``--format`` is now validated against the
    supported output formats (the help text already promised
    "(json, xlsx, all)" but any string used to be accepted), so an
    unsupported value fails fast at the CLI boundary with a clear
    argparse error instead of silently producing no report later.
    """
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('-m', '--maintainers',
                        help='Path to maintainers.yml [Required]', required=True)
    parser.add_argument('-t', '--testplan',
                        help='Path to testplan [Required]', required=True)
    parser.add_argument('-c', '--coverage',
                        help='Path to components file [Required]', required=True)
    parser.add_argument('-o', '--output',
                        help='Report name [Required]', required=True)
    parser.add_argument('-f', '--format',
                        choices=['json', 'xlsx', 'all'],
                        help='Output format (json, xlsx, all) [Required]',
                        required=True)
    return parser.parse_args()
# Script entry point.  Constructing Json_report (defined earlier in this
# file) appears to drive the whole parse/report flow from its initializer
# — NOTE(review): confirm, the class body is outside this view.
if __name__ == '__main__':
    Json_report()
``` | /content/code_sandbox/scripts/ci/coverage/coverage_analysis.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 4,537 |
```ini
[tox]
envlist=py3
[testenv]
deps =
setuptools-scm
pytest
types-PyYAML
mypy
setenv =
TOXTEMPDIR={envtmpdir}
commands =
python -m pytest {posargs:tests}
python -m mypy --config-file={toxinidir}/tox.ini --package=devicetree
[mypy]
mypy_path=src
ignore_missing_imports=True
``` | /content/code_sandbox/scripts/dts/python-devicetree/tox.ini | ini | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 100 |
```python
#
"""Setuptools configuration for the placeholder 'devicetree' PyPI package."""
import setuptools

long_description = '''
Placeholder
===========
This is just a placeholder for moving Zephyr's devicetree libraries
to PyPI.
'''

version = '0.0.2'

setuptools.setup(
    # TBD, just use these for now.
    author='Zephyr Project',
    author_email='devel@lists.zephyrproject.org',
    name='devicetree',
    version=version,
    description='Python libraries for devicetree',
    long_description=long_description,
    # path_to_url#what-s-the-official-mime-type-for-restructuredtext-data
    long_description_content_type="text/x-rst",
    # BUG FIX: this string literal was unterminated (missing closing quote
    # and trailing comma), which made the whole file a SyntaxError.
    url='path_to_url',
    packages=setuptools.find_packages(where='src'),
    package_dir={'': 'src'},
    classifiers=[
        'Programming Language :: Python :: 3 :: Only',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
    ],
    install_requires=[
        'PyYAML>=6.0',
    ],
    python_requires='>=3.6',
)
``` | /content/code_sandbox/scripts/dts/python-devicetree/setup.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 238 |
```unknown
/*
*
*/
// Used by testedtlib.py
/dts-v1/;
/ {
//
// Interrupts
//
interrupt-parent-test {
controller {
compatible = "interrupt-three-cell";
#interrupt-cells = <3>;
interrupt-controller;
};
node {
interrupts = <1 2 3 4 5 6>;
interrupt-names = "foo", "bar";
interrupt-parent = <&{/interrupt-parent-test/controller}>;
};
};
interrupts-extended-test {
controller-0 {
compatible = "interrupt-one-cell";
#interrupt-cells = <1>;
interrupt-controller;
};
controller-1 {
compatible = "interrupt-two-cell";
#interrupt-cells = <2>;
interrupt-controller;
};
controller-2 {
compatible = "interrupt-three-cell";
#interrupt-cells = <3>;
interrupt-controller;
};
node {
interrupts-extended = <
&{/interrupts-extended-test/controller-0} 1
&{/interrupts-extended-test/controller-1} 2 3
&{/interrupts-extended-test/controller-2} 4 5 6>;
};
};
interrupt-map-test {
#address-cells = <2>;
#size-cells = <0>;
controller-0 {
compatible = "interrupt-one-cell";
#address-cells = <1>;
#interrupt-cells = <1>;
interrupt-controller;
};
controller-1 {
compatible = "interrupt-two-cell";
#address-cells = <2>;
#interrupt-cells = <2>;
interrupt-controller;
};
controller-2 {
compatible = "interrupt-three-cell";
#address-cells = <3>;
#interrupt-cells = <3>;
interrupt-controller;
};
nexus {
#interrupt-cells = <2>;
interrupt-map = <
0 0 0 0 &{/interrupt-map-test/controller-0} 0 0
0 0 0 1 &{/interrupt-map-test/controller-1} 0 0 0 1
0 0 0 2 &{/interrupt-map-test/controller-2} 0 0 0 0 0 2
0 1 0 0 &{/interrupt-map-test/controller-0} 0 3
0 1 0 1 &{/interrupt-map-test/controller-1} 0 0 0 4
0 1 0 2 &{/interrupt-map-test/controller-2} 0 0 0 0 0 5>;
};
node@0 {
reg = <0 0>;
interrupts = <0 0 0 1 0 2>;
interrupt-parent = <&{/interrupt-map-test/nexus}>;
};
node@1 {
reg = <0 1>;
interrupts-extended = <
&{/interrupt-map-test/nexus} 0 0
&{/interrupt-map-test/nexus} 0 1
&{/interrupt-map-test/nexus} 0 2>;
};
};
interrupt-map-bitops-test {
#address-cells = <2>;
#size-cells = <0>;
controller {
compatible = "interrupt-two-cell";
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-controller;
};
nexus {
#interrupt-cells = <2>;
interrupt-map = <
6 6 6 6 &{/interrupt-map-bitops-test/controller} 2 1
>;
interrupt-map-mask = <0xE 0x7 0xE 0x7>;
// Not specified in the DT spec., but shows up due to
// common code with GPIO. Might as well test it here.
interrupt-map-pass-thru = <1 2 3 3>;
};
// Child unit specifier: 00000007 0000000E 00000007 0000000E
// Mask: 0000000E 00000007 0000000E 00000007
// Pass-thru: 00000001 00000002 00000003 00000003
node@70000000E {
reg = <0x7 0xE>;
interrupt-parent = <&{/interrupt-map-bitops-test/nexus}>;
interrupts = <0x7 0xE>;
};
};
//
// 'ranges'
//
ranges-zero-cells {
#address-cells = <0>;
node {
#address-cells = <0>;
#size-cells = <0>;
ranges;
};
};
ranges-zero-parent-cells {
#address-cells = <0>;
node {
#address-cells = <1>;
#size-cells = <0>;
ranges = <0xA>,
<0x1A>,
<0x2A>;
};
};
ranges-one-address-cells {
#address-cells = <0>;
node {
reg = <1>;
#address-cells = <1>;
ranges = <0xA 0xB>,
<0x1A 0x1B>,
<0x2A 0x2B>;
};
};
ranges-one-address-two-size-cells {
#address-cells = <0>;
node {
reg = <1>;
#address-cells = <1>;
#size-cells = <2>;
ranges = <0xA 0xB 0xC>,
<0x1A 0x1B 0x1C>,
<0x2A 0x2B 0x2C>;
};
};
ranges-two-address-cells {
#address-cells = <1>;
node@1 {
reg = <1 2>;
ranges = <0xA 0xB 0xC 0xD>,
<0x1A 0x1B 0x1C 0x1D>,
<0x2A 0x2B 0x2C 0x2D>;
};
};
ranges-two-address-two-size-cells {
#address-cells = <1>;
node@1 {
reg = <1 2>;
#size-cells = <2>;
ranges = <0xA 0xB 0xC 0xD 0xE>,
<0x1A 0x1B 0x1C 0x1D 0x1E>,
<0x2A 0x2B 0x2C 0x2D 0x1D>;
};
};
ranges-three-address-cells {
node@1 {
reg = <0 1 2>;
#address-cells = <3>;
ranges = <0xA 0xB 0xC 0xD 0xE 0xF>,
<0x1A 0x1B 0x1C 0x1D 0x1E 0x1F>,
<0x2A 0x2B 0x2C 0x2D 0x2E 0x2F>;
};
};
ranges-three-address-two-size-cells {
node@1 {
reg = <0 1 2>;
#address-cells = <3>;
#size-cells = <2>;
ranges = <0xA 0xB 0xC 0xD 0xE 0xF 0x10>,
<0x1A 0x1B 0x1C 0x1D 0x1E 0x1F 0x110>,
<0x2A 0x2B 0x2C 0x2D 0x2E 0x2F 0x210>;
};
};
//
// 'reg'
//
reg-zero-address-cells {
#address-cells = <0>;
#size-cells = <1>;
node {
reg = <1 2>;
reg-names = "foo", "bar";
};
};
reg-zero-size-cells {
#address-cells = <1>;
#size-cells = <0>;
node {
reg = <1 2>;
};
};
// Use implied #size-cells = <1>
reg-ranges {
#address-cells = <2>;
parent {
#address-cells = <1>;
ranges = <1 0xA 0xB 1 /* 1 -> 0xA 0xB */
2 0xC 0xD 2 /* 2..3 -> 0xC 0xD */
4 0xE 0xF 1 /* 4 -> 0xE 0xF */
>;
node {
reg = <5 1 /* Matches no range */
4 1 /* Matches third range */
3 1 /* Matches second range */
2 1 /* Matches second range */
1 1 /* Matches first range */
0 1 /* Matches no range */
>;
};
};
};
// Build up <3 2 1> address with nested 'ranges'
reg-nested-ranges {
#address-cells = <3>;
grandparent {
#address-cells = <2>;
#size-cells = <2>;
ranges = <0 0 3 0 0 2 2>;
parent {
#address-cells = <1>;
ranges = <0 2 0 2>;
node {
reg = <1 1>;
};
};
};
};
//
// 'pinctrl-<index>'
//
pinctrl {
dev {
pinctrl-0 = <>;
pinctrl-1 = <&{/pinctrl/pincontroller/state-1}>;
pinctrl-2 = <&{/pinctrl/pincontroller/state-1}
&{/pinctrl/pincontroller/state-2}>;
pinctrl-names = "zero", "one", "two";
};
pincontroller {
state-1 {
};
state-2 {
};
};
};
//
// For testing hierarchy.
//
parent {
child-1 {
};
child-2 {
grandchild {
};
};
};
//
// For testing 'include:'
//
binding-include {
compatible = "binding-include-test";
foo = <0>;
bar = <1>;
baz = <2>;
qaz = <3>;
child {
foo = <0>;
bar = <1>;
baz = <2>;
qaz = <3>;
};
};
//
// For testing Node.props (derived from 'properties:' in the binding)
//
props {
compatible = "props";
existent-boolean;
int = <1>;
array = <1 2 3>;
uint8-array = [ 12 34 ];
string = "foo";
string-array = "foo", "bar", "baz";
phandle-ref = < &{/ctrl-1} >;
phandle-refs = < &{/ctrl-1} &{/ctrl-2} >;
phandle-array-foos = < &{/ctrl-1} 1 &{/ctrl-2} 2 3 >;
foo-gpios = < &{/ctrl-1} 1 >;
path = &{/ctrl-1};
};
ctrl-1 {
compatible = "phandle-array-controller-1";
#phandle-array-foo-cells = <1>;
#gpio-cells = <1>;
};
ctrl-2 {
compatible = "phandle-array-controller-2";
#phandle-array-foo-cells = <2>;
};
props-2 {
compatible = "props";
phandle-array-foos = < &{/ctrl-0-1} 0 &{/ctrl-0-2} >;
phandle-array-foo-names = "a", "missing", "b";
};
ctrl-0-1 {
compatible = "phandle-array-controller-0";
#phandle-array-foo-cells = <0>;
};
ctrl-0-2 {
compatible = "phandle-array-controller-0";
#phandle-array-foo-cells = <0>;
};
//
// Test <prefix>-map, via gpio-map
//
gpio-map {
source {
compatible = "gpio-src";
foo-gpios = <&{/gpio-map/connector} 3 4
&{/gpio-map/connector} 1 2>;
};
connector {
#gpio-cells = <2>;
// Use different data lengths for source and
// destination to make it a bit trickier
gpio-map = <1 2 &{/gpio-map/destination} 5
3 4 &{/gpio-map/destination} 6>;
};
destination {
compatible = "gpio-dst";
gpio-controller;
#gpio-cells = <1>;
};
};
//
// For testing Node.props with 'default:' values in binding
//
defaults {
compatible = "defaults";
// Should override the 'default:' in the binding
default-not-used = <234>;
};
//
// For testing 'enum:'
//
enums {
compatible = "enums";
int-enum = <1>;
string-enum = "foo_bar";
tokenizable-enum = "123 is ok";
tokenizable-lower-enum = "bar";
no-enum = "baz";
};
//
// For testing 'bus:' and 'on-bus:'
//
buses {
// The 'node' nodes below will map to different bindings since
// they appear on different buses
foo-bus {
compatible = "foo-bus";
node1 {
compatible = "on-bus", "on-any-bus";
nested {
compatible = "on-bus";
};
};
node2 {
compatible = "on-any-bus", "on-bus";
};
};
bar-bus {
compatible = "bar-bus";
node {
compatible = "on-bus";
};
};
no-bus-node {
compatible = "on-any-bus";
};
};
//
// Node with 'child-binding:' in binding (along with a recursive
// 'child-binding:')
//
child-binding-dep {
};
child-binding {
compatible = "top-binding";
child-1 {
child-prop = <1>;
grandchild {
grandchild-prop = <2>;
grandchild-ref = < &{/child-binding-dep} >;
};
};
child-2 {
child-prop = <3>;
child-ref = < &{/child-binding-dep} >;
};
};
//
// zephyr,user binding inference
//
zephyr,user {
boolean;
bytes = [81 82 83];
number = <23>;
numbers = <1>, <2>, <3>;
string = "text";
strings = "a", "b", "c";
handle = <&{/ctrl-1}>;
phandles = <&{/ctrl-1}>, <&{/ctrl-2}>;
phandle-array-foos = <&{/ctrl-2} 1 2>;
};
//
// For testing that neither 'include: [foo.yaml, bar.yaml]' nor
// 'include: [bar.yaml, foo.yaml]' causes errors when one of the files
// has 'required: true' and the other 'required: false'
//
include-order {
node-1 {
compatible = "order-1";
foo = <1>;
};
node-2 {
compatible = "order-2";
foo = <2>;
};
};
//
// For testing deprecated property
//
test-deprecated {
compatible = "test-deprecated";
oldprop = <4>; /* deprecated property */
curprop = <5>;
};
//
// For testing deprecated features
//
deprecated {
compatible = "deprecated";
required = <1>;
required-2 = <2>;
#foo-cells = <2>;
sub-node {
foos = <&{/deprecated} 1 2>;
};
};
};
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test.dts | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,876 |
```python
#!/usr/bin/env python3
# This script uses edtlib to generate a header file from a devicetree
# (.dts) file. Information from binding files in YAML format is used
# as well.
#
# Bindings are files that describe devicetree nodes. Devicetree nodes are
# usually mapped to bindings via their 'compatible = "..."' property.
#
# See Zephyr's Devicetree user guide for details.
#
# Note: Do not access private (_-prefixed) identifiers from edtlib here (and
# also note that edtlib is not meant to expose the dtlib API directly).
# Instead, think of what API you need, and add it as a public documented API in
# edtlib. This will keep this script simple.
import argparse
from collections import defaultdict
import logging
import os
import pathlib
import pickle
import re
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'python-devicetree',
'src'))
from devicetree import edtlib
class LogFormatter(logging.Formatter):
    """Log formatter that prints the level name in lower case.

    Keeps the message prefix compatible with what earlier versions of
    edtlib printed ("warning: ...", "error: ...").
    """

    _FMT = '%(levelnamelower)s: %(message)s'

    def __init__(self):
        super().__init__(fmt=self._FMT)

    def format(self, record):
        # Attach a lower-cased copy of the level name so the format
        # string above can reference it.
        record.levelnamelower = record.levelname.lower()
        return super().format(record)
def main():
    # Entry point: parse the command line, build the edtlib.EDT model from
    # the DTS input, and emit the generated header plus auxiliary outputs.
    #
    # 'header_file' is the open output stream written by the out_*()
    # helpers; 'flash_area_num' numbers fixed-partitions children
    # file-wide (see write_fixed_partitions()).
    global header_file
    global flash_area_num

    args = parse_args()

    setup_edtlib_logging()

    vendor_prefixes = {}
    for prefixes_file in args.vendor_prefixes:
        vendor_prefixes.update(edtlib.load_vendor_prefixes_txt(prefixes_file))

    try:
        edt = edtlib.EDT(args.dts, args.bindings_dirs,
                         # Suppress this warning if it's suppressed in dtc
                         warn_reg_unit_address_mismatch=
                             "-Wno-simple_bus_reg" not in args.dtc_flags,
                         default_prop_types=True,
                         infer_binding_for_paths=["/zephyr,user"],
                         werror=args.edtlib_Werror,
                         vendor_prefixes=vendor_prefixes)
    except edtlib.EDTError as e:
        sys.exit(f"devicetree error: {e}")

    flash_area_num = 0

    # Save merged DTS source, as a debugging aid
    with open(args.dts_out, "w", encoding="utf-8") as f:
        print(edt.dts_source, file=f)

    # The raw index into edt.compat2nodes[compat] is used for node
    # instance numbering within a compatible.
    #
    # As a way to satisfy people's intuitions about instance numbers,
    # though, we sort this list so enabled instances come first.
    #
    # This might look like a hack, but it keeps drivers and
    # applications which don't use instance numbers carefully working
    # as expected, since e.g. instance number 0 is always the
    # singleton instance if there's just one enabled node of a
    # particular compatible.
    #
    # This doesn't violate any devicetree.h API guarantees about
    # instance ordering, since we make no promises that instance
    # numbers are stable across builds.
    for compat, nodes in edt.compat2nodes.items():
        edt.compat2nodes[compat] = sorted(
            nodes, key=lambda node: 0 if node.status == "okay" else 1)

    # Create the generated header.
    with open(args.header_out, "w", encoding="utf-8") as header_file:
        write_top_comment(edt)

        write_utils()

        # populate all z_path_id first so any children references will
        # work correctly.
        for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal):
            node.z_path_id = node_z_path_id(node)

        # Check to see if we have duplicate "zephyr,memory-region" property values.
        regions = dict()
        for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal):
            if 'zephyr,memory-region' in node.props:
                region = node.props['zephyr,memory-region'].val
                if region in regions:
                    sys.exit(f"ERROR: Duplicate 'zephyr,memory-region' ({region}) properties "
                             f"between {regions[region].path} and {node.path}")
                regions[region] = node

        # Emit all per-node macros in dependency order.
        for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal):
            write_node_comment(node)

            out_comment("Node's full path:")
            out_dt_define(f"{node.z_path_id}_PATH", f'"{escape(node.path)}"')

            out_comment("Node's name with unit-address:")
            out_dt_define(f"{node.z_path_id}_FULL_NAME",
                          f'"{escape(node.name)}"')

            if node.parent is not None:
                out_comment(f"Node parent ({node.parent.path}) identifier:")
                out_dt_define(f"{node.z_path_id}_PARENT",
                              f"DT_{node.parent.z_path_id}")

                out_comment(f"Node's index in its parent's list of children:")
                out_dt_define(f"{node.z_path_id}_CHILD_IDX",
                              node.parent.child_index(node))

            out_comment("Helpers for dealing with node labels:")
            out_dt_define(f"{node.z_path_id}_NODELABEL_NUM", len(node.labels))
            out_dt_define(f"{node.z_path_id}_FOREACH_NODELABEL(fn)",
                          " ".join(f"fn({nodelabel})" for nodelabel in node.labels))
            out_dt_define(f"{node.z_path_id}_FOREACH_NODELABEL_VARGS(fn, ...)",
                          " ".join(f"fn({nodelabel}, __VA_ARGS__)" for nodelabel in node.labels))

            write_children(node)
            write_dep_info(node)
            write_idents_and_existence(node)
            write_bus(node)
            write_special_props(node)
            write_vanilla_props(node)

        write_chosen(edt)
        write_global_macros(edt)

    if args.edt_pickle_out:
        write_pickled_edt(edt, args.edt_pickle_out)
def setup_edtlib_logging():
    """Route edtlib's log output to stderr at WARNING and above.

    The edtlib module emits logs using the standard 'logging' module;
    attach a stderr handler using LogFormatter (defined above) so each
    message keeps the traditional lower-case level prefix.
    """
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(LogFormatter())

    edtlib_logger = logging.getLogger('edtlib')
    edtlib_logger.setLevel(logging.WARNING)
    edtlib_logger.addHandler(stderr_handler)
def node_z_path_id(node):
    """Return the node-specific part of 'node's path identifier.

    - the root node's path "/" maps to "N"
    - "/foo" maps to "N_S_foo"
    - "/foo/bar" maps to "N_S_foo_S_bar"
    - "/foo/bar@123" maps to "N_S_foo_S_bar_123"

    This is used throughout this file to generate macros related to
    the node.
    """
    if node.parent is None:
        # Root node: no path components beyond "/".
        return "N"

    parts = ["N"]
    for component in node.path.split("/")[1:]:
        parts.append(f"S_{str2ident(component)}")
    return "_".join(parts)
def parse_args():
    """Parse and return the command-line arguments."""
    ap = argparse.ArgumentParser(allow_abbrev=False)

    # Inputs.
    ap.add_argument("--dts", required=True, help="DTS file")
    ap.add_argument("--dtc-flags",
                    help="'dtc' devicetree compiler flags, some of which "
                         "might be respected here")
    ap.add_argument("--bindings-dirs", nargs='+', required=True,
                    help="directory with bindings in YAML format, "
                         "we allow multiple")

    # Outputs.
    ap.add_argument("--header-out", required=True,
                    help="path to write header to")
    ap.add_argument("--dts-out", required=True,
                    help="path to write merged DTS source code to (e.g. "
                         "as a debugging aid)")
    ap.add_argument("--edt-pickle-out",
                    help="path to write pickled edtlib.EDT object to")

    # Validation / diagnostics knobs.
    ap.add_argument("--vendor-prefixes", action='append', default=[],
                    help="vendor-prefixes.txt path; used for validation; "
                         "may be given multiple times")
    ap.add_argument("--edtlib-Werror", action="store_true",
                    help="if set, edtlib-specific warnings become errors. "
                         "(this does not apply to warnings shared "
                         "with dtc.)")

    return ap.parse_args()
def write_top_comment(edt):
    # Writes an overview comment with misc. info at the top of the header and
    # configuration file.
    #
    # NOTE: the triple-quoted f-string bodies are flush-left on purpose —
    # their leading whitespace appears verbatim in the generated comment.
    # (Blank lines/indentation inside the literals reconstructed from a
    # whitespace-stripped dump — confirm against upstream.)

    s = f"""\
Generated by gen_defines.py

DTS input file:
  {edt.dts_path}

Directories with bindings:
  {", ".join(map(relativize, edt.bindings_dirs))}

Node dependency ordering (ordinal and path):
"""

    # One line per node, in dependency order.  A strongly-connected
    # component with more than one node is a dependency cycle — fatal.
    for scc in edt.scc_order:
        if len(scc) > 1:
            err("cycle in devicetree involving "
                + ", ".join(node.path for node in scc))
        s += f"  {scc[0].dep_ordinal:<3} {scc[0].path}\n"

    s += """
Definitions derived from these nodes in dependency order are next,
followed by /chosen nodes.
"""

    out_comment(s, blank_before=False)
def write_utils():
    # Writes utility macros used by the rest of the generated header.

    out_comment("Used to remove brackets from around a single argument")
    out_define("DT_DEBRACKET_INTERNAL(...)", "__VA_ARGS__")
def write_node_comment(node):
    # Writes a comment describing 'node' to the header and configuration file.
    #
    # NOTE: the f-string bodies are flush-left on purpose — their
    # whitespace appears verbatim in the generated comment.

    s = f"""\
Devicetree node: {node.path}

Node identifier: DT_{node.z_path_id}
"""

    if node.matching_compat:
        if node.binding_path:
            s += f"""
Binding (compatible = {node.matching_compat}):
  {relativize(node.binding_path)}
"""
        else:
            s += f"""
Binding (compatible = {node.matching_compat}):
  No yaml (bindings inferred from properties)
"""

    if node.description:
        # We used to put descriptions in the generated file, but
        # devicetree bindings now have pages in the HTML
        # documentation. Let users who are accustomed to digging
        # around in the generated file where to find the descriptions
        # now.
        #
        # Keeping them here would mean that the descriptions
        # themselves couldn't contain C multi-line comments, which is
        # inconvenient when we want to do things like quote snippets
        # of .dtsi files within the descriptions, or otherwise
        # include the string "*/".
        s += ("\n(Descriptions have moved to the Devicetree Bindings Index\n"
              "in the documentation.)\n")

    out_comment(s)
def relativize(path):
    """If 'path' lies within $ZEPHYR_BASE, return it relative to
    $ZEPHYR_BASE with a "$ZEPHYR_BASE/..." hint at the start of the
    string.  Otherwise, return 'path' unchanged."""
    zephyr_base = os.getenv("ZEPHYR_BASE")
    if zephyr_base is None:
        return path

    try:
        rel = pathlib.Path(path).relative_to(zephyr_base)
    except ValueError:
        # Not within ZEPHYR_BASE
        return path
    return str("$ZEPHYR_BASE" / rel)
def write_idents_and_existence(node):
    # Writes macros related to the node's aliases, labels, etc.,
    # as well as existence flags.  Each alternate identifier is defined
    # to expand to the node's canonical DT_<z_path_id> identifier.

    # Aliases
    idents = [f"N_ALIAS_{str2ident(alias)}" for alias in node.aliases]
    # Instances
    for compat in node.compats:
        instance_no = node.edt.compat2nodes[compat].index(node)
        idents.append(f"N_INST_{instance_no}_{str2ident(compat)}")
    # Node labels
    idents.extend(f"N_NODELABEL_{str2ident(label)}" for label in node.labels)

    out_comment("Existence and alternate IDs:")
    out_dt_define(node.z_path_id + "_EXISTS", 1)

    # Only determine maxlen if we have any idents
    if idents:
        maxlen = max(len("DT_" + ident) for ident in idents)
    # 'width=maxlen' aligns the values column in the generated header.
    for ident in idents:
        out_dt_define(ident, "DT_" + node.z_path_id, width=maxlen)
def write_bus(node):
    # Macros about the node's bus controller, if there is one.

    bus = node.bus_node
    if not bus:
        return

    out_comment(f"Bus info (controller: '{bus.path}', type: '{node.on_buses}')")

    # One flag per bus type the node appears on.
    for one_bus in node.on_buses:
        out_dt_define(f"{node.z_path_id}_BUS_{str2ident(one_bus)}", 1)

    # Identifier of the bus controller node itself.
    out_dt_define(f"{node.z_path_id}_BUS", f"DT_{bus.z_path_id}")
def write_special_props(node):
    # Writes required macros for special case properties, when the
    # data cannot otherwise be obtained from write_vanilla_props()
    # results.

    # Macros that are special to the devicetree specification
    out_comment("Macros for properties that are special in the specification:")
    write_regs(node)
    write_ranges(node)
    write_interrupts(node)
    write_compatibles(node)
    write_status(node)

    # Macros that are special to bindings inherited from Linux, which
    # we can't capture with the current bindings language.
    write_pinctrls(node)
    write_fixed_partitions(node)
    write_gpio_hogs(node)
def write_ranges(node):
    # ranges property: edtlib knows the right #address-cells and
    # #size-cells of parent and child, and can therefore pack the
    # child & parent addresses and sizes correctly.
    #
    # NOTE(review): loop nesting reconstructed from a whitespace-stripped
    # dump — confirm against upstream.

    idx_vals = []
    path_id = node.z_path_id

    if node.ranges is not None:
        idx_vals.append((f"{path_id}_RANGES_NUM", len(node.ranges)))

        for i,range in enumerate(node.ranges):
            idx_vals.append((f"{path_id}_RANGES_IDX_{i}_EXISTS", 1))

            if "pcie" in node.buses:
                # PCIe: the top cell of the child bus address is split
                # out as a separate FLAGS macro (see the shift below).
                idx_vals.append((f"{path_id}_RANGES_IDX_{i}_VAL_CHILD_BUS_FLAGS_EXISTS", 1))
                idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_CHILD_BUS_FLAGS"
                idx_value = range.child_bus_addr >> ((range.child_bus_cells - 1) * 32)
                idx_vals.append((idx_macro,
                                 f"{idx_value} /* {hex(idx_value)} */"))
            if range.child_bus_addr is not None:
                idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_CHILD_BUS_ADDRESS"
                if "pcie" in node.buses:
                    # Mask off the flags cell kept separately above.
                    idx_value = range.child_bus_addr & ((1 << (range.child_bus_cells - 1) * 32) - 1)
                else:
                    idx_value = range.child_bus_addr
                idx_vals.append((idx_macro,
                                 f"{idx_value} /* {hex(idx_value)} */"))
            if range.parent_bus_addr is not None:
                idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_PARENT_BUS_ADDRESS"
                idx_vals.append((idx_macro,
                                 f"{range.parent_bus_addr} /* {hex(range.parent_bus_addr)} */"))
            if range.length is not None:
                idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_LENGTH"
                idx_vals.append((idx_macro,
                                 f"{range.length} /* {hex(range.length)} */"))

    for macro, val in idx_vals:
        out_dt_define(macro, val)

    out_dt_define(f"{path_id}_FOREACH_RANGE(fn)",
                  " ".join(f"fn(DT_{path_id}, {i})" for i,range in enumerate(node.ranges)))
def write_regs(node):
    # reg property: edtlib knows the right #address-cells and
    # #size-cells, and can therefore pack the register base addresses
    # and sizes correctly.
    #
    # Both index-based (REG_IDX_<i>) and name-based (REG_NAME_<name>)
    # macros are emitted; name-based ones alias the index-based ones.
    # NOTE(review): loop nesting reconstructed from a whitespace-stripped
    # dump — confirm against upstream.

    idx_vals = []
    name_vals = []
    path_id = node.z_path_id

    if node.regs is not None:
        idx_vals.append((f"{path_id}_REG_NUM", len(node.regs)))

        for i, reg in enumerate(node.regs):
            idx_vals.append((f"{path_id}_REG_IDX_{i}_EXISTS", 1))
            if reg.addr is not None:
                idx_macro = f"{path_id}_REG_IDX_{i}_VAL_ADDRESS"
                idx_vals.append((idx_macro,
                                 f"{reg.addr} /* {hex(reg.addr)} */"))
                if reg.name:
                    name_vals.append((f"{path_id}_REG_NAME_{reg.name}_EXISTS", 1))
                    name_macro = f"{path_id}_REG_NAME_{reg.name}_VAL_ADDRESS"
                    name_vals.append((name_macro, f"DT_{idx_macro}"))
            if reg.size is not None:
                idx_macro = f"{path_id}_REG_IDX_{i}_VAL_SIZE"
                idx_vals.append((idx_macro,
                                 f"{reg.size} /* {hex(reg.size)} */"))
                if reg.name:
                    name_macro = f"{path_id}_REG_NAME_{reg.name}_VAL_SIZE"
                    name_vals.append((name_macro, f"DT_{idx_macro}"))

    for macro, val in idx_vals:
        out_dt_define(macro, val)
    for macro, val in name_vals:
        out_dt_define(macro, val)
def write_interrupts(node):
    # interrupts property: we have some hard-coded logic for interrupt
    # mapping here.
    #
    # TODO: can we push map_arm_gic_irq_type() out of Python and into C with
    # macro magic in devicetree.h?
    #
    # NOTE(review): loop nesting reconstructed from a whitespace-stripped
    # dump — confirm against upstream.

    def map_arm_gic_irq_type(irq, irq_num):
        # Maps ARM GIC IRQ (type)+(index) combo to linear IRQ number
        if "type" not in irq.data:
            err(f"Expected binding for {irq.controller!r} to have 'type' in "
                "interrupt-cells")
        irq_type = irq.data["type"]
        if irq_type == 0:  # GIC_SPI
            return irq_num + 32
        if irq_type == 1:  # GIC_PPI
            return irq_num + 16
        err(f"Invalid interrupt type specified for {irq!r}")

    idx_vals = []
    name_vals = []
    path_id = node.z_path_id

    if node.interrupts is not None:
        idx_vals.append((f"{path_id}_IRQ_NUM", len(node.interrupts)))

        for i, irq in enumerate(node.interrupts):
            # One macro per interrupt specifier cell.
            for cell_name, cell_value in irq.data.items():
                name = str2ident(cell_name)

                if cell_name == "irq":
                    if "arm,gic" in irq.controller.compats:
                        cell_value = map_arm_gic_irq_type(irq, cell_value)

                idx_vals.append((f"{path_id}_IRQ_IDX_{i}_EXISTS", 1))
                idx_macro = f"{path_id}_IRQ_IDX_{i}_VAL_{name}"
                idx_vals.append((idx_macro, cell_value))
                idx_vals.append((idx_macro + "_EXISTS", 1))
                if irq.name:
                    # Name-based macros alias the index-based ones.
                    name_macro = \
                        f"{path_id}_IRQ_NAME_{str2ident(irq.name)}_VAL_{name}"
                    name_vals.append((name_macro, f"DT_{idx_macro}"))
                    name_vals.append((name_macro + "_EXISTS", 1))

            idx_controller_macro = f"{path_id}_IRQ_IDX_{i}_CONTROLLER"
            idx_controller_path = f"DT_{irq.controller.z_path_id}"
            idx_vals.append((idx_controller_macro, idx_controller_path))
            if irq.name:
                name_controller_macro = f"{path_id}_IRQ_NAME_{str2ident(irq.name)}_CONTROLLER"
                name_vals.append((name_controller_macro, f"DT_{idx_controller_macro}"))

    # Interrupt controller info: walk up the controller chain (stopping
    # if a controller is its own parent) and emit the nesting depth.
    irqs = []
    while node.interrupts is not None and len(node.interrupts) > 0:
        irq = node.interrupts[0]
        irqs.append(irq)
        if node == irq.controller:
            break
        node = irq.controller
    idx_vals.append((f"{path_id}_IRQ_LEVEL", len(irqs)))

    for macro, val in idx_vals:
        out_dt_define(macro, val)
    for macro, val in name_vals:
        out_dt_define(macro, val)
def write_compatibles(node):
    # Writes a macro for each of the node's compatibles. We don't care
    # about whether edtlib / Zephyr's binding language recognizes
    # them. The compatibles the node provides are what is important.

    for i, compat in enumerate(node.compats):
        out_dt_define(
            f"{node.z_path_id}_COMPAT_MATCHES_{str2ident(compat)}", 1)

        # Vendor/model strings, when the vendor prefix is known.
        if node.edt.compat2vendor[compat]:
            out_dt_define(f"{node.z_path_id}_COMPAT_VENDOR_IDX_{i}_EXISTS", 1)
            out_dt_define(f"{node.z_path_id}_COMPAT_VENDOR_IDX_{i}",
                          quote_str(node.edt.compat2vendor[compat]))

        if node.edt.compat2model[compat]:
            out_dt_define(f"{node.z_path_id}_COMPAT_MODEL_IDX_{i}_EXISTS", 1)
            out_dt_define(f"{node.z_path_id}_COMPAT_MODEL_IDX_{i}",
                          quote_str(node.edt.compat2model[compat]))
def write_children(node):
    # Writes helper macros for dealing with node's children: child counts
    # plus FOREACH-style expansion macros (with/without separator, with/
    # without extra varargs, and status-okay-filtered variants).

    out_comment("Helper macros for child nodes of this node.")

    out_dt_define(f"{node.z_path_id}_CHILD_NUM", len(node.children))

    # Count children whose status is "okay".
    ok_nodes_num = 0
    for child in node.children.values():
        if child.status == "okay":
            ok_nodes_num = ok_nodes_num + 1

    out_dt_define(f"{node.z_path_id}_CHILD_NUM_STATUS_OKAY", ok_nodes_num)

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD(fn)",
                  " ".join(f"fn(DT_{child.z_path_id})" for child in
                           node.children.values()))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_SEP(fn, sep)",
                  " DT_DEBRACKET_INTERNAL sep ".join(f"fn(DT_{child.z_path_id})"
                                                     for child in node.children.values()))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_VARGS(fn, ...)",
                  " ".join(f"fn(DT_{child.z_path_id}, __VA_ARGS__)"
                           for child in node.children.values()))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_SEP_VARGS(fn, sep, ...)",
                  " DT_DEBRACKET_INTERNAL sep ".join(f"fn(DT_{child.z_path_id}, __VA_ARGS__)"
                                                     for child in node.children.values()))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_STATUS_OKAY(fn)",
                  " ".join(f"fn(DT_{child.z_path_id})"
                           for child in node.children.values() if child.status == "okay"))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_STATUS_OKAY_SEP(fn, sep)",
                  " DT_DEBRACKET_INTERNAL sep ".join(f"fn(DT_{child.z_path_id})"
                                                     for child in node.children.values() if child.status == "okay"))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_STATUS_OKAY_VARGS(fn, ...)",
                  " ".join(f"fn(DT_{child.z_path_id}, __VA_ARGS__)"
                           for child in node.children.values() if child.status == "okay"))

    out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_STATUS_OKAY_SEP_VARGS(fn, sep, ...)",
                  " DT_DEBRACKET_INTERNAL sep ".join(f"fn(DT_{child.z_path_id}, __VA_ARGS__)"
                                                     for child in node.children.values() if child.status == "okay"))
def write_status(node):
    """Write a macro flagging 'node's status, e.g. DT_..._STATUS_okay."""
    status_token = str2ident(node.status)
    out_dt_define(f"{node.z_path_id}_STATUS_{status_token}", 1)
def write_pinctrls(node):
    """Write special macros for the pinctrl-<index> and pinctrl-names
    properties of 'node'."""
    out_comment("Pin control (pinctrl-<i>, pinctrl-names) properties:")

    base = f"{node.z_path_id}_PINCTRL"
    out_dt_define(f"{base}_NUM", len(node.pinctrls))

    if not node.pinctrls:
        return

    for idx, pinctrl in enumerate(node.pinctrls):
        out_dt_define(f"{base}_IDX_{idx}_EXISTS", 1)

        if not pinctrl.name:
            continue

        name = pinctrl.name_as_token

        # We rely on edtlib guaranteeing that the pinctrl-<idx>
        # properties are contiguous, start from 0, and contain only
        # phandles.
        out_dt_define(f"{base}_IDX_{idx}_TOKEN", name)
        out_dt_define(f"{base}_IDX_{idx}_UPPER_TOKEN", name.upper())
        out_dt_define(f"{base}_NAME_{name}_EXISTS", 1)
        out_dt_define(f"{base}_NAME_{name}_IDX", idx)
        for conf_idx, conf_node in enumerate(pinctrl.conf_nodes):
            out_dt_define(f"{base}_NAME_{name}_IDX_{conf_idx}_PH",
                          f"DT_{conf_node.z_path_id}")
def write_fixed_partitions(node):
    """Allocate and write a unique DT_..._PARTITION_ID macro for 'node'
    if it is a child of a fixed-partitions node; otherwise do nothing.

    Uses the module-level flash_area_num counter to hand out IDs.
    """
    global flash_area_num

    parent = node.parent
    if not (parent and "fixed-partitions" in parent.compats):
        return

    out_comment("fixed-partitions identifier:")
    out_dt_define(f"{node.z_path_id}_PARTITION_ID", flash_area_num)
    flash_area_num += 1
def write_gpio_hogs(node):
    """Write special macros for the GPIO hog entries of 'node', if any."""
    base = f"{node.z_path_id}_GPIO_HOGS"

    macro2val = {}
    for idx, hog in enumerate(node.gpio_hogs):
        macro2val.update(controller_and_data_macros(hog, idx, base))

    if not macro2val:
        return

    out_comment("GPIO hog properties:")
    out_dt_define(f"{base}_EXISTS", 1)
    out_dt_define(f"{base}_NUM", len(node.gpio_hogs))
    for name, value in macro2val.items():
        out_dt_define(name, value)
def write_vanilla_props(node):
    """Generate DT_N_<node-id>_P_<prop-id>* macros for every property in
    'node's binding and write them to the header file."""
    # Writes macros for any and all properties defined in the
    # "properties" section of the binding for the node.
    #
    # This does generate macros for special properties as well, like
    # regs, etc. Just let that be rather than bothering to add
    # never-ending amounts of special case code here to skip special
    # properties. This function's macros can't conflict with
    # write_special_props() macros, because they're in different
    # namespaces. Special cases aren't special enough to break the rules.
    # NOTE: insertion order of macro2val determines output order, so the
    # macros below are collected in a fixed sequence per property.
    macro2val = {}
    for prop_name, prop in node.props.items():
        prop_id = str2ident(prop_name)
        macro = f"{node.z_path_id}_P_{prop_id}"
        val = prop2value(prop)
        if val is not None:
            # DT_N_<node-id>_P_<prop-id>
            macro2val[macro] = val
        if prop.spec.type == 'string':
            # DT_N_<node-id>_P_<prop-id>_IDX_<i>_STRING_UNQUOTED
            macro2val[macro + "_STRING_UNQUOTED"] = prop.val
            # DT_N_<node-id>_P_<prop-id>_IDX_<i>_STRING_TOKEN
            macro2val[macro + "_STRING_TOKEN"] = prop.val_as_token
            # DT_N_<node-id>_P_<prop-id>_IDX_<i>_STRING_UPPER_TOKEN
            macro2val[macro + "_STRING_UPPER_TOKEN"] = prop.val_as_token.upper()
            # DT_N_<node-id>_P_<prop-id>_IDX_0:
            # DT_N_<node-id>_P_<prop-id>_IDX_0_EXISTS:
            # Allows treating the string like a degenerate case of a
            # string-array of length 1.
            macro2val[macro + "_IDX_0"] = quote_str(prop.val)
            macro2val[macro + "_IDX_0_EXISTS"] = 1
        if prop.enum_index is not None:
            # DT_N_<node-id>_P_<prop-id>_ENUM_IDX
            macro2val[macro + "_ENUM_IDX"] = prop.enum_index
            spec = prop.spec
            if spec.enum_tokenizable:
                as_token = prop.val_as_token
                # DT_N_<node-id>_P_<prop-id>_ENUM_VAL_<val>_EXISTS 1
                macro2val[macro + f"_ENUM_VAL_{as_token}_EXISTS"] = 1
                # DT_N_<node-id>_P_<prop-id>_ENUM_TOKEN
                macro2val[macro + "_ENUM_TOKEN"] = as_token
                if spec.enum_upper_tokenizable:
                    # DT_N_<node-id>_P_<prop-id>_ENUM_UPPER_TOKEN
                    macro2val[macro + "_ENUM_UPPER_TOKEN"] = as_token.upper()
            else:
                # DT_N_<node-id>_P_<prop-id>_ENUM_VAL_<val>_EXISTS 1
                macro2val[macro + f"_ENUM_VAL_{prop.val}_EXISTS"] = 1
        if "phandle" in prop.type:
            macro2val.update(phandle_macros(prop, macro))
        elif "array" in prop.type:
            for i, subval in enumerate(prop.val):
                # DT_N_<node-id>_P_<prop-id>_IDX_<i>
                # DT_N_<node-id>_P_<prop-id>_IDX_<i>_EXISTS
                if isinstance(subval, str):
                    macro2val[macro + f"_IDX_{i}"] = quote_str(subval)
                    subval_as_token = edtlib.str_as_token(subval)
                    # DT_N_<node-id>_P_<prop-id>_IDX_<i>_STRING_UNQUOTED
                    macro2val[macro + f"_IDX_{i}_STRING_UNQUOTED"] = subval
                    # DT_N_<node-id>_P_<prop-id>_IDX_<i>_STRING_TOKEN
                    macro2val[macro + f"_IDX_{i}_STRING_TOKEN"] = subval_as_token
                    # DT_N_<node-id>_P_<prop-id>_IDX_<i>_STRING_UPPER_TOKEN
                    macro2val[macro + f"_IDX_{i}_STRING_UPPER_TOKEN"] = subval_as_token.upper()
                else:
                    macro2val[macro + f"_IDX_{i}"] = subval
                macro2val[macro + f"_IDX_{i}_EXISTS"] = 1
        plen = prop_len(prop)
        if plen is not None:
            # DT_N_<node-id>_P_<prop-id>_FOREACH_PROP_ELEM
            macro2val[f"{macro}_FOREACH_PROP_ELEM(fn)"] = \
                ' \\\n\t'.join(
                    f'fn(DT_{node.z_path_id}, {prop_id}, {i})'
                    for i in range(plen))
            # DT_N_<node-id>_P_<prop-id>_FOREACH_PROP_ELEM_SEP
            macro2val[f"{macro}_FOREACH_PROP_ELEM_SEP(fn, sep)"] = \
                ' DT_DEBRACKET_INTERNAL sep \\\n\t'.join(
                    f'fn(DT_{node.z_path_id}, {prop_id}, {i})'
                    for i in range(plen))
            # DT_N_<node-id>_P_<prop-id>_FOREACH_PROP_ELEM_VARGS
            macro2val[f"{macro}_FOREACH_PROP_ELEM_VARGS(fn, ...)"] = \
                ' \\\n\t'.join(
                    f'fn(DT_{node.z_path_id}, {prop_id}, {i}, __VA_ARGS__)'
                    for i in range(plen))
            # DT_N_<node-id>_P_<prop-id>_FOREACH_PROP_ELEM_SEP_VARGS
            macro2val[f"{macro}_FOREACH_PROP_ELEM_SEP_VARGS(fn, sep, ...)"] = \
                ' DT_DEBRACKET_INTERNAL sep \\\n\t'.join(
                    f'fn(DT_{node.z_path_id}, {prop_id}, {i}, __VA_ARGS__)'
                    for i in range(plen))
            # DT_N_<node-id>_P_<prop-id>_LEN
            macro2val[macro + "_LEN"] = plen
        # DT_N_<node-id>_P_<prop-id>_EXISTS
        macro2val[f"{macro}_EXISTS"] = 1
    if macro2val:
        out_comment("Generic property macros:")
        for macro, val in macro2val.items():
            out_dt_define(macro, val)
    else:
        out_comment("(No generic property macros)")
def write_dep_info(node):
    """Write macros with dependency-ordinal information about 'node'."""
    def fmt_dep_list(dep_list):
        # Renders a list of nodes as "<ordinal>, /* <path> */" entries,
        # sorted by ordinal for predictable output, or a placeholder
        # comment when the list is empty.
        if not dep_list:
            return "/* nothing */"
        ordered = sorted(dep_list, key=lambda n: n.dep_ordinal)
        entries = (f"{n.dep_ordinal}, /* {n.path} */" for n in ordered)
        return "\\\n\t" + " \\\n\t".join(entries)

    out_comment("Node's dependency ordinal:")
    out_dt_define(f"{node.z_path_id}_ORD", node.dep_ordinal)
    out_dt_define(f"{node.z_path_id}_ORD_STR_SORTABLE",
                  f"{node.dep_ordinal:0>5}")

    out_comment("Ordinals for what this node depends on directly:")
    out_dt_define(f"{node.z_path_id}_REQUIRES_ORDS",
                  fmt_dep_list(node.depends_on))

    out_comment("Ordinals for what depends directly on this node:")
    out_dt_define(f"{node.z_path_id}_SUPPORTS_ORDS",
                  fmt_dep_list(node.required_by))
def prop2value(prop):
    """Return the macro value for property 'prop', if there is a single
    well-defined C rvalue that it can be represented as.

    Returns None if there isn't one (phandle, phandles, phandle-array,
    path, and compound properties have no single rvalue).
    """
    def init_with_hex(vals):
        return list2init(f"{val} /* {hex(val)} */" for val in vals)

    handlers = {
        "string": lambda: quote_str(prop.val),
        "int": lambda: prop.val,
        "boolean": lambda: 1 if prop.val else 0,
        "array": lambda: init_with_hex(prop.val),
        "uint8-array": lambda: init_with_hex(prop.val),
        "string-array": lambda: list2init(quote_str(val) for val in prop.val),
    }
    handler = handlers.get(prop.type)
    return handler() if handler else None
def prop_len(prop):
    """Return the property's length if and only if a _LEN macro should be
    generated for it; otherwise return None.

    The set of types handled here coincides with the allowable types for
    DT_PROP_LEN(); if it changes, update that macro's doxygen string and
    make sure DT_FOREACH_PROP_ELEM() works for the new types too.

    "ranges", "dma-ranges", "reg" and "interrupts" are deliberately
    excluded: their raw array lengths are semantically meaningless due to
    #address-cells/#size-cells (and #interrupt-cells), and special-purpose
    macros exist for the number of register blocks/interrupt specifiers.
    Excluding them makes DT_PROP_LEN(node_id, ...) fail fast at the
    devicetree.h layer with a build error, forcing users to the right
    macros.
    """
    # phandle is treated as a phandles of length 1, and string as a
    # string-array of length 1.
    if prop.type in ("phandle", "string"):
        return 1

    countable_types = {"array", "uint8-array", "string-array",
                       "phandles", "phandle-array"}
    excluded_names = {"ranges", "dma-ranges", "reg", "interrupts"}
    if prop.type in countable_types and prop.name not in excluded_names:
        return len(prop.val)

    return None
def phandle_macros(prop, macro):
    """Return a dict of macros for phandle-typed property 'prop'.

    'macro' is the N_<node-id>_P_<prop-id> bit.

    These are special because their values can't be serialized without
    label properties, which Zephyr is moving away from (good for humans,
    bad for code size and boot time). The slightly odd naming lets
    devicetree.h use the same macros for phandle, phandles and
    phandle-array.
    """
    ret = {}
    if prop.type == "phandle":
        # A phandle is treated as a phandles with fixed length 1.
        target = f"DT_{prop.val.z_path_id}"
        ret[macro] = target
        ret[f"{macro}_IDX_0"] = target
        ret[f"{macro}_IDX_0_PH"] = target
        ret[f"{macro}_IDX_0_EXISTS"] = 1
    elif prop.type == "phandles":
        for i, node in enumerate(prop.val):
            target = f"DT_{node.z_path_id}"
            ret[f"{macro}_IDX_{i}"] = target
            ret[f"{macro}_IDX_{i}_PH"] = target
            ret[f"{macro}_IDX_{i}_EXISTS"] = 1
    elif prop.type == "phandle-array":
        for i, entry in enumerate(prop.val):
            if entry is None:
                # Unspecified element: this index does not point at a
                # ControllerAndData value, but later indices may.
                ret[f"{macro}_IDX_{i}_EXISTS"] = 0
            else:
                ret.update(controller_and_data_macros(entry, i, macro))
    return ret
def controller_and_data_macros(entry, i, macro):
    """Return macros for one ControllerAndData value 'entry' at index 'i'.

    Helper for phandle_macros(): writes the "controller" reference and
    the associated specifier-cell data macros, plus the by-name variants
    when the entry is named.
    """
    ret = {
        # DT_N_<node-id>_P_<prop-id>_IDX_<i>_EXISTS
        f"{macro}_IDX_{i}_EXISTS": 1,
        # DT_N_<node-id>_P_<prop-id>_IDX_<i>_PH
        f"{macro}_IDX_{i}_PH": f"DT_{entry.controller.z_path_id}",
    }
    # DT_N_<node-id>_P_<prop-id>_IDX_<i>_VAL_<VAL>
    for cell, val in entry.data.items():
        cell_ident = str2ident(cell)
        ret[f"{macro}_IDX_{i}_VAL_{cell_ident}"] = val
        ret[f"{macro}_IDX_{i}_VAL_{cell_ident}_EXISTS"] = 1

    if not entry.name:
        return ret

    name = str2ident(entry.name)
    # DT_N_<node-id>_P_<prop-id>_IDX_<i>_NAME
    ret[f"{macro}_IDX_{i}_NAME"] = quote_str(entry.name)
    # DT_N_<node-id>_P_<prop-id>_NAME_<NAME>_PH
    ret[f"{macro}_NAME_{name}_PH"] = f"DT_{entry.controller.z_path_id}"
    # DT_N_<node-id>_P_<prop-id>_NAME_<NAME>_EXISTS
    ret[f"{macro}_NAME_{name}_EXISTS"] = 1
    # DT_N_<node-id>_P_<prop-id>_NAME_<NAME>_VAL_<VAL>
    for cell, val in entry.data.items():
        cell_ident = str2ident(cell)
        ret[f"{macro}_NAME_{name}_VAL_{cell_ident}"] = \
            f"DT_{macro}_IDX_{i}_VAL_{cell_ident}"
        ret[f"{macro}_NAME_{name}_VAL_{cell_ident}_EXISTS"] = 1
    return ret
def write_chosen(edt):
    """Write DT_CHOSEN_* macros for the /chosen node's entries."""
    out_comment("Chosen nodes\n")

    chosen = {}
    for name, node in edt.chosen_nodes.items():
        ident = str2ident(name)
        chosen[f"DT_CHOSEN_{ident}"] = f"DT_{node.z_path_id}"
        chosen[f"DT_CHOSEN_{ident}_EXISTS"] = 1

    # Align all values to the longest macro name for readability.
    max_len = max((len(macro) for macro in chosen), default=0)
    for macro, value in chosen.items():
        out_define(macro, value, width=max_len)
def write_global_macros(edt):
    """Write tree-wide macros: whole-tree node iteration helpers,
    per-compatible "okay" instance counts and for-each helpers,
    fixed-partitions label macros, and bus information."""
    # Global or tree-wide information, such as number of instances
    # with status "okay" for each compatible, is printed here.
    out_comment("Macros for iterating over all nodes and enabled nodes")
    out_dt_define("FOREACH_HELPER(fn)",
                  " ".join(f"fn(DT_{node.z_path_id})" for node in edt.nodes))
    out_dt_define("FOREACH_OKAY_HELPER(fn)",
                  " ".join(f"fn(DT_{node.z_path_id})" for node in edt.nodes
                           if node.status == "okay"))
    out_dt_define("FOREACH_VARGS_HELPER(fn, ...)",
                  " ".join(f"fn(DT_{node.z_path_id}, __VA_ARGS__)" for node in edt.nodes))
    out_dt_define("FOREACH_OKAY_VARGS_HELPER(fn, ...)",
                  " ".join(f"fn(DT_{node.z_path_id}, __VA_ARGS__)" for node in edt.nodes
                           if node.status == "okay"))
    n_okay_macros = {}
    for_each_macros = {}
    compat2buses = defaultdict(list)  # just for "okay" nodes
    for compat, okay_nodes in edt.compat2okay.items():
        for node in okay_nodes:
            buses = node.on_buses
            for bus in buses:
                if bus is not None and bus not in compat2buses[compat]:
                    compat2buses[compat].append(bus)
        ident = str2ident(compat)
        n_okay_macros[f"DT_N_INST_{ident}_NUM_OKAY"] = len(okay_nodes)
        # Helpers for non-INST for-each macros that take node
        # identifiers as arguments.
        for_each_macros[f"DT_FOREACH_OKAY_{ident}(fn)"] = \
            " ".join(f"fn(DT_{node.z_path_id})"
                     for node in okay_nodes)
        for_each_macros[f"DT_FOREACH_OKAY_VARGS_{ident}(fn, ...)"] = \
            " ".join(f"fn(DT_{node.z_path_id}, __VA_ARGS__)"
                     for node in okay_nodes)
        # Helpers for INST versions of for-each macros, which take
        # instance numbers. We emit separate helpers for these because
        # avoiding an intermediate node_id --> instance number
        # conversion in the preprocessor helps to keep the macro
        # expansions simpler. That hopefully eases debugging.
        for_each_macros[f"DT_FOREACH_OKAY_INST_{ident}(fn)"] = \
            " ".join(f"fn({edt.compat2nodes[compat].index(node)})"
                     for node in okay_nodes)
        for_each_macros[f"DT_FOREACH_OKAY_INST_VARGS_{ident}(fn, ...)"] = \
            " ".join(f"fn({edt.compat2nodes[compat].index(node)}, __VA_ARGS__)"
                     for node in okay_nodes)
    # Legacy-style label macros for fixed-partitions children.
    for compat, nodes in edt.compat2nodes.items():
        for node in nodes:
            if compat == "fixed-partitions":
                for child in node.children.values():
                    if "label" in child.props:
                        label = child.props["label"].val
                        macro = f"COMPAT_{str2ident(compat)}_LABEL_{str2ident(label)}"
                        val = f"DT_{child.z_path_id}"
                        out_dt_define(macro, val)
                        out_dt_define(macro + "_EXISTS", 1)
    out_comment('Macros for compatibles with status "okay" nodes\n')
    for compat, okay_nodes in edt.compat2okay.items():
        if okay_nodes:
            out_define(f"DT_COMPAT_HAS_OKAY_{str2ident(compat)}", 1)
    out_comment('Macros for status "okay" instances of each compatible\n')
    for macro, value in n_okay_macros.items():
        out_define(macro, value)
    for macro, value in for_each_macros.items():
        out_define(macro, value)
    out_comment('Bus information for status "okay" nodes of each compatible\n')
    for compat, buses in compat2buses.items():
        for bus in buses:
            out_define(
                f"DT_COMPAT_{str2ident(compat)}_BUS_{str2ident(bus)}", 1)
def str2ident(s):
    """Convert 's' to a form suitable for (part of) a C identifier:
    lowercase it and map each of the characters -,.@/+ to '_'."""
    return s.lower().translate(str.maketrans("-,.@/+", "______"))
def list2init(l):
    """Convert 'l', an iterable of strings, to a C array initializer."""
    return f"{{{', '.join(l)}}}"
def out_dt_define(macro, val, width=None, deprecation_msg=None):
    """Write "#define DT_<macro> <val>" to the header file and return the
    full macro name (with the leading "DT_").

    'width': if given, the macro name is left-justified to this many
    characters, with the value following immediately after; otherwise
    out_define() decides the whitespace.

    'deprecation_msg': if given, the macro warns when used, via
    __WARN(<deprecation_msg>).
    """
    full_name = "DT_" + macro
    out_define(full_name, val, width=width, deprecation_msg=deprecation_msg)
    return full_name
def out_define(macro, val, width=None, deprecation_msg=None):
    """Helper for out_dt_define(): write "#define <macro> <val>" to the
    header file, optionally left-justifying the name to 'width' and
    appending a __WARN() deprecation marker."""
    if deprecation_msg:
        warn = f' __WARN("{deprecation_msg}")'
    else:
        warn = ""

    name_field = macro.ljust(width) if width else macro
    print(f"#define {name_field}{warn} {val}", file=header_file)
def out_comment(s, blank_before=True):
    """Write 's' as a C comment to the header file. 's' may span multiple
    lines; blank_before=True also emits a blank line before the comment."""
    if blank_before:
        print(file=header_file)

    if "\n" not in s:
        # Single-line comments look like
        #
        #   /* foo bar */
        print("/* " + s + " */", file=header_file)
        return

    # Multi-line comments look like
    #
    #   /*
    #    * first line
    #    * second line
    #    *
    #    * empty line before this line
    #    */
    body = ["/*"]
    for line in s.splitlines():
        # No trailing space after '*' on empty lines: such spaces turn
        # red in Vim with space error checking on, which is annoying.
        body.append(" * " + line if line.strip() else " *")
    body.append(" */")
    print("\n".join(body), file=header_file)
def escape(s):
    """Backslash-escape any backslashes and double quotes in 's'."""
    # A single translate() pass maps each character independently, so
    # there is no risk of double-escaping the added backslashes.
    return s.translate(str.maketrans({"\\": "\\\\", '"': '\\"'}))
def quote_str(s):
    """Return 's' wrapped in double quotes, with any double quotes and
    backslashes inside it backslash-escaped."""
    return '"' + escape(s) + '"'
def write_pickled_edt(edt, out_file):
    """Write the edt object in pickle format to 'out_file'.

    Pickle protocol 4 is used deliberately: it was introduced in Python
    3.4 and is the default as of 3.8, so it is available on every Python
    version Zephyr supports, and pinning one protocol avoids
    reproducibility differences between installations.
    """
    with open(out_file, 'wb') as pickle_file:
        pickle.dump(edt, pickle_file, protocol=4)
def err(s):
    """Raise an Exception with message 's' (fatal-error helper)."""
    raise Exception(s)
# Run the generator only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
``` | /content/code_sandbox/scripts/dts/gen_defines.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,785 |
```unknown
/*
*
*/
/*
* Used by test_edtlib.py. Dedicated file for testing having multiple binding
* directories.
*/
/dts-v1/;
/ {
in-dir-1 {
compatible = "in-dir-1";
};
in-dir-2 {
compatible = "in-dir-2";
};
};
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-multidir.dts | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 69 |
```python
import contextlib
import os
import re
import tempfile
from copy import deepcopy
from typing import Optional
import pytest
from devicetree import dtlib
# Test suite for dtlib.py.
#
# Run it using pytest (https://docs.pytest.org/):
#
# $ pytest tests/test_dtlib.py
#
# Extra options you can pass to pytest for debugging:
#
# - to stop on the first failure with shorter traceback output,
# use '-x --tb=native'
# - to drop into a debugger on failure, use '--pdb'
# - to run a particular test function or functions, use
# '-k test_function_pattern_goes_here'
def parse(dts, include_path=(), **kwargs):
    '''Parse the DTS string 'dts' and return the resulting dtlib.DT.

    'include_path' and any extra kwargs are forwarded to DT(). The
    source is written to a temporary file, which is always removed
    afterwards, even if parsing fails.'''
    handle, filename = tempfile.mkstemp(prefix='pytest-', suffix='.dts')
    try:
        os.write(handle, dts.encode('utf-8'))
        return dtlib.DT(filename, include_path, **kwargs)
    finally:
        os.close(handle)
        os.unlink(filename)
def verify_parse(dts, expected, include_path=()):
    '''Like parse(), but also verifies that str() of the parsed DT object
    equals expected[1:-1], and returns the DT object.

    The [1:] (and [1:-1]) trimming is so that the literals' first and
    last lines can sit on separate lines after the triple quotes, as is
    done throughout this file.'''
    dt = parse(dts[1:], include_path)
    assert str(dt) == expected[1:-1], f'unexpected round-trip on {dts}'
    return dt
def verify_error(dts, expected_msg):
    '''Verify that parsing 'dts' raises a DTError whose message is
    exactly 'expected_msg'.'''
    with dtlib_raises(expected_msg):
        parse(dts[1:])
def verify_error_endswith(dts, expected_msg):
    '''
    Like verify_error(), but checks the message ends with
    'expected_msg' instead of checking for strict equality.
    Useful when the message starts with a temporary file name.
    '''
    with dtlib_raises(err_endswith=expected_msg):
        parse(dts[1:])
def verify_error_matches(dts, expected_re):
    '''
    Like verify_error(), but checks the message fully matches regular
    expression 'expected_re' instead of checking for strict equality.
    (Uses re.fullmatch() semantics; see dtlib_raises().)
    '''
    with dtlib_raises(err_matches=expected_re):
        parse(dts[1:])
@contextlib.contextmanager
def temporary_chdir(dirname):
    '''A context manager that runs its body with 'dirname' as the current
    working directory.

    The previous working directory is unconditionally restored when the
    context manager exits, even if the body raised.
    '''
    saved_cwd = os.getcwd()
    try:
        os.chdir(dirname)
        yield
    finally:
        os.chdir(saved_cwd)
@contextlib.contextmanager
def dtlib_raises(err: Optional[str] = None,
                 err_endswith: Optional[str] = None,
                 err_matches: Optional[str] = None):
    '''A context manager for running a block of code that should raise
    DTError. Exactly one of 'err', 'err_endswith' and 'err_matches'
    must be given (truthy). Semantics:

    - err: error message must be exactly this
    - err_endswith: error message must end with this
    - err_matches: error message must fully match this regular expression
    '''
    given = [bool(err), bool(err_endswith), bool(err_matches)]
    assert sum(given) == 1

    with pytest.raises(dtlib.DTError) as excinfo:
        yield

    message = str(excinfo.value)
    if err:
        assert message == err
    elif err_endswith:
        assert message.endswith(err_endswith)
    else:
        assert re.fullmatch(err_matches, message), \
            f'actual message:\n{message!r}\n' \
            f'does not match:\n{err_matches!r}'
def test_invalid_nodenames():
    '''Node names use a stricter character set than property names.'''
    # Regression test that verifies node names are not matched against
    # the more permissive set of rules used for property names.
    verify_error_endswith("""
/dts-v1/;
/ { node? {}; };
""",
"/node?: bad character '?' in node name")
def test_cell_parsing():
    '''Miscellaneous properties containing zero or more cells.

    Covers integer suffixes and bases, /bits/ element sizes, character
    literals inside cell arrays, and out-of-range errors.'''
    verify_parse("""
/dts-v1/;

/ {
	a;
	b = < >;
	c = [ ];
	d = < 10 20 >;
	e = < 0U 1L 2UL 3LL 4ULL >;
	f = < 0x10 0x20 >;
	g = < 010 020 >;
	h = /bits/ 8 < 0x10 0x20 (-1) >;
	i = /bits/ 16 < 0x10 0x20 (-1) >;
	j = /bits/ 32 < 0x10 0x20 (-1) >;
	k = /bits/ 64 < 0x10 0x20 (-1) >;
	l = < 'a' 'b' 'c' >;
};
""",
"""
/dts-v1/;

/ {
	a;
	b;
	c;
	d = < 0xa 0x14 >;
	e = < 0x0 0x1 0x2 0x3 0x4 >;
	f = < 0x10 0x20 >;
	g = < 0x8 0x10 >;
	h = [ 10 20 FF ];
	i = /bits/ 16 < 0x10 0x20 0xffff >;
	j = < 0x10 0x20 0xffffffff >;
	k = /bits/ 64 < 0x10 0x20 0xffffffffffffffff >;
	l = < 0x61 0x62 0x63 >;
};
""")
    verify_error_endswith("""
/dts-v1/;

/ {
	a = /bits/ 16 < 0x10000 >;
};
""",
":4 (column 18): parse error: 65536 does not fit in 16 bits")
    verify_error_endswith("""
/dts-v1/;

/ {
	a = < 0x100000000 >;
};
""",
":4 (column 8): parse error: 4294967296 does not fit in 32 bits")
    verify_error_endswith("""
/dts-v1/;

/ {
	a = /bits/ 128 < 0 >;
};
""",
":4 (column 13): parse error: expected 8, 16, 32, or 64")
def test_bytes_parsing():
    '''Properties with byte array ([ ... ]) values.

    Hex digits may be contiguous or separated by whitespace.'''
    verify_parse("""
/dts-v1/;

/ {
	a = [ ];
	b = [ 12 34 ];
	c = [ 1234 ];
};
""",
"""
/dts-v1/;

/ {
	a;
	b = [ 12 34 ];
	c = [ 12 34 ];
};
""")
    verify_error_endswith("""
/dts-v1/;

/ {
	a = [ 123 ];
};
""",
":4 (column 10): parse error: expected two-digit byte or ']'")
def test_string_parsing():
    '''Properties with string values, including escape sequences.'''
    verify_parse(r"""
/dts-v1/;

/ {
	a = "";
	b = "ABC";
	c = "\\\"\xab\377\a\b\t\n\v\f\r";
};
""",
r"""
/dts-v1/;

/ {
	a = "";
	b = "ABC";
	c = "\\\"\xab\xff\a\b\t\n\v\f\r";
};
""")
    verify_error_endswith(r"""
/dts-v1/;

/ {
	a = "\400";
};
""",
":4 (column 6): parse error: octal escape out of range (> 255)")
def test_char_literal_parsing():
    '''Properties with character literal values (only valid inside cell
    arrays, and only for single characters).'''
    verify_parse(r"""
/dts-v1/;

/ {
	a = < '\'' >;
	b = < '\x12' >;
};
""",
"""
/dts-v1/;

/ {
	a = < 0x27 >;
	b = < 0x12 >;
};
""")
    verify_error_endswith("""
/dts-v1/;

/ {
	// Character literals are not allowed at the top level
	a = 'x';
};
""",
":5 (column 6): parse error: malformed value")
    verify_error_endswith("""
/dts-v1/;

/ {
	a = < '' >;
};
""",
":4 (column 7): parse error: character literals must be length 1")
    verify_error_endswith("""
/dts-v1/;

/ {
	a = < '12' >;
};
""",
":4 (column 7): parse error: character literals must be length 1")
def test_incbin(tmp_path):
    '''Test /incbin/, an undocumented feature that allows for
    binary file inclusion, with optional offset and size arguments.'''
    open(tmp_path / "tmp_bin", "wb").write(b"\00\01\02\03")
    verify_parse(f"""
/dts-v1/;

/ {{
	a = /incbin/ ("{tmp_path}/tmp_bin");
	b = /incbin/ ("{tmp_path}/tmp_bin", 1, 1);
	c = /incbin/ ("{tmp_path}/tmp_bin", 1, 2);
}};
""",
"""
/dts-v1/;

/ {
	a = [ 00 01 02 03 ];
	b = [ 01 ];
	c = [ 01 02 ];
};
""")
    # /incbin/ also searches the include path.
    verify_parse("""
/dts-v1/;

/ {
	a = /incbin/ ("tmp_bin");
};
""",
"""
/dts-v1/;

/ {
	a = [ 00 01 02 03 ];
};
""",
    include_path=(tmp_path,))
    # Missing binary files are a parse error.
    verify_error_endswith(r"""
/dts-v1/;

/ {
	a = /incbin/ ("missing");
};
""",
":4 (column 25): parse error: 'missing' could not be found")
def test_node_merging():
    '''
    Labels and properties specified for the same node in different
    statements should be merged; later statements may refer to labels
    attached by earlier ones.
    '''
    verify_parse("""
/dts-v1/;

/ {
	l1: l2: l1: foo {
		foo1 = [ 01 ];
		l4: l5: bar {
			bar1 = [ 01 ];
		};
	};
};
l3: &l1 {
	foo2 = [ 02 ];
	l6: l7: bar {
		bar2 = [ 02 ];
	};
};
&l3 {
	foo3 = [ 03 ];
};
&{/foo} {
	foo4 = [ 04 ];
};
&{/foo/bar} {
	bar3 = [ 03 ];
	l8: baz {};
};
/ {
};
/ {
	top = [ 01 ];
};
""",
"""
/dts-v1/;

/ {
	top = [ 01 ];
	l1: l2: l3: foo {
		foo1 = [ 01 ];
		foo2 = [ 02 ];
		foo3 = [ 03 ];
		foo4 = [ 04 ];
		l4: l5: l6: l7: bar {
			bar1 = [ 01 ];
			bar2 = [ 02 ];
			bar3 = [ 03 ];
			l8: baz {
			};
		};
	};
};
""")
    verify_error_endswith("""
/dts-v1/;

/ {
};

&missing {
};
""",
":6 (column 1): parse error: undefined node label 'missing'")
    verify_error_endswith("""
/dts-v1/;

/ {
};

&{foo} {
};
""",
":6 (column 1): parse error: node path 'foo' does not start with '/'")
    verify_error_endswith("""
/dts-v1/;

/ {
};

&{/foo} {
};
""",
":6 (column 1): parse error: component 'foo' in path '/foo' does not exist")
def test_property_labels():
    '''Like nodes, properties can have labels too; labels on the same
    property in different statements are merged.'''
    def verify_label2prop(label, expected):
        # Check that 'label' maps to the property named 'expected'.
        actual = dt.label2prop[label].name
        assert actual == expected, f"label '{label}' mapped to wrong property"
    dt = verify_parse("""
/dts-v1/;

/ {
	a;
	b;
	l2: c;
	l4: l5: l5: l4: d = < 0 >;
};
/ {
	l1: b;
	l3: c;
	l6: d;
};
""",
"""
/dts-v1/;

/ {
	a;
	l1: b;
	l2: l3: c;
	l4: l5: l6: d = < 0x0 >;
};
""")
    verify_label2prop("l1", "b")
    verify_label2prop("l2", "c")
    verify_label2prop("l3", "c")
    verify_label2prop("l4", "d")
    verify_label2prop("l5", "d")
    verify_label2prop("l6", "d")
def test_property_offset_labels():
    '''
    It's possible to give labels to data at nonnegative byte offsets
    within a property value; label2prop_offset maps each label to its
    (property, offset) pair.
    '''
    def verify_label2offset(label, expected_prop, expected_offset):
        actual_prop, actual_offset = dt.label2prop_offset[label]
        actual_prop = actual_prop.name
        assert (actual_prop, actual_offset) == \
            (expected_prop, expected_offset), \
            f"label '{label}' maps to wrong offset or property"
    dt = verify_parse("""
/dts-v1/;

/ {
	a = l01: l02: < l03: &node l04: l05: 2 l06: >,
	    l07: l08: [ l09: 03 l10: l11: 04 l12: l13: ] l14:, "A";
	b = < 0 > l23: l24:;
	node: node {
	};
};
""",
"""
/dts-v1/;

/ {
	a = l01: l02: < l03: &node l04: l05: 0x2 l06: l07: l08: >, [ l09: 03 l10: l11: 04 l12: l13: l14: ], "A";
	b = < 0x0 l23: l24: >;
	node: node {
		phandle = < 0x1 >;
	};
};
""")
    verify_label2offset("l01", "a", 0)
    verify_label2offset("l02", "a", 0)
    verify_label2offset("l04", "a", 4)
    verify_label2offset("l05", "a", 4)
    verify_label2offset("l06", "a", 8)
    verify_label2offset("l09", "a", 8)
    verify_label2offset("l10", "a", 9)
    verify_label2offset("l23", "b", 4)
    verify_label2offset("l24", "b", 4)
def test_unit_addr():
    '''Node unit addresses (the part after '@' in a node name) must be
    correctly extracted, including non-numeric ones.'''
    def verify_unit_addr(path, expected):
        node = dt.get_node(path)
        assert node.unit_addr == expected, \
            f"{node!r} has unexpected unit address"
    dt = verify_parse("""
/dts-v1/;

/ {
	no-unit-addr {
	};
	unit-addr@ABC {
	};
	unit-addr-non-numeric@foo-bar {
	};
};
""",
"""
/dts-v1/;

/ {
	no-unit-addr {
	};
	unit-addr@ABC {
	};
	unit-addr-non-numeric@foo-bar {
	};
};
""")
    verify_unit_addr("/no-unit-addr", "")
    verify_unit_addr("/unit-addr@ABC", "ABC")
    verify_unit_addr("/unit-addr-non-numeric@foo-bar", "foo-bar")
def test_node_path_references():
    '''Property values may refer to nodes by path, both via labels
    (&label) and literal paths (&{/path}).'''
    verify_parse("""
/dts-v1/;

/ {
	a = &label;
	b = [ 01 ], &label;
	c = [ 01 ], &label, <2>;
	d = &{/abc};
	label: abc {
		e = &label;
		f = &{/abc};
	};
};
""",
"""
/dts-v1/;

/ {
	a = &label;
	b = [ 01 ], &label;
	c = [ 01 ], &label, < 0x2 >;
	d = &{/abc};
	label: abc {
		e = &label;
		f = &{/abc};
	};
};
""")
    verify_error("""
/dts-v1/;

/ {
	sub {
		x = &missing;
	};
};
""",
"/sub: undefined node label 'missing'")
    verify_error("""
/dts-v1/;

/ {
	sub {
		x = &{/sub/missing};
	};
};
""",
"/sub: component 'missing' in path '/sub/missing' does not exist")
def test_phandles():
    '''Various tests related to phandles: automatic allocation,
    explicit phandle properties, self-references, and invalid values.'''
    verify_parse("""
/dts-v1/;

/ {
	x = < &a &{/b} &c >;
	dummy1 {
		phandle = < 1 >;
	};
	dummy2 {
		phandle = < 3 >;
	};
	a: a {
	};
	b {
	};
	c: c {
		phandle = < 0xFF >;
	};
};
""",
"""
/dts-v1/;

/ {
	x = < &a &{/b} &c >;
	dummy1 {
		phandle = < 0x1 >;
	};
	dummy2 {
		phandle = < 0x3 >;
	};
	a: a {
		phandle = < 0x2 >;
	};
	b {
		phandle = < 0x4 >;
	};
	c: c {
		phandle = < 0xff >;
	};
};
""")
    # Check that a node can be assigned a phandle to itself. This just forces a
    # phandle to be allocated on it. The C tools support this too.
    verify_parse("""
/dts-v1/;

/ {
	dummy {
		phandle = < 1 >;
	};
	a {
		foo: phandle = < &{/a} >;
	};
	label: b {
		bar: phandle = < &label >;
	};
};
""",
"""
/dts-v1/;

/ {
	dummy {
		phandle = < 0x1 >;
	};
	a {
		foo: phandle = < &{/a} >;
	};
	label: b {
		bar: phandle = < &label >;
	};
};
""")
    verify_error("""
/dts-v1/;

/ {
	sub {
		x = < &missing >;
	};
};
""",
"/sub: undefined node label 'missing'")
    verify_error_endswith("""
/dts-v1/;

/ {
	a: sub {
		x = /bits/ 16 < &a >;
	};
};
""",
":5 (column 19): parse error: phandle references are only allowed in arrays with 32-bit elements")
    verify_error("""
/dts-v1/;

/ {
	foo {
		phandle = [ 00 ];
	};
};
""",
"/foo: bad phandle length (1), expected 4 bytes")
    verify_error("""
/dts-v1/;

/ {
	foo {
		phandle = < 0 >;
	};
};
""",
"/foo: bad value 0x00000000 for phandle")
    verify_error("""
/dts-v1/;

/ {
	foo {
		phandle = < (-1) >;
	};
};
""",
"/foo: bad value 0xffffffff for phandle")
    verify_error("""
/dts-v1/;

/ {
	foo {
		phandle = < 17 >;
	};
	bar {
		phandle = < 17 >;
	};
};
""",
"/bar: duplicated phandle 0x11 (seen before at /foo)")
    verify_error("""
/dts-v1/;

/ {
	foo {
		phandle = < &{/bar} >;
	};
	bar {
	};
};
""",
"/foo: phandle refers to another node")
def test_phandle2node():
    '''Test the phandle2node dict in a dt instance, which maps each
    allocated phandle value to its Node.'''
    def verify_phandle2node(prop, offset, expected_name):
        # Decode the 32-bit big-endian phandle at 'offset' in the raw
        # property value and check that it maps to 'expected_name'.
        phandle = dtlib.to_num(dt.root.props[prop].value[offset:offset + 4])
        actual_name = dt.phandle2node[phandle].name
        assert actual_name == expected_name, \
            f"'{prop}' is a phandle for the wrong thing"
    dt = parse("""
/dts-v1/;
/ {
	phandle_ = < &{/node1} 0 1 >;
	phandles = < 0 &{/node2} 1 &{/node3} >;
	node1 {
		phandle = < 123 >;
	};
	node2 {
	};
	node3 {
	};
};
""")
    verify_phandle2node("phandle_", 0, "node1")
    verify_phandle2node("phandles", 4, "node2")
    verify_phandle2node("phandles", 12, "node3")
def test_mixed_assign():
    '''A single property assignment may mix /bits/ arrays, label
    references, cell arrays, byte arrays, and strings.'''
    verify_parse("""
/dts-v1/;

/ {
	x = /bits/ 8 < 0xFF 0xFF >,
	    &abc,
	    < 0xFF &abc 0xFF &abc >,
	    &abc,
	    [ FF FF ],
	    "abc";
	abc: abc {
	};
};
""",
"""
/dts-v1/;

/ {
	x = [ FF FF ], &abc, < 0xff &abc 0xff &abc >, &abc, [ FF FF ], "abc";
	abc: abc {
		phandle = < 0x1 >;
	};
};
""")
def test_deletion():
    '''Properties and nodes may be deleted from the tree with
    /delete-property/ and /delete-node/.'''
    # Test property deletion
    verify_parse("""
/dts-v1/;

/ {
	keep = < 1 >;
	delete = < &sub >, &sub;
	/delete-property/ missing;
	/delete-property/ delete;
	sub: sub {
		y = < &sub >, &sub;
	};
};
&sub {
	/delete-property/ y;
};
""",
"""
/dts-v1/;

/ {
	keep = < 0x1 >;
	sub: sub {
	};
};
""")
    # Test node deletion
    verify_parse("""
/dts-v1/;

/ {
	x = "foo";
	sub0 {
		x = "bar";
	};
};
/delete-node/ &{/};
/ {
	sub1 {
		x = < 1 >;
		sub2 {
			x = < &sub >, &sub;
		};
		/delete-node/ sub2;
	};
	sub3: sub3 {
		x = < &sub >, &sub;
	};
	sub4 {
		x = < &sub >, &sub;
	};
};
/delete-node/ &sub3;
/delete-node/ &{/sub4};
""",
"""
/dts-v1/;

/ {
	sub1 {
		x = < 0x1 >;
	};
};
""")
    verify_parse("""
/dts-v1/;

/ {
	x: x = < &sub >, &sub;
	sub1 {
		x = < &sub >, &sub;
	};
	sub2: sub2 {
		x = < &sub >, &sub;
	};
};
/delete-node/ &{/};
""",
"""
/dts-v1/;

/ {
};
""")
    verify_error_endswith("""
/dts-v1/;

/ {
};

/delete-node/ &missing;
""",
":6 (column 15): parse error: undefined node label 'missing'")
    verify_error_endswith("""
/dts-v1/;

/delete-node/ {
""",
":3 (column 15): parse error: expected label (&foo) or path (&{/foo/bar}) reference")
def test_include_curdir(tmp_path):
'''Verify that /include/ (which is handled in the lexer) searches the
current directory'''
with temporary_chdir(tmp_path):
with open("same-dir-1", "w") as f:
f.write("""
x = [ 00 ];
/include/ "same-dir-2"
""")
with open("same-dir-2", "w") as f:
f.write("""
y = [ 01 ];
/include/ "same-dir-3"
""")
with open("same-dir-3", "w") as f:
f.write("""
z = [ 02 ];
""")
with open("test.dts", "w") as f:
f.write("""
/dts-v1/;
/ {
/include/ "same-dir-1"
};
""")
dt = dtlib.DT("test.dts")
assert str(dt) == """
/dts-v1/;
/ {
x = [ 00 ];
y = [ 01 ];
z = [ 02 ];
};
"""[1:-1]
def test_include_is_lexical(tmp_path):
'''/include/ is done in the lexer, which means that property
definitions can span multiple included files in different
directories.'''
with open(tmp_path / "tmp2.dts", "w") as f:
f.write("""
/dts-v1/;
/ {
""")
with open(tmp_path / "tmp3.dts", "w") as f:
f.write("""
x = <1>;
""")
subdir_1 = tmp_path / "subdir-1"
subdir_1.mkdir()
with open(subdir_1 / "via-include-path-1", "w") as f:
f.write("""
= /include/ "via-include-path-2"
""")
subdir_2 = tmp_path / "subdir-2"
subdir_2.mkdir()
with open(subdir_2 / "via-include-path-2", "w") as f:
f.write("""
<2>;
};
""")
with open(tmp_path / "test.dts", "w") as test_dts:
test_dts.write("""
/include/ "tmp2.dts"
/include/ "tmp3.dts"
y /include/ "via-include-path-1"
""")
with temporary_chdir(tmp_path):
dt = dtlib.DT("test.dts", include_path=(subdir_1, subdir_2))
expected_dt = """
/dts-v1/;
/ {
x = < 0x1 >;
y = < 0x2 >;
};
"""[1:-1]
assert str(dt) == expected_dt
def test_include_misc(tmp_path):
'''Miscellaneous /include/ tests.'''
# Missing includes should error out.
verify_error_endswith("""
/include/ "missing"
""",
":1 (column 1): parse error: 'missing' could not be found")
# Verify that an error in an included file points to the right location
with temporary_chdir(tmp_path):
with open("tmp2.dts", "w") as f:
f.write("""\
x
""")
with open("tmp.dts", "w") as f:
f.write("""
/include/ "tmp2.dts"
""")
with dtlib_raises("tmp2.dts:3 (column 3): parse error: "
"expected '/dts-v1/;' at start of file"):
dtlib.DT("tmp.dts")
def test_include_recursion(tmp_path):
'''Test recursive /include/ detection'''
with temporary_chdir(tmp_path):
with open("tmp2.dts", "w") as f:
f.write('/include/ "tmp3.dts"\n')
with open("tmp3.dts", "w") as f:
f.write('/include/ "tmp.dts"\n')
with open("tmp.dts", "w") as f:
f.write('/include/ "tmp2.dts"\n')
expected_err = """\
tmp3.dts:1 (column 1): parse error: recursive /include/:
tmp.dts:1 ->
tmp2.dts:1 ->
tmp3.dts:1 ->
tmp.dts"""
with dtlib_raises(expected_err):
dtlib.DT("tmp.dts")
with open("tmp.dts", "w") as f:
f.write('/include/ "tmp.dts"\n')
expected_err = """\
tmp.dts:1 (column 1): parse error: recursive /include/:
tmp.dts:1 ->
tmp.dts"""
with dtlib_raises(expected_err):
dtlib.DT("tmp.dts")
def test_omit_if_no_ref():
    '''The /omit-if-no-ref/ marker is a bit of undocumented
    dtc magic that removes a node from the tree if it isn't
    referred to elsewhere.
    '''
    # NOTE(review): the DTS literals below appear to have lost their original
    # indentation and blank lines (extraction artifact) -- confirm against the
    # upstream file before relying on exact round-trip comparisons.

    # Referenced nodes survive; unreferenced /omit-if-no-ref/ nodes (marked
    # inline, via labels, or via top-level references) are dropped.
    verify_parse("""
/dts-v1/;
/ {
x = < &{/referenced} >, &referenced2;
/omit-if-no-ref/ referenced {
};
referenced2: referenced2 {
};
/omit-if-no-ref/ unreferenced {
};
l1: /omit-if-no-ref/ unreferenced2 {
};
/omit-if-no-ref/ l2: unreferenced3 {
};
unreferenced4: unreferenced4 {
};
unreferenced5 {
};
};
/omit-if-no-ref/ &referenced2;
/omit-if-no-ref/ &unreferenced4;
/omit-if-no-ref/ &{/unreferenced5};
""",
"""
/dts-v1/;
/ {
x = < &{/referenced} >, &referenced2;
referenced {
phandle = < 0x1 >;
};
referenced2: referenced2 {
};
};
""")

    # /omit-if-no-ref/ on a property (with or without a value) is an error.
    verify_error_endswith("""
/dts-v1/;
/ {
/omit-if-no-ref/ x = "";
};
""",
":4 (column 21): parse error: /omit-if-no-ref/ can only be used on nodes")
    verify_error_endswith("""
/dts-v1/;
/ {
/omit-if-no-ref/ x;
};
""",
":4 (column 20): parse error: /omit-if-no-ref/ can only be used on nodes")

    # The marker must be followed by a node/property name...
    verify_error_endswith("""
/dts-v1/;
/ {
/omit-if-no-ref/ {
};
};
""",
":4 (column 19): parse error: expected node or property name")
    verify_error_endswith("""
/dts-v1/;
/ {
/omit-if-no-ref/ = < 0 >;
};
""",
":4 (column 19): parse error: expected node or property name")

    # ...and top-level references must resolve.
    verify_error_endswith("""
/dts-v1/;
/ {
};
/omit-if-no-ref/ &missing;
""",
":6 (column 18): parse error: undefined node label 'missing'")
    verify_error_endswith("""
/dts-v1/;
/omit-if-no-ref/ {
""",
":3 (column 18): parse error: expected label (&foo) or path (&{/foo/bar}) reference")
def test_expr():
'''Property values may contain expressions.'''
verify_parse("""
/dts-v1/;
/ {
ter1 = < (0 ? 1 : 0 ? 2 : 3) >;
ter2 = < (0 ? 1 : 1 ? 2 : 3) >;
ter3 = < (1 ? 1 : 0 ? 2 : 3) >;
ter4 = < (1 ? 1 : 1 ? 2 : 3) >;
or1 = < (0 || 0) >;
or2 = < (0 || 1) >;
or3 = < (1 || 0) >;
or4 = < (1 || 1) >;
and1 = < (0 && 0) >;
and2 = < (0 && 1) >;
and3 = < (1 && 0) >;
and4 = < (1 && 1) >;
bitor = < (1 | 2) >;
bitxor = < (7 ^ 2) >;
bitand = < (3 & 6) >;
eq1 = < (1 == 0) >;
eq2 = < (1 == 1) >;
neq1 = < (1 != 0) >;
neq2 = < (1 != 1) >;
lt1 = < (1 < 2) >;
lt2 = < (2 < 2) >;
lt3 = < (3 < 2) >;
lteq1 = < (1 <= 2) >;
lteq2 = < (2 <= 2) >;
lteq3 = < (3 <= 2) >;
gt1 = < (1 > 2) >;
gt2 = < (2 > 2) >;
gt3 = < (3 > 2) >;
gteq1 = < (1 >= 2) >;
gteq2 = < (2 >= 2) >;
gteq3 = < (3 >= 2) >;
lshift = < (2 << 3) >;
rshift = < (16 >> 3) >;
add = < (3 + 4) >;
sub = < (7 - 4) >;
mul = < (3 * 4) >;
div = < (11 / 3) >;
mod = < (11 % 3) >;
unary_minus = < (-3) >;
bitnot = < (~1) >;
not0 = < (!-1) >;
not1 = < (!0) >;
not2 = < (!1) >;
not3 = < (!2) >;
nest = < (((--3) + (-2)) * (--(-2))) >;
char_lits = < ('a' + 'b') >;
};
""",
"""
/dts-v1/;
/ {
ter1 = < 0x3 >;
ter2 = < 0x2 >;
ter3 = < 0x1 >;
ter4 = < 0x1 >;
or1 = < 0x0 >;
or2 = < 0x1 >;
or3 = < 0x1 >;
or4 = < 0x1 >;
and1 = < 0x0 >;
and2 = < 0x0 >;
and3 = < 0x0 >;
and4 = < 0x1 >;
bitor = < 0x3 >;
bitxor = < 0x5 >;
bitand = < 0x2 >;
eq1 = < 0x0 >;
eq2 = < 0x1 >;
neq1 = < 0x1 >;
neq2 = < 0x0 >;
lt1 = < 0x1 >;
lt2 = < 0x0 >;
lt3 = < 0x0 >;
lteq1 = < 0x1 >;
lteq2 = < 0x1 >;
lteq3 = < 0x0 >;
gt1 = < 0x0 >;
gt2 = < 0x0 >;
gt3 = < 0x1 >;
gteq1 = < 0x0 >;
gteq2 = < 0x1 >;
gteq3 = < 0x1 >;
lshift = < 0x10 >;
rshift = < 0x2 >;
add = < 0x7 >;
sub = < 0x3 >;
mul = < 0xc >;
div = < 0x3 >;
mod = < 0x2 >;
unary_minus = < 0xfffffffd >;
bitnot = < 0xfffffffe >;
not0 = < 0x0 >;
not1 = < 0x1 >;
not2 = < 0x0 >;
not3 = < 0x0 >;
nest = < 0xfffffffe >;
char_lits = < 0xc3 >;
};
""")
verify_error_endswith("""
/dts-v1/;
/ {
a = < (1/(-1 + 1)) >;
};
""",
":4 (column 18): parse error: division by zero")
verify_error_endswith("""
/dts-v1/;
/ {
a = < (1%0) >;
};
""",
":4 (column 11): parse error: division by zero")
def test_comment_removal():
    '''Comments should be removed when round-tripped to a str.'''
    # Exercises both // and /* */ comment forms, including comments that are
    # adjacent to (or split) tokens, and degenerate forms like /***/.
    verify_parse("""
/**//dts-v1//**/;//
//
// foo
/ /**/{// foo
x/**/=/*
foo
*/</**/1/***/>/****/;/**/}/*/**/;
""",
"""
/dts-v1/;
/ {
x = < 0x1 >;
};
""")
def verify_path_is(path, node_name, dt):
    '''Check that looking up 'path' in 'dt' yields a node named 'node_name'.'''
    try:
        found = dt.get_node(path)
    except dtlib.DTError:
        assert False, f'no node found for path {path}'
    # AssertionError is not a DTError, so checking the name outside the
    # try block is equivalent to checking it inside.
    assert found.name == node_name, f'unexpected path {path}'
def verify_path_error(path, msg, dt):
    '''Verify that an attempt to get node 'path' from 'dt' raises
    a DTError whose str is 'msg'.'''
    # dtlib_raises is presumably a context-manager helper defined elsewhere
    # in this file that asserts a DTError with the given message is raised.
    with dtlib_raises(msg):
        dt.get_node(path)
def test_get_node():
'''Test DT.get_node().'''
dt = parse("""
/dts-v1/;
/ {
foo {
bar {
};
};
baz {
};
};
""")
verify_path_is("/", "/", dt)
verify_path_is("//", "/", dt)
verify_path_is("///", "/", dt)
verify_path_is("/foo", "foo", dt)
verify_path_is("//foo", "foo", dt)
verify_path_is("///foo", "foo", dt)
verify_path_is("/foo/bar", "bar", dt)
verify_path_is("//foo//bar", "bar", dt)
verify_path_is("///foo///bar", "bar", dt)
verify_path_is("/baz", "baz", dt)
verify_path_error(
"",
"no alias '' found -- did you forget the leading '/' in the node path?",
dt)
verify_path_error(
"missing",
"no alias 'missing' found -- did you forget the leading '/' in the node path?",
dt)
verify_path_error(
"/missing",
"component 'missing' in path '/missing' does not exist",
dt)
verify_path_error(
"/foo/missing",
"component 'missing' in path '/foo/missing' does not exist",
dt)
def verify_path_exists(path):
assert dt.has_node(path), f"path '{path}' does not exist"
def verify_path_missing(path):
assert not dt.has_node(path), f"path '{path}' exists"
verify_path_exists("/")
verify_path_exists("/foo")
verify_path_exists("/foo/bar")
verify_path_missing("/missing")
verify_path_missing("/foo/missing")
def test_aliases():
'''Test /aliases'''
dt = parse("""
/dts-v1/;
/ {
aliases {
alias1 = &l1;
alias2 = &l2;
alias3 = &{/sub/node3};
alias4 = &{/node4};
};
l1: node1 {
};
l2: node2 {
};
sub {
node3 {
};
};
node4 {
node5 {
};
};
};
""")
def verify_alias_target(alias, node_name):
verify_path_is(alias, node_name, dt)
assert alias in dt.alias2node
assert dt.alias2node[alias].name == node_name, f"bad result for {alias}"
verify_alias_target("alias1", "node1")
verify_alias_target("alias2", "node2")
verify_alias_target("alias3", "node3")
verify_path_is("alias4/node5", "node5", dt)
verify_path_error(
"alias4/node5/node6",
"component 'node6' in path 'alias4/node5/node6' does not exist",
dt)
verify_error_matches("""
/dts-v1/;
/ {
aliases {
a = [ 00 ];
};
};
""",
"expected property 'a' on /aliases in .*" +
re.escape("to be assigned with either 'a = &foo' or 'a = \"/path/to/node\"', not 'a = [ 00 ];'"))
verify_error_matches(r"""
/dts-v1/;
/ {
aliases {
a = "\xFF";
};
};
""",
re.escape(r"value of property 'a' (b'\xff\x00') on /aliases in ") +
".* is not valid UTF-8")
verify_error("""
/dts-v1/;
/ {
aliases {
A = "/aliases";
};
};
""",
"/aliases: alias property name 'A' should include only characters from [0-9a-z-]")
verify_error_matches(r"""
/dts-v1/;
/ {
aliases {
a = "/missing";
};
};
""",
"property 'a' on /aliases in .* points to the non-existent node \"/missing\"")
def test_prop_type():
'''Test Property.type'''
def verify_type(prop, expected):
actual = dt.root.props[prop].type
assert actual == expected, f'{prop} has wrong type'
dt = parse("""
/dts-v1/;
/ {
empty;
bytes1 = [ ];
bytes2 = [ 01 ];
bytes3 = [ 01 02 ];
bytes4 = foo: [ 01 bar: 02 ];
bytes5 = /bits/ 8 < 1 2 3 >;
num = < 1 >;
nums1 = < >;
nums2 = < >, < >;
nums3 = < 1 2 >;
nums4 = < 1 2 >, < 3 >, < 4 >;
string = "foo";
strings = "foo", "bar";
path1 = &node;
path2 = &{/node};
phandle1 = < &node >;
phandle2 = < &{/node} >;
phandles1 = < &node &node >;
phandles2 = < &node >, < &node >;
phandle-and-nums-1 = < &node 1 >;
phandle-and-nums-2 = < &node 1 2 &node 3 4 >;
phandle-and-nums-3 = < &node 1 2 >, < &node 3 4 >;
compound1 = < 1 >, [ 02 ];
compound2 = "foo", < >;
node: node {
};
};
""")
verify_type("empty", dtlib.Type.EMPTY)
verify_type("bytes1", dtlib.Type.BYTES)
verify_type("bytes2", dtlib.Type.BYTES)
verify_type("bytes3", dtlib.Type.BYTES)
verify_type("bytes4", dtlib.Type.BYTES)
verify_type("bytes5", dtlib.Type.BYTES)
verify_type("num", dtlib.Type.NUM)
verify_type("nums1", dtlib.Type.NUMS)
verify_type("nums2", dtlib.Type.NUMS)
verify_type("nums3", dtlib.Type.NUMS)
verify_type("nums4", dtlib.Type.NUMS)
verify_type("string", dtlib.Type.STRING)
verify_type("strings", dtlib.Type.STRINGS)
verify_type("phandle1", dtlib.Type.PHANDLE)
verify_type("phandle2", dtlib.Type.PHANDLE)
verify_type("phandles1", dtlib.Type.PHANDLES)
verify_type("phandles2", dtlib.Type.PHANDLES)
verify_type("phandle-and-nums-1", dtlib.Type.PHANDLES_AND_NUMS)
verify_type("phandle-and-nums-2", dtlib.Type.PHANDLES_AND_NUMS)
verify_type("phandle-and-nums-3", dtlib.Type.PHANDLES_AND_NUMS)
verify_type("path1", dtlib.Type.PATH)
verify_type("path2", dtlib.Type.PATH)
verify_type("compound1", dtlib.Type.COMPOUND)
verify_type("compound2", dtlib.Type.COMPOUND)
def test_prop_type_casting():
'''Test Property.to_{num,nums,string,strings,node}()'''
dt = parse(r"""
/dts-v1/;
/ {
u = < 1 >;
s = < 0xFFFFFFFF >;
u8 = /bits/ 8 < 1 >;
u16 = /bits/ 16 < 1 2 >;
u64 = /bits/ 64 < 1 >;
bytes = [ 01 02 03 ];
empty;
zero = < >;
two_u = < 1 2 >;
two_s = < 0xFFFFFFFF 0xFFFFFFFE >;
three_u = < 1 2 3 >;
three_u_split = < 1 >, < 2 >, < 3 >;
empty_string = "";
string = "foo\tbar baz";
invalid_string = "\xff";
strings = "foo", "bar", "baz";
invalid_strings = "foo", "\xff", "bar";
ref = <&{/target}>;
refs = <&{/target} &{/target2}>;
refs2 = <&{/target}>, <&{/target2}>;
path = &{/target};
manualpath = "/target";
missingpath = "/missing";
target {
phandle = < 100 >;
};
target2 {
};
};
""")
# Test Property.to_num()
def verify_to_num(prop, signed, expected):
signed_str = "a signed" if signed else "an unsigned"
actual = dt.root.props[prop].to_num(signed)
assert actual == expected, \
f"{prop} has bad {signed_str} numeric value"
def verify_to_num_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_num()
verify_to_num("u", False, 1)
verify_to_num("u", True, 1)
verify_to_num("s", False, 0xFFFFFFFF)
verify_to_num("s", True, -1)
verify_to_num_error_matches(
"two_u",
"expected property 'two_u' on / in .* to be assigned with " +
re.escape("'two_u = < (number) >;', not 'two_u = < 0x1 0x2 >;'"))
verify_to_num_error_matches(
"u8",
"expected property 'u8' on / in .* to be assigned with " +
re.escape("'u8 = < (number) >;', not 'u8 = [ 01 ];'"))
verify_to_num_error_matches(
"u16",
"expected property 'u16' on / in .* to be assigned with " +
re.escape("'u16 = < (number) >;', not 'u16 = /bits/ 16 < 0x1 0x2 >;'"))
verify_to_num_error_matches(
"u64",
"expected property 'u64' on / in .* to be assigned with " +
re.escape("'u64 = < (number) >;', not 'u64 = /bits/ 64 < 0x1 >;'"))
verify_to_num_error_matches(
"string",
"expected property 'string' on / in .* to be assigned with " +
re.escape("'string = < (number) >;', not 'string = \"foo\\tbar baz\";'"))
# Test Property.to_nums()
def verify_to_nums(prop, signed, expected):
signed_str = "signed" if signed else "unsigned"
actual = dt.root.props[prop].to_nums(signed)
assert actual == expected, \
f"'{prop}' gives the wrong {signed_str} numbers"
def verify_to_nums_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_nums()
verify_to_nums("zero", False, [])
verify_to_nums("u", False, [1])
verify_to_nums("two_u", False, [1, 2])
verify_to_nums("two_u", True, [1, 2])
verify_to_nums("two_s", False, [0xFFFFFFFF, 0xFFFFFFFE])
verify_to_nums("two_s", True, [-1, -2])
verify_to_nums("three_u", False, [1, 2, 3])
verify_to_nums("three_u_split", False, [1, 2, 3])
verify_to_nums_error_matches(
"empty",
"expected property 'empty' on / in .* to be assigned with " +
re.escape("'empty = < (number) (number) ... >;', not 'empty;'"))
verify_to_nums_error_matches(
"string",
"expected property 'string' on / in .* to be assigned with " +
re.escape("'string = < (number) (number) ... >;', ") +
re.escape("not 'string = \"foo\\tbar baz\";'"))
# Test Property.to_bytes()
def verify_to_bytes(prop, expected):
actual = dt.root.props[prop].to_bytes()
assert actual == expected, f"'{prop}' gives the wrong bytes"
def verify_to_bytes_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_bytes()
verify_to_bytes("u8", b"\x01")
verify_to_bytes("bytes", b"\x01\x02\x03")
verify_to_bytes_error_matches(
"u16",
"expected property 'u16' on / in .* to be assigned with " +
re.escape("'u16 = [ (byte) (byte) ... ];', ") +
re.escape("not 'u16 = /bits/ 16 < 0x1 0x2 >;'"))
verify_to_bytes_error_matches(
"empty",
"expected property 'empty' on / in .* to be assigned with " +
re.escape("'empty = [ (byte) (byte) ... ];', not 'empty;'"))
# Test Property.to_string()
def verify_to_string(prop, expected):
actual = dt.root.props[prop].to_string()
assert actual == expected, f"'{prop}' to_string gives the wrong string"
def verify_to_string_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_string()
verify_to_string("empty_string", "")
verify_to_string("string", "foo\tbar baz")
verify_to_string_error_matches(
"u",
"expected property 'u' on / in .* to be assigned with " +
re.escape("'u = \"string\";', not 'u = < 0x1 >;'"))
verify_to_string_error_matches(
"strings",
"expected property 'strings' on / in .* to be assigned with " +
re.escape("'strings = \"string\";', ")+
re.escape("not 'strings = \"foo\", \"bar\", \"baz\";'"))
verify_to_string_error_matches(
"invalid_string",
re.escape(r"value of property 'invalid_string' (b'\xff\x00') on / ") +
"in .* is not valid UTF-8")
# Test Property.to_strings()
def verify_to_strings(prop, expected):
actual = dt.root.props[prop].to_strings()
assert actual == expected, f"'{prop}' to_strings gives the wrong value"
def verify_to_strings_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_strings()
verify_to_strings("empty_string", [""])
verify_to_strings("string", ["foo\tbar baz"])
verify_to_strings("strings", ["foo", "bar", "baz"])
verify_to_strings_error_matches(
"u",
"expected property 'u' on / in .* to be assigned with " +
re.escape("'u = \"string\", \"string\", ... ;', not 'u = < 0x1 >;'"))
verify_to_strings_error_matches(
"invalid_strings",
"value of property 'invalid_strings' " +
re.escape(r"(b'foo\x00\xff\x00bar\x00') on / in ") +
".* is not valid UTF-8")
# Test Property.to_node()
def verify_to_node(prop, path):
actual = dt.root.props[prop].to_node().path
assert actual == path, f"'{prop}' points at wrong path"
def verify_to_node_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_node()
verify_to_node("ref", "/target")
verify_to_node_error_matches(
"u",
"expected property 'u' on / in .* to be assigned with " +
re.escape("'u = < &foo >;', not 'u = < 0x1 >;'"))
verify_to_node_error_matches(
"string",
"expected property 'string' on / in .* to be assigned with " +
re.escape("'string = < &foo >;', not 'string = \"foo\\tbar baz\";'"))
# Test Property.to_nodes()
def verify_to_nodes(prop, paths):
actual = [node.path for node in dt.root.props[prop].to_nodes()]
assert actual == paths, f"'{prop} gives wrong node paths"
def verify_to_nodes_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_nodes()
verify_to_nodes("zero", [])
verify_to_nodes("ref", ["/target"])
verify_to_nodes("refs", ["/target", "/target2"])
verify_to_nodes("refs2", ["/target", "/target2"])
verify_to_nodes_error_matches(
"u",
"expected property 'u' on / in .* to be assigned with " +
re.escape("'u = < &foo &bar ... >;', not 'u = < 0x1 >;'"))
verify_to_nodes_error_matches(
"string",
"expected property 'string' on / in .* to be assigned with " +
re.escape("'string = < &foo &bar ... >;', ") +
re.escape("not 'string = \"foo\\tbar baz\";'"))
# Test Property.to_path()
def verify_to_path(prop, path):
actual = dt.root.props[prop].to_path().path
assert actual == path, f"'{prop} gives the wrong path"
def verify_to_path_error_matches(prop, expected_re):
with dtlib_raises(err_matches=expected_re):
dt.root.props[prop].to_path()
verify_to_path("path", "/target")
verify_to_path("manualpath", "/target")
verify_to_path_error_matches(
"u",
"expected property 'u' on / in .* to be assigned with either " +
re.escape("'u = &foo' or 'u = \"/path/to/node\"', not 'u = < 0x1 >;'"))
verify_to_path_error_matches(
"missingpath",
"property 'missingpath' on / in .* points to the non-existent node "
'"/missing"')
# Test top-level to_num() and to_nums()
def verify_raw_to_num(fn, prop, length, signed, expected):
actual = fn(dt.root.props[prop].value, length, signed)
assert actual == expected, \
f"{fn.__name__}(<{prop}>, {length}, {signed}) gives wrong value"
def verify_raw_to_num_error(fn, data, length, msg):
# We're using this instead of dtlib_raises() for the extra
# context we get from the assertion below.
with pytest.raises(dtlib.DTError) as e:
fn(data, length)
assert str(e.value) == msg, \
(f"{fn.__name__}() called with data='{data}', length='{length}' "
"gives the wrong error")
verify_raw_to_num(dtlib.to_num, "u", None, False, 1)
verify_raw_to_num(dtlib.to_num, "u", 4, False, 1)
verify_raw_to_num(dtlib.to_num, "s", None, False, 0xFFFFFFFF)
verify_raw_to_num(dtlib.to_num, "s", None, True, -1)
verify_raw_to_num(dtlib.to_nums, "empty", 4, False, [])
verify_raw_to_num(dtlib.to_nums, "u16", 2, False, [1, 2])
verify_raw_to_num(dtlib.to_nums, "two_s", 4, False, [0xFFFFFFFF, 0xFFFFFFFE])
verify_raw_to_num(dtlib.to_nums, "two_s", 4, True, [-1, -2])
verify_raw_to_num_error(dtlib.to_num, 0, 0, "'0' has type 'int', expected 'bytes'")
verify_raw_to_num_error(dtlib.to_num, b"", 0, "'length' must be greater than zero, was 0")
verify_raw_to_num_error(dtlib.to_num, b"foo", 2, "b'foo' is 3 bytes long, expected 2")
verify_raw_to_num_error(dtlib.to_nums, 0, 0, "'0' has type 'int', expected 'bytes'")
verify_raw_to_num_error(dtlib.to_nums, b"", 0, "'length' must be greater than zero, was 0")
verify_raw_to_num_error(dtlib.to_nums, b"foooo", 2, "b'foooo' is 5 bytes long, expected a length that's a multiple of 2")
def test_duplicate_labels():
'''
It is an error to duplicate labels in most conditions, but there
are some exceptions where it's OK.
'''
verify_error("""
/dts-v1/;
/ {
sub1 {
label: foo {
};
};
sub2 {
label: bar {
};
};
};
""",
"Label 'label' appears on /sub1/foo and on /sub2/bar")
verify_error("""
/dts-v1/;
/ {
sub {
label: foo {
};
};
};
/ {
sub {
label: bar {
};
};
};
""",
"Label 'label' appears on /sub/bar and on /sub/foo")
verify_error("""
/dts-v1/;
/ {
foo: a = < 0 >;
foo: node {
};
};
""",
"Label 'foo' appears on /node and on property 'a' of node /")
verify_error("""
/dts-v1/;
/ {
foo: a = < 0 >;
node {
foo: b = < 0 >;
};
};
""",
"Label 'foo' appears on property 'a' of node / and on property 'b' of node /node")
verify_error("""
/dts-v1/;
/ {
foo: a = foo: < 0 >;
};
""",
"Label 'foo' appears in the value of property 'a' of node / and on property 'a' of node /")
# Giving the same label twice for the same node is fine
verify_parse("""
/dts-v1/;
/ {
sub {
label: foo {
};
};
};
/ {
sub {
label: foo {
};
};
};
""",
"""
/dts-v1/;
/ {
sub {
label: foo {
};
};
};
""")
# Duplicate labels are fine if one of the nodes is deleted
verify_parse("""
/dts-v1/;
/ {
label: foo {
};
label: bar {
};
};
/delete-node/ &{/bar};
""",
"""
/dts-v1/;
/ {
label: foo {
};
};
""")
#
# Test overriding/deleting a property with references
#
verify_parse("""
/dts-v1/;
/ {
x = &foo, < &foo >;
y = &foo, < &foo >;
foo: foo {
};
};
/ {
x = < 1 >;
/delete-property/ y;
};
""",
"""
/dts-v1/;
/ {
x = < 0x1 >;
foo: foo {
};
};
""")
#
# Test self-referential node
#
verify_parse("""
/dts-v1/;
/ {
label: foo {
x = &{/foo}, &label, < &label >;
};
};
""",
"""
/dts-v1/;
/ {
label: foo {
x = &{/foo}, &label, < &label >;
phandle = < 0x1 >;
};
};
""")
#
# Test /memreserve/
#
dt = verify_parse("""
/dts-v1/;
l1: l2: /memreserve/ (1 + 1) (2 * 2);
/memreserve/ 0x100 0x200;
/ {
};
""",
"""
/dts-v1/;
l1: l2: /memreserve/ 0x0000000000000002 0x0000000000000004;
/memreserve/ 0x0000000000000100 0x0000000000000200;
/ {
};
""")
expected = [(["l1", "l2"], 2, 4), ([], 0x100, 0x200)]
assert dt.memreserves == expected
verify_error_endswith("""
/dts-v1/;
foo: / {
};
""",
":3 (column 6): parse error: expected /memreserve/ after labels at beginning of file")
def test_reprs():
'''Test the __repr__() functions.'''
dts = """
/dts-v1/;
/ {
x = < 0 >;
sub {
y = < 1 >;
};
};
"""
dt = parse(dts, include_path=("foo", "bar"))
assert re.fullmatch(r"DT\(filename='.*', include_path=.'foo', 'bar'.\)",
repr(dt))
assert re.fullmatch("<Property 'x' at '/' in '.*'>",
repr(dt.root.props["x"]))
assert re.fullmatch("<Node /sub in '.*'>",
repr(dt.root.nodes["sub"]))
dt = parse(dts, include_path=iter(("foo", "bar")))
assert re.fullmatch(r"DT\(filename='.*', include_path=.'foo', 'bar'.\)",
repr(dt))
def test_names():
'''Tests for node/property names.'''
verify_parse(r"""
/dts-v1/;
/ {
// A leading \ is accepted but ignored in node/propert names
\aA0,._+*#?- = &_, &{/aA0,._+@-};
// Names that overlap with operators and integer literals
+ = [ 00 ];
* = [ 02 ];
- = [ 01 ];
? = [ 03 ];
0 = [ 04 ];
0x123 = [ 05 ];
// Node names are more restrictive than property names.
_: \aA0,._+@- {
};
0 {
};
};
""",
"""
/dts-v1/;
/ {
aA0,._+*#?- = &_, &{/aA0,._+@-};
+ = [ 00 ];
* = [ 02 ];
- = [ 01 ];
? = [ 03 ];
0 = [ 04 ];
0x123 = [ 05 ];
_: aA0,._+@- {
};
0 {
};
};
""")
verify_error_endswith(r"""
/dts-v1/;
/ {
foo@3;
};
""",
":4 (column 7): parse error: '@' is only allowed in node names")
verify_error_endswith(r"""
/dts-v1/;
/ {
foo@3 = < 0 >;
};
""",
":4 (column 8): parse error: '@' is only allowed in node names")
verify_error_endswith(r"""
/dts-v1/;
/ {
foo@2@3 {
};
};
""",
":4 (column 10): parse error: multiple '@' in node name")
def test_dense_input():
'''
Test that a densely written DTS input round-trips to something
readable.
'''
verify_parse("""
/dts-v1/;/{l1:l2:foo{l3:l4:bar{l5:x=l6:/bits/8<l7:1 l8:2>l9:,[03],"a";};};};
""",
"""
/dts-v1/;
/ {
l1: l2: foo {
l3: l4: bar {
l5: x = l6: [ l7: 01 l8: 02 l9: ], [ 03 ], "a";
};
};
};
""")
def test_misc():
'''Test miscellaneous errors and non-errors.'''
verify_error_endswith("", ":1 (column 1): parse error: expected '/dts-v1/;' at start of file")
verify_error_endswith("""
/dts-v1/;
""",
":2 (column 1): parse error: no root node defined")
verify_error_endswith("""
/dts-v1/; /plugin/;
""",
":1 (column 11): parse error: /plugin/ is not supported")
verify_error_endswith("""
/dts-v1/;
/ {
foo: foo {
};
};
// Only one label supported before label references at the top level
l1: l2: &foo {
};
""",
":9 (column 5): parse error: expected label reference (&foo)")
verify_error_endswith("""
/dts-v1/;
/ {
foo: {};
};
""",
":4 (column 14): parse error: expected node or property name")
# Multiple /dts-v1/ at the start of a file is fine
verify_parse("""
/dts-v1/;
/dts-v1/;
/ {
};
""",
"""
/dts-v1/;
/ {
};
""")
def test_dangling_alias():
    # With force=True, parsing succeeds even though the alias points at a
    # non-existent node; the property is still readable as a plain string.
    dt = parse('''
/dts-v1/;
/ {
aliases { foo = "/missing"; };
};
''', force=True)
    assert dt.get_node('/aliases').props['foo'].to_string() == '/missing'
def test_duplicate_nodes():
# Duplicate node names in the same {} block are an error in dtc,
# so we want to reproduce the same behavior. But we also need to
# make sure that doesn't break overlays modifying the same node.
verify_error_endswith("""
/dts-v1/;
/ {
foo {};
foo {};
};
""", "/foo: duplicate node name")
verify_parse("""
/dts-v1/;
/ {
foo { prop = <3>; };
};
/ {
foo { prop = <4>; };
};
""",
"""
/dts-v1/;
/ {
foo {
prop = < 0x4 >;
};
};
""")
def test_deepcopy():
dt = parse('''
/dts-v1/;
memreservelabel: /memreserve/ 0xdeadbeef 0x4000;
/ {
aliases {
foo = &nodelabel;
};
rootprop_label: rootprop = prop_offset0: <0x12345678 prop_offset4: 0x0>;
nodelabel: node@1234 {
nodeprop = <3>;
subnode {
ref-to-node = <&nodelabel>;
};
};
};
''')
dt_copy = deepcopy(dt)
assert dt_copy.filename == dt.filename
# dt_copy.root checks:
root_copy = dt_copy.root
assert root_copy is not dt.root
assert root_copy.parent is None
assert root_copy.dt is dt_copy
assert root_copy.labels == []
assert root_copy.labels is not dt.root.labels
# dt_copy.memreserves checks:
assert dt_copy.memreserves == [
(set(['memreservelabel']), 0xdeadbeef, 0x4000)
]
assert dt_copy.memreserves is not dt.memreserves
# Miscellaneous dt_copy node and property checks:
assert 'rootprop' in root_copy.props
rootprop_copy = root_copy.props['rootprop']
assert rootprop_copy is not dt.root.props['rootprop']
assert rootprop_copy.name == 'rootprop'
assert rootprop_copy.value == b'\x12\x34\x56\x78\0\0\0\0'
assert rootprop_copy.type == dtlib.Type.NUMS
assert rootprop_copy.labels == ['rootprop_label']
assert rootprop_copy.labels is not dt.root.props['rootprop'].labels
assert rootprop_copy.offset_labels == {
'prop_offset0': 0,
'prop_offset4': 4,
}
assert rootprop_copy.offset_labels is not \
dt.root.props['rootprop'].offset_labels
assert rootprop_copy.node is root_copy
assert dt_copy.has_node('/node@1234')
node_copy = dt_copy.get_node('/node@1234')
assert node_copy is not dt.get_node('/node@1234')
assert node_copy.labels == ['nodelabel']
assert node_copy.labels is not dt.get_node('/node@1234').labels
assert node_copy.name == 'node@1234'
assert node_copy.unit_addr == '1234'
assert node_copy.path == '/node@1234'
assert set(node_copy.props.keys()) == set(['nodeprop', 'phandle'])
assert node_copy.props is not dt.get_node('/node@1234').props
assert node_copy.props['nodeprop'].name == 'nodeprop'
assert node_copy.props['nodeprop'].labels == []
assert node_copy.props['nodeprop'].offset_labels == {}
assert node_copy.props['nodeprop'].node is node_copy
assert node_copy.dt is dt_copy
assert 'subnode' in node_copy.nodes
subnode_copy = node_copy.nodes['subnode']
assert subnode_copy is not dt.get_node('/node@1234/subnode')
assert subnode_copy.parent is node_copy
# dt_copy.label2prop and .label2prop_offset checks:
assert 'rootprop_label' in dt_copy.label2prop
assert dt_copy.label2prop['rootprop_label'] is rootprop_copy
assert list(dt_copy.label2prop_offset.keys()) == ['prop_offset0',
'prop_offset4']
assert dt_copy.label2prop_offset['prop_offset4'][0] is rootprop_copy
assert dt_copy.label2prop_offset['prop_offset4'][1] == 4
# dt_copy.foo2node checks:
def check_node_lookup_table(attr_name):
original = getattr(dt, attr_name)
copy = getattr(dt_copy, attr_name)
assert original is not copy
assert list(original.keys()) == list(copy.keys())
assert all([original_node.path == copy_node.path and
original_node is not copy_node
for original_node, copy_node in
zip(original.values(), copy.values())])
check_node_lookup_table('alias2node')
check_node_lookup_table('label2node')
check_node_lookup_table('phandle2node')
assert list(dt_copy.alias2node.keys()) == ['foo']
assert dt_copy.alias2node['foo'] is node_copy
assert list(dt_copy.label2node.keys()) == ['nodelabel']
assert dt_copy.label2node['nodelabel'] is node_copy
assert dt_copy.phandle2node
# This is a little awkward because of the way dtlib allocates
# phandles.
phandle2node_copy_values = set(dt_copy.phandle2node.values())
assert node_copy in phandle2node_copy_values
for node in dt.node_iter():
assert node not in phandle2node_copy_values
def test_move_node():
# Test cases for DT.move_node().
dt = parse('''
/dts-v1/;
/ {
aliases {
parent-alias = &parent_label;
};
parent_label: parent {
child {};
};
bar {
shouldbechosen {
foo = "bar";
};
};
};
''')
parent = dt.get_node('/parent')
child = dt.get_node('/parent/child')
dt.move_node(parent, '/newpath')
assert parent.path == '/newpath'
assert child.path == '/newpath/child'
assert child.parent is parent
assert child.parent is dt.get_node('/newpath')
assert dt.get_node('parent-alias') is parent
assert dt.label2node['parent_label'] is parent
assert not dt.has_node('/chosen')
dt.move_node(dt.get_node('/bar/shouldbechosen'), '/chosen')
assert dt.has_node('/chosen')
assert 'foo' in dt.get_node('/chosen').props
with dtlib_raises("the root node can't be moved"):
dt.move_node(dt.root, '/somewhere/else')
with dtlib_raises("can't move '/newpath' to '/aliases': "
"destination node exists"):
dt.move_node(parent, '/aliases')
with dtlib_raises("path 'xyz' doesn't start with '/'"):
dt.move_node(parent, 'xyz')
with dtlib_raises("new path '/ invalid': bad character ' '"):
dt.move_node(parent, '/ invalid')
with dtlib_raises("can't move '/newpath' to '/foo/bar': "
"parent node '/foo' doesn't exist"):
dt.move_node(parent, '/foo/bar')
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test_dtlib.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 16,109 |
```yaml
description: Device.wrong_phandle_array_name test
compatible: "wrong_phandle_array_name"
properties:
wrong-phandle-array-name:
type: phandle-array
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-wrong-bindings/wrong-phandle-array-name.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 38 |
```yaml
description: Device.wrong_specifier_space_type test
compatible: "wrong_specifier_space_type"
properties:
wrong-type-for-specifier-space:
type: phandle
specifier-space: foobar
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-wrong-bindings/wrong-specifier-space-type.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 45 |
```yaml
description: An empty property-allowlist is valid.
compatible: empty-allowlist
include:
- name: include.yaml
property-allowlist: []
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/empty-allowlist.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 36 |
```yaml
description: An empty property-blocklist is valid.
compatible: empty-blocklist
include:
- name: include.yaml
property-blocklist: []
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/empty-blocklist.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 33 |
```yaml
description: |
Top-level binding file for testing included property spec paths.
base.yaml: specifies properties "x" and "y"
modified.yaml: includes base.yaml, modifies property "x"
top.yaml (this file): includes modified.yaml, specifies property "p"
From the top-level binding, we expect:
- "x" was last modified in modified.yaml
- "y" was last modified in base.yaml
- "p" was last modified in top.yaml
compatible: top-level
include: modified.yaml
properties:
p:
type: int
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/top.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 125 |
```python
import contextlib
from copy import deepcopy
import io
from logging import WARNING
import os
from pathlib import Path
import pytest
from devicetree import edtlib
# Test suite for edtlib.py.
#
# Run it using pytest:
#
# $ pytest testedtlib.py
#
# See the comment near the top of testdtlib.py for additional pytest advice.
#
# test.dts is the main test file. test-bindings/ and test-bindings-2/ have
# bindings. The tests mostly use string comparisons via the various __repr__()
# methods.
HERE = os.path.dirname(__file__)
@contextlib.contextmanager
def from_here():
    """Run the enclosed block with the CWD set to this test's directory.

    Convenience hack to minimize diff from zephyr.
    """
    saved_cwd = os.getcwd()
    try:
        os.chdir(HERE)
        yield
    finally:
        # Always restore the original working directory, even on error.
        os.chdir(saved_cwd)
def hpath(filename):
    """Convert 'filename' to the host path syntax."""
    host_path = Path(filename)
    return os.fspath(host_path)
def test_warnings(caplog):
    '''Tests for situations that should cause warnings.'''
    with from_here(): edtlib.EDT("test.dts", ["test-bindings"])

    enums_hpath = hpath('test-bindings/enums.yaml')
    # Warnings expected from parsing test.dts: a deprecated property,
    # three reg/unit-address mismatches, and two enum-tokenization issues.
    expected_warnings = [
        f"'oldprop' is marked as deprecated in 'properties:' in {hpath('test-bindings/deprecated.yaml')} for node /test-deprecated.",
        "unit address and first address in 'reg' (0x1) don't match for /reg-zero-size-cells/node",
        "unit address and first address in 'reg' (0x5) don't match for /reg-ranges/parent/node",
        "unit address and first address in 'reg' (0x30000000200000001) don't match for /reg-nested-ranges/grandparent/parent/node",
        f"compatible 'enums' in binding '{enums_hpath}' has non-tokenizable enum for property 'string-enum': 'foo bar', 'foo_bar'",
        f"compatible 'enums' in binding '{enums_hpath}' has enum for property 'tokenizable-lower-enum' that is only tokenizable in lowercase: 'bar', 'BAR'",
    ]
    # Exactly these warnings, in this order, all from the edtlib logger.
    assert caplog.record_tuples == [('devicetree.edtlib', WARNING, warning_message)
                                    for warning_message in expected_warnings]
def test_interrupts():
    '''Tests for the interrupts property.'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    # 'interrupts' resolved via a single 'interrupt-parent' phandle.
    node = edt.get_node("/interrupt-parent-test/node")
    controller = edt.get_node('/interrupt-parent-test/controller')
    assert node.interrupts == [
        edtlib.ControllerAndData(node=node, controller=controller, data={'one': 1, 'two': 2, 'three': 3}, name='foo', basename=None),
        edtlib.ControllerAndData(node=node, controller=controller, data={'one': 4, 'two': 5, 'three': 6}, name='bar', basename=None)
    ]

    # 'interrupts-extended': each entry names its own controller inline.
    node = edt.get_node("/interrupts-extended-test/node")
    controller_0 = edt.get_node('/interrupts-extended-test/controller-0')
    controller_1 = edt.get_node('/interrupts-extended-test/controller-1')
    controller_2 = edt.get_node('/interrupts-extended-test/controller-2')
    assert node.interrupts == [
        edtlib.ControllerAndData(node=node, controller=controller_0, data={'one': 1}, name=None, basename=None),
        edtlib.ControllerAndData(node=node, controller=controller_1, data={'one': 2, 'two': 3}, name=None, basename=None),
        edtlib.ControllerAndData(node=node, controller=controller_2, data={'one': 4, 'two': 5, 'three': 6}, name=None, basename=None)
    ]

    # 'interrupt-map' remapping for the node at unit address 0.
    node = edt.get_node("/interrupt-map-test/node@0")
    controller_0 = edt.get_node('/interrupt-map-test/controller-0')
    controller_1 = edt.get_node('/interrupt-map-test/controller-1')
    controller_2 = edt.get_node('/interrupt-map-test/controller-2')
    assert node.interrupts == [
        edtlib.ControllerAndData(node=node, controller=controller_0, data={'one': 0}, name=None, basename=None),
        edtlib.ControllerAndData(node=node, controller=controller_1, data={'one': 0, 'two': 1}, name=None, basename=None),
        edtlib.ControllerAndData(node=node, controller=controller_2, data={'one': 0, 'two': 0, 'three': 2}, name=None, basename=None)
    ]

    # Same map, second child node (unit address 1) maps to different data.
    node = edt.get_node("/interrupt-map-test/node@1")
    assert node.interrupts == [
        edtlib.ControllerAndData(node=node, controller=controller_0, data={'one': 3}, name=None, basename=None),
        edtlib.ControllerAndData(node=node, controller=controller_1, data={'one': 0, 'two': 4}, name=None, basename=None),
        edtlib.ControllerAndData(node=node, controller=controller_2, data={'one': 0, 'two': 0, 'three': 5}, name=None, basename=None)
    ]

    # 'interrupt-map' with mask/pass-thru bit operations applied.
    node = edt.get_node("/interrupt-map-bitops-test/node@70000000E")
    assert node.interrupts == [
        edtlib.ControllerAndData(node=node, controller=edt.get_node('/interrupt-map-bitops-test/controller'), data={'one': 3, 'two': 2}, name=None, basename=None)
    ]
def test_ranges():
    '''Tests for the ranges property'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    # 1 child address cell, 2 parent address cells, 1 size cell.
    node = edt.get_node("/reg-ranges/parent")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x1, parent_bus_cells=0x2, parent_bus_addr=0xa0000000b, length_cells=0x1, length=0x1),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x2, parent_bus_cells=0x2, parent_bus_addr=0xc0000000d, length_cells=0x1, length=0x2),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x4, parent_bus_cells=0x2, parent_bus_addr=0xe0000000f, length_cells=0x1, length=0x1)
    ]

    # Nested ranges: grandparent level uses 2/3 address cells and 2 size cells.
    node = edt.get_node("/reg-nested-ranges/grandparent")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0x0, parent_bus_cells=0x3, parent_bus_addr=0x30000000000000000, length_cells=0x2, length=0x200000002)
    ]

    node = edt.get_node("/reg-nested-ranges/grandparent/parent")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x0, parent_bus_cells=0x2, parent_bus_addr=0x200000000, length_cells=0x1, length=0x2)
    ]

    # An empty 'ranges;' (zero cells) yields an empty list.
    assert edt.get_node("/ranges-zero-cells/node").ranges == []

    # Zero parent address cells and zero size cells: those fields are None.
    node = edt.get_node("/ranges-zero-parent-cells/node")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0xa, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x0, length=None),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x1a, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x0, length=None),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x2a, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x0, length=None)
    ]

    node = edt.get_node("/ranges-one-address-cells/node")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0xa, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x1, length=0xb),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x1a, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x1, length=0x1b),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x2a, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x1, length=0x2b)
    ]

    # Two size cells: lengths span 64 bits.
    node = edt.get_node("/ranges-one-address-two-size-cells/node")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0xa, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x2, length=0xb0000000c),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x1a, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x2, length=0x1b0000001c),
        edtlib.Range(node=node, child_bus_cells=0x1, child_bus_addr=0x2a, parent_bus_cells=0x0, parent_bus_addr=None, length_cells=0x2, length=0x2b0000002c)
    ]

    node = edt.get_node("/ranges-two-address-cells/node@1")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0xa0000000b, parent_bus_cells=0x1, parent_bus_addr=0xc, length_cells=0x1, length=0xd),
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0x1a0000001b, parent_bus_cells=0x1, parent_bus_addr=0x1c, length_cells=0x1, length=0x1d),
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0x2a0000002b, parent_bus_cells=0x1, parent_bus_addr=0x2c, length_cells=0x1, length=0x2d)
    ]

    node = edt.get_node("/ranges-two-address-two-size-cells/node@1")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0xa0000000b, parent_bus_cells=0x1, parent_bus_addr=0xc, length_cells=0x2, length=0xd0000000e),
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0x1a0000001b, parent_bus_cells=0x1, parent_bus_addr=0x1c, length_cells=0x2, length=0x1d0000001e),
        edtlib.Range(node=node, child_bus_cells=0x2, child_bus_addr=0x2a0000002b, parent_bus_cells=0x1, parent_bus_addr=0x2c, length_cells=0x2, length=0x2d0000001d)
    ]

    # Three address cells: child addresses span 96 bits.
    node = edt.get_node("/ranges-three-address-cells/node@1")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x3, child_bus_addr=0xa0000000b0000000c, parent_bus_cells=0x2, parent_bus_addr=0xd0000000e, length_cells=0x1, length=0xf),
        edtlib.Range(node=node, child_bus_cells=0x3, child_bus_addr=0x1a0000001b0000001c, parent_bus_cells=0x2, parent_bus_addr=0x1d0000001e, length_cells=0x1, length=0x1f),
        edtlib.Range(node=node, child_bus_cells=0x3, child_bus_addr=0x2a0000002b0000002c, parent_bus_cells=0x2, parent_bus_addr=0x2d0000002e, length_cells=0x1, length=0x2f)
    ]

    node = edt.get_node("/ranges-three-address-two-size-cells/node@1")
    assert node.ranges == [
        edtlib.Range(node=node, child_bus_cells=0x3, child_bus_addr=0xa0000000b0000000c, parent_bus_cells=0x2, parent_bus_addr=0xd0000000e, length_cells=0x2, length=0xf00000010),
        edtlib.Range(node=node, child_bus_cells=0x3, child_bus_addr=0x1a0000001b0000001c, parent_bus_cells=0x2, parent_bus_addr=0x1d0000001e, length_cells=0x2, length=0x1f00000110),
        edtlib.Range(node=node, child_bus_cells=0x3, child_bus_addr=0x2a0000002b0000002c, parent_bus_cells=0x2, parent_bus_addr=0x2d0000002e, length_cells=0x2, length=0x2f00000210)
    ]
def test_reg():
    '''Tests for the regs property'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    def verify_regs(node, expected_tuples):
        # Check that 'node.regs' matches the (name, addr, size) tuples
        # in 'expected_tuples', in order.
        regs = node.regs
        assert len(regs) == len(expected_tuples)
        for reg, expected_tuple in zip(regs, expected_tuples):
            name, addr, size = expected_tuple
            assert reg.node is node
            assert reg.name == name
            assert reg.addr == addr
            assert reg.size == size

    # #address-cells = 0: addresses are None, only sizes present.
    verify_regs(edt.get_node("/reg-zero-address-cells/node"),
                [('foo', None, 0x1),
                 ('bar', None, 0x2)])

    # #size-cells = 0: sizes are None, only addresses present.
    verify_regs(edt.get_node("/reg-zero-size-cells/node"),
                [(None, 0x1, None),
                 (None, 0x2, None)])

    # Addresses are translated through the parent's 'ranges'.
    verify_regs(edt.get_node("/reg-ranges/parent/node"),
                [(None, 0x5, 0x1),
                 (None, 0xe0000000f, 0x1),
                 (None, 0xc0000000e, 0x1),
                 (None, 0xc0000000d, 0x1),
                 (None, 0xa0000000b, 0x1),
                 (None, 0x0, 0x1)])

    # Translation through two levels of 'ranges'.
    verify_regs(edt.get_node("/reg-nested-ranges/grandparent/parent/node"),
                [(None, 0x30000000200000001, 0x1)])
def test_pinctrl():
    '''Test 'pinctrl-<index>'.'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    node = edt.get_node("/pinctrl/dev")
    state_1 = edt.get_node('/pinctrl/pincontroller/state-1')
    state_2 = edt.get_node('/pinctrl/pincontroller/state-2')
    # pinctrl-0/1/2 map to names 'zero'/'one'/'two' with 0, 1, and 2
    # configuration nodes respectively.
    assert node.pinctrls == [
        edtlib.PinCtrl(node=node, name='zero', conf_nodes=[]),
        edtlib.PinCtrl(node=node, name='one', conf_nodes=[state_1]),
        edtlib.PinCtrl(node=node, name='two', conf_nodes=[state_1, state_2])
    ]
def test_hierarchy():
    '''Test Node.parent and Node.children'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    # The root node has no parent.
    assert edt.get_node("/").parent is None

    assert str(edt.get_node("/parent/child-1").parent) == \
        "<Node /parent in 'test.dts', no binding>"

    assert str(edt.get_node("/parent/child-2/grandchild").parent) == \
        "<Node /parent/child-2 in 'test.dts', no binding>"

    # 'children' is an ordered name -> Node mapping.
    assert str(edt.get_node("/parent").children) == \
        "{'child-1': <Node /parent/child-1 in 'test.dts', no binding>, 'child-2': <Node /parent/child-2 in 'test.dts', no binding>}"

    # Leaf nodes have an empty children mapping.
    assert edt.get_node("/parent/child-1").children == {}
def test_child_index():
    '''Test Node.child_index.'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    parent, child_1, child_2 = [edt.get_node(path) for path in
                                ("/parent",
                                 "/parent/child-1",
                                 "/parent/child-2")]
    # Children are indexed in declaration order.
    assert parent.child_index(child_1) == 0
    assert parent.child_index(child_2) == 1

    # A node that is not a child raises KeyError.
    with pytest.raises(KeyError):
        parent.child_index(parent)
def test_include():
    '''Test 'include:' and the legacy 'inherits: !include ...' in bindings'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    binding_include = edt.get_node("/binding-include")

    assert binding_include.description == "Parent binding"

    # Properties come from the binding itself and from everything it includes.
    verify_props(binding_include,
                 ['foo', 'bar', 'baz', 'qaz'],
                 ['int', 'int', 'int', 'int'],
                 [0, 1, 2, 3])

    # The child binding inherits the same included property set.
    verify_props(edt.get_node("/binding-include/child"),
                 ['foo', 'bar', 'baz', 'qaz'],
                 ['int', 'int', 'int', 'int'],
                 [0, 1, 2, 3])
def test_include_filters():
    '''Test property-allowlist and property-blocklist in an include.'''

    fname2path = {'include.yaml': 'test-bindings-include/include.yaml',
                  'include-2.yaml': 'test-bindings-include/include-2.yaml'}

    # --- Error cases: malformed filters must raise EDTError. ---
    # NOTE: inside pytest.raises blocks the Binding() return value is never
    # produced, so the results are intentionally not assigned (the previous
    # 'binding =' assignments were dead code).

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/allow-and-blocklist.yaml", fname2path)
    assert ("should not specify both 'property-allowlist:' and 'property-blocklist:'"
            in str(e.value))

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/allow-and-blocklist-child.yaml", fname2path)
    assert ("should not specify both 'property-allowlist:' and 'property-blocklist:'"
            in str(e.value))

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/allow-not-list.yaml", fname2path)
    value_str = str(e.value)
    assert value_str.startswith("'property-allowlist' value")
    assert value_str.endswith("should be a list")

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/block-not-list.yaml", fname2path)
    value_str = str(e.value)
    assert value_str.startswith("'property-blocklist' value")
    assert value_str.endswith("should be a list")

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/include-invalid-keys.yaml", fname2path)
    value_str = str(e.value)
    assert value_str.startswith(
        "'include:' in test-bindings-include/include-invalid-keys.yaml should not have these "
        "unexpected contents: ")
    assert 'bad-key-1' in value_str
    assert 'bad-key-2' in value_str

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/include-invalid-type.yaml", fname2path)
    value_str = str(e.value)
    assert value_str.startswith(
        "'include:' in test-bindings-include/include-invalid-type.yaml "
        "should be a string or list, but has type ")

    with pytest.raises(edtlib.EDTError) as e:
        with from_here():
            edtlib.Binding("test-bindings-include/include-no-name.yaml", fname2path)
    value_str = str(e.value)
    assert value_str.startswith("'include:' element")
    assert value_str.endswith(
        "in test-bindings-include/include-no-name.yaml should have a 'name' key")

    # --- Success cases: filters select the expected property subsets. ---

    with from_here():
        binding = edtlib.Binding("test-bindings-include/allowlist.yaml", fname2path)
        assert set(binding.prop2specs.keys()) == {'x'}  # 'x' is allowed

        binding = edtlib.Binding("test-bindings-include/empty-allowlist.yaml", fname2path)
        assert set(binding.prop2specs.keys()) == set()  # nothing is allowed

        binding = edtlib.Binding("test-bindings-include/blocklist.yaml", fname2path)
        assert set(binding.prop2specs.keys()) == {'y', 'z'}  # 'x' is blocked

        binding = edtlib.Binding("test-bindings-include/empty-blocklist.yaml", fname2path)
        assert set(binding.prop2specs.keys()) == {'x', 'y', 'z'}  # nothing is blocked

        binding = edtlib.Binding("test-bindings-include/intermixed.yaml", fname2path)
        assert set(binding.prop2specs.keys()) == {'x', 'a'}

        binding = edtlib.Binding("test-bindings-include/include-no-list.yaml", fname2path)
        assert set(binding.prop2specs.keys()) == {'x', 'y', 'z'}

        # Filters also apply recursively to child and grandchild bindings.
        binding = edtlib.Binding("test-bindings-include/filter-child-bindings.yaml", fname2path)
        child = binding.child_binding
        grandchild = child.child_binding
        assert set(binding.prop2specs.keys()) == {'x'}
        assert set(child.prop2specs.keys()) == {'child-prop-2'}
        assert set(grandchild.prop2specs.keys()) == {'grandchild-prop-1'}

        # Allowlist at one level, blocklist at another (see
        # allow-and-blocklist-multilevel.yaml).
        binding = edtlib.Binding("test-bindings-include/allow-and-blocklist-multilevel.yaml",
                                 fname2path)
        assert set(binding.prop2specs.keys()) == {'x'}  # 'x' is allowed
        child = binding.child_binding
        assert set(child.prop2specs.keys()) == {'child-prop-1', 'child-prop-2',
                                                'x', 'z'}  # root level 'y' is blocked
def test_include_paths():
    '''Test "last modified" semantic for included bindings paths.'''

    fname2path = {'base.yaml': 'test-bindings-include/base.yaml',
                  'modified.yaml': 'test-bindings-include/modified.yaml'}

    with from_here():
        top = edtlib.Binding('test-bindings-include/top.yaml', fname2path)

        # Each PropertySpec.path points at the file that last modified it
        # (see top.yaml's own description of the include chain).
        assert 'modified.yaml' == os.path.basename(top.prop2specs["x"].path)
        assert 'base.yaml' == os.path.basename(top.prop2specs["y"].path)
        assert 'top.yaml' == os.path.basename(top.prop2specs["p"].path)
def test_include_filters_included_bindings():
    '''Test filters set by including bindings.'''
    fname2path = {'base.yaml': 'test-bindings-include/base.yaml',
                  'inc-base.yaml': 'test-bindings-include/inc-base.yaml'}

    # An allowlist in the including binding filters the transitively
    # included properties.
    with from_here():
        top_allows = edtlib.Binding('test-bindings-include/top-allows.yaml', fname2path)
    assert top_allows.prop2specs.get("x")
    assert not top_allows.prop2specs.get("y")

    # A blocklist works the same way, removing the listed properties.
    with from_here():
        top_blocks = edtlib.Binding('test-bindings-include/top-blocks.yaml', fname2path)
    assert not top_blocks.prop2specs.get("x")
    assert top_blocks.prop2specs.get("y")
def test_bus():
    '''Test 'bus:' and 'on-bus:' in bindings'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    assert isinstance(edt.get_node("/buses/foo-bus").buses, list)
    assert "foo" in edt.get_node("/buses/foo-bus").buses

    # foo-bus does not itself appear on a bus
    assert isinstance(edt.get_node("/buses/foo-bus").on_buses, list)
    assert not edt.get_node("/buses/foo-bus").on_buses
    assert edt.get_node("/buses/foo-bus").bus_node is None

    # foo-bus/node1 is not a bus node...
    assert isinstance(edt.get_node("/buses/foo-bus/node1").buses, list)
    assert not edt.get_node("/buses/foo-bus/node1").buses
    # ...but is on a bus
    assert isinstance(edt.get_node("/buses/foo-bus/node1").on_buses, list)
    assert "foo" in edt.get_node("/buses/foo-bus/node1").on_buses
    assert edt.get_node("/buses/foo-bus/node1").bus_node.path == \
        "/buses/foo-bus"

    # foo-bus/node2 is not a bus node...
    assert isinstance(edt.get_node("/buses/foo-bus/node2").buses, list)
    assert not edt.get_node("/buses/foo-bus/node2").buses
    # ...but is on a bus
    assert isinstance(edt.get_node("/buses/foo-bus/node2").on_buses, list)
    assert "foo" in edt.get_node("/buses/foo-bus/node2").on_buses

    # no-bus-node is not a bus node...
    assert isinstance(edt.get_node("/buses/no-bus-node").buses, list)
    assert not edt.get_node("/buses/no-bus-node").buses
    # ... and is not on a bus
    assert isinstance(edt.get_node("/buses/no-bus-node").on_buses, list)
    assert not edt.get_node("/buses/no-bus-node").on_buses

    # Same compatible string, but different bindings from being on different
    # buses
    assert str(edt.get_node("/buses/foo-bus/node1").binding_path) == \
        hpath("test-bindings/device-on-foo-bus.yaml")
    assert str(edt.get_node("/buses/foo-bus/node2").binding_path) == \
        hpath("test-bindings/device-on-any-bus.yaml")
    assert str(edt.get_node("/buses/bar-bus/node").binding_path) == \
        hpath("test-bindings/device-on-bar-bus.yaml")
    assert str(edt.get_node("/buses/no-bus-node").binding_path) == \
        hpath("test-bindings/device-on-any-bus.yaml")

    # foo-bus/node/nested also appears on the foo-bus bus
    assert isinstance(edt.get_node("/buses/foo-bus/node1/nested").on_buses, list)
    assert "foo" in edt.get_node("/buses/foo-bus/node1/nested").on_buses
    assert str(edt.get_node("/buses/foo-bus/node1/nested").binding_path) == \
        hpath("test-bindings/device-on-foo-bus.yaml")
def test_child_binding():
    '''Test 'child-binding:' in bindings'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    child1 = edt.get_node("/child-binding/child-1")
    child2 = edt.get_node("/child-binding/child-2")
    grandchild = edt.get_node("/child-binding/child-1/grandchild")

    # Children (and grandchildren) pick up the child-binding from the
    # parent's binding file, not a binding of their own.
    assert str(child1.binding_path) == hpath("test-bindings/child-binding.yaml")
    assert str(child1.description) == "child node"
    verify_props(child1, ['child-prop'], ['int'], [1])

    assert str(child2.binding_path) == hpath("test-bindings/child-binding.yaml")
    assert str(child2.description) == "child node"
    verify_props(child2, ['child-prop'], ['int'], [3])

    assert str(grandchild.binding_path) == hpath("test-bindings/child-binding.yaml")
    assert str(grandchild.description) == "grandchild node"
    verify_props(grandchild, ['grandchild-prop'], ['int'], [2])

    # A child binding without its own 'compatible:' has compatible None.
    with from_here():
        binding_file = Path("test-bindings/child-binding.yaml").resolve()
        top = edtlib.Binding(binding_file, {})
    child = top.child_binding
    assert Path(top.path) == binding_file
    assert Path(child.path) == binding_file
    assert top.compatible == 'top-binding'
    assert child.compatible is None

    # A child binding may declare its own 'compatible:'.
    with from_here():
        binding_file = Path("test-bindings/child-binding-with-compat.yaml").resolve()
        top = edtlib.Binding(binding_file, {})
    child = top.child_binding
    assert Path(top.path) == binding_file
    assert Path(child.path) == binding_file
    assert top.compatible == 'top-binding-with-compat'
    assert child.compatible == 'child-compat'
def test_props():
    '''Test Node.props (derived from DT and 'properties:' in the binding)'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    props_node = edt.get_node('/props')
    ctrl_1, ctrl_2 = [edt.get_node(path) for path in ['/ctrl-1', '/ctrl-2']]

    # One property of each scalar/array/phandle type from the binding.
    verify_props(props_node,
                 ['int',
                  'existent-boolean', 'nonexistent-boolean',
                  'array', 'uint8-array',
                  'string', 'string-array',
                  'phandle-ref', 'phandle-refs',
                  'path'],
                 ['int',
                  'boolean', 'boolean',
                  'array', 'uint8-array',
                  'string', 'string-array',
                  'phandle', 'phandles',
                  'path'],
                 [1,
                  True, False,
                  [1,2,3], b'\x124',
                  'foo', ['foo','bar','baz'],
                  ctrl_1, [ctrl_1,ctrl_2],
                  ctrl_1])

    # phandle-array entries carry controller + specifier data.
    verify_phandle_array_prop(props_node,
                              'phandle-array-foos',
                              [(ctrl_1, {'one': 1}),
                               (ctrl_2, {'one': 2, 'two': 3})])

    # An empty (zero) entry in a phandle-array shows up as None.
    verify_phandle_array_prop(edt.get_node("/props-2"),
                              "phandle-array-foos",
                              [(edt.get_node('/ctrl-0-1'), {}),
                               None,
                               (edt.get_node('/ctrl-0-2'), {})])

    verify_phandle_array_prop(props_node,
                              'foo-gpios',
                              [(ctrl_1, {'gpio-one': 1})])
def test_nexus():
    '''Test <prefix>-map via gpio-map (the most common case).'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    source = edt.get_node("/gpio-map/source")
    destination = edt.get_node('/gpio-map/destination')

    # Both entries of source's 'foo-gpios' are remapped through the
    # gpio-map nexus node to 'destination'.
    verify_phandle_array_prop(source,
                              'foo-gpios',
                              [(destination, {'val': 6}),
                               (destination, {'val': 5})])

    # Fix: the original compared against f"gpio", an f-string with no
    # placeholders (ruff F541) — a plain literal is equivalent and clearer.
    assert source.props["foo-gpios"].val[0].basename == "gpio"
def test_prop_defaults():
    '''Test property default values given in bindings'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    # Properties absent from the DT get their binding 'default:' value;
    # 'default-not-used' is present in the DT, so its default is ignored.
    verify_props(edt.get_node("/defaults"),
                 ['int',
                  'array', 'uint8-array',
                  'string', 'string-array',
                  'default-not-used'],
                 ['int',
                  'array', 'uint8-array',
                  'string', 'string-array',
                  'int'],
                 [123,
                  [1,2,3], b'\x89\xab\xcd',
                  'hello', ['hello','there'],
                  234])
def test_prop_enums():
    '''test properties with enum: in the binding'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])
    props = edt.get_node('/enums').props
    int_enum = props['int-enum']
    string_enum = props['string-enum']
    tokenizable_enum = props['tokenizable-enum']
    tokenizable_lower_enum = props['tokenizable-lower-enum']
    no_enum = props['no-enum']

    # int enums are never tokenizable.
    assert int_enum.val == 1
    assert int_enum.enum_index == 0
    assert not int_enum.spec.enum_tokenizable
    assert not int_enum.spec.enum_upper_tokenizable

    # 'foo bar' and 'foo_bar' would collide as tokens, so not tokenizable.
    assert string_enum.val == 'foo_bar'
    assert string_enum.enum_index == 1
    assert not string_enum.spec.enum_tokenizable
    assert not string_enum.spec.enum_upper_tokenizable

    # Non-alphanumeric characters are mapped to '_' when tokenizing.
    assert tokenizable_enum.val == '123 is ok'
    assert tokenizable_enum.val_as_token == '123_is_ok'
    assert tokenizable_enum.enum_index == 2
    assert tokenizable_enum.spec.enum_tokenizable
    assert tokenizable_enum.spec.enum_upper_tokenizable

    # 'bar' vs 'BAR' collide when uppercased, so only lowercase-tokenizable.
    assert tokenizable_lower_enum.val == 'bar'
    assert tokenizable_lower_enum.val_as_token == 'bar'
    assert tokenizable_lower_enum.enum_index == 0
    assert tokenizable_lower_enum.spec.enum_tokenizable
    assert not tokenizable_lower_enum.spec.enum_upper_tokenizable

    # No 'enum:' in the binding: enum_index is None.
    assert no_enum.enum_index is None
    assert not no_enum.spec.enum_tokenizable
    assert not no_enum.spec.enum_upper_tokenizable
def test_binding_inference():
    '''Test inferred bindings for special zephyr-specific nodes.'''
    warnings = io.StringIO()
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"], warnings)

    # Without inference, /zephyr,user has no binding and thus no props.
    assert str(edt.get_node("/zephyr,user").props) == '{}'

    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"], warnings,
                         infer_binding_for_paths=["/zephyr,user"])
    ctrl_1 = edt.get_node('/ctrl-1')
    ctrl_2 = edt.get_node('/ctrl-2')
    zephyr_user = edt.get_node("/zephyr,user")

    # With inference, property types are derived from the DT values.
    verify_props(zephyr_user,
                 ['boolean', 'bytes', 'number',
                  'numbers', 'string', 'strings'],
                 ['boolean', 'uint8-array', 'int',
                  'array', 'string', 'string-array'],
                 [True, b'\x81\x82\x83', 23,
                  [1,2,3], 'text', ['a','b','c']])

    # Phandle references are resolved to the actual nodes.
    assert zephyr_user.props['handle'].val is ctrl_1

    phandles = zephyr_user.props['phandles']
    val = phandles.val

    assert len(val) == 2
    assert val[0] is ctrl_1
    assert val[1] is ctrl_2

    verify_phandle_array_prop(zephyr_user,
                              'phandle-array-foos',
                              [(edt.get_node('/ctrl-2'), {'one': 1, 'two': 2})])
def test_multi_bindings():
    '''Test having multiple directories with bindings'''
    with from_here():
        edt = edtlib.EDT("test-multidir.dts", ["test-bindings", "test-bindings-2"])

    # Each node matches the binding from the corresponding directory.
    assert str(edt.get_node("/in-dir-1").binding_path) == \
        hpath("test-bindings/multidir.yaml")
    assert str(edt.get_node("/in-dir-2").binding_path) == \
        hpath("test-bindings-2/multidir.yaml")
def test_dependencies():
    '''Test dependency relations'''
    with from_here():
        edt = edtlib.EDT("test-multidir.dts", ["test-bindings", "test-bindings-2"])

    # The root always has dependency ordinal 0; children depend on it.
    assert edt.get_node("/").dep_ordinal == 0
    assert edt.get_node("/in-dir-1").dep_ordinal == 1
    assert edt.get_node("/") in edt.get_node("/in-dir-1").depends_on
    assert edt.get_node("/in-dir-1") in edt.get_node("/").required_by
def test_child_dependencies():
    '''Test dependencies relationship with child nodes propagated to parent'''
    with from_here():
        edt = edtlib.EDT("test.dts", ["test-bindings"])

    dep_node = edt.get_node("/child-binding-dep")

    # A dependency referenced by a (grand)child is also a dependency of
    # the enclosing nodes, and appears in required_by symmetrically.
    assert dep_node in edt.get_node("/child-binding").depends_on
    assert dep_node in edt.get_node("/child-binding/child-1/grandchild").depends_on
    assert dep_node in edt.get_node("/child-binding/child-2").depends_on
    assert edt.get_node("/child-binding") in dep_node.required_by
    assert edt.get_node("/child-binding/child-1/grandchild") in dep_node.required_by
    assert edt.get_node("/child-binding/child-2") in dep_node.required_by
def test_slice_errs(tmp_path):
    '''Test error messages from the internal _slice() helper'''

    dts_file = tmp_path / "error.dts"

    # 'reg' length must be a multiple of 4*(#address-cells + #size-cells).
    verify_error("""
/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <2>;

	sub {
		reg = <3>;
	};
};
""",
                 dts_file,
                 f"'reg' property in <Node /sub in '{dts_file}'> has length 4, which is not evenly divisible by 12 (= 4*(<#address-cells> (= 1) + <#size-cells> (= 2))). Note that #*-cells properties come either from the parent node or from the controller (in the case of 'interrupts').")

    # 'interrupts' length must be a multiple of 4*#interrupt-cells
    # (taken from the interrupt controller, not the parent).
    verify_error("""
/dts-v1/;

/ {
	sub {
		interrupts = <1>;
		interrupt-parent = < &{/controller} >;
	};
	controller {
		interrupt-controller;
		#interrupt-cells = <2>;
	};
};
""",
                 dts_file,
                 f"'interrupts' property in <Node /sub in '{dts_file}'> has length 4, which is not evenly divisible by 8 (= 4*<#interrupt-cells>). Note that #*-cells properties come either from the parent node or from the controller (in the case of 'interrupts').")

    # 'ranges' needs both this node's and the parent's #address-cells.
    verify_error("""
/dts-v1/;

/ {
	#address-cells = <1>;

	sub-1 {
		#address-cells = <2>;
		#size-cells = <3>;
		ranges = <4 5>;

		sub-2 {
			reg = <1 2 3 4 5>;
		};
	};
};
""",
                 dts_file,
                 f"'ranges' property in <Node /sub-1 in '{dts_file}'> has length 8, which is not evenly divisible by 24 (= 4*(<#address-cells> (= 2) + <#address-cells for parent> (= 1) + <#size-cells> (= 3))). Note that #*-cells properties come either from the parent node or from the controller (in the case of 'interrupts').")
def test_bad_compatible(tmp_path):
    '''Test that invalid compatible strings are rejected.'''
    # An invalid compatible should cause an error, even on a node with
    # no binding.

    dts_file = tmp_path / "error.dts"

    # Whitespace is not allowed by the compatible-string regex.
    verify_error("""
/dts-v1/;

/ {
	foo {
		compatible = "no, whitespace";
	};
};
""",
                 dts_file,
                 r"node '/foo' compatible 'no, whitespace' must match this regular expression: '^[a-zA-Z][a-zA-Z0-9,+\-._]+$'")
def test_wrong_props():
    '''Test Node.wrong_props (derived from DT and 'properties:' in the binding)'''

    with from_here():
        # 'specifier-space' is only valid on phandle-array properties.
        with pytest.raises(edtlib.EDTError) as e:
            edtlib.Binding("test-wrong-bindings/wrong-specifier-space-type.yaml", None)
        assert ("'specifier-space' in 'properties: wrong-type-for-specifier-space' has type 'phandle', expected 'phandle-array'"
                in str(e.value))

        # A phandle-array property not named '*-gpios'/'*s' needs an
        # explicit 'specifier-space'.
        with pytest.raises(edtlib.EDTError) as e:
            edtlib.Binding("test-wrong-bindings/wrong-phandle-array-name.yaml", None)
        value_str = str(e.value)
        assert value_str.startswith("'wrong-phandle-array-name' in 'properties:'")
        assert value_str.endswith("but no 'specifier-space' was provided.")
def test_deepcopy():
    '''Test that deepcopy(EDT) produces an equal but fully independent copy.'''
    with from_here():
        # We intentionally use different kwarg values than the
        # defaults to make sure they're getting copied. This implies
        # we have to set werror=True, so we can't use test.dts, since
        # that generates warnings on purpose.
        edt = edtlib.EDT("test-multidir.dts",
                         ["test-bindings", "test-bindings-2"],
                         warn_reg_unit_address_mismatch=False,
                         default_prop_types=False,
                         support_fixed_partitions_on_any_bus=False,
                         infer_binding_for_paths=['/test-node'],
                         vendor_prefixes={'test-vnd': 'A test vendor'},
                         werror=True)
        edt_copy = deepcopy(edt)

    # Nodes can't be compared by identity across the copy, so compare
    # by path via these helpers.
    def equal_paths(list1, list2):
        # Two node lists are "equal" if their paths match pairwise.
        assert len(list1) == len(list2)
        return all(elt1.path == elt2.path for elt1, elt2 in zip(list1, list2))

    def equal_key2path(key2node1, key2node2):
        # Two key->node dicts are "equal" if keys and node paths match.
        assert len(key2node1) == len(key2node2)
        return (all(key1 == key2 for (key1, key2) in
                    zip(key2node1, key2node2)) and
                all(node1.path == node2.path for (node1, node2) in
                    zip(key2node1.values(), key2node2.values())))

    def equal_key2paths(key2nodes1, key2nodes2):
        # Two key->node-list dicts are "equal" if keys and path lists match.
        assert len(key2nodes1) == len(key2nodes2)
        return (all(key1 == key2 for (key1, key2) in
                    zip(key2nodes1, key2nodes2)) and
                all(equal_paths(nodes1, nodes2) for (nodes1, nodes2) in
                    zip(key2nodes1.values(), key2nodes2.values())))

    def test_equal_but_not_same(attribute, equal=None):
        # The copy's attribute must compare equal to the original's but
        # must be a distinct object (a true deep copy, not an alias).
        if equal is None:
            equal = lambda a, b: a == b
        copy = getattr(edt_copy, attribute)
        original = getattr(edt, attribute)
        assert equal(copy, original)
        assert copy is not original

    # Public attributes.
    test_equal_but_not_same("nodes", equal_paths)
    test_equal_but_not_same("compat2nodes", equal_key2paths)
    test_equal_but_not_same("compat2okay", equal_key2paths)
    test_equal_but_not_same("compat2vendor")
    test_equal_but_not_same("compat2model")
    test_equal_but_not_same("label2node", equal_key2path)
    test_equal_but_not_same("dep_ord2node", equal_key2path)
    assert edt_copy.dts_path == "test-multidir.dts"
    assert edt_copy.bindings_dirs == ["test-bindings", "test-bindings-2"]
    assert edt_copy.bindings_dirs is not edt.bindings_dirs

    # Internal state (constructor kwargs must round-trip through the copy).
    assert not edt_copy._warn_reg_unit_address_mismatch
    assert not edt_copy._default_prop_types
    assert not edt_copy._fixed_partitions_no_bus
    assert edt_copy._infer_binding_for_paths == set(["/test-node"])
    assert edt_copy._infer_binding_for_paths is not edt._infer_binding_for_paths
    assert edt_copy._vendor_prefixes == {"test-vnd": "A test vendor"}
    assert edt_copy._vendor_prefixes is not edt._vendor_prefixes
    assert edt_copy._werror
    test_equal_but_not_same("_compat2binding", equal_key2path)
    test_equal_but_not_same("_binding_paths")
    test_equal_but_not_same("_binding_fname2path")

    # The dtlib node -> edtlib node map must be copied entry by entry.
    assert len(edt_copy._node2enode) == len(edt._node2enode)
    for node1, node2 in zip(edt_copy._node2enode, edt._node2enode):
        enode1 = edt_copy._node2enode[node1]
        enode2 = edt._node2enode[node2]
        assert node1.path == node2.path
        assert enode1.path == enode2.path
        assert node1 is not node2
        assert enode1 is not enode2

    assert edt_copy._dt is not edt._dt
def verify_error(dts, dts_file, expected_err):
    """Check that parsing the devicetree source 'dts' raises an EDTError.

    'dts' (a string) is first written to the path 'dts_file'; the file is
    then parsed, and the resulting EDTError message must equal
    'expected_err' exactly.
    """
    Path(dts_file).write_text(dts, encoding="utf-8")

    with pytest.raises(edtlib.EDTError) as e:
        edtlib.EDT(dts_file, [])
    assert str(e.value) == expected_err
def verify_props(node, names, types, values):
# Verifies that each property in 'names' has the expected
# value in 'values'. Property lookup is done in Node 'node'.
for name, type, value in zip(names, types, values):
prop = node.props[name]
assert prop.name == name
assert prop.type == type
assert prop.val == value
assert prop.node is node
def verify_phandle_array_prop(node, name, values):
# Verifies 'node.props[name]' is a phandle-array, and has the
# expected controller/data values in 'values'. Elements
# of 'values' may be None.
prop = node.props[name]
assert prop.type == 'phandle-array'
assert prop.name == name
val = prop.val
assert isinstance(val, list)
assert len(val) == len(values)
for actual, expected in zip(val, values):
if expected is not None:
controller, data = expected
assert isinstance(actual, edtlib.ControllerAndData)
assert actual.controller is controller
assert actual.data == data
else:
assert actual is None
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test_edtlib.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 10,178 |
```yaml
description: |
Includes can be added at any level, so can property-allowlist and
property-blocklist.
compatible: allow-and-blocklist-multilevel
include:
- name: include.yaml
property-allowlist: [x]
child-binding:
include:
- name: include.yaml
property-blocklist: [y]
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/allow-and-blocklist-multilevel.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 76 |
```yaml
description: |
A property-blocklist, if given, must be a list. This binding should
cause an error.
compatible: block-not-list
include:
- name: include.yaml
property-blocklist:
foo:
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/block-not-list.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 51 |
```yaml
description: |
An include must not give both an allowlist and a blocklist.
This binding should cause an error.
compatible: allow-and-blocklist
include:
- name: include.yaml
property-blocklist: [x]
property-allowlist: [y]
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/allow-and-blocklist.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 61 |
```yaml
description: Test file for including other bindings
compatible: include
properties:
x:
type: int
y:
type: int
z:
type: int
child-binding:
properties:
child-prop-1:
type: int
child-prop-2:
type: int
child-binding:
properties:
grandchild-prop-1:
type: int
grandchild-prop-2:
type: int
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/include.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 100 |
```yaml
description: |
Invalid include element: no name key is present.
compatible: include-no-name
include:
- property-allowlist: [x]
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/include-no-name.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 34 |
```yaml
description: Valid property-allowlist.
compatible: allowlist
include:
- name: include.yaml
property-allowlist: [x]
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/allowlist.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 33 |
```yaml
description: |
Invalid include element: invalid keys are present.
compatible: include-invalid-keys
include:
- name: include.yaml
property-allowlist: [x]
bad-key-1: 3
bad-key-2: 3
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/include-invalid-keys.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 57 |
```yaml
description: |
A property-allowlist, if given, must be a list. This binding should
cause an error.
compatible: allow-not-list
include:
- name: include.yaml
property-allowlist:
foo:
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/allow-not-list.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 53 |
```yaml
description: Valid property-blocklist.
compatible: blocklist
include:
- name: include.yaml
property-blocklist: [x]
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/blocklist.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 31 |
```yaml
description: Second file for testing "intermixed" includes.
compatible: include-2
properties:
a:
type: int
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/include-2.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 30 |
```yaml
properties:
x:
type: int
y:
type: int
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/base.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 20 |
```restructuredtext
This directory contains bindings used to test the 'include:' feature.
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/README.rst | restructuredtext | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 14 |
```yaml
include: base.yaml
properties:
x:
required: true
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/modified.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 17 |
```yaml
description: Test binding for filtering 'child-binding' properties
include:
- name: include.yaml
property-allowlist: [x]
child-binding:
property-blocklist: [child-prop-1]
child-binding:
property-allowlist: [grandchild-prop-1]
compatible: filter-child-bindings
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/filter-child-bindings.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 70 |
```yaml
description: |
Invalid include: wrong top level type.
compatible: include-invalid-type
include:
a-map-is-not-allowed-here: 3
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/include-invalid-type.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 34 |
```yaml
include: base.yaml
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/inc-base.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7 |
```yaml
description: |
An include must not give both an allowlist and a blocklist in a
child binding. This binding should cause an error.
compatible: allow-and-blocklist-child
include:
- name: include.yaml
child-binding:
property-blocklist: [x]
property-allowlist: [y]
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/allow-and-blocklist-child.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 71 |
```yaml
description: A map element with just a name is valid, and has no filters.
compatible: include-no-list
include:
- name: include.yaml
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/include-no-list.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 34 |
```yaml
description: Test property-blocklist filters set by including bindings.
compatible: "top-blocklist"
include:
- name: inc-base.yaml
property-blocklist:
- x
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/top-blocks.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 40 |
```yaml
description: Test property-allowlist filters set by including bindings
compatible: "top-allowlist"
include:
- name: inc-base.yaml
property-allowlist:
- x
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/top-allows.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 43 |
```yaml
description: Including intermixed file names and maps is valid.
compatible: intermixed
include:
- name: include.yaml
property-allowlist: [x]
- include-2.yaml
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings-include/intermixed.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 44 |
```yaml
description: Property deprecated value test
compatible: "test-deprecated"
properties:
oldprop:
type: int
deprecated: true
required: false
curprop:
type: int
required: false
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/deprecated.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 51 |
```yaml
description: child-binding test
compatible: "top-binding"
child-binding:
description: child node
properties:
child-prop:
type: int
required: true
child-ref:
type: phandle
child-binding:
description: grandchild node
properties:
grandchild-prop:
type: int
required: true
grandchild-ref:
type: phandle
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/child-binding.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 90 |
```yaml
description: Device on any bus
compatible: "on-any-bus"
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/device-on-any-bus.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 17 |
```yaml
description: Binding in test-bindings/
compatible: "in-dir-1"
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/multidir.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 18 |
```yaml
description: Controller with one data value
compatible: "phandle-array-controller-1"
phandle-array-foo-cells:
- one
gpio-cells:
- gpio-one
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/phandle-array-controller-1.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 41 |
```yaml
description: Controller with zero data values
compatible: "phandle-array-controller-0"
phandle-array-foo-cells: []
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/phandle-array-controller-0.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 29 |
```yaml
description: GPIO source for mapping test
compatible: "gpio-src"
properties:
foo-gpios:
type: phandle-array
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/gpio-src.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 30 |
```yaml
description: Foo bus controller
compatible: "foo-bus"
bus: "foo"
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/foo-bus.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 20 |
```yaml
description: Bar bus controller
compatible: "bar-bus"
bus: "bar"
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/bar-bus.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 20 |
```yaml
description: Interrupt controller with three cells
compatible: "interrupt-three-cell"
interrupt-cells:
- one
- two
- three
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/interrupt-3-cell.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 33 |
```yaml
description: Parent binding
compatible: "binding-include-test"
include: child.yaml
properties:
foo:
# Changed from not being required in grandchild-1.yaml
required: true
# Type set in grandchild
child-binding:
# child.yaml included at child-binding level
include: child.yaml
properties:
foo:
# Changed from not being required in grandchild-1.yaml
required: true
# Type set in grandchild
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/parent.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 104 |
```yaml
properties:
qaz:
required: true
type: int
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/grandchild-3.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 18 |
```yaml
description: child-binding with separate compatible than the parent
compatible: "top-binding-with-compat"
child-binding:
compatible: child-compat
description: child node
properties:
child-prop:
type: int
required: true
child-binding:
description: grandchild node
properties:
grandchild-prop:
type: int
required: true
``` | /content/code_sandbox/scripts/dts/python-devicetree/tests/test-bindings/child-binding-with-compat.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 84 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.