text stringlengths 9 39.2M | dir stringlengths 25 226 | lang stringclasses 163 values | created_date timestamp[s] | updated_date timestamp[s] | repo_name stringclasses 751 values | repo_full_name stringclasses 752 values | star int64 1.01k 183k | len_tokens int64 1 18.5M |
|---|---|---|---|---|---|---|---|---|
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/val/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* The allocation below is deliberately never freed: this file is
	 * twister blackbox test data used to exercise sanitizer handling.
	 * Do not "fix" the leak.
	 */
	char *s = malloc(10);

	/* 9 characters + '\0' exactly fill the 10-byte buffer. */
	strcpy(s, "123456789");
	printf("string is: %s\n", s);
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/val/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 201 |
```yaml
tests:
san.ubsan:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/ubsan/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 55 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/ubsan/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	int k = INT_MAX;

	/* Intentional signed-integer overflow: this is the trigger the
	 * UndefinedBehaviorSanitizer (UBSan) blackbox fixture relies on.
	 * Do not "fix" it.
	 */
	k += 256;
	printf("num is: %d\n", k);
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/ubsan/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 186 |
```yaml
tests:
san.lsan:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/lsan/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/lsan/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
int helper(void)
{
	/* The 100-byte allocation is never freed: an intentional memory
	 * leak that the LeakSanitizer (LSan) blackbox fixture must
	 * detect. Do not "fix" it.
	 */
	char *s = malloc(100);

	s[0] = '!';
	s[1] = '\0';
	printf("string is: %s\n", s);
	return 0;
}
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* helper() intentionally leaks memory (LSan fixture). */
	helper();
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/lsan/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 221 |
```yaml
tests:
san.asan:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/asan/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 54 |
```python
#!/usr/bin/env python3
#
"""
Blackbox tests for twister's command line functions
"""
import importlib
import mock
import os
import pytest
import sys
import re
from conftest import (
TEST_DATA,
ZEPHYR_BASE,
clear_log_in_test,
sample_filename_mock,
testsuite_filename_mock
)
from twisterlib.testplan import TestPlan
@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestPrintOuts:
    """Blackbox tests for twister's informational command line options.

    The twister entry script is loaded as a module named ``__main__``
    (see ``setup_class``) and executed in-process with a mocked
    ``sys.argv``; every test expects a ``SystemExit`` and asserts on the
    captured stdout/stderr and on the exit code.
    """

    # (suite path, tags expected from --list-tags)
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['agnostic', 'subgrouped']
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['device']
        ),
    ]

    # (suite path, test identifiers expected from --list-tests)
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            [
                'dummy.agnostic.group1.subgroup1.assert',
                'dummy.agnostic.group1.subgroup2.assert',
                'dummy.agnostic.group2.assert1',
                'dummy.agnostic.group2.assert2',
                'dummy.agnostic.group2.assert3'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            [
                'dummy.device.group.assert'
            ]
        ),
    ]

    # (suite path, exact snippet expected inside the --test-tree output)
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            'Testsuite\n' \
            ' Samples\n' \
            ' Tests\n' \
            ' dummy\n' \
            ' agnostic\n' \
            ' dummy.agnostic.group1.subgroup1.assert\n' \
            ' dummy.agnostic.group1.subgroup2.assert\n' \
            ' dummy.agnostic.group2.assert1\n' \
            ' dummy.agnostic.group2.assert2\n' \
            ' dummy.agnostic.group2.assert3\n'
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            'Testsuite\n'
            ' Samples\n'
            ' Tests\n'
            ' dummy\n'
            ' device\n'
            ' dummy.device.group.assert\n'
        ),
    ]

    # (suite path, platforms to build/run for)
    TESTDATA_4 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy'),
            ['qemu_x86', 'qemu_x86_64', 'intel_adl_crb']
        )
    ]

    @classmethod
    def setup_class(cls):
        # Load the twister script as a module named '__main__' so that
        # exec_module() runs it exactly like a command line invocation.
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        # Nothing to clean up; kept for symmetry with setup_class.
        pass

    @pytest.mark.parametrize(
        'test_path, expected',
        TESTDATA_1,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_list_tags(self, capfd, out_path, test_path, expected):
        """--list-tags prints exactly the tags used by the given suites."""
        args = ['--outdir', out_path, '-T', test_path, '--list-tags']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # Tags are printed as "- <tag>" items; check set equality by
        # verifying containment in both directions.
        printed_tags = [tag.strip() for tag in out.split('- ')[1:]]
        assert all([tag in printed_tags for tag in expected])
        assert all([tag in expected for tag in printed_tags])

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, expected',
        TESTDATA_2,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_list_tests(self, capfd, out_path, test_path, expected):
        """--list-tests prints exactly the test identifiers of the suites."""
        args = ['--outdir', out_path, '-T', test_path, '--list-tests']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        printed_tests = [test.strip() for test in out.split('- ')[1:]]
        # The last "- " item also carries trailing output; keep only its
        # first line (the test name itself).
        printed_tests[-1] = printed_tests[-1].split('\n')[0]
        assert all([test in printed_tests for test in expected])
        assert all([test in expected for test in printed_tests])

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, expected',
        TESTDATA_3,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_tree(self, capfd, out_path, test_path, expected):
        """--test-tree output contains the expected tree rendering."""
        args = ['--outdir', out_path, '-T', test_path, '--test-tree']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert expected in out

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms',
        TESTDATA_4,
        ids=['tests']
    )
    def test_timestamps(self, capfd, out_path, test_path, test_platforms):
        """With --timestamps every non-empty stderr line has a timestamp."""
        args = ['-i', '--outdir', out_path, '-T', test_path, '--timestamps', '-v'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        # e.g. "2024-01-01 12:00:00,123 - INFO"
        info_regex = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} - (?:INFO|DEBUG|ERROR)'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # Collect every non-empty stderr line that lacks a timestamp.
        output = err.split('\n')
        err_lines = []
        for line in output:
            if line.strip():
                match = re.search(info_regex, line)
                if match is None:
                    err_lines.append(line)
        if err_lines:
            assert False, f'No timestamp in line {err_lines}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'flag',
        ['--abcd', '--1234', '-%', '-1']
    )
    def test_broken_parameter(self, capfd, flag):
        """Broken flags make twister exit with a non-zero code."""
        args = [flag]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # '-1' is expected to fail after argument parsing (exit code 1),
        # while the others are rejected by the parser itself (exit 2).
        if flag == '-1':
            assert str(sys_exit.value) == '1'
        else:
            assert str(sys_exit.value) == '2'

    @pytest.mark.parametrize(
        'flag',
        ['--help', '-h']
    )
    def test_help(self, capfd, flag):
        """--help / -h exit successfully."""
        args = [flag]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms',
        TESTDATA_4,
        ids=['tests']
    )
    def test_force_color(self, capfd, out_path, test_path, test_platforms):
        """A run with --force-color still finishes successfully."""
        args = ['-i', '--outdir', out_path, '-T', test_path, '--force-color'] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert str(sys_exit.value) == '0'

    @mock.patch.object(TestPlan, 'SAMPLE_FILENAME', sample_filename_mock)
    def test_size(self, capfd, out_path):
        """--size prints a section-size report for a previously built ELF."""
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'samples', 'hello_world')
        args = ['-i', '--outdir', out_path, '-T', path] + \
            [val for pair in zip(
                ['-p'] * len(test_platforms), test_platforms
            ) for val in pair]

        # First run: build the sample so a zephyr.elf exists.
        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        clear_log_in_test()
        capfd.readouterr()

        # Second run: point --size at the ELF produced above.
        p = os.path.relpath(path, ZEPHYR_BASE)
        prev_path = os.path.join(out_path, 'qemu_x86', p,
                                 'sample.basic.helloworld', 'zephyr', 'zephyr.elf')
        args = ['--size', prev_path]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        # Header and footer should be the most constant out of the report format.
        header_pattern = r'SECTION NAME\s+VMA\s+LMA\s+SIZE\s+HEX SZ\s+TYPE\s*\n'
        res = re.search(header_pattern, out)
        assert res, 'No stdout size report header found.'

        footer_pattern = r'Totals:\s+(?P<rom>[0-9]+)\s+bytes\s+\(ROM\),\s+' \
                         r'(?P<ram>[0-9]+)\s+bytes\s+\(RAM\)\s*\n'
        res = re.search(footer_pattern, out)
        assert res, 'No stdout size report footer found.'
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_printouts.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 2,369 |
```c
/*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
int *ptr;
int helper(void)
{
	char *s = malloc(10);

	/* 9 characters + '\0' exactly fill the 10-byte buffer. */
	strcpy(s, "123456789");
	/* Overwrites the terminator; still within bounds (index 9). */
	s[9] = '0';
	free(s);
	/* Intentional use-after-free: writing to and reading from s after
	 * free() is the defect the AddressSanitizer (ASan) blackbox
	 * fixture must trigger. Do not "fix" it.
	 */
	strcpy(s, "Hello");
	printf("string is: %s\n", s);
	return 0;
}
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* helper() intentionally performs a use-after-free (ASan fixture). */
	helper();
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/asan/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 235 |
```yaml
tests:
always_fail.dummy:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_build_error/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 42 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_build_error/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* The trailing semicolon is intentionally missing: this fixture
	 * must always fail to build so twister blackbox tests can observe
	 * a build error. Do not "fix" it.
	 */
	zassert_true(1, "1 was false")
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_build_error/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 79 |
```yaml
tests:
one_fail_two_error_one_pass.agnostic.group1.subgroup1:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup1/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup1/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/san/asan/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
tests:
one_fail_two_error_one_pass.agnostic.group1.subgroup3:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup3/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* Always-passing fixture: supplies the "pass" outcome for the
	 * one_fail_two_error_one_pass blackbox scenario. */
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup1/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_2_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_2_tests, test_assert)
{
	/* The bare "dummy" token is an intentional compile error: this
	 * fixture supplies one of the two "error" outcomes for the
	 * one_fail_two_error_one_pass blackbox scenario. Do not "fix" it.
	 */
	dummy
	zassert_true(0, "1 was false");
	zassert_false(1, "0 was true");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup3/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 93 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup3/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup4/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_2_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_2_tests, test_assert)
{
	/* The bare "dummy" token is an intentional compile error: this
	 * fixture supplies one of the two "error" outcomes for the
	 * one_fail_two_error_one_pass blackbox scenario. Do not "fix" it.
	 */
	dummy
	zassert_true(0, "1 was false");
	zassert_false(1, "0 was true");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup4/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 93 |
```yaml
tests:
one_fail_two_error_one_pass.agnostic.group1.subgroup4:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup4/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup2/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
tests:
one_fail_two_error_one_pass.agnostic.group1.subgroup2:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup2/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 64 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_2_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_2_tests, test_assert)
{
	/* Intentionally failing assertions: supplies the "fail" outcome
	 * for the one_fail_two_error_one_pass blackbox scenario. */
	zassert_true(0, "1 was false");
	zassert_false(1, "0 was true");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_two_error_one_pass/agnostic/group1/subgroup2/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 90 |
```yaml
tests:
sample.twister.pytest:
platform_allow:
- native_posix
- native_sim
harness: pytest
harness_config:
pytest_args: ["--custom-pytest-arg", "foo", "--cmdopt", "."]
tags:
- test_framework
- pytest
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/pytest/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 78 |
```unknown
CONFIG_ZTEST=y
CONFIG_IDLE_STACK_SIZE=4096
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/pytest/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 14 |
```python
#
import os
import pytest
# fixture cmdopt defined in conftest.py, it can be requested either in
# tests or in other fixtures
@pytest.fixture(autouse=True)
def pytest_cmdopt_handle(cmdopt):
    ''' An auto fixture, all tests automatically request this fixture.
    Argument "cmdopt" is a fixture defined in conftest.py, it returns
    the value of an option passed by twister, this fixture export
    that value to environment.
    '''
    print("handle cmdopt:")
    print(cmdopt)
    data_path = cmdopt
    # Publish the artifact path for code that reads it from the environment.
    os.environ['data_path'] = str(data_path)
def test_case(cmdopt):
''' Test cases make use of fixture cmdopt to get the value of "--cmdopt" option
passed by twister. Actually, fixture cmdopt returns a path of the directory
which holds the artifacts generated by ztest. The main work of test cases
in this file is to check those stuff in that directory.
This test case simply compare the return value of cmdopt with the
environment variable exported by fixture pytest_cmdopt_handle.
'''
assert os.path.exists(cmdopt)
print("run test cases in:")
print(cmdopt)
def test_custom_arg(custom_pytest_arg):
''' Test passing of custom command line arguments to pytest.
'''
assert custom_pytest_arg == "foo"
# Allow running this file directly (outside twister) for debugging.
if __name__ == "__main__":
    pytest.main()
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/pytest/pytest/test_sample.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 303 |
```python
#
import pytest
# add option "--cmdopt" to pytest, or it will report "unknown option"
# this option is passed from twister.
def pytest_addoption(parser):
    """Register the options twister passes on the pytest command line.

    Without this hook pytest would reject "--cmdopt" and
    "--custom-pytest-arg" as unknown options.
    """
    for option in ('--cmdopt', '--custom-pytest-arg'):
        parser.addoption(option)
# define fixture to return value of option "--cmdopt", this fixture
# will be requested by other fixture of tests.
@pytest.fixture()
def cmdopt(request):
    """Return the value of the "--cmdopt" option passed by twister."""
    return request.config.getoption('--cmdopt')
# define fixture to return value of option "--custom-pytest-arg", this fixture
# will be requested by other fixture of tests.
@pytest.fixture()
def custom_pytest_arg(request):
    """Return the value of the "--custom-pytest-arg" option."""
    return request.config.getoption('--custom-pytest-arg')
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/pytest/pytest/conftest.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 162 |
```yaml
tests:
platform_key.dummy:
platform_allow:
- qemu_x86
- qemu_x86_64
platform_key:
- simulation
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/platform_key/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 35 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/platform_key/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* Always-passing fixture used by the platform_key blackbox
	 * scenario (see test_data.yaml next to this file). */
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/platform_key/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(test_pytest, NULL, NULL, NULL, NULL, NULL);
ZTEST(test_pytest, test_pytest)
{
	/* Minimal on-target case; the real checks for this fixture run on
	 * the host side in pytest (see pytest/test_sample.py). */
	TC_PRINT("Hello world\n");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/pytest/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 50 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_one_pass/agnostic/group1/subgroup1/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* Always-passing fixture: supplies the "pass" outcome for the
	 * one_fail_one_pass blackbox scenario. */
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_one_pass/agnostic/group1/subgroup1/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```yaml
tests:
one_fail_one_pass.agnostic.group1.subgroup1:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_one_pass/agnostic/group1/subgroup1/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 62 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_one_pass/agnostic/group1/subgroup2/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
tests:
one_fail_one_pass.agnostic.group1.subgroup2:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_one_pass/agnostic/group1/subgroup2/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 62 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_2_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_2_tests, test_assert)
{
	/* Intentionally failing assertions: supplies the "fail" outcome
	 * for the one_fail_one_pass blackbox scenario. Do not "fix". */
	zassert_true(0, "1 was false");
	zassert_false(1, "0 was true");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/one_fail_one_pass/agnostic/group1/subgroup2/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 90 |
```yaml
common:
timeout: 10
tests:
always_timeout.dummy:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_timeout/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 50 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_timeout/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
tests:
seed_native_sim.dummy:
platform_allow:
- native_sim
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/seed_native_sim/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 29 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	int i = 0;

	/* Deliberate infinite loop: the suite's 10 s timeout (see
	 * test_data.yaml) must expire so blackbox tests can observe a
	 * timeout outcome. Do not "fix". */
	while (true) {
		i++;
	}
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_timeout/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 84 |
```unknown
config FAKE_ENTROPY_NATIVE_POSIX
default y
source "Kconfig.zephyr"
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/seed_native_sim/dummy/Kconfig | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 23 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/seed_native_sim/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	/* Intentionally failing assertions for the seed_native_sim
	 * blackbox fixture. Do not "fix". */
	zassert_true(0, "1 was false");
	zassert_false(1, "0 was true");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/seed_native_sim/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 90 |
```yaml
common:
timeout: 10
tests:
always_warning.dummy:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_warning/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 50 |
```c
/*
*
*/
#include <zephyr/ztest.h>
#include <zephyr/logging/log.h>
#define LOG_MODULE_NAME log_test
LOG_MODULE_REGISTER(LOG_MODULE_NAME, LOG_LEVEL_INF);
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
	TC_PRINT("Create log message before rise warning\n");
	LOG_WRN("log warning to custom warning");
	/* Intentional #warning so the build always emits a compiler
	 * warning for the always_warning blackbox fixture. Do not "fix". */
#warning ("Custom warning");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_warning/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 127 |
```unknown
CONFIG_ZTEST=y
CONFIG_TEST_LOGGING_DEFAULTS=y
CONFIG_LOG=y
CONFIG_LOG_OUTPUT=y
CONFIG_LOG_BACKEND_UART=y
CONFIG_LOG_MODE_IMMEDIATE=y
CONFIG_MAIN_STACK_SIZE=4096
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_warning/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 44 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/params/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
tests:
params.dummy:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags: params
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/params/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 46 |
```yaml
tests:
always_fail.dummy:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_fail/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 42 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_fail/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
/* Fixture for the "always_fail" twister blackbox scenario.
 *
 * Both assertions are deliberately inverted (true(0), false(1)) so this
 * test always reports a failure. Do not correct them — the failing result
 * is what the blackbox tests expect.
 */
ZTEST(a1_1_tests, test_assert)
{
	zassert_true(0, "1 was false");
	zassert_false(1, "0 was true");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/always_fail/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 90 |
```yaml
tests:
always_overflow.dummy:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/qemu_overflow/dummy/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 42 |
```unknown
&dram0 {
reg = < 0x100000 DT_SIZE_M(1) >;
};
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/qemu_overflow/dummy/qemu_x86.overlay | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 23 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/qemu_overflow/dummy/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
/* Smoke-test fixture for the "qemu_overflow" blackbox scenario: every
 * assertion trivially holds, so the test itself always passes. The
 * accompanying qemu_x86 overlay limits dram0 to 1 MiB, so the interesting
 * outcome is whether the image fits at build time, not this body.
 */
ZTEST(a1_1_tests, test_assert)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/qemu_overflow/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```yaml
tests:
dummy.device.group:
platform_allow: intel_adl_crb
integration_platforms:
- intel_adl_crb
tags: device
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 36 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(param_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(param_tests, test_assert1)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
ZTEST(param_tests, test_assert2)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
ZTEST(param_tests, test_assert3)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/params/dummy/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 328 |
```yaml
tests:
dummy.agnostic.group2:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags: agnostic
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 49 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a2_tests, test_assert3)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/src/submain.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 129 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a2_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a2_tests, test_assert1)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
ZTEST(a2_tests, test_assert2)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 239 |
```yaml
tests:
dummy.agnostic.group1.subgroup1:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 59 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(d_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(d_tests, test_assert)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 143 |
```yaml
tests:
dummy.agnostic.group1.subgroup2:
build_only: true
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
tags:
- agnostic
- subgrouped
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 65 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_1_tests, test_assert)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```unknown
CONFIG_ZTEST=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 6 |
```yaml
tests:
sample.pytest.shell:
filter: CONFIG_SERIAL and dt_chosen_enabled("zephyr,shell-uart")
min_ram: 40
harness: pytest
extra_configs:
- arch:posix:CONFIG_NATIVE_UART_0_ON_STDINOUT=y
integration_platforms:
- native_sim
- qemu_cortex_m3
tags:
- test_framework
- pytest
- shell
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/pytest/shell/test_sample.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 95 |
```unknown
CONFIG_PRINTK=y
CONFIG_SHELL=y
CONFIG_LOG=y
CONFIG_SHELL_BACKEND_SERIAL=y
CONFIG_KERNEL_SHELL=y
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/pytest/shell/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 28 |
```python
#
import logging
from twister_harness import Shell
logger = logging.getLogger(__name__)
def test_shell_print_help(shell: Shell):
logger.info('send "help" command')
lines = shell.exec_command('help')
assert 'Available commands:' in lines, 'expected response not found'
logger.info('response is valid')
def test_shell_print_version(shell: Shell):
logger.info('send "kernel version" command')
lines = shell.exec_command('kernel version')
assert any(['Zephyr version' in line for line in lines]), 'expected response not found'
logger.info('response is valid')
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/pytest/shell/pytest/test_shell.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 131 |
```c
/*
*
*/
#include <zephyr/ztest.h>
ZTEST_SUITE(a1_2_tests, NULL, NULL, NULL, NULL, NULL);
/**
* @brief Test Asserts
*
* This test verifies various assert macros provided by ztest.
*
*/
ZTEST(a1_2_tests, test_assert)
{
zassert_true(1, "1 was false");
zassert_false(0, "0 was true");
zassert_is_null(NULL, "NULL was not NULL");
zassert_not_null("foo", "\"foo\" was NULL");
zassert_equal(1, 1, "1 was not equal to 1");
zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 149 |
```yaml
sample:
description: Hello World sample, the simplest Zephyr
application
name: hello world
common:
tags: introduction
integration_platforms:
- native_sim
harness: console
harness_config:
type: one_line
regex:
- "Hello World! (.*)"
tests:
sample.basic.helloworld:
tags: introduction
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/hello_world/test_sample.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 82 |
```unknown
# nothing here
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/hello_world/prj.conf | unknown | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5 |
```c
/*
*
*/
int main(void)
{
/* Shell application source code is injected by applied Kconfg SHELL
* options, no more "extra" functionalities are required for exemplary
* pytest test.
*/
return 0;
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/pytest/shell/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 52 |
```c
/*
*
*/
#include <stdio.h>
int main(void)
{
printf("Hello World! %s\n", CONFIG_BOARD);
return 0;
}
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/samples/hello_world/src/main.c | c | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 31 |
```yaml
name: dummy_board
vendor: others
arch: unit
identifier: dummy_board/dummy_soc
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/boards/others/dummy_board/dummy_board.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 22 |
```yaml
board:
name: dummy
vendor: others
socs:
- name: dummy_soc
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/boards/others/dummy_board/board.yml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 24 |
```yaml
tests:
dummy.agnostic.group2.alt:
platform_allow:
- native_sim
- qemu_x86
- qemu_x86_64
integration_platforms:
- native_sim
slow: true
tags:
- agnostic
- alternate-config-root
``` | /content/code_sandbox/scripts/tests/twister_blackbox/test_data/alt-test-configs/dummy/agnostic/group2/test_data.yaml | yaml | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 63 |
```python
#!/usr/bin/env python3
#
"""tests for subfolder_list.py"""
import os
import sys
import time
sys.path.insert(0, os.path.join(os.environ["ZEPHYR_BASE"], "scripts"))
import subfolder_list as iut # Implementation Under Test
OUT_FILE = "out_file.txt"
DIR_NAME_PREFIX = "dir-"
def mkdirs(name_sfxs_range):
    """Create one directory per suffix in *name_sfxs_range*.

    Returns the current working directory followed by the absolute path
    of each directory created, in creation order.
    """
    cwd = os.getcwd()
    created = [cwd]
    for suffix in name_sfxs_range:
        dir_name = f"{DIR_NAME_PREFIX}{suffix}"
        os.mkdir(dir_name)
        created.append(os.path.join(cwd, dir_name))
    return created
def assert_out_file_has(dir_names):
    """Assert that every line of the output file names a known directory.

    Reads OUT_FILE line by line; each stripped line must be present in
    *dir_names*.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(OUT_FILE) as out_fo:
        for dir_name in out_fo:
            assert dir_name.strip() in dir_names
def test_subfolder_list(tmpdir):
    """get_subfolder_list() must return the parent directory followed by
    every subdirectory created under it."""
    tmpdir.chdir()
    dir_names = mkdirs(range(5))
    # Dropped the redundant extra parentheses around the argument.
    iut_dir_names = iut.get_subfolder_list(str(tmpdir))
    assert dir_names == iut_dir_names
def test_links(tmpdir):
    """Verify that symlinks mirroring the directory tree are created.

    Link names flatten the relative path with '_' separators, e.g.
    dirs/dir-1/dir-5 becomes dirs_dir-1_dir-5.
    """
    tmpdir.chdir()
    links_dir = str(tmpdir)
    dirs_dir = "dirs"
    subdirs_parent_sfx = 1
    dirs_range = range(5)
    subdirs_range = range(5, 9)

    # Expected flattened link names for the top-level dirs ...
    expect_links = [
        f"{dirs_dir}_{DIR_NAME_PREFIX}{i}" for i in dirs_range
    ]
    # ... and for the nested dirs under dir-<subdirs_parent_sfx>.
    expect_links += [
        f"{dirs_dir}_{DIR_NAME_PREFIX}{subdirs_parent_sfx}_{DIR_NAME_PREFIX}{i}"
        for i in subdirs_range
    ]

    tmpdir.mkdir(dirs_dir)
    os.chdir(dirs_dir)
    mkdirs(dirs_range)
    os.chdir(f"{DIR_NAME_PREFIX}{subdirs_parent_sfx}")
    mkdirs(subdirs_range)
    tmpdir.chdir()

    iut.get_subfolder_list(dirs_dir, links_dir)

    links = [f for f in os.listdir(links_dir) if os.path.islink(f)]
    assert sorted(expect_links) == sorted(links)
def test_gen_out_file(tmpdir):
    """Test generating the list output file.

    gen_out_file() must write all directory names, leave the file untouched
    when the content is unchanged, and rewrite it when the content differs.
    File identity is compared via os.stat() results (mtime/inode metadata).
    """
    tmpdir.chdir()
    dirs = []
    for sfx in range(10):
        dirs.append(f"{DIR_NAME_PREFIX}{sfx}")
    iut.gen_out_file(OUT_FILE, dirs)
    assert_out_file_has(dirs)
    st_info = os.stat(OUT_FILE)
    # should not be updated if it already exists and has the same content
    iut.gen_out_file(OUT_FILE, dirs)
    st_info2 = os.stat(OUT_FILE)
    assert st_info == st_info2
    # should be updated if exists with different content
    with open(OUT_FILE, "a") as out_fo:
        out_fo.write("A" * 79)
    st_info = os.stat(OUT_FILE)
    iut.gen_out_file(OUT_FILE, dirs)
    st_info2 = os.stat(OUT_FILE)
    assert st_info != st_info2
    assert_out_file_has(dirs)
def test_trigger_file(tmpdir):
    """Test trigger file feature.

    iut.touch() must create the trigger file when missing and refresh its
    access/modification times when it already exists.
    """
    trigger_file = "trigger_file"
    tmpdir.chdir()
    mkdirs(range(5))
    # should be created if it does not exist
    iut.touch(trigger_file)
    assert os.path.exists(trigger_file)
    # should be touched if it exists
    with open(trigger_file, 'w'):
        # Backdate the timestamps so a subsequent touch() visibly updates them.
        fake_time = 12345
        os.utime(trigger_file, (fake_time, fake_time))
    iut.touch(trigger_file)
    st_info = os.stat(trigger_file)
    time_now = time.time()
    # Loose bound: touch() must have set the times to "roughly now".
    time_since_touch = 5.0  # seconds, rough estimate
    assert (time_now - st_info.st_atime) <= time_since_touch
    assert (time_now - st_info.st_mtime) <= time_since_touch
``` | /content/code_sandbox/scripts/tests/build/test_subfolder_list.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 867 |
```python
#!/usr/bin/env python3
#
"""
Tests for domains.py classes
"""
import mock
import os
import pytest
import sys
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/build_helpers"))
import domains
from contextlib import nullcontext
TESTDATA_1 = [
('', False, 1, ['domains.yaml file not found: domains.yaml']),
(
"""
default: None
build_dir: some/dir
domains: []
""",
True, None, []
),
]
@pytest.mark.parametrize(
    'f_contents, f_exists, exit_code, expected_logs',
    TESTDATA_1,
    ids=['no file', 'valid']
)
def test_from_file(caplog, f_contents, f_exists, exit_code, expected_logs):
    """Domains.from_file() parses an existing domains.yaml and exits with
    an error code (logging the failure) when the file is missing."""
    def mock_open(*args, **kwargs):
        if f_exists:
            # Forward the call arguments properly; the original packed
            # `args`/`kwargs` as two positional tuples instead of unpacking.
            return mock.mock_open(read_data=f_contents)(*args, **kwargs)
        raise FileNotFoundError('domains.yaml not found.')

    init_mock = mock.Mock(return_value=None)

    with mock.patch('domains.Domains.__init__', init_mock), \
            mock.patch('builtins.open', mock_open), \
            pytest.raises(SystemExit) if exit_code else nullcontext() as s_exit:
        result = domains.Domains.from_file('domains.yaml')

    if exit_code:
        # SystemExit stringifies to its exit code.
        assert str(s_exit.value) == str(exit_code)
    else:
        init_mock.assert_called_once()
        assert result is not None

    assert all([log in caplog.text for log in expected_logs])
TESTDATA_2 = [
(
"""
default: some default
build_dir: my/dir
domains:
- name: some default
build_dir: dir/2
- name: another
build_dir: dir/3
flash_order: I don\'t think this is correct
""",
1, None, None, None, None
),
(
"""
build_dir: build/dir
domains:
- name: a domain
build_dir: dir/1
- name: default_domain
build_dir: dir/2
default: default_domain
flash_order:
- default_domain
- a domain
""",
None,
'build/dir',
[('default_domain', 'dir/2'), ('a domain', 'dir/1')],
('default_domain', 'dir/2'),
{'a domain': ('a domain', 'dir/1'),
'default_domain': ('default_domain', 'dir/2')}
),
]
@pytest.mark.parametrize(
    'data, exit_code, expected_build_dir, expected_flash_order,' \
    ' expected_default, expected_domains',
    TESTDATA_2,
    ids=['invalid', 'valid']
)
def test_from_yaml(
    caplog,
    data,
    exit_code,
    expected_build_dir,
    expected_flash_order,
    expected_default,
    expected_domains
):
    """Parse a domains YAML document and verify the resulting Domains state
    (top build dir, default domain, domain map, flash order) — or, for an
    invalid document, that it exits with the expected code.
    """
    # Replace the Domain class with a factory returning (name, build_dir)
    # tuples so expectations can be written as plain tuples.
    def mock_domain(name, build_dir, *args, **kwargs):
        return name, build_dir
    with mock.patch('domains.Domain', side_effect=mock_domain), \
        pytest.raises(SystemExit) if exit_code else nullcontext() as exit_st:
        doms = domains.Domains.from_yaml(data)
    if exit_code:
        # SystemExit stringifies to its exit code.
        assert str(exit_st.value) == str(exit_code)
        return
    assert doms.get_default_domain() == expected_default
    assert doms.get_top_build_dir() == expected_build_dir
    assert doms._domains == expected_domains
    # Flash order is compared as a multiset: same elements, any order.
    assert all([d in expected_flash_order for d in doms._flash_order])
    assert all([d in doms._flash_order for d in expected_flash_order])
TESTDATA_3 = [
(
None,
True,
[('some', os.path.join('dir', '2')),
('order', os.path.join('dir', '1'))]
),
(
None,
False,
[('order', os.path.join('dir', '1')),
('some', os.path.join('dir', '2'))]
),
(
['some'],
False,
[('some', os.path.join('dir', '2'))]
),
]
@pytest.mark.parametrize(
    'names, default_flash_order, expected_result',
    TESTDATA_3,
    ids=['order only', 'no parameters', 'valid']
)
def test_get_domains(
    caplog,
    names,
    default_flash_order,
    expected_result
):
    """get_domains() should honour an explicit name list and otherwise
    return domains in flash order or plain registration order depending on
    *default_flash_order*.
    """
    # Minimal valid YAML only to construct the object; the state under
    # test is injected directly into the private attributes below.
    doms = domains.Domains(
        """
        domains:
        - name: dummy
          build_dir: dummy
        default: dummy
        build_dir: dummy
        """
    )
    doms._flash_order = [
        ('some', os.path.join('dir', '2')),
        ('order', os.path.join('dir', '1'))
    ]
    doms._domains = {
        'order': ('order', os.path.join('dir', '1')),
        'some': ('some', os.path.join('dir', '2'))
    }
    result = doms.get_domains(names, default_flash_order)
    assert result == expected_result
TESTDATA_3 = [
(
'other',
1,
['domain "other" not found, valid domains are: order, some'],
None
),
(
'some',
None,
[],
('some', os.path.join('dir', '2'))
),
]
@pytest.mark.parametrize(
    'name, exit_code, expected_logs, expected_result',
    TESTDATA_3,
    ids=['domain not found', 'valid']
)
def test_get_domain(
    caplog,
    name,
    exit_code,
    expected_logs,
    expected_result
):
    """get_domain() should return the matching (name, build_dir) entry and
    exit with an error (logging the valid names) for an unknown domain.
    """
    # Minimal valid YAML only to construct the object; the state under
    # test is injected directly into the private attributes below.
    doms = domains.Domains(
        """
        domains:
        - name: dummy
          build_dir: dummy
        default: dummy
        build_dir: dummy
        """
    )
    doms._flash_order = [
        ('some', os.path.join('dir', '2')),
        ('order', os.path.join('dir', '1'))
    ]
    doms._domains = {
        'order': ('order', os.path.join('dir', '1')),
        'some': ('some', os.path.join('dir', '2'))
    }
    with pytest.raises(SystemExit) if exit_code else nullcontext() as s_exit:
        result = doms.get_domain(name)
    assert all([log in caplog.text for log in expected_logs])
    if exit_code:
        # SystemExit stringifies to its exit code.
        assert str(s_exit.value) == str(exit_code)
    else:
        assert result == expected_result
def test_domain():
    """Domain is a plain name/build_dir record with writable attributes."""
    initial_name = 'Domain Name'
    initial_dir = 'build/dir'

    dom = domains.Domain(initial_name, initial_dir)
    assert dom.name == initial_name
    assert dom.build_dir == initial_dir

    # Both attributes must be reassignable after construction.
    dom.name = 'New Name'
    dom.build_dir = 'new/dir'
    assert dom.name == 'New Name'
    assert dom.build_dir == 'new/dir'
``` | /content/code_sandbox/scripts/tests/build_helpers/test_domains.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,448 |
```python
#!/usr/bin/env python3
#
"""
Tests for the mixins class
"""
import os
import pytest
def test_disable_pytest_test_collection(test_data):
    """Running pytest over the 'mixins' test-data directory must collect
    zero tests, proving the mixin classes opt out of pytest collection."""
    test_path = os.path.join(test_data, 'mixins')
    return_code = pytest.main([test_path])
    # Named ExitCode member instead of the bare magic number 5.
    assert return_code == pytest.ExitCode.NO_TESTS_COLLECTED
``` | /content/code_sandbox/scripts/tests/twister/test_mixins.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 65 |
```python
#!/usr/bin/env python3
#
"""
Tests for scl.py functions
"""
import logging
import mock
import os
import pytest
import sys
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
import scl
from contextlib import nullcontext
from importlib import reload
from pykwalify.errors import SchemaError
from yaml.scanner import ScannerError
TESTDATA_1 = [
(False),
(True),
]
@pytest.mark.parametrize(
    'fail_c',
    TESTDATA_1,
    ids=['C YAML', 'non-C YAML']
)
def test_yaml_imports(fail_c):
    """Reload scl with and without the C-accelerated YAML classes available
    and verify scl binds Loader/SafeLoader/Dumper to the correct variant.
    """
    class ImportRaiser:
        # Meta-path hook: simulate absence of the C-accelerated yaml
        # submodules when fail_c is set.
        def find_spec(self, fullname, path, target=None):
            if fullname == 'yaml.CLoader' and fail_c:
                raise ImportError()
            if fullname == 'yaml.CSafeLoader' and fail_c:
                raise ImportError()
            if fullname == 'yaml.CDumper' and fail_c:
                raise ImportError()
    # Work on a copy of sys.modules; strip any pre-existing C classes so the
    # reload of scl has to re-resolve them through our mocks.
    modules_mock = sys.modules.copy()
    if hasattr(modules_mock['yaml'], 'CLoader'):
        del modules_mock['yaml'].CLoader
        del modules_mock['yaml'].CSafeLoader
        del modules_mock['yaml'].CDumper
    cloader_mock = mock.Mock()
    loader_mock = mock.Mock()
    csafeloader_mock = mock.Mock()
    safeloader_mock = mock.Mock()
    cdumper_mock = mock.Mock()
    dumper_mock = mock.Mock()
    # C variants are only installed in the "C YAML available" case; the
    # pure-Python variants are always present as the fallback.
    if not fail_c:
        modules_mock['yaml'].CLoader = cloader_mock
        modules_mock['yaml'].CSafeLoader = csafeloader_mock
        modules_mock['yaml'].CDumper = cdumper_mock
    modules_mock['yaml'].Loader = loader_mock
    modules_mock['yaml'].SafeLoader = safeloader_mock
    modules_mock['yaml'].Dumper = dumper_mock
    meta_path_mock = sys.meta_path[:]
    meta_path_mock.insert(0, ImportRaiser())
    with mock.patch.dict('sys.modules', modules_mock, clear=True), \
            mock.patch('sys.meta_path', meta_path_mock):
        reload(scl)
    # scl must have picked the C class when available, otherwise the
    # pure-Python one.
    assert sys.modules['scl'].Loader == loader_mock if fail_c else \
        cloader_mock
    assert sys.modules['scl'].SafeLoader == safeloader_mock if fail_c else \
        csafeloader_mock
    assert sys.modules['scl'].Dumper == dumper_mock if fail_c else \
        cdumper_mock
    # Restore yaml to its pristine state for subsequent tests.
    import yaml
    reload(yaml)
TESTDATA_2 = [
(False, logging.CRITICAL, []),
(True, None, ['can\'t import pykwalify; won\'t validate YAML']),
]
@pytest.mark.parametrize(
    'fail_pykwalify, log_level, expected_logs',
    TESTDATA_2,
    ids=['pykwalify OK', 'no pykwalify']
)
def test_pykwalify_import(caplog, fail_pykwalify, log_level, expected_logs):
    """Reload scl with pykwalify missing and verify it degrades gracefully:
    a warning is logged and _yaml_validate() becomes a no-op returning None.
    """
    class ImportRaiser:
        # Meta-path hook: simulate absence of pykwalify.core when requested.
        def find_spec(self, fullname, path, target=None):
            if fullname == 'pykwalify.core' and fail_pykwalify:
                raise ImportError()
    modules_mock = sys.modules.copy()
    modules_mock['pykwalify'] = None if fail_pykwalify else \
        modules_mock['pykwalify']
    meta_path_mock = sys.meta_path[:]
    meta_path_mock.insert(0, ImportRaiser())
    with mock.patch.dict('sys.modules', modules_mock, clear=True), \
            mock.patch('sys.meta_path', meta_path_mock):
        reload(scl)
    # With pykwalify present, scl silences its noisy logger.
    if log_level:
        assert logging.getLogger('pykwalify.core').level == log_level
    assert all([log in caplog.text for log in expected_logs])
    if fail_pykwalify:
        # Validation is skipped entirely when pykwalify is unavailable.
        assert scl._yaml_validate(None, None) is None
        assert scl._yaml_validate(mock.Mock(), mock.Mock()) is None
    # Restore scl to its real import state for subsequent tests.
    reload(scl)
TESTDATA_3 = [
(False),
(True),
]
@pytest.mark.parametrize(
    'fail_parsing',
    TESTDATA_3,
    ids=['ok', 'parsing error']
)
def test_yaml_load(caplog, fail_parsing):
    """scl.yaml_load() should return the parsed document on success and,
    on a scanner error, log the formatted problem/context marks and
    re-raise ScannerError.
    """
    result_mock = mock.Mock()
    def mock_load(*args, **kwargs):
        if fail_parsing:
            # Build mark objects exposing the attributes scl's error
            # formatter reads (name/line/column and empty args).
            context_mark = mock.Mock()
            problem_mark = mock.Mock()
            type(context_mark).args = mock.PropertyMock(return_value=[])
            type(context_mark).name = 'dummy context mark'
            type(context_mark).line = 0
            type(context_mark).column = 0
            type(problem_mark).args = mock.PropertyMock(return_value=[])
            type(problem_mark).name = 'dummy problem mark'
            type(problem_mark).line = 0
            type(problem_mark).column = 0
            raise ScannerError(context='dummy context',
                context_mark=context_mark, problem='dummy problem',
                problem_mark=problem_mark, note='Dummy note')
        return result_mock
    filename = 'dummy/file.yaml'
    with mock.patch('yaml.load', side_effect=mock_load), \
            mock.patch('builtins.open', mock.mock_open()) as mock_file:
        with pytest.raises(ScannerError) if fail_parsing else nullcontext():
            result = scl.yaml_load(filename)
            # The file must be opened read-only as UTF-8 text.
            mock_file.assert_called_with('dummy/file.yaml', 'r', encoding='utf-8')
    if not fail_parsing:
        assert result == result_mock
    else:
        # The logged message is assembled from the mark objects above.
        assert 'dummy problem mark:0:0: error: dummy problem' \
            ' (note Dummy note context @dummy context mark:0:0' \
            ' dummy context)' in caplog.text
TESTDATA_4 = [
(True, False, None),
(False, False, SchemaError),
(False, True, ScannerError),
]
@pytest.mark.parametrize(
    'validate, fail_load, expected_error',
    TESTDATA_4,
    ids=['successful validation', 'failed validation', 'failed load']
)
def test_yaml_load_verify(validate, fail_load, expected_error):
    """scl.yaml_load_verify() must return the loaded data when validation
    passes and propagate SchemaError / ScannerError on validation or load
    failure respectively.
    """
    filename = 'dummy/file.yaml'
    schema_mock = mock.Mock()
    data_mock = mock.Mock()
    # Stub for scl.yaml_load: checks it receives our filename.
    def mock_load(file_name, *args, **kwargs):
        assert file_name == filename
        if fail_load:
            raise ScannerError
        return data_mock
    # Stub for scl._yaml_validate: checks data/schema are forwarded intact.
    def mock_validate(data, schema, *args, **kwargs):
        assert data == data_mock
        assert schema == schema_mock
        if validate:
            return True
        raise SchemaError(u'Schema validation failed.')
    with mock.patch('scl.yaml_load', side_effect=mock_load), \
            mock.patch('scl._yaml_validate', side_effect=mock_validate), \
            pytest.raises(expected_error) if expected_error else nullcontext():
        res = scl.yaml_load_verify(filename, schema_mock)
    if validate:
        assert res == data_mock
TESTDATA_5 = [
(True, True, None),
(True, False, SchemaError),
(False, None, None),
]
@pytest.mark.parametrize(
    'schema_exists, validate, expected_error',
    TESTDATA_5,
    ids=['successful validation', 'failed validation', 'no schema']
)
def test_yaml_validate(schema_exists, validate, expected_error):
    """scl._yaml_validate() must run pykwalify validation only when a
    schema is given, and propagate SchemaError on validation failure.
    """
    data_mock = mock.Mock()
    schema_mock = mock.Mock() if schema_exists else None
    # Stub for Core.validate: scl is expected to call it with
    # raise_exception enabled.
    def mock_validate(raise_exception, *args, **kwargs):
        assert raise_exception
        if validate:
            return True
        raise SchemaError(u'Schema validation failed.')
    # Stub for the pykwalify Core constructor: checks data/schema are
    # forwarded and yields an object with our validate stub.
    def mock_core(source_data, schema_data, *args, **kwargs):
        assert source_data == data_mock
        assert schema_data == schema_mock
        return mock.Mock(validate=mock_validate)
    core_mock = mock.Mock(side_effect=mock_core)
    with mock.patch('pykwalify.core.Core', core_mock), \
            pytest.raises(expected_error) if expected_error else nullcontext():
        scl._yaml_validate(data_mock, schema_mock)
    # Without a schema, validation must be skipped entirely.
    if schema_exists:
        core_mock.assert_called_once()
    else:
        core_mock.assert_not_called()
def test_yaml_load_empty_file(tmp_path):
    """A YAML file containing only comments (no data) must raise
    EmptyYamlFileException from yaml_load_verify()."""
    empty_yaml = tmp_path / 'empty_quarantine.yml'
    empty_yaml.write_text("# yaml file without data")

    with pytest.raises(scl.EmptyYamlFileException):
        scl.yaml_load_verify(empty_yaml, None)
``` | /content/code_sandbox/scripts/tests/twister/test_scl.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,786 |
```python
#!/usr/bin/env python3
#
"""
Tests for log_helper.py functions
"""
import logging
import mock
import pytest
from importlib import reload
import twisterlib.log_helper
TESTDATA = [
('Windows', 'dummy message: [\'dummy\', \'command\', \'-flag\']'),
('Linux', 'dummy message: dummy command -flag'),
]
@pytest.mark.parametrize(
    'system, expected_log',
    TESTDATA,
    ids=['Windows', 'Linux']
)
def test_log_command(caplog, system, expected_log):
    """log_command() must render the argument list per platform: the list
    repr on Windows, a space-joined string elsewhere.
    """
    caplog.set_level(logging.DEBUG)
    logger = logging.getLogger('dummy')
    message = 'dummy message'
    args = ['dummy', 'command', '-flag']
    with mock.patch('platform.system', return_value=system):
        # Reload so the module-level platform detection in log_helper picks
        # up the patched platform.system().
        reload(twisterlib.log_helper)
        twisterlib.log_helper.log_command(logger, message, args)
    # Restore the real platform-specific binding for other tests.
    reload(twisterlib.log_helper)
    assert expected_log in caplog.text
``` | /content/code_sandbox/scripts/tests/twister/test_log_helper.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 204 |
```python
#!/usr/bin/env python3
#
'''
This test file contains tests for platform.py module of twister
'''
import sys
import os
import mock
import pytest
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib.platform import Platform
TESTDATA_1 = [
(
"""\
identifier: dummy empty
arch: arc
""",
{
'name': 'dummy empty',
'arch': 'arc',
'twister': True,
'ram': 128,
'timeout_multiplier': 1.0,
'ignore_tags': [],
'only_tags': [],
'default': False,
'binaries': [],
'flash': 512,
'supported': set(),
'vendor': '',
'tier': -1,
'type': 'na',
'simulation': 'na',
'simulation_exec': None,
'supported_toolchains': [],
'env': [],
'env_satisfied': True
},
'<dummy empty on arc>'
),
(
"""\
identifier: dummy full
arch: riscv
twister: true
ram: 1024
testing:
timeout_multiplier: 2.0
ignore_tags:
- tag1
- tag2
only_tags:
- tag3
default: true
binaries:
- dummy.exe
- dummy.bin
flash: 4096
supported:
- ble
- netif:openthread
- gpio
vendor: vendor1
tier: 1
type: unit
simulation: nsim
simulation_exec: nsimdrv
toolchain:
- zephyr
- llvm
env:
- dummynonexistentvar
""",
{
'name': 'dummy full',
'arch': 'riscv',
'twister': True,
'ram': 1024,
'timeout_multiplier': 2.0,
'ignore_tags': ['tag1', 'tag2'],
'only_tags': ['tag3'],
'default': True,
'binaries': ['dummy.exe', 'dummy.bin'],
'flash': 4096,
'supported': set(['ble', 'netif', 'openthread', 'gpio']),
'vendor': 'vendor1',
'tier': 1,
'type': 'unit',
'simulation': 'nsim',
'simulation_exec': 'nsimdrv',
'supported_toolchains': ['zephyr', 'llvm', 'cross-compile'],
'env': ['dummynonexistentvar'],
'env_satisfied': False
},
'<dummy full on riscv>'
),
]
@pytest.mark.parametrize(
    'platform_text, expected_data, expected_repr',
    TESTDATA_1,
    ids=['almost empty specification', 'full specification']
)
def test_platform_load(platform_text, expected_data, expected_repr):
    """Load a platform YAML description and verify every expected attribute
    plus the platform's repr.

    List-valued attributes are compared order-insensitively.
    """
    platform = Platform()
    with mock.patch('builtins.open', mock.mock_open(read_data=platform_text)):
        platform.load('dummy.yaml')
    for k, v in expected_data.items():
        # Assert with a message instead of the `if ...: assert False` pattern.
        assert hasattr(platform, k), f'No key {k} in platform {platform}'
        att = getattr(platform, k)
        if isinstance(v, list):
            assert isinstance(att, list), \
                f'Value mismatch in key {k} in platform {platform}'
            # Element order is irrelevant for list attributes.
            assert sorted(att) == sorted(v)
        else:
            assert att == v
    # repr() is the idiomatic spelling of platform.__repr__().
    assert repr(platform) == expected_repr
``` | /content/code_sandbox/scripts/tests/twister/test_platform.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 789 |
```python
#!/usr/bin/env python3
#
"""
Tests for runner.py classes
"""
import errno
import mock
import os
import pathlib
import pytest
import queue
import re
import subprocess
import sys
import yaml
from contextlib import nullcontext
from elftools.elf.sections import SymbolTableSection
from typing import List
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib.statuses import TwisterStatus
from twisterlib.error import BuildError
from twisterlib.harness import Pytest
from twisterlib.runner import (
CMake,
ExecutionCounter,
FilterBuilder,
ProjectBuilder,
TwisterRunner
)
@pytest.fixture
def mocked_instance(tmp_path):
    """Provide a mocked TestInstance backed by a real temporary build dir."""
    testsuite = mock.Mock()
    testsuite.source_dir = ''
    platform = mock.Mock()
    platform.sysbuild = False
    platform.binaries = []
    build_dir = tmp_path / 'build_dir'
    os.makedirs(build_dir)
    instance = mock.Mock()
    instance.testsuite = testsuite
    instance.platform = platform
    instance.build_dir = str(build_dir)
    return instance
@pytest.fixture
def mocked_env():
    """Provide an environment mock carrying a mocked options namespace."""
    env = mock.Mock(options=mock.Mock())
    return env
@pytest.fixture
def mocked_jobserver():
    """Provide a plain Mock standing in for a job server."""
    return mock.Mock()
@pytest.fixture
def project_builder(mocked_instance, mocked_env, mocked_jobserver) -> ProjectBuilder:
    """Assemble a ProjectBuilder from the mocked instance, env and jobserver."""
    return ProjectBuilder(mocked_instance, mocked_env, mocked_jobserver)
@pytest.fixture
def runners(project_builder: ProjectBuilder) -> dict:
    """Write a runners.yaml under build_dir/zephyr and return its contents.

    One entry is deliberately an absolute path so sanitization tests have
    something to strip.
    """
    zephyr_dir = os.path.join(project_builder.instance.build_dir, 'zephyr')
    os.makedirs(zephyr_dir)
    content: dict = {
        'config': {
            'elf_file': 'zephyr.elf',
            'hex_file': os.path.join(zephyr_dir, 'zephyr.elf'),
            'bin_file': 'zephyr.bin',
        }
    }
    with open(os.path.join(zephyr_dir, 'runners.yaml'), 'w') as file:
        yaml.dump(content, file)
    return content
@mock.patch("os.path.exists")
def test_projectbuilder_cmake_assemble_args_single(m):
    """cmake_assemble_args folds base/handler/conf/overlay args into -D flags."""
    # Causes the additional_overlay_path to be appended
    m.return_value = True
    # Minimal stand-in for a handler: only .args and .ready are read here.
    class MockHandler:
        pass
    handler = MockHandler()
    handler.args = ["handler_arg1", "handler_arg2"]
    handler.ready = True
    # Expected ordering: CONFIG_* first, then cmake extras, base args,
    # SNIPPET_* (quotes stripped), handler args, then the merged file lists.
    assert(ProjectBuilder.cmake_assemble_args(
        ["basearg1", "CONFIG_t=\"test\"", "SNIPPET_t=\"test\""],
        handler,
        ["a.conf;b.conf", "c.conf"],
        ["extra_overlay.conf"],
        ["x.overlay;y.overlay", "z.overlay"],
        ["cmake1=foo", "cmake2=bar"],
        "/builddir/",
    ) == [
        "-DCONFIG_t=\"test\"",
        "-Dcmake1=foo", "-Dcmake2=bar",
        "-Dbasearg1", "-DSNIPPET_t=test",
        "-Dhandler_arg1", "-Dhandler_arg2",
        "-DCONF_FILE=a.conf;b.conf;c.conf",
        "-DDTC_OVERLAY_FILE=x.overlay;y.overlay;z.overlay",
        "-DOVERLAY_CONFIG=extra_overlay.conf "
        "/builddir/twister/testsuite_extra.conf",
    ])
def test_if_default_binaries_are_taken_properly(project_builder: ProjectBuilder):
    """Without platform/runners overrides, _get_binaries yields the defaults."""
    expected = [
        os.path.join('zephyr', name)
        for name in ('zephyr.hex', 'zephyr.bin', 'zephyr.elf', 'zephyr.exe')
    ]
    project_builder.instance.sysbuild = False
    assert sorted(project_builder._get_binaries()) == sorted(expected)
def test_if_binaries_from_platform_are_taken_properly(project_builder: ProjectBuilder):
    """Binaries listed by the platform replace the default binary set."""
    names = ['spi_image.bin']
    project_builder.platform.binaries = names
    project_builder.instance.sysbuild = False
    expected = [os.path.join('zephyr', name) for name in names]
    assert sorted(project_builder._get_binaries()) == sorted(expected)
def test_if_binaries_from_runners_are_taken_properly(runners, project_builder: ProjectBuilder):
    """Paths from runners.yaml are returned; relative ones go under zephyr/."""
    expected = [
        path if os.path.isabs(path) else os.path.join('zephyr', path)
        for path in runners['config'].values()
    ]
    assert sorted(project_builder._get_binaries_from_runners()) == sorted(expected)
def test_if_runners_file_is_sanitized_properly(runners, project_builder: ProjectBuilder):
    """_sanitize_runners_file strips absolute paths out of runners.yaml."""
    runners_yaml = os.path.join(project_builder.instance.build_dir,
                                'zephyr', 'runners.yaml')

    def absolute_paths():
        with open(runners_yaml, 'r') as file:
            config = yaml.safe_load(file)['config']
        return [path for path in config.values() if os.path.isabs(path)]

    # Precondition: the fixture seeded at least one absolute path.
    assert len(absolute_paths()) > 0
    project_builder._sanitize_runners_file()
    # Postcondition: every path in the file is now relative.
    assert len(absolute_paths()) == 0
def test_if_zephyr_base_is_sanitized_properly(project_builder: ProjectBuilder):
    """_sanitize_zephyr_base_from_files strips the ZEPHYR_BASE prefix."""
    expected = os.path.join('sanitized', 'path')
    cmakecache = os.path.join(project_builder.instance.build_dir,
                              'CMakeCache.txt')
    with open(cmakecache, 'w') as file:
        file.write(os.path.join(os.path.realpath(ZEPHYR_BASE), expected))
    project_builder._sanitize_zephyr_base_from_files()
    with open(cmakecache, 'r') as file:
        assert file.read() == expected
def test_executioncounter(capfd):
    """ExecutionCounter.summary() prints totals derived from its counters."""
    ec = ExecutionCounter(total=12)
    # Populate every counter the summary reports.
    ec.cases = 25
    ec.skipped_cases = 6
    ec.error = 2
    ec.iteration = 2
    ec.done = 9
    ec.passed = 6
    ec.skipped_configs = 3
    ec.skipped_runtime = 1
    ec.skipped_filter = 2
    ec.failed = 1
    ec.summary()
    out, err = capfd.readouterr()
    # Re-emit the captured streams so a failing run still shows the output.
    sys.stdout.write(out)
    sys.stderr.write(err)
    assert (
        f'--------------------------------\n'
        f'Total test suites: 12\n'
        f'Total test cases: 25\n'
        f'Executed test cases: 19\n'
        f'Skipped test cases: 6\n'
        f'Completed test suites: 9\n'
        f'Passing test suites: 6\n'
        f'Failing test suites: 1\n'
        f'Skipped test suites: 3\n'
        f'Skipped test suites (runtime): 1\n'
        f'Skipped test suites (filter): 2\n'
        f'Errors: 2\n'
        f'--------------------------------'
    ) in out
    # Producing the summary must not mutate any counter.
    assert ec.cases == 25
    assert ec.skipped_cases == 6
    assert ec.error == 2
    assert ec.iteration == 2
    assert ec.done == 9
    assert ec.passed == 6
    assert ec.skipped_configs == 3
    assert ec.skipped_runtime == 1
    assert ec.skipped_filter == 2
    assert ec.failed == 1
def test_cmake_parse_generated(mocked_jobserver):
    """parse_generated() on a fresh CMake object yields empty results."""
    cmake = CMake(
        mock.Mock(),                   # testsuite
        mock.Mock(),                   # platform
        os.path.join('source', 'dir'),
        os.path.join('build', 'dir'),
        mocked_jobserver
    )
    result = cmake.parse_generated()
    assert cmake.defconfig == {}
    assert result == {}
# Platforms to run test_cmake_run_build under (mocked sys.platform values).
TESTDATA_1_1 = [
    ('linux'),
    ('nt')
]
# Scenarios for test_cmake_run_build. Tuple fields, in order:
#   return_code, is_instance_run, p_out, expect_returncode, expect_writes,
#   expected_status, expected_reason, expected_change_skip,
#   expected_add_missing
TESTDATA_1_2 = [
    (0, False, 'dummy out',
     True, True, TwisterStatus.PASS, None, False, True),
    (0, True, '',
     False, False, TwisterStatus.PASS, None, False, False),
    (1, True, 'ERROR: region `FLASH\' overflowed by 123 MB',
     True, True, TwisterStatus.SKIP, 'FLASH overflow', True, False),
    (1, True, 'Error: Image size (99 B) + trailer (1 B) exceeds requested size',
     True, True, TwisterStatus.SKIP, 'imgtool overflow', True, False),
    (1, True, 'mock.ANY',
     True, True, TwisterStatus.ERROR, 'Build failure', False, False)
]
@pytest.mark.parametrize(
    'return_code, is_instance_run, p_out, expect_returncode,' \
    ' expect_writes, expected_status, expected_reason,' \
    ' expected_change_skip, expected_add_missing',
    TESTDATA_1_2,
    ids=['no error, no instance run', 'no error, instance run',
         'error - region overflow', 'error - image size exceed', 'error']
)
@pytest.mark.parametrize('sys_platform', TESTDATA_1_1)
def test_cmake_run_build(
    sys_platform,
    return_code,
    is_instance_run,
    p_out,
    expect_returncode,
    expect_writes,
    expected_status,
    expected_reason,
    expected_change_skip,
    expected_add_missing
):
    """CMake.run_build classifies build output into instance status/reason.

    The build subprocess is fully mocked; only result handling is exercised.
    """
    # Fake build process returning the parametrized exit code and output.
    process_mock = mock.Mock(
        returncode=return_code,
        communicate=mock.Mock(
            return_value=(p_out.encode(sys.getdefaultencoding()), None)
        )
    )
    def mock_popen(*args, **kwargs):
        return process_mock
    testsuite_mock = mock.Mock()
    platform_mock = mock.Mock()
    platform_mock.name = '<platform name>'
    source_dir = os.path.join('source', 'dir')
    build_dir = os.path.join('build', 'dir')
    jobserver_mock = mock.Mock(
        popen=mock.Mock(side_effect=mock_popen)
    )
    instance_mock = mock.Mock(add_missing_case_status=mock.Mock())
    instance_mock.build_time = 0
    instance_mock.run = is_instance_run
    instance_mock.status = TwisterStatus.NONE
    instance_mock.reason = None
    cmake = CMake(testsuite_mock, platform_mock, source_dir, build_dir,
                  jobserver_mock)
    cmake.cwd = os.path.join('dummy', 'working', 'dir')
    cmake.instance = instance_mock
    cmake.options = mock.Mock()
    cmake.options.overflow_as_errors = False
    cmake_path = os.path.join('dummy', 'cmake')
    popen_mock = mock.Mock(side_effect=mock_popen)
    change_mock = mock.Mock()
    with mock.patch('sys.platform', sys_platform), \
         mock.patch('shutil.which', return_value=cmake_path), \
         mock.patch('twisterlib.runner.change_skip_to_error_if_integration',
                    change_mock), \
         mock.patch('builtins.open', mock.mock_open()), \
         mock.patch('subprocess.Popen', popen_mock):
        result = cmake.run_build(args=['arg1', 'arg2'])
    # run_build returns None when nothing is expected in the result dict.
    expected_results = {}
    if expect_returncode:
        expected_results['returncode'] = return_code
    if expected_results == {}:
        expected_results = None
    assert expected_results == result
    # On Linux the jobserver spawns the process; elsewhere plain Popen is used.
    popen_caller = cmake.jobserver.popen if sys_platform == 'linux' else \
                   popen_mock
    popen_caller.assert_called_once_with(
        [os.path.join('dummy', 'cmake'), 'arg1', 'arg2'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        cwd=os.path.join('dummy', 'working', 'dir')
    )
    assert cmake.instance.status == expected_status
    assert cmake.instance.reason == expected_reason
    if expected_change_skip:
        change_mock.assert_called_once()
    if expected_add_missing:
        cmake.instance.add_missing_case_status.assert_called_once_with(
            TwisterStatus.SKIP, 'Test was built only'
        )
# Platforms to run test_cmake_run_cmake under (mocked sys.platform values).
TESTDATA_2_1 = [
    ('linux'),
    ('nt')
]
# Scenarios for test_cmake_run_cmake. Tuple fields, in order:
#   error_warns, f_stages, return_code, is_instance_run, p_out,
#   expect_returncode, expect_filter, expect_writes, expected_status,
#   expected_reason, expected_cmd
TESTDATA_2_2 = [
    (True, ['dummy_stage_1', 'ds2'],
     0, False, '',
     True, True, False,
     TwisterStatus.NONE, None,
     [os.path.join('dummy', 'cmake'),
      '-B' + os.path.join('build', 'dir'), '-DTC_RUNID=1',
      '-DSB_CONFIG_COMPILER_WARNINGS_AS_ERRORS=y',
      '-DEXTRA_GEN_DEFINES_ARGS=--edtlib-Werror', '-Gdummy_generator',
      '-S' + os.path.join('source', 'dir'),
      'arg1', 'arg2',
      '-DBOARD=<platform name>',
      '-DSNIPPET=dummy snippet 1;ds2',
      '-DMODULES=dummy_stage_1,ds2',
      '-Pzephyr_base/cmake/package_helper.cmake']),
    (False, [],
     1, True, 'ERROR: region `FLASH\' overflowed by 123 MB',
     True, False, True,
     TwisterStatus.ERROR, 'Cmake build failure',
     [os.path.join('dummy', 'cmake'),
      '-B' + os.path.join('build', 'dir'), '-DTC_RUNID=1',
      '-DSB_CONFIG_COMPILER_WARNINGS_AS_ERRORS=n',
      '-DEXTRA_GEN_DEFINES_ARGS=', '-Gdummy_generator',
      '-Szephyr_base/share/sysbuild',
      '-DAPP_DIR=' + os.path.join('source', 'dir'),
      'arg1', 'arg2',
      '-DBOARD=<platform name>',
      '-DSNIPPET=dummy snippet 1;ds2']),
]
@pytest.mark.parametrize(
    'error_warns, f_stages,' \
    ' return_code, is_instance_run, p_out, expect_returncode,' \
    ' expect_filter, expect_writes, expected_status, expected_reason,' \
    ' expected_cmd',
    TESTDATA_2_2,
    ids=['filter_stages with success', 'no stages with error']
)
@pytest.mark.parametrize('sys_platform', TESTDATA_2_1)
def test_cmake_run_cmake(
    sys_platform,
    error_warns,
    f_stages,
    return_code,
    is_instance_run,
    p_out,
    expect_returncode,
    expect_filter,
    expect_writes,
    expected_status,
    expected_reason,
    expected_cmd
):
    """CMake.run_cmake builds the expected command line and result status.

    The cmake subprocess is mocked; the assembled argv is checked exactly.
    """
    # Fake cmake process returning the parametrized exit code and output.
    process_mock = mock.Mock(
        returncode=return_code,
        communicate=mock.Mock(
            return_value=(p_out.encode(sys.getdefaultencoding()), None)
        )
    )
    def mock_popen(*args, **kwargs):
        return process_mock
    testsuite_mock = mock.Mock()
    testsuite_mock.sysbuild = True
    platform_mock = mock.Mock()
    platform_mock.name = '<platform name>'
    source_dir = os.path.join('source', 'dir')
    build_dir = os.path.join('build', 'dir')
    jobserver_mock = mock.Mock(
        popen=mock.Mock(side_effect=mock_popen)
    )
    instance_mock = mock.Mock(add_missing_case_status=mock.Mock())
    instance_mock.run = is_instance_run
    instance_mock.run_id = 1
    instance_mock.build_time = 0
    instance_mock.status = TwisterStatus.NONE
    instance_mock.reason = None
    instance_mock.testsuite = mock.Mock()
    instance_mock.testsuite.required_snippets = ['dummy snippet 1', 'ds2']
    instance_mock.testcases = [mock.Mock(), mock.Mock()]
    instance_mock.testcases[0].status = TwisterStatus.NONE
    instance_mock.testcases[1].status = TwisterStatus.NONE
    cmake = CMake(testsuite_mock, platform_mock, source_dir, build_dir,
                  jobserver_mock)
    cmake.cwd = os.path.join('dummy', 'working', 'dir')
    cmake.instance = instance_mock
    cmake.options = mock.Mock()
    cmake.options.disable_warnings_as_errors = not error_warns
    cmake.options.overflow_as_errors = False
    cmake.env = mock.Mock()
    cmake.env.generator = 'dummy_generator'
    cmake_path = os.path.join('dummy', 'cmake')
    popen_mock = mock.Mock(side_effect=mock_popen)
    change_mock = mock.Mock()
    with mock.patch('sys.platform', sys_platform), \
         mock.patch('shutil.which', return_value=cmake_path), \
         mock.patch('twisterlib.runner.change_skip_to_error_if_integration',
                    change_mock), \
         mock.patch('twisterlib.runner.canonical_zephyr_base',
                    'zephyr_base'), \
         mock.patch('builtins.open', mock.mock_open()), \
         mock.patch('subprocess.Popen', popen_mock):
        result = cmake.run_cmake(args=['arg1', 'arg2'], filter_stages=f_stages)
    # run_cmake returns None when nothing is expected in the result dict.
    expected_results = {}
    if expect_returncode:
        expected_results['returncode'] = return_code
    if expect_filter:
        expected_results['filter'] = {}
    if expected_results == {}:
        expected_results = None
    assert expected_results == result
    # On Linux the jobserver spawns the process; elsewhere plain Popen is used.
    popen_caller = cmake.jobserver.popen if sys_platform == 'linux' else \
                   popen_mock
    popen_caller.assert_called_once_with(
        expected_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        cwd=os.path.join('dummy', 'working', 'dir')
    )
    assert cmake.instance.status == expected_status
    assert cmake.instance.reason == expected_reason
    # Testcase statuses are propagated from the instance status.
    for tc in cmake.instance.testcases:
        assert tc.status == cmake.instance.status
# Scenarios for test_filterbuilder_parse_generated. Tuple fields, in order:
#   platform_name, filter_stages, sysbuild, do_find_cache,
#   west_flash_options, edt_exists, parse_results, testsuite_filter,
#   expected_defconfig_path, expected_edt_pickle_path, expected_defconfig,
#   expected_cmakecache, expected_filter_data, expected_edt, expected_logs,
#   expected_return
TESTDATA_3 = [
    ('unit_testing', [], False, True, None, True, None, True,
     None, None, {}, {}, None, None, [], {}),
    (
        'other', [], True,
        True, ['dummy', 'west', 'options'], True,
        None, True,
        os.path.join('domain', 'build', 'dir', 'zephyr', '.config'),
        os.path.join('domain', 'build', 'dir', 'zephyr', 'edt.pickle'),
        {'CONFIG_FOO': 'no'},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'CONFIG_FOO': 'no', 'dummy cache elem': 1},
        b'dummy edt pickle contents',
        [f'Loaded sysbuild domain data from' \
         f' {os.path.join("build", "dir", "domains.yaml")}'],
        {os.path.join('other', 'dummy.testsuite.name'): True}
    ),
    (
        'other', ['kconfig'], True,
        True, ['dummy', 'west', 'options'], True,
        'Dummy parse results', True,
        os.path.join('build', 'dir', 'zephyr', '.config'),
        os.path.join('build', 'dir', 'zephyr', 'edt.pickle'),
        {'CONFIG_FOO': 'no'},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'CONFIG_FOO': 'no', 'dummy cache elem': 1},
        b'dummy edt pickle contents',
        [],
        {os.path.join('other', 'dummy.testsuite.name'): False}
    ),
    (
        'other', ['other'], False,
        False, None, True,
        'Dummy parse results', True,
        None,
        os.path.join('build', 'dir', 'zephyr', 'edt.pickle'),
        {},
        {},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True},
        b'dummy edt pickle contents',
        [],
        {os.path.join('other', 'dummy.testsuite.name'): False}
    ),
    (
        'other', ['other'], True,
        False, None, True,
        'Dummy parse results', True,
        None,
        None,
        {},
        {},
        {},
        None,
        ['Sysbuild test will be skipped. West must be used for flashing.'],
        {os.path.join('other', 'dummy.testsuite.name'): True}
    ),
    (
        'other', ['other'], False,
        True, None, False,
        'Dummy parse results', True,
        None,
        None,
        {},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'dummy cache elem': 1},
        None,
        [],
        {os.path.join('other', 'dummy.testsuite.name'): False}
    ),
    (
        'other', ['other'], False,
        True, None, True,
        'Dummy parse results', True,
        None,
        os.path.join('build', 'dir', 'zephyr', 'edt.pickle'),
        {},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'dummy cache elem': 1},
        b'dummy edt pickle contents',
        [],
        {os.path.join('other', 'dummy.testsuite.name'): False}
    ),
    (
        'other', ['other'], False,
        True, None, True,
        None, True,
        None,
        os.path.join('build', 'dir', 'zephyr', 'edt.pickle'),
        {},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'dummy cache elem': 1},
        b'dummy edt pickle contents',
        [],
        {os.path.join('other', 'dummy.testsuite.name'): True}
    ),
    (
        'other', ['other'], False,
        True, None, True,
        'Dummy parse results', False,
        None,
        os.path.join('build', 'dir', 'zephyr', 'edt.pickle'),
        {},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'dummy cache elem': 1},
        b'dummy edt pickle contents',
        [],
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'dummy cache elem': 1}
    ),
    (
        'other', ['other'], False,
        True, None, True,
        SyntaxError, True,
        None,
        os.path.join('build', 'dir', 'zephyr', 'edt.pickle'),
        {},
        {'dummy cache elem': 1},
        {'ARCH': 'dummy arch', 'PLATFORM': 'other', 'env_dummy': True,
         'dummy cache elem': 1},
        b'dummy edt pickle contents',
        ['Failed processing testsuite.yaml'],
        SyntaxError
    ),
]
@pytest.mark.parametrize(
    'platform_name, filter_stages, sysbuild,' \
    ' do_find_cache, west_flash_options, edt_exists,' \
    ' parse_results, testsuite_filter,' \
    ' expected_defconfig_path, expected_edt_pickle_path,' \
    ' expected_defconfig, expected_cmakecache, expected_filter_data,' \
    ' expected_edt,' \
    ' expected_logs, expected_return',
    TESTDATA_3,
    ids=['unit testing', 'domain', 'kconfig', 'no cache',
         'no west options', 'no edt',
         'parse result', 'no parse result', 'no testsuite filter', 'parse err']
)
def test_filterbuilder_parse_generated(
    caplog,
    mocked_jobserver,
    platform_name,
    filter_stages,
    sysbuild,
    do_find_cache,
    west_flash_options,
    edt_exists,
    parse_results,
    testsuite_filter,
    expected_defconfig_path,
    expected_edt_pickle_path,
    expected_defconfig,
    expected_cmakecache,
    expected_filter_data,
    expected_edt,
    expected_logs,
    expected_return
):
    """FilterBuilder.parse_generated merges defconfig/cache/env filter data.

    All file system, pickle and expression-parser interactions are mocked.
    """
    # Sysbuild default domain redirects the build dir to domain/build/dir.
    def mock_domains_from_file(*args, **kwargs):
        dom = mock.Mock()
        dom.build_dir = os.path.join('domain', 'build', 'dir')
        res = mock.Mock(get_default_domain=mock.Mock(return_value=dom))
        return res
    # CMake cache lookup either fails or yields one dummy entry.
    def mock_cmakecache_from_file(*args, **kwargs):
        if not do_find_cache:
            raise FileNotFoundError(errno.ENOENT, 'Cache not found')
        cache_elem = mock.Mock()
        cache_elem.name = 'dummy cache elem'
        cache_elem.value = 1
        cache = [cache_elem]
        return cache
    # Only the two expected files may be opened; anything else is an error.
    def mock_open(filepath, type, *args, **kwargs):
        if filepath == expected_defconfig_path:
            rd = 'I am not a proper line\n' \
                 'CONFIG_FOO="no"'
        elif filepath == expected_edt_pickle_path:
            rd = b'dummy edt pickle contents'
        else:
            raise FileNotFoundError(errno.ENOENT,
                                    f'File {filepath} not mocked.')
        return mock.mock_open(read_data=rd)()
    # Expression parser stub; also verifies the assembled filter data.
    def mock_parser(filter, filter_data, edt):
        assert filter_data == expected_filter_data
        if isinstance(parse_results, type) and \
           issubclass(parse_results, Exception):
            raise parse_results
        return parse_results
    # Pickle loader stub; verifies the mocked edt.pickle contents were read.
    def mock_pickle(datafile):
        assert datafile.read() == expected_edt
        return mock.Mock()
    testsuite_mock = mock.Mock()
    testsuite_mock.name = 'dummy.testsuite.name'
    testsuite_mock.filter = testsuite_filter
    platform_mock = mock.Mock()
    platform_mock.name = platform_name
    platform_mock.arch = 'dummy arch'
    source_dir = os.path.join('source', 'dir')
    build_dir = os.path.join('build', 'dir')
    fb = FilterBuilder(testsuite_mock, platform_mock, source_dir, build_dir,
                       mocked_jobserver)
    instance_mock = mock.Mock()
    instance_mock.sysbuild = 'sysbuild' if sysbuild else None
    fb.instance = instance_mock
    fb.env = mock.Mock()
    fb.env.options = mock.Mock()
    fb.env.options.west_flash = west_flash_options
    fb.env.options.device_testing = True
    environ_mock = {'env_dummy': True}
    with mock.patch('twisterlib.runner.Domains.from_file',
                    mock_domains_from_file), \
         mock.patch('twisterlib.runner.CMakeCache.from_file',
                    mock_cmakecache_from_file), \
         mock.patch('builtins.open', mock_open), \
         mock.patch('expr_parser.parse', mock_parser), \
         mock.patch('pickle.load', mock_pickle), \
         mock.patch('os.path.exists', return_value=edt_exists), \
         mock.patch('os.environ', environ_mock), \
         pytest.raises(expected_return) if \
          isinstance(parse_results, type) and \
          issubclass(parse_results, Exception) else nullcontext() as err:
        result = fb.parse_generated(filter_stages)
    # When a parse exception is expected, nothing further can be checked.
    if err:
        assert True
        return
    assert all([log in caplog.text for log in expected_logs])
    assert fb.defconfig == expected_defconfig
    assert fb.cmake_cache == expected_cmakecache
    assert result == expected_return
# Scenarios for test_projectbuilder_log_info. Tuple fields, in order:
#   inline_logs, read_exception, expected_logs
TESTDATA_4 = [
    (False, False, [f"see: {os.path.join('dummy', 'path', 'dummy_file.log')}"]),
    (True, False, [os.path.join('dummy', 'path', 'dummy_file.log'),
                   'file contents',
                   os.path.join('dummy', 'path', 'dummy_file.log')]),
    (True, True, [os.path.join('dummy', 'path', 'dummy_file.log'),
                  'Unable to read log data ([Errno 2] ERROR: dummy_file.log)',
                  os.path.join('dummy', 'path', 'dummy_file.log')]),
]
@pytest.mark.parametrize(
    'inline_logs, read_exception, expected_logs',
    TESTDATA_4,
    ids=['basic', 'inline logs', 'inline logs+read_exception']
)
def test_projectbuilder_log_info(
    caplog,
    mocked_jobserver,
    inline_logs,
    read_exception,
    expected_logs
):
    """log_info either references the log path or inlines its contents."""
    filename = 'dummy_file.log'

    def fake_open(fname, *args, **kwargs):
        # Optionally simulate an unreadable log file.
        if read_exception:
            raise OSError(errno.ENOENT, f'ERROR: {os.path.basename(fname)}')
        return mock.mock_open(read_data='file contents')()

    pb = ProjectBuilder(mock.Mock(), mock.Mock(), mocked_jobserver)

    with mock.patch('builtins.open', fake_open), \
         mock.patch('os.path.realpath',
                    lambda fname, *args, **kwargs: os.path.join('path', fname)), \
         mock.patch('os.path.abspath',
                    lambda fname, *args, **kwargs: os.path.join('dummy', fname)):
        pb.log_info(filename, inline_logs)

    for log in expected_logs:
        assert log in caplog.text
# Scenarios for test_projectbuilder_log_info_file. Tuple fields, in order:
#   valgrind_log_exists, handler_log_exists, device_log_exists,
#   instance_reason, handler_log_getsize, device_log_getsize, expected_log
TESTDATA_5 = [
    (True, False, False, "Valgrind error", 0, 0, 'build_dir/valgrind.log'),
    (True, False, False, "Error", 0, 0, 'build_dir/build.log'),
    (False, True, False, None, 1024, 0, 'build_dir/handler.log'),
    (False, True, False, None, 0, 0, 'build_dir/build.log'),
    (False, False, True, None, 0, 1024, 'build_dir/device.log'),
    (False, False, True, None, 0, 0, 'build_dir/build.log'),
    (False, False, False, None, 0, 0, 'build_dir/build.log'),
]
@pytest.mark.parametrize(
    'valgrind_log_exists, handler_log_exists, device_log_exists,' \
    ' instance_reason, handler_log_getsize, device_log_getsize, expected_log',
    TESTDATA_5,
    ids=['valgrind log', 'valgrind log unused',
         'handler log', 'handler log unused',
         'device log', 'device log unused',
         'no logs']
)
def test_projectbuilder_log_info_file(
    caplog,
    mocked_jobserver,
    valgrind_log_exists,
    handler_log_exists,
    device_log_exists,
    instance_reason,
    handler_log_getsize,
    device_log_getsize,
    expected_log
):
    """log_info_file picks the most specific non-empty log for the instance."""
    # Per-path lookup tables replace the original if-chains in the mocks.
    existence = {
        'build_dir/handler.log': handler_log_exists,
        'build_dir/valgrind.log': valgrind_log_exists,
        'build_dir/device.log': device_log_exists,
    }
    sizes = {
        'build_dir/handler.log': handler_log_getsize,
        'build_dir/device.log': device_log_getsize,
    }

    instance_mock = mock.Mock()
    instance_mock.reason = instance_reason
    instance_mock.build_dir = 'build_dir'
    pb = ProjectBuilder(instance_mock, mock.Mock(), mocked_jobserver)

    log_info_mock = mock.Mock()
    with mock.patch('os.path.exists',
                    lambda fname, *args, **kwargs: existence.get(fname, False)), \
         mock.patch('os.path.getsize',
                    lambda fname, *args, **kwargs: sizes.get(fname, 0)), \
         mock.patch('twisterlib.runner.ProjectBuilder.log_info',
                    log_info_mock):
        pb.log_info_file(None)

    log_info_mock.assert_called_with(expected_log, mock.ANY)
# Scenarios for test_projectbuilder_process. Tuple fields, in order
# (matching the parametrize signature that follows this table):
#   message, instance_status, instance_reason, instance_run,
#   instance_handler_ready, options_cmake_only, options_coverage,
#   options_prep_artifacts, options_runtime_artifacts, cmake_res, build_res,
#   metrics_res, pipeline_runtime_error, determine_testcases_build_error,
#   expected_logs, resulting_message, expected_status, expected_reason,
#   expected_skipped, expected_missing
TESTDATA_6 = [
    (
        {'op': 'filter'},
        TwisterStatus.FAIL,
        'Failed',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.FAIL,
        'Failed',
        0,
        None
    ),
    (
        {'op': 'filter'},
        TwisterStatus.PASS,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'filter': { 'dummy instance name': True }},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        ['filtering dummy instance name'],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.FILTER,
        'runtime filter',
        1,
        (TwisterStatus.SKIP,)
    ),
    (
        {'op': 'filter'},
        TwisterStatus.PASS,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'filter': { 'another dummy instance name': True }},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'cmake', 'test': mock.ANY},
        TwisterStatus.PASS,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cmake'},
        TwisterStatus.ERROR,
        'dummy error',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.ERROR,
        'dummy error',
        0,
        None
    ),
    (
        {'op': 'cmake'},
        TwisterStatus.NONE,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        True,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.PASS,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cmake'},
        'success',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        True,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'report', 'test': mock.ANY},
        'success',
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cmake'},
        'success',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        False,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'filter': {'dummy instance name': True}},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        ['filtering dummy instance name'],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.FILTER,
        'runtime filter',
        1,
        (TwisterStatus.SKIP,)
    ),
    (
        {'op': 'cmake'},
        'success',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        False,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'filter': {}},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'build', 'test': mock.ANY},
        'success',
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'build'},
        mock.ANY,
        None,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        None,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        ['build test: dummy instance name'],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.ERROR,
        'Build Failure',
        0,
        None
    ),
    (
        {'op': 'build'},
        TwisterStatus.SKIP,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'returncode': 0},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        ['build test: dummy instance name',
         'Determine test cases for test instance: dummy instance name'],
        {'op': 'gather_metrics', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        1,
        (TwisterStatus.SKIP, mock.ANY)
    ),
    (
        {'op': 'build'},
        TwisterStatus.PASS,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'dummy': 'dummy'},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        ['build test: dummy instance name'],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.PASS,
        mock.ANY,
        0,
        (TwisterStatus.BLOCK, mock.ANY)
    ),
    (
        {'op': 'build'},
        'success',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'returncode': 0},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        ['build test: dummy instance name',
         'Determine test cases for test instance: dummy instance name'],
        {'op': 'gather_metrics', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'build'},
        'success',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'returncode': 0},
        mock.ANY,
        mock.ANY,
        BuildError,
        ['build test: dummy instance name',
         'Determine test cases for test instance: dummy instance name'],
        {'op': 'report', 'test': mock.ANY},
        TwisterStatus.ERROR,
        'Determine Testcases Error!',
        0,
        None
    ),
    (
        {'op': 'gather_metrics'},
        mock.ANY,
        mock.ANY,
        True,
        True,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'returncode': 0},  # metrics_res
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'run', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        0,
        None
    ),  # 'gather metrics, run and ready handler'
    (
        {'op': 'gather_metrics'},
        mock.ANY,
        mock.ANY,
        False,
        True,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'returncode': 0},  # metrics_res
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'report', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        0,
        None
    ),  # 'gather metrics'
    (
        {'op': 'gather_metrics'},
        mock.ANY,
        mock.ANY,
        False,
        True,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        {'returncode': 0},  # build_res
        {'returncode': 1},  # metrics_res
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'report', 'test': mock.ANY},
        'error',
        'Build Failure at gather_metrics.',
        0,
        None
    ),  # 'build ok, gather metrics fail',
    (
        {'op': 'run'},
        'success',
        'OK',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        None,
        mock.ANY,
        ['run test: dummy instance name',
         'run status: dummy instance name success'],
        {'op': 'report', 'test': mock.ANY, 'status': 'success', 'reason': 'OK'},
        'success',
        'OK',
        0,
        None
    ),
    (
        {'op': 'run'},
        TwisterStatus.FAIL,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        RuntimeError,
        mock.ANY,
        ['run test: dummy instance name',
         'run status: dummy instance name failed',
         'RuntimeError: Pipeline Error!'],
        None,
        TwisterStatus.FAIL,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'report'},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        False,
        True,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'cleanup', 'mode': 'device', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'report'},
        TwisterStatus.PASS,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        False,
        False,
        'pass',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'cleanup', 'mode': 'passed', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'report'},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        False,
        False,
        'all',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        {'op': 'cleanup', 'mode': 'all', 'test': mock.ANY},
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'report'},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        False,
        False,
        'other',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        None,
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cleanup', 'mode': 'device'},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        None,
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cleanup', 'mode': 'passed'},
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        None,
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cleanup', 'mode': 'all'},
        mock.ANY,
        'Valgrind error',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        None,
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
    (
        {'op': 'cleanup', 'mode': 'all'},
        mock.ANY,
        'Cmake build failure',
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        mock.ANY,
        [],
        None,
        mock.ANY,
        mock.ANY,
        0,
        None
    ),
]
@pytest.mark.parametrize(
    'message,' \
    ' instance_status, instance_reason, instance_run, instance_handler_ready,' \
    ' options_cmake_only,' \
    ' options_coverage, options_prep_artifacts, options_runtime_artifacts,' \
    ' cmake_res, build_res, metrics_res,' \
    ' pipeline_runtime_error, determine_testcases_build_error,' \
    ' expected_logs, resulting_message,' \
    ' expected_status, expected_reason, expected_skipped, expected_missing',
    TESTDATA_6,
    ids=[
        'filter, failed', 'filter, cmake res', 'filter, no cmake res',
        'cmake, failed', 'cmake, cmake_only, no status', 'cmake, cmake_only',
        'cmake, no cmake_only, cmake res', 'cmake, no cmake_only, no cmake res',
        'build, no build res', 'build, skipped', 'build, blocked',
        'build, determine testcases', 'build, determine testcases Error',
        'gather metrics, run and ready handler', 'gather metrics',
        'build ok, gather metrics fail',
        'run', 'run, Pipeline Runtime Error',
        'report, prep artifacts for testing',
        'report, runtime artifact cleanup pass, status passed',
        'report, runtime artifact cleanup all', 'report, no message put',
        'cleanup, device', 'cleanup, mode passed', 'cleanup, mode all',
        'cleanup, mode all, cmake build failure'
    ]
)
def test_projectbuilder_process(
    caplog,
    mocked_jobserver,
    message,
    instance_status,
    instance_reason,
    instance_run,
    instance_handler_ready,
    options_cmake_only,
    options_coverage,
    options_prep_artifacts,
    options_runtime_artifacts,
    cmake_res,
    build_res,
    metrics_res,
    pipeline_runtime_error,
    determine_testcases_build_error,
    expected_logs,
    resulting_message,
    expected_status,
    expected_reason,
    expected_skipped,
    expected_missing
):
    """Drive ProjectBuilder.process() through a single pipeline 'op' message.

    All stage implementations (cmake/build/run/report/cleanup/...) are mocked
    out, so only the op-routing state machine inside process() is exercised.
    TESTDATA_6 supplies one scenario per pipeline stage and failure mode.
    """
    # Optionally make pipeline.put() raise, to hit the error-recovery path.
    def mock_pipeline_put(msg):
        if isinstance(pipeline_runtime_error, type) and \
           issubclass(pipeline_runtime_error, Exception):
            raise RuntimeError('Pipeline Error!')

    # Optionally make determine_testcases() raise a BuildError.
    def mock_determine_testcases(res):
        if isinstance(determine_testcases_build_error, type) and \
           issubclass(determine_testcases_build_error, Exception):
            raise BuildError('Determine Testcases Error!')

    instance_mock = mock.Mock()
    instance_mock.name = 'dummy instance name'
    instance_mock.status = instance_status
    instance_mock.reason = instance_reason
    instance_mock.run = instance_run
    instance_mock.handler = mock.Mock()
    instance_mock.handler.ready = instance_handler_ready
    instance_mock.testsuite.harness = 'test'
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
    pb.options = mock.Mock()
    pb.options.coverage = options_coverage
    pb.options.prep_artifacts_for_testing = options_prep_artifacts
    pb.options.runtime_artifact_cleanup = options_runtime_artifacts
    pb.options.cmake_only = options_cmake_only
    # Stub every stage; process() only decides which one to call next.
    pb.cmake = mock.Mock(return_value=cmake_res)
    pb.build = mock.Mock(return_value=build_res)
    pb.determine_testcases = mock.Mock(side_effect=mock_determine_testcases)
    pb.report_out = mock.Mock()
    pb.cleanup_artifacts = mock.Mock()
    pb.cleanup_device_testing_artifacts = mock.Mock()
    pb.run = mock.Mock()
    pb.gather_metrics = mock.Mock(return_value=metrics_res)

    pipeline_mock = mock.Mock(put=mock.Mock(side_effect=mock_pipeline_put))
    done_mock = mock.Mock()
    lock_mock = mock.Mock(
        __enter__=mock.Mock(return_value=(mock.Mock(), mock.Mock())),
        __exit__=mock.Mock(return_value=None)
    )
    results_mock = mock.Mock()
    results_mock.skipped_runtime = 0

    pb.process(pipeline_mock, done_mock, message, lock_mock, results_mock)

    assert all([log in caplog.text for log in expected_logs])

    if resulting_message:
        pipeline_mock.put.assert_called_with(resulting_message)

    assert pb.instance.status == expected_status
    assert pb.instance.reason == expected_reason
    assert results_mock.skipped_runtime == expected_skipped

    if expected_missing:
        pb.instance.add_missing_case_status.assert_called_with(*expected_missing)
# (ELF symbol names found in the binary,
#  testcase ids expected to be registered from the ztest naming scheme)
TESTDATA_7 = [
    (
        [
            'z_ztest_unit_test__dummy_suite_name__dummy_test_name',
            'z_ztest_unit_test__dummy_suite_name__test_dummy_name',
            'no match'
        ],
        ['dummy_id.dummy_name', 'dummy_id.dummy_name']
    ),
    (
        ['no match'],
        []
    ),
]
@pytest.mark.parametrize(
    'symbols_names, added_tcs',
    TESTDATA_7,
    ids=['two hits, one miss', 'nothing']
)
def test_projectbuilder_determine_testcases(
    mocked_jobserver,
    symbols_names,
    added_tcs
):
    """determine_testcases() should derive testcase ids from ztest ELF symbols."""
    # mock.Mock(name=...) would set the mock's own name attribute handling,
    # so stash the symbol name in 'n' first and configure 'name' afterwards.
    symbols_mock = [mock.Mock(n=name) for name in symbols_names]
    for m in symbols_mock:
        m.configure_mock(name=m.n)

    # Single symbol-table section yielding the prepared symbols.
    sections_mock = [mock.Mock(spec=SymbolTableSection)]
    sections_mock[0].iter_symbols = mock.Mock(return_value=symbols_mock)

    elf_mock = mock.Mock()
    elf_mock().iter_sections = mock.Mock(return_value=sections_mock)

    results_mock = mock.Mock()

    instance_mock = mock.Mock()
    instance_mock.testcases = []
    instance_mock.testsuite.id = 'dummy_id'
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)

    with mock.patch('twisterlib.runner.ELFFile', elf_mock), \
         mock.patch('builtins.open', mock.mock_open()):
        pb.determine_testcases(results_mock)

    # Both the instance and its testsuite must learn about each testcase.
    pb.instance.add_testcase.assert_has_calls(
        [mock.call(name=x) for x in added_tcs]
    )
    pb.instance.testsuite.add_testcase.assert_has_calls(
        [mock.call(name=x) for x in added_tcs]
    )
# (extra files/dirs to keep, runtime_artifact_cleanup mode,
#  files expected to survive cleanup_artifacts())
TESTDATA_8 = [
    (
        ['addition.al'],
        'dummy',
        ['addition.al', '.config', 'zephyr']
    ),
    (
        [],
        'all',
        ['.config', 'zephyr', 'testsuite_extra.conf', 'twister']
    ),
]
@pytest.mark.parametrize(
    'additional_keep, runtime_artifact_cleanup, expected_files',
    TESTDATA_8,
    ids=['additional keep', 'all cleanup']
)
def test_projectbuilder_cleanup_artifacts(
    tmpdir,
    mocked_jobserver,
    additional_keep,
    runtime_artifact_cleanup,
    expected_files
):
    """cleanup_artifacts() should prune a build dir down to the keep-list."""
    # Fixture layout created below:
    # tmpdir
    # +- twister/
    # |  +- testsuite_extra.conf
    # +- dummy_dir/
    # |  +- dummy.del
    # +- dummy_link_dir -> zephyr
    # +- zephyr/
    # |  +- .config
    # +- addition.al
    twister_dir = tmpdir.mkdir('twister')
    testsuite_extra_conf = twister_dir.join('testsuite_extra.conf')
    testsuite_extra_conf.write_text('dummy', 'utf-8')

    dummy_dir = tmpdir.mkdir('dummy_dir')
    dummy_del = dummy_dir.join('dummy.del')
    dummy_del.write_text('dummy', 'utf-8')

    zephyr = tmpdir.mkdir('zephyr')
    config = zephyr.join('.config')
    config.write_text('dummy', 'utf-8')

    # Symlink, to check cleanup does not follow/remove link targets wrongly.
    dummy_link_dir = tmpdir.join('dummy_link_dir')
    os.symlink(zephyr, dummy_link_dir)

    addition_al = tmpdir.join('addition.al')
    addition_al.write_text('dummy', 'utf-8')

    instance_mock = mock.Mock()
    instance_mock.build_dir = tmpdir
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
    pb.options = mock.Mock(runtime_artifact_cleanup=runtime_artifact_cleanup)

    pb.cleanup_artifacts(additional_keep)

    files_left = [p.name for p in list(pathlib.Path(tmpdir).glob('**/*'))]

    assert sorted(files_left) == sorted(expected_files)
def test_projectbuilder_cleanup_device_testing_artifacts(
    caplog,
    mocked_jobserver
):
    """Device-testing cleanup should log, keep binaries and sanitize files."""
    fake_build_dir = os.path.join('build', 'dir')
    binary_paths = [os.path.join('zephyr', 'file.bin')]

    dummy_instance = mock.Mock()
    dummy_instance.sysbuild = False
    dummy_instance.build_dir = fake_build_dir

    builder = ProjectBuilder(dummy_instance, mock.Mock(), mocked_jobserver)
    builder._get_binaries = mock.Mock(return_value=binary_paths)
    builder.cleanup_artifacts = mock.Mock()
    builder._sanitize_files = mock.Mock()

    builder.cleanup_device_testing_artifacts()

    assert f'Cleaning up for Device Testing {fake_build_dir}' in caplog.text

    # Binaries plus runners.yaml must be preserved through the cleanup.
    expected_keep = [
        os.path.join('zephyr', 'file.bin'),
        os.path.join('zephyr', 'runners.yaml')
    ]
    builder.cleanup_artifacts.assert_called_once_with(expected_keep)
    builder._sanitize_files.assert_called_once()
# (platform.binaries, binaries reported by runners.yaml,
#  expected _get_binaries() result)
TESTDATA_9 = [
    (
        None,
        [],
        [os.path.join('zephyr', 'zephyr.hex'),
         os.path.join('zephyr', 'zephyr.bin'),
         os.path.join('zephyr', 'zephyr.elf'),
         os.path.join('zephyr', 'zephyr.exe')]
    ),
    (
        [os.path.join('dummy.bin'), os.path.join('dummy.hex')],
        [os.path.join('dir2', 'dummy.elf')],
        [os.path.join('zephyr', 'dummy.bin'),
         os.path.join('zephyr', 'dummy.hex'),
         os.path.join('dir2', 'dummy.elf')]
    ),
]
@pytest.mark.parametrize(
    'platform_binaries, runner_binaries, expected_binaries',
    TESTDATA_9,
    ids=['default', 'valid']
)
def test_projectbuilder_get_binaries(
    mocked_jobserver,
    platform_binaries,
    runner_binaries,
    expected_binaries
):
    """_get_binaries() should merge platform- and runner-provided binaries."""
    dummy_instance = mock.Mock()
    dummy_instance.build_dir = os.path.join('build', 'dir')
    # No sysbuild domains for these scenarios.
    dummy_instance.domains.get_domains.side_effect = lambda *a, **kw: []
    dummy_instance.platform = mock.Mock(binaries=platform_binaries)

    builder = ProjectBuilder(dummy_instance, mock.Mock(), mocked_jobserver)
    builder._get_binaries_from_runners = mock.Mock(
        return_value=runner_binaries
    )

    collected = builder._get_binaries()

    # Same membership in both directions, irrespective of ordering.
    assert set(collected) == set(expected_binaries)
# (sysbuild domain name or None, parsed runners.yaml content,
#  expected binaries extracted from it)
TESTDATA_10 = [
    (None, None, []),
    (None, {'dummy': 'dummy'}, []),
    (
        None,
        {
            'config': {
                'elf_file': '/absolute/path/dummy.elf',
                'bin_file': 'path/dummy.bin'
            }
        },
        ['/absolute/path/dummy.elf', os.path.join('zephyr', 'path/dummy.bin')]
    ),
    (
        'test_domain',
        {
            'config': {
                'elf_file': '/absolute/path/dummy.elf',
                'bin_file': 'path/dummy.bin'
            }
        },
        ['/absolute/path/dummy.elf', os.path.join('test_domain', 'zephyr', 'path/dummy.bin')]
    ),
]
@pytest.mark.parametrize(
    'domain, runners_content, expected_binaries',
    TESTDATA_10,
    ids=['no file', 'no config', 'valid', 'with domain']
)
def test_projectbuilder_get_binaries_from_runners(
    mocked_jobserver,
    domain,
    runners_content,
    expected_binaries
):
    """_get_binaries_from_runners() should read binary paths from runners.yaml."""
    # Only the expected runners.yaml path may ever be probed for existence.
    def mock_exists(fname):
        assert fname == os.path.join('build', 'dir', domain if domain else '',
                                     'zephyr', 'runners.yaml')
        return runners_content is not None

    instance_mock = mock.Mock()
    instance_mock.build_dir = os.path.join('build', 'dir')
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)

    with mock.patch('os.path.exists', mock_exists), \
         mock.patch('builtins.open', mock.mock_open()), \
         mock.patch('yaml.load', return_value=runners_content):
        if domain:
            bins = pb._get_binaries_from_runners(domain)
        else:
            bins = pb._get_binaries_from_runners()

    # Same membership in both directions, irrespective of ordering.
    assert all(bin in expected_binaries for bin in bins)
    assert all(bin in bins for bin in expected_binaries)
def test_projectbuilder_sanitize_files(mocked_jobserver):
    """_sanitize_files() should delegate to both sanitization helpers."""
    builder = ProjectBuilder(mock.Mock(), mock.Mock(), mocked_jobserver)
    builder._sanitize_runners_file = mock.Mock()
    builder._sanitize_zephyr_base_from_files = mock.Mock()

    builder._sanitize_files()

    builder._sanitize_runners_file.assert_called_once()
    builder._sanitize_zephyr_base_from_files.assert_called_once()
TESTDATA_11 = [
(None, None),
('dummy: []', None),
(
"""
config:
elf_file: relative/path/dummy.elf
hex_file: /absolute/path/build_dir/zephyr/dummy.hex
""",
"""
config:
elf_file: relative/path/dummy.elf
hex_file: dummy.hex
"""
),
]
@pytest.mark.parametrize(
    'runners_text, expected_write_text',
    TESTDATA_11,
    ids=['no file', 'no config', 'valid']
)
def test_projectbuilder_sanitize_runners_file(
    mocked_jobserver,
    runners_text,
    expected_write_text
):
    """_sanitize_runners_file() should strip absolute build paths from YAML."""
    # Pretend runners.yaml is missing for the 'no file' case.
    def mock_exists(fname):
        return runners_text is not None

    instance_mock = mock.Mock()
    instance_mock.build_dir = '/absolute/path/build_dir'
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)

    with mock.patch('os.path.exists', mock_exists), \
         mock.patch('builtins.open',
                    mock.mock_open(read_data=runners_text)) as f:
        pb._sanitize_runners_file()

    if expected_write_text is not None:
        f().write.assert_called_with(expected_write_text)
    else:
        # Nothing to sanitize: the file must not be rewritten at all.
        f().write.assert_not_called()
# ({filename: mock_open factory supplying the file's contents},
#  {filename: text expected to be written back with the base path removed})
TESTDATA_12 = [
    (
        {
            'CMakeCache.txt': mock.mock_open(
                read_data='canonical/zephyr/base/dummy.file: ERROR'
            )
        },
        {
            'CMakeCache.txt': 'dummy.file: ERROR'
        }
    ),
    (
        {
            os.path.join('zephyr', 'runners.yaml'): mock.mock_open(
                read_data='There was canonical/zephyr/base/dummy.file here'
            )
        },
        {
            os.path.join('zephyr', 'runners.yaml'): 'There was dummy.file here'
        }
    ),
]
@pytest.mark.parametrize(
    'text_mocks, expected_write_texts',
    TESTDATA_12,
    ids=['CMakeCache file', 'runners.yaml file']
)
def test_projectbuilder_sanitize_zephyr_base_from_files(
    mocked_jobserver,
    text_mocks,
    expected_write_texts
):
    """_sanitize_zephyr_base_from_files() should strip the canonical base path."""
    build_dir_path = 'canonical/zephyr/base/build_dir/'

    # Only files listed in text_mocks (relative to build_dir) exist.
    def mock_exists(fname):
        if not fname.startswith(build_dir_path):
            return False
        return fname[len(build_dir_path):] in text_mocks

    # Serve the per-file mock_open handle; anything else is a miss.
    def mock_open(fname, *args, **kwargs):
        if not fname.startswith(build_dir_path):
            raise FileNotFoundError(errno.ENOENT, f'File {fname} not found.')
        return text_mocks[fname[len(build_dir_path):]]()

    instance_mock = mock.Mock()
    instance_mock.build_dir = build_dir_path
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)

    with mock.patch('os.path.exists', mock_exists), \
         mock.patch('builtins.open', mock_open), \
         mock.patch('twisterlib.runner.canonical_zephyr_base',
                    'canonical/zephyr/base'):
        pb._sanitize_zephyr_base_from_files()

    for fname, fhandler in text_mocks.items():
        fhandler().write.assert_called_with(expected_write_texts[fname])
# (instance status, verbose flag, cmake_only flag, handler ready/run flag,
#  log fragments expected in caplog, stdout fragment expected or None)
TESTDATA_13 = [
    (
        TwisterStatus.ERROR, True, True, False,
        ['INFO 20/25 dummy platform' \
         ' dummy.testsuite.name' \
         ' ERROR dummy reason (cmake)'],
        None
    ),
    (
        TwisterStatus.FAIL, False, False, False,
        ['ERROR dummy platform' \
         ' dummy.testsuite.name' \
         ' FAILED : dummy reason'],
        'INFO - Total complete: 20/ 25 80% skipped: 3,' \
        ' failed: 3, error: 1'
    ),
    (
        TwisterStatus.SKIP, True, False, False,
        ['INFO 20/25 dummy platform' \
         ' dummy.testsuite.name' \
         ' SKIPPED (dummy reason)'],
        None
    ),
    (
        TwisterStatus.FILTER, False, False, False,
        [],
        'INFO - Total complete: 20/ 25 80% skipped: 4,' \
        ' failed: 2, error: 1'
    ),
    (
        TwisterStatus.PASS, True, False, True,
        ['INFO 20/25 dummy platform' \
         ' dummy.testsuite.name' \
         ' PASSED' \
         ' (dummy handler type: dummy dut, 60.000s)'],
        None
    ),
    (
        TwisterStatus.PASS, True, False, False,
        ['INFO 20/25 dummy platform' \
         ' dummy.testsuite.name' \
         ' PASSED (build)'],
        None
    ),
    (
        'unknown status', False, False, False,
        ['Unknown status = unknown status'],
        'INFO - Total complete: 20/ 25 80% skipped: 3,' \
        ' failed: 2, error: 1\r'
    )
]
@pytest.mark.parametrize(
    'status, verbose, cmake_only, ready_run, expected_logs, expected_out',
    TESTDATA_13,
    ids=['verbose error cmake only', 'failed', 'verbose skipped', 'filtered',
         'verbose passed ready run', 'verbose passed', 'unknown status']
)
def test_projectbuilder_report_out(
    capfd,
    caplog,
    mocked_jobserver,
    status,
    verbose,
    cmake_only,
    ready_run,
    expected_logs,
    expected_out
):
    """report_out() should log per-instance results and progress totals."""
    instance_mock = mock.Mock()
    instance_mock.handler.type_str = 'dummy handler type'
    instance_mock.handler.seed = 123
    instance_mock.handler.ready = ready_run
    instance_mock.run = ready_run
    instance_mock.dut = 'dummy dut'
    instance_mock.execution_time = 60
    instance_mock.platform.name = 'dummy platform'
    instance_mock.status = status
    instance_mock.reason = 'dummy reason'
    instance_mock.testsuite.name = 'dummy.testsuite.name'
    # 25 defined testcases; 24 executed plus one explicitly skipped.
    instance_mock.testsuite.testcases = [mock.Mock() for _ in range(25)]
    instance_mock.testcases = [mock.Mock() for _ in range(24)] + \
                              [mock.Mock(status=TwisterStatus.SKIP)]
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
    pb.options.verbose = verbose
    pb.options.cmake_only = cmake_only
    pb.options.seed = 123
    pb.log_info_file = mock.Mock()

    results_mock = mock.Mock()
    results_mock.iteration = 1
    results_mock.total = 25
    results_mock.done = 19
    results_mock.passed = 17
    results_mock.skipped_configs = 3
    results_mock.skipped_cases = 4
    results_mock.failed = 2
    results_mock.error = 1
    results_mock.cases = 0

    pb.report_out(results_mock)

    assert results_mock.cases == 25

    # Strip ANSI colour escapes and logger location info before matching.
    trim_actual_log = re.sub(
        r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])',
        '',
        caplog.text
    )
    trim_actual_log = re.sub(r'twister:runner.py:\d+', '', trim_actual_log)
    assert all([log in trim_actual_log for log in expected_logs])

    if expected_out:
        out, err = capfd.readouterr()
        # Rewrite the captured output so pytest still shows it on failure.
        sys.stdout.write(out)
        sys.stderr.write(err)
        # Remove 7b ANSI C1 escape sequences (colours)
        out = re.sub(
            r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])',
            '',
            out
        )
        assert expected_out in out
def test_projectbuilder_cmake_assemble_args():
    """cmake_assemble_args() should flatten all sources into -D cmake flags.

    The assertion is order-sensitive: CLI extra args, cmake extra args,
    handler args, conf files, dtc overlays, and overlay configs last.
    """
    extra_args = ['CONFIG_FOO=y', 'DUMMY_EXTRA="yes"']
    handler = mock.Mock(ready=True, args=['dummy_handler'])
    extra_conf_files = ['extrafile1.conf', 'extrafile2.conf']
    extra_overlay_confs = ['extra_overlay_conf']
    extra_dtc_overlay_files = ['overlay1.dtc', 'overlay2.dtc']
    cmake_extra_args = ['CMAKE1="yes"', 'CMAKE2=n']
    build_dir = os.path.join('build', 'dir')

    with mock.patch('os.path.exists', return_value=True):
        results = ProjectBuilder.cmake_assemble_args(extra_args, handler,
                                                     extra_conf_files,
                                                     extra_overlay_confs,
                                                     extra_dtc_overlay_files,
                                                     cmake_extra_args,
                                                     build_dir)

    expected_results = [
        '-DCONFIG_FOO=y',
        '-DCMAKE1=\"yes\"',
        '-DCMAKE2=n',
        '-DDUMMY_EXTRA=yes',
        '-Ddummy_handler',
        '-DCONF_FILE=extrafile1.conf;extrafile2.conf',
        '-DDTC_OVERLAY_FILE=overlay1.dtc;overlay2.dtc',
        f'-DOVERLAY_CONFIG=extra_overlay_conf ' \
        f'{os.path.join("build", "dir", "twister", "testsuite_extra.conf")}'
    ]

    assert results == expected_results
def test_projectbuilder_cmake(mocked_jobserver):
    """cmake() should assemble args and forward them (with the filter) to run_cmake.

    Fix: declare ``mocked_jobserver`` as a parameter. The original referenced
    the bare name without requesting the fixture, so pytest never injected it
    (NameError if the fixture lives in conftest.py; otherwise the fixture
    function object itself — not a mocked jobserver — was passed on).
    """
    instance_mock = mock.Mock()
    instance_mock.handler = 'dummy handler'
    instance_mock.build_dir = os.path.join('build', 'dir')
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
    pb.build_dir = 'build_dir'
    pb.testsuite.extra_args = ['some', 'args']
    pb.testsuite.extra_conf_files = ['some', 'files1']
    pb.testsuite.extra_overlay_confs = ['some', 'files2']
    pb.testsuite.extra_dtc_overlay_files = ['some', 'files3']
    pb.options.extra_args = ['other', 'args']
    pb.cmake_assemble_args = mock.Mock(return_value=['dummy'])
    cmake_res_mock = mock.Mock()
    pb.run_cmake = mock.Mock(return_value=cmake_res_mock)

    res = pb.cmake(['dummy filter'])

    assert res == cmake_res_mock
    # The assembled args must be sourced from testsuite/instance/options state.
    pb.cmake_assemble_args.assert_called_once_with(
        pb.testsuite.extra_args,
        pb.instance.handler,
        pb.testsuite.extra_conf_files,
        pb.testsuite.extra_overlay_confs,
        pb.testsuite.extra_dtc_overlay_files,
        pb.options.extra_args,
        pb.instance.build_dir
    )
    pb.run_cmake.assert_called_once_with(['dummy'], ['dummy filter'])
def test_projectbuilder_build(mocked_jobserver):
    """build() should call run_build with --build and return its result."""
    dummy_instance = mock.Mock()
    dummy_instance.testsuite.harness = 'test'

    builder = ProjectBuilder(dummy_instance, mock.Mock(), mocked_jobserver)
    builder.build_dir = 'build_dir'
    build_result = {'dummy': 'dummy'}
    builder.run_build = mock.Mock(return_value=build_result)

    res = builder.build()

    builder.run_build.assert_called_once_with(['--build', 'build_dir'])
    assert res == build_result
# (handler ready, handler type_str, seed, platform name, platform arch,
#  defconfig, harness name,
#  expect: duts set, parse_generated call, seed set, extra_test_args set,
#  pytest_run call, handler.handle call)
TESTDATA_14 = [
    (
        True,
        'device',
        234,
        'native_sim',
        'posix',
        {'CONFIG_FAKE_ENTROPY_NATIVE_POSIX': 'y'},
        'pytest',
        True,
        True,
        True,
        True,
        True,
        False
    ),
    (
        True,
        'not device',
        None,
        'native_sim',
        'not posix',
        {'CONFIG_FAKE_ENTROPY_NATIVE_POSIX': 'y'},
        'not pytest',
        False,
        False,
        False,
        False,
        False,
        True
    ),
    (
        False,
        'device',
        234,
        'native_sim',
        'posix',
        {'CONFIG_FAKE_ENTROPY_NATIVE_POSIX': 'y'},
        'pytest',
        False,
        False,
        False,
        False,
        False,
        False
    ),
]
@pytest.mark.parametrize(
    'ready, type_str, seed, platform_name, platform_arch, defconfig, harness,' \
    ' expect_duts, expect_parse_generated, expect_seed,' \
    ' expect_extra_test_args, expect_pytest, expect_handle',
    TESTDATA_14,
    ids=['pytest full', 'not pytest minimal', 'not ready']
)
def test_projectbuilder_run(
    mocked_jobserver,
    ready,
    type_str,
    seed,
    platform_name,
    platform_arch,
    defconfig,
    harness,
    expect_duts,
    expect_parse_generated,
    expect_seed,
    expect_extra_test_args,
    expect_pytest,
    expect_handle
):
    """run() should configure the handler and dispatch to the right harness."""
    pytest_mock = mock.Mock(spec=Pytest)
    harness_mock = mock.Mock()

    # Return the Pytest harness only when it is requested by name.
    def mock_harness(name):
        if name == 'Pytest':
            return pytest_mock
        else:
            return harness_mock

    instance_mock = mock.Mock()
    instance_mock.handler.get_test_timeout = mock.Mock(return_value=60)
    instance_mock.handler.seed = 123
    instance_mock.handler.ready = ready
    instance_mock.handler.type_str = type_str
    instance_mock.handler.duts = [mock.Mock(name='dummy dut')]
    instance_mock.platform.name = platform_name
    instance_mock.platform.arch = platform_arch
    instance_mock.testsuite.harness = harness
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
    pb.options.extra_test_args = ['dummy_arg1', 'dummy_arg2']
    pb.duts = ['another dut']
    pb.options.seed = seed
    pb.defconfig = defconfig
    pb.parse_generated = mock.Mock()

    with mock.patch('twisterlib.runner.HarnessImporter.get_harness',
                    mock_harness):
        pb.run()

    if expect_duts:
        assert pb.instance.handler.duts == ['another dut']
    if expect_parse_generated:
        pb.parse_generated.assert_called_once()
    if expect_seed:
        assert pb.instance.handler.seed == seed
    if expect_extra_test_args:
        assert pb.instance.handler.extra_test_args == ['dummy_arg1',
                                                       'dummy_arg2']
    if expect_pytest:
        # Pytest harness drives its own run with the computed timeout.
        pytest_mock.pytest_run.assert_called_once_with(60)
    if expect_handle:
        pb.instance.handler.handle.assert_called_once_with(harness_mock)
# (enable_size_report, cmake_only, expect calc_size call, expect zeroed metrics)
TESTDATA_15 = [
    (False, False, False, True),
    (True, False, True, False),
    (False, True, False, True),
    (True, True, False, True),
]
@pytest.mark.parametrize(
    'enable_size_report, cmake_only, expect_calc_size, expect_zeroes',
    TESTDATA_15,
    ids=['none', 'size_report', 'cmake', 'size_report+cmake']
)
def test_projectbuilder_gather_metrics(
    mocked_jobserver,
    enable_size_report,
    cmake_only,
    expect_calc_size,
    expect_zeroes
):
    """gather_metrics() should calc sizes only for size reports without cmake_only."""
    instance_mock = mock.Mock()
    instance_mock.metrics = {}
    env_mock = mock.Mock()

    pb = ProjectBuilder(instance_mock, env_mock, mocked_jobserver)
    pb.options.enable_size_report = enable_size_report
    pb.options.create_rom_ram_report = False
    pb.options.cmake_only = cmake_only
    pb.calc_size = mock.Mock()

    pb.gather_metrics(instance_mock)

    if expect_calc_size:
        pb.calc_size.assert_called_once()
    if expect_zeroes:
        # No size calculation: metrics must be zero-filled placeholders.
        assert instance_mock.metrics['used_ram'] == 0
        assert instance_mock.metrics['used_rom'] == 0
        assert instance_mock.metrics['available_rom'] == 0
        assert instance_mock.metrics['available_ram'] == 0
        assert instance_mock.metrics['unrecognized'] == []
# (instance status, platform type,
#  expect size warnings, expect real size calc, expect zeroed metrics)
TESTDATA_16 = [
    (TwisterStatus.ERROR, mock.ANY, False, False, False),
    (TwisterStatus.FAIL, mock.ANY, False, False, False),
    (TwisterStatus.SKIP, mock.ANY, False, False, False),
    (TwisterStatus.FILTER, 'native', False, False, True),
    (TwisterStatus.PASS, 'qemu', False, False, True),
    (TwisterStatus.FILTER, 'unit', False, False, True),
    (TwisterStatus.FILTER, 'mcu', True, True, False),
    (TwisterStatus.PASS, 'frdm_k64f', False, True, False),
]
@pytest.mark.parametrize(
    'status, platform_type, expect_warnings, expect_calcs, expect_zeroes',
    TESTDATA_16,
    ids=[x[0] + (', ' + x[1]) if x[1] != mock.ANY else '' for x in TESTDATA_16]
)
def test_projectbuilder_calc_size(
    status,
    platform_type,
    expect_warnings,
    expect_calcs,
    expect_zeroes
):
    """calc_size() should compute real sizes for board builds and zero-fill
    metrics for native/qemu/unit platforms; failed runs get neither."""
    size_calc_mock = mock.Mock()

    instance_mock = mock.Mock()
    instance_mock.status = status
    instance_mock.platform.type = platform_type
    instance_mock.metrics = {}
    instance_mock.calculate_sizes = mock.Mock(return_value=size_calc_mock)

    from_buildlog = True
    ProjectBuilder.calc_size(instance_mock, from_buildlog)

    if expect_calcs:
        instance_mock.calculate_sizes.assert_called_once_with(
            from_buildlog=from_buildlog,
            generate_warning=expect_warnings
        )

        assert instance_mock.metrics['used_ram'] == \
               size_calc_mock.get_used_ram()
        assert instance_mock.metrics['used_rom'] == \
               size_calc_mock.get_used_rom()
        assert instance_mock.metrics['available_rom'] == \
               size_calc_mock.get_available_rom()
        assert instance_mock.metrics['available_ram'] == \
               size_calc_mock.get_available_ram()
        assert instance_mock.metrics['unrecognized'] == \
               size_calc_mock.unrecognized_sections()

    if expect_zeroes:
        assert instance_mock.metrics['used_ram'] == 0
        assert instance_mock.metrics['used_rom'] == 0
        assert instance_mock.metrics['available_rom'] == 0
        assert instance_mock.metrics['available_ram'] == 0
        assert instance_mock.metrics['unrecognized'] == []

    if expect_calcs or expect_zeroes:
        assert instance_mock.metrics['handler_time'] == \
               instance_mock.execution_time
    else:
        # Error/fail/skip: metrics must be left untouched.
        assert instance_mock.metrics == {}
# (sys.platform, os.name, option overrides, GNUMakeJobClient found in environ,
#  expected job count, expected jobserver class name)
TESTDATA_17 = [
    ('linux', 'posix', {'jobs': 4}, True, 32, 'GNUMakeJobClient'),
    ('linux', 'posix', {'build_only': True}, False, 16, 'GNUMakeJobServer'),
    ('linux', '???', {}, False, 8, 'JobClient'),
    ('linux', '???', {'jobs': 4}, False, 4, 'JobClient'),
]
@pytest.mark.parametrize(
    'platform, os_name, options, jobclient_from_environ, expected_jobs,' \
    ' expected_jobserver',
    TESTDATA_17,
    ids=['GNUMakeJobClient', 'GNUMakeJobServer',
         'JobClient', 'Jobclient+options']
)
def test_twisterrunner_run(
    caplog,
    platform,
    os_name,
    options,
    jobclient_from_environ,
    expected_jobs,
    expected_jobserver
):
    """run() should pick the right jobserver/job count and merge done metrics."""
    # Simulate discovering (or not) a GNU make jobclient in the environment.
    def mock_client_from_environ(jobs):
        if jobclient_from_environ:
            jobclient_mock = mock.Mock(jobs=32)
            jobclient_mock.name = 'GNUMakeJobClient'
            return jobclient_mock
        return None

    instances = {'dummy instance': mock.Mock(metrics={'k': 'v'})}
    suites = [mock.Mock()]
    env_mock = mock.Mock()

    tr = TwisterRunner(instances, suites, env=env_mock)
    tr.options.retry_failed = 2
    tr.options.retry_interval = 10
    tr.options.retry_build_errors = True
    tr.options.jobs = None
    tr.options.build_only = None
    # Apply the per-scenario option overrides on top of the defaults above.
    for k, v in options.items():
        setattr(tr.options, k, v)
    tr.update_counting_before_pipeline = mock.Mock()
    tr.execute = mock.Mock()
    tr.show_brief = mock.Mock()

    gnumakejobserver_mock = mock.Mock()
    gnumakejobserver_mock().name='GNUMakeJobServer'
    jobclient_mock = mock.Mock()
    jobclient_mock().name='JobClient'

    # Pre-populate the done queue with one finished instance to merge back.
    pipeline_q = queue.LifoQueue()
    done_q = queue.LifoQueue()
    done_instance = mock.Mock(
        metrics={'k2': 'v2'},
        execution_time=30
    )
    done_instance.name='dummy instance'
    done_q.put(done_instance)
    manager_mock = mock.Mock()
    manager_mock().LifoQueue = mock.Mock(
        side_effect=iter([pipeline_q, done_q])
    )

    results_mock = mock.Mock()
    results_mock().error = 1
    results_mock().iteration = 0
    results_mock().failed = 2
    results_mock().total = 9

    with mock.patch('twisterlib.runner.ExecutionCounter', results_mock), \
         mock.patch('twisterlib.runner.BaseManager', manager_mock), \
         mock.patch('twisterlib.runner.GNUMakeJobClient.from_environ',
                    mock_client_from_environ), \
         mock.patch('twisterlib.runner.GNUMakeJobServer',
                    gnumakejobserver_mock), \
         mock.patch('twisterlib.runner.JobClient', jobclient_mock), \
         mock.patch('multiprocessing.cpu_count', return_value=8), \
         mock.patch('sys.platform', platform), \
         mock.patch('time.sleep', mock.Mock()), \
         mock.patch('os.name', os_name):
        tr.run()

    assert f'JOBS: {expected_jobs}' in caplog.text

    assert tr.jobserver.name == expected_jobserver

    # Metrics from the done queue must be merged into the original instance.
    assert tr.instances['dummy instance'].metrics == {
        'k': 'v',
        'k2': 'v2',
        'handler_time': 30,
        'unrecognized': []
    }

    assert results_mock().error == 0
def test_twisterrunner_update_counting_before_pipeline():
    """Pre-pipeline counting should tally static filters, skips and errors only."""
    instances = {
        # Runtime-filtered: not counted before the pipeline runs.
        'dummy1': mock.Mock(
            status=TwisterStatus.FILTER,
            reason='runtime filter',
            testsuite=mock.Mock(
                testcases=[mock.Mock()]
            )
        ),
        # Statically filtered: counted as skipped with all its testcases.
        'dummy2': mock.Mock(
            status=TwisterStatus.FILTER,
            reason='static filter',
            testsuite=mock.Mock(
                testcases=[mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()]
            )
        ),
        'dummy3': mock.Mock(
            status=TwisterStatus.ERROR,
            reason='error',
            testsuite=mock.Mock(
                testcases=[mock.Mock()]
            )
        ),
        'dummy4': mock.Mock(
            status=TwisterStatus.PASS,
            reason='OK',
            testsuite=mock.Mock(
                testcases=[mock.Mock()]
            )
        ),
        'dummy5': mock.Mock(
            status=TwisterStatus.SKIP,
            reason=None,
            testsuite=mock.Mock(
                testcases=[mock.Mock()]
            )
        )
    }
    suites = [mock.Mock()]
    env_mock = mock.Mock()

    tr = TwisterRunner(instances, suites, env=env_mock)
    tr.results = mock.Mock(
        skipped_filter = 0,
        skipped_configs = 0,
        skipped_cases = 0,
        cases = 0,
        error = 0
    )

    tr.update_counting_before_pipeline()

    assert tr.results.skipped_filter == 1
    assert tr.results.skipped_configs == 1
    assert tr.results.skipped_cases == 4
    assert tr.results.cases == 4
    assert tr.results.error == 1
def test_twisterrunner_show_brief(caplog):
    """show_brief() should log a summary of selected/skipped configurations."""
    instances = {f'dummy{i}': mock.Mock() for i in range(1, 6)}
    suites = [mock.Mock() for _ in range(2)]

    runner = TwisterRunner(instances, suites, env=mock.Mock())
    runner.results = mock.Mock(
        skipped_filter=3,
        skipped_configs=4,
        skipped_cases=0,
        cases=0,
        error=0
    )

    runner.show_brief()

    expected = (
        '2 test scenarios (5 test instances) selected,'
        ' 4 configurations skipped (3 by static filter, 1 at runtime).'
    )
    assert expected in caplog.text
# (build_only, test_only, retry_build_errors,
#  ordered pipeline 'op' messages expected to be enqueued)
TESTDATA_18 = [
    (False, False, False, [{'op': 'cmake', 'test': mock.ANY}]),
    (False, False, True, [{'op': 'filter', 'test': mock.ANY},
                          {'op': 'cmake', 'test': mock.ANY}]),
    (False, True, True, [{'op': 'run', 'test': mock.ANY},
                         {'op': 'run', 'test': mock.ANY}]),
    (False, True, False, [{'op': 'run', 'test': mock.ANY}]),
    (True, True, False, [{'op': 'cmake', 'test': mock.ANY}]),
    (True, True, True, [{'op': 'filter', 'test': mock.ANY},
                        {'op': 'cmake', 'test': mock.ANY}]),
    (True, False, True, [{'op': 'filter', 'test': mock.ANY},
                         {'op': 'cmake', 'test': mock.ANY}]),
    (True, False, False, [{'op': 'cmake', 'test': mock.ANY}]),
]
@pytest.mark.parametrize(
    'build_only, test_only, retry_build_errors, expected_pipeline_elements',
    TESTDATA_18,
    ids=['none', 'retry', 'test+retry', 'test', 'build+test',
         'build+test+retry', 'build+retry', 'build']
)
def test_twisterrunner_add_tasks_to_queue(
    build_only,
    test_only,
    retry_build_errors,
    expected_pipeline_elements
):
    """add_tasks_to_queue() should enqueue the right op for each instance status.

    Fix: removed leftover debug ``print()`` calls that polluted test output.
    """
    # Identity stage mapping keeps the filter-stage assertions simple.
    def mock_get_cmake_filter_stages(filter, keys):
        return [filter]

    instances = {
        'dummy1': mock.Mock(run=True, retries=0, status=TwisterStatus.PASS, build_dir="/tmp"),
        'dummy2': mock.Mock(run=True, retries=0, status=TwisterStatus.SKIP, build_dir="/tmp"),
        'dummy3': mock.Mock(run=True, retries=0, status=TwisterStatus.FILTER, build_dir="/tmp"),
        'dummy4': mock.Mock(run=True, retries=0, status=TwisterStatus.ERROR, build_dir="/tmp"),
        'dummy5': mock.Mock(run=True, retries=0, status=TwisterStatus.FAIL, build_dir="/tmp")
    }
    instances['dummy4'].testsuite.filter = 'some'
    instances['dummy5'].testsuite.filter = 'full'
    suites = [mock.Mock(), mock.Mock()]
    env_mock = mock.Mock()

    tr = TwisterRunner(instances, suites, env=env_mock)
    tr.get_cmake_filter_stages = mock.Mock(
        side_effect=mock_get_cmake_filter_stages
    )

    pipeline_mock = mock.Mock()

    tr.add_tasks_to_queue(
        pipeline_mock,
        build_only,
        test_only,
        retry_build_errors
    )

    # build_only must flip every instance's run flag, and vice versa.
    assert all(
        [build_only != instance.run for instance in instances.values()]
    )

    tr.get_cmake_filter_stages.assert_any_call('full', mock.ANY)
    if retry_build_errors:
        tr.get_cmake_filter_stages.assert_any_call('some', mock.ANY)

    assert pipeline_mock.put.call_args_list == \
           [mock.call(el) for el in expected_pipeline_elements]
# sys.platform values to run pipeline_mgr under.
TESTDATA_19 = [
    ('linux'),
    ('nt')
]
@pytest.mark.parametrize(
    'platform',
    TESTDATA_19,
)
def test_twisterrunner_pipeline_mgr(mocked_jobserver, platform):
    """pipeline_mgr() should drain the queue; on Linux it holds a job slot."""
    counter = 0

    # Yield five tasks, then signal an empty queue to end the loop.
    def mock_get_nowait():
        nonlocal counter
        counter += 1
        if counter > 5:
            raise queue.Empty()
        return {'test': 'dummy'}

    instances = {}
    suites = []
    env_mock = mock.Mock()

    tr = TwisterRunner(instances, suites, env=env_mock)
    tr.jobserver = mock.Mock(
        get_job=mock.Mock(
            return_value=nullcontext()
        )
    )

    pipeline_mock = mock.Mock()
    pipeline_mock.get_nowait = mock.Mock(side_effect=mock_get_nowait)
    done_queue_mock = mock.Mock()
    lock_mock = mock.Mock()
    results_mock = mock.Mock()

    with mock.patch('sys.platform', platform), \
         mock.patch('twisterlib.runner.ProjectBuilder',\
                    return_value=mock.Mock()) as pb:
        tr.pipeline_mgr(pipeline_mock, done_queue_mock, lock_mock, results_mock)

    # One ProjectBuilder.process() call per dequeued task.
    assert len(pb().process.call_args_list) == 5

    if platform == 'linux':
        tr.jobserver.get_job.assert_called_once()
def test_twisterrunner_execute(caplog):
    """execute() should start workers and terminate them on KeyboardInterrupt."""
    counter = 0

    # Let three joins succeed, then simulate a Ctrl-C on the fourth.
    def mock_join():
        nonlocal counter
        counter += 1
        if counter > 3:
            raise KeyboardInterrupt()

    instances = {}
    suites = []
    env_mock = mock.Mock()

    tr = TwisterRunner(instances, suites, env=env_mock)
    tr.add_tasks_to_queue = mock.Mock()
    tr.jobs = 5

    process_mock = mock.Mock()
    process_mock().join = mock.Mock(side_effect=mock_join)
    process_mock().exitcode = 0
    pipeline_mock = mock.Mock()
    done_mock = mock.Mock()

    with mock.patch('twisterlib.runner.Process', process_mock):
        tr.execute(pipeline_mock, done_mock)

    assert 'Execution interrupted' in caplog.text

    # All five workers started; joins stop at the interrupt; all terminated.
    assert len(process_mock().start.call_args_list) == 5
    assert len(process_mock().join.call_args_list) == 4
    assert len(process_mock().terminate.call_args_list) == 5
# (testsuite filter expression, expected cmake filter stages)
TESTDATA_20 = [
    ('', []),
    ('not ARCH in ["x86", "arc"]', ['full']),
    ('dt_dummy(x, y)', ['dts']),
    ('not CONFIG_FOO', ['kconfig']),
    ('dt_dummy and CONFIG_FOO', ['dts', 'kconfig']),
]
@pytest.mark.parametrize(
    'filter, expected_result',
    TESTDATA_20,
    ids=['none', 'full', 'dts', 'kconfig', 'dts+kconfig']
)
def test_twisterrunner_get_cmake_filter_stages(filter, expected_result):
    """Filter expressions should map onto the proper cmake filter stages."""
    stages = TwisterRunner.get_cmake_filter_stages(filter, ['not', 'and'])
    # Stage ordering is irrelevant; compare as sorted lists.
    assert sorted(stages) == sorted(expected_result)
``` | /content/code_sandbox/scripts/tests/twister/test_runner.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 19,453 |
```python
#!/usr/bin/env python3
#
"""
Tests for handlers.py classes' methods
"""
import itertools
import mock
import os
import pytest
import signal
import subprocess
import sys
from contextlib import nullcontext
from importlib import reload
from serial import SerialException
from subprocess import CalledProcessError, TimeoutExpired
from types import SimpleNamespace
import twisterlib.harness
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
from twisterlib.error import TwisterException
from twisterlib.statuses import TwisterStatus
from twisterlib.handlers import (
Handler,
BinaryHandler,
DeviceHandler,
QEMUHandler,
SimulationHandler
)
from twisterlib.hardwaremap import (
DUT
)
@pytest.fixture
def mocked_instance(tmp_path):
    """Provide a TestInstance stand-in with a real build dir under tmp_path."""
    build_dir = tmp_path / 'build_dir'
    os.makedirs(build_dir)

    suite_mock = mock.Mock()
    type(suite_mock).source_dir = mock.PropertyMock(return_value='')
    type(suite_mock).timeout = mock.PropertyMock(return_value=60)

    platform_mock = mock.Mock()
    type(platform_mock).binaries = mock.PropertyMock(return_value=[])
    type(platform_mock).timeout_multiplier = mock.PropertyMock(return_value=2)

    instance = mock.Mock()
    type(instance).build_dir = mock.PropertyMock(return_value=str(build_dir))
    instance.testsuite = suite_mock
    instance.platform = platform_mock
    instance.status = TwisterStatus.NONE
    instance.reason = 'Unknown'

    return instance
@pytest.fixture
def faux_timer():
    """Provide a fake clock whose time() returns 1, 2, 3, ... on successive calls."""
    class _FakeClock:
        def __init__(self):
            self._ticks = 0

        def time(self):
            self._ticks += 1
            return self._ticks

    return _FakeClock()
# Each row: (fail serial import, fail pty import, os.name,
#            expected stdout fragments, expected raised error).
TESTDATA_1 = [
    (True, False, 'posix', ['Install pyserial python module with pip to use'
     ' --device-testing option.'], None),
    (False, True, 'nt', [], None),
    (True, True, 'posix', ['Install pyserial python module with pip to use'
     ' --device-testing option.'], ImportError),
]
@pytest.mark.parametrize(
    'fail_serial, fail_pty, os_name, expected_outs, expected_error',
    TESTDATA_1,
    ids=['import serial', 'import pty nt', 'import serial+pty posix']
)
def test_imports(
    capfd,
    fail_serial,
    fail_pty,
    os_name,
    expected_outs,
    expected_error
):
    """Verify twisterlib.handlers' import-time fallbacks for missing serial/pty."""
    # Import hook that makes the selected modules fail to import.
    class ImportRaiser:
        def find_spec(self, fullname, path, target=None):
            if fullname == 'serial' and fail_serial:
                raise ImportError()
            if fullname == 'pty' and fail_pty:
                raise ImportError()

    # Drop the cached modules so reload() actually re-imports them.
    modules_mock = sys.modules.copy()
    modules_mock['serial'] = None if fail_serial else modules_mock['serial']
    modules_mock['pty'] = None if fail_pty else modules_mock['pty']

    meta_path_mock = sys.meta_path[:]
    meta_path_mock.insert(0, ImportRaiser())

    with mock.patch('os.name', os_name), \
         mock.patch.dict('sys.modules', modules_mock, clear=True), \
         mock.patch('sys.meta_path', meta_path_mock), \
         pytest.raises(expected_error) if expected_error else nullcontext():
        reload(twisterlib.handlers)

    out, _ = capfd.readouterr()
    assert all([expected_out in out for expected_out in expected_outs])
def test_handler_final_handle_actions(mocked_instance):
    """Verify _final_handle_actions fails the instance on a RunID mismatch
    and does not overwrite an already-set failure reason on a second call."""
    instance = mocked_instance
    instance.testcases = [mock.Mock()]

    handler = Handler(mocked_instance)
    handler.suite_name_check = True

    # matched_run_id=False with run_id_exists=True models the mismatch case.
    harness = twisterlib.harness.Test()
    harness.status = mock.Mock()
    harness.detected_suite_names = mock.Mock()
    harness.matched_run_id = False
    harness.run_id_exists = True
    harness.recording = mock.Mock()

    handler_time = mock.Mock()

    handler._final_handle_actions(harness, handler_time)

    assert handler.instance.status == TwisterStatus.FAIL
    assert handler.instance.execution_time == handler_time
    assert handler.instance.reason == 'RunID mismatch'
    assert all(testcase.status == TwisterStatus.FAIL for
               testcase in handler.instance.testcases)

    # A pre-existing reason must survive a second invocation.
    handler.instance.reason = 'This reason shan\'t be changed.'
    handler._final_handle_actions(harness, handler_time)

    instance.assert_has_calls([mock.call.record(harness.recording)])
    assert handler.instance.reason == 'This reason shan\'t be changed.'
# Each row: (suite names detected by the harness,
#            whether _missing_suite_name should be invoked).
TESTDATA_2 = [
    (['dummy_testsuite_name'], False),
    ([], True),
    (['another_dummy_name', 'yet_another_dummy_name'], True),
]
@pytest.mark.parametrize(
    'detected_suite_names, should_be_called',
    TESTDATA_2,
    ids=['detected one expected', 'detected none', 'detected two unexpected']
)
def test_handler_verify_ztest_suite_name(
    mocked_instance,
    detected_suite_names,
    should_be_called
):
    """Verify _verify_ztest_suite_name reports missing/unexpected suite names."""
    instance = mocked_instance
    type(instance.testsuite).ztest_suite_names = ['dummy_testsuite_name']

    harness_status = TwisterStatus.PASS

    handler_time = mock.Mock()

    with mock.patch.object(Handler, '_missing_suite_name') as _missing_mocked:
        handler = Handler(instance)
        handler._verify_ztest_suite_name(
            harness_status,
            detected_suite_names,
            handler_time
        )

        if should_be_called:
            _missing_mocked.assert_called_once()
        else:
            _missing_mocked.assert_not_called()
def test_handler_missing_suite_name(mocked_instance):
    """Verify _missing_suite_name fails the instance and all its testcases."""
    instance = mocked_instance
    instance.testcases = [mock.Mock()]

    handler = Handler(mocked_instance)
    handler.suite_name_check = True

    expected_suite_names = ['dummy_testsuite_name']

    handler_time = mock.Mock()

    handler._missing_suite_name(expected_suite_names, handler_time)

    assert handler.instance.status == TwisterStatus.FAIL
    assert handler.instance.execution_time == handler_time
    assert handler.instance.reason == 'Testsuite mismatch'
    assert all(
        testcase.status == TwisterStatus.FAIL for testcase in handler.instance.testcases
    )
def test_handler_terminate(mocked_instance):
    """Verify terminate() SIGTERMs all child PIDs and tolerates vanished ones.

    A negative child PID makes the fake os.kill raise ProcessLookupError,
    which terminate() must survive.
    """
    def mock_kill_function(pid, sig):
        if pid < 0:
            raise ProcessLookupError

    instance = mocked_instance

    handler = Handler(instance)

    mock_process = mock.Mock()
    mock_child1 = mock.Mock(pid=1)
    mock_child2 = mock.Mock(pid=2)
    mock_process.children = mock.Mock(return_value=[mock_child1, mock_child2])

    mock_proc = mock.Mock(pid=0)
    mock_proc.terminate = mock.Mock(return_value=None)
    mock_proc.kill = mock.Mock(return_value=None)

    with mock.patch('psutil.Process', return_value=mock_process), \
         mock.patch(
        'os.kill',
        mock.Mock(side_effect=mock_kill_function)
    ) as mock_kill:
        handler.terminate(mock_proc)

        assert handler.terminated
        mock_proc.terminate.assert_called_once()
        mock_proc.kill.assert_called_once()
        mock_kill.assert_has_calls(
            [mock.call(1, signal.SIGTERM), mock.call(2, signal.SIGTERM)]
        )

        # Second pass: first child has already exited (PID -1 raises).
        mock_child_neg1 = mock.Mock(pid=-1)
        mock_process.children = mock.Mock(
            return_value=[mock_child_neg1, mock_child2]
        )
        handler.terminated = False
        mock_kill.reset_mock()

        handler.terminate(mock_proc)

        mock_kill.assert_has_calls(
            [mock.call(-1, signal.SIGTERM), mock.call(2, signal.SIGTERM)]
        )
def test_binaryhandler_try_kill_process_by_pid(mocked_instance):
    """Verify try_kill_process_by_pid reads the PID file, unlinks it,
    and SIGKILLs the recorded PID (tolerating an already-gone process)."""
    def mock_kill_function(pid, sig):
        if pid < 0:
            raise ProcessLookupError

    instance = mocked_instance

    handler = BinaryHandler(instance, 'build')
    handler.pid_fn = os.path.join('dummy', 'path', 'to', 'pid.pid')

    with mock.patch(
        'os.kill',
        mock.Mock(side_effect=mock_kill_function)
    ) as mock_kill, \
         mock.patch('os.unlink', mock.Mock()) as mock_unlink:
        # PID file containing a live PID.
        with mock.patch('builtins.open', mock.mock_open(read_data='1')):
            handler.try_kill_process_by_pid()

        mock_unlink.assert_called_once_with(
            os.path.join('dummy', 'path', 'to', 'pid.pid')
        )
        mock_kill.assert_called_once_with(1, signal.SIGKILL)

        mock_unlink.reset_mock()
        mock_kill.reset_mock()
        handler.pid_fn = os.path.join('dummy', 'path', 'to', 'pid.pid')

        # PID file containing a PID that raises ProcessLookupError.
        with mock.patch('builtins.open', mock.mock_open(read_data='-1')):
            handler.try_kill_process_by_pid()

        mock_unlink.assert_called_once_with(
            os.path.join('dummy', 'path', 'to', 'pid.pid')
        )
        mock_kill.assert_called_once_with(-1, signal.SIGKILL)
# Each row: (process stdout lines, harness mock, expected log-file writes,
#            expected harness.handle calls, whether fewer writes than lines
#            are expected (timeout), whether proc.wait should time out).
# NOTE(review): rows 2 and 3 ('timeout' and 'timeout with harness status')
# are byte-identical — presumably one was meant to differ (e.g. in harness
# status); verify against the intent of the original author.
TESTDATA_3 = [
    (
        [b'This\\r\\n', b'is\r', b'a short', b'file.'],
        mock.Mock(status=TwisterStatus.NONE, capture_coverage=False),
        [
            mock.call('This\\r\\n'),
            mock.call('is\r'),
            mock.call('a short'),
            mock.call('file.')
        ],
        [
            mock.call('This'),
            mock.call('is'),
            mock.call('a short'),
            mock.call('file.')
        ],
        None,
        False
    ),
    (
        [b'Too much.'] * 120,  # Should be more than the timeout
        mock.Mock(status=TwisterStatus.PASS, capture_coverage=False),
        None,
        None,
        True,
        False
    ),
    (
        [b'Too much.'] * 120,  # Should be more than the timeout
        mock.Mock(status=TwisterStatus.PASS, capture_coverage=False),
        None,
        None,
        True,
        False
    ),
    (
        [b'Too much.'] * 120,  # Should be more than the timeout
        mock.Mock(status=TwisterStatus.PASS, capture_coverage=True),
        None,
        None,
        False,
        True
    ),
]
@pytest.mark.parametrize(
    'proc_stdout, harness, expected_handler_calls,'
    ' expected_harness_calls, should_be_less, timeout_wait',
    TESTDATA_3,
    ids=[
        'no timeout',
        'timeout',
        'timeout with harness status',
        'timeout with capture_coverage, wait timeout'
    ]
)
def test_binaryhandler_output_handler(
    mocked_instance,
    faux_timer,
    proc_stdout,
    harness,
    expected_handler_calls,
    expected_harness_calls,
    should_be_less,
    timeout_wait
):
    """Verify _output_handler logs raw lines, feeds stripped lines to the
    harness, and terminates the process when proc.wait times out."""
    # stdout stub: replays the given lines once, then returns b'' (EOF).
    class MockStdout(mock.Mock):
        def __init__(self, text):
            super().__init__(text)
            self.text = text
            self.line_index = 0

        def readline(self):
            if self.line_index == len(self.text):
                self.line_index = 0
                return b''
            else:
                line = self.text[self.line_index]
                self.line_index += 1
                return line

    class MockProc(mock.Mock):
        def __init__(self, pid, stdout):
            super().__init__(pid, stdout)
            self.pid = mock.PropertyMock(return_value=pid)
            self.stdout = MockStdout(stdout)

        def wait(self, *args, **kwargs):
            if timeout_wait:
                raise TimeoutExpired('dummy cmd', 'dummyamount')

    handler = BinaryHandler(mocked_instance, 'build')
    handler.terminate = mock.Mock()
    handler.options = mock.Mock(timeout_multiplier=1)

    proc = MockProc(1, proc_stdout)

    with mock.patch(
        'builtins.open',
        mock.mock_open(read_data='')
    ) as mock_file, \
         mock.patch('time.time', side_effect=faux_timer.time):
        handler._output_handler(proc, harness)

        mock_file.assert_called_with(handler.log, 'wt')

    if expected_handler_calls:
        mock_file.return_value.write.assert_has_calls(expected_handler_calls)
    if expected_harness_calls:
        harness.handle.assert_has_calls(expected_harness_calls)
    if should_be_less is not None:
        if should_be_less:
            assert mock_file.return_value.write.call_count < len(proc_stdout)
        else:
            assert mock_file.return_value.write.call_count == len(proc_stdout)
    if timeout_wait:
        handler.terminate.assert_called_once_with(proc)
# Each row: (robot_test, call_make_run, enable_valgrind, seed, extra_args,
#            expected command list).
TESTDATA_4 = [
    (True, False, True, None, None,
     ['valgrind', '--error-exitcode=2', '--leak-check=full',
      f'--suppressions={ZEPHYR_BASE}/scripts/valgrind.supp',
      '--log-file=build_dir/valgrind.log', '--track-origins=yes',
      'generator']),
    (False, True, False, 123, None, ['generator', 'run', '--seed=123']),
    (False, False, False, None, ['ex1', 'ex2'],
     ['build_dir/zephyr/zephyr.exe', 'ex1', 'ex2']),
]
@pytest.mark.parametrize(
    'robot_test, call_make_run, enable_valgrind, seed,'
    ' extra_args, expected',
    TESTDATA_4,
    ids=['robot, valgrind', 'make run, seed', 'binary, extra']
)
def test_binaryhandler_create_command(
    mocked_instance,
    robot_test,
    call_make_run,
    enable_valgrind,
    seed,
    extra_args,
    expected
):
    """Verify _create_command assembles the right invocation for valgrind,
    'make run', and direct-binary execution modes."""
    handler = BinaryHandler(mocked_instance, 'build')
    handler.generator_cmd = 'generator'
    handler.binary = 'bin'
    handler.call_make_run = call_make_run
    handler.options = SimpleNamespace()
    handler.options.enable_valgrind = enable_valgrind
    handler.options.coverage_basedir = "coverage_basedir"
    handler.seed = seed
    handler.extra_test_args = extra_args
    handler.build_dir = 'build_dir'
    handler.instance.sysbuild = False
    handler.platform = SimpleNamespace()
    handler.platform.resc = "file.resc"
    handler.platform.uart = "uart"

    command = handler._create_command(robot_test)

    assert command == expected
# Each row: (enable_asan, enable_lsan, enable_ubsan).
TESTDATA_5 = [
    (False, False, False),
    (True, False, False),
    (True, True, False),
    (False, False, True),
]
@pytest.mark.parametrize(
    'enable_asan, enable_lsan, enable_ubsan',
    TESTDATA_5,
    ids=['none', 'asan', 'asan, lsan', 'ubsan']
)
def test_binaryhandler_create_env(
    mocked_instance,
    enable_asan,
    enable_lsan,
    enable_ubsan
):
    """Verify _create_env extends ASAN_OPTIONS/UBSAN_OPTIONS per sanitizer
    flags while preserving pre-existing environment content."""
    handler = BinaryHandler(mocked_instance, 'build')
    handler.options = mock.Mock(
        enable_asan=enable_asan,
        enable_lsan=enable_lsan,
        enable_ubsan=enable_ubsan
    )

    env = {
        'example_env_var': True,
        'ASAN_OPTIONS': 'dummy=dummy:',
        'UBSAN_OPTIONS': 'dummy=dummy:'
    }

    with mock.patch('os.environ', env):
        res = handler._create_env()

    # Unrelated variables must pass through untouched.
    assert env['example_env_var'] == res['example_env_var']

    if enable_ubsan:
        assert env['UBSAN_OPTIONS'] in res['UBSAN_OPTIONS']
        assert 'log_path=stdout:' in res['UBSAN_OPTIONS']
        assert 'halt_on_error=1:' in res['UBSAN_OPTIONS']

    if enable_asan:
        assert env['ASAN_OPTIONS'] in res['ASAN_OPTIONS']
        assert 'log_path=stdout:' in res['ASAN_OPTIONS']
        if not enable_lsan:
            assert 'detect_leaks=0' in res['ASAN_OPTIONS']
# Each row: (harness status, terminated, returncode, enable_valgrind,
#            expected instance status, expected reason,
#            whether missing testcases should be marked blocked).
TESTDATA_6 = [
    (TwisterStatus.NONE, False, 2, True, TwisterStatus.FAIL, 'Valgrind error', False),
    (TwisterStatus.NONE, False, 1, False, TwisterStatus.FAIL, 'Failed', False),
    (TwisterStatus.FAIL, False, 0, False, TwisterStatus.FAIL, 'Failed', False),
    ('success', False, 0, False, 'success', 'Unknown', False),
    (TwisterStatus.NONE, True, 1, True, TwisterStatus.FAIL, 'Timeout', True),
]
@pytest.mark.parametrize(
    'harness_status, terminated, returncode, enable_valgrind,'
    ' expected_status, expected_reason, do_add_missing',
    TESTDATA_6,
    ids=['valgrind error', 'failed', 'harness failed', 'custom success', 'no status']
)
def test_binaryhandler_update_instance_info(
    mocked_instance,
    harness_status,
    terminated,
    returncode,
    enable_valgrind,
    expected_status,
    expected_reason,
    do_add_missing
):
    """Verify _update_instance_info maps harness status / returncode /
    termination state to the instance's status and failure reason."""
    handler = BinaryHandler(mocked_instance, 'build')
    handler_time = 59
    handler.terminated = terminated
    handler.returncode = returncode
    handler.options = mock.Mock(enable_valgrind=enable_valgrind)
    missing_mock = mock.Mock()
    handler.instance.add_missing_case_status = missing_mock

    handler._update_instance_info(harness_status, handler_time)

    assert handler.instance.execution_time == handler_time
    assert handler.instance.status == expected_status
    assert handler.instance.reason == expected_reason
    if do_add_missing:
        missing_mock.assert_called_once_with(TwisterStatus.BLOCK, expected_reason)
# Each row: (is_robot_test, coverage, isatty).
TESTDATA_7 = [
    (True, False, False),
    (False, True, False),
    (False, False, True),
]
@pytest.mark.parametrize(
    'is_robot_test, coverage, isatty',
    TESTDATA_7,
    ids=['robot test', 'coverage', 'isatty']
)
def test_binaryhandler_handle(
    mocked_instance,
    caplog,
    is_robot_test,
    coverage,
    isatty
):
    """Verify handle() delegates robot tests to the harness, otherwise spawns
    and joins the output thread, and restores the tty when attached."""
    thread_mock_obj = mock.Mock()

    def mock_popen(command, *args, **kwargs,):
        return mock.Mock(
            __enter__=mock.Mock(return_value=mock.Mock(pid=0, returncode=0)),
            __exit__=mock.Mock(return_value=None)
        )

    def mock_thread(target, *args, **kwargs):
        return thread_mock_obj

    handler = BinaryHandler(mocked_instance, 'build')
    handler.sourcedir = 'source_dir'
    handler.build_dir = 'build_dir'
    handler.name = 'Dummy Name'
    handler._create_command = mock.Mock(return_value=['dummy', 'command'])
    handler._create_env = mock.Mock(return_value=[])
    handler._update_instance_info = mock.Mock()
    handler._final_handle_actions = mock.Mock()
    handler.terminate = mock.Mock()
    handler.try_kill_process_by_pid = mock.Mock()
    handler.options = mock.Mock(coverage=coverage)

    robot_mock = mock.Mock()
    harness = mock.Mock(is_robot_test=is_robot_test, run_robot_test=robot_mock)

    popen_mock = mock.Mock(side_effect=mock_popen)
    thread_mock = mock.Mock(side_effect=mock_thread)
    call_mock = mock.Mock()

    with mock.patch('subprocess.call', call_mock), \
         mock.patch('subprocess.Popen', popen_mock), \
         mock.patch('threading.Thread', thread_mock), \
         mock.patch('sys.stdout.isatty', return_value=isatty):
        handler.handle(harness)

    if is_robot_test:
        robot_mock.assert_called_once_with(['dummy', 'command'], mock.ANY)
        return

    assert 'Spawning BinaryHandler Thread for Dummy Name' in caplog.text

    thread_mock_obj.join.assert_called()
    handler._update_instance_info.assert_called_once()
    handler._final_handle_actions.assert_called_once()

    if isatty:
        # Terminal settings must be restored after the run.
        call_mock.assert_any_call(['stty', 'sane'], stdin=mock.ANY)
# Each row: (simulation type, has pid file, expected call_make_run,
#            pid file points at the binary, expected ready flag).
TESTDATA_8 = [
    ('renode', True, True, False, False),
    ('native', False, False, False, True),
    ('build', False, True, False, False),
]
@pytest.mark.parametrize(
    'type_str, is_pid_fn, expected_call_make_run, is_binary, expected_ready',
    TESTDATA_8,
    ids=[t[0] for t in TESTDATA_8]
)
def test_simulationhandler_init(
    mocked_instance,
    type_str,
    is_pid_fn,
    expected_call_make_run,
    is_binary,
    expected_ready
):
    """Verify SimulationHandler's constructor configures run mode, readiness
    and pid_fn location per simulation type."""
    handler = SimulationHandler(mocked_instance, type_str)

    assert handler.call_make_run == expected_call_make_run
    assert handler.ready == expected_ready

    if is_pid_fn:
        assert handler.pid_fn == os.path.join(mocked_instance.build_dir,
                                              'renode.pid')
    if is_binary:
        assert handler.pid_fn == os.path.join(mocked_instance.build_dir,
                                              'zephyr', 'zephyr.exe')
# Each row: (successful reads, empty in_waiting polls, OSError polls,
#            readline errors, polls before halt, polls before harness status,
#            ended by halt event / serial close / harness status,
#            expected number of lines handed to the harness).
TESTDATA_9 = [
    (3, 2, 0, 0, 3, -1, True, False, False, 1),
    (4, 1, 0, 0, -1, -1, False, True, False, 0),
    (5, 0, 1, 2, -1, 4, False, False, True, 3)
]
@pytest.mark.parametrize(
    'success_count, in_waiting_count, oserror_count, readline_error_count,'
    ' haltless_count, statusless_count, end_by_halt, end_by_close,'
    ' end_by_status, expected_line_count',
    TESTDATA_9,
    ids=[
        'halt event',
        'serial closes',
        'harness status with errors'
    ]
)
def test_devicehandler_monitor_serial(
    mocked_instance,
    success_count,
    in_waiting_count,
    oserror_count,
    readline_error_count,
    haltless_count,
    statusless_count,
    end_by_halt,
    end_by_close,
    end_by_status,
    expected_line_count
):
    """Verify monitor_serial forwards decoded lines to the harness, survives
    transient read errors, and terminates on halt / close / harness status."""
    is_open_iter = iter(lambda: True, False)  # serial stays "open" forever
    # readline first raises the configured errors, then yields real lines.
    line_iter = [
        TypeError('dummy TypeError') if x % 2 else \
        SerialException('dummy SerialException') for x in range(
            readline_error_count
        )
    ] + [
        f'line no {idx}'.encode('utf-8') for idx in range(success_count)
    ]
    in_waiting_iter = [False] * in_waiting_count + [
        TypeError('dummy TypeError')
    ] if end_by_close else (
        [OSError('dummy OSError')] * oserror_count + [False] * in_waiting_count
    ) + [True] * (success_count + readline_error_count)

    is_set_iter = [False] * haltless_count + [True] \
        if end_by_halt else iter(lambda: False, True)

    status_iter = [TwisterStatus.NONE] * statusless_count + [TwisterStatus.PASS] \
        if end_by_status else iter(lambda: TwisterStatus.NONE, TwisterStatus.PASS)

    halt_event = mock.Mock(is_set=mock.Mock(side_effect=is_set_iter))
    ser = mock.Mock(
        isOpen=mock.Mock(side_effect=is_open_iter),
        readline=mock.Mock(side_effect=line_iter)
    )
    type(ser).in_waiting = mock.PropertyMock(
        side_effect=in_waiting_iter,
        return_value=False
    )
    harness = mock.Mock(capture_coverage=False)
    type(harness).status = mock.PropertyMock(side_effect=status_iter)

    handler = DeviceHandler(mocked_instance, 'build')
    handler.options = mock.Mock(enable_coverage=not end_by_status)

    with mock.patch('builtins.open', mock.mock_open(read_data='')):
        handler.monitor_serial(ser, halt_event, harness)

    if not end_by_close:
        ser.close.assert_called_once()

    harness.handle.assert_has_calls(
        [mock.call(f'line no {idx}') for idx in range(expected_line_count)]
    )
# Each row: (platform name, required fixture, list of DUT mocks, expected
# result — an index into the DUT list, None, or an exception type).
TESTDATA_10 = [
    (
        'dummy_platform',
        'dummy fixture',
        [
            mock.Mock(
                fixtures=[],
                platform='dummy_platform',
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='another_platform',
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=None,
                serial=None,
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=mock.Mock(),
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=mock.Mock(),
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            )
        ],
        3
    ),
    (
        'dummy_platform',
        'dummy fixture',
        [
            mock.Mock(
                fixtures=[],
                platform='dummy_platform',
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='another_platform',
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=None,
                serial=None,
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            ),
            # This DUT has one recorded failure, so the next one is preferred.
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=mock.Mock(),
                available=1,
                failures=1,
                counter_increment=mock.Mock(),
                counter=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=mock.Mock(),
                available=1,
                failures=0,
                counter_increment=mock.Mock(),
                counter=0
            )
        ],
        4
    ),
    (
        'dummy_platform',
        'dummy fixture',
        [],
        TwisterException
    ),
    (
        'dummy_platform',
        'dummy fixture',
        [
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial_pty=mock.Mock(),
                counter_increment=mock.Mock(),
                failures=0,
                available=0
            ),
            mock.Mock(
                fixtures=['another fixture'],
                platform='dummy_platform',
                serial_pty=mock.Mock(),
                counter_increment=mock.Mock(),
                failures=0,
                available=0
            ),
            mock.Mock(
                fixtures=['dummy fixture'],
                platform='dummy_platform',
                serial=mock.Mock(),
                counter_increment=mock.Mock(),
                failures=0,
                available=0
            ),
            mock.Mock(
                fixtures=['another fixture'],
                platform='dummy_platform',
                serial=mock.Mock(),
                counter_increment=mock.Mock(),
                failures=0,
                available=0
            )
        ],
        None
    )
]
@pytest.mark.parametrize(
    'platform_name, fixture, duts, expected',
    TESTDATA_10,
    ids=['two good duts, select the first one',
         'two duts, the first was failed once, select the second not failed',
         'exception - no duts', 'no available duts']
)
def test_devicehandler_device_is_available(
    mocked_instance,
    platform_name,
    fixture,
    duts,
    expected
):
    """Verify device_is_available picks a matching DUT, returns None when
    none is free, and raises when the DUT list is empty."""
    mocked_instance.platform.name = platform_name
    mocked_instance.testsuite.harness_config = {'fixture': fixture}

    handler = DeviceHandler(mocked_instance, 'build')
    handler.duts = duts

    if isinstance(expected, int):
        # An int names the index of the DUT that must be selected.
        device = handler.device_is_available(mocked_instance)
        assert device == duts[expected]
        assert device.available == 0
        device.counter_increment.assert_called_once()
    elif expected is None:
        device = handler.device_is_available(mocked_instance)
        assert device is None
    elif isinstance(expected, type):
        with pytest.raises(expected):
            device = handler.device_is_available(mocked_instance)
    else:
        assert False
def test_devicehandler_make_dut_available(mocked_instance):
    """make_dut_available() must mark only the matching DUT as available."""
    shared_serial = mock.Mock(name='dummy_serial')
    duts = [
        mock.Mock(available=0, serial=shared_serial, serial_pty=None),
        mock.Mock(available=0, serial=None, serial_pty=shared_serial),
        mock.Mock(
            available=0,
            serial=mock.Mock('another_serial'),
            serial_pty=None
        )
    ]

    handler = DeviceHandler(mocked_instance, 'build')
    handler.duts = duts

    # Release the DUT identified by its serial_pty.
    handler.make_dut_available(duts[1])

    freed = sum(1 for dut in handler.duts if dut.available == 1)
    assert freed == 1
    assert handler.duts[0].available == 0
    assert handler.duts[2].available == 0

    # Release the DUT identified by its serial; the third one stays taken.
    handler.make_dut_available(duts[0])

    freed = sum(1 for dut in handler.duts if dut.available == 1)
    assert freed == 2
    assert handler.duts[2].available == 0
# Each row: (mock process handed to Popen, whether communicate should time out).
TESTDATA_11 = [
    (mock.Mock(pid=0, returncode=0), False),
    (mock.Mock(pid=0, returncode=1), False),
    (mock.Mock(pid=0, returncode=1), True)
]
@pytest.mark.parametrize(
    'mock_process, raise_timeout',
    TESTDATA_11,
    ids=['proper script', 'error', 'timeout']
)
def test_devicehandler_run_custom_script(caplog, mock_process, raise_timeout):
    """Verify run_custom_script logs success, script failure, and timeout
    (killing the process on timeout)."""
    def raise_timeout_fn(timeout=-1):
        if raise_timeout and timeout != -1:
            raise subprocess.TimeoutExpired(None, timeout)
        else:
            return mock.Mock(), mock.Mock()

    def assert_popen(command, *args, **kwargs):
        return mock.Mock(
            __enter__=mock.Mock(return_value=mock_process),
            __exit__=mock.Mock(return_value=None)
        )

    mock_process.communicate = mock.Mock(side_effect=raise_timeout_fn)

    script = [os.path.join('test', 'script', 'path'), 'arg']
    timeout = 60

    with mock.patch('subprocess.Popen', side_effect=assert_popen):
        DeviceHandler.run_custom_script(script, timeout)

    if raise_timeout:
        assert all(
            t in caplog.text.lower() for t in [str(script), 'timed out']
        )
        # After the timeout the process must be killed and drained.
        mock_process.assert_has_calls(
            [
                mock.call.communicate(timeout=timeout),
                mock.call.kill(),
                mock.call.communicate()
            ]
        )
    elif mock_process.returncode == 0:
        assert not any([r.levelname == 'ERROR' for r in caplog.records])
    else:
        assert 'timed out' not in caplog.text.lower()
        assert 'custom script failure' in caplog.text.lower()
# Each row: (number of unavailable polls before success, whether
#            device_is_available raises).
TESTDATA_12 = [
    (0, False),
    (4, False),
    (0, True)
]
@pytest.mark.parametrize(
    'num_of_failures, raise_exception',
    TESTDATA_12,
    ids=['no failures', 'with failures', 'exception']
)
def test_devicehandler_get_hardware(
    mocked_instance,
    caplog,
    num_of_failures,
    raise_exception
):
    """Verify get_hardware retries until a device is available and fails the
    instance when device_is_available raises TwisterException."""
    expected_hardware = mock.Mock()

    # Stand-in for device_is_available: report "busy" num_of_failures times
    # (tracked on handler.no), then hand out the expected hardware.
    def mock_availability(handler, instance):
        if raise_exception:
            raise TwisterException('dummy message')
        if handler.no:
            handler.no -= 1
            return None
        return expected_hardware

    handler = DeviceHandler(mocked_instance, 'build')
    handler.no = num_of_failures

    with mock.patch.object(
        DeviceHandler,
        'device_is_available',
        mock_availability
    ):
        hardware = handler.get_hardware()

    if raise_exception:
        assert 'dummy message' in caplog.text.lower()
        assert mocked_instance.status == TwisterStatus.FAIL
        assert mocked_instance.reason == 'dummy message'
    else:
        assert hardware == expected_hardware
# Each row: (value of --west-flash, runner name, hardware product string,
#            expected flash command).
TESTDATA_13 = [
    (
        None,
        None,
        None,
        ['generator_cmd', '-C', '$build_dir', 'flash']
    ),
    (
        [],
        None,
        None,
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir']
    ),
    (
        '--dummy',
        None,
        None,
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--', '--dummy']
    ),
    (
        '--dummy1,--dummy2',
        None,
        None,
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--', '--dummy1', '--dummy2']
    ),
    (
        None,
        'runner',
        'product',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'runner', 'param1', 'param2']
    ),
    (
        None,
        'pyocd',
        'product',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'pyocd', 'param1', 'param2', '--', '--dev-id', 12345]
    ),
    (
        None,
        'nrfjprog',
        'product',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'nrfjprog', 'param1', 'param2', '--', '--dev-id', 12345]
    ),
    (
        None,
        'openocd',
        'STM32 STLink',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'openocd', 'param1', 'param2',
         '--', '--cmd-pre-init', 'hla_serial 12345']
    ),
    (
        None,
        'openocd',
        'STLINK-V3',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'openocd', 'param1', 'param2',
         '--', '--cmd-pre-init', 'hla_serial 12345']
    ),
    (
        None,
        'openocd',
        'EDBG CMSIS-DAP',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'openocd', 'param1', 'param2',
         '--', '--cmd-pre-init', 'cmsis_dap_serial 12345']
    ),
    (
        None,
        'jlink',
        'product',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'jlink', '--dev-id', 12345,
         'param1', 'param2']
    ),
    (
        None,
        'stm32cubeprogrammer',
        'product',
        ['west', 'flash', '--skip-rebuild', '-d', '$build_dir',
         '--runner', 'stm32cubeprogrammer', '--tool-opt=sn=12345',
         'param1', 'param2']
    ),
]
# Whether the device id comes from probe_id (True) or id (False).
# Fix: plain booleans — the original '[(True), (False)]' parentheses
# misleadingly suggested 1-tuples while evaluating to the same values.
TESTDATA_13_2 = [True, False]
@pytest.mark.parametrize(
    'self_west_flash, runner,'
    ' hardware_product_name, expected',
    TESTDATA_13,
    ids=['generator', '--west-flash', 'one west flash value',
         'multiple west flash values', 'generic runner', 'pyocd',
         'nrfjprog', 'openocd, STM32 STLink', 'openocd, STLINK-v3',
         'openocd, EDBG CMSIS-DAP', 'jlink', 'stm32cubeprogrammer']
)
@pytest.mark.parametrize('hardware_probe', TESTDATA_13_2, ids=['probe', 'id'])
def test_devicehandler_create_command(
    mocked_instance,
    self_west_flash,
    runner,
    hardware_probe,
    hardware_product_name,
    expected
):
    """Verify _create_command builds the correct west/generator flash command
    for each supported runner and device-id source."""
    handler = DeviceHandler(mocked_instance, 'build')
    handler.options = mock.Mock(west_flash=self_west_flash)
    handler.generator_cmd = 'generator_cmd'

    # '$build_dir' placeholders resolve to the handler's actual build dir.
    expected = [handler.build_dir if val == '$build_dir' else
                val for val in expected]

    hardware = mock.Mock(
        product=hardware_product_name,
        probe_id=12345 if hardware_probe else None,
        id=12345 if not hardware_probe else None,
        runner_params=['param1', 'param2']
    )

    command = handler._create_command(runner, hardware)

    assert command == expected
# Each row: (harness status, flash error, expected instance status,
#            expected reason, whether missing testcases get blocked).
TESTDATA_14 = [
    ('success', False, 'success', 'Unknown', False),
    (TwisterStatus.FAIL, False, TwisterStatus.FAIL, 'Failed', True),
    (TwisterStatus.ERROR, False, TwisterStatus.ERROR, 'Unknown', True),
    (TwisterStatus.NONE, True, TwisterStatus.NONE, 'Unknown', False),
    (TwisterStatus.NONE, False, TwisterStatus.FAIL, 'Timeout', True),
]
@pytest.mark.parametrize(
    'harness_status, flash_error,'
    ' expected_status, expected_reason, do_add_missing',
    TESTDATA_14,
    ids=['custom success', 'failed', 'error', 'flash error', 'no status']
)
def test_devicehandler_update_instance_info(
    mocked_instance,
    harness_status,
    flash_error,
    expected_status,
    expected_reason,
    do_add_missing
):
    """Verify DeviceHandler._update_instance_info maps harness status and
    flash errors to instance status/reason and blocks missing cases."""
    handler = DeviceHandler(mocked_instance, 'build')
    handler_time = 59
    missing_mock = mock.Mock()
    handler.instance.add_missing_case_status = missing_mock

    handler._update_instance_info(harness_status, handler_time, flash_error)

    assert handler.instance.execution_time == handler_time
    assert handler.instance.status == expected_status
    assert handler.instance.reason == expected_reason

    if do_add_missing:
        # NOTE(review): asserts the literal 'blocked' rather than a
        # TwisterStatus member — presumably equal by value; confirm against
        # TwisterStatus.BLOCK's definition.
        missing_mock.assert_called_with('blocked', expected_reason)
# Each row: (serial device, serial pty, pty process mock, exception raised by
#            serial.Serial, expected non-None result, pty process terminated,
#            device expected back in the pool — unused by the asserts below).
TESTDATA_15 = [
    ('dummy device', 'dummy pty', None, None, True, False, False),
    (
        'dummy device',
        'dummy pty',
        mock.Mock(communicate=mock.Mock(return_value=('', ''))),
        SerialException,
        False,
        True,
        'dummy pty'
    ),
    (
        'dummy device',
        None,
        None,
        SerialException,
        False,
        False,
        'dummy device'
    )
]
@pytest.mark.parametrize(
    'serial_device, serial_pty, ser_pty_process, expected_exception,'
    ' expected_result, terminate_ser_pty_process, make_available',
    TESTDATA_15,
    ids=['valid', 'serial pty process', 'no serial pty']
)
def test_devicehandler_create_serial_connection(
    mocked_instance,
    serial_device,
    serial_pty,
    ser_pty_process,
    expected_exception,
    expected_result,
    terminate_ser_pty_process,
    make_available
):
    """Verify _create_serial_connection returns a connection on success and,
    on SerialException, fails the instance, releases the DUT and cleans up
    any serial-pty helper process."""
    def mock_serial(*args, **kwargs):
        if expected_exception:
            raise expected_exception('')
        return expected_result

    handler = DeviceHandler(mocked_instance, 'build')
    missing_mock = mock.Mock()
    handler.instance.add_missing_case_status = missing_mock
    handler.options = mock.Mock(timeout_multiplier=1)
    twisterlib.handlers.terminate_process = mock.Mock()

    dut = DUT()
    dut.available = 0
    dut.failures = 0

    hardware_baud = 14400
    flash_timeout = 60
    serial_mock = mock.Mock(side_effect=mock_serial)

    with mock.patch('serial.Serial', serial_mock), \
         pytest.raises(expected_exception) if expected_exception else \
         nullcontext():
        result = handler._create_serial_connection(dut, serial_device, hardware_baud,
                                                   flash_timeout, serial_pty,
                                                   ser_pty_process)

    if expected_result:
        assert result is not None
        assert dut.failures == 0

    if expected_exception:
        assert handler.instance.status == TwisterStatus.FAIL
        assert handler.instance.reason == 'Serial Device Error'
        # The failed DUT goes back to the pool with a failure recorded.
        assert dut.available == 1
        assert dut.failures == 1
        missing_mock.assert_called_once_with('blocked', 'Serial Device Error')

    if terminate_ser_pty_process:
        twisterlib.handlers.terminate_process.assert_called_once()
        ser_pty_process.communicate.assert_called_once()
# Each row: (serial pty command string, exception raised by Popen,
#            expected resulting device path).
# NOTE(review): the 'pty process error' row passes 'dummy1,dummy2' while the
# inner assert expects ['dummy1', 'dummy2'] — verify how _get_serial_device
# tokenizes serial_pty before relying on that row's inner assertion.
TESTDATA_16 = [
    ('dummy1 dummy2', None, 'slave name'),
    ('dummy1,dummy2', CalledProcessError, None),
    (None, None, 'dummy hardware serial'),
]
@pytest.mark.parametrize(
    'serial_pty, popen_exception, expected_device',
    TESTDATA_16,
    ids=['pty', 'pty process error', 'no pty']
)
def test_devicehandler_get_serial_device(
    mocked_instance,
    serial_pty,
    popen_exception,
    expected_device
):
    """Verify _get_serial_device spawns the pty helper and resolves the slave
    tty name, or falls back to the hardware serial when no pty is given."""
    def mock_popen(command, *args, **kwargs):
        assert command == ['dummy1', 'dummy2']
        if popen_exception:
            raise popen_exception(command, 'Dummy error')
        return mock.Mock()

    handler = DeviceHandler(mocked_instance, 'build')
    hardware_serial = 'dummy hardware serial'

    popen_mock = mock.Mock(side_effect=mock_popen)
    openpty_mock = mock.Mock(return_value=('master', 'slave'))
    ttyname_mock = mock.Mock(side_effect=lambda x: x + ' name')

    with mock.patch('subprocess.Popen', popen_mock), \
         mock.patch('pty.openpty', openpty_mock), \
         mock.patch('os.ttyname', ttyname_mock):
        result = handler._get_serial_device(serial_pty, hardware_serial)

    if popen_exception:
        assert result is None
    else:
        assert result[0] == expected_device
# Each row: (has_hardware, raise_create_serial, raise_popen, raise_timeout,
#            returncode, do_timeout_thread, use_pty,
#            expected status, expected reason, expected log fragments) —
# consumed by the parametrized test_devicehandler_handle below.
TESTDATA_17 = [
    (False, False, False, False, None, False, False,
     TwisterStatus.NONE, None, []),
    (True, True, False, False, None, False, False,
     TwisterStatus.NONE, None, []),
    (True, False, True, False, None, False, False,
     TwisterStatus.ERROR, 'Device issue (Flash error)', []),
    (True, False, False, True, None, False, False,
     TwisterStatus.ERROR, 'Device issue (Timeout)', ['Flash operation timed out.']),
    (True, False, False, False, 1, False, False,
     TwisterStatus.ERROR, 'Device issue (Flash error?)', []),
    (True, False, False, False, 0, True, False,
     TwisterStatus.NONE, None, ['Timed out while monitoring serial output on IPName']),
    (True, False, False, False, 0, False, True,
     TwisterStatus.NONE, None, ["Terminating serial-pty:'Serial PTY'",
                                "Terminated serial-pty:'Serial PTY', stdout:'', stderr:''"]),
]
@pytest.mark.parametrize(
    'has_hardware, raise_create_serial, raise_popen, raise_timeout,' \
    ' returncode, do_timeout_thread, use_pty,' \
    ' expected_status, expected_reason, expected_logs',
    TESTDATA_17,
    ids=['no hardware', 'create serial failure', 'popen called process error',
         'communicate timeout', 'nonzero returncode', 'valid pty', 'valid dev']
)
def test_devicehandler_handle(
    mocked_instance,
    caplog,
    has_hardware,
    raise_create_serial,
    raise_popen,
    raise_timeout,
    returncode,
    do_timeout_thread,
    use_pty,
    expected_status,
    expected_reason,
    expected_logs
):
    """Exercise DeviceHandler.handle() end-to-end with all collaborators
    mocked out, checking the resulting instance status/reason, the emitted
    log messages, and that the custom scripts and DUT release are invoked.
    """
    # Stand-in for DeviceHandler._get_serial_device: returns a (device, pty
    # process) pair; the PTY process only exists when a serial PTY is used.
    def mock_get_serial(serial_pty, hardware_serial):
        if serial_pty:
            serial_pty_process = mock.Mock(
                name='dummy serial PTY process',
                communicate=mock.Mock(
                    return_value=('', '')
                )
            )
            return 'dummy serial PTY device', serial_pty_process
        return 'dummy serial device', None

    # Stand-in for _create_serial_connection, optionally simulating a
    # SerialException from pyserial.
    def mock_create_serial(*args, **kwargs):
        if raise_create_serial:
            raise SerialException('dummy cmd', 'dummy msg')
        return mock.Mock(name='dummy serial')

    # threading.Thread replacement whose is_alive() reports the parametrized
    # timeout condition.
    def mock_thread(*args, **kwargs):
        is_alive_mock = mock.Mock(return_value=bool(do_timeout_thread))
        return mock.Mock(is_alive=is_alive_mock)

    # terminate() side effect: give the terminated process a communicate().
    def mock_terminate(proc, *args, **kwargs):
        proc.communicate = mock.Mock(return_value=(mock.Mock(), mock.Mock()))

    def mock_communicate(*args, **kwargs):
        if raise_timeout:
            raise TimeoutExpired('dummy cmd', 'dummyamount')
        return mock.Mock(), mock.Mock()

    # subprocess.Popen replacement usable as a context manager; the inner
    # process carries the parametrized returncode.
    def mock_popen(command, *args, **kwargs):
        if raise_popen:
            raise CalledProcessError('dummy proc', 'dummy msg')
        mock_process = mock.Mock(
            pid=1,
            returncode=returncode,
            communicate=mock.Mock(side_effect=mock_communicate)
        )
        return mock.Mock(
            __enter__=mock.Mock(return_value=mock_process),
            __exit__=mock.Mock(return_value=None)
        )

    # DUT description returned by get_hardware(); None simulates no hardware.
    hardware = None if not has_hardware else mock.Mock(
        baud=14400,
        runner='dummy runner',
        serial_pty='Serial PTY' if use_pty else None,
        serial='dummy serial',
        pre_script='dummy pre script',
        post_script='dummy post script',
        post_flash_script='dummy post flash script',
        flash_timeout=60,
        flash_with_test=True
    )

    handler = DeviceHandler(mocked_instance, 'build')
    handler.get_hardware = mock.Mock(return_value=hardware)
    handler.options = mock.Mock(
        timeout_multiplier=1,
        west_flash=None,
        west_runner=None
    )
    handler._get_serial_device = mock.Mock(side_effect=mock_get_serial)
    handler._create_command = mock.Mock(return_value=['dummy', 'command'])
    handler.run_custom_script = mock.Mock()
    handler._create_serial_connection = mock.Mock(
        side_effect=mock_create_serial
    )
    handler.monitor_serial = mock.Mock()
    handler.terminate = mock.Mock(side_effect=mock_terminate)
    handler._update_instance_info = mock.Mock()
    handler._final_handle_actions = mock.Mock()
    handler.make_dut_available = mock.Mock()
    twisterlib.handlers.terminate_process = mock.Mock()
    handler.instance.platform.name = 'IPName'

    harness = mock.Mock()

    with mock.patch('builtins.open', mock.mock_open(read_data='')), \
         mock.patch('subprocess.Popen', side_effect=mock_popen), \
         mock.patch('threading.Event', mock.Mock()), \
         mock.patch('threading.Thread', side_effect=mock_thread):
        handler.handle(harness)

    handler.get_hardware.assert_called_once()

    messages = [record.msg for record in caplog.records]
    assert all([msg in messages for msg in expected_logs])

    # Without hardware the handler must bail out before any script runs.
    if not has_hardware:
        return

    handler.run_custom_script.assert_has_calls([
        mock.call('dummy pre script', mock.ANY)
    ])

    # A serial-connection failure aborts after the pre script only.
    if raise_create_serial:
        return

    handler.run_custom_script.assert_has_calls([
        mock.call('dummy pre script', mock.ANY),
        mock.call('dummy post flash script', mock.ANY),
        mock.call('dummy post script', mock.ANY)
    ])

    if expected_reason:
        assert handler.instance.reason == expected_reason
    if expected_status:
        assert handler.instance.status == expected_status
    handler.make_dut_available.assert_called_once_with(hardware)
# Rows for test_qemuhandler_init. Columns:
# (ignore_qemu_crash, expected_ignore_crash, expected_ignore_unexpected_eof)
TESTDATA_18 = [
    (True, True, True),
    (False, False, False),
]
@pytest.mark.parametrize(
    'ignore_qemu_crash, expected_ignore_crash, expected_ignore_unexpected_eof',
    TESTDATA_18,
    ids=['ignore crash', 'qemu crash']
)
def test_qemuhandler_init(
    mocked_instance,
    ignore_qemu_crash,
    expected_ignore_crash,
    expected_ignore_unexpected_eof
):
    """QEMUHandler.__init__() should pick up the testsuite's
    ignore_qemu_crash flag for both crash-related handler attributes."""
    mocked_instance.testsuite.ignore_qemu_crash = ignore_qemu_crash

    handler = QEMUHandler(mocked_instance, 'build')

    observed = (handler.ignore_qemu_crash, handler.ignore_unexpected_eof)
    assert observed == (expected_ignore_crash, expected_ignore_unexpected_eof)
def test_qemuhandler_get_cpu_time():
    """_get_cpu_time() should return the sum of the user and system CPU
    times that psutil reports for the given pid."""
    fake_times = mock.Mock(user=20.0, system=64.0)
    fake_process = mock.Mock(cpu_times=mock.Mock(return_value=fake_times))

    with mock.patch('psutil.Process', mock.Mock(return_value=fake_process)):
        res = QEMUHandler._get_cpu_time(0)

    assert res == pytest.approx(84.0)
# Rows for test_qemuhandler_get_default_domain_build_dir. Columns:
# (self_sysbuild, self_build_dir, build_dir, expected)
TESTDATA_19 = [
    (
        True,
        os.path.join('self', 'dummy_dir', '1'),
        mock.PropertyMock(return_value=os.path.join('dummy_dir', '1')),
        os.path.join('dummy_dir', '1')
    ),
    (
        False,
        os.path.join('self', 'dummy_dir', '2'),
        mock.PropertyMock(return_value=os.path.join('dummy_dir', '2')),
        os.path.join('self', 'dummy_dir', '2')
    ),
]
@pytest.mark.parametrize(
    'self_sysbuild, self_build_dir, build_dir, expected',
    TESTDATA_19,
    ids=['domains build dir', 'self build dir']
)
def test_qemuhandler_get_default_domain_build_dir(
    mocked_instance,
    self_sysbuild,
    self_build_dir,
    build_dir,
    expected
):
    """get_default_domain_build_dir() should use the default domain's build
    directory for sysbuild instances and the handler's own build_dir
    otherwise."""
    # Mocked Domains object whose default domain exposes the parametrized
    # build_dir property.
    get_default_domain_mock = mock.Mock()
    type(get_default_domain_mock()).build_dir = build_dir
    domains_mock = mock.Mock(get_default_domain=get_default_domain_mock)
    from_file_mock = mock.Mock(return_value=domains_mock)

    handler = QEMUHandler(mocked_instance, 'build')
    handler.instance.sysbuild = self_sysbuild
    handler.build_dir = self_build_dir

    with mock.patch('domains.Domains.from_file', from_file_mock):
        result = handler.get_default_domain_build_dir()

    assert result == expected
# Rows for test_qemuhandler_set_qemu_filenames. Columns:
# (self_log, self_pid_fn, sysbuild_build_dir, exists_pid_fn)
TESTDATA_20 = [
    (
        os.path.join('self', 'dummy_dir', 'log1'),
        os.path.join('self', 'dummy_dir', 'pid1'),
        os.path.join('sysbuild', 'dummy_dir', 'bd1'),
        True
    ),
    (
        os.path.join('self', 'dummy_dir', 'log2'),
        os.path.join('self', 'dummy_dir', 'pid2'),
        os.path.join('sysbuild', 'dummy_dir', 'bd2'),
        False
    ),
]
@pytest.mark.parametrize(
    'self_log, self_pid_fn, sysbuild_build_dir, exists_pid_fn',
    TESTDATA_20,
    ids=['pid exists', 'pid missing']
)
def test_qemuhandler_set_qemu_filenames(
    mocked_instance,
    self_log,
    self_pid_fn,
    sysbuild_build_dir,
    exists_pid_fn
):
    """_set_qemu_filenames() should derive the fifo, pid and log filenames
    and remove a pre-existing pid file."""
    unlink_mock = mock.Mock()
    exists_mock = mock.Mock(return_value=exists_pid_fn)

    handler = QEMUHandler(mocked_instance, 'build')
    handler.log = self_log
    handler.pid_fn = self_pid_fn

    with mock.patch('os.unlink', unlink_mock), \
         mock.patch('os.path.exists', exists_mock):
        handler._set_qemu_filenames(sysbuild_build_dir)

    # fifo lives in the instance build dir; pid file in the sysbuild dir.
    assert handler.fifo_fn == mocked_instance.build_dir + \
                              os.path.sep + 'qemu-fifo'
    assert handler.pid_fn == sysbuild_build_dir + os.path.sep + 'qemu.pid'
    assert handler.log_fn == self_log

    # A stale pid file must be deleted.
    if exists_pid_fn:
        unlink_mock.assert_called_once_with(sysbuild_build_dir + \
                                            os.path.sep + 'qemu.pid')
def test_qemuhandler_create_command(mocked_instance):
    """_create_command() should build the generator invocation targeting the
    sysbuild build directory's 'run' target."""
    build_dir = os.path.join('sysbuild', 'dummy_dir')

    handler = QEMUHandler(mocked_instance, 'build')
    handler.generator_cmd = 'dummy_cmd'

    expected = [
        'dummy_cmd', '-C', 'sysbuild' + os.path.sep + 'dummy_dir', 'run'
    ]
    assert handler._create_command(build_dir) == expected
# Rows for test_qemuhandler_update_instance_info. Columns:
# (self_returncode, self_ignore_qemu_crash, self_instance_reason,
#  harness_status, is_timeout,
#  expected_status, expected_reason, expected_called_missing_case)
TESTDATA_21 = [
    (
        0,
        False,
        None,
        'good dummy status',
        False,
        TwisterStatus.NONE,
        None,
        False
    ),
    (
        1,
        True,
        None,
        'good dummy status',
        False,
        TwisterStatus.NONE,
        None,
        False
    ),
    (
        0,
        False,
        None,
        TwisterStatus.NONE,
        True,
        TwisterStatus.FAIL,
        'Timeout',
        True
    ),
    (
        1,
        False,
        None,
        TwisterStatus.NONE,
        False,
        TwisterStatus.FAIL,
        'Exited with 1',
        True
    ),
    (
        1,
        False,
        'preexisting reason',
        'good dummy status',
        False,
        TwisterStatus.FAIL,
        'preexisting reason',
        True
    ),
]
@pytest.mark.parametrize(
    'self_returncode, self_ignore_qemu_crash,' \
    ' self_instance_reason, harness_status, is_timeout,' \
    ' expected_status, expected_reason, expected_called_missing_case',
    TESTDATA_21,
    ids=['not failed', 'qemu ignore', 'timeout', 'bad returncode', 'other fail']
)
def test_qemuhandler_update_instance_info(
    mocked_instance,
    self_returncode,
    self_ignore_qemu_crash,
    self_instance_reason,
    harness_status,
    is_timeout,
    expected_status,
    expected_reason,
    expected_called_missing_case
):
    """_update_instance_info() should translate the QEMU return code,
    harness status and timeout flag into the instance's status/reason, and
    block missing test cases on failure."""
    mocked_instance.add_missing_case_status = mock.Mock()
    mocked_instance.reason = self_instance_reason

    handler = QEMUHandler(mocked_instance, 'build')
    handler.returncode = self_returncode
    handler.ignore_qemu_crash = self_ignore_qemu_crash

    handler._update_instance_info(harness_status, is_timeout)

    assert handler.instance.status == expected_status
    assert handler.instance.reason == expected_reason

    if expected_called_missing_case:
        mocked_instance.add_missing_case_status.assert_called_once_with(
            TwisterStatus.BLOCK
        )
def test_qemuhandler_thread_get_fifo_names():
    """_thread_get_fifo_names() should derive the '.in' and '.out' FIFO
    names from the base FIFO filename."""
    in_name, out_name = QEMUHandler._thread_get_fifo_names('dummy')

    assert (in_name, out_name) == ('dummy.in', 'dummy.out')
# Rows for test_qemuhandler_thread_open_files. Columns:
# (fifo_in_exists, fifo_out_exists)
TESTDATA_22 = [
    (False, False),
    (False, True),
    (True, False),
    (True, True),
]
@pytest.mark.parametrize(
    'fifo_in_exists, fifo_out_exists',
    TESTDATA_22,
    ids=['both missing', 'out exists', 'in exists', 'both exist']
)
def test_qemuhandler_thread_open_files(fifo_in_exists, fifo_out_exists):
    """_thread_open_files() should open both FIFOs and the log file with
    the right modes, unlinking any FIFO that already exists."""
    # os.path.exists replacement keyed on the two expected FIFO paths.
    def mock_exists(path):
        if path == 'fifo.in':
            return fifo_in_exists
        elif path == 'fifo.out':
            return fifo_out_exists
        else:
            raise ValueError('Unexpected path in mock of os.path.exists')

    unlink_mock = mock.Mock()
    exists_mock = mock.Mock(side_effect=mock_exists)
    mkfifo_mock = mock.Mock()

    fifo_in = 'fifo.in'
    fifo_out = 'fifo.out'
    logfile = 'log.file'

    with mock.patch('os.unlink', unlink_mock), \
         mock.patch('os.mkfifo', mkfifo_mock), \
         mock.patch('os.path.exists', exists_mock), \
         mock.patch('builtins.open', mock.mock_open()) as open_mock:
        _, _, _ = QEMUHandler._thread_open_files(fifo_in, fifo_out, logfile)

    open_mock.assert_has_calls([
        mock.call('fifo.in', 'wb'),
        mock.call('fifo.out', 'rb', buffering=0),
        mock.call('log.file', 'wt'),
    ])

    # Pre-existing FIFOs must be removed before recreation.
    if fifo_in_exists:
        unlink_mock.assert_any_call('fifo.in')
    if fifo_out_exists:
        unlink_mock.assert_any_call('fifo.out')
# Rows for test_qemuhandler_thread_close_files. Columns:
# (is_pid, is_lookup_error)
TESTDATA_23 = [
    (False, False),
    (True, True),
    (True, False)
]
@pytest.mark.parametrize(
    'is_pid, is_lookup_error',
    TESTDATA_23,
    ids=['pid missing', 'pid lookup error', 'pid ok']
)
def test_qemuhandler_thread_close_files(is_pid, is_lookup_error):
    """_thread_close_files() should close all file objects, unlink both
    FIFOs, and SIGTERM the QEMU pid when one is known (tolerating a
    ProcessLookupError for an already-gone process)."""
    is_process_killed = {}

    # os.kill replacement recording SIGTERM deliveries, or simulating a
    # process that has already exited.
    def mock_kill(pid, sig):
        if is_lookup_error:
            raise ProcessLookupError(f'Couldn\'t find pid: {pid}.')
        elif sig == signal.SIGTERM:
            is_process_killed[pid] = True

    unlink_mock = mock.Mock()
    kill_mock = mock.Mock(side_effect=mock_kill)

    fifo_in = 'fifo.in'
    fifo_out = 'fifo.out'
    pid = 12345 if is_pid else None
    out_fp = mock.Mock()
    in_fp = mock.Mock()
    log_out_fp = mock.Mock()

    with mock.patch('os.unlink', unlink_mock), \
         mock.patch('os.kill', kill_mock):
        QEMUHandler._thread_close_files(fifo_in, fifo_out, pid, out_fp,
                                        in_fp, log_out_fp)

    out_fp.close.assert_called_once()
    in_fp.close.assert_called_once()
    log_out_fp.close.assert_called_once()

    unlink_mock.assert_has_calls([mock.call('fifo.in'), mock.call('fifo.out')])

    if is_pid and not is_lookup_error:
        assert is_process_killed[pid]
# Rows for test_qemuhandler_thread_update_instance_info. Columns:
# (_status, _reason, expected_status, expected_reason)
TESTDATA_24 = [
    (TwisterStatus.FAIL, 'timeout', TwisterStatus.FAIL, 'timeout'),
    (TwisterStatus.FAIL, 'Execution error', TwisterStatus.FAIL, 'Execution error'),
    (TwisterStatus.FAIL, 'unexpected eof', TwisterStatus.FAIL, 'unexpected eof'),
    (TwisterStatus.FAIL, 'unexpected byte', TwisterStatus.FAIL, 'unexpected byte'),
    (TwisterStatus.NONE, None, TwisterStatus.NONE, 'Unknown'),
]
@pytest.mark.parametrize(
    '_status, _reason, expected_status, expected_reason',
    TESTDATA_24,
    ids=['timeout', 'failed', 'unexpected eof', 'unexpected byte', 'unknown']
)
def test_qemuhandler_thread_update_instance_info(
    mocked_instance,
    _status,
    _reason,
    expected_status,
    expected_reason
):
    """_thread_update_instance_info() should record the execution time,
    status and reason on the handler's instance."""
    handler = QEMUHandler(mocked_instance, 'build')
    elapsed = 59

    QEMUHandler._thread_update_instance_info(handler, elapsed, _status, _reason)

    instance = handler.instance
    assert instance.execution_time == elapsed
    assert instance.status == expected_status
    assert instance.reason == expected_reason
# Rows for test_qemuhandler_thread. Columns:
# (content, timeout, pid, harness_statuses, cputime, capture_coverage,
#  expected_status, expected_reason, expected_log_calls)
TESTDATA_25 = [
    (
        ('1\n' * 60).encode('utf-8'),
        60,
        1,
        [TwisterStatus.NONE] * 60 + [TwisterStatus.PASS] * 6,
        1000,
        False,
        TwisterStatus.FAIL,
        'timeout',
        [mock.call('1\n'), mock.call('1\n')]
    ),
    (
        ('1\n' * 60).encode('utf-8'),
        60,
        -1,
        [TwisterStatus.NONE] * 60 + [TwisterStatus.PASS] * 30,
        100,
        False,
        TwisterStatus.FAIL,
        None,
        [mock.call('1\n'), mock.call('1\n')]
    ),
    (
        b'',
        60,
        1,
        [TwisterStatus.PASS] * 3,
        100,
        False,
        TwisterStatus.FAIL,
        'unexpected eof',
        []
    ),
    (
        b'\x81',
        60,
        1,
        [TwisterStatus.PASS] * 3,
        100,
        False,
        TwisterStatus.FAIL,
        'unexpected byte',
        []
    ),
    (
        '1\n2\n3\n4\n5\n'.encode('utf-8'),
        600,
        1,
        [TwisterStatus.NONE] * 3 + [TwisterStatus.PASS] * 7,
        100,
        False,
        TwisterStatus.PASS,
        None,
        [mock.call('1\n'), mock.call('2\n'), mock.call('3\n'), mock.call('4\n')]
    ),
    (
        '1\n2\n3\n4\n5\n'.encode('utf-8'),
        600,
        0,
        [TwisterStatus.NONE] * 3 + [TwisterStatus.PASS] * 7,
        100,
        False,
        TwisterStatus.FAIL,
        'timeout',
        [mock.call('1\n'), mock.call('2\n')]
    ),
    (
        '1\n2\n3\n4\n5\n'.encode('utf-8'),
        60,
        1,
        [TwisterStatus.NONE] * 3 + [TwisterStatus.PASS] * 7,
        (n for n in [100, 100, 10000]),
        True,
        TwisterStatus.PASS,
        None,
        [mock.call('1\n'), mock.call('2\n'), mock.call('3\n'), mock.call('4\n')]
    ),
]
@pytest.mark.parametrize(
    'content, timeout, pid, harness_statuses, cputime, capture_coverage,' \
    ' expected_status, expected_reason, expected_log_calls',
    TESTDATA_25,
    ids=[
        'timeout',
        'harness failed',
        'unexpected eof',
        'unexpected byte',
        'harness success',
        'timeout by pid=0',
        'capture_coverage'
    ]
)
def test_qemuhandler_thread(
    mocked_instance,
    faux_timer,
    content,
    timeout,
    pid,
    harness_statuses,
    cputime,
    capture_coverage,
    expected_status,
    expected_reason,
    expected_log_calls
):
    """Drive QEMUHandler._thread() over a fake QEMU FIFO stream and check
    the final status/reason passed to _thread_update_instance_info and the
    lines written to the log file."""
    # _get_cpu_time replacement; pid <= 0 simulates an unreachable process.
    def mock_cputime(pid):
        if pid > 0:
            return cputime if isinstance(cputime, int) else next(cputime)
        else:
            raise ProcessLookupError()

    type(mocked_instance.testsuite).timeout = mock.PropertyMock(return_value=timeout)
    handler = QEMUHandler(mocked_instance, 'build')
    handler.ignore_unexpected_eof = False
    handler.pid_fn = 'pid_fn'
    handler.fifo_fn = 'fifo_fn'
    handler.options = mock.Mock(timeout_multiplier=1)

    # open() replacement: serve the parametrized pid and FIFO content from
    # in-memory file objects.
    def mocked_open(filename, *args, **kwargs):
        if filename == handler.pid_fn:
            contents = str(pid).encode('utf-8')
        elif filename == handler.fifo_fn + '.out':
            contents = content
        else:
            contents = b''

        file_object = mock.mock_open(read_data=contents).return_value
        file_object.__iter__.return_value = contents.splitlines(True)
        return file_object

    harness = mock.Mock(capture_coverage=capture_coverage, handle=print)
    # Feed the thread loop a fresh harness status on every read.
    type(harness).status = mock.PropertyMock(side_effect=harness_statuses)

    p = mock.Mock()
    p.poll = mock.Mock(
        side_effect=itertools.cycle([True, True, True, True, False])
    )

    mock_thread_get_fifo_names = mock.Mock(
        return_value=('fifo_fn.in', 'fifo_fn.out')
    )

    log_fp_mock = mock.Mock()
    in_fp_mock = mocked_open('fifo_fn.out')
    out_fp_mock = mock.Mock()
    mock_thread_open_files = mock.Mock(
        return_value=(out_fp_mock, in_fp_mock, log_fp_mock)
    )

    mock_thread_close_files = mock.Mock()
    mock_thread_update_instance_info = mock.Mock()

    with mock.patch('time.time', side_effect=faux_timer.time), \
         mock.patch('builtins.open', new=mocked_open), \
         mock.patch('select.poll', return_value=p), \
         mock.patch('os.path.exists', return_value=True), \
         mock.patch('twisterlib.handlers.QEMUHandler._get_cpu_time',
                    mock_cputime), \
         mock.patch('twisterlib.handlers.QEMUHandler._thread_get_fifo_names',
                    mock_thread_get_fifo_names), \
         mock.patch('twisterlib.handlers.QEMUHandler._thread_open_files',
                    mock_thread_open_files), \
         mock.patch('twisterlib.handlers.QEMUHandler._thread_close_files',
                    mock_thread_close_files), \
         mock.patch('twisterlib.handlers.QEMUHandler.' \
                    '_thread_update_instance_info',
                    mock_thread_update_instance_info):
        QEMUHandler._thread(
            handler,
            handler.get_test_timeout(),
            handler.build_dir,
            handler.log,
            handler.fifo_fn,
            handler.pid_fn,
            harness,
            handler.ignore_unexpected_eof
        )

    # Fix: removed leftover debug print() of the mock's call_args_list.
    mock_thread_update_instance_info.assert_called_once_with(
        handler,
        mock.ANY,
        expected_status,
        mock.ANY
    )

    log_fp_mock.write.assert_has_calls(expected_log_calls)
# Rows for test_qemuhandler_handle. Columns:
# (isatty, do_timeout, harness_status, exists_pid_fn, expected_logs)
TESTDATA_26 = [
    (True, False, TwisterStatus.NONE, True,
     ['No timeout, return code from QEMU (1): 1',
      'return code from QEMU (1): 1']),
    (False, True, TwisterStatus.PASS, True, ['return code from QEMU (1): 0']),
    (False, True, TwisterStatus.FAIL, False, ['return code from QEMU (None): 1']),
]
@pytest.mark.parametrize(
    'isatty, do_timeout, harness_status, exists_pid_fn, expected_logs',
    TESTDATA_26,
    ids=['no timeout, isatty', 'timeout passed', 'timeout, no pid_fn']
)
def test_qemuhandler_handle(
    mocked_instance,
    caplog,
    tmp_path,
    isatty,
    do_timeout,
    harness_status,
    exists_pid_fn,
    expected_logs
):
    """Exercise QEMUHandler.handle() with subprocess, threading and the
    filesystem mocked out, checking the QEMU return-code log lines."""
    # wait() side effect simulating a QEMU process that never exits.
    def mock_wait(*args, **kwargs):
        if do_timeout:
            raise TimeoutExpired('dummy cmd', 'dummyamount')

    mock_process = mock.Mock(pid=0, returncode=1)
    mock_process.communicate = mock.Mock(
        return_value=(mock.Mock(), mock.Mock())
    )
    mock_process.wait = mock.Mock(side_effect=mock_wait)

    handler = QEMUHandler(mocked_instance, 'build')

    def mock_path_exists(name, *args, **kwargs):
        return exists_pid_fn

    # subprocess.Popen replacement usable as a context manager.
    def mock_popen(command, stdout=None, stdin=None, stderr=None, cwd=None):
        return mock.Mock(
            __enter__=mock.Mock(return_value=mock_process),
            __exit__=mock.Mock(return_value=None),
            communicate=mock.Mock(return_value=(mock.Mock(), mock.Mock()))
        )

    def mock_thread(name=None, target=None, daemon=None, args=None):
        return mock.Mock()

    # _set_qemu_filenames side effect: populate the filenames the real
    # method would derive.
    def mock_filenames(sysbuild_build_dir):
        handler.fifo_fn = os.path.join('dummy', 'qemu-fifo')
        handler.pid_fn = os.path.join(sysbuild_build_dir, 'qemu.pid')
        handler.log_fn = os.path.join('dummy', 'log')

    harness = mock.Mock(status=harness_status)
    handler_options_west_flash = []

    domain_build_dir = os.path.join('sysbuild', 'dummydir')
    command = ['generator_cmd', '-C', os.path.join('cmd', 'path'), 'run']

    handler.options = mock.Mock(
        timeout_multiplier=1,
        west_flash=handler_options_west_flash,
        west_runner=None
    )
    handler.run_custom_script = mock.Mock(return_value=None)
    handler.make_device_available = mock.Mock(return_value=None)
    handler._final_handle_actions = mock.Mock(return_value=None)
    handler._create_command = mock.Mock(return_value=command)
    handler._set_qemu_filenames = mock.Mock(side_effect=mock_filenames)
    handler.get_default_domain_build_dir = mock.Mock(return_value=domain_build_dir)
    handler.terminate = mock.Mock()

    unlink_mock = mock.Mock()

    with mock.patch('subprocess.Popen', side_effect=mock_popen), \
         mock.patch('builtins.open', mock.mock_open(read_data='1')), \
         mock.patch('threading.Thread', side_effect=mock_thread), \
         mock.patch('os.path.exists', side_effect=mock_path_exists), \
         mock.patch('os.unlink', unlink_mock), \
         mock.patch('sys.stdout.isatty', return_value=isatty):
        handler.handle(harness)

    assert all([expected_log in caplog.text for expected_log in expected_logs])
def test_qemuhandler_get_fifo(mocked_instance):
    """get_fifo() should simply return the handler's fifo_fn attribute."""
    handler = QEMUHandler(mocked_instance, 'build')
    handler.fifo_fn = 'fifo_fn'

    assert handler.get_fifo() == 'fifo_fn'
``` | /content/code_sandbox/scripts/tests/twister/test_handlers.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 14,663 |
```python
#!/usr/bin/env python3
#
"""
Tests for environment.py classes' methods
"""
import mock
import os
import pytest
import shutil
from contextlib import nullcontext
import twisterlib.environment
# Rows for test_parse_arguments_errors. Columns:
# (os_name, which_dict, pytest_plugin, args, expected_error)
TESTDATA_1 = [
    (
        None,
        None,
        None,
        ['--short-build-path', '-k'],
        '--short-build-path requires Ninja to be enabled'
    ),
    (
        'nt',
        None,
        None,
        ['--device-serial-pty', 'dummy'],
        '--device-serial-pty is not supported on Windows OS'
    ),
    (
        None,
        None,
        None,
        ['--west-runner=dummy'],
        'west-runner requires west-flash to be enabled'
    ),
    (
        None,
        None,
        None,
        ['--west-flash=\"--board-id=dummy\"'],
        'west-flash requires device-testing to be enabled'
    ),
    (
        None,
        {
            'exist': [],
            'missing': ['valgrind']
        },
        None,
        ['--enable-valgrind'],
        'valgrind enabled but valgrind executable not found'
    ),
    (
        None,
        None,
        None,
        [
            '--device-testing',
            '--device-serial',
            'dummy',
        ],
        'When --device-testing is used with --device-serial' \
        ' or --device-serial-pty, exactly one platform must' \
        ' be specified'
    ),
    (
        None,
        None,
        None,
        [
            '--device-testing',
            '--device-serial',
            'dummy',
            '--platform',
            'dummy_platform1',
            '--platform',
            'dummy_platform2'
        ],
        'When --device-testing is used with --device-serial' \
        ' or --device-serial-pty, exactly one platform must' \
        ' be specified'
    ),
    # Note the underscore.
    (
        None,
        None,
        None,
        ['--device-flash-with-test'],
        '--device-flash-with-test requires --device_testing'
    ),
    (
        None,
        None,
        None,
        ['--shuffle-tests'],
        '--shuffle-tests requires --subset'
    ),
    (
        None,
        None,
        None,
        ['--shuffle-tests-seed', '0'],
        '--shuffle-tests-seed requires --shuffle-tests'
    ),
    (
        None,
        None,
        None,
        ['/dummy/unrecognised/arg'],
        'Unrecognized arguments found: \'/dummy/unrecognised/arg\'.' \
        ' Use -- to delineate extra arguments for test binary' \
        ' or pass -h for help.'
    ),
    (
        None,
        None,
        True,
        [],
        'By default Twister should work without pytest-twister-harness' \
        ' plugin being installed, so please, uninstall it by' \
        ' `pip uninstall pytest-twister-harness` and' \
        ' `git clean -dxf scripts/pylib/pytest-twister-harness`.'
    ),
]
@pytest.mark.parametrize(
    'os_name, which_dict, pytest_plugin, args, expected_error',
    TESTDATA_1,
    ids=[
        'short build path without ninja',
        'device-serial-pty on Windows',
        'west runner without west flash',
        'west-flash without device-testing',
        'valgrind without executable',
        'device serial without platform',
        'device serial with multiple platforms',
        'device flash with test without device testing',
        'shuffle-tests without subset',
        'shuffle-tests-seed without shuffle-tests',
        'unrecognised argument',
        'pytest-twister-harness installed'
    ]
)
def test_parse_arguments_errors(
    caplog,
    os_name,
    which_dict,
    pytest_plugin,
    args,
    expected_error
):
    """parse_arguments() should exit with code 1 and log the expected error
    for each invalid command-line combination."""
    # shutil.which replacement driven by the parametrized which_dict.
    def mock_which(name):
        if name in which_dict['missing']:
            return False
        elif name in which_dict['exist']:
            # Fix: index the recorded paths by the executable *name*; the
            # original indexed with the whole 'exist' list (unhashable, would
            # raise TypeError if this branch were ever taken).
            return which_dict['path'][name] \
                   if which_dict['path'][name] \
                   else f'dummy/path/{name}'
        else:
            return f'dummy/path/{name}'

    with mock.patch('sys.argv', ['twister'] + args):
        parser = twisterlib.environment.add_parse_arguments()

    # Record the real locations of the executables expected to exist, so
    # mock_which can hand back genuine paths for them.
    if which_dict:
        which_dict['path'] = {name: shutil.which(name) \
                              for name in which_dict['exist']}
        which_mock = mock.Mock(side_effect=mock_which)

    with mock.patch('os.name', os_name) \
          if os_name is not None else nullcontext(), \
         mock.patch('shutil.which', which_mock) \
          if which_dict else nullcontext(), \
         mock.patch('twisterlib.environment' \
                    '.PYTEST_PLUGIN_INSTALLED', pytest_plugin) \
          if pytest_plugin is not None else nullcontext():
        with pytest.raises(SystemExit) as exit_info:
            twisterlib.environment.parse_arguments(parser, args)

    assert exit_info.value.code == 1
    assert expected_error in ' '.join(caplog.text.split())
def test_parse_arguments_errors_size():
    """`options.size` is not an error, rather a different functionality.

    Passing --size should run a SizeCalculator size report on the given
    binary and exit with code 0.
    """
    args = ['--size', 'dummy.elf']

    with mock.patch('sys.argv', ['twister'] + args):
        parser = twisterlib.environment.add_parse_arguments()

    # Route SizeCalculator construction through a recording parent mock so
    # both the constructor args and the size_report() call can be checked.
    mock_calc_parent = mock.Mock()
    mock_calc_parent.child = mock.Mock(return_value=mock.Mock())

    def mock_calc(*args, **kwargs):
        return mock_calc_parent.child(args, kwargs)

    with mock.patch('twisterlib.size_calc.SizeCalculator', mock_calc):
        with pytest.raises(SystemExit) as exit_info:
            twisterlib.environment.parse_arguments(parser, args)

    assert exit_info.value.code == 0

    mock_calc_parent.child.assert_has_calls([mock.call(('dummy.elf', []), {})])
    mock_calc_parent.child().size_report.assert_has_calls([mock.call()])
def test_parse_arguments_warnings(caplog):
    """--allow-installed-plugin should only produce a warning when the
    pytest-twister-harness plugin is installed, not an error."""
    cli_args = ['--allow-installed-plugin']

    with mock.patch('sys.argv', ['twister'] + cli_args):
        parser = twisterlib.environment.add_parse_arguments()

    with mock.patch('twisterlib.environment.PYTEST_PLUGIN_INSTALLED', True):
        twisterlib.environment.parse_arguments(parser, cli_args)

    expected_warning = ('You work with installed version of'
                        ' pytest-twister-harness plugin.')
    assert expected_warning in ' '.join(caplog.text.split())
# Rows for test_parse_arguments: additional_args appended to the base CLI.
TESTDATA_2 = [
    (['--enable-size-report']),
    (['--compare-report', 'dummy']),
]
@pytest.mark.parametrize(
    'additional_args',
    TESTDATA_2,
    ids=['show footprint', 'compare report']
)
def test_parse_arguments(zephyr_base, additional_args):
    """A valid command line should populate the default testsuite roots,
    enable the requested features, and collect extra args after '--'."""
    args = ['--coverage', '--platform', 'dummy_platform'] + \
           additional_args + ['--', 'dummy_extra_1', 'dummy_extra_2']

    with mock.patch('sys.argv', ['twister'] + args):
        parser = twisterlib.environment.add_parse_arguments()
        options = twisterlib.environment.parse_arguments(parser, args)

    # Default testsuite roots come from the Zephyr tree.
    assert os.path.join(zephyr_base, 'tests') in options.testsuite_root
    assert os.path.join(zephyr_base, 'samples') in options.testsuite_root

    assert options.enable_size_report

    assert options.enable_coverage

    assert options.coverage_platform == ['dummy_platform']

    # Everything after '--' is forwarded to the test binary.
    assert options.extra_test_args == ['dummy_extra_1', 'dummy_extra_2']
# Rows for test_twisterenv_init. Columns: (options, expected_env) where
# expected_env bundles the attribute values TwisterEnv should end up with.
TESTDATA_3 = [
    (
        None,
        mock.Mock(
            generator_cmd='make',
            generator='Unix Makefiles',
            test_roots=None,
            board_roots=None,
            outdir=None,
        )
    ),
    (
        mock.Mock(
            ninja=True,
            board_root=['dummy1', 'dummy2'],
            testsuite_root=[
                os.path.join('dummy', 'path', "tests"),
                os.path.join('dummy', 'path', "samples")
            ],
            outdir='dummy_abspath',
        ),
        mock.Mock(
            generator_cmd='ninja',
            generator='Ninja',
            test_roots=[
                os.path.join('dummy', 'path', "tests"),
                os.path.join('dummy', 'path', "samples")
            ],
            board_roots=['dummy1', 'dummy2'],
            outdir='dummy_abspath',
        )
    ),
    (
        mock.Mock(
            ninja=False,
            board_root='dummy0',
            testsuite_root=[
                os.path.join('dummy', 'path', "tests"),
                os.path.join('dummy', 'path', "samples")
            ],
            outdir='dummy_abspath',
        ),
        mock.Mock(
            generator_cmd='make',
            generator='Unix Makefiles',
            test_roots=[
                os.path.join('dummy', 'path', "tests"),
                os.path.join('dummy', 'path', "samples")
            ],
            board_roots=['dummy0'],
            outdir='dummy_abspath',
        )
    ),
]
@pytest.mark.parametrize(
    'options, expected_env',
    TESTDATA_3,
    ids=[
        'no options',
        'ninja',
        'make'
    ]
)
def test_twisterenv_init(options, expected_env):
    """TwisterEnv.__init__() should derive the generator, roots and outdir
    from the parsed options (Ninja vs. Unix Makefiles)."""
    original_abspath = os.path.abspath

    # os.path.abspath replacement: pass 'dummy_abspath' through unchanged
    # and neutralise Mock attribute paths.
    def mocked_abspath(path):
        if path == 'dummy_abspath':
            return 'dummy_abspath'
        elif isinstance(path, mock.Mock):
            return None
        else:
            return original_abspath(path)

    with mock.patch('os.path.abspath', side_effect=mocked_abspath):
        twister_env = twisterlib.environment.TwisterEnv(options=options)

    assert twister_env.generator_cmd == expected_env.generator_cmd
    assert twister_env.generator == expected_env.generator

    assert twister_env.test_roots == expected_env.test_roots

    assert twister_env.board_roots == expected_env.board_roots
    assert twister_env.outdir == expected_env.outdir
def test_twisterenv_discover():
    """discover() should check the Zephyr version, probe the toolchain and
    timestamp the run date."""
    options = mock.Mock(
        ninja=True
    )

    original_abspath = os.path.abspath

    # os.path.abspath replacement: pass 'dummy_abspath' through unchanged
    # and neutralise Mock attribute paths.
    def mocked_abspath(path):
        if path == 'dummy_abspath':
            return 'dummy_abspath'
        elif isinstance(path, mock.Mock):
            return None
        else:
            return original_abspath(path)

    with mock.patch('os.path.abspath', side_effect=mocked_abspath):
        twister_env = twisterlib.environment.TwisterEnv(options=options)

    # datetime replacement yielding a fixed ISO timestamp.
    mock_datetime = mock.Mock(
        now=mock.Mock(
            return_value=mock.Mock(
                isoformat=mock.Mock(return_value='dummy_time')
            )
        )
    )

    with mock.patch.object(
            twisterlib.environment.TwisterEnv,
            'check_zephyr_version',
            mock.Mock()) as mock_czv, \
         mock.patch.object(
            twisterlib.environment.TwisterEnv,
            'get_toolchain',
            mock.Mock()) as mock_gt, \
         mock.patch('twisterlib.environment.datetime', mock_datetime):
        twister_env.discover()

    mock_czv.assert_called_once()
    mock_gt.assert_called_once()
    assert twister_env.run_date == 'dummy_time'
# Rows for test_twisterenv_check_zephyr_version. Columns:
# (git_describe_return, git_show_return, expected_logs,
#  expected_version, expected_commit_date)
TESTDATA_4 = [
    (
        mock.Mock(returncode=0, stdout='dummy stdout version'),
        mock.Mock(returncode=0, stdout='dummy stdout date'),
        ['Zephyr version: dummy stdout version'],
        'dummy stdout version',
        'dummy stdout date'
    ),
    (
        mock.Mock(returncode=0, stdout=''),
        mock.Mock(returncode=0, stdout='dummy stdout date'),
        ['Could not determine version'],
        'Unknown',
        'dummy stdout date'
    ),
    (
        mock.Mock(returncode=1, stdout='dummy stdout version'),
        mock.Mock(returncode=0, stdout='dummy stdout date'),
        ['Could not determine version'],
        'Unknown',
        'dummy stdout date'
    ),
    (
        OSError,
        mock.Mock(returncode=1),
        ['Could not determine version'],
        'Unknown',
        'Unknown'
    ),
]
@pytest.mark.parametrize(
    'git_describe_return, git_show_return, expected_logs,' \
    ' expected_version, expected_commit_date',
    TESTDATA_4,
    ids=[
        'valid',
        'no zephyr version on describe',
        'error on git describe',
        'execution error on git describe',
    ]
)
def test_twisterenv_check_zephyr_version(
    caplog,
    git_describe_return,
    git_show_return,
    expected_logs,
    expected_version,
    expected_commit_date
):
    """check_zephyr_version() should set version and commit_date from
    `git describe` / `git show`, falling back to 'Unknown' on any failure."""
    # subprocess.run replacement dispatching on the git subcommand; a
    # parametrized exception *class* is raised instead of returned.
    def mock_run(command, *args, **kwargs):
        if all([keyword in command for keyword in ['git', 'describe']]):
            if isinstance(git_describe_return, type) and \
               issubclass(git_describe_return, Exception):
                raise git_describe_return()
            return git_describe_return
        if all([keyword in command for keyword in ['git', 'show']]):
            if isinstance(git_show_return, type) and \
               issubclass(git_show_return, Exception):
                raise git_show_return()
            return git_show_return

    options = mock.Mock(
        ninja=True
    )

    original_abspath = os.path.abspath

    # os.path.abspath replacement: pass 'dummy_abspath' through unchanged
    # and neutralise Mock attribute paths.
    def mocked_abspath(path):
        if path == 'dummy_abspath':
            return 'dummy_abspath'
        elif isinstance(path, mock.Mock):
            return None
        else:
            return original_abspath(path)

    with mock.patch('os.path.abspath', side_effect=mocked_abspath):
        twister_env = twisterlib.environment.TwisterEnv(options=options)

    with mock.patch('subprocess.run', mock.Mock(side_effect=mock_run)):
        twister_env.check_zephyr_version()

    # Fix: removed leftover debug print() calls of expected_logs/caplog.text.
    assert twister_env.version == expected_version
    assert twister_env.commit_date == expected_commit_date
    assert all([expected_log in caplog.text for expected_log in expected_logs])
# Rows for test_twisterenv_run_cmake_script. Columns:
# (find_cmake, return_code, out, expected_log, expected_result)
# The \x1B sequences check that ANSI escape codes are stripped from output.
TESTDATA_5 = [
    (
        False,
        None,
        None,
        'Unable to find `cmake` in path',
        None
    ),
    (
        True,
        0,
        b'somedummy\x1B[123-@d1770',
        'Finished running dummy/script/path',
        {
            'returncode': 0,
            'msg': 'Finished running dummy/script/path',
            'stdout': 'somedummyd1770',
        }
    ),
    (
        True,
        1,
        b'another\x1B_dummy',
        'Cmake script failure: dummy/script/path',
        {
            'returncode': 1,
            'returnmsg': 'anotherdummy'
        }
    ),
]
@pytest.mark.parametrize(
    'find_cmake, return_code, out, expected_log, expected_result',
    TESTDATA_5,
    ids=[
        'cmake not found',
        'regex sanitation 1',
        'regex sanitation 2'
    ]
)
def test_twisterenv_run_cmake_script(
    caplog,
    find_cmake,
    return_code,
    out,
    expected_log,
    expected_result
):
    """run_cmake_script() should fail when cmake is missing and otherwise
    return the sanitised (ANSI-stripped) cmake output and return code."""
    def mock_which(name, *args, **kwargs):
        return 'dummy/cmake/path' if find_cmake else None

    # subprocess.Popen replacement producing the parametrized raw output.
    def mock_popen(command, *args, **kwargs):
        return mock.Mock(
            pid=0,
            returncode=return_code,
            communicate=mock.Mock(
                return_value=(out, '')
            )
        )

    args = ['dummy/script/path', 'var1=val1']

    # When cmake cannot be found, an exception is expected instead of a
    # result dict.
    with mock.patch('shutil.which', mock_which), \
         mock.patch('subprocess.Popen', mock.Mock(side_effect=mock_popen)), \
         pytest.raises(Exception) \
          if not find_cmake else nullcontext() as exception:
        results = twisterlib.environment.TwisterEnv.run_cmake_script(args)

    assert 'Running cmake script dummy/script/path' in caplog.text

    assert expected_log in caplog.text

    if exception is not None:
        return

    assert expected_result.items() <= results.items()
# Rows for test_get_toolchain. Columns:
# (script_result, exit_value, expected_log)
TESTDATA_6 = [
    (
        {
            'returncode': 0,
            'stdout': '{\"ZEPHYR_TOOLCHAIN_VARIANT\": \"dummy toolchain\"}'
        },
        None,
        'Using \'dummy toolchain\' toolchain.'
    ),
    (
        {'returncode': 1},
        2,
        None
    ),
]
@pytest.mark.parametrize(
    'script_result, exit_value, expected_log',
    TESTDATA_6,
    ids=['valid', 'error']
)
def test_get_toolchain(caplog, script_result, exit_value, expected_log):
    """get_toolchain() should log the detected toolchain on success and
    exit with the cmake script's failure code otherwise."""
    options = mock.Mock(
        ninja=True
    )

    original_abspath = os.path.abspath

    # os.path.abspath replacement: pass 'dummy_abspath' through unchanged
    # and neutralise Mock attribute paths.
    def mocked_abspath(path):
        if path == 'dummy_abspath':
            return 'dummy_abspath'
        elif isinstance(path, mock.Mock):
            return None
        else:
            return original_abspath(path)

    with mock.patch('os.path.abspath', side_effect=mocked_abspath):
        twister_env = twisterlib.environment.TwisterEnv(options=options)

    # A SystemExit is only expected for the failing cmake script result.
    with mock.patch.object(
            twisterlib.environment.TwisterEnv,
            'run_cmake_script',
            mock.Mock(return_value=script_result)), \
         pytest.raises(SystemExit) \
          if exit_value is not None else nullcontext() as exit_info:
        twister_env.get_toolchain()

    if exit_info is not None:
        assert exit_info.value.code == exit_value
    else:
        assert expected_log in caplog.text
``` | /content/code_sandbox/scripts/tests/twister/test_environment.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 3,706 |
```python
#!/usr/bin/env python3
#
# pylint: disable=line-too-long
"""
Tests for testinstance class
"""
from contextlib import nullcontext
import os
import sys
import pytest
import mock
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib.statuses import TwisterStatus
from twisterlib.testinstance import TestInstance
from twisterlib.error import BuildError
from twisterlib.runner import TwisterRunner
from twisterlib.handlers import QEMUHandler
from expr_parser import reserved
# Columns: build_only, slow, harness, platform_type, platform_sim,
# device_testing, fixture, expected=(build_only, run).
TESTDATA_PART_1 = [
    (False, False, "console", "na", "qemu", False, [], (False, True)),
    (False, False, "console", "native", "qemu", False, [], (False, True)),
    (True, False, "console", "native", "nsim", False, [], (True, False)),
    (True, True, "console", "native", "renode", False, [], (True, False)),
    (False, False, "sensor", "native", "", False, [], (True, False)),
    (False, False, "sensor", "na", "", False, [], (True, False)),
    (False, True, "sensor", "native", "", True, [], (True, False)),
]
@pytest.mark.parametrize(
    "build_only, slow, harness, platform_type, platform_sim, device_testing,fixture, expected",
    TESTDATA_PART_1
)
def test_check_build_or_run(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    build_only,
    slow,
    harness,
    platform_type,
    platform_sim,
    device_testing,
    fixture,
    expected
):
    """" Test to check the conditions for build_only and run scenarios
    Scenario 1: Test when different parameters are passed, build_only and run are set correctly
    Scenario 2: Test if build_only is enabled when the OS is Windows"""
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/tests/'
                                              'test_a/test_a.check_1')
    print(testsuite)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    platform.type = platform_type
    platform.simulation = platform_sim
    testsuite.harness = harness
    testsuite.build_only = build_only
    testsuite.slow = slow
    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    run = testinstance.check_runnable(slow, device_testing, fixture)
    # Only the 'run' half of the expected tuple is checked here.
    _, r = expected
    assert run == r
    with mock.patch('os.name', 'nt'):
        # path to QEMU binary is not in QEMU_BIN_PATH environment variable
        run = testinstance.check_runnable()
        assert not run
        # mock path to QEMU binary in QEMU_BIN_PATH environment variable
        with mock.patch('os.environ', {'QEMU_BIN_PATH': ''}):
            run = testinstance.check_runnable()
            _, r = expected
            assert run == r
# Columns: enable_asan, enable_ubsan, enable_coverage, coverage_platform,
# platform_type, extra_configs, expected overlay content.
TESTDATA_PART_2 = [
    (True, True, True, ["demo_board_2"], "native",
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y\nCONFIG_UBSAN=y'),
    (True, False, True, ["demo_board_2"], "native",
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y'),
    (False, False, True, ["demo_board_2"], 'native',
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (True, False, True, ["demo_board_2"], 'mcu',
     None, '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
    (False, False, False, ["demo_board_2"], 'native', None, ''),
    (False, False, True, ['demo_board_1'], 'native', None, ''),
    (True, False, False, ["demo_board_2"], 'native', None, '\nCONFIG_ASAN=y'),
    (False, True, False, ["demo_board_2"], 'native', None, '\nCONFIG_UBSAN=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["arch:x86:CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["arch:arm:CONFIG_LOG=y"], ''),
    (False, False, False, ["demo_board_2"], 'native',
     ["platform:demo_board_2:CONFIG_LOG=y"], 'CONFIG_LOG=y'),
    (False, False, False, ["demo_board_2"], 'native',
     ["platform:demo_board_1:CONFIG_LOG=y"], ''),
]
@pytest.mark.parametrize(
    'enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type,'
    ' extra_configs, expected_content',
    TESTDATA_PART_2
)
def test_create_overlay(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    enable_asan,
    enable_ubsan,
    enable_coverage,
    coverage_platform,
    platform_type,
    extra_configs,
    expected_content
):
    """Test correct content is written to testcase_extra.conf based on if conditions."""
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/'
                                             'test_app/sample_test.app')
    # extra_configs may be scoped with 'arch:<arch>:' or 'platform:<name>:'
    # prefixes; only entries matching demo_board_2 (x86) should be emitted.
    if extra_configs:
        testcase.extra_configs = extra_configs
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testcase, platform, class_testplan.env.outdir)
    platform.type = platform_type
    assert testinstance.create_overlay(platform, enable_asan, enable_ubsan, enable_coverage, coverage_platform) == expected_content
def test_calculate_sizes(class_testplan, all_testsuites_dict, platforms_list):
    """calculate_sizes() raises BuildError when no ELF binary was built.

    The previous `assert testinstance.calculate_sizes() == "Missing/multiple
    output ELF binary"` inside the raises block was dead code: once the call
    raises, the comparison never executes. Asserting only the exception type
    keeps the test's real intent without the misleading no-op assert.
    """
    class_testplan.testsuites = all_testsuites_dict
    testcase = class_testplan.testsuites.get('scripts/tests/twister/test_data/testsuites/samples/'
                                             'test_app/sample_test.app')
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testcase, platform, class_testplan.env.outdir)
    # No build has been run, so no zephyr.elf exists under the build dir.
    with pytest.raises(BuildError):
        testinstance.calculate_sizes()
# (filter expression, expected CMake filter stages) pairs.
TESTDATA_PART_3 = [
    (
        'CONFIG_ARCH_HAS_THREAD_LOCAL_STORAGE and' \
        ' CONFIG_TOOLCHAIN_SUPPORTS_THREAD_LOCAL_STORAGE and' \
        ' not (CONFIG_TOOLCHAIN_ARCMWDT_SUPPORTS_THREAD_LOCAL_STORAGE and CONFIG_USERSPACE)',
        ['kconfig']
    ),
    (
        '(dt_compat_enabled("st,stm32-flash-controller") or' \
        ' dt_compat_enabled("st,stm32h7-flash-controller")) and' \
        ' dt_label_with_parent_compat_enabled("storage_partition", "fixed-partitions")',
        ['dts']
    ),
    (
        '((CONFIG_FLASH_HAS_DRIVER_ENABLED and not CONFIG_TRUSTED_EXECUTION_NONSECURE) and' \
        ' dt_label_with_parent_compat_enabled("storage_partition", "fixed-partitions")) or' \
        ' (CONFIG_FLASH_HAS_DRIVER_ENABLED and CONFIG_TRUSTED_EXECUTION_NONSECURE and' \
        ' dt_label_with_parent_compat_enabled("slot1_ns_partition", "fixed-partitions"))',
        ['dts', 'kconfig']
    ),
    (
        '((CONFIG_CPU_AARCH32_CORTEX_R or CONFIG_CPU_CORTEX_M) and' \
        ' CONFIG_CPU_HAS_FPU and TOOLCHAIN_HAS_NEWLIB == 1) or CONFIG_ARCH_POSIX',
        ['full']
    )
]
@pytest.mark.parametrize("filter_expr, expected_stages", TESTDATA_PART_3)
def test_which_filter_stages(filter_expr, expected_stages):
    """The required CMake filter stages are deduced from a filter expression."""
    detected = TwisterRunner.get_cmake_filter_stages(filter_expr, reserved.keys())
    assert sorted(detected) == sorted(expected_stages)
@pytest.fixture(name='testinstance')
def sample_testinstance(all_testsuites_dict, class_testplan, platforms_list, request):
    """Build a TestInstance for the requested testsuite kind ('sample' or 'tests').

    The indirect parametrization dict may also carry a 'board_name' key;
    demo_board_2 is used when it is absent.
    """
    suffix_by_kind = {
        'sample': '/samples/test_app/sample_test.app',
        'tests': '/tests/test_a/test_a.check_1',
    }
    suite_path = 'scripts/tests/twister/test_data/testsuites'
    suite_path += suffix_by_kind.get(request.param['testsuite_kind'], '')
    class_testplan.testsuites = all_testsuites_dict
    suite = class_testplan.testsuites.get(suite_path)
    class_testplan.platforms = platforms_list
    board = class_testplan.get_platform(request.param.get('board_name', 'demo_board_2'))
    return TestInstance(suite, board, class_testplan.env.outdir)
# detailed_test_id values for test_testinstance_init.
TESTDATA_1 = [
    (False),
    (True),
]
@pytest.mark.parametrize('detailed_test_id', TESTDATA_1)
def test_testinstance_init(all_testsuites_dict, class_testplan, platforms_list, detailed_test_id):
    """TestInstance's build_dir layout depends on the detailed_test_id flag."""
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    testsuite.detailed_test_id = detailed_test_id
    class_testplan.platforms = platforms_list
    print(class_testplan.platforms)
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    # detailed ids use the full testsuite path; short ids use the relative
    # source dir plus the suite name.
    if detailed_test_id:
        assert testinstance.build_dir == os.path.join(class_testplan.env.outdir, platform.name, testsuite_path)
    else:
        assert testinstance.build_dir == os.path.join(class_testplan.env.outdir, platform.name, testsuite.source_dir_rel, testsuite.name)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_record(testinstance):
    """record() writes a header row and all recorded rows to recording.csv."""
    testinstance.testcases = [mock.Mock()]
    recording = [ {'field_1': 'recording_1_1', 'field_2': 'recording_1_2'},
                  {'field_1': 'recording_2_1', 'field_2': 'recording_2_2'}
                ]
    # Patch open plus csv.DictWriter's writerow/writerows so no file I/O
    # happens; the writerow patch captures the header write.
    with mock.patch(
        'builtins.open',
        mock.mock_open(read_data='')
    ) as mock_file, \
    mock.patch(
        'csv.DictWriter.writerow',
        mock.Mock()
    ) as mock_writeheader, \
    mock.patch(
        'csv.DictWriter.writerows',
        mock.Mock()
    ) as mock_writerows:
        testinstance.record(recording)
    print(mock_file.mock_calls)
    mock_file.assert_called_with(
        os.path.join(testinstance.build_dir, 'recording.csv'),
        'wt'
    )
    # The header row maps each fieldname to itself (DictWriter.writeheader()
    # internally calls writerow with that mapping).
    mock_writeheader.assert_has_calls([mock.call({ k:k for k in recording[0]})])
    mock_writerows.assert_has_calls([mock.call(recording)])
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_add_filter(testinstance):
    """add_filter() records the filter entry and moves the instance to FILTER."""
    why = 'dummy reason'
    kind = 'dummy type'
    testinstance.add_filter(why, kind)
    assert {'type': kind, 'reason': why} in testinstance.filters
    assert testinstance.status == TwisterStatus.FILTER
    assert testinstance.reason == why
    assert testinstance.filter_type == kind
def test_testinstance_init_cases(all_testsuites_dict, class_testplan, platforms_list):
    """init_cases() mirrors every suite testcase onto the instance's testsuite."""
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    testinstance.init_cases()
    # Each source testcase must have a (name, freeform) counterpart on the
    # instance's testsuite.
    for tc in testsuite.testcases:
        assert any(
            tcc.name == tc.name and tcc.freeform == tc.freeform
            for tcc in testinstance.testsuite.testcases
        )
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'sample'}], indirect=True)
def test_testinstance_get_run_id(testinstance):
    """_get_run_id() yields a string identifier."""
    run_id = testinstance._get_run_id()
    assert isinstance(run_id, str)
# (reason argument, reason expected on the updated testcase) pairs: a None
# reason falls back to the instance's own reason.
TESTDATA_2 = [
    ('another reason', 'another reason'),
    (None, 'dummy reason'),
]
@pytest.mark.parametrize('reason, expected_reason', TESTDATA_2)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_add_missing_case_status(testinstance, reason, expected_reason):
    """add_missing_case_status() fails STARTED cases and fills in NONE cases."""
    testinstance.reason = 'dummy reason'
    status = TwisterStatus.PASS
    assert len(testinstance.testcases) > 1, 'Selected testsuite does not have enough testcases.'
    testinstance.testcases[0].status = TwisterStatus.STARTED
    testinstance.testcases[-1].status = TwisterStatus.NONE
    testinstance.add_missing_case_status(status, reason)
    # A case left in STARTED is considered failed; a NONE case takes the
    # supplied status and reason.
    assert testinstance.testcases[0].status == TwisterStatus.FAIL
    assert testinstance.testcases[-1].status == TwisterStatus.PASS
    assert testinstance.testcases[-1].reason == expected_reason
def test_testinstance_dunders(all_testsuites_dict, class_testplan, platforms_list):
    """__getstate__/__setstate__, ordering (__lt__) and __repr__ behave sanely."""
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
    testinstance_copy = TestInstance(testsuite, platform, class_testplan.env.outdir)
    # Round-trip the state dict, giving the two instances different names so
    # the name-based ordering can be exercised ('another name' < 'dummy name').
    d = testinstance.__getstate__()
    d['name'] = 'dummy name'
    testinstance_copy.__setstate__(d)
    d['name'] = 'another name'
    testinstance.__setstate__(d)
    assert testinstance < testinstance_copy
    # With identical names neither instance orders before the other.
    testinstance_copy.__setstate__(d)
    assert not testinstance < testinstance_copy
    assert not testinstance_copy < testinstance
    assert testinstance.__repr__() == f'<TestSuite {testsuite_path} on demo_board_2>'
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_set_case_status_by_name(testinstance):
    """set_case_status_by_name() updates a case; a None reason keeps the old one."""
    case_name = 'test_a.check_1.2a'
    new_status = TwisterStatus.PASS
    new_reason = 'dummy reason'
    case = testinstance.set_case_status_by_name(case_name, new_status, new_reason)
    assert case.name == case_name
    assert case.status == new_status
    assert case.reason == new_reason
    # Passing None as the reason must not clobber the previously set one.
    case = testinstance.set_case_status_by_name(case_name, new_status, None)
    assert case.reason == new_reason
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_add_testcase(testinstance):
    """add_testcase() appends the freshly created case to the instance."""
    created = testinstance.add_testcase('test_a.check_1.3a', True)
    assert created in testinstance.testcases
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_case_by_name(testinstance):
    """get_case_by_name() returns the matching case, or None when absent."""
    existing = testinstance.get_case_by_name('test_a.check_1.2a')
    assert existing.name == 'test_a.check_1.2a'
    missing = testinstance.get_case_by_name('test_a.check_1.3a')
    assert missing is None
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_case_or_create(caplog, testinstance):
    """get_case_or_create() reuses known cases and creates (logging) unknown ones."""
    found = testinstance.get_case_or_create('test_a.check_1.2a')
    assert found.name == 'test_a.check_1.2a'
    created = testinstance.get_case_or_create('test_a.check_1.3a')
    assert created.name == 'test_a.check_1.3a'
    assert 'Could not find a matching testcase for test_a.check_1.3a' in caplog.text
# (fixture, harness, expected_can_run) scenarios for testsuite_runnable().
TESTDATA_3 = [
    (None, 'nonexistent harness', False),
    ('nonexistent fixture', 'console', False),
    (None, 'console', True),
    ('dummy fixture', 'console', True),
]
@pytest.mark.parametrize(
    'fixture, harness, expected_can_run',
    TESTDATA_3,
    ids=['improper harness', 'fixture not in list', 'no fixture specified', 'fixture in list']
)
def test_testinstance_testsuite_runnable(
    all_testsuites_dict,
    class_testplan,
    fixture,
    harness,
    expected_can_run
):
    """testsuite_runnable() honours the harness type and fixture availability."""
    testsuite_path = 'scripts/tests/twister/test_data/testsuites/samples/test_app/sample_test.app'
    class_testplan.testsuites = all_testsuites_dict
    testsuite = class_testplan.testsuites.get(testsuite_path)
    testsuite.harness = harness
    testsuite.harness_config['fixture'] = fixture
    fixtures = ['dummy fixture']
    # Note: a stray line-continuation backslash after this call previously
    # glued the assert onto the same logical line, making the module
    # unparseable (SyntaxError).
    can_run = TestInstance.testsuite_runnable(testsuite, fixtures)
    assert can_run == expected_can_run
# Columns: preexisting_handler, device_testing, platform_sim, testsuite_type,
# expected_handler_type, expected_handler_args, expected_handler_ready.
TESTDATA_4 = [
    (True, mock.ANY, mock.ANY, mock.ANY, None, [], False),
    (False, True, mock.ANY, mock.ANY, 'device', [], True),
    (False, False, 'qemu', mock.ANY, 'qemu', ['QEMU_PIPE=1'], True),
    (False, False, 'dummy sim', mock.ANY, 'dummy sim', [], True),
    (False, False, 'na', 'unit', 'unit', ['COVERAGE=1'], True),
    (False, False, 'na', 'dummy type', '', [], False),
]
@pytest.mark.parametrize(
    'preexisting_handler, device_testing, platform_sim, testsuite_type,' \
    ' expected_handler_type, expected_handler_args, expected_handler_ready',
    TESTDATA_4,
    ids=['preexisting handler', 'device testing', 'qemu simulation',
         'non-qemu simulation with exec', 'unit teting', 'no handler']
)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_setup_handler(
    testinstance,
    preexisting_handler,
    device_testing,
    platform_sim,
    testsuite_type,
    expected_handler_type,
    expected_handler_args,
    expected_handler_ready
):
    """setup_handler() selects the right handler type and args per scenario."""
    # A preexisting handler must be left untouched (expected type None).
    testinstance.handler = mock.Mock() if preexisting_handler else None
    testinstance.platform.simulation = platform_sim
    testinstance.platform.simulation_exec = 'dummy exec'
    testinstance.testsuite.type = testsuite_type
    env = mock.Mock(
        options=mock.Mock(
            device_testing=device_testing,
            enable_coverage=True
        )
    )
    # get_fifo and shutil.which are stubbed so no real QEMU/simulator
    # binaries are needed.
    with mock.patch.object(QEMUHandler, 'get_fifo', return_value=1), \
         mock.patch('shutil.which', return_value=True):
        testinstance.setup_handler(env)
    if expected_handler_type:
        assert testinstance.handler.type_str == expected_handler_type
        assert testinstance.handler.ready == expected_handler_ready
        assert all([arg in testinstance.handler.args for arg in expected_handler_args])
# Columns: os_name, platform_sim, platform_sim_exec, exec_exists,
# testsuite_build_only, testsuite_slow, testsuite_harness, enable_slow,
# filter, fixtures, hardware_map, expected runnable result.
TESTDATA_5 = [
    ('nt', 'renode', mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', mock.ANY, mock.ANY, mock.ANY,
     True, mock.ANY, mock.ANY,
     mock.ANY, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', mock.ANY, mock.ANY, mock.ANY,
     False, True, mock.ANY,
     False, mock.ANY, mock.ANY, mock.ANY, False),
    ('linux', 'qemu', mock.ANY, mock.ANY,
     False, mock.ANY, 'pytest',
     mock.ANY, 'not runnable', mock.ANY, None, True),
    ('linux', 'renode', 'renode', True,
     False, mock.ANY, 'console',
     mock.ANY, 'not runnable', [], None, True),
    ('linux', 'renode', 'renode', False,
     False, mock.ANY, 'not pytest',
     mock.ANY, 'not runnable', mock.ANY, None, False),
    ('linux', 'qemu', mock.ANY, mock.ANY,
     False, mock.ANY, 'console',
     mock.ANY, 'not runnable', ['?'], mock.Mock(duts=[mock.Mock(platform='demo_board_2', fixtures=[])]), True),
]
@pytest.mark.parametrize(
    'os_name, platform_sim, platform_sim_exec, exec_exists,' \
    ' testsuite_build_only, testsuite_slow, testsuite_harness,' \
    ' enable_slow, filter, fixtures, hardware_map, expected',
    TESTDATA_5,
    ids=['windows', 'build only', 'skip slow', 'pytest harness', 'sim', 'no sim', 'hardware map']
)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_check_runnable(
    testinstance,
    os_name,
    platform_sim,
    platform_sim_exec,
    exec_exists,
    testsuite_build_only,
    testsuite_slow,
    testsuite_harness,
    enable_slow,
    filter,
    fixtures,
    hardware_map,
    expected
):
    """check_runnable() combines OS, simulator, suite flags and hardware map."""
    testinstance.platform.simulation = platform_sim
    testinstance.platform.simulation_exec = platform_sim_exec
    testinstance.testsuite.build_only = testsuite_build_only
    testinstance.testsuite.slow = testsuite_slow
    testinstance.testsuite.harness = testsuite_harness
    # os.name and shutil.which are patched so the scenario fully controls
    # the environment the check sees.
    with mock.patch('os.name', os_name), \
         mock.patch('shutil.which', return_value=exec_exists):
        res = testinstance.check_runnable(enable_slow, filter, fixtures, hardware_map)
    assert res == expected
# (from_buildlog, buildlog path expected to be forwarded) pairs.
TESTDATA_6 = [
    (True, 'build.log'),
    (False, ''),
]
@pytest.mark.parametrize('from_buildlog, expected_buildlog_filepath', TESTDATA_6)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_calculate_sizes(testinstance, from_buildlog, expected_buildlog_filepath):
    """calculate_sizes() forwards ELF/buildlog paths to SizeCalculator."""
    expected_elf_filepath = 'dummy.elf'
    expected_extra_sections = []
    expected_warning = True
    # Stub out the file lookups so no real build output is required.
    testinstance.get_elf_file = mock.Mock(return_value='dummy.elf')
    testinstance.get_buildlog_file = mock.Mock(return_value='build.log')
    sc_mock = mock.Mock()
    mock_sc = mock.Mock(return_value=sc_mock)
    with mock.patch('twisterlib.testinstance.SizeCalculator', mock_sc):
        res = testinstance.calculate_sizes(from_buildlog, expected_warning)
    assert res == sc_mock
    mock_sc.assert_called_once_with(
        elf_filename=expected_elf_filepath,
        extra_sections=expected_extra_sections,
        buildlog_filepath=expected_buildlog_filepath,
        generate_warning=expected_warning
    )
# (sysbuild, expected exception) pairs: without sysbuild no ELF is found
# under the instance's build dir and a BuildError is expected.
TESTDATA_7 = [
    (True, None),
    (False, BuildError),
]
@pytest.mark.parametrize('sysbuild, expected_error', TESTDATA_7)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_elf_file(caplog, tmp_path, testinstance, sysbuild, expected_error):
    """get_elf_file() finds sysbuild ELFs (warning on duplicates) or raises."""
    sysbuild_dir = tmp_path / 'sysbuild'
    sysbuild_dir.mkdir()
    zephyr_dir = sysbuild_dir / 'zephyr'
    zephyr_dir.mkdir()
    # Two ELF files so the multiple-ELF warning path is exercised.
    sysbuild_elf = zephyr_dir / 'dummy.elf'
    sysbuild_elf.write_bytes(b'0')
    sysbuild_elf2 = zephyr_dir / 'dummy2.elf'
    sysbuild_elf2.write_bytes(b'0')
    testinstance.sysbuild = sysbuild
    testinstance.domains = mock.Mock(
        get_default_domain=mock.Mock(
            return_value=mock.Mock(
                build_dir=sysbuild_dir
            )
        )
    )
    with pytest.raises(expected_error) if expected_error else nullcontext():
        testinstance.get_elf_file()
    if expected_error is None:
        assert 'multiple ELF files detected: ' in caplog.text
# (create_build_log, expected exception) pairs: a missing build.log must
# raise BuildError.
TESTDATA_8 = [
    (True, None),
    (False, BuildError),
]
@pytest.mark.parametrize('create_build_log, expected_error', TESTDATA_8)
@pytest.mark.parametrize('testinstance', [{'testsuite_kind': 'tests'}], indirect=True)
def test_testinstance_get_buildlog_file(tmp_path, testinstance, create_build_log, expected_error):
    """get_buildlog_file() returns the build.log path or raises when absent."""
    if create_build_log:
        build_dir = tmp_path / 'build'
        build_dir.mkdir()
        build_log = build_dir / 'build.log'
        build_log.write_text('')
        testinstance.build_dir = build_dir
    with pytest.raises(expected_error) if expected_error else nullcontext():
        res = testinstance.get_buildlog_file()
    if expected_error is None:
        assert res == str(build_log)
``` | /content/code_sandbox/scripts/tests/twister/test_testinstance.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,581 |
```python
#!/usr/bin/env python3
"""
This test file contains testsuites for the Harness classes of twister
"""
import mock
import sys
import os
import pytest
import re
import logging as logger
#ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
from conftest import ZEPHYR_BASE
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
from twisterlib.harness import (
Bsim,
Console,
Gtest,
Harness,
HarnessImporter,
Pytest,
PytestHarnessException,
Robot,
Test
)
from twisterlib.statuses import TwisterStatus
from twisterlib.testinstance import TestInstance
GTEST_START_STATE = " RUN "
GTEST_PASS_STATE = " OK "
GTEST_SKIP_STATE = " DISABLED "
GTEST_FAIL_STATE = " FAILED "
SAMPLE_GTEST_START = (
"[00:00:00.000,000] [0m<inf> label: [==========] Running all tests.[0m"
)
SAMPLE_GTEST_FMT = "[00:00:00.000,000] [0m<inf> label: [{state}] {suite}.{test} (0ms)[0m"
SAMPLE_GTEST_END = (
"[00:00:00.000,000] [0m<inf> label: [==========] Done running all tests.[0m"
)
def process_logs(harness, logs):
    """Feed each log line, in order, to the harness' handle() hook."""
    for log_line in logs:
        harness.handle(log_line)
# Columns: input lines, record regex, expected parsed records, as_json field
# list (None means the harness config has no 'as_json' key at all).
TEST_DATA_RECORDING = [
    ([''], "^START:(?P<foo>.*):END", [], None),
    (['START:bar:STOP'], "^START:(?P<foo>.*):END", [], None),
    (['START:bar:END'], "^START:(?P<foo>.*):END", [{'foo':'bar'}], None),
    (['START:bar:baz:END'], "^START:(?P<foo>.*):(?P<boo>.*):END", [{'foo':'bar', 'boo':'baz'}], None),
    (['START:bar:baz:END','START:may:jun:END'], "^START:(?P<foo>.*):(?P<boo>.*):END",
     [{'foo':'bar', 'boo':'baz'}, {'foo':'may', 'boo':'jun'}], None),
    (['START:bar:END'], "^START:(?P<foo>.*):END", [{'foo':'bar'}], []),
    (['START:bar:END'], "^START:(?P<foo>.*):END", [{'foo':'bar'}], ['boo']),
    (['START:bad_json:END'], "^START:(?P<foo>.*):END",
     [{'foo':{'ERROR':{'msg':'Expecting value: line 1 column 1 (char 0)', 'doc':'bad_json'}}}], ['foo']),
    (['START::END'], "^START:(?P<foo>.*):END", [{'foo':{}}], ['foo']),
    (['START: {"one":1, "two":2} :END'], "^START:(?P<foo>.*):END", [{'foo':{'one':1, 'two':2}}], ['foo']),
    (['START: {"one":1, "two":2} :STOP:oops:END'], "^START:(?P<foo>.*):STOP:(?P<boo>.*):END",
     [{'foo':{'one':1, 'two':2},'boo':'oops'}], ['foo']),
    (['START: {"one":1, "two":2} :STOP:{"oops":0}:END'], "^START:(?P<foo>.*):STOP:(?P<boo>.*):END",
     [{'foo':{'one':1, 'two':2},'boo':{'oops':0}}], ['foo','boo']),
]
@pytest.mark.parametrize(
    "lines, pattern, expected_records, as_json",
    TEST_DATA_RECORDING,
    ids=["empty", "no match", "match 1 field", "match 2 fields", "match 2 records",
         "as_json empty", "as_json no such field", "error parsing json", "empty json value", "simple json",
         "plain field and json field", "two json fields"
    ]
)
def test_harness_parse_record(lines, pattern, expected_records, as_json):
    """Harness.parse_record() extracts records, optionally JSON-decoding fields."""
    harness = Harness()
    harness.record = { 'regex': pattern }
    harness.record_pattern = re.compile(pattern)
    harness.record_as_json = as_json
    if as_json is not None:
        harness.record['as_json'] = as_json
    assert not harness.recording
    for line in lines:
        harness.parse_record(line)
    assert harness.recording == expected_records
# Columns: input line, fault, fail_on_fault, expected capture_coverage,
# expected harness status, expected matched_run_id.
TEST_DATA_1 = [('RunID: 12345', False, False, False, TwisterStatus.NONE, True),
               ('PROJECT EXECUTION SUCCESSFUL', False, False, False, TwisterStatus.PASS, False),
               ('PROJECT EXECUTION SUCCESSFUL', True, False, False, TwisterStatus.FAIL, False),
               ('PROJECT EXECUTION FAILED', False, False, False, TwisterStatus.FAIL, False),
               ('ZEPHYR FATAL ERROR', False, True, False, TwisterStatus.NONE, False),
               ('GCOV_COVERAGE_DUMP_START', None, None, True, TwisterStatus.NONE, False),
               ('GCOV_COVERAGE_DUMP_END', None, None, False, TwisterStatus.NONE, False),]
@pytest.mark.parametrize(
    "line, fault, fail_on_fault, cap_cov, exp_stat, exp_id",
    TEST_DATA_1,
    ids=["match id", "passed passed", "passed failed", "failed failed", "fail on fault", "GCOV START", "GCOV END"]
)
def test_harness_process_test(line, fault, fail_on_fault, cap_cov, exp_stat, exp_id):
    """Harness.process_test() updates status/run-id/coverage flags per line."""
    #Arrange
    harness = Harness()
    harness.run_id = 12345
    harness.status = TwisterStatus.NONE
    harness.fault = fault
    harness.fail_on_fault = fail_on_fault
    # NOTE(review): this patch is neither started nor used as a context
    # manager, so parse_record is NOT actually stubbed — confirm intent.
    mock.patch.object(Harness, 'parse_record', return_value=None)
    #Act
    harness.process_test(line)
    #Assert
    assert harness.matched_run_id == exp_id
    assert harness.status == exp_stat
    assert harness.capture_coverage == cap_cov
    assert harness.recording == []
def test_robot_configure(tmp_path):
    """Robot.configure() reads testsuite path and option from harness_config."""
    # Arrange: a minimal platform/testsuite pair backing a TestInstance.
    platform_mock = mock.Mock()
    platform_mock.name = "mock_platform"
    platform_mock.normalized_name = "mock_platform"
    suite_mock = mock.Mock(id = 'id', testcases = [])
    suite_mock.name = "mock_testsuite"
    suite_mock.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=suite_mock, platform=platform_mock, outdir=outdir)
    instance.testsuite.harness_config = {
        'robot_testsuite': '/path/to/robot/test',
        'robot_option': 'test_option'
    }
    harness = Robot()
    # Act
    harness.configure(instance)
    # Assert: both harness_config values were picked up.
    assert harness.instance == instance
    assert harness.path == '/path/to/robot/test'
    assert harness.option == 'test_option'
def test_robot_handle(tmp_path):
    """Robot.handle() marks the instance and its testcase as passed."""
    #Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    handler = Robot()
    handler.instance = instance
    handler.id = 'test_case_1'
    line = 'Test case passed'
    #Act
    handler.handle(line)
    tc = instance.get_case_or_create('test_case_1')
    #Assert
    assert instance.status == TwisterStatus.PASS
    assert tc.status == TwisterStatus.PASS
# (expected log output, subprocess return code, expected instance status).
TEST_DATA_2 = [
    ("", 0, TwisterStatus.PASS),
    ("Robot test failure: sourcedir for mock_platform", 1, TwisterStatus.FAIL),
]
@pytest.mark.parametrize(
    "exp_out, returncode, expected_status",
    TEST_DATA_2,
    ids=["passed", "failed"]
)
def test_robot_run_robot_test(tmp_path, caplog, exp_out, returncode, expected_status):
    """Robot.run_robot_test() maps the subprocess exit code onto the instance status."""
    # Arrange
    command = ["command"]
    handler = mock.Mock()
    handler.sourcedir = "sourcedir"
    handler.log = "handler.log"
    path = "path"
    option = "option"
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [mock.Mock()])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.build_dir = "build_dir"
    open_mock = mock.mock_open()
    robot = Robot()
    robot.path = path
    robot.option = option
    robot.instance = instance
    # Fake Popen whose context manager yields a process with the parametrized
    # return code and fixed stdout bytes.
    proc_mock = mock.Mock(
        returncode = returncode,
        communicate = mock.Mock(return_value=(b"output", None))
    )
    popen_mock = mock.Mock(return_value = mock.Mock(
        __enter__ = mock.Mock(return_value = proc_mock),
        __exit__ = mock.Mock()
    ))
    # Act
    # NOTE(review): 'as mock.mock_popen' assigns an attribute on the mock
    # module instead of a local name; the target is never used afterwards.
    with mock.patch("subprocess.Popen", popen_mock) as mock.mock_popen, \
    mock.patch("builtins.open", open_mock):
        robot.run_robot_test(command,handler)
    # Assert
    assert instance.status == expected_status
    open_mock().write.assert_called_once_with("output")
    assert exp_out in caplog.text
# (harness type, expected number of compiled patterns; None = single pattern).
TEST_DATA_3 = [('one_line', None), ('multi_line', 2),]
@pytest.mark.parametrize(
    "type, num_patterns",
    TEST_DATA_3,
    ids=["one line", "multi line"]
)
def test_console_configure(tmp_path, type, num_patterns):
    """Console.configure() compiles one pattern or a list depending on type."""
    #Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.testsuite.harness_config = {
        'type': type,
        'regex': ['pattern1', 'pattern2']
    }
    console = Console()
    #Act
    console.configure(instance)
    #Assert
    # 'multi_line' keeps all regexes; 'one_line' keeps only the first.
    if num_patterns == 2:
        assert len(console.patterns) == num_patterns
        assert [pattern.pattern for pattern in console.patterns] == ['pattern1', 'pattern2']
    else:
        assert console.pattern.pattern == 'pattern1'
# Columns: harness type, ordered flag, expected status after pattern lines,
# first input line, expected fault flag, expected capture_coverage flag.
TEST_DATA_4 = [("one_line", True, TwisterStatus.PASS, "line", False, False),
               ("multi_line", True, TwisterStatus.PASS, "line", False, False),
               ("multi_line", False, TwisterStatus.PASS, "line", False, False),
               ("invalid_type", False, TwisterStatus.NONE, "line", False, False),
               ("invalid_type", False, TwisterStatus.NONE, "ERROR", True, False),
               ("invalid_type", False, TwisterStatus.NONE, "COVERAGE_START", False, True),
               ("invalid_type", False, TwisterStatus.NONE, "COVERAGE_END", False, False)]
@pytest.mark.parametrize(
    "line_type, ordered_val, exp_state, line, exp_fault, exp_capture",
    TEST_DATA_4,
    ids=["one line", "multi line ordered", "multi line not ordered", "logger error", "fail on fault", "GCOV START", "GCOV END"]
)
def test_console_handle(tmp_path, line_type, ordered_val, exp_state, line, exp_fault, exp_capture):
    """Console.handle() matches patterns and tracks fault/coverage markers."""
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    console = Console()
    console.instance = instance
    console.type = line_type
    console.patterns = [re.compile("pattern1"), re.compile("pattern2")]
    console.pattern = re.compile("pattern")
    console.patterns_expected = 0
    console.status = TwisterStatus.NONE
    console.fail_on_fault = True
    console.FAULT = "ERROR"
    console.GCOV_START = "COVERAGE_START"
    console.GCOV_END = "COVERAGE_END"
    console.record = {"regex": "RESULT: (.*)"}
    console.fieldnames = []
    console.recording = []
    console.regex = ["regex1", "regex2"]
    console.id = "test_case_1"
    instance.get_case_or_create('test_case_1')
    instance.testsuite.id = "test_suite_1"
    console.next_pattern = 0
    console.ordered = ordered_val
    line = line
    console.handle(line)
    line1 = "pattern1"
    line2 = "pattern2"
    console.handle(line1)
    console.handle(line2)
    assert console.status == exp_state
    # NOTE(review): if handle(line) raises here, the inner assert never runs;
    # also 'logger' is the logging module, which has no 'error.called'
    # attribute — that AttributeError is what satisfies pytest.raises.
    # Confirm this is the intended failure mode.
    with pytest.raises(Exception):
        console.handle(line)
        assert logger.error.called
    assert console.fault == exp_fault
    assert console.capture_coverage == exp_capture
# (serial_pty value, hardware-availability selector): 1 means get_hardware
# returns None and the harness must raise.
TEST_DATA_5 = [("serial_pty", 0), (None, 0),(None, 1)]
@pytest.mark.parametrize(
    "pty_value, hardware_value",
    TEST_DATA_5,
    ids=["hardware pty", "hardware", "non hardware"]
)
def test_pytest__generate_parameters_for_hardware(tmp_path, pty_value, hardware_value):
    """_generate_parameters_for_hardware() builds the device CLI args or raises."""
    #Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    handler = mock.Mock()
    handler.instance = instance
    # Fully populated hardware description: serial, runner, fixtures and the
    # pre/post scripts all must surface as CLI options.
    hardware = mock.Mock()
    hardware.serial_pty = pty_value
    hardware.serial = 'serial'
    hardware.baud = 115200
    hardware.runner = "runner"
    hardware.runner_params = ["--runner-param1", "runner-param2"]
    hardware.fixtures = ['fixture1:option1', 'fixture2']
    options = handler.options
    options.west_flash = "args"
    hardware.probe_id = '123'
    hardware.product = 'product'
    hardware.pre_script = 'pre_script'
    hardware.post_flash_script = 'post_flash_script'
    hardware.post_script = 'post_script'
    pytest_test = Pytest()
    pytest_test.configure(instance)
    #Act
    if hardware_value == 0:
        handler.get_hardware.return_value = hardware
        command = pytest_test._generate_parameters_for_hardware(handler)
    else:
        handler.get_hardware.return_value = None
    #Assert
    if hardware_value == 1:
        with pytest.raises(PytestHarnessException) as exinfo:
            pytest_test._generate_parameters_for_hardware(handler)
        assert str(exinfo.value) == 'Hardware is not available'
    else:
        assert '--device-type=hardware' in command
        # A serial PTY takes precedence over a plain serial device.
        if pty_value == "serial_pty":
            assert '--device-serial-pty=serial_pty' in command
        else:
            assert '--device-serial=serial' in command
            assert '--device-serial-baud=115200' in command
        assert '--runner=runner' in command
        assert '--runner-params=--runner-param1' in command
        assert '--runner-params=runner-param2' in command
        assert '--west-flash-extra-args=args' in command
        assert '--device-id=123' in command
        assert '--device-product=product' in command
        assert '--pre-script=pre_script' in command
        assert '--post-flash-script=post_flash_script' in command
        assert '--post-script=post_script' in command
        assert '--twister-fixture=fixture1:option1' in command
        assert '--twister-fixture=fixture2' in command
def test__update_command_with_env_dependencies():
    """_update_command_with_env_dependencies() should append the twister
    harness plugin to the pytest command when the plugin is not installed.

    Bug fix: the original test created a mock.patch.object(...) patcher
    without ever starting it, so PYTEST_PLUGIN_INSTALLED was never actually
    patched.  Use the context-manager form so the attribute is reliably
    forced to False for the duration of the call.
    """
    cmd = ['cmd']
    pytest_test = Pytest()
    with mock.patch.object(Pytest, 'PYTEST_PLUGIN_INSTALLED', False):
        # Act
        result_cmd, _ = pytest_test._update_command_with_env_dependencies(cmd)
    # Assert
    assert result_cmd == ['cmd', '-p', 'twister_harness.plugin']
def test_pytest_run(tmp_path, caplog):
    """pytest_run() must mark the instance as failed and log an error for a
    handler type the pytest harness does not support.

    Bug fix: the original test built an autospec'd Pytest mock and created two
    mock.patch.object(...) patchers without ever starting them; all three were
    dead code with no effect on the outcome (the real generate_command() is
    what rejects the unsupported handler type), so they have been removed.
    """
    # Arrange
    timeout = 10
    exp_out = 'Support for handler handler_type not implemented yet'
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [], source_dir = 'source_dir', harness_config = {})
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    handler = mock.Mock(
        options = mock.Mock(verbose= 0),
        type_str = 'handler_type'  # deliberately not a supported handler type
    )
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    instance.handler = handler
    test_obj = Pytest()
    test_obj.configure(instance)
    # Act
    test_obj.pytest_run(timeout)
    # Assert
    assert test_obj.status == TwisterStatus.FAIL
    assert exp_out in caplog.text
# Both an unset name and the explicit 'Test' name must map to the generic
# Test harness.  Note: (None) and ('Test') are not tuples, just scalars.
TEST_DATA_6 = [None, 'Test']
@pytest.mark.parametrize(
    "name",
    TEST_DATA_6,
    ids=["no name", "provided name"]
)
def test_get_harness(name):
    """HarnessImporter.get_harness() should return a Test harness instance
    for both a missing and an explicit 'Test' harness name."""
    importer = HarnessImporter()
    harness = importer.get_harness(name)
    assert isinstance(harness, Test)
# (expected log output, console line, expected detected suites,
#  expected testcase status, ztest flag, pre-set harness status)
TEST_DATA_7 = [("", "Running TESTSUITE suite_name", ['suite_name'], TwisterStatus.NONE, True, TwisterStatus.NONE),
               ("", "START - test_testcase", [], TwisterStatus.STARTED, True, TwisterStatus.NONE),
               ("", "PASS - test_example in 0 seconds", [], TwisterStatus.PASS, True, TwisterStatus.NONE),
               ("", "SKIP - test_example in 0 seconds", [], TwisterStatus.SKIP, True, TwisterStatus.NONE),
               ("", "FAIL - test_example in 0 seconds", [], TwisterStatus.FAIL, True, TwisterStatus.NONE),
               ("not a ztest and no state for test_id", "START - test_testcase", [], TwisterStatus.PASS, False, TwisterStatus.PASS),
               ("not a ztest and no state for test_id", "START - test_testcase", [], TwisterStatus.FAIL, False, TwisterStatus.FAIL)]
@pytest.mark.parametrize(
    "exp_out, line, exp_suite_name, exp_status, ztest, state",
    TEST_DATA_7,
    ids=['testsuite', 'testcase', 'pass', 'skip', 'failed', 'ztest pass', 'ztest fail']
)
def test_test_handle(tmp_path, caplog, exp_out, line, exp_suite_name, exp_status, ztest, state):
    """Test.handle() should track detected suites and propagate testcase
    status for both ztest and non-ztest console lines.

    Cleanup: the original contained a no-op ``line = line`` statement and
    assigned ``test_obj.id`` twice; both redundancies removed.
    """
    # Arrange
    mock_platform = mock.Mock()
    mock_platform.name = "mock_platform"
    mock_platform.normalized_name = "mock_platform"
    mock_testsuite = mock.Mock(id = 'id', testcases = [])
    mock_testsuite.name = "mock_testsuite"
    mock_testsuite.harness_config = {}
    outdir = tmp_path / 'gtest_out'
    outdir.mkdir()
    instance = TestInstance(testsuite=mock_testsuite, platform=mock_platform, outdir=outdir)
    test_obj = Test()
    test_obj.configure(instance)
    test_obj.id = 'test_id'
    test_obj.ztest = ztest
    test_obj.status = state
    #Act
    test_obj.handle(line)
    # Assert
    assert test_obj.detected_suite_names == exp_suite_name
    assert exp_out in caplog.text
    if not "Running" in line and exp_out == "":
        assert test_obj.instance.testcases[0].status == exp_status
    if "ztest" in exp_out:
        assert test_obj.instance.testcases[1].status == exp_status
@pytest.fixture
def gtest(tmp_path):
    """Return a Gtest harness configured against a minimal mocked instance."""
    platform = mock.Mock()
    platform.name = "mock_platform"
    platform.normalized_name = "mock_platform"

    testsuite = mock.Mock()
    testsuite.name = "mock_testsuite"
    testsuite.detailed_test_id = True
    testsuite.id = "id"
    testsuite.testcases = []
    testsuite.harness_config = {}

    out_dir = tmp_path / 'gtest_out'
    out_dir.mkdir()

    instance = TestInstance(testsuite=testsuite, platform=platform, outdir=out_dir)
    harness = Gtest()
    harness.configure(instance)
    return harness
def test_gtest_start_test_no_suites_detected(gtest):
    """A bare gtest banner alone must detect no suites and leave the harness
    status unresolved."""
    process_logs(gtest, [SAMPLE_GTEST_START])
    assert not gtest.detected_suite_names
    assert gtest.status == TwisterStatus.NONE
def test_gtest_start_test(gtest):
    """A RUN line should register the suite and mark the case as started."""
    logs = [
        SAMPLE_GTEST_START,
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test_name"
        ),
    ]
    process_logs(gtest, logs)

    assert gtest.status == TwisterStatus.NONE
    assert gtest.detected_suite_names == ["suite_name"]
    case = gtest.instance.get_case_by_name("id.suite_name.test_name")
    assert case is not None
    assert case.status == TwisterStatus.STARTED
def test_gtest_pass(gtest):
    """A started test that then passes is recorded as PASS; the harness
    status itself stays unresolved until the gtest footer is seen.

    Bug fix: the original asserted ``get_case_by_name(...) != TwisterStatus.NONE``,
    which compares a TestCase object against an enum and is always true;
    assert that the case exists instead (as test_gtest_start_test does).
    """
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.status == TwisterStatus.NONE
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == TwisterStatus.PASS
def test_gtest_failed(gtest):
    """A started test that then fails is recorded as FAIL; the harness
    status stays unresolved until the gtest footer is seen.

    Bug fix: replaced the always-true ``!= TwisterStatus.NONE`` comparison of
    the TestCase object with an ``is not None`` existence check.
    """
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_FAIL_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.status == TwisterStatus.NONE
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == TwisterStatus.FAIL
def test_gtest_skipped(gtest):
    """A started test that is then skipped is recorded as SKIP.

    Bug fix: replaced the always-true ``!= TwisterStatus.NONE`` comparison of
    the TestCase object with an ``is not None`` existence check.
    """
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_SKIP_STATE, suite="suite_name", test="test_name"
            ),
        ],
    )
    assert gtest.status == TwisterStatus.NONE
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == TwisterStatus.SKIP
def test_gtest_all_pass(gtest):
    """When every test passes and the gtest footer is seen, the harness
    itself resolves to PASS.

    Bug fix: replaced the always-true ``!= TwisterStatus.NONE`` comparison of
    the TestCase object with an ``is not None`` existence check.
    """
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_END,
        ],
    )
    assert gtest.status == TwisterStatus.PASS
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == TwisterStatus.PASS
def test_gtest_one_skipped(gtest):
    """A skipped case must not prevent the harness from resolving to PASS
    when the remaining cases pass.

    Bug fix: replaced the always-true ``!= TwisterStatus.NONE`` comparisons of
    TestCase objects with ``is not None`` existence checks.
    """
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test_name"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test_name1"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_SKIP_STATE, suite="suite_name", test="test_name1"
            ),
            SAMPLE_GTEST_END,
        ],
    )
    assert gtest.status == TwisterStatus.PASS
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test_name") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name").status == TwisterStatus.PASS
    assert gtest.instance.get_case_by_name("id.suite_name.test_name1") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test_name1").status == TwisterStatus.SKIP
def test_gtest_one_fail(gtest):
    """A single failing case must resolve the whole harness to FAIL even
    when other cases pass.

    Bug fix: replaced the always-true ``!= TwisterStatus.NONE`` comparisons of
    TestCase objects with ``is not None`` existence checks.
    """
    process_logs(
        gtest,
        [
            SAMPLE_GTEST_START,
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test0"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_PASS_STATE, suite="suite_name", test="test0"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_START_STATE, suite="suite_name", test="test1"
            ),
            SAMPLE_GTEST_FMT.format(
                state=GTEST_FAIL_STATE, suite="suite_name", test="test1"
            ),
            SAMPLE_GTEST_END,
        ],
    )
    assert gtest.status == TwisterStatus.FAIL
    assert len(gtest.detected_suite_names) == 1
    assert gtest.detected_suite_names[0] == "suite_name"
    assert gtest.instance.get_case_by_name("id.suite_name.test0") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test0").status == TwisterStatus.PASS
    assert gtest.instance.get_case_by_name("id.suite_name.test1") is not None
    assert gtest.instance.get_case_by_name("id.suite_name.test1").status == TwisterStatus.FAIL
def test_gtest_missing_result(gtest):
    """Starting a second test before the first one finished must raise."""
    logs = [
        SAMPLE_GTEST_START,
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test0"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test1"
        ),
    ]
    with pytest.raises(
        AssertionError,
        match=r"gTest error, id.suite_name.test0 didn't finish",
    ):
        process_logs(gtest, logs)
def test_gtest_mismatch_result(gtest):
    """A result line for a test that was never started must raise."""
    logs = [
        SAMPLE_GTEST_START,
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test0"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_PASS_STATE, suite="suite_name", test="test1"
        ),
    ]
    with pytest.raises(
        AssertionError,
        match=r"gTest error, mismatched tests. Expected id.suite_name.test0 but got None",
    ):
        process_logs(gtest, logs)
def test_gtest_repeated_result(gtest):
    """A second result line for an already-finished test must raise."""
    logs = [
        SAMPLE_GTEST_START,
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test0"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_PASS_STATE, suite="suite_name", test="test0"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test1"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_PASS_STATE, suite="suite_name", test="test0"
        ),
    ]
    with pytest.raises(
        AssertionError,
        match=r"gTest error, mismatched tests. Expected id.suite_name.test1 but got id.suite_name.test0",
    ):
        process_logs(gtest, logs)
def test_gtest_repeated_run(gtest):
    """Re-starting a test that already produced a result must raise."""
    logs = [
        SAMPLE_GTEST_START,
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test0"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_PASS_STATE, suite="suite_name", test="test0"
        ),
        SAMPLE_GTEST_FMT.format(
            state=GTEST_START_STATE, suite="suite_name", test="test0"
        ),
    ]
    with pytest.raises(
        AssertionError,
        match=r"gTest error, id.suite_name.test0 running twice",
    ):
        process_logs(gtest, logs)
def test_bsim_build(monkeypatch, tmp_path):
    """Bsim.build() should copy the built zephyr.exe into $BSIM_OUT_PATH/bin
    under a name derived from the instance name."""
    instance_mock = mock.Mock()
    build_dir = tmp_path / 'build_dir'
    os.makedirs(build_dir)
    instance_mock.build_dir = str(build_dir)
    instance_mock.name = 'platform_name/test/dummy.test'
    instance_mock.testsuite.harness_config = {}

    harness = Bsim()
    harness.instance = instance_mock

    monkeypatch.setenv('BSIM_OUT_PATH', str(tmp_path))
    os.makedirs(os.path.join(tmp_path, 'bin'), exist_ok=True)

    exe_src = os.path.join(build_dir, 'zephyr', 'zephyr.exe')
    os.makedirs(os.path.dirname(exe_src), exist_ok=True)
    with open(exe_src, 'w') as exe_file:
        exe_file.write('TEST_EXE')

    harness.build()

    # Slashes and dots in the instance name become underscores.
    exe_dst = os.path.join(tmp_path, 'bin', 'bs_platform_name_test_dummy_test')
    assert os.path.exists(exe_dst)
    with open(exe_dst, 'r') as copied:
        assert 'TEST_EXE' in copied.read()
``` | /content/code_sandbox/scripts/tests/twister/test_harness.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 7,050 |
```python
#!/usr/bin/env python3
#
'''Common fixtures for use in testing the twister tool.'''
import os
import sys
import pytest
# Expose fixtures from pytest's own "pytester" plugin to all tests.
pytest_plugins = ["pytester"]
# All test data paths are resolved relative to the Zephyr tree; the
# ZEPHYR_BASE environment variable must be set before running these tests.
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts"))
from twisterlib.testplan import TestPlan
from twisterlib.testinstance import TestInstance
from twisterlib.environment import TwisterEnv, add_parse_arguments, parse_arguments
def new_get_toolchain(*args, **kwargs):
    # Stub replacement: avoid probing the real build environment for a
    # toolchain during unit tests.
    return 'zephyr'
# Session-wide monkey-patch applied at import time.
TestPlan.get_toolchain = new_get_toolchain
@pytest.fixture(name='test_data')
def _test_data():
    """Pytest fixture returning the twister test-data directory path
    (with a trailing slash)."""
    return ZEPHYR_BASE + "/scripts/tests/twister/test_data/"
@pytest.fixture(name='zephyr_base')
def zephyr_base_directory():
    """Pytest fixture exposing the root of the Zephyr tree."""
    return ZEPHYR_BASE
@pytest.fixture(name='testsuites_dir')
def testsuites_directory():
    """Pytest fixture returning the directory holding the test suites
    used as twister test data."""
    return ZEPHYR_BASE + "/scripts/tests/twister/test_data/testsuites"
@pytest.fixture(name='class_env')
def tesenv_obj(test_data, testsuites_dir, tmpdir_factory):
    """ Pytest fixture to initialize and return the class TestPlan object"""
    parser = add_parse_arguments()
    options = parse_arguments(parser, [])
    env = TwisterEnv(options)
    env.board_roots = [os.path.join(test_data, "board_config", "1_level", "2_level")]
    # NOTE(review): 'testsuites_dir' is an absolute path, so the second
    # occurrence resets os.path.join and this evaluates to just
    # [<testsuites_dir>/samples] — the 'tests' component is discarded.
    # Compare with the 'class_testplan' fixture, which uses two separate
    # roots; confirm whether a single 'samples' root is really intended.
    env.test_roots = [os.path.join(testsuites_dir, 'tests', testsuites_dir, 'samples')]
    env.test_config = os.path.join(test_data, "test_config.yaml")
    env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
    return env
@pytest.fixture(name='class_testplan')
def testplan_obj(test_data, class_env, testsuites_dir, tmpdir_factory):
    """Pytest fixture building a TestPlan on top of the shared TwisterEnv."""
    env = class_env
    env.board_roots = [test_data +"board_config/1_level/2_level/"]
    env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
    env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
    test_plan = TestPlan(env)
    test_plan.parse_configuration(config_file=env.test_config)
    return test_plan
@pytest.fixture(name='all_testsuites_dict')
def testsuites_dict(class_testplan):
    """Pytest fixture that discovers all testsuites in the test-data tree
    and returns the resulting testsuites dictionary."""
    class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml'
    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
    class_testplan.add_testsuites()
    return class_testplan.testsuites
@pytest.fixture(name='platforms_list')
def all_platforms_list(test_data, class_testplan):
    """Pytest fixture that discovers every platform under the board_config
    test-data tree and returns the platforms list."""
    board_root = os.path.abspath(os.path.join(test_data, "board_config"))
    class_testplan.env.board_roots = [board_root]
    test_plan = TestPlan(class_testplan.env)
    test_plan.parse_configuration(config_file=class_testplan.env.test_config)
    test_plan.add_configurations()
    return test_plan.platforms
@pytest.fixture
def instances_fixture(class_testplan, platforms_list, all_testsuites_dict, tmpdir_factory):
    """Pytest fixture creating one TestInstance per discovered testsuite,
    all targeting demo_board_2, and returning the plan's instances dict."""
    class_testplan.outdir = tmpdir_factory.mktemp("sanity_out_demo")
    class_testplan.platforms = platforms_list
    platform = class_testplan.get_platform("demo_board_2")
    instances = [
        TestInstance(testsuite, platform, class_testplan.outdir)
        for testsuite in all_testsuites_dict.values()
    ]
    class_testplan.add_instances(instances)
    return class_testplan.instances
``` | /content/code_sandbox/scripts/tests/twister/conftest.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 890 |
```python
#!/usr/bin/env python3
#
"""
This test file contains foundational testcases for Twister tool
"""
import os
import sys
import pytest
from pathlib import Path
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
import scl
from twisterlib.error import ConfigurationError
from twisterlib.testplan import TwisterConfigParser
def test_yamlload():
    """scl.yaml_load() must raise FileNotFoundError for a missing file."""
    missing_file = 'testcase_nc.yaml'
    with pytest.raises(FileNotFoundError):
        scl.yaml_load(missing_file)
@pytest.mark.parametrize("filename, schema",
                         [("testsuite_correct_schema.yaml", "testsuite-schema.yaml"),
                          ("platform_correct_schema.yaml", "platform-schema.yaml")])
def test_correct_schema(filename, schema, test_data):
    """Well-formed testsuite/platform YAML should validate and load."""
    yaml_path = test_data + filename
    schema_data = scl.yaml_load(ZEPHYR_BASE +'/scripts/schemas/twister//' + schema)
    parser = TwisterConfigParser(yaml_path, schema_data)
    parser.load()
    assert parser
@pytest.mark.parametrize("filename, schema",
                         [("testsuite_incorrect_schema.yaml", "testsuite-schema.yaml"),
                          ("platform_incorrect_schema.yaml", "platform-schema.yaml")])
def test_incorrect_schema(filename, schema, test_data):
    """Malformed testsuite/platform YAML must fail schema validation."""
    yaml_path = test_data + filename
    schema_data = scl.yaml_load(ZEPHYR_BASE +'/scripts/schemas/twister//' + schema)
    with pytest.raises(Exception) as exception:
        scl.yaml_load_verify(yaml_path, schema_data)
    assert str(exception.value) == "Schema validation failed"
def test_testsuite_config_files():
    """ Test to validate conf and overlay files are extracted properly """
    filename = Path(ZEPHYR_BASE) / "scripts/tests/twister/test_data/testsuites/tests/test_config/test_data.yaml"
    schema = scl.yaml_load(Path(ZEPHYR_BASE) / "scripts/schemas/twister/testsuite-schema.yaml")
    data = TwisterConfigParser(filename, schema)
    data.load()
    # Load and validate the specific scenario from testcases.yaml
    scenario = data.get_scenario("test_config.main")
    assert scenario
    # CONF_FILE, DTC_OVERLAY_FILE, OVERLAY_CONFIG fields should be stripped out
    # of extra_args. Other fields should remain untouched.
    assert scenario["extra_args"] == ["UNRELATED1=abc", "UNRELATED2=xyz"]
    # Check that all conf files have been assembled in the correct order
    assert ";".join(scenario["extra_conf_files"]) == \
        "conf1;conf2;conf3;conf4;conf5;conf6;conf7;conf8"
    # Check that all DTC overlay files have been assembled in the correct order
    assert ";".join(scenario["extra_dtc_overlay_files"]) == \
        "overlay1;overlay2;overlay3;overlay4;overlay5;overlay6;overlay7;overlay8"
    # Check that all overlay conf files have been assembled in the correct order
    assert scenario["extra_overlay_confs"] == \
        ["oc1.conf", "oc2.conf", "oc3.conf", "oc4.conf"]
    # Check extra kconfig statements, too
    assert scenario["extra_configs"] == ["CONFIG_FOO=y"]
def test_configuration_error():
    """ConfigurationError should be raisable with (path, message) and carry
    a single '<path>: <message>' entry in its args."""
    path = Path(ZEPHYR_BASE) / "scripts/tests/twister/test_twister.py"
    with pytest.raises(ConfigurationError) as exception:
        raise ConfigurationError(path, "Dummy message.")
    # Unpacking asserts there is exactly one args entry.
    (only_arg,) = exception.value.args
    assert only_arg == str(path) + ": " + "Dummy message."
``` | /content/code_sandbox/scripts/tests/twister/test_twister.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 847 |
```python
#!/usr/bin/env python3
#
"""
Tests for hardwaremap.py classes' methods
"""
import mock
import pytest
import sys
from pathlib import Path
from twisterlib.hardwaremap import(
DUT,
HardwareMap
)
@pytest.fixture
def mocked_hm():
    """HardwareMap pre-populated with eight DUTs p1..p8: odd-numbered ones
    are connected, and the first five are also marked as detected."""
    duts = [
        DUT(
            platform=f'p{i}',
            id=i,
            serial=f's{i}',
            product=f'pr{i}',
            connected=bool(i % 2)
        )
        for i in range(1, 9)
    ]
    hm = HardwareMap(env=mock.Mock())
    hm.duts = duts
    hm.detected = duts[:5]
    return hm
# (constructor kwargs, expected to_dict() output, expected __repr__ output)
TESTDATA_1 = [
    (
        {},
        {'baud': 115200, 'lock': mock.ANY, 'flash_timeout': 60},
        '<None (None) on None>'
    ),
    (
        {
            'id': 'dummy id',
            'serial': 'dummy serial',
            'serial_baud': 4400,
            'platform': 'dummy platform',
            'product': 'dummy product',
            'serial_pty': 'dummy serial pty',
            'connected': True,
            'runner_params': ['dummy', 'runner', 'params'],
            'pre_script': 'dummy pre script',
            'post_script': 'dummy post script',
            'post_flash_script': 'dummy post flash script',
            'runner': 'dummy runner',
            'flash_timeout': 30,
            'flash_with_test': True
        },
        {
            'lock': mock.ANY,
            'id': 'dummy id',
            'serial': 'dummy serial',
            'baud': 4400,
            'platform': 'dummy platform',
            'product': 'dummy product',
            'serial_pty': 'dummy serial pty',
            'connected': True,
            'runner_params': ['dummy', 'runner', 'params'],
            'pre_script': 'dummy pre script',
            'post_script': 'dummy post script',
            'post_flash_script': 'dummy post flash script',
            'runner': 'dummy runner',
            'flash_timeout': 30,
            'flash_with_test': True
        },
        '<dummy platform (dummy product) on dummy serial>'
    ),
]
@pytest.mark.parametrize(
    'kwargs, expected_dict, expected_repr',
    TESTDATA_1,
    ids=['no information', 'full information']
)
def test_dut(kwargs, expected_dict, expected_repr):
    """DUT should expose mutable available/counter accessors and serialise
    itself via to_dict() and __repr__()."""
    d = DUT(**kwargs)
    # A freshly created DUT is available with an untouched usage counter.
    assert d.available
    assert d.counter == 0
    d.available = False
    d.counter = 1
    assert not d.available
    assert d.counter == 1
    assert d.to_dict() == expected_dict
    assert d.__repr__() == expected_repr
# Each row drives HardwareMap.discover() down one of its modes; the boolean
# expect_* columns say which collaborator methods must have been invoked and
# expected_platforms is the final value of options.platform.
TESTDATA_2 = [
    ('ghm.yaml', mock.ANY, mock.ANY, [], mock.ANY, mock.ANY, mock.ANY, 0,
     True, True, False, False, False, False, []),
    (None, False, 'hm.yaml', [], mock.ANY, mock.ANY, mock.ANY, 0,
     False, False, True, True, False, False, []),
    (None, True, 'hm.yaml', [], mock.ANY, mock.ANY, ['fix'], 1,
     False, False, True, False, False, True, ['p1', 'p3', 'p5', 'p7']),
    (None, True, 'hm.yaml', ['pX'], mock.ANY, mock.ANY, ['fix'], 1,
     False, False, True, False, False, True, ['pX']),
    (None, True, None, ['p'], 's', None, ['fix'], 1,
     False, False, False, False, True, True, ['p']),
    (None, True, None, ['p'], None, 'spty', ['fix'], 1,
     False, False, False, False, True, True, ['p']),
]
@pytest.mark.parametrize(
    'generate_hardware_map, device_testing, hardware_map, platform,' \
    ' device_serial, device_serial_pty, fixtures,' \
    ' return_code, expect_scan, expect_save, expect_load,' \
    ' expect_dump, expect_add_device, expect_fixtures, expected_platforms',
    TESTDATA_2,
    ids=['generate hardware map', 'existing hardware map',
         'device testing with hardware map, no platform',
         'device testing with hardware map with platform',
         'device testing with device serial',
         'device testing with device serial pty']
)
def test_hardwaremap_discover(
    caplog,
    mocked_hm,
    generate_hardware_map,
    device_testing,
    hardware_map,
    platform,
    device_serial,
    device_serial_pty,
    fixtures,
    return_code,
    expect_scan,
    expect_save,
    expect_load,
    expect_dump,
    expect_add_device,
    expect_fixtures,
    expected_platforms
):
    """discover() should dispatch to scan/save/load/dump/add_device based on
    the selected options and propagate fixtures to every DUT."""
    def mock_load(*args):
        # Loading a map file sets the platform list as a side effect.
        mocked_hm.platform = platform
    mocked_hm.scan = mock.Mock()
    mocked_hm.save = mock.Mock()
    mocked_hm.load = mock.Mock(side_effect=mock_load)
    mocked_hm.dump = mock.Mock()
    mocked_hm.add_device = mock.Mock()
    mocked_hm.options.device_flash_with_test = True
    mocked_hm.options.device_flash_timeout = 15
    mocked_hm.options.pre_script = 'dummy pre script'
    mocked_hm.options.platform = platform
    mocked_hm.options.device_serial = device_serial
    mocked_hm.options.device_serial_pty = device_serial_pty
    mocked_hm.options.device_testing = device_testing
    mocked_hm.options.hardware_map = hardware_map
    mocked_hm.options.persistent_hardware_map = mock.Mock()
    mocked_hm.options.generate_hardware_map = generate_hardware_map
    mocked_hm.options.fixture = fixtures
    returncode = mocked_hm.discover()
    assert returncode == return_code
    if expect_scan:
        mocked_hm.scan.assert_called_once_with(
            persistent=mocked_hm.options.persistent_hardware_map
        )
    if expect_save:
        mocked_hm.save.assert_called_once_with(
            mocked_hm.options.generate_hardware_map
        )
    if expect_load:
        mocked_hm.load.assert_called_once_with(
            mocked_hm.options.hardware_map
        )
    if expect_dump:
        mocked_hm.dump.assert_called_once_with(
            connected_only=True
        )
    if expect_add_device:
        mocked_hm.add_device.assert_called_once()
    if expect_fixtures:
        # Every requested fixture must have been attached to every DUT.
        assert all(
            [all(
                [fixture in dut.fixtures for fixture in fixtures]
            ) for dut in mocked_hm.duts]
        )
    assert sorted(expected_platforms) == sorted(mocked_hm.options.platform)
def test_hardwaremap_summary(capfd, mocked_hm):
    """summary() should print a table covering only the DUTs whose platform
    is in the selected list (p1 and p7 here; p0/p6 have no matching DUT
    entries among the connected ones shown)."""
    selected_platforms = ['p0', 'p1', 'p6', 'p7']
    mocked_hm.summary(selected_platforms)
    # NOTE(review): the expected block must match the table renderer's exact
    # column padding; verify against the real output if this assert fails.
    expected = """
Hardware distribution summary:

| Board | ID | Counter | Failures |
|---------|------|-----------|------------|
| p1 | 1 | 0 | 0 |
| p7 | 7 | 0 | 0 |
"""
    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)
    assert expected in out
# add_device() is exercised with both interpretations of the serial name.
TESTDATA_3 = [True, False]
@pytest.mark.parametrize(
    'is_pty',
    TESTDATA_3,
    ids=['pty', 'not pty']
)
def test_hardwaremap_add_device(is_pty):
    """add_device() should store the serial name in serial_pty or serial
    depending on the is_pty flag, leaving the other field unset."""
    hm = HardwareMap(env=mock.Mock())
    serial = 'dummy'
    platform = 'p0'
    pre_script = 'dummy pre script'
    hm.add_device(serial, platform, pre_script, is_pty)
    assert len(hm.duts) == 1
    if is_pty:
        # Bug fix: the original assertion read
        #   assert hm.duts[0].serial_pty == 'dummy' if is_pty else None
        # which parses as `(assert ...) if is_pty else None` — a redundant
        # ternary inside a branch where is_pty is already known to be True.
        assert hm.duts[0].serial_pty == 'dummy'
        assert hm.duts[0].serial is None
    else:
        assert hm.duts[0].serial_pty is None
        assert hm.duts[0].serial == 'dummy'
def test_hardwaremap_load():
    """load() should populate DUTs from a YAML map file, applying the option
    defaults (flash timeout/with_test, baud) where the file omits them."""
    map_file = \
        """
        - id: id0
          platform: p0
          product: pr0
          runner: r0
          flash_with_test: True
          flash_timeout: 15
          baud: 14400
          fixtures:
            - dummy fixture 1
            - dummy fixture 2
          connected: True
          serial: 'dummy'
        - id: id1
          platform: p1
          product: pr1
          runner: r1
          connected: True
          serial_pty: 'dummy'
        - id: id2
          platform: p2
          product: pr2
          runner: r2
          connected: True
        """
    map_filename = 'map-file.yaml'
    builtin_open = open
    def mock_open(*args, **kwargs):
        # Serve the fake map file for its name, pass everything else through.
        if args[0] == map_filename:
            return mock.mock_open(read_data=map_file)(*args, **kwargs)
        return builtin_open(*args, **kwargs)
    hm = HardwareMap(env=mock.Mock())
    hm.options.device_flash_timeout = 30
    hm.options.device_flash_with_test = False
    with mock.patch('builtins.open', mock_open):
        hm.load(map_filename)
    # id2 has neither 'serial' nor 'serial_pty'; the assertions below would
    # fail if load() kept it, so it is expected to be skipped.
    expected = {
        'id0': {
            'platform': 'p0',
            'product': 'pr0',
            'runner': 'r0',
            'flash_timeout': 15,
            'flash_with_test': True,
            'baud': 14400,
            'fixtures': ['dummy fixture 1', 'dummy fixture 2'],
            'connected': True,
            'serial': 'dummy',
            'serial_pty': None,
        },
        'id1': {
            'platform': 'p1',
            'product': 'pr1',
            'runner': 'r1',
            'flash_timeout': 30,
            'flash_with_test': False,
            'baud': 115200,
            'fixtures': [],
            'connected': True,
            'serial': None,
            'serial_pty': 'dummy',
        },
    }
    for dut in hm.duts:
        assert dut.id in expected
        assert all([getattr(dut, k) == v for k, v in expected[dut.id].items()])
# (persistent flag, platform.system() value, expected DUT reprs).  The
# persistent-map path resolves /dev/serial/by-id symlinks, which is only
# taken on Linux.
TESTDATA_4 = [
    (
        True,
        'Linux',
        ['<p1 (pr1) on s1>', '<p2 (pr2) on s2>', '<p3 (pr3) on s3>',
         '<p4 (pr4) on s4>', '<p5 (pr5) on s5>',
         '<unknown (TI product) on /dev/serial/by-id/basic-file1>',
         '<unknown (product123) on dummy device>',
         '<unknown (unknown) on /dev/serial/by-id/basic-file2-link>']
    ),
    (
        True,
        'nt',
        ['<p1 (pr1) on s1>', '<p2 (pr2) on s2>', '<p3 (pr3) on s3>',
         '<p4 (pr4) on s4>', '<p5 (pr5) on s5>',
         '<unknown (TI product) on /dev/serial/by-id/basic-file1>',
         '<unknown (product123) on dummy device>',
         '<unknown (unknown) on /dev/serial/by-id/basic-file2>']
    ),
    (
        False,
        'Linux',
        ['<p1 (pr1) on s1>', '<p2 (pr2) on s2>', '<p3 (pr3) on s3>',
         '<p4 (pr4) on s4>', '<p5 (pr5) on s5>',
         '<unknown (TI product) on /dev/serial/by-id/basic-file1>',
         '<unknown (product123) on dummy device>',
         '<unknown (unknown) on /dev/serial/by-id/basic-file2>']
    )
]
@pytest.mark.parametrize(
    'persistent, system, expected_reprs',
    TESTDATA_4,
    ids=['linux persistent map', 'no map (not linux)', 'no map (nonpersistent)']
)
def test_hardwaremap_scan(
    caplog,
    mocked_hm,
    persistent,
    system,
    expected_reprs
):
    """scan() should detect supported comports (by manufacturer), skip the
    rest, and use persistent by-id device paths when requested on Linux."""
    def mock_resolve(path):
        # Emulate a symlink: '<name>-link' resolves to '<name>'.
        if str(path).endswith('-link'):
            return Path(str(path)[:-5])
        return path
    def mock_iterdir(path):
        return [
            Path(path / 'basic-file1'),
            Path(path / 'basic-file2-link')
        ]
    def mock_exists(path):
        return True
    mocked_hm.manufacturer = ['dummy manufacturer', 'Texas Instruments']
    mocked_hm.runner_mapping = {
        'dummy runner': ['product[0-9]+',],
        'other runner': ['other TI product', 'TI product']
    }
    # First port has an unsupported manufacturer and must be ignored.
    comports_mock = [
        mock.Mock(
            manufacturer='wrong manufacturer',
            location='wrong location',
            serial_number='wrong number',
            product='wrong product',
            device='wrong device'
        ),
        mock.Mock(
            manufacturer='dummy manufacturer',
            location='dummy location',
            serial_number='dummy number',
            product=None,
            device='/dev/serial/by-id/basic-file2'
        ),
        mock.Mock(
            manufacturer='dummy manufacturer',
            location='dummy location',
            serial_number='dummy number',
            product='product123',
            device='dummy device'
        ),
        mock.Mock(
            manufacturer='Texas Instruments',
            location='serial1',
            serial_number='TI1',
            product='TI product',
            device='TI device1'
        ),
        mock.Mock(
            manufacturer='Texas Instruments',
            location='serial0',
            serial_number='TI0',
            product='TI product',
            device='/dev/serial/by-id/basic-file1'
        ),
    ]
    with mock.patch('platform.system', return_value=system), \
         mock.patch('serial.tools.list_ports.comports',
                    return_value=comports_mock), \
         mock.patch('twisterlib.hardwaremap.Path.resolve',
                    autospec=True, side_effect=mock_resolve), \
         mock.patch('twisterlib.hardwaremap.Path.iterdir',
                    autospec=True, side_effect=mock_iterdir), \
         mock.patch('twisterlib.hardwaremap.Path.exists',
                    autospec=True, side_effect=mock_exists):
        mocked_hm.scan(persistent)
    assert sorted([d.__repr__() for d in mocked_hm.detected]) == \
        sorted(expected_reprs)
    assert 'Scanning connected hardware...' in caplog.text
    assert 'Unsupported device (wrong manufacturer): %s' % comports_mock[0] \
        in caplog.text
TESTDATA_5 = [
(
None,
[{
'platform': 'p1',
'id': 1,
'runner': mock.ANY,
'serial': 's1',
'product': 'pr1',
'connected': True
},
{
'platform': 'p2',
'id': 2,
'runner': mock.ANY,
'serial': 's2',
'product': 'pr2',
'connected': False
},
{
'platform': 'p3',
'id': 3,
'runner': mock.ANY,
'serial': 's3',
'product': 'pr3',
'connected': True
},
{
'platform': 'p4',
'id': 4,
'runner': mock.ANY,
'serial': 's4',
'product': 'pr4',
'connected': False
},
{
'platform': 'p5',
'id': 5,
'runner': mock.ANY,
'serial': 's5',
'product': 'pr5',
'connected': True
}]
),
(
'',
[{
'serial': 's1',
'baud': 115200,
'platform': 'p1',
'connected': True,
'id': 1,
'product': 'pr1',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's2',
'baud': 115200,
'platform': 'p2',
'id': 2,
'product': 'pr2',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's3',
'baud': 115200,
'platform': 'p3',
'connected': True,
'id': 3,
'product': 'pr3',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's4',
'baud': 115200,
'platform': 'p4',
'id': 4,
'product': 'pr4',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's5',
'baud': 115200,
'platform': 'p5',
'connected': True,
'id': 5,
'product': 'pr5',
'lock': mock.ANY,
'flash_timeout': 60
}]
),
(
"""
- id: 4
platform: p4
product: pr4
connected: True
serial: s4
- id: 0
platform: p0
product: pr0
connected: True
serial: s0
- id: 10
platform: p10
product: pr10
connected: False
serial: s10
- id: 5
platform: p5-5
product: pr5-5
connected: True
serial: s5-5
""",
[{
'id': 0,
'platform': 'p0',
'product': 'pr0',
'connected': False,
'serial': None
},
{
'id': 4,
'platform': 'p4',
'product': 'pr4',
'connected': True,
'serial': 's4'
},
{
'id': 5,
'platform': 'p5-5',
'product': 'pr5-5',
'connected': False,
'serial': None
},
{
'id': 10,
'platform': 'p10',
'product': 'pr10',
'connected': False,
'serial': None
},
{
'serial': 's1',
'baud': 115200,
'platform': 'p1',
'connected': True,
'id': 1,
'product': 'pr1',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's2',
'baud': 115200,
'platform': 'p2',
'id': 2,
'product': 'pr2',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's3',
'baud': 115200,
'platform': 'p3',
'connected': True,
'id': 3,
'product': 'pr3',
'lock': mock.ANY,
'flash_timeout': 60
},
{
'serial': 's5',
'baud': 115200,
'platform': 'p5',
'connected': True,
'id': 5,
'product': 'pr5',
'lock': mock.ANY,
'flash_timeout': 60
}]
),
]
@pytest.mark.parametrize(
    'hwm, expected_dump',
    TESTDATA_5,
    ids=['no map', 'empty map', 'map exists']
)
def test_hardwaremap_save(mocked_hm, hwm, expected_dump):
    """save() should merge any pre-existing map file with the detected
    devices and serialise the result through yaml.dump."""
    reader = mock.mock_open(read_data=hwm)
    writer = mock.mock_open()

    def fake_open(filename, mode):
        # Route reads to the canned map contents and writes to a sink.
        if mode == 'r':
            return reader()
        elif mode == 'w':
            return writer()

    mocked_hm.load = mock.Mock()
    mocked_hm.dump = mock.Mock()
    opener = mock.Mock(side_effect=fake_open)
    yaml_dump = mock.Mock()

    # os.path.exists decides whether save() tries to read an existing map;
    # hwm is None in the 'no map' case.
    with mock.patch('os.path.exists', return_value=hwm is not None), \
         mock.patch('builtins.open', opener), \
         mock.patch('twisterlib.hardwaremap.yaml.dump', yaml_dump):
        mocked_hm.save('hwm.yaml')

    yaml_dump.assert_called_once_with(
        expected_dump,
        mock.ANY,
        Dumper=mock.ANY,
        default_flow_style=mock.ANY
    )
# Arguments for test_hardwaremap_dump:
# (platforms to filter by, custom table header, connected_only flag,
#  detected flag, expected rendered table).
# NOTE(review): the expected tables are compared with .strip(), so the
# per-line content below must stay byte-identical.
TESTDATA_6 = [
    (
        ['p1', 'p3', 'p5', 'p7'],
        [],
        True,
        True,
        """
| Platform | ID | Serial device |
|------------|------|-----------------|
| p1 | 1 | s1 |
| p3 | 3 | s3 |
| p5 | 5 | s5 |
"""
    ),
    (
        [],
        ['?', '??', '???'],
        False,
        False,
        """
| ? | ?? | ??? |
|-----|------|-------|
| p1 | 1 | s1 |
| p2 | 2 | s2 |
| p3 | 3 | s3 |
| p4 | 4 | s4 |
| p5 | 5 | s5 |
| p6 | 6 | s6 |
| p7 | 7 | s7 |
| p8 | 8 | s8 |
"""
    ),
]
@pytest.mark.parametrize(
    'filtered, header, connected_only, detected, expected_out',
    TESTDATA_6,
    ids=['detected no header', 'all with header']
)
def test_hardwaremap_dump(
    capfd,
    mocked_hm,
    filtered,
    header,
    connected_only,
    detected,
    expected_out
):
    """dump() should print the (optionally filtered) device map as a table."""
    mocked_hm.dump(filtered, header, connected_only, detected)

    captured = capfd.readouterr()
    # Re-emit the captured streams so they stay visible in pytest's output.
    sys.stdout.write(captured.out)
    sys.stderr.write(captured.err)

    assert captured.out.strip() == expected_out.strip()
``` | /content/code_sandbox/scripts/tests/twister/test_hardwaremap.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 5,059 |
```python
#!/usr/bin/env python3
#
"""
Tests for quarantine.py classes' methods
"""
import mock
import os
import pytest
import textwrap
from twisterlib.quarantine import QuarantineException, \
QuarantineElement, \
QuarantineData
# Constructor arguments for QuarantineElement plus the expected outcome:
# (scenarios, platforms, architectures, simulations,
#  expected exception type or None, attributes expected to be emptied
#  by __post_init__ because they contained 'all').
TESTDATA_1 = [
    (
        ['dummy scenario', 'another scenario'],
        ['dummy platform', 'another platform'],
        ['dummy architecture', 'another architecture'],
        ['dummy simulation', 'another simulation'],
        None,
        []
    ),
    (
        ['all'],
        ['dummy platform', 'another platform'],
        ['dummy architecture', 'another architecture'],
        ['dummy simulation', 'another simulation'],
        None,
        ['scenarios']
    ),
    (
        ['dummy scenario', 'another scenario'],
        ['dummy platform', 'all'],
        ['all', 'another architecture'],
        ['dummy simulation', 'another simulation'],
        None,
        ['platforms', 'architectures']
    ),
    (
        ['all', 'another scenario'],
        [],
        [],
        ['all', 'all'],
        QuarantineException,
        ['scenarios', 'platforms', 'architectures', 'simulations']
    ),
]
@pytest.mark.parametrize(
    'scenarios, platforms, architectures, '
    'simulations, expected_exception, empty_filter_attrs',
    TESTDATA_1,
    ids=[
        'no empties',
        'all scenarios',
        'all platforms and architectures',
        'exception'
    ]
)
def test_quarantineelement_post_init(
    scenarios,
    platforms,
    architectures,
    simulations,
    expected_exception,
    empty_filter_attrs
):
    """QuarantineElement.__post_init__ should clear any filter list that
    contains 'all' (it matches everything anyway), and raise when every
    filter would end up empty.
    """
    # Build the kwargs once instead of spelling the constructor call out
    # twice — the original duplicated the full invocation in both branches.
    kwargs = {
        'scenarios': scenarios,
        'platforms': platforms,
        'architectures': architectures,
        'simulations': simulations
    }

    if expected_exception:
        with pytest.raises(expected_exception):
            QuarantineElement(**kwargs)
        return

    quarantine_element = QuarantineElement(**kwargs)

    for attr in ('scenarios', 'platforms', 'architectures', 'simulations'):
        if attr in empty_filter_attrs:
            assert getattr(quarantine_element, attr) == []
        else:
            assert getattr(quarantine_element, attr) != []
def test_quarantinedata_post_init():
    """A plain dict in qlist should be coerced into a QuarantineElement
    equal to one built directly from the same values."""
    as_dict = {
        'scenarios': ['all'],
        'platforms': ['dummy platform'],
        'architectures': [],
        'simulations': ['dummy simulation', 'another simulation']
    }
    as_element = QuarantineElement(
        platforms=['dummy platform'],
        architectures=[],
        simulations=['dummy simulation', 'another simulation']
    )

    data = QuarantineData([as_element, as_dict])

    assert data.qlist[0] == data.qlist[1]
# (quarantine file contents, expected QuarantineData or exception type)
# for test_quarantinedata_load_data_from_yaml.
TESTDATA_2 = [
    (
        '',
        QuarantineData()
    ),
    (
        textwrap.dedent("""
[
{
\"scenarios\": [\"all\"],
\"platforms\": [\"dummy platform\"],
\"architectures\": [],
\"simulations\": [\"dummy simulation\", \"another simulation\"]
}
]
"""),
        QuarantineData(
            [
                QuarantineElement(
                    scenarios=[],
                    platforms=['dummy platform'],
                    architectures=[],
                    simulations=['dummy simulation', 'another simulation']
                )
            ]
        )
    ),
    (
        textwrap.dedent("""
[
{
\"I\": [\"am\"],
\"not\": \"a\",
\"valid\": [],
\"JSON\": [\"for\", \"this\"]
}
]
"""),
        QuarantineException
    )
]
@pytest.mark.parametrize(
    'file_contents, expected',
    TESTDATA_2,
    ids=['empty', 'valid', 'not valid']
)
def test_quarantinedata_load_data_from_yaml(file_contents, expected):
    """load_data_from_yaml() should parse a quarantine file into
    QuarantineData, or raise QuarantineException on malformed entries.

    Fixes: the original bound the result to an unused local inside the
    pytest.raises block and repeated the path expression and call in
    both branches.
    """
    path = os.path.join('dummy', 'path')

    with mock.patch('builtins.open', mock.mock_open(read_data=file_contents)):
        if isinstance(expected, type) and issubclass(expected, Exception):
            with pytest.raises(expected):
                QuarantineData.load_data_from_yaml(path)
        else:
            assert QuarantineData.load_data_from_yaml(path) == expected
# (scenario, platform, architecture, simulation,
#  index of the matching qlist element or None if nothing should match)
# for test_quarantinedata_get_matched_quarantine.
TESTDATA_3 = [
    (
        'good scenario',
        'good platform',
        'good arch',
        'good sim',
        None
    ),
    (
        'good scenario',
        'very bad dummy platform',
        'good arch',
        'good sim',
        0
    ),
    (
        'bad scenario 1',
        'good platform',
        'good arch',
        'bad sim',
        1
    ),
    (
        'bad scenario 1',
        'good platform',
        'good arch',
        'sim for scenario 1',
        None
    ),
    (
        'good scenario',
        'good platform',
        'unsupported arch 1',
        'good sim',
        2
    )
]
@pytest.mark.parametrize(
    'scenario, platform, architecture, simulation, expected_idx',
    TESTDATA_3,
    ids=[
        'not quarantined',
        'quarantined platform',
        'quarantined scenario with sim',
        'not quarantined with bad scenario',
        'quarantined arch'
    ]
)
def test_quarantinedata_get_matched_quarantine(
    scenario,
    platform,
    architecture,
    simulation,
    expected_idx
):
    """get_matched_quarantine() should return the first qlist element
    whose filters all match the given test configuration, or None.

    Fixes: the original repeated the full keyword-argument call in both
    assert branches; call it once and branch on the result.
    """
    qlist = [
        QuarantineElement(
            scenarios=['all'],
            platforms=['very bad dummy platform'],
            architectures=['all'],
            simulations=['all']
        ),
        QuarantineElement(
            scenarios=['bad scenario 1', 'bad scenario 2'],
            platforms=['all'],
            architectures=['all'],
            simulations=['bad sim']
        ),
        QuarantineElement(
            scenarios=['all'],
            platforms=['all'],
            architectures=['unsupported arch 1'],
            simulations=['all']
        ),
    ]
    quarantine_data = QuarantineData(qlist)

    matched = quarantine_data.get_matched_quarantine(
        scenario=scenario,
        platform=platform,
        architecture=architecture,
        simulation=simulation
    )

    if expected_idx is None:
        assert matched is None
    else:
        assert matched == qlist[expected_idx]
``` | /content/code_sandbox/scripts/tests/twister/test_quarantine.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,395 |
```python
#!/usr/bin/env python3
#
"""
Tests for cmakecache.py classes' methods
"""
import mock
import pytest
from contextlib import nullcontext
from twisterlib.cmakecache import CMakeCacheEntry, CMakeCache
# (raw CMake cache value, expected Python bool or exception type)
# for test_cmakecacheentry_to_bool. Mirrors CMake's truthiness rules,
# including the *-NOTFOUND convention.
TESTDATA_1 = [
    ('ON', True),
    ('YES', True),
    ('TRUE', True),
    ('Y', True),
    ('OFF', False),
    ('NO', False),
    ('FALSE', False),
    ('N', False),
    ('IGNORE', False),
    ('NOTFOUND', False),
    ('', False),
    ('DUMMY-NOTFOUND', False),
    ('1', True),
    ('0', False),
    ('I AM NOT A PROPER VALUE', ValueError),
]
@pytest.mark.parametrize(
    'cmake_bool, expected_bool',
    TESTDATA_1,
    ids=[t[0] for t in TESTDATA_1]
)
def test_cmakecacheentry_to_bool(cmake_bool, expected_bool):
    """_to_bool() should map CMake truth strings to Python bools and
    raise ValueError on anything unrecognised."""
    if isinstance(expected_bool, bool):
        assert CMakeCacheEntry._to_bool(cmake_bool) == expected_bool
    else:
        with pytest.raises(expected_bool) as exception:
            CMakeCacheEntry._to_bool(cmake_bool)
        assert str(exception.value) == f'invalid bool {cmake_bool}'
# (raw CMakeCache.txt line, expected CMakeCacheEntry / None / exception)
# for test_cmakecacheentry_from_line. Comments and unknown types parse
# to None; ';'-separated values become lists.
TESTDATA_2 = [
    (
        '// I am a comment',
        None
    ),
    (
        '# I am a comment too',
        None
    ),
    (
        ' \r\n ',
        None
    ),
    (
        'DUMMY:WRONG_TYPE=???',
        None
    ),
    (
        'DUMMY_VALUE_NAME1:STRING=I am a dummy string',
        CMakeCacheEntry('DUMMY_VALUE_NAME1', 'I am a dummy string')
    ),
    (
        'DUMMY_VALUE_NAME2:STRING=list_el1;list_el2;list_el3',
        CMakeCacheEntry(
            'DUMMY_VALUE_NAME2',
            ['list_el1', 'list_el2', 'list_el3']
        )
    ),
    (
        'DUMMY_VALUE_NAME3:INTERNAL=I am a dummy internal string',
        CMakeCacheEntry('DUMMY_VALUE_NAME3', 'I am a dummy internal string')),
    (
        'DUMMY_VALUE_NAME4:INTERNAL=list_el1;list_el2',
        CMakeCacheEntry('DUMMY_VALUE_NAME4', ['list_el1', 'list_el2'])
    ),
    (
        'DUMMY_VALUE_NAME5:FILEPATH=/path/to/dir',
        CMakeCacheEntry('DUMMY_VALUE_NAME5', '/path/to/dir')
    ),
    (
        'DUMMY_VALUE_NAME6:PATH=/path/to/dir/file.txt',
        CMakeCacheEntry('DUMMY_VALUE_NAME6', '/path/to/dir/file.txt')),
    (
        'DUMMY_VALUE_NAME7:BOOL=Y',
        CMakeCacheEntry('DUMMY_VALUE_NAME7', True)
    ),
    (
        'DUMMY_VALUE_NAME8:BOOL=FALSE',
        CMakeCacheEntry('DUMMY_VALUE_NAME8', False)
    ),
    (
        'DUMMY_VALUE_NAME9:BOOL=NOT_A_BOOL',
        # The expected line number (7) matches the one hard-coded in
        # test_cmakecacheentry_from_line below.
        ValueError(
            (
                'invalid bool NOT_A_BOOL',
                'on line 7: DUMMY_VALUE_NAME9:BOOL=NOT_A_BOOL'
            )
        )
    ),
]
@pytest.mark.parametrize(
    'cmake_line, expected',
    TESTDATA_2,
    ids=[
        '// comment',
        '# comment',
        'whitespace',
        'unrecognised type',
        'string',
        'string list',
        'internal string',
        'internal list',
        'filepath',
        'path',
        'true bool',
        'false bool',
        'not a bool'
    ]
)
def test_cmakecacheentry_from_line(cmake_line, expected):
    """from_line() should parse a cache line into an entry, skip
    comments/unknown types (None), or raise on malformed bools."""
    line_no = 7

    if isinstance(expected, Exception):
        with pytest.raises(type(expected)) as exception:
            CMakeCacheEntry.from_line(cmake_line, line_no)
        assert repr(exception.value) == repr(expected)
    elif expected is None:
        assert CMakeCacheEntry.from_line(cmake_line, line_no) is None
    else:
        entry = CMakeCacheEntry.from_line(cmake_line, line_no)
        assert entry.name == expected.name
        assert entry.value == expected.value
# (CMakeCacheEntry instance, expected str() output)
# for test_cmakecacheentry_str.
TESTDATA_3 = [
    (
        CMakeCacheEntry('DUMMY_NAME1', 'dummy value'),
        'CMakeCacheEntry(name=DUMMY_NAME1, value=dummy value)'
    ),
    (
        CMakeCacheEntry('DUMMY_NAME2', False),
        'CMakeCacheEntry(name=DUMMY_NAME2, value=False)'
    )
]
@pytest.mark.parametrize(
    'cmake_cache_entry, expected',
    TESTDATA_3,
    ids=['string value', 'bool value']
)
def test_cmakecacheentry_str(cmake_cache_entry, expected):
    """str() of an entry should render its name and value."""
    rendered = str(cmake_cache_entry)
    assert rendered == expected
def test_cmakecache_load():
    """from_file() should parse every value line (skipping comments) and
    keep the entries in file order."""
    file_data = (
        'DUMMY_NAME1:STRING=First line\n'
        '//Comment on the second line\n'
        'DUMMY_NAME2:STRING=Third line\n'
        'DUMMY_NAME3:STRING=Fourth line\n'
    )
    with mock.patch('builtins.open', mock.mock_open(read_data=file_data)):
        cache = CMakeCache.from_file('dummy/path/CMakeCache.txt')

    assert cache.cache_file == 'dummy/path/CMakeCache.txt'

    expected = [
        ('DUMMY_NAME1', 'First line'),
        ('DUMMY_NAME2', 'Third line'),
        ('DUMMY_NAME3', 'Fourth line')
    ]
    # Pop from the end of the ordered entry dict, checking the expected
    # pairs back-to-front.
    for exp_name, exp_value in reversed(expected):
        key, entry = cache._entries.popitem()
        assert key == exp_name
        assert entry.name == exp_name
        assert entry.value == exp_value
def test_cmakecache_get():
    """get() should return the entry's value, None for a missing key, or
    the supplied default."""
    with mock.patch('builtins.open',
                    mock.mock_open(read_data='DUMMY_NAME:STRING=Dummy value')):
        cache = CMakeCache.from_file('dummy/path/CMakeCache.txt')

    assert cache.get('DUMMY_NAME') == 'Dummy value'
    assert cache.get('ANOTHER_NAME') is None
    assert cache.get('ANOTHER_NAME', default='No such value') == 'No such value'
# (cache value suffix, whether to query the existing key, expected list
#  or exception type) for test_cmakecache_get_list.
TESTDATA_4 = [
    ('STRING=el1;el2;el3;el4', True, ['el1', 'el2', 'el3', 'el4']),
    ('STRING=dummy value', True, ['dummy value']),
    ('STRING=', True, []),
    ('BOOL=True', True, RuntimeError),
    ('STRING=dummy value', False, []),
]
@pytest.mark.parametrize(
    'value, correct_get, expected',
    TESTDATA_4,
    ids=['list', 'single value', 'empty', 'exception', 'get failure']
)
def test_cmakecache_get_list(value, correct_get, expected):
    """get_list() should split string values on ';', return [] for
    missing keys, and raise RuntimeError for non-string entries."""
    with mock.patch('builtins.open',
                    mock.mock_open(read_data=f'DUMMY_NAME:{value}')):
        cache = CMakeCache.from_file('dummy/path/CMakeCache.txt')

    name = 'DUMMY_NAME' if correct_get else 'ANOTHER_NAME'
    if isinstance(expected, type) and issubclass(expected, Exception):
        with pytest.raises(expected):
            cache.get_list(name)
    else:
        assert cache.get_list(name) == expected
def test_cmakecache_dunders():
    """Exercise CMakeCache's mapping protocol: __iter__, __setitem__,
    __contains__, __getitem__, __delitem__, and the TypeError raised
    when storing anything that is not a CMakeCacheEntry.

    Fixes: the read_data literal carried a pointless f-string prefix
    (no placeholders).
    """
    file_data = 'DUMMY_NAME:STRING=dummy value'
    with mock.patch('builtins.open', mock.mock_open(read_data=file_data)):
        cache = CMakeCache.from_file('dummy/path/CMakeCache.txt')

    # The dunders are called explicitly because they are the subject
    # under test.
    assert len(list(cache.__iter__())) == 1
    cache.__setitem__(
        'ANOTHER_NAME',
        CMakeCacheEntry('ANOTHER_NAME', 'another value')
    )
    assert cache.__contains__('ANOTHER_NAME')
    assert cache.__getitem__('ANOTHER_NAME') == 'another value'
    cache.__delitem__('ANOTHER_NAME')
    assert not cache.__contains__('ANOTHER_NAME')
    assert len(list(cache.__iter__())) == 1

    # Only CMakeCacheEntry values may be stored.
    with pytest.raises(TypeError):
        cache.__setitem__('WRONG_TYPE', 'Yet Another Dummy Value')
``` | /content/code_sandbox/scripts/tests/twister/test_cmakecache.py | python | 2016-05-26T17:54:19 | 2024-08-16T18:09:06 | zephyr | zephyrproject-rtos/zephyr | 10,307 | 1,780 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.