# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0

# pylint: disable=W0621  # redefined-outer-name

# This file is a pytest root configuration file and provides the following functionality:
# 1. Defines a few fixtures that can be used across the whole project.
# 2. Defines a few hook functions.
#
# IDF uses [pytest](https://github.com/pytest-dev/pytest) and the
# [pytest-embedded plugin](https://github.com/espressif/pytest-embedded) as its example test framework.
#
# This is an experimental feature; if you find a bug or have a question, please report it at
# https://github.com/espressif/pytest-embedded/issues
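#
# A typical invocation, for illustration only ('esp32' is a placeholder for any
# supported target; --certification-json and --ci-branch are optional and
# default to the values registered in pytest_addoption below):
#
#   pytest --target esp32 --certification-json certification_test_commands.json
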
import logging
import os
import xml.etree.ElementTree as ET
from datetime import datetime
from typing import Callable, List, Optional, Tuple

import pytest
from _pytest.config import Config, ExitCode
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
from _pytest.nodes import Item
from _pytest.python import Function
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.terminal import TerminalReporter
from pytest_embedded.plugin import multi_dut_argument, multi_dut_fixture
from pytest_embedded.utils import find_by_suffix

DEFAULT_SDKCONFIG = 'default'

##################
# Help Functions #
##################
def format_case_id(target: Optional[str], config: Optional[str], case: str) -> str:
    return f'{target}.{config}.{case}'
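# For illustration: format_case_id('esp32', 'default', 'test_echo') returns
# 'esp32.default.test_echo' ('test_echo' is a hypothetical case name).
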
def item_marker_names(item: Item) -> List[str]:
    return [marker.name for marker in item.iter_markers()]

############
# Fixtures #
############
@pytest.fixture(scope='session', autouse=True)
def session_tempdir() -> str:
    _tmpdir = os.path.join(
        os.path.dirname(__file__),
        'pytest_embedded_log',
        datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
    )
    os.makedirs(_tmpdir, exist_ok=True)
    return _tmpdir
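# For illustration: a session started at noon on 2024-01-01 would log under
# pytest_embedded_log/2024-01-01_12-00-00 next to this file (the date is a
# placeholder; the directory name comes from the strftime format above).
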
@pytest.fixture
@multi_dut_argument
def config(request: FixtureRequest) -> str:
    return getattr(request, 'param', None) or DEFAULT_SDKCONFIG
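# For illustration: a test can select a non-default sdkconfig through pytest's
# indirect parametrization ('release' is a placeholder config name):
#
#   @pytest.mark.parametrize('config', ['release'], indirect=True)
#   def test_example(dut) -> None: ...
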
@pytest.fixture
def test_func_name(request: FixtureRequest) -> str:
    return request.node.function.__name__  # type: ignore


@pytest.fixture
def test_case_name(request: FixtureRequest, target: str, config: str) -> str:
    return format_case_id(target, config, request.node.originalname)

@pytest.fixture
@multi_dut_fixture
def build_dir(app_path: str, target: Optional[str], config: Optional[str]) -> str:
    """
    Check local build dir with the following priority:

    1. build_<target>_<config>
    2. build_<target>
    3. build_<config>
    4. build

    Args:
        app_path: app path
        target: target
        config: config

    Returns:
        valid build directory
    """
    check_dirs = []
    if target is not None and config is not None:
        check_dirs.append(f'build_{target}_{config}')
    if target is not None:
        check_dirs.append(f'build_{target}')
    if config is not None:
        check_dirs.append(f'build_{config}')
    check_dirs.append('build')

    for check_dir in check_dirs:
        binary_path = os.path.join(app_path, check_dir)
        if os.path.isdir(binary_path):
            logging.info(f'found valid binary path: {binary_path}')
            return check_dir

        logging.warning('checking binary path: %s... missing... trying another place', binary_path)

    recommend_place = check_dirs[0]
    raise ValueError(
        f'No valid build directory found. Please build the binary via "idf.py -B {recommend_place} build" and run pytest again'
    )
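# For illustration: with --target esp32 and config 'default', the lookup order
# above is build_esp32_default, build_esp32, build_default, then build.
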
def pytest_addoption(parser):
    parser.addoption(
        "--certification-json",
        action="store",
        default="certification_test_commands.json",
        help="Path to the certification test commands JSON file",
    )
    parser.addoption(
        "--ci-branch",
        action="store",
        default="main",
        help="Branch on which the CI runs",
    )

@pytest.fixture(scope="session")
|
|
def certification_tests(request):
|
|
return request.config.getoption("--certification-json")
|
|
|
|
|
|
@pytest.fixture(scope="session")
|
|
def ci_branch(request):
|
|
return request.config.getoption("--ci-branch")
|
|
|
|
|
|
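# For illustration, overriding both session options (the values shown are
# placeholders, not real files or branches):
#
#   pytest --target esp32 --certification-json my_commands.json --ci-branch release/v1.1
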
@pytest.fixture(autouse=True)
@multi_dut_fixture
def junit_properties(
    test_case_name: str, record_xml_attribute: Callable[[str, object], None]
) -> None:
    """
    This fixture is autoused and will modify the junit report test case name to <target>.<config>.<case_name>
    """
    record_xml_attribute('name', test_case_name)


##################
# Hook functions #
##################
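# The stash key below is how this conftest keeps a reference to the plugin
# instance registered in pytest_configure so it can be unregistered later;
# pytest.StashKey must be instantiated, hence the trailing parentheses.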
_idf_pytest_embedded_key = pytest.StashKey['IdfPytestEmbedded']()

def pytest_configure(config: Config) -> None:
    # cli option "--target"
    target = config.getoption('target') or ''

    help_commands = ['--help', '--fixtures', '--markers', '--version']
    for cmd in help_commands:
        if cmd in config.invocation_params.args:
            target = 'unneeded'
            break

    assert target, "Must specify target by --target"

    config.stash[_idf_pytest_embedded_key] = IdfPytestEmbedded(
        target=target,
    )
    config.pluginmanager.register(config.stash[_idf_pytest_embedded_key])
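# Note: help-style invocations such as `pytest --markers` bypass the --target
# requirement above by substituting the placeholder value 'unneeded'.
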
def pytest_unconfigure(config: Config) -> None:
    _pytest_embedded = config.stash.get(_idf_pytest_embedded_key, None)
    if _pytest_embedded:
        del config.stash[_idf_pytest_embedded_key]
        config.pluginmanager.unregister(_pytest_embedded)

class IdfPytestEmbedded:
    def __init__(
        self,
        target: Optional[str] = None,
    ):
        # CLI options to filter the test cases
        self.target = target
        self._failed_cases: List[Tuple[str, bool]] = []  # (test_case_name, is_xfail)
    @property
    def failed_cases(self) -> List[str]:
        return [case for case, is_xfail in self._failed_cases if not is_xfail]

    @property
    def xfail_cases(self) -> List[str]:
        return [case for case, is_xfail in self._failed_cases if is_xfail]

    @pytest.hookimpl(tryfirst=True)
    def pytest_sessionstart(self, session: Session) -> None:
        if self.target:
            self.target = self.target.lower()
            session.config.option.target = self.target
    @pytest.hookimpl(tryfirst=True)
    def pytest_collection_modifyitems(self, items: List[Function]) -> None:
        # sort by file path and callspec.config
        # implemented like this because of a pytest limitation: fixture values are not available while collecting
        # https://github.com/pytest-dev/pytest/discussions/9689
        def _get_param_config(_item: Function) -> str:
            if hasattr(_item, 'callspec'):
                return _item.callspec.params.get('config', DEFAULT_SDKCONFIG)  # type: ignore
            return DEFAULT_SDKCONFIG

        items.sort(key=lambda x: (os.path.dirname(x.path), _get_param_config(x)))
        # set a default timeout of 40 minutes for each case
        for item in items:
            if 'timeout' not in item.keywords:
                item.add_marker(pytest.mark.timeout(40 * 60))
        # filter all the test cases with "--target"
        if self.target:
            items[:] = [item for item in items if self.target in item_marker_names(item)]
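    # For illustration: with --target esp32, only tests carrying the matching
    # marker survive the filter above, e.g.
    #
    #   @pytest.mark.esp32
    #   def test_example(dut) -> None: ...
    #
    # ('test_example' is a hypothetical name).
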
    def pytest_runtest_makereport(
        self, item: Function, call: CallInfo[None]
    ) -> Optional[TestReport]:
        report = TestReport.from_item_and_call(item, call)
        if report.outcome == 'failed':
            test_case_name = item.funcargs.get('test_case_name', '')
            is_xfail = report.keywords.get('xfail', False)
            self._failed_cases.append((test_case_name, is_xfail))

        return report
    @pytest.hookimpl(trylast=True)
    def pytest_runtest_teardown(self, item: Function) -> None:
        """
        Format the junit reports generated by the test cases
        """
        tempdir = item.funcargs.get('test_case_tempdir')
        if not tempdir:
            return

        junits = find_by_suffix('.xml', tempdir)
        if not junits:
            return

        target = item.funcargs['target']
        config = item.funcargs['config']
        for junit in junits:
            xml = ET.parse(junit)
            testcases = xml.findall('.//testcase')
            for case in testcases:
                case.attrib['name'] = format_case_id(target, config, case.attrib['name'])
                if 'file' in case.attrib:
                    case.attrib['file'] = case.attrib['file'].replace('/IDF/', '')  # our unity test framework
            xml.write(junit)
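    # For illustration: a <testcase name="test_echo"> entry in one of these
    # junit files becomes <testcase name="esp32.default.test_echo"> when run
    # with --target esp32 and the default sdkconfig (hypothetical case name).
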
    def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:
        if exitstatus != 0:
            if exitstatus == ExitCode.NO_TESTS_COLLECTED:
                session.exitstatus = 0
    def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
        if self.xfail_cases:
            terminalreporter.section('xfail cases', bold=True, yellow=True)
            terminalreporter.line('\n'.join(self.xfail_cases))

        if self.failed_cases:
            terminalreporter.section('Failed cases', bold=True, red=True)
            terminalreporter.line('\n'.join(self.failed_cases))