"""Support for skip/xfail functions and markers."""

from __future__ import annotations

from collections.abc import Generator
from collections.abc import Mapping
import dataclasses
import os
import platform
import sys
import traceback
from typing import Optional

from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.mark.structures import Mark
from _pytest.nodes import Item
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.reports import BaseReport
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.stash import StashKey


def pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="Report the results of xfail tests as if they were not marked",
    )

    parser.addini(
        "xfail_strict",
        "Default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
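

# Illustrative configuration for the option and ini value registered above
# (assumed project files, not part of this module): xfail_strict turns
# unexpected passes into failures, while --runxfail reports xfail-marked
# tests as if they were unmarked.
#
#   # pytest.ini
#   [pytest]
#   xfail_strict = true
#
#   $ pytest --runxfail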


def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # Replace pytest.xfail with a no-op for the duration of the session;
        # the registered cleanup restores the real implementation afterwards.
        import pytest

        old = pytest.xfail
        config.add_cleanup(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
    )


def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
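    # For example (illustrative marks, not taken from this module):
    #   @pytest.mark.skipif("sys.platform == 'win32'") - the string is eval()'d
    #   in the namespace built below (os, sys, platform, config, plus any
    #   pytest_markeval_namespace additions and the test function's globals);
    #   @pytest.mark.skipif(sys.platform == "win32", reason="...") - the value
    #   is bool()'d and an explicit reason is required.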
    # String condition.
    if isinstance(condition, str):
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        for dictionary in reversed(
            item.ihook.pytest_markeval_namespace(config=item.config)
        ):
            if not isinstance(dictionary, Mapping):
                raise ValueError(
                    f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}"
                )
            globals_.update(dictionary)
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)
        try:
            filename = f"<{mark.name} condition>"
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            msglines = [
                f"Error evaluating {mark.name!r} condition",
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                f"Error evaluating {mark.name!r} condition",
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                f"Error evaluating {mark.name!r} condition as a boolean",
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # A boolean condition carries no useful text for reporting,
            # so an explicit reason is required.
            msg = (
                f"Error evaluating {mark.name!r}: "
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason
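

# A sketch of the pytest_markeval_namespace hook consumed above (assumed
# conftest.py contents, not part of this module): any names it returns become
# available to string conditions such as @pytest.mark.skipif("on_ci").
#
#   import os
#
#   def pytest_markeval_namespace(config):
#       return {"on_ci": os.environ.get("CI") == "true"}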


@dataclasses.dataclass(frozen=True)
class Skip:
    """The result of evaluate_skip_marks()."""

    reason: str = "unconditional skip"


def evaluate_skip_marks(item: Item) -> Skip | None:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        try:
            return Skip(*mark.args, **mark.kwargs)
        except TypeError as e:
            raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None

    return None
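

# Illustrative marker spellings handled above (assumed test code):
#   @pytest.mark.skip                              -> Skip("unconditional skip")
#   @pytest.mark.skip(reason="broken on main")     -> Skip("broken on main")
#   @pytest.mark.skipif(sys.platform == "win32", reason="posix only")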


@dataclasses.dataclass(frozen=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    __slots__ = ("reason", "run", "strict", "raises")

    reason: str
    run: bool
    strict: bool
    raises: tuple[type[BaseException], ...] | None


def evaluate_xfail_marks(item: Item) -> Xfail | None:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None


# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StashKey[Optional[Xfail]]()


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    skipped = evaluate_skip_marks(item)
    if skipped:
        raise skip.Exception(skipped.reason, _use_item_location=True)

    item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)
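

# For example (illustrative), @pytest.mark.xfail(run=False, reason="segfaults")
# never reaches the test body: setup raises xfail("[NOTRUN] segfaults") and
# the test is reported as xfailed.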


@hookimpl(wrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None]:
    xfailed = item.stash.get(xfailed_key, None)
    if xfailed is None:
        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    try:
        return (yield)
    finally:
        # The test run may have added an xfail mark dynamically.
        xfailed = item.stash.get(xfailed_key, None)
        if xfailed is None:
            item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
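

# A hedged sketch of why the finally block re-evaluates (assumed test code,
# hypothetical fixture name): a mark added while the test runs, e.g. via
# request.node.add_marker, is only visible on a fresh evaluation.
#
#   @pytest.fixture
#   def flaky_guard(request):
#       yield
#       request.node.add_marker(pytest.mark.xfail(reason="flaky teardown"))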


@hookimpl(wrapper=True)
def pytest_runtest_makereport(
    item: Item, call: CallInfo[None]
) -> Generator[None, TestReport, TestReport]:
    rep = yield
    xfailed = item.stash.get(xfailed_key, None)
    if item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason
    return rep


def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None:
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"
    return None