ZAIDX11 commited on
Commit
25b3648
·
verified ·
1 Parent(s): 9941744

Add files using upload-large-folder tool

Browse files
archive/.venv/share/jupyter/lab/static/6733.bf3398ba9bb890f0fb67.js.LICENSE.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * @license React
3
+ * react-is.production.min.js
4
+ *
5
+ * Copyright (c) Facebook, Inc. and its affiliates.
6
+ *
7
+ * This source code is licensed under the MIT license found in the
8
+ * LICENSE file in the root directory of this source tree.
9
+ */
archive/.venv/share/jupyter/lab/static/6767.4b82d96c237ca7e31bc6.js ADDED
@@ -0,0 +1 @@
 
 
1
+ "use strict";(self["webpackChunk_jupyterlab_application_top"]=self["webpackChunk_jupyterlab_application_top"]||[]).push([[6767],{56767:(e,t,r)=>{r.r(t);r.d(t,{vbScript:()=>a,vbScriptASP:()=>i});function n(e){var t="error";function r(e){return new RegExp("^(("+e.join(")|(")+"))\\b","i")}var n=new RegExp("^[\\+\\-\\*/&\\\\\\^<>=]");var a=new RegExp("^((<>)|(<=)|(>=))");var i=new RegExp("^[\\.,]");var o=new RegExp("^[\\(\\)]");var c=new RegExp("^[A-Za-z][_A-Za-z0-9]*");var u=["class","sub","select","while","if","function","property","with","for"];var l=["else","elseif","case"];var s=["next","loop","wend"];var v=r(["and","or","not","xor","is","mod","eqv","imp"]);var b=["dim","redim","then","until","randomize","byval","byref","new","property","exit","in","const","private","public","get","set","let","stop","on error resume next","on error goto 0","option explicit","call","me"];var d=["true","false","nothing","empty","null"];var f=["abs","array","asc","atn","cbool","cbyte","ccur","cdate","cdbl","chr","cint","clng","cos","csng","cstr","date","dateadd","datediff","datepart","dateserial","datevalue","day","escape","eval","execute","exp","filter","formatcurrency","formatdatetime","formatnumber","formatpercent","getlocale","getobject","getref","hex","hour","inputbox","instr","instrrev","int","fix","isarray","isdate","isempty","isnull","isnumeric","isobject","join","lbound","lcase","left","len","loadpicture","log","ltrim","rtrim","trim","maths","mid","minute","month","monthname","msgbox","now","oct","replace","rgb","right","rnd","round","scriptengine","scriptenginebuildversion","scriptenginemajorversion","scriptengineminorversion","second","setlocale","sgn","sin","space","split","sqr","strcomp","string","strreverse","tan","time","timer","timeserial","timevalue","typename","ubound","ucase","unescape","vartype","weekday","weekdayname","year"];var 
m=["vbBlack","vbRed","vbGreen","vbYellow","vbBlue","vbMagenta","vbCyan","vbWhite","vbBinaryCompare","vbTextCompare","vbSunday","vbMonday","vbTuesday","vbWednesday","vbThursday","vbFriday","vbSaturday","vbUseSystemDayOfWeek","vbFirstJan1","vbFirstFourDays","vbFirstFullWeek","vbGeneralDate","vbLongDate","vbShortDate","vbLongTime","vbShortTime","vbObjectError","vbOKOnly","vbOKCancel","vbAbortRetryIgnore","vbYesNoCancel","vbYesNo","vbRetryCancel","vbCritical","vbQuestion","vbExclamation","vbInformation","vbDefaultButton1","vbDefaultButton2","vbDefaultButton3","vbDefaultButton4","vbApplicationModal","vbSystemModal","vbOK","vbCancel","vbAbort","vbRetry","vbIgnore","vbYes","vbNo","vbCr","VbCrLf","vbFormFeed","vbLf","vbNewLine","vbNullChar","vbNullString","vbTab","vbVerticalTab","vbUseDefault","vbTrue","vbFalse","vbEmpty","vbNull","vbInteger","vbLong","vbSingle","vbDouble","vbCurrency","vbDate","vbString","vbObject","vbError","vbBoolean","vbVariant","vbDataObject","vbDecimal","vbByte","vbArray"];var p=["WScript","err","debug","RegExp"];var h=["description","firstindex","global","helpcontext","helpfile","ignorecase","length","number","pattern","source","value","count"];var y=["clear","execute","raise","replace","test","write","writeline","close","open","state","eof","update","addnew","end","createobject","quit"];var g=["server","response","request","session","application"];var k=["buffer","cachecontrol","charset","contenttype","expires","expiresabsolute","isclientconnected","pics","status","clientcertificate","cookies","form","querystring","servervariables","totalbytes","contents","staticobjects","codepage","lcid","sessionid","timeout","scripttimeout"];var w=["addheader","appendtolog","binarywrite","end","flush","redirect","binaryread","remove","removeall","lock","unlock","abandon","getlasterror","htmlencode","mappath","transfer","urlencode"];var x=y.concat(h);p=p.concat(m);if(e.isASP){p=p.concat(g);x=x.concat(w,k)}var C=r(b);var I=r(d);var L=r(f);var S=r(p);var D=r(x);var 
E='"';var j=r(u);var O=r(l);var T=r(s);var z=r(["end"]);var R=r(["do"]);var F=r(["on error resume next","exit"]);var A=r(["rem"]);function B(e,t){t.currentIndent++}function N(e,t){t.currentIndent--}function _(e,r){if(e.eatSpace()){return null}var u=e.peek();if(u==="'"){e.skipToEnd();return"comment"}if(e.match(A)){e.skipToEnd();return"comment"}if(e.match(/^((&H)|(&O))?[0-9\.]/i,false)&&!e.match(/^((&H)|(&O))?[0-9\.]+[a-z_]/i,false)){var l=false;if(e.match(/^\d*\.\d+/i)){l=true}else if(e.match(/^\d+\.\d*/)){l=true}else if(e.match(/^\.\d+/)){l=true}if(l){e.eat(/J/i);return"number"}var s=false;if(e.match(/^&H[0-9a-f]+/i)){s=true}else if(e.match(/^&O[0-7]+/i)){s=true}else if(e.match(/^[1-9]\d*F?/)){e.eat(/J/i);s=true}else if(e.match(/^0(?![\dx])/i)){s=true}if(s){e.eat(/L/i);return"number"}}if(e.match(E)){r.tokenize=W(e.current());return r.tokenize(e,r)}if(e.match(a)||e.match(n)||e.match(v)){return"operator"}if(e.match(i)){return null}if(e.match(o)){return"bracket"}if(e.match(F)){r.doInCurrentLine=true;return"keyword"}if(e.match(R)){B(e,r);r.doInCurrentLine=true;return"keyword"}if(e.match(j)){if(!r.doInCurrentLine)B(e,r);else r.doInCurrentLine=false;return"keyword"}if(e.match(O)){return"keyword"}if(e.match(z)){N(e,r);N(e,r);return"keyword"}if(e.match(T)){if(!r.doInCurrentLine)N(e,r);else r.doInCurrentLine=false;return"keyword"}if(e.match(C)){return"keyword"}if(e.match(I)){return"atom"}if(e.match(D)){return"variableName.special"}if(e.match(L)){return"builtin"}if(e.match(S)){return"builtin"}if(e.match(c)){return"variable"}e.next();return t}function W(e){var t=e.length==1;var r="string";return function(n,a){while(!n.eol()){n.eatWhile(/[^'"]/);if(n.match(e)){a.tokenize=_;return r}else{n.eat(/['"]/)}}if(t){a.tokenize=_}return r}}function q(e,r){var n=r.tokenize(e,r);var 
a=e.current();if(a==="."){n=r.tokenize(e,r);a=e.current();if(n&&(n.substr(0,8)==="variable"||n==="builtin"||n==="keyword")){if(n==="builtin"||n==="keyword")n="variable";if(x.indexOf(a.substr(1))>-1)n="keyword";return n}else{return t}}return n}return{name:"vbscript",startState:function(){return{tokenize:_,lastToken:null,currentIndent:0,nextLineIndent:0,doInCurrentLine:false,ignoreKeyword:false}},token:function(e,t){if(e.sol()){t.currentIndent+=t.nextLineIndent;t.nextLineIndent=0;t.doInCurrentLine=0}var r=q(e,t);t.lastToken={style:r,content:e.current()};if(r===null)r=null;return r},indent:function(e,t,r){var n=t.replace(/^\s+|\s+$/g,"");if(n.match(T)||n.match(z)||n.match(O))return r.unit*(e.currentIndent-1);if(e.currentIndent<0)return 0;return e.currentIndent*r.unit}}}const a=n({});const i=n({isASP:true})}}]);
archive/Axiovorax/.venv/Lib/site-packages/_pytest/pastebin.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Submit failure or test session information to a pastebin service."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from io import StringIO
7
+ import tempfile
8
+ from typing import IO
9
+
10
+ from _pytest.config import Config
11
+ from _pytest.config import create_terminal_writer
12
+ from _pytest.config.argparsing import Parser
13
+ from _pytest.stash import StashKey
14
+ from _pytest.terminal import TerminalReporter
15
+ import pytest
16
+
17
+
18
+ pastebinfile_key = StashKey[IO[bytes]]()
19
+
20
+
21
def pytest_addoption(parser: Parser) -> None:
    """Register the ``--pastebin`` command line option.

    ``--pastebin=failed`` pastes each failure; ``--pastebin=all`` pastes
    the whole session log.
    """
    parser.getgroup("terminal reporting").addoption(
        "--pastebin",
        metavar="mode",
        action="store",
        dest="pastebin",
        default=None,
        choices=["failed", "all"],
        help="Send failed|all info to bpaste.net pastebin service",
    )
32
+
33
+
34
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
    """In ``--pastebin=all`` mode, tee all terminal output into a temp file.

    The file is later read back and uploaded by :func:`pytest_unconfigure`.
    """
    if config.option.pastebin != "all":
        return
    reporter = config.pluginmanager.getplugin("terminalreporter")
    # If no terminal reporter plugin is present, nothing we can do here;
    # this can happen when this function executes in a worker node
    # when using pytest-xdist, for example.
    if reporter is None:
        return
    # pastebin file will be UTF-8 encoded binary file.
    config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b")
    original_write = reporter._tw.write

    def tee_write(s, **kwargs):
        # Forward to the real writer, then mirror the bytes into the stash file.
        original_write(s, **kwargs)
        if isinstance(s, str):
            s = s.encode("utf-8")
        config.stash[pastebinfile_key].write(s)

    reporter._tw.write = tee_write
53
+
54
+
55
def pytest_unconfigure(config: Config) -> None:
    """Upload the captured session log and undo the terminal-writer patching."""
    if pastebinfile_key not in config.stash:
        return
    pastebinfile = config.stash[pastebinfile_key]
    # Get terminal contents and delete file.
    pastebinfile.seek(0)
    sessionlog = pastebinfile.read()
    pastebinfile.close()
    del config.stash[pastebinfile_key]
    # Undo our patching in the terminal reporter.
    reporter = config.pluginmanager.getplugin("terminalreporter")
    del reporter._tw.__dict__["write"]
    # Write summary.
    reporter.write_sep("=", "Sending information to Paste Service")
    pastebinurl = create_new_paste(sessionlog)
    reporter.write_line(f"pastebin session-log: {pastebinurl}\n")
70
+
71
+
72
def create_new_paste(contents: str | bytes) -> str:
    """Create a new paste using the bpaste.net service.

    :contents: Paste contents string.
    :returns: URL to the pasted contents, or an error message.
    """
    import re
    from urllib.error import HTTPError
    from urllib.parse import urlencode
    from urllib.request import urlopen

    url = "https://bpa.st"
    payload = urlencode(
        {"code": contents, "lexer": "text", "expiry": "1week"}
    ).encode("ascii")
    try:
        response: str = urlopen(url, data=payload).read().decode("utf-8")
    except HTTPError as e:
        with e:  # HTTPErrors are also http responses that must be closed!
            return f"bad response: {e}"
    except OSError as e:  # eg urllib.error.URLError
        return f"bad response: {e}"
    match = re.search(r'href="/raw/(\w+)"', response)
    if match is None:
        return "bad response: invalid format ('" + response + "')"
    return f"{url}/show/{match.group(1)}"
99
+
100
+
101
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """In ``--pastebin=failed`` mode, paste each failure and report its URL."""
    if terminalreporter.config.option.pastebin != "failed":
        return
    if "failed" not in terminalreporter.stats:
        return
    terminalreporter.write_sep("=", "Sending information to Paste Service")
    for rep in terminalreporter.stats["failed"]:
        try:
            msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
        except AttributeError:
            # Not a regular test failure report; fall back to the headline.
            msg = terminalreporter._getfailureheadline(rep)
        buffer = StringIO()
        writer = create_terminal_writer(terminalreporter.config, buffer)
        rep.toterminal(writer)
        s = buffer.getvalue()
        assert len(s)
        pastebinurl = create_new_paste(s)
        terminalreporter.write_line(f"{msg} --> {pastebinurl}")
archive/Axiovorax/.venv/Lib/site-packages/_pytest/pathlib.py ADDED
@@ -0,0 +1,1055 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import atexit
4
+ from collections.abc import Callable
5
+ from collections.abc import Iterable
6
+ from collections.abc import Iterator
7
+ import contextlib
8
+ from enum import Enum
9
+ from errno import EBADF
10
+ from errno import ELOOP
11
+ from errno import ENOENT
12
+ from errno import ENOTDIR
13
+ import fnmatch
14
+ from functools import partial
15
+ from importlib.machinery import ModuleSpec
16
+ from importlib.machinery import PathFinder
17
+ import importlib.util
18
+ import itertools
19
+ import os
20
+ from os.path import expanduser
21
+ from os.path import expandvars
22
+ from os.path import isabs
23
+ from os.path import sep
24
+ from pathlib import Path
25
+ from pathlib import PurePath
26
+ from posixpath import sep as posix_sep
27
+ import shutil
28
+ import sys
29
+ import types
30
+ from types import ModuleType
31
+ from typing import Any
32
+ from typing import TypeVar
33
+ import uuid
34
+ import warnings
35
+
36
+ from _pytest.compat import assert_never
37
+ from _pytest.outcomes import skip
38
+ from _pytest.warning_types import PytestWarning
39
+
40
+
41
+ if sys.version_info < (3, 11):
42
+ from importlib._bootstrap_external import _NamespaceLoader as NamespaceLoader
43
+ else:
44
+ from importlib.machinery import NamespaceLoader
45
+
46
+ LOCK_TIMEOUT = 60 * 60 * 24 * 3
47
+
48
+ _AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
49
+
50
+ # The following function, variables and comments were
51
+ # copied from cpython 3.9 Lib/pathlib.py file.
52
+
53
+ # EBADF - guard against macOS `stat` throwing EBADF
54
+ _IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
55
+
56
+ _IGNORED_WINERRORS = (
57
+ 21, # ERROR_NOT_READY - drive exists but is not accessible
58
+ 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
59
+ )
60
+
61
+
62
def _ignore_error(exception: Exception) -> bool:
    """Return True when *exception* carries an errno/winerror value we ignore."""
    if getattr(exception, "errno", None) in _IGNORED_ERRORS:
        return True
    return getattr(exception, "winerror", None) in _IGNORED_WINERRORS
67
+
68
+
69
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
    """Return the ``.lock`` file path inside *path*."""
    return path / ".lock"
71
+
72
+
73
def on_rm_rf_error(
    func: Callable[..., Any] | None,
    path: str,
    excinfo: BaseException
    | tuple[type[BaseException], BaseException, types.TracebackType | None],
    *,
    start_path: Path,
) -> bool:
    """Handle known read-only errors during rmtree.

    The returned value is used only by our own tests.
    """
    # Accept both the 3.12 ``onexc`` form (an exception instance) and the
    # older ``onerror`` form (a (type, value, traceback) triple).
    exc = excinfo if isinstance(excinfo, BaseException) else excinfo[1]

    # Another process removed the file in the middle of the "rm_rf" (xdist for example).
    # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
    if isinstance(exc, FileNotFoundError):
        return False

    if not isinstance(exc, PermissionError):
        warnings.warn(
            PytestWarning(f"(rm_rf) error removing {path}\n{type(exc)}: {exc}")
        )
        return False

    if func not in (os.rmdir, os.remove, os.unlink):
        if func not in (os.open,):
            warnings.warn(
                PytestWarning(
                    f"(rm_rf) unknown function {func} when removing {path}:\n{type(exc)}: {exc}"
                )
            )
        return False

    # Chmod + retry.
    import stat

    def make_read_write(p: str) -> None:
        mode = os.stat(p).st_mode
        os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)

    # For files, we need to recursively go upwards in the directories to
    # ensure they all are also writable.
    p = Path(path)
    if p.is_file():
        for parent in p.parents:
            make_read_write(str(parent))
            # Stop when we reach the original path passed to rm_rf.
            if parent == start_path:
                break
    make_read_write(str(path))

    func(path)
    return True
130
+
131
+
132
def ensure_extended_length_path(path: Path) -> Path:
    """Get the extended-length version of a path (Windows).

    On Windows the default maximum path length (MAX_PATH) is 260 characters;
    converting to "extended-length" form (``\\\\?\\`` prefix) lifts that limit:
    https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation

    On Windows, return the extended-length absolute version of *path*;
    on every other platform, return *path* unchanged.
    """
    if not sys.platform.startswith("win32"):
        return path
    return Path(get_extended_length_path_str(str(path.resolve())))
148
+
149
+
150
def get_extended_length_path_str(path: str) -> str:
    """Convert a path string to Windows extended-length form."""
    long_prefix = "\\\\?\\"
    unc_long_prefix = "\\\\?\\UNC\\"
    # Already in extended-length form (plain or UNC): nothing to do.
    if path.startswith((long_prefix, unc_long_prefix)):
        return path
    # UNC
    if path.startswith("\\\\"):
        return unc_long_prefix + path[2:]
    return long_prefix + path
160
+
161
+
162
def rm_rf(path: Path) -> None:
    """Remove the path contents recursively, even if some elements
    are read-only."""
    path = ensure_extended_length_path(path)
    error_handler = partial(on_rm_rf_error, start_path=path)
    # shutil.rmtree renamed/retyped its error callback parameter in 3.12
    # (onerror -> onexc); pick the one this interpreter supports.
    if sys.version_info >= (3, 12):
        shutil.rmtree(str(path), onexc=error_handler)
    else:
        shutil.rmtree(str(path), onerror=error_handler)
171
+
172
+
173
def find_prefixed(root: Path, prefix: str) -> Iterator[os.DirEntry[str]]:
    """Yield the entries of *root* whose names start with *prefix*, case-insensitively."""
    wanted = prefix.lower()
    for entry in os.scandir(root):
        if entry.name.lower().startswith(wanted):
            yield entry
179
+
180
+
181
def extract_suffixes(iter: Iterable[os.DirEntry[str]], prefix: str) -> Iterator[str]:
    """Return the parts of the paths following the prefix.

    :param iter: Iterator over path names.
    :param prefix: Expected prefix of the path names.
    """
    cut = len(prefix)
    return (entry.name[cut:] for entry in iter)
190
+
191
+
192
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
    """Combine find_prefixed and extract_suffixes."""
    prefixed_entries = find_prefixed(root, prefix)
    return extract_suffixes(prefixed_entries, prefix)
195
+
196
+
197
def parse_num(maybe_num: str) -> int:
    """Parse a numbered-directory suffix, returning -1 when it is not a number."""
    try:
        value = int(maybe_num)
    except ValueError:
        value = -1
    return value
203
+
204
+
205
+ def _force_symlink(root: Path, target: str | PurePath, link_to: str | Path) -> None:
206
+ """Helper to create the current symlink.
207
+
208
+ It's full of race conditions that are reasonably OK to ignore
209
+ for the context of best effort linking to the latest test run.
210
+
211
+ The presumption being that in case of much parallelism
212
+ the inaccuracy is going to be acceptable.
213
+ """
214
+ current_symlink = root.joinpath(target)
215
+ try:
216
+ current_symlink.unlink()
217
+ except OSError:
218
+ pass
219
+ try:
220
+ current_symlink.symlink_to(link_to)
221
+ except Exception:
222
+ pass
223
+
224
+
225
def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path:
    """Create a directory with an increased number as suffix for the given prefix."""
    # try up to 10 times to create the folder
    for _attempt in range(10):
        # Compute the next free number from what currently exists on disk.
        max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
        new_path = root.joinpath(f"{prefix}{max_existing + 1}")
        try:
            new_path.mkdir(mode=mode)
        except Exception:
            # Raced with another process; recompute and retry.
            continue
        _force_symlink(root, prefix + "current", new_path)
        return new_path
    raise OSError(
        "could not create numbered dir with prefix "
        f"{prefix} in {root} after 10 tries"
    )
244
+
245
+
246
def create_cleanup_lock(p: Path) -> Path:
    """Create a lock to prevent premature folder cleanup."""
    lock_path = get_lock_path(p)
    try:
        # O_EXCL makes creation atomic: only one process gets the lock.
        fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
    except FileExistsError as e:
        raise OSError(f"cannot create lockfile in {p}") from e
    # Record the owning pid inside the lock file.
    os.write(fd, str(os.getpid()).encode())
    os.close(fd)
    if not lock_path.is_file():
        raise OSError("lock path got renamed after successful creation")
    return lock_path
261
+
262
+
263
def register_cleanup_lock_removal(
    lock_path: Path, register: Any = atexit.register
) -> Any:
    """Register a cleanup function for removing a lock, by default on atexit."""
    original_pid = os.getpid()

    def cleanup_on_exit(
        lock_path: Path = lock_path, original_pid: int = original_pid
    ) -> None:
        if os.getpid() != original_pid:
            # We are in a forked child; the parent owns the lock.
            return
        with contextlib.suppress(OSError):
            lock_path.unlink()

    return register(cleanup_on_exit)
280
+
281
+
282
def maybe_delete_a_numbered_dir(path: Path) -> None:
    """Remove a numbered directory if its lock can be obtained and it does
    not seem to be in use."""
    path = ensure_extended_length_path(path)
    lock_path = None
    try:
        lock_path = create_cleanup_lock(path)
        # Rename out of the way first so other processes stop seeing the
        # directory, then delete the renamed tree.
        garbage = path.parent.joinpath(f"garbage-{uuid.uuid4()}")
        path.rename(garbage)
        rm_rf(garbage)
    except OSError:
        # known races:
        # * other process did a cleanup at the same time
        # * deletable folder was found
        # * process cwd (Windows)
        return
    finally:
        # If we created the lock, ensure we remove it even if we failed
        # to properly remove the numbered dir.
        if lock_path is not None:
            with contextlib.suppress(OSError):
                lock_path.unlink()
308
+
309
+
310
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
    """Check if `path` is deletable based on whether the lock file is expired."""
    if path.is_symlink():
        return False
    lock = get_lock_path(path)
    try:
        if not lock.is_file():
            return True
    except OSError:
        # we might not have access to the lock file at all, in this case assume
        # we don't have access to the entire directory (#7491).
        return False
    try:
        lock_time = lock.stat().st_mtime
    except Exception:
        return False
    if lock_time >= consider_lock_dead_if_created_before:
        return False
    # The lock is stale. Ignore any errors while trying to remove it, such as:
    # - PermissionDenied, like the file permissions have changed since the lock creation;
    # - FileNotFoundError, in case another pytest process got here first;
    # and any other cause of failure.
    with contextlib.suppress(OSError):
        lock.unlink()
    return True
336
+
337
+
338
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
    """Try to cleanup a folder if we can ensure it's deletable."""
    deletable = ensure_deletable(path, consider_lock_dead_if_created_before)
    if deletable:
        maybe_delete_a_numbered_dir(path)
342
+
343
+
344
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
    """List candidates for numbered directories to be removed - follows py.path."""
    max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
    max_delete = max_existing - keep
    # tee so one stream yields the entries and the other their numbers.
    entries, entries_for_numbers = itertools.tee(find_prefixed(root, prefix))
    numbers = map(parse_num, extract_suffixes(entries_for_numbers, prefix))
    for entry, number in zip(entries, numbers):
        if number <= max_delete:
            yield Path(entry)
354
+
355
+
356
def cleanup_dead_symlinks(root: Path) -> None:
    """Delete symlinks in *root* whose targets no longer exist."""
    for entry in root.iterdir():
        if entry.is_symlink() and not entry.resolve().exists():
            entry.unlink()
361
+
362
+
363
def cleanup_numbered_dir(
    root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
    """Cleanup for lock driven numbered directories."""
    if not root.exists():
        return
    # First the numbered candidates past the keep window, then any
    # half-deleted "garbage-*" leftovers from earlier runs.
    for candidate in cleanup_candidates(root, prefix, keep):
        try_cleanup(candidate, consider_lock_dead_if_created_before)
    for leftover in root.glob("garbage-*"):
        try_cleanup(leftover, consider_lock_dead_if_created_before)
    cleanup_dead_symlinks(root)
375
+
376
+
377
def make_numbered_dir_with_cleanup(
    root: Path,
    prefix: str,
    keep: int,
    lock_timeout: float,
    mode: int,
) -> Path:
    """Create a numbered dir with a cleanup lock and remove old ones."""
    last_exc: Exception | None = None
    for _attempt in range(10):
        try:
            p = make_numbered_dir(root, prefix, mode)
            # Only lock the current dir when keep is not 0
            if keep != 0:
                lock_path = create_cleanup_lock(p)
                register_cleanup_lock_removal(lock_path)
        except Exception as exc:
            last_exc = exc
            continue
        consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
        # Register a cleanup for program exit
        atexit.register(
            cleanup_numbered_dir,
            root,
            prefix,
            keep,
            consider_lock_dead_if_created_before,
        )
        return p
    assert last_exc is not None
    raise last_exc
408
+
409
+
410
def resolve_from_str(input: str, rootpath: Path) -> Path:
    """Expand ``~`` and environment variables in *input*, anchoring relative
    results at *rootpath*."""
    expanded = expandvars(expanduser(input))
    if isabs(expanded):
        return Path(expanded)
    return rootpath.joinpath(expanded)
417
+
418
+
419
def fnmatch_ex(pattern: str, path: str | os.PathLike[str]) -> bool:
    """A port of FNMatcher from py.path.common which works with PurePath() instances.

    Unlike PurePath.match(), which applies "**" glob expressions part by
    part, this algorithm matches against the whole path at once.

    For example:
        "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py"
        with this algorithm, but not with PurePath.match().

    This algorithm was ported to keep backward-compatibility with existing
    settings which assume paths match according this logic.

    References:
    * https://bugs.python.org/issue29249
    * https://bugs.python.org/issue34731
    """
    path = PurePath(path)
    iswin32 = sys.platform.startswith("win")

    if iswin32 and sep not in pattern and posix_sep in pattern:
        # Running on Windows, the pattern has no Windows path separators,
        # and the pattern has one or more Posix path separators. Replace
        # the Posix path separators with the Windows path separator.
        pattern = pattern.replace(posix_sep, sep)

    if sep in pattern:
        # Pattern addresses whole paths; anchor relative patterns so they
        # may match anywhere inside an absolute path.
        name = str(path)
        if path.is_absolute() and not os.path.isabs(pattern):
            pattern = f"*{os.sep}{pattern}"
    else:
        # Bare filename pattern: match against the last component only.
        name = path.name
    return fnmatch.fnmatch(name, pattern)
453
+
454
+
455
def parts(s: str) -> set[str]:
    """Return the set of ancestor path prefixes of *s*, including *s* itself.

    An empty prefix (the root) is represented by ``sep``.
    """
    result: set[str] = set()
    accumulated: list[str] = []
    for piece in s.split(sep):
        accumulated.append(piece)
        result.add(sep.join(accumulated) or sep)
    return result
458
+
459
+
460
def symlink_or_skip(
    src: os.PathLike[str] | str,
    dst: os.PathLike[str] | str,
    **kwargs: Any,
) -> None:
    """Make a symlink, or skip the test in case symlinks are not supported."""
    try:
        os.symlink(src, dst, **kwargs)
    except OSError as e:
        # e.g. Windows without the required privilege, or FAT filesystems.
        skip(f"symlinks not supported: {e}")
470
+
471
+
472
class ImportMode(Enum):
    """Possible values for `mode` parameter of `import_path`."""

    # Put the module's directory at the start of sys.path before importing.
    prepend = "prepend"
    # Append the module's directory to sys.path if not already present.
    append = "append"
    # Import via importlib without touching sys.path at all.
    importlib = "importlib"
478
+
479
+
480
class ImportPathMismatchError(ImportError):
    """Raised by import_path() when an imported module's ``__file__`` does not
    match the requested path.

    This happens when `import_path` is called several times with different
    filenames sharing a basename but living in different packages
    (for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
    """
487
+
488
+
489
+ def import_path(
490
+ path: str | os.PathLike[str],
491
+ *,
492
+ mode: str | ImportMode = ImportMode.prepend,
493
+ root: Path,
494
+ consider_namespace_packages: bool,
495
+ ) -> ModuleType:
496
+ """
497
+ Import and return a module from the given path, which can be a file (a module) or
498
+ a directory (a package).
499
+
500
+ :param path:
501
+ Path to the file to import.
502
+
503
+ :param mode:
504
+ Controls the underlying import mechanism that will be used:
505
+
506
+ * ImportMode.prepend: the directory containing the module (or package, taking
507
+ `__init__.py` files into account) will be put at the *start* of `sys.path` before
508
+ being imported with `importlib.import_module`.
509
+
510
+ * ImportMode.append: same as `prepend`, but the directory will be appended
511
+ to the end of `sys.path`, if not already in `sys.path`.
512
+
513
+ * ImportMode.importlib: uses more fine control mechanisms provided by `importlib`
514
+ to import the module, which avoids having to muck with `sys.path` at all. It effectively
515
+ allows having same-named test modules in different places.
516
+
517
+ :param root:
518
+ Used as an anchor when mode == ImportMode.importlib to obtain
519
+ a unique name for the module being imported so it can safely be stored
520
+ into ``sys.modules``.
521
+
522
+ :param consider_namespace_packages:
523
+ If True, consider namespace packages when resolving module names.
524
+
525
+ :raises ImportPathMismatchError:
526
+ If after importing the given `path` and the module `__file__`
527
+ are different. Only raised in `prepend` and `append` modes.
528
+ """
529
+ path = Path(path)
530
+ mode = ImportMode(mode)
531
+
532
+ if not path.exists():
533
+ raise ImportError(path)
534
+
535
+ if mode is ImportMode.importlib:
536
+ # Try to import this module using the standard import mechanisms, but
537
+ # without touching sys.path.
538
+ try:
539
+ pkg_root, module_name = resolve_pkg_root_and_module_name(
540
+ path, consider_namespace_packages=consider_namespace_packages
541
+ )
542
+ except CouldNotResolvePathError:
543
+ pass
544
+ else:
545
+ # If the given module name is already in sys.modules, do not import it again.
546
+ with contextlib.suppress(KeyError):
547
+ return sys.modules[module_name]
548
+
549
+ mod = _import_module_using_spec(
550
+ module_name, path, pkg_root, insert_modules=False
551
+ )
552
+ if mod is not None:
553
+ return mod
554
+
555
+ # Could not import the module with the current sys.path, so we fall back
556
+ # to importing the file as a single module, not being a part of a package.
557
+ module_name = module_name_from_path(path, root)
558
+ with contextlib.suppress(KeyError):
559
+ return sys.modules[module_name]
560
+
561
+ mod = _import_module_using_spec(
562
+ module_name, path, path.parent, insert_modules=True
563
+ )
564
+ if mod is None:
565
+ raise ImportError(f"Can't find module {module_name} at location {path}")
566
+ return mod
567
+
568
+ try:
569
+ pkg_root, module_name = resolve_pkg_root_and_module_name(
570
+ path, consider_namespace_packages=consider_namespace_packages
571
+ )
572
+ except CouldNotResolvePathError:
573
+ pkg_root, module_name = path.parent, path.stem
574
+
575
+ # Change sys.path permanently: restoring it at the end of this function would cause surprising
576
+ # problems because of delayed imports: for example, a conftest.py file imported by this function
577
+ # might have local imports, which would fail at runtime if we restored sys.path.
578
+ if mode is ImportMode.append:
579
+ if str(pkg_root) not in sys.path:
580
+ sys.path.append(str(pkg_root))
581
+ elif mode is ImportMode.prepend:
582
+ if str(pkg_root) != sys.path[0]:
583
+ sys.path.insert(0, str(pkg_root))
584
+ else:
585
+ assert_never(mode)
586
+
587
+ importlib.import_module(module_name)
588
+
589
+ mod = sys.modules[module_name]
590
+ if path.name == "__init__.py":
591
+ return mod
592
+
593
+ ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
594
+ if ignore != "1":
595
+ module_file = mod.__file__
596
+ if module_file is None:
597
+ raise ImportPathMismatchError(module_name, module_file, path)
598
+
599
+ if module_file.endswith((".pyc", ".pyo")):
600
+ module_file = module_file[:-1]
601
+ if module_file.endswith(os.sep + "__init__.py"):
602
+ module_file = module_file[: -(len(os.sep + "__init__.py"))]
603
+
604
+ try:
605
+ is_same = _is_same(str(path), module_file)
606
+ except FileNotFoundError:
607
+ is_same = False
608
+
609
+ if not is_same:
610
+ raise ImportPathMismatchError(module_name, module_file, path)
611
+
612
+ return mod
613
+
614
+
615
def _import_module_using_spec(
    module_name: str, module_path: Path, module_location: Path, *, insert_modules: bool
) -> ModuleType | None:
    """
    Tries to import a module by its canonical name, path, and its parent location.

    :param module_name:
        The expected module name, will become the key of `sys.modules`.

    :param module_path:
        The file path of the module, for example `/foo/bar/test_demo.py`.
        If module is a package, pass the path to the `__init__.py` of the package.
        If module is a namespace package, pass directory path.

    :param module_location:
        The parent location of the module.
        If module is a package, pass the directory containing the `__init__.py` file.

    :param insert_modules:
        If True, will call `insert_missing_modules` to create empty intermediate modules
        with made-up module names (when importing test files not reachable from `sys.path`).

    Example 1 of parent_module_*:

        module_name: "a.b.c.demo"
        module_path: Path("a/b/c/demo.py")
        module_location: Path("a/b/c/")
        if "a.b.c" is package ("a/b/c/__init__.py" exists), then
            parent_module_name: "a.b.c"
            parent_module_path: Path("a/b/c/__init__.py")
            parent_module_location: Path("a/b/c/")
        else:
            parent_module_name: "a.b.c"
            parent_module_path: Path("a/b/c")
            parent_module_location: Path("a/b/")

    Example 2 of parent_module_*:

        module_name: "a.b.c"
        module_path: Path("a/b/c/__init__.py")
        module_location: Path("a/b/c/")
        if "a.b" is package ("a/b/__init__.py" exists), then
            parent_module_name: "a.b"
            parent_module_path: Path("a/b/__init__.py")
            parent_module_location: Path("a/b/")
        else:
            parent_module_name: "a.b"
            parent_module_path: Path("a/b/")
            parent_module_location: Path("a/")

    Returns the imported module, or None when no usable spec could be found.
    """
    # Attempt to import the parent module (recursively), seems is our responsibility:
    # https://github.com/python/cpython/blob/73906d5c908c1e0b73c5436faeff7d93698fc074/Lib/importlib/_bootstrap.py#L1308-L1311
    parent_module_name, _, name = module_name.rpartition(".")
    parent_module: ModuleType | None = None
    if parent_module_name:
        parent_module = sys.modules.get(parent_module_name)
        # If the parent_module lacks the `__path__` attribute, AttributeError when finding a submodule's spec,
        # requiring re-import according to the path.
        need_reimport = not hasattr(parent_module, "__path__")
        if parent_module is None or need_reimport:
            # Get parent_location based on location, get parent_path based on path.
            if module_path.name == "__init__.py":
                # If the current module is in a package,
                # need to leave the package first and then enter the parent module.
                parent_module_path = module_path.parent.parent
            else:
                parent_module_path = module_path.parent

            if (parent_module_path / "__init__.py").is_file():
                # If the parent module is a package, loading by __init__.py file.
                parent_module_path = parent_module_path / "__init__.py"

            parent_module = _import_module_using_spec(
                parent_module_name,
                parent_module_path,
                parent_module_path.parent,
                insert_modules=insert_modules,
            )

    # Checking with sys.meta_path first in case one of its hooks can import this module,
    # such as our own assertion-rewrite hook.
    for meta_importer in sys.meta_path:
        module_name_of_meta = getattr(meta_importer.__class__, "__module__", "")
        if module_name_of_meta == "_pytest.assertion.rewrite" and module_path.is_file():
            # Import modules in subdirectories by module_path
            # to ensure assertion rewrites are not missed (#12659).
            find_spec_path = [str(module_location), str(module_path)]
        else:
            find_spec_path = [str(module_location)]

        spec = meta_importer.find_spec(module_name, find_spec_path)

        # Stop at the first hook whose spec actually corresponds to module_path.
        if spec_matches_module_path(spec, module_path):
            break
    else:
        # No meta_path hook matched: build a spec directly from the file location.
        loader = None
        if module_path.is_dir():
            # The `spec_from_file_location` matches a loader based on the file extension by default.
            # For a namespace package, need to manually specify a loader.
            loader = NamespaceLoader(name, module_path, PathFinder())  # type: ignore[arg-type]

        spec = importlib.util.spec_from_file_location(
            module_name, str(module_path), loader=loader
        )

    if spec_matches_module_path(spec, module_path):
        assert spec is not None
        # Find spec and import this module.
        mod = importlib.util.module_from_spec(spec)
        # Register before exec_module so self/circular imports can see it.
        sys.modules[module_name] = mod
        spec.loader.exec_module(mod)  # type: ignore[union-attr]

        # Set this module as an attribute of the parent module (#12194).
        if parent_module is not None:
            setattr(parent_module, name, mod)

        if insert_modules:
            insert_missing_modules(sys.modules, module_name)
        return mod

    return None
736
+
737
+
738
def spec_matches_module_path(module_spec: ModuleSpec | None, module_path: Path) -> bool:
    """Return true if the given ModuleSpec can be used to import the given module path."""
    if module_spec is None:
        return False

    origin = module_spec.origin
    if origin:
        return Path(origin) == module_path

    # A spec without an origin may describe a namespace package; its candidate
    # directories live in `submodule_search_locations` (which can be None).
    # https://docs.python.org/3/library/importlib.html#importlib.machinery.ModuleSpec.submodule_search_locations
    locations = module_spec.submodule_search_locations or ()
    return any(Path(location) == module_path for location in locations)
755
+
756
+
757
+ # Implement a special _is_same function on Windows which returns True if the two filenames
758
+ # compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678).
759
+ if sys.platform.startswith("win"):
760
+
761
+ def _is_same(f1: str, f2: str) -> bool:
762
+ return Path(f1) == Path(f2) or os.path.samefile(f1, f2)
763
+
764
+ else:
765
+
766
+ def _is_same(f1: str, f2: str) -> bool:
767
+ return os.path.samefile(f1, f2)
768
+
769
+
770
def module_name_from_path(path: Path, root: Path) -> str:
    """
    Return a dotted module name based on the given path, anchored on root.

    For example: path="projects/src/tests/test_foo.py" and root="/projects", the
    resulting module name will be "src.tests.test_foo".
    """
    stripped = path.with_suffix("")
    try:
        parts = stripped.relative_to(root).parts
    except ValueError:
        # Not located under root: fall back to the full path, dropping the
        # anchor part ("/" or "d:\\" depending on the platform).
        parts = stripped.parts[1:]

    # A package maps to its directory name, so drop a trailing "__init__" —
    # unless the __init__ file sits directly at the root.
    if len(parts) >= 2 and parts[-1] == "__init__":
        parts = parts[:-1]

    # "." is not legal inside a module name (and a leading "." would read as a
    # relative import); normalize dots in path components (e.g. ".env.310") to "_".
    return ".".join(part.replace(".", "_") for part in parts)
799
+
800
+
801
def insert_missing_modules(modules: dict[str, ModuleType], module_name: str) -> None:
    """
    Used by ``import_path`` to create intermediate modules when using mode=importlib.

    When we want to import a module as "src.tests.test_foo" for example, we need
    to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo",
    otherwise "src.tests.test_foo" is not importable by ``__import__``.
    """
    parts = module_name.split(".")
    while module_name:
        parent_name, _, child_name = module_name.rpartition(".")
        if parent_name:
            parent = modules.get(parent_name)
            if parent is None:
                try:
                    # With an empty sys.meta_path, import_module would emit a
                    # warning before raising ModuleNotFoundError; raise the
                    # error ourselves to fall back silently to a dummy module.
                    if not sys.meta_path:
                        raise ModuleNotFoundError
                    parent = importlib.import_module(parent_name)
                except ModuleNotFoundError:
                    parent = ModuleType(
                        module_name,
                        doc="Empty module created by pytest's importmode=importlib.",
                    )
                modules[parent_name] = parent

            # Make each child reachable as an attribute of its parent module.
            if not hasattr(parent, child_name):
                setattr(parent, child_name, modules[module_name])

        parts.pop(-1)
        module_name = ".".join(parts)
837
+
838
+
839
def resolve_package_path(path: Path) -> Path | None:
    """Return the Python package path by looking for the last
    directory upwards which still contains an __init__.py.

    Returns None if it cannot be determined.
    """
    result: Path | None = None
    for candidate in itertools.chain((path,), path.parents):
        # Non-directory entries (e.g. the file itself) are skipped, not fatal.
        if not candidate.is_dir():
            continue
        # Stop at the first ancestor that is not a package, or whose name
        # could not serve as a module identifier.
        if not (candidate / "__init__.py").is_file():
            break
        if not candidate.name.isidentifier():
            break
        result = candidate
    return result
854
+
855
+
856
def resolve_pkg_root_and_module_name(
    path: Path, *, consider_namespace_packages: bool = False
) -> tuple[Path, str]:
    """
    Return the path to the directory of the root package that contains the
    given Python file, and its module name:

        src/
            app/
                __init__.py
                core/
                    __init__.py
                    models.py

    Passing the full path to `models.py` will yield Path("src") and "app.core.models".

    If consider_namespace_packages is True, then we additionally check upwards in the hierarchy
    for namespace packages:

    https://packaging.python.org/en/latest/guides/packaging-namespace-packages

    Raises CouldNotResolvePathError if the given path does not belong to a package (missing any __init__.py files).
    """
    pkg_path = resolve_package_path(path)
    pkg_root: Path | None = None if pkg_path is None else pkg_path.parent

    if consider_namespace_packages:
        # Walk upwards from the regular package root (or the file's directory)
        # looking for an anchor from which the module is actually importable.
        start = path.parent if pkg_root is None else pkg_root
        for candidate in itertools.chain((start,), start.parents):
            candidate_name = compute_module_name(candidate, path)
            if candidate_name and is_importable(candidate_name, path):
                # Point the pkg_root to the root of the namespace package.
                pkg_root = candidate
                break

    if pkg_root is not None:
        module_name = compute_module_name(pkg_root, path)
        if module_name:
            return pkg_root, module_name

    raise CouldNotResolvePathError(f"Could not resolve for {path}")
898
+
899
+
900
def is_importable(module_name: str, module_path: Path) -> bool:
    """
    Return if the given module path could be imported normally by Python, akin to the user
    entering the REPL and importing the corresponding module name directly, and corresponds
    to the module_path specified.

    :param module_name:
        Full module name that we want to check if is importable.
        For example, "app.models".

    :param module_path:
        Full path to the python module/package we want to check if is importable.
        For example, "/projects/src/app/models.py".
    """
    try:
        # Note this is different from what we do in ``_import_module_using_spec``, where we explicitly search through
        # sys.meta_path to be able to pass the path of the module that we want to import (``meta_importer.find_spec``).
        # Using importlib.util.find_spec() is different, it gives the same results as trying to import
        # the module normally in the REPL.
        found = importlib.util.find_spec(module_name)
    except (ImportError, ValueError, ImportWarning):
        return False
    return spec_matches_module_path(found, module_path)
924
+
925
+
926
def compute_module_name(root: Path, module_path: Path) -> str | None:
    """Compute a module name based on a path and a root anchor, or None if impossible."""
    try:
        stripped = module_path.with_suffix("")
    except ValueError:
        # Empty paths (such as Path.cwd()) might break meta_path hooks (like our own assertion rewriter).
        return None

    try:
        names = list(stripped.relative_to(root).parts)
    except ValueError:  # pragma: no cover
        return None
    if not names:
        return None
    # A package is addressed by its directory name, not "__init__".
    if names[-1] == "__init__":
        names.pop()
    return ".".join(names)
944
+
945
+
946
class CouldNotResolvePathError(Exception):
    """Raised by ``resolve_pkg_root_and_module_name`` when a path cannot be
    mapped to a package root and module name."""
948
+
949
+
950
def scandir(
    path: str | os.PathLike[str],
    sort_key: Callable[[os.DirEntry[str]], object] = lambda entry: entry.name,
) -> list[os.DirEntry[str]]:
    """Return the entries of a directory, sorted according to the given key
    (by name, by default).

    If the directory does not exist, return an empty list.
    """
    try:
        scandir_iter = os.scandir(path)
    except FileNotFoundError:
        # A missing directory simply yields no entries.
        return []

    entries: list[os.DirEntry[str]] = []
    # Use the iterator as a context manager so it is closed deterministically.
    with scandir_iter as it:
        for entry in it:
            try:
                # Force a stat of the entry so unreadable or vanished entries
                # surface here instead of later in the caller.
                entry.is_file()
            except OSError as err:
                if not _ignore_error(err):
                    # Reraise non-ignorable errors to avoid hiding issues.
                    raise
                continue
            entries.append(entry)
    entries.sort(key=sort_key)  # type: ignore[arg-type]
    return entries
980
+
981
+
982
def visit(
    path: str | os.PathLike[str], recurse: Callable[[os.DirEntry[str]], bool]
) -> Iterator[os.DirEntry[str]]:
    """Walk a directory recursively, in breadth-first order.

    The `recurse` predicate determines whether a directory is recursed.

    Entries at each directory level are sorted.
    """
    # Yield the whole level first (breadth-first), then descend.
    level_entries = scandir(path)
    yield from level_entries
    for entry in level_entries:
        if entry.is_dir() and recurse(entry):
            yield from visit(entry.path, recurse)
996
+
997
+
998
def absolutepath(path: str | os.PathLike[str]) -> Path:
    """Convert a path to an absolute path using os.path.abspath.

    Prefer this over Path.resolve() (see #6523).
    Prefer this over Path.absolute() (not public, doesn't normalize).
    """
    absolute = os.path.abspath(path)
    return Path(absolute)
1005
+
1006
+
1007
def commonpath(path1: Path, path2: Path) -> Path | None:
    """Return the common part shared with the other path, or None if there is
    no common part.

    If one path is relative and one is absolute, returns None.
    """
    try:
        shared = os.path.commonpath((str(path1), str(path2)))
    except ValueError:
        # Raised for a relative/absolute mix or (on Windows) different drives.
        return None
    return Path(shared)
1017
+
1018
+
1019
def bestrelpath(directory: Path, dest: Path) -> str:
    """Return a string which is a relative path from directory to dest such
    that directory/bestrelpath == dest.

    The paths must be either both absolute or both relative.

    If no such path can be determined, returns dest.
    """
    assert isinstance(directory, Path)
    assert isinstance(dest, Path)
    if dest == directory:
        return os.curdir
    # Longest shared ancestor of the two paths. It is missing when the paths
    # are on different Windows drives, are both relative without a common
    # prefix, or mix relative and absolute — then fall back to dest as-is.
    base = commonpath(directory, dest)
    if not base:
        return str(dest)
    up = [os.pardir] * len(directory.relative_to(base).parts)
    down = dest.relative_to(base).parts
    # Go up from directory to base, then down from base to dest.
    return os.path.join(*up, *down)
1046
+
1047
+
1048
def safe_exists(p: Path) -> bool:
    """Like Path.exists(), but account for input arguments that might be too long (#11394)."""
    try:
        return p.exists()
    except ValueError:
        # stat: path too long for Windows
        return False
    except OSError:
        # e.g. [WinError 123] The filename, directory name, or volume label syntax is incorrect
        return False
archive/Axiovorax/.venv/Lib/site-packages/_pytest/py.typed ADDED
File without changes
archive/Axiovorax/.venv/Lib/site-packages/_pytest/pytester.py ADDED
@@ -0,0 +1,1775 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """(Disabled by default) support for testing pytest and pytest plugins.
3
+
4
+ PYTEST_DONT_REWRITE
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import collections.abc
10
+ from collections.abc import Callable
11
+ from collections.abc import Generator
12
+ from collections.abc import Iterable
13
+ from collections.abc import Sequence
14
+ import contextlib
15
+ from fnmatch import fnmatch
16
+ import gc
17
+ import importlib
18
+ from io import StringIO
19
+ import locale
20
+ import os
21
+ from pathlib import Path
22
+ import platform
23
+ import re
24
+ import shutil
25
+ import subprocess
26
+ import sys
27
+ import traceback
28
+ from typing import Any
29
+ from typing import Final
30
+ from typing import final
31
+ from typing import IO
32
+ from typing import Literal
33
+ from typing import overload
34
+ from typing import TextIO
35
+ from typing import TYPE_CHECKING
36
+ from weakref import WeakKeyDictionary
37
+
38
+ from iniconfig import IniConfig
39
+ from iniconfig import SectionWrapper
40
+
41
+ from _pytest import timing
42
+ from _pytest._code import Source
43
+ from _pytest.capture import _get_multicapture
44
+ from _pytest.compat import NOTSET
45
+ from _pytest.compat import NotSetType
46
+ from _pytest.config import _PluggyPlugin
47
+ from _pytest.config import Config
48
+ from _pytest.config import ExitCode
49
+ from _pytest.config import hookimpl
50
+ from _pytest.config import main
51
+ from _pytest.config import PytestPluginManager
52
+ from _pytest.config.argparsing import Parser
53
+ from _pytest.deprecated import check_ispytest
54
+ from _pytest.fixtures import fixture
55
+ from _pytest.fixtures import FixtureRequest
56
+ from _pytest.main import Session
57
+ from _pytest.monkeypatch import MonkeyPatch
58
+ from _pytest.nodes import Collector
59
+ from _pytest.nodes import Item
60
+ from _pytest.outcomes import fail
61
+ from _pytest.outcomes import importorskip
62
+ from _pytest.outcomes import skip
63
+ from _pytest.pathlib import bestrelpath
64
+ from _pytest.pathlib import make_numbered_dir
65
+ from _pytest.reports import CollectReport
66
+ from _pytest.reports import TestReport
67
+ from _pytest.tmpdir import TempPathFactory
68
+ from _pytest.warning_types import PytestFDWarning
69
+
70
+
71
+ if TYPE_CHECKING:
72
+ import pexpect
73
+
74
+
75
# Auto-load pytester's assertion-helper plugin whenever this plugin is used.
pytest_plugins = ["pytester_assertions"]


IGNORE_PAM = [  # filenames added when obtaining details about the current user
    "/var/lib/sss/mc/passwd"
]
81
+
82
+
83
def pytest_addoption(parser: Parser) -> None:
    """Register pytester's command-line options and ini settings."""
    parser.addoption(
        "--lsof",
        action="store_true",
        dest="lsof",
        default=False,
        help="Run FD checks if lsof is available",
    )

    # Controls whether Pytester.runpytest runs in-process or as a subprocess.
    parser.addoption(
        "--runpytest",
        default="inprocess",
        dest="runpytest",
        choices=("inprocess", "subprocess"),
        help=(
            "Run pytest sub runs in tests using an 'inprocess' "
            "or 'subprocess' (python -m main) method"
        ),
    )

    parser.addini(
        "pytester_example_dir", help="Directory to take the pytester example files from"
    )
106
+
107
+
108
def pytest_configure(config: Config) -> None:
    """Register the FD-leak checker (when requested and available) and the
    ``pytester_example_path`` marker."""
    if config.getvalue("lsof"):
        checker = LsofFdLeakChecker()
        # Only register when the `lsof` executable is actually runnable here.
        if checker.matching_platform():
            config.pluginmanager.register(checker)

    config.addinivalue_line(
        "markers",
        "pytester_example_path(*path_segments): join the given path "
        "segments to `pytester_example_dir` for this test.",
    )
119
+
120
+
121
class LsofFdLeakChecker:
    """Plugin that compares the process's open file descriptors before and
    after each test (via the external `lsof` tool) and warns about leaks."""

    def get_open_files(self) -> list[tuple[str, str]]:
        # Return (fd, filename) pairs for files currently open in this process,
        # parsed from `lsof -Ffn0` output (NUL-separated field records).
        if sys.version_info >= (3, 11):
            # New in Python 3.11, ignores utf-8 mode
            encoding = locale.getencoding()
        else:
            encoding = locale.getpreferredencoding(False)
        out = subprocess.run(
            ("lsof", "-Ffn0", "-p", str(os.getpid())),
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            check=True,
            text=True,
            encoding=encoding,
        ).stdout

        def isopen(line: str) -> bool:
            # Keep only "f" (file-descriptor) records, skipping deleted files
            # and mem/txt/cwd pseudo descriptors.
            return line.startswith("f") and (
                "deleted" not in line
                and "mem" not in line
                and "txt" not in line
                and "cwd" not in line
            )

        open_files = []

        for line in out.split("\n"):
            if isopen(line):
                fields = line.split("\0")
                # Strip the one-character field tags ("f" for fd, "n" for name).
                fd = fields[0][1:]
                filename = fields[1][1:]
                if filename in IGNORE_PAM:
                    continue
                # Only absolute file paths are of interest (not sockets/pipes).
                if filename.startswith("/"):
                    open_files.append((fd, filename))

        return open_files

    def matching_platform(self) -> bool:
        # True when the `lsof` executable is present and runnable.
        try:
            subprocess.run(("lsof", "-v"), check=True)
        except (OSError, subprocess.CalledProcessError):
            return False
        else:
            return True

    @hookimpl(wrapper=True, tryfirst=True)
    def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]:
        # Snapshot the open FDs around the whole test protocol and warn when
        # new descriptors survive the test.
        lines1 = self.get_open_files()
        try:
            return (yield)
        finally:
            if hasattr(sys, "pypy_version_info"):
                # PyPy frees objects lazily; collect so FDs held by garbage
                # do not show up as false leaks.
                gc.collect()
            lines2 = self.get_open_files()

            new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
            leaked_files = [t for t in lines2 if t[0] in new_fds]
            if leaked_files:
                error = [
                    f"***** {len(leaked_files)} FD leakage detected",
                    *(str(f) for f in leaked_files),
                    "*** Before:",
                    *(str(f) for f in lines1),
                    "*** After:",
                    *(str(f) for f in lines2),
                    f"***** {len(leaked_files)} FD leakage detected",
                    "*** function {}:{}: {} ".format(*item.location),
                    "See issue #2366",
                ]
                item.warn(PytestFDWarning("\n".join(error)))
192
+
193
+
194
+ # used at least by pytest-xdist plugin
195
+
196
+
197
@fixture
def _pytest(request: FixtureRequest) -> PytestArg:
    """Return a helper which offers a gethookrecorder(hook) method which
    returns a HookRecorder instance which helps to make assertions about called
    hooks.
    """
    # Underscore-prefixed: internal fixture, not part of the public pytester API.
    return PytestArg(request)
203
+
204
+
205
class PytestArg:
    """Helper returned by the `_pytest` fixture: creates hook recorders whose
    recording is stopped when the requesting test finishes."""

    def __init__(self, request: FixtureRequest) -> None:
        self._request = request

    def gethookrecorder(self, hook) -> HookRecorder:
        # `hook` is presumably a pluggy hook relay (it exposes `_pm`, its
        # owning plugin manager) — TODO confirm against callers.
        hookrecorder = HookRecorder(hook._pm)
        # Stop recording automatically at test teardown.
        self._request.addfinalizer(hookrecorder.finish_recording)
        return hookrecorder
213
+
214
+
215
def get_public_names(values: Iterable[str]) -> list[str]:
    """Only return names from iterator values without a leading underscore.

    Uses ``startswith`` rather than indexing so that an empty name does not
    raise ``IndexError`` (an empty string has no leading underscore and is
    therefore kept).
    """
    return [x for x in values if not x.startswith("_")]
218
+
219
+
220
@final
class RecordedHookCall:
    """A recorded call to a hook.

    The arguments to the hook call are set as attributes.
    For example:

    .. code-block:: python

        calls = hook_recorder.getcalls("pytest_runtest_setup")
        # Suppose pytest_runtest_setup was called once with `item=an_item`.
        assert calls[0].item is an_item
    """

    def __init__(self, name: str, kwargs) -> None:
        self.__dict__.update(kwargs)
        self._name = name

    def __repr__(self) -> str:
        # Show the hook kwargs, hiding the internal `_name` bookkeeping entry.
        args = {k: v for k, v in self.__dict__.items() if k != "_name"}
        return f"<RecordedHookCall {self._name!r}(**{args!r})>"

    if TYPE_CHECKING:
        # The class has undetermined attributes, this tells mypy about it.
        def __getattr__(self, key: str): ...
246
+
247
+
248
@final
class HookRecorder:
    """Record all hooks called in a plugin manager.

    Hook recorders are created by :class:`Pytester`.

    This wraps all the hook calls in the plugin manager, recording each call
    before propagating the normal calls.
    """

    def __init__(
        self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False
    ) -> None:
        check_ispytest(_ispytest)

        self._pluginmanager = pluginmanager
        # Every recorded hook call, in call order.
        self.calls: list[RecordedHookCall] = []
        # Exit code of the associated run, filled in by users of the recorder.
        self.ret: int | ExitCode | None = None

        # Record each call *before* it is dispatched, so the recording is
        # complete even if a hook implementation raises.
        def before(hook_name: str, hook_impls, kwargs) -> None:
            self.calls.append(RecordedHookCall(hook_name, kwargs))

        def after(outcome, hook_name: str, hook_impls, kwargs) -> None:
            pass

        self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)

    def finish_recording(self) -> None:
        """Stop recording by undoing the hook-call monitoring wrapper."""
        self._undo_wrapping()

    def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]:
        """Get all recorded calls to hooks with the given names (or name)."""
        # A space-separated string is treated as multiple hook names.
        if isinstance(names, str):
            names = names.split()
        return [call for call in self.calls if call._name in names]

    def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None:
        """Assert that calls matching the given ``(hook_name, expression)``
        pairs occur in order; each expression is evaluated with the call's
        kwargs as locals and the caller's locals as globals."""
        __tracebackhide__ = True
        i = 0
        entries = list(entries)
        # Since Python 3.13, f_locals is not a dict, but eval requires a dict.
        backlocals = dict(sys._getframe(1).f_locals)
        while entries:
            name, check = entries.pop(0)
            for ind, call in enumerate(self.calls[i:]):
                if call._name == name:
                    print("NAMEMATCH", name, call)
                    # NOTE: eval of a caller-supplied expression; only ever
                    # used on test-authored check strings.
                    if eval(check, backlocals, call.__dict__):
                        print("CHECKERMATCH", repr(check), "->", call)
                    else:
                        print("NOCHECKERMATCH", repr(check), "-", call)
                        continue
                    # Resume the scan just after the matched call so entries
                    # must appear in order.
                    i += ind + 1
                    break
                print("NONAMEMATCH", name, "with", call)
            else:
                fail(f"could not find {name!r} check {check!r}")

    def popcall(self, name: str) -> RecordedHookCall:
        """Remove and return the first recorded call to the named hook,
        failing the test if there is none."""
        __tracebackhide__ = True
        for i, call in enumerate(self.calls):
            if call._name == name:
                del self.calls[i]
                return call
        lines = [f"could not find call {name!r}, in:"]
        lines.extend([f" {x}" for x in self.calls])
        fail("\n".join(lines))

    def getcall(self, name: str) -> RecordedHookCall:
        """Return the single recorded call to the named hook; asserts that
        exactly one call was recorded."""
        values = self.getcalls(name)
        assert len(values) == 1, (name, values)
        return values[0]

    # functionality for test reports

    @overload
    def getreports(
        self,
        names: Literal["pytest_collectreport"],
    ) -> Sequence[CollectReport]: ...

    @overload
    def getreports(
        self,
        names: Literal["pytest_runtest_logreport"],
    ) -> Sequence[TestReport]: ...

    @overload
    def getreports(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]: ...

    def getreports(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]:
        """Return the ``report`` argument of each recorded call to the given
        report hooks."""
        return [x.report for x in self.getcalls(names)]

    def matchreport(
        self,
        inamepart: str = "",
        names: str | Iterable[str] = (
            "pytest_runtest_logreport",
            "pytest_collectreport",
        ),
        when: str | None = None,
    ) -> CollectReport | TestReport:
        """Return a testreport whose dotted import path matches."""
        values = []
        for rep in self.getreports(names=names):
            if not when and rep.when != "call" and rep.passed:
                # setup/teardown passing reports - let's ignore those
                continue
            if when and rep.when != when:
                continue
            # inamepart must match one "::"-separated segment of the nodeid.
            if not inamepart or inamepart in rep.nodeid.split("::"):
                values.append(rep)
        if not values:
            raise ValueError(
                f"could not find test report matching {inamepart!r}: "
                "no test reports at all!"
            )
        if len(values) > 1:
            raise ValueError(
                f"found 2 or more testreports matching {inamepart!r}: {values}"
            )
        return values[0]

    @overload
    def getfailures(
        self,
        names: Literal["pytest_collectreport"],
    ) -> Sequence[CollectReport]: ...

    @overload
    def getfailures(
        self,
        names: Literal["pytest_runtest_logreport"],
    ) -> Sequence[TestReport]: ...

    @overload
    def getfailures(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]: ...

    def getfailures(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]:
        """Return all recorded reports whose outcome is failed."""
        return [rep for rep in self.getreports(names) if rep.failed]

    def getfailedcollections(self) -> Sequence[CollectReport]:
        """Return all failed collection reports."""
        return self.getfailures("pytest_collectreport")

    def listoutcomes(
        self,
    ) -> tuple[
        Sequence[TestReport],
        Sequence[CollectReport | TestReport],
        Sequence[CollectReport | TestReport],
    ]:
        """Partition all recorded reports into (passed, skipped, failed)."""
        passed = []
        skipped = []
        failed = []
        for rep in self.getreports(
            ("pytest_collectreport", "pytest_runtest_logreport")
        ):
            if rep.passed:
                # Only the "call" phase counts as a pass; passing
                # setup/teardown reports are not listed.
                if rep.when == "call":
                    assert isinstance(rep, TestReport)
                    passed.append(rep)
            elif rep.skipped:
                skipped.append(rep)
            else:
                assert rep.failed, f"Unexpected outcome: {rep!r}"
                failed.append(rep)
        return passed, skipped, failed

    def countoutcomes(self) -> list[int]:
        """Return ``[#passed, #skipped, #failed]``."""
        return [len(x) for x in self.listoutcomes()]

    def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None:
        """Assert the exact counts of passed/skipped/failed reports."""
        __tracebackhide__ = True
        from _pytest.pytester_assertions import assertoutcome

        outcomes = self.listoutcomes()
        assertoutcome(
            outcomes,
            passed=passed,
            skipped=skipped,
            failed=failed,
        )

    def clear(self) -> None:
        """Discard all recorded calls."""
        self.calls[:] = []
457
+
458
+
459
@fixture
def linecomp() -> LineComp:
    """A :class:`LineComp` instance for checking that an input linearly
    contains a sequence of strings."""
    return LineComp()
464
+
465
+
466
@fixture(name="LineMatcher")
def LineMatcher_fixture(request: FixtureRequest) -> type[LineMatcher]:
    """A reference to the :class:`LineMatcher`.

    This is instantiable with a list of lines (without their trailing newlines).
    This is useful for testing large texts, such as the output of commands.
    """
    return LineMatcher
474
+
475
+
476
@fixture
def pytester(
    request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch
) -> Pytester:
    """
    Facilities to write tests/configuration files, execute pytest in isolation, and match
    against expected output, perfect for black-box testing of pytest plugins.

    It attempts to isolate the test run from external factors as much as possible, modifying
    the current working directory to ``path`` and environment variables during initialization.

    It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path`
    fixture but provides methods which aid in testing pytest itself.
    """
    # _ispytest=True: Pytester's constructor is internal API guarded by
    # check_ispytest().
    return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True)
491
+
492
+
493
@fixture
def _sys_snapshot() -> Generator[None]:
    """Snapshot ``sys.path``/``sys.meta_path`` and ``sys.modules`` around a
    test, restoring them afterwards in reverse order of capture."""
    paths_snapshot = SysPathsSnapshot()
    modules_snapshot = SysModulesSnapshot()
    yield
    modules_snapshot.restore()
    paths_snapshot.restore()
500
+
501
+
502
@fixture
def _config_for_test() -> Generator[Config]:
    """Yield a fresh, unconfigured :class:`Config`, unconfiguring it on teardown."""
    from _pytest.config import get_config

    config = get_config()
    yield config
    config._ensure_unconfigure()  # cleanup, e.g. capman closing tmpfiles.
509
+
510
+
511
# Regex to match the session duration string in the summary: "74.34s".
# Used by RunResult.parse_summary_nouns to locate the final summary line.
rex_session_duration = re.compile(r"\d+\.\d\ds")
# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
rex_outcome = re.compile(r"(\d+) (\w+)")
515
+
516
+
517
@final
class RunResult:
    """The result of running a command from :class:`~pytest.Pytester`."""

    def __init__(
        self,
        ret: int | ExitCode,
        outlines: list[str],
        errlines: list[str],
        duration: float,
    ) -> None:
        try:
            # Prefer the symbolic ExitCode; keep the raw int for
            # non-standard return codes.
            self.ret: int | ExitCode = ExitCode(ret)
            """The return value."""
        except ValueError:
            self.ret = ret
        self.outlines = outlines
        """List of lines captured from stdout."""
        self.errlines = errlines
        """List of lines captured from stderr."""
        self.stdout = LineMatcher(outlines)
        """:class:`~pytest.LineMatcher` of stdout.

        Use e.g. :func:`str(stdout) <pytest.LineMatcher.__str__()>` to reconstruct stdout, or the commonly used
        :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines()>` method.
        """
        self.stderr = LineMatcher(errlines)
        """:class:`~pytest.LineMatcher` of stderr."""
        self.duration = duration
        """Duration in seconds."""

    def __repr__(self) -> str:
        return (
            f"<RunResult ret={self.ret!s} "
            f"len(stdout.lines)={len(self.stdout.lines)} "
            f"len(stderr.lines)={len(self.stderr.lines)} "
            f"duration={self.duration:.2f}s>"
        )

    def parseoutcomes(self) -> dict[str, int]:
        """Return a dictionary of outcome noun -> count from parsing the terminal
        output that the test process produced.

        The returned nouns will always be in plural form::

            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====

        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
        """
        return self.parse_summary_nouns(self.outlines)

    @classmethod
    def parse_summary_nouns(cls, lines) -> dict[str, int]:
        """Extract the nouns from a pytest terminal summary line.

        It always returns the plural noun for consistency::

            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====

        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.

        :raises ValueError: If no summary line (one containing a session
            duration like "0.13s") is found.
        """
        # The summary is the last line containing a session duration.
        for line in reversed(lines):
            if rex_session_duration.search(line):
                outcomes = rex_outcome.findall(line)
                ret = {noun: int(count) for (count, noun) in outcomes}
                break
        else:
            raise ValueError("Pytest terminal summary report not found")

        # pytest prints singular nouns when the count is 1; normalize.
        to_plural = {
            "warning": "warnings",
            "error": "errors",
        }
        return {to_plural.get(k, k): v for k, v in ret.items()}

    def assert_outcomes(
        self,
        passed: int = 0,
        skipped: int = 0,
        failed: int = 0,
        errors: int = 0,
        xpassed: int = 0,
        xfailed: int = 0,
        warnings: int | None = None,
        deselected: int | None = None,
    ) -> None:
        """
        Assert that the specified outcomes appear with the respective
        numbers (0 means it didn't occur) in the text output from a test run.

        ``warnings`` and ``deselected`` are only checked if not None.
        """
        __tracebackhide__ = True
        from _pytest.pytester_assertions import assert_outcomes

        outcomes = self.parseoutcomes()
        assert_outcomes(
            outcomes,
            passed=passed,
            skipped=skipped,
            failed=failed,
            errors=errors,
            xpassed=xpassed,
            xfailed=xfailed,
            warnings=warnings,
            deselected=deselected,
        )
624
+
625
+
626
+ class SysModulesSnapshot:
627
+ def __init__(self, preserve: Callable[[str], bool] | None = None) -> None:
628
+ self.__preserve = preserve
629
+ self.__saved = dict(sys.modules)
630
+
631
+ def restore(self) -> None:
632
+ if self.__preserve:
633
+ self.__saved.update(
634
+ (k, m) for k, m in sys.modules.items() if self.__preserve(k)
635
+ )
636
+ sys.modules.clear()
637
+ sys.modules.update(self.__saved)
638
+
639
+
640
class SysPathsSnapshot:
    """A snapshot of ``sys.path`` and ``sys.meta_path`` that can be restored."""

    def __init__(self) -> None:
        self.__saved = (list(sys.path), list(sys.meta_path))

    def restore(self) -> None:
        """Restore both lists in place, so existing references stay valid."""
        saved_path, saved_meta_path = self.__saved
        sys.path[:] = saved_path
        sys.meta_path[:] = saved_meta_path
646
+
647
+
648
+ @final
649
+ class Pytester:
650
+ """
651
+ Facilities to write tests/configuration files, execute pytest in isolation, and match
652
+ against expected output, perfect for black-box testing of pytest plugins.
653
+
654
+ It attempts to isolate the test run from external factors as much as possible, modifying
655
+ the current working directory to :attr:`path` and environment variables during initialization.
656
+ """
657
+
658
+ __test__ = False
659
+
660
+ CLOSE_STDIN: Final = NOTSET
661
+
662
    # NOTE(review): raised when a pytester-managed operation exceeds its
    # timeout; the raising sites are outside this chunk — confirm usage.
    class TimeoutExpired(Exception):
        pass
664
+
665
    def __init__(
        self,
        request: FixtureRequest,
        tmp_path_factory: TempPathFactory,
        monkeypatch: MonkeyPatch,
        *,
        _ispytest: bool = False,
    ) -> None:
        """Set up an isolated test environment.

        :param request: Fixture request of the test using this pytester.
        :param tmp_path_factory: Factory used to create the working and
            temp-root directories.
        :param monkeypatch: Used for cwd/env isolation; undone automatically
            at test teardown.
        """
        check_ispytest(_ispytest)
        self._request = request
        self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = (
            WeakKeyDictionary()
        )
        # Name created files/directories after the requesting test function
        # when available, otherwise after the node.
        if request.function:
            name: str = request.function.__name__
        else:
            name = request.node.name
        self._name = name
        self._path: Path = tmp_path_factory.mktemp(name, numbered=True)
        #: A list of plugins to use with :py:meth:`parseconfig` and
        #: :py:meth:`runpytest`. Initially this is an empty list but plugins can
        #: be added to the list. The type of items to add to the list depends on
        #: the method using them so refer to them for details.
        self.plugins: list[str | _PluggyPlugin] = []
        self._sys_path_snapshot = SysPathsSnapshot()
        self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
        self._request.addfinalizer(self._finalize)
        # "inprocess" or "subprocess" — selects how runpytest() executes.
        self._method = self._request.config.getoption("--runpytest")
        self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)

        self._monkeypatch = mp = monkeypatch
        self.chdir()
        mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
        # Ensure no unexpected caching via tox.
        mp.delenv("TOX_ENV_DIR", raising=False)
        # Discard outer pytest options.
        mp.delenv("PYTEST_ADDOPTS", raising=False)
        # Ensure no user config is used.
        tmphome = str(self.path)
        mp.setenv("HOME", tmphome)
        mp.setenv("USERPROFILE", tmphome)
        # Do not use colors for inner runs by default.
        mp.setenv("PY_COLORS", "0")
708
+
709
    @property
    def path(self) -> Path:
        """Temporary directory path used to create files/run tests from, etc."""
        # Backed by the per-test directory created in __init__ via
        # tmp_path_factory.mktemp().
        return self._path
713
+
714
+ def __repr__(self) -> str:
715
+ return f"<Pytester {self.path!r}>"
716
+
717
    def _finalize(self) -> None:
        """
        Clean up global state artifacts.

        Some methods modify the global interpreter state and this tries to
        clean this up. It does not remove the temporary directory however so
        it can be looked at after the test run has finished.
        """
        # Restore in reverse order of the snapshots taken in __init__.
        self._sys_modules_snapshot.restore()
        self._sys_path_snapshot.restore()
727
+
728
+ def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
729
+ # Some zope modules used by twisted-related tests keep internal state
730
+ # and can't be deleted; we had some trouble in the past with
731
+ # `zope.interface` for example.
732
+ #
733
+ # Preserve readline due to https://bugs.python.org/issue41033.
734
+ # pexpect issues a SIGWINCH.
735
+ def preserve_module(name):
736
+ return name.startswith(("zope", "readline"))
737
+
738
+ return SysModulesSnapshot(preserve=preserve_module)
739
+
740
+ def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder:
741
+ """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`."""
742
+ pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) # type: ignore[attr-defined]
743
+ self._request.addfinalizer(reprec.finish_recording)
744
+ return reprec
745
+
746
    def chdir(self) -> None:
        """Cd into the temporary directory.

        This is done automatically upon instantiation.
        """
        # Done through monkeypatch so the cwd change is undone at teardown.
        self._monkeypatch.chdir(self.path)
752
+
753
    def _makefile(
        self,
        ext: str,
        lines: Sequence[Any | bytes],
        files: dict[str, str],
        encoding: str = "utf-8",
    ) -> Path:
        """Write one or more files under :attr:`path` and return the first.

        :param ext: File extension including the dot (may be empty).
        :param lines: Positional contents, joined by newlines and written to a
            file named after the current test.
        :param files: Mapping of file basename (without extension) to contents.
        :param encoding: Used both to decode ``bytes`` contents and to write.
        :raises TypeError: If ``ext`` is None.
        :raises ValueError: If ``ext`` is non-empty without a leading dot.
        """
        items = list(files.items())

        if ext is None:
            raise TypeError("ext must not be None")

        if ext and not ext.startswith("."):
            raise ValueError(
                f"pytester.makefile expects a file extension, try .{ext} instead of {ext}"
            )

        def to_text(s: Any | bytes) -> str:
            return s.decode(encoding) if isinstance(s, bytes) else str(s)

        if lines:
            source = "\n".join(to_text(x) for x in lines)
            basename = self._name
            # The positional-args file goes first, so it is the one returned.
            items.insert(0, (basename, source))

        ret = None
        for basename, value in items:
            p = self.path.joinpath(basename).with_suffix(ext)
            p.parent.mkdir(parents=True, exist_ok=True)
            # Normalize the content through Source — presumably deindents
            # triple-quoted bodies; confirm against _pytest._code.Source.
            source_ = Source(value)
            source = "\n".join(to_text(line) for line in source_.lines)
            p.write_text(source.strip(), encoding=encoding)
            if ret is None:
                # Remember the first file written; it is the return value.
                ret = p
        assert ret is not None
        return ret
789
+
790
    def makefile(self, ext: str, *args: str, **kwargs: str) -> Path:
        r"""Create new text file(s) in the test directory.

        :param ext:
            The extension the file(s) should use, including the dot, e.g. `.py`.
        :param args:
            All args are treated as strings and joined using newlines.
            The result is written as contents to the file. The name of the
            file is based on the test function requesting this fixture.
        :param kwargs:
            Each keyword is the name of a file, while the value of it will
            be written as contents of the file.
        :returns:
            The first created file.
        :raises ValueError:
            If ``ext`` does not start with a dot.

        Examples:

        .. code-block:: python

            pytester.makefile(".txt", "line1", "line2")

            pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")

        To create binary files, use :meth:`pathlib.Path.write_bytes` directly:

        .. code-block:: python

            filename = pytester.path.joinpath("foo.bin")
            filename.write_bytes(b"...")
        """
        return self._makefile(ext, args, kwargs)
821
+
822
    def makeconftest(self, source: str) -> Path:
        """Write a conftest.py file.

        :param source: The contents.
        :returns: The conftest.py file.
        """
        # Delegates to makepyfile with a fixed basename.
        return self.makepyfile(conftest=source)
829
+
830
    def makeini(self, source: str) -> Path:
        """Write a tox.ini file.

        :param source: The contents.
        :returns: The tox.ini file.
        """
        return self.makefile(".ini", tox=source)
837
+
838
    def getinicfg(self, source: str) -> SectionWrapper:
        """Return the pytest section from the tox.ini config file.

        :param source: Contents written to tox.ini first.
        """
        p = self.makeini(source)
        return IniConfig(str(p))["pytest"]
842
+
843
    def makepyprojecttoml(self, source: str) -> Path:
        """Write a pyproject.toml file.

        :param source: The contents.
        :returns: The pyproject.toml file.

        .. versionadded:: 6.0
        """
        return self.makefile(".toml", pyproject=source)
852
+
853
    def makepyfile(self, *args, **kwargs) -> Path:
        r"""Shortcut for .makefile() with a .py extension.

        Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting
        existing files.

        :returns: The first created file.

        Examples:

        .. code-block:: python

            def test_something(pytester):
                # Initial file is created test_something.py.
                pytester.makepyfile("foobar")
                # To create multiple files, pass kwargs accordingly.
                pytester.makepyfile(custom="foobar")
                # At this point, both 'test_something.py' & 'custom.py' exist in the test directory.

        """
        return self._makefile(".py", args, kwargs)
872
+
873
    def maketxtfile(self, *args, **kwargs) -> Path:
        r"""Shortcut for .makefile() with a .txt extension.

        Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting
        existing files.

        :returns: The first created file.

        Examples:

        .. code-block:: python

            def test_something(pytester):
                # Initial file is created test_something.txt.
                pytester.maketxtfile("foobar")
                # To create multiple files, pass kwargs accordingly.
                pytester.maketxtfile(custom="foobar")
                # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.

        """
        return self._makefile(".txt", args, kwargs)
892
+
893
+ def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None:
894
+ """Prepend a directory to sys.path, defaults to :attr:`path`.
895
+
896
+ This is undone automatically when this object dies at the end of each
897
+ test.
898
+
899
+ :param path:
900
+ The path.
901
+ """
902
+ if path is None:
903
+ path = self.path
904
+
905
+ self._monkeypatch.syspath_prepend(str(path))
906
+
907
+ def mkdir(self, name: str | os.PathLike[str]) -> Path:
908
+ """Create a new (sub)directory.
909
+
910
+ :param name:
911
+ The name of the directory, relative to the pytester path.
912
+ :returns:
913
+ The created directory.
914
+ :rtype: pathlib.Path
915
+ """
916
+ p = self.path / name
917
+ p.mkdir()
918
+ return p
919
+
920
+ def mkpydir(self, name: str | os.PathLike[str]) -> Path:
921
+ """Create a new python package.
922
+
923
+ This creates a (sub)directory with an empty ``__init__.py`` file so it
924
+ gets recognised as a Python package.
925
+ """
926
+ p = self.path / name
927
+ p.mkdir()
928
+ p.joinpath("__init__.py").touch()
929
+ return p
930
+
931
    def copy_example(self, name: str | None = None) -> Path:
        """Copy file from project's directory into the testdir.

        :param name:
            The name of the file to copy.
        :return:
            Path to the copied directory (inside ``self.path``).
        :rtype: pathlib.Path
        :raises ValueError: If the ``pytester_example_dir`` ini option is unset.
        :raises LookupError: If the requested example cannot be found.
        """
        example_dir_ = self._request.config.getini("pytester_example_dir")
        if example_dir_ is None:
            raise ValueError("pytester_example_dir is unset, can't copy examples")
        example_dir: Path = self._request.config.rootpath / example_dir_

        # pytester_example_path markers extend the lookup directory.
        for extra_element in self._request.node.iter_markers("pytester_example_path"):
            assert extra_element.args
            example_dir = example_dir.joinpath(*extra_element.args)

        if name is None:
            # No explicit name: look up a directory or module named after
            # the current test function.
            func_name = self._name
            maybe_dir = example_dir / func_name
            maybe_file = example_dir / (func_name + ".py")

            if maybe_dir.is_dir():
                example_path = maybe_dir
            elif maybe_file.is_file():
                example_path = maybe_file
            else:
                raise LookupError(
                    f"{func_name} can't be found as module or package in {example_dir}"
                )
        else:
            example_path = example_dir.joinpath(name)

        # A non-package directory is copied wholesale into self.path;
        # a single file is copied next to it.
        if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file():
            shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True)
            return self.path
        elif example_path.is_file():
            result = self.path.joinpath(example_path.name)
            shutil.copy(example_path, result)
            return result
        else:
            raise LookupError(
                f'example "{example_path}" is not found as a file or directory'
            )
976
+
977
    def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item:
        """Get the collection node of a file.

        :param config:
            A pytest config.
            See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it.
        :param arg:
            Path to the file.
        :returns:
            The node.
        """
        session = Session.from_config(config)
        # Plain paths only — "::"-style node ids are not supported here.
        assert "::" not in str(arg)
        p = Path(os.path.abspath(arg))
        # Bracket the collection in sessionstart/sessionfinish so hooks see
        # a complete session lifecycle.
        config.hook.pytest_sessionstart(session=session)
        res = session.perform_collect([str(p)], genitems=False)[0]
        config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
        return res
995
+
996
    def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item:
        """Return the collection node of a file.

        This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
        create the (configured) pytest Config instance.

        :param path:
            Path to the file.
        :returns:
            The node.
        """
        path = Path(path)
        config = self.parseconfigure(path)
        session = Session.from_config(config)
        # Collect by the path relative to the session root.
        x = bestrelpath(session.path, path)
        config.hook.pytest_sessionstart(session=session)
        res = session.perform_collect([x], genitems=False)[0]
        config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
        return res
1015
+
1016
+ def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]:
1017
+ """Generate all test items from a collection node.
1018
+
1019
+ This recurses into the collection node and returns a list of all the
1020
+ test items contained within.
1021
+
1022
+ :param colitems:
1023
+ The collection nodes.
1024
+ :returns:
1025
+ The collected items.
1026
+ """
1027
+ session = colitems[0].session
1028
+ result: list[Item] = []
1029
+ for colitem in colitems:
1030
+ result.extend(session.genitems(colitem))
1031
+ return result
1032
+
1033
    def runitem(self, source: str) -> Any:
        """Run the "test_func" Item.

        The calling test instance (class containing the test method) must
        provide a ``.getrunner()`` method which should return a runner which
        can run the test protocol for a single item, e.g.
        ``_pytest.runner.runtestprotocol``.
        """
        # used from runner functional tests
        item = self.getitem(source)
        # the test class where we are called from wants to provide the runner
        testclassinstance = self._request.instance
        runner = testclassinstance.getrunner()
        return runner(item)
1047
+
1048
+ def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder:
1049
+ """Run a test module in process using ``pytest.main()``.
1050
+
1051
+ This run writes "source" into a temporary file and runs
1052
+ ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
1053
+ for the result.
1054
+
1055
+ :param source: The source code of the test module.
1056
+ :param cmdlineargs: Any extra command line arguments to use.
1057
+ """
1058
+ p = self.makepyfile(source)
1059
+ values = [*list(cmdlineargs), p]
1060
+ return self.inline_run(*values)
1061
+
1062
+ def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]:
1063
+ """Run ``pytest.main(['--collect-only'])`` in-process.
1064
+
1065
+ Runs the :py:func:`pytest.main` function to run all of pytest inside
1066
+ the test process itself like :py:meth:`inline_run`, but returns a
1067
+ tuple of the collected items and a :py:class:`HookRecorder` instance.
1068
+ """
1069
+ rec = self.inline_run("--collect-only", *args)
1070
+ items = [x.item for x in rec.getcalls("pytest_itemcollected")]
1071
+ return items, rec
1072
+
1073
    def inline_run(
        self,
        *args: str | os.PathLike[str],
        plugins=(),
        no_reraise_ctrlc: bool = False,
    ) -> HookRecorder:
        """Run ``pytest.main()`` in-process, returning a HookRecorder.

        Runs the :py:func:`pytest.main` function to run all of pytest inside
        the test process itself. This means it can return a
        :py:class:`HookRecorder` instance which gives more detailed results
        from that run than can be done by matching stdout/stderr from
        :py:meth:`runpytest`.

        :param args:
            Command line arguments to pass to :py:func:`pytest.main`.
        :param plugins:
            Extra plugin instances the ``pytest.main()`` instance should use.
        :param no_reraise_ctrlc:
            Typically we reraise keyboard interrupts from the child run. If
            True, the KeyboardInterrupt exception is captured.
        """
        from _pytest.unraisableexception import gc_collect_iterations_key

        # (maybe a cpython bug?) the importlib cache sometimes isn't updated
        # properly between file creation and inline_run (especially if imports
        # are interspersed with file creation)
        importlib.invalidate_caches()

        plugins = list(plugins)
        finalizers = []
        try:
            # Any sys.module or sys.path changes done while running pytest
            # inline should be reverted after the test run completes to avoid
            # clashing with later inline tests run within the same pytest test,
            # e.g. just because they use matching test module names.
            finalizers.append(self.__take_sys_modules_snapshot().restore)
            finalizers.append(SysPathsSnapshot().restore)

            # Important note:
            # - our tests should not leave any other references/registrations
            #   laying around other than possibly loaded test modules
            #   referenced from sys.modules, as nothing will clean those up
            #   automatically

            # Holds the HookRecorder created during pytest_configure, if any.
            rec = []

            class PytesterHelperPlugin:
                @staticmethod
                def pytest_configure(config: Config) -> None:
                    rec.append(self.make_hook_recorder(config.pluginmanager))

                    # The unraisable plugin GC collect slows down inline
                    # pytester runs too much.
                    config.stash[gc_collect_iterations_key] = 0

            plugins.append(PytesterHelperPlugin())
            ret = main([str(x) for x in args], plugins=plugins)
            if len(rec) == 1:
                reprec = rec.pop()
            else:
                # pytest_configure never ran (e.g. usage error): fall back to
                # a bare record object that only carries the return code.
                class reprec:  # type: ignore
                    pass

            reprec.ret = ret

            # Typically we reraise keyboard interrupts from the child run
            # because it's our user requesting interruption of the testing.
            if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
                calls = reprec.getcalls("pytest_keyboard_interrupt")
                if calls and calls[-1].excinfo.type == KeyboardInterrupt:
                    raise KeyboardInterrupt()
            return reprec
        finally:
            for finalizer in finalizers:
                finalizer()
1150
+
1151
    def runpytest_inprocess(
        self, *args: str | os.PathLike[str], **kwargs: Any
    ) -> RunResult:
        """Return result of running pytest in-process, providing a similar
        interface to what self.runpytest() provides.

        :param args: Command line arguments forwarded to :py:meth:`inline_run`.
        :param kwargs: ``syspathinsert=True`` prepends :attr:`path` to
            ``sys.path``; remaining kwargs are forwarded to
            :py:meth:`inline_run`.
        """
        syspathinsert = kwargs.pop("syspathinsert", False)

        if syspathinsert:
            self.syspathinsert()
        instant = timing.Instant()
        # Capture stdout/stderr of the inner run, then replay it on the
        # outer streams below so it remains visible.
        capture = _get_multicapture("sys")
        capture.start_capturing()
        try:
            try:
                reprec = self.inline_run(*args, **kwargs)
            except SystemExit as e:
                ret = e.args[0]
                try:
                    ret = ExitCode(e.args[0])
                except ValueError:
                    pass

                # Minimal record object carrying only the return code.
                class reprec:  # type: ignore
                    ret = ret

            except Exception:
                traceback.print_exc()

                # Internal error: report pytest's INTERNAL_ERROR exit code (3).
                class reprec:  # type: ignore
                    ret = ExitCode(3)

        finally:
            out, err = capture.readouterr()
            capture.stop_capturing()
            sys.stdout.write(out)
            sys.stderr.write(err)

        assert reprec.ret is not None
        res = RunResult(
            reprec.ret, out.splitlines(), err.splitlines(), instant.elapsed().seconds
        )
        res.reprec = reprec  # type: ignore
        return res
1194
+
1195
+ def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult:
1196
+ """Run pytest inline or in a subprocess, depending on the command line
1197
+ option "--runpytest" and return a :py:class:`~pytest.RunResult`."""
1198
+ new_args = self._ensure_basetemp(args)
1199
+ if self._method == "inprocess":
1200
+ return self.runpytest_inprocess(*new_args, **kwargs)
1201
+ elif self._method == "subprocess":
1202
+ return self.runpytest_subprocess(*new_args, **kwargs)
1203
+ raise RuntimeError(f"Unrecognized runpytest option: {self._method}")
1204
+
1205
+ def _ensure_basetemp(
1206
+ self, args: Sequence[str | os.PathLike[str]]
1207
+ ) -> list[str | os.PathLike[str]]:
1208
+ new_args = list(args)
1209
+ for x in new_args:
1210
+ if str(x).startswith("--basetemp"):
1211
+ break
1212
+ else:
1213
+ new_args.append(
1214
+ "--basetemp={}".format(self.path.parent.joinpath("basetemp"))
1215
+ )
1216
+ return new_args
1217
+
1218
    def parseconfig(self, *args: str | os.PathLike[str]) -> Config:
        """Return a new pytest :class:`pytest.Config` instance from given
        commandline args.

        This invokes the pytest bootstrapping code in _pytest.config to create a
        new :py:class:`pytest.PytestPluginManager` and call the
        :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config`
        instance.

        If :attr:`plugins` has been populated they should be plugin modules
        to be registered with the plugin manager.
        """
        import _pytest.config

        new_args = self._ensure_basetemp(args)
        new_args = [str(x) for x in new_args]

        config = _pytest.config._prepareconfig(new_args, self.plugins)  # type: ignore[arg-type]
        # we don't know what the test will do with this half-setup config
        # object and thus we make sure it gets unconfigured properly in any
        # case (otherwise capturing could still be active, for example)
        self._request.addfinalizer(config._ensure_unconfigure)
        return config
1241
+
1242
+ def parseconfigure(self, *args: str | os.PathLike[str]) -> Config:
1243
+ """Return a new pytest configured Config instance.
1244
+
1245
+ Returns a new :py:class:`pytest.Config` instance like
1246
+ :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure`
1247
+ hook.
1248
+ """
1249
+ config = self.parseconfig(*args)
1250
+ config._do_configure()
1251
+ return config
1252
+
1253
+ def getitem(
1254
+ self, source: str | os.PathLike[str], funcname: str = "test_func"
1255
+ ) -> Item:
1256
+ """Return the test item for a test function.
1257
+
1258
+ Writes the source to a python file and runs pytest's collection on
1259
+ the resulting module, returning the test item for the requested
1260
+ function name.
1261
+
1262
+ :param source:
1263
+ The module source.
1264
+ :param funcname:
1265
+ The name of the test function for which to return a test item.
1266
+ :returns:
1267
+ The test item.
1268
+ """
1269
+ items = self.getitems(source)
1270
+ for item in items:
1271
+ if item.name == funcname:
1272
+ return item
1273
+ assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}"
1274
+
1275
+ def getitems(self, source: str | os.PathLike[str]) -> list[Item]:
1276
+ """Return all test items collected from the module.
1277
+
1278
+ Writes the source to a Python file and runs pytest's collection on
1279
+ the resulting module, returning all test items contained within.
1280
+ """
1281
+ modcol = self.getmodulecol(source)
1282
+ return self.genitems([modcol])
1283
+
1284
+ def getmodulecol(
1285
+ self,
1286
+ source: str | os.PathLike[str],
1287
+ configargs=(),
1288
+ *,
1289
+ withinit: bool = False,
1290
+ ):
1291
+ """Return the module collection node for ``source``.
1292
+
1293
+ Writes ``source`` to a file using :py:meth:`makepyfile` and then
1294
+ runs the pytest collection on it, returning the collection node for the
1295
+ test module.
1296
+
1297
+ :param source:
1298
+ The source code of the module to collect.
1299
+
1300
+ :param configargs:
1301
+ Any extra arguments to pass to :py:meth:`parseconfigure`.
1302
+
1303
+ :param withinit:
1304
+ Whether to also write an ``__init__.py`` file to the same
1305
+ directory to ensure it is a package.
1306
+ """
1307
+ if isinstance(source, os.PathLike):
1308
+ path = self.path.joinpath(source)
1309
+ assert not withinit, "not supported for paths"
1310
+ else:
1311
+ kw = {self._name: str(source)}
1312
+ path = self.makepyfile(**kw)
1313
+ if withinit:
1314
+ self.makepyfile(__init__="#")
1315
+ self.config = config = self.parseconfigure(path, *configargs)
1316
+ return self.getnode(config, path)
1317
+
1318
+ def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None:
1319
+ """Return the collection node for name from the module collection.
1320
+
1321
+ Searches a module collection node for a collection node matching the
1322
+ given name.
1323
+
1324
+ :param modcol: A module collection node; see :py:meth:`getmodulecol`.
1325
+ :param name: The name of the node to return.
1326
+ """
1327
+ if modcol not in self._mod_collections:
1328
+ self._mod_collections[modcol] = list(modcol.collect())
1329
+ for colitem in self._mod_collections[modcol]:
1330
+ if colitem.name == name:
1331
+ return colitem
1332
+ return None
1333
+
1334
+ def popen(
1335
+ self,
1336
+ cmdargs: Sequence[str | os.PathLike[str]],
1337
+ stdout: int | TextIO = subprocess.PIPE,
1338
+ stderr: int | TextIO = subprocess.PIPE,
1339
+ stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN,
1340
+ **kw,
1341
+ ):
1342
+ """Invoke :py:class:`subprocess.Popen`.
1343
+
1344
+ Calls :py:class:`subprocess.Popen` making sure the current working
1345
+ directory is in ``PYTHONPATH``.
1346
+
1347
+ You probably want to use :py:meth:`run` instead.
1348
+ """
1349
+ env = os.environ.copy()
1350
+ env["PYTHONPATH"] = os.pathsep.join(
1351
+ filter(None, [os.getcwd(), env.get("PYTHONPATH", "")])
1352
+ )
1353
+ kw["env"] = env
1354
+
1355
+ if stdin is self.CLOSE_STDIN:
1356
+ kw["stdin"] = subprocess.PIPE
1357
+ elif isinstance(stdin, bytes):
1358
+ kw["stdin"] = subprocess.PIPE
1359
+ else:
1360
+ kw["stdin"] = stdin
1361
+
1362
+ popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
1363
+ if stdin is self.CLOSE_STDIN:
1364
+ assert popen.stdin is not None
1365
+ popen.stdin.close()
1366
+ elif isinstance(stdin, bytes):
1367
+ assert popen.stdin is not None
1368
+ popen.stdin.write(stdin)
1369
+
1370
+ return popen
1371
+
1372
+ def run(
1373
+ self,
1374
+ *cmdargs: str | os.PathLike[str],
1375
+ timeout: float | None = None,
1376
+ stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN,
1377
+ ) -> RunResult:
1378
+ """Run a command with arguments.
1379
+
1380
+ Run a process using :py:class:`subprocess.Popen` saving the stdout and
1381
+ stderr.
1382
+
1383
+ :param cmdargs:
1384
+ The sequence of arguments to pass to :py:class:`subprocess.Popen`,
1385
+ with path-like objects being converted to :py:class:`str`
1386
+ automatically.
1387
+ :param timeout:
1388
+ The period in seconds after which to timeout and raise
1389
+ :py:class:`Pytester.TimeoutExpired`.
1390
+ :param stdin:
1391
+ Optional standard input.
1392
+
1393
+ - If it is ``CLOSE_STDIN`` (Default), then this method calls
1394
+ :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and
1395
+ the standard input is closed immediately after the new command is
1396
+ started.
1397
+
1398
+ - If it is of type :py:class:`bytes`, these bytes are sent to the
1399
+ standard input of the command.
1400
+
1401
+ - Otherwise, it is passed through to :py:class:`subprocess.Popen`.
1402
+ For further information in this case, consult the document of the
1403
+ ``stdin`` parameter in :py:class:`subprocess.Popen`.
1404
+ :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int
1405
+ :returns:
1406
+ The result.
1407
+
1408
+ """
1409
+ __tracebackhide__ = True
1410
+
1411
+ cmdargs = tuple(os.fspath(arg) for arg in cmdargs)
1412
+ p1 = self.path.joinpath("stdout")
1413
+ p2 = self.path.joinpath("stderr")
1414
+ print("running:", *cmdargs)
1415
+ print(" in:", Path.cwd())
1416
+
1417
+ with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2:
1418
+ instant = timing.Instant()
1419
+ popen = self.popen(
1420
+ cmdargs,
1421
+ stdin=stdin,
1422
+ stdout=f1,
1423
+ stderr=f2,
1424
+ close_fds=(sys.platform != "win32"),
1425
+ )
1426
+ if popen.stdin is not None:
1427
+ popen.stdin.close()
1428
+
1429
+ def handle_timeout() -> None:
1430
+ __tracebackhide__ = True
1431
+
1432
+ timeout_message = f"{timeout} second timeout expired running: {cmdargs}"
1433
+
1434
+ popen.kill()
1435
+ popen.wait()
1436
+ raise self.TimeoutExpired(timeout_message)
1437
+
1438
+ if timeout is None:
1439
+ ret = popen.wait()
1440
+ else:
1441
+ try:
1442
+ ret = popen.wait(timeout)
1443
+ except subprocess.TimeoutExpired:
1444
+ handle_timeout()
1445
+
1446
+ with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2:
1447
+ out = f1.read().splitlines()
1448
+ err = f2.read().splitlines()
1449
+
1450
+ self._dump_lines(out, sys.stdout)
1451
+ self._dump_lines(err, sys.stderr)
1452
+
1453
+ with contextlib.suppress(ValueError):
1454
+ ret = ExitCode(ret)
1455
+ return RunResult(ret, out, err, instant.elapsed().seconds)
1456
+
1457
+ def _dump_lines(self, lines, fp):
1458
+ try:
1459
+ for line in lines:
1460
+ print(line, file=fp)
1461
+ except UnicodeEncodeError:
1462
+ print(f"couldn't print to {fp} because of encoding")
1463
+
1464
+ def _getpytestargs(self) -> tuple[str, ...]:
1465
+ return sys.executable, "-mpytest"
1466
+
1467
+ def runpython(self, script: os.PathLike[str]) -> RunResult:
1468
+ """Run a python script using sys.executable as interpreter."""
1469
+ return self.run(sys.executable, script)
1470
+
1471
+ def runpython_c(self, command: str) -> RunResult:
1472
+ """Run ``python -c "command"``."""
1473
+ return self.run(sys.executable, "-c", command)
1474
+
1475
+ def runpytest_subprocess(
1476
+ self, *args: str | os.PathLike[str], timeout: float | None = None
1477
+ ) -> RunResult:
1478
+ """Run pytest as a subprocess with given arguments.
1479
+
1480
+ Any plugins added to the :py:attr:`plugins` list will be added using the
1481
+ ``-p`` command line option. Additionally ``--basetemp`` is used to put
1482
+ any temporary files and directories in a numbered directory prefixed
1483
+ with "runpytest-" to not conflict with the normal numbered pytest
1484
+ location for temporary files and directories.
1485
+
1486
+ :param args:
1487
+ The sequence of arguments to pass to the pytest subprocess.
1488
+ :param timeout:
1489
+ The period in seconds after which to timeout and raise
1490
+ :py:class:`Pytester.TimeoutExpired`.
1491
+ :returns:
1492
+ The result.
1493
+ """
1494
+ __tracebackhide__ = True
1495
+ p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700)
1496
+ args = (f"--basetemp={p}", *args)
1497
+ plugins = [x for x in self.plugins if isinstance(x, str)]
1498
+ if plugins:
1499
+ args = ("-p", plugins[0], *args)
1500
+ args = self._getpytestargs() + args
1501
+ return self.run(*args, timeout=timeout)
1502
+
1503
+ def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn:
1504
+ """Run pytest using pexpect.
1505
+
1506
+ This makes sure to use the right pytest and sets up the temporary
1507
+ directory locations.
1508
+
1509
+ The pexpect child is returned.
1510
+ """
1511
+ basetemp = self.path / "temp-pexpect"
1512
+ basetemp.mkdir(mode=0o700)
1513
+ invoke = " ".join(map(str, self._getpytestargs()))
1514
+ cmd = f"{invoke} --basetemp={basetemp} {string}"
1515
+ return self.spawn(cmd, expect_timeout=expect_timeout)
1516
+
1517
+ def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn:
1518
+ """Run a command using pexpect.
1519
+
1520
+ The pexpect child is returned.
1521
+ """
1522
+ pexpect = importorskip("pexpect", "3.0")
1523
+ if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
1524
+ skip("pypy-64 bit not supported")
1525
+ if not hasattr(pexpect, "spawn"):
1526
+ skip("pexpect.spawn not available")
1527
+ logfile = self.path.joinpath("spawn.out").open("wb")
1528
+
1529
+ child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout)
1530
+ self._request.addfinalizer(logfile.close)
1531
+ return child
1532
+
1533
+
1534
+ class LineComp:
1535
+ def __init__(self) -> None:
1536
+ self.stringio = StringIO()
1537
+ """:class:`python:io.StringIO()` instance used for input."""
1538
+
1539
+ def assert_contains_lines(self, lines2: Sequence[str]) -> None:
1540
+ """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value.
1541
+
1542
+ Lines are matched using :func:`LineMatcher.fnmatch_lines <pytest.LineMatcher.fnmatch_lines>`.
1543
+ """
1544
+ __tracebackhide__ = True
1545
+ val = self.stringio.getvalue()
1546
+ self.stringio.truncate(0)
1547
+ self.stringio.seek(0)
1548
+ lines1 = val.split("\n")
1549
+ LineMatcher(lines1).fnmatch_lines(lines2)
1550
+
1551
+
1552
+ class LineMatcher:
1553
+ """Flexible matching of text.
1554
+
1555
+ This is a convenience class to test large texts like the output of
1556
+ commands.
1557
+
1558
+ The constructor takes a list of lines without their trailing newlines, i.e.
1559
+ ``text.splitlines()``.
1560
+ """
1561
+
1562
+ def __init__(self, lines: list[str]) -> None:
1563
+ self.lines = lines
1564
+ self._log_output: list[str] = []
1565
+
1566
+ def __str__(self) -> str:
1567
+ """Return the entire original text.
1568
+
1569
+ .. versionadded:: 6.2
1570
+ You can use :meth:`str` in older versions.
1571
+ """
1572
+ return "\n".join(self.lines)
1573
+
1574
+ def _getlines(self, lines2: str | Sequence[str] | Source) -> Sequence[str]:
1575
+ if isinstance(lines2, str):
1576
+ lines2 = Source(lines2)
1577
+ if isinstance(lines2, Source):
1578
+ lines2 = lines2.strip().lines
1579
+ return lines2
1580
+
1581
+ def fnmatch_lines_random(self, lines2: Sequence[str]) -> None:
1582
+ """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`)."""
1583
+ __tracebackhide__ = True
1584
+ self._match_lines_random(lines2, fnmatch)
1585
+
1586
+ def re_match_lines_random(self, lines2: Sequence[str]) -> None:
1587
+ """Check lines exist in the output in any order (using :func:`python:re.match`)."""
1588
+ __tracebackhide__ = True
1589
+ self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name)))
1590
+
1591
+ def _match_lines_random(
1592
+ self, lines2: Sequence[str], match_func: Callable[[str, str], bool]
1593
+ ) -> None:
1594
+ __tracebackhide__ = True
1595
+ lines2 = self._getlines(lines2)
1596
+ for line in lines2:
1597
+ for x in self.lines:
1598
+ if line == x or match_func(x, line):
1599
+ self._log("matched: ", repr(line))
1600
+ break
1601
+ else:
1602
+ msg = f"line {line!r} not found in output"
1603
+ self._log(msg)
1604
+ self._fail(msg)
1605
+
1606
+ def get_lines_after(self, fnline: str) -> Sequence[str]:
1607
+ """Return all lines following the given line in the text.
1608
+
1609
+ The given line can contain glob wildcards.
1610
+ """
1611
+ for i, line in enumerate(self.lines):
1612
+ if fnline == line or fnmatch(line, fnline):
1613
+ return self.lines[i + 1 :]
1614
+ raise ValueError(f"line {fnline!r} not found in output")
1615
+
1616
+ def _log(self, *args) -> None:
1617
+ self._log_output.append(" ".join(str(x) for x in args))
1618
+
1619
+ @property
1620
+ def _log_text(self) -> str:
1621
+ return "\n".join(self._log_output)
1622
+
1623
+ def fnmatch_lines(
1624
+ self, lines2: Sequence[str], *, consecutive: bool = False
1625
+ ) -> None:
1626
+ """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`).
1627
+
1628
+ The argument is a list of lines which have to match and can use glob
1629
+ wildcards. If they do not match a pytest.fail() is called. The
1630
+ matches and non-matches are also shown as part of the error message.
1631
+
1632
+ :param lines2: String patterns to match.
1633
+ :param consecutive: Match lines consecutively?
1634
+ """
1635
+ __tracebackhide__ = True
1636
+ self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
1637
+
1638
+ def re_match_lines(
1639
+ self, lines2: Sequence[str], *, consecutive: bool = False
1640
+ ) -> None:
1641
+ """Check lines exist in the output (using :func:`python:re.match`).
1642
+
1643
+ The argument is a list of lines which have to match using ``re.match``.
1644
+ If they do not match a pytest.fail() is called.
1645
+
1646
+ The matches and non-matches are also shown as part of the error message.
1647
+
1648
+ :param lines2: string patterns to match.
1649
+ :param consecutive: match lines consecutively?
1650
+ """
1651
+ __tracebackhide__ = True
1652
+ self._match_lines(
1653
+ lines2,
1654
+ lambda name, pat: bool(re.match(pat, name)),
1655
+ "re.match",
1656
+ consecutive=consecutive,
1657
+ )
1658
+
1659
+ def _match_lines(
1660
+ self,
1661
+ lines2: Sequence[str],
1662
+ match_func: Callable[[str, str], bool],
1663
+ match_nickname: str,
1664
+ *,
1665
+ consecutive: bool = False,
1666
+ ) -> None:
1667
+ """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
1668
+
1669
+ :param Sequence[str] lines2:
1670
+ List of string patterns to match. The actual format depends on
1671
+ ``match_func``.
1672
+ :param match_func:
1673
+ A callable ``match_func(line, pattern)`` where line is the
1674
+ captured line from stdout/stderr and pattern is the matching
1675
+ pattern.
1676
+ :param str match_nickname:
1677
+ The nickname for the match function that will be logged to stdout
1678
+ when a match occurs.
1679
+ :param consecutive:
1680
+ Match lines consecutively?
1681
+ """
1682
+ if not isinstance(lines2, collections.abc.Sequence):
1683
+ raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
1684
+ lines2 = self._getlines(lines2)
1685
+ lines1 = self.lines[:]
1686
+ extralines = []
1687
+ __tracebackhide__ = True
1688
+ wnick = len(match_nickname) + 1
1689
+ started = False
1690
+ for line in lines2:
1691
+ nomatchprinted = False
1692
+ while lines1:
1693
+ nextline = lines1.pop(0)
1694
+ if line == nextline:
1695
+ self._log("exact match:", repr(line))
1696
+ started = True
1697
+ break
1698
+ elif match_func(nextline, line):
1699
+ self._log(f"{match_nickname}:", repr(line))
1700
+ self._log(
1701
+ "{:>{width}}".format("with:", width=wnick), repr(nextline)
1702
+ )
1703
+ started = True
1704
+ break
1705
+ else:
1706
+ if consecutive and started:
1707
+ msg = f"no consecutive match: {line!r}"
1708
+ self._log(msg)
1709
+ self._log(
1710
+ "{:>{width}}".format("with:", width=wnick), repr(nextline)
1711
+ )
1712
+ self._fail(msg)
1713
+ if not nomatchprinted:
1714
+ self._log(
1715
+ "{:>{width}}".format("nomatch:", width=wnick), repr(line)
1716
+ )
1717
+ nomatchprinted = True
1718
+ self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
1719
+ extralines.append(nextline)
1720
+ else:
1721
+ msg = f"remains unmatched: {line!r}"
1722
+ self._log(msg)
1723
+ self._fail(msg)
1724
+ self._log_output = []
1725
+
1726
+ def no_fnmatch_line(self, pat: str) -> None:
1727
+ """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
1728
+
1729
+ :param str pat: The pattern to match lines.
1730
+ """
1731
+ __tracebackhide__ = True
1732
+ self._no_match_line(pat, fnmatch, "fnmatch")
1733
+
1734
+ def no_re_match_line(self, pat: str) -> None:
1735
+ """Ensure captured lines do not match the given pattern, using ``re.match``.
1736
+
1737
+ :param str pat: The regular expression to match lines.
1738
+ """
1739
+ __tracebackhide__ = True
1740
+ self._no_match_line(
1741
+ pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
1742
+ )
1743
+
1744
+ def _no_match_line(
1745
+ self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
1746
+ ) -> None:
1747
+ """Ensure captured lines does not have a the given pattern, using ``fnmatch.fnmatch``.
1748
+
1749
+ :param str pat: The pattern to match lines.
1750
+ """
1751
+ __tracebackhide__ = True
1752
+ nomatch_printed = False
1753
+ wnick = len(match_nickname) + 1
1754
+ for line in self.lines:
1755
+ if match_func(line, pat):
1756
+ msg = f"{match_nickname}: {pat!r}"
1757
+ self._log(msg)
1758
+ self._log("{:>{width}}".format("with:", width=wnick), repr(line))
1759
+ self._fail(msg)
1760
+ else:
1761
+ if not nomatch_printed:
1762
+ self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat))
1763
+ nomatch_printed = True
1764
+ self._log("{:>{width}}".format("and:", width=wnick), repr(line))
1765
+ self._log_output = []
1766
+
1767
+ def _fail(self, msg: str) -> None:
1768
+ __tracebackhide__ = True
1769
+ log_text = self._log_text
1770
+ self._log_output = []
1771
+ fail(log_text)
1772
+
1773
+ def str(self) -> str:
1774
+ """Return the entire original text."""
1775
+ return str(self)
archive/Axiovorax/.venv/Lib/site-packages/_pytest/pytester_assertions.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Helper plugin for pytester; should not be loaded on its own."""
2
+
3
+ # This plugin contains assertions used by pytester. pytester cannot
4
+ # contain them itself, since it is imported by the `pytest` module,
5
+ # hence cannot be subject to assertion rewriting, which requires a
6
+ # module to not be already imported.
7
+ from __future__ import annotations
8
+
9
+ from collections.abc import Sequence
10
+
11
+ from _pytest.reports import CollectReport
12
+ from _pytest.reports import TestReport
13
+
14
+
15
+ def assertoutcome(
16
+ outcomes: tuple[
17
+ Sequence[TestReport],
18
+ Sequence[CollectReport | TestReport],
19
+ Sequence[CollectReport | TestReport],
20
+ ],
21
+ passed: int = 0,
22
+ skipped: int = 0,
23
+ failed: int = 0,
24
+ ) -> None:
25
+ __tracebackhide__ = True
26
+
27
+ realpassed, realskipped, realfailed = outcomes
28
+ obtained = {
29
+ "passed": len(realpassed),
30
+ "skipped": len(realskipped),
31
+ "failed": len(realfailed),
32
+ }
33
+ expected = {"passed": passed, "skipped": skipped, "failed": failed}
34
+ assert obtained == expected, outcomes
35
+
36
+
37
+ def assert_outcomes(
38
+ outcomes: dict[str, int],
39
+ passed: int = 0,
40
+ skipped: int = 0,
41
+ failed: int = 0,
42
+ errors: int = 0,
43
+ xpassed: int = 0,
44
+ xfailed: int = 0,
45
+ warnings: int | None = None,
46
+ deselected: int | None = None,
47
+ ) -> None:
48
+ """Assert that the specified outcomes appear with the respective
49
+ numbers (0 means it didn't occur) in the text output from a test run."""
50
+ __tracebackhide__ = True
51
+
52
+ obtained = {
53
+ "passed": outcomes.get("passed", 0),
54
+ "skipped": outcomes.get("skipped", 0),
55
+ "failed": outcomes.get("failed", 0),
56
+ "errors": outcomes.get("errors", 0),
57
+ "xpassed": outcomes.get("xpassed", 0),
58
+ "xfailed": outcomes.get("xfailed", 0),
59
+ }
60
+ expected = {
61
+ "passed": passed,
62
+ "skipped": skipped,
63
+ "failed": failed,
64
+ "errors": errors,
65
+ "xpassed": xpassed,
66
+ "xfailed": xfailed,
67
+ }
68
+ if warnings is not None:
69
+ obtained["warnings"] = outcomes.get("warnings", 0)
70
+ expected["warnings"] = warnings
71
+ if deselected is not None:
72
+ obtained["deselected"] = outcomes.get("deselected", 0)
73
+ expected["deselected"] = deselected
74
+ assert obtained == expected
archive/Axiovorax/.venv/Lib/site-packages/_pytest/python.py ADDED
@@ -0,0 +1,1723 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Python test discovery, setup and run of test functions."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import abc
7
+ from collections import Counter
8
+ from collections import defaultdict
9
+ from collections.abc import Callable
10
+ from collections.abc import Generator
11
+ from collections.abc import Iterable
12
+ from collections.abc import Iterator
13
+ from collections.abc import Mapping
14
+ from collections.abc import Sequence
15
+ import dataclasses
16
+ import enum
17
+ import fnmatch
18
+ from functools import partial
19
+ import inspect
20
+ import itertools
21
+ import os
22
+ from pathlib import Path
23
+ import re
24
+ import types
25
+ from typing import Any
26
+ from typing import final
27
+ from typing import Literal
28
+ from typing import NoReturn
29
+ from typing import TYPE_CHECKING
30
+ import warnings
31
+
32
+ import _pytest
33
+ from _pytest import fixtures
34
+ from _pytest import nodes
35
+ from _pytest._code import filter_traceback
36
+ from _pytest._code import getfslineno
37
+ from _pytest._code.code import ExceptionInfo
38
+ from _pytest._code.code import TerminalRepr
39
+ from _pytest._code.code import Traceback
40
+ from _pytest._io.saferepr import saferepr
41
+ from _pytest.compat import ascii_escaped
42
+ from _pytest.compat import get_default_arg_names
43
+ from _pytest.compat import get_real_func
44
+ from _pytest.compat import getimfunc
45
+ from _pytest.compat import is_async_function
46
+ from _pytest.compat import LEGACY_PATH
47
+ from _pytest.compat import NOTSET
48
+ from _pytest.compat import safe_getattr
49
+ from _pytest.compat import safe_isclass
50
+ from _pytest.config import Config
51
+ from _pytest.config import hookimpl
52
+ from _pytest.config.argparsing import Parser
53
+ from _pytest.deprecated import check_ispytest
54
+ from _pytest.fixtures import FixtureDef
55
+ from _pytest.fixtures import FixtureRequest
56
+ from _pytest.fixtures import FuncFixtureInfo
57
+ from _pytest.fixtures import get_scope_node
58
+ from _pytest.main import Session
59
+ from _pytest.mark import ParameterSet
60
+ from _pytest.mark.structures import _HiddenParam
61
+ from _pytest.mark.structures import get_unpacked_marks
62
+ from _pytest.mark.structures import HIDDEN_PARAM
63
+ from _pytest.mark.structures import Mark
64
+ from _pytest.mark.structures import MarkDecorator
65
+ from _pytest.mark.structures import normalize_mark_list
66
+ from _pytest.outcomes import fail
67
+ from _pytest.outcomes import skip
68
+ from _pytest.pathlib import fnmatch_ex
69
+ from _pytest.pathlib import import_path
70
+ from _pytest.pathlib import ImportPathMismatchError
71
+ from _pytest.pathlib import scandir
72
+ from _pytest.scope import _ScopeName
73
+ from _pytest.scope import Scope
74
+ from _pytest.stash import StashKey
75
+ from _pytest.warning_types import PytestCollectionWarning
76
+ from _pytest.warning_types import PytestReturnNotNoneWarning
77
+
78
+
79
+ if TYPE_CHECKING:
80
+ from typing_extensions import Self
81
+
82
+
83
+ def pytest_addoption(parser: Parser) -> None:
84
+ parser.addini(
85
+ "python_files",
86
+ type="args",
87
+ # NOTE: default is also used in AssertionRewritingHook.
88
+ default=["test_*.py", "*_test.py"],
89
+ help="Glob-style file patterns for Python test module discovery",
90
+ )
91
+ parser.addini(
92
+ "python_classes",
93
+ type="args",
94
+ default=["Test"],
95
+ help="Prefixes or glob names for Python test class discovery",
96
+ )
97
+ parser.addini(
98
+ "python_functions",
99
+ type="args",
100
+ default=["test"],
101
+ help="Prefixes or glob names for Python test function and method discovery",
102
+ )
103
+ parser.addini(
104
+ "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
105
+ type="bool",
106
+ default=False,
107
+ help="Disable string escape non-ASCII characters, might cause unwanted "
108
+ "side effects(use at your own risk)",
109
+ )
110
+
111
+
112
+ def pytest_generate_tests(metafunc: Metafunc) -> None:
113
+ for marker in metafunc.definition.iter_markers(name="parametrize"):
114
+ metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker)
115
+
116
+
117
+ def pytest_configure(config: Config) -> None:
118
+ config.addinivalue_line(
119
+ "markers",
120
+ "parametrize(argnames, argvalues): call a test function multiple "
121
+ "times passing in different arguments in turn. argvalues generally "
122
+ "needs to be a list of values if argnames specifies only one name "
123
+ "or a list of tuples of values if argnames specifies multiple names. "
124
+ "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
125
+ "decorated test function, one with arg1=1 and another with arg1=2."
126
+ "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info "
127
+ "and examples.",
128
+ )
129
+ config.addinivalue_line(
130
+ "markers",
131
+ "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
132
+ "all of the specified fixtures. see "
133
+ "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ",
134
+ )
135
+
136
+
137
+ def async_fail(nodeid: str) -> None:
138
+ msg = (
139
+ "async def functions are not natively supported.\n"
140
+ "You need to install a suitable plugin for your async framework, for example:\n"
141
+ " - anyio\n"
142
+ " - pytest-asyncio\n"
143
+ " - pytest-tornasync\n"
144
+ " - pytest-trio\n"
145
+ " - pytest-twisted"
146
+ )
147
+ fail(msg, pytrace=False)
148
+
149
+
150
+ @hookimpl(trylast=True)
151
+ def pytest_pyfunc_call(pyfuncitem: Function) -> object | None:
152
+ testfunction = pyfuncitem.obj
153
+ if is_async_function(testfunction):
154
+ async_fail(pyfuncitem.nodeid)
155
+ funcargs = pyfuncitem.funcargs
156
+ testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
157
+ result = testfunction(**testargs)
158
+ if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
159
+ async_fail(pyfuncitem.nodeid)
160
+ elif result is not None:
161
+ warnings.warn(
162
+ PytestReturnNotNoneWarning(
163
+ f"Test functions should return None, but {pyfuncitem.nodeid} returned {type(result)!r}.\n"
164
+ "Did you mean to use `assert` instead of `return`?\n"
165
+ "See https://docs.pytest.org/en/stable/how-to/assert.html#return-not-none for more information."
166
+ )
167
+ )
168
+ return True
169
+
170
+
171
def pytest_collect_directory(
    path: Path, parent: nodes.Collector
) -> nodes.Collector | None:
    """Collect *path* as a :class:`Package` if it contains an ``__init__.py``.

    Returns ``None`` for non-package directories and for directories that
    cannot be inspected due to permissions.
    """
    init_file = path / "__init__.py"
    try:
        is_package = init_file.is_file()
    except PermissionError:
        # Unreadable directories are skipped silently.
        # See https://github.com/pytest-dev/pytest/issues/12120#issuecomment-2106349096.
        return None
    return Package.from_parent(parent, path=path) if is_package else None
183
+
184
+
185
def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Module | None:
    """Collect ``.py`` files as :class:`Module` nodes.

    Files given directly on the command line (init paths) are always
    collected; other files must match the ``python_files`` ini patterns.
    """
    if file_path.suffix != ".py":
        return None
    if not parent.session.isinitpath(file_path):
        if not path_matches_patterns(file_path, parent.config.getini("python_files")):
            return None
    hook_proxy = parent.session.gethookproxy(file_path)
    module: Module = hook_proxy.pytest_pycollect_makemodule(
        module_path=file_path, parent=parent
    )
    return module
198
+
199
+
200
def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool:
    """Return whether *path* matches at least one of the given glob patterns."""
    for pattern in patterns:
        if fnmatch_ex(pattern, path):
            return True
    return False
203
+
204
+
205
def pytest_pycollect_makemodule(module_path: Path, parent) -> Module:
    """Default hook impl: wrap a Python file in a plain :class:`Module` collector."""
    return Module.from_parent(parent, path=module_path)
207
+
208
+
209
@hookimpl(trylast=True)
def pytest_pycollect_makeitem(
    collector: Module | Class, name: str, obj: object
) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]:
    """Last-resort hook impl: turn a name/object found in a module or class
    into collection node(s).

    Classes matching the test-class conventions become :class:`Class`
    collectors; functions matching the test-function conventions become
    ``Function`` items via ``_genfunctions``. Everything else yields ``None``.
    """
    assert isinstance(collector, (Class, Module)), type(collector)
    # Nothing was collected elsewhere, let's do it here.
    if safe_isclass(obj):
        if collector.istestclass(obj, name):
            return Class.from_parent(collector, name=name, obj=obj)
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it.
        obj = getattr(obj, "__func__", obj)
        # We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only).
        if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
            # Matched a test name but is not actually a function: warn at the
            # object's own source location instead of collecting it.
            filename, lineno = getfslineno(obj)
            warnings.warn_explicit(
                message=PytestCollectionWarning(
                    f"cannot collect {name!r} because it is not a function."
                ),
                category=None,
                filename=str(filename),
                lineno=lineno + 1,
            )
        elif getattr(obj, "__test__", True):
            if inspect.isgeneratorfunction(obj):
                # Generator "tests" were a nose/old-pytest idiom; they would
                # silently pass today, so fail loudly instead.
                fail(
                    f"'yield' keyword is allowed in fixtures, but not in tests ({name})",
                    pytrace=False,
                )
            return list(collector._genfunctions(name, obj))
        return None
    return None
243
+
244
+
245
class PyobjMixin(nodes.Node):
    """this mix-in inherits from Node to carry over the typing information

    as its intended to always mix in before a node
    its position in the mro is unaffected"""

    # When True, accessing ``obj`` for the first time also copies the Python
    # object's marks onto this node (see the ``obj`` property below).
    _ALLOW_MARKERS = True

    @property
    def module(self):
        """Python module object this node was collected from (can be None)."""
        node = self.getparent(Module)
        return node.obj if node is not None else None

    @property
    def cls(self):
        """Python class object this node was collected from (can be None)."""
        node = self.getparent(Class)
        return node.obj if node is not None else None

    @property
    def instance(self):
        """Python instance object the function is bound to.

        Returns None if not a test method, e.g. for a standalone test function,
        a class or a module.
        """
        # Overridden by Function.
        return None

    @property
    def obj(self):
        """Underlying Python object (lazily resolved and cached in ``_obj``)."""
        obj = getattr(self, "_obj", None)
        if obj is None:
            self._obj = obj = self._getobj()
            # XXX evil hack
            # used to avoid Function marker duplication
            if self._ALLOW_MARKERS:
                self.own_markers.extend(get_unpacked_marks(self.obj))
                # This assumes that `obj` is called before there is a chance
                # to add custom keys to `self.keywords`, so no fear of overriding.
                self.keywords.update((mark.name, mark) for mark in self.own_markers)
        return obj

    @obj.setter
    def obj(self, value):
        # Allow callers to pre-seed the cached object (skips lazy resolution).
        self._obj = value

    def _getobj(self):
        """Get the underlying Python object. May be overwritten by subclasses."""
        # TODO: Improve the type of `parent` such that assert/ignore aren't needed.
        assert self.parent is not None
        obj = self.parent.obj  # type: ignore[attr-defined]
        return getattr(obj, self.name)

    def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str:
        """Return Python path relative to the containing module.

        :param stopatmodule: Stop walking parents at the enclosing Module.
        :param includemodule: Include the module name (without extension) in
            the returned dotted path when stopping at the module.
        """
        parts = []
        for node in self.iter_parents():
            name = node.name
            if isinstance(node, Module):
                # Strip the ``.py`` suffix so the path is importable-looking.
                name = os.path.splitext(name)[0]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        # Parents were visited innermost-first; reverse for outer-to-inner order.
        parts.reverse()
        return ".".join(parts)

    def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
        """Return (fspath, lineno, domain-path) used for test reporting."""
        # XXX caching?
        path, lineno = getfslineno(self.obj)
        modpath = self.getmodpath()
        return path, lineno, modpath
321
+
322
+
323
# As an optimization, these builtin attribute names are pre-ignored when
# iterating over an object during collection -- the pytest_pycollect_makeitem
# hook is not called for them.
# fmt: off
class _EmptyClass: pass  # noqa: E701
# Union of the attribute names present on an empty module, an empty class,
# and an empty instance -- i.e. everything Python itself provides.
IGNORED_ATTRIBUTES = frozenset.union(
    frozenset(),
    # Module.
    dir(types.ModuleType("empty_module")),
    # Some extra module attributes the above doesn't catch.
    {"__builtins__", "__file__", "__cached__"},
    # Class.
    dir(_EmptyClass),
    # Instance.
    dir(_EmptyClass()),
)
del _EmptyClass
# fmt: on
341
+
342
+
343
class PyCollector(PyobjMixin, nodes.Collector, abc.ABC):
    """Base collector for Python objects (modules and classes)."""

    def funcnamefilter(self, name: str) -> bool:
        """True if *name* matches the ``python_functions`` ini option."""
        return self._matches_prefix_or_glob_option("python_functions", name)

    def isnosetest(self, obj: object) -> bool:
        """Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator.
        """
        # We explicitly check for "is True" here to not mistakenly treat
        # classes with a custom __getattr__ returning something truthy (like a
        # function) as test classes.
        return safe_getattr(obj, "__test__", False) is True

    def classnamefilter(self, name: str) -> bool:
        """True if *name* matches the ``python_classes`` ini option."""
        return self._matches_prefix_or_glob_option("python_classes", name)

    def istestfunction(self, obj: object, name: str) -> bool:
        """True if *obj* should be collected as a test function.

        Fixtures are explicitly excluded even when their names match.
        """
        if self.funcnamefilter(name) or self.isnosetest(obj):
            if isinstance(obj, (staticmethod, classmethod)):
                # staticmethods and classmethods need to be unwrapped.
                obj = safe_getattr(obj, "__func__", False)
            return callable(obj) and fixtures.getfixturemarker(obj) is None
        else:
            return False

    def istestclass(self, obj: object, name: str) -> bool:
        """True if *obj* should be collected as a test class (abstract
        classes are skipped)."""
        if not (self.classnamefilter(name) or self.isnosetest(obj)):
            return False
        if inspect.isabstract(obj):
            return False
        return True

    def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool:
        """Check if the given name matches the prefix or glob-pattern defined
        in ini configuration."""
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # Check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call.
            elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
                name, option
            ):
                return True
        return False

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Collect child items/collectors from the underlying Python object,
        preserving definition order within each class and reverse-MRO order
        across base classes."""
        if not getattr(self.obj, "__test__", True):
            return []

        # Avoid random getattrs and peek in the __dict__ instead.
        dicts = [getattr(self.obj, "__dict__", {})]
        if isinstance(self.obj, type):
            for basecls in self.obj.__mro__:
                dicts.append(basecls.__dict__)

        # In each class, nodes should be definition ordered.
        # __dict__ is definition ordered.
        seen: set[str] = set()
        dict_values: list[list[nodes.Item | nodes.Collector]] = []
        collect_imported_tests = self.session.config.getini("collect_imported_tests")
        ihook = self.ihook
        for dic in dicts:
            values: list[nodes.Item | nodes.Collector] = []
            # Note: seems like the dict can change during iteration -
            # be careful not to remove the list() without consideration.
            for name, obj in list(dic.items()):
                if name in IGNORED_ATTRIBUTES:
                    continue
                if name in seen:
                    continue
                seen.add(name)

                if not collect_imported_tests and isinstance(self, Module):
                    # Do not collect functions and classes from other modules.
                    if inspect.isfunction(obj) or inspect.isclass(obj):
                        if obj.__module__ != self._getobj().__name__:
                            continue

                res = ihook.pytest_pycollect_makeitem(
                    collector=self, name=name, obj=obj
                )
                if res is None:
                    continue
                elif isinstance(res, list):
                    values.extend(res)
                else:
                    values.append(res)
            dict_values.append(values)

        # Between classes in the class hierarchy, reverse-MRO order -- nodes
        # inherited from base classes should come before subclasses.
        result = []
        for values in reversed(dict_values):
            result.extend(values)
        return result

    def _genfunctions(self, name: str, funcobj) -> Iterator[Function]:
        """Yield one Function item per parametrized call of *funcobj*
        (a single item when there is no parametrization)."""
        modulecol = self.getparent(Module)
        assert modulecol is not None
        module = modulecol.obj
        clscol = self.getparent(Class)
        cls = (clscol and clscol.obj) or None

        definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
        fixtureinfo = definition._fixtureinfo

        # pytest_generate_tests impls call metafunc.parametrize() which fills
        # metafunc._calls, the outcome of the hook.
        metafunc = Metafunc(
            definition=definition,
            fixtureinfo=fixtureinfo,
            config=self.config,
            cls=cls,
            module=module,
            _ispytest=True,
        )
        # Module- and class-level pytest_generate_tests functions are invoked
        # in addition to the registered hook implementations.
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if cls is not None and hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))

        if not metafunc._calls:
            yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
        else:
            metafunc._recompute_direct_params_indices()
            # Direct parametrizations taking place in module/class-specific
            # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure
            # we update what the function really needs a.k.a its fixture closure. Note that
            # direct parametrizations using `@pytest.mark.parametrize` have already been considered
            # into making the closure using `ignore_args` arg to `getfixtureclosure`.
            fixtureinfo.prune_dependency_tree()

            for callspec in metafunc._calls:
                subname = f"{name}[{callspec.id}]" if callspec._idlist else name
                yield Function.from_parent(
                    self,
                    name=subname,
                    callspec=callspec,
                    fixtureinfo=fixtureinfo,
                    keywords={callspec.id: True},
                    originalname=name,
                )
489
+
490
+
491
def importtestmodule(
    path: Path,
    config: Config,
):
    """Import the test module at *path*, translating import-time failures into
    :class:`CollectError` with user-friendly messages.

    The successfully imported module is also registered with the plugin
    manager (so e.g. module-level hooks are picked up).
    """
    # We assume we are only called once per module.
    importmode = config.getoption("--import-mode")
    try:
        mod = import_path(
            path,
            mode=importmode,
            root=config.rootpath,
            consider_namespace_packages=config.getini("consider_namespace_packages"),
        )
    except SyntaxError as e:
        raise nodes.Collector.CollectError(
            ExceptionInfo.from_current().getrepr(style="short")
        ) from e
    except ImportPathMismatchError as e:
        # Same basename imported from two different locations (usually stale
        # __pycache__ or duplicate test file names).
        raise nodes.Collector.CollectError(
            "import file mismatch:\n"
            "imported module {!r} has this __file__ attribute:\n"
            "  {}\n"
            "which is not the same as the test file we want to collect:\n"
            "  {}\n"
            "HINT: remove __pycache__ / .pyc files and/or use a "
            "unique basename for your test file modules".format(*e.args)
        ) from e
    except ImportError as e:
        exc_info = ExceptionInfo.from_current()
        # Hide pytest-internal frames unless the user asked for more verbosity.
        if config.get_verbosity() < 2:
            exc_info.traceback = exc_info.traceback.filter(filter_traceback)
        exc_repr = (
            exc_info.getrepr(style="short")
            if exc_info.traceback
            else exc_info.exconly()
        )
        formatted_tb = str(exc_repr)
        raise nodes.Collector.CollectError(
            f"ImportError while importing test module '{path}'.\n"
            "Hint: make sure your test modules/packages have valid Python names.\n"
            "Traceback:\n"
            f"{formatted_tb}"
        ) from e
    except skip.Exception as e:
        # pytest.skip() at module import time is only honored when the user
        # explicitly opted in via allow_module_level=True.
        if e.allow_module_level:
            raise
        raise nodes.Collector.CollectError(
            "Using pytest.skip outside of a test will skip the entire module. "
            "If that's your intention, pass `allow_module_level=True`. "
            "If you want to skip a specific test or an entire class, "
            "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
        ) from e
    config.pluginmanager.consider_module(mod)
    return mod
545
+
546
+
547
class Module(nodes.File, PyCollector):
    """Collector for test classes and functions in a Python module."""

    def _getobj(self):
        """Import and return the module object for this file."""
        return importtestmodule(self.path, self.config)

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Collect children after registering xunit-style setup fixtures and
        the module's own fixture factories."""
        self._register_setup_module_fixture()
        self._register_setup_function_fixture()
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def _register_setup_module_fixture(self) -> None:
        """Register an autouse, module-scoped fixture for the collected module object
        that invokes setUpModule/tearDownModule if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_module = _get_first_non_fixture_func(
            self.obj, ("setUpModule", "setup_module")
        )
        teardown_module = _get_first_non_fixture_func(
            self.obj, ("tearDownModule", "teardown_module")
        )

        # Nothing to wrap -- skip registering the fixture entirely.
        if setup_module is None and teardown_module is None:
            return

        def xunit_setup_module_fixture(request) -> Generator[None]:
            module = request.module
            if setup_module is not None:
                _call_with_optional_argument(setup_module, module)
            yield
            if teardown_module is not None:
                _call_with_optional_argument(teardown_module, module)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
            func=xunit_setup_module_fixture,
            nodeid=self.nodeid,
            scope="module",
            autouse=True,
        )

    def _register_setup_function_fixture(self) -> None:
        """Register an autouse, function-scoped fixture for the collected module object
        that invokes setup_function/teardown_function if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
        teardown_function = _get_first_non_fixture_func(
            self.obj, ("teardown_function",)
        )
        if setup_function is None and teardown_function is None:
            return

        def xunit_setup_function_fixture(request) -> Generator[None]:
            if request.instance is not None:
                # in this case we are bound to an instance, so we need to let
                # setup_method handle this
                yield
                return
            function = request.function
            if setup_function is not None:
                _call_with_optional_argument(setup_function, function)
            yield
            if teardown_function is not None:
                _call_with_optional_argument(teardown_function, function)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
            func=xunit_setup_function_fixture,
            nodeid=self.nodeid,
            scope="function",
            autouse=True,
        )
628
+
629
+
630
class Package(nodes.Directory):
    """Collector for files and directories in a Python package -- directories
    with an `__init__.py` file.

    .. note::

        Directories without an `__init__.py` file are instead collected by
        :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory`
        collectors.

    .. versionchanged:: 8.0

        Now inherits from :class:`~pytest.Directory`.
    """

    def __init__(
        self,
        fspath: LEGACY_PATH | None,
        parent: nodes.Collector,
        # NOTE: following args are unused:
        config=None,
        session=None,
        nodeid=None,
        path: Path | None = None,
    ) -> None:
        # NOTE: Could be just the following, but kept as-is for compat.
        # super().__init__(self, fspath, parent=parent)
        session = parent.session
        super().__init__(
            fspath=fspath,
            path=path,
            parent=parent,
            config=config,
            session=session,
            nodeid=nodeid,
        )

    def setup(self) -> None:
        """Import the package's ``__init__.py`` and run its xunit-style
        setup_module, scheduling teardown_module as a finalizer."""
        init_mod = importtestmodule(self.path / "__init__.py", self.config)

        # Not using fixtures to call setup_module here because autouse fixtures
        # from packages are not called automatically (#4085).
        setup_module = _get_first_non_fixture_func(
            init_mod, ("setUpModule", "setup_module")
        )
        if setup_module is not None:
            _call_with_optional_argument(setup_module, init_mod)

        teardown_module = _get_first_non_fixture_func(
            init_mod, ("tearDownModule", "teardown_module")
        )
        if teardown_module is not None:
            func = partial(_call_with_optional_argument, teardown_module, init_mod)
            self.addfinalizer(func)

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Collect subdirectories and files, honoring ignore hooks and init
        paths; ``__init__.py`` is always collected first."""

        # Always collect __init__.py first.
        def sort_key(entry: os.DirEntry[str]) -> object:
            return (entry.name != "__init__.py", entry.name)

        config = self.config
        col: nodes.Collector | None
        cols: Sequence[nodes.Collector]
        ihook = self.ihook
        for direntry in scandir(self.path, sort_key):
            if direntry.is_dir():
                path = Path(direntry.path)
                if not self.session.isinitpath(path, with_parents=True):
                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
                        continue
                col = ihook.pytest_collect_directory(path=path, parent=self)
                if col is not None:
                    yield col

            elif direntry.is_file():
                path = Path(direntry.path)
                if not self.session.isinitpath(path):
                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
                        continue
                cols = ihook.pytest_collect_file(file_path=path, parent=self)
                yield from cols
711
+
712
+
713
+ def _call_with_optional_argument(func, arg) -> None:
714
+ """Call the given function with the given argument if func accepts one argument, otherwise
715
+ calls func without arguments."""
716
+ arg_count = func.__code__.co_argcount
717
+ if inspect.ismethod(func):
718
+ arg_count -= 1
719
+ if arg_count:
720
+ func(arg)
721
+ else:
722
+ func()
723
+
724
+
725
def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> object | None:
    """Return the attribute from the given object to be used as a setup/teardown
    xunit-style function, but only if not marked as a fixture to avoid calling it twice.
    """
    candidates = (getattr(obj, name, None) for name in names)
    for candidate in candidates:
        if candidate is None:
            continue
        if fixtures.getfixturemarker(candidate) is None:
            return candidate
    return None
734
+
735
+
736
class Class(PyCollector):
    """Collector for test methods (and nested classes) in a Python class."""

    @classmethod
    def from_parent(cls, parent, *, name, obj=None, **kw) -> Self:  # type: ignore[override]
        """The public constructor."""
        # NOTE: ``obj`` is accepted for compatibility but not forwarded; the
        # object is resolved lazily via ``_getobj``.
        return super().from_parent(name=name, parent=parent, **kw)

    def newinstance(self):
        """Instantiate the collected class (it must be constructible without
        arguments -- classes with __init__/__new__ are rejected in collect)."""
        return self.obj()

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Collect test methods, warning and skipping classes that define a
        custom constructor."""
        if not safe_getattr(self.obj, "__test__", True):
            return []
        if hasinit(self.obj):
            assert self.parent is not None
            self.warn(
                PytestCollectionWarning(
                    f"cannot collect test class {self.obj.__name__!r} because it has a "
                    f"__init__ constructor (from: {self.parent.nodeid})"
                )
            )
            return []
        elif hasnew(self.obj):
            assert self.parent is not None
            self.warn(
                PytestCollectionWarning(
                    f"cannot collect test class {self.obj.__name__!r} because it has a "
                    f"__new__ constructor (from: {self.parent.nodeid})"
                )
            )
            return []

        self._register_setup_class_fixture()
        self._register_setup_method_fixture()

        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)

        return super().collect()

    def _register_setup_class_fixture(self) -> None:
        """Register an autouse, class scoped fixture into the collected class object
        that invokes setup_class/teardown_class if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
        teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
        if setup_class is None and teardown_class is None:
            return

        def xunit_setup_class_fixture(request) -> Generator[None]:
            cls = request.cls
            if setup_class is not None:
                func = getimfunc(setup_class)
                _call_with_optional_argument(func, cls)
            yield
            if teardown_class is not None:
                func = getimfunc(teardown_class)
                _call_with_optional_argument(func, cls)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
            func=xunit_setup_class_fixture,
            nodeid=self.nodeid,
            scope="class",
            autouse=True,
        )

    def _register_setup_method_fixture(self) -> None:
        """Register an autouse, function scoped fixture into the collected class object
        that invokes setup_method/teardown_method if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_name = "setup_method"
        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
        teardown_name = "teardown_method"
        teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
        if setup_method is None and teardown_method is None:
            return

        def xunit_setup_method_fixture(request) -> Generator[None]:
            instance = request.instance
            method = request.function
            if setup_method is not None:
                # Look up the bound method on the instance (not the unbound
                # function found above) so ``self`` is supplied.
                func = getattr(instance, setup_name)
                _call_with_optional_argument(func, method)
            yield
            if teardown_method is not None:
                func = getattr(instance, teardown_name)
                _call_with_optional_argument(func, method)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
            func=xunit_setup_method_fixture,
            nodeid=self.nodeid,
            scope="function",
            autouse=True,
        )
840
+
841
+
842
def hasinit(obj: object) -> bool:
    """Return True if *obj* defines an ``__init__`` different from
    ``object.__init__``."""
    init: object = getattr(obj, "__init__", None)
    return (init != object.__init__) if init else False
847
+
848
+
849
def hasnew(obj: object) -> bool:
    """Return True if *obj* defines a ``__new__`` different from
    ``object.__new__``."""
    new: object = getattr(obj, "__new__", None)
    return (new != object.__new__) if new else False
854
+
855
+
856
@final
@dataclasses.dataclass(frozen=True)
class IdMaker:
    """Make IDs for a parametrization."""

    __slots__ = (
        "argnames",
        "config",
        "func_name",
        "idfn",
        "ids",
        "nodeid",
        "parametersets",
    )

    # The argnames of the parametrization.
    argnames: Sequence[str]
    # The ParameterSets of the parametrization.
    parametersets: Sequence[ParameterSet]
    # Optionally, a user-provided callable to make IDs for parameters in a
    # ParameterSet.
    idfn: Callable[[Any], object | None] | None
    # Optionally, explicit IDs for ParameterSets by index.
    ids: Sequence[object | None] | None
    # Optionally, the pytest config.
    # Used for controlling ASCII escaping, and for calling the
    # :hook:`pytest_make_parametrize_id` hook.
    config: Config | None
    # Optionally, the ID of the node being parametrized.
    # Used only for clearer error messages.
    nodeid: str | None
    # Optionally, the ID of the function being parametrized.
    # Used only for clearer error messages.
    func_name: str | None

    def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]:
        """Make a unique identifier for each ParameterSet, that may be used to
        identify the parametrization in a node ID.

        Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
        - user-provided id, if given
        - else an id derived from the value, applicable for certain types
        - else <argname><parameterset index>
        The counter suffix is appended only in case a string wouldn't be unique
        otherwise.
        """
        resolved_ids = list(self._resolve_ids())
        # All IDs must be unique!
        if len(resolved_ids) != len(set(resolved_ids)):
            # Record the number of occurrences of each ID.
            id_counts = Counter(resolved_ids)
            # Map the ID to its next suffix.
            id_suffixes: dict[str, int] = defaultdict(int)
            # Suffix non-unique IDs to make them unique.
            for index, id in enumerate(resolved_ids):
                if id_counts[id] > 1:
                    if id is HIDDEN_PARAM:
                        self._complain_multiple_hidden_parameter_sets()
                    # Insert "_" between the base and the counter when the
                    # base already ends in a digit, to keep the ID readable.
                    suffix = ""
                    if id and id[-1].isdigit():
                        suffix = "_"
                    new_id = f"{id}{suffix}{id_suffixes[id]}"
                    # Bump the counter until the candidate does not collide
                    # with any other resolved ID.
                    while new_id in set(resolved_ids):
                        id_suffixes[id] += 1
                        new_id = f"{id}{suffix}{id_suffixes[id]}"
                    resolved_ids[index] = new_id
                    id_suffixes[id] += 1
            assert len(resolved_ids) == len(set(resolved_ids)), (
                f"Internal error: {resolved_ids=}"
            )
        return resolved_ids

    def _resolve_ids(self) -> Iterable[str | _HiddenParam]:
        """Resolve IDs for all ParameterSets (may contain duplicates)."""
        for idx, parameterset in enumerate(self.parametersets):
            if parameterset.id is not None:
                # ID provided directly - pytest.param(..., id="...")
                if parameterset.id is HIDDEN_PARAM:
                    yield HIDDEN_PARAM
                else:
                    yield _ascii_escaped_by_config(parameterset.id, self.config)
            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
                # ID provided in the IDs list - parametrize(..., ids=[...]).
                if self.ids[idx] is HIDDEN_PARAM:
                    yield HIDDEN_PARAM
                else:
                    yield self._idval_from_value_required(self.ids[idx], idx)
            else:
                # ID not provided - generate it.
                yield "-".join(
                    self._idval(val, argname, idx)
                    for val, argname in zip(parameterset.values, self.argnames)
                )

    def _idval(self, val: object, argname: str, idx: int) -> str:
        """Make an ID for a parameter in a ParameterSet.

        Tries, in order: the user-provided idfn, the
        pytest_make_parametrize_id hook, the value itself, and finally
        falls back to ``<argname><idx>``.
        """
        idval = self._idval_from_function(val, argname, idx)
        if idval is not None:
            return idval
        idval = self._idval_from_hook(val, argname)
        if idval is not None:
            return idval
        idval = self._idval_from_value(val)
        if idval is not None:
            return idval
        return self._idval_from_argname(argname, idx)

    def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None:
        """Try to make an ID for a parameter in a ParameterSet using the
        user-provided id callable, if given."""
        if self.idfn is None:
            return None
        try:
            id = self.idfn(val)
        except Exception as e:
            # Surface errors in the user's idfn with the node context.
            prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
            msg = "error raised while trying to determine id of parameter '{}' at position {}"
            msg = prefix + msg.format(argname, idx)
            raise ValueError(msg) from e
        if id is None:
            return None
        return self._idval_from_value(id)

    def _idval_from_hook(self, val: object, argname: str) -> str | None:
        """Try to make an ID for a parameter in a ParameterSet by calling the
        :hook:`pytest_make_parametrize_id` hook."""
        if self.config:
            id: str | None = self.config.hook.pytest_make_parametrize_id(
                config=self.config, val=val, argname=argname
            )
            return id
        return None

    def _idval_from_value(self, val: object) -> str | None:
        """Try to make an ID for a parameter in a ParameterSet from its value,
        if the value type is supported."""
        if isinstance(val, (str, bytes)):
            return _ascii_escaped_by_config(val, self.config)
        elif val is None or isinstance(val, (float, int, bool, complex)):
            return str(val)
        elif isinstance(val, re.Pattern):
            return ascii_escaped(val.pattern)
        elif val is NOTSET:
            # Fallback to default. Note that NOTSET is an enum.Enum.
            pass
        elif isinstance(val, enum.Enum):
            return str(val)
        elif isinstance(getattr(val, "__name__", None), str):
            # Name of a class, function, module, etc.
            name: str = getattr(val, "__name__")
            return name
        return None

    def _idval_from_value_required(self, val: object, idx: int) -> str:
        """Like _idval_from_value(), but fails if the type is not supported."""
        id = self._idval_from_value(val)
        if id is not None:
            return id

        # Fail.
        prefix = self._make_error_prefix()
        msg = (
            f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. "
            "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
        )
        fail(msg, pytrace=False)

    @staticmethod
    def _idval_from_argname(argname: str, idx: int) -> str:
        """Make an ID for a parameter in a ParameterSet from the argument name
        and the index of the ParameterSet."""
        return str(argname) + str(idx)

    def _complain_multiple_hidden_parameter_sets(self) -> NoReturn:
        """Fail collection: HIDDEN_PARAM can appear at most once per call."""
        fail(
            f"{self._make_error_prefix()}multiple instances of HIDDEN_PARAM "
            "cannot be used in the same parametrize call, "
            "because the tests names need to be unique."
        )

    def _make_error_prefix(self) -> str:
        """Best-available context ("In <func>: " / "In <nodeid>: ") for error
        messages, or an empty string."""
        if self.func_name is not None:
            return f"In {self.func_name}: "
        elif self.nodeid is not None:
            return f"In {self.nodeid}: "
        else:
            return ""
1043
+
1044
+
1045
@final
@dataclasses.dataclass(frozen=True)
class CallSpec2:
    """A planned parameterized invocation of a test function.

    Calculated during collection for a given test function's Metafunc.
    Once collection is over, each callspec is turned into a single Item
    and stored in item.callspec.
    """

    # arg name -> arg value which will be passed to a fixture or pseudo-fixture
    # of the same name. (indirect or direct parametrization respectively)
    params: dict[str, object] = dataclasses.field(default_factory=dict)
    # arg name -> arg index.
    indices: dict[str, int] = dataclasses.field(default_factory=dict)
    # arg name -> parameter scope.
    # Used for sorting parametrized resources.
    _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict)
    # Parts which will be added to the item's name in `[..]` separated by "-".
    _idlist: Sequence[str] = dataclasses.field(default_factory=tuple)
    # Marks which will be applied to the item.
    marks: list[Mark] = dataclasses.field(default_factory=list)

    def setmulti(
        self,
        *,
        argnames: Iterable[str],
        valset: Iterable[object],
        id: str | _HiddenParam,
        marks: Iterable[Mark | MarkDecorator],
        scope: Scope,
        param_index: int,
        nodeid: str,
    ) -> CallSpec2:
        """Return a new CallSpec2 extended with one parameter set.

        The instance is frozen, so instead of mutating, a copy of all
        mappings is made and a fresh CallSpec2 is returned. Raises a
        CollectError if any argname was already parametrized.
        """
        params = self.params.copy()
        indices = self.indices.copy()
        arg2scope = dict(self._arg2scope)
        for arg, val in zip(argnames, valset):
            if arg in params:
                raise nodes.Collector.CollectError(
                    f"{nodeid}: duplicate parametrization of {arg!r}"
                )
            params[arg] = val
            indices[arg] = param_index
            arg2scope[arg] = scope
        return CallSpec2(
            params=params,
            indices=indices,
            _arg2scope=arg2scope,
            # HIDDEN_PARAM sets contribute no part to the item's id.
            _idlist=self._idlist if id is HIDDEN_PARAM else [*self._idlist, id],
            marks=[*self.marks, *normalize_mark_list(marks)],
        )

    def getparam(self, name: str) -> object:
        """Return the value parametrized for ``name``.

        :raises ValueError: If ``name`` was not parametrized in this callspec.
        """
        try:
            return self.params[name]
        except KeyError as e:
            raise ValueError(name) from e

    @property
    def id(self) -> str:
        # The "[...]" suffix of the item name: id parts joined with dashes.
        return "-".join(self._idlist)
1107
+
1108
+
1109
def get_direct_param_fixture_func(request: FixtureRequest) -> Any:
    """Fixture function backing direct parametrization: hand back the
    parameter attached to the active request unchanged."""
    param = request.param
    return param
1111
+
1112
+
1113
# Used for storing pseudo fixturedefs for direct parametrization.
# Stash key under which a scope node caches the "pseudo" FixtureDefs created
# by Metafunc.parametrize(), so one FixtureDef per argname can be shared by
# parametrize() calls targeting the same scope (see node.stash.setdefault use).
name2pseudofixturedef_key = StashKey[dict[str, FixtureDef[Any]]]()
1115
+
1116
+
1117
@final
class Metafunc:
    """Objects passed to the :hook:`pytest_generate_tests` hook.

    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.
    """

    def __init__(
        self,
        definition: FunctionDefinition,
        fixtureinfo: fixtures.FuncFixtureInfo,
        config: Config,
        cls=None,
        module=None,
        *,
        _ispytest: bool = False,
    ) -> None:
        check_ispytest(_ispytest)

        #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
        self.definition = definition

        #: Access to the :class:`pytest.Config` object for the test session.
        self.config = config

        #: The module object where the test function is defined in.
        self.module = module

        #: Underlying Python test function.
        self.function = definition.obj

        #: Set of fixture names required by the test function.
        self.fixturenames = fixtureinfo.names_closure

        #: Class object where the test function is defined in or ``None``.
        self.cls = cls

        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

        # Result of parametrize().
        self._calls: list[CallSpec2] = []

        # arg name -> "direct"/"indirect", accumulated over parametrize() calls.
        self._params_directness: dict[str, Literal["indirect", "direct"]] = {}

    def parametrize(
        self,
        argnames: str | Sequence[str],
        argvalues: Iterable[ParameterSet | Sequence[object] | object],
        indirect: bool | Sequence[str] = False,
        ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
        scope: _ScopeName | None = None,
        *,
        _param_mark: Mark | None = None,
    ) -> None:
        """Add new invocations to the underlying test function using the list
        of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to setup expensive resources
        see about setting ``indirect`` to do it at test setup time instead.

        Can be called multiple times per test function (but only on different
        argument names), in which case each call parametrizes all previous
        parametrizations, e.g.

        ::

            unparametrized:         t
            parametrize ["x", "y"]: t[x], t[y]
            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]

        :param argnames:
            A comma-separated string denoting one or more argument names, or
            a list/tuple of argument strings.

        :param argvalues:
            The list of argvalues determines how often a test is invoked with
            different argument values.

            If only one argname was specified argvalues is a list of values.
            If N argnames were specified, argvalues must be a list of
            N-tuples, where each tuple-element specifies a value for its
            respective argname.

        :param indirect:
            A list of arguments' names (subset of argnames) or a boolean.
            If True the list contains all names from the argnames. Each
            argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :param ids:
            Sequence of (or generator for) ids for ``argvalues``,
            or a callable to return part of the id for each argvalue.

            With sequences (and generators like ``itertools.count()``) the
            returned ids should be of type ``string``, ``int``, ``float``,
            ``bool``, or ``None``.
            They are mapped to the corresponding index in ``argvalues``.
            ``None`` means to use the auto-generated id.

            .. versionadded:: 8.4
                :ref:`hidden-param` means to hide the parameter set
                from the test name. Can only be used at most 1 time, as
                test names need to be unique.

            If it is a callable it will be called for each entry in
            ``argvalues``, and the return value is used as part of the
            auto-generated id for the whole set (where parts are joined with
            dashes ("-")).
            This is useful to provide more specific ids for certain items, e.g.
            dates. Returning ``None`` will use an auto-generated id.

            If no ids are provided they will be generated automatically from
            the argvalues.

        :param scope:
            If specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        nodeid = self.definition.nodeid

        argnames, parametersets = ParameterSet._for_parametrize(
            argnames,
            argvalues,
            self.function,
            self.config,
            nodeid=self.definition.nodeid,
        )
        # From here on only the normalized parametersets are used.
        del argvalues

        if "request" in argnames:
            fail(
                f"{nodeid}: 'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
                pytrace=False,
            )

        if scope is not None:
            scope_ = Scope.from_user(
                scope, descr=f"parametrize() call in {self.function.__name__}"
            )
        else:
            scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

        self._validate_if_using_arg_names(argnames, indirect)

        # Use any already (possibly) generated ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from:
            generated_ids = _param_mark._param_ids_from._param_ids_generated
            if generated_ids is not None:
                ids = generated_ids

        ids = self._resolve_parameter_set_ids(
            argnames, ids, parametersets, nodeid=self.definition.nodeid
        )

        # Store used (possibly generated) ids with parametrize Marks.
        # (The guard repeats the condition above, so generated_ids is bound.)
        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)

        # Calculate directness.
        arg_directness = self._resolve_args_directness(argnames, indirect)
        self._params_directness.update(arg_directness)

        # Add direct parametrizations as fixturedefs to arg2fixturedefs by
        # registering artificial "pseudo" FixtureDef's such that later at test
        # setup time we can rely on FixtureDefs to exist for all argnames.
        node = None
        # For scopes higher than function, a "pseudo" FixtureDef might have
        # already been created for the scope. We thus store and cache the
        # FixtureDef on the node related to the scope.
        if scope_ is Scope.Function:
            name2pseudofixturedef = None
        else:
            collector = self.definition.parent
            assert collector is not None
            node = get_scope_node(collector, scope_)
            if node is None:
                # If used class scope and there is no class, use module-level
                # collector (for now).
                if scope_ is Scope.Class:
                    assert isinstance(collector, Module)
                    node = collector
                # If used package scope and there is no package, use session
                # (for now).
                elif scope_ is Scope.Package:
                    node = collector.session
                else:
                    assert False, f"Unhandled missing scope: {scope}"
            default: dict[str, FixtureDef[Any]] = {}
            name2pseudofixturedef = node.stash.setdefault(
                name2pseudofixturedef_key, default
            )
        for argname in argnames:
            if arg_directness[argname] == "indirect":
                continue
            if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
                fixturedef = name2pseudofixturedef[argname]
            else:
                fixturedef = FixtureDef(
                    config=self.config,
                    baseid="",
                    argname=argname,
                    func=get_direct_param_fixture_func,
                    scope=scope_,
                    params=None,
                    ids=None,
                    _ispytest=True,
                )
                if name2pseudofixturedef is not None:
                    name2pseudofixturedef[argname] = fixturedef
            self._arg2fixturedefs[argname] = [fixturedef]

        # Create the new calls: if we are parametrize() multiple times (by applying the decorator
        # more than once) then we accumulate those calls generating the cartesian product
        # of all calls.
        newcalls = []
        for callspec in self._calls or [CallSpec2()]:
            for param_index, (param_id, param_set) in enumerate(
                zip(ids, parametersets)
            ):
                newcallspec = callspec.setmulti(
                    argnames=argnames,
                    valset=param_set.values,
                    id=param_id,
                    marks=param_set.marks,
                    scope=scope_,
                    param_index=param_index,
                    nodeid=nodeid,
                )
                newcalls.append(newcallspec)
        self._calls = newcalls

    def _resolve_parameter_set_ids(
        self,
        argnames: Sequence[str],
        ids: Iterable[object | None] | Callable[[Any], object | None] | None,
        parametersets: Sequence[ParameterSet],
        nodeid: str,
    ) -> list[str | _HiddenParam]:
        """Resolve the actual ids for the given parameter sets.

        :param argnames:
            Argument names passed to ``parametrize()``.
        :param ids:
            The `ids` parameter of the ``parametrize()`` call (see docs).
        :param parametersets:
            The parameter sets, each containing a set of values corresponding
            to ``argnames``.
        :param nodeid str:
            The nodeid of the definition item that generated this
            parametrization.
        :returns:
            List with ids for each parameter set given.
        """
        if ids is None:
            idfn = None
            ids_ = None
        elif callable(ids):
            idfn = ids
            ids_ = None
        else:
            idfn = None
            ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
        id_maker = IdMaker(
            argnames,
            parametersets,
            idfn,
            ids_,
            self.config,
            nodeid=nodeid,
            func_name=self.function.__name__,
        )
        return id_maker.make_unique_parameterset_ids()

    def _validate_ids(
        self,
        ids: Iterable[object | None],
        parametersets: Sequence[ParameterSet],
        func_name: str,
    ) -> list[object | None]:
        """Materialize ``ids`` into a list, failing on a count mismatch.

        Generators without a length are trimmed to ``len(parametersets)``.
        """
        try:
            num_ids = len(ids)  # type: ignore[arg-type]
        except TypeError:
            try:
                iter(ids)
            except TypeError as e:
                raise TypeError("ids must be a callable or an iterable") from e
            num_ids = len(parametersets)

        # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
        if num_ids != len(parametersets) and num_ids != 0:
            msg = "In {}: {} parameter sets specified, with different number of ids: {}"
            fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)

        return list(itertools.islice(ids, num_ids))

    def _resolve_args_directness(
        self,
        argnames: Sequence[str],
        indirect: bool | Sequence[str],
    ) -> dict[str, Literal["indirect", "direct"]]:
        """Resolve if each parametrized argument must be considered an indirect
        parameter to a fixture of the same name, or a direct parameter to the
        parametrized function, based on the ``indirect`` parameter of the
        parametrized() call.

        :param argnames:
            List of argument names passed to ``parametrize()``.
        :param indirect:
            Same as the ``indirect`` parameter of ``parametrize()``.
        :returns
            A dict mapping each arg name to either "indirect" or "direct".
        """
        arg_directness: dict[str, Literal["indirect", "direct"]]
        if isinstance(indirect, bool):
            arg_directness = dict.fromkeys(
                argnames, "indirect" if indirect else "direct"
            )
        elif isinstance(indirect, Sequence):
            arg_directness = dict.fromkeys(argnames, "direct")
            for arg in indirect:
                if arg not in argnames:
                    fail(
                        f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist",
                        pytrace=False,
                    )
                arg_directness[arg] = "indirect"
        else:
            fail(
                f"In {self.function.__name__}: expected Sequence or boolean"
                f" for indirect, got {type(indirect).__name__}",
                pytrace=False,
            )
        return arg_directness

    def _validate_if_using_arg_names(
        self,
        argnames: Sequence[str],
        indirect: bool | Sequence[str],
    ) -> None:
        """Check if all argnames are being used, by default values, or directly/indirectly.

        :param List[str] argnames: List of argument names passed to ``parametrize()``.
        :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
        :raises ValueError: If validation fails.
        """
        default_arg_names = set(get_default_arg_names(self.function))
        func_name = self.function.__name__
        for arg in argnames:
            if arg not in self.fixturenames:
                if arg in default_arg_names:
                    fail(
                        f"In {func_name}: function already takes an argument '{arg}' with a default value",
                        pytrace=False,
                    )
                else:
                    if isinstance(indirect, Sequence):
                        name = "fixture" if arg in indirect else "argument"
                    else:
                        name = "fixture" if indirect else "argument"
                    fail(
                        f"In {func_name}: function uses no {name} '{arg}'",
                        pytrace=False,
                    )

    def _recompute_direct_params_indices(self) -> None:
        # Re-number direct parameters sequentially per callspec so each call
        # gets a distinct index for its direct (pseudo-fixture) arguments.
        for argname, param_type in self._params_directness.items():
            if param_type == "direct":
                for i, callspec in enumerate(self._calls):
                    callspec.indices[argname] = i
1491
+
1492
+
1493
def _find_parametrized_scope(
    argnames: Sequence[str],
    arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]],
    indirect: bool | Sequence[str],
) -> Scope:
    """Find the most appropriate scope for a parametrized call based on its arguments.

    When there's at least one direct argument, always use "function" scope.

    When a test function is parametrized and all its arguments are indirect
    (e.g. fixtures), return the most narrow scope based on the fixtures used.

    Related to issue #1832, based on code posted by @Kingdread.
    """
    if isinstance(indirect, Sequence):
        everything_indirect = len(indirect) == len(argnames)
    else:
        everything_indirect = bool(indirect)

    if not everything_indirect:
        # At least one direct argument forces the narrowest scope.
        return Scope.Function

    # All arguments map to fixtures: take the narrowest scope among them.
    used_scopes = [
        defs[-1]._scope
        for name, defs in (arg2fixturedefs or {}).items()
        if name in argnames
    ]
    return min(used_scopes, default=Scope.Function)
1523
+
1524
+
1525
def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str:
    """ASCII-escape *val* for use in a test id, unless the config ini option
    opts out of escaping (or there is no config at all)."""
    escape_disabled = (
        config.getini(
            "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
        )
        if config is not None
        else False
    )
    if escape_disabled:
        # TODO: If escaping is turned off and the user passes bytes,
        # will return a bytes. For now we ignore this but the
        # code *probably* doesn't handle this case.
        return val  # type: ignore
    return ascii_escaped(val)
1536
+
1537
+
1538
class Function(PyobjMixin, nodes.Item):
    """Item responsible for setting up and executing a Python test function.

    :param name:
        The full function name, including any decorations like those
        added by parametrization (``my_func[my_param]``).
    :param parent:
        The parent Node.
    :param config:
        The pytest Config object.
    :param callspec:
        If given, this function has been parametrized and the callspec contains
        meta information about the parametrization.
    :param callobj:
        If given, the object which will be called when the Function is invoked,
        otherwise the callobj will be obtained from ``parent`` using ``originalname``.
    :param keywords:
        Keywords bound to the function object for "-k" matching.
    :param session:
        The pytest Session object.
    :param fixtureinfo:
        Fixture information already resolved at this fixture node..
    :param originalname:
        The attribute name to use for accessing the underlying function object.
        Defaults to ``name``. Set this if name is different from the original name,
        for example when it contains decorations like those added by parametrization
        (``my_func[my_param]``).
    """

    # Disable since functions handle it themselves.
    _ALLOW_MARKERS = False

    def __init__(
        self,
        name: str,
        parent,
        config: Config | None = None,
        callspec: CallSpec2 | None = None,
        callobj=NOTSET,
        keywords: Mapping[str, Any] | None = None,
        session: Session | None = None,
        fixtureinfo: FuncFixtureInfo | None = None,
        originalname: str | None = None,
    ) -> None:
        super().__init__(name, parent, config=config, session=session)

        if callobj is not NOTSET:
            self._obj = callobj
            self._instance = getattr(callobj, "__self__", None)

        #: Original function name, without any decorations (for example
        #: parametrization adds a ``"[...]"`` suffix to function names), used to access
        #: the underlying function object from ``parent`` (in case ``callobj`` is not given
        #: explicitly).
        #:
        #: .. versionadded:: 3.0
        self.originalname = originalname or name

        # Note: when FunctionDefinition is introduced, we should change ``originalname``
        # to a readonly property that returns FunctionDefinition.name.

        self.own_markers.extend(get_unpacked_marks(self.obj))
        if callspec:
            self.callspec = callspec
            self.own_markers.extend(callspec.marks)

        # todo: this is a hell of a hack
        # https://github.com/pytest-dev/pytest/issues/4569
        # Note: the order of the updates is important here; indicates what
        # takes priority (ctor argument over function attributes over markers).
        # Take own_markers only; NodeKeywords handles parent traversal on its own.
        self.keywords.update((mark.name, mark) for mark in self.own_markers)
        self.keywords.update(self.obj.__dict__)
        if keywords:
            self.keywords.update(keywords)

        if fixtureinfo is None:
            fm = self.session._fixturemanager
            fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls)
        self._fixtureinfo: FuncFixtureInfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

    # todo: determine sound type limitations
    @classmethod
    def from_parent(cls, parent, **kw) -> Self:
        """The public constructor."""
        return super().from_parent(parent=parent, **kw)

    def _initrequest(self) -> None:
        # Fresh funcargs/request pair for this item; filled during setup().
        self.funcargs: dict[str, object] = {}
        self._request = fixtures.TopRequest(self, _ispytest=True)

    @property
    def function(self):
        """Underlying python 'function' object."""
        return getimfunc(self.obj)

    @property
    def instance(self):
        # Lazily created; cached in self._instance after first access.
        try:
            return self._instance
        except AttributeError:
            if isinstance(self.parent, Class):
                # Each Function gets a fresh class instance.
                self._instance = self._getinstance()
            else:
                self._instance = None
            return self._instance

    def _getinstance(self):
        if isinstance(self.parent, Class):
            # Each Function gets a fresh class instance.
            return self.parent.newinstance()
        else:
            return None

    def _getobj(self):
        instance = self.instance
        if instance is not None:
            parent_obj = instance
        else:
            assert self.parent is not None
            parent_obj = self.parent.obj  # type: ignore[attr-defined]
        return getattr(parent_obj, self.originalname)

    @property
    def _pyfuncitem(self):
        """(compatonly) for code expecting pytest-2.2 style request objects."""
        return self

    def runtest(self) -> None:
        """Execute the underlying test function."""
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self) -> None:
        # Resolve and fill all fixtures needed by the test before it runs.
        self._request._fillfixtures()

    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
        # Trim the traceback to frames relevant to the test function, unless
        # --fulltrace was requested.
        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
            code = _pytest._code.Code.from_function(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                # Progressively weaker cuts if the precise one had no effect.
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            ntraceback = ntraceback.filter(excinfo)

            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame.
            if self.config.getoption("tbstyle", "auto") == "auto":
                if len(ntraceback) > 2:
                    ntraceback = Traceback(
                        (
                            ntraceback[0],
                            *(t.with_repr_style("short") for t in ntraceback[1:-1]),
                            ntraceback[-1],
                        )
                    )

            return ntraceback
        return excinfo.traceback

    # TODO: Type ignored -- breaks Liskov Substitution.
    def repr_failure(  # type: ignore[override]
        self,
        excinfo: ExceptionInfo[BaseException],
    ) -> str | TerminalRepr:
        style = self.config.getoption("tbstyle", "auto")
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
1714
+
1715
+
1716
class FunctionDefinition(Function):
    """This class is a stop gap solution until we evolve to have actual function
    definition nodes and manage to get rid of ``metafunc``."""

    def runtest(self) -> None:
        # A FunctionDefinition only carries the function object for metafunc
        # purposes; actually executing one would be a collection bug.
        raise RuntimeError("function definitions are not supposed to be run as tests")

    # Same guard for the setup phase (a deliberate alias, not a new function).
    setup = runtest
archive/Axiovorax/.venv/Lib/site-packages/_pytest/python_api.py ADDED
@@ -0,0 +1,793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+
4
+ from collections.abc import Collection
5
+ from collections.abc import Mapping
6
+ from collections.abc import Sequence
7
+ from collections.abc import Sized
8
+ from decimal import Decimal
9
+ import math
10
+ from numbers import Complex
11
+ import pprint
12
+ import sys
13
+ from typing import Any
14
+ from typing import TYPE_CHECKING
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ from numpy import ndarray
19
+
20
+
21
+ def _compare_approx(
22
+ full_object: object,
23
+ message_data: Sequence[tuple[str, str, str]],
24
+ number_of_elements: int,
25
+ different_ids: Sequence[object],
26
+ max_abs_diff: float,
27
+ max_rel_diff: float,
28
+ ) -> list[str]:
29
+ message_list = list(message_data)
30
+ message_list.insert(0, ("Index", "Obtained", "Expected"))
31
+ max_sizes = [0, 0, 0]
32
+ for index, obtained, expected in message_list:
33
+ max_sizes[0] = max(max_sizes[0], len(index))
34
+ max_sizes[1] = max(max_sizes[1], len(obtained))
35
+ max_sizes[2] = max(max_sizes[2], len(expected))
36
+ explanation = [
37
+ f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:",
38
+ f"Max absolute difference: {max_abs_diff}",
39
+ f"Max relative difference: {max_rel_diff}",
40
+ ] + [
41
+ f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}"
42
+ for indexes, obtained, expected in message_list
43
+ ]
44
+ return explanation
45
+
46
+
47
+ # builtin pytest.approx helper
48
+
49
+
50
class ApproxBase:
    """Provide shared utilities for making approximate comparisons between
    numbers or sequences of numbers."""

    # Tell numpy to use our `__eq__` operator instead of its.
    __array_ufunc__ = None
    __array_priority__ = 100

    def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None:
        __tracebackhide__ = True
        self.expected = expected
        self.abs = abs
        self.rel = rel
        self.nan_ok = nan_ok
        self._check_type()

    def __repr__(self) -> str:
        raise NotImplementedError

    def _repr_compare(self, other_side: Any) -> list[str]:
        return [
            "comparison failed",
            f"Obtained: {other_side}",
            f"Expected: {self}",
        ]

    def __eq__(self, actual) -> bool:
        # Every (actual, expected) pair produced by the subclass must match
        # as an approximate scalar comparison.
        for got, wanted in self._yield_comparisons(actual):
            if not (got == self._approx_scalar(wanted)):
                return False
        return True

    def __bool__(self):
        __tracebackhide__ = True
        raise AssertionError(
            "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?"
        )

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    def __ne__(self, actual) -> bool:
        return not (actual == self)

    def _approx_scalar(self, x) -> ApproxScalar:
        # Decimals get the Decimal-aware scalar wrapper; everything else the
        # generic one. Tolerances and nan handling are forwarded unchanged.
        wrapper = ApproxDecimal if isinstance(x, Decimal) else ApproxScalar
        return wrapper(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)

    def _yield_comparisons(self, actual):
        """Yield all the pairs of numbers to be compared.

        This is used to implement the `__eq__` method.
        """
        raise NotImplementedError

    def _check_type(self) -> None:
        """Raise a TypeError if the expected value is not a valid type."""
        # This is only a concern if the expected value is a sequence. In every
        # other case, the approx() function ensures that the expected value has
        # a numeric type. For this reason, the default is to do nothing. The
        # classes that deal with sequences should reimplement this method to
        # raise if there are any non-numeric elements in the sequence.
112
+
113
+
114
def _recursive_sequence_map(f, x):
    """Recursively map a function over a sequence of arbitrary depth"""
    if isinstance(x, (list, tuple)):
        # Preserve the concrete container type: list stays list, tuple stays
        # tuple (including subclasses, via type(x)).
        return type(x)(_recursive_sequence_map(f, item) for item in x)
    if _is_sequence_like(x):
        # Other sequence-like objects are normalized to a plain list.
        return [_recursive_sequence_map(f, item) for item in x]
    # Leaf value: apply the mapped function.
    return f(x)
123
+
124
+
125
+ class ApproxNumpy(ApproxBase):
126
+ """Perform approximate comparisons where the expected value is numpy array."""
127
+
128
+ def __repr__(self) -> str:
129
+ list_scalars = _recursive_sequence_map(
130
+ self._approx_scalar, self.expected.tolist()
131
+ )
132
+ return f"approx({list_scalars!r})"
133
+
134
    def _repr_compare(self, other_side: ndarray | list[Any]) -> list[str]:
        # Build a detailed, element-by-element failure explanation comparing
        # the expected array against ``other_side``.
        import itertools
        import math

        def get_value_from_nested_list(
            nested_list: list[Any], nd_index: tuple[Any, ...]
        ) -> Any:
            """
            Helper function to get the value out of a nested list, given an n-dimensional index.
            This mimics numpy's indexing, but for raw nested python lists.
            """
            value: Any = nested_list
            for i in nd_index:
                value = value[i]
            return value

        np_array_shape = self.expected.shape
        approx_side_as_seq = _recursive_sequence_map(
            self._approx_scalar, self.expected.tolist()
        )

        # convert other_side to numpy array to ensure shape attribute is available
        other_side_as_array = _as_numpy_array(other_side)
        assert other_side_as_array is not None

        if np_array_shape != other_side_as_array.shape:
            # Shape mismatch: no element-wise report is possible.
            return [
                "Impossible to compare arrays with different shapes.",
                f"Shapes: {np_array_shape} and {other_side_as_array.shape}",
            ]

        number_of_elements = self.expected.size
        max_abs_diff = -math.inf
        max_rel_diff = -math.inf
        different_ids = []
        # Walk every n-dimensional index and record mismatching elements
        # together with the largest absolute/relative differences seen.
        for index in itertools.product(*(range(i) for i in np_array_shape)):
            approx_value = get_value_from_nested_list(approx_side_as_seq, index)
            other_value = get_value_from_nested_list(other_side_as_array, index)
            if approx_value != other_value:
                abs_diff = abs(approx_value.expected - other_value)
                max_abs_diff = max(max_abs_diff, abs_diff)
                if other_value == 0.0:
                    # Relative difference is undefined against zero.
                    max_rel_diff = math.inf
                else:
                    max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
                different_ids.append(index)

        # One (index, obtained, expected) row per mismatching element.
        message_data = [
            (
                str(index),
                str(get_value_from_nested_list(other_side_as_array, index)),
                str(get_value_from_nested_list(approx_side_as_seq, index)),
            )
            for index in different_ids
        ]
        return _compare_approx(
            self.expected,
            message_data,
            number_of_elements,
            different_ids,
            max_abs_diff,
            max_rel_diff,
        )
197
+
198
+ def __eq__(self, actual) -> bool:
199
+ import numpy as np
200
+
201
+ # self.expected is supposed to always be an array here.
202
+
203
+ if not np.isscalar(actual):
204
+ try:
205
+ actual = np.asarray(actual)
206
+ except Exception as e:
207
+ raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
208
+
209
+ if not np.isscalar(actual) and actual.shape != self.expected.shape:
210
+ return False
211
+
212
+ return super().__eq__(actual)
213
+
214
+ def _yield_comparisons(self, actual):
215
+ import numpy as np
216
+
217
+ # `actual` can either be a numpy array or a scalar, it is treated in
218
+ # `__eq__` before being passed to `ApproxBase.__eq__`, which is the
219
+ # only method that calls this one.
220
+
221
+ if np.isscalar(actual):
222
+ for i in np.ndindex(self.expected.shape):
223
+ yield actual, self.expected[i].item()
224
+ else:
225
+ for i in np.ndindex(self.expected.shape):
226
+ yield actual[i].item(), self.expected[i].item()
227
+
228
+
229
class ApproxMapping(ApproxBase):
    """Perform approximate comparisons where the expected value is a mapping
    with numeric values (the keys can be anything)."""

    def __repr__(self) -> str:
        """Show the expected mapping with each value wrapped in its tolerance."""
        approximated = {k: self._approx_scalar(v) for k, v in self.expected.items()}
        return f"approx({approximated!r})"

    def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]:
        """Build the detailed per-key failure explanation for a mapping comparison."""
        import math

        approx_side_as_map = {
            k: self._approx_scalar(v) for k, v in self.expected.items()
        }

        number_of_elements = len(approx_side_as_map)
        max_abs_diff = -math.inf
        max_rel_diff = -math.inf
        different_ids = []
        # NOTE(review): values are paired positionally via zip(); this assumes
        # both mappings iterate in the same key order -- TODO confirm callers
        # guarantee this.
        for (approx_key, approx_value), other_value in zip(
            approx_side_as_map.items(), other_side.values()
        ):
            if approx_value != other_value:
                # Diff statistics are only meaningful when both sides hold
                # actual values (None marks an "optional" entry).
                if approx_value.expected is not None and other_value is not None:
                    try:
                        abs_diff = abs(approx_value.expected - other_value)
                        max_abs_diff = max(max_abs_diff, abs_diff)
                        if approx_value.expected == 0.0:
                            max_rel_diff = math.inf
                        else:
                            max_rel_diff = max(
                                max_rel_diff,
                                abs(abs_diff / approx_value.expected),
                            )
                    except ZeroDivisionError:
                        pass
                different_ids.append(approx_key)

        message_data = [
            (str(key), str(other_side[key]), str(approx_side_as_map[key]))
            for key in different_ids
        ]

        return _compare_approx(
            self.expected,
            message_data,
            number_of_elements,
            different_ids,
            max_abs_diff,
            max_rel_diff,
        )

    def __eq__(self, actual) -> bool:
        """A mapping matches when the key sets agree and every value matches."""
        try:
            actual_keys = set(actual.keys())
        except AttributeError:
            # `actual` is not a mapping at all.
            return False
        if actual_keys != set(self.expected.keys()):
            return False

        return super().__eq__(actual)

    def _yield_comparisons(self, actual):
        """Pair actual/expected values key by key for ApproxBase.__eq__."""
        for key in self.expected.keys():
            yield actual[key], self.expected[key]

    def _check_type(self) -> None:
        """Reject nested mappings, which approx() cannot meaningfully compare."""
        __tracebackhide__ = True
        for key, value in self.expected.items():
            if isinstance(value, type(self.expected)):
                msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
                raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
303
+
304
+
305
class ApproxSequenceLike(ApproxBase):
    """Perform approximate comparisons where the expected value is a sequence of numbers."""

    def __repr__(self) -> str:
        # Preserve tuple/list container types in the repr; any other
        # sequence-like container is shown as a plain list.
        seq_type = type(self.expected)
        if seq_type not in (tuple, list):
            seq_type = list
        return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})"

    def _repr_compare(self, other_side: Sequence[float]) -> list[str]:
        """Build the detailed per-index failure explanation for a sequence comparison."""
        import math

        if len(self.expected) != len(other_side):
            return [
                "Impossible to compare lists with different sizes.",
                f"Lengths: {len(self.expected)} and {len(other_side)}",
            ]

        approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected)

        number_of_elements = len(approx_side_as_map)
        max_abs_diff = -math.inf
        max_rel_diff = -math.inf
        different_ids = []
        for i, (approx_value, other_value) in enumerate(
            zip(approx_side_as_map, other_side)
        ):
            if approx_value != other_value:
                try:
                    abs_diff = abs(approx_value.expected - other_value)
                    max_abs_diff = max(max_abs_diff, abs_diff)
                # Ignore non-numbers for the diff calculations (#13012).
                except TypeError:
                    pass
                else:
                    # Only reached when the subtraction above succeeded; a
                    # zero reference value makes the relative diff infinite.
                    if other_value == 0.0:
                        max_rel_diff = math.inf
                    else:
                        max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
                # Mismatched indices are recorded even when the diff could
                # not be computed numerically.
                different_ids.append(i)
        message_data = [
            (str(i), str(other_side[i]), str(approx_side_as_map[i]))
            for i in different_ids
        ]

        return _compare_approx(
            self.expected,
            message_data,
            number_of_elements,
            different_ids,
            max_abs_diff,
            max_rel_diff,
        )

    def __eq__(self, actual) -> bool:
        """Sequences match only when lengths agree and every element matches."""
        try:
            if len(actual) != len(self.expected):
                return False
        except TypeError:
            # `actual` has no len(); it cannot match a sequence.
            return False
        return super().__eq__(actual)

    def _yield_comparisons(self, actual):
        # Pair elements positionally for ApproxBase.__eq__.
        return zip(actual, self.expected)

    def _check_type(self) -> None:
        """Reject sequences nested inside a sequence of the same type."""
        __tracebackhide__ = True
        for index, x in enumerate(self.expected):
            if isinstance(x, type(self.expected)):
                msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
                raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
376
+
377
+
378
class ApproxScalar(ApproxBase):
    """Perform approximate comparisons where the expected value is a single number."""

    # Using Real should be better than this Union, but not possible yet:
    # https://github.com/python/typeshed/pull/3108
    DEFAULT_ABSOLUTE_TOLERANCE: float | Decimal = 1e-12
    DEFAULT_RELATIVE_TOLERANCE: float | Decimal = 1e-6

    def __repr__(self) -> str:
        """Return a string communicating both the expected value and the
        tolerance for the comparison being made.

        For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
        """
        # Don't show a tolerance for values that aren't compared using
        # tolerances, i.e. bools, non-numerics and infinities. Need to call
        # abs to handle complex numbers, e.g. (inf + 1j).
        # (Simplified: the previous ``abs(...) or isinstance(..., bool)``
        # inside isinf was redundant -- bools are already excluded by the
        # first clause, and isinf(False) == isinf(0).)
        if (
            isinstance(self.expected, bool)
            or (not isinstance(self.expected, (Complex, Decimal)))
            or math.isinf(abs(self.expected))
        ):
            return str(self.expected)

        # If a sensible tolerance can't be calculated, self.tolerance will
        # raise a ValueError. In this case, display '???'.
        try:
            if 1e-3 <= self.tolerance < 1e3:
                vetted_tolerance = f"{self.tolerance:n}"
            else:
                vetted_tolerance = f"{self.tolerance:.1e}"

            if (
                isinstance(self.expected, Complex)
                and self.expected.imag
                and not math.isinf(self.tolerance)
            ):
                vetted_tolerance += " ∠ ±180°"
        except ValueError:
            vetted_tolerance = "???"

        return f"{self.expected} ± {vetted_tolerance}"

    def __eq__(self, actual) -> bool:
        """Return whether the given value is equal to the expected value
        within the pre-specified tolerance."""

        def is_bool(val: Any) -> bool:
            # Check if `val` is a native bool or numpy bool.
            if isinstance(val, bool):
                return True
            try:
                import numpy as np

                return isinstance(val, np.bool_)
            except ImportError:
                return False

        asarray = _as_numpy_array(actual)
        if asarray is not None:
            # Call ``__eq__()`` manually to prevent infinite-recursion with
            # numpy<1.13. See #3748.
            return all(self.__eq__(a) for a in asarray.flat)

        # Short-circuit exact equality, except for bool and np.bool_
        if is_bool(self.expected) and not is_bool(actual):
            return False
        elif actual == self.expected:
            return True

        # If either type is non-numeric, fall back to strict equality.
        # NB: we need Complex, rather than just Number, to ensure that __abs__,
        # __sub__, and __float__ are defined. Also, consider bool to be
        # non-numeric, even though it has the required arithmetic.
        if is_bool(self.expected) or not (
            isinstance(self.expected, (Complex, Decimal))
            and isinstance(actual, (Complex, Decimal))
        ):
            return False

        # Allow the user to control whether NaNs are considered equal to each
        # other or not. The abs() calls are for compatibility with complex
        # numbers.
        if math.isnan(abs(self.expected)):
            return self.nan_ok and math.isnan(abs(actual))

        # Infinity shouldn't be approximately equal to anything but itself, but
        # if there's a relative tolerance, it will be infinite and infinity
        # will seem approximately equal to everything. The equal-to-itself
        # case would have been short circuited above, so here we can just
        # return false if the expected value is infinite. The abs() call is
        # for compatibility with complex numbers.
        if math.isinf(abs(self.expected)):
            return False

        # Return true if the two numbers are within the tolerance.
        result: bool = abs(self.expected - actual) <= self.tolerance
        return result

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    @property
    def tolerance(self):
        """Return the tolerance for the comparison.

        This could be either an absolute tolerance or a relative tolerance,
        depending on what the user specified or which would be larger.

        :raises ValueError: if either tolerance is negative or NaN.
        """

        def set_default(x, default):
            return x if x is not None else default

        # Figure out what the absolute tolerance should be. ``self.abs`` is
        # either None or a value specified by the user.
        absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)

        if absolute_tolerance < 0:
            raise ValueError(
                f"absolute tolerance can't be negative: {absolute_tolerance}"
            )
        if math.isnan(absolute_tolerance):
            raise ValueError("absolute tolerance can't be NaN.")

        # If the user specified an absolute tolerance but not a relative one,
        # just return the absolute tolerance.
        if self.rel is None:
            if self.abs is not None:
                return absolute_tolerance

        # Figure out what the relative tolerance should be. ``self.rel`` is
        # either None or a value specified by the user. This is done after
        # we've made sure the user didn't ask for an absolute tolerance only,
        # because we don't want to raise errors about the relative tolerance if
        # we aren't even going to use it.
        relative_tolerance = set_default(
            self.rel, self.DEFAULT_RELATIVE_TOLERANCE
        ) * abs(self.expected)

        if relative_tolerance < 0:
            raise ValueError(
                f"relative tolerance can't be negative: {relative_tolerance}"
            )
        if math.isnan(relative_tolerance):
            raise ValueError("relative tolerance can't be NaN.")

        # Return the larger of the relative and absolute tolerances.
        return max(relative_tolerance, absolute_tolerance)
526
+
527
+
528
class ApproxDecimal(ApproxScalar):
    """Perform approximate comparisons where the expected value is a Decimal.

    All comparison logic is inherited from ApproxScalar; only the default
    tolerances are overridden so they stay in Decimal arithmetic.
    """

    # Same magnitudes as ApproxScalar's float defaults, as Decimal values.
    DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
    DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
533
+
534
+
535
def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
    """Assert that two numbers (or two ordered sequences of numbers) are equal to each other
    within some tolerance.

    Due to the :doc:`python:tutorial/floatingpoint`, numbers that we
    would intuitively expect to be equal are not always so::

        >>> 0.1 + 0.2 == 0.3
        False

    This problem is commonly encountered when writing tests, e.g. when making
    sure that floating-point values are what you expect them to be. One way to
    deal with this problem is to assert that two floating-point numbers are
    equal to within some appropriate tolerance::

        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
        True

    However, comparisons like this are tedious to write and difficult to
    understand. Furthermore, absolute comparisons like the one above are
    usually discouraged because there's no tolerance that works well for all
    situations. ``1e-6`` is good for numbers around ``1``, but too small for
    very big numbers and too big for very small ones. It's better to express
    the tolerance as a fraction of the expected value, but relative comparisons
    like that are even more difficult to write correctly and concisely.

    The ``approx`` class performs floating-point comparisons using a syntax
    that's as intuitive as possible::

        >>> from pytest import approx
        >>> 0.1 + 0.2 == approx(0.3)
        True

    The same syntax also works for ordered sequences of numbers::

        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
        True

    ``numpy`` arrays::

        >>> import numpy as np # doctest: +SKIP
        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
        True

    And for a ``numpy`` array against a scalar::

        >>> import numpy as np # doctest: +SKIP
        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
        True

    Only ordered sequences are supported, because ``approx`` needs
    to infer the relative position of the sequences without ambiguity. This means
    ``sets`` and other unordered sequences are not supported.

    Finally, dictionary *values* can also be compared::

        >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
        True

    The comparison will be true if both mappings have the same keys and their
    respective values match the expected tolerances.

    **Tolerances**

    By default, ``approx`` considers numbers within a relative tolerance of
    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
    This treatment would lead to surprising results if the expected value was
    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
    To handle this case less surprisingly, ``approx`` also considers numbers
    within an absolute tolerance of ``1e-12`` of its expected value to be
    equal. Infinity and NaN are special cases. Infinity is only considered
    equal to itself, regardless of the relative tolerance. NaN is not
    considered equal to anything by default, but you can make it be equal to
    itself by setting the ``nan_ok`` argument to True. (This is meant to
    facilitate comparing arrays that use NaN to mean "no data".)

    Both the relative and absolute tolerances can be changed by passing
    arguments to the ``approx`` constructor::

        >>> 1.0001 == approx(1)
        False
        >>> 1.0001 == approx(1, rel=1e-3)
        True
        >>> 1.0001 == approx(1, abs=1e-3)
        True

    If you specify ``abs`` but not ``rel``, the comparison will not consider
    the relative tolerance at all. In other words, two numbers that are within
    the default relative tolerance of ``1e-6`` will still be considered unequal
    if they exceed the specified absolute tolerance. If you specify both
    ``abs`` and ``rel``, the numbers will be considered equal if either
    tolerance is met::

        >>> 1 + 1e-8 == approx(1)
        True
        >>> 1 + 1e-8 == approx(1, abs=1e-12)
        False
        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
        True

    **Non-numeric types**

    You can also use ``approx`` to compare non-numeric types, or dicts and
    sequences containing non-numeric types, in which case it falls back to
    strict equality. This can be useful for comparing dicts and sequences that
    can contain optional values::

        >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
        True
        >>> [None, 1.0000005] == approx([None,1])
        True
        >>> ["foo", 1.0000005] == approx([None,1])
        False

    If you're thinking about using ``approx``, then you might want to know how
    it compares to other good ways of comparing floating-point numbers. All of
    these algorithms are based on relative and absolute tolerances and should
    agree for the most part, but they do have meaningful differences:

    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
      tolerance is met. Because the relative tolerance is calculated w.r.t.
      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
      ``b`` is a "reference value"). You have to specify an absolute tolerance
      if you want to compare to ``0.0`` because there is no tolerance by
      default. More information: :py:func:`math.isclose`.

    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
      between ``a`` and ``b`` is less that the sum of the relative tolerance
      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
      is only calculated w.r.t. ``b``, this test is asymmetric and you can
      think of ``b`` as the reference value. Support for comparing sequences
      is provided by :py:func:`numpy.allclose`. More information:
      :std:doc:`numpy:reference/generated/numpy.isclose`.

    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
      are within an absolute tolerance of ``1e-7``. No relative tolerance is
      considered , so this function is not appropriate for very large or very
      small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
      and it's ugly because it doesn't follow PEP8. More information:
      :py:meth:`unittest.TestCase.assertAlmostEqual`.

    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
      Because the relative tolerance is only calculated w.r.t. ``b``, this test
      is asymmetric and you can think of ``b`` as the reference value. In the
      special case that you explicitly specify an absolute tolerance but not a
      relative tolerance, only the absolute tolerance is considered.

    .. note::

        ``approx`` can handle numpy arrays, but we recommend the
        specialised test helpers in :std:doc:`numpy:reference/routines.testing`
        if you need support for comparisons, NaNs, or ULP-based tolerances.

        To match strings using regex, you can use
        `Matches <https://github.com/asottile/re-assert#re_assertmatchespattern-str-args-kwargs>`_
        from the
        `re_assert package <https://github.com/asottile/re-assert>`_.


    .. note::

        Unlike built-in equality, this function considers
        booleans unequal to numeric zero or one. For example::

            >>> 1 == approx(True)
            False

    .. warning::

       .. versionchanged:: 3.2

       In order to avoid inconsistent behavior, :py:exc:`TypeError` is
       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
       The example below illustrates the problem::

           assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
           assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)

       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
       to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to
       comparison. This is because the call hierarchy of rich comparisons
       follows a fixed behavior. More information: :py:meth:`object.__ge__`

    .. versionchanged:: 3.7.1
       ``approx`` raises ``TypeError`` when it encounters a dict value or
       sequence element of non-numeric type.

    .. versionchanged:: 6.1.0
       ``approx`` falls back to strict equality for non-numeric types instead
       of raising ``TypeError``.
    """
    # Delegate the comparison to a class that knows how to deal with the type
    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
    #
    # The primary responsibility of these classes is to implement ``__eq__()``
    # and ``__repr__()``. The former is used to actually check if some
    # "actual" value is equivalent to the given expected value within the
    # allowed tolerance. The latter is used to show the user the expected
    # value and tolerance, in the case that a test failed.
    #
    # The actual logic for making approximate comparisons can be found in
    # ApproxScalar, which is used to compare individual numbers. All of the
    # other Approx classes eventually delegate to this class. The ApproxBase
    # class provides some convenient methods and overloads, but isn't really
    # essential.

    __tracebackhide__ = True

    # Dispatch on the expected value's type. The branch order matters: e.g.
    # Decimal is handled before the generic scalar fallback, and the numpy
    # check runs before the generic sequence-like check.
    if isinstance(expected, Decimal):
        cls: type[ApproxBase] = ApproxDecimal
    elif isinstance(expected, Mapping):
        cls = ApproxMapping
    elif _is_numpy_array(expected):
        expected = _as_numpy_array(expected)
        cls = ApproxNumpy
    elif _is_sequence_like(expected):
        cls = ApproxSequenceLike
    elif isinstance(expected, Collection) and not isinstance(expected, (str, bytes)):
        # Unordered collections (e.g. sets) cannot be compared element-wise.
        msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}"
        raise TypeError(msg)
    else:
        cls = ApproxScalar

    return cls(expected, rel, abs, nan_ok)
761
+
762
+
763
+ def _is_sequence_like(expected: object) -> bool:
764
+ return (
765
+ hasattr(expected, "__getitem__")
766
+ and isinstance(expected, Sized)
767
+ and not isinstance(expected, (str, bytes))
768
+ )
769
+
770
+
771
def _is_numpy_array(obj: object) -> bool:
    """
    Return True when numpy is already imported and the given object is
    implicitly convertible to ndarray (as decided by ``_as_numpy_array``).
    """
    converted = _as_numpy_array(obj)
    return converted is not None
777
+
778
+
779
+ def _as_numpy_array(obj: object) -> ndarray | None:
780
+ """
781
+ Return an ndarray if the given object is implicitly convertible to ndarray,
782
+ and numpy is already imported, otherwise None.
783
+ """
784
+ np: Any = sys.modules.get("numpy")
785
+ if np is not None:
786
+ # avoid infinite recursion on numpy scalars, which have __array__
787
+ if np.isscalar(obj):
788
+ return None
789
+ elif isinstance(obj, np.ndarray):
790
+ return obj
791
+ elif hasattr(obj, "__array__") or hasattr("obj", "__array_interface__"):
792
+ return np.asarray(obj)
793
+ return None
archive/Axiovorax/.venv/Lib/site-packages/_pytest/raises.py ADDED
@@ -0,0 +1,1519 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from abc import ABC
4
+ from abc import abstractmethod
5
+ import re
6
+ from re import Pattern
7
+ import sys
8
+ from textwrap import indent
9
+ from typing import Any
10
+ from typing import cast
11
+ from typing import final
12
+ from typing import Generic
13
+ from typing import get_args
14
+ from typing import get_origin
15
+ from typing import Literal
16
+ from typing import overload
17
+ from typing import TYPE_CHECKING
18
+ import warnings
19
+
20
+ from _pytest._code import ExceptionInfo
21
+ from _pytest._code.code import stringify_exception
22
+ from _pytest.outcomes import fail
23
+ from _pytest.warning_types import PytestWarning
24
+
25
+
26
# Type variables: the TYPE_CHECKING branch uses typing_extensions so type
# checkers see TypeVar defaults (PEP 696 ``default=``); at runtime the plain
# typing.TypeVar (which lacks ``default``) is used instead.
if TYPE_CHECKING:
    from collections.abc import Callable
    from collections.abc import Sequence

    # for some reason Sphinx does not play well with 'from types import TracebackType'
    import types

    from typing_extensions import ParamSpec
    from typing_extensions import TypeGuard
    from typing_extensions import TypeVar

    P = ParamSpec("P")

    # this conditional definition is because we want to allow a TypeVar default
    BaseExcT_co_default = TypeVar(
        "BaseExcT_co_default",
        bound=BaseException,
        default=BaseException,
        covariant=True,
    )

    # Use short name because it shows up in docs.
    E = TypeVar("E", bound=BaseException, default=BaseException)
else:
    from typing import TypeVar

    BaseExcT_co_default = TypeVar(
        "BaseExcT_co_default", bound=BaseException, covariant=True
    )

# RaisesGroup doesn't work with a default.
BaseExcT_co = TypeVar("BaseExcT_co", bound=BaseException, covariant=True)
BaseExcT_1 = TypeVar("BaseExcT_1", bound=BaseException)
BaseExcT_2 = TypeVar("BaseExcT_2", bound=BaseException)
ExcT_1 = TypeVar("ExcT_1", bound=Exception)
ExcT_2 = TypeVar("ExcT_2", bound=Exception)

# Before Python 3.11 the exception-group types come from the backport package.
if sys.version_info < (3, 11):
    from exceptiongroup import BaseExceptionGroup
    from exceptiongroup import ExceptionGroup


# String patterns default to including the unicode flag.
_REGEX_NO_FLAGS = re.compile(r"").flags
70
+
71
+
72
# pytest.raises helper
# Overload: context-manager form with an explicit expected exception type,
# optionally narrowed further by ``match`` and/or ``check``.
@overload
def raises(
    expected_exception: type[E] | tuple[type[E], ...],
    *,
    match: str | re.Pattern[str] | None = ...,
    check: Callable[[E], bool] = ...,
) -> RaisesExc[E]: ...
80
+
81
+
82
# Overload: context-manager form with no expected type; the raised exception
# is validated by the ``match`` pattern (and ``check`` if given).
@overload
def raises(
    *,
    match: str | re.Pattern[str],
    # If exception_type is not provided, check() must do any typechecks itself.
    check: Callable[[BaseException], bool] = ...,
) -> RaisesExc[BaseException]: ...
89
+
90
+
91
# Overload: context-manager form where the raised exception is validated
# solely by the ``check`` callable.
@overload
def raises(*, check: Callable[[BaseException], bool]) -> RaisesExc[BaseException]: ...
93
+
94
+
95
# Overload: legacy callable form -- immediately invokes ``func(*args, **kwargs)``
# and returns the captured ExceptionInfo instead of acting as a context manager.
@overload
def raises(
    expected_exception: type[E] | tuple[type[E], ...],
    func: Callable[..., Any],
    *args: Any,
    **kwargs: Any,
) -> ExceptionInfo[E]: ...
102
+
103
+
104
def raises(
    expected_exception: type[E] | tuple[type[E], ...] | None = None,
    *args: Any,
    **kwargs: Any,
) -> RaisesExc[BaseException] | ExceptionInfo[E]:
    r"""Assert that a code block/function call raises an exception type, or one of its subclasses.

    :param expected_exception:
        The expected exception type, or a tuple if one of multiple possible
        exception types are expected. Note that subclasses of the passed exceptions
        will also match.

        This is not a required parameter, you may opt to only use ``match`` and/or
        ``check`` for verifying the raised exception.

    :kwparam str | re.Pattern[str] | None match:
        If specified, a string containing a regular expression,
        or a regular expression object, that is tested against the string
        representation of the exception and its :pep:`678` `__notes__`
        using :func:`re.search`.

        To match a literal string that may contain :ref:`special characters
        <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.

        (This is only used when ``pytest.raises`` is used as a context manager,
        and passed through to the function otherwise.
        When using ``pytest.raises`` as a function, you can use:
        ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)

    :kwparam Callable[[BaseException], bool] check:

        .. versionadded:: 8.4

        If specified, a callable that will be called with the exception as a parameter
        after checking the type and the match regex if specified.
        If it returns ``True`` it will be considered a match, if not it will
        be considered a failed match.

    Use ``pytest.raises`` as a context manager, which will capture the exception of the given
    type, or any of its subclasses::

        >>> import pytest
        >>> with pytest.raises(ZeroDivisionError):
        ...    1/0

    If the code block does not raise the expected exception, or no exception at all,
    the check will fail instead.

    The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
    details of the captured exception::

        >>> with pytest.raises(ValueError) as exc_info:
        ...     raise ValueError("value must be 42")
        >>> assert exc_info.type is ValueError
        >>> assert exc_info.value.args[0] == "value must be 42"

    .. warning::

        Given that ``pytest.raises`` matches subclasses, be wary of using it to match
        :class:`Exception` — because :class:`Exception` is the base class of almost all
        exceptions, it is easy for this to hide real bugs. Avoid catching
        :class:`Exception` unless certain that you really want to catch **any**
        exception raised.

    .. note::

        Normal context manager rules apply: the exception raised *must* be the final
        line in the scope of the context manager, and lines of code after that,
        within the scope of the context manager, will not be executed. Inspect
        ``exc_info`` *after* the ``with`` block, not inside it.

    **Expecting exception groups**

    When expecting exceptions wrapped in :exc:`BaseExceptionGroup` or
    :exc:`ExceptionGroup`, you should instead use :class:`pytest.RaisesGroup`.

    **Legacy form**

    It is possible to specify a callable by passing a to-be-called lambda::

        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>

    or you can specify an arbitrary callable with arguments::

        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>

    The form above is fully supported but discouraged for new code because the
    context manager form is regarded as more readable and less error-prone.

    .. note::
        Similar to caught exception objects in Python, explicitly clearing local
        references to returned ``ExceptionInfo`` objects helps the interpreter's
        garbage collector break the reference cycle
        (``ExceptionInfo`` --> caught exception --> frame stack --> locals -->
        ``ExceptionInfo``).

    .. seealso::

        :ref:`assertraises` for more examples and detailed discussion.
    """
    __tracebackhide__ = True

    if not args:
        # Context-manager form: everything must arrive via keyword arguments.
        unknown = set(kwargs) - {"match", "check", "expected_exception"}
        if unknown:
            raise TypeError(
                "Unexpected keyword arguments passed to pytest.raises: "
                + ", ".join(sorted(kwargs))
                + "\nUse context-manager form instead?"
            )
        if expected_exception is None:
            return RaisesExc(**kwargs)
        return RaisesExc(expected_exception, **kwargs)

    # Legacy function form: raises(Exc, func, *args, **kwargs).
    # A falsy expected_exception (None or an empty tuple) is rejected here,
    # since "should never raise" needs no special code in a test.
    if not expected_exception:
        raise ValueError(
            f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. "
            f"Raising exceptions is already understood as failing the test, so you don't need "
            f"any special code to say 'this should never raise an exception'."
        )
    func = args[0]
    if not callable(func):
        raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
    with RaisesExc(expected_exception) as excinfo:
        func(*args[1:], **kwargs)
    # Drop the local reference on the way out so the ExceptionInfo/frame
    # reference cycle can be collected promptly.
    try:
        return excinfo
    finally:
        del excinfo
301
+
302
+
303
# note: RaisesExc/RaisesGroup uses fail() internally, so this alias
# indicates (to [internal] plugins?) that `pytest.raises` will
# raise `_pytest.outcomes.Failed`, where
# `outcomes.Failed is outcomes.fail.Exception is raises.Exception`
# note: this is *not* the same as `_pytest.main.Failed`
# note: mypy does not recognize this attribute, and it's not possible
# to use a protocol/decorator like the others in outcomes due to
# https://github.com/python/mypy/issues/18715
raises.Exception = fail.Exception  # type: ignore[attr-defined]
312
+
313
+
314
def _match_pattern(match: Pattern[str]) -> str | Pattern[str]:
    """Return the bare pattern string when *match* was compiled without flags.

    Avoids printing a redundant ``re.compile(...)`` wrapper in failure output;
    a pattern with flags is returned unchanged so the flags stay visible.
    """
    if match.flags == _REGEX_NO_FLAGS:
        return match.pattern
    return match
317
+
318
+
319
def repr_callable(fun: Callable[[BaseExcT_1], bool]) -> str:
    """Get the repr of a ``check`` parameter.

    Kept as a tiny standalone helper so it can be monkeypatched
    (e.g. by hypothesis) to produce prettier output.
    """
    return f"{fun!r}"
325
+
326
+
327
def backquote(s: str) -> str:
    """Wrap *s* in backticks for display in failure messages."""
    return f"`{s}`"
329
+
330
+
331
+ def _exception_type_name(
332
+ e: type[BaseException] | tuple[type[BaseException], ...],
333
+ ) -> str:
334
+ if isinstance(e, type):
335
+ return e.__name__
336
+ if len(e) == 1:
337
+ return e[0].__name__
338
+ return "(" + ", ".join(ee.__name__ for ee in e) + ")"
339
+
340
+
341
def _check_raw_type(
    expected_type: type[BaseException] | tuple[type[BaseException], ...] | None,
    exception: BaseException,
) -> str | None:
    """Return a failure message if *exception* is not an instance of *expected_type*.

    Returns ``None`` when the check passes — including when there is nothing
    to check (``expected_type`` is ``None`` or an empty tuple).
    """
    if expected_type is None or expected_type == ():
        return None

    if isinstance(exception, expected_type):
        return None

    actual_type_str = backquote(_exception_type_name(type(exception)) + "()")
    expected_type_str = backquote(_exception_type_name(expected_type))
    # Special-case a group showing up where a plain exception was expected:
    # the user probably meant to match an exception *inside* the group.
    group_where_plain_expected = (
        isinstance(exception, BaseExceptionGroup)
        and isinstance(expected_type, type)
        and not issubclass(expected_type, BaseExceptionGroup)
    )
    if group_where_plain_expected:
        return f"Unexpected nested {actual_type_str}, expected {expected_type_str}"
    return f"{actual_type_str} is not an instance of {expected_type_str}"
362
+
363
+
364
def is_fully_escaped(s: str) -> bool:
    """Return True if every regex metacharacter in *s* is backslash-escaped.

    We know the pattern won't be compiled with re.VERBOSE, so whitespace does
    not need to be escaped.  NOTE: a metacharacter directly preceded by a
    backslash counts as escaped even if that backslash is itself escaped.
    """
    metacharacters = "{}()+.*?^$[]"
    for index, char in enumerate(s):
        if char in metacharacters and (index == 0 or s[index - 1] != "\\"):
            return False
    return True
370
+
371
+
372
def unescape(s: str) -> str:
    """Strip the backslash escapes added by e.g. :func:`re.escape`."""
    # NOTE(review): `+-.` inside the character class is a range ('+' through
    # '.', which also covers ','); presumably intentional — confirm upstream.
    return re.sub(r"\\([{}()+-.*?^$\[\]\s\\])", r"\1", s)
374
+
375
+
376
# These classes conceptually differ from ExceptionInfo in that ExceptionInfo is tied, and
# constructed from, a particular exception - whereas these are constructed with expected
# exceptions, and later allow matching towards particular exceptions.
# But there's overlap in `ExceptionInfo.match` and `AbstractRaises._check_match`, as with
# `AbstractRaises.matches` and `ExceptionInfo.errisinstance`+`ExceptionInfo.group_contains`.
# The interaction between these classes should perhaps be improved.
class AbstractRaises(ABC, Generic[BaseExcT_co]):
    """ABC with common functionality shared between RaisesExc and RaisesGroup.

    Handles the ``match`` regex (compilation, validation, empty-string warning)
    and the ``check`` callable, plus parsing of expected exception types.
    """

    def __init__(
        self,
        *,
        match: str | Pattern[str] | None,
        check: Callable[[BaseExcT_co], bool] | None,
    ) -> None:
        if isinstance(match, str):
            # juggle error in order to avoid context to fail (necessary?)
            re_error = None
            try:
                self.match: Pattern[str] | None = re.compile(match)
            except re.error as e:
                re_error = e
            if re_error is not None:
                fail(f"Invalid regex pattern provided to 'match': {re_error}")
            # An empty pattern matches everything via re.search, which is
            # almost certainly not what the user intended — warn about it.
            if match == "":
                warnings.warn(
                    PytestWarning(
                        "matching against an empty string will *always* pass. If you want "
                        "to check for an empty message you need to pass '^$'. If you don't "
                        "want to match you should pass `None` or leave out the parameter."
                    ),
                    stacklevel=2,
                )
        else:
            self.match = match

        # check if this is a fully escaped regex and has ^$ to match fully
        # in which case we can do a proper diff on error
        self.rawmatch: str | None = None
        if isinstance(match, str) or (
            isinstance(match, Pattern) and match.flags == _REGEX_NO_FLAGS
        ):
            if isinstance(match, Pattern):
                match = match.pattern
            if (
                match
                and match[0] == "^"
                and match[-1] == "$"
                and is_fully_escaped(match[1:-1])
            ):
                # The pattern is effectively a literal string; store the
                # unescaped form so _check_match can show a text diff.
                self.rawmatch = unescape(match[1:-1])

        self.check = check
        self._fail_reason: str | None = None

        # used to suppress repeated printing of `repr(self.check)`
        self._nested: bool = False

        # set in self._parse_exc
        self.is_baseexception = False

    def _parse_exc(
        self, exc: type[BaseExcT_1] | types.GenericAlias, expected: str
    ) -> type[BaseExcT_1]:
        """Validate a single expected-exception entry and return its runtime type.

        Accepts plain exception types, plus the generic aliases
        ``ExceptionGroup[Exception]`` / ``BaseExceptionGroup[BaseException]``
        (reduced to their origin class). Anything else raises
        TypeError/ValueError with a descriptive message.
        """
        if isinstance(exc, type) and issubclass(exc, BaseException):
            if not issubclass(exc, Exception):
                self.is_baseexception = True
            return exc
        # because RaisesGroup does not support variable number of exceptions there's
        # still a use for RaisesExc(ExceptionGroup[Exception]).
        origin_exc: type[BaseException] | None = get_origin(exc)
        if origin_exc and issubclass(origin_exc, BaseExceptionGroup):
            exc_type = get_args(exc)[0]
            if (
                issubclass(origin_exc, ExceptionGroup) and exc_type in (Exception, Any)
            ) or (
                issubclass(origin_exc, BaseExceptionGroup)
                and exc_type in (BaseException, Any)
            ):
                if not isinstance(exc, Exception):
                    self.is_baseexception = True
                return cast(type[BaseExcT_1], origin_exc)
            else:
                raise ValueError(
                    f"Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseExeption]` "
                    f"are accepted as generic types but got `{exc}`. "
                    f"As `raises` will catch all instances of the specified group regardless of the "
                    f"generic argument specific nested exceptions has to be checked "
                    f"with `RaisesGroup`."
                )
        # unclear if the Type/ValueError distinction is even helpful here
        msg = f"expected exception must be {expected}, not "
        if isinstance(exc, type):
            raise ValueError(msg + f"{exc.__name__!r}")
        if isinstance(exc, BaseException):
            raise TypeError(msg + f"an exception instance ({type(exc).__name__})")
        raise TypeError(msg + repr(type(exc).__name__))

    @property
    def fail_reason(self) -> str | None:
        """Set after a call to :meth:`matches` to give a human-readable reason for why the match failed.
        When used as a context manager the string will be printed as the reason for the
        test failing."""
        return self._fail_reason

    def _check_check(
        self: AbstractRaises[BaseExcT_1],
        exception: BaseExcT_1,
    ) -> bool:
        # No ``check`` callable configured -> trivially passes.
        if self.check is None:
            return True

        if self.check(exception):
            return True

        # When nested inside a RaisesGroup the repr of the check is printed
        # by the parent, so suppress it here to avoid repetition.
        check_repr = "" if self._nested else " " + repr_callable(self.check)
        self._fail_reason = f"check{check_repr} did not return True"
        return False

    # TODO: harmonize with ExceptionInfo.match
    def _check_match(self, e: BaseException) -> bool:
        if self.match is None or re.search(
            self.match,
            stringified_exception := stringify_exception(
                e, include_subexception_msg=False
            ),
        ):
            return True

        # if we're matching a group, make sure we're explicit to reduce confusion
        # if they're trying to match an exception contained within the group
        maybe_specify_type = (
            f" the `{_exception_type_name(type(e))}()`"
            if isinstance(e, BaseExceptionGroup)
            else ""
        )
        if isinstance(self.rawmatch, str):
            # TODO: it instructs to use `-v` to print leading text, but that doesn't work
            # I also don't know if this is the proper entry point, or tool to use at all
            from _pytest.assertion.util import _diff_text
            from _pytest.assertion.util import dummy_highlighter

            diff = _diff_text(self.rawmatch, stringified_exception, dummy_highlighter)
            self._fail_reason = ("\n" if diff[0][0] == "-" else "") + "\n".join(diff)
            return False

        # I don't love "Regex"+"Input" vs something like "expected regex"+"exception message"
        # when they're similar it's not always obvious which is which
        self._fail_reason = (
            f"Regex pattern did not match{maybe_specify_type}.\n"
            f" Regex: {_match_pattern(self.match)!r}\n"
            f" Input: {stringified_exception!r}"
        )
        if _match_pattern(self.match) == stringified_exception:
            self._fail_reason += "\n Did you mean to `re.escape()` the regex?"
        return False

    @abstractmethod
    def matches(
        self: AbstractRaises[BaseExcT_1], exception: BaseException
    ) -> TypeGuard[BaseExcT_1]:
        """Check if an exception matches the requirements of this AbstractRaises.
        If it fails, :meth:`AbstractRaises.fail_reason` should be set.
        """
540
+
541
+
542
@final
class RaisesExc(AbstractRaises[BaseExcT_co_default]):
    """
    .. versionadded:: 8.4


    This is the class constructed when calling :func:`pytest.raises`, but may be used
    directly as a helper class with :class:`RaisesGroup` when you want to specify
    requirements on sub-exceptions.

    You don't need this if you only want to specify the type, since :class:`RaisesGroup`
    accepts ``type[BaseException]``.

    :param type[BaseException] | tuple[type[BaseException]] | None expected_exception:
        The expected type, or one of several possible types.
        May be ``None`` in order to only make use of ``match`` and/or ``check``

        The type is checked with :func:`isinstance`, and does not need to be an exact match.
        If that is wanted you can use the ``check`` parameter.

    :kwparam str | Pattern[str] match:
        A regex to match.

    :kwparam Callable[[BaseException], bool] check:
        If specified, a callable that will be called with the exception as a parameter
        after checking the type and the match regex if specified.
        If it returns ``True`` it will be considered a match, if not it will
        be considered a failed match.

    :meth:`RaisesExc.matches` can also be used standalone to check individual exceptions.

    Examples::

        with RaisesGroup(RaisesExc(ValueError, match="string"))
            ...
        with RaisesGroup(RaisesExc(check=lambda x: x.args == (3, "hello"))):
            ...
        with RaisesGroup(RaisesExc(check=lambda x: type(x) is ValueError)):
            ...
    """

    # Trio bundled hypothesis monkeypatching, we will probably instead assume that
    # hypothesis will handle that in their pytest plugin by the time this is released.
    # Alternatively we could add a version of get_pretty_function_description ourselves
    # https://github.com/HypothesisWorks/hypothesis/blob/8ced2f59f5c7bea3344e35d2d53e1f8f8eb9fcd8/hypothesis-python/src/hypothesis/internal/reflection.py#L439

    # At least one of the three parameters must be passed.
    @overload
    def __init__(
        self,
        expected_exception: (
            type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...]
        ),
        /,
        *,
        match: str | Pattern[str] | None = ...,
        check: Callable[[BaseExcT_co_default], bool] | None = ...,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesExc[BaseException],  # Give E a value.
        /,
        *,
        match: str | Pattern[str] | None,
        # If exception_type is not provided, check() must do any typechecks itself.
        check: Callable[[BaseException], bool] | None = ...,
    ) -> None: ...

    @overload
    def __init__(self, /, *, check: Callable[[BaseException], bool]) -> None: ...

    def __init__(
        self,
        expected_exception: (
            type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] | None
        ) = None,
        /,
        *,
        match: str | Pattern[str] | None = None,
        check: Callable[[BaseExcT_co_default], bool] | None = None,
    ):
        super().__init__(match=match, check=check)
        # Normalize the expected exception(s) to a (possibly empty) tuple.
        if isinstance(expected_exception, tuple):
            expected_exceptions = expected_exception
        elif expected_exception is None:
            expected_exceptions = ()
        else:
            expected_exceptions = (expected_exception,)

        # With no type, no match and no check there is nothing to assert on.
        if (expected_exceptions == ()) and match is None and check is None:
            raise ValueError("You must specify at least one parameter to match on.")

        self.expected_exceptions = tuple(
            self._parse_exc(e, expected="a BaseException type")
            for e in expected_exceptions
        )

        # Set when the type check fails in matches(); __exit__ then lets the
        # exception propagate instead of raising an AssertionError.
        self._just_propagate = False

    def matches(
        self,
        exception: BaseException | None,
    ) -> TypeGuard[BaseExcT_co_default]:
        """Check if an exception matches the requirements of this :class:`RaisesExc`.
        If it fails, :attr:`RaisesExc.fail_reason` will be set.

        Examples::

            assert RaisesExc(ValueError).matches(my_exception)
            # is equivalent to
            assert isinstance(my_exception, ValueError)

            # this can be useful when checking e.g. the ``__cause__`` of an exception.
            with pytest.raises(ValueError) as excinfo:
                ...
            assert RaisesExc(SyntaxError, match="foo").matches(excinfo.value.__cause__)
            # above line is equivalent to
            assert isinstance(excinfo.value.__cause__, SyntaxError)
            assert re.search("foo", str(excinfo.value.__cause__))

        """
        self._just_propagate = False
        if exception is None:
            self._fail_reason = "exception is None"
            return False
        if not self._check_type(exception):
            self._just_propagate = True
            return False

        if not self._check_match(exception):
            return False

        return self._check_check(exception)

    def __repr__(self) -> str:
        parameters = []
        if self.expected_exceptions:
            parameters.append(_exception_type_name(self.expected_exceptions))
        if self.match is not None:
            # If no flags were specified, discard the redundant re.compile() here.
            parameters.append(
                f"match={_match_pattern(self.match)!r}",
            )
        if self.check is not None:
            parameters.append(f"check={repr_callable(self.check)}")
        return f"RaisesExc({', '.join(parameters)})"

    def _check_type(self, exception: BaseException) -> TypeGuard[BaseExcT_co_default]:
        # Delegates to the module-level helper; a non-None result is the
        # human-readable failure reason.
        self._fail_reason = _check_raw_type(self.expected_exceptions, exception)
        return self._fail_reason is None

    def __enter__(self) -> ExceptionInfo[BaseExcT_co_default]:
        # The ExceptionInfo is filled in by __exit__ once an exception is caught.
        self.excinfo: ExceptionInfo[BaseExcT_co_default] = ExceptionInfo.for_later()
        return self.excinfo

    # TODO: move common code into superclass
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool:
        __tracebackhide__ = True
        if exc_type is None:
            # No exception was raised inside the block: fail the test.
            if not self.expected_exceptions:
                fail("DID NOT RAISE any exception")
            if len(self.expected_exceptions) > 1:
                fail(f"DID NOT RAISE any of {self.expected_exceptions!r}")

            fail(f"DID NOT RAISE {self.expected_exceptions[0]!r}")

        assert self.excinfo is not None, (
            "Internal error - should have been constructed in __enter__"
        )

        if not self.matches(exc_val):
            # Wrong type -> let the exception propagate unchanged; other
            # mismatches (match/check) raise with the recorded fail reason.
            if self._just_propagate:
                return False
            raise AssertionError(self._fail_reason)

        # Cast to narrow the exception type now that it's verified....
        # even though the TypeGuard in self.matches should be narrowing
        exc_info = cast(
            "tuple[type[BaseExcT_co_default], BaseExcT_co_default, types.TracebackType]",
            (exc_type, exc_val, exc_tb),
        )
        self.excinfo.fill_unfilled(exc_info)
        return True
731
+
732
+
733
+ @final
734
+ class RaisesGroup(AbstractRaises[BaseExceptionGroup[BaseExcT_co]]):
735
+ """
736
+ .. versionadded:: 8.4
737
+
738
+ Contextmanager for checking for an expected :exc:`ExceptionGroup`.
739
+ This works similar to :func:`pytest.raises`, but allows for specifying the structure of an :exc:`ExceptionGroup`.
740
+ :meth:`ExceptionInfo.group_contains` also tries to handle exception groups,
741
+ but it is very bad at checking that you *didn't* get unexpected exceptions.
742
+
743
+ The catching behaviour differs from :ref:`except* <except_star>`, being much
744
+ stricter about the structure by default.
745
+ By using ``allow_unwrapped=True`` and ``flatten_subgroups=True`` you can match
746
+ :ref:`except* <except_star>` fully when expecting a single exception.
747
+
748
+ :param args:
749
+ Any number of exception types, :class:`RaisesGroup` or :class:`RaisesExc`
750
+ to specify the exceptions contained in this exception.
751
+ All specified exceptions must be present in the raised group, *and no others*.
752
+
753
+ If you expect a variable number of exceptions you need to use
754
+ :func:`pytest.raises(ExceptionGroup) <pytest.raises>` and manually check
755
+ the contained exceptions. Consider making use of :meth:`RaisesExc.matches`.
756
+
757
+ It does not care about the order of the exceptions, so
758
+ ``RaisesGroup(ValueError, TypeError)``
759
+ is equivalent to
760
+ ``RaisesGroup(TypeError, ValueError)``.
761
+ :kwparam str | re.Pattern[str] | None match:
762
+ If specified, a string containing a regular expression,
763
+ or a regular expression object, that is tested against the string
764
+ representation of the exception group and its :pep:`678` `__notes__`
765
+ using :func:`re.search`.
766
+
767
+ To match a literal string that may contain :ref:`special characters
768
+ <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
769
+
770
+ Note that " (5 subgroups)" will be stripped from the ``repr`` before matching.
771
+ :kwparam Callable[[E], bool] check:
772
+ If specified, a callable that will be called with the group as a parameter
773
+ after successfully matching the expected exceptions. If it returns ``True``
774
+ it will be considered a match, if not it will be considered a failed match.
775
+ :kwparam bool allow_unwrapped:
776
+ If expecting a single exception or :class:`RaisesExc` it will match even
777
+ if the exception is not inside an exceptiongroup.
778
+
779
+ Using this together with ``match``, ``check`` or expecting multiple exceptions
780
+ will raise an error.
781
+ :kwparam bool flatten_subgroups:
782
+ "flatten" any groups inside the raised exception group, extracting all exceptions
783
+ inside any nested groups, before matching. Without this it expects you to
784
+ fully specify the nesting structure by passing :class:`RaisesGroup` as expected
785
+ parameter.
786
+
787
+ Examples::
788
+
789
+ with RaisesGroup(ValueError):
790
+ raise ExceptionGroup("", (ValueError(),))
791
+ # match
792
+ with RaisesGroup(
793
+ ValueError,
794
+ ValueError,
795
+ RaisesExc(TypeError, match="^expected int$"),
796
+ match="^my group$",
797
+ ):
798
+ raise ExceptionGroup(
799
+ "my group",
800
+ [
801
+ ValueError(),
802
+ TypeError("expected int"),
803
+ ValueError(),
804
+ ],
805
+ )
806
+ # check
807
+ with RaisesGroup(
808
+ KeyboardInterrupt,
809
+ match="^hello$",
810
+ check=lambda x: isinstance(x.__cause__, ValueError),
811
+ ):
812
+ raise BaseExceptionGroup("hello", [KeyboardInterrupt()]) from ValueError
813
+ # nested groups
814
+ with RaisesGroup(RaisesGroup(ValueError)):
815
+ raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),))
816
+
817
+ # flatten_subgroups
818
+ with RaisesGroup(ValueError, flatten_subgroups=True):
819
+ raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),))
820
+
821
+ # allow_unwrapped
822
+ with RaisesGroup(ValueError, allow_unwrapped=True):
823
+ raise ValueError
824
+
825
+
826
+ :meth:`RaisesGroup.matches` can also be used directly to check a standalone exception group.
827
+
828
+
829
+ The matching algorithm is greedy, which means cases such as this may fail::
830
+
831
+ with RaisesGroup(ValueError, RaisesExc(ValueError, match="hello")):
832
+ raise ExceptionGroup("", (ValueError("hello"), ValueError("goodbye")))
833
+
834
+ even though it generally does not care about the order of the exceptions in the group.
835
+ To avoid the above you should specify the first :exc:`ValueError` with a :class:`RaisesExc` as well.
836
+
837
+ .. note::
838
+ When raised exceptions don't match the expected ones, you'll get a detailed error
839
+ message explaining why. This includes ``repr(check)`` if set, which in Python can be
840
+ overly verbose, showing memory locations etc etc.
841
+
842
+ If installed and imported (in e.g. ``conftest.py``), the ``hypothesis`` library will
843
+ monkeypatch this output to provide shorter & more readable repr's.
844
+ """
845
+
846
+ # allow_unwrapped=True requires: singular exception, exception not being
847
+ # RaisesGroup instance, match is None, check is None
848
+ @overload
849
+ def __init__(
850
+ self,
851
+ expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co],
852
+ /,
853
+ *,
854
+ allow_unwrapped: Literal[True],
855
+ flatten_subgroups: bool = False,
856
+ ) -> None: ...
857
+
858
+ # flatten_subgroups = True also requires no nested RaisesGroup
859
+ @overload
860
+ def __init__(
861
+ self,
862
+ expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co],
863
+ /,
864
+ *other_exceptions: type[BaseExcT_co] | RaisesExc[BaseExcT_co],
865
+ flatten_subgroups: Literal[True],
866
+ match: str | Pattern[str] | None = None,
867
+ check: Callable[[BaseExceptionGroup[BaseExcT_co]], bool] | None = None,
868
+ ) -> None: ...
869
+
870
+ # simplify the typevars if possible (the following 3 are equivalent but go simpler->complicated)
871
+ # ... the first handles RaisesGroup[ValueError], the second RaisesGroup[ExceptionGroup[ValueError]],
872
+ # the third RaisesGroup[ValueError | ExceptionGroup[ValueError]].
873
+ # ... otherwise, we will get results like RaisesGroup[ValueError | ExceptionGroup[Never]] (I think)
874
+ # (technically correct but misleading)
875
+ @overload
876
+ def __init__(
877
+ self: RaisesGroup[ExcT_1],
878
+ expected_exception: type[ExcT_1] | RaisesExc[ExcT_1],
879
+ /,
880
+ *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1],
881
+ match: str | Pattern[str] | None = None,
882
+ check: Callable[[ExceptionGroup[ExcT_1]], bool] | None = None,
883
+ ) -> None: ...
884
+
885
+ @overload
886
+ def __init__(
887
+ self: RaisesGroup[ExceptionGroup[ExcT_2]],
888
+ expected_exception: RaisesGroup[ExcT_2],
889
+ /,
890
+ *other_exceptions: RaisesGroup[ExcT_2],
891
+ match: str | Pattern[str] | None = None,
892
+ check: Callable[[ExceptionGroup[ExceptionGroup[ExcT_2]]], bool] | None = None,
893
+ ) -> None: ...
894
+
895
    # The overloads below tie the argument types to the inferred type
    # parameter of `self`, e.g. RaisesGroup(ValueError) narrows to
    # RaisesGroup[ValueError] and nested RaisesGroup arguments produce
    # nested ExceptionGroup type parameters.
    @overload
    def __init__(
        self: RaisesGroup[ExcT_1 | ExceptionGroup[ExcT_2]],
        expected_exception: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2],
        /,
        *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2],
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[ExceptionGroup[ExcT_1 | ExceptionGroup[ExcT_2]]], bool] | None
        ) = None,
    ) -> None: ...

    # same as the above 3 but handling BaseException
    @overload
    def __init__(
        self: RaisesGroup[BaseExcT_1],
        expected_exception: type[BaseExcT_1] | RaisesExc[BaseExcT_1],
        /,
        *other_exceptions: type[BaseExcT_1] | RaisesExc[BaseExcT_1],
        match: str | Pattern[str] | None = None,
        check: Callable[[BaseExceptionGroup[BaseExcT_1]], bool] | None = None,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesGroup[BaseExceptionGroup[BaseExcT_2]],
        expected_exception: RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: RaisesGroup[BaseExcT_2],
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[BaseExceptionGroup[BaseExceptionGroup[BaseExcT_2]]], bool] | None
        ) = None,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]],
        expected_exception: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        match: str | Pattern[str] | None = None,
        check: (
            Callable[
                [BaseExceptionGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]]],
                bool,
            ]
            | None
        ) = None,
    ) -> None: ...

    def __init__(
        self: RaisesGroup[ExcT_1 | BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]],
        expected_exception: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        allow_unwrapped: bool = False,
        flatten_subgroups: bool = False,
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[BaseExceptionGroup[BaseExcT_1]], bool]
            | Callable[[ExceptionGroup[ExcT_1]], bool]
            | None
        ) = None,
    ):
        # The type hint on the `self` and `check` parameters uses different formats
        # that are *very* hard to reconcile while adhering to the overloads, so we cast
        # it to avoid an error when passing it to super().__init__
        check = cast(
            "Callable[[BaseExceptionGroup[ExcT_1|BaseExcT_1|BaseExceptionGroup[BaseExcT_2]]], bool]",
            check,
        )
        super().__init__(match=match, check=check)
        self.allow_unwrapped = allow_unwrapped
        self.flatten_subgroups: bool = flatten_subgroups
        # Flipped to True by _parse_excgroup when any expected entry is a
        # non-Exception BaseException (or a nested group containing one).
        self.is_baseexception = False

        # Validate mutually incompatible parameter combinations before
        # parsing the expected exceptions.
        if allow_unwrapped and other_exceptions:
            raise ValueError(
                "You cannot specify multiple exceptions with `allow_unwrapped=True.`"
                " If you want to match one of multiple possible exceptions you should"
                " use a `RaisesExc`."
                " E.g. `RaisesExc(check=lambda e: isinstance(e, (...)))`",
            )
        if allow_unwrapped and isinstance(expected_exception, RaisesGroup):
            raise ValueError(
                "`allow_unwrapped=True` has no effect when expecting a `RaisesGroup`."
                " You might want it in the expected `RaisesGroup`, or"
                " `flatten_subgroups=True` if you don't care about the structure.",
            )
        if allow_unwrapped and (match is not None or check is not None):
            raise ValueError(
                "`allow_unwrapped=True` bypasses the `match` and `check` parameters"
                " if the exception is unwrapped. If you intended to match/check the"
                " exception you should use a `RaisesExc` object. If you want to match/check"
                " the exceptiongroup when the exception *is* wrapped you need to"
                " do e.g. `if isinstance(exc.value, ExceptionGroup):"
                " assert RaisesGroup(...).matches(exc.value)` afterwards.",
            )

        self.expected_exceptions: tuple[
            type[BaseExcT_co] | RaisesExc[BaseExcT_co] | RaisesGroup[BaseException], ...
        ] = tuple(
            self._parse_excgroup(e, "a BaseException type, RaisesExc, or RaisesGroup")
            for e in (
                expected_exception,
                *other_exceptions,
            )
        )
1012
+
1013
    def _parse_excgroup(
        self,
        exc: (
            type[BaseExcT_co]
            | types.GenericAlias
            | RaisesExc[BaseExcT_1]
            | RaisesGroup[BaseExcT_2]
        ),
        expected: str,
    ) -> type[BaseExcT_co] | RaisesExc[BaseExcT_1] | RaisesGroup[BaseExcT_2]:
        """Validate one expected-exception argument passed to ``__init__``.

        Nested ``RaisesGroup``/``RaisesExc`` instances are marked as nested
        and propagate whether they expect a ``BaseException``; anything else
        is delegated to the parent class's ``_parse_exc``.

        :param exc: the expected-exception argument to validate.
        :param expected: description of valid argument kinds, used in errors.
        :raises ValueError: for a nested ``RaisesGroup`` when
            ``flatten_subgroups=True`` (a nested structure can never match a
            flattened group).
        :raises TypeError: for a tuple of exception types.
        """
        # verify exception type and set `self.is_baseexception`
        if isinstance(exc, RaisesGroup):
            if self.flatten_subgroups:
                raise ValueError(
                    "You cannot specify a nested structure inside a RaisesGroup with"
                    " `flatten_subgroups=True`. The parameter will flatten subgroups"
                    " in the raised exceptiongroup before matching, which would never"
                    " match a nested structure.",
                )
            self.is_baseexception |= exc.is_baseexception
            exc._nested = True
            return exc
        elif isinstance(exc, RaisesExc):
            self.is_baseexception |= exc.is_baseexception
            exc._nested = True
            return exc
        elif isinstance(exc, tuple):
            raise TypeError(
                f"expected exception must be {expected}, not {type(exc).__name__!r}.\n"
                "RaisesGroup does not support tuples of exception types when expecting one of "
                "several possible exception types like RaisesExc.\n"
                "If you meant to expect a group with multiple exceptions, list them as separate arguments."
            )
        else:
            # Plain exception type (or generic alias): parent handles it.
            return super()._parse_exc(exc, expected)
1048
+
1049
    @overload
    def __enter__(
        self: RaisesGroup[ExcT_1],
    ) -> ExceptionInfo[ExceptionGroup[ExcT_1]]: ...
    @overload
    def __enter__(
        self: RaisesGroup[BaseExcT_1],
    ) -> ExceptionInfo[BaseExceptionGroup[BaseExcT_1]]: ...

    def __enter__(self) -> ExceptionInfo[BaseExceptionGroup[BaseException]]:
        """Enter the context manager, returning a deferred ``ExceptionInfo``.

        The returned object is empty until ``__exit__`` validates the raised
        exception group and calls ``fill_unfilled`` on it.
        """
        self.excinfo: ExceptionInfo[BaseExceptionGroup[BaseExcT_co]] = (
            ExceptionInfo.for_later()
        )
        return self.excinfo
1063
+
1064
+ def __repr__(self) -> str:
1065
+ reqs = [
1066
+ e.__name__ if isinstance(e, type) else repr(e)
1067
+ for e in self.expected_exceptions
1068
+ ]
1069
+ if self.allow_unwrapped:
1070
+ reqs.append(f"allow_unwrapped={self.allow_unwrapped}")
1071
+ if self.flatten_subgroups:
1072
+ reqs.append(f"flatten_subgroups={self.flatten_subgroups}")
1073
+ if self.match is not None:
1074
+ # If no flags were specified, discard the redundant re.compile() here.
1075
+ reqs.append(f"match={_match_pattern(self.match)!r}")
1076
+ if self.check is not None:
1077
+ reqs.append(f"check={repr_callable(self.check)}")
1078
+ return f"RaisesGroup({', '.join(reqs)})"
1079
+
1080
+ def _unroll_exceptions(
1081
+ self,
1082
+ exceptions: Sequence[BaseException],
1083
+ ) -> Sequence[BaseException]:
1084
+ """Used if `flatten_subgroups=True`."""
1085
+ res: list[BaseException] = []
1086
+ for exc in exceptions:
1087
+ if isinstance(exc, BaseExceptionGroup):
1088
+ res.extend(self._unroll_exceptions(exc.exceptions))
1089
+
1090
+ else:
1091
+ res.append(exc)
1092
+ return res
1093
+
1094
    @overload
    def matches(
        self: RaisesGroup[ExcT_1],
        exception: BaseException | None,
    ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ...
    @overload
    def matches(
        self: RaisesGroup[BaseExcT_1],
        exception: BaseException | None,
    ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ...

    def matches(
        self,
        exception: BaseException | None,
    ) -> bool:
        """Check if an exception matches the requirements of this RaisesGroup.
        If it fails, `RaisesGroup.fail_reason` will be set.

        Example::

            with pytest.raises(TypeError) as excinfo:
                ...
            assert RaisesGroup(ValueError).matches(excinfo.value.__cause__)
            # the above line is equivalent to
            myexc = excinfo.value.__cause__
            assert isinstance(myexc, BaseExceptionGroup)
            assert len(myexc.exceptions) == 1
            assert isinstance(myexc.exceptions[0], ValueError)
        """
        self._fail_reason = None
        if exception is None:
            self._fail_reason = "exception is None"
            return False
        if not isinstance(exception, BaseExceptionGroup):
            # we opt to only print type of the exception here, as the repr would
            # likely be quite long
            not_group_msg = f"`{type(exception).__name__}()` is not an exception group"
            if len(self.expected_exceptions) > 1:
                self._fail_reason = not_group_msg
                return False
            # if we have 1 expected exception, check if it would work even if
            # allow_unwrapped is not set
            res = self._check_expected(self.expected_exceptions[0], exception)
            if res is None and self.allow_unwrapped:
                return True

            # Build the most helpful failure message we can: suggest
            # allow_unwrapped if the bare exception would have matched.
            if res is None:
                self._fail_reason = (
                    f"{not_group_msg}, but would match with `allow_unwrapped=True`"
                )
            elif self.allow_unwrapped:
                self._fail_reason = res
            else:
                self._fail_reason = not_group_msg
            return False

        actual_exceptions: Sequence[BaseException] = exception.exceptions
        if self.flatten_subgroups:
            actual_exceptions = self._unroll_exceptions(actual_exceptions)

        if not self._check_match(exception):
            self._fail_reason = cast(str, self._fail_reason)
            old_reason = self._fail_reason
            # If the (single) contained exception would have matched the
            # pattern, suggest wrapping the expectation in RaisesExc so the
            # match applies to the leaf exception instead of the group.
            if (
                len(actual_exceptions) == len(self.expected_exceptions) == 1
                and isinstance(expected := self.expected_exceptions[0], type)
                and isinstance(actual := actual_exceptions[0], expected)
                and self._check_match(actual)
            ):
                assert self.match is not None, "can't be None if _check_match failed"
                assert self._fail_reason is old_reason is not None
                self._fail_reason += (
                    f"\n"
                    f" but matched the expected `{self._repr_expected(expected)}`.\n"
                    f" You might want "
                    f"`RaisesGroup(RaisesExc({expected.__name__}, match={_match_pattern(self.match)!r}))`"
                )
            else:
                self._fail_reason = old_reason
            return False

        # do the full check on expected exceptions
        if not self._check_exceptions(
            exception,
            actual_exceptions,
        ):
            self._fail_reason = cast(str, self._fail_reason)
            assert self._fail_reason is not None
            old_reason = self._fail_reason
            # if we're not expecting a nested structure, and there is one, do a second
            # pass where we try flattening it
            if (
                not self.flatten_subgroups
                and not any(
                    isinstance(e, RaisesGroup) for e in self.expected_exceptions
                )
                and any(isinstance(e, BaseExceptionGroup) for e in actual_exceptions)
                and self._check_exceptions(
                    exception,
                    self._unroll_exceptions(exception.exceptions),
                )
            ):
                # only indent if it's a single-line reason. In a multi-line there's already
                # indented lines that this does not belong to.
                indent = " " if "\n" not in self._fail_reason else ""
                self._fail_reason = (
                    old_reason
                    + f"\n{indent}Did you mean to use `flatten_subgroups=True`?"
                )
            else:
                self._fail_reason = old_reason
            return False

        # Only run `self.check` once we know `exception` is of the correct type.
        if not self._check_check(exception):
            reason = (
                cast(str, self._fail_reason) + f" on the {type(exception).__name__}"
            )
            # If `check` would pass on the leaf exception, suggest moving it
            # into a RaisesExc.
            if (
                len(actual_exceptions) == len(self.expected_exceptions) == 1
                and isinstance(expected := self.expected_exceptions[0], type)
                # we explicitly break typing here :)
                and self._check_check(actual_exceptions[0])  # type: ignore[arg-type]
            ):
                self._fail_reason = reason + (
                    f", but did return True for the expected {self._repr_expected(expected)}."
                    f" You might want RaisesGroup(RaisesExc({expected.__name__}, check=<...>))"
                )
            else:
                self._fail_reason = reason
            return False

        return True
1227
+
1228
+ @staticmethod
1229
+ def _check_expected(
1230
+ expected_type: (
1231
+ type[BaseException] | RaisesExc[BaseException] | RaisesGroup[BaseException]
1232
+ ),
1233
+ exception: BaseException,
1234
+ ) -> str | None:
1235
+ """Helper method for `RaisesGroup.matches` and `RaisesGroup._check_exceptions`
1236
+ to check one of potentially several expected exceptions."""
1237
+ if isinstance(expected_type, type):
1238
+ return _check_raw_type(expected_type, exception)
1239
+ res = expected_type.matches(exception)
1240
+ if res:
1241
+ return None
1242
+ assert expected_type.fail_reason is not None
1243
+ if expected_type.fail_reason.startswith("\n"):
1244
+ return f"\n{expected_type!r}: {indent(expected_type.fail_reason, ' ')}"
1245
+ return f"{expected_type!r}: {expected_type.fail_reason}"
1246
+
1247
+ @staticmethod
1248
+ def _repr_expected(e: type[BaseException] | AbstractRaises[BaseException]) -> str:
1249
+ """Get the repr of an expected type/RaisesExc/RaisesGroup, but we only want
1250
+ the name if it's a type"""
1251
+ if isinstance(e, type):
1252
+ return _exception_type_name(e)
1253
+ return repr(e)
1254
+
1255
+ @overload
1256
+ def _check_exceptions(
1257
+ self: RaisesGroup[ExcT_1],
1258
+ _exception: Exception,
1259
+ actual_exceptions: Sequence[Exception],
1260
+ ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ...
1261
+ @overload
1262
+ def _check_exceptions(
1263
+ self: RaisesGroup[BaseExcT_1],
1264
+ _exception: BaseException,
1265
+ actual_exceptions: Sequence[BaseException],
1266
+ ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ...
1267
+
1268
+ def _check_exceptions(
1269
+ self,
1270
+ _exception: BaseException,
1271
+ actual_exceptions: Sequence[BaseException],
1272
+ ) -> bool:
1273
+ """Helper method for RaisesGroup.matches that attempts to pair up expected and actual exceptions"""
1274
+ # The _exception parameter is not used, but necessary for the TypeGuard
1275
+
1276
+ # full table with all results
1277
+ results = ResultHolder(self.expected_exceptions, actual_exceptions)
1278
+
1279
+ # (indexes of) raised exceptions that haven't (yet) found an expected
1280
+ remaining_actual = list(range(len(actual_exceptions)))
1281
+ # (indexes of) expected exceptions that haven't found a matching raised
1282
+ failed_expected: list[int] = []
1283
+ # successful greedy matches
1284
+ matches: dict[int, int] = {}
1285
+
1286
+ # loop over expected exceptions first to get a more predictable result
1287
+ for i_exp, expected in enumerate(self.expected_exceptions):
1288
+ for i_rem in remaining_actual:
1289
+ res = self._check_expected(expected, actual_exceptions[i_rem])
1290
+ results.set_result(i_exp, i_rem, res)
1291
+ if res is None:
1292
+ remaining_actual.remove(i_rem)
1293
+ matches[i_exp] = i_rem
1294
+ break
1295
+ else:
1296
+ failed_expected.append(i_exp)
1297
+
1298
+ # All exceptions matched up successfully
1299
+ if not remaining_actual and not failed_expected:
1300
+ return True
1301
+
1302
+ # in case of a single expected and single raised we simplify the output
1303
+ if 1 == len(actual_exceptions) == len(self.expected_exceptions):
1304
+ assert not matches
1305
+ self._fail_reason = res
1306
+ return False
1307
+
1308
+ # The test case is failing, so we can do a slow and exhaustive check to find
1309
+ # duplicate matches etc that will be helpful in debugging
1310
+ for i_exp, expected in enumerate(self.expected_exceptions):
1311
+ for i_actual, actual in enumerate(actual_exceptions):
1312
+ if results.has_result(i_exp, i_actual):
1313
+ continue
1314
+ results.set_result(
1315
+ i_exp, i_actual, self._check_expected(expected, actual)
1316
+ )
1317
+
1318
+ successful_str = (
1319
+ f"{len(matches)} matched exception{'s' if len(matches) > 1 else ''}. "
1320
+ if matches
1321
+ else ""
1322
+ )
1323
+
1324
+ # all expected were found
1325
+ if not failed_expected and results.no_match_for_actual(remaining_actual):
1326
+ self._fail_reason = (
1327
+ f"{successful_str}Unexpected exception(s):"
1328
+ f" {[actual_exceptions[i] for i in remaining_actual]!r}"
1329
+ )
1330
+ return False
1331
+ # all raised exceptions were expected
1332
+ if not remaining_actual and results.no_match_for_expected(failed_expected):
1333
+ no_match_for_str = ", ".join(
1334
+ self._repr_expected(self.expected_exceptions[i])
1335
+ for i in failed_expected
1336
+ )
1337
+ self._fail_reason = f"{successful_str}Too few exceptions raised, found no match for: [{no_match_for_str}]"
1338
+ return False
1339
+
1340
+ # if there's only one remaining and one failed, and the unmatched didn't match anything else,
1341
+ # we elect to only print why the remaining and the failed didn't match.
1342
+ if (
1343
+ 1 == len(remaining_actual) == len(failed_expected)
1344
+ and results.no_match_for_actual(remaining_actual)
1345
+ and results.no_match_for_expected(failed_expected)
1346
+ ):
1347
+ self._fail_reason = f"{successful_str}{results.get_result(failed_expected[0], remaining_actual[0])}"
1348
+ return False
1349
+
1350
+ # there's both expected and raised exceptions without matches
1351
+ s = ""
1352
+ if matches:
1353
+ s += f"\n{successful_str}"
1354
+ indent_1 = " " * 2
1355
+ indent_2 = " " * 4
1356
+
1357
+ if not remaining_actual:
1358
+ s += "\nToo few exceptions raised!"
1359
+ elif not failed_expected:
1360
+ s += "\nUnexpected exception(s)!"
1361
+
1362
+ if failed_expected:
1363
+ s += "\nThe following expected exceptions did not find a match:"
1364
+ rev_matches = {v: k for k, v in matches.items()}
1365
+ for i_failed in failed_expected:
1366
+ s += (
1367
+ f"\n{indent_1}{self._repr_expected(self.expected_exceptions[i_failed])}"
1368
+ )
1369
+ for i_actual, actual in enumerate(actual_exceptions):
1370
+ if results.get_result(i_exp, i_actual) is None:
1371
+ # we print full repr of match target
1372
+ s += (
1373
+ f"\n{indent_2}It matches {backquote(repr(actual))} which was paired with "
1374
+ + backquote(
1375
+ self._repr_expected(
1376
+ self.expected_exceptions[rev_matches[i_actual]]
1377
+ )
1378
+ )
1379
+ )
1380
+
1381
+ if remaining_actual:
1382
+ s += "\nThe following raised exceptions did not find a match"
1383
+ for i_actual in remaining_actual:
1384
+ s += f"\n{indent_1}{actual_exceptions[i_actual]!r}:"
1385
+ for i_exp, expected in enumerate(self.expected_exceptions):
1386
+ res = results.get_result(i_exp, i_actual)
1387
+ if i_exp in failed_expected:
1388
+ assert res is not None
1389
+ if res[0] != "\n":
1390
+ s += "\n"
1391
+ s += indent(res, indent_2)
1392
+ if res is None:
1393
+ # we print full repr of match target
1394
+ s += (
1395
+ f"\n{indent_2}It matches {backquote(self._repr_expected(expected))} "
1396
+ f"which was paired with {backquote(repr(actual_exceptions[matches[i_exp]]))}"
1397
+ )
1398
+
1399
+ if len(self.expected_exceptions) == len(actual_exceptions) and possible_match(
1400
+ results
1401
+ ):
1402
+ s += (
1403
+ "\nThere exist a possible match when attempting an exhaustive check,"
1404
+ " but RaisesGroup uses a greedy algorithm. "
1405
+ "Please make your expected exceptions more stringent with `RaisesExc` etc"
1406
+ " so the greedy algorithm can function."
1407
+ )
1408
+ self._fail_reason = s
1409
+ return False
1410
+
1411
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool:
        """Validate the raised exception group against the expectation.

        Calls ``fail()`` (which raises) when nothing was raised or the
        raised group does not match; otherwise fills ``self.excinfo`` and
        suppresses the exception by returning True.
        """
        __tracebackhide__ = True
        if exc_type is None:
            # fail() raises, so execution never continues past this branch.
            fail(f"DID NOT RAISE any exception, expected `{self.expected_type()}`")

        assert self.excinfo is not None, (
            "Internal error - should have been constructed in __enter__"
        )

        # group_str is the only thing that differs between RaisesExc and RaisesGroup...
        # I might just scrap it? Or make it part of fail_reason
        group_str = (
            "(group)"
            if self.allow_unwrapped and not issubclass(exc_type, BaseExceptionGroup)
            else "group"
        )

        if not self.matches(exc_val):
            fail(f"Raised exception {group_str} did not match: {self._fail_reason}")

        # Cast to narrow the exception type now that it's verified....
        # even though the TypeGuard in self.matches should be narrowing
        exc_info = cast(
            "tuple[type[BaseExceptionGroup[BaseExcT_co]], BaseExceptionGroup[BaseExcT_co], types.TracebackType]",
            (exc_type, exc_val, exc_tb),
        )
        self.excinfo.fill_unfilled(exc_info)
        return True
1444
+
1445
+ def expected_type(self) -> str:
1446
+ subexcs = []
1447
+ for e in self.expected_exceptions:
1448
+ if isinstance(e, RaisesExc):
1449
+ subexcs.append(repr(e))
1450
+ elif isinstance(e, RaisesGroup):
1451
+ subexcs.append(e.expected_type())
1452
+ elif isinstance(e, type):
1453
+ subexcs.append(e.__name__)
1454
+ else: # pragma: no cover
1455
+ raise AssertionError("unknown type")
1456
+ group_type = "Base" if self.is_baseexception else ""
1457
+ return f"{group_type}ExceptionGroup({', '.join(subexcs)})"
1458
+
1459
+
1460
@final
class NotChecked:
    """Sentinel class marking an (expected, actual) pair not yet evaluated in ResultHolder."""


class ResultHolder:
    """Container for results of checking exceptions.

    Stores one row per actual exception and one column per expected entry;
    each cell is ``None`` (match), a failure string, or the ``NotChecked``
    sentinel. Used in RaisesGroup._check_exceptions and possible_match.
    """

    def __init__(
        self,
        expected_exceptions: tuple[
            type[BaseException] | AbstractRaises[BaseException], ...
        ],
        actual_exceptions: Sequence[BaseException],
    ) -> None:
        # Every cell starts as the sentinel until a result is recorded.
        n_cols = len(expected_exceptions)
        self.results: list[list[str | type[NotChecked] | None]] = [
            [NotChecked] * n_cols for _ in actual_exceptions
        ]

    def set_result(self, expected: int, actual: int, result: str | None) -> None:
        """Record the outcome of checking ``expected`` against ``actual``."""
        self.results[actual][expected] = result

    def get_result(self, expected: int, actual: int) -> str | None:
        """Return a previously recorded outcome; the pair must have been checked."""
        cell = self.results[actual][expected]
        assert cell is not NotChecked
        # mypy doesn't support identity checking against anything but None
        return cell  # type: ignore[return-value]

    def has_result(self, expected: int, actual: int) -> bool:
        """True if the (expected, actual) pair has been checked already."""
        return self.results[actual][expected] is not NotChecked

    def no_match_for_expected(self, expected: list[int]) -> bool:
        """True if none of the given expected entries matched any actual exception."""
        for col in expected:
            for row in self.results:
                assert row[col] is not NotChecked
                if row[col] is None:
                    return False
        return True

    def no_match_for_actual(self, actual: list[int]) -> bool:
        """True if none of the given actual exceptions matched any expected entry."""
        for row_index in actual:
            for cell in self.results[row_index]:
                assert cell is not NotChecked
                if cell is None:
                    return False
        return True
1508
+
1509
+
1510
def possible_match(results: ResultHolder, used: set[int] | None = None) -> bool:
    """Return True if some perfect pairing of actual to expected exceptions exists.

    Depth-first backtracking over the result table: row ``len(used)`` is the
    next actual exception to place, and ``used`` holds the expected-entry
    columns already consumed by earlier rows.
    """
    if used is None:
        used = set()
    row = len(used)
    if row == len(results.results):
        # Every actual exception was paired with a distinct expected entry.
        return True
    for column, outcome in enumerate(results.results[row]):
        if outcome is None and column not in used:
            if possible_match(results, used | {column}):
                return True
    return False
archive/Axiovorax/.venv/Lib/site-packages/_pytest/recwarn.py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Record warnings during test function execution."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from collections.abc import Callable
7
+ from collections.abc import Generator
8
+ from collections.abc import Iterator
9
+ from pprint import pformat
10
+ import re
11
+ from types import TracebackType
12
+ from typing import Any
13
+ from typing import final
14
+ from typing import overload
15
+ from typing import TYPE_CHECKING
16
+ from typing import TypeVar
17
+
18
+
19
+ if TYPE_CHECKING:
20
+ from typing_extensions import Self
21
+
22
+ import warnings
23
+
24
+ from _pytest.deprecated import check_ispytest
25
+ from _pytest.fixtures import fixture
26
+ from _pytest.outcomes import Exit
27
+ from _pytest.outcomes import fail
28
+
29
+
30
+ T = TypeVar("T")
31
+
32
+
33
@fixture
def recwarn() -> Generator[WarningsRecorder]:
    """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.

    See :ref:`warnings` for information on warning categories.
    """
    wrec = WarningsRecorder(_ispytest=True)
    with wrec:
        # Reset to the "default" filter inside the recorder's context so the
        # test sees standard warning behavior regardless of outer filters.
        warnings.simplefilter("default")
        yield wrec
43
+
44
+
45
@overload
def deprecated_call(
    *, match: str | re.Pattern[str] | None = ...
) -> WarningsRecorder: ...


@overload
def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: ...


def deprecated_call(
    func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any
) -> WarningsRecorder | Any:
    """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``.

    This function can be used as a context manager::

        >>> import warnings
        >>> def api_call_v2():
        ...     warnings.warn('use v3 of this api', DeprecationWarning)
        ...     return 200

        >>> import pytest
        >>> with pytest.deprecated_call():
        ...     assert api_call_v2() == 200

    It can also be used by passing a function and ``*args`` and ``**kwargs``,
    in which case it will ensure calling ``func(*args, **kwargs)`` produces one of
    the warnings types above. The return value is the return value of the function.

    In the context manager form you may use the keyword argument ``match`` to assert
    that the warning matches a text or regex.

    The context manager produces a list of :class:`warnings.WarningMessage` objects,
    one for each warning raised.
    """
    __tracebackhide__ = True
    # Thin wrapper over `warns`: in the direct-call form, re-prepend the
    # callable so `warns` receives it as the first positional argument.
    deprecation_categories = (
        DeprecationWarning,
        PendingDeprecationWarning,
        FutureWarning,
    )
    call_args = args if func is None else (func, *args)
    return warns(deprecation_categories, *call_args, **kwargs)
87
+
88
+
89
@overload
def warns(
    expected_warning: type[Warning] | tuple[type[Warning], ...] = ...,
    *,
    match: str | re.Pattern[str] | None = ...,
) -> WarningsChecker: ...


@overload
def warns(
    expected_warning: type[Warning] | tuple[type[Warning], ...],
    func: Callable[..., T],
    *args: Any,
    **kwargs: Any,
) -> T: ...


def warns(
    expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning,
    *args: Any,
    match: str | re.Pattern[str] | None = None,
    **kwargs: Any,
) -> WarningsChecker | Any:
    r"""Assert that code raises a particular class of warning.

    Specifically, the parameter ``expected_warning`` can be a warning class or tuple
    of warning classes, and the code inside the ``with`` block must issue at least one
    warning of that class or classes.

    This helper produces a list of :class:`warnings.WarningMessage` objects, one for
    each warning emitted (regardless of whether it is an ``expected_warning`` or not).
    Since pytest 8.0, unmatched warnings are also re-emitted when the context closes.

    This function can be used as a context manager::

        >>> import pytest
        >>> with pytest.warns(RuntimeWarning):
        ...     warnings.warn("my warning", RuntimeWarning)

    In the context manager form you may use the keyword argument ``match`` to assert
    that the warning matches a text or regex::

        >>> with pytest.warns(UserWarning, match='must be 0 or None'):
        ...     warnings.warn("value must be 0 or None", UserWarning)

        >>> with pytest.warns(UserWarning, match=r'must be \d+$'):
        ...     warnings.warn("value must be 42", UserWarning)

        >>> with pytest.warns(UserWarning):  # catch re-emitted warning
        ...     with pytest.warns(UserWarning, match=r'must be \d+$'):
        ...         warnings.warn("this is not here", UserWarning)
        Traceback (most recent call last):
        ...
        Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted...

    **Using with** ``pytest.mark.parametrize``

    When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests
    such that some runs raise a warning and others do not.

    This could be achieved in the same way as with exceptions, see
    :ref:`parametrizing_conditional_raising` for an example.

    """
    __tracebackhide__ = True
    if not args:
        # Context-manager form. Reject stray keyword arguments, which are
        # almost always a mistaken attempt at the direct-call form.
        if kwargs:
            argnames = ", ".join(sorted(kwargs))
            raise TypeError(
                f"Unexpected keyword arguments passed to pytest.warns: {argnames}"
                "\nUse context-manager form instead?"
            )
        return WarningsChecker(expected_warning, match_expr=match, _ispytest=True)
    else:
        # Direct-call form: warns(ExpectedWarning, func, *args, **kwargs)
        # runs func under the checker and returns its result.
        func = args[0]
        if not callable(func):
            raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
        with WarningsChecker(expected_warning, _ispytest=True):
            return func(*args[1:], **kwargs)
168
+
169
+
170
class WarningsRecorder(warnings.catch_warnings):  # type:ignore[type-arg]
    """A context manager to record raised warnings.

    Each recorded warning is an instance of :class:`warnings.WarningMessage`.

    Adapted from `warnings.catch_warnings`.

    .. note::
        ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
        differently; see :ref:`ensuring_function_triggers`.

    """

    def __init__(self, *, _ispytest: bool = False) -> None:
        check_ispytest(_ispytest)
        super().__init__(record=True)
        self._entered = False
        self._list: list[warnings.WarningMessage] = []

    @property
    def list(self) -> list[warnings.WarningMessage]:
        """The list of recorded warnings."""
        return self._list

    def __getitem__(self, i: int) -> warnings.WarningMessage:
        """Get a recorded warning by index."""
        return self._list[i]

    def __iter__(self) -> Iterator[warnings.WarningMessage]:
        """Iterate through the recorded warnings."""
        return iter(self._list)

    def __len__(self) -> int:
        """The number of recorded warnings."""
        return len(self._list)

    def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage:
        """Pop the first recorded warning which is an instance of ``cls``,
        but not an instance of a child class of any other match.
        Raises ``AssertionError`` if there is no match.
        """
        candidate: int | None = None
        for idx, recorded in enumerate(self._list):
            if recorded.category == cls:
                # An exact category match wins immediately.
                return self._list.pop(idx)
            if not issubclass(recorded.category, cls):
                continue
            # Prefer the most derived subclass seen so far.
            if candidate is None or not issubclass(
                recorded.category, self._list[candidate].category
            ):
                candidate = idx
        if candidate is not None:
            return self._list.pop(candidate)
        __tracebackhide__ = True
        raise AssertionError(f"{cls!r} not found in warning list")

    def clear(self) -> None:
        """Clear the list of recorded warnings."""
        self._list.clear()

    def __enter__(self) -> Self:
        if self._entered:
            __tracebackhide__ = True
            raise RuntimeError(f"Cannot enter {self!r} twice")
        recorded = super().__enter__()
        # With record=True the base class returns the backing list.
        assert recorded is not None
        self._list = recorded
        warnings.simplefilter("always")
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if not self._entered:
            __tracebackhide__ = True
            raise RuntimeError(f"Cannot exit {self!r} without entering first")

        super().__exit__(exc_type, exc_val, exc_tb)

        # Built-in catch_warnings does not reset entered state so we do it
        # manually here for this context manager to become reusable.
        self._entered = False
255
+
256
+
257
@final
class WarningsChecker(WarningsRecorder):
    """A `WarningsRecorder` that additionally asserts, on exit, that a warning
    of the expected category (and optionally matching ``match_expr``) was
    emitted. Backs `pytest.warns`.
    """

    def __init__(
        self,
        expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning,
        match_expr: str | re.Pattern[str] | None = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        check_ispytest(_ispytest)
        super().__init__(_ispytest=True)

        # Normalize expected_warning to a tuple, validating that every entry
        # is a Warning subclass.
        msg = "exceptions must be derived from Warning, not %s"
        if isinstance(expected_warning, tuple):
            for exc in expected_warning:
                if not issubclass(exc, Warning):
                    raise TypeError(msg % type(exc))
            expected_warning_tup = expected_warning
        elif isinstance(expected_warning, type) and issubclass(
            expected_warning, Warning
        ):
            expected_warning_tup = (expected_warning,)
        else:
            raise TypeError(msg % type(expected_warning))

        self.expected_warning = expected_warning_tup
        self.match_expr = match_expr

    def matches(self, warning: warnings.WarningMessage) -> bool:
        """Return True if ``warning`` has an expected category and, when a
        match expression is set, its message matches it."""
        assert self.expected_warning is not None
        return issubclass(warning.category, self.expected_warning) and bool(
            self.match_expr is None or re.search(self.match_expr, str(warning.message))
        )

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """Stop recording, then fail if no recorded warning matched.

        Unmatched warnings are re-emitted regardless of the outcome.
        """
        super().__exit__(exc_type, exc_val, exc_tb)

        __tracebackhide__ = True

        # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within
        # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed
        # when the warning doesn't happen. Control-flow exceptions should always
        # propagate.
        if exc_val is not None and (
            not isinstance(exc_val, Exception)
            # Exit is an Exception, not a BaseException, for some reason.
            or isinstance(exc_val, Exit)
        ):
            return

        def found_str() -> str:
            # Human-readable dump of every recorded warning message.
            return pformat([record.message for record in self], indent=2)

        try:
            if not any(issubclass(w.category, self.expected_warning) for w in self):
                fail(
                    f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n"
                    f" Emitted warnings: {found_str()}."
                )
            elif not any(self.matches(w) for w in self):
                fail(
                    f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n"
                    f" Regex: {self.match_expr}\n"
                    f" Emitted warnings: {found_str()}."
                )
        finally:
            # Whether or not any warnings matched, we want to re-emit all unmatched warnings.
            for w in self:
                if not self.matches(w):
                    warnings.warn_explicit(
                        message=w.message,
                        category=w.category,
                        filename=w.filename,
                        lineno=w.lineno,
                        module=w.__module__,
                        source=w.source,
                    )

        # Currently in Python it is possible to pass other types than an
        # `str` message when creating `Warning` instances, however this
        # causes an exception when :func:`warnings.filterwarnings` is used
        # to filter those warnings. See
        # https://github.com/python/cpython/issues/103577 for a discussion.
        # While this can be considered a bug in CPython, we put guards in
        # pytest as the error message produced without this check in place
        # is confusing (#10865).
        for w in self:
            if type(w.message) is not UserWarning:
                # If the warning was of an incorrect type then `warnings.warn()`
                # creates a UserWarning. Any other warning must have been specified
                # explicitly.
                continue
            if not w.message.args:
                # UserWarning() without arguments must have been specified explicitly.
                continue
            msg = w.message.args[0]
            if isinstance(msg, str):
                continue
            # It's possible that UserWarning was explicitly specified, and
            # its first argument was not a string. But that case can't be
            # distinguished from an invalid type.
            raise TypeError(
                f"Warning must be str or Warning, got {msg!r} (type {type(msg).__name__})"
            )
archive/Axiovorax/.venv/Lib/site-packages/_pytest/reports.py ADDED
@@ -0,0 +1,637 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+
4
+ from collections.abc import Iterable
5
+ from collections.abc import Iterator
6
+ from collections.abc import Mapping
7
+ from collections.abc import Sequence
8
+ import dataclasses
9
+ from io import StringIO
10
+ import os
11
+ from pprint import pprint
12
+ from typing import Any
13
+ from typing import cast
14
+ from typing import final
15
+ from typing import Literal
16
+ from typing import NoReturn
17
+ from typing import TYPE_CHECKING
18
+
19
+ from _pytest._code.code import ExceptionChainRepr
20
+ from _pytest._code.code import ExceptionInfo
21
+ from _pytest._code.code import ExceptionRepr
22
+ from _pytest._code.code import ReprEntry
23
+ from _pytest._code.code import ReprEntryNative
24
+ from _pytest._code.code import ReprExceptionInfo
25
+ from _pytest._code.code import ReprFileLocation
26
+ from _pytest._code.code import ReprFuncArgs
27
+ from _pytest._code.code import ReprLocals
28
+ from _pytest._code.code import ReprTraceback
29
+ from _pytest._code.code import TerminalRepr
30
+ from _pytest._io import TerminalWriter
31
+ from _pytest.config import Config
32
+ from _pytest.nodes import Collector
33
+ from _pytest.nodes import Item
34
+ from _pytest.outcomes import fail
35
+ from _pytest.outcomes import skip
36
+
37
+
38
+ if TYPE_CHECKING:
39
+ from typing_extensions import Self
40
+
41
+ from _pytest.runner import CallInfo
42
+
43
+
44
def getworkerinfoline(node):
    """Return a one-line description of an xdist worker node, cached on the node.

    The line has the form ``[<id>] <platform> -- Python <version> <executable>``
    and is built from ``node.workerinfo`` the first time it is requested.
    """
    cached = getattr(node, "_workerinfocache", None)
    if cached is not None:
        return cached
    info = node.workerinfo
    major, minor, micro = info["version_info"][:3]
    line = (
        f"[{info['id']}] {info['sysplatform']} -- "
        f"Python {major}.{minor}.{micro} {info['executable']}"
    )
    # Memoize on the node so repeated reports reuse the same string.
    node._workerinfocache = line
    return line
54
+
55
+
56
class BaseReport:
    """Base class for pytest report objects.

    Carries the outcome, an optional failure representation (``longrepr``)
    and the captured-output sections of a single report.  Arbitrary extra
    attributes passed to ``__init__`` are stored directly in the instance
    ``__dict__``.
    """

    # Report phase ("setup", "call", "teardown", "collect"), or None.
    when: str | None
    # (fspath, lineno, domain) of the reported node, if known.
    location: tuple[str, int | None, str] | None
    # None or a failure representation: exception info, a
    # (path, lineno, message) tuple, a plain string, or a TerminalRepr.
    longrepr: (
        None | ExceptionInfo[BaseException] | tuple[str, int, str] | str | TerminalRepr
    )
    # (heading, content) pairs with captured output for this report.
    sections: list[tuple[str, str]]
    # Normalized node id of the reported item/collector.
    nodeid: str
    outcome: Literal["passed", "failed", "skipped"]

    def __init__(self, **kw: Any) -> None:
        self.__dict__.update(kw)

    if TYPE_CHECKING:
        # Can have arbitrary fields given to __init__().
        def __getattr__(self, key: str) -> Any: ...

    def toterminal(self, out: TerminalWriter) -> None:
        """Write this report to the terminal writer *out*.

        Emits the xdist worker info line first (when a ``node`` attribute is
        present), then the failure representation, if any.
        """
        if hasattr(self, "node"):
            # Set when running under pytest-xdist: identify the worker.
            worker_info = getworkerinfoline(self.node)
            if worker_info:
                out.line(worker_info)

        longrepr = self.longrepr
        if longrepr is None:
            # Nothing further to render for this report.
            return

        if hasattr(longrepr, "toterminal"):
            # TerminalRepr-like objects know how to render themselves.
            longrepr_terminal = cast(TerminalRepr, longrepr)
            longrepr_terminal.toterminal(out)
        else:
            try:
                s = str(longrepr)
            except UnicodeEncodeError:
                s = "<unprintable longrepr>"
            out.line(s)

    def get_sections(self, prefix: str) -> Iterator[tuple[str, str]]:
        """Yield ``(prefix, content)`` for every section whose name starts with *prefix*.

        Note that the *prefix*, not the full section name, is yielded.
        """
        for name, content in self.sections:
            if name.startswith(prefix):
                yield prefix, content

    @property
    def longreprtext(self) -> str:
        """Read-only property that returns the full string representation of
        ``longrepr``.

        .. versionadded:: 3.0
        """
        file = StringIO()
        tw = TerminalWriter(file)
        # Disable markup so the result is plain text.
        tw.hasmarkup = False
        self.toterminal(tw)
        exc = file.getvalue()
        return exc.strip()

    @property
    def caplog(self) -> str:
        """Return captured log lines, if log capturing is enabled.

        .. versionadded:: 3.5
        """
        return "\n".join(
            content for (prefix, content) in self.get_sections("Captured log")
        )

    @property
    def capstdout(self) -> str:
        """Return captured text from stdout, if capturing is enabled.

        .. versionadded:: 3.0
        """
        return "".join(
            content for (prefix, content) in self.get_sections("Captured stdout")
        )

    @property
    def capstderr(self) -> str:
        """Return captured text from stderr, if capturing is enabled.

        .. versionadded:: 3.0
        """
        return "".join(
            content for (prefix, content) in self.get_sections("Captured stderr")
        )

    @property
    def passed(self) -> bool:
        """Whether the outcome is passed."""
        return self.outcome == "passed"

    @property
    def failed(self) -> bool:
        """Whether the outcome is failed."""
        return self.outcome == "failed"

    @property
    def skipped(self) -> bool:
        """Whether the outcome is skipped."""
        return self.outcome == "skipped"

    @property
    def fspath(self) -> str:
        """The path portion of the reported node, as a string."""
        return self.nodeid.split("::")[0]

    @property
    def count_towards_summary(self) -> bool:
        """**Experimental** Whether this report should be counted towards the
        totals shown at the end of the test session: "1 passed, 1 failure, etc".

        .. note::

            This function is considered **experimental**, so beware that it is subject to changes
            even in patch releases.
        """
        return True

    @property
    def head_line(self) -> str | None:
        """**Experimental** The head line shown with longrepr output for this
        report, more commonly during traceback representation during
        failures::

            ________ Test.foo ________


        In the example above, the head_line is "Test.foo".

        .. note::

            This function is considered **experimental**, so beware that it is subject to changes
            even in patch releases.
        """
        if self.location is not None:
            fspath, lineno, domain = self.location
            # Only the domain part is used for the head line.
            return domain
        return None

    def _get_verbose_word_with_markup(
        self, config: Config, default_markup: Mapping[str, bool]
    ) -> tuple[str, Mapping[str, bool]]:
        """Return the (verbose word, markup) pair for this report's status.

        Asks the ``pytest_report_teststatus`` hook; plugins may return either
        a plain string or a ``(word, markup)`` pair.  Fails with an internal
        error message for any other shape.
        """
        _category, _short, verbose = config.hook.pytest_report_teststatus(
            report=self, config=config
        )

        if isinstance(verbose, str):
            # Plain word: fall back to the caller-provided markup.
            return verbose, default_markup

        if isinstance(verbose, Sequence) and len(verbose) == 2:
            word, markup = verbose
            if isinstance(word, str) and isinstance(markup, Mapping):
                return word, markup

        fail(  # pragma: no cover
            "pytest_report_teststatus() hook (from a plugin) returned "
            f"an invalid verbose value: {verbose!r}.\nExpected either a string "
            "or a tuple of (word, markup)."
        )

    def _to_json(self) -> dict[str, Any]:
        """Return the contents of this report as a dict of builtin entries,
        suitable for serialization.

        This was originally the serialize_report() function from xdist (ca03269).

        Experimental method.
        """
        return _report_to_json(self)

    @classmethod
    def _from_json(cls, reportdict: dict[str, object]) -> Self:
        """Create either a TestReport or CollectReport, depending on the calling class.

        It is the callers responsibility to know which class to pass here.

        This was originally the serialize_report() function from xdist (ca03269).

        Experimental method.
        """
        kwargs = _report_kwargs_from_json(reportdict)
        return cls(**kwargs)
238
+
239
+
240
+ def _report_unserialization_failure(
241
+ type_name: str, report_class: type[BaseReport], reportdict
242
+ ) -> NoReturn:
243
+ url = "https://github.com/pytest-dev/pytest/issues"
244
+ stream = StringIO()
245
+ pprint("-" * 100, stream=stream)
246
+ pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream)
247
+ pprint(f"report_name: {report_class}", stream=stream)
248
+ pprint(reportdict, stream=stream)
249
+ pprint(f"Please report this bug at {url}", stream=stream)
250
+ pprint("-" * 100, stream=stream)
251
+ raise RuntimeError(stream.getvalue())
252
+
253
+
254
@final
class TestReport(BaseReport):
    """Basic test report object (also used for setup and teardown calls if
    they fail).

    Reports can contain arbitrary extra attributes.
    """

    # Tell pytest's collector this class is not itself a test.
    __test__ = False

    # Defined by skipping plugin.
    # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish.
    wasxfail: str

    def __init__(
        self,
        nodeid: str,
        location: tuple[str, int | None, str],
        keywords: Mapping[str, Any],
        outcome: Literal["passed", "failed", "skipped"],
        longrepr: None
        | ExceptionInfo[BaseException]
        | tuple[str, int, str]
        | str
        | TerminalRepr,
        when: Literal["setup", "call", "teardown"],
        sections: Iterable[tuple[str, str]] = (),
        duration: float = 0,
        start: float = 0,
        stop: float = 0,
        user_properties: Iterable[tuple[str, object]] | None = None,
        **extra,
    ) -> None:
        #: Normalized collection nodeid.
        self.nodeid = nodeid

        #: A (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        #: The filesystempath may be relative to ``config.rootdir``.
        #: The line number is 0-based.
        self.location: tuple[str, int | None, str] = location

        #: A name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords: Mapping[str, Any] = keywords

        #: Test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome

        #: None or a failure representation.
        self.longrepr = longrepr

        #: One of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when: Literal["setup", "call", "teardown"] = when

        #: User properties is a list of tuples (name, value) that holds user
        #: defined properties of the test.
        self.user_properties = list(user_properties or [])

        #: Tuples of str ``(heading, content)`` with extra information
        #: for the test report. Used by pytest to add text captured
        #: from ``stdout``, ``stderr``, and intercepted logging events. May
        #: be used by other plugins to add arbitrary information to reports.
        self.sections = list(sections)

        #: Time it took to run just the test.
        self.duration: float = duration

        #: The system time when the call started, in seconds since the epoch.
        self.start: float = start
        #: The system time when the call ended, in seconds since the epoch.
        self.stop: float = stop

        # Arbitrary extra attributes become plain instance attributes.
        self.__dict__.update(extra)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>"

    @classmethod
    def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport:
        """Create and fill a TestReport with standard item and call info.

        :param item: The item.
        :param call: The call info.
        """
        when = call.when
        # Remove "collect" from the Literal type -- only for collection calls.
        assert when != "collect"
        duration = call.duration
        start = call.start
        stop = call.stop
        keywords = {x: 1 for x in item.keywords}
        excinfo = call.excinfo
        sections = []
        if not call.excinfo:
            # No exception was raised: a clean pass, nothing to represent.
            outcome: Literal["passed", "failed", "skipped"] = "passed"
            longrepr: (
                None
                | ExceptionInfo[BaseException]
                | tuple[str, int, str]
                | str
                | TerminalRepr
            ) = None
        else:
            if not isinstance(excinfo, ExceptionInfo):
                # Something other than ExceptionInfo was stored (e.g. by a
                # plugin); pass it through as the failure representation.
                outcome = "failed"
                longrepr = excinfo
            elif isinstance(excinfo.value, skip.Exception):
                outcome = "skipped"
                r = excinfo._getreprcrash()
                assert r is not None, (
                    "There should always be a traceback entry for skipping a test."
                )
                if excinfo.value._use_item_location:
                    # Report the item's own location rather than where
                    # skip() was raised.
                    path, line = item.reportinfo()[:2]
                    assert line is not None
                    # reportinfo() lines are 0-based; longrepr uses 1-based.
                    longrepr = os.fspath(path), line + 1, r.message
                else:
                    longrepr = (str(r.path), r.lineno, r.message)
            else:
                outcome = "failed"
                if call.when == "call":
                    longrepr = item.repr_failure(excinfo)
                else:  # exception in setup or teardown
                    longrepr = item._repr_failure_py(
                        excinfo, style=item.config.getoption("tbstyle", "auto")
                    )
        for rwhen, key, content in item._report_sections:
            sections.append((f"Captured {key} {rwhen}", content))
        return cls(
            item.nodeid,
            item.location,
            keywords,
            outcome,
            longrepr,
            when,
            sections,
            duration,
            start,
            stop,
            user_properties=item.user_properties,
        )
397
+
398
+
399
@final
class CollectReport(BaseReport):
    """Collection report object.

    Reports can contain arbitrary extra attributes.
    """

    # Collection reports always belong to the "collect" phase.
    when = "collect"

    def __init__(
        self,
        nodeid: str,
        outcome: Literal["passed", "failed", "skipped"],
        longrepr: None
        | ExceptionInfo[BaseException]
        | tuple[str, int, str]
        | str
        | TerminalRepr,
        result: list[Item | Collector] | None,
        sections: Iterable[tuple[str, str]] = (),
        **extra,
    ) -> None:
        #: Normalized collection nodeid.
        self.nodeid = nodeid
        #: Test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome
        #: None or a failure representation.
        self.longrepr = longrepr
        #: The collected items and collection nodes.
        self.result = result if result else []
        #: Tuples of str ``(heading, content)`` with extra information
        #: for the report: captured stdout/stderr/log output, and anything
        #: other plugins choose to attach.
        self.sections = list(sections)
        # Any further keyword arguments become plain attributes.
        self.__dict__.update(extra)

    @property
    def location(  # type:ignore[override]
        self,
    ) -> tuple[str, int | None, str] | None:
        # Collectors have no line/domain info; the path serves as both.
        return (self.fspath, None, self.fspath)

    def __repr__(self) -> str:
        return f"<CollectReport {self.nodeid!r} lenresult={len(self.result)} outcome={self.outcome!r}>"
449
+
450
+
451
class CollectErrorRepr(TerminalRepr):
    """Terminal representation of a collection error: one red message line."""

    def __init__(self, msg: str) -> None:
        # The plain error text to display.
        self.longrepr = msg

    def toterminal(self, out: TerminalWriter) -> None:
        """Write the stored message to *out* in red."""
        out.line(self.longrepr, red=True)
457
+
458
+
459
def pytest_report_to_serializable(
    report: CollectReport | TestReport,
) -> dict[str, Any] | None:
    """Serialize *report* into a JSON-compatible dict, tagged with its type."""
    if not isinstance(report, (TestReport, CollectReport)):
        # TODO: Check if this is actually reachable.
        return None  # type: ignore[unreachable]
    data = report._to_json()
    # Record the concrete class so deserialization can pick it back.
    data["$report_type"] = report.__class__.__name__
    return data
468
+
469
+
470
def pytest_report_from_serializable(
    data: dict[str, Any],
) -> CollectReport | TestReport | None:
    """Rebuild a TestReport/CollectReport from its serialized dict form.

    Returns None for data not produced by pytest_report_to_serializable.
    """
    if "$report_type" not in data:
        # Not one of ours; let other plugins handle it.
        return None
    report_type = data["$report_type"]
    if report_type == "TestReport":
        return TestReport._from_json(data)
    if report_type == "CollectReport":
        return CollectReport._from_json(data)
    assert False, f"Unknown report_type unserialize data: {report_type}"
482
+
483
+
484
def _report_to_json(report: BaseReport) -> dict[str, Any]:
    """Return the contents of this report as a dict of builtin entries,
    suitable for serialization.

    This was originally the serialize_report() function from xdist (ca03269).
    """

    def serialize_repr_entry(
        entry: ReprEntry | ReprEntryNative,
    ) -> dict[str, Any]:
        # One traceback entry -> {"type": <class name>, "data": {...}} so the
        # concrete entry class can be rebuilt on deserialization.
        data = dataclasses.asdict(entry)
        for key, value in data.items():
            if hasattr(value, "__dict__"):
                data[key] = dataclasses.asdict(value)
        entry_data = {"type": type(entry).__name__, "data": data}
        return entry_data

    def serialize_repr_traceback(reprtraceback: ReprTraceback) -> dict[str, Any]:
        result = dataclasses.asdict(reprtraceback)
        # Replace the entry objects with their serialized forms.
        result["reprentries"] = [
            serialize_repr_entry(x) for x in reprtraceback.reprentries
        ]
        return result

    def serialize_repr_crash(
        reprcrash: ReprFileLocation | None,
    ) -> dict[str, Any] | None:
        if reprcrash is not None:
            return dataclasses.asdict(reprcrash)
        else:
            return None

    def serialize_exception_longrepr(rep: BaseReport) -> dict[str, Any]:
        assert rep.longrepr is not None
        # TODO: Investigate whether the duck typing is really necessary here.
        longrepr = cast(ExceptionRepr, rep.longrepr)
        result: dict[str, Any] = {
            "reprcrash": serialize_repr_crash(longrepr.reprcrash),
            "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback),
            "sections": longrepr.sections,
        }
        if isinstance(longrepr, ExceptionChainRepr):
            # Chained exceptions: serialize each (traceback, crash,
            # description) link of the chain.
            result["chain"] = []
            for repr_traceback, repr_crash, description in longrepr.chain:
                result["chain"].append(
                    (
                        serialize_repr_traceback(repr_traceback),
                        serialize_repr_crash(repr_crash),
                        description,
                    )
                )
        else:
            result["chain"] = None
        return result

    d = report.__dict__.copy()
    if hasattr(report.longrepr, "toterminal"):
        if hasattr(report.longrepr, "reprtraceback") and hasattr(
            report.longrepr, "reprcrash"
        ):
            # Looks like an ExceptionRepr: serialize it structurally.
            d["longrepr"] = serialize_exception_longrepr(report)
        else:
            # Some other TerminalRepr: fall back to its string form.
            d["longrepr"] = str(report.longrepr)
    else:
        d["longrepr"] = report.longrepr
    for name in d:
        # Paths are not JSON-serializable; reduce them to strings.
        if isinstance(d[name], os.PathLike):
            d[name] = os.fspath(d[name])
        elif name == "result":
            d[name] = None  # for now
    return d
555
+
556
+
557
def _report_kwargs_from_json(reportdict: dict[str, Any]) -> dict[str, Any]:
    """Return **kwargs that can be used to construct a TestReport or
    CollectReport instance.

    This was originally the serialize_report() function from xdist (ca03269).
    """

    def deserialize_repr_entry(entry_data):
        # Inverse of serialize_repr_entry(): rebuild the concrete entry class
        # recorded under "type".
        data = entry_data["data"]
        entry_type = entry_data["type"]
        if entry_type == "ReprEntry":
            reprfuncargs = None
            reprfileloc = None
            reprlocals = None
            if data["reprfuncargs"]:
                reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
            if data["reprfileloc"]:
                reprfileloc = ReprFileLocation(**data["reprfileloc"])
            if data["reprlocals"]:
                reprlocals = ReprLocals(data["reprlocals"]["lines"])

            reprentry: ReprEntry | ReprEntryNative = ReprEntry(
                lines=data["lines"],
                reprfuncargs=reprfuncargs,
                reprlocals=reprlocals,
                reprfileloc=reprfileloc,
                style=data["style"],
            )
        elif entry_type == "ReprEntryNative":
            reprentry = ReprEntryNative(data["lines"])
        else:
            # Unknown entry type: raise an internal error with full context.
            _report_unserialization_failure(entry_type, TestReport, reportdict)
        return reprentry

    def deserialize_repr_traceback(repr_traceback_dict):
        # Rebuild entry objects in place, then the traceback wrapper.
        repr_traceback_dict["reprentries"] = [
            deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"]
        ]
        return ReprTraceback(**repr_traceback_dict)

    def deserialize_repr_crash(repr_crash_dict: dict[str, Any] | None):
        if repr_crash_dict is not None:
            return ReprFileLocation(**repr_crash_dict)
        else:
            return None

    if (
        reportdict["longrepr"]
        and "reprcrash" in reportdict["longrepr"]
        and "reprtraceback" in reportdict["longrepr"]
    ):
        # Structurally-serialized exception longrepr: rebuild the repr objects.
        reprtraceback = deserialize_repr_traceback(
            reportdict["longrepr"]["reprtraceback"]
        )
        reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"])
        if reportdict["longrepr"]["chain"]:
            # Chained exceptions were serialized link by link.
            chain = []
            for repr_traceback_data, repr_crash_data, description in reportdict[
                "longrepr"
            ]["chain"]:
                chain.append(
                    (
                        deserialize_repr_traceback(repr_traceback_data),
                        deserialize_repr_crash(repr_crash_data),
                        description,
                    )
                )
            exception_info: ExceptionChainRepr | ReprExceptionInfo = ExceptionChainRepr(
                chain
            )
        else:
            exception_info = ReprExceptionInfo(
                reprtraceback=reprtraceback,
                reprcrash=reprcrash,
            )

        for section in reportdict["longrepr"]["sections"]:
            exception_info.addsection(*section)
        # Replace the serialized form with the reconstructed repr object.
        reportdict["longrepr"] = exception_info

    return reportdict
archive/Axiovorax/.venv/Lib/site-packages/_pytest/runner.py ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Basic collect and runtest protocol implementations."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import bdb
7
+ from collections.abc import Callable
8
+ import dataclasses
9
+ import os
10
+ import sys
11
+ import types
12
+ from typing import cast
13
+ from typing import final
14
+ from typing import Generic
15
+ from typing import Literal
16
+ from typing import TYPE_CHECKING
17
+ from typing import TypeVar
18
+
19
+ from .reports import BaseReport
20
+ from .reports import CollectErrorRepr
21
+ from .reports import CollectReport
22
+ from .reports import TestReport
23
+ from _pytest import timing
24
+ from _pytest._code.code import ExceptionChainRepr
25
+ from _pytest._code.code import ExceptionInfo
26
+ from _pytest._code.code import TerminalRepr
27
+ from _pytest.config.argparsing import Parser
28
+ from _pytest.deprecated import check_ispytest
29
+ from _pytest.nodes import Collector
30
+ from _pytest.nodes import Directory
31
+ from _pytest.nodes import Item
32
+ from _pytest.nodes import Node
33
+ from _pytest.outcomes import Exit
34
+ from _pytest.outcomes import OutcomeException
35
+ from _pytest.outcomes import Skipped
36
+ from _pytest.outcomes import TEST_OUTCOME
37
+
38
+
39
+ if sys.version_info < (3, 11):
40
+ from exceptiongroup import BaseExceptionGroup
41
+
42
+ if TYPE_CHECKING:
43
+ from _pytest.main import Session
44
+ from _pytest.terminal import TerminalReporter
45
+
46
+ #
47
+ # pytest plugin hooks.
48
+
49
+
50
def pytest_addoption(parser: Parser) -> None:
    """Register the ``--durations`` / ``--durations-min`` reporting options."""
    group = parser.getgroup("terminal reporting", "Reporting", after="general")
    # Both options share action/default/metavar; only name, type and help differ.
    for opt_name, opt_type, opt_help in (
        (
            "--durations",
            int,
            "Show N slowest setup/test durations (N=0 for all)",
        ),
        (
            "--durations-min",
            float,
            "Minimal duration in seconds for inclusion in slowest list. "
            "Default: 0.005 (or 0.0 if -vv is given).",
        ),
    ):
        group.addoption(
            opt_name,
            action="store",
            type=opt_type,
            default=None,
            metavar="N",
            help=opt_help,
        )
69
+
70
+
71
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """Write the "slowest durations" section at the end of the session.

    Only active when ``--durations`` was given.  Durations below the
    ``--durations-min`` threshold are hidden behind a summary line.
    """
    durations = terminalreporter.config.option.durations
    durations_min = terminalreporter.config.option.durations_min
    verbose = terminalreporter.config.get_verbosity()
    if durations is None:
        # --durations not requested: nothing to report.
        return
    if durations_min is None:
        # Default threshold; -vv (verbosity >= 2) shows everything.
        durations_min = 0.005 if verbose < 2 else 0.0
    tr = terminalreporter
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            # Only reports carrying a measured duration participate.
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if not dlist:
        return
    dlist.sort(key=lambda x: x.duration, reverse=True)
    if not durations:
        # --durations=0 means "show all".
        tr.write_sep("=", "slowest durations")
    else:
        tr.write_sep("=", f"slowest {durations} durations")
        dlist = dlist[:durations]

    for i, rep in enumerate(dlist):
        if rep.duration < durations_min:
            # The list is sorted, so everything from here on is below the
            # threshold; emit one summary line and stop.
            tr.write_line("")
            message = f"({len(dlist) - i} durations < {durations_min:g}s hidden."
            if terminalreporter.config.option.durations_min is None:
                message += " Use -vv to show these durations."
            message += ")"
            tr.write_line(message)
            break
        tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
104
+
105
+
106
def pytest_sessionstart(session: Session) -> None:
    # Create the stack that tracks per-item setup/teardown state.
    session._setupstate = SetupState()
108
+
109
+
110
def pytest_sessionfinish(session: Session) -> None:
    # Tear down everything still set up (nextitem=None means "everything").
    session._setupstate.teardown_exact(None)
112
+
113
+
114
def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> bool:
    """Run the full setup/call/teardown protocol for *item*.

    Fires the logstart/logfinish hooks around it and returns True to signal
    that the protocol was handled.
    """
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    runtestprotocol(item, nextitem=nextitem)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    return True
120
+
121
+
122
def runtestprotocol(
    item: Item, log: bool = True, nextitem: Item | None = None
) -> list[TestReport]:
    """Run setup, (conditionally) call, and teardown for *item*.

    :param log: Whether to fire pytest_runtest_logreport for each phase.
    :param nextitem: The next item to run; controls how much is torn down.
    :returns: The reports for all phases that were run.
    """
    hasrequest = hasattr(item, "_request")
    if hasrequest and not item._request:  # type: ignore[attr-defined]
        # This only happens if the item is re-run, as is done by
        # pytest-rerunfailures.
        item._initrequest()  # type: ignore[attr-defined]
    rep = call_and_report(item, "setup", log)
    reports = [rep]
    if rep.passed:
        # Only run the test body when setup succeeded.
        if item.config.getoption("setupshow", False):
            show_test_item(item)
        if not item.config.getoption("setuponly", False):
            reports.append(call_and_report(item, "call", log))
    # If the session is about to fail or stop, teardown everything - this is
    # necessary to correctly report fixture teardown errors (see #11706)
    if item.session.shouldfail or item.session.shouldstop:
        nextitem = None
    reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
    # After all teardown hooks have been called
    # want funcargs and request info to go away.
    if hasrequest:
        item._request = False  # type: ignore[attr-defined]
        item.funcargs = None  # type: ignore[attr-defined]
    return reports
148
+
149
+
150
def show_test_item(item: Item) -> None:
    """Write the test's node id, and the fixtures it uses, to the terminal."""
    writer = item.config.get_terminal_writer()
    writer.line()
    # Indent the entry under the preceding output.
    writer.write(" " * 8)
    writer.write(item.nodeid)
    fixtures = sorted(getattr(item, "fixturenames", []))
    if fixtures:
        joined = ", ".join(fixtures)
        writer.write(f" (fixtures used: {joined})")
    writer.flush()
160
+
161
+
162
def pytest_runtest_setup(item: Item) -> None:
    # Advertise the stage via PYTEST_CURRENT_TEST, then run fixture setup.
    _update_current_test_var(item, "setup")
    item.session._setupstate.setup(item)
165
+
166
+
167
def pytest_runtest_call(item: Item) -> None:
    """Run the test function itself, maintaining ``sys.last_*`` debug state.

    On failure the exception is stored in ``sys.last_type``/``last_value``/
    ``last_traceback`` (and ``last_exc`` on 3.12+) for postmortem debugging,
    then re-raised.
    """
    _update_current_test_var(item, "call")
    try:
        # Clear stale postmortem state from a previous test, if any.
        del sys.last_type
        del sys.last_value
        del sys.last_traceback
        if sys.version_info >= (3, 12, 0):
            del sys.last_exc  # type:ignore[attr-defined]
    except AttributeError:
        pass
    try:
        item.runtest()
    except Exception as e:
        # Store trace info to allow postmortem debugging
        sys.last_type = type(e)
        sys.last_value = e
        if sys.version_info >= (3, 12, 0):
            sys.last_exc = e  # type:ignore[attr-defined]
        assert e.__traceback__ is not None
        # Skip *this* frame
        sys.last_traceback = e.__traceback__.tb_next
        raise
189
+
190
+
191
def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None:
    # Tear down only as much as *nextitem* allows, then clear the env var.
    _update_current_test_var(item, "teardown")
    item.session._setupstate.teardown_exact(nextitem)
    _update_current_test_var(item, None)
195
+
196
+
197
+ def _update_current_test_var(
198
+ item: Item, when: Literal["setup", "call", "teardown"] | None
199
+ ) -> None:
200
+ """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage.
201
+
202
+ If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment.
203
+ """
204
+ var_name = "PYTEST_CURRENT_TEST"
205
+ if when:
206
+ value = f"{item.nodeid} ({when})"
207
+ # don't allow null bytes on environment variables (see #2644, #2957)
208
+ value = value.replace("\x00", "(null)")
209
+ os.environ[var_name] = value
210
+ else:
211
+ os.environ.pop(var_name)
212
+
213
+
214
def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None:
    """Map setup/teardown report outcomes to (category, shortletter, verbose-word).

    Returns None for the "call" phase so other plugins decide the status.
    """
    if report.when not in ("setup", "teardown"):
        return None
    # category, shortletter, verbose-word
    if report.failed:
        return "error", "E", "ERROR"
    if report.skipped:
        return "skipped", "s", "SKIPPED"
    return "", "", ""
224
+
225
+
226
+ #
227
+ # Implementation
228
+
229
+
230
def call_and_report(
    item: Item, when: Literal["setup", "call", "teardown"], log: bool = True, **kwds
) -> TestReport:
    """Run one runtest-phase hook for *item* and turn the result into a report.

    Fires pytest_runtest_logreport (when *log* is true) and, for unexpected
    exceptions, pytest_exception_interact.
    """
    ihook = item.ihook
    if when == "setup":
        runtest_hook: Callable[..., None] = ihook.pytest_runtest_setup
    elif when == "call":
        runtest_hook = ihook.pytest_runtest_call
    elif when == "teardown":
        runtest_hook = ihook.pytest_runtest_teardown
    else:
        assert False, f"Unhandled runtest hook case: {when}"
    # Exit must always propagate; KeyboardInterrupt too, unless --pdb is on.
    reraise: tuple[type[BaseException], ...] = (Exit,)
    if not item.config.getoption("usepdb", False):
        reraise += (KeyboardInterrupt,)
    call = CallInfo.from_call(
        lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
    )
    report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call)
    if log:
        ihook.pytest_runtest_logreport(report=report)
    if check_interactive_exception(call, report):
        ihook.pytest_exception_interact(node=item, call=call, report=report)
    return report
254
+
255
+
256
def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool:
    """Check whether the call raised an exception that should be reported as
    interactive (e.g. handed to --pdb)."""
    if call.excinfo is None:
        # Didn't raise.
        return False
    if hasattr(report, "wasxfail"):
        # Exception was expected.
        return False
    # Skipped / BdbQuit are control-flow exceptions, not real failures.
    return not isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit))
269
+
270
+
271
+ TResult = TypeVar("TResult", covariant=True)
272
+
273
+
274
@final
@dataclasses.dataclass
class CallInfo(Generic[TResult]):
    """Result/Exception info of a function invocation."""

    # The raw return value; read through the ``result`` property, which
    # enforces that no exception was raised.
    _result: TResult | None
    #: The captured exception of the call, if it raised.
    excinfo: ExceptionInfo[BaseException] | None
    #: The system time when the call started, in seconds since the epoch.
    start: float
    #: The system time when the call ended, in seconds since the epoch.
    stop: float
    #: The call duration, in seconds.
    duration: float
    #: The context of invocation: "collect", "setup", "call" or "teardown".
    when: Literal["collect", "setup", "call", "teardown"]

    def __init__(
        self,
        result: TResult | None,
        excinfo: ExceptionInfo[BaseException] | None,
        start: float,
        stop: float,
        duration: float,
        when: Literal["collect", "setup", "call", "teardown"],
        *,
        _ispytest: bool = False,
    ) -> None:
        """Private constructor - use :meth:`from_call` to create instances."""
        check_ispytest(_ispytest)
        self._result = result
        self.excinfo = excinfo
        self.start = start
        self.stop = stop
        self.duration = duration
        self.when = when

    @property
    def result(self) -> TResult:
        """The return value of the call, if it didn't raise.

        Can only be accessed if excinfo is None.
        """
        if self.excinfo is not None:
            raise AttributeError(f"{self!r} has no valid result")
        # The cast is safe because an exception wasn't raised, hence
        # _result has the expected function return type (which may be
        # None, that's why a cast and not an assert).
        return cast(TResult, self._result)

    @classmethod
    def from_call(
        cls,
        func: Callable[[], TResult],
        when: Literal["collect", "setup", "call", "teardown"],
        reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
    ) -> CallInfo[TResult]:
        """Call func, wrapping the result in a CallInfo.

        :param func:
            The function to call. Called without arguments.
        :type func: Callable[[], _pytest.runner.TResult]
        :param when:
            The phase in which the function is called.
        :param reraise:
            Exception or exceptions that shall propagate if raised by the
            function, instead of being wrapped in the CallInfo.
        """
        excinfo = None
        instant = timing.Instant()
        try:
            result: TResult | None = func()
        except BaseException:
            excinfo = ExceptionInfo.from_current()
            # Control-flow exceptions named in `reraise` propagate instead
            # of being captured in the CallInfo.
            if reraise is not None and isinstance(excinfo.value, reraise):
                raise
            result = None
        duration = instant.elapsed()
        return cls(
            start=duration.start.time,
            stop=duration.stop.time,
            duration=duration.seconds,
            when=when,
            result=result,
            excinfo=excinfo,
            _ispytest=True,
        )

    def __repr__(self) -> str:
        if self.excinfo is None:
            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
365
+
366
+
367
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
    """Default hook implementation: build a TestReport from the item and its CallInfo."""
    return TestReport.from_item_and_call(item, call)
369
+
370
+
371
def pytest_make_collect_report(collector: Collector) -> CollectReport:
    """Default hook implementation: run collection for *collector* and build
    its CollectReport.

    Skips (including unittest.SkipTest) raised during collection produce a
    "skipped" report; other exceptions produce a "failed" report carrying a
    repr of the failure.
    """

    def collect() -> list[Item | Collector]:
        # Before collecting, if this is a Directory, load the conftests.
        # If a conftest import fails to load, it is considered a collection
        # error of the Directory collector. This is why it's done inside of the
        # CallInfo wrapper.
        #
        # Note: initial conftests are loaded early, not here.
        if isinstance(collector, Directory):
            collector.config.pluginmanager._loadconftestmodules(
                collector.path,
                collector.config.getoption("importmode"),
                rootpath=collector.config.rootpath,
                consider_namespace_packages=collector.config.getini(
                    "consider_namespace_packages"
                ),
            )

        return list(collector.collect())

    call = CallInfo.from_call(
        collect, "collect", reraise=(KeyboardInterrupt, SystemExit)
    )
    longrepr: None | tuple[str, int, str] | str | TerminalRepr = None
    if not call.excinfo:
        outcome: Literal["passed", "skipped", "failed"] = "passed"
    else:
        # unittest.SkipTest is only recognized if unittest has been imported.
        skip_exceptions = [Skipped]
        unittest = sys.modules.get("unittest")
        if unittest is not None:
            skip_exceptions.append(unittest.SkipTest)
        if isinstance(call.excinfo.value, tuple(skip_exceptions)):
            outcome = "skipped"
            # Use the single-line crash location as the longrepr.
            r_ = collector._repr_failure_py(call.excinfo, "line")
            assert isinstance(r_, ExceptionChainRepr), repr(r_)
            r = r_.reprcrash
            assert r
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            errorinfo = collector.repr_failure(call.excinfo)
            # Plain-string failure reprs are wrapped so they gain toterminal().
            if not hasattr(errorinfo, "toterminal"):
                assert isinstance(errorinfo, str)
                errorinfo = CollectErrorRepr(errorinfo)
            longrepr = errorinfo
    result = call.result if not call.excinfo else None
    rep = CollectReport(collector.nodeid, outcome, longrepr, result)
    rep.call = call  # type: ignore # see collect_one_node
    return rep
420
+
421
+
422
class SetupState:
    """Shared state for setting up/tearing down test items or collectors
    in a session.

    Suppose we have a collection tree as follows:

    <Session session>
        <Module mod1>
            <Function item1>
        <Module mod2>
            <Function item2>

    The SetupState maintains a stack. The stack starts out empty:

        []

    During the setup phase of item1, setup(item1) is called. What it does
    is:

        push session to stack, run session.setup()
        push mod1 to stack, run mod1.setup()
        push item1 to stack, run item1.setup()

    The stack is:

        [session, mod1, item1]

    While the stack is in this shape, it is allowed to add finalizers to
    each of session, mod1, item1 using addfinalizer().

    During the teardown phase of item1, teardown_exact(item2) is called,
    where item2 is the next item to item1. What it does is:

        pop item1 from stack, run its teardowns
        pop mod1 from stack, run its teardowns

    mod1 was popped because it ended its purpose with item1. The stack is:

        [session]

    During the setup phase of item2, setup(item2) is called. What it does
    is:

        push mod2 to stack, run mod2.setup()
        push item2 to stack, run item2.setup()

    Stack:

        [session, mod2, item2]

    During the teardown phase of item2, teardown_exact(None) is called,
    because item2 is the last item. What it does is:

        pop item2 from stack, run its teardowns
        pop mod2 from stack, run its teardowns
        pop session from stack, run its teardowns

    Stack:

        []

    The end!
    """

    def __init__(self) -> None:
        # The stack is in the dict insertion order.
        self.stack: dict[
            Node,
            tuple[
                # Node's finalizers.
                list[Callable[[], object]],
                # Node's exception and original traceback, if its setup raised.
                tuple[OutcomeException | Exception, types.TracebackType | None] | None,
            ],
        ] = {}

    def setup(self, item: Item) -> None:
        """Setup objects along the collector chain to the item."""
        needed_collectors = item.listchain()

        # If a collector fails its setup, fail its entire subtree of items.
        # The setup is not retried for each item - the same exception is used.
        for col, (finalizers, exc) in self.stack.items():
            assert col in needed_collectors, "previous item was not torn down properly"
            if exc:
                raise exc[0].with_traceback(exc[1])

        # Set up the part of the chain not already on the stack.
        for col in needed_collectors[len(self.stack) :]:
            assert col not in self.stack
            # Push onto the stack.
            self.stack[col] = ([col.teardown], None)
            try:
                col.setup()
            except TEST_OUTCOME as exc:
                # Remember the failure so sibling items fail fast with the
                # same exception (see the loop above).
                self.stack[col] = (self.stack[col][0], (exc, exc.__traceback__))
                raise

    def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None:
        """Attach a finalizer to the given node.

        The node must be currently active in the stack.
        """
        assert node and not isinstance(node, tuple)
        assert callable(finalizer)
        assert node in self.stack, (node, self.stack)
        self.stack[node][0].append(finalizer)

    def teardown_exact(self, nextitem: Item | None) -> None:
        """Teardown the current stack up until reaching nodes that nextitem
        also descends from.

        When nextitem is None (meaning we're at the last item), the entire
        stack is torn down.
        """
        needed_collectors = (nextitem and nextitem.listchain()) or []
        exceptions: list[BaseException] = []
        while self.stack:
            # Stop once the remaining stack is a prefix of nextitem's chain.
            if list(self.stack.keys()) == needed_collectors[: len(self.stack)]:
                break
            node, (finalizers, _) = self.stack.popitem()
            these_exceptions = []
            # Finalizers run LIFO; all are run even if some of them raise.
            while finalizers:
                fin = finalizers.pop()
                try:
                    fin()
                except TEST_OUTCOME as e:
                    these_exceptions.append(e)

            if len(these_exceptions) == 1:
                exceptions.extend(these_exceptions)
            elif these_exceptions:
                msg = f"errors while tearing down {node!r}"
                exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1]))

        if len(exceptions) == 1:
            raise exceptions[0]
        elif exceptions:
            raise BaseExceptionGroup("errors during test teardown", exceptions[::-1])
        if nextitem is None:
            assert not self.stack
562
+
563
+
564
def collect_one_node(collector: Collector) -> CollectReport:
    """Run collection for a single collector node and return its report.

    Fires pytest_collectstart before collecting, and gives plugins a chance
    to interact with any unexpected collection exception afterwards.
    """
    ihook = collector.ihook
    ihook.pytest_collectstart(collector=collector)
    rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
    # pytest_make_collect_report stashes the CallInfo on the report; pop it
    # off so it does not persist on the report object.
    call = rep.__dict__.pop("call", None)
    if call and check_interactive_exception(call, rep):
        ihook.pytest_exception_interact(node=collector, call=call, report=rep)
    return rep
archive/Axiovorax/.venv/Lib/site-packages/_pytest/scope.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Scope definition and related utilities.
3
+
4
+ Those are defined here, instead of in the 'fixtures' module because
5
+ their use is spread across many other pytest modules, and centralizing it in 'fixtures'
6
+ would cause circular references.
7
+
8
+ Also this makes the module light to import, as it should.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from enum import Enum
14
+ from functools import total_ordering
15
+ from typing import Literal
16
+
17
+
18
+ _ScopeName = Literal["session", "package", "module", "class", "function"]
19
+
20
+
21
@total_ordering
class Scope(Enum):
    """
    Represents one of the possible fixture scopes in pytest.

    Scopes are ordered from lower to higher, that is:

              ->>> higher ->>>

    Function < Class < Module < Package < Session

              <<<- lower  <<<-
    """

    # Scopes need to be listed from lower to higher.
    Function = "function"
    Class = "class"
    Module = "module"
    Package = "package"
    Session = "session"

    def next_lower(self) -> Scope:
        """Return the next lower scope.

        :raises ValueError: If called on ``Scope.Function``.
        """
        index = _SCOPE_INDICES[self]
        if index == 0:
            raise ValueError(f"{self} is the lower-most scope")
        return _ALL_SCOPES[index - 1]

    def next_higher(self) -> Scope:
        """Return the next higher scope.

        :raises ValueError: If called on ``Scope.Session``.
        """
        index = _SCOPE_INDICES[self]
        if index == len(_SCOPE_INDICES) - 1:
            raise ValueError(f"{self} is the upper-most scope")
        return _ALL_SCOPES[index + 1]

    def __lt__(self, other: Scope) -> bool:
        # Ordering follows declaration order via _SCOPE_INDICES (defined at
        # module bottom); @total_ordering derives the other comparisons.
        self_index = _SCOPE_INDICES[self]
        other_index = _SCOPE_INDICES[other]
        return self_index < other_index

    @classmethod
    def from_user(
        cls, scope_name: _ScopeName, descr: str, where: str | None = None
    ) -> Scope:
        """
        Given a scope name from the user, return the equivalent Scope enum. Should be used
        whenever we want to convert a user provided scope name to its enum object.

        If the scope name is invalid, construct a user friendly message and call pytest.fail.
        """
        # Imported here to keep this module light to import (see module docstring).
        from _pytest.outcomes import fail

        try:
            # Holding this reference is necessary for mypy at the moment.
            scope = Scope(scope_name)
        except ValueError:
            fail(
                "{} {}got an unexpected scope value '{}'".format(
                    descr, f"from {where} " if where else "", scope_name
                ),
                pytrace=False,
            )
        return scope
84
+
85
+
86
+ _ALL_SCOPES = list(Scope)
87
+ _SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)}
88
+
89
+
90
+ # Ordered list of scopes which can contain many tests (in practice all except Function).
91
+ HIGH_SCOPES = [x for x in Scope if x is not Scope.Function]
archive/Axiovorax/.venv/Lib/site-packages/_pytest/setuponly.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Generator
4
+
5
+ from _pytest._io.saferepr import saferepr
6
+ from _pytest.config import Config
7
+ from _pytest.config import ExitCode
8
+ from _pytest.config.argparsing import Parser
9
+ from _pytest.fixtures import FixtureDef
10
+ from _pytest.fixtures import SubRequest
11
+ from _pytest.scope import Scope
12
+ import pytest
13
+
14
+
15
def pytest_addoption(parser: Parser) -> None:
    """Register the --setup-only and --setup-show command line options."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--setuponly",
        "--setup-only",
        action="store_true",
        help="Only setup fixtures, do not execute tests",
    )
    group.addoption(
        "--setupshow",
        "--setup-show",
        action="store_true",
        help="Show setup of fixtures while executing tests",
    )
29
+
30
+
31
@pytest.hookimpl(wrapper=True)
def pytest_fixture_setup(
    fixturedef: FixtureDef[object], request: SubRequest
) -> Generator[None, object, object]:
    """Hook wrapper: after a fixture is set up, report it when --setup-show is on.

    The reporting happens in ``finally`` so a SETUP line is written even when
    the wrapped fixture setup raised.
    """
    try:
        return (yield)
    finally:
        if request.config.option.setupshow:
            if hasattr(request, "param"):
                # Save the fixture parameter so ._show_fixture_action() can
                # display it now and during the teardown (in .finish()).
                if fixturedef.ids:
                    if callable(fixturedef.ids):
                        param = fixturedef.ids(request.param)
                    else:
                        param = fixturedef.ids[request.param_index]
                else:
                    param = request.param
                fixturedef.cached_param = param  # type: ignore[attr-defined]
            _show_fixture_action(fixturedef, request.config, "SETUP")
51
+
52
+
53
def pytest_fixture_post_finalizer(
    fixturedef: FixtureDef[object], request: SubRequest
) -> None:
    """After a fixture is finalized, report the TEARDOWN when --setup-show is on."""
    # Only report fixtures that were actually set up (cached_result present).
    if fixturedef.cached_result is not None:
        config = request.config
        if config.option.setupshow:
            _show_fixture_action(fixturedef, request.config, "TEARDOWN")
            # Drop the parameter saved by pytest_fixture_setup; teardown is done.
            if hasattr(fixturedef, "cached_param"):
                del fixturedef.cached_param
62
+
63
+
64
def _show_fixture_action(
    fixturedef: FixtureDef[object], config: Config, msg: str
) -> None:
    """Write a SETUP/TEARDOWN line for *fixturedef* to the terminal.

    Global output capturing is suspended while writing so the line reaches
    the real terminal, then resumed.

    :param msg: Either ``"SETUP"`` or ``"TEARDOWN"``.
    """
    capman = config.pluginmanager.getplugin("capturemanager")
    if capman:
        capman.suspend_global_capture()

    tw = config.get_terminal_writer()
    tw.line()
    # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc.
    scope_indent = list(reversed(Scope)).index(fixturedef._scope)
    tw.write(" " * 2 * scope_indent)

    # e.g. "SETUP    F my_fixture" - single-letter scope marker.
    scopename = fixturedef.scope[0].upper()
    tw.write(f"{msg:<8} {scopename} {fixturedef.argname}")

    if msg == "SETUP":
        deps = sorted(arg for arg in fixturedef.argnames if arg != "request")
        if deps:
            tw.write(" (fixtures used: {})".format(", ".join(deps)))

    # cached_param is stored by pytest_fixture_setup for parametrized fixtures.
    if hasattr(fixturedef, "cached_param"):
        tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]")

    tw.flush()

    if capman:
        capman.resume_global_capture()
92
+
93
+
94
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
    """--setuponly implies --setupshow; never handles the command line itself
    (always returns None)."""
    if config.option.setuponly:
        config.option.setupshow = True
    return None
archive/Axiovorax/.venv/Lib/site-packages/_pytest/setupplan.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from _pytest.config import Config
4
+ from _pytest.config import ExitCode
5
+ from _pytest.config.argparsing import Parser
6
+ from _pytest.fixtures import FixtureDef
7
+ from _pytest.fixtures import SubRequest
8
+ import pytest
9
+
10
+
11
def pytest_addoption(parser: Parser) -> None:
    """Register the --setup-plan/--setupplan command line option."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--setupplan",
        "--setup-plan",
        action="store_true",
        help="Show what fixtures and tests would be executed but "
        "don't execute anything",
    )
20
+
21
+
22
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(
    fixturedef: FixtureDef[object], request: SubRequest
) -> object | None:
    """With --setup-plan, short-circuit fixture setup with a dummy result.

    Returning a non-None value from this tryfirst hook stops the hook call
    chain, so the real fixture function never runs.
    """
    # Will return a dummy fixture if the setuponly option is provided.
    if request.config.option.setupplan:
        my_cache_key = fixturedef.cache_key(request)
        fixturedef.cached_result = (None, my_cache_key, None)
        return fixturedef.cached_result
    return None
32
+
33
+
34
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
    """--setup-plan implies --setup-only and --setup-show; never handles the
    command line itself (always returns None)."""
    if config.option.setupplan:
        config.option.setuponly = True
        config.option.setupshow = True
    return None
archive/Axiovorax/.venv/Lib/site-packages/_pytest/skipping.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Support for skip/xfail functions and markers."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from collections.abc import Generator
7
+ from collections.abc import Mapping
8
+ import dataclasses
9
+ import os
10
+ import platform
11
+ import sys
12
+ import traceback
13
+ from typing import Optional
14
+
15
+ from _pytest.config import Config
16
+ from _pytest.config import hookimpl
17
+ from _pytest.config.argparsing import Parser
18
+ from _pytest.mark.structures import Mark
19
+ from _pytest.nodes import Item
20
+ from _pytest.outcomes import fail
21
+ from _pytest.outcomes import skip
22
+ from _pytest.outcomes import xfail
23
+ from _pytest.raises import AbstractRaises
24
+ from _pytest.reports import BaseReport
25
+ from _pytest.reports import TestReport
26
+ from _pytest.runner import CallInfo
27
+ from _pytest.stash import StashKey
28
+
29
+
30
def pytest_addoption(parser: Parser) -> None:
    """Register the --runxfail option and the xfail_strict ini setting."""
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="Report the results of xfail tests as if they were not marked",
    )

    parser.addini(
        "xfail_strict",
        "Default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
+
48
+
49
def pytest_configure(config: Config) -> None:
    """Neutralize imperative pytest.xfail() under --runxfail and register the
    skip/skipif/xfail marker docs."""
    if config.option.runxfail:
        # yay a hack: replace pytest.xfail with a no-op so imperative
        # xfail() calls inside tests do nothing; the original is restored
        # via the config cleanup.
        import pytest

        old = pytest.xfail
        config.add_cleanup(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        # Keep the Exception attribute so `pytest.xfail.Exception` still resolves.
        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
    )
+ )
86
+
87
+
88
def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
    # String condition.
    if isinstance(condition, str):
        # NOTE: string conditions are eval()'d with a namespace containing
        # os/sys/platform/config plus pytest_markeval_namespace contributions
        # and the test module's globals - test-suite code is trusted here.
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        # Reversed so earlier (higher-priority) namespaces win on update.
        for dictionary in reversed(
            item.ihook.pytest_markeval_namespace(config=item.config)
        ):
            if not isinstance(dictionary, Mapping):
                raise ValueError(
                    f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}"
                )
            globals_.update(dictionary)
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)
        try:
            filename = f"<{mark.name} condition>"
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            # Point a caret at the offending column in the condition string.
            msglines = [
                f"Error evaluating {mark.name!r} condition",
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                f"Error evaluating {mark.name!r} condition",
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                f"Error evaluating {mark.name!r} condition as a boolean",
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # XXX better be checked at collection time
            msg = (
                f"Error evaluating {mark.name!r}: "
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason
159
+
160
+
161
@dataclasses.dataclass(frozen=True)
class Skip:
    """Outcome of evaluate_skip_marks(): carries the reason a test is skipped."""

    reason: str = "unconditional skip"
166
+
167
+
168
def evaluate_skip_marks(item: Item) -> Skip | None:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        # Conditions may be given positionally or via the `condition` kwarg.
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        try:
            return Skip(*mark.args, **mark.kwargs)
        except TypeError as e:
            # A skip mark given skipif-style arguments is a common mistake.
            raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None

    return None
194
+
195
+
196
@dataclasses.dataclass(frozen=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    # Explicit __slots__ keeps instances small; it must list exactly the
    # dataclass fields declared below.
    __slots__ = ("raises", "reason", "run", "strict")

    #: Reason given on the mark (or derived from its condition).
    reason: str
    #: Whether the test should be executed at all (xfail(run=False) -> False).
    run: bool
    #: Whether an unexpected pass (XPASS) is reported as a failure.
    strict: bool
    #: Optional constraint on which exceptions count as the expected failure.
    raises: (
        type[BaseException]
        | tuple[type[BaseException], ...]
        | AbstractRaises[BaseException]
        | None
    )
211
+
212
+
213
def evaluate_xfail_marks(item: Item) -> Xfail | None:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        # `strict` defaults to the xfail_strict ini setting.
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        # Conditions may be given positionally or via the `condition` kwarg.
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None
236
+
237
+
238
+ # Saves the xfail mark evaluation. Can be refreshed during call if None.
239
+ xfailed_key = StashKey[Optional[Xfail]]()
240
+
241
+
242
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    """Raise Skipped for triggered skip marks and stash the xfail evaluation."""
    skipped = evaluate_skip_marks(item)
    if skipped:
        raise skip.Exception(skipped.reason, _use_item_location=True)

    item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    # xfail(run=False): don't even run the test, unless --runxfail is given.
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)
251
+
252
+
253
@hookimpl(wrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None]:
    """Hook wrapper around the test call: honor xfail(run=False) and keep the
    stashed xfail evaluation fresh."""
    xfailed = item.stash.get(xfailed_key, None)
    if xfailed is None:
        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    # xfail(run=False): don't run the test, unless --runxfail is given.
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    try:
        return (yield)
    finally:
        # The test run may have added an xfail mark dynamically.
        xfailed = item.stash.get(xfailed_key, None)
        if xfailed is None:
            item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
269
+
270
+
271
@hookimpl(wrapper=True)
def pytest_runtest_makereport(
    item: Item, call: CallInfo[None]
) -> Generator[None, TestReport, TestReport]:
    """Hook wrapper: rewrite the report outcome according to xfail semantics."""
    rep = yield
    xfailed = item.stash.get(xfailed_key, None)
    if item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        # Imperative pytest.xfail() was called inside the test.
        assert call.excinfo.value.msg is not None
        rep.wasxfail = call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            # Only count the failure as xfail if it matches the declared
            # `raises` constraint (or no constraint was declared).
            if raises is None or (
                (
                    isinstance(raises, (type, tuple))
                    and isinstance(call.excinfo.value, raises)
                )
                or (
                    isinstance(raises, AbstractRaises)
                    and raises.matches(call.excinfo.value)
                )
            ):
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
            else:
                # Failed in an unexpected way: report as a true failure.
                rep.outcome = "failed"
        elif call.when == "call":
            # The test passed although it was marked xfail (XPASS).
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason
    return rep
308
+
309
+
310
def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None:
    """Classify xfail outcomes as (category, shortletter, verbose-word).

    Only reports carrying the ``wasxfail`` marker are handled; everything
    else is left to other plugins (None is returned).
    """
    if not hasattr(report, "wasxfail"):
        return None
    if report.skipped:
        return ("xfailed", "x", "XFAIL")
    if report.passed:
        return ("xpassed", "X", "XPASS")
    return None
archive/Axiovorax/.venv/Lib/site-packages/_pytest/stash.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+ from typing import cast
5
+ from typing import Generic
6
+ from typing import TypeVar
7
+
8
+
9
+ __all__ = ["Stash", "StashKey"]
10
+
11
+
12
+ T = TypeVar("T")
13
+ D = TypeVar("D")
14
+
15
+
16
class StashKey(Generic[T]):
    """``StashKey`` is an object used as a key to a :class:`Stash`.

    A ``StashKey`` is associated with the type ``T`` of the value of the key.

    A ``StashKey`` is unique and cannot conflict with another key.

    .. versionadded:: 7.0
    """

    # Keys carry no state; default object identity (hash/eq) is what makes
    # each instance a unique key.
    __slots__ = ()
27
+
28
+
29
class Stash:
    r"""A type-safe heterogeneous mutable mapping whose keys and value types
    are declared away from where the ``Stash`` itself is created.

    Typically an object such as :class:`~pytest.Config` or a
    :class:`~_pytest.nodes.Node` exposes a ``Stash``:

    .. code-block:: python

        stash: Stash = some_object.stash

    A module or plugin that wants to attach data declares
    :class:`StashKey`\s at module level and uses them for storage and
    retrieval:

    .. code-block:: python

        # At the top-level of the module
        some_str_key = StashKey[str]()
        some_bool_key = StashKey[bool]()

        # Value type must match the key.
        stash[some_str_key] = "value"
        stash[some_bool_key] = True

        # The static type of some_str is str; of some_bool, bool.
        some_str = stash[some_str_key]
        some_bool = stash[some_bool_key]

    .. versionadded:: 7.0
    """

    __slots__ = ("_storage",)

    def __init__(self) -> None:
        self._storage: dict[StashKey[Any], object] = {}

    def __setitem__(self, key: StashKey[T], value: T) -> None:
        """Set a value for key."""
        self._storage[key] = value

    def __getitem__(self, key: StashKey[T]) -> T:
        """Get the value for key, raising ``KeyError`` if it was never set."""
        # Safe: __setitem__ only accepts values of the key's declared type.
        value = self._storage[key]
        return value  # type: ignore[return-value]

    def get(self, key: StashKey[T], default: D) -> T | D:
        """Get the value for key, or *default* if it was never set."""
        return self._storage.get(key, default)  # type: ignore[return-value]

    def setdefault(self, key: StashKey[T], default: T) -> T:
        """Return the value of key if already set, otherwise store *default*
        under key and return it."""
        return self._storage.setdefault(key, default)  # type: ignore[return-value]

    def __delitem__(self, key: StashKey[T]) -> None:
        """Delete the value for key, raising ``KeyError`` if it was never set."""
        del self._storage[key]

    def __contains__(self, key: StashKey[T]) -> bool:
        """Return whether key was set."""
        return key in self._storage

    def __len__(self) -> int:
        """Return how many items exist in the stash."""
        return len(self._storage)
archive/Axiovorax/.venv/Lib/site-packages/_pytest/stepwise.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import dataclasses
4
+ from datetime import datetime
5
+ from datetime import timedelta
6
+ from typing import Any
7
+ from typing import TYPE_CHECKING
8
+
9
+ from _pytest import nodes
10
+ from _pytest.cacheprovider import Cache
11
+ from _pytest.config import Config
12
+ from _pytest.config.argparsing import Parser
13
+ from _pytest.main import Session
14
+ from _pytest.reports import TestReport
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ from typing_extensions import Self
19
+
20
+ STEPWISE_CACHE_DIR = "cache/stepwise"
21
+
22
+
23
+ def pytest_addoption(parser: Parser) -> None:
24
+ group = parser.getgroup("general")
25
+ group.addoption(
26
+ "--sw",
27
+ "--stepwise",
28
+ action="store_true",
29
+ default=False,
30
+ dest="stepwise",
31
+ help="Exit on test failure and continue from last failing test next time",
32
+ )
33
+ group.addoption(
34
+ "--sw-skip",
35
+ "--stepwise-skip",
36
+ action="store_true",
37
+ default=False,
38
+ dest="stepwise_skip",
39
+ help="Ignore the first failing test but stop on the next failing test. "
40
+ "Implicitly enables --stepwise.",
41
+ )
42
+ group.addoption(
43
+ "--sw-reset",
44
+ "--stepwise-reset",
45
+ action="store_true",
46
+ default=False,
47
+ dest="stepwise_reset",
48
+ help="Resets stepwise state, restarting the stepwise workflow. "
49
+ "Implicitly enables --stepwise.",
50
+ )
51
+
52
+
53
+ def pytest_configure(config: Config) -> None:
54
+ # --stepwise-skip/--stepwise-reset implies stepwise.
55
+ if config.option.stepwise_skip or config.option.stepwise_reset:
56
+ config.option.stepwise = True
57
+ if config.getoption("stepwise"):
58
+ config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
59
+
60
+
61
+ def pytest_sessionfinish(session: Session) -> None:
62
+ if not session.config.getoption("stepwise"):
63
+ assert session.config.cache is not None
64
+ if hasattr(session.config, "workerinput"):
65
+ # Do not update cache if this process is a xdist worker to prevent
66
+ # race conditions (#10641).
67
+ return
68
+
69
+
70
+ @dataclasses.dataclass
71
+ class StepwiseCacheInfo:
72
+ # The nodeid of the last failed test.
73
+ last_failed: str | None
74
+
75
+ # The number of tests in the last time --stepwise was run.
76
+ # We use this information as a simple way to invalidate the cache information, avoiding
77
+ # confusing behavior in case the cache is stale.
78
+ last_test_count: int | None
79
+
80
+ # The date when the cache was last updated, for information purposes only.
81
+ last_cache_date_str: str
82
+
83
+ @property
84
+ def last_cache_date(self) -> datetime:
85
+ return datetime.fromisoformat(self.last_cache_date_str)
86
+
87
+ @classmethod
88
+ def empty(cls) -> Self:
89
+ return cls(
90
+ last_failed=None,
91
+ last_test_count=None,
92
+ last_cache_date_str=datetime.now().isoformat(),
93
+ )
94
+
95
+ def update_date_to_now(self) -> None:
96
+ self.last_cache_date_str = datetime.now().isoformat()
97
+
98
+
99
+ class StepwisePlugin:
100
+ def __init__(self, config: Config) -> None:
101
+ self.config = config
102
+ self.session: Session | None = None
103
+ self.report_status: list[str] = []
104
+ assert config.cache is not None
105
+ self.cache: Cache = config.cache
106
+ self.skip: bool = config.getoption("stepwise_skip")
107
+ self.reset: bool = config.getoption("stepwise_reset")
108
+ self.cached_info = self._load_cached_info()
109
+
110
+ def _load_cached_info(self) -> StepwiseCacheInfo:
111
+ cached_dict: dict[str, Any] | None = self.cache.get(STEPWISE_CACHE_DIR, None)
112
+ if cached_dict:
113
+ try:
114
+ return StepwiseCacheInfo(
115
+ cached_dict["last_failed"],
116
+ cached_dict["last_test_count"],
117
+ cached_dict["last_cache_date_str"],
118
+ )
119
+ except (KeyError, TypeError) as e:
120
+ error = f"{type(e).__name__}: {e}"
121
+ self.report_status.append(f"error reading cache, discarding ({error})")
122
+
123
+ # Cache not found or error during load, return a new cache.
124
+ return StepwiseCacheInfo.empty()
125
+
126
+ def pytest_sessionstart(self, session: Session) -> None:
127
+ self.session = session
128
+
129
+ def pytest_collection_modifyitems(
130
+ self, config: Config, items: list[nodes.Item]
131
+ ) -> None:
132
+ last_test_count = self.cached_info.last_test_count
133
+ self.cached_info.last_test_count = len(items)
134
+
135
+ if self.reset:
136
+ self.report_status.append("resetting state, not skipping.")
137
+ self.cached_info.last_failed = None
138
+ return
139
+
140
+ if not self.cached_info.last_failed:
141
+ self.report_status.append("no previously failed tests, not skipping.")
142
+ return
143
+
144
+ if last_test_count is not None and last_test_count != len(items):
145
+ self.report_status.append(
146
+ f"test count changed, not skipping (now {len(items)} tests, previously {last_test_count})."
147
+ )
148
+ self.cached_info.last_failed = None
149
+ return
150
+
151
+ # Check all item nodes until we find a match on last failed.
152
+ failed_index = None
153
+ for index, item in enumerate(items):
154
+ if item.nodeid == self.cached_info.last_failed:
155
+ failed_index = index
156
+ break
157
+
158
+ # If the previously failed test was not found among the test items,
159
+ # do not skip any tests.
160
+ if failed_index is None:
161
+ self.report_status.append("previously failed test not found, not skipping.")
162
+ else:
163
+ cache_age = datetime.now() - self.cached_info.last_cache_date
164
+ # Round up to avoid showing microseconds.
165
+ cache_age = timedelta(seconds=int(cache_age.total_seconds()))
166
+ self.report_status.append(
167
+ f"skipping {failed_index} already passed items (cache from {cache_age} ago,"
168
+ f" use --sw-reset to discard)."
169
+ )
170
+ deselected = items[:failed_index]
171
+ del items[:failed_index]
172
+ config.hook.pytest_deselected(items=deselected)
173
+
174
+ def pytest_runtest_logreport(self, report: TestReport) -> None:
175
+ if report.failed:
176
+ if self.skip:
177
+ # Remove test from the failed ones (if it exists) and unset the skip option
178
+ # to make sure the following tests will not be skipped.
179
+ if report.nodeid == self.cached_info.last_failed:
180
+ self.cached_info.last_failed = None
181
+
182
+ self.skip = False
183
+ else:
184
+ # Mark test as the last failing and interrupt the test session.
185
+ self.cached_info.last_failed = report.nodeid
186
+ assert self.session is not None
187
+ self.session.shouldstop = (
188
+ "Test failed, continuing from this test next run."
189
+ )
190
+
191
+ else:
192
+ # If the test was actually run and did pass.
193
+ if report.when == "call":
194
+ # Remove test from the failed ones, if exists.
195
+ if report.nodeid == self.cached_info.last_failed:
196
+ self.cached_info.last_failed = None
197
+
198
+ def pytest_report_collectionfinish(self) -> list[str] | None:
199
+ if self.config.get_verbosity() >= 0 and self.report_status:
200
+ return [f"stepwise: {x}" for x in self.report_status]
201
+ return None
202
+
203
+ def pytest_sessionfinish(self) -> None:
204
+ if hasattr(self.config, "workerinput"):
205
+ # Do not update cache if this process is a xdist worker to prevent
206
+ # race conditions (#10641).
207
+ return
208
+ self.cached_info.update_date_to_now()
209
+ self.cache.set(STEPWISE_CACHE_DIR, dataclasses.asdict(self.cached_info))
archive/Axiovorax/.venv/Lib/site-packages/_pytest/terminal.py ADDED
@@ -0,0 +1,1641 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Terminal reporting of the full testing process.
3
+
4
+ This is a good source for looking at the various reporting hooks.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import argparse
10
+ from collections import Counter
11
+ from collections.abc import Callable
12
+ from collections.abc import Generator
13
+ from collections.abc import Mapping
14
+ from collections.abc import Sequence
15
+ import dataclasses
16
+ import datetime
17
+ from functools import partial
18
+ import inspect
19
+ from pathlib import Path
20
+ import platform
21
+ import sys
22
+ import textwrap
23
+ from typing import Any
24
+ from typing import ClassVar
25
+ from typing import final
26
+ from typing import Literal
27
+ from typing import NamedTuple
28
+ from typing import TextIO
29
+ from typing import TYPE_CHECKING
30
+ import warnings
31
+
32
+ import pluggy
33
+
34
+ from _pytest import compat
35
+ from _pytest import nodes
36
+ from _pytest import timing
37
+ from _pytest._code import ExceptionInfo
38
+ from _pytest._code.code import ExceptionRepr
39
+ from _pytest._io import TerminalWriter
40
+ from _pytest._io.wcwidth import wcswidth
41
+ import _pytest._version
42
+ from _pytest.assertion.util import running_on_ci
43
+ from _pytest.config import _PluggyPlugin
44
+ from _pytest.config import Config
45
+ from _pytest.config import ExitCode
46
+ from _pytest.config import hookimpl
47
+ from _pytest.config.argparsing import Parser
48
+ from _pytest.nodes import Item
49
+ from _pytest.nodes import Node
50
+ from _pytest.pathlib import absolutepath
51
+ from _pytest.pathlib import bestrelpath
52
+ from _pytest.reports import BaseReport
53
+ from _pytest.reports import CollectReport
54
+ from _pytest.reports import TestReport
55
+
56
+
57
+ if TYPE_CHECKING:
58
+ from _pytest.main import Session
59
+
60
+
61
+ REPORT_COLLECTING_RESOLUTION = 0.5
62
+
63
+ KNOWN_TYPES = (
64
+ "failed",
65
+ "passed",
66
+ "skipped",
67
+ "deselected",
68
+ "xfailed",
69
+ "xpassed",
70
+ "warnings",
71
+ "error",
72
+ )
73
+
74
+ _REPORTCHARS_DEFAULT = "fE"
75
+
76
+
77
+ class MoreQuietAction(argparse.Action):
78
+ """A modified copy of the argparse count action which counts down and updates
79
+ the legacy quiet attribute at the same time.
80
+
81
+ Used to unify verbosity handling.
82
+ """
83
+
84
+ def __init__(
85
+ self,
86
+ option_strings: Sequence[str],
87
+ dest: str,
88
+ default: object = None,
89
+ required: bool = False,
90
+ help: str | None = None,
91
+ ) -> None:
92
+ super().__init__(
93
+ option_strings=option_strings,
94
+ dest=dest,
95
+ nargs=0,
96
+ default=default,
97
+ required=required,
98
+ help=help,
99
+ )
100
+
101
+ def __call__(
102
+ self,
103
+ parser: argparse.ArgumentParser,
104
+ namespace: argparse.Namespace,
105
+ values: str | Sequence[object] | None,
106
+ option_string: str | None = None,
107
+ ) -> None:
108
+ new_count = getattr(namespace, self.dest, 0) - 1
109
+ setattr(namespace, self.dest, new_count)
110
+ # todo Deprecate config.quiet
111
+ namespace.quiet = getattr(namespace, "quiet", 0) + 1
112
+
113
+
114
+ class TestShortLogReport(NamedTuple):
115
+ """Used to store the test status result category, shortletter and verbose word.
116
+ For example ``"rerun", "R", ("RERUN", {"yellow": True})``.
117
+
118
+ :ivar category:
119
+ The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string.
120
+
121
+ :ivar letter:
122
+ The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string.
123
+
124
+ :ivar word:
125
+ Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``,
126
+ ``"ERROR"``, or the empty string.
127
+ """
128
+
129
+ category: str
130
+ letter: str
131
+ word: str | tuple[str, Mapping[str, bool]]
132
+
133
+
134
+ def pytest_addoption(parser: Parser) -> None:
135
+ group = parser.getgroup("terminal reporting", "Reporting", after="general")
136
+ group._addoption( # private to use reserved lower-case short option
137
+ "-v",
138
+ "--verbose",
139
+ action="count",
140
+ default=0,
141
+ dest="verbose",
142
+ help="Increase verbosity",
143
+ )
144
+ group.addoption(
145
+ "--no-header",
146
+ action="store_true",
147
+ default=False,
148
+ dest="no_header",
149
+ help="Disable header",
150
+ )
151
+ group.addoption(
152
+ "--no-summary",
153
+ action="store_true",
154
+ default=False,
155
+ dest="no_summary",
156
+ help="Disable summary",
157
+ )
158
+ group.addoption(
159
+ "--no-fold-skipped",
160
+ action="store_false",
161
+ dest="fold_skipped",
162
+ default=True,
163
+ help="Do not fold skipped tests in short summary.",
164
+ )
165
+ group.addoption(
166
+ "--force-short-summary",
167
+ action="store_true",
168
+ dest="force_short_summary",
169
+ default=False,
170
+ help="Force condensed summary output regardless of verbosity level.",
171
+ )
172
+ group._addoption( # private to use reserved lower-case short option
173
+ "-q",
174
+ "--quiet",
175
+ action=MoreQuietAction,
176
+ default=0,
177
+ dest="verbose",
178
+ help="Decrease verbosity",
179
+ )
180
+ group.addoption(
181
+ "--verbosity",
182
+ dest="verbose",
183
+ type=int,
184
+ default=0,
185
+ help="Set verbosity. Default: 0.",
186
+ )
187
+ group._addoption( # private to use reserved lower-case short option
188
+ "-r",
189
+ action="store",
190
+ dest="reportchars",
191
+ default=_REPORTCHARS_DEFAULT,
192
+ metavar="chars",
193
+ help="Show extra test summary info as specified by chars: (f)ailed, "
194
+ "(E)rror, (s)kipped, (x)failed, (X)passed, "
195
+ "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
196
+ "(w)arnings are enabled by default (see --disable-warnings), "
197
+ "'N' can be used to reset the list. (default: 'fE').",
198
+ )
199
+ group.addoption(
200
+ "--disable-warnings",
201
+ "--disable-pytest-warnings",
202
+ default=False,
203
+ dest="disable_warnings",
204
+ action="store_true",
205
+ help="Disable warnings summary",
206
+ )
207
+ group._addoption( # private to use reserved lower-case short option
208
+ "-l",
209
+ "--showlocals",
210
+ action="store_true",
211
+ dest="showlocals",
212
+ default=False,
213
+ help="Show locals in tracebacks (disabled by default)",
214
+ )
215
+ group.addoption(
216
+ "--no-showlocals",
217
+ action="store_false",
218
+ dest="showlocals",
219
+ help="Hide locals in tracebacks (negate --showlocals passed through addopts)",
220
+ )
221
+ group.addoption(
222
+ "--tb",
223
+ metavar="style",
224
+ action="store",
225
+ dest="tbstyle",
226
+ default="auto",
227
+ choices=["auto", "long", "short", "no", "line", "native"],
228
+ help="Traceback print mode (auto/long/short/line/native/no)",
229
+ )
230
+ group.addoption(
231
+ "--xfail-tb",
232
+ action="store_true",
233
+ dest="xfail_tb",
234
+ default=False,
235
+ help="Show tracebacks for xfail (as long as --tb != no)",
236
+ )
237
+ group.addoption(
238
+ "--show-capture",
239
+ action="store",
240
+ dest="showcapture",
241
+ choices=["no", "stdout", "stderr", "log", "all"],
242
+ default="all",
243
+ help="Controls how captured stdout/stderr/log is shown on failed tests. "
244
+ "Default: all.",
245
+ )
246
+ group.addoption(
247
+ "--fulltrace",
248
+ "--full-trace",
249
+ action="store_true",
250
+ default=False,
251
+ help="Don't cut any tracebacks (default is to cut)",
252
+ )
253
+ group.addoption(
254
+ "--color",
255
+ metavar="color",
256
+ action="store",
257
+ dest="color",
258
+ default="auto",
259
+ choices=["yes", "no", "auto"],
260
+ help="Color terminal output (yes/no/auto)",
261
+ )
262
+ group.addoption(
263
+ "--code-highlight",
264
+ default="yes",
265
+ choices=["yes", "no"],
266
+ help="Whether code should be highlighted (only if --color is also enabled). "
267
+ "Default: yes.",
268
+ )
269
+
270
+ parser.addini(
271
+ "console_output_style",
272
+ help='Console output: "classic", or with additional progress information '
273
+ '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces '
274
+ "progress even when capture=no)",
275
+ default="progress",
276
+ )
277
+ Config._add_verbosity_ini(
278
+ parser,
279
+ Config.VERBOSITY_TEST_CASES,
280
+ help=(
281
+ "Specify a verbosity level for test case execution, overriding the main level. "
282
+ "Higher levels will provide more detailed information about each test case executed."
283
+ ),
284
+ )
285
+
286
+
287
+ def pytest_configure(config: Config) -> None:
288
+ reporter = TerminalReporter(config, sys.stdout)
289
+ config.pluginmanager.register(reporter, "terminalreporter")
290
+ if config.option.debug or config.option.traceconfig:
291
+
292
+ def mywriter(tags, args):
293
+ msg = " ".join(map(str, args))
294
+ reporter.write_line("[traceconfig] " + msg)
295
+
296
+ config.trace.root.setprocessor("pytest:config", mywriter)
297
+
298
+
299
+ def getreportopt(config: Config) -> str:
300
+ reportchars: str = config.option.reportchars
301
+
302
+ old_aliases = {"F", "S"}
303
+ reportopts = ""
304
+ for char in reportchars:
305
+ if char in old_aliases:
306
+ char = char.lower()
307
+ if char == "a":
308
+ reportopts = "sxXEf"
309
+ elif char == "A":
310
+ reportopts = "PpsxXEf"
311
+ elif char == "N":
312
+ reportopts = ""
313
+ elif char not in reportopts:
314
+ reportopts += char
315
+
316
+ if not config.option.disable_warnings and "w" not in reportopts:
317
+ reportopts = "w" + reportopts
318
+ elif config.option.disable_warnings and "w" in reportopts:
319
+ reportopts = reportopts.replace("w", "")
320
+
321
+ return reportopts
322
+
323
+
324
+ @hookimpl(trylast=True) # after _pytest.runner
325
+ def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str]:
326
+ letter = "F"
327
+ if report.passed:
328
+ letter = "."
329
+ elif report.skipped:
330
+ letter = "s"
331
+
332
+ outcome: str = report.outcome
333
+ if report.when in ("collect", "setup", "teardown") and outcome == "failed":
334
+ outcome = "error"
335
+ letter = "E"
336
+
337
+ return outcome, letter, outcome.upper()
338
+
339
+
340
+ @dataclasses.dataclass
341
+ class WarningReport:
342
+ """Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
343
+
344
+ :ivar str message:
345
+ User friendly message about the warning.
346
+ :ivar str|None nodeid:
347
+ nodeid that generated the warning (see ``get_location``).
348
+ :ivar tuple fslocation:
349
+ File system location of the source of the warning (see ``get_location``).
350
+ """
351
+
352
+ message: str
353
+ nodeid: str | None = None
354
+ fslocation: tuple[str, int] | None = None
355
+
356
+ count_towards_summary: ClassVar = True
357
+
358
+ def get_location(self, config: Config) -> str | None:
359
+ """Return the more user-friendly information about the location of a warning, or None."""
360
+ if self.nodeid:
361
+ return self.nodeid
362
+ if self.fslocation:
363
+ filename, linenum = self.fslocation
364
+ relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename))
365
+ return f"{relpath}:{linenum}"
366
+ return None
367
+
368
+
369
+ @final
370
+ class TerminalReporter:
371
+ def __init__(self, config: Config, file: TextIO | None = None) -> None:
372
+ import _pytest.config
373
+
374
+ self.config = config
375
+ self._numcollected = 0
376
+ self._session: Session | None = None
377
+ self._showfspath: bool | None = None
378
+
379
+ self.stats: dict[str, list[Any]] = {}
380
+ self._main_color: str | None = None
381
+ self._known_types: list[str] | None = None
382
+ self.startpath = config.invocation_params.dir
383
+ if file is None:
384
+ file = sys.stdout
385
+ self._tw = _pytest.config.create_terminal_writer(config, file)
386
+ self._screen_width = self._tw.fullwidth
387
+ self.currentfspath: None | Path | str | int = None
388
+ self.reportchars = getreportopt(config)
389
+ self.foldskipped = config.option.fold_skipped
390
+ self.hasmarkup = self._tw.hasmarkup
391
+ # isatty should be a method but was wrongly implemented as a boolean.
392
+ # We use CallableBool here to support both.
393
+ self.isatty = compat.CallableBool(file.isatty())
394
+ self._progress_nodeids_reported: set[str] = set()
395
+ self._timing_nodeids_reported: set[str] = set()
396
+ self._show_progress_info = self._determine_show_progress_info()
397
+ self._collect_report_last_write = timing.Instant()
398
+ self._already_displayed_warnings: int | None = None
399
+ self._keyboardinterrupt_memo: ExceptionRepr | None = None
400
+
401
+ def _determine_show_progress_info(
402
+ self,
403
+ ) -> Literal["progress", "count", "times", False]:
404
+ """Return whether we should display progress information based on the current config."""
405
+ # do not show progress if we are not capturing output (#3038) unless explicitly
406
+ # overridden by progress-even-when-capture-no
407
+ if (
408
+ self.config.getoption("capture", "no") == "no"
409
+ and self.config.getini("console_output_style")
410
+ != "progress-even-when-capture-no"
411
+ ):
412
+ return False
413
+ # do not show progress if we are showing fixture setup/teardown
414
+ if self.config.getoption("setupshow", False):
415
+ return False
416
+ cfg: str = self.config.getini("console_output_style")
417
+ if cfg in {"progress", "progress-even-when-capture-no"}:
418
+ return "progress"
419
+ elif cfg == "count":
420
+ return "count"
421
+ elif cfg == "times":
422
+ return "times"
423
+ else:
424
+ return False
425
+
426
+ @property
427
+ def verbosity(self) -> int:
428
+ verbosity: int = self.config.option.verbose
429
+ return verbosity
430
+
431
+ @property
432
+ def showheader(self) -> bool:
433
+ return self.verbosity >= 0
434
+
435
+ @property
436
+ def no_header(self) -> bool:
437
+ return bool(self.config.option.no_header)
438
+
439
+ @property
440
+ def no_summary(self) -> bool:
441
+ return bool(self.config.option.no_summary)
442
+
443
+ @property
444
+ def showfspath(self) -> bool:
445
+ if self._showfspath is None:
446
+ return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
447
+ return self._showfspath
448
+
449
+ @showfspath.setter
450
+ def showfspath(self, value: bool | None) -> None:
451
+ self._showfspath = value
452
+
453
+ @property
454
+ def showlongtestinfo(self) -> bool:
455
+ return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0
456
+
457
+ def hasopt(self, char: str) -> bool:
458
+ char = {"xfailed": "x", "skipped": "s"}.get(char, char)
459
+ return char in self.reportchars
460
+
461
+ def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None:
462
+ fspath = self.config.rootpath / nodeid.split("::")[0]
463
+ if self.currentfspath is None or fspath != self.currentfspath:
464
+ if self.currentfspath is not None and self._show_progress_info:
465
+ self._write_progress_information_filling_space()
466
+ self.currentfspath = fspath
467
+ relfspath = bestrelpath(self.startpath, fspath)
468
+ self._tw.line()
469
+ self._tw.write(relfspath + " ")
470
+ self._tw.write(res, flush=True, **markup)
471
+
472
+ def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
473
+ if self.currentfspath != prefix:
474
+ self._tw.line()
475
+ self.currentfspath = prefix
476
+ self._tw.write(prefix)
477
+ if extra:
478
+ self._tw.write(extra, **kwargs)
479
+ self.currentfspath = -2
480
+
481
+ def ensure_newline(self) -> None:
482
+ if self.currentfspath:
483
+ self._tw.line()
484
+ self.currentfspath = None
485
+
486
+ def wrap_write(
487
+ self,
488
+ content: str,
489
+ *,
490
+ flush: bool = False,
491
+ margin: int = 8,
492
+ line_sep: str = "\n",
493
+ **markup: bool,
494
+ ) -> None:
495
+ """Wrap message with margin for progress info."""
496
+ width_of_current_line = self._tw.width_of_current_line
497
+ wrapped = line_sep.join(
498
+ textwrap.wrap(
499
+ " " * width_of_current_line + content,
500
+ width=self._screen_width - margin,
501
+ drop_whitespace=True,
502
+ replace_whitespace=False,
503
+ ),
504
+ )
505
+ wrapped = wrapped[width_of_current_line:]
506
+ self._tw.write(wrapped, flush=flush, **markup)
507
+
508
+ def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
509
+ self._tw.write(content, flush=flush, **markup)
510
+
511
+ def flush(self) -> None:
512
+ self._tw.flush()
513
+
514
+ def write_line(self, line: str | bytes, **markup: bool) -> None:
515
+ if not isinstance(line, str):
516
+ line = str(line, errors="replace")
517
+ self.ensure_newline()
518
+ self._tw.line(line, **markup)
519
+
520
+ def rewrite(self, line: str, **markup: bool) -> None:
521
+ """Rewinds the terminal cursor to the beginning and writes the given line.
522
+
523
+ :param erase:
524
+ If True, will also add spaces until the full terminal width to ensure
525
+ previous lines are properly erased.
526
+
527
+ The rest of the keyword arguments are markup instructions.
528
+ """
529
+ erase = markup.pop("erase", False)
530
+ if erase:
531
+ fill_count = self._tw.fullwidth - len(line) - 1
532
+ fill = " " * fill_count
533
+ else:
534
+ fill = ""
535
+ line = str(line)
536
+ self._tw.write("\r" + line + fill, **markup)
537
+
538
+ def write_sep(
539
+ self,
540
+ sep: str,
541
+ title: str | None = None,
542
+ fullwidth: int | None = None,
543
+ **markup: bool,
544
+ ) -> None:
545
+ self.ensure_newline()
546
+ self._tw.sep(sep, title, fullwidth, **markup)
547
+
548
+ def section(self, title: str, sep: str = "=", **kw: bool) -> None:
549
+ self._tw.sep(sep, title, **kw)
550
+
551
+ def line(self, msg: str, **kw: bool) -> None:
552
+ self._tw.line(msg, **kw)
553
+
554
+ def _add_stats(self, category: str, items: Sequence[Any]) -> None:
555
+ set_main_color = category not in self.stats
556
+ self.stats.setdefault(category, []).extend(items)
557
+ if set_main_color:
558
+ self._set_main_color()
559
+
560
+ def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
561
+ for line in str(excrepr).split("\n"):
562
+ self.write_line("INTERNALERROR> " + line)
563
+ return True
564
+
565
+ def pytest_warning_recorded(
566
+ self,
567
+ warning_message: warnings.WarningMessage,
568
+ nodeid: str,
569
+ ) -> None:
570
+ from _pytest.warnings import warning_record_to_str
571
+
572
+ fslocation = warning_message.filename, warning_message.lineno
573
+ message = warning_record_to_str(warning_message)
574
+
575
+ warning_report = WarningReport(
576
+ fslocation=fslocation, message=message, nodeid=nodeid
577
+ )
578
+ self._add_stats("warnings", [warning_report])
579
+
580
+ def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
581
+ if self.config.option.traceconfig:
582
+ msg = f"PLUGIN registered: {plugin}"
583
+ # XXX This event may happen during setup/teardown time
584
+ # which unfortunately captures our output here
585
+ # which garbles our output if we use self.write_line.
586
+ self.write_line(msg)
587
+
588
+ def pytest_deselected(self, items: Sequence[Item]) -> None:
589
+ self._add_stats("deselected", items)
590
+
591
+ def pytest_runtest_logstart(
592
+ self, nodeid: str, location: tuple[str, int | None, str]
593
+ ) -> None:
594
+ fspath, lineno, domain = location
595
+ # Ensure that the path is printed before the
596
+ # 1st test of a module starts running.
597
+ if self.showlongtestinfo:
598
+ line = self._locationline(nodeid, fspath, lineno, domain)
599
+ self.write_ensure_prefix(line, "")
600
+ self.flush()
601
+ elif self.showfspath:
602
+ self.write_fspath_result(nodeid, "")
603
+ self.flush()
604
+
605
+ def pytest_runtest_logreport(self, report: TestReport) -> None:
606
+ self._tests_ran = True
607
+ rep = report
608
+
609
+ res = TestShortLogReport(
610
+ *self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
611
+ )
612
+ category, letter, word = res.category, res.letter, res.word
613
+ if not isinstance(word, tuple):
614
+ markup = None
615
+ else:
616
+ word, markup = word
617
+ self._add_stats(category, [rep])
618
+ if not letter and not word:
619
+ # Probably passed setup/teardown.
620
+ return
621
+ if markup is None:
622
+ was_xfail = hasattr(report, "wasxfail")
623
+ if rep.passed and not was_xfail:
624
+ markup = {"green": True}
625
+ elif rep.passed and was_xfail:
626
+ markup = {"yellow": True}
627
+ elif rep.failed:
628
+ markup = {"red": True}
629
+ elif rep.skipped:
630
+ markup = {"yellow": True}
631
+ else:
632
+ markup = {}
633
+ self._progress_nodeids_reported.add(rep.nodeid)
634
+ if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
635
+ self._tw.write(letter, **markup)
636
+ # When running in xdist, the logreport and logfinish of multiple
637
+ # items are interspersed, e.g. `logreport`, `logreport`,
638
+ # `logfinish`, `logfinish`. To avoid the "past edge" calculation
639
+ # from getting confused and overflowing (#7166), do the past edge
640
+ # printing here and not in logfinish, except for the 100% which
641
+ # should only be printed after all teardowns are finished.
642
+ if self._show_progress_info and not self._is_last_item:
643
+ self._write_progress_information_if_past_edge()
644
+ else:
645
+ line = self._locationline(rep.nodeid, *rep.location)
646
+ running_xdist = hasattr(rep, "node")
647
+ if not running_xdist:
648
+ self.write_ensure_prefix(line, word, **markup)
649
+ if rep.skipped or hasattr(report, "wasxfail"):
650
+ reason = _get_raw_skip_reason(rep)
651
+ if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
652
+ available_width = (
653
+ (self._tw.fullwidth - self._tw.width_of_current_line)
654
+ - len(" [100%]")
655
+ - 1
656
+ )
657
+ formatted_reason = _format_trimmed(
658
+ " ({})", reason, available_width
659
+ )
660
+ else:
661
+ formatted_reason = f" ({reason})"
662
+
663
+ if reason and formatted_reason is not None:
664
+ self.wrap_write(formatted_reason)
665
+ if self._show_progress_info:
666
+ self._write_progress_information_filling_space()
667
+ else:
668
+ self.ensure_newline()
669
+ self._tw.write(f"[{rep.node.gateway.id}]")
670
+ if self._show_progress_info:
671
+ self._tw.write(
672
+ self._get_progress_information_message() + " ", cyan=True
673
+ )
674
+ else:
675
+ self._tw.write(" ")
676
+ self._tw.write(word, **markup)
677
+ self._tw.write(" " + line)
678
+ self.currentfspath = -2
679
+ self.flush()
680
+
681
+ @property
682
+ def _is_last_item(self) -> bool:
683
+ assert self._session is not None
684
+ return len(self._progress_nodeids_reported) == self._session.testscollected
685
+
686
+ @hookimpl(wrapper=True)
687
+ def pytest_runtestloop(self) -> Generator[None, object, object]:
688
+ result = yield
689
+
690
+ # Write the final/100% progress -- deferred until the loop is complete.
691
+ if (
692
+ self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
693
+ and self._show_progress_info
694
+ and self._progress_nodeids_reported
695
+ ):
696
+ self._write_progress_information_filling_space()
697
+
698
+ return result
699
+
700
+ def _get_progress_information_message(self) -> str:
701
+ assert self._session
702
+ collected = self._session.testscollected
703
+ if self._show_progress_info == "count":
704
+ if collected:
705
+ progress = len(self._progress_nodeids_reported)
706
+ counter_format = f"{{:{len(str(collected))}d}}"
707
+ format_string = f" [{counter_format}/{{}}]"
708
+ return format_string.format(progress, collected)
709
+ return f" [ {collected} / {collected} ]"
710
+ if self._show_progress_info == "times":
711
+ if not collected:
712
+ return ""
713
+ all_reports = (
714
+ self._get_reports_to_display("passed")
715
+ + self._get_reports_to_display("xpassed")
716
+ + self._get_reports_to_display("failed")
717
+ + self._get_reports_to_display("xfailed")
718
+ + self._get_reports_to_display("skipped")
719
+ + self._get_reports_to_display("error")
720
+ + self._get_reports_to_display("")
721
+ )
722
+ current_location = all_reports[-1].location[0]
723
+ not_reported = [
724
+ r for r in all_reports if r.nodeid not in self._timing_nodeids_reported
725
+ ]
726
+ tests_in_module = sum(
727
+ i.location[0] == current_location for i in self._session.items
728
+ )
729
+ tests_completed = sum(
730
+ r.when == "setup"
731
+ for r in not_reported
732
+ if r.location[0] == current_location
733
+ )
734
+ last_in_module = tests_completed == tests_in_module
735
+ if self.showlongtestinfo or last_in_module:
736
+ self._timing_nodeids_reported.update(r.nodeid for r in not_reported)
737
+ return format_node_duration(sum(r.duration for r in not_reported))
738
+ return ""
739
+ if collected:
740
+ return f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]"
741
+ return " [100%]"
742
+
743
+ def _write_progress_information_if_past_edge(self) -> None:
744
+ w = self._width_of_current_line
745
+ if self._show_progress_info == "count":
746
+ assert self._session
747
+ num_tests = self._session.testscollected
748
+ progress_length = len(f" [{num_tests}/{num_tests}]")
749
+ elif self._show_progress_info == "times":
750
+ progress_length = len(" 99h 59m")
751
+ else:
752
+ progress_length = len(" [100%]")
753
+ past_edge = w + progress_length + 1 >= self._screen_width
754
+ if past_edge:
755
+ main_color, _ = self._get_main_color()
756
+ msg = self._get_progress_information_message()
757
+ self._tw.write(msg + "\n", **{main_color: True})
758
+
759
+ def _write_progress_information_filling_space(self) -> None:
760
+ color, _ = self._get_main_color()
761
+ msg = self._get_progress_information_message()
762
+ w = self._width_of_current_line
763
+ fill = self._tw.fullwidth - w - 1
764
+ self.write(msg.rjust(fill), flush=True, **{color: True})
765
+
766
+ @property
767
+ def _width_of_current_line(self) -> int:
768
+ """Return the width of the current line."""
769
+ return self._tw.width_of_current_line
770
+
771
+ def pytest_collection(self) -> None:
772
+ if self.isatty():
773
+ if self.config.option.verbose >= 0:
774
+ self.write("collecting ... ", flush=True, bold=True)
775
+ elif self.config.option.verbose >= 1:
776
+ self.write("collecting ... ", flush=True, bold=True)
777
+
778
+ def pytest_collectreport(self, report: CollectReport) -> None:
779
+ if report.failed:
780
+ self._add_stats("error", [report])
781
+ elif report.skipped:
782
+ self._add_stats("skipped", [report])
783
+ items = [x for x in report.result if isinstance(x, Item)]
784
+ self._numcollected += len(items)
785
+ if self.isatty():
786
+ self.report_collect()
787
+
788
+ def report_collect(self, final: bool = False) -> None:
789
+ if self.config.option.verbose < 0:
790
+ return
791
+
792
+ if not final:
793
+ # Only write the "collecting" report every `REPORT_COLLECTING_RESOLUTION`.
794
+ if (
795
+ self._collect_report_last_write.elapsed().seconds
796
+ < REPORT_COLLECTING_RESOLUTION
797
+ ):
798
+ return
799
+ self._collect_report_last_write = timing.Instant()
800
+
801
+ errors = len(self.stats.get("error", []))
802
+ skipped = len(self.stats.get("skipped", []))
803
+ deselected = len(self.stats.get("deselected", []))
804
+ selected = self._numcollected - deselected
805
+ line = "collected " if final else "collecting "
806
+ line += (
807
+ str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
808
+ )
809
+ if errors:
810
+ line += f" / {errors} error{'s' if errors != 1 else ''}"
811
+ if deselected:
812
+ line += f" / {deselected} deselected"
813
+ if skipped:
814
+ line += f" / {skipped} skipped"
815
+ if self._numcollected > selected:
816
+ line += f" / {selected} selected"
817
+ if self.isatty():
818
+ self.rewrite(line, bold=True, erase=True)
819
+ if final:
820
+ self.write("\n")
821
+ else:
822
+ self.write_line(line)
823
+
824
+ @hookimpl(trylast=True)
825
+ def pytest_sessionstart(self, session: Session) -> None:
826
+ self._session = session
827
+ self._session_start = timing.Instant()
828
+ if not self.showheader:
829
+ return
830
+ self.write_sep("=", "test session starts", bold=True)
831
+ verinfo = platform.python_version()
832
+ if not self.no_header:
833
+ msg = f"platform {sys.platform} -- Python {verinfo}"
834
+ pypy_version_info = getattr(sys, "pypy_version_info", None)
835
+ if pypy_version_info:
836
+ verinfo = ".".join(map(str, pypy_version_info[:3]))
837
+ msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
838
+ msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}"
839
+ if (
840
+ self.verbosity > 0
841
+ or self.config.option.debug
842
+ or getattr(self.config.option, "pastebin", None)
843
+ ):
844
+ msg += " -- " + str(sys.executable)
845
+ self.write_line(msg)
846
+ lines = self.config.hook.pytest_report_header(
847
+ config=self.config, start_path=self.startpath
848
+ )
849
+ self._write_report_lines_from_hooks(lines)
850
+
851
+ def _write_report_lines_from_hooks(
852
+ self, lines: Sequence[str | Sequence[str]]
853
+ ) -> None:
854
+ for line_or_lines in reversed(lines):
855
+ if isinstance(line_or_lines, str):
856
+ self.write_line(line_or_lines)
857
+ else:
858
+ for line in line_or_lines:
859
+ self.write_line(line)
860
+
861
+ def pytest_report_header(self, config: Config) -> list[str]:
862
+ result = [f"rootdir: {config.rootpath}"]
863
+
864
+ if config.inipath:
865
+ result.append("configfile: " + bestrelpath(config.rootpath, config.inipath))
866
+
867
+ if config.args_source == Config.ArgsSource.TESTPATHS:
868
+ testpaths: list[str] = config.getini("testpaths")
869
+ result.append("testpaths: {}".format(", ".join(testpaths)))
870
+
871
+ plugininfo = config.pluginmanager.list_plugin_distinfo()
872
+ if plugininfo:
873
+ result.append(
874
+ "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo)))
875
+ )
876
+ return result
877
+
878
+ def pytest_collection_finish(self, session: Session) -> None:
879
+ self.report_collect(True)
880
+
881
+ lines = self.config.hook.pytest_report_collectionfinish(
882
+ config=self.config,
883
+ start_path=self.startpath,
884
+ items=session.items,
885
+ )
886
+ self._write_report_lines_from_hooks(lines)
887
+
888
+ if self.config.getoption("collectonly"):
889
+ if session.items:
890
+ if self.config.option.verbose > -1:
891
+ self._tw.line("")
892
+ self._printcollecteditems(session.items)
893
+
894
+ failed = self.stats.get("failed")
895
+ if failed:
896
+ self._tw.sep("!", "collection failures")
897
+ for rep in failed:
898
+ rep.toterminal(self._tw)
899
+
900
+ def _printcollecteditems(self, items: Sequence[Item]) -> None:
901
+ test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
902
+ if test_cases_verbosity < 0:
903
+ if test_cases_verbosity < -1:
904
+ counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
905
+ for name, count in sorted(counts.items()):
906
+ self._tw.line(f"{name}: {count}")
907
+ else:
908
+ for item in items:
909
+ self._tw.line(item.nodeid)
910
+ return
911
+ stack: list[Node] = []
912
+ indent = ""
913
+ for item in items:
914
+ needed_collectors = item.listchain()[1:] # strip root node
915
+ while stack:
916
+ if stack == needed_collectors[: len(stack)]:
917
+ break
918
+ stack.pop()
919
+ for col in needed_collectors[len(stack) :]:
920
+ stack.append(col)
921
+ indent = (len(stack) - 1) * " "
922
+ self._tw.line(f"{indent}{col}")
923
+ if test_cases_verbosity >= 1:
924
+ obj = getattr(col, "obj", None)
925
+ doc = inspect.getdoc(obj) if obj else None
926
+ if doc:
927
+ for line in doc.splitlines():
928
+ self._tw.line("{}{}".format(indent + " ", line))
929
+
930
+ @hookimpl(wrapper=True)
931
+ def pytest_sessionfinish(
932
+ self, session: Session, exitstatus: int | ExitCode
933
+ ) -> Generator[None]:
934
+ result = yield
935
+ self._tw.line("")
936
+ summary_exit_codes = (
937
+ ExitCode.OK,
938
+ ExitCode.TESTS_FAILED,
939
+ ExitCode.INTERRUPTED,
940
+ ExitCode.USAGE_ERROR,
941
+ ExitCode.NO_TESTS_COLLECTED,
942
+ )
943
+ if exitstatus in summary_exit_codes and not self.no_summary:
944
+ self.config.hook.pytest_terminal_summary(
945
+ terminalreporter=self, exitstatus=exitstatus, config=self.config
946
+ )
947
+ if session.shouldfail:
948
+ self.write_sep("!", str(session.shouldfail), red=True)
949
+ if exitstatus == ExitCode.INTERRUPTED:
950
+ self._report_keyboardinterrupt()
951
+ self._keyboardinterrupt_memo = None
952
+ elif session.shouldstop:
953
+ self.write_sep("!", str(session.shouldstop), red=True)
954
+ self.summary_stats()
955
+ return result
956
+
957
+ @hookimpl(wrapper=True)
958
+ def pytest_terminal_summary(self) -> Generator[None]:
959
+ self.summary_errors()
960
+ self.summary_failures()
961
+ self.summary_xfailures()
962
+ self.summary_warnings()
963
+ self.summary_passes()
964
+ self.summary_xpasses()
965
+ try:
966
+ return (yield)
967
+ finally:
968
+ self.short_test_summary()
969
+ # Display any extra warnings from teardown here (if any).
970
+ self.summary_warnings()
971
+
972
+ def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
973
+ self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
974
+
975
+ def pytest_unconfigure(self) -> None:
976
+ if self._keyboardinterrupt_memo is not None:
977
+ self._report_keyboardinterrupt()
978
+
979
+ def _report_keyboardinterrupt(self) -> None:
980
+ excrepr = self._keyboardinterrupt_memo
981
+ assert excrepr is not None
982
+ assert excrepr.reprcrash is not None
983
+ msg = excrepr.reprcrash.message
984
+ self.write_sep("!", msg)
985
+ if "KeyboardInterrupt" in msg:
986
+ if self.config.option.fulltrace:
987
+ excrepr.toterminal(self._tw)
988
+ else:
989
+ excrepr.reprcrash.toterminal(self._tw)
990
+ self._tw.line(
991
+ "(to show a full traceback on KeyboardInterrupt use --full-trace)",
992
+ yellow=True,
993
+ )
994
+
995
+ def _locationline(
996
+ self, nodeid: str, fspath: str, lineno: int | None, domain: str
997
+ ) -> str:
998
+ def mkrel(nodeid: str) -> str:
999
+ line = self.config.cwd_relative_nodeid(nodeid)
1000
+ if domain and line.endswith(domain):
1001
+ line = line[: -len(domain)]
1002
+ values = domain.split("[")
1003
+ values[0] = values[0].replace(".", "::") # don't replace '.' in params
1004
+ line += "[".join(values)
1005
+ return line
1006
+
1007
+ # fspath comes from testid which has a "/"-normalized path.
1008
+ if fspath:
1009
+ res = mkrel(nodeid)
1010
+ if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
1011
+ "\\", nodes.SEP
1012
+ ):
1013
+ res += " <- " + bestrelpath(self.startpath, Path(fspath))
1014
+ else:
1015
+ res = "[location]"
1016
+ return res + " "
1017
+
1018
+ def _getfailureheadline(self, rep):
1019
+ head_line = rep.head_line
1020
+ if head_line:
1021
+ return head_line
1022
+ return "test session" # XXX?
1023
+
1024
+ def _getcrashline(self, rep):
1025
+ try:
1026
+ return str(rep.longrepr.reprcrash)
1027
+ except AttributeError:
1028
+ try:
1029
+ return str(rep.longrepr)[:50]
1030
+ except AttributeError:
1031
+ return ""
1032
+
1033
+ #
1034
+ # Summaries for sessionfinish.
1035
+ #
1036
+ def getreports(self, name: str):
1037
+ return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")]
1038
+
1039
+ def summary_warnings(self) -> None:
1040
+ if self.hasopt("w"):
1041
+ all_warnings: list[WarningReport] | None = self.stats.get("warnings")
1042
+ if not all_warnings:
1043
+ return
1044
+
1045
+ final = self._already_displayed_warnings is not None
1046
+ if final:
1047
+ warning_reports = all_warnings[self._already_displayed_warnings :]
1048
+ else:
1049
+ warning_reports = all_warnings
1050
+ self._already_displayed_warnings = len(warning_reports)
1051
+ if not warning_reports:
1052
+ return
1053
+
1054
+ reports_grouped_by_message: dict[str, list[WarningReport]] = {}
1055
+ for wr in warning_reports:
1056
+ reports_grouped_by_message.setdefault(wr.message, []).append(wr)
1057
+
1058
+ def collapsed_location_report(reports: list[WarningReport]) -> str:
1059
+ locations = []
1060
+ for w in reports:
1061
+ location = w.get_location(self.config)
1062
+ if location:
1063
+ locations.append(location)
1064
+
1065
+ if len(locations) < 10:
1066
+ return "\n".join(map(str, locations))
1067
+
1068
+ counts_by_filename = Counter(
1069
+ str(loc).split("::", 1)[0] for loc in locations
1070
+ )
1071
+ return "\n".join(
1072
+ "{}: {} warning{}".format(k, v, "s" if v > 1 else "")
1073
+ for k, v in counts_by_filename.items()
1074
+ )
1075
+
1076
+ title = "warnings summary (final)" if final else "warnings summary"
1077
+ self.write_sep("=", title, yellow=True, bold=False)
1078
+ for message, message_reports in reports_grouped_by_message.items():
1079
+ maybe_location = collapsed_location_report(message_reports)
1080
+ if maybe_location:
1081
+ self._tw.line(maybe_location)
1082
+ lines = message.splitlines()
1083
+ indented = "\n".join(" " + x for x in lines)
1084
+ message = indented.rstrip()
1085
+ else:
1086
+ message = message.rstrip()
1087
+ self._tw.line(message)
1088
+ self._tw.line()
1089
+ self._tw.line(
1090
+ "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html"
1091
+ )
1092
+
1093
+ def summary_passes(self) -> None:
1094
+ self.summary_passes_combined("passed", "PASSES", "P")
1095
+
1096
+ def summary_xpasses(self) -> None:
1097
+ self.summary_passes_combined("xpassed", "XPASSES", "X")
1098
+
1099
+ def summary_passes_combined(
1100
+ self, which_reports: str, sep_title: str, needed_opt: str
1101
+ ) -> None:
1102
+ if self.config.option.tbstyle != "no":
1103
+ if self.hasopt(needed_opt):
1104
+ reports: list[TestReport] = self.getreports(which_reports)
1105
+ if not reports:
1106
+ return
1107
+ self.write_sep("=", sep_title)
1108
+ for rep in reports:
1109
+ if rep.sections:
1110
+ msg = self._getfailureheadline(rep)
1111
+ self.write_sep("_", msg, green=True, bold=True)
1112
+ self._outrep_summary(rep)
1113
+ self._handle_teardown_sections(rep.nodeid)
1114
+
1115
+ def _get_teardown_reports(self, nodeid: str) -> list[TestReport]:
1116
+ reports = self.getreports("")
1117
+ return [
1118
+ report
1119
+ for report in reports
1120
+ if report.when == "teardown" and report.nodeid == nodeid
1121
+ ]
1122
+
1123
+ def _handle_teardown_sections(self, nodeid: str) -> None:
1124
+ for report in self._get_teardown_reports(nodeid):
1125
+ self.print_teardown_sections(report)
1126
+
1127
+ def print_teardown_sections(self, rep: TestReport) -> None:
1128
+ showcapture = self.config.option.showcapture
1129
+ if showcapture == "no":
1130
+ return
1131
+ for secname, content in rep.sections:
1132
+ if showcapture != "all" and showcapture not in secname:
1133
+ continue
1134
+ if "teardown" in secname:
1135
+ self._tw.sep("-", secname)
1136
+ if content[-1:] == "\n":
1137
+ content = content[:-1]
1138
+ self._tw.line(content)
1139
+
1140
+ def summary_failures(self) -> None:
1141
+ style = self.config.option.tbstyle
1142
+ self.summary_failures_combined("failed", "FAILURES", style=style)
1143
+
1144
+ def summary_xfailures(self) -> None:
1145
+ show_tb = self.config.option.xfail_tb
1146
+ style = self.config.option.tbstyle if show_tb else "no"
1147
+ self.summary_failures_combined("xfailed", "XFAILURES", style=style)
1148
+
1149
+ def summary_failures_combined(
1150
+ self,
1151
+ which_reports: str,
1152
+ sep_title: str,
1153
+ *,
1154
+ style: str,
1155
+ needed_opt: str | None = None,
1156
+ ) -> None:
1157
+ if style != "no":
1158
+ if not needed_opt or self.hasopt(needed_opt):
1159
+ reports: list[BaseReport] = self.getreports(which_reports)
1160
+ if not reports:
1161
+ return
1162
+ self.write_sep("=", sep_title)
1163
+ if style == "line":
1164
+ for rep in reports:
1165
+ line = self._getcrashline(rep)
1166
+ self.write_line(line)
1167
+ else:
1168
+ for rep in reports:
1169
+ msg = self._getfailureheadline(rep)
1170
+ self.write_sep("_", msg, red=True, bold=True)
1171
+ self._outrep_summary(rep)
1172
+ self._handle_teardown_sections(rep.nodeid)
1173
+
1174
+ def summary_errors(self) -> None:
1175
+ if self.config.option.tbstyle != "no":
1176
+ reports: list[BaseReport] = self.getreports("error")
1177
+ if not reports:
1178
+ return
1179
+ self.write_sep("=", "ERRORS")
1180
+ for rep in self.stats["error"]:
1181
+ msg = self._getfailureheadline(rep)
1182
+ if rep.when == "collect":
1183
+ msg = "ERROR collecting " + msg
1184
+ else:
1185
+ msg = f"ERROR at {rep.when} of {msg}"
1186
+ self.write_sep("_", msg, red=True, bold=True)
1187
+ self._outrep_summary(rep)
1188
+
1189
+ def _outrep_summary(self, rep: BaseReport) -> None:
1190
+ rep.toterminal(self._tw)
1191
+ showcapture = self.config.option.showcapture
1192
+ if showcapture == "no":
1193
+ return
1194
+ for secname, content in rep.sections:
1195
+ if showcapture != "all" and showcapture not in secname:
1196
+ continue
1197
+ self._tw.sep("-", secname)
1198
+ if content[-1:] == "\n":
1199
+ content = content[:-1]
1200
+ self._tw.line(content)
1201
+
1202
+ def summary_stats(self) -> None:
1203
+ if self.verbosity < -1:
1204
+ return
1205
+
1206
+ session_duration = self._session_start.elapsed()
1207
+ (parts, main_color) = self.build_summary_stats_line()
1208
+ line_parts = []
1209
+
1210
+ display_sep = self.verbosity >= 0
1211
+ if display_sep:
1212
+ fullwidth = self._tw.fullwidth
1213
+ for text, markup in parts:
1214
+ with_markup = self._tw.markup(text, **markup)
1215
+ if display_sep:
1216
+ fullwidth += len(with_markup) - len(text)
1217
+ line_parts.append(with_markup)
1218
+ msg = ", ".join(line_parts)
1219
+
1220
+ main_markup = {main_color: True}
1221
+ duration = f" in {format_session_duration(session_duration.seconds)}"
1222
+ duration_with_markup = self._tw.markup(duration, **main_markup)
1223
+ if display_sep:
1224
+ fullwidth += len(duration_with_markup) - len(duration)
1225
+ msg += duration_with_markup
1226
+
1227
+ if display_sep:
1228
+ markup_for_end_sep = self._tw.markup("", **main_markup)
1229
+ if markup_for_end_sep.endswith("\x1b[0m"):
1230
+ markup_for_end_sep = markup_for_end_sep[:-4]
1231
+ fullwidth += len(markup_for_end_sep)
1232
+ msg += markup_for_end_sep
1233
+
1234
+ if display_sep:
1235
+ self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
1236
+ else:
1237
+ self.write_line(msg, **main_markup)
1238
+
1239
+ def short_test_summary(self) -> None:
1240
+ if not self.reportchars:
1241
+ return
1242
+
1243
+ def show_simple(lines: list[str], *, stat: str) -> None:
1244
+ failed = self.stats.get(stat, [])
1245
+ if not failed:
1246
+ return
1247
+ config = self.config
1248
+ for rep in failed:
1249
+ color = _color_for_type.get(stat, _color_for_type_default)
1250
+ line = _get_line_with_reprcrash_message(
1251
+ config, rep, self._tw, {color: True}
1252
+ )
1253
+ lines.append(line)
1254
+
1255
+ def show_xfailed(lines: list[str]) -> None:
1256
+ xfailed = self.stats.get("xfailed", [])
1257
+ for rep in xfailed:
1258
+ verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
1259
+ self.config, {_color_for_type["warnings"]: True}
1260
+ )
1261
+ markup_word = self._tw.markup(verbose_word, **verbose_markup)
1262
+ nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
1263
+ line = f"{markup_word} {nodeid}"
1264
+ reason = rep.wasxfail
1265
+ if reason:
1266
+ line += " - " + str(reason)
1267
+
1268
+ lines.append(line)
1269
+
1270
+ def show_xpassed(lines: list[str]) -> None:
1271
+ xpassed = self.stats.get("xpassed", [])
1272
+ for rep in xpassed:
1273
+ verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
1274
+ self.config, {_color_for_type["warnings"]: True}
1275
+ )
1276
+ markup_word = self._tw.markup(verbose_word, **verbose_markup)
1277
+ nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
1278
+ line = f"{markup_word} {nodeid}"
1279
+ reason = rep.wasxfail
1280
+ if reason:
1281
+ line += " - " + str(reason)
1282
+ lines.append(line)
1283
+
1284
+ def show_skipped_folded(lines: list[str]) -> None:
1285
+ skipped: list[CollectReport] = self.stats.get("skipped", [])
1286
+ fskips = _folded_skips(self.startpath, skipped) if skipped else []
1287
+ if not fskips:
1288
+ return
1289
+ verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup(
1290
+ self.config, {_color_for_type["warnings"]: True}
1291
+ )
1292
+ markup_word = self._tw.markup(verbose_word, **verbose_markup)
1293
+ prefix = "Skipped: "
1294
+ for num, fspath, lineno, reason in fskips:
1295
+ if reason.startswith(prefix):
1296
+ reason = reason[len(prefix) :]
1297
+ if lineno is not None:
1298
+ lines.append(f"{markup_word} [{num}] {fspath}:{lineno}: {reason}")
1299
+ else:
1300
+ lines.append(f"{markup_word} [{num}] {fspath}: {reason}")
1301
+
1302
+ def show_skipped_unfolded(lines: list[str]) -> None:
1303
+ skipped: list[CollectReport] = self.stats.get("skipped", [])
1304
+
1305
+ for rep in skipped:
1306
+ assert rep.longrepr is not None
1307
+ assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr)
1308
+ assert len(rep.longrepr) == 3, (rep, rep.longrepr)
1309
+
1310
+ verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
1311
+ self.config, {_color_for_type["warnings"]: True}
1312
+ )
1313
+ markup_word = self._tw.markup(verbose_word, **verbose_markup)
1314
+ nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
1315
+ line = f"{markup_word} {nodeid}"
1316
+ reason = rep.longrepr[2]
1317
+ if reason:
1318
+ line += " - " + str(reason)
1319
+ lines.append(line)
1320
+
1321
+ def show_skipped(lines: list[str]) -> None:
1322
+ if self.foldskipped:
1323
+ show_skipped_folded(lines)
1324
+ else:
1325
+ show_skipped_unfolded(lines)
1326
+
1327
+ REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = {
1328
+ "x": show_xfailed,
1329
+ "X": show_xpassed,
1330
+ "f": partial(show_simple, stat="failed"),
1331
+ "s": show_skipped,
1332
+ "p": partial(show_simple, stat="passed"),
1333
+ "E": partial(show_simple, stat="error"),
1334
+ }
1335
+
1336
+ lines: list[str] = []
1337
+ for char in self.reportchars:
1338
+ action = REPORTCHAR_ACTIONS.get(char)
1339
+ if action: # skipping e.g. "P" (passed with output) here.
1340
+ action(lines)
1341
+
1342
+ if lines:
1343
+ self.write_sep("=", "short test summary info", cyan=True, bold=True)
1344
+ for line in lines:
1345
+ self.write_line(line)
1346
+
1347
+ def _get_main_color(self) -> tuple[str, list[str]]:
1348
+ if self._main_color is None or self._known_types is None or self._is_last_item:
1349
+ self._set_main_color()
1350
+ assert self._main_color
1351
+ assert self._known_types
1352
+ return self._main_color, self._known_types
1353
+
1354
+ def _determine_main_color(self, unknown_type_seen: bool) -> str:
1355
+ stats = self.stats
1356
+ if "failed" in stats or "error" in stats:
1357
+ main_color = "red"
1358
+ elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
1359
+ main_color = "yellow"
1360
+ elif "passed" in stats or not self._is_last_item:
1361
+ main_color = "green"
1362
+ else:
1363
+ main_color = "yellow"
1364
+ return main_color
1365
+
1366
+ def _set_main_color(self) -> None:
1367
+ unknown_types: list[str] = []
1368
+ for found_type in self.stats:
1369
+ if found_type: # setup/teardown reports have an empty key, ignore them
1370
+ if found_type not in KNOWN_TYPES and found_type not in unknown_types:
1371
+ unknown_types.append(found_type)
1372
+ self._known_types = list(KNOWN_TYPES) + unknown_types
1373
+ self._main_color = self._determine_main_color(bool(unknown_types))
1374
+
1375
+ def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]:
1376
+ """
1377
+ Build the parts used in the last summary stats line.
1378
+
1379
+ The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===".
1380
+
1381
+ This function builds a list of the "parts" that make up for the text in that line, in
1382
+ the example above it would be::
1383
+
1384
+ [
1385
+ ("12 passed", {"green": True}),
1386
+ ("2 errors", {"red": True}
1387
+ ]
1388
+
1389
+ That last dict for each line is a "markup dictionary", used by TerminalWriter to
1390
+ color output.
1391
+
1392
+ The final color of the line is also determined by this function, and is the second
1393
+ element of the returned tuple.
1394
+ """
1395
+ if self.config.getoption("collectonly"):
1396
+ return self._build_collect_only_summary_stats_line()
1397
+ else:
1398
+ return self._build_normal_summary_stats_line()
1399
+
1400
+ def _get_reports_to_display(self, key: str) -> list[Any]:
1401
+ """Get test/collection reports for the given status key, such as `passed` or `error`."""
1402
+ reports = self.stats.get(key, [])
1403
+ return [x for x in reports if getattr(x, "count_towards_summary", True)]
1404
+
1405
+ def _build_normal_summary_stats_line(
1406
+ self,
1407
+ ) -> tuple[list[tuple[str, dict[str, bool]]], str]:
1408
+ main_color, known_types = self._get_main_color()
1409
+ parts = []
1410
+
1411
+ for key in known_types:
1412
+ reports = self._get_reports_to_display(key)
1413
+ if reports:
1414
+ count = len(reports)
1415
+ color = _color_for_type.get(key, _color_for_type_default)
1416
+ markup = {color: True, "bold": color == main_color}
1417
+ parts.append(("%d %s" % pluralize(count, key), markup)) # noqa: UP031
1418
+
1419
+ if not parts:
1420
+ parts = [("no tests ran", {_color_for_type_default: True})]
1421
+
1422
+ return parts, main_color
1423
+
1424
+ def _build_collect_only_summary_stats_line(
1425
+ self,
1426
+ ) -> tuple[list[tuple[str, dict[str, bool]]], str]:
1427
+ deselected = len(self._get_reports_to_display("deselected"))
1428
+ errors = len(self._get_reports_to_display("error"))
1429
+
1430
+ if self._numcollected == 0:
1431
+ parts = [("no tests collected", {"yellow": True})]
1432
+ main_color = "yellow"
1433
+
1434
+ elif deselected == 0:
1435
+ main_color = "green"
1436
+ collected_output = "%d %s collected" % pluralize(self._numcollected, "test") # noqa: UP031
1437
+ parts = [(collected_output, {main_color: True})]
1438
+ else:
1439
+ all_tests_were_deselected = self._numcollected == deselected
1440
+ if all_tests_were_deselected:
1441
+ main_color = "yellow"
1442
+ collected_output = f"no tests collected ({deselected} deselected)"
1443
+ else:
1444
+ main_color = "green"
1445
+ selected = self._numcollected - deselected
1446
+ collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
1447
+
1448
+ parts = [(collected_output, {main_color: True})]
1449
+
1450
+ if errors:
1451
+ main_color = _color_for_type["error"]
1452
+ parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] # noqa: UP031
1453
+
1454
+ return parts, main_color
1455
+
1456
+
1457
+ def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport):
1458
+ nodeid = config.cwd_relative_nodeid(rep.nodeid)
1459
+ path, *parts = nodeid.split("::")
1460
+ if parts:
1461
+ parts_markup = tw.markup("::".join(parts), bold=True)
1462
+ return path + "::" + parts_markup
1463
+ else:
1464
+ return path
1465
+
1466
+
1467
+ def _format_trimmed(format: str, msg: str, available_width: int) -> str | None:
1468
+ """Format msg into format, ellipsizing it if doesn't fit in available_width.
1469
+
1470
+ Returns None if even the ellipsis can't fit.
1471
+ """
1472
+ # Only use the first line.
1473
+ i = msg.find("\n")
1474
+ if i != -1:
1475
+ msg = msg[:i]
1476
+
1477
+ ellipsis = "..."
1478
+ format_width = wcswidth(format.format(""))
1479
+ if format_width + len(ellipsis) > available_width:
1480
+ return None
1481
+
1482
+ if format_width + wcswidth(msg) > available_width:
1483
+ available_width -= len(ellipsis)
1484
+ msg = msg[:available_width]
1485
+ while format_width + wcswidth(msg) > available_width:
1486
+ msg = msg[:-1]
1487
+ msg += ellipsis
1488
+
1489
+ return format.format(msg)
1490
+
1491
+
1492
+ def _get_line_with_reprcrash_message(
1493
+ config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: dict[str, bool]
1494
+ ) -> str:
1495
+ """Get summary line for a report, trying to add reprcrash message."""
1496
+ verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
1497
+ config, word_markup
1498
+ )
1499
+ word = tw.markup(verbose_word, **verbose_markup)
1500
+ node = _get_node_id_with_markup(tw, config, rep)
1501
+
1502
+ line = f"{word} {node}"
1503
+ line_width = wcswidth(line)
1504
+
1505
+ try:
1506
+ # Type ignored intentionally -- possible AttributeError expected.
1507
+ msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
1508
+ except AttributeError:
1509
+ pass
1510
+ else:
1511
+ if (
1512
+ running_on_ci() or config.option.verbose >= 2
1513
+ ) and not config.option.force_short_summary:
1514
+ msg = f" - {msg}"
1515
+ else:
1516
+ available_width = tw.fullwidth - line_width
1517
+ msg = _format_trimmed(" - {}", msg, available_width)
1518
+ if msg is not None:
1519
+ line += msg
1520
+
1521
+ return line
1522
+
1523
+
1524
def _folded_skips(
    startpath: Path,
    skipped: Sequence[CollectReport],
) -> list[tuple[int, str, int | None, str]]:
    """Fold skip reports sharing a location and reason into (count, fspath, lineno, reason)."""
    grouped: dict[tuple[str, int | None, str], list[CollectReport]] = {}
    for report in skipped:
        assert report.longrepr is not None
        assert isinstance(report.longrepr, tuple), (report, report.longrepr)
        assert len(report.longrepr) == 3, (report, report.longrepr)
        fspath, lineno, reason = report.longrepr
        # For consistency, report all fspaths in relative form.
        fspath = bestrelpath(startpath, Path(fspath))
        keywords = getattr(report, "keywords", {})
        # Folding reports with global pytestmark variable.
        # This is a workaround, because for now we cannot identify the scope of a skip marker
        # TODO: Revisit after marks scope would be fixed.
        if (
            report.when == "setup"
            and "skip" in keywords
            and "pytestmark" not in keywords
        ):
            key: tuple[str, int | None, str] = (fspath, None, reason)
        else:
            key = (fspath, lineno, reason)
        grouped.setdefault(key, []).append(report)
    return [(len(reports), *key) for key, reports in grouped.items()]
1553
+
1554
+
1555
# Terminal markup color used for each category in the final summary stats line.
_color_for_type = {
    "failed": "red",
    "error": "red",
    "warnings": "yellow",
    "passed": "green",
}
# Fallback color for any category not listed above.
_color_for_type_default = "yellow"
1562
+
1563
+
1564
def pluralize(count: int, noun: str) -> tuple[int, str]:
    """Return *count* together with *noun*, pluralizing the noun when needed.

    Only ``error``, ``warnings`` and ``test`` are ever pluralized; words such
    as ``failed`` or ``passed`` pass through unchanged.
    """
    if noun not in ("error", "warnings", "test"):
        return count, noun

    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
    # set it to singular here so we can determine plurality in the same way as we do
    # for `error`.
    singular = noun.replace("warnings", "warning")

    if count == 1:
        return count, singular
    return count, singular + "s"
1575
+
1576
+
1577
+ def _plugin_nameversions(plugininfo) -> list[str]:
1578
+ values: list[str] = []
1579
+ for plugin, dist in plugininfo:
1580
+ # Gets us name and version!
1581
+ name = f"{dist.project_name}-{dist.version}"
1582
+ # Questionable convenience, but it keeps things short.
1583
+ if name.startswith("pytest-"):
1584
+ name = name[7:]
1585
+ # We decided to print python package names they can have more than one plugin.
1586
+ if name not in values:
1587
+ values.append(name)
1588
+ return values
1589
+
1590
+
1591
def format_session_duration(seconds: float) -> str:
    """Format the given seconds in a human readable manner to show in the final summary."""
    if seconds >= 60:
        # For long sessions, also show an H:MM:SS breakdown.
        dt = datetime.timedelta(seconds=int(seconds))
        return f"{seconds:.2f}s ({dt})"
    return f"{seconds:.2f}s"
1598
+
1599
+
1600
def format_node_duration(seconds: float) -> str:
    """Format the given seconds in a human readable manner to show in the test progress."""
    # The formatting is designed to be compact and readable, with at most 7 characters
    # for durations below 100 hours.
    # Each entry: (upper bound in seconds, format template, unit scale factor).
    thresholds = (
        (0.00001, " {:.3f}us", 1000000),
        (0.0001, " {:.2f}us", 1000000),
        (0.001, " {:.1f}us", 1000000),
        (0.01, " {:.3f}ms", 1000),
        (0.1, " {:.2f}ms", 1000),
        (1, " {:.1f}ms", 1000),
        (60, " {:.3f}s", 1),
    )
    for limit, template, scale in thresholds:
        if seconds < limit:
            return template.format(seconds * scale)
    if seconds < 3600:
        return f" {seconds // 60:.0f}m {seconds % 60:.0f}s"
    return f" {seconds // 3600:.0f}h {(seconds % 3600) // 60:.0f}m"
1621
+
1622
+
1623
+ def _get_raw_skip_reason(report: TestReport) -> str:
1624
+ """Get the reason string of a skip/xfail/xpass test report.
1625
+
1626
+ The string is just the part given by the user.
1627
+ """
1628
+ if hasattr(report, "wasxfail"):
1629
+ reason = report.wasxfail
1630
+ if reason.startswith("reason: "):
1631
+ reason = reason[len("reason: ") :]
1632
+ return reason
1633
+ else:
1634
+ assert report.skipped
1635
+ assert isinstance(report.longrepr, tuple)
1636
+ _, _, reason = report.longrepr
1637
+ if reason.startswith("Skipped: "):
1638
+ reason = reason[len("Skipped: ") :]
1639
+ elif reason == "Skipped":
1640
+ reason = ""
1641
+ return reason