id | prompt | docstring |
|---|---|---|
172,478 | import io
import re
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
import sys
_default_encoding = sys.getdefaultencoding()  # normally defined in werkzeug._internal
@t.overload
def _to_str(  # type: ignore
    x: None,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> None:
    ...
@t.overload
def _to_str(
    x: t.Any,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> str:
    ...
def _to_str(
    x: t.Optional[t.Any],
    charset: t.Optional[str] = _default_encoding,
    errors: str = "strict",
    allow_none_charset: bool = False,
) -> t.Optional[t.Union[str, bytes]]:
if x is None or isinstance(x, str):
return x
if not isinstance(x, (bytes, bytearray)):
return str(x)
if charset is None:
if allow_none_charset:
return x
return x.decode(charset, errors) # type: ignore
The provided code snippet includes necessary dependencies for implementing the `get_path_info` function. Write a Python function `def get_path_info( environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace" ) -> str` to solve the following problem:
Return the ``PATH_INFO`` from the WSGI environment and decode it unless ``charset`` is ``None``. :param environ: WSGI environment to get the path from. :param charset: The charset for the path info, or ``None`` if no decoding should be performed. :param errors: The decoding error handling. .. versionadded:: 0.9
Here is the function:
def get_path_info(
environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> str:
"""Return the ``PATH_INFO`` from the WSGI environment and decode it
unless ``charset`` is ``None``.
:param environ: WSGI environment to get the path from.
:param charset: The charset for the path info, or ``None`` if no
decoding should be performed.
:param errors: The decoding error handling.
.. versionadded:: 0.9
"""
path = environ.get("PATH_INFO", "").encode("latin1")
return _to_str(path, charset, errors, allow_none_charset=True) # type: ignore | Return the ``PATH_INFO`` from the WSGI environment and decode it unless ``charset`` is ``None``. :param environ: WSGI environment to get the path from. :param charset: The charset for the path info, or ``None`` if no decoding should be performed. :param errors: The decoding error handling. .. versionadded:: 0.9 |
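A minimal usage sketch (assuming Werkzeug 2.2 is importable; the environ value is illustrative). Per PEP 3333, ``PATH_INFO`` arrives as a str of latin-1-decoded bytes, so the helper re-encodes it before decoding with the requested charset:
from werkzeug.wsgi import get_path_info
environ = {"PATH_INFO": "/caf\xc3\xa9"}  # UTF-8 bytes of "/café" seen through latin-1
print(get_path_info(environ))                # '/café'
print(get_path_info(environ, charset=None))  # b'/caf\xc3\xa9' (raw bytes)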
172,479 | import io
import re
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
import sys
_default_encoding = sys.getdefaultencoding()  # normally defined in werkzeug._internal
@t.overload
def _to_str(  # type: ignore
    x: None,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> None:
    ...
@t.overload
def _to_str(
    x: t.Any,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> str:
    ...
def _to_str(
    x: t.Optional[t.Any],
    charset: t.Optional[str] = _default_encoding,
    errors: str = "strict",
    allow_none_charset: bool = False,
) -> t.Optional[t.Union[str, bytes]]:
if x is None or isinstance(x, str):
return x
if not isinstance(x, (bytes, bytearray)):
return str(x)
if charset is None:
if allow_none_charset:
return x
return x.decode(charset, errors) # type: ignore
The provided code snippet includes necessary dependencies for implementing the `get_script_name` function. Write a Python function `def get_script_name( environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace" ) -> str` to solve the following problem:
Return the ``SCRIPT_NAME`` from the WSGI environment and decode it unless `charset` is set to ``None``. :param environ: WSGI environment to get the path from. :param charset: The charset for the path, or ``None`` if no decoding should be performed. :param errors: The decoding error handling. .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionadded:: 0.9
Here is the function:
def get_script_name(
environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> str:
"""Return the ``SCRIPT_NAME`` from the WSGI environment and decode
it unless `charset` is set to ``None``.
:param environ: WSGI environment to get the path from.
:param charset: The charset for the path, or ``None`` if no decoding
should be performed.
:param errors: The decoding error handling.
.. deprecated:: 2.2
Will be removed in Werkzeug 2.3.
.. versionadded:: 0.9
"""
warnings.warn(
"'get_script_name' is deprecated and will be removed in Werkzeug 2.3.",
DeprecationWarning,
stacklevel=2,
)
path = environ.get("SCRIPT_NAME", "").encode("latin1")
return _to_str(path, charset, errors, allow_none_charset=True) # type: ignore | Return the ``SCRIPT_NAME`` from the WSGI environment and decode it unless `charset` is set to ``None``. :param environ: WSGI environment to get the path from. :param charset: The charset for the path, or ``None`` if no decoding should be performed. :param errors: The decoding error handling. .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionadded:: 0.9 |
172,480 | import io
import re
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
import sys
_default_encoding = sys.getdefaultencoding()  # normally defined in werkzeug._internal
@t.overload
def _to_str(  # type: ignore
    x: None,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> None:
    ...
@t.overload
def _to_str(
    x: t.Any,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> str:
    ...
def _to_str(
    x: t.Optional[t.Any],
    charset: t.Optional[str] = _default_encoding,
    errors: str = "strict",
    allow_none_charset: bool = False,
) -> t.Optional[t.Union[str, bytes]]:
if x is None or isinstance(x, str):
return x
if not isinstance(x, (bytes, bytearray)):
return str(x)
if charset is None:
if allow_none_charset:
return x
return x.decode(charset, errors) # type: ignore
The provided code snippet includes necessary dependencies for implementing the `pop_path_info` function. Write a Python function `def pop_path_info( environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace" ) -> t.Optional[str]` to solve the following problem:
Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` bytes are returned. If there are empty segments (``'/foo//bar'``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified. :param charset: The ``encoding`` parameter passed to :func:`bytes.decode`. :param errors: The ``errors`` parameter passed to :func:`bytes.decode`.
Here is the function:
def pop_path_info(
environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> t.Optional[str]:
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` bytes are returned.
    If there are empty segments (``'/foo//bar'``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. deprecated:: 2.2
Will be removed in Werkzeug 2.3.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
:param charset: The ``encoding`` parameter passed to
:func:`bytes.decode`.
    :param errors: The ``errors`` parameter passed to
:func:`bytes.decode`.
"""
warnings.warn(
"'pop_path_info' is deprecated and will be removed in Werkzeug 2.3.",
DeprecationWarning,
stacklevel=2,
)
path = environ.get("PATH_INFO")
if not path:
return None
script_name = environ.get("SCRIPT_NAME", "")
# shift multiple leading slashes over
old_path = path
path = path.lstrip("/")
if path != old_path:
script_name += "/" * (len(old_path) - len(path))
if "/" not in path:
environ["PATH_INFO"] = ""
environ["SCRIPT_NAME"] = script_name + path
rv = path.encode("latin1")
else:
segment, path = path.split("/", 1)
environ["PATH_INFO"] = f"/{path}"
environ["SCRIPT_NAME"] = script_name + segment
rv = segment.encode("latin1")
    return _to_str(rv, charset, errors, allow_none_charset=True) # type: ignore | Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` bytes are returned. If there are empty segments (``'/foo//bar'``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified. :param charset: The ``encoding`` parameter passed to :func:`bytes.decode`. :param errors: The ``errors`` parameter passed to :func:`bytes.decode`. |
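A sketch of consuming a path segment by segment for dispatch (assuming Werkzeug 2.2; the DeprecationWarning is silenced for the demo):
import warnings
from werkzeug.wsgi import pop_path_info
env = {"SCRIPT_NAME": "", "PATH_INFO": "/api//users/42"}
with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    while (segment := pop_path_info(env)) is not None:
        print(segment, env["SCRIPT_NAME"], env["PATH_INFO"])
# api /api //users/42
# users /api//users /42
# 42 /api//users/42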
172,481 | import io
import re
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
import sys
_default_encoding = sys.getdefaultencoding()  # normally defined in werkzeug._internal
@t.overload
def _to_str(  # type: ignore
    x: None,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> None:
    ...
@t.overload
def _to_str(
    x: t.Any,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> str:
    ...
def _to_str(
    x: t.Optional[t.Any],
    charset: t.Optional[str] = _default_encoding,
    errors: str = "strict",
    allow_none_charset: bool = False,
) -> t.Optional[t.Union[str, bytes]]:
if x is None or isinstance(x, str):
return x
if not isinstance(x, (bytes, bytearray)):
return str(x)
if charset is None:
if allow_none_charset:
return x
return x.decode(charset, errors) # type: ignore
The provided code snippet includes necessary dependencies for implementing the `peek_path_info` function. Write a Python function `def peek_path_info( environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace" ) -> t.Optional[str]` to solve the following problem:
Returns the next segment on the `PATH_INFO` or `None` if there is none. Works like :func:`pop_path_info` without modifying the environment: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> peek_path_info(env) 'a' >>> peek_path_info(env) 'a' If the `charset` is set to `None` bytes are returned. .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is checked.
Here is the function:
def peek_path_info(
environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> t.Optional[str]:
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` bytes are returned.
.. deprecated:: 2.2
Will be removed in Werkzeug 2.3.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
warnings.warn(
"'peek_path_info' is deprecated and will be removed in Werkzeug 2.3.",
DeprecationWarning,
stacklevel=2,
)
segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
if segments:
return _to_str( # type: ignore
segments[0].encode("latin1"), charset, errors, allow_none_charset=True
)
return None | Returns the next segment on the `PATH_INFO` or `None` if there is none. Works like :func:`pop_path_info` without modifying the environment: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> peek_path_info(env) 'a' >>> peek_path_info(env) 'a' If the `charset` is set to `None` bytes are returned. .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is checked. |
172,482 | import io
import re
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
def get_current_url(
environ: "WSGIEnvironment",
root_only: bool = False,
strip_querystring: bool = False,
host_only: bool = False,
trusted_hosts: t.Optional[t.Iterable[str]] = None,
) -> str:
"""Recreate the URL for a request from the parts in a WSGI
environment.
The URL is an IRI, not a URI, so it may contain Unicode characters.
Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.
:param environ: The WSGI environment to get the URL parts from.
:param root_only: Only build the root path, don't include the
remaining path or query string.
:param strip_querystring: Don't include the query string.
:param host_only: Only build the scheme and host.
:param trusted_hosts: A list of trusted host names to validate the
host against.
"""
parts = {
"scheme": environ["wsgi.url_scheme"],
"host": get_host(environ, trusted_hosts),
}
if not host_only:
parts["root_path"] = environ.get("SCRIPT_NAME", "")
if not root_only:
parts["path"] = environ.get("PATH_INFO", "")
if not strip_querystring:
parts["query_string"] = environ.get("QUERY_STRING", "").encode("latin1")
return _sansio_utils.get_current_url(**parts)
class _URLTuple(t.NamedTuple):
scheme: str
netloc: str
path: str
query: str
fragment: str
def url_parse(
url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True
) -> BaseURL:
"""Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`.
The inverse of this function is :func:`url_unparse`.
:param url: the URL to parse.
    :param scheme: the default scheme to use if the URL is schemaless.
:param allow_fragments: if set to `False` a fragment will be removed
from the URL.
"""
s = _make_encode_wrapper(url)
is_text_based = isinstance(url, str)
if scheme is None:
scheme = s("")
netloc = query = fragment = s("")
i = url.find(s(":"))
if i > 0 and _scheme_re.match(_to_str(url[:i], errors="replace")):
# make sure "iri" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i + 1 :]
if not rest or any(c not in s("0123456789") for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == s("//"):
delim = len(url)
for c in s("/?#"):
wdelim = url.find(c, 2)
if wdelim >= 0:
delim = min(delim, wdelim)
netloc, url = url[2:delim], url[delim:]
if (s("[") in netloc and s("]") not in netloc) or (
s("]") in netloc and s("[") not in netloc
):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and s("#") in url:
url, fragment = url.split(s("#"), 1)
if s("?") in url:
url, query = url.split(s("?"), 1)
result_type = URL if is_text_based else BytesURL
return result_type(scheme, netloc, url, query, fragment)
def uri_to_iri(
uri: t.Union[str, t.Tuple[str, str, str, str, str]],
charset: str = "utf-8",
errors: str = "werkzeug.url_quote",
) -> str:
"""Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
leaving all reserved and invalid characters quoted. If the URL has
a domain, it is decoded from Punycode.
>>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'
:param uri: The URI to convert.
:param charset: The encoding to encode unquoted bytes with.
    :param errors: Error handler to use during ``bytes.decode``. By
default, invalid bytes are left quoted.
.. versionchanged:: 0.15
All reserved and invalid characters remain quoted. Previously,
only some reserved characters were preserved, and invalid bytes
were replaced instead of left quoted.
.. versionadded:: 0.6
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(_to_str(uri, charset))
path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))
def url_join(
base: t.Union[str, t.Tuple[str, str, str, str, str]],
url: t.Union[str, t.Tuple[str, str, str, str, str]],
allow_fragments: bool = True,
) -> str:
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
"""
if isinstance(base, tuple):
base = url_unparse(base)
if isinstance(url, tuple):
url = url_unparse(url)
_check_str_tuple((base, url))
s = _make_encode_wrapper(base)
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
base, allow_fragments=allow_fragments
)
scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return url_unparse((scheme, netloc, path, query, fragment))
netloc = bnetloc
if path[:1] == s("/"):
segments = path.split(s("/"))
elif not path:
segments = bpath.split(s("/"))
if not query:
query = bquery
else:
segments = bpath.split(s("/"))[:-1] + path.split(s("/"))
# If the rightmost part is "./" we want to keep the slash but
# remove the dot.
if segments[-1] == s("."):
segments[-1] = s("")
# Resolve ".." and "."
segments = [segment for segment in segments if segment != s(".")]
while True:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
del segments[i - 1 : i + 1]
break
i += 1
else:
break
# Remove trailing ".." if the URL is absolute
unwanted_marker = [s(""), s("..")]
while segments[:2] == unwanted_marker:
del segments[1]
path = s("/").join(segments)
return url_unparse((scheme, netloc, path, query, fragment))
The provided code snippet includes necessary dependencies for implementing the `extract_path_info` function. Write a Python function `def extract_path_info( environ_or_baseurl: t.Union[str, "WSGIEnvironment"], path_or_url: t.Union[str, _URLTuple], charset: str = "utf-8", errors: str = "werkzeug.url_quote", collapse_http_schemes: bool = True, ) -> t.Optional[str]` to solve the following problem:
Extracts the path info from the given URL (or WSGI environment) and path. The path info returned is a string. The URLs might also be IRIs. If the path info could not be determined, `None` is returned. Some examples: >>> extract_path_info('http://example.com/app', '/app/hello') '/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello') '/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello', ... collapse_http_schemes=False) is None True Instead of providing a base URL you can also pass a WSGI environment. :param environ_or_baseurl: a WSGI environment dict, a base URL or base IRI. This is the root of the application. :param path_or_url: an absolute path from the server root, a relative path (in which case it's the path info) or a full URL. :param charset: the charset for byte data in URLs :param errors: the error handling on decode :param collapse_http_schemes: if set to `False` the algorithm does not assume that http and https on the same server point to the same resource. .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionchanged:: 0.15 The ``errors`` parameter defaults to leaving invalid bytes quoted instead of replacing them. .. versionadded:: 0.6
Here is the function:
def extract_path_info(
environ_or_baseurl: t.Union[str, "WSGIEnvironment"],
path_or_url: t.Union[str, _URLTuple],
charset: str = "utf-8",
errors: str = "werkzeug.url_quote",
collapse_http_schemes: bool = True,
) -> t.Optional[str]:
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a string. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
.. deprecated:: 2.2
Will be removed in Werkzeug 2.3.
.. versionchanged:: 0.15
The ``errors`` parameter defaults to leaving invalid bytes
quoted instead of replacing them.
.. versionadded:: 0.6
"""
warnings.warn(
"'extract_path_info' is deprecated and will be removed in Werkzeug 2.3.",
DeprecationWarning,
stacklevel=2,
)
def _normalize_netloc(scheme: str, netloc: str) -> str:
parts = netloc.split("@", 1)[-1].split(":", 1)
port: t.Optional[str]
if len(parts) == 2:
netloc, port = parts
if (scheme == "http" and port == "80") or (
scheme == "https" and port == "443"
):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += f":{port}"
return netloc
    # make sure whatever we are working on is an IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path = url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in ("http", "https"):
return None
else:
if not (base_scheme in ("http", "https") and base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip("/")
if not cur_path.startswith(base_path):
return None
return f"/{cur_path[len(base_path) :].lstrip('/')}" | Extracts the path info from the given URL (or WSGI environment) and path. The path info returned is a string. The URLs might also be IRIs. If the path info could not be determined, `None` is returned. Some examples: >>> extract_path_info('http://example.com/app', '/app/hello') '/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello') '/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello', ... collapse_http_schemes=False) is None True Instead of providing a base URL you can also pass a WSGI environment. :param environ_or_baseurl: a WSGI environment dict, a base URL or base IRI. This is the root of the application. :param path_or_url: an absolute path from the server root, a relative path (in which case it's the path info) or a full URL. :param charset: the charset for byte data in URLs :param errors: the error handling on decode :param collapse_http_schemes: if set to `False` the algorithm does not assume that http and https on the same server point to the same resource. .. deprecated:: 2.2 Will be removed in Werkzeug 2.3. .. versionchanged:: 0.15 The ``errors`` parameter defaults to leaving invalid bytes quoted instead of replacing them. .. versionadded:: 0.6 |
172,483 | import io
import re
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
def _make_chunk_iter(
stream: t.Union[t.Iterable[bytes], t.IO[bytes]],
limit: t.Optional[int],
buffer_size: int,
) -> t.Iterator[bytes]:
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, str)):
raise TypeError(
"Passed a string or byte object instead of true iterator or stream."
)
if not hasattr(stream, "read"):
for item in stream:
if item:
yield item
return
stream = t.cast(t.IO[bytes], stream)
if not isinstance(stream, LimitedStream) and limit is not None:
stream = t.cast(t.IO[bytes], LimitedStream(stream, limit))
_read = stream.read
while True:
item = _read(buffer_size)
if not item:
break
yield item
from typing import Any, Generic, Iterable, Iterator, TypeVar
_T = TypeVar("_T")
_S = TypeVar("_S")
class chain(Iterator[_T], Generic[_T]):
def __init__(self, *iterables: Iterable[_T]) -> None: ...
def __next__(self) -> _T: ...
def __iter__(self) -> Iterator[_T]: ...
    @staticmethod
    def from_iterable(iterable: Iterable[Iterable[_S]]) -> Iterator[_S]: ...
import operator  # used by the implementation below
@t.overload
def _make_encode_wrapper(reference: str) -> t.Callable[[str], str]:
    ...
@t.overload
def _make_encode_wrapper(reference: bytes) -> t.Callable[[str], bytes]:
    ...
def _make_encode_wrapper(reference: t.AnyStr) -> t.Callable[[str], t.AnyStr]:
"""Create a function that will be called with a string argument. If
the reference is bytes, values will be encoded to bytes.
"""
if isinstance(reference, str):
return lambda x: x
return operator.methodcaller("encode", "latin1")
The provided code snippet includes necessary dependencies for implementing the `make_line_iter` function. Write a Python function `def make_line_iter( stream: t.Union[t.Iterable[bytes], t.IO[bytes]], limit: t.Optional[int] = None, buffer_size: int = 10 * 1024, cap_at_buffer: bool = False, ) -> t.Iterator[bytes]` to solve the following problem:
Safely iterates line-based over an input stream. If the input stream is not a :class:`LimitedStream` the `limit` parameter is mandatory. This uses the stream's :meth:`~file.read` method internally as opposed to the :meth:`~file.readline` method that is unsafe and can only be used in violation of the WSGI specification. The same problem applies to the `__iter__` function of the input stream which calls :meth:`~file.readline` without arguments. If you need line-by-line processing it's strongly recommended to iterate over the input stream using this helper function. .. versionchanged:: 0.8 This function now ensures that the limit was reached. .. versionadded:: 0.9 added support for iterators as input stream. .. versionadded:: 0.11.10 added support for the `cap_at_buffer` parameter. :param stream: the stream or iterable to iterate over. :param limit: the limit in bytes for the stream. (Usually content length; not necessary if the `stream` is a :class:`LimitedStream`.) :param buffer_size: The optional buffer size. :param cap_at_buffer: if this is set chunks are split if they are longer than the buffer size. Internally this is implemented such that the buffer size might be exceeded by a factor of two, however.
Here is the function:
def make_line_iter(
stream: t.Union[t.Iterable[bytes], t.IO[bytes]],
limit: t.Optional[int] = None,
buffer_size: int = 10 * 1024,
cap_at_buffer: bool = False,
) -> t.Iterator[bytes]:
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposed
to the :meth:`~file.readline` method that is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
    :param limit: the limit in bytes for the stream. (Usually
                  content length; not necessary if the `stream`
                  is a :class:`LimitedStream`.)
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          such that the buffer size might be exceeded by a
                          factor of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, "")
if not first_item:
return
s = _make_encode_wrapper(first_item)
empty = t.cast(bytes, s(""))
cr = t.cast(bytes, s("\r"))
lf = t.cast(bytes, s("\n"))
crlf = t.cast(bytes, s("\r\n"))
_iter = t.cast(t.Iterator[bytes], chain((first_item,), _iter))
def _iter_basic_lines() -> t.Iterator[bytes]:
_join = empty.join
buffer: t.List[bytes] = []
while True:
new_data = next(_iter, "")
if not new_data:
break
new_buf: t.List[bytes] = []
buf_size = 0
for item in t.cast(
t.Iterator[bytes], chain(buffer, new_data.splitlines(True))
):
new_buf.append(item)
buf_size += len(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
elif cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
    yield previous | Safely iterates line-based over an input stream. If the input stream is not a :class:`LimitedStream` the `limit` parameter is mandatory. This uses the stream's :meth:`~file.read` method internally as opposed to the :meth:`~file.readline` method that is unsafe and can only be used in violation of the WSGI specification. The same problem applies to the `__iter__` function of the input stream which calls :meth:`~file.readline` without arguments. If you need line-by-line processing it's strongly recommended to iterate over the input stream using this helper function. .. versionchanged:: 0.8 This function now ensures that the limit was reached. .. versionadded:: 0.9 added support for iterators as input stream. .. versionadded:: 0.11.10 added support for the `cap_at_buffer` parameter. :param stream: the stream or iterable to iterate over. :param limit: the limit in bytes for the stream. (Usually content length; not necessary if the `stream` is a :class:`LimitedStream`.) :param buffer_size: The optional buffer size. :param cap_at_buffer: if this is set chunks are split if they are longer than the buffer size. Internally this is implemented such that the buffer size might be exceeded by a factor of two, however. |
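A usage sketch with an in-memory stream (assuming Werkzeug 2.2; ``limit`` is mandatory because io.BytesIO is not a LimitedStream):
import io
from werkzeug.wsgi import make_line_iter
data = b"first\r\nsecond\nlast-without-newline"
for line in make_line_iter(io.BytesIO(data), limit=len(data)):
    print(line)
# b'first\r\n'
# b'second\n'
# b'last-without-newline'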
172,484 | import typing as t
from types import TracebackType
from urllib.parse import urlparse
from warnings import warn
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
def check_type(context: str, obj: object, need: t.Type = str) -> None:
if type(obj) is not need:
warn(
f"{context!r} requires {need.__name__!r}, got {type(obj).__name__!r}.",
WSGIWarning,
stacklevel=3,
) | null |
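A sketch reusing the ``check_type`` defined above: passing bytes where PEP 3333 requires a native str emits a ``WSGIWarning``:
import warnings
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    check_type("status", b"200 OK")  # the WSGI status line must be str, not bytes
print(caught[0].message)  # 'status' requires 'str', got 'bytes'.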
172,485 | import itertools
import linecache
import os
import re
import sys
import sysconfig
import traceback
import typing as t
from markupsafe import escape
from ..utils import cached_property
from .console import Console
class DebugFrameSummary(traceback.FrameSummary):
"""A :class:`traceback.FrameSummary` that can evaluate code in the
frame's namespace.
"""
__slots__ = (
"local_ns",
"global_ns",
"_cache_info",
"_cache_is_library",
"_cache_console",
)
def __init__(
self,
*,
locals: t.Dict[str, t.Any],
globals: t.Dict[str, t.Any],
**kwargs: t.Any,
) -> None:
super().__init__(locals=None, **kwargs)
self.local_ns = locals
self.global_ns = globals
    @cached_property
    def info(self) -> t.Optional[str]:
        return self.local_ns.get("__traceback_info__")
    @cached_property
    def is_library(self) -> bool:
        return any(
            self.filename.startswith((path, os.path.realpath(path)))
            for path in sysconfig.get_paths().values()
        )
    @cached_property
    def console(self) -> Console:
        return Console(self.global_ns, self.local_ns)
    def eval(self, code: str) -> t.Any:
        return self.console.eval(code)
def render_html(self, mark_library: bool) -> str:
context = 5
lines = linecache.getlines(self.filename)
line_idx = self.lineno - 1 # type: ignore[operator]
start_idx = max(0, line_idx - context)
stop_idx = min(len(lines), line_idx + context + 1)
rendered_lines = []
def render_line(line: str, cls: str) -> None:
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
colno = getattr(self, "colno", 0)
end_colno = getattr(self, "end_colno", 0)
if cls == "current" and colno and end_colno:
arrow = (
f'\n<span class="ws">{" " * prefix}</span>'
f'{" " * (colno - prefix)}{"^" * (end_colno - colno)}'
)
else:
arrow = ""
rendered_lines.append(
f'<pre class="line {cls}"><span class="ws">{" " * prefix}</span>'
f"{escape(stripped_line) if stripped_line else ' '}"
f"{arrow if arrow else ''}</pre>"
)
if lines:
for line in lines[start_idx:line_idx]:
render_line(line, "before")
render_line(lines[line_idx], "current")
for line in lines[line_idx + 1 : stop_idx]:
render_line(line, "after")
return FRAME_HTML % {
"id": id(self),
"filename": escape(self.filename),
"lineno": self.lineno,
"function_name": escape(self.name),
"lines": "\n".join(rendered_lines),
"library": "library" if mark_library and self.is_library else "",
}
def _process_traceback(
exc: BaseException,
te: t.Optional[traceback.TracebackException] = None,
*,
skip: int = 0,
hide: bool = True,
) -> traceback.TracebackException:
if te is None:
te = traceback.TracebackException.from_exception(exc, lookup_lines=False)
# Get the frames the same way StackSummary.extract did, in order
# to match each frame with the FrameSummary to augment.
frame_gen = traceback.walk_tb(exc.__traceback__)
limit = getattr(sys, "tracebacklimit", None)
if limit is not None:
if limit < 0:
limit = 0
frame_gen = itertools.islice(frame_gen, limit)
if skip:
frame_gen = itertools.islice(frame_gen, skip, None)
del te.stack[:skip]
new_stack: t.List[DebugFrameSummary] = []
hidden = False
# Match each frame with the FrameSummary that was generated.
# Hide frames using Paste's __traceback_hide__ rules. Replace
# all visible FrameSummary with DebugFrameSummary.
for (f, _), fs in zip(frame_gen, te.stack):
if hide:
hide_value = f.f_locals.get("__traceback_hide__", False)
if hide_value in {"before", "before_and_this"}:
new_stack = []
hidden = False
if hide_value == "before_and_this":
continue
elif hide_value in {"reset", "reset_and_this"}:
hidden = False
if hide_value == "reset_and_this":
continue
elif hide_value in {"after", "after_and_this"}:
hidden = True
if hide_value == "after_and_this":
continue
elif hide_value or hidden:
continue
frame_args: t.Dict[str, t.Any] = {
"filename": fs.filename,
"lineno": fs.lineno,
"name": fs.name,
"locals": f.f_locals,
"globals": f.f_globals,
}
if hasattr(fs, "colno"):
frame_args["colno"] = fs.colno
frame_args["end_colno"] = fs.end_colno # type: ignore[attr-defined]
new_stack.append(DebugFrameSummary(**frame_args))
# The codeop module is used to compile code from the interactive
# debugger. Hide any codeop frames from the bottom of the traceback.
while new_stack:
module = new_stack[0].global_ns.get("__name__")
if module is None:
module = new_stack[0].local_ns.get("__name__")
if module == "codeop":
del new_stack[0]
else:
break
te.stack[:] = new_stack
if te.__context__:
context_exc = t.cast(BaseException, exc.__context__)
te.__context__ = _process_traceback(context_exc, te.__context__, hide=hide)
if te.__cause__:
cause_exc = t.cast(BaseException, exc.__cause__)
te.__cause__ = _process_traceback(cause_exc, te.__cause__, hide=hide)
return te | null |
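A sketch of the Paste-style hiding rules, reusing the private ``_process_traceback`` defined above: a frame whose locals contain ``__traceback_hide__`` is dropped from the rendered stack:
def framework_dispatch(view):
    __traceback_hide__ = True  # read via f_locals by _process_traceback
    return view()
def view():
    raise RuntimeError("boom")
try:
    framework_dispatch(view)
except RuntimeError as exc:
    te = _process_traceback(exc)
    print([f.name for f in te.stack])  # ['<module>', 'view'] -- dispatch frame hidden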
172,486 | import itertools
import linecache
import os
import re
import sys
import sysconfig
import traceback
import typing as t
from markupsafe import escape
from ..utils import cached_property
from .console import Console
CONSOLE_HTML = (
HEADER
+ """\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
def render_console_html(secret: str, evalex_trusted: bool) -> str:
return CONSOLE_HTML % {
"evalex": "true",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "true",
"title": "Console",
"secret": secret,
} | null |
172,487 | import codecs
import re
import sys
import typing as t
from collections import deque
from traceback import format_exception_only
from markupsafe import escape
class DebugReprGenerator:
def __init__(self) -> None:
self._stack: t.List[t.Any] = []
list_repr = _sequence_repr_maker("[", "]", list)
tuple_repr = _sequence_repr_maker("(", ")", tuple)
set_repr = _sequence_repr_maker("set([", "])", set)
frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
deque_repr = _sequence_repr_maker(
'<span class="module">collections.</span>deque([', "])", deque
)
def regex_repr(self, obj: t.Pattern) -> str:
pattern = repr(obj.pattern)
pattern = codecs.decode(pattern, "unicode-escape", "ignore")
pattern = f"r{pattern}"
return f're.compile(<span class="string regex">{pattern}</span>)'
def string_repr(self, obj: t.Union[str, bytes], limit: int = 70) -> str:
buf = ['<span class="string">']
r = repr(obj)
# shorten the repr when the hidden part would be at least 3 chars
if len(r) - limit > 2:
buf.extend(
(
escape(r[:limit]),
'<span class="extended">',
escape(r[limit:]),
"</span>",
)
)
else:
buf.append(escape(r))
buf.append("</span>")
out = "".join(buf)
# if the repr looks like a standard string, add subclass info if needed
if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
return _add_subclass_info(out, obj, (bytes, str))
# otherwise, assume the repr distinguishes the subclass already
return out
def dict_repr(
self,
d: t.Union[t.Dict[int, None], t.Dict[str, int], t.Dict[t.Union[str, int], int]],
recursive: bool,
limit: int = 5,
) -> str:
if recursive:
return _add_subclass_info("{...}", d, dict)
buf = ["{"]
have_extended_section = False
for idx, (key, value) in enumerate(d.items()):
if idx:
buf.append(", ")
if idx == limit - 1:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(
f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
f' <span class="value">{self.repr(value)}</span></span>'
)
if have_extended_section:
buf.append("</span>")
buf.append("}")
return _add_subclass_info("".join(buf), d, dict)
def object_repr(
self, obj: t.Optional[t.Union[t.Type[dict], t.Callable, t.Type[list]]]
) -> str:
r = repr(obj)
return f'<span class="object">{escape(r)}</span>'
def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
if obj is helper:
return f'<span class="help">{helper!r}</span>'
if isinstance(obj, (int, float, complex)):
return f'<span class="number">{obj!r}</span>'
if isinstance(obj, str) or isinstance(obj, bytes):
return self.string_repr(obj)
if isinstance(obj, RegexType):
return self.regex_repr(obj)
if isinstance(obj, list):
return self.list_repr(obj, recursive)
if isinstance(obj, tuple):
return self.tuple_repr(obj, recursive)
if isinstance(obj, set):
return self.set_repr(obj, recursive)
if isinstance(obj, frozenset):
return self.frozenset_repr(obj, recursive)
if isinstance(obj, dict):
return self.dict_repr(obj, recursive)
if isinstance(obj, deque):
return self.deque_repr(obj, recursive)
return self.object_repr(obj)
def fallback_repr(self) -> str:
try:
info = "".join(format_exception_only(*sys.exc_info()[:2]))
except Exception:
info = "?"
return (
'<span class="brokenrepr">'
f"<broken repr ({escape(info.strip())})></span>"
)
def repr(self, obj: object) -> str:
recursive = False
for item in self._stack:
if item is obj:
recursive = True
break
self._stack.append(obj)
try:
try:
return self.dispatch_repr(obj, recursive)
except Exception:
return self.fallback_repr()
finally:
self._stack.pop()
def dump_object(self, obj: object) -> str:
repr = None
items: t.Optional[t.List[t.Tuple[str, str]]] = None
if isinstance(obj, dict):
title = "Contents of"
items = []
for key, value in obj.items():
if not isinstance(key, str):
items = None
break
items.append((key, self.repr(value)))
if items is None:
items = []
repr = self.repr(obj)
for key in dir(obj):
try:
items.append((key, self.repr(getattr(obj, key))))
except Exception:
pass
title = "Details for"
title += f" {object.__repr__(obj)[1:-1]}"
return self.render_object_dump(items, title, repr)
def dump_locals(self, d: t.Dict[str, t.Any]) -> str:
items = [(key, self.repr(value)) for key, value in d.items()]
return self.render_object_dump(items, "Local variables in frame")
def render_object_dump(
self, items: t.List[t.Tuple[str, str]], title: str, repr: t.Optional[str] = None
) -> str:
html_items = []
for key, value in items:
html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")
if not html_items:
html_items.append("<tr><td><em>Nothing</em>")
return OBJECT_DUMP_HTML % {
"title": escape(title),
"repr": f"<pre class=repr>{repr if repr else ''}</pre>",
"items": "\n".join(html_items),
}
The provided code snippet includes necessary dependencies for implementing the `debug_repr` function. Write a Python function `def debug_repr(obj: object) -> str` to solve the following problem:
Creates a debug repr of an object as an HTML string.
Here is the function:
def debug_repr(obj: object) -> str:
"""Creates a debug repr of an object as HTML string."""
    return DebugReprGenerator().repr(obj) | Creates a debug repr of an object as an HTML string. |
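A quick sketch (assuming Werkzeug is installed; dispatch picks a renderer per type):
from werkzeug.debug.repr import debug_repr
print(debug_repr(42))          # <span class="number">42</span>
print(debug_repr([1, "two"]))  # items rendered recursively between [ and ]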
172,488 | import codecs
import re
import sys
import typing as t
from collections import deque
from traceback import format_exception_only
from markupsafe import escape
missing = object()
class DebugReprGenerator:
def __init__(self) -> None:
self._stack: t.List[t.Any] = []
list_repr = _sequence_repr_maker("[", "]", list)
tuple_repr = _sequence_repr_maker("(", ")", tuple)
set_repr = _sequence_repr_maker("set([", "])", set)
frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
deque_repr = _sequence_repr_maker(
'<span class="module">collections.</span>deque([', "])", deque
)
def regex_repr(self, obj: t.Pattern) -> str:
pattern = repr(obj.pattern)
pattern = codecs.decode(pattern, "unicode-escape", "ignore")
pattern = f"r{pattern}"
return f're.compile(<span class="string regex">{pattern}</span>)'
def string_repr(self, obj: t.Union[str, bytes], limit: int = 70) -> str:
buf = ['<span class="string">']
r = repr(obj)
# shorten the repr when the hidden part would be at least 3 chars
if len(r) - limit > 2:
buf.extend(
(
escape(r[:limit]),
'<span class="extended">',
escape(r[limit:]),
"</span>",
)
)
else:
buf.append(escape(r))
buf.append("</span>")
out = "".join(buf)
# if the repr looks like a standard string, add subclass info if needed
if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
return _add_subclass_info(out, obj, (bytes, str))
# otherwise, assume the repr distinguishes the subclass already
return out
def dict_repr(
self,
d: t.Union[t.Dict[int, None], t.Dict[str, int], t.Dict[t.Union[str, int], int]],
recursive: bool,
limit: int = 5,
) -> str:
if recursive:
return _add_subclass_info("{...}", d, dict)
buf = ["{"]
have_extended_section = False
for idx, (key, value) in enumerate(d.items()):
if idx:
buf.append(", ")
if idx == limit - 1:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(
f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
f' <span class="value">{self.repr(value)}</span></span>'
)
if have_extended_section:
buf.append("</span>")
buf.append("}")
return _add_subclass_info("".join(buf), d, dict)
def object_repr(
self, obj: t.Optional[t.Union[t.Type[dict], t.Callable, t.Type[list]]]
) -> str:
r = repr(obj)
return f'<span class="object">{escape(r)}</span>'
def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
if obj is helper:
return f'<span class="help">{helper!r}</span>'
if isinstance(obj, (int, float, complex)):
return f'<span class="number">{obj!r}</span>'
if isinstance(obj, str) or isinstance(obj, bytes):
return self.string_repr(obj)
if isinstance(obj, RegexType):
return self.regex_repr(obj)
if isinstance(obj, list):
return self.list_repr(obj, recursive)
if isinstance(obj, tuple):
return self.tuple_repr(obj, recursive)
if isinstance(obj, set):
return self.set_repr(obj, recursive)
if isinstance(obj, frozenset):
return self.frozenset_repr(obj, recursive)
if isinstance(obj, dict):
return self.dict_repr(obj, recursive)
if isinstance(obj, deque):
return self.deque_repr(obj, recursive)
return self.object_repr(obj)
def fallback_repr(self) -> str:
try:
info = "".join(format_exception_only(*sys.exc_info()[:2]))
except Exception:
info = "?"
return (
'<span class="brokenrepr">'
f"<broken repr ({escape(info.strip())})></span>"
)
def repr(self, obj: object) -> str:
recursive = False
for item in self._stack:
if item is obj:
recursive = True
break
self._stack.append(obj)
try:
try:
return self.dispatch_repr(obj, recursive)
except Exception:
return self.fallback_repr()
finally:
self._stack.pop()
def dump_object(self, obj: object) -> str:
repr = None
items: t.Optional[t.List[t.Tuple[str, str]]] = None
if isinstance(obj, dict):
title = "Contents of"
items = []
for key, value in obj.items():
if not isinstance(key, str):
items = None
break
items.append((key, self.repr(value)))
if items is None:
items = []
repr = self.repr(obj)
for key in dir(obj):
try:
items.append((key, self.repr(getattr(obj, key))))
except Exception:
pass
title = "Details for"
title += f" {object.__repr__(obj)[1:-1]}"
return self.render_object_dump(items, title, repr)
def dump_locals(self, d: t.Dict[str, t.Any]) -> str:
items = [(key, self.repr(value)) for key, value in d.items()]
return self.render_object_dump(items, "Local variables in frame")
def render_object_dump(
self, items: t.List[t.Tuple[str, str]], title: str, repr: t.Optional[str] = None
) -> str:
html_items = []
for key, value in items:
html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")
if not html_items:
html_items.append("<tr><td><em>Nothing</em>")
return OBJECT_DUMP_HTML % {
"title": escape(title),
"repr": f"<pre class=repr>{repr if repr else ''}</pre>",
"items": "\n".join(html_items),
}
The provided code snippet includes necessary dependencies for implementing the `dump` function. Write a Python function `def dump(obj: object = missing) -> None` to solve the following problem:
Print the object details to stdout._write (for the interactive console of the web debugger).
Here is the function:
def dump(obj: object = missing) -> None:
"""Print the object details to stdout._write (for the interactive
    console of the web debugger).
"""
gen = DebugReprGenerator()
if obj is missing:
rv = gen.dump_locals(sys._getframe(1).f_locals)
else:
rv = gen.dump_object(obj)
    sys.stdout._write(rv) # type: ignore | Print the object details to stdout._write (for the interactive console of the web debugger). |
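``dump`` writes through the debugger console's patched stdout (which exposes ``_write``); outside the console, the underlying generator can be called directly. A sketch, assuming Werkzeug is installed:
from werkzeug.debug.repr import DebugReprGenerator
html = DebugReprGenerator().dump_object({"answer": 42})
print("Contents of" in html)  # True; the dump is an HTML fragment with a title and a key/value table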
172,489 | import codecs
import re
import sys
import typing as t
from collections import deque
from traceback import format_exception_only
from markupsafe import escape
def _add_subclass_info(
inner: str, obj: object, base: t.Union[t.Type, t.Tuple[t.Type, ...]]
) -> str:
if isinstance(base, tuple):
for cls in base:
if type(obj) is cls:
return inner
elif type(obj) is base:
return inner
module = ""
if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
module = f'<span class="module">{obj.__class__.__module__}.</span>'
return f"{module}{type(obj).__name__}({inner})"
def _sequence_repr_maker(
left: str, right: str, base: t.Type, limit: int = 8
) -> t.Callable[["DebugReprGenerator", t.Iterable, bool], str]:
def proxy(self: "DebugReprGenerator", obj: t.Iterable, recursive: bool) -> str:
if recursive:
return _add_subclass_info(f"{left}...{right}", obj, base)
buf = [left]
have_extended_section = False
for idx, item in enumerate(obj):
if idx:
buf.append(", ")
if idx == limit:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(self.repr(item))
if have_extended_section:
buf.append("</span>")
buf.append(right)
return _add_subclass_info("".join(buf), obj, base)
return proxy | null |
172,490 | import codecs
import hashlib
import io
import json
import os
import sys
import atexit
import shutil
import tempfile
The provided code snippet includes necessary dependencies for implementing the `get_filename4code` function. Write a Python function `def get_filename4code(module, content, ext=None)` to solve the following problem:
Generate filename based on content The function ensures that the (temporary) directory exists, so that the file can be written. By default, the directory won't be cleaned up, so a filter can use the directory as a cache and decide not to regenerate if there's no change. In case the user prefers the files to be temporary files, an environment variable `PANDOCFILTER_CLEANUP` can be set to any non-empty value such as `1` to make sure the directory is created in a temporary location and removed after finishing the filter. In this case there's no caching and files will be regenerated each time the filter is run. Example: filename = get_filename4code("myfilter", code)
Here is the function:
def get_filename4code(module, content, ext=None):
"""Generate filename based on content
The function ensures that the (temporary) directory exists, so that the
file can be written.
By default, the directory won't be cleaned up,
so a filter can use the directory as a cache and
decide not to regenerate if there's no change.
    In case the user prefers the files to be temporary files,
an environment variable `PANDOCFILTER_CLEANUP` can be set to
any non-empty value such as `1` to
make sure the directory is created in a temporary location and removed
after finishing the filter. In this case there's no caching and files
will be regenerated each time the filter is run.
Example:
filename = get_filename4code("myfilter", code)
"""
if os.getenv('PANDOCFILTER_CLEANUP'):
imagedir = tempfile.mkdtemp(prefix=module)
atexit.register(lambda: shutil.rmtree(imagedir))
else:
imagedir = module + "-images"
fn = hashlib.sha1(content.encode(sys.getfilesystemencoding())).hexdigest()
try:
os.mkdir(imagedir)
sys.stderr.write('Created directory ' + imagedir + '\n')
except OSError:
sys.stderr.write('Could not create directory "' + imagedir + '"\n')
if ext:
fn += "." + ext
    return os.path.join(imagedir, fn) | Generate filename based on content The function ensures that the (temporary) directory exists, so that the file can be written. By default, the directory won't be cleaned up, so a filter can use the directory as a cache and decide not to regenerate if there's no change. In case the user prefers the files to be temporary files, an environment variable `PANDOCFILTER_CLEANUP` can be set to any non-empty value such as `1` to make sure the directory is created in a temporary location and removed after finishing the filter. In this case there's no caching and files will be regenerated each time the filter is run. Example: filename = get_filename4code("myfilter", code) |
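A sketch of using the result as a cache path (the "graphviz" module name and DOT source are hypothetical; assumes pandocfilters is installed). Running this also creates the graphviz-images/ directory:
from pandocfilters import get_filename4code
code = "digraph { a -> b }"
print(get_filename4code("graphviz", code, "png"))  # graphviz-images/<sha1 of code>.png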
172,491 | import codecs
import hashlib
import io
import json
import os
import sys
import atexit
import shutil
import tempfile
def get_value(kv, key, value = None):
"""get value from the keyvalues (options)"""
res = []
for k, v in kv:
if k == key:
value = v
else:
res.append([k, v])
return value, res
Str = elt('Str', 1)
The provided code snippet includes necessary dependencies for implementing the `get_caption` function. Write a Python function `def get_caption(kv)` to solve the following problem:
get caption from the keyvalues (options) Example: if key == 'CodeBlock': [[ident, classes, keyvals], code] = value caption, typef, keyvals = get_caption(keyvals) ... return Para([Image([ident, [], keyvals], caption, [filename, typef])])
Here is the function:
def get_caption(kv):
"""get caption from the keyvalues (options)
Example:
if key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
caption, typef, keyvals = get_caption(keyvals)
...
return Para([Image([ident, [], keyvals], caption, [filename, typef])])
"""
caption = []
typef = ""
value, res = get_value(kv, u"caption")
if value is not None:
caption = [Str(value)]
typef = "fig:"
return caption, typef, res | get caption from the keyvalues (options) Example: if key == 'CodeBlock': [[ident, classes, keyvals], code] = value caption, typef, keyvals = get_caption(keyvals) ... return Para([Image([ident, [], keyvals], caption, [filename, typef])]) |
172,492 | import codecs
import hashlib
import io
import json
import os
import sys
import atexit
import shutil
import tempfile
The provided code snippet includes necessary dependencies for implementing the `get_extension` function. Write a Python function `def get_extension(format, default, **alternates)` to solve the following problem:
get the extension for the result, needs a default and some specialisations Example: filetype = get_extension(format, "png", html="svg", latex="eps")
Here is the function:
def get_extension(format, default, **alternates):
"""get the extension for the result, needs a default and some specialisations
Example:
filetype = get_extension(format, "png", html="svg", latex="eps")
"""
try:
return alternates[format]
except KeyError:
return default | get the extension for the result, needs a default and some specialisations Example: filetype = get_extension(format, "png", html="svg", latex="eps") |
172,493 | import codecs
import hashlib
import io
import json
import os
import sys
import atexit
import shutil
import tempfile
def toJSONFilters(actions):
"""Generate a JSON-to-JSON filter from stdin to stdout
The filter:
* reads a JSON-formatted pandoc document from stdin
* transforms it by walking the tree and performing the actions
* returns a new JSON-formatted pandoc document to stdout
The argument `actions` is a list of functions of the form
`action(key, value, format, meta)`, as described in more
detail under `walk`.
This function calls `applyJSONFilters`, with the `format`
argument provided by the first command-line argument,
if present. (Pandoc sets this by default when calling
filters.)
"""
try:
input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
except AttributeError:
# Python 2 does not have sys.stdin.buffer.
# REF: https://stackoverflow.com/questions/2467928/python-unicodeencode
input_stream = codecs.getreader("utf-8")(sys.stdin)
source = input_stream.read()
if len(sys.argv) > 1:
format = sys.argv[1]
else:
format = ""
sys.stdout.write(applyJSONFilters(actions, source, format))
The provided code snippet includes necessary dependencies for implementing the `toJSONFilter` function. Write a Python function `def toJSONFilter(action)` to solve the following problem:
Like `toJSONFilters`, but takes a single action as argument.
Here is the function:
def toJSONFilter(action):
"""Like `toJSONFilters`, but takes a single action as argument.
"""
toJSONFilters([action]) | Like `toJSONFilters`, but takes a single action as argument. |
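A minimal end-to-end filter built on toJSONFilter might look like the sketch below; it assumes the standard pandocfilters package layout, where Str is the elt-based constructor and applyJSONFilters/walk are defined alongside it:

#!/usr/bin/env python
"""caps.py -- upper-case every Str element in the document."""
from pandocfilters import toJSONFilter, Str

def caps(key, value, format, meta):
    # Returning a new element replaces the node; returning None keeps it as-is.
    if key == 'Str':
        return Str(value.upper())

if __name__ == "__main__":
    toJSONFilter(caps)

Pandoc would invoke this as pandoc input.md --filter ./caps.py -o output.html, passing the target format as the first command-line argument.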
172,494 | import codecs
import hashlib
import io
import json
import os
import sys
import atexit
import shutil
import tempfile
The provided code snippet includes necessary dependencies for implementing the `attributes` function. Write a Python function `def attributes(attrs)` to solve the following problem:
Returns an attribute list, constructed from the dictionary attrs.
Here is the function:
def attributes(attrs):
"""Returns an attribute list, constructed from the
dictionary attrs.
"""
attrs = attrs or {}
ident = attrs.get("id", "")
classes = attrs.get("classes", [])
keyvals = [[x, attrs[x]] for x in attrs if (x != "classes" and x != "id")]
return [ident, classes, keyvals] | Returns an attribute list, constructed from the dictionary attrs. |
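Concretely (the attribute values are hypothetical):

assert attributes({"id": "fig1", "classes": ["wide"], "width": "50%"}) == \
    ["fig1", ["wide"], [["width", "50%"]]]
assert attributes(None) == ["", [], []]  # a missing mapping collapses to the empty triple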
172,495 | import codecs
import hashlib
import io
import json
import os
import sys
import atexit
import shutil
import tempfile
def elt(eltType, numargs):
def fun(*args):
lenargs = len(args)
if lenargs != numargs:
raise ValueError(eltType + ' expects ' + str(numargs) +
' arguments, but given ' + str(lenargs))
if numargs == 0:
xs = []
elif len(args) == 1:
xs = args[0]
else:
xs = list(args)
return {'t': eltType, 'c': xs}
return fun | null |
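elt is the factory behind constructors such as Str used earlier; a short behaviour sketch, relying on the definition above:

Str = elt('Str', 1)
Para = elt('Para', 1)
assert Str("hello") == {'t': 'Str', 'c': 'hello'}
assert Para([Str("hello")]) == {'t': 'Para', 'c': [{'t': 'Str', 'c': 'hello'}]}
try:
    Str("a", "b")  # wrong arity
except ValueError as exc:
    print(exc)     # "Str expects 1 arguments, but given 2"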
172,496 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
_marker = object()
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else:
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
if hasattr(typing, 'Literal'):
Literal = typing.Literal
# 3.7:
else:
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
if hasattr(typing, "get_overloads"): # 3.11+
overload = typing.overload
get_overloads = typing.get_overloads
clear_overloads = typing.clear_overloads
else:
# {module: {qualname: {firstlineno: func}}}
_overload_registry = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
# 3.7.0-3.7.2
else:
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
# 3.7
else:
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
# 3.7
else:
if hasattr(typing, "Required"):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
_TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
if hasattr(typing, "assert_type"):
assert_type = typing.assert_type
else:
if hasattr(typing, "Required"):
get_type_hints = typing.get_type_hints
else:
import functools
import types
# replaces _strip_annotations()
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
# 3.7-3.8
else:
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.7-3.9
else:
if hasattr(typing, 'ParamSpec'):
# Add default Parameter - PEP 696
# 3.7-3.9
else:
if not hasattr(typing, 'Concatenate'):
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
# 3.9
elif sys.version_info[:2] >= (3, 9):
# 3.7-8
else:
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
# 3.7-3.8
else:
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
if hasattr(typing, "LiteralString"):
LiteralString = typing.LiteralString
else:
if hasattr(typing, "Self"):
Self = typing.Self
else:
if hasattr(typing, "Never"):
Never = typing.Never
else:
if hasattr(typing, 'Required'):
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):
else:
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, "Unpack"): # 3.11+
Unpack = typing.Unpack
elif sys.version_info[:2] >= (3, 9):
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else:
Unpack = _UnpackForm(
'Unpack',
doc="""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
""")
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
if hasattr(typing, "TypeVarTuple"): # 3.11+
# Add default Parameter - PEP 696
class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
"""Type variable tuple."""
def __init__(self, name, *, default=_marker):
super().__init__(name)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
else:
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=_marker):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"):
reveal_type = typing.reveal_type
else:
if hasattr(typing, "assert_never"):
assert_never = typing.assert_never
else:
if hasattr(typing, "override"):
override = typing.override
else:
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
if hasattr(typing, "deprecated"):
deprecated = typing.deprecated
else:
_T = typing.TypeVar("_T")
if not hasattr(typing, "TypeVarTuple"):
typing._collect_type_vars = _collect_type_vars
typing._check_generic = _check_generic
The provided code snippet includes necessary dependencies for implementing the `_check_generic` function. Write a Python function `def _check_generic(cls, parameters, elen=_marker)` to solve the following problem:
Check correct count for parameters of a generic cls (internal helper). This gives a nice error message in case of count mismatch.
Here is the function:
def _check_generic(cls, parameters, elen=_marker):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
if elen is _marker:
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
elen = len(cls.__parameters__)
alen = len(parameters)
if alen != elen:
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
return
raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
f" actual {alen}, expected {elen}") | Check correct count for parameters of a generic cls (internal helper). This gives a nice error message in case of count mismatch. |
172,497 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `final` function. Write a Python function `def final(f)` to solve the following problem:
This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class cannot be subclassed. For example: class Base: @final def done(self) -> None: ... class Sub(Base): def done(self) -> None: # Error reported by type checker ... @final class Leaf: ... class Other(Leaf): # Error reported by type checker ... There is no runtime checking of these properties. The decorator sets the ``__final__`` attribute to ``True`` on the decorated object to allow runtime introspection.
Here is the function:
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f | This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class cannot be subclassed. For example: class Base: @final def done(self) -> None: ... class Sub(Base): def done(self) -> None: # Error reported by type checker ... @final class Leaf: ... class Other(Leaf): # Error reported by type checker ... There is no runtime checking of these properties. The decorator sets the ``__final__`` attribute to ``True`` on the decorated object to allow runtime introspection. |
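A runtime behaviour sketch -- the decorator only sets a marker attribute and enforces nothing:

from typing_extensions import final

class Base:
    @final
    def done(self) -> None:
        ...

assert Base.done.__final__ is True  # introspectable marker

@final
class Leaf:
    ...

assert Leaf.__final__ is True
assert final(len) is len  # the failed attribute write on a builtin is silently skipped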
172,498 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
"""Type variable."""
__module__ = 'typing'
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=_marker, infer_variance=False):
super().__init__(name, *constraints, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
self.__infer_variance__ = infer_variance
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def IntVar(name):
return typing.TypeVar(name) | null |
172,499 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
_overload_dummy = typing._overload_dummy
The provided code snippet includes necessary dependencies for implementing the `overload` function. Write a Python function `def overload(func)` to solve the following problem:
Decorator for overloaded functions/methods. In a stub file, place two or more stub definitions for the same function in a row, each decorated with @overload. For example: @overload def utf8(value: None) -> None: ... @overload def utf8(value: bytes) -> bytes: ... @overload def utf8(value: str) -> bytes: ... In a non-stub file (i.e. a regular .py file), do the same but follow it with an implementation. The implementation should *not* be decorated with @overload. For example: @overload def utf8(value: None) -> None: ... @overload def utf8(value: bytes) -> bytes: ... @overload def utf8(value: str) -> bytes: ... def utf8(value): # implementation goes here The overloads for a function can be retrieved at runtime using the get_overloads() function.
Here is the function:
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
The overloads for a function can be retrieved at runtime using the
get_overloads() function.
"""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
try:
_overload_registry[f.__module__][f.__qualname__][
f.__code__.co_firstlineno
] = func
except AttributeError:
# Not a normal function; ignore.
pass
return _overload_dummy | Decorator for overloaded functions/methods. In a stub file, place two or more stub definitions for the same function in a row, each decorated with @overload. For example: @overload def utf8(value: None) -> None: ... @overload def utf8(value: bytes) -> bytes: ... @overload def utf8(value: str) -> bytes: ... In a non-stub file (i.e. a regular .py file), do the same but follow it with an implementation. The implementation should *not* be decorated with @overload. For example: @overload def utf8(value: None) -> None: ... @overload def utf8(value: bytes) -> bytes: ... @overload def utf8(value: str) -> bytes: ... def utf8(value): # implementation goes here The overloads for a function can be retrieved at runtime using the get_overloads() function. |
172,500 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `get_overloads` function. Write a Python function `def get_overloads(func)` to solve the following problem:
Return all defined overloads for *func* as a sequence.
Here is the function:
def get_overloads(func):
"""Return all defined overloads for *func* as a sequence."""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
if f.__module__ not in _overload_registry:
return []
mod_dict = _overload_registry[f.__module__]
if f.__qualname__ not in mod_dict:
return []
return list(mod_dict[f.__qualname__].values()) | Return all defined overloads for *func* as a sequence. |
172,501 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `clear_overloads` function. Write a Python function `def clear_overloads()` to solve the following problem:
Clear all overloads in the registry.
Here is the function:
def clear_overloads():
"""Clear all overloads in the registry."""
_overload_registry.clear() | Clear all overloads in the registry. |
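Putting overload, get_overloads and clear_overloads together (a sketch; utf8 follows the docstring's example):

from typing_extensions import overload, get_overloads, clear_overloads

@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
    return None if value is None else value.encode("utf-8")

assert len(get_overloads(utf8)) == 2  # both stubs recorded under module/qualname
clear_overloads()
assert get_overloads(utf8) == []      # the registry is empty again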
172,502 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) | null |
172,503 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
def _collect_type_vars(types, typevar_types=None):
"""Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@runtime_checkable act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not typing.Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
msg = "Parameters to generic types must be types."
params = tuple(typing._type_check(p, msg) for p in params) # noqa
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, typing.TypeVar) for p in params):
i = 0
while isinstance(params[i], typing.TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
f" Parameter {i + 1} is {params[i]}")
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params, len(cls.__parameters__))
return typing._GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
if '__orig_bases__' in cls.__dict__:
error = typing.Generic in cls.__orig_bases__
else:
error = typing.Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
_maybe_adjust_parameters(cls)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, typing.Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
f' protocols, got {repr(base)}')
cls.__init__ = _no_init
The provided code snippet includes necessary dependencies for implementing the `_maybe_adjust_parameters` function. Write a Python function `def _maybe_adjust_parameters(cls)` to solve the following problem:
Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. The contents of this function are very similar to logic found in typing.Generic.__init_subclass__ on the CPython main branch.
Here is the function:
def _maybe_adjust_parameters(cls):
"""Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
The contents of this function are very similar
to logic found in typing.Generic.__init_subclass__
on the CPython main branch.
"""
tvars = []
if '__orig_bases__' in cls.__dict__:
tvars = typing._collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, typing._GenericAlias) and
base.__origin__ in (typing.Generic, Protocol)):
# for error messages
the_base = base.__origin__.__name__
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in {the_base}[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars) | Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. The contents of this function are very similar to logic found in typing.Generic.__init_subclass__ on the CPython main branch. |
172,504 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated') | null |
172,505 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
class _ProtocolMeta(abc.ABCMeta): # noqa: B024
# This metaclass is a bit unfortunate and exists only because of the lack
# of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
The provided code snippet includes necessary dependencies for implementing the `runtime_checkable` function. Write a Python function `def runtime_checkable(cls)` to solve the following problem:
Mark a protocol class as a runtime protocol, so that it can be used with isinstance() and issubclass(). Raise TypeError if applied to a non-protocol class. This allows a simple-minded structural check very similar to the one-offs in collections.abc such as Hashable.
Here is the function:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol, so that it
can be used with isinstance() and issubclass(). Raise TypeError
if applied to a non-protocol class.
This allows a simple-minded structural check very similar to the
one-offs in collections.abc such as Hashable.
"""
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
f' got {cls!r}')
cls._is_runtime_protocol = True
return cls | Mark a protocol class as a runtime protocol, so that it can be used with isinstance() and issubclass(). Raise TypeError if applied to a non-protocol class. This allows a simple-minded structural check very similar to the one-offs in collections.abc such as Hashable. |
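A usage sketch, importing the public names from typing_extensions so it runs on any supported Python:

import io
from typing_extensions import Protocol, runtime_checkable

@runtime_checkable
class Closable(Protocol):
    def close(self) -> None: ...

assert isinstance(io.StringIO(), Closable)  # structural: it has a close() method
assert not isinstance(object(), Closable)   # no close() -> fails the structural check

class Plain: ...
# runtime_checkable(Plain) would raise TypeError: it is not a protocol class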
172,506 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if sys.version_info >= (3, 10):
elif sys.version_info >= (3, 9):
else:
if sys.version_info >= (3, 11):
from typing import Any
else:
if sys.version_info >= (3, 11):
final = typing.final
else:
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
if sys.version_info >= (3, 12):
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
dataclass_transform = typing.dataclass_transform
else:
if sys.version_info >= (3, 11):
NamedTuple = typing.NamedTuple
else:
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
def _check_fails(cls, other):
try:
if sys._getframe(1).f_globals['__name__'] not in ['abc',
'functools',
'typing']:
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
except (AttributeError, ValueError):
pass
return False | null |
172,507 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:] # allow the "cls" keyword be passed
return dict(*args, **kwargs) | null |
172,508 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if sys.version_info >= (3, 10):
elif sys.version_info >= (3, 9):
else:
if sys.version_info >= (3, 11):
from typing import Any
else:
if sys.version_info >= (3, 11):
final = typing.final
else:
if hasattr(typing, "Required"):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
_TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
class _TypedDictMeta(type):
def __init__(cls, name, bases, ns, total=True):
super().__init__(name, bases, ns)
def __new__(cls, name, bases, ns, total=True):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
# Don't insert typing.Generic into __bases__ here,
# or Generic.__init_subclass__ will raise TypeError
# in the super().__new__() call.
# Instead, monkey-patch __bases__ onto the class after it's been created.
tp_dict = super().__new__(cls, name, (dict,), ns)
if any(issubclass(base, typing.Generic) for base in bases):
tp_dict.__bases__ = (typing.Generic, dict)
_maybe_adjust_parameters(tp_dict)
annotations = {}
own_annotations = ns.get('__annotations__', {})
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
kwds = {"module": tp_dict.__module__} if _TAKES_MODULE else {}
own_annotations = {
n: typing._type_check(tp, msg, **kwds)
for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
for annotation_key, annotation_type in own_annotations.items():
annotation_origin = get_origin(annotation_type)
if annotation_origin is Annotated:
annotation_args = get_args(annotation_type)
if annotation_args:
annotation_type = annotation_args[0]
annotation_origin = get_origin(annotation_type)
if annotation_origin is Required:
required_keys.add(annotation_key)
elif annotation_origin is NotRequired:
optional_keys.add(annotation_key)
elif total:
required_keys.add(annotation_key)
else:
optional_keys.add(annotation_key)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
if sys.version_info >= (3, 12):
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
dataclass_transform = typing.dataclass_transform
else:
if sys.version_info >= (3, 11):
NamedTuple = typing.NamedTuple
else:
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
def _typeddict_new(*args, total=True, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:] # allow the "cls" keyword be passed
if args:
typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
elif '_typename' in kwargs:
typename = kwargs.pop('_typename')
import warnings
warnings.warn("Passing '_typename' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError("TypedDict.__new__() missing 1 required positional "
"argument: '_typename'")
if args:
try:
fields, = args # allow the "_fields" keyword be passed
except ValueError:
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
f'positional arguments but {len(args) + 2} '
'were given')
elif '_fields' in kwargs and len(kwargs) == 1:
fields = kwargs.pop('_fields')
import warnings
warnings.warn("Passing '_fields' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
fields = None
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total) | null |
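What the functional form above produces, sketched with the public typing_extensions names (Point2D is hypothetical):

from typing_extensions import TypedDict

Point2D = TypedDict('Point2D', {'x': int, 'y': int}, total=False)
assert Point2D.__optional_keys__ == frozenset({'x', 'y'})  # total=False makes all keys optional
assert Point2D(x=1) == {'x': 1}  # at runtime instances are plain dicts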
172,509 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `is_typeddict` function. Write a Python function `def is_typeddict(tp)` to solve the following problem:
Check if an annotation is a TypedDict class For example:: class Film(TypedDict): title: str year: int is_typeddict(Film) # => True is_typeddict(Union[list, str]) # => False
Here is the function:
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, tuple(_TYPEDDICT_TYPES)) | Check if an annotation is a TypedDict class For example:: class Film(TypedDict): title: str year: int is_typeddict(Film) # => True is_typeddict(Union[list, str]) # => False |
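Following the docstring, a runnable sketch:

from typing_extensions import TypedDict, is_typeddict

class Film(TypedDict):
    title: str
    year: int

assert is_typeddict(Film)
assert not is_typeddict(dict)                           # a plain mapping type
assert not is_typeddict(Film(title="Heat", year=1995))  # instances are ordinary dicts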
172,510 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `assert_type` function. Write a Python function `def assert_type(__val, __typ)` to solve the following problem:
Assert (to the type checker) that the value is of the given type. When the type checker encounters a call to assert_type(), it emits an error if the value is not of the specified type:: def greet(name: str) -> None: assert_type(name, str) # ok assert_type(name, int) # type checker error At runtime this returns the first argument unchanged and otherwise does nothing.
Here is the function:
def assert_type(__val, __typ):
"""Assert (to the type checker) that the value is of the given type.
When the type checker encounters a call to assert_type(), it
emits an error if the value is not of the specified type::
def greet(name: str) -> None:
assert_type(name, str) # ok
assert_type(name, int) # type checker error
At runtime this returns the first argument unchanged and otherwise
does nothing.
"""
return __val | Assert (to the type checker) that the value is of the given type. When the type checker encounters a call to assert_type(), it emits an error if the value is not of the specified type:: def greet(name: str) -> None: assert_type(name, str) # ok assert_type(name, int) # type checker error At runtime this returns the first argument unchanged and otherwise does nothing. |
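At runtime the function is a pure pass-through, so this sketch executes without any checking:

from typing_extensions import assert_type

def greet(name: str) -> None:
    checked = assert_type(name, str)  # verified statically; no runtime effect
    assert checked is name            # the value is returned unchanged

greet("world")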
172,511 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else:
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
if hasattr(typing, 'Literal'):
Literal = typing.Literal
# 3.7:
else:
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
if hasattr(typing, "get_overloads"): # 3.11+
overload = typing.overload
get_overloads = typing.get_overloads
clear_overloads = typing.clear_overloads
else:
# {module: {qualname: {firstlineno: func}}}
_overload_registry = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
# 3.7.0-3.7.2
else:
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
# 3.7
else:
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
# 3.7
else:
if hasattr(typing, "Required"):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
_TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
if hasattr(typing, "assert_type"):
assert_type = typing.assert_type
else:
if hasattr(typing, "Required"):
get_type_hints = typing.get_type_hints
else:
import functools
import types
# replaces _strip_annotations()
def _strip_extras(t):
"""Strips Annotated, Required and NotRequired from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
return _strip_extras(t.__args__[0])
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return types.GenericAlias(t.__origin__, stripped_args)
if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
# 3.7-3.8
else:
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.7-3.9
else:
if hasattr(typing, 'ParamSpec'):
# Add default Parameter - PEP 696
# 3.7-3.9
else:
if not hasattr(typing, 'Concatenate'):
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
# 3.9
elif sys.version_info[:2] >= (3, 9):
# 3.7-8
else:
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
# 3.7-3.8
else:
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
if hasattr(typing, "LiteralString"):
LiteralString = typing.LiteralString
else:
if hasattr(typing, "Self"):
Self = typing.Self
else:
if hasattr(typing, "Never"):
Never = typing.Never
else:
if hasattr(typing, 'Required'):
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):
else:
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, "Unpack"): # 3.11+
Unpack = typing.Unpack
elif sys.version_info[:2] >= (3, 9):
else:
Unpack = _UnpackForm(
'Unpack',
doc="""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
""")
if hasattr(typing, "TypeVarTuple"): # 3.11+
# Add default Parameter - PEP 696
else:
if hasattr(typing, "reveal_type"):
reveal_type = typing.reveal_type
else:
if hasattr(typing, "assert_never"):
assert_never = typing.assert_never
else:
if hasattr(typing, "override"):
override = typing.override
else:
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
if hasattr(typing, "deprecated"):
deprecated = typing.deprecated
else:
_T = typing.TypeVar("_T")
if not hasattr(typing, "TypeVarTuple"):
typing._collect_type_vars = _collect_type_vars
typing._check_generic = _check_generic
The provided code snippet includes necessary dependencies for implementing the `get_type_hints` function. Write a Python function `def get_type_hints(obj, globalns=None, localns=None, include_extras=False)` to solve the following problem:
Return type hints for an object. This is often the same as obj.__annotations__, but it handles forward references encoded as string literals, adds Optional[t] if a default value equal to None is set and recursively replaces all 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' (unless 'include_extras=True'). The argument may be a module, class, method, or function. The annotations are returned as a dictionary. For classes, annotations include also inherited members. TypeError is raised if the argument is not of a type that can contain annotations, and an empty dictionary is returned if no annotations are present. BEWARE -- the behavior of globalns and localns is counterintuitive (unless you are familiar with how eval() and exec() work). The search order is locals first, then globals. - If no dict arguments are passed, an attempt is made to use the globals from obj (or the respective module's globals for classes), and these are also used as the locals. If the object does not appear to have globals, an empty dictionary is used. - If one dict argument is passed, it is used for both globals and locals. - If two dict arguments are passed, they specify globals and locals, respectively.
Here is the function:
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
(unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if hasattr(typing, "Annotated"):
hint = typing.get_type_hints(
obj, globalns=globalns, localns=localns, include_extras=True
)
else:
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_extras(t) for k, t in hint.items()} | Return type hints for an object. This is often the same as obj.__annotations__, but it handles forward references encoded as string literals, adds Optional[t] if a default value equal to None is set and recursively replaces all 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' (unless 'include_extras=True'). The argument may be a module, class, method, or function. The annotations are returned as a dictionary. For classes, annotations include also inherited members. TypeError is raised if the argument is not of a type that can contain annotations, and an empty dictionary is returned if no annotations are present. BEWARE -- the behavior of globalns and localns is counterintuitive (unless you are familiar with how eval() and exec() work). The search order is locals first, then globals. - If no dict arguments are passed, an attempt is made to use the globals from obj (or the respective module's globals for classes), and these are also used as the locals. If the object does not appear to have globals, an empty dictionary is used. - If one dict argument is passed, it is used for both globals and locals. - If two dict arguments are passed, they specify globals and locals, respectively. |
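A minimal usage sketch of the backport above (not part of the row), assuming `typing_extensions` is installed: the wrappers are stripped unless `include_extras=True`.

from typing_extensions import TypedDict, Required, NotRequired, get_type_hints

class Movie(TypedDict, total=False):
    title: Required[str]
    year: NotRequired[int]

print(get_type_hints(Movie))
# {'title': <class 'str'>, 'year': <class 'int'>} -- Required/NotRequired stripped
print(get_type_hints(Movie, include_extras=True))
# Required[str] and NotRequired[int] are preserved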
172,512 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
f"{', '.join(repr(a) for a in self.__metadata__)}]")
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
The provided code snippet includes necessary dependencies for implementing the `get_args` function. Write a Python function `def get_args(tp)` to solve the following problem:
Get type arguments with all substitutions performed. For unions, basic simplifications used by Union constructor are performed. Examples:: get_args(Dict[str, int]) == (str, int) get_args(int) == () get_args(Union[int, Union[T, int], str][int]) == (int, str) get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) get_args(Callable[[], T][int]) == ([], int)
Here is the function:
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return () | Get type arguments with all substitutions performed. For unions, basic simplifications used by Union constructor are performed. Examples:: get_args(Dict[str, int]) == (str, int) get_args(int) == () get_args(Union[int, Union[T, int], str][int]) == (int, str) get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) get_args(Callable[[], T][int]) == ([], int) |
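A short sketch (not part of the row) exercising `get_origin` and `get_args` as documented above, assuming `typing_extensions` is installed:

from typing import Callable, Dict, TypeVar, Union
from typing_extensions import Annotated, get_args, get_origin

T = TypeVar("T")

assert get_origin(Dict[str, int]) is dict
assert get_args(Dict[str, int]) == (str, int)
# Union arguments are simplified and substituted
assert get_args(Union[int, Union[T, int], str][int]) == (int, str)
# Callable argument lists come back as ([params], return_type)
assert get_args(Callable[[int], str]) == ([int], str)
# Annotated yields the wrapped type followed by its metadata
assert get_args(Annotated[int, "metadata"]) == (int, "metadata")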
172,513 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `TypeAlias` function. Write a Python function `def TypeAlias(self, parameters)` to solve the following problem:
Special marker indicating that an assignment should be recognized as a proper type alias definition by type checkers. For example:: Predicate: TypeAlias = Callable[..., bool] It's invalid when used anywhere except as in the example above.
Here is the function:
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable") | Special marker indicating that an assignment should be recognized as a proper type alias definition by type checkers. For example:: Predicate: TypeAlias = Callable[..., bool] It's invalid when used anywhere except as in the example above. |
172,514 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
The provided code snippet includes necessary dependencies for implementing the `Concatenate` function. Write a Python function `def Concatenate(self, parameters)` to solve the following problem:
Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information.
Here is the function:
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters) | Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information. |
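A minimal sketch of the PEP 612 pattern this form supports (names like `inject_prefix` are illustrative, not from the row), assuming `typing_extensions` is installed:

from typing import Callable, TypeVar
from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def inject_prefix(f: Callable[Concatenate[str, P], R]) -> Callable[P, R]:
    # The wrapper supplies the leading str argument itself, so callers
    # see a signature with that parameter removed.
    def inner(*args: P.args, **kwargs: P.kwargs) -> R:
        return f("prefix", *args, **kwargs)
    return inner

@inject_prefix
def shout(prefix: str, word: str) -> str:
    return f"{prefix}: {word.upper()}"

print(shout("hello"))  # prefix: HELLO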
172,515 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `TypeGuard` function. Write a Python function `def TypeGuard(self, parameters)` to solve the following problem:
Special typing form used to annotate the return type of a user-defined type guard function. ``TypeGuard`` only accepts a single type argument. At runtime, functions marked this way should return a boolean. ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static type checkers to determine a more precise type of an expression within a program's code flow. Usually type narrowing is done by analyzing conditional code flow and applying the narrowing to a block of code. The conditional expression here is sometimes referred to as a "type guard". Sometimes it would be convenient to use a user-defined boolean function as a type guard. Such a function should use ``TypeGuard[...]`` as its return type to alert static type checkers to this intention. Using ``-> TypeGuard`` tells the static type checker that for a given function: 1. The return value is a boolean. 2. If the return value is ``True``, the type of its argument is the type inside ``TypeGuard``. For example:: def is_str(val: Union[str, float]): # "isinstance" type guard if isinstance(val, str): # Type of ``val`` is narrowed to ``str`` ... else: # Else, type of ``val`` is narrowed to ``float``. ... Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower form of ``TypeA`` (it can even be a wider form) and this may lead to type-unsafe results. The main reason is to allow for things like narrowing ``List[object]`` to ``List[str]`` even though the latter is not a subtype of the former, since ``List`` is invariant. The responsibility of writing type-safe type guards is left to the user. ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards).
Here is the function:
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,)) | Special typing form used to annotate the return type of a user-defined type guard function. ``TypeGuard`` only accepts a single type argument. At runtime, functions marked this way should return a boolean. ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static type checkers to determine a more precise type of an expression within a program's code flow. Usually type narrowing is done by analyzing conditional code flow and applying the narrowing to a block of code. The conditional expression here is sometimes referred to as a "type guard". Sometimes it would be convenient to use a user-defined boolean function as a type guard. Such a function should use ``TypeGuard[...]`` as its return type to alert static type checkers to this intention. Using ``-> TypeGuard`` tells the static type checker that for a given function: 1. The return value is a boolean. 2. If the return value is ``True``, the type of its argument is the type inside ``TypeGuard``. For example:: def is_str(val: Union[str, float]): # "isinstance" type guard if isinstance(val, str): # Type of ``val`` is narrowed to ``str`` ... else: # Else, type of ``val`` is narrowed to ``float``. ... Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower form of ``TypeA`` (it can even be a wider form) and this may lead to type-unsafe results. The main reason is to allow for things like narrowing ``List[object]`` to ``List[str]`` even though the latter is not a subtype of the former, since ``List`` is invariant. The responsibility of writing type-safe type guards is left to the user. ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). |
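A small sketch of a user-defined type guard as described in the docstring above (`is_str_list` is illustrative, not from the row):

from typing import List
from typing_extensions import TypeGuard

def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
    # Returning True tells static checkers to narrow ``val`` to List[str].
    return all(isinstance(x, str) for x in val)

items: List[object] = ["a", "b", "c"]
if is_str_list(items):
    print("-".join(items))  # a-b-c; checkers treat items as List[str] here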
172,516 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `LiteralString` function. Write a Python function `def LiteralString(self, params)` to solve the following problem:
Represents an arbitrary literal string. Example:: from typing_extensions import LiteralString def query(sql: LiteralString) -> ...: ... query("SELECT * FROM table") # ok query(f"SELECT * FROM {input()}") # not ok See PEP 675 for details.
Here is the function:
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable") | Represents an arbitrary literal string. Example:: from typing_extensions import LiteralString def query(sql: LiteralString) -> ...: ... query("SELECT * FROM table") # ok query(f"SELECT * FROM {input()}") # not ok See PEP 675 for details. |
172,517 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `Self` function. Write a Python function `def Self(self, params)` to solve the following problem:
Used to spell the type of "self" in classes. Example:: from typing import Self class ReturnsSelf: def parse(self, data: bytes) -> Self: ... return self
Here is the function:
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable") | Used to spell the type of "self" in classes. Example:: from typing import Self class ReturnsSelf: def parse(self, data: bytes) -> Self: ... return self |
172,518 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if hasattr(typing, "Unpack"): # 3.11+
Unpack = typing.Unpack
elif sys.version_info[:2] >= (3, 9):
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
else:
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
Unpack = _UnpackForm(
'Unpack',
doc="""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
""")
The provided code snippet includes necessary dependencies for implementing the `Unpack` function. Write a Python function `def Unpack(self, parameters)` to solve the following problem:
A special typing construct to unpack a variadic type. For example: Shape = TypeVarTuple('Shape') Batch = NewType('Batch', int) def add_batch_axis( x: Array[Unpack[Shape]] ) -> Array[Batch, Unpack[Shape]]: ...
Here is the function:
def Unpack(self, parameters):
"""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,)) | A special typing construct to unpack a variadic type. For example: Shape = TypeVarTuple('Shape') Batch = NewType('Batch', int) def add_batch_axis( x: Array[Unpack[Shape]] ) -> Array[Batch, Unpack[Shape]]: ... |
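A minimal PEP 646-style sketch of `Unpack` with `TypeVarTuple` (not part of the row), assuming `typing_extensions` is installed:

from typing_extensions import TypeVarTuple, Unpack

Ts = TypeVarTuple("Ts")

def first(*args: Unpack[Ts]):
    # ``*args`` is typed as the variadic tuple captured by Ts.
    return args[0]

print(first(1, "two", 3.0))  # 1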
172,519 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if sys.version_info >= (3, 10):
    ...  # branch elided in this row
elif sys.version_info >= (3, 9):
    ...  # branch elided in this row
else:
    ...  # branch elided in this row
T = typing.TypeVar('T')
if sys.version_info >= (3, 11):
    from typing import Any
else:
    ...  # backported Any class elided in this row
if sys.version_info >= (3, 11):
    final = typing.final
else:
    ...  # backport of final elided in this row
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
if sys.version_info >= (3, 12):
    # dataclass_transform exists in 3.11 but lacks the frozen_default parameter
    dataclass_transform = typing.dataclass_transform
else:
    ...  # backport is the function defined later in this row
if sys.version_info >= (3, 11):
    NamedTuple = typing.NamedTuple
else:
    _prohibited_namedtuple_fields = typing._prohibited
    _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
    ...  # def NamedTuple(...) body elided in this row
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
The provided code snippet includes necessary dependencies for implementing the `reveal_type` function. Write a Python function `def reveal_type(__obj: T) -> T` to solve the following problem:
Reveal the inferred type of a variable. When a static type checker encounters a call to ``reveal_type()``, it will emit the inferred type of the argument:: x: int = 1 reveal_type(x) Running a static type checker (e.g., ``mypy``) on this example will produce output similar to 'Revealed type is "builtins.int"'. At runtime, the function prints the runtime type of the argument and returns it unchanged.
Here is the function:
def reveal_type(__obj: T) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
return __obj | Reveal the inferred type of a variable. When a static type checker encounters a call to ``reveal_type()``, it will emit the inferred type of the argument:: x: int = 1 reveal_type(x) Running a static type checker (e.g., ``mypy``) on this example will produce output similar to 'Revealed type is "builtins.int"'. At runtime, the function prints the runtime type of the argument and returns it unchanged. |
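A one-line runtime demo of the backport above (not part of the row):

from typing_extensions import reveal_type

x = reveal_type(len("abc"))  # prints "Runtime type is 'int'" to stderr
assert x == 3                # the argument is returned unchanged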
172,520 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if hasattr(typing, "Never"):
Never = typing.Never
else:
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
The provided code snippet includes necessary dependencies for implementing the `assert_never` function. Write a Python function `def assert_never(__arg: Never) -> Never` to solve the following problem:
Assert to the type checker that a line of code is unreachable. Example:: def int_or_str(arg: int | str) -> None: match arg: case int(): print("It's an int") case str(): print("It's a str") case _: assert_never(arg) If a type checker finds that a call to assert_never() is reachable, it will emit an error. At runtime, this throws an exception when called.
Here is the function:
def assert_never(__arg: Never) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
raise AssertionError("Expected code to be unreachable") | Assert to the type checker that a line of code is unreachable. Example:: def int_or_str(arg: int | str) -> None: match arg: case int(): print("It's an int") case str(): print("It's a str") case _: assert_never(arg) If a type checker finds that a call to assert_never() is reachable, it will emit an error. At runtime, this throws an exception when called. |
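A small exhaustiveness-checking sketch (the `describe` function is illustrative, not from the row):

from typing import Union
from typing_extensions import assert_never

def describe(arg: Union[int, str]) -> str:
    if isinstance(arg, int):
        return "an int"
    elif isinstance(arg, str):
        return "a str"
    else:
        # A checker flags this call if a case above is missing; at
        # runtime it raises AssertionError if ever reached.
        assert_never(arg)

print(describe(42))  # an int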
172,521 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
T = typing.TypeVar('T')
if sys.version_info >= (3, 11):
from typing import Any
else:
class Any(metaclass=_AnyMeta):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
checks.
"""
def __new__(cls, *args, **kwargs):
if cls is Any:
raise TypeError("Any cannot be instantiated")
return super().__new__(cls, *args, **kwargs)
Type = typing.Type
Any = object()
The provided code snippet includes necessary dependencies for implementing the `dataclass_transform` function. Write a Python function `def dataclass_transform( *, eq_default: bool = True, order_default: bool = False, kw_only_default: bool = False, frozen_default: bool = False, field_specifiers: typing.Tuple[ typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], ... ] = (), **kwargs: typing.Any, ) -> typing.Callable[[T], T]` to solve the following problem:
Decorator that marks a function, class, or metaclass as providing dataclass-like behavior. Example: from typing_extensions import dataclass_transform _T = TypeVar("_T") # Used on a decorator function @dataclass_transform() def create_model(cls: type[_T]) -> type[_T]: ... return cls @create_model class CustomerModel: id: int name: str # Used on a base class @dataclass_transform() class ModelBase: ... class CustomerModel(ModelBase): id: int name: str # Used on a metaclass @dataclass_transform() class ModelMeta(type): ... class ModelBase(metaclass=ModelMeta): ... class CustomerModel(ModelBase): id: int name: str Each of the ``CustomerModel`` classes defined in this example will now behave similarly to a dataclass created with the ``@dataclasses.dataclass`` decorator. For example, the type checker will synthesize an ``__init__`` method. The arguments to this decorator can be used to customize this behavior: - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be True or False if it is omitted by the caller. - ``order_default`` indicates whether the ``order`` parameter is assumed to be True or False if it is omitted by the caller. - ``kw_only_default`` indicates whether the ``kw_only`` parameter is assumed to be True or False if it is omitted by the caller. - ``frozen_default`` indicates whether the ``frozen`` parameter is assumed to be True or False if it is omitted by the caller. - ``field_specifiers`` specifies a static list of supported classes or functions that describe fields, similar to ``dataclasses.field()``. At runtime, this decorator records its arguments in the ``__dataclass_transform__`` attribute on the decorated object. See PEP 681 for details.
Here is the function:
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
frozen_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``frozen_default`` indicates whether the ``frozen`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"frozen_default": frozen_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator | Decorator that marks a function, class, or metaclass as providing dataclass-like behavior. Example: from typing_extensions import dataclass_transform _T = TypeVar("_T") # Used on a decorator function @dataclass_transform() def create_model(cls: type[_T]) -> type[_T]: ... return cls @create_model class CustomerModel: id: int name: str # Used on a base class @dataclass_transform() class ModelBase: ... class CustomerModel(ModelBase): id: int name: str # Used on a metaclass @dataclass_transform() class ModelMeta(type): ... class ModelBase(metaclass=ModelMeta): ... class CustomerModel(ModelBase): id: int name: str Each of the ``CustomerModel`` classes defined in this example will now behave similarly to a dataclass created with the ``@dataclasses.dataclass`` decorator. For example, the type checker will synthesize an ``__init__`` method. The arguments to this decorator can be used to customize this behavior: - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be True or False if it is omitted by the caller. - ``order_default`` indicates whether the ``order`` parameter is assumed to be True or False if it is omitted by the caller. - ``kw_only_default`` indicates whether the ``kw_only`` parameter is assumed to be True or False if it is omitted by the caller. - ``frozen_default`` indicates whether the ``frozen`` parameter is assumed to be True or False if it is omitted by the caller. - ``field_specifiers`` specifies a static list of supported classes or functions that describe fields, similar to ``dataclasses.field()``. At runtime, this decorator records its arguments in the ``__dataclass_transform__`` attribute on the decorated object. See PEP 681 for details. |
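A minimal sketch of the runtime behavior of the decorator above: it only records its arguments; `create_model` here is illustrative, not from the row.

from typing_extensions import dataclass_transform

@dataclass_transform(kw_only_default=True)
def create_model(cls):
    # A real implementation would synthesize __init__ and friends;
    # this sketch only demonstrates the recorded metadata.
    return cls

print(create_model.__dataclass_transform__["kw_only_default"])  # True
print(create_model.__dataclass_transform__["eq_default"])       # True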
172,522 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
The provided code snippet includes necessary dependencies for implementing the `override` function. Write a Python function `def override(__arg: _F) -> _F` to solve the following problem:
Indicate that a method is intended to override a method in a base class. Usage: class Base: def method(self) -> None: ... pass class Child(Base): @override def method(self) -> None: super().method() When this decorator is applied to a method, the type checker will validate that it overrides a method with the same name on a base class. This helps prevent bugs that may occur when a base class is changed without an equivalent change to a child class. There is no runtime checking of these properties. The decorator sets the ``__override__`` attribute to ``True`` on the decorated object to allow runtime introspection. See PEP 698 for details.
Here is the function:
def override(__arg: _F) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None: ...
pass
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
There is no runtime checking of these properties. The decorator
sets the ``__override__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
See PEP 698 for details.
"""
try:
__arg.__override__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return __arg | Indicate that a method is intended to override a method in a base class. Usage: class Base: def method(self) -> None: ... pass class Child(Base): @override def method(self) -> None: super().method() When this decorator is applied to a method, the type checker will validate that it overrides a method with the same name on a base class. This helps prevent bugs that may occur when a base class is changed without an equivalent change to a child class. There is no runtime checking of these properties. The decorator sets the ``__override__`` attribute to ``True`` on the decorated object to allow runtime introspection. See PEP 698 for details. |
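A short sketch of the decorator above, showing the runtime-introspection attribute it sets:

from typing_extensions import override

class Base:
    def method(self) -> None: ...

class Child(Base):
    @override
    def method(self) -> None:  # checkers verify Base defines method()
        super().method()

print(Child.method.__override__)  # True, set for runtime introspection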
172,523 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
Type = typing.Type
The provided code snippet includes necessary dependencies for implementing the `deprecated` function. Write a Python function `def deprecated( __msg: str, *, category: typing.Optional[typing.Type[Warning]] = DeprecationWarning, stacklevel: int = 1, ) -> typing.Callable[[_T], _T]` to solve the following problem:
Indicate that a class, function or overload is deprecated. Usage: @deprecated("Use B instead") class A: pass @deprecated("Use g instead") def f(): pass @overload @deprecated("int support is deprecated") def g(x: int) -> int: ... @overload def g(x: str) -> int: ... When this decorator is applied to an object, the type checker will generate a diagnostic on usage of the deprecated object. No runtime warning is issued. The decorator sets the ``__deprecated__`` attribute on the decorated object to the deprecation message passed to the decorator. If applied to an overload, the decorator must be after the ``@overload`` decorator for the attribute to exist on the overload as returned by ``get_overloads()``. See PEP 702 for details.
Here is the function:
def deprecated(
__msg: str,
*,
category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
stacklevel: int = 1,
) -> typing.Callable[[_T], _T]:
"""Indicate that a class, function or overload is deprecated.
Usage:
@deprecated("Use B instead")
class A:
pass
@deprecated("Use g instead")
def f():
pass
@overload
@deprecated("int support is deprecated")
def g(x: int) -> int: ...
@overload
def g(x: str) -> int: ...
When this decorator is applied to an object, the type checker
will generate a diagnostic on usage of the deprecated object.
No runtime warning is issued. The decorator sets the ``__deprecated__``
attribute on the decorated object to the deprecation message
passed to the decorator. If applied to an overload, the decorator
must be after the ``@overload`` decorator for the attribute to
exist on the overload as returned by ``get_overloads()``.
See PEP 702 for details.
"""
def decorator(__arg: _T) -> _T:
if category is None:
__arg.__deprecated__ = __msg
return __arg
elif isinstance(__arg, type):
original_new = __arg.__new__
has_init = __arg.__init__ is not object.__init__
@functools.wraps(original_new)
def __new__(cls, *args, **kwargs):
warnings.warn(__msg, category=category, stacklevel=stacklevel + 1)
# Mirrors a similar check in object.__new__.
if not has_init and (args or kwargs):
raise TypeError(f"{cls.__name__}() takes no arguments")
if original_new is not object.__new__:
return original_new(cls, *args, **kwargs)
else:
return original_new(cls)
__arg.__new__ = staticmethod(__new__)
__arg.__deprecated__ = __new__.__deprecated__ = __msg
return __arg
elif callable(__arg):
@functools.wraps(__arg)
def wrapper(*args, **kwargs):
warnings.warn(__msg, category=category, stacklevel=stacklevel + 1)
return __arg(*args, **kwargs)
__arg.__deprecated__ = wrapper.__deprecated__ = __msg
return wrapper
else:
raise TypeError(
"@deprecated decorator with non-None category must be applied to "
f"a class or callable, not {__arg!r}"
)
return decorator | Indicate that a class, function or overload is deprecated. Usage: @deprecated("Use B instead") class A: pass @deprecated("Use g instead") def f(): pass @overload @deprecated("int support is deprecated") def g(x: int) -> int: ... @overload def g(x: str) -> int: ... When this decorator is applied to an object, the type checker will generate a diagnostic on usage of the deprecated object. No runtime warning is issued. The decorator sets the ``__deprecated__`` attribute on the decorated object to the deprecation message passed to the decorator. If applied to an overload, the decorator must be after the ``@overload`` decorator for the attribute to exist on the overload as returned by ``get_overloads()``. See PEP 702 for details. |
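A minimal sketch of the decorator above; note that the wrapper in this implementation does call warnings.warn at call time when a category is given:

import warnings
from typing_extensions import deprecated

@deprecated("Use new_f instead")
def f() -> None:
    pass

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    f()

print(f.__deprecated__)             # Use new_f instead
print(caught[0].category.__name__)  # DeprecationWarning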
172,524 | import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
if sys.version_info >= (3, 11):
NamedTuple = typing.NamedTuple
else:
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
def NamedTuple(__typename, __fields=None, **kwargs):
    ...  # body elided in this row
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
def _namedtuple_mro_entries(bases):
assert NamedTuple in bases
return (_NamedTuple,) | null |
172,525 | from collections.abc import Mapping, MutableMapping, Sequence
from urllib.parse import urlsplit
import itertools
import json
import re
import sys
# The files() API used below requires importlib.resources (Python 3.9+);
# this conditional import is elided in the dump.
if sys.version_info >= (3, 9):
    from importlib import resources
else:
    import importlib_resources as resources
The provided code snippet includes necessary dependencies for implementing the `load_schema` function. Write a Python function `def load_schema(name)` to solve the following problem:
Load a schema from ./schemas/``name``.json and return it.
Here is the function:
def load_schema(name):
"""
Load a schema from ./schemas/``name``.json and return it.
"""
path = resources.files(__package__).joinpath(f"schemas/{name}.json")
data = path.read_text(encoding="utf-8")
return json.loads(data) | Load a schema from ./schemas/``name``.json and return it. |
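A hypothetical usage sketch (not part of the row); "draft7" is one of the schema files shipped inside the jsonschema package:

schema = load_schema("draft7")
print(schema["$schema"])  # http://json-schema.org/draft-07/schema#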
172,526 | from collections.abc import Mapping, MutableMapping, Sequence
from urllib.parse import urlsplit
import itertools
import json
import re
import sys
The provided code snippet includes necessary dependencies for implementing the `format_as_index` function. Write a Python function `def format_as_index(container, indices)` to solve the following problem:
Construct a single string containing indexing operations for the indices. For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"] Arguments: container (str): A word to use for the thing being indexed indices (sequence): The indices to format.
Here is the function:
def format_as_index(container, indices):
"""
Construct a single string containing indexing operations for the indices.
For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"]
Arguments:
container (str):
A word to use for the thing being indexed
indices (sequence):
The indices to format.
"""
if not indices:
return container
return f"{container}[{']['.join(repr(index) for index in indices)}]" | Construct a single string containing indexing operations for the indices. For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"] Arguments: container (str): A word to use for the thing being indexed indices (sequence): The indices to format. |
172,527 | from json import JSONDecodeError
from textwrap import dedent
import argparse
import json
import sys
import traceback
import warnings
import attr
from jsonschema.exceptions import SchemaError
from jsonschema.validators import RefResolver, validator_for
def _resolve_name_with_default(name):
if "." not in name:
name = "jsonschema." + name
return resolve_name(name) | null |
172,528 | from json import JSONDecodeError
from textwrap import dedent
import argparse
import json
import sys
import traceback
import warnings
import attr
from jsonschema.exceptions import SchemaError
from jsonschema.validators import RefResolver, validator_for
parser = argparse.ArgumentParser(
description="JSON Schema Validation CLI",
)
parser.add_argument(
"-i", "--instance",
action="append",
dest="instances",
help="""
a path to a JSON instance (i.e. filename.json) to validate (may
be specified multiple times). If no instances are provided via this
option, one will be expected on standard input.
""",
)
parser.add_argument(
"-F", "--error-format",
help="""
the format to use for each validation error message, specified
in a form suitable for str.format. This string will be passed
one formatted object named 'error' for each ValidationError.
Only provide this option when using --output=plain, which is the
default. If this argument is unprovided and --output=plain is
used, a simple default representation will be used.
""",
)
parser.add_argument(
"-o", "--output",
choices=["plain", "pretty"],
default="plain",
help="""
an output format to use. 'plain' (default) will produce minimal
text with one line for each error, while 'pretty' will produce
more detailed human-readable output on multiple lines.
""",
)
parser.add_argument(
"-V", "--validator",
type=_resolve_name_with_default,
help="""
the fully qualified object name of a validator to use, or, for
validators that are registered with jsonschema, simply the name
of the class.
""",
)
parser.add_argument(
"--base-uri",
help="""
a base URI to assign to the provided schema, even if it does not
declare one (via e.g. $id). This option can be used if you wish to
resolve relative references to a particular URI (or local path)
""",
)
parser.add_argument(
"--version",
action="version",
version=metadata.version("jsonschema"),
)
parser.add_argument(
"schema",
help="the path to a JSON Schema to validate with (i.e. schema.json)",
)
def parse_args(args):
arguments = vars(parser.parse_args(args=args or ["--help"]))
if arguments["output"] != "plain" and arguments["error_format"]:
raise parser.error(
"--error-format can only be used with --output plain",
)
if arguments["output"] == "plain" and arguments["error_format"] is None:
arguments["error_format"] = "{error.instance}: {error.message}\n"
return arguments | null |
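A small sketch of the parser defaults above (jsonschema.cli is deprecated in recent releases, so importing it may warn):

from jsonschema.cli import parse_args

args = parse_args(["-i", "instance.json", "schema.json"])
print(args["schema"])        # schema.json
print(args["instances"])     # ['instance.json']
print(args["error_format"])  # {error.instance}: {error.message}\n  (plain default)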
172,529 | from json import JSONDecodeError
from textwrap import dedent
import argparse
import json
import sys
import traceback
import warnings
import attr
from jsonschema.exceptions import SchemaError
from jsonschema.validators import RefResolver, validator_for
class _CannotLoadFile(Exception):
pass
class _Outputter:
_formatter = attr.ib()
_stdout = attr.ib()
_stderr = attr.ib()
def from_arguments(cls, arguments, stdout, stderr):
if arguments["output"] == "plain":
formatter = _PlainFormatter(arguments["error_format"])
elif arguments["output"] == "pretty":
formatter = _PrettyFormatter()
return cls(formatter=formatter, stdout=stdout, stderr=stderr)
def load(self, path):
try:
file = open(path)
except FileNotFoundError:
self.filenotfound_error(path=path, exc_info=sys.exc_info())
raise _CannotLoadFile()
with file:
try:
return json.load(file)
except JSONDecodeError:
self.parsing_error(path=path, exc_info=sys.exc_info())
raise _CannotLoadFile()
def filenotfound_error(self, **kwargs):
self._stderr.write(self._formatter.filenotfound_error(**kwargs))
def parsing_error(self, **kwargs):
self._stderr.write(self._formatter.parsing_error(**kwargs))
def validation_error(self, **kwargs):
self._stderr.write(self._formatter.validation_error(**kwargs))
def validation_success(self, **kwargs):
self._stdout.write(self._formatter.validation_success(**kwargs))
def _validate_instance(instance_path, instance, validator, outputter):
invalid = False
for error in validator.iter_errors(instance):
invalid = True
outputter.validation_error(instance_path=instance_path, error=error)
if not invalid:
outputter.validation_success(instance_path=instance_path)
return invalid
class SchemaError(_Error):
"""
A schema was invalid under its corresponding metaschema.
"""
_word_for_schema_in_error_message = "metaschema"
_word_for_instance_in_error_message = "schema"
class RefResolver:
"""
Resolve JSON References.
Arguments:
base_uri (str):
The URI of the referring document
referrer:
The actual referring document
store (dict):
A mapping from URIs to documents to cache
cache_remote (bool):
Whether remote refs should be cached after first resolution
handlers (dict):
A mapping from URI schemes to functions that should be used
to retrieve them
urljoin_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of joining
the resolution scope to subscopes.
remote_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of
resolved remote URLs.
Attributes:
cache_remote (bool):
Whether remote refs should be cached after first resolution
"""
def __init__(
self,
base_uri,
referrer,
store=m(),
cache_remote=True,
handlers=(),
urljoin_cache=None,
remote_cache=None,
):
if urljoin_cache is None:
urljoin_cache = lru_cache(1024)(urljoin)
if remote_cache is None:
remote_cache = lru_cache(1024)(self.resolve_from_url)
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self._scopes_stack = [base_uri]
self.store = _utils.URIDict(_store_schema_list())
self.store.update(store)
self.store.update(
(schema["$id"], schema)
for schema in store.values()
if isinstance(schema, Mapping) and "$id" in schema
)
self.store[base_uri] = referrer
self._urljoin_cache = urljoin_cache
self._remote_cache = remote_cache
def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
Arguments:
schema:
the referring schema
Returns:
`RefResolver`
"""
return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs) # noqa: B026, E501
def push_scope(self, scope):
"""
Enter a given sub-scope.
Treats further dereferences as being performed underneath the
given scope.
"""
self._scopes_stack.append(
self._urljoin_cache(self.resolution_scope, scope),
)
def pop_scope(self):
"""
Exit the most recent entered scope.
Treats further dereferences as being performed underneath the
original scope.
Don't call this method more times than `push_scope` has been
called.
"""
try:
self._scopes_stack.pop()
except IndexError:
raise exceptions.RefResolutionError(
"Failed to pop the scope from an empty stack. "
"`pop_scope()` should only be called once for every "
"`push_scope()`",
)
def resolution_scope(self):
"""
Retrieve the current resolution scope.
"""
return self._scopes_stack[-1]
def base_uri(self):
"""
Retrieve the current base URI, not including any fragment.
"""
uri, _ = urldefrag(self.resolution_scope)
return uri
def in_scope(self, scope):
"""
Temporarily enter the given scope for the duration of the context.
.. deprecated:: v4.0.0
"""
warnings.warn(
"jsonschema.RefResolver.in_scope is deprecated and will be "
"removed in a future release.",
DeprecationWarning,
stacklevel=3,
)
self.push_scope(scope)
try:
yield
finally:
self.pop_scope()
def resolving(self, ref):
"""
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
"""
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
def _find_in_referrer(self, key):
return self._get_subschemas_cache()[key]
def _get_subschemas_cache(self):
cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS}
for keyword, subschema in _search_schema(
self.referrer, _match_subschema_keywords,
):
cache[keyword].append(subschema)
return cache
def _find_in_subschemas(self, url):
subschemas = self._get_subschemas_cache()["$id"]
if not subschemas:
return None
uri, fragment = urldefrag(url)
for subschema in subschemas:
target_uri = self._urljoin_cache(
self.resolution_scope, subschema["$id"],
)
if target_uri.rstrip("/") == uri.rstrip("/"):
if fragment:
subschema = self.resolve_fragment(subschema, fragment)
self.store[url] = subschema
return url, subschema
return None
def resolve(self, ref):
"""
Resolve the given reference.
"""
url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/")
match = self._find_in_subschemas(url)
if match is not None:
return match
return url, self._remote_cache(url)
def resolve_from_url(self, url):
"""
Resolve the given URL.
"""
url, fragment = urldefrag(url)
if not url:
url = self.base_uri
try:
document = self.store[url]
except KeyError:
try:
document = self.resolve_remote(url)
except Exception as exc:
raise exceptions.RefResolutionError(exc)
return self.resolve_fragment(document, fragment)
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
Arguments:
document:
The referent document
fragment (str):
a URI fragment to resolve within it
"""
fragment = fragment.lstrip("/")
if not fragment:
return document
if document is self.referrer:
find = self._find_in_referrer
else:
def find(key):
yield from _search_schema(document, _match_keyword(key))
for keyword in ["$anchor", "$dynamicAnchor"]:
for subschema in find(keyword):
if fragment == subschema[keyword]:
return subschema
for keyword in ["id", "$id"]:
for subschema in find(keyword):
if "#" + fragment == subschema[keyword]:
return subschema
# Resolve via path
parts = unquote(fragment).split("/") if fragment else []
for part in parts:
part = part.replace("~1", "/").replace("~0", "~")
if isinstance(document, Sequence):
# Array indexes should be turned into integers
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except (TypeError, LookupError):
raise exceptions.RefResolutionError(
f"Unresolvable JSON pointer: {fragment!r}",
)
return document
def resolve_remote(self, uri):
"""
Resolve a remote ``uri``.
If called directly, does not check the store first, but after
retrieving the document at the specified URI it will be saved in
the store if :attr:`cache_remote` is True.
.. note::
If the requests_ library is present, ``jsonschema`` will use it to
request the remote ``uri``, so that the correct encoding is
detected and used.
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
Arguments:
uri (str):
The URI to resolve
Returns:
The retrieved document
.. _requests: https://pypi.org/project/requests/
"""
try:
import requests
except ImportError:
requests = None
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
elif scheme in ["http", "https"] and requests:
# Requests has support for detecting the correct encoding of
# json over http
result = requests.get(uri).json()
else:
# Otherwise, pass off to urllib and assume utf-8
with urlopen(uri) as url:
result = json.loads(url.read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
def validator_for(schema, default=_UNSET):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :kw:`$schema` keyword that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.abc.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class
cannot be determined.
If unprovided, the default is to return the latest supported
draft.
"""
DefaultValidator = _LATEST_VERSION if default is _UNSET else default
if schema is True or schema is False or "$schema" not in schema:
return DefaultValidator
if schema["$schema"] not in _META_SCHEMAS:
if default is _UNSET:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return _META_SCHEMAS.get(schema["$schema"], DefaultValidator)
def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin):
outputter = _Outputter.from_arguments(
arguments=arguments,
stdout=stdout,
stderr=stderr,
)
try:
schema = outputter.load(arguments["schema"])
except _CannotLoadFile:
return 1
if arguments["validator"] is None:
arguments["validator"] = validator_for(schema)
try:
arguments["validator"].check_schema(schema)
except SchemaError as error:
outputter.validation_error(
instance_path=arguments["schema"],
error=error,
)
return 1
if arguments["instances"]:
load, instances = outputter.load, arguments["instances"]
else:
def load(_):
try:
return json.load(stdin)
except JSONDecodeError:
outputter.parsing_error(
path="<stdin>", exc_info=sys.exc_info(),
)
raise _CannotLoadFile()
instances = ["<stdin>"]
resolver = RefResolver(
base_uri=arguments["base_uri"],
referrer=schema,
) if arguments["base_uri"] is not None else None
validator = arguments["validator"](schema, resolver=resolver)
exit_code = 0
for each in instances:
try:
instance = load(each)
except _CannotLoadFile:
exit_code = 1
else:
exit_code |= _validate_instance(
instance_path=each,
instance=instance,
validator=validator,
outputter=outputter,
)
return exit_code | null |
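A small sketch of the RFC 6901 pointer handling in resolve_fragment above (RefResolver is deprecated in recent jsonschema releases, so this may emit a DeprecationWarning):

from jsonschema.validators import RefResolver

document = {"definitions": {"with~tilde": {"type": "string"}}}
resolver = RefResolver.from_schema(document)

# "~0" escapes a literal "~" per RFC 6901, as handled in resolve_fragment
print(resolver.resolve_fragment(document, "/definitions/with~0tilde"))
# {'type': 'string'}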
172,530 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
_F = typing.TypeVar("_F", bound=_FormatCheckCallable)
class FormatChecker:
"""
A ``format`` property checker.
JSON Schema does not mandate that the ``format`` property actually do any
validation. If validation is desired however, instances of this class can
be hooked into validators to enable format validation.
`FormatChecker` objects always return ``True`` when asked about
formats that they do not know how to validate.
To add a check for a custom format use the `FormatChecker.checks`
decorator.
Arguments:
formats:
The known formats to validate. This argument can be used to
limit which formats will be used during validation.
"""
checkers: dict[
str,
tuple[_FormatCheckCallable, _RaisesType],
] = {}
def __init__(self, formats: typing.Iterable[str] | None = None):
if formats is None:
formats = self.checkers.keys()
self.checkers = {k: self.checkers[k] for k in formats}
def __repr__(self):
return "<FormatChecker checkers={}>".format(sorted(self.checkers))
def checks(
self, format: str, raises: _RaisesType = (),
) -> typing.Callable[[_F], _F]:
"""
Register a decorated function as validating a new format.
Arguments:
format:
The format that the decorated function will check.
raises:
The exception(s) raised by the decorated function when an
invalid instance is found.
The exception object will be accessible as the
`jsonschema.exceptions.ValidationError.cause` attribute of the
resulting validation error.
"""
def _checks(func: _F) -> _F:
self.checkers[format] = (func, raises)
return func
return _checks
def cls_checks(
cls, format: str, raises: _RaisesType = (),
) -> typing.Callable[[_F], _F]:
warnings.warn(
(
"FormatChecker.cls_checks is deprecated. Call "
"FormatChecker.checks on a specific FormatChecker instance "
"instead."
),
DeprecationWarning,
stacklevel=2,
)
return cls._cls_checks(format=format, raises=raises)
    @classmethod
    def _cls_checks(
        cls, format: str, raises: _RaisesType = (),
    ) -> typing.Callable[[_F], _F]:
def _checks(func: _F) -> _F:
cls.checkers[format] = (func, raises)
return func
return _checks
def check(self, instance: object, format: str) -> None:
"""
Check whether the instance conforms to the given format.
Arguments:
instance (*any primitive type*, i.e. str, number, bool):
The instance to check
format:
The format that instance should conform to
Raises:
FormatError:
if the instance does not conform to ``format``
"""
if format not in self.checkers:
return
func, raises = self.checkers[format]
result, cause = None, None
try:
result = func(instance)
except raises as e:
cause = e
if not result:
raise FormatError(f"{instance!r} is not a {format!r}", cause=cause)
def conforms(self, instance: object, format: str) -> bool:
"""
Check whether the instance conforms to the given format.
Arguments:
instance (*any primitive type*, i.e. str, number, bool):
The instance to check
format:
The format that instance should conform to
Returns:
bool: whether it conformed
"""
try:
self.check(instance, format)
except FormatError:
return False
else:
return True
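# Usage sketch (the "even-length" format name and helper below are
# illustrative, not part of the original module): register a checker on an
# instance, then query it.  Unknown formats always conform.
_example_checker = FormatChecker()

@_example_checker.checks("even-length", raises=ValueError)
def _is_even_length(value):
    return len(value) % 2 == 0

assert _example_checker.conforms("ab", "even-length")
assert not _example_checker.conforms("abc", "even-length")
assert _example_checker.conforms("anything", "no-such-format")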
draft3_format_checker = FormatChecker()
draft4_format_checker = FormatChecker()
draft6_format_checker = FormatChecker()
draft7_format_checker = FormatChecker()
draft201909_format_checker = FormatChecker()
draft202012_format_checker = FormatChecker()

_draft_checkers: dict[str, FormatChecker] = dict(
    draft3=draft3_format_checker,
    draft4=draft4_format_checker,
    draft6=draft6_format_checker,
    draft7=draft7_format_checker,
    draft201909=draft201909_format_checker,
    draft202012=draft202012_format_checker,
)
def _checks_drafts(
name=None,
draft3=None,
draft4=None,
draft6=None,
draft7=None,
draft201909=None,
draft202012=None,
raises=(),
) -> typing.Callable[[_F], _F]:
draft3 = draft3 or name
draft4 = draft4 or name
draft6 = draft6 or name
draft7 = draft7 or name
draft201909 = draft201909 or name
draft202012 = draft202012 or name
def wrap(func: _F) -> _F:
if draft3:
func = _draft_checkers["draft3"].checks(draft3, raises)(func)
if draft4:
func = _draft_checkers["draft4"].checks(draft4, raises)(func)
if draft6:
func = _draft_checkers["draft6"].checks(draft6, raises)(func)
if draft7:
func = _draft_checkers["draft7"].checks(draft7, raises)(func)
if draft201909:
func = _draft_checkers["draft201909"].checks(draft201909, raises)(
func,
)
if draft202012:
func = _draft_checkers["draft202012"].checks(draft202012, raises)(
func,
)
# Oy. This is bad global state, but relied upon for now, until
# deprecation. See #519 and test_format_checkers_come_with_defaults
FormatChecker._cls_checks(
draft202012 or draft201909 or draft7 or draft6 or draft4 or draft3,
raises,
)(func)
return func
return wrap | null |
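# Sketch of how this module wires one checker into every draft's
# FormatChecker via the decorator above (the "example" format name and
# function are hypothetical):
@_checks_drafts(name="example", raises=ValueError)
def is_example(instance: object) -> bool:
    if not isinstance(instance, str):
        return True
    return instance == "example"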
172,531 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def is_email(instance: object) -> bool:
if not isinstance(instance, str):
return True
return "@" in instance | null |
172,532 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def is_ipv4(instance: object) -> bool:
if not isinstance(instance, str):
return True
return bool(ipaddress.IPv4Address(instance)) | null |
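# ipaddress.IPv4Address raises AddressValueError (a ValueError subclass) on
# bad input; registered with a matching ``raises=``, that exception is
# reported as non-conformance instead of propagating.  Illustrative:
assert is_ipv4("192.168.0.1")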
172,533 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def is_ipv6(instance: object) -> bool:
if not isinstance(instance, str):
return True
address = ipaddress.IPv6Address(instance)
return not getattr(address, "scope_id", "") | null |
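# Zone IDs such as "fe80::1%eth0" parse on Python 3.9+, but RFC 2373 "ipv6"
# syntax has no zone part, hence the getattr() rejection; on older Pythons
# the same input raises ValueError instead.  Illustrative:
assert is_ipv6("::1")
assert is_ipv6(None)  # non-strings vacuously conform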
172,534 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
from fqdn import FQDN  # dependency of is_host_name; import dropped from this snippet
def is_host_name(instance: object) -> bool:
if not isinstance(instance, str):
return True
return FQDN(instance).is_valid | null |
172,535 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
import idna  # dependency of is_idn_host_name; import dropped from this snippet
def is_idn_host_name(instance: object) -> bool:
if not isinstance(instance, str):
return True
idna.encode(instance)
return True | null |
172,536 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def validate_rfc3986(url, rule='URI'):
"""
Validates strings according to RFC3986
:param url: String cointaining URI to validate
:param rule: It could be 'URI' (default) or 'URI_reference'.
:return: True or False
"""
if rule == 'URI':
return URI_RE_COMP.match(url)
elif rule == 'URI_reference':
return URI_REF_RE_COMP.match(url)
else:
raise ValueError('Invalid rule')
def is_uri(instance: object) -> bool:
if not isinstance(instance, str):
return True
return validate_rfc3986(instance, rule="URI") | null |
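# Hedged note: this helper mirrors validate_rfc3986 from the
# rfc3986_validator package, where URI_RE_COMP and URI_REF_RE_COMP are
# module-level patterns compiled from the RFC 3986 ABNF; their definitions
# were dropped from this extracted snippet.  With that package installed:
#
#     from rfc3986_validator import validate_rfc3986
#     bool(validate_rfc3986("https://example.com/a?b#c"))           # True
#     bool(validate_rfc3986("//host/path", rule="URI_reference"))   # True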
172,537 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def validate_rfc3986(url, rule='URI'):
"""
Validates strings according to RFC3986
:param url: String cointaining URI to validate
:param rule: It could be 'URI' (default) or 'URI_reference'.
:return: True or False
"""
if rule == 'URI':
return URI_RE_COMP.match(url)
elif rule == 'URI_reference':
return URI_REF_RE_COMP.match(url)
else:
raise ValueError('Invalid rule')
def is_uri_reference(instance: object) -> bool:
if not isinstance(instance, str):
return True
return validate_rfc3986(instance, rule="URI_reference") | null |
172,538 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339
with suppress(ImportError):
    from webcolors import CSS21_NAMES_TO_HEX
    import webcolors
with suppress(ImportError):
    import jsonpointer
with suppress(ImportError):
    import uri_template
with suppress(ImportError):
    import isoduration
def is_iri(instance: object) -> bool:
if not isinstance(instance, str):
return True
return rfc3987.parse(instance, rule="IRI") | null |
172,539 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339
with suppress(ImportError):
    from webcolors import CSS21_NAMES_TO_HEX
    import webcolors
with suppress(ImportError):
    import jsonpointer
with suppress(ImportError):
    import uri_template
with suppress(ImportError):
    import isoduration
def is_iri_reference(instance: object) -> bool:
if not isinstance(instance, str):
return True
return rfc3987.parse(instance, rule="IRI_reference") | null |
172,540 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339
with suppress(ImportError):
    from webcolors import CSS21_NAMES_TO_HEX
    import webcolors
with suppress(ImportError):
    import jsonpointer
with suppress(ImportError):
    import uri_template
with suppress(ImportError):
    import isoduration
def is_uri(instance: object) -> bool:
if not isinstance(instance, str):
return True
return rfc3987.parse(instance, rule="URI") | null |
172,541 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339
with suppress(ImportError):
    from webcolors import CSS21_NAMES_TO_HEX
    import webcolors
with suppress(ImportError):
    import jsonpointer
with suppress(ImportError):
    import uri_template
with suppress(ImportError):
    import isoduration
def is_uri_reference(instance: object) -> bool:
if not isinstance(instance, str):
return True
return rfc3987.parse(instance, rule="URI_reference") | null |
172,542 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339

    def is_datetime(instance: object) -> bool:
        if not isinstance(instance, str):
            return True
        return validate_rfc3339(instance.upper())

with suppress(ImportError):
    from webcolors import CSS21_NAMES_TO_HEX
    import webcolors
with suppress(ImportError):
    import jsonpointer
with suppress(ImportError):
    import uri_template
with suppress(ImportError):
    import isoduration
def is_time(instance: object) -> bool:
if not isinstance(instance, str):
return True
return is_datetime("1970-01-01T" + instance) | null |
172,543 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def is_regex(instance: object) -> bool:
if not isinstance(instance, str):
return True
return bool(re.compile(instance)) | null |
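# re.compile raises re.error for a malformed pattern (e.g. "["); registered
# with a matching ``raises=``, that exception marks the value as
# non-conforming rather than crashing validation.  Illustrative:
assert is_regex(r"^a+b*$")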
172,544 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def is_date(instance: object) -> bool:
if not isinstance(instance, str):
return True
return bool(instance.isascii() and datetime.date.fromisoformat(instance)) | null |
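# date.fromisoformat accepts only "YYYY-MM-DD"; the isascii() guard rejects
# lookalike non-ASCII digit characters that the parser may otherwise
# tolerate.  Illustrative:
assert is_date("2023-01-31")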
172,545 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
def is_draft3_time(instance: object) -> bool:
if not isinstance(instance, str):
return True
return bool(datetime.datetime.strptime(instance, "%H:%M:%S")) | null |
172,546 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339
with suppress(ImportError):
    from webcolors import CSS21_NAMES_TO_HEX
    import webcolors

    def is_css_color_code(instance: object) -> bool:
        return webcolors.normalize_hex(instance)

with suppress(ImportError):
    import jsonpointer
with suppress(ImportError):
    import uri_template
with suppress(ImportError):
    import isoduration
def is_css21_color(instance: object) -> bool:
if (
not isinstance(instance, str)
or instance.lower() in CSS21_NAMES_TO_HEX
):
return True
return is_css_color_code(instance) | null |
172,547 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
import jsonpointer  # dependency of is_json_pointer; import dropped from this snippet
def is_json_pointer(instance: object) -> bool:
if not isinstance(instance, str):
return True
return bool(jsonpointer.JsonPointer(instance)) | null |
172,548 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
import jsonpointer  # dependency of is_relative_json_pointer; import dropped from this snippet
def is_relative_json_pointer(instance: object) -> bool:
# Definition taken from:
# https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
if not isinstance(instance, str):
return True
if not instance:
return False
non_negative_integer, rest = [], ""
for i, character in enumerate(instance):
if character.isdigit():
            # digits with a leading "0" are not allowed: only a lone "0" may
            # start with zero, so a second digit after an initial "0" fails
            if i > 0 and instance[0] == "0":
                return False
non_negative_integer.append(character)
continue
if not non_negative_integer:
return False
rest = instance[i:]
break
return (rest == "#") or bool(jsonpointer.JsonPointer(rest)) | null |
172,549 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
import uri_template  # dependency of is_uri_template; import dropped from this snippet
def is_uri_template(instance: object) -> bool:
if not isinstance(instance, str):
return True
return uri_template.validate(instance) | null |
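# uri_template.validate returns a bool; an illustrative RFC 6570 template:
assert is_uri_template("http://example.com/~{username}/")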
172,550 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
import isoduration  # dependency of is_duration; import dropped from this snippet
def is_duration(instance: object) -> bool:
if not isinstance(instance, str):
return True
isoduration.parse_duration(instance)
# FIXME: See bolsote/isoduration#25 and bolsote/isoduration#21
return instance.endswith(tuple("DMYWHMS")) | null |
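# isoduration parses ISO 8601 durations such as "P1DT12H"; the endswith()
# tail check above works around inputs isoduration accepts too leniently
# (see the FIXME).  Illustrative:
assert is_duration("P1DT12H")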
172,551 | from __future__ import annotations
from contextlib import suppress
from uuid import UUID
import datetime
import ipaddress
import re
import typing
import warnings
from jsonschema.exceptions import FormatError
class UUID:
if sys.version_info >= (3, 7):
def __init__(
self,
hex: Optional[Text] = ...,
bytes: Optional[_Bytes] = ...,
bytes_le: Optional[_Bytes] = ...,
fields: Optional[_FieldsType] = ...,
int: Optional[_Int] = ...,
version: Optional[_Int] = ...,
*,
is_safe: SafeUUID = ...,
) -> None: ...
def is_safe(self) -> SafeUUID: ...
else:
def __init__(
self,
hex: Optional[Text] = ...,
bytes: Optional[_Bytes] = ...,
bytes_le: Optional[_Bytes] = ...,
fields: Optional[_FieldsType] = ...,
int: Optional[_Int] = ...,
version: Optional[_Int] = ...,
) -> None: ...
def bytes(self) -> _Bytes: ...
def bytes_le(self) -> _Bytes: ...
def clock_seq(self) -> _Int: ...
def clock_seq_hi_variant(self) -> _Int: ...
def clock_seq_low(self) -> _Int: ...
def fields(self) -> _FieldsType: ...
def hex(self) -> str: ...
def int(self) -> _Int: ...
def node(self) -> _Int: ...
def time(self) -> _Int: ...
def time_hi_version(self) -> _Int: ...
def time_low(self) -> _Int: ...
def time_mid(self) -> _Int: ...
def urn(self) -> str: ...
def variant(self) -> str: ...
def version(self) -> Optional[_Int]: ...
def __int__(self) -> _Int: ...
if sys.version_info >= (3,):
def __eq__(self, other: Any) -> bool: ...
def __lt__(self, other: Any) -> bool: ...
def __le__(self, other: Any) -> bool: ...
def __gt__(self, other: Any) -> bool: ...
def __ge__(self, other: Any) -> bool: ...
else:
def get_bytes(self) -> _Bytes: ...
def get_bytes_le(self) -> _Bytes: ...
def get_clock_seq(self) -> _Int: ...
def get_clock_seq_hi_variant(self) -> _Int: ...
def get_clock_seq_low(self) -> _Int: ...
def get_fields(self) -> _FieldsType: ...
def get_hex(self) -> str: ...
def get_node(self) -> _Int: ...
def get_time(self) -> _Int: ...
def get_time_hi_version(self) -> _Int: ...
def get_time_low(self) -> _Int: ...
def get_time_mid(self) -> _Int: ...
def get_urn(self) -> str: ...
def get_variant(self) -> str: ...
def get_version(self) -> Optional[_Int]: ...
def __cmp__(self, other: Any) -> _Int: ...
def is_uuid(instance: object) -> bool:
if not isinstance(instance, str):
return True
UUID(instance)
return all(instance[position] == "-" for position in (8, 13, 18, 23)) | null |
172,552 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
def id_of_ignore_ref(property="$id"):
def id_of(schema):
"""
Ignore an ``$id`` sibling of ``$ref`` if it is present.
Otherwise, return the ID of the given schema.
"""
if schema is True or schema is False or "$ref" in schema:
return ""
return schema.get(property, "")
return id_of | null |
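# Illustrative (draft 2019-09 semantics): "$id" is ignored beside "$ref".
id_of = id_of_ignore_ref()
assert id_of({"$id": "urn:example", "$ref": "#/$defs/x"}) == ""
assert id_of({"$id": "urn:example"}) == "urn:example"
assert id_of(True) == ""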
172,553 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
The provided code snippet includes necessary dependencies for implementing the `ignore_ref_siblings` function. Write a Python function `def ignore_ref_siblings(schema)` to solve the following problem:
Ignore siblings of ``$ref`` if it is present. Otherwise, return all keywords. Suitable for use with `create`'s ``applicable_validators`` argument.
Here is the function:
def ignore_ref_siblings(schema):
"""
Ignore siblings of ``$ref`` if it is present.
Otherwise, return all keywords.
Suitable for use with `create`'s ``applicable_validators`` argument.
"""
ref = schema.get("$ref")
if ref is not None:
return [("$ref", ref)]
else:
return schema.items() | Ignore siblings of ``$ref`` if it is present. Otherwise, return all keywords. Suitable for use with `create`'s ``applicable_validators`` argument. |
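# Illustrative: with a "$ref" present, every sibling keyword is dropped.
assert list(ignore_ref_siblings({"$ref": "#/x", "type": "object"})) == [("$ref", "#/x")]
assert dict(ignore_ref_siblings({"type": "object"})) == {"type": "object"}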
172,554 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def dependencies_draft3(validator, dependencies, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, dependency in dependencies.items():
if property not in instance:
continue
if validator.is_type(dependency, "object"):
yield from validator.descend(
instance, dependency, schema_path=property,
)
elif validator.is_type(dependency, "string"):
if dependency not in instance:
message = f"{dependency!r} is a dependency of {property!r}"
yield ValidationError(message)
else:
for each in dependency:
if each not in instance:
message = f"{each!r} is a dependency of {property!r}"
yield ValidationError(message) | null |
172,555 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
The provided code snippet includes necessary dependencies for implementing the `dependencies_draft4_draft6_draft7` function. Write a Python function `def dependencies_draft4_draft6_draft7( validator, dependencies, instance, schema, )` to solve the following problem:
Support for the ``dependencies`` keyword from pre-draft 2019-09. In later drafts, the keyword was split into separate ``dependentRequired`` and ``dependentSchemas`` validators.
Here is the function:
def dependencies_draft4_draft6_draft7(
validator,
dependencies,
instance,
schema,
):
"""
Support for the ``dependencies`` keyword from pre-draft 2019-09.
In later drafts, the keyword was split into separate
``dependentRequired`` and ``dependentSchemas`` validators.
"""
if not validator.is_type(instance, "object"):
return
for property, dependency in dependencies.items():
if property not in instance:
continue
if validator.is_type(dependency, "array"):
for each in dependency:
if each not in instance:
message = f"{each!r} is a dependency of {property!r}"
yield ValidationError(message)
else:
yield from validator.descend(
instance, dependency, schema_path=property,
) | Support for the ``dependencies`` keyword from pre-draft 2019-09. In later drafts, the keyword was split into separate ``dependentRequired`` and ``dependentSchemas`` validators. |
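# Hedged usage sketch (assumes the installed jsonschema package): the array
# form acts like dependentRequired, the schema form like dependentSchemas.
from jsonschema import Draft7Validator

_v = Draft7Validator({"dependencies": {"credit_card": ["billing_address"]}})
assert not _v.is_valid({"credit_card": "4111"})  # dependency missing
assert _v.is_valid({"credit_card": "4111", "billing_address": "x"})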
172,556 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def disallow_draft3(validator, disallow, instance, schema):
for disallowed in _utils.ensure_list(disallow):
if validator.evolve(schema={"type": [disallowed]}).is_valid(instance):
message = f"{disallowed!r} is disallowed for {instance!r}"
yield ValidationError(message) | null |
172,557 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
def extends_draft3(validator, extends, instance, schema):
if validator.is_type(extends, "object"):
yield from validator.descend(instance, extends)
return
for index, subschema in enumerate(extends):
yield from validator.descend(instance, subschema, schema_path=index) | null |
172,558 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
def items_draft3_draft4(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
if validator.is_type(items, "object"):
for index, item in enumerate(instance):
yield from validator.descend(item, items, path=index)
else:
for (index, item), subschema in zip(enumerate(instance), items):
yield from validator.descend(
item, subschema, path=index, schema_path=index,
) | null |
172,559 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
def items_draft6_draft7_draft201909(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
if validator.is_type(items, "array"):
for (index, item), subschema in zip(enumerate(instance), items):
yield from validator.descend(
item, subschema, path=index, schema_path=index,
)
else:
for index, item in enumerate(instance):
yield from validator.descend(item, items, path=index) | null |
172,560 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
def minimum_draft3_draft4(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMinimum", False):
failed = instance <= minimum
cmp = "less than or equal to"
else:
failed = instance < minimum
cmp = "less than"
if failed:
message = f"{instance!r} is {cmp} the minimum of {minimum!r}"
yield ValidationError(message) | null |
172,561 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def maximum_draft3_draft4(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMaximum", False):
failed = instance >= maximum
cmp = "greater than or equal to"
else:
failed = instance > maximum
cmp = "greater than"
if failed:
message = f"{instance!r} is {cmp} the maximum of {maximum!r}"
yield ValidationError(message) | null |
172,562 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def properties_draft3(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in properties.items():
if property in instance:
yield from validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
)
elif subschema.get("required", False):
error = ValidationError(f"{property!r} is a required property")
error._set(
validator="required",
validator_value=subschema["required"],
instance=instance,
schema=schema,
)
error.path.appendleft(property)
error.schema_path.extend([property, "required"])
yield error | null |
172,563 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def type_draft3(validator, types, instance, schema):
types = _utils.ensure_list(types)
all_errors = []
for index, type in enumerate(types):
if validator.is_type(type, "object"):
errors = list(validator.descend(instance, type, schema_path=index))
if not errors:
return
all_errors.extend(errors)
else:
if validator.is_type(instance, type):
return
else:
reprs = []
for type in types:
try:
reprs.append(repr(type["name"]))
except Exception:
reprs.append(repr(type))
yield ValidationError(
f"{instance!r} is not of type {', '.join(reprs)}",
context=all_errors,
) | null |
172,564 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def contains_draft6_draft7(validator, contains, instance, schema):
if not validator.is_type(instance, "array"):
return
if not any(
validator.evolve(schema=contains).is_valid(element)
for element in instance
):
yield ValidationError(
f"None of {instance!r} are valid under the given schema",
) | null |
172,565 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
def recursiveRef(validator, recursiveRef, instance, schema):
lookup_url, target = validator.resolver.resolution_scope, validator.schema
for each in reversed(validator.resolver._scopes_stack[1:]):
lookup_url, next_target = validator.resolver.resolve(each)
if next_target.get("$recursiveAnchor"):
target = next_target
else:
break
fragment = recursiveRef.lstrip("#")
subschema = validator.resolver.resolve_fragment(target, fragment)
# FIXME: This is gutted (and not calling .descend) because it can trigger
# recursion errors, so there's a bug here. Re-enable the tests to
# see it.
subschema
return [] | null |
172,566 | from jsonschema import _utils
from jsonschema.exceptions import ValidationError
def find_evaluated_item_indexes_by_schema(validator, instance, schema):
"""
Get all indexes of items that get evaluated under the current schema
Covers all keywords related to unevaluatedItems: items, prefixItems, if,
then, else, contains, unevaluatedItems, allOf, oneOf, anyOf
"""
if validator.is_type(schema, "boolean"):
return []
evaluated_indexes = []
if "additionalItems" in schema:
return list(range(0, len(instance)))
if "$ref" in schema:
scope, resolved = validator.resolver.resolve(schema["$ref"])
validator.resolver.push_scope(scope)
try:
evaluated_indexes += find_evaluated_item_indexes_by_schema(
validator, instance, resolved,
)
finally:
validator.resolver.pop_scope()
if "items" in schema:
if validator.is_type(schema["items"], "object"):
return list(range(0, len(instance)))
evaluated_indexes += list(range(0, len(schema["items"])))
if "if" in schema:
if validator.evolve(schema=schema["if"]).is_valid(instance):
evaluated_indexes += find_evaluated_item_indexes_by_schema(
validator, instance, schema["if"],
)
if "then" in schema:
evaluated_indexes += find_evaluated_item_indexes_by_schema(
validator, instance, schema["then"],
)
else:
if "else" in schema:
evaluated_indexes += find_evaluated_item_indexes_by_schema(
validator, instance, schema["else"],
)
for keyword in ["contains", "unevaluatedItems"]:
if keyword in schema:
for k, v in enumerate(instance):
if validator.evolve(schema=schema[keyword]).is_valid(v):
evaluated_indexes.append(k)
for keyword in ["allOf", "oneOf", "anyOf"]:
if keyword in schema:
for subschema in schema[keyword]:
errs = list(validator.descend(instance, subschema))
if not errs:
evaluated_indexes += find_evaluated_item_indexes_by_schema(
validator, instance, subschema,
)
return evaluated_indexes
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def unevaluatedItems_draft2019(validator, unevaluatedItems, instance, schema):
if not validator.is_type(instance, "array"):
return
evaluated_item_indexes = find_evaluated_item_indexes_by_schema(
validator, instance, schema,
)
unevaluated_items = [
item for index, item in enumerate(instance)
if index not in evaluated_item_indexes
]
if unevaluated_items:
error = "Unevaluated items are not allowed (%s %s unexpected)"
yield ValidationError(error % _utils.extras_msg(unevaluated_items)) | null |
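# Hedged usage sketch (assumes the installed jsonschema package): with the
# draft 2019-09 tuple form of "items", only index 0 is evaluated, so a
# second, unevaluated item trips unevaluatedItems=False.
from jsonschema import Draft201909Validator

_v = Draft201909Validator({"items": [{"type": "integer"}],
                           "unevaluatedItems": False})
assert _v.is_valid([1])
assert not _v.is_valid([1, "extra"])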
172,567 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
def items(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
prefix = len(schema.get("prefixItems", []))
total = len(instance)
if items is False and total > prefix:
message = f"Expected at most {prefix} items, but found {total}"
yield ValidationError(message)
else:
for index in range(prefix, total):
yield from validator.descend(
instance=instance[index],
schema=items,
path=index,
)
def pattern(validator, patrn, instance, schema):
if (
validator.is_type(instance, "string")
and not re.search(patrn, instance)
):
yield ValidationError(f"{instance!r} does not match {patrn!r}")
def patternProperties(validator, patternProperties, instance, schema):
if not validator.is_type(instance, "object"):
return
for pattern, subschema in patternProperties.items():
for k, v in instance.items():
if re.search(pattern, k):
yield from validator.descend(
v, subschema, path=k, schema_path=pattern,
) | null |
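# Hedged usage sketch (assumes the installed jsonschema package): in draft
# 2020-12, "items" applies only to positions past "prefixItems".
from jsonschema import Draft202012Validator

_v = Draft202012Validator({"prefixItems": [{"type": "string"}],
                           "items": {"type": "integer"}})
assert _v.is_valid(["head", 1, 2])
assert not _v.is_valid(["head", "not-an-int"])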
172,568 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
def propertyNames(validator, propertyNames, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in instance:
yield from validator.descend(instance=property, schema=propertyNames) | null |
172,569 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
def find_additional_properties(instance, schema):
"""
Return the set of additional properties for the given ``instance``.
Weeds out properties that should have been validated by ``properties`` and
/ or ``patternProperties``.
Assumes ``instance`` is dict-like already.
"""
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
for property in instance:
if property not in properties:
if patterns and re.search(patterns, property):
continue
yield property
def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
if len(extras) == 1:
verb = "was"
else:
verb = "were"
return ", ".join(repr(extra) for extra in sorted(extras)), verb
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def additionalProperties(validator, aP, instance, schema):
if not validator.is_type(instance, "object"):
return
extras = set(find_additional_properties(instance, schema))
if validator.is_type(aP, "object"):
for extra in extras:
yield from validator.descend(instance[extra], aP, path=extra)
elif not aP and extras:
if "patternProperties" in schema:
if len(extras) == 1:
verb = "does"
else:
verb = "do"
joined = ", ".join(repr(each) for each in sorted(extras))
patterns = ", ".join(
repr(each) for each in sorted(schema["patternProperties"])
)
error = f"{joined} {verb} not match any of the regexes: {patterns}"
yield ValidationError(error)
else:
error = "Additional properties are not allowed (%s %s unexpected)"
yield ValidationError(error % extras_msg(extras)) | null |
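# Hedged usage sketch (assumes the installed jsonschema package): names that
# match "properties" or a "patternProperties" regex are not "additional".
from jsonschema import Draft202012Validator

_v = Draft202012Validator({
    "properties": {"name": {}},
    "patternProperties": {"^x-": {}},
    "additionalProperties": False,
})
assert _v.is_valid({"name": "ok", "x-custom": 1})
assert not _v.is_valid({"unexpected": 1})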
172,570 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
if len(extras) == 1:
verb = "was"
else:
verb = "were"
return ", ".join(repr(extra) for extra in sorted(extras)), verb
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def additionalItems(validator, aI, instance, schema):
if (
not validator.is_type(instance, "array")
or validator.is_type(schema.get("items", {}), "object")
):
return
len_items = len(schema.get("items", []))
if validator.is_type(aI, "object"):
for index, item in enumerate(instance[len_items:], start=len_items):
yield from validator.descend(item, aI, path=index)
elif not aI and len(instance) > len(schema.get("items", [])):
error = "Additional items are not allowed (%s %s unexpected)"
yield ValidationError(
error % extras_msg(instance[len(schema.get("items", [])):]),
) | null |
172,571 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
def equal(one, two):
class ValidationError(_Error):
def const(validator, const, instance, schema):
if not equal(instance, const):
yield ValidationError(f"{const!r} was expected") | null |
172,572 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def contains(validator, contains, instance, schema):
if not validator.is_type(instance, "array"):
return
matches = 0
min_contains = schema.get("minContains", 1)
max_contains = schema.get("maxContains", len(instance))
for each in instance:
if validator.evolve(schema=contains).is_valid(each):
matches += 1
if matches > max_contains:
yield ValidationError(
"Too many items match the given schema "
f"(expected at most {max_contains})",
validator="maxContains",
validator_value=max_contains,
)
return
if matches < min_contains:
if not matches:
yield ValidationError(
f"{instance!r} does not contain items "
"matching the given schema",
)
else:
yield ValidationError(
"Too few items match the given schema (expected at least "
f"{min_contains} but only {matches} matched)",
validator="minContains",
validator_value=min_contains,
) | null |
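# Hedged usage sketch (assumes the installed jsonschema package): matches are
# counted against minContains (default 1) and maxContains (default len).
from jsonschema import Draft202012Validator

_v = Draft202012Validator({"contains": {"type": "integer"},
                           "minContains": 2, "maxContains": 3})
assert not _v.is_valid([1, "a"])    # one match; at least two required
assert _v.is_valid([1, 2, "a", 3])  # three matches; within [2, 3]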
172,573 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def exclusiveMinimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance <= minimum:
yield ValidationError(
f"{instance!r} is less than or equal to "
f"the minimum of {minimum!r}",
) | null |
172,574 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
class ValidationError(_Error):
def exclusiveMaximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance >= maximum:
yield ValidationError(
f"{instance!r} is greater than or equal "
f"to the maximum of {maximum!r}",
) | null |
172,575 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def minimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance < minimum:
message = f"{instance!r} is less than the minimum of {minimum!r}"
yield ValidationError(message) | null |
172,576 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
def maximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance > maximum:
message = f"{instance!r} is greater than the maximum of {maximum!r}"
yield ValidationError(message) | null |
172,577 | from fractions import Fraction
from urllib.parse import urldefrag, urljoin
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
find_evaluated_item_indexes_by_schema,
find_evaluated_property_keys_by_schema,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
class Fraction(Rational):
def __new__(
cls, numerator: Union[int, Rational] = ..., denominator: Optional[Union[int, Rational]] = ..., *, _normalize: bool = ...
) -> Fraction:
def __new__(cls, __value: Union[float, Decimal, str], *, _normalize: bool = ...) -> Fraction:
def from_float(cls, f: float) -> Fraction:
def from_decimal(cls, dec: Decimal) -> Fraction:
def limit_denominator(self, max_denominator: int = ...) -> Fraction:
def as_integer_ratio(self) -> Tuple[int, int]:
def numerator(self) -> int:
def denominator(self) -> int:
def __add__(self, other: Union[int, Fraction]) -> Fraction:
def __add__(self, other: float) -> float:
def __add__(self, other: complex) -> complex:
def __radd__(self, other: Union[int, Fraction]) -> Fraction:
def __radd__(self, other: float) -> float:
def __radd__(self, other: complex) -> complex:
def __sub__(self, other: Union[int, Fraction]) -> Fraction:
def __sub__(self, other: float) -> float:
def __sub__(self, other: complex) -> complex:
def __rsub__(self, other: Union[int, Fraction]) -> Fraction:
def __rsub__(self, other: float) -> float:
def __rsub__(self, other: complex) -> complex:
def __mul__(self, other: Union[int, Fraction]) -> Fraction:
def __mul__(self, other: float) -> float:
def __mul__(self, other: complex) -> complex:
def __rmul__(self, other: Union[int, Fraction]) -> Fraction:
def __rmul__(self, other: float) -> float:
def __rmul__(self, other: complex) -> complex:
def __truediv__(self, other: Union[int, Fraction]) -> Fraction:
def __truediv__(self, other: float) -> float:
def __truediv__(self, other: complex) -> complex:
def __rtruediv__(self, other: Union[int, Fraction]) -> Fraction:
def __rtruediv__(self, other: float) -> float:
def __rtruediv__(self, other: complex) -> complex:
def __div__(self, other: Union[int, Fraction]) -> Fraction:
def __div__(self, other: float) -> float:
def __div__(self, other: complex) -> complex:
def __rdiv__(self, other: Union[int, Fraction]) -> Fraction:
def __rdiv__(self, other: float) -> float:
def __rdiv__(self, other: complex) -> complex:
def __floordiv__(self, other: Union[int, Fraction]) -> int:
def __floordiv__(self, other: float) -> float:
def __rfloordiv__(self, other: Union[int, Fraction]) -> int:
def __rfloordiv__(self, other: float) -> float:
def __mod__(self, other: Union[int, Fraction]) -> Fraction:
def __mod__(self, other: float) -> float:
def __rmod__(self, other: Union[int, Fraction]) -> Fraction:
def __rmod__(self, other: float) -> float:
def __divmod__(self, other: Union[int, Fraction]) -> Tuple[int, Fraction]:
def __divmod__(self, other: float) -> Tuple[float, Fraction]:
def __rdivmod__(self, other: Union[int, Fraction]) -> Tuple[int, Fraction]:
def __rdivmod__(self, other: float) -> Tuple[float, Fraction]:
def __pow__(self, other: int) -> Fraction:
def __pow__(self, other: Union[float, Fraction]) -> float:
def __pow__(self, other: complex) -> complex:
def __rpow__(self, other: Union[int, float, Fraction]) -> float:
def __rpow__(self, other: complex) -> complex:
def __pos__(self) -> Fraction:
def __neg__(self) -> Fraction:
def __abs__(self) -> Fraction:
def __trunc__(self) -> int:
def __floor__(self) -> int:
def __ceil__(self) -> int:
def __round__(self, ndigits: None = ...) -> int:
def __round__(self, ndigits: int) -> Fraction:
def __hash__(self) -> int:
def __eq__(self, other: object) -> bool:
def __lt__(self, other: _ComparableNum) -> bool:
def __gt__(self, other: _ComparableNum) -> bool:
def __le__(self, other: _ComparableNum) -> bool:
def __ge__(self, other: _ComparableNum) -> bool:
def __bool__(self) -> bool:
def __nonzero__(self) -> bool:
def real(self) -> Fraction:
def imag(self) -> Literal[0]:
def conjugate(self) -> Fraction:
class ValidationError(_Error):
def multipleOf(validator, dB, instance, schema):
if not validator.is_type(instance, "number"):
return
if isinstance(dB, float):
quotient = instance / dB
try:
failed = int(quotient) != quotient
except OverflowError:
# When `instance` is large and `dB` is less than one,
# quotient can overflow to infinity; and then casting to int
# raises an error.
#
# In this case we fall back to Fraction logic, which is
# exact and cannot overflow. The performance is also
# acceptable: we try the fast all-float option first, and
            # we know that Fraction(dB) can have at most a few hundred
# digits in each part. The worst-case slowdown is therefore
# for already-slow enormous integers or Decimals.
failed = (Fraction(instance) / Fraction(dB)).denominator != 1
else:
failed = instance % dB
if failed:
yield ValidationError(f"{instance!r} is not a multiple of {dB}") | null |