index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
|---|---|---|---|---|---|
710,133
|
urllib3_future._collections
|
__delitem__
| null |
def __delitem__(self, key: str) -> None:
    """Remove every stored value for *key* (case-insensitive lookup)."""
    lowered = _lower_wrapper(key)
    del self._container[lowered]
|
(self, key: str) -> NoneType
|
710,134
|
urllib3_future._collections
|
__eq__
| null |
def __eq__(self, other: object) -> bool:
    """Case-insensitive equality against anything a header dict can be built from."""
    constructable = ensure_can_construct_http_header_dict(other)
    if constructable is None:
        return False
    # Normalize both sides to {lowercased-name: merged-value} before comparing.
    normalized_self = {_lower_wrapper(k): v for k, v in self.itermerged()}
    normalized_other = {
        _lower_wrapper(k): v for k, v in type(self)(constructable).itermerged()
    }
    return normalized_self == normalized_other
|
(self, other: object) -> bool
|
710,135
|
urllib3_future._collections
|
__getitem__
| null |
def __getitem__(self, key: str) -> str:
    """Return all values stored for *key*, comma-joined (case-insensitive)."""
    stored = self._container[_lower_wrapper(key)]
    # Slot 0 holds the original-cased header name; the rest are the values.
    return ", ".join(stored[1:])
|
(self, key: str) -> str
|
710,136
|
urllib3_future._collections
|
__init__
| null |
def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):
    """Create the dict, optionally seeding it from *headers* and/or keyword pairs."""
    super().__init__()
    # A plain dict is insertion-ordered on Python 3.7+.
    self._container = {}
    if isinstance(headers, HTTPHeaderDict):
        # Fast path: copy the internal storage of a sibling instance directly.
        self._copy_from(headers)
    elif headers is not None:
        self.extend(headers)
    if kwargs:
        self.extend(kwargs)
|
(self, headers: 'ValidHTTPHeaderSource | None' = None, **kwargs: 'str')
|
710,141
|
urllib3_future._collections
|
__setitem__
| null |
def __setitem__(self, key: str, val: str) -> None:
    """Replace any existing values for *key* with the single value *val*."""
    # avoid a bytes/str comparison by decoding before httplib
    # Entry layout: [original-cased key, value]; lookups use the lowered key.
    self._container[_lower_wrapper(key)] = [key, val]
|
(self, key: str, val: str) -> NoneType
|
710,142
|
urllib3_future._collections
|
_copy_from
| null |
def _copy_from(self, other: HTTPHeaderDict) -> None:
    """Mirror every header entry of *other* into this instance."""
    for key in other:
        # Rebuild the [original-key, value, value, ...] entry layout.
        self._container[_lower_wrapper(key)] = [key, *other.getlist(key)]
|
(self, other: urllib3_future._collections.HTTPHeaderDict) -> NoneType
|
710,143
|
urllib3_future._collections
|
_has_value_for_header
| null |
def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:
    """Return True when *potential_value* is one of the stored values for *header_name*."""
    if header_name not in self:
        return False
    stored = self._container[_lower_wrapper(header_name)]
    # stored[0] is the original-cased key; values start at index 1.
    return potential_value in stored[1:]
|
(self, header_name: str, potential_value: str) -> bool
|
710,144
|
urllib3_future._collections
|
add
|
Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
If this is called with combine=True, instead of adding a new header value
as a distinct item during iteration, this will instead append the value to
any existing header value with a comma. If no existing header value exists
for the key, then the value will simply be added, ignoring the combine parameter.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
>>> list(headers.items())
[('foo', 'bar'), ('foo', 'baz')]
>>> headers.add('foo', 'quz', combine=True)
>>> list(headers.items())
[('foo', 'bar, baz, quz')]
|
def add(self, key: str, val: str, *, combine: bool = False) -> None:
    """Adds a (name, value) pair, doesn't overwrite the value if it already
    exists.
    If this is called with combine=True, instead of adding a new header value
    as a distinct item during iteration, this will instead append the value to
    any existing header value with a comma. If no existing header value exists
    for the key, then the value will simply be added, ignoring the combine parameter.
    >>> headers = HTTPHeaderDict(foo='bar')
    >>> headers.add('Foo', 'baz')
    >>> headers['foo']
    'bar, baz'
    >>> list(headers.items())
    [('foo', 'bar'), ('foo', 'baz')]
    >>> headers.add('foo', 'quz', combine=True)
    >>> list(headers.items())
    [('foo', 'bar, baz, quz')]
    """
    lowered = _lower_wrapper(key)
    fresh_entry = [key, val]
    # setdefault keeps the common "no entry yet" case to one dict operation.
    existing = self._container.setdefault(lowered, fresh_entry)
    if existing is fresh_entry:
        # The key was absent: our new entry is now stored, nothing more to do.
        return
    # An entry already exists, so it holds at least one value.
    if combine:
        existing[-1] = existing[-1] + ", " + val
    else:
        existing.append(val)
|
(self, key: str, val: str, *, combine: bool = False) -> NoneType
|
710,150
|
urllib3_future._collections
|
getlist
|
Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist.
|
def getlist(
    self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed
) -> list[str] | _DT:
    """Returns a list of all the values for the named field. Returns an
    empty list if the key doesn't exist."""
    lowered = _lower_wrapper(key)
    if lowered not in self._container:
        # Missing key: hand back the caller's default, or [] when none given.
        if default is _Sentinel.not_passed:
            # _DT is unbound; an empty list satisfies list[str].
            return []
        # _DT is bound; default is an instance of _DT.
        return default
    # Skip the leading original-cased key; the remaining items are the values.
    return self._container[lowered][1:]
|
(self, key: str, default: Union[urllib3_future._collections._Sentinel, ~_DT] = <_Sentinel.not_passed: 1>) -> Union[list[str], ~_DT]
|
710,156
|
urllib3_future._collections
|
iteritems
|
Iterate over all header lines, including duplicate ones.
|
def iteritems(self) -> typing.Iterator[tuple[str, str]]:
    """Yield one (name, value) pair per stored header line, duplicates included."""
    for key in self:
        entry = self._container[_lower_wrapper(key)]
        original_name = entry[0]
        for value in entry[1:]:
            yield original_name, value
|
(self) -> Iterator[tuple[str, str]]
|
710,157
|
urllib3_future._collections
|
itermerged
|
Iterate over all headers, merging duplicate ones together.
|
def itermerged(self) -> typing.Iterator[tuple[str, str]]:
    """Yield (name, comma-joined values) once per distinct header name."""
    for key in self:
        entry = self._container[_lower_wrapper(key)]
        merged_value = ", ".join(entry[1:])
        yield entry[0], merged_value
|
(self) -> Iterator[tuple[str, str]]
|
710,191
|
urllib3_future.connectionpool
|
HTTPSConnectionPool
|
Same as :class:`.HTTPConnectionPool`, but HTTPS.
:class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
|
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.
    :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = "https"
    ConnectionCls: type[HTTPSConnection] = HTTPSConnection

    def __init__(
        self,
        host: str,
        port: int | None = None,
        timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
        maxsize: int = 1,
        block: bool = False,
        headers: typing.Mapping[str, str] | None = None,
        retries: Retry | bool | int | None = None,
        _proxy: Url | None = None,
        _proxy_headers: typing.Mapping[str, str] | None = None,
        key_file: str | None = None,
        cert_file: str | None = None,
        cert_reqs: int | str | None = None,
        key_password: str | None = None,
        ca_certs: str | None = None,
        ssl_version: int | str | None = None,
        ssl_minimum_version: ssl.TLSVersion | None = None,
        ssl_maximum_version: ssl.TLSVersion | None = None,
        assert_hostname: str | Literal[False] | None = None,
        assert_fingerprint: str | None = None,
        ca_cert_dir: str | None = None,
        ca_cert_data: None | str | bytes = None,
        cert_data: str | bytes | None = None,
        key_data: str | bytes | None = None,
        **conn_kw: typing.Any,
    ) -> None:
        # Pool-level arguments are handled by the HTTP base class; the
        # TLS-specific ones are kept on the pool and forwarded to every new
        # connection built in _new_conn().
        super().__init__(
            host,
            port,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw,
        )
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ca_cert_data = ca_cert_data
        self.cert_data = cert_data
        self.key_data = key_data
        self.ssl_version = ssl_version
        self.ssl_minimum_version = ssl_minimum_version
        self.ssl_maximum_version = ssl_maximum_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_proxy(self, conn: HTTPSConnection) -> None:  # type: ignore[override]
        """Establishes a tunnel connection through HTTP CONNECT."""
        # Tunnel scheme follows the proxy's own scheme, not the destination's.
        if self.proxy and self.proxy.scheme == "https":
            tunnel_scheme = "https"
        else:
            tunnel_scheme = "http"
        conn.set_tunnel(
            scheme=tunnel_scheme,
            host=self._tunnel_host,
            port=self.port,
            headers=self.proxy_headers,
        )
        conn.connect()

    def _new_conn(self, *, heb_timeout: Timeout | None = None) -> HTTPSConnection:
        """
        Return a fresh :class:`urllib3.connection.HTTPConnection`.

        When ``self.happy_eyeballs`` is truthy, several candidate connections
        (one per resolved address, IPv6/IPv4 interleaved) race each other and
        the first successful connect wins; otherwise a single connection is
        built the classic way.
        """
        if self.pool is None:
            raise ClosedPoolError(self, "Pool is closed")
        self.num_connections += 1
        log.debug(
            "Starting new HTTPS connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "443",
        )
        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:  # type: ignore[comparison-overlap]
            raise ImportError(
                "Can't connect to HTTPS URL because the SSL module is not available."
            )
        # When going through a proxy, the TCP connection targets the proxy.
        actual_host: str = self.host
        actual_port = self.port
        if self.proxy is not None and self.proxy.host is not None:
            actual_host = self.proxy.host
            actual_port = self.proxy.port
        conn = None
        if self.happy_eyeballs:
            log.debug(
                "Attempting Happy-Eyeball %s:%s",
                self.host,
                self.port or "443",
            )
            # Time the DNS resolution so the winner's metric can be patched below.
            dt_pre_resolve = datetime.now(tz=timezone.utc)
            ip_addresses = self._resolver.getaddrinfo(
                actual_host,
                actual_port,
                socket.AF_UNSPEC
                if "socket_family" not in self.conn_kw
                else self.conn_kw["socket_family"],
                socket.SOCK_STREAM,
                quic_upgrade_via_dns_rr=True,
            )
            delta_post_resolve = datetime.now(tz=timezone.utc) - dt_pre_resolve
            target_pqc = {}
            if (
                "preemptive_quic_cache" in self.conn_kw
                and self.conn_kw["preemptive_quic_cache"] is not None
            ):
                target_pqc = self.conn_kw["preemptive_quic_cache"]
            # A SOCK_DGRAM entry signals QUIC support advertised via DNS RR;
            # remember it so future connections may attempt HTTP/3 eagerly.
            if any(_[1] == socket.SOCK_DGRAM for _ in ip_addresses):
                if (self.host, self.port) not in target_pqc:
                    target_pqc[(self.host, self.port)] = (self.host, self.port)
            if len(ip_addresses) > 1:
                ipv6_addresses = []
                ipv4_addresses = []
                for ip_address in ip_addresses:
                    if ip_address[0] == socket.AF_INET6:
                        ipv6_addresses.append(ip_address)
                    else:
                        ipv4_addresses.append(ip_address)
                if ipv4_addresses and ipv6_addresses:
                    log.debug(
                        "Happy-Eyeball Dual-Stack %s:%s",
                        self.host,
                        self.port or "443",
                    )
                    # Interleave v6/v4 candidates so neither family starves.
                    intermediary_addresses = []
                    for ipv6_entry, ipv4_entry in zip_longest(
                        ipv6_addresses, ipv4_addresses
                    ):
                        if ipv6_entry:
                            intermediary_addresses.append(ipv6_entry)
                        if ipv4_entry:
                            intermediary_addresses.append(ipv4_entry)
                    ip_addresses = intermediary_addresses
                else:
                    log.debug(
                        "Happy-Eyeball Single-Stack %s:%s",
                        self.host,
                        self.port or "443",
                    )
            challengers = []
            # happy_eyeballs may be True (default of 4 racers) or an int count.
            max_task = (
                4 if isinstance(self.happy_eyeballs, bool) else self.happy_eyeballs
            )
            if heb_timeout is None:
                heb_timeout = self.timeout
            # Fall back to a 0.4s connect budget when none is configured.
            override_timeout = (
                heb_timeout.connect_timeout
                if heb_timeout.connect_timeout is not None
                and isinstance(heb_timeout.connect_timeout, (float, int))
                else 0.4
            )
            for ip_address in ip_addresses[:max_task]:
                conn_kw = self.conn_kw.copy()
                target_solo_addr = (
                    f"[{ip_address[-1][0]}]"
                    if ip_address[0] == socket.AF_INET6
                    else ip_address[-1][0]
                )
                # Pin each challenger to exactly one resolved address via an
                # in-memory resolver so connects don't re-resolve.
                conn_kw["resolver"] = ResolverDescription.from_url(
                    f"in-memory://default?hosts={self.host}:{target_solo_addr}"
                ).new()
                conn_kw["socket_family"] = ip_address[0]
                conn_kw["preemptive_quic_cache"] = target_pqc
                challengers.append(
                    self.ConnectionCls(
                        host=actual_host,
                        port=actual_port,
                        timeout=override_timeout,
                        cert_file=self.cert_file,
                        key_file=self.key_file,
                        key_password=self.key_password,
                        cert_reqs=self.cert_reqs,
                        ca_certs=self.ca_certs,
                        ca_cert_dir=self.ca_cert_dir,
                        ca_cert_data=self.ca_cert_data,
                        assert_hostname=self.assert_hostname,
                        assert_fingerprint=self.assert_fingerprint,
                        ssl_version=self.ssl_version,
                        ssl_minimum_version=self.ssl_minimum_version,
                        ssl_maximum_version=self.ssl_maximum_version,
                        cert_data=self.cert_data,
                        key_data=self.key_data,
                        **conn_kw,
                    )
                )
            event = threading.Event()
            winning_task: Future[None] | None = None
            completed_count: int = 0

            def _happy_eyeballs_completed(t: Future[None]) -> None:
                # First error-free connect wins; if every task fails we also
                # release the waiter so the error path can run.
                nonlocal winning_task, event, completed_count
                if winning_task is None and t.exception() is None:
                    winning_task = t
                    event.set()
                    return
                completed_count += 1
                if completed_count >= len(challengers):
                    event.set()

            tpe = ThreadPoolExecutor(max_workers=max_task)
            tasks: list[Future[None]] = []
            for challenger in challengers:
                task = tpe.submit(challenger.connect)
                task.add_done_callback(_happy_eyeballs_completed)
                tasks.append(task)
            event.wait()
            for task in tasks:
                if task == winning_task:
                    continue
                # NOTE(review): Future.cancel() is a no-op for a task that is
                # already running; losing in-flight connects appear to be left
                # to finish on the executor (shutdown below does not wait).
                # Confirm this is intended.
                if task.running():
                    task.cancel()
                else:
                    challengers[tasks.index(task)].close()
            if winning_task is None:
                within_delay_msg: str = (
                    f" within {override_timeout}s" if override_timeout else ""
                )
                raise NewConnectionError(
                    challengers[0],
                    f"Failed to establish a new connection: No suitable address to connect to using Happy Eyeballs algorithm for {actual_host}:{actual_port}{within_delay_msg}",
                ) from tasks[0].exception()
            conn = challengers[tasks.index(winning_task)]
            # we have to replace the resolution latency metric
            if conn.conn_info:
                conn.conn_info.resolution_latency = delta_post_resolve
            # Don't block on losing connection attempts still in flight.
            tpe.shutdown(wait=False)
        else:
            log.debug(
                "Happy-Eyeball Ineligible %s:%s",
                self.host,
                self.port or "443",
            )
        if conn is None:
            # Classic single-connection path (happy eyeballs disabled).
            conn = self.ConnectionCls(
                host=actual_host,
                port=actual_port,
                timeout=self.timeout.connect_timeout,
                cert_file=self.cert_file,
                key_file=self.key_file,
                key_password=self.key_password,
                cert_reqs=self.cert_reqs,
                ca_certs=self.ca_certs,
                ca_cert_dir=self.ca_cert_dir,
                ca_cert_data=self.ca_cert_data,
                assert_hostname=self.assert_hostname,
                assert_fingerprint=self.assert_fingerprint,
                ssl_version=self.ssl_version,
                ssl_minimum_version=self.ssl_minimum_version,
                ssl_maximum_version=self.ssl_maximum_version,
                cert_data=self.cert_data,
                key_data=self.key_data,
                **self.conn_kw,
            )
        self.pool.put(conn, immediately_unavailable=True)
        return conn

    def _validate_conn(self, conn: HTTPConnection) -> None:
        """
        Called right before a request is made, after the socket is created.
        """
        super()._validate_conn(conn)
        # Force connect early to allow us to validate the connection.
        if conn.is_closed:
            conn.connect()
        if not conn.is_verified:
            warnings.warn(
                (
                    f"Unverified HTTPS request is being made to host '{conn.host}'. "
                    "Adding certificate verification is strongly advised. See: "
                    "https://urllib3future.readthedocs.io/en/latest/advanced-usage.html"
                    "#tls-warnings"
                ),
                InsecureRequestWarning,
            )
|
(host: 'str', port: 'int | None' = None, timeout: '_TYPE_TIMEOUT | None' = <_TYPE_DEFAULT.token: -1>, maxsize: 'int' = 1, block: 'bool' = False, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | bool | int | None' = None, _proxy: 'Url | None' = None, _proxy_headers: 'typing.Mapping[str, str] | None' = None, key_file: 'str | None' = None, cert_file: 'str | None' = None, cert_reqs: 'int | str | None' = None, key_password: 'str | None' = None, ca_certs: 'str | None' = None, ssl_version: 'int | str | None' = None, ssl_minimum_version: 'ssl.TLSVersion | None' = None, ssl_maximum_version: 'ssl.TLSVersion | None' = None, assert_hostname: 'str | Literal[False] | None' = None, assert_fingerprint: 'str | None' = None, ca_cert_dir: 'str | None' = None, ca_cert_data: 'None | str | bytes' = None, cert_data: 'str | bytes | None' = None, key_data: 'str | bytes | None' = None, **conn_kw: 'typing.Any') -> 'None'
|
710,199
|
urllib3_future.connectionpool
|
_new_conn
|
Return a fresh :class:`urllib3.connection.HTTPConnection`.
|
def _new_conn(self, *, heb_timeout: Timeout | None = None) -> HTTPSConnection:
    """
    Return a fresh :class:`urllib3.connection.HTTPConnection`.

    With ``self.happy_eyeballs`` truthy, several candidate connections (one
    per resolved address, IPv6/IPv4 interleaved) race and the first
    successful connect wins; otherwise a single connection is built.
    """
    if self.pool is None:
        raise ClosedPoolError(self, "Pool is closed")
    self.num_connections += 1
    log.debug(
        "Starting new HTTPS connection (%d): %s:%s",
        self.num_connections,
        self.host,
        self.port or "443",
    )
    if not self.ConnectionCls or self.ConnectionCls is DummyConnection:  # type: ignore[comparison-overlap]
        raise ImportError(
            "Can't connect to HTTPS URL because the SSL module is not available."
        )
    # When going through a proxy, the TCP connection targets the proxy.
    actual_host: str = self.host
    actual_port = self.port
    if self.proxy is not None and self.proxy.host is not None:
        actual_host = self.proxy.host
        actual_port = self.proxy.port
    conn = None
    if self.happy_eyeballs:
        log.debug(
            "Attempting Happy-Eyeball %s:%s",
            self.host,
            self.port or "443",
        )
        # Time the DNS resolution so the winner's metric can be patched below.
        dt_pre_resolve = datetime.now(tz=timezone.utc)
        ip_addresses = self._resolver.getaddrinfo(
            actual_host,
            actual_port,
            socket.AF_UNSPEC
            if "socket_family" not in self.conn_kw
            else self.conn_kw["socket_family"],
            socket.SOCK_STREAM,
            quic_upgrade_via_dns_rr=True,
        )
        delta_post_resolve = datetime.now(tz=timezone.utc) - dt_pre_resolve
        target_pqc = {}
        if (
            "preemptive_quic_cache" in self.conn_kw
            and self.conn_kw["preemptive_quic_cache"] is not None
        ):
            target_pqc = self.conn_kw["preemptive_quic_cache"]
        # A SOCK_DGRAM entry signals QUIC support advertised via DNS RR.
        if any(_[1] == socket.SOCK_DGRAM for _ in ip_addresses):
            if (self.host, self.port) not in target_pqc:
                target_pqc[(self.host, self.port)] = (self.host, self.port)
        if len(ip_addresses) > 1:
            ipv6_addresses = []
            ipv4_addresses = []
            for ip_address in ip_addresses:
                if ip_address[0] == socket.AF_INET6:
                    ipv6_addresses.append(ip_address)
                else:
                    ipv4_addresses.append(ip_address)
            if ipv4_addresses and ipv6_addresses:
                log.debug(
                    "Happy-Eyeball Dual-Stack %s:%s",
                    self.host,
                    self.port or "443",
                )
                # Interleave v6/v4 candidates so neither family starves.
                intermediary_addresses = []
                for ipv6_entry, ipv4_entry in zip_longest(
                    ipv6_addresses, ipv4_addresses
                ):
                    if ipv6_entry:
                        intermediary_addresses.append(ipv6_entry)
                    if ipv4_entry:
                        intermediary_addresses.append(ipv4_entry)
                ip_addresses = intermediary_addresses
            else:
                log.debug(
                    "Happy-Eyeball Single-Stack %s:%s",
                    self.host,
                    self.port or "443",
                )
        challengers = []
        # happy_eyeballs may be True (default of 4 racers) or an int count.
        max_task = (
            4 if isinstance(self.happy_eyeballs, bool) else self.happy_eyeballs
        )
        if heb_timeout is None:
            heb_timeout = self.timeout
        # Fall back to a 0.4s connect budget when none is configured.
        override_timeout = (
            heb_timeout.connect_timeout
            if heb_timeout.connect_timeout is not None
            and isinstance(heb_timeout.connect_timeout, (float, int))
            else 0.4
        )
        for ip_address in ip_addresses[:max_task]:
            conn_kw = self.conn_kw.copy()
            target_solo_addr = (
                f"[{ip_address[-1][0]}]"
                if ip_address[0] == socket.AF_INET6
                else ip_address[-1][0]
            )
            # Pin each challenger to one resolved address via an in-memory
            # resolver so its connect doesn't re-resolve.
            conn_kw["resolver"] = ResolverDescription.from_url(
                f"in-memory://default?hosts={self.host}:{target_solo_addr}"
            ).new()
            conn_kw["socket_family"] = ip_address[0]
            conn_kw["preemptive_quic_cache"] = target_pqc
            challengers.append(
                self.ConnectionCls(
                    host=actual_host,
                    port=actual_port,
                    timeout=override_timeout,
                    cert_file=self.cert_file,
                    key_file=self.key_file,
                    key_password=self.key_password,
                    cert_reqs=self.cert_reqs,
                    ca_certs=self.ca_certs,
                    ca_cert_dir=self.ca_cert_dir,
                    ca_cert_data=self.ca_cert_data,
                    assert_hostname=self.assert_hostname,
                    assert_fingerprint=self.assert_fingerprint,
                    ssl_version=self.ssl_version,
                    ssl_minimum_version=self.ssl_minimum_version,
                    ssl_maximum_version=self.ssl_maximum_version,
                    cert_data=self.cert_data,
                    key_data=self.key_data,
                    **conn_kw,
                )
            )
        event = threading.Event()
        winning_task: Future[None] | None = None
        completed_count: int = 0

        def _happy_eyeballs_completed(t: Future[None]) -> None:
            # First error-free connect wins; if all tasks fail we also release
            # the waiter so the error path can run.
            nonlocal winning_task, event, completed_count
            if winning_task is None and t.exception() is None:
                winning_task = t
                event.set()
                return
            completed_count += 1
            if completed_count >= len(challengers):
                event.set()

        tpe = ThreadPoolExecutor(max_workers=max_task)
        tasks: list[Future[None]] = []
        for challenger in challengers:
            task = tpe.submit(challenger.connect)
            task.add_done_callback(_happy_eyeballs_completed)
            tasks.append(task)
        event.wait()
        for task in tasks:
            if task == winning_task:
                continue
            # NOTE(review): Future.cancel() has no effect on an already-running
            # task; losing in-flight connects appear to be left to finish on
            # the executor (shutdown below does not wait). Confirm intended.
            if task.running():
                task.cancel()
            else:
                challengers[tasks.index(task)].close()
        if winning_task is None:
            within_delay_msg: str = (
                f" within {override_timeout}s" if override_timeout else ""
            )
            raise NewConnectionError(
                challengers[0],
                f"Failed to establish a new connection: No suitable address to connect to using Happy Eyeballs algorithm for {actual_host}:{actual_port}{within_delay_msg}",
            ) from tasks[0].exception()
        conn = challengers[tasks.index(winning_task)]
        # we have to replace the resolution latency metric
        if conn.conn_info:
            conn.conn_info.resolution_latency = delta_post_resolve
        # Don't block on losing connection attempts still in flight.
        tpe.shutdown(wait=False)
    else:
        log.debug(
            "Happy-Eyeball Ineligible %s:%s",
            self.host,
            self.port or "443",
        )
    if conn is None:
        # Classic single-connection path (happy eyeballs disabled).
        conn = self.ConnectionCls(
            host=actual_host,
            port=actual_port,
            timeout=self.timeout.connect_timeout,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_password=self.key_password,
            cert_reqs=self.cert_reqs,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            assert_hostname=self.assert_hostname,
            assert_fingerprint=self.assert_fingerprint,
            ssl_version=self.ssl_version,
            ssl_minimum_version=self.ssl_minimum_version,
            ssl_maximum_version=self.ssl_maximum_version,
            cert_data=self.cert_data,
            key_data=self.key_data,
            **self.conn_kw,
        )
    self.pool.put(conn, immediately_unavailable=True)
    return conn
|
(self, *, heb_timeout: Optional[urllib3_future.util.timeout.Timeout] = None) -> urllib3_future.connection.HTTPSConnection
|
710,203
|
urllib3_future.connectionpool
|
_validate_conn
|
Called right before a request is made, after the socket is created.
|
def _validate_conn(self, conn: HTTPConnection) -> None:
    """
    Called right before a request is made, after the socket is created.
    """
    super()._validate_conn(conn)
    # Connect eagerly so certificate validation happens now, not mid-request.
    if conn.is_closed:
        conn.connect()
    if conn.is_verified:
        return
    warnings.warn(
        (
            f"Unverified HTTPS request is being made to host '{conn.host}'. "
            "Adding certificate verification is strongly advised. See: "
            "https://urllib3future.readthedocs.io/en/latest/advanced-usage.html"
            "#tls-warnings"
        ),
        InsecureRequestWarning,
    )
|
(self, conn: urllib3_future.connection.HTTPConnection) -> NoneType
|
710,211
|
urllib3_future.backend._base
|
HttpVersion
|
Describe possible SVN protocols that can be supported.
|
class HttpVersion(str, enum.Enum):
    """Describe possible SVN protocols that can be supported."""

    h11 = "HTTP/1.1"
    # "HTTP/2.0" / "HTTP/3.0" (rather than the canonical "HTTP/2" / "HTTP/3")
    # keep the spelling aligned with http.client's integer version scheme:
    # 9 -> 11 -> 20 -> 30.
    h2 = "HTTP/2.0"
    h3 = "HTTP/3.0"
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
710,212
|
urllib3_future.poolmanager
|
PoolManager
|
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example:
.. code-block:: python
import urllib3
http = urllib3.PoolManager(num_pools=2)
resp1 = http.request("GET", "https://google.com/")
resp2 = http.request("GET", "https://google.com/mail")
resp3 = http.request("GET", "https://yahoo.com/")
print(len(http.pools))
# 2
|
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example:
.. code-block:: python
import urllib3
http = urllib3.PoolManager(num_pools=2)
resp1 = http.request("GET", "https://google.com/")
resp2 = http.request("GET", "https://google.com/mail")
resp3 = http.request("GET", "https://yahoo.com/")
print(len(http.pools))
# 2
"""
proxy: Url | None = None
proxy_config: ProxyConfig | None = None
def __init__(
    self,
    num_pools: int = 10,
    headers: typing.Mapping[str, str] | None = None,
    preemptive_quic_cache: QuicPreemptiveCacheType | None = None,
    resolver: ResolverDescription
    | list[ResolverDescription]
    | str
    | list[str]
    | BaseResolver
    | None = None,
    **connection_pool_kw: typing.Any,
) -> None:
    """Initialize the pool store and normalize the resolver configuration."""
    super().__init__(headers)
    self.connection_pool_kw = connection_pool_kw
    self._num_pools = num_pools
    self.pools: TrafficPolice[HTTPConnectionPool] = TrafficPolice(
        num_pools, concurrency=True
    )
    # Locally set the pool classes and keys so other PoolManagers can
    # override them.
    self.pool_classes_by_scheme = pool_classes_by_scheme
    self.key_fn_by_scheme = key_fn_by_scheme.copy()
    self._preemptive_quic_cache = preemptive_quic_cache
    # Only close the resolver in clear() when this manager created it itself.
    self._own_resolver = not isinstance(resolver, BaseResolver)
    # Normalize the accepted `resolver` shapes (None / url string /
    # single description) into a list of ResolverDescription.
    if resolver is None:
        resolver = [ResolverDescription(ProtocolResolver.SYSTEM)]
    elif isinstance(resolver, str):
        resolver = [ResolverDescription.from_url(resolver)]
    elif isinstance(resolver, ResolverDescription):
        resolver = [resolver]
    self._resolvers: list[ResolverDescription] = []
    if not isinstance(resolver, BaseResolver):
        can_resolve_localhost: bool = False
        for resolver_description in resolver:
            # List items may themselves be url strings or descriptions.
            if isinstance(resolver_description, str):
                self._resolvers.append(
                    ResolverDescription.from_url(resolver_description)
                )
                if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
                    can_resolve_localhost = True
                continue
            self._resolvers.append(resolver_description)
            if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
                can_resolve_localhost = True
        # Guarantee "localhost" still resolves when no system resolver is set.
        if not can_resolve_localhost:
            self._resolvers.append(
                ResolverDescription.from_url("system://default?hosts=localhost")
            )
        #: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
        for rd in self._resolvers:
            if "ca_cert_data" in connection_pool_kw:
                if "ca_cert_data" not in rd:
                    rd["ca_cert_data"] = connection_pool_kw["ca_cert_data"]
            if "ca_cert_dir" in connection_pool_kw:
                if "ca_cert_dir" not in rd:
                    rd["ca_cert_dir"] = connection_pool_kw["ca_cert_dir"]
            if "ca_certs" in connection_pool_kw:
                if "ca_certs" not in rd:
                    rd["ca_certs"] = connection_pool_kw["ca_certs"]
    # A pre-built BaseResolver is used as-is; otherwise the descriptions
    # are instantiated and combined behind a single ManyResolver.
    self._resolver: BaseResolver = (
        ManyResolver(*[r.new() for r in self._resolvers])
        if not isinstance(resolver, BaseResolver)
        else resolver
    )
def __enter__(self: _SelfT) -> _SelfT:
    """Support ``with PoolManager(...) as http:`` usage."""
    return self
def __exit__(
    self,
    exc_type: type[BaseException] | None,
    exc_val: BaseException | None,
    exc_tb: TracebackType | None,
) -> Literal[False]:
    """Close every pooled connection on context exit."""
    self.clear()
    # Return False to re-raise any potential exceptions
    return False
def _new_pool(
    self,
    scheme: str,
    host: str,
    port: int,
    request_context: dict[str, typing.Any] | None = None,
) -> HTTPConnectionPool:
    """
    Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
    any additional pool keyword arguments.
    If ``request_context`` is provided, it is provided as keyword arguments
    to the pool class used. This method is used to actually create the
    connection pools handed out by :meth:`connection_from_url` and
    companion methods. It is intended to be overridden for customization.
    """
    pool_cls: type[HTTPConnectionPool] = self.pool_classes_by_scheme[scheme]
    context = (
        self.connection_pool_kw.copy()
        if request_context is None
        else request_context
    )
    # Default blocksize to DEFAULT_BLOCKSIZE if missing or explicitly None.
    if context.get("blocksize") is None:
        context["blocksize"] = DEFAULT_BLOCKSIZE
    # scheme/host/port are passed positionally below; drop them from kwargs.
    for positional in ("scheme", "host", "port"):
        context.pop(positional, None)
    if scheme == "http":
        # TLS-only options make no sense for a plain HTTP pool.
        for ssl_kw in SSL_KEYWORDS:
            context.pop(ssl_kw, None)
    context["preemptive_quic_cache"] = self._preemptive_quic_cache
    # Swap in a fresh resolver if the shared one has been closed.
    if not self._resolver.is_available():
        self._resolver = self._resolver.recycle()
    context["resolver"] = self._resolver
    # By default, each HttpPool can hold up to num_pools connections.
    context.setdefault("maxsize", self._num_pools)
    return pool_cls(host, port, **context)
def clear(self) -> None:
    """
    Empty our store of pools and direct them all to close.
    This will not affect in-flight connections, but they will not be
    re-used after completion.
    """
    self.pools.clear()
    # Only tear down the resolver when this manager created it itself.
    if not self._own_resolver:
        return
    if self._resolver.is_available():
        self._resolver.close()
def connection_from_host(
    self,
    host: str | None,
    port: int | None = None,
    scheme: str | None = "http",
    pool_kwargs: dict[str, typing.Any] | None = None,
) -> HTTPConnectionPool:
    """
    Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
    If ``port`` isn't given, it will be derived from the ``scheme`` using
    ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
    provided, it is merged with the instance's ``connection_pool_kw``
    variable and used to create the new connection pool, if one is
    needed.
    """
    if not host:
        raise LocationValueError("No host specified.")
    context = self._merge_pool_kwargs(pool_kwargs)
    effective_scheme = scheme or "http"
    context["scheme"] = effective_scheme
    # Derive the default port from the scheme when none is supplied.
    context["port"] = port if port else port_by_scheme.get(effective_scheme.lower())
    context["host"] = host
    return self.connection_from_context(context)
def connection_from_context(
    self, request_context: dict[str, typing.Any]
) -> HTTPConnectionPool:
    """
    Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
    ``request_context`` must at least contain the ``scheme`` key and its
    value must be a key in ``key_fn_by_scheme`` instance variable.
    """
    # The legacy "strict" keyword is silently dropped.
    request_context.pop("strict", None)
    scheme = request_context["scheme"].lower()
    key_constructor = self.key_fn_by_scheme.get(scheme)
    if not key_constructor:
        raise URLSchemeUnknown(scheme)
    pool_key = key_constructor(request_context)
    if self._preemptive_quic_cache is not None:
        request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
    return self.connection_from_pool_key(pool_key, request_context=request_context)
def connection_from_pool_key(
    self, pool_key: PoolKey, request_context: dict[str, typing.Any]
) -> HTTPConnectionPool:
    """
    Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
    ``pool_key`` should be a namedtuple that only contains immutable
    objects. At a minimum it must have the ``scheme``, ``host``, and
    ``port`` fields.
    """
    if self.pools.busy:
        self.pools.release()
    # Reuse an already-open pool whenever the key matches.
    existing = self.pools.locate(pool_key, block=False)
    if existing:
        return existing
    # No match: build a fresh ConnectionPool of the desired type.
    new_pool = self._new_pool(
        request_context["scheme"],
        request_context["host"],
        request_context["port"],
        request_context=request_context,
    )
    self.pools.put(new_pool, pool_key, immediately_unavailable=True)
    return new_pool
def connection_from_url(
    self, url: str, pool_kwargs: dict[str, typing.Any] | None = None
) -> HTTPConnectionPool:
    """
    Similar to :func:`urllib3.connectionpool.connection_from_url`.
    If ``pool_kwargs`` is not provided and a new pool needs to be
    constructed, ``self.connection_pool_kw`` is used to initialize
    the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
    is provided, it is used instead. Note that if a new pool does not
    need to be created for the request, the provided ``pool_kwargs`` are
    not used.
    """
    parsed = parse_url(url)
    return self.connection_from_host(
        parsed.host, port=parsed.port, scheme=parsed.scheme, pool_kwargs=pool_kwargs
    )
def _merge_pool_kwargs(
    self, override: dict[str, typing.Any] | None
) -> dict[str, typing.Any]:
    """
    Merge a dictionary of override values for self.connection_pool_kw.

    This does not modify self.connection_pool_kw and returns a new dict.
    Any keys in the override dictionary with a value of ``None`` are
    removed from the merged dictionary.

    :param override: optional mapping of pool keyword overrides; a value
        of ``None`` means "remove this key from the result".
    :return: a brand-new dict; ``self.connection_pool_kw`` is untouched.
    """
    base_pool_kwargs = self.connection_pool_kw.copy()
    if override:
        # Single pass over the overrides: ``None`` deletes the key,
        # anything else replaces it. (Previously this did an update()
        # followed by a full filtering rebuild of the merged dict —
        # same result, twice the work.)
        for key, value in override.items():
            if value is None:
                base_pool_kwargs.pop(key, None)
            else:
                base_pool_kwargs[key] = value
    return base_pool_kwargs
def _proxy_requires_url_absolute_form(self, parsed_url: Url) -> bool:
    """
    Indicates if the proxy requires the complete destination URL in the
    request. Normally this is only needed when not using an HTTP CONNECT
    tunnel.
    """
    # No proxy configured: the absolute-form question never arises.
    if self.proxy is None:
        return False
    # Forwarding (non-tunneled) proxying needs the absolute URL on the
    # request line; a CONNECT tunnel does not.
    tunneled = connection_requires_http_tunnel(
        self.proxy, self.proxy_config, parsed_url.scheme
    )
    return not tunneled
def get_response(
    self, *, promise: ResponsePromise | None = None
) -> HTTPResponse | None:
    """
    Retrieve the first response available in the pools.
    This method should be called after issuing at least one request with ``multiplexed=True``.
    If none available, return None.

    :param promise: Optionally pin retrieval to one specific in-flight
        request; when omitted, the first available response is returned.
    :raises TypeError: if ``promise`` is not a :class:`ResponsePromise`.
    :raises ValueError: if the given promise is not recognized by any pool.
    """
    if promise is not None and not isinstance(promise, ResponsePromise):
        raise TypeError(
            f"get_response only support ResponsePromise but received {type(promise)} instead. "
            f"This may occur if you expected the remote peer to support multiplexing but did not."
        )
    # Borrow (non-blocking) the pool that owns the promise — or any
    # non-idle pool when no promise was given — and poll it once.
    try:
        with self.pools.borrow(
            promise or ResponsePromise, block=False, not_idle_only=True
        ) as pool:
            response = pool.get_response(promise=promise)
    except UnavailableTraffic:
        # Nothing matches right now: "no response available yet".
        return None
    if promise is not None and response is None:
        raise ValueError(
            "Invoked get_response with promise=... that no connections across pools recognize"
        )
    if response is None:
        return None
    # Recover the originating promise: either the one the caller handed
    # us, or the one recorded on the low-level response object.
    from_promise = None
    if promise:
        from_promise = promise
    else:
        if (
            response._fp
            and hasattr(response._fp, "from_promise")
            and response._fp.from_promise
        ):
            from_promise = response._fp.from_promise
    if from_promise is None:
        raise ValueError(
            "Internal: Unable to identify originating ResponsePromise from a LowLevelResponse"
        )
    # The promise is resolved; drop the pools' bookkeeping for it.
    self.pools.forget(from_promise)
    # Retrieve request ctx
    method = typing.cast(str, from_promise.get_parameter("method"))
    redirect = typing.cast(bool, from_promise.get_parameter("pm_redirect"))
    # Handle redirect?
    if redirect and response.get_redirect_location():
        # Rebuild the original request parameters stashed on the promise
        # so the redirect can be replayed as a new multiplexed request.
        url = typing.cast(str, from_promise.get_parameter("pm_url"))
        body = typing.cast(
            typing.Union[_TYPE_BODY, None], from_promise.get_parameter("body")
        )
        headers = typing.cast(
            typing.Union[HTTPHeaderDict, None],
            from_promise.get_parameter("headers"),
        )
        preload_content = typing.cast(
            bool, from_promise.get_parameter("preload_content")
        )
        decode_content = typing.cast(
            bool, from_promise.get_parameter("decode_content")
        )
        timeout = typing.cast(
            typing.Union[_TYPE_TIMEOUT, None], from_promise.get_parameter("timeout")
        )
        assert_same_host = typing.cast(
            bool, from_promise.get_parameter("assert_same_host")
        )
        pool_timeout = from_promise.get_parameter("pool_timeout")
        response_kw = typing.cast(
            typing.MutableMapping[str, typing.Any],
            from_promise.get_parameter("response_kw"),
        )
        chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
        body_pos = typing.cast(
            _TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
        )
        retries = typing.cast(Retry, from_promise.get_parameter("retries"))
        redirect_location = response.get_redirect_location()
        assert isinstance(redirect_location, str)
        # RFC 7231, Section 6.4.4: a 303 downgrades to GET; the body and
        # its content-describing headers must not be forwarded.
        if response.status == 303:
            method = "GET"
            body = None
            headers = HTTPHeaderDict(headers)
            for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
                headers.discard(should_be_removed_header)
        try:
            retries = retries.increment(
                method, url, response=response, _pool=response._pool
            )
        except MaxRetryError:
            if retries.raise_on_redirect:
                response.drain_conn()
                raise
            return response
        # Free the underlying stream before re-issuing the request.
        response.drain_conn()
        retries.sleep_for_retry(response)
        log.debug("Redirecting %s -> %s", url, redirect_location)
        new_promise = self.urlopen(
            method,
            urljoin(url, redirect_location),
            True,
            body=body,
            headers=headers,
            retries=retries,
            assert_same_host=assert_same_host,
            timeout=timeout,
            pool_timeout=pool_timeout,
            release_conn=True,
            chunked=chunked,
            body_pos=body_pos,
            preload_content=preload_content,
            decode_content=decode_content,
            multiplexed=True,
            **response_kw,
        )
        # Recurse to await the follow-up request; only chase the new
        # specific promise when the caller asked for one.
        return self.get_response(promise=new_promise if promise else None)
    # Check if we should retry the HTTP response.
    has_retry_after = bool(response.headers.get("Retry-After"))
    retries = typing.cast(Retry, from_promise.get_parameter("retries"))
    if retries.is_retry(method, response.status, has_retry_after):
        url = typing.cast(str, from_promise.get_parameter("pm_url"))
        body = typing.cast(
            typing.Union[_TYPE_BODY, None], from_promise.get_parameter("body")
        )
        headers = typing.cast(
            typing.Union[HTTPHeaderDict, None],
            from_promise.get_parameter("headers"),
        )
        preload_content = typing.cast(
            bool, from_promise.get_parameter("preload_content")
        )
        decode_content = typing.cast(
            bool, from_promise.get_parameter("decode_content")
        )
        timeout = typing.cast(
            typing.Union[_TYPE_TIMEOUT, None], from_promise.get_parameter("timeout")
        )
        assert_same_host = typing.cast(
            bool, from_promise.get_parameter("assert_same_host")
        )
        pool_timeout = from_promise.get_parameter("pool_timeout")
        response_kw = typing.cast(
            typing.MutableMapping[str, typing.Any],
            from_promise.get_parameter("response_kw"),
        )
        chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
        body_pos = typing.cast(
            _TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
        )
        redirect_location = response.get_redirect_location()
        # NOTE(review): for typical retried statuses (e.g. 5xx) the
        # response is not a redirect, so get_redirect_location() would
        # return False and this assert would fail — confirm this path is
        # only reachable for redirect statuses in the retry allowlist.
        assert isinstance(redirect_location, str)
        try:
            retries = retries.increment(
                method, url, response=response, _pool=response._pool
            )
        except MaxRetryError:
            if retries.raise_on_status:
                response.drain_conn()
                raise
            return response
        response.drain_conn()
        # Honor Retry-After (or the computed backoff) before replaying.
        retries.sleep(response)
        log.debug("Retry: %s", url)
        new_promise = self.urlopen(
            method,
            urljoin(url, redirect_location),
            True,
            body=body,
            headers=headers,
            retries=retries,
            assert_same_host=assert_same_host,
            timeout=timeout,
            pool_timeout=pool_timeout,
            release_conn=False,
            chunked=chunked,
            body_pos=body_pos,
            preload_content=preload_content,
            decode_content=decode_content,
            multiplexed=True,
            **response_kw,
        )
        return self.get_response(promise=new_promise if promise else None)
    return response
@typing.overload  # type: ignore[override]
def urlopen(
    self,
    method: str,
    url: str,
    redirect: bool = True,
    *,
    multiplexed: Literal[False] = ...,
    **kw: typing.Any,
) -> HTTPResponse:
    # Overload: a non-multiplexed call resolves to a concrete HTTPResponse.
    ...
@typing.overload
def urlopen(
    self,
    method: str,
    url: str,
    redirect: bool = True,
    *,
    multiplexed: Literal[True],
    **kw: typing.Any,
) -> ResponsePromise:
    # Overload: a multiplexed call returns a promise, to be resolved
    # later via get_response().
    ...
def urlopen(
    self, method: str, url: str, redirect: bool = True, **kw: typing.Any
) -> HTTPResponse | ResponsePromise:
    """
    Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
    with custom cross-host redirect logic and only sends the request-uri
    portion of the ``url``.
    The given ``url`` parameter must be absolute, such that an appropriate
    :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.

    :param method: HTTP verb, e.g. ``"GET"``.
    :param url: absolute URL of the target resource.
    :param redirect: automatically follow redirects when True.
    :return: an :class:`HTTPResponse`, or a :class:`ResponsePromise` when
        issued with ``multiplexed=True`` over a capable connection.
    """
    u = parse_url(url)
    if u.scheme is None:
        warnings.warn(
            "URLs without a scheme (ie 'https://') are deprecated and will raise an error "
            "in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs "
            "start with 'https://' or 'http://'. Read more in this issue: "
            "https://github.com/urllib3/urllib3/issues/2920",
            category=DeprecationWarning,
            stacklevel=2,
        )
    conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
    # Redirects are handled at this (pool-manager) level, so the pool
    # itself must not chase them, nor enforce same-host.
    kw["assert_same_host"] = False
    kw["redirect"] = False
    if "headers" not in kw:
        kw["headers"] = self.headers
    # A proxy in forwarding mode needs the absolute URL on the request
    # line; otherwise only the request-uri portion is sent.
    if self._proxy_requires_url_absolute_form(u):
        response = conn.urlopen(method, url, **kw)
    else:
        response = conn.urlopen(method, u.request_uri, **kw)
    self.pools.memorize(response, conn)
    self.pools.release()
    if "multiplexed" in kw and kw["multiplexed"]:
        if isinstance(response, ResponsePromise):
            # Stash the redirect policy and the original URL on the
            # promise so get_response() can replay redirects/retries.
            response.set_parameter("pm_redirect", redirect)
            response.set_parameter("pm_url", url)
            assert isinstance(response, ResponsePromise)
            return response
        # the established connection is not capable of doing multiplexed request
        kw["multiplexed"] = False
    assert isinstance(response, HTTPResponse)
    redirect_location = redirect and response.get_redirect_location()
    if not redirect_location:
        return response
    # Support relative URLs for redirecting.
    redirect_location = urljoin(url, redirect_location)
    # RFC 7231, Section 6.4.4
    if response.status == 303:
        method = "GET"
        kw["body"] = None
        kw["headers"] = HTTPHeaderDict(kw["headers"])
        for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
            kw["headers"].discard(should_be_removed_header)
    retries = kw.get("retries")
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)
    # Strip headers marked as unsafe to forward to the redirected location.
    # Check remove_headers_on_redirect to avoid a potential network call within
    # conn.is_same_host() which may use socket.gethostbyname() in the future.
    if retries.remove_headers_on_redirect and not conn.is_same_host(
        redirect_location
    ):
        new_headers = kw["headers"].copy()
        for header in kw["headers"]:
            if header.lower() in retries.remove_headers_on_redirect:
                new_headers.pop(header, None)
        kw["headers"] = new_headers
    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            response.drain_conn()
            raise
        return response
    kw["retries"] = retries
    kw["redirect"] = redirect
    log.info("Redirecting %s -> %s", url, redirect_location)
    # Release the stream before recursing into the follow-up request.
    response.drain_conn()
    return self.urlopen(method, redirect_location, **kw)  # type: ignore[no-any-return]
|
(num_pools: 'int' = 10, headers: 'typing.Mapping[str, str] | None' = None, preemptive_quic_cache: 'QuicPreemptiveCacheType | None' = None, resolver: 'ResolverDescription | list[ResolverDescription] | str | list[str] | BaseResolver | None' = None, **connection_pool_kw: 'typing.Any') -> 'None'
|
710,215
|
urllib3_future.poolmanager
|
__init__
| null |
def __init__(
    self,
    num_pools: int = 10,
    headers: typing.Mapping[str, str] | None = None,
    preemptive_quic_cache: QuicPreemptiveCacheType | None = None,
    resolver: ResolverDescription
    | list[ResolverDescription]
    | str
    | list[str]
    | BaseResolver
    | None = None,
    **connection_pool_kw: typing.Any,
) -> None:
    """
    :param num_pools: maximum number of pools kept in the store (also the
        default ``maxsize`` handed to each pool).
    :param headers: default headers attached to every request.
    :param preemptive_quic_cache: mutable mapping used to remember which
        endpoints support QUIC so it can be tried preemptively.
    :param resolver: DNS resolver configuration — a single description,
        a list, a URL string (or list of URL strings), an already-built
        resolver, or None for the system resolver.
    :param connection_pool_kw: extra keyword arguments forwarded to every
        :class:`~urllib3.connectionpool.ConnectionPool` constructor.
    """
    super().__init__(headers)
    self.connection_pool_kw = connection_pool_kw
    self._num_pools = num_pools
    self.pools: TrafficPolice[HTTPConnectionPool] = TrafficPolice(
        num_pools, concurrency=True
    )
    # Locally set the pool classes and keys so other PoolManagers can
    # override them.
    self.pool_classes_by_scheme = pool_classes_by_scheme
    self.key_fn_by_scheme = key_fn_by_scheme.copy()
    self._preemptive_quic_cache = preemptive_quic_cache
    # Only close the resolver in clear() if we built it ourselves; an
    # externally supplied BaseResolver stays the caller's responsibility.
    self._own_resolver = not isinstance(resolver, BaseResolver)
    # Normalize the accepted resolver forms to a list of descriptions.
    if resolver is None:
        resolver = [ResolverDescription(ProtocolResolver.SYSTEM)]
    elif isinstance(resolver, str):
        resolver = [ResolverDescription.from_url(resolver)]
    elif isinstance(resolver, ResolverDescription):
        resolver = [resolver]
    self._resolvers: list[ResolverDescription] = []
    if not isinstance(resolver, BaseResolver):
        # Track whether any SYSTEM resolver is present; if none is, add
        # one scoped to "localhost" so local names keep resolving.
        can_resolve_localhost: bool = False
        for resolver_description in resolver:
            if isinstance(resolver_description, str):
                self._resolvers.append(
                    ResolverDescription.from_url(resolver_description)
                )
                if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
                    can_resolve_localhost = True
                continue
            self._resolvers.append(resolver_description)
            if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
                can_resolve_localhost = True
        if not can_resolve_localhost:
            self._resolvers.append(
                ResolverDescription.from_url("system://default?hosts=localhost")
            )
        #: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
        for rd in self._resolvers:
            if "ca_cert_data" in connection_pool_kw:
                if "ca_cert_data" not in rd:
                    rd["ca_cert_data"] = connection_pool_kw["ca_cert_data"]
            if "ca_cert_dir" in connection_pool_kw:
                if "ca_cert_dir" not in rd:
                    rd["ca_cert_dir"] = connection_pool_kw["ca_cert_dir"]
            if "ca_certs" in connection_pool_kw:
                if "ca_certs" not in rd:
                    rd["ca_certs"] = connection_pool_kw["ca_certs"]
    self._resolver: BaseResolver = (
        ManyResolver(*[r.new() for r in self._resolvers])
        if not isinstance(resolver, BaseResolver)
        else resolver
    )
|
(self, num_pools: int = 10, headers: Optional[Mapping[str, str]] = None, preemptive_quic_cache: Optional[MutableMapping[Tuple[str, int], Optional[Tuple[str, int]]]] = None, resolver: Union[urllib3_future.contrib.resolver.factories.ResolverDescription, list[urllib3_future.contrib.resolver.factories.ResolverDescription], str, list[str], urllib3_future.contrib.resolver.protocols.BaseResolver, NoneType] = None, **connection_pool_kw: Any) -> NoneType
|
710,217
|
urllib3_future.poolmanager
|
_new_pool
|
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
|
def _new_pool(
    self,
    scheme: str,
    host: str,
    port: int,
    request_context: dict[str, typing.Any] | None = None,
) -> HTTPConnectionPool:
    """
    Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
    any additional pool keyword arguments.
    If ``request_context`` is provided, it is provided as keyword arguments
    to the pool class used. This method is used to actually create the
    connection pools handed out by :meth:`connection_from_url` and
    companion methods. It is intended to be overridden for customization.
    """
    pool_cls: type[HTTPConnectionPool] = self.pool_classes_by_scheme[scheme]
    if request_context is None:
        request_context = self.connection_pool_kw.copy()
    # Default blocksize to DEFAULT_BLOCKSIZE if missing or explicitly
    # set to 'None' in the request_context.
    if request_context.get("blocksize") is None:
        request_context["blocksize"] = DEFAULT_BLOCKSIZE
    # Although the context has everything necessary to create the pool,
    # this function has historically only used the scheme, host, and port
    # in the positional args. When an API change is acceptable these can
    # be removed.
    for key in ("scheme", "host", "port"):
        request_context.pop(key, None)
    if scheme == "http":
        # TLS-only keywords are meaningless for plain HTTP pools.
        for kw in SSL_KEYWORDS:
            request_context.pop(kw, None)
    request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
    # Recycle the shared resolver if a previous close() made it unusable.
    if not self._resolver.is_available():
        self._resolver = self._resolver.recycle()
    request_context["resolver"] = self._resolver
    # By default, each HttpPool can have up to num_pools connections
    if "maxsize" not in request_context:
        request_context["maxsize"] = self._num_pools
    return pool_cls(host, port, **request_context)
|
(self, scheme: str, host: str, port: int, request_context: Optional[dict[str, Any]] = None) -> urllib3_future.connectionpool.HTTPConnectionPool
|
710,219
|
urllib3_future.poolmanager
|
clear
|
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
|
def clear(self) -> None:
    """
    Drop every pooled connection and direct each pool to close.

    Connections currently in flight keep working; they simply will not
    be handed out again once they complete.
    """
    self.pools.clear()
    # Only tear down the resolver when this manager created it itself
    # and it is still usable; an externally supplied one is left alone.
    should_close_resolver = self._own_resolver and self._resolver.is_available()
    if should_close_resolver:
        self._resolver.close()
|
(self) -> NoneType
|
710,220
|
urllib3_future.poolmanager
|
connection_from_context
|
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
|
def connection_from_context(
    self, request_context: dict[str, typing.Any]
) -> HTTPConnectionPool:
    """
    Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
    ``request_context`` must at least contain the ``scheme`` key and its
    value must be a key in ``key_fn_by_scheme`` instance variable.

    :param request_context: mutable mapping describing the request; it is
        modified in place (``strict`` removed, QUIC cache injected).
    :raises URLSchemeUnknown: if no pool-key constructor exists for the scheme.
    """
    # Idiom fix: drop the removed-in-v2 'strict' flag with a single call
    # instead of the membership-test-then-pop dance.
    request_context.pop("strict", None)
    scheme = request_context["scheme"].lower()
    pool_key_constructor = self.key_fn_by_scheme.get(scheme)
    if not pool_key_constructor:
        raise URLSchemeUnknown(scheme)
    pool_key = pool_key_constructor(request_context)
    # Share the preemptive QUIC knowledge base with the pool being selected.
    if self._preemptive_quic_cache is not None:
        request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
    return self.connection_from_pool_key(pool_key, request_context=request_context)
|
(self, request_context: dict[str, typing.Any]) -> urllib3_future.connectionpool.HTTPConnectionPool
|
710,221
|
urllib3_future.poolmanager
|
connection_from_host
|
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
|
def connection_from_host(
    self,
    host: str | None,
    port: int | None = None,
    scheme: str | None = "http",
    pool_kwargs: dict[str, typing.Any] | None = None,
) -> HTTPConnectionPool:
    """
    Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.

    A missing ``port`` is derived from the ``scheme`` through
    ``urllib3.connectionpool.port_by_scheme``. When ``pool_kwargs`` is
    supplied it is merged over the instance's ``connection_pool_kw`` and
    used for any pool that has to be created.
    """
    if not host:
        raise LocationValueError("No host specified.")

    effective_scheme = scheme or "http"
    request_context = self._merge_pool_kwargs(pool_kwargs)
    request_context["scheme"] = effective_scheme
    # Fall back to the scheme's well-known port when none was given.
    request_context["port"] = port or port_by_scheme.get(effective_scheme.lower())
    request_context["host"] = host

    return self.connection_from_context(request_context)
|
(self, host: str | None, port: Optional[int] = None, scheme: str | None = 'http', pool_kwargs: Optional[dict[str, Any]] = None) -> urllib3_future.connectionpool.HTTPConnectionPool
|
710,222
|
urllib3_future.poolmanager
|
connection_from_pool_key
|
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
|
def connection_from_pool_key(
    self, pool_key: PoolKey, request_context: dict[str, typing.Any]
) -> HTTPConnectionPool:
    """
    Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
    ``pool_key`` should be a namedtuple that only contains immutable
    objects. At a minimum it must have the ``scheme``, ``host``, and
    ``port`` fields.

    :param request_context: mapping supplying at least scheme/host/port,
        forwarded to :meth:`_new_pool` when a new pool must be built.
    """
    # If the scheme, host, or port doesn't match existing open
    # connections, open a new ConnectionPool.
    if self.pools.busy:
        self.pools.release()
    pool = self.pools.locate(pool_key, block=False)
    if pool:
        return pool
    # Make a fresh ConnectionPool of the desired type
    scheme = request_context["scheme"]
    host = request_context["host"]
    port = request_context["port"]
    pool = self._new_pool(scheme, host, port, request_context=request_context)
    # Register the new pool under its key, marked busy until released.
    self.pools.put(pool, pool_key, immediately_unavailable=True)
    return pool
|
(self, pool_key: urllib3_future.poolmanager.PoolKey, request_context: dict[str, typing.Any]) -> urllib3_future.connectionpool.HTTPConnectionPool
|
710,224
|
urllib3_future.poolmanager
|
get_response
|
Retrieve the first response available in the pools.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none available, return None.
|
def get_response(
    self, *, promise: ResponsePromise | None = None
) -> HTTPResponse | None:
    """
    Retrieve the first response available in the pools.
    This method should be called after issuing at least one request with ``multiplexed=True``.
    If none available, return None.

    :param promise: Optionally pin retrieval to one specific in-flight
        request; when omitted, the first available response is returned.
    :raises TypeError: if ``promise`` is not a :class:`ResponsePromise`.
    :raises ValueError: if the given promise is not recognized by any pool.
    """
    if promise is not None and not isinstance(promise, ResponsePromise):
        raise TypeError(
            f"get_response only support ResponsePromise but received {type(promise)} instead. "
            f"This may occur if you expected the remote peer to support multiplexing but did not."
        )
    # Borrow (non-blocking) the pool that owns the promise — or any
    # non-idle pool when no promise was given — and poll it once.
    try:
        with self.pools.borrow(
            promise or ResponsePromise, block=False, not_idle_only=True
        ) as pool:
            response = pool.get_response(promise=promise)
    except UnavailableTraffic:
        # Nothing matches right now: "no response available yet".
        return None
    if promise is not None and response is None:
        raise ValueError(
            "Invoked get_response with promise=... that no connections across pools recognize"
        )
    if response is None:
        return None
    # Recover the originating promise: either the one the caller handed
    # us, or the one recorded on the low-level response object.
    from_promise = None
    if promise:
        from_promise = promise
    else:
        if (
            response._fp
            and hasattr(response._fp, "from_promise")
            and response._fp.from_promise
        ):
            from_promise = response._fp.from_promise
    if from_promise is None:
        raise ValueError(
            "Internal: Unable to identify originating ResponsePromise from a LowLevelResponse"
        )
    # The promise is resolved; drop the pools' bookkeeping for it.
    self.pools.forget(from_promise)
    # Retrieve request ctx
    method = typing.cast(str, from_promise.get_parameter("method"))
    redirect = typing.cast(bool, from_promise.get_parameter("pm_redirect"))
    # Handle redirect?
    if redirect and response.get_redirect_location():
        # Rebuild the original request parameters stashed on the promise
        # so the redirect can be replayed as a new multiplexed request.
        url = typing.cast(str, from_promise.get_parameter("pm_url"))
        body = typing.cast(
            typing.Union[_TYPE_BODY, None], from_promise.get_parameter("body")
        )
        headers = typing.cast(
            typing.Union[HTTPHeaderDict, None],
            from_promise.get_parameter("headers"),
        )
        preload_content = typing.cast(
            bool, from_promise.get_parameter("preload_content")
        )
        decode_content = typing.cast(
            bool, from_promise.get_parameter("decode_content")
        )
        timeout = typing.cast(
            typing.Union[_TYPE_TIMEOUT, None], from_promise.get_parameter("timeout")
        )
        assert_same_host = typing.cast(
            bool, from_promise.get_parameter("assert_same_host")
        )
        pool_timeout = from_promise.get_parameter("pool_timeout")
        response_kw = typing.cast(
            typing.MutableMapping[str, typing.Any],
            from_promise.get_parameter("response_kw"),
        )
        chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
        body_pos = typing.cast(
            _TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
        )
        retries = typing.cast(Retry, from_promise.get_parameter("retries"))
        redirect_location = response.get_redirect_location()
        assert isinstance(redirect_location, str)
        # RFC 7231, Section 6.4.4: a 303 downgrades to GET; the body and
        # its content-describing headers must not be forwarded.
        if response.status == 303:
            method = "GET"
            body = None
            headers = HTTPHeaderDict(headers)
            for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
                headers.discard(should_be_removed_header)
        try:
            retries = retries.increment(
                method, url, response=response, _pool=response._pool
            )
        except MaxRetryError:
            if retries.raise_on_redirect:
                response.drain_conn()
                raise
            return response
        # Free the underlying stream before re-issuing the request.
        response.drain_conn()
        retries.sleep_for_retry(response)
        log.debug("Redirecting %s -> %s", url, redirect_location)
        new_promise = self.urlopen(
            method,
            urljoin(url, redirect_location),
            True,
            body=body,
            headers=headers,
            retries=retries,
            assert_same_host=assert_same_host,
            timeout=timeout,
            pool_timeout=pool_timeout,
            release_conn=True,
            chunked=chunked,
            body_pos=body_pos,
            preload_content=preload_content,
            decode_content=decode_content,
            multiplexed=True,
            **response_kw,
        )
        # Recurse to await the follow-up request; only chase the new
        # specific promise when the caller asked for one.
        return self.get_response(promise=new_promise if promise else None)
    # Check if we should retry the HTTP response.
    has_retry_after = bool(response.headers.get("Retry-After"))
    retries = typing.cast(Retry, from_promise.get_parameter("retries"))
    if retries.is_retry(method, response.status, has_retry_after):
        url = typing.cast(str, from_promise.get_parameter("pm_url"))
        body = typing.cast(
            typing.Union[_TYPE_BODY, None], from_promise.get_parameter("body")
        )
        headers = typing.cast(
            typing.Union[HTTPHeaderDict, None],
            from_promise.get_parameter("headers"),
        )
        preload_content = typing.cast(
            bool, from_promise.get_parameter("preload_content")
        )
        decode_content = typing.cast(
            bool, from_promise.get_parameter("decode_content")
        )
        timeout = typing.cast(
            typing.Union[_TYPE_TIMEOUT, None], from_promise.get_parameter("timeout")
        )
        assert_same_host = typing.cast(
            bool, from_promise.get_parameter("assert_same_host")
        )
        pool_timeout = from_promise.get_parameter("pool_timeout")
        response_kw = typing.cast(
            typing.MutableMapping[str, typing.Any],
            from_promise.get_parameter("response_kw"),
        )
        chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
        body_pos = typing.cast(
            _TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
        )
        redirect_location = response.get_redirect_location()
        # NOTE(review): for typical retried statuses (e.g. 5xx) the
        # response is not a redirect, so get_redirect_location() would
        # return False and this assert would fail — confirm this path is
        # only reachable for redirect statuses in the retry allowlist.
        assert isinstance(redirect_location, str)
        try:
            retries = retries.increment(
                method, url, response=response, _pool=response._pool
            )
        except MaxRetryError:
            if retries.raise_on_status:
                response.drain_conn()
                raise
            return response
        response.drain_conn()
        # Honor Retry-After (or the computed backoff) before replaying.
        retries.sleep(response)
        log.debug("Retry: %s", url)
        new_promise = self.urlopen(
            method,
            urljoin(url, redirect_location),
            True,
            body=body,
            headers=headers,
            retries=retries,
            assert_same_host=assert_same_host,
            timeout=timeout,
            pool_timeout=pool_timeout,
            release_conn=False,
            chunked=chunked,
            body_pos=body_pos,
            preload_content=preload_content,
            decode_content=decode_content,
            multiplexed=True,
            **response_kw,
        )
        return self.get_response(promise=new_promise if promise else None)
    return response
|
(self, *, promise: Optional[urllib3_future.backend._base.ResponsePromise] = None) -> urllib3_future.response.HTTPResponse | None
|
710,228
|
urllib3_future.poolmanager
|
urlopen
|
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
|
def urlopen(
    self, method: str, url: str, redirect: bool = True, **kw: typing.Any
) -> HTTPResponse | ResponsePromise:
    """
    Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
    with custom cross-host redirect logic and only sends the request-uri
    portion of the ``url``.
    The given ``url`` parameter must be absolute, such that an appropriate
    :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.

    :param method: HTTP verb, e.g. ``"GET"``.
    :param url: absolute URL of the target resource.
    :param redirect: automatically follow redirects when True.
    :return: an :class:`HTTPResponse`, or a :class:`ResponsePromise` when
        issued with ``multiplexed=True`` over a capable connection.
    """
    u = parse_url(url)
    if u.scheme is None:
        warnings.warn(
            "URLs without a scheme (ie 'https://') are deprecated and will raise an error "
            "in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs "
            "start with 'https://' or 'http://'. Read more in this issue: "
            "https://github.com/urllib3/urllib3/issues/2920",
            category=DeprecationWarning,
            stacklevel=2,
        )
    conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
    # Redirects are handled at this (pool-manager) level, so the pool
    # itself must not chase them, nor enforce same-host.
    kw["assert_same_host"] = False
    kw["redirect"] = False
    if "headers" not in kw:
        kw["headers"] = self.headers
    # A proxy in forwarding mode needs the absolute URL on the request
    # line; otherwise only the request-uri portion is sent.
    if self._proxy_requires_url_absolute_form(u):
        response = conn.urlopen(method, url, **kw)
    else:
        response = conn.urlopen(method, u.request_uri, **kw)
    self.pools.memorize(response, conn)
    self.pools.release()
    if "multiplexed" in kw and kw["multiplexed"]:
        if isinstance(response, ResponsePromise):
            # Stash the redirect policy and the original URL on the
            # promise so get_response() can replay redirects/retries.
            response.set_parameter("pm_redirect", redirect)
            response.set_parameter("pm_url", url)
            assert isinstance(response, ResponsePromise)
            return response
        # the established connection is not capable of doing multiplexed request
        kw["multiplexed"] = False
    assert isinstance(response, HTTPResponse)
    redirect_location = redirect and response.get_redirect_location()
    if not redirect_location:
        return response
    # Support relative URLs for redirecting.
    redirect_location = urljoin(url, redirect_location)
    # RFC 7231, Section 6.4.4
    if response.status == 303:
        method = "GET"
        kw["body"] = None
        kw["headers"] = HTTPHeaderDict(kw["headers"])
        for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
            kw["headers"].discard(should_be_removed_header)
    retries = kw.get("retries")
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)
    # Strip headers marked as unsafe to forward to the redirected location.
    # Check remove_headers_on_redirect to avoid a potential network call within
    # conn.is_same_host() which may use socket.gethostbyname() in the future.
    if retries.remove_headers_on_redirect and not conn.is_same_host(
        redirect_location
    ):
        new_headers = kw["headers"].copy()
        for header in kw["headers"]:
            if header.lower() in retries.remove_headers_on_redirect:
                new_headers.pop(header, None)
        kw["headers"] = new_headers
    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            response.drain_conn()
            raise
        return response
    kw["retries"] = retries
    kw["redirect"] = redirect
    log.info("Redirecting %s -> %s", url, redirect_location)
    # Release the stream before recursing into the follow-up request.
    response.drain_conn()
    return self.urlopen(method, redirect_location, **kw)  # type: ignore[no-any-return]
|
(self, method: str, url: str, redirect: bool = True, **kw: Any) -> urllib3_future.response.HTTPResponse | urllib3_future.backend._base.ResponsePromise
|
710,229
|
urllib3_future.poolmanager
|
ProxyManager
|
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
:param proxy_ssl_context:
The proxy SSL context is used to establish the TLS connection to the
proxy when using HTTPS proxies.
:param use_forwarding_for_https:
(Defaults to False) If set to True will forward requests to the HTTPS
proxy to be made on behalf of the client instead of creating a TLS
tunnel via the CONNECT method. **Enabling this flag means that request
and response headers and content will be visible from the HTTPS proxy**
whereas tunneling keeps request and response headers and content
private. IP address, target hostname, SNI, and port are always visible
to an HTTPS proxy even when this flag is disabled.
:param proxy_assert_hostname:
The hostname of the certificate to verify against.
:param proxy_assert_fingerprint:
The fingerprint of the certificate to verify against.
Example:
.. code-block:: python
import urllib3
proxy = urllib3.ProxyManager("https://localhost:3128/")
resp1 = proxy.request("GET", "https://google.com/")
resp2 = proxy.request("GET", "https://httpbin.org/")
print(len(proxy.pools))
# 1
resp3 = proxy.request("GET", "https://httpbin.org/")
resp4 = proxy.request("GET", "https://twitter.com/")
print(len(proxy.pools))
# 3
|
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    :param proxy_ssl_context:
        The proxy SSL context is used to establish the TLS connection to the
        proxy when using HTTPS proxies.

    :param use_forwarding_for_https:
        (Defaults to False) If set to True will forward requests to the HTTPS
        proxy to be made on behalf of the client instead of creating a TLS
        tunnel via the CONNECT method. **Enabling this flag means that request
        and response headers and content will be visible from the HTTPS proxy**
        whereas tunneling keeps request and response headers and content
        private. IP address, target hostname, SNI, and port are always visible
        to an HTTPS proxy even when this flag is disabled.

    :param proxy_assert_hostname:
        The hostname of the certificate to verify against.

    :param proxy_assert_fingerprint:
        The fingerprint of the certificate to verify against.

    Example:

    .. code-block:: python

        import urllib3

        proxy = urllib3.ProxyManager("https://localhost:3128/")

        resp1 = proxy.request("GET", "https://google.com/")
        resp2 = proxy.request("GET", "https://httpbin.org/")

        print(len(proxy.pools))
        # 1

        resp3 = proxy.request("GET", "https://httpbin.org/")
        resp4 = proxy.request("GET", "https://twitter.com/")

        print(len(proxy.pools))
        # 3
    """

    def __init__(
        self,
        proxy_url: str,
        num_pools: int = 10,
        headers: typing.Mapping[str, str] | None = None,
        proxy_headers: typing.Mapping[str, str] | None = None,
        proxy_ssl_context: ssl.SSLContext | None = None,
        use_forwarding_for_https: bool = False,
        proxy_assert_hostname: None | str | Literal[False] = None,
        proxy_assert_fingerprint: str | None = None,
        **connection_pool_kw: typing.Any,
    ) -> None:
        # Backwards compatibility: an HTTPConnectionPool instance may be
        # passed in place of a URL string; rebuild the URL from its parts.
        if isinstance(proxy_url, HTTPConnectionPool):
            str_proxy_url = f"{proxy_url.scheme}://{proxy_url.host}:{proxy_url.port}"
        else:
            str_proxy_url = proxy_url
        proxy = parse_url(str_proxy_url)

        # Only plain HTTP and HTTPS proxies are supported.
        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)

        if not proxy.port:
            # Fall back on the scheme's default port when none was given.
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}
        self.proxy_ssl_context = proxy_ssl_context
        self.proxy_config = ProxyConfig(
            proxy_ssl_context,
            use_forwarding_for_https,
            proxy_assert_hostname,
            proxy_assert_fingerprint,
        )

        # Propagate proxy settings to every pool this manager creates.
        connection_pool_kw["_proxy"] = self.proxy
        connection_pool_kw["_proxy_headers"] = self.proxy_headers
        connection_pool_kw["_proxy_config"] = self.proxy_config

        super().__init__(num_pools, headers, **connection_pool_kw)

    def connection_from_host(
        self,
        host: str | None,
        port: int | None = None,
        scheme: str | None = "http",
        pool_kwargs: dict[str, typing.Any] | None = None,
    ) -> HTTPConnectionPool:
        # HTTPS requests get a pool keyed on the *destination* host (they are
        # tunneled or forwarded per target), while plain HTTP requests are
        # sent straight to the proxy and therefore share the proxy's pool.
        if scheme == "https":
            return super().connection_from_host(
                host, port, scheme, pool_kwargs=pool_kwargs
            )

        return super().connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs  # type: ignore[union-attr]
        )

    def _set_proxy_headers(
        self, url: str, headers: typing.Mapping[str, str] | None = None
    ) -> typing.Mapping[str, str]:
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {"Accept": "*/*"}

        netloc = parse_url(url).netloc
        if netloc:
            headers_["Host"] = netloc

        if headers:
            # User-provided headers take precedence over the defaults above.
            headers_.update(headers)
        return headers_

    @typing.overload  # type: ignore[override]
    def urlopen(
        self,
        method: str,
        url: str,
        redirect: bool = True,
        *,
        multiplexed: Literal[False] = ...,
        **kw: typing.Any,
    ) -> HTTPResponse:
        ...

    @typing.overload
    def urlopen(
        self,
        method: str,
        url: str,
        redirect: bool = True,
        *,
        multiplexed: Literal[True],
        **kw: typing.Any,
    ) -> ResponsePromise:
        ...

    def urlopen(
        self,
        method: str,
        url: str,
        redirect: bool = True,
        **kw: typing.Any,
    ) -> HTTPResponse | ResponsePromise:
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)

        if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
            # For connections using HTTP CONNECT, httplib sets the necessary
            # headers on the CONNECT to the proxy. If we're not using CONNECT,
            # we'll definitely need to set 'Host' at the very least.
            headers = kw.get("headers", self.headers)
            kw["headers"] = self._set_proxy_headers(url, headers)

        return super().urlopen(method, url, redirect=redirect, **kw)  # type: ignore[no-any-return]
|
(proxy_url: 'str', num_pools: 'int' = 10, headers: 'typing.Mapping[str, str] | None' = None, proxy_headers: 'typing.Mapping[str, str] | None' = None, proxy_ssl_context: 'ssl.SSLContext | None' = None, use_forwarding_for_https: 'bool' = False, proxy_assert_hostname: 'None | str | Literal[False]' = None, proxy_assert_fingerprint: 'str | None' = None, **connection_pool_kw: 'typing.Any') -> 'None'
|
710,246
|
urllib3_future.poolmanager
|
urlopen
|
Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.
|
def urlopen(
    self,
    method: str,
    url: str,
    redirect: bool = True,
    **kw: typing.Any,
) -> HTTPResponse | ResponsePromise:
    "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
    parsed = parse_url(url)
    tunneled = connection_requires_http_tunnel(
        self.proxy, self.proxy_config, parsed.scheme
    )

    if not tunneled:
        # Without a CONNECT tunnel the proxy sees the request directly, so
        # at the very least a 'Host' header must be injected here (CONNECT
        # requests get their headers set by the lower HTTP layer instead).
        kw["headers"] = self._set_proxy_headers(url, kw.get("headers", self.headers))

    return super().urlopen(method, url, redirect=redirect, **kw)  # type: ignore[no-any-return]
|
(self, method: str, url: str, redirect: bool = True, **kw: Any) -> urllib3_future.response.HTTPResponse | urllib3_future.backend._base.ResponsePromise
|
710,247
|
urllib3_future.contrib.resolver.factories
|
ResolverDescription
|
Describe how a BaseResolver must be instantiated.
|
class ResolverDescription:
    """Describe how a BaseResolver must be instantiated."""

    def __init__(
        self,
        protocol: ProtocolResolver,
        specifier: str | None = None,
        implementation: str | None = None,
        server: str | None = None,
        port: int | None = None,
        *host_patterns: str,
        **kwargs: typing.Any,
    ) -> None:
        self.protocol = protocol  # DNS protocol to use (e.g. DoH, DoT, system)
        self.specifier = specifier  # optional provider/sub-protocol hint
        self.implementation = implementation  # concrete backing implementation
        self.server = server  # resolver hostname or address, if any
        self.port = port  # resolver port, if any
        self.host_patterns = host_patterns  # hostname patterns handled by this resolver
        self.kwargs = kwargs  # extra keyword arguments forwarded at instantiation

    def __setitem__(self, key: str, value: typing.Any) -> None:
        self.kwargs[key] = value

    def __contains__(self, item: str) -> bool:
        return item in self.kwargs

    def new(self) -> BaseResolver:
        """Instantiate the resolver described by this object."""
        kwargs = {**self.kwargs}

        # Only pass server/port/patterns when actually set so resolver
        # implementations keep their own defaults otherwise.
        if self.server:
            kwargs["server"] = self.server
        if self.port:
            kwargs["port"] = self.port
        if self.host_patterns:
            kwargs["patterns"] = self.host_patterns

        return ResolverFactory.new(
            self.protocol,
            self.specifier,
            self.implementation,
            **kwargs,
        )

    @staticmethod
    def from_url(url: str) -> ResolverDescription:
        """Build a :class:`ResolverDescription` from a DNS URL.

        The URL scheme selects the protocol (optionally ``scheme+specifier``),
        the userinfo becomes an ``Authorization`` header (Basic for
        ``user:pass``, Bearer for a lone token), and query parameters become
        keyword arguments: booleans/ints/floats are auto-converted, while
        repeated or comma-separated parameters are merged into lists.

        :raises ValueError: if the URL has no scheme or passes more than one
            ``implementation`` parameter.
        """
        parsed_url = parse_url(url)

        schema = parsed_url.scheme

        if schema is None:
            raise ValueError("Given DNS url is missing a protocol")

        specifier = None
        implementation = None

        # e.g. "doh+google" -> protocol "doh", specifier "google"
        if "+" in schema:
            schema, specifier = tuple(schema.lower().split("+", 1))

        protocol = ProtocolResolver(schema)

        kwargs: dict[str, typing.Any] = {}

        if parsed_url.path:
            kwargs["path"] = parsed_url.path

        if parsed_url.auth:
            kwargs["headers"] = dict()

            if ":" in parsed_url.auth:
                # Split only on the first colon so that passwords containing
                # ':' survive intact (plain split() would raise ValueError).
                username, password = parsed_url.auth.split(":", 1)

                username = username.strip("'\"")
                password = password.strip("'\"")

                kwargs["headers"][
                    "Authorization"
                ] = f"Basic {b64encode(f'{username}:{password}'.encode()).decode()}"
            else:
                kwargs["headers"]["Authorization"] = f"Bearer {parsed_url.auth}"

        if parsed_url.query:
            parameters = parse_qs(parsed_url.query)

            for parameter in parameters:
                if not parameters[parameter]:
                    continue

                parameter_insensible = parameter.lower()

                # Repeated parameter (?x=a&x=b): merge every occurrence,
                # further splitting comma-separated values.
                if (
                    isinstance(parameters[parameter], list)
                    and len(parameters[parameter]) > 1
                ):
                    if parameter == "implementation":
                        raise ValueError("Only one implementation can be passed to URL")

                    values = []

                    for e in parameters[parameter]:
                        if "," in e:
                            values.extend(e.split(","))
                        else:
                            values.append(e)

                    if parameter_insensible in kwargs:
                        if isinstance(kwargs[parameter_insensible], list):
                            kwargs[parameter_insensible].extend(values)
                        else:
                            values.append(kwargs[parameter_insensible])
                            kwargs[parameter_insensible] = values
                        continue

                    kwargs[parameter_insensible] = values
                    continue

                value: str = parameters[parameter][0].lower().strip(" ")

                if parameter == "implementation":
                    implementation = value
                    continue

                if "," in value:
                    list_of_values = value.split(",")

                    if parameter_insensible in kwargs:
                        if isinstance(kwargs[parameter_insensible], list):
                            kwargs[parameter_insensible].extend(list_of_values)
                        else:
                            list_of_values.append(kwargs[parameter_insensible])
                            # Bug fix: the merged list was previously built but
                            # never stored back into kwargs, silently dropping
                            # the new comma-separated values. Mirror the
                            # repeated-parameter branch above.
                            kwargs[parameter_insensible] = list_of_values
                        continue

                    kwargs[parameter_insensible] = list_of_values
                    continue

                # Auto-convert scalar values: booleans, then ints, then floats.
                value_converted: bool | int | float | None = None

                if value in ["false", "true"]:
                    value_converted = True if value == "true" else False
                elif value.isdigit():
                    value_converted = int(value)
                elif (
                    value.count(".") == 1
                    and value.index(".") > 0
                    and value.replace(".", "").isdigit()
                ):
                    value_converted = float(value)

                kwargs[parameter_insensible] = (
                    value if value_converted is None else value_converted
                )

        host_patterns = []

        # "hosts" is consumed here and turned into positional host patterns.
        if "hosts" in kwargs:
            host_patterns = (
                kwargs["hosts"].split(",")
                if isinstance(kwargs["hosts"], str)
                else kwargs["hosts"]
            )
            del kwargs["hosts"]

        return ResolverDescription(
            protocol,
            specifier,
            implementation,
            parsed_url.host,
            parsed_url.port,
            *host_patterns,
            **kwargs,
        )
|
(protocol: 'ProtocolResolver', specifier: 'str | None' = None, implementation: 'str | None' = None, server: 'str | None' = None, port: 'int | None' = None, *host_patterns: 'str', **kwargs: 'typing.Any') -> 'None'
|
710,251
|
urllib3_future.contrib.resolver.factories
|
from_url
| null |
@staticmethod
def from_url(url: str) -> ResolverDescription:
    """Build a :class:`ResolverDescription` from a DNS URL.

    The URL scheme selects the protocol (optionally ``scheme+specifier``),
    the userinfo becomes an ``Authorization`` header (Basic for
    ``user:pass``, Bearer for a lone token), and query parameters become
    keyword arguments: booleans/ints/floats are auto-converted, while
    repeated or comma-separated parameters are merged into lists.

    :raises ValueError: if the URL has no scheme or passes more than one
        ``implementation`` parameter.
    """
    parsed_url = parse_url(url)

    schema = parsed_url.scheme

    if schema is None:
        raise ValueError("Given DNS url is missing a protocol")

    specifier = None
    implementation = None

    # e.g. "doh+google" -> protocol "doh", specifier "google"
    if "+" in schema:
        schema, specifier = tuple(schema.lower().split("+", 1))

    protocol = ProtocolResolver(schema)

    kwargs: dict[str, typing.Any] = {}

    if parsed_url.path:
        kwargs["path"] = parsed_url.path

    if parsed_url.auth:
        kwargs["headers"] = dict()

        if ":" in parsed_url.auth:
            # Split only on the first colon so that passwords containing
            # ':' survive intact (plain split() would raise ValueError).
            username, password = parsed_url.auth.split(":", 1)

            username = username.strip("'\"")
            password = password.strip("'\"")

            kwargs["headers"][
                "Authorization"
            ] = f"Basic {b64encode(f'{username}:{password}'.encode()).decode()}"
        else:
            kwargs["headers"]["Authorization"] = f"Bearer {parsed_url.auth}"

    if parsed_url.query:
        parameters = parse_qs(parsed_url.query)

        for parameter in parameters:
            if not parameters[parameter]:
                continue

            parameter_insensible = parameter.lower()

            # Repeated parameter (?x=a&x=b): merge every occurrence,
            # further splitting comma-separated values.
            if (
                isinstance(parameters[parameter], list)
                and len(parameters[parameter]) > 1
            ):
                if parameter == "implementation":
                    raise ValueError("Only one implementation can be passed to URL")

                values = []

                for e in parameters[parameter]:
                    if "," in e:
                        values.extend(e.split(","))
                    else:
                        values.append(e)

                if parameter_insensible in kwargs:
                    if isinstance(kwargs[parameter_insensible], list):
                        kwargs[parameter_insensible].extend(values)
                    else:
                        values.append(kwargs[parameter_insensible])
                        kwargs[parameter_insensible] = values
                    continue

                kwargs[parameter_insensible] = values
                continue

            value: str = parameters[parameter][0].lower().strip(" ")

            if parameter == "implementation":
                implementation = value
                continue

            if "," in value:
                list_of_values = value.split(",")

                if parameter_insensible in kwargs:
                    if isinstance(kwargs[parameter_insensible], list):
                        kwargs[parameter_insensible].extend(list_of_values)
                    else:
                        list_of_values.append(kwargs[parameter_insensible])
                        # Bug fix: the merged list was previously built but
                        # never stored back into kwargs, silently dropping
                        # the new comma-separated values.
                        kwargs[parameter_insensible] = list_of_values
                    continue

                kwargs[parameter_insensible] = list_of_values
                continue

            # Auto-convert scalar values: booleans, then ints, then floats.
            value_converted: bool | int | float | None = None

            if value in ["false", "true"]:
                value_converted = True if value == "true" else False
            elif value.isdigit():
                value_converted = int(value)
            elif (
                value.count(".") == 1
                and value.index(".") > 0
                and value.replace(".", "").isdigit()
            ):
                value_converted = float(value)

            kwargs[parameter_insensible] = (
                value if value_converted is None else value_converted
            )

    host_patterns = []

    # "hosts" is consumed here and turned into positional host patterns.
    if "hosts" in kwargs:
        host_patterns = (
            kwargs["hosts"].split(",")
            if isinstance(kwargs["hosts"], str)
            else kwargs["hosts"]
        )
        del kwargs["hosts"]

    return ResolverDescription(
        protocol,
        specifier,
        implementation,
        parsed_url.host,
        parsed_url.port,
        *host_patterns,
        **kwargs,
    )
|
(url: str) -> urllib3_future.contrib.resolver.factories.ResolverDescription
|
710,252
|
urllib3_future.contrib.resolver.factories
|
new
| null |
def new(self) -> BaseResolver:
    """Instantiate the resolver described by this object."""
    extra: dict[str, typing.Any] = dict(self.kwargs)

    # Only forward server/port/patterns when actually set, so resolver
    # implementations keep their own defaults otherwise.
    for key, attribute in (
        ("server", self.server),
        ("port", self.port),
        ("patterns", self.host_patterns),
    ):
        if attribute:
            extra[key] = attribute

    return ResolverFactory.new(
        self.protocol,
        self.specifier,
        self.implementation,
        **extra,
    )
|
(self) -> urllib3_future.contrib.resolver.protocols.BaseResolver
|
710,253
|
urllib3_future.backend._base
|
ResponsePromise
| null |
class ResponsePromise:
    """Handle for an in-flight HTTP request awaiting its response.

    A promise is identified by a random, immutable uid; the backend fills in
    the low-level response later via the ``response`` setter.
    """

    def __init__(
        self,
        conn: BaseBackend,
        stream_id: int,
        request_headers: list[tuple[bytes, bytes]],
        **parameters: typing.Any,
    ) -> None:
        # 16 random bytes, base64-encoded -> unique 24-char identifier used
        # for equality and hashing.
        self._uid: str = b64encode(token_bytes(16)).decode("ascii")
        self._conn: BaseBackend = conn
        self._stream_id: int = stream_id
        # Not fulfilled yet; set by the backend once the response arrives.
        self._response: LowLevelResponse | AsyncLowLevelResponse | None = None
        self._request_headers = request_headers
        # Free-form metadata attached by callers (e.g. retry bookkeeping).
        self._parameters: typing.MutableMapping[str, typing.Any] = parameters

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, ResponsePromise):
            return False
        return self.uid == other.uid

    def __hash__(self) -> int:
        # Fix: defining __eq__ alone sets __hash__ = None, making promises
        # unhashable (unusable as dict/set keys). Hash on the same immutable
        # uid that __eq__ compares so the eq/hash contract holds.
        return hash(self._uid)

    def __repr__(self) -> str:
        return f"<ResponsePromise '{self.uid}' {self._conn._http_vsn_str} Stream[{self.stream_id}]>"

    @property
    def uid(self) -> str:
        """Unique identifier of this promise."""
        return self._uid

    @property
    def request_headers(self) -> list[tuple[bytes, bytes]]:
        """Headers that were sent with the originating request."""
        return self._request_headers

    @property
    def stream_id(self) -> int:
        """Identifier of the HTTP stream carrying this exchange."""
        return self._stream_id

    @property
    def is_ready(self) -> bool:
        """Whether the response has been attached yet."""
        return self._response is not None

    @property
    def response(self) -> LowLevelResponse | AsyncLowLevelResponse:
        """The fulfilled response.

        :raises OSError: if the promise has not been fulfilled yet.
        """
        if not self._response:
            raise OSError("promise not fulfilled yet")
        return self._response

    @response.setter
    def response(self, value: LowLevelResponse | AsyncLowLevelResponse) -> None:
        self._response = value

    def set_parameter(self, key: str, value: typing.Any) -> None:
        """Attach or overwrite a metadata entry on this promise."""
        self._parameters[key] = value

    def get_parameter(self, key: str) -> typing.Any | None:
        """Return the stored metadata value for ``key``, or None when absent."""
        return self._parameters[key] if key in self._parameters else None

    def update_parameters(self, data: dict[str, typing.Any]) -> None:
        """Merge ``data`` into the promise metadata, overwriting duplicates."""
        self._parameters.update(data)
|
(conn: 'BaseBackend', stream_id: 'int', request_headers: 'list[tuple[bytes, bytes]]', **parameters: 'typing.Any') -> 'None'
|
710,254
|
urllib3_future.backend._base
|
__eq__
| null |
def __eq__(self, other: object) -> bool:
    """Two promises are equal when they share the same unique id."""
    return isinstance(other, ResponsePromise) and self.uid == other.uid
|
(self, other: object) -> bool
|
710,255
|
urllib3_future.backend._base
|
__init__
| null |
def __init__(
    self,
    conn: BaseBackend,
    stream_id: int,
    request_headers: list[tuple[bytes, bytes]],
    **parameters: typing.Any,
) -> None:
    """Bind a fresh promise to *conn* and its HTTP stream *stream_id*."""
    self._conn: BaseBackend = conn
    self._stream_id: int = stream_id
    self._request_headers = request_headers
    # Free-form metadata attached by callers.
    self._parameters: typing.MutableMapping[str, typing.Any] = parameters
    # Not fulfilled yet; the backend attaches the response later.
    self._response: LowLevelResponse | AsyncLowLevelResponse | None = None
    # 16 random bytes -> 24-char base64 token, unique per promise.
    self._uid: str = b64encode(token_bytes(16)).decode("ascii")
|
(self, conn: urllib3_future.backend._base.BaseBackend, stream_id: int, request_headers: list[tuple[bytes, bytes]], **parameters: Any) -> NoneType
|
710,256
|
urllib3_future.backend._base
|
__repr__
| null |
def __repr__(self) -> str:
    """Debug form: uid, negotiated HTTP version, and stream number."""
    return (
        f"<ResponsePromise '{self.uid}' "
        f"{self._conn._http_vsn_str} Stream[{self.stream_id}]>"
    )
|
(self) -> str
|
710,257
|
urllib3_future.backend._base
|
get_parameter
| null |
def get_parameter(self, key: str) -> typing.Any | None:
    """Return the stored metadata value for *key*, or None when absent."""
    return self._parameters.get(key)
|
(self, key: str) -> Optional[Any]
|
710,258
|
urllib3_future.backend._base
|
set_parameter
| null |
def set_parameter(self, key: str, value: typing.Any) -> None:
    """Attach or overwrite the metadata entry *key* with *value*."""
    self._parameters.update({key: value})
|
(self, key: str, value: Any) -> NoneType
|
710,259
|
urllib3_future.backend._base
|
update_parameters
| null |
def update_parameters(self, data: dict[str, typing.Any]) -> None:
    """Merge *data* into the promise metadata, overwriting duplicates."""
    for key, value in data.items():
        self._parameters[key] = value
|
(self, data: dict[str, typing.Any]) -> NoneType
|
710,260
|
urllib3_future.util.retry
|
Retry
|
Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool:
.. code-block:: python
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request("GET", "https://example.com/")
Or per-request (which overrides the default for the pool):
.. code-block:: python
response = http.request("GET", "https://example.com/", retries=Retry(10))
Retries can be disabled by passing ``False``:
.. code-block:: python
response = http.request("GET", "https://example.com/", retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param int other:
How many times to retry on other errors.
Other errors are errors that are not connect, read, redirect or status errors.
These errors might be raised after the request was sent to the server, so the
request might have side-effects.
Set to ``0`` to fail on the first retry of this type.
If ``total`` is not set, it's a good idea to set this to 0 to account
for unexpected edge cases and avoid infinite retry loops.
:param Collection allowed_methods:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
Set to a ``None`` value to retry on any verb.
:param Collection status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``allowed_methods``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of previous retries}))
seconds. If `backoff_jitter` is non-zero, this sleep is extended by::
random.uniform(0, {backoff jitter})
seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will
sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever
be longer than `backoff_max`.
By default, backoff is disabled (factor set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param Collection remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
|
class Retry:
"""Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool:
.. code-block:: python
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request("GET", "https://example.com/")
Or per-request (which overrides the default for the pool):
.. code-block:: python
response = http.request("GET", "https://example.com/", retries=Retry(10))
Retries can be disabled by passing ``False``:
.. code-block:: python
response = http.request("GET", "https://example.com/", retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param int other:
How many times to retry on other errors.
Other errors are errors that are not connect, read, redirect or status errors.
These errors might be raised after the request was sent to the server, so the
request might have side-effects.
Set to ``0`` to fail on the first retry of this type.
If ``total`` is not set, it's a good idea to set this to 0 to account
for unexpected edge cases and avoid infinite retry loops.
:param Collection allowed_methods:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
Set to a ``None`` value to retry on any verb.
:param Collection status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``allowed_methods``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of previous retries}))
seconds. If `backoff_jitter` is non-zero, this sleep is extended by::
random.uniform(0, {backoff jitter})
seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will
sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever
be longer than `backoff_max`.
By default, backoff is disabled (factor set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param Collection remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
#: Default methods to be used for ``allowed_methods``
DEFAULT_ALLOWED_METHODS = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
#: Default status codes to be used for ``status_forcelist``
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Default headers to be used for ``remove_headers_on_redirect``
DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Cookie", "Authorization"])
#: Default maximum backoff time.
DEFAULT_BACKOFF_MAX = 120
# Backward compatibility; assigned outside of the class.
DEFAULT: typing.ClassVar[Retry]
def __init__(
self,
total: bool | int | None = 10,
connect: int | None = None,
read: int | None = None,
redirect: bool | int | None = None,
status: int | None = None,
other: int | None = None,
allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,
status_forcelist: typing.Collection[int] | None = None,
backoff_factor: float = 0,
backoff_max: float = DEFAULT_BACKOFF_MAX,
raise_on_redirect: bool = True,
raise_on_status: bool = True,
history: tuple[RequestHistory, ...] | None = None,
respect_retry_after_header: bool = True,
remove_headers_on_redirect: typing.Collection[
str
] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,
backoff_jitter: float = 0.0,
) -> None:
self.total = total
self.connect = connect
self.read = read
self.status = status
self.other = other
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.allowed_methods = allowed_methods
self.backoff_factor = backoff_factor
self.backoff_max = backoff_max
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or ()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset(
h.lower() for h in remove_headers_on_redirect
)
self.backoff_jitter = backoff_jitter
def new(self, **kw: typing.Any) -> Retry:
params = dict(
total=self.total,
connect=self.connect,
read=self.read,
redirect=self.redirect,
status=self.status,
other=self.other,
allowed_methods=self.allowed_methods,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
backoff_max=self.backoff_max,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
respect_retry_after_header=self.respect_retry_after_header,
backoff_jitter=self.backoff_jitter,
)
params.update(kw)
return type(self)(**params) # type: ignore[arg-type]
@classmethod
def from_int(
cls,
retries: Retry | bool | int | None,
redirect: bool | int | None = True,
default: Retry | bool | int | None = None,
) -> Retry:
"""Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self) -> float:
"""Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
if self.backoff_jitter != 0.0:
backoff_value += random.random() * self.backoff_jitter
return float(max(0, min(self.backoff_max, backoff_value)))
def parse_retry_after(self, retry_after: str) -> float:
seconds: float
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate_tz(retry_after)
if retry_date_tuple is None:
raise InvalidHeader(f"Invalid Retry-After header: {retry_after}")
retry_date = email.utils.mktime_tz(retry_date_tuple)
seconds = retry_date - time.time()
seconds = max(seconds, 0)
return seconds
def get_retry_after(self, response: HTTPResponse) -> float | None:
"""Get the value of Retry-After in seconds."""
retry_after = response.headers.get("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response: HTTPResponse) -> bool:
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self) -> None:
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response: HTTPResponse | None = None) -> None:
"""Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if self.respect_retry_after_header and response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
async def async_sleep_for_retry(self, response: AsyncHTTPResponse) -> bool:
retry_after = self.get_retry_after(response)
if retry_after:
await asyncio.sleep(retry_after)
return True
return False
async def _async_sleep_backoff(self) -> None:
backoff = self.get_backoff_time()
if backoff <= 0:
return
await asyncio.sleep(backoff)
async def async_sleep(self, response: AsyncHTTPResponse | None = None) -> None:
if self.respect_retry_after_header and response:
slept = await self.async_sleep_for_retry(response)
if slept:
return
await self._async_sleep_backoff()
def _is_connection_error(self, err: Exception) -> bool:
"""Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
if isinstance(err, ProxyError):
err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err: Exception) -> bool:
"""Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method: str) -> bool:
"""Checks if a given HTTP method should be retried upon, depending if
it is included in the allowed_methods
"""
if self.allowed_methods and method.upper() not in self.allowed_methods:
return False
return True
def is_retry(
self, method: str, status_code: int, has_retry_after: bool = False
) -> bool:
"""Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return bool(
self.total
and self.respect_retry_after_header
and has_retry_after
and (status_code in self.RETRY_AFTER_STATUS_CODES)
)
def is_exhausted(self) -> bool:
"""Are we out of retries?"""
retry_counts = [
x
for x in (
self.total,
self.connect,
self.read,
self.redirect,
self.status,
self.other,
)
if x
]
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(
    self,
    method: str | None = None,
    url: str | None = None,
    response: HTTPResponse | AsyncHTTPResponse | None = None,
    error: Exception | None = None,
    _pool: ConnectionPool | AsyncConnectionPool | None = None,
    _stacktrace: TracebackType | None = None,
) -> Retry:
    """Return a new Retry object with incremented retry counters.
    :param response: A response object, or None, if the server did not
        return a response.
    :type response: :class:`~urllib3.response.HTTPResponse`
    :param Exception error: An error encountered during the request, or
        None if the response was received successfully.
    :return: A new ``Retry`` object.
    :raises MaxRetryError: if the new counters are exhausted.
    """
    if self.total is False and error:
        # Retries are fully disabled: re-raise the original error with its
        # original traceback instead of building a new Retry.
        raise reraise(type(error), error, _stacktrace)
    # ``total`` is decremented on every kind of failure.
    total = self.total
    if total is not None:
        total -= 1
    connect = self.connect
    read = self.read
    redirect = self.redirect
    status_count = self.status
    other = self.other
    cause = "unknown"
    status = None
    redirect_location = None
    # Exactly one of the per-category counters below is decremented,
    # depending on what kind of failure occurred.
    if error and self._is_connection_error(error):
        # Connect retry?  ``connect is False`` means "never retry connects".
        if connect is False:
            raise reraise(type(error), error, _stacktrace)
        elif connect is not None:
            connect -= 1
    elif error and self._is_read_error(error):
        # Read retry?  Only retry reads for methods deemed idempotent.
        if read is False or method is None or not self._is_method_retryable(method):
            raise reraise(type(error), error, _stacktrace)
        elif read is not None:
            read -= 1
    elif error:
        # Other retry? (errors that are neither connect nor read failures)
        if other is not None:
            other -= 1
    elif response and response.get_redirect_location():
        # Redirect retry?
        if redirect is not None:
            redirect -= 1
        cause = "too many redirects"
        response_redirect_location = response.get_redirect_location()
        if response_redirect_location:
            redirect_location = response_redirect_location
        status = response.status
    else:
        # Incrementing because of a server error like a 500 in
        # status_forcelist and the given method is in the allowed_methods
        cause = ResponseError.GENERIC_ERROR
        if response and response.status:
            if status_count is not None:
                status_count -= 1
            cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
            status = response.status
    # Retry objects are immutable: record this attempt in the history and
    # build a fresh instance carrying the decremented counters.
    history = self.history + (
        RequestHistory(method, url, error, status, redirect_location),
    )
    new_retry = self.new(
        total=total,
        connect=connect,
        read=read,
        redirect=redirect,
        status=status_count,
        other=other,
        history=history,
    )
    if new_retry.is_exhausted():
        reason = error or ResponseError(cause)
        raise MaxRetryError(_pool, url, reason) from reason  # type: ignore[arg-type]
    log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
    return new_retry
def __repr__(self) -> str:
return (
f"{type(self).__name__}(total={self.total}, connect={self.connect}, "
f"read={self.read}, redirect={self.redirect}, status={self.status})"
)
|
(total: 'bool | int | None' = 10, connect: 'int | None' = None, read: 'int | None' = None, redirect: 'bool | int | None' = None, status: 'int | None' = None, other: 'int | None' = None, allowed_methods: 'typing.Collection[str] | None' = frozenset({'HEAD', 'GET', 'PUT', 'DELETE', 'TRACE', 'OPTIONS'}), status_forcelist: 'typing.Collection[int] | None' = None, backoff_factor: 'float' = 0, backoff_max: 'float' = 120, raise_on_redirect: 'bool' = True, raise_on_status: 'bool' = True, history: 'tuple[RequestHistory, ...] | None' = None, respect_retry_after_header: 'bool' = True, remove_headers_on_redirect: 'typing.Collection[str]' = frozenset({'Cookie', 'Authorization'}), backoff_jitter: 'float' = 0.0) -> 'None'
|
710,263
|
urllib3_future.util.retry
|
_async_sleep_backoff
| null |
def sleep(self, response: HTTPResponse | None = None) -> None:
    """Pause between retry attempts.

    Honors a server-supplied ``Retry-After`` header when present (and
    ``respect_retry_after_header`` is enabled); otherwise sleeps for the
    exponential-backoff duration, which is 0 by default.
    """
    honored_retry_after = bool(
        self.respect_retry_after_header
        and response
        and self.sleep_for_retry(response)
    )
    if not honored_retry_after:
        self._sleep_backoff()
|
(self) -> NoneType
|
710,271
|
urllib3_future.util.retry
|
get_retry_after
|
Get the value of Retry-After in seconds.
|
def get_retry_after(self, response: HTTPResponse) -> float | None:
    """Return the parsed ``Retry-After`` value of *response* in seconds.

    Returns None when the header is absent.
    """
    header_value = response.headers.get("Retry-After")
    return None if header_value is None else self.parse_retry_after(header_value)
|
(self, response: 'HTTPResponse') -> 'float | None'
|
710,272
|
urllib3_future.util.retry
|
increment
|
Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
|
def increment(
    self,
    method: str | None = None,
    url: str | None = None,
    response: HTTPResponse | AsyncHTTPResponse | None = None,
    error: Exception | None = None,
    _pool: ConnectionPool | AsyncConnectionPool | None = None,
    _stacktrace: TracebackType | None = None,
) -> Retry:
    """Return a new Retry object with incremented retry counters.
    :param response: A response object, or None, if the server did not
        return a response.
    :type response: :class:`~urllib3.response.HTTPResponse`
    :param Exception error: An error encountered during the request, or
        None if the response was received successfully.
    :return: A new ``Retry`` object.
    :raises MaxRetryError: if the new counters are exhausted.
    """
    if self.total is False and error:
        # Retries are fully disabled: re-raise the original error with its
        # original traceback instead of building a new Retry.
        raise reraise(type(error), error, _stacktrace)
    # ``total`` is decremented on every kind of failure.
    total = self.total
    if total is not None:
        total -= 1
    connect = self.connect
    read = self.read
    redirect = self.redirect
    status_count = self.status
    other = self.other
    cause = "unknown"
    status = None
    redirect_location = None
    # Exactly one of the per-category counters below is decremented,
    # depending on what kind of failure occurred.
    if error and self._is_connection_error(error):
        # Connect retry?  ``connect is False`` means "never retry connects".
        if connect is False:
            raise reraise(type(error), error, _stacktrace)
        elif connect is not None:
            connect -= 1
    elif error and self._is_read_error(error):
        # Read retry?  Only retry reads for methods deemed idempotent.
        if read is False or method is None or not self._is_method_retryable(method):
            raise reraise(type(error), error, _stacktrace)
        elif read is not None:
            read -= 1
    elif error:
        # Other retry? (errors that are neither connect nor read failures)
        if other is not None:
            other -= 1
    elif response and response.get_redirect_location():
        # Redirect retry?
        if redirect is not None:
            redirect -= 1
        cause = "too many redirects"
        response_redirect_location = response.get_redirect_location()
        if response_redirect_location:
            redirect_location = response_redirect_location
        status = response.status
    else:
        # Incrementing because of a server error like a 500 in
        # status_forcelist and the given method is in the allowed_methods
        cause = ResponseError.GENERIC_ERROR
        if response and response.status:
            if status_count is not None:
                status_count -= 1
            cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
            status = response.status
    # Retry objects are immutable: record this attempt in the history and
    # build a fresh instance carrying the decremented counters.
    history = self.history + (
        RequestHistory(method, url, error, status, redirect_location),
    )
    new_retry = self.new(
        total=total,
        connect=connect,
        read=read,
        redirect=redirect,
        status=status_count,
        other=other,
        history=history,
    )
    if new_retry.is_exhausted():
        reason = error or ResponseError(cause)
        raise MaxRetryError(_pool, url, reason) from reason  # type: ignore[arg-type]
    log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
    return new_retry
|
(self, method: 'str | None' = None, url: 'str | None' = None, response: 'HTTPResponse | AsyncHTTPResponse | None' = None, error: 'Exception | None' = None, _pool: 'ConnectionPool | AsyncConnectionPool | None' = None, _stacktrace: 'TracebackType | None' = None) -> 'Retry'
|
710,277
|
urllib3_future.util.retry
|
sleep
|
Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
|
def sleep(self, response: HTTPResponse | None = None) -> None:
    """Pause between retry attempts.

    Honors a server-supplied ``Retry-After`` header when present (and
    ``respect_retry_after_header`` is enabled); otherwise sleeps for the
    exponential-backoff duration, which is 0 by default.
    """
    honored_retry_after = bool(
        self.respect_retry_after_header
        and response
        and self.sleep_for_retry(response)
    )
    if not honored_retry_after:
        self._sleep_backoff()
|
(self, response: 'HTTPResponse | None' = None) -> 'None'
|
710,278
|
urllib3_future.util.retry
|
sleep_for_retry
| null |
def sleep_for_retry(self, response: HTTPResponse) -> bool:
    """Sleep for the server-requested Retry-After duration.

    Returns True when a delay was found and slept; False when the header
    is absent or zero.
    """
    delay = self.get_retry_after(response)
    if not delay:
        return False
    time.sleep(delay)
    return True
|
(self, response: 'HTTPResponse') -> 'bool'
|
710,279
|
urllib3_future.util.timeout
|
Timeout
|
Timeout configuration.
Timeouts can be defined as a default for a pool:
.. code-block:: python
import urllib3
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
http = urllib3.PoolManager(timeout=timeout)
resp = http.request("GET", "https://example.com/")
print(resp.status)
Or per-request (which overrides the default for the pool):
.. code-block:: python
response = http.request("GET", "https://example.com/", timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``:
.. code-block:: python
no_timeout = Timeout(connect=None, read=None)
response = http.request("GET", "https://example.com/", timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: int, float, or None
:param connect:
The maximum amount of time (in seconds) to wait for a connection
attempt to a server to succeed. Omitting the parameter will default the
connect timeout to the system default, probably `the global default
timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: int, float, or None
:param read:
The maximum amount of time (in seconds) to wait between consecutive
read operations for a response from the server. Omitting the parameter
will default the read timeout to the system default, probably `the
global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: int, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
|
class Timeout:
    """Timeout configuration.
    Timeouts can be defined as a default for a pool:
    .. code-block:: python
        import urllib3
        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
        http = urllib3.PoolManager(timeout=timeout)
        resp = http.request("GET", "https://example.com/")
        print(resp.status)
    Or per-request (which overrides the default for the pool):
    .. code-block:: python
        response = http.request("GET", "https://example.com/", timeout=Timeout(10))
    Timeouts can be disabled by setting all the parameters to ``None``:
    .. code-block:: python
        no_timeout = Timeout(connect=None, read=None)
        response = http.request("GET", "https://example.com/", timeout=no_timeout)
    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.
        Defaults to None.
    :type total: int, float, or None
    :param connect:
        The maximum amount of time (in seconds) to wait for a connection
        attempt to a server to succeed. Omitting the parameter will default the
        connect timeout to the system default, probably `the global default
        timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.
    :type connect: int, float, or None
    :param read:
        The maximum amount of time (in seconds) to wait between consecutive
        read operations for a response from the server. Omitting the parameter
        will default the read timeout to the system default, probably `the
        global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.
    :type read: int, float, or None
    .. note::
        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response.
        For example, Python's DNS resolver does not obey the timeout specified
        on the socket. Other factors that can affect total request time include
        high CPU load, high swap, the program running at a low priority level,
        or other behaviors.
        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not trigger, even though the request will take
        several minutes to complete.
        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """

    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT

    def __init__(
        self,
        total: _TYPE_TIMEOUT_INTERNAL = None,
        connect: _TYPE_TIMEOUT_INTERNAL = _DEFAULT_TIMEOUT,
        read: _TYPE_TIMEOUT_INTERNAL = _DEFAULT_TIMEOUT,
    ) -> None:
        # Validate each value eagerly so that bad timeouts fail at
        # construction time rather than mid-request.
        self._connect = self._validate_timeout(connect, "connect")
        self._read = self._validate_timeout(read, "read")
        self.total = self._validate_timeout(total, "total")
        # Set by start_connect(); None means the connect timer is not running.
        self._start_connect: float | None = None

    def __repr__(self) -> str:
        return f"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})"

    # __str__ provided for backwards compatibility
    __str__ = __repr__

    @staticmethod
    def resolve_default_timeout(timeout: _TYPE_TIMEOUT_INTERNAL) -> float | None:
        # Translate the _DEFAULT_TIMEOUT sentinel into the process-wide
        # socket default; any other value passes through unchanged.
        return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout

    @classmethod
    def _validate_timeout(
        cls, value: _TYPE_TIMEOUT, name: str
    ) -> _TYPE_TIMEOUT_INTERNAL:
        """Check that a timeout attribute is valid.
        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If it is a numeric value less than or equal to
            zero, or the type is not an integer, float, or None.
        """
        # None (disabled) and the sentinel (use system default) pass through.
        if value is None or value is _DEFAULT_TIMEOUT:
            return value
        # bool is a subclass of int, so reject it explicitly before the
        # numeric checks below would silently accept it.
        if isinstance(value, bool):
            raise ValueError(
                "Timeout cannot be a boolean value. It must "
                "be an int, float or None."
            )
        try:
            float(value)  # type: ignore[arg-type]
        except (TypeError, ValueError):
            raise ValueError(
                "Timeout value %s was %s, but it must be an "
                "int, float or None." % (name, value)
            ) from None
        try:
            if value <= 0:  # type: ignore[operator]
                raise ValueError(
                    "Attempted to set %s timeout to %s, but the "
                    "timeout cannot be set to a value less "
                    "than or equal to 0." % (name, value)
                )
        except TypeError:
            # Comparison failed for a type that float() accepted.
            raise ValueError(
                "Timeout value %s was %s, but it must be an "
                "int, float or None." % (name, value)
            ) from None
        return value  # type: ignore[return-value]

    @classmethod
    def from_float(cls, timeout: _TYPE_TIMEOUT_INTERNAL) -> Timeout:
        """Create a new Timeout from a legacy timeout value.
        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.
        :param timeout: The legacy timeout value.
        :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self) -> Timeout:
        """Create a copy of the timeout object
        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.
        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read, total=self.total)

    def start_connect(self) -> float:
        """Start the timeout clock, used during a connect() attempt
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        # monotonic() is immune to system clock adjustments.
        self._start_connect = time.monotonic()
        return self._start_connect

    def get_connect_duration(self) -> float:
        """Gets the time elapsed since the call to :meth:`start_connect`.
        :return: Elapsed time in seconds.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError(
                "Can't get connect duration for timer that has not started."
            )
        return time.monotonic() - self._start_connect

    @property
    def connect_timeout(self) -> _TYPE_TIMEOUT_INTERNAL:
        """Get the value to use when setting a connection timeout.
        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.
        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is _DEFAULT_TIMEOUT:
            return self.total
        # Both connect and total are set: the shorter one wins.
        return min(self._connect, self.total)  # type: ignore[type-var]

    @property
    def read_timeout(self) -> float | None:
        """Get the value for the read timeout.
        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.
        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.
        :return: Value to use for the read timeout.
        :rtype: int, float or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (
            self.total is not None
            and self.total is not _DEFAULT_TIMEOUT
            and self._read is not None
            and self._read is not _DEFAULT_TIMEOUT
        ):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Read gets whatever is left of ``total`` after connecting,
            # capped at the explicit read timeout, floored at 0.
            return max(0, min(self.total - self.get_connect_duration(), self._read))
        elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self.resolve_default_timeout(self._read)
|
(total: '_TYPE_TIMEOUT_INTERNAL' = None, connect: '_TYPE_TIMEOUT_INTERNAL' = <_TYPE_DEFAULT.token: -1>, read: '_TYPE_TIMEOUT_INTERNAL' = <_TYPE_DEFAULT.token: -1>) -> 'None'
|
710,280
|
urllib3_future.util.timeout
|
__init__
| null |
def __init__(
    self,
    total: _TYPE_TIMEOUT_INTERNAL = None,
    connect: _TYPE_TIMEOUT_INTERNAL = _DEFAULT_TIMEOUT,
    read: _TYPE_TIMEOUT_INTERNAL = _DEFAULT_TIMEOUT,
) -> None:
    # Validate each value eagerly so that bad timeouts fail at
    # construction time rather than mid-request.
    self._connect = self._validate_timeout(connect, "connect")
    self._read = self._validate_timeout(read, "read")
    self.total = self._validate_timeout(total, "total")
    # Set by start_connect(); None means the connect timer is not running.
    self._start_connect: float | None = None
|
(self, total: '_TYPE_TIMEOUT_INTERNAL' = None, connect: '_TYPE_TIMEOUT_INTERNAL' = <_TYPE_DEFAULT.token: -1>, read: '_TYPE_TIMEOUT_INTERNAL' = <_TYPE_DEFAULT.token: -1>) -> 'None'
|
710,285
|
urllib3_future.util.timeout
|
resolve_default_timeout
| null |
@staticmethod
def resolve_default_timeout(timeout: _TYPE_TIMEOUT_INTERNAL) -> float | None:
    """Map the default-timeout sentinel to the process-wide socket default.

    Any value other than the sentinel is returned unchanged.
    """
    if timeout is _DEFAULT_TIMEOUT:
        return getdefaulttimeout()
    return timeout
|
(timeout: '_TYPE_TIMEOUT_INTERNAL') -> 'float | None'
|
710,294
|
urllib3_future._async.connectionpool
|
connection_from_url
|
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
|
def connection_from_url(url: str, **kw: typing.Any) -> AsyncHTTPConnectionPool:
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.
    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.
    :param url:
        Absolute URL string that must include the scheme. Port is optional.
    :param kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.
    Example::
        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, _, host, port, *_ = parse_url(url)
    scheme = scheme or "http"
    port = port or port_by_scheme.get(scheme, 80)
    # Dispatch on scheme once, then construct the selected pool class.
    pool_cls = (
        AsyncHTTPSConnectionPool if scheme == "https" else AsyncHTTPConnectionPool
    )
    return pool_cls(host, port=port, **kw)  # type: ignore[arg-type]
|
(url: str, **kw: Any) -> urllib3_future._async.connectionpool.AsyncHTTPConnectionPool
|
710,295
|
urllib3_future._async.poolmanager
|
proxy_from_url
| null |
def proxy_from_url(url: str, **kw: typing.Any) -> AsyncProxyManager:
    # Convenience constructor: build an AsyncProxyManager for the given proxy
    # URL, forwarding any extra keyword arguments to its constructor.
    return AsyncProxyManager(proxy_url=url, **kw)
|
(url: str, **kw: Any) -> urllib3_future._async.poolmanager.AsyncProxyManager
|
710,310
|
urllib3_future
|
request
|
A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
Therefore, its side effects could be shared across dependencies relying on it.
To avoid side effects create a new ``PoolManager`` instance and use it instead.
The method does not accept low-level ``**urlopen_kw`` keyword arguments, nor does
it issue multiplexed/concurrent requests.
|
def request(
    method: str,
    url: str,
    *,
    body: _TYPE_BODY | None = None,
    fields: _TYPE_FIELDS | None = None,
    headers: typing.Mapping[str, str] | None = None,
    preload_content: bool | None = True,
    decode_content: bool | None = True,
    redirect: bool | None = True,
    retries: Retry | bool | int | None = None,
    timeout: Timeout | float | int | None = 3,
    json: typing.Any | None = None,
    multiplexed: bool = False,
) -> HTTPResponse:
    """
    A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
    Therefore, its side effects could be shared across dependencies relying on it.
    To avoid side effects create a new ``PoolManager`` instance and use it instead.
    The method does not accept low-level ``**urlopen_kw`` keyword arguments nor
    does it issue multiplexed/concurrent requests.
    """
    # ``multiplexed`` is accepted only for interface compatibility; the shared
    # module-global pool cannot honor it, so warn instead of silently ignoring.
    if multiplexed:
        warnings.warn(
            "Setting multiplexed=True in urllib3.request top-level function is a no-op. "
            "Use a local PoolManager or HTTPPoolConnection instead.",
            UserWarning,
        )
    request_kwargs = dict(
        body=body,
        fields=fields,
        headers=headers,
        preload_content=preload_content,
        decode_content=decode_content,
        redirect=redirect,
        retries=retries,
        timeout=timeout,
        json=json,
    )
    return _DEFAULT_POOL.request(method, url, **request_kwargs)
|
(method: str, url: str, *, body: Union[bytes, IO[Any], Iterable[bytes], Iterable[str], str, urllib3_future.backend._base.LowLevelResponse, urllib3_future.backend._async._base.AsyncLowLevelResponse, NoneType] = None, fields: Union[Sequence[Union[Tuple[str, Union[str, bytes, Tuple[str, Union[str, bytes]], Tuple[str, Union[str, bytes], str]]], urllib3_future.fields.RequestField]], Mapping[str, Union[str, bytes, Tuple[str, Union[str, bytes]], Tuple[str, Union[str, bytes], str]]], NoneType] = None, headers: Optional[Mapping[str, str]] = None, preload_content: bool | None = True, decode_content: bool | None = True, redirect: bool | None = True, retries: Union[urllib3_future.util.retry.Retry, bool, int, NoneType] = None, timeout: urllib3_future.util.timeout.Timeout | float | int | None = 3, json: Optional[Any] = None, multiplexed: bool = False) -> urllib3_future.response.HTTPResponse
|
710,319
|
names
|
get_first_name
| null |
def get_first_name(gender=None):
    """Return a random capitalized first name.

    ``gender`` must be ``'male'`` or ``'female'``; anything else (including
    None) is replaced by a random choice between the two.
    """
    valid_genders = ('male', 'female')
    if gender not in valid_genders:
        gender = random.choice(valid_genders)
    return get_name(FILES['first:%s' % gender]).capitalize()
|
(gender=None)
|
710,320
|
names
|
get_full_name
| null |
def get_full_name(gender=None):
    """Return a random "First Last" full name, optionally gender-constrained."""
    first = get_first_name(gender)
    last = get_last_name()
    return "{0} {1}".format(first, last)
|
(gender=None)
|
710,321
|
names
|
get_last_name
| null |
def get_last_name():
    # Pick a weighted-random last name from the frequency file and
    # capitalize it.
    return get_name(FILES['last']).capitalize()
|
()
|
710,322
|
names
|
get_name
| null |
def get_name(filename):
    """Pick a weighted-random name from a census-style frequency file.

    Each line is expected to hold four whitespace-separated fields:
    ``NAME frequency cumulative rank``.  A name is selected as soon as its
    cumulative frequency exceeds a uniform random draw in [0, 90).
    Returns "" when the file is empty (or no line passes the threshold).
    """
    threshold = random.random() * 90
    with open(filename) as name_file:
        for line in name_file:
            name, _, cumulative, _ = line.split()
            if float(cumulative) > threshold:
                return name
    return ""
|
(filename)
|
710,325
|
js2py.evaljs
|
EvalJs
|
This class supports continuous execution of javascript under same context.
>>> ctx = EvalJs()
>>> ctx.execute('var a = 10;function f(x) {return x*x};')
>>> ctx.f(9)
81
>>> ctx.a
10
context is a python dict or object that contains python variables that should be available to JavaScript
For example:
>>> ctx = EvalJs({'a': 30})
>>> ctx.execute('var x = a')
>>> ctx.x
30
You can enable JS require function via enable_require. With this feature enabled you can use js modules
from npm, for example:
>>> ctx = EvalJs(enable_require=True)
>>> ctx.execute("var esprima = require('esprima');")
>>> ctx.execute("esprima.parse('var a = 1')")
You can run interactive javascript console with console method!
|
class EvalJs(object):
    """This class supports continuous execution of javascript under same context.
    >>> ctx = EvalJs()
    >>> ctx.execute('var a = 10;function f(x) {return x*x};')
    >>> ctx.f(9)
    81
    >>> ctx.a
    10
    context is a python dict or object that contains python variables that should be available to JavaScript
    For example:
    >>> ctx = EvalJs({'a': 30})
    >>> ctx.execute('var x = a')
    >>> ctx.x
    30
    You can enable JS require function via enable_require. With this feature enabled you can use js modules
    from npm, for example:
    >>> ctx = EvalJs(enable_require=True)
    >>> ctx.execute("var esprima = require('esprima');")
    >>> ctx.execute("esprima.parse('var a = 1')")
    You can run interactive javascript console with console method!"""

    def __init__(self, context=None, enable_require=False):
        # Fix: the default used to be a shared mutable dict (``context={}``);
        # use None and create a fresh dict per instance instead.
        if context is None:
            context = {}
        self.__dict__['_context'] = {}
        # Bootstrap the JS global scope; DEFAULT_HEADER defines ``var``.
        exec(DEFAULT_HEADER, self._context)
        self.__dict__['_var'] = self._context['var'].to_python()
        if enable_require:
            def _js_require_impl(npm_module_name):
                from .node_import import require
                from .base import to_python
                return require(to_python(npm_module_name), context=self._context)
            setattr(self._var, 'require', _js_require_impl)
        if not isinstance(context, dict):
            try:
                context = context.__dict__
            # Fix: was a bare ``except:``; only a missing __dict__ is expected.
            except AttributeError:
                raise TypeError(
                    'context has to be either a dict or have __dict__ attr')
        # Expose the supplied Python variables to the JS scope.
        for k, v in six.iteritems(context):
            setattr(self._var, k, v)

    def execute(self, js=None, use_compilation_plan=False):
        """executes javascript js in current context
        During initial execute() the converted js is cached for re-use. That means next time you
        run the same javascript snippet you save many instructions needed to parse and convert the
        js code to python code.
        This cache causes minor overhead (a cache dicts is updated) but the Js=>Py conversion process
        is typically expensive compared to actually running the generated python code.
        Note that the cache is just a dict, it has no expiration or cleanup so when running this
        in automated situations with vast amounts of snippets it might increase memory usage.
        """
        try:
            cache = self.__dict__['cache']
        except KeyError:
            cache = self.__dict__['cache'] = {}
        # Key the cache on a digest of the JS source.
        hashkey = hashlib.md5(js.encode('utf-8')).digest()
        try:
            compiled = cache[hashkey]
        except KeyError:
            code = translate_js(
                js, '', use_compilation_plan=use_compilation_plan)
            compiled = cache[hashkey] = compile(code, '<EvalJS snippet>',
                                               'exec')
        exec(compiled, self._context)

    def eval(self, expression, use_compilation_plan=False):
        """evaluates expression in current context and returns its value"""
        # Route the result through a well-known JS variable, then read it back.
        code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
        self.execute(code, use_compilation_plan=use_compilation_plan)
        return self['PyJsEvalResult']

    def execute_debug(self, js):
        """executes javascript js in current context
        as opposed to the (faster) self.execute method, you can use your regular debugger
        to set breakpoints and inspect the generated python code
        """
        code = translate_js(js, '')
        # make sure you have a temp folder:
        filename = 'temp' + os.sep + '_' + hashlib.md5(
            code.encode("utf-8")).hexdigest() + '.py'
        # Fix: dropped the no-op ``except Exception as err: raise err`` clause;
        # cleanup still always runs via ``finally``.
        try:
            with open(filename, mode='w') as f:
                f.write(code)
            with open(filename, "r") as f:
                pyCode = compile(f.read(), filename, 'exec')
            exec(pyCode, self._context)
        finally:
            os.remove(filename)
            try:
                # A stale .pyc may or may not exist; ignore OS errors only
                # (this used to be a bare ``except:``).
                os.remove(filename + 'c')
            except OSError:
                pass

    def eval_debug(self, expression):
        """evaluates expression in current context and returns its value
        as opposed to the (faster) self.execute method, you can use your regular debugger
        to set breakpoints and inspect the generated python code
        """
        code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
        self.execute_debug(code)
        return self['PyJsEvalResult']

    @property
    def context(self):
        # Raw JS execution namespace (a plain dict).
        return self._context

    def __getattr__(self, var):
        # Delegate attribute access to the JS global scope wrapper.
        return getattr(self._var, var)

    def __getitem__(self, var):
        return getattr(self._var, var)

    def __setattr__(self, var, val):
        return setattr(self._var, var, val)

    def __setitem__(self, var, val):
        return setattr(self._var, var, val)

    def console(self):
        """starts to interact (starts interactive console) Something like code.InteractiveConsole"""
        while True:
            if six.PY2:
                code = raw_input('>>> ')
            else:
                code = input('>>>')
            try:
                print(self.eval(code))
            except KeyboardInterrupt:
                break
            except Exception as e:
                import traceback
                if DEBUG:
                    sys.stderr.write(traceback.format_exc())
                else:
                    sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
                time.sleep(0.01)
|
(context={}, enable_require=False)
|
710,326
|
js2py.evaljs
|
__getattr__
| null |
def __getattr__(self, var):
    # Delegate attribute access to the JS global scope wrapper (_var).
    return getattr(self._var, var)
|
(self, var)
|
710,327
|
js2py.evaljs
|
__getitem__
| null |
def __getitem__(self, var):
    # Subscript access is an alias for attribute access on the JS scope.
    return getattr(self._var, var)
|
(self, var)
|
710,328
|
js2py.evaljs
|
__init__
| null |
def __init__(self, context={}, enable_require=False):
    # NOTE(review): ``context={}`` is a mutable default argument; it is only
    # iterated here, never mutated, so it is benign — but worth confirming.
    self.__dict__['_context'] = {}
    # Bootstrap the JS global scope; DEFAULT_HEADER defines ``var``.
    exec (DEFAULT_HEADER, self._context)
    self.__dict__['_var'] = self._context['var'].to_python()
    if enable_require:
        def _js_require_impl(npm_module_name):
            from .node_import import require
            from .base import to_python
            return require(to_python(npm_module_name), context=self._context)
        setattr(self._var, 'require', _js_require_impl)
    if not isinstance(context, dict):
        try:
            context = context.__dict__
        # NOTE(review): bare except — catches more than the AttributeError a
        # missing __dict__ would raise.
        except:
            raise TypeError(
                'context has to be either a dict or have __dict__ attr')
    # Expose the supplied Python variables to the JS scope.
    for k, v in six.iteritems(context):
        setattr(self._var, k, v)
|
(self, context={}, enable_require=False)
|
710,329
|
js2py.evaljs
|
__setattr__
| null |
def __setattr__(self, var, val):
    # Assigning an attribute defines/overwrites a variable in the JS scope.
    return setattr(self._var, var, val)
|
(self, var, val)
|
710,330
|
js2py.evaljs
|
__setitem__
| null |
def __setitem__(self, var, val):
    # Dict-style assignment is an alias for attribute assignment on the JS scope.
    return setattr(self._var, var, val)
|
(self, var, val)
|
710,331
|
js2py.evaljs
|
console
|
starts to interact (starts interactive console) Something like code.InteractiveConsole
|
def console(self):
    """starts to interact (starts interactive console) Something like code.InteractiveConsole"""
    while True:
        # NOTE(review): prompt text differs between branches ('>>> ' vs '>>>')
        # -- presumably unintentional; confirm before unifying.
        if six.PY2:
            code = raw_input('>>> ')
        else:
            code = input('>>>')
        try:
            print(self.eval(code))
        except KeyboardInterrupt:
            # Ctrl-C ends the session.
            break
        except Exception as e:
            import traceback
            if DEBUG:
                sys.stderr.write(traceback.format_exc())
            else:
                sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
            # Small delay before showing the next prompt.
            time.sleep(0.01)
|
(self)
|
710,332
|
js2py.evaljs
|
eval
|
evaluates expression in current context and returns its value
|
def eval(self, expression, use_compilation_plan=False):
    """Evaluate *expression* in the current context and return its value."""
    # Wrap the expression so its result lands in a known JS variable.
    snippet = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
    self.execute(snippet, use_compilation_plan=use_compilation_plan)
    return self['PyJsEvalResult']
|
(self, expression, use_compilation_plan=False)
|
710,333
|
js2py.evaljs
|
eval_debug
|
evaluates expression in current context and returns its value
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
|
def eval_debug(self, expression):
    """evaluates expression in current context and returns its value

    as opposed to the (faster) self.execute method, you can use your regular debugger
    to set breakpoints and inspect the generated python code
    """
    # Wrap the expression so its result lands in a known JS variable.
    code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
    self.execute_debug(code)
    return self['PyJsEvalResult']
|
(self, expression)
|
710,334
|
js2py.evaljs
|
execute
|
executes javascript js in current context
During initial execute() the converted js is cached for re-use. That means next time you
run the same javascript snippet you save many instructions needed to parse and convert the
js code to python code.
This cache causes minor overhead (a cache dicts is updated) but the Js=>Py conversion process
is typically expensive compared to actually running the generated python code.
Note that the cache is just a dict, it has no expiration or cleanup so when running this
in automated situations with vast amounts of snippets it might increase memory usage.
|
def execute(self, js=None, use_compilation_plan=False):
    """executes javascript js in current context

    During initial execute() the converted js is cached for re-use. That means next time you
    run the same javascript snippet you save many instructions needed to parse and convert the
    js code to python code.

    This cache causes minor overhead (a cache dicts is updated) but the Js=>Py conversion process
    is typically expensive compared to actually running the generated python code.

    Note that the cache is just a dict, it has no expiration or cleanup so when running this
    in automated situations with vast amounts of snippets it might increase memory usage.
    """
    # __dict__ access on purpose: this class forwards normal attribute
    # access to the JS scope (see __getattr__/__setattr__).
    try:
        cache = self.__dict__['cache']
    except KeyError:
        cache = self.__dict__['cache'] = {}
    # Cache key is the md5 digest of the source text.
    hashkey = hashlib.md5(js.encode('utf-8')).digest()
    try:
        compiled = cache[hashkey]
    except KeyError:
        code = translate_js(
            js, '', use_compilation_plan=use_compilation_plan)
        compiled = cache[hashkey] = compile(code, '<EvalJS snippet>',
                                            'exec')
    exec (compiled, self._context)
|
(self, js=None, use_compilation_plan=False)
|
710,335
|
js2py.evaljs
|
execute_debug
|
executes javascript js in current context
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
|
def execute_debug(self, js):
    """Execute javascript *js* in the current context via a temp file.

    As opposed to the (faster) ``execute`` method, the generated python
    code is written to disk first, so you can use your regular debugger
    to set breakpoints in it and inspect the translation.

    NOTE: requires a ``temp`` directory to exist in the current working
    directory (pre-existing behavior).
    """
    code = translate_js(js, '')
    filename = 'temp' + os.sep + '_' + hashlib.md5(
        code.encode("utf-8")).hexdigest() + '.py'
    # No ``except`` clause: errors simply propagate after cleanup.  The
    # previous ``except Exception as err: raise err`` was a no-op re-raise.
    try:
        with open(filename, mode='w') as f:
            f.write(code)
        with open(filename, "r") as f:
            pyCode = compile(f.read(), filename, 'exec')
        exec(pyCode, self._context)
    finally:
        os.remove(filename)
        try:
            os.remove(filename + 'c')
        except OSError:
            # Best effort: the compiled '.pyc' twin may not exist.
            # (Narrowed from a bare ``except:``.)
            pass
|
(self, js)
|
710,336
|
js2py.internals.simplex
|
JsException
| null |
class JsException(Exception):
    """Python-side wrapper for a thrown javascript exception.

    Two construction modes:
      * new format: ``typ``/``message`` (error type name + message), or a raw
        thrown JS value via ``throw`` (mutually exclusive with typ/message);
      * old translator format: no arguments at all.
        NOTE(review): in that mode ``__str__`` reads ``self.mes``, which is
        never assigned here -- presumably attached by translator code
        elsewhere; confirm before relying on it.
    """

    def __init__(self, typ=None, message=None, throw=None):
        if typ is None and message is None and throw is None:
            # it means its the translator based error (old format), do nothing
            self._translator_based = True
        else:
            # ``throw`` carries a raw JS value and excludes typ/message.
            assert throw is None or (typ is None
                                     and message is None), (throw, typ,
                                                            message)
            self._translator_based = False
            self.typ = typ
            self.message = message
            self.throw = throw

    def get_thrown_value(self, space):
        """Return the thrown JS value, building an Error via *space* when only
        typ/message were supplied."""
        if self.throw is not None:
            return self.throw
        else:
            return space.NewError(self.typ, self.message)

    def __str__(self):
        """Render the exception the way JS would display it."""
        if self._translator_based:
            # Old format: thrown object expected on ``self.mes`` (see class note).
            if self.mes.Class == 'Error':
                return self.mes.callprop('toString').value
            else:
                return self.mes.to_string().value
        else:
            if self.throw is not None:
                from .conversions import to_string
                return to_string(self.throw)
            else:
                return self.typ + ': ' + self.message
|
(typ=None, message=None, throw=None)
|
710,337
|
js2py.internals.simplex
|
__init__
| null |
def __init__(self, typ=None, message=None, throw=None):
    """Accept either typ/message, a raw thrown value, or nothing (old format)."""
    if typ is None and message is None and throw is None:
        # it means its the translator based error (old format), do nothing
        self._translator_based = True
    else:
        # ``throw`` carries a raw JS value and excludes typ/message.
        assert throw is None or (typ is None
                                 and message is None), (throw, typ,
                                                        message)
        self._translator_based = False
        self.typ = typ
        self.message = message
        self.throw = throw
|
(self, typ=None, message=None, throw=None)
|
710,338
|
js2py.internals.simplex
|
__str__
| null |
def __str__(self):
    """Render the exception the way JS would display it."""
    if self._translator_based:
        # Old translator format: thrown object expected on ``self.mes``.
        # NOTE(review): ``mes`` is not set in __init__ -- presumably assigned
        # externally by translator code; confirm.
        if self.mes.Class == 'Error':
            return self.mes.callprop('toString').value
        else:
            return self.mes.to_string().value
    else:
        if self.throw is not None:
            from .conversions import to_string
            return to_string(self.throw)
        else:
            return self.typ + ': ' + self.message
|
(self)
|
710,339
|
js2py.internals.simplex
|
get_thrown_value
| null |
def get_thrown_value(self, space):
    """Return the thrown JS value, building an Error object from typ/message
    via *space* when no explicit value was thrown."""
    if self.throw is not None:
        return self.throw
    else:
        return space.NewError(self.typ, self.message)
|
(self, space)
|
710,341
|
js2py.evaljs
|
disable_pyimport
| null |
def disable_pyimport():
    """Globally disable the pyimport statement in the underlying JS parser."""
    import pyjsparser.parser
    pyjsparser.parser.ENABLE_PYIMPORT = False
|
()
|
710,343
|
js2py.evaljs
|
eval_js
|
Just like javascript eval. Translates javascript to python,
executes and returns python object.
js is javascript source code
EXAMPLE:
>>> import js2py
>>> add = js2py.eval_js('function add(a, b) {return a + b}')
>>> add(1, 2) + 3
6
>>> add('1', 2, 3)
u'12'
>>> add.constructor
function Function() { [python code] }
NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type.
For Js functions and objects, returns Python wrapper - basically behaves like normal python object.
If you really want to convert object to python dict you can use to_dict method.
|
def eval_js(js):
    """Translate javascript source *js* to python, run it, and return the result.

    Works just like javascript's own ``eval``.

    EXAMPLE:
    >>> import js2py
    >>> add = js2py.eval_js('function add(a, b) {return a + b}')
    >>> add(1, 2) + 3
    6
    >>> add('1', 2, 3)
    u'12'
    >>> add.constructor
    function Function() { [python code] }

    NOTE: JS Number, String, Boolean and other base types come back as the
    matching python builtins; JS functions and objects come back as python
    wrappers that behave like normal python objects (use ``to_dict`` when a
    plain dict is really needed).
    """
    interpreter = EvalJs()
    return interpreter.eval(js)
|
(js)
|
710,344
|
js2py.evaljs
|
eval_js6
|
Just like eval_js but with experimental support for js6 via babel.
|
def eval_js6(js):
    """Just like eval_js but with experimental support for js6 via babel."""
    # Downlevel ES6 source to ES5 first, then evaluate as usual.
    return eval_js(js6_to_js5(js))
|
(js)
|
710,346
|
js2py.evaljs
|
get_file_contents
| null |
def get_file_contents(path_or_file):
    """Return the text content of *path_or_file*.

    Accepts either a filesystem path or an already-open file-like object
    (anything with a ``read`` method).
    """
    if not hasattr(path_or_file, 'read'):
        with codecs.open(path_as_local(path_or_file), "r", "utf-8") as handle:
            return handle.read()
    return path_or_file.read()
|
(path_or_file)
|
710,347
|
js2py.evaljs
|
import_js
|
Imports from javascript source file.
globals is your globals()
|
def import_js(path, lib_name, globals):
    """Imports from javascript source file.

    Executes the file at *path* and binds its JS scope into *globals*
    (the caller's ``globals()``) under the name *lib_name*.
    """
    with codecs.open(path_as_local(path), "r", "utf-8") as source:
        js_code = source.read()
    interpreter = EvalJs()
    interpreter.execute(js_code)
    globals[lib_name] = interpreter.context['var'].to_python()
|
(path, lib_name, globals)
|
710,350
|
js2py.translators
|
parse
|
Returns syntax tree of javascript_code.
Syntax tree has the same structure as syntax tree produced by esprima.js
Same as PyJsParser().parse For your convenience :)
|
def parse(javascript_code):
    """Return the syntax tree of *javascript_code*.

    The tree has the same structure as the one produced by esprima.js.
    Convenience wrapper around ``PyJsParser().parse``.
    """
    return PyJsParser().parse(javascript_code)
|
(javascript_code)
|
710,352
|
js2py.node_import
|
require
|
Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and
finally translates the generated JS bundle to Python via Js2Py.
Returns a pure python object that behaves like the installed module. Nice!
:param module_name: Name of the npm module to require. For example 'esprima'. Supports specific versions via @
specification. Eg: 'crypto-js@3.3'.
:param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed
for some modules that use unsupported features of JS6 such as Map or typed arrays.
:param update: Whether to force update the translation. Otherwise uses a cached version if exists.
:param context: Optional context in which the translated module should be executed in. If provided, the
header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports.
:return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object.
|
def require(module_name, include_polyfill=True, update=False, context=None):
    """
    Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and
    finally translates the generated JS bundle to Python via Js2Py.
    Returns a pure python object that behaves like the installed module. Nice!

    :param module_name: Name of the npm module to require. For example 'esprima'. Supports specific versions via @
        specification. Eg: 'crypto-js@3.3'.
    :param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed
        for some modules that use unsupported features of JS6 such as Map or typed arrays.
    :param update: Whether to force update the translation. Otherwise uses a cached version if exists.
    :param context: Optional context in which the translated module should be executed in. If provided, the
        header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports.

    :return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object.
    """
    # Pad with '@@@' so the split always yields at least two fields;
    # maybe_version is '' when no '@version' suffix was given.
    module_name, maybe_version = (module_name+"@@@").split('@')[:2]
    py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=update,
                                            maybe_version_str=maybe_version)
    # this is a bit hacky but we need to strip the default header from the generated code...
    if context is not None:
        if not py_code.startswith(DEFAULT_HEADER):
            # new header version? retranslate...
            assert not update, "Unexpected header."
            # NOTE(review): this retranslation call omits maybe_version_str,
            # so a pinned version is lost on a stale cached header -- confirm
            # whether that is intended.
            py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=True)
        assert py_code.startswith(DEFAULT_HEADER), "Unexpected header."
        py_code = py_code[len(DEFAULT_HEADER):]
    context = {} if context is None else context
    exec(py_code, context)
    return context['var'][_get_module_var_name(module_name)].to_py()
|
(module_name, include_polyfill=True, update=False, context=None)
|
710,353
|
js2py.evaljs
|
run_file
|
Context must be EvalJS object. Runs given path as a JS program. Returns (eval_value, context).
|
def run_file(path_or_file, context=None):
    """Run *path_or_file* as a JS program. Returns (eval_value, context).

    *context* must be an ``EvalJs`` instance; a fresh one is created when
    None is given.
    """
    if context is None:
        context = EvalJs()
    elif not isinstance(context, EvalJs):
        raise TypeError('context must be the instance of EvalJs')
    return context.eval(get_file_contents(path_or_file)), context
|
(path_or_file, context=None)
|
710,354
|
js2py.evaljs
|
translate_file
|
Translates input JS file to python and saves the it to the output path.
It appends some convenience code at the end so that it is easy to import JS objects.
For example we have a file 'example.js' with: var a = function(x) {return x}
translate_file('example.js', 'example.py')
Now example.py can be easily imported and used:
>>> from example import example
>>> example.a(30)
30
|
def translate_file(input_path, output_path):
    """Translate the JS file at *input_path* to python and save it to *output_path*.

    Convenience code is appended at the end so the translated JS objects are
    easy to import.  E.g. given 'example.js' containing
    ``var a = function(x) {return x}``:

        translate_file('example.js', 'example.py')

    Now example.py can be easily imported and used:

    >>> from example import example
    >>> example.a(30)
    30
    """
    py_code = translate_js(get_file_contents(input_path))
    lib_name = os.path.basename(output_path).split('.')[0]
    head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
        lib_name)
    tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
    write_file_contents(output_path, head + py_code + tail)
|
(input_path, output_path)
|
710,355
|
js2py.translators.translator
|
translate_js
|
js has to be a javascript source code.
returns equivalent python code.
|
def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False, parse_fn=pyjsparser_parse_fn):
    """js has to be a javascript source code.
       returns equivalent python code."""
    # The compilation-plan fast path is only taken for comment-free source.
    if use_compilation_plan and not '//' in js and not '/*' in js:
        return translate_js_with_compilation_plan(js, HEADER=HEADER)
    parsed = parse_fn(js)
    translating_nodes.clean_stacks()
    return HEADER + translating_nodes.trans(
        parsed)  # syntax tree to python code
|
(js, HEADER='from js2py.pyjs import *\n# setting scope\nvar = Scope( JS_BUILTINS )\nset_global_object(var)\n\n# Code follows:\n', use_compilation_plan=False, parse_fn=<function pyjsparser_parse_fn at 0x7f61f97e16c0>)
|
710,356
|
js2py.evaljs
|
translate_js6
|
Just like translate_js but with experimental support for js6 via babel.
|
def translate_js6(js):
    """Just like translate_js but with experimental support for js6 via babel."""
    # Downlevel ES6 source to ES5 first, then translate as usual.
    return translate_js(js6_to_js5(js))
|
(js)
|
710,359
|
js2py.evaljs
|
write_file_contents
| null |
def write_file_contents(path_or_file, contents):
    """Write *contents* to *path_or_file*.

    Accepts either a filesystem path or an already-open file-like object
    (anything with a ``write`` method).
    """
    if not hasattr(path_or_file, 'write'):
        with codecs.open(path_as_local(path_or_file), "w", "utf-8") as handle:
            handle.write(contents)
    else:
        path_or_file.write(contents)
|
(path_or_file, contents)
|
710,362
|
riotwatcher.LolWatcher
|
LolWatcher
|
LolWatcher class is intended to be the main interaction point with the APIs for
League of Legends.
|
class LolWatcher:
    """
    LolWatcher class is intended to be the main interaction point with the APIs for
    League of Legends.
    """

    def __init__(
        self,
        api_key: str = None,
        timeout: int = None,
        kernel_url: str = None,
        rate_limiter: RateLimiter = None,
        deserializer: Deserializer = None,
        default_status_v4: bool = False,
        **kwargs,
    ):
        """
        Initialize a new instance of the RiotWatcher class.

        :param string api_key: the API key to use for this instance
        :param int timeout: Time to wait for a response before timing out a connection
                            to the Riot API
        :param string kernel_url: URL for the kernel instance to connect to, instead of
                                  the API. See
                                  https://github.com/meraki-analytics/kernel for
                                  details.
        :param RateLimiter rate_limiter: Instance to be used for rate limiting.
                                         This defaults to
                                         Handlers.RateLimit.BasicRateLimiter.
                                         This parameter is not used when connecting to
                                         a kernel instance.
        :param Deserializer deserializer: Instance to be used to deserialize responses
                                          from the Riot Api. Default is
                                          Handlers.DictionaryDeserializer.
        """
        if not kernel_url and not api_key:
            raise ValueError("Either api_key or kernel_url must be set!")
        # Build default handler instances per LolWatcher rather than in the
        # signature: default argument values are evaluated once at class
        # definition time, so a ``BasicRateLimiter()`` default would share
        # its rate-limit state between every instance created without an
        # explicit limiter.
        if rate_limiter is None:
            rate_limiter = BasicRateLimiter()
        if deserializer is None:
            deserializer = DictionaryDeserializer()

        if kernel_url:
            # kernel performs its own rate limiting, so no RateLimiterAdapter.
            handler_chain = [
                SanitationHandler(),
                DeserializerAdapter(deserializer),
                ThrowOnErrorHandler(),
                TypeCorrectorHandler(),
                DeprecationHandler(),
            ]
            UrlConfig.root_url = kernel_url
        else:
            handler_chain = [
                SanitationHandler(),
                DeserializerAdapter(deserializer),
                ThrowOnErrorHandler(),
                TypeCorrectorHandler(),
                RateLimiterAdapter(rate_limiter),
                DeprecationHandler(),
            ]
            UrlConfig.root_url = "https://{platform}.api.riotgames.com"

        self._base_api = BaseApi(api_key, handler_chain, timeout=timeout)

        self._champion = ChampionApiV3(self._base_api)
        self._lol_status_v3 = LolStatusApiV3(self._base_api)
        self._lol_status_v4 = LolStatusApiV4(self._base_api)
        self._data_dragon = DataDragonApi(self._base_api)
        self._clash = ClashApiV1(self._base_api)
        self._champion_mastery = ChampionMasteryApiV4(self._base_api)
        self._league = LeagueApiV4(self._base_api)
        self._match = MatchApiV5(self._base_api)
        self._spectator = SpectatorApiV5(self._base_api)
        self._challenges = ChallengesApiV1(self._base_api)
        self._summoner = SummonerApiV4(self._base_api)
        self._lol_status = (
            self._lol_status_v4 if default_status_v4 else self._lol_status_v3
        )
        # todo: tournament-stub
        # todo: tournament

        if "default_match_v5" in kwargs:
            LOG.warning(
                "property 'default_match_v5' has been deprecated and can be removed"
            )

    @property
    def champion_mastery(self) -> ChampionMasteryApiV4:
        """
        Interface to the ChampionMastery Endpoint

        :rtype: league_of_legends.ChampionMasteryApiV4
        """
        return self._champion_mastery

    @property
    def champion(self) -> ChampionApiV3:
        """
        Interface to the Champion Endpoint

        :rtype: league_of_legends.ChampionApiV3
        """
        return self._champion

    @property
    def clash(self) -> ClashApiV1:
        """
        Interface to the Clash Endpoint

        :rtype: league_of_legends.ClashApiV1
        """
        return self._clash

    @property
    def league(self) -> LeagueApiV4:
        """
        Interface to the League Endpoint

        :rtype: league_of_legends.LeagueApiV4
        """
        return self._league

    @property
    def lol_status(self) -> Union[LolStatusApiV3, LolStatusApiV4]:
        """
        Interface to the LoLStatus Endpoint (v3 or v4, per default_status_v4)

        :rtype: Union[league_of_legends.LolStatusApiV3, league_of_legends.LolStatusApiV4]
        """
        return self._lol_status

    @property
    def lol_status_v3(self) -> LolStatusApiV3:
        """
        Interface to the LoLStatus Endpoint

        :rtype: league_of_legends.LolStatusApiV3
        """
        return self._lol_status_v3

    @property
    def lol_status_v4(self) -> LolStatusApiV4:
        """
        Interface to the LoLStatus Endpoint

        :rtype: league_of_legends.LolStatusApiV4
        """
        return self._lol_status_v4

    @property
    def match(self) -> MatchApiV5:
        """
        Interface to the Match Endpoint

        :rtype: league_of_legends.MatchApiV5
        """
        return self._match

    @property
    def match_v4(self):
        """
        This property has been deprecated. Use 'match' property instead.
        Note that v4 is now permanently removed by Riot
        """
        raise NotImplementedError(
            "this property has been deprecated. Use 'match' property instead. Note "
            + "that v4 is now permanently removed by Riot"
        )

    @property
    def match_v5(self):
        """this property has been deprecated. Use 'match' property instead."""
        raise NotImplementedError(
            "this property has been deprecated. Use 'match' property instead."
        )

    @property
    def spectator(self) -> SpectatorApiV5:
        """
        Interface to the Spectator Endpoint

        :rtype: league_of_legends.SpectatorApiV5
        """
        return self._spectator

    @property
    def data_dragon(self) -> DataDragonApi:
        """
        Interface to the DataDragon Endpoint

        :rtype: league_of_legends.DataDragonApi
        """
        return self._data_dragon

    @property
    def summoner(self) -> SummonerApiV4:
        """
        Interface to the Summoner Endpoint

        :rtype: league_of_legends.SummonerApiV4
        """
        return self._summoner

    @property
    def third_party_code(self) -> None:
        """
        DEPRECATED: API has been removed by Riot
        """
        raise NotImplementedError(
            "API has been removed by Riot and no longer functions"
        )

    @property
    def challenges(self) -> ChallengesApiV1:
        """
        Interface to the Challenges Endpoint

        :rtype: league_of_legends.ChallengesApiV1
        """
        return self._challenges
|
(api_key: str = None, timeout: int = None, kernel_url: str = None, rate_limiter: riotwatcher.RateLimiter.RateLimiter = <riotwatcher.Handlers.RateLimit.BasicRateLimiter.BasicRateLimiter object at 0x7f89e048f9d0>, deserializer: riotwatcher.Deserializer.Deserializer = <riotwatcher.Handlers.DictionaryDeserializer.DictionaryDeserializer object at 0x7f89e0454bb0>, default_status_v4: bool = False, **kwargs)
|
710,363
|
riotwatcher.LolWatcher
|
__init__
|
Initialize a new instance of the RiotWatcher class.
:param string api_key: the API key to use for this instance
:param int timeout: Time to wait for a response before timing out a connection
to the Riot API
:param string kernel_url: URL for the kernel instance to connect to, instead of
the API. See
https://github.com/meraki-analytics/kernel for
details.
:param RateLimiter rate_limiter: Instance to be used for rate limiting.
This defaults to
Handlers.RateLimit.BasicRateLimiter.
This parameter is not used when connecting to
a kernel instance.
:param Deserializer deserializer: Instance to be used to deserialize responses
from the Riot Api. Default is
Handlers.DictionaryDeserializer.
|
def __init__(
    self,
    api_key: str = None,
    timeout: int = None,
    kernel_url: str = None,
    rate_limiter: RateLimiter = BasicRateLimiter(),
    deserializer: Deserializer = DictionaryDeserializer(),
    default_status_v4: bool = False,
    **kwargs,
):
    """
    Initialize a new instance of the RiotWatcher class.

    :param string api_key: the API key to use for this instance
    :param int timeout: Time to wait for a response before timing out a connection
                        to the Riot API
    :param string kernel_url: URL for the kernel instance to connect to, instead of
                              the API. See
                              https://github.com/meraki-analytics/kernel for
                              details.
    :param RateLimiter rate_limiter: Instance to be used for rate limiting.
                                     This defaults to
                                     Handlers.RateLimit.BasicRateLimiter.
                                     This parameter is not used when connecting to
                                     a kernel instance.
    :param Deserializer deserializer: Instance to be used to deserialize responses
                                      from the Riot Api. Default is
                                      Handlers.DictionaryDeserializer.
    """
    # NOTE(review): the rate_limiter/deserializer defaults are evaluated once
    # at definition time and therefore SHARED by every instance that does not
    # pass its own -- confirm BasicRateLimiter state is safe to share.
    if not kernel_url and not api_key:
        raise ValueError("Either api_key or kernel_url must be set!")

    if kernel_url:
        # kernel performs its own rate limiting: no RateLimiterAdapter here.
        handler_chain = [
            SanitationHandler(),
            DeserializerAdapter(deserializer),
            ThrowOnErrorHandler(),
            TypeCorrectorHandler(),
            DeprecationHandler(),
        ]
    else:
        handler_chain = [
            SanitationHandler(),
            DeserializerAdapter(deserializer),
            ThrowOnErrorHandler(),
            TypeCorrectorHandler(),
            RateLimiterAdapter(rate_limiter),
            DeprecationHandler(),
        ]

    if kernel_url:
        UrlConfig.root_url = kernel_url
    else:
        UrlConfig.root_url = "https://{platform}.api.riotgames.com"

    self._base_api = BaseApi(api_key, handler_chain, timeout=timeout)

    self._champion = ChampionApiV3(self._base_api)
    self._lol_status_v3 = LolStatusApiV3(self._base_api)
    self._lol_status_v4 = LolStatusApiV4(self._base_api)
    self._data_dragon = DataDragonApi(self._base_api)
    self._clash = ClashApiV1(self._base_api)
    self._champion_mastery = ChampionMasteryApiV4(self._base_api)
    self._league = LeagueApiV4(self._base_api)
    self._match = MatchApiV5(self._base_api)
    self._spectator = SpectatorApiV5(self._base_api)
    self._challenges = ChallengesApiV1(self._base_api)
    self._summoner = SummonerApiV4(self._base_api)
    self._lol_status = (
        self._lol_status_v4 if default_status_v4 else self._lol_status_v3
    )
    # todo: tournament-stub
    # todo: tournament

    if "default_match_v5" in kwargs:
        LOG.warning(
            "property 'default_match_v5' has been deprecated and can be removed"
        )
|
(self, api_key: Optional[str] = None, timeout: Optional[int] = None, kernel_url: Optional[str] = None, rate_limiter: riotwatcher.RateLimiter.RateLimiter = <riotwatcher.Handlers.RateLimit.BasicRateLimiter.BasicRateLimiter object at 0x7f89e048f9d0>, deserializer: riotwatcher.Deserializer.Deserializer = <riotwatcher.Handlers.DictionaryDeserializer.DictionaryDeserializer object at 0x7f89e0454bb0>, default_status_v4: bool = False, **kwargs)
|
710,364
|
leaguelosestreakmeter
|
losestreak
| null |
def losestreak(token, region, summoner_name):
    """Return the current ranked-solo (queue 420) losing streak for *summoner_name*.

    Walks the summoner's fetched match history from most recent backwards,
    counting losses until the first win is found.

    :param token: Riot API key.
    :param region: platform region (e.g. 'euw1').
    :param summoner_name: summoner to look up.
    :raises Exception: on rate limiting (429, retry-after seconds attached),
        unknown summoner (404), other API errors, or when a match's win/loss
        state cannot be determined.
    """
    lol_watcher = LolWatcher(token)
    try:
        summoner = lol_watcher.summoner.by_name(region, summoner_name)
    except ApiError as err:
        if err.response.status_code == 429:
            raise Exception("you should retry in {} seconds".format(
                err.response.headers["Retry-After"]), err.response.headers["Retry-After"])
        elif err.response.status_code == 404:
            raise Exception("summoner with that ridiculous name not found")
        else:
            raise Exception("unknow summoner fetching error ocurred")
    matches = lol_watcher.match.matchlist_by_account(
        region, summoner["accountId"], queue=420)
    losestreak = 0
    for match in matches["matches"]:
        champion_id = match["champion"]
        true_match = lol_watcher.match.by_id(region, match["gameId"])
        # Identify the summoner's team via the champion they played.
        victim = None
        for participant in true_match["participants"]:
            if participant["championId"] == champion_id:
                victim = participant["teamId"]
                break
        if victim is None:
            # Fix: previously ``victim`` could be unbound (NameError) when no
            # participant matched the champion id.
            raise Exception("lose detection failed")
        for team in true_match["teams"]:
            if team["teamId"] == victim:
                if team["win"] == "Fail":
                    losestreak += 1
                elif team["win"] == "Win":
                    return losestreak
                else:
                    raise Exception("lose detection failed")
    # Fix: previously fell off the end (returning None) when every fetched
    # match was a loss; the streak spans the whole visible history.
    return losestreak
|
(token, region, summoner_name)
|
710,366
|
offtheface.offtheface
|
ping
|
Example function with PEP 484 type annotations.
Returns:
Prints to the screen; returns nothing.
|
def ping():
    """Example function with PEP 484 type annotations.

    Prints 'offtheface', prefixed with N extra 'o' characters when an
    integer N is supplied as the first command-line argument.

    Returns:
        None -- output goes to stdout.
    """
    extra_args = sys.argv[1:]
    if extra_args:
        count = int(extra_args[0])
        print("o" * count + "fftheface")
    else:
        print('offtheface')
|
()
|
710,372
|
xer_reader.src.table
|
Table
|
A class representing a P6 table
|
class Table:
"""A class representing a P6 table"""
depends: list[str]
description: str
key: str | None
def __init__(
self, name: str, labels: list[str], entries: list[dict[str, str]]
) -> None:
self.name: str = name
self.entries: list[dict[str, str]] = entries
self.labels: list[str] = labels
try:
self.description = table_data[name]["description"]
self.key = table_data[name]["key"]
self.depends = table_data[name]["depends"]
except KeyError:
self.description = ""
self.key = None
self.depends = []
def __bool___(self) -> bool:
return len(self.entries) > 0
def __len__(self) -> int:
return len(self.entries)
def __str__(self) -> str:
return self.name
@property
def values(self) -> list[list[str]]:
return [list(entry.values()) for entry in self.entries]
|
(name: str, labels: list[str], entries: list[dict[str, str]]) -> None
|
710,373
|
xer_reader.src.table
|
__bool___
| null |
def __bool___(self) -> bool:
    # NOTE(review): the name has THREE trailing underscores, so Python does
    # not treat this as the __bool__ dunder; truth-testing instances falls
    # back to the default (always True) and this method is never invoked
    # implicitly.
    return len(self.entries) > 0
|
(self) -> bool
|
710,374
|
xer_reader.src.table
|
__init__
| null |
def __init__(
    self, name: str, labels: list[str], entries: list[dict[str, str]]
) -> None:
    """Store table identity and rows, then look up static P6 metadata."""
    self.name: str = name
    self.entries: list[dict[str, str]] = entries
    self.labels: list[str] = labels
    try:
        self.description = table_data[name]["description"]
        self.key = table_data[name]["key"]
        self.depends = table_data[name]["depends"]
    except KeyError:
        # Unknown table name: fall back to empty metadata.
        self.description = ""
        self.key = None
        self.depends = []
|
(self, name: str, labels: list[str], entries: list[dict[str, str]]) -> NoneType
|
710,375
|
xer_reader.src.table
|
__len__
| null |
def __len__(self) -> int:
    # Number of data rows in the table.
    return len(self.entries)
|
(self) -> int
|
710,377
|
xer_reader.src.reader
|
XerReader
|
Open a XER file exported from Primavera P6 and read its contents.
|
class XerReader:
"""Open a XER file exported from Primavera P6 and read its contents."""
CODEC = "cp1252"
file_name: str
"""XER file name"""
data: str
"""XER file data as tab seperated text"""
def __init__(self, file: str | Path | BinaryIO) -> None:
self.file_name, self.data = _read_file(file)
_file_info = _parse_file_info(self.data)
self.currency: str = _file_info[7]
"""(str) Currency type set in P6"""
self.export_version: str = _file_info[0]
"""(str) P6 Version"""
self.export_date: datetime = datetime.strptime(_file_info[1], DATE_FORMAT)
"""(datetime) Date the XER file was exported"""
self.export_user: str = _file_info[4]
"""(str) P6 user name that exported the XER file"""
def check_errors(self) -> list[str]:
"""Check XER file for missing tables and orphan data
Returns:
list[str]: Descriptions of missing information
"""
errors = set()
id_map = {
data["key"]: table for table, data in table_data.items() if data["key"]
}
tables = self.parse_tables()
# Check for minimum tables required to be in the XER
for name in REQUIRED_TABLES:
if name not in tables:
errors.add(f"Missing Required Table {name}")
# Check for required table pairs
for table in tables.values():
for table2 in table.depends:
if table2 not in tables:
errors.add(f"Missing Table {table2} Required for Table {table}")
for row in table.entries:
for key, val in row.items():
if val == "":
continue
if not key.endswith("_id"):
continue
if key == "parent_wbs_id" and row["proj_node_flag"] == "Y":
continue
clean_key = key if key in id_map else _clean_foreign_key_label(key)
if clean_key:
if check_table := tables.get(id_map[clean_key]):
for entry in check_table.entries:
if entry.get(clean_key, "") == val:
break
else:
errors.add(
f"Orphan data {key} [{val}] in table {table}"
)
return list(errors)
def delete_tables(self, *table_names: str) -> str:
"""
Delete tables from XER file.
Does not modify `XerReader.data` attribute, but returns a new string.
Args:
*table_names (str): table names to remove from XER file
Returns:
str: XER File String with tables removed
"""
if not table_names:
raise ValueError("Must pass at least one table name")
rev_data = self.data
for name in table_names:
table_search = re.compile(rf"%T\t{name.upper()}\n(.|\s)*?(?=%T|%E)")
rev_data = table_search.sub("", rev_data)
return rev_data
def get_table_names(self) -> list[str]:
"""Get list of table names included in the XER file.
Returns:
list[str]: list of table names
"""
table_names = re.compile(r"(?<=%T\t)[A-Z]+")
return table_names.findall(self.data)
def get_table_str(self, table_name: str) -> str:
"""Get string for a specific table in the XER file.
Args:
table_name (str): Name of table
Returns:
str: Table header and rows
"""
re_search = re.compile(rf"(?<=%T\t{table_name.upper()}\n)(.|\s)*?(?=%T|%E)")
if found_table := re_search.search(self.data):
return re.sub(r"%[TFR]\t", "", found_table.group())
return ""
def has_table(self, table_name: str) -> bool:
"""Check if a table is included in the XER file.
Args:
table_name (str): table name
Returns:
bool: True if found; False if not found
"""
return f"%T\t{table_name.upper()}" in self.data
def parse_tables(self) -> dict[str, XerTable]:
"""
Parse tables into a dictionary with the table name as the key
and a `Table` object as the value.
Returns:
dict[str, Table]: dict of XER Tables
"""
tables = {}
for table_str in self.data.split("%T\t")[1:]:
name, table = _parse_table(table_str)
tables[name] = table
return tables
def to_csv(self, file_directory: str | Path = Path.cwd()) -> None:
"""
Generate a CSV file for each table in the XER file.
Uses `tab` as the delimiter.
Args:
file_directory (str | Path, optional): Directory to save CSV files.
Defaults to current working directory.
"""
for table in self.parse_tables().values():
_write_table_to_csv(
f"{self.file_name}_{table.name}", table, Path(file_directory)
)
def to_excel(self) -> None:
    """
    Generate an Excel file with each table in the XER file on a separate worksheet.
    """
    workbook = Workbook()
    # The active (first) sheet carries the ERMHDR file-info row.
    info_sheet = workbook.active
    info_sheet.title = "ERMHDR"
    info_sheet.append(_parse_file_info(self.data))
    for name, table in self.parse_tables().items():
        sheet = workbook.create_sheet(name)
        sheet.append(table.labels)
        for entry in table.entries:
            sheet.append(list(entry.values()))
        # Register the populated range as a named Excel table.
        sheet.add_table(Table(displayName=name, ref=sheet.calculate_dimension()))
    workbook.save(f"{self.file_name}.xlsx")
def to_json(self, *tables: str) -> str:
    """Generate a json compliant string representation of tables in the XER file

    Returns:
        str: json compliant string representation of XER tables
    """
    # No table names passed means every parsed table is included.
    selected = {
        name: _entry_by_key(table)
        for name, table in self.parse_tables().items()
        if not tables or name in tables
    }
    return json.dumps({self.file_name: {**selected}}, indent=2)
|
(file: str | pathlib.Path | typing.BinaryIO) -> None
|
710,378
|
xer_reader.src.reader
|
__init__
| null |
def __init__(self, file: str | Path | BinaryIO) -> None:
    """Read an XER file and expose its ERMHDR export metadata."""
    self.file_name, self.data = _read_file(file)
    # ERMHDR field positions: 0=version, 1=export date, 4=user, 7=currency.
    header = _parse_file_info(self.data)
    self.currency: str = header[7]  # Currency type set in P6
    self.export_version: str = header[0]  # P6 Version
    self.export_date: datetime = datetime.strptime(header[1], DATE_FORMAT)  # Date the XER file was exported
    self.export_user: str = header[4]  # P6 user name that exported the XER file
|
(self, file: str | pathlib.Path | typing.BinaryIO) -> NoneType
|
710,379
|
xer_reader.src.reader
|
check_errors
|
Check XER file for missing tables and orphan data
Returns:
list[str]: Descriptions of missing information
|
def check_errors(self) -> list[str]:
    """Check XER file for missing tables and orphan data
    Returns:
        list[str]: Descriptions of missing information
    """
    # Use a set so identical findings are reported only once.
    errors = set()
    # Map foreign-key field name -> owning table name, built from the
    # module-level table_data registry (tables without a key are skipped).
    id_map = {
        data["key"]: table for table, data in table_data.items() if data["key"]
    }
    tables = self.parse_tables()
    # Check for minimum tables required to be in the XER
    for name in REQUIRED_TABLES:
        if name not in tables:
            errors.add(f"Missing Required Table {name}")
    # Check for required table pairs
    for table in tables.values():
        for table2 in table.depends:
            if table2 not in tables:
                errors.add(f"Missing Table {table2} Required for Table {table}")
        # Scan every *_id column value for orphans (no matching row in the
        # table that owns that key).
        for row in table.entries:
            for key, val in row.items():
                if val == "":
                    continue
                if not key.endswith("_id"):
                    continue
                # A project-level WBS node legitimately has no parent WBS.
                if key == "parent_wbs_id" and row["proj_node_flag"] == "Y":
                    continue
                # NOTE(review): assumes _clean_foreign_key_label returns either
                # a key present in id_map or a falsy value — if it can return
                # an unknown key, id_map[clean_key] below raises KeyError; confirm.
                clean_key = key if key in id_map else _clean_foreign_key_label(key)
                if clean_key:
                    if check_table := tables.get(id_map[clean_key]):
                        # for/else: the else runs only when no entry matched.
                        for entry in check_table.entries:
                            if entry.get(clean_key, "") == val:
                                break
                        else:
                            errors.add(
                                f"Orphan data {key} [{val}] in table {table}"
                            )
    return list(errors)
|
(self) -> list[str]
|
710,380
|
xer_reader.src.reader
|
delete_tables
|
Delete tables from XER file.
Does not modify `XerReader.data` attribute, but returns a new string.
Args:
*table_names (str): table names to remove from XER file
Returns:
str: XER File String with tables removed
|
def delete_tables(self, *table_names: str) -> str:
    """
    Delete tables from XER file.

    Does not modify `XerReader.data` attribute, but returns a new string.

    Args:
        *table_names (str): table names to remove from XER file

    Raises:
        ValueError: if no table names are passed

    Returns:
        str: XER File String with tables removed
    """
    if not table_names:
        raise ValueError("Must pass at least one table name")
    rev_data = self.data
    for name in table_names:
        # `[\s\S]*?` lazily spans newlines with no capture group, avoiding the
        # pathological backtracking of the original `(.|\s)*?` idiom.
        table_search = re.compile(rf"%T\t{name.upper()}\n[\s\S]*?(?=%T|%E)")
        rev_data = table_search.sub("", rev_data)
    return rev_data
|
(self, *table_names: str) -> str
|
710,381
|
xer_reader.src.reader
|
get_table_names
|
Get list of table names included in the XER file.
Returns:
list[str]: list of table names
|
def get_table_names(self) -> list[str]:
    """Get list of table names included in the XER file.

    Returns:
        list[str]: list of table names
    """
    # Table names follow the "%T<tab>" marker that opens each table section.
    return re.findall(r"(?<=%T\t)[A-Z]+", self.data)
|
(self) -> list[str]
|
710,382
|
xer_reader.src.reader
|
get_table_str
|
Get string for a specific table in the XER file.
Args:
table_name (str): Name of table
Returns:
str: Table header and rows
|
def get_table_str(self, table_name: str) -> str:
    """Get string for a specific table in the XER file.

    Args:
        table_name (str): Name of table

    Returns:
        str: Table header and rows; empty string when the table is missing
    """
    # Non-capturing `[\s\S]*?` matches the same span as `(.|\s)*?` (any char,
    # newlines included) without catastrophic backtracking or a capture group.
    pattern = rf"(?<=%T\t{table_name.upper()}\n)[\s\S]*?(?=%T|%E)"
    found_table = re.search(pattern, self.data)
    if found_table is None:
        return ""
    # Drop the %F (labels) / %R (row) prefixes from each matched line.
    return re.sub(r"%[TFR]\t", "", found_table.group())
|
(self, table_name: str) -> str
|
710,383
|
xer_reader.src.reader
|
has_table
|
Check if a table is included in the XER file.
Args:
table_name (str): table name
Returns:
bool: True if found; False if not found
|
def has_table(self, table_name: str) -> bool:
    """Check if a table is included in the XER file.

    Args:
        table_name (str): table name

    Returns:
        bool: True if found; False if not found
    """
    # Search for the table's "%T<tab>NAME" marker (names are upper case).
    needle = f"%T\t{table_name.upper()}"
    return needle in self.data
|
(self, table_name: str) -> bool
|
710,384
|
xer_reader.src.reader
|
parse_tables
|
Parse tables into a dictionary with the table name as the key
and a `Table` object as the value.
Returns:
dict[str, Table]: dict of XER Tables
|
def parse_tables(self) -> dict[str, XerTable]:
    """
    Parse tables into a dictionary with the table name as the key
    and a `Table` object as the value.

    Returns:
        dict[str, Table]: dict of XER Tables
    """
    parsed: dict[str, XerTable] = {}
    # Skip element 0 of the split: it is the ERMHDR header, not a table.
    for chunk in self.data.split("%T\t")[1:]:
        name, table = _parse_table(chunk)
        parsed[name] = table
    return parsed
|
(self) -> dict[str, xer_reader.src.table.Table]
|
710,385
|
xer_reader.src.reader
|
to_csv
|
Generate a CSV file for each table in the XER file.
Uses `tab` as the delimiter.
Args:
file_directory (str | Path, optional): Directory to save CSV files.
Defaults to current working directory.
|
def to_csv(self, file_directory: str | Path = Path.cwd()) -> None:
"""
Generate a CSV file for each table in the XER file.
Uses `tab` as the delimiter.
Args:
file_directory (str | Path, optional): Directory to save CSV files.
Defaults to current working directory.
"""
for table in self.parse_tables().values():
_write_table_to_csv(
f"{self.file_name}_{table.name}", table, Path(file_directory)
)
|
(self, file_directory: str | pathlib.Path = PosixPath('/app')) -> NoneType
|
710,386
|
xer_reader.src.reader
|
to_excel
|
Generate an Excel file with each table in the XER file on a separate worksheet.
|
def to_excel(self) -> None:
    """
    Generate an Excel file with each table in the XER file on a separate worksheet.
    """
    book = Workbook()
    # First (active) worksheet holds the ERMHDR file-info values.
    first_sheet = book.active
    first_sheet.title = "ERMHDR"
    first_sheet.append(_parse_file_info(self.data))
    for table_name, xer_table in self.parse_tables().items():
        worksheet = book.create_sheet(table_name)
        worksheet.append(xer_table.labels)
        for entry in xer_table.entries:
            worksheet.append(list(entry.values()))
        # Wrap the written range in a named Excel table object.
        excel_table = Table(displayName=table_name, ref=worksheet.calculate_dimension())
        worksheet.add_table(excel_table)
    book.save(f"{self.file_name}.xlsx")
|
(self) -> NoneType
|
710,387
|
xer_reader.src.reader
|
to_json
|
Generate a json compliant string representation of tables in the XER file
Returns:
str: json compliant string representation of XER tables
|
def to_json(self, *tables: str) -> str:
    """Generate a json compliant string representation of tables in the XER file

    Returns:
        str: json compliant string representation of XER tables
    """
    wanted = set(tables)
    out_data = {}
    for name, table in self.parse_tables().items():
        # An empty `wanted` means no filter was requested: keep everything.
        if wanted and name not in wanted:
            continue
        out_data[name] = _entry_by_key(table)
    return json.dumps({self.file_name: {**out_data}}, indent=2)
|
(self, *tables: str) -> str
|
710,389
|
json_logging.framework_base
|
AppRequestInstrumentationConfigurator
|
Class to perform request instrumentation logging configuration. Should at least contains:
1- register before-request hook and create a RequestInfo object, store it to request context
2- register after-request hook and update response to stored RequestInfo object
3 - re-configure framework loggers.
NOTE: logger that is used to emit request instrumentation logs will need to assign to **self.request_logger**
|
class AppRequestInstrumentationConfigurator:
    """
    Class to perform request instrumentation logging configuration. Should at least contains:
    1- register before-request hook and create a RequestInfo object, store it to request context
    2- register after-request hook and update response to stored RequestInfo object
    3 - re-configure framework loggers.
    NOTE: logger that is used to emit request instrumentation logs will need to assign to **self.request_logger**
    """

    def __new__(cls, *args, **kw):
        # Singleton: the first construction is cached on the class and every
        # later call hands back the same object.
        if not hasattr(cls, '_instance'):
            singleton = object.__new__(cls)
            singleton.request_logger = None
            cls._instance = singleton
        return cls._instance

    def config(self, app, exclude_url_patterns=None):
        """
        configuration logic
        :param app:
        """
        raise NotImplementedError

    def get_request_logger(self):
        """
        get the current logger that is used to logger the request instrumentation information
        """
        return self.request_logger
|
(*args, **kw)
|
710,390
|
json_logging.framework_base
|
__new__
| null |
def __new__(cls, *args, **kw):
    # Lazily create and cache one shared instance on the class (singleton);
    # the cached object starts with no request logger assigned.
    if not hasattr(cls, '_instance'):
        singleton = object.__new__(cls)
        singleton.request_logger = None
        cls._instance = singleton
    return cls._instance
|
(cls, *args, **kw)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.