Buckets:
| from __future__ import annotations | |
| import email | |
| import logging | |
| import random | |
| import re | |
| import time | |
| import typing | |
| from itertools import takewhile | |
| from types import TracebackType | |
| from ..exceptions import ( | |
| ConnectTimeoutError, | |
| InvalidHeader, | |
| MaxRetryError, | |
| ProtocolError, | |
| ProxyError, | |
| ReadTimeoutError, | |
| ResponseError, | |
| ) | |
| from .util import reraise | |
| if typing.TYPE_CHECKING: | |
| from typing_extensions import Self | |
| from ..connectionpool import ConnectionPool | |
| from ..response import BaseHTTPResponse | |
| log = logging.getLogger(__name__) | |
# Data structure for representing the metadata of requests that result in a retry.
class RequestHistory(typing.NamedTuple):
    """One entry of ``Retry.history``: metadata of a single attempt that was retried."""

    # HTTP method of the attempt, if known.
    method: str | None
    # URL that was requested, if known.
    url: str | None
    # Exception raised by the attempt, if the failure was an error.
    error: Exception | None
    # HTTP status code received, if a response arrived.
    status: int | None
    # Redirect target (Location) when the attempt resulted in a redirect.
    redirect_location: str | None
class Retry:
    """Retry configuration.

    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.

    Retries can be defined as a default for a pool:

    .. code-block:: python

        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request("GET", "https://example.com/")

    Or per-request (which overrides the default for the pool):

    .. code-block:: python

        response = http.request("GET", "https://example.com/", retries=Retry(10))

    Retries can be disabled by passing ``False``:

    .. code-block:: python

        response = http.request("GET", "https://example.com/", retries=False)

    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.

    :param int total:
        Total number of retries to allow. Takes precedence over other counts.

        Set to ``None`` to remove this constraint and fall back on other
        counts.

        Set to ``0`` to fail on the first retry.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int connect:
        How many connection-related errors to retry on.

        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.

        Set to ``0`` to fail on the first retry of this type.

    :param int read:
        How many times to retry on read errors.

        These errors are raised after the request was sent to the server, so the
        request may have side-effects.

        Set to ``0`` to fail on the first retry of this type.

    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.

        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.

        Set to ``0`` to fail on the first retry of this type.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int status:
        How many times to retry on bad status codes.

        These are retries made on responses, where status code matches
        ``status_forcelist``.

        Set to ``0`` to fail on the first retry of this type.

    :param int other:
        How many times to retry on other errors.

        Other errors are errors that are not connect, read, redirect or status errors.
        These errors might be raised after the request was sent to the server, so the
        request might have side-effects.

        Set to ``0`` to fail on the first retry of this type.

        If ``total`` is not set, it's a good idea to set this to 0 to account
        for unexpected edge cases and avoid infinite retry loops.

    :param Collection allowed_methods:
        Set of uppercased HTTP method verbs that we should retry on.

        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.

        Set to a ``None`` value to retry on any verb.

    :param Collection status_forcelist:
        A set of integer HTTP status codes that we should force a retry on.
        A retry is initiated if the request method is in ``allowed_methods``
        and the response status code is in ``status_forcelist``.

        By default, this is disabled with ``None``.

    :param float backoff_factor:
        A backoff factor to apply between attempts after the second try
        (most errors are resolved immediately by a second try without a
        delay). urllib3 will sleep for::

            {backoff factor} * (2 ** ({number of previous retries}))

        seconds. If `backoff_jitter` is non-zero, this sleep is extended by::

            random.uniform(0, {backoff jitter})

        seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will
        sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever
        be longer than `backoff_max`.

        By default, backoff is disabled (factor set to 0).

    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.

    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
        whether we should raise an exception, or return a response,
        if status falls in ``status_forcelist`` range and retries have
        been exhausted.

    :param tuple history: The history of the request encountered during
        each call to :meth:`~Retry.increment`. The list is in the order
        the requests occurred. Each list item is of class :class:`RequestHistory`.

    :param bool respect_retry_after_header:
        Whether to respect Retry-After header on status codes defined as
        :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.

    :param Collection remove_headers_on_redirect:
        Sequence of headers to remove from the request when a response
        indicating a redirect is returned before firing off the redirected
        request.

    :param int retry_after_max: Number of seconds to allow as the maximum for
        Retry-After headers. Defaults to :attr:`Retry.DEFAULT_RETRY_AFTER_MAX`.
        Any Retry-After headers larger than this value will be limited to this
        value.
    """

    #: Default methods to be used for ``allowed_methods``
    DEFAULT_ALLOWED_METHODS = frozenset(
        ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
    )

    #: Default status codes to be used for ``status_forcelist``
    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])

    #: Default headers to be used for ``remove_headers_on_redirect``
    DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
        ["Cookie", "Authorization", "Proxy-Authorization"]
    )

    #: Default maximum backoff time.
    DEFAULT_BACKOFF_MAX = 120

    # This is undocumented in the RFC. Setting to 6 hours matches other popular libraries.
    #: Default maximum allowed value for Retry-After headers in seconds
    DEFAULT_RETRY_AFTER_MAX: typing.Final[int] = 21600

    # Backward compatibility; assigned outside of the class.
    DEFAULT: typing.ClassVar[Retry]
    def __init__(
        self,
        total: bool | int | None = 10,
        connect: int | None = None,
        read: int | None = None,
        redirect: bool | int | None = None,
        status: int | None = None,
        other: int | None = None,
        allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,
        status_forcelist: typing.Collection[int] | None = None,
        backoff_factor: float = 0,
        backoff_max: float = DEFAULT_BACKOFF_MAX,
        raise_on_redirect: bool = True,
        raise_on_status: bool = True,
        history: tuple[RequestHistory, ...] | None = None,
        respect_retry_after_header: bool = True,
        remove_headers_on_redirect: typing.Collection[
            str
        ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,
        backoff_jitter: float = 0.0,
        retry_after_max: int = DEFAULT_RETRY_AFTER_MAX,
    ) -> None:
        """Store the retry configuration; see the class docstring for parameter semantics."""
        self.total = total
        self.connect = connect
        self.read = read
        self.status = status
        self.other = other

        # ``redirect=False`` (or ``total=False``) means redirects are disabled
        # entirely: zero the counter and don't raise when it is exhausted.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False

        self.redirect = redirect
        # Falsy forcelist (None, empty) normalizes to an empty set.
        self.status_forcelist = status_forcelist or set()
        self.allowed_methods = allowed_methods
        self.backoff_factor = backoff_factor
        self.backoff_max = backoff_max
        self.retry_after_max = retry_after_max
        self.raise_on_redirect = raise_on_redirect
        self.raise_on_status = raise_on_status
        self.history = history or ()
        self.respect_retry_after_header = respect_retry_after_header
        # Stored lowercased so redirect handling can compare case-insensitively.
        self.remove_headers_on_redirect = frozenset(
            h.lower() for h in remove_headers_on_redirect
        )
        self.backoff_jitter = backoff_jitter
| def new(self, **kw: typing.Any) -> Self: | |
| params = dict( | |
| total=self.total, | |
| connect=self.connect, | |
| read=self.read, | |
| redirect=self.redirect, | |
| status=self.status, | |
| other=self.other, | |
| allowed_methods=self.allowed_methods, | |
| status_forcelist=self.status_forcelist, | |
| backoff_factor=self.backoff_factor, | |
| backoff_max=self.backoff_max, | |
| retry_after_max=self.retry_after_max, | |
| raise_on_redirect=self.raise_on_redirect, | |
| raise_on_status=self.raise_on_status, | |
| history=self.history, | |
| remove_headers_on_redirect=self.remove_headers_on_redirect, | |
| respect_retry_after_header=self.respect_retry_after_header, | |
| backoff_jitter=self.backoff_jitter, | |
| ) | |
| params.update(kw) | |
| return type(self)(**params) # type: ignore[arg-type] | |
| def from_int( | |
| cls, | |
| retries: Retry | bool | int | None, | |
| redirect: bool | int | None = True, | |
| default: Retry | bool | int | None = None, | |
| ) -> Retry: | |
| """Backwards-compatibility for the old retries format.""" | |
| if retries is None: | |
| retries = default if default is not None else cls.DEFAULT | |
| if isinstance(retries, Retry): | |
| return retries | |
| redirect = bool(redirect) and None | |
| new_retries = cls(retries, redirect=redirect) | |
| log.debug("Converted retries value: %r -> %r", retries, new_retries) | |
| return new_retries | |
| def get_backoff_time(self) -> float: | |
| """Formula for computing the current backoff | |
| :rtype: float | |
| """ | |
| # We want to consider only the last consecutive errors sequence (Ignore redirects). | |
| consecutive_errors_len = len( | |
| list( | |
| takewhile(lambda x: x.redirect_location is None, reversed(self.history)) | |
| ) | |
| ) | |
| if consecutive_errors_len <= 1: | |
| return 0 | |
| backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) | |
| if self.backoff_jitter != 0.0: | |
| backoff_value += random.random() * self.backoff_jitter | |
| return float(max(0, min(self.backoff_max, backoff_value))) | |
| def parse_retry_after(self, retry_after: str) -> float: | |
| seconds: float | |
| # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 | |
| if re.match(r"^\s*[0-9]+\s*$", retry_after): | |
| seconds = int(retry_after) | |
| else: | |
| retry_date_tuple = email.utils.parsedate_tz(retry_after) | |
| if retry_date_tuple is None: | |
| raise InvalidHeader(f"Invalid Retry-After header: {retry_after}") | |
| retry_date = email.utils.mktime_tz(retry_date_tuple) | |
| seconds = retry_date - time.time() | |
| seconds = max(seconds, 0) | |
| # Check the seconds do not exceed the specified maximum | |
| if seconds > self.retry_after_max: | |
| seconds = self.retry_after_max | |
| return seconds | |
| def get_retry_after(self, response: BaseHTTPResponse) -> float | None: | |
| """Get the value of Retry-After in seconds.""" | |
| retry_after = response.headers.get("Retry-After") | |
| if retry_after is None: | |
| return None | |
| return self.parse_retry_after(retry_after) | |
| def sleep_for_retry(self, response: BaseHTTPResponse) -> bool: | |
| retry_after = self.get_retry_after(response) | |
| if retry_after: | |
| time.sleep(retry_after) | |
| return True | |
| return False | |
| def _sleep_backoff(self) -> None: | |
| backoff = self.get_backoff_time() | |
| if backoff <= 0: | |
| return | |
| time.sleep(backoff) | |
| def sleep(self, response: BaseHTTPResponse | None = None) -> None: | |
| """Sleep between retry attempts. | |
| This method will respect a server's ``Retry-After`` response header | |
| and sleep the duration of the time requested. If that is not present, it | |
| will use an exponential backoff. By default, the backoff factor is 0 and | |
| this method will return immediately. | |
| """ | |
| if self.respect_retry_after_header and response: | |
| slept = self.sleep_for_retry(response) | |
| if slept: | |
| return | |
| self._sleep_backoff() | |
| def _is_connection_error(self, err: Exception) -> bool: | |
| """Errors when we're fairly sure that the server did not receive the | |
| request, so it should be safe to retry. | |
| """ | |
| if isinstance(err, ProxyError): | |
| err = err.original_error | |
| return isinstance(err, ConnectTimeoutError) | |
    def _is_read_error(self, err: Exception) -> bool:
        """Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        # Read timeouts and protocol violations both mean the request may
        # already have had side-effects on the server.
        return isinstance(err, (ReadTimeoutError, ProtocolError))
| def _is_method_retryable(self, method: str) -> bool: | |
| """Checks if a given HTTP method should be retried upon, depending if | |
| it is included in the allowed_methods | |
| """ | |
| if self.allowed_methods and method.upper() not in self.allowed_methods: | |
| return False | |
| return True | |
| def is_retry( | |
| self, method: str, status_code: int, has_retry_after: bool = False | |
| ) -> bool: | |
| """Is this method/status code retryable? (Based on allowlists and control | |
| variables such as the number of total retries to allow, whether to | |
| respect the Retry-After header, whether this header is present, and | |
| whether the returned status code is on the list of status codes to | |
| be retried upon on the presence of the aforementioned header) | |
| """ | |
| if not self._is_method_retryable(method): | |
| return False | |
| if self.status_forcelist and status_code in self.status_forcelist: | |
| return True | |
| return bool( | |
| self.total | |
| and self.respect_retry_after_header | |
| and has_retry_after | |
| and (status_code in self.RETRY_AFTER_STATUS_CODES) | |
| ) | |
| def is_exhausted(self) -> bool: | |
| """Are we out of retries?""" | |
| retry_counts = [ | |
| x | |
| for x in ( | |
| self.total, | |
| self.connect, | |
| self.read, | |
| self.redirect, | |
| self.status, | |
| self.other, | |
| ) | |
| if x | |
| ] | |
| if not retry_counts: | |
| return False | |
| return min(retry_counts) < 0 | |
    def increment(
        self,
        method: str | None = None,
        url: str | None = None,
        response: BaseHTTPResponse | None = None,
        error: Exception | None = None,
        _pool: ConnectionPool | None = None,
        _stacktrace: TracebackType | None = None,
    ) -> Self:
        """Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.BaseHTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        :raises MaxRetryError: when the new counters are exhausted.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise reraise(type(error), error, _stacktrace)

        total = self.total
        if total is not None:
            total -= 1

        # Work on local copies; the new values are passed to self.new() below.
        connect = self.connect
        read = self.read
        redirect = self.redirect
        status_count = self.status
        other = self.other
        cause = "unknown"
        status = None
        redirect_location = None

        # Exactly one of the following branches decrements one counter;
        # ``total`` is decremented regardless (above).
        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1

        elif error and self._is_read_error(error):
            # Read retry?
            if read is False or method is None or not self._is_method_retryable(method):
                raise reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1

        elif error:
            # Other retry?
            if other is not None:
                other -= 1

        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = "too many redirects"
            response_redirect_location = response.get_redirect_location()
            if response_redirect_location:
                redirect_location = response_redirect_location
            status = response.status

        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the allowed_methods
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                if status_count is not None:
                    status_count -= 1
                cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
                status = response.status

        # Append this attempt to the immutable history tuple.
        history = self.history + (
            RequestHistory(method, url, error, status, redirect_location),
        )

        new_retry = self.new(
            total=total,
            connect=connect,
            read=read,
            redirect=redirect,
            status=status_count,
            other=other,
            history=history,
        )

        if new_retry.is_exhausted():
            reason = error or ResponseError(cause)
            raise MaxRetryError(_pool, url, reason) from reason  # type: ignore[arg-type]

        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

        return new_retry
| def __repr__(self) -> str: | |
| return ( | |
| f"{type(self).__name__}(total={self.total}, connect={self.connect}, " | |
| f"read={self.read}, redirect={self.redirect}, status={self.status})" | |
| ) | |
# For backwards compatibility (equivalent to pre-v1.9):
# the default configuration used when a caller passes retries=None.
Retry.DEFAULT = Retry(3)
Xet Storage Details
- Size:
- 19.2 kB
- Xet hash:
- 849dbe5e3c5d8e47431ee68612382995add0cf4e75468bfd651b494341226a73
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.