| index (int64, 0-731k) | package (string, 2-98 chars, nullable) | name (string, 1-76 chars) | docstring (string, 0-281k chars, nullable) | code (string, 4-1.07M chars, nullable) | signature (string, 2-42.8k chars, nullable) |
|---|---|---|---|---|---|
709,799
|
configargparse
|
parse
| null |
def parse(self, stream):
# see ConfigFileParser.parse docstring
yaml, SafeLoader, _ = self._load_yaml()
try:
parsed_obj = yaml.load(stream, Loader=SafeLoader)
except Exception as e:
raise ConfigFileParserException("Couldn't parse config file: %s" % e)
if not isinstance(parsed_obj, dict):
raise ConfigFileParserException("The config file doesn't appear to "
"contain 'key: value' pairs (aka. a YAML mapping). "
"yaml.load('%s') returned type '%s' instead of 'dict'." % (
getattr(stream, 'name', 'stream'), type(parsed_obj).__name__))
result = OrderedDict()
for key, value in parsed_obj.items():
if isinstance(value, list):
result[key] = value
elif value is None:
pass
else:
result[key] = str(value)
return result
|
(self, stream)
|
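A minimal usage sketch for the YAML parser above. It assumes the method belongs to configargparse's `YAMLConfigFileParser` and that PyYAML is installed; note how lists pass through unchanged, `None` values are dropped, and other scalars are stringified:

```python
import io
import configargparse  # the YAML parser lazily imports PyYAML

# Hypothetical config content; parse() expects a file-like stream.
stream = io.StringIO("name: demo\nlevels: [1, 2, 3]\nunset:\n")

parser = configargparse.YAMLConfigFileParser()
result = parser.parse(stream)
# OrderedDict([('name', 'demo'), ('levels', [1, 2, 3])]) -- 'unset' was None and is dropped
print(result)
```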
709,800
|
configargparse
|
serialize
| null |
def serialize(self, items, default_flow_style=False):
# see ConfigFileParser.serialize docstring
# lazy-import so there's no dependency on yaml unless this class is used
yaml, _, Dumper = self._load_yaml()
# it looks like ordering can't be preserved: http://pyyaml.org/ticket/29
items = dict(items)
return yaml.dump(items, default_flow_style=default_flow_style, Dumper=Dumper)
|
(self, items, default_flow_style=False)
|
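Since `serialize` lazily loads PyYAML and delegates to `yaml.dump`, a stand-alone equivalent looks like this (assuming `_load_yaml` hands back PyYAML's safe dumper; keys come out sorted because insertion order is not preserved, per the comment above):

```python
import yaml  # PyYAML

items = {"name": "demo", "levels": [1, 2, 3]}
print(yaml.dump(items, default_flow_style=False, Dumper=yaml.SafeDumper))
# levels:
# - 1
# - 2
# - 3
# name: demo
```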
709,802
|
configargparse
|
already_on_command_line
|
Utility method for checking if any of the potential_command_line_args is
already present in existing_args.
Returns:
bool: already on command line?
|
def already_on_command_line(existing_args_list, potential_command_line_args, prefix_chars):
"""Utility method for checking if any of the potential_command_line_args is
already present in existing_args.
Returns:
bool: already on command line?
"""
arg_names = []
for arg_string in existing_args_list:
        if arg_string and arg_string[0] in prefix_chars and "=" in arg_string:
            option_string, _ = arg_string.split("=", 1)
            arg_names.append(option_string)
else:
arg_names.append(arg_string)
return any(
potential_arg in arg_names for potential_arg in potential_command_line_args
)
|
(existing_args_list, potential_command_line_args, prefix_chars)
|
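A quick check of the helper's behavior; both the bare flag and the `--opt=value` spelling are matched:

```python
existing = ["--verbose", "--output=out.txt", "positional"]
assert already_on_command_line(existing, ["--output"], prefix_chars="-")
assert not already_on_command_line(existing, ["--input"], prefix_chars="-")
```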
709,807
|
configargparse
|
get_argument_parser
|
Returns the global ArgumentParser instance with the given name. The 1st
time this function is called, a new ArgumentParser instance will be created
for the given name, and any args other than "name" will be passed on to the
ArgumentParser constructor.
|
def get_argument_parser(name=None, **kwargs):
"""Returns the global ArgumentParser instance with the given name. The 1st
time this function is called, a new ArgumentParser instance will be created
for the given name, and any args other than "name" will be passed on to the
ArgumentParser constructor.
"""
if name is None:
name = "default"
if len(kwargs) > 0 or name not in _parsers:
init_argument_parser(name, **kwargs)
return _parsers[name]
|
(name=None, **kwargs)
|
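A sketch of the registry behavior using the module-level helpers: the first call creates and stores the parser, later calls return the same instance, and passing kwargs again for an existing name raises `ValueError` (see `init_argument_parser` below):

```python
p1 = get_argument_parser("app", description="demo")  # created and registered
p1.add_argument("--debug", action="store_true")
p2 = get_argument_parser("app")                      # same instance returned
assert p1 is p2
```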
709,813
|
configargparse
|
get_toml_section
|
Given some TOML data (as loaded with `toml.load()`), returns the requested section of the data.
Returns ``None`` if the section is not found.
|
def get_toml_section(data, section):
"""
Given some TOML data (as loaded with `toml.load()`), returns the requested section of the data.
Returns ``None`` if the section is not found.
"""
sections = parse_toml_section_name(section) if isinstance(section, str) else section
itemdata = data.get(sections[0])
if not itemdata:
return None
sections = sections[1:]
if sections:
return get_toml_section(itemdata, sections)
else:
if not isinstance(itemdata, dict):
return None
return itemdata
|
(data, section)
|
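Behavior sketch, using a plain dict in place of `toml.load()` output; dotted names descend level by level, and misses return `None`:

```python
data = {"tool": {"my_app": {"options": {"verbose": True}}}}
assert get_toml_section(data, "tool.my_app") == {"options": {"verbose": True}}
assert get_toml_section(data, "tool.missing") is None
```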
709,815
|
configargparse
|
init_argument_parser
|
Creates a global ArgumentParser instance with the given name,
passing any args other than "name" to the ArgumentParser constructor.
This instance can then be retrieved using get_argument_parser(..)
|
def init_argument_parser(name=None, **kwargs):
"""Creates a global ArgumentParser instance with the given name,
passing any args other than "name" to the ArgumentParser constructor.
This instance can then be retrieved using get_argument_parser(..)
"""
if name is None:
name = "default"
if name in _parsers:
raise ValueError(("kwargs besides 'name' can only be passed in the"
" first time. '%s' ArgumentParser already exists: %s") % (
name, _parsers[name]))
kwargs.setdefault('formatter_class', argparse.ArgumentDefaultsHelpFormatter)
kwargs.setdefault('conflict_handler', 'resolve')
_parsers[name] = ArgumentParser(**kwargs)
|
(name=None, **kwargs)
|
709,817
|
configargparse
|
<lambda>
| null |
is_boolean_optional_action = lambda action: isinstance(action, argparse.BooleanOptionalAction)
|
(action)
|
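The predicate simply type-checks against `argparse.BooleanOptionalAction` (available since Python 3.9):

```python
import argparse

parser = argparse.ArgumentParser()
action = parser.add_argument("--cache", action=argparse.BooleanOptionalAction)
assert is_boolean_optional_action(action)  # accepts --cache / --no-cache
```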
709,820
|
configargparse
|
parse_toml_section_name
|
Parse a TOML section name to a sequence of strings.
The following names are all valid:
.. python::
"a.b.c" # this is best practice -> returns ("a", "b", "c")
" d.e.f " # same as [d.e.f] -> returns ("d", "e", "f")
" g . h . i " # same as [g.h.i] -> returns ("g", "h", "i")
' j . "ʞ" . "l" ' # same as [j."ʞ"."l"], double or simple quotes here are supported. -> returns ("j", "ʞ", "l")
|
def parse_toml_section_name(section_name):
"""
Parse a TOML section name to a sequence of strings.
The following names are all valid:
.. python::
"a.b.c" # this is best practice -> returns ("a", "b", "c")
" d.e.f " # same as [d.e.f] -> returns ("d", "e", "f")
" g . h . i " # same as [g.h.i] -> returns ("g", "h", "i")
' j . "ʞ" . "l" ' # same as [j."ʞ"."l"], double or simple quotes here are supported. -> returns ("j", "ʞ", "l")
"""
section = []
for row in csv.reader([section_name], delimiter='.'):
for a in row:
section.append(unquote_str(a.strip(), triple=False))
return tuple(section)
|
(section_name)
|
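Two of the docstring cases, checked directly; `csv.reader` leaves the mid-field quotes in place (they are not field-initial), and `unquote_str` strips them after the whitespace trim:

```python
assert parse_toml_section_name("a.b.c") == ("a", "b", "c")
assert parse_toml_section_name(' j . "k" . "l" ') == ("j", "k", "l")
```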
709,825
|
configargparse
|
unquote_str
|
Unquote a possibly quoted string representation.
If the string is not detected as being a quoted representation, it returns the same string as passed.
It supports all kinds of Python quotes: ``"""``, ``'''``, ``"`` and ``'``.
:param triple: Also unquote triple-quoted strings.
:raises ValueError: If the string is detected as being quoted but literal_eval() fails to evaluate it as a string.
This would be a bug in the regex.
|
def unquote_str(text, triple=True):
"""
    Unquote a possibly quoted string representation.
    If the string is not detected as being a quoted representation, it returns the same string as passed.
    It supports all kinds of Python quotes: ``\"\"\"``, ``'''``, ``"`` and ``'``.
    :param triple: Also unquote triple-quoted strings.
    :raises ValueError: If the string is detected as being quoted but literal_eval() fails to evaluate it as a string.
        This would be a bug in the regex.
"""
if is_quoted(text, triple=triple):
try:
s = ast.literal_eval(text)
assert isinstance(s, str)
except Exception as e:
raise ValueError(f"Error trying to unquote the quoted string: {text}: {e}") from e
return s
return text
|
(text, triple=True)
|
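Expected behavior, assuming the companion `is_quoted` helper recognizes ordinary single- and double-quoted forms as the docstring describes:

```python
assert unquote_str('"hello"') == "hello"
assert unquote_str("'world'") == "world"
assert unquote_str("plain") == "plain"  # not quoted: returned unchanged
```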
709,826
|
loops_diff
|
_jupyter_labextension_paths
| null |
def _jupyter_labextension_paths():
return [{
"src": "labextension",
"dest": "loops-diff"
}]
|
()
|
709,828
|
pytorch_msssim.ssim
|
MS_SSIM
| null |
class MS_SSIM(torch.nn.Module):
def __init__(
self,
data_range: float = 255,
size_average: bool = True,
win_size: int = 11,
win_sigma: float = 1.5,
channel: int = 3,
spatial_dims: int = 2,
weights: Optional[List[float]] = None,
K: Union[Tuple[float, float], List[float]] = (0.01, 0.03),
) -> None:
r""" class for ms-ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
channel (int, optional): input channels (default: 3)
weights (list, optional): weights for different levels
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
"""
super(MS_SSIM, self).__init__()
self.win_size = win_size
self.win = _fspecial_gauss_1d(win_size, win_sigma).repeat([channel, 1] + [1] * spatial_dims)
self.size_average = size_average
self.data_range = data_range
self.weights = weights
self.K = K
def forward(self, X: Tensor, Y: Tensor) -> Tensor:
return ms_ssim(
X,
Y,
data_range=self.data_range,
size_average=self.size_average,
win=self.win,
weights=self.weights,
K=self.K,
)
|
(data_range: float = 255, size_average: bool = True, win_size: int = 11, win_sigma: float = 1.5, channel: int = 3, spatial_dims: int = 2, weights: Optional[List[float]] = None, K: Union[Tuple[float, float], List[float]] = (0.01, 0.03)) -> None
|
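A training-style usage sketch for the class above, assuming the usual `pytorch_msssim` import path. MS-SSIM performs four downsamplings, so each spatial side must exceed `(win_size - 1) * 16` (160 for the default window):

```python
import torch
from pytorch_msssim import MS_SSIM  # assumed import path

X = torch.rand(4, 3, 256, 256, requires_grad=True)  # values in [0, 1]
Y = torch.rand(4, 3, 256, 256)
ms_ssim_module = MS_SSIM(data_range=1.0, channel=3)
loss = 1 - ms_ssim_module(X, Y)  # 1 - MS-SSIM is a common training loss
loss.backward()
```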
709,834
|
pytorch_msssim.ssim
|
__init__
|
class for ms-ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
channel (int, optional): input channels (default: 3)
weights (list, optional): weights for different levels
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
|
def __init__(
self,
data_range: float = 255,
size_average: bool = True,
win_size: int = 11,
win_sigma: float = 1.5,
channel: int = 3,
spatial_dims: int = 2,
weights: Optional[List[float]] = None,
K: Union[Tuple[float, float], List[float]] = (0.01, 0.03),
) -> None:
r""" class for ms-ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
channel (int, optional): input channels (default: 3)
weights (list, optional): weights for different levels
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
"""
super(MS_SSIM, self).__init__()
self.win_size = win_size
self.win = _fspecial_gauss_1d(win_size, win_sigma).repeat([channel, 1] + [1] * spatial_dims)
self.size_average = size_average
self.data_range = data_range
self.weights = weights
self.K = K
|
(self, data_range: float = 255, size_average: bool = True, win_size: int = 11, win_sigma: float = 1.5, channel: int = 3, spatial_dims: int = 2, weights: Optional[List[float]] = None, K: Union[Tuple[float, float], List[float]] = (0.01, 0.03)) -> NoneType
|
709,864
|
pytorch_msssim.ssim
|
forward
| null |
def forward(self, X: Tensor, Y: Tensor) -> Tensor:
return ms_ssim(
X,
Y,
data_range=self.data_range,
size_average=self.size_average,
win=self.win,
weights=self.weights,
K=self.K,
)
|
(self, X: torch.Tensor, Y: torch.Tensor) -> torch.Tensor
|
709,898
|
pytorch_msssim.ssim
|
SSIM
| null |
class SSIM(torch.nn.Module):
def __init__(
self,
data_range: float = 255,
size_average: bool = True,
win_size: int = 11,
win_sigma: float = 1.5,
channel: int = 3,
spatial_dims: int = 2,
K: Union[Tuple[float, float], List[float]] = (0.01, 0.03),
nonnegative_ssim: bool = False,
) -> None:
r""" class for ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
channel (int, optional): input channels (default: 3)
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu.
"""
super(SSIM, self).__init__()
self.win_size = win_size
self.win = _fspecial_gauss_1d(win_size, win_sigma).repeat([channel, 1] + [1] * spatial_dims)
self.size_average = size_average
self.data_range = data_range
self.K = K
self.nonnegative_ssim = nonnegative_ssim
def forward(self, X: Tensor, Y: Tensor) -> Tensor:
return ssim(
X,
Y,
data_range=self.data_range,
size_average=self.size_average,
win=self.win,
K=self.K,
nonnegative_ssim=self.nonnegative_ssim,
)
|
(data_range: float = 255, size_average: bool = True, win_size: int = 11, win_sigma: float = 1.5, channel: int = 3, spatial_dims: int = 2, K: Union[Tuple[float, float], List[float]] = (0.01, 0.03), nonnegative_ssim: bool = False) -> None
|
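The single-scale variant in the same style; `nonnegative_ssim=True` clamps the score with relu, so the derived loss stays at most 1:

```python
import torch
from pytorch_msssim import SSIM  # assumed import path

X = torch.rand(2, 1, 64, 64, requires_grad=True)
Y = torch.rand(2, 1, 64, 64)
ssim_module = SSIM(data_range=1.0, channel=1, nonnegative_ssim=True)
loss = 1 - ssim_module(X, Y)
loss.backward()
```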
709,904
|
pytorch_msssim.ssim
|
__init__
|
class for ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
channel (int, optional): input channels (default: 3)
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu.
|
def __init__(
self,
data_range: float = 255,
size_average: bool = True,
win_size: int = 11,
win_sigma: float = 1.5,
channel: int = 3,
spatial_dims: int = 2,
K: Union[Tuple[float, float], List[float]] = (0.01, 0.03),
nonnegative_ssim: bool = False,
) -> None:
r""" class for ssim
Args:
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
channel (int, optional): input channels (default: 3)
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu.
"""
super(SSIM, self).__init__()
self.win_size = win_size
self.win = _fspecial_gauss_1d(win_size, win_sigma).repeat([channel, 1] + [1] * spatial_dims)
self.size_average = size_average
self.data_range = data_range
self.K = K
self.nonnegative_ssim = nonnegative_ssim
|
(self, data_range: float = 255, size_average: bool = True, win_size: int = 11, win_sigma: float = 1.5, channel: int = 3, spatial_dims: int = 2, K: Union[Tuple[float, float], List[float]] = (0.01, 0.03), nonnegative_ssim: bool = False) -> NoneType
|
709,934
|
pytorch_msssim.ssim
|
forward
| null |
def forward(self, X: Tensor, Y: Tensor) -> Tensor:
return ssim(
X,
Y,
data_range=self.data_range,
size_average=self.size_average,
win=self.win,
K=self.K,
nonnegative_ssim=self.nonnegative_ssim,
)
|
(self, X: torch.Tensor, Y: torch.Tensor) -> torch.Tensor
|
709,968
|
pytorch_msssim.ssim
|
ms_ssim
|
interface of ms-ssim
Args:
X (torch.Tensor): a batch of images, (N,C,[T,]H,W)
Y (torch.Tensor): a batch of images, (N,C,[T,]H,W)
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
weights (list, optional): weights for different levels
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
Returns:
torch.Tensor: ms-ssim results
|
def ms_ssim(
X: Tensor,
Y: Tensor,
data_range: float = 255,
size_average: bool = True,
win_size: int = 11,
win_sigma: float = 1.5,
win: Optional[Tensor] = None,
weights: Optional[List[float]] = None,
K: Union[Tuple[float, float], List[float]] = (0.01, 0.03)
) -> Tensor:
r""" interface of ms-ssim
Args:
X (torch.Tensor): a batch of images, (N,C,[T,]H,W)
Y (torch.Tensor): a batch of images, (N,C,[T,]H,W)
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
weights (list, optional): weights for different levels
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
Returns:
torch.Tensor: ms-ssim results
"""
if not X.shape == Y.shape:
raise ValueError(f"Input images should have the same dimensions, but got {X.shape} and {Y.shape}.")
for d in range(len(X.shape) - 1, 1, -1):
X = X.squeeze(dim=d)
Y = Y.squeeze(dim=d)
#if not X.type() == Y.type():
# raise ValueError(f"Input images should have the same dtype, but got {X.type()} and {Y.type()}.")
if len(X.shape) == 4:
avg_pool = F.avg_pool2d
elif len(X.shape) == 5:
avg_pool = F.avg_pool3d
else:
raise ValueError(f"Input images should be 4-d or 5-d tensors, but got {X.shape}")
if win is not None: # set win_size
win_size = win.shape[-1]
if not (win_size % 2 == 1):
raise ValueError("Window size should be odd.")
smaller_side = min(X.shape[-2:])
assert smaller_side > (win_size - 1) * (
2 ** 4
), "Image size should be larger than %d due to the 4 downsamplings in ms-ssim" % ((win_size - 1) * (2 ** 4))
if weights is None:
weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
weights_tensor = X.new_tensor(weights)
if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat([X.shape[1]] + [1] * (len(X.shape) - 1))
levels = weights_tensor.shape[0]
mcs = []
for i in range(levels):
ssim_per_channel, cs = _ssim(X, Y, win=win, data_range=data_range, size_average=False, K=K)
if i < levels - 1:
mcs.append(torch.relu(cs))
padding = [s % 2 for s in X.shape[2:]]
X = avg_pool(X, kernel_size=2, padding=padding)
Y = avg_pool(Y, kernel_size=2, padding=padding)
ssim_per_channel = torch.relu(ssim_per_channel) # type: ignore # (batch, channel)
mcs_and_ssim = torch.stack(mcs + [ssim_per_channel], dim=0) # (level, batch, channel)
ms_ssim_val = torch.prod(mcs_and_ssim ** weights_tensor.view(-1, 1, 1), dim=0)
if size_average:
return ms_ssim_val.mean()
else:
return ms_ssim_val.mean(1)
|
(X: torch.Tensor, Y: torch.Tensor, data_range: float = 255, size_average: bool = True, win_size: int = 11, win_sigma: float = 1.5, win: Optional[torch.Tensor] = None, weights: Optional[List[float]] = None, K: Union[Tuple[float, float], List[float]] = (0.01, 0.03)) -> torch.Tensor
|
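The functional form takes the same arguments per call; with `size_average=False` it returns one score per image instead of a single scalar:

```python
import torch
from pytorch_msssim import ms_ssim  # assumed import path

X = torch.rand(4, 3, 256, 256)
Y = torch.rand(4, 3, 256, 256)
scores = ms_ssim(X, Y, data_range=1.0, size_average=False)
print(scores.shape)  # torch.Size([4])
```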
709,969
|
pytorch_msssim.ssim
|
ssim
|
interface of ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu
Returns:
torch.Tensor: ssim results
|
def ssim(
X: Tensor,
Y: Tensor,
data_range: float = 255,
size_average: bool = True,
win_size: int = 11,
win_sigma: float = 1.5,
win: Optional[Tensor] = None,
K: Union[Tuple[float, float], List[float]] = (0.01, 0.03),
nonnegative_ssim: bool = False,
) -> Tensor:
r""" interface of ssim
Args:
X (torch.Tensor): a batch of images, (N,C,H,W)
Y (torch.Tensor): a batch of images, (N,C,H,W)
data_range (float or int, optional): value range of input images. (usually 1.0 or 255)
size_average (bool, optional): if size_average=True, ssim of all images will be averaged as a scalar
win_size: (int, optional): the size of gauss kernel
win_sigma: (float, optional): sigma of normal distribution
win (torch.Tensor, optional): 1-D gauss kernel. if None, a new kernel will be created according to win_size and win_sigma
K (list or tuple, optional): scalar constants (K1, K2). Try a larger K2 constant (e.g. 0.4) if you get negative or NaN results.
nonnegative_ssim (bool, optional): force the ssim response to be nonnegative with relu
Returns:
torch.Tensor: ssim results
"""
if not X.shape == Y.shape:
raise ValueError(f"Input images should have the same dimensions, but got {X.shape} and {Y.shape}.")
for d in range(len(X.shape) - 1, 1, -1):
X = X.squeeze(dim=d)
Y = Y.squeeze(dim=d)
if len(X.shape) not in (4, 5):
raise ValueError(f"Input images should be 4-d or 5-d tensors, but got {X.shape}")
#if not X.type() == Y.type():
# raise ValueError(f"Input images should have the same dtype, but got {X.type()} and {Y.type()}.")
if win is not None: # set win_size
win_size = win.shape[-1]
if not (win_size % 2 == 1):
raise ValueError("Window size should be odd.")
if win is None:
win = _fspecial_gauss_1d(win_size, win_sigma)
win = win.repeat([X.shape[1]] + [1] * (len(X.shape) - 1))
ssim_per_channel, cs = _ssim(X, Y, data_range=data_range, win=win, size_average=False, K=K)
if nonnegative_ssim:
ssim_per_channel = torch.relu(ssim_per_channel)
if size_average:
return ssim_per_channel.mean()
else:
return ssim_per_channel.mean(1)
|
(X: torch.Tensor, Y: torch.Tensor, data_range: float = 255, size_average: bool = True, win_size: int = 11, win_sigma: float = 1.5, win: Optional[torch.Tensor] = None, K: Union[Tuple[float, float], List[float]] = (0.01, 0.03), nonnegative_ssim: bool = False) -> torch.Tensor
|
709,970
|
urllib3_future._async.connectionpool
|
AsyncHTTPConnectionPool
|
Task-safe async connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`http.client.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`http.client.HTTPConnection`.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.AsyncProxyManager`
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3._async.connection.AsyncHTTPConnection`,
:class:`urllib3._async.connection.AsyncHTTPSConnection` instances.
|
class AsyncHTTPConnectionPool(AsyncConnectionPool, AsyncRequestMethods):
"""
Task-safe async connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`http.client.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`http.client.HTTPConnection`.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.AsyncProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3._async.connection.AsyncHTTPConnection`,
:class:`urllib3._async.connection.AsyncHTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls: (
type[AsyncHTTPConnection] | type[AsyncHTTPSConnection]
) = AsyncHTTPConnection
def __init__(
self,
host: str,
port: int | None = None,
timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
maxsize: int = 1,
block: bool = False,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
_proxy_headers: typing.Mapping[str, str] | None = None,
_proxy_config: ProxyConfig | None = None,
resolver: AsyncResolverDescription
| list[AsyncResolverDescription]
| str
| list[str]
| AsyncBaseResolver
| None = None,
happy_eyeballs: bool | int = False,
**conn_kw: typing.Any,
):
AsyncConnectionPool.__init__(self, host, port)
AsyncRequestMethods.__init__(self, headers)
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.happy_eyeballs = happy_eyeballs
self._maxsize = maxsize
if self.QueueCls is not AsyncTrafficPolice and not issubclass(
self.QueueCls, AsyncTrafficPolice
):
warnings.warn(
"ConnectionPool QueueCls no longer support typical queue implementation "
"due to its inability to answer urllib3.future needs to handle concurrent streams "
"in a single connection. You may customize the implementation by passing a subclass of "
"urllib3.util._async.traffic_police.AsyncTrafficPolice if necessary.",
DeprecationWarning,
)
self.QueueCls = AsyncTrafficPolice
self.pool: AsyncTrafficPolice[AsyncHTTPConnection] | None = self.QueueCls(
maxsize
)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
self.proxy_config = _proxy_config
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
self.conn_kw["proxy"] = self.proxy
self.conn_kw["proxy_config"] = self.proxy_config
self._own_resolver = not isinstance(resolver, AsyncBaseResolver)
if resolver is None:
resolver = [AsyncResolverDescription(ProtocolResolver.SYSTEM)]
elif isinstance(resolver, str):
resolver = [AsyncResolverDescription.from_url(resolver)]
elif isinstance(resolver, AsyncResolverDescription):
resolver = [resolver]
self._resolvers: list[AsyncResolverDescription] = []
if not isinstance(resolver, AsyncBaseResolver):
can_resolve_localhost: bool = False
for resolver_description in resolver:
if isinstance(resolver_description, str):
self._resolvers.append(
AsyncResolverDescription.from_url(resolver_description)
)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
continue
self._resolvers.append(resolver_description)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
if not can_resolve_localhost:
self._resolvers.append(
AsyncResolverDescription.from_url(
"system://default?hosts=localhost"
)
)
#: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
for rd in self._resolvers:
if "ca_cert_data" in conn_kw:
if "ca_cert_data" not in rd:
rd["ca_cert_data"] = conn_kw["ca_cert_data"]
if "ca_cert_dir" in conn_kw:
if "ca_cert_dir" not in rd:
rd["ca_cert_dir"] = conn_kw["ca_cert_dir"]
if "ca_certs" in conn_kw:
if "ca_certs" not in rd:
rd["ca_certs"] = conn_kw["ca_certs"]
self._resolver: AsyncBaseResolver = (
AsyncManyResolver(*[r.new() for r in self._resolvers])
if not isinstance(resolver, AsyncBaseResolver)
else resolver
)
self.conn_kw["resolver"] = self._resolver
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
async def _new_conn(
self, *, heb_timeout: Timeout | None = None
) -> AsyncHTTPConnection:
"""
Return a fresh :class:`HTTPConnection`.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = None
# this path is applicable if resolver yield at least two records.
# if A/AAAA records only (non-dual stack) -> spawn 4 (default) tasks with each record
# if A/AAAA records mixed (dual stack) -> spawn 4 (default) tasks following this pattern (IPv6, IPv4, IPv6, IPv4)
# if single record A or AAAA -> ignore
        # Don't worry about HTTP/2 or HTTP/3 here; this pool is unencrypted.
if self.happy_eyeballs:
log.debug(
"Attempting Happy-Eyeball %s:%s",
self.host,
self.port or "443",
)
dt_pre_resolve = datetime.now(tz=timezone.utc)
ip_addresses = await self._resolver.getaddrinfo(
self.host,
self.port,
socket.AF_UNSPEC
if "socket_family" not in self.conn_kw
else self.conn_kw["socket_family"],
socket.SOCK_STREAM,
quic_upgrade_via_dns_rr=False,
)
delta_post_resolve = datetime.now(tz=timezone.utc) - dt_pre_resolve
if len(ip_addresses) > 1:
ipv6_addresses = []
ipv4_addresses = []
for ip_address in ip_addresses:
if ip_address[0] == socket.AF_INET6:
ipv6_addresses.append(ip_address)
else:
ipv4_addresses.append(ip_address)
if ipv4_addresses and ipv6_addresses:
log.debug(
"Happy-Eyeball Dual-Stack %s:%s",
self.host,
self.port or "443",
)
intermediary_addresses = []
for ipv6_entry, ipv4_entry in zip_longest(
ipv6_addresses, ipv4_addresses
):
if ipv6_entry:
intermediary_addresses.append(ipv6_entry)
if ipv4_entry:
intermediary_addresses.append(ipv4_entry)
ip_addresses = intermediary_addresses
else:
log.debug(
"Happy-Eyeball Single-Stack %s:%s",
self.host,
self.port or "443",
)
challengers = []
max_task = (
4 if isinstance(self.happy_eyeballs, bool) else self.happy_eyeballs
)
if heb_timeout is None:
heb_timeout = self.timeout
override_timeout = (
heb_timeout.connect_timeout
if heb_timeout.connect_timeout is not None
and isinstance(heb_timeout.connect_timeout, (float, int))
else None
)
for ip_address in ip_addresses[:max_task]:
conn_kw = self.conn_kw.copy()
target_solo_addr = (
f"[{ip_address[-1][0]}]"
if ip_address[0] == socket.AF_INET6
else ip_address[-1][0]
)
conn_kw["resolver"] = AsyncResolverDescription.from_url(
f"in-memory://default?hosts={self.host}:{target_solo_addr}"
).new()
conn_kw["socket_family"] = ip_address[0]
challengers.append(
self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
**conn_kw,
)
)
tasks = [
asyncio.create_task(challenger.connect())
for i, challenger in enumerate(challengers)
]
winner_task = None
remnant_tasks = []
pending: set[Task[None]] = set()
while True:
done, pending = await asyncio.wait(
tasks if not pending else pending,
return_when=asyncio.FIRST_COMPLETED,
timeout=override_timeout,
)
                while done:
                    finished_task = done.pop()
                    if winner_task is None and finished_task.exception() is None:
                        winner_task = finished_task
                        continue
                    if finished_task.exception():
                        continue
                    remnant_tasks.append(finished_task)
if winner_task:
break
for task in pending:
task.cancel()
if winner_task is None:
within_delay_msg: str = (
f" within {override_timeout}s" if override_timeout else ""
)
raise NewConnectionError(
challengers[0],
f"Failed to establish a new connection: No suitable address to connect to using Happy Eyeballs algorithm for {self.host}:{self.port}{within_delay_msg}",
) from tasks[0].exception()
conn = challengers[tasks.index(winner_task)]
# we have to replace the resolution latency metric
if conn.conn_info:
conn.conn_info.resolution_latency = delta_post_resolve
if len(remnant_tasks):
# we may have more than one conn ready, we shall then carefully close the others.
for disposable_remnant in remnant_tasks:
await challengers[tasks.index(disposable_remnant)].close()
else:
log.debug(
"Happy-Eyeball Ineligible %s:%s",
self.host,
self.port or "443",
)
if conn is None:
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
**self.conn_kw,
)
await self.pool.put(conn, immediately_unavailable=True)
return conn
async def _get_conn(
self, timeout: float | None = None, *, heb_timeout: Timeout | None = None
) -> AsyncHTTPConnection:
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed.")
try:
conn = await self.pool.get(
block=self.block, timeout=timeout, non_saturated_only=True
)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.") from None # Defensive:
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool is empty and a new connection can't be opened due to blocking mode.",
) from None
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
await conn.close()
return conn or await self._new_conn(heb_timeout=heb_timeout)
async def _put_conn(self, conn: AsyncHTTPConnection) -> None:
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
if self.pool is not None:
try:
await self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# Connection never got put back into the pool, close it.
if conn:
if conn.is_idle:
await conn.close()
if self.block:
# This should never happen if you got the conn from self._get_conn
raise FullPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from None
else:
# multiplexed connection may still have in-flight request not converted into response
# we shall not discard it until responses are consumed.
if conn and conn.is_idle is False:
log.warning(
"Connection pool is full, temporary increase, keeping connection, "
"multiplexed and not idle: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
if self.pool.maxsize is not None:
self.pool.maxsize += 1
return await self._put_conn(conn)
log.warning(
"Connection pool is full, discarding connection: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
self.num_connections -= 1
return
# Connection never got put back into the pool, close it.
if conn:
await conn.close()
self.num_connections -= 1
async def _validate_conn(self, conn: AsyncHTTPConnection) -> None:
"""
Called right before a request is made, after the socket is created.
"""
if conn.is_closed:
await conn.connect()
async def _prepare_proxy(self, conn: AsyncHTTPConnection) -> None:
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:
"""Helper that always returns a :class:`urllib3.util.Timeout`"""
if timeout is _DEFAULT_TIMEOUT:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(
self,
err: BaseSSLError | OSError | SocketTimeout,
url: str,
timeout_value: _TYPE_TIMEOUT | None,
) -> None:
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={timeout_value})"
) from err
# See the above comment about EAGAIN in Python 3.
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={timeout_value})"
) from err
async def get_response(
self, *, promise: ResponsePromise | None = None
) -> AsyncHTTPResponse | None:
"""
Retrieve the first response available in the pool.
This method should be called after issuing at least one request with ``multiplexed=True``.
        If none is available, return ``None``.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
try:
async with self.pool.borrow(
promise or ResponsePromise,
block=promise is not None,
not_idle_only=promise is None,
) as conn:
response = await conn.getresponse(
promise=promise, police_officer=self.pool
)
except UnavailableTraffic:
return None
if promise is not None and response is None:
raise ValueError(
"Invoked get_response with promise=... that no connection in pool recognize"
)
if response is None:
return None
from_promise = None
if promise:
from_promise = promise
else:
if (
response._fp
and hasattr(response._fp, "from_promise")
and response._fp.from_promise
):
from_promise = response._fp.from_promise
if from_promise is None:
raise ValueError(
"Internal: Unable to identify originating ResponsePromise from a LowLevelResponse"
)
self.pool.forget(from_promise)
# Retrieve request ctx
method = typing.cast(str, from_promise.get_parameter("method"))
redirect = typing.cast(bool, from_promise.get_parameter("redirect"))
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
url = typing.cast(str, from_promise.get_parameter("url"))
body = typing.cast(
typing.Optional[_TYPE_BODY], from_promise.get_parameter("body")
)
headers = typing.cast(HTTPHeaderDict, from_promise.get_parameter("headers"))
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Optional[_TYPE_TIMEOUT], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
await response.drain_conn()
raise
return response
await response.drain_conn()
await retries.async_sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
new_promise = await self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=True,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return await self.get_response(promise=new_promise if promise else None)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if retries.is_retry(method, response.status, has_retry_after):
url = typing.cast(str, from_promise.get_parameter("url"))
body = typing.cast(
typing.Optional[_TYPE_BODY], from_promise.get_parameter("body")
)
headers = typing.cast(HTTPHeaderDict, from_promise.get_parameter("headers"))
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Optional[_TYPE_TIMEOUT], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
await response.drain_conn()
raise
return response
await response.drain_conn()
await retries.async_sleep(response)
log.debug("Retry: %s", url)
new_promise = await self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=False,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return await self.get_response(promise=new_promise if promise else None)
return response
@typing.overload
async def _make_request(
self,
conn: AsyncHTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | None = ...,
timeout: _TYPE_TIMEOUT = ...,
chunked: bool = ...,
response_conn: AsyncHTTPConnection | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
enforce_content_length: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[True],
) -> ResponsePromise:
...
@typing.overload
async def _make_request(
self,
conn: AsyncHTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | None = ...,
timeout: _TYPE_TIMEOUT = ...,
chunked: bool = ...,
response_conn: AsyncHTTPConnection | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
enforce_content_length: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[False] = ...,
) -> AsyncHTTPResponse:
...
async def _make_request(
self,
conn: AsyncHTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | None = None,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
chunked: bool = False,
response_conn: AsyncHTTPConnection | None = None,
preload_content: bool = True,
decode_content: bool = True,
enforce_content_length: bool = True,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = None,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
]
| None = None,
multiplexed: Literal[False] | Literal[True] = False,
) -> AsyncHTTPResponse | ResponsePromise:
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param response_conn:
Set this to ``None`` if you will handle releasing the connection or
set the connection to have the response release it.
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
try:
# Trigger any extra validation we need to do.
try:
await self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# _validate_conn() starts the connection to an HTTPS proxy
# so we need to wrap errors with 'ProxyError' here too.
except (
OSError,
NewConnectionError,
TimeoutError,
BaseSSLError,
CertificateError,
SSLError,
) as e:
new_e: Exception = e
if isinstance(e, (BaseSSLError, CertificateError)):
new_e = SSLError(e)
            # If the connection didn't successfully connect to its proxy,
            # wrap the error in a ProxyError below.
if isinstance(
new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
) and (conn and conn.proxy and not conn.has_connected_to_proxy):
new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
raise new_e
if on_post_connection is not None and conn.conn_info is not None:
# A second request does not redo handshake or DNS resolution.
if (
hasattr(conn, "_start_last_request")
and conn._start_last_request is not None
):
if conn.conn_info.tls_handshake_latency:
conn.conn_info.tls_handshake_latency = timedelta()
if conn.conn_info.established_latency:
conn.conn_info.established_latency = timedelta()
if conn.conn_info.resolution_latency:
conn.conn_info.resolution_latency = timedelta()
if conn.conn_info.request_sent_latency:
conn.conn_info.request_sent_latency = None
await on_post_connection(conn.conn_info)
if conn.is_multiplexed is False and multiplexed is True:
# overruling
multiplexed = False
try:
rp = await conn.request(
method,
url,
body=body,
headers=headers,
chunked=chunked,
preload_content=preload_content,
decode_content=decode_content,
enforce_content_length=enforce_content_length,
on_upload_body=on_upload_body,
)
# We are swallowing BrokenPipeError (errno.EPIPE) since the server is
# legitimately able to close the connection after sending a valid response.
# With this behaviour, the received response is still readable.
except BrokenPipeError as e:
rp = e.promise # type: ignore
except OSError as e:
rp = None
# MacOS/Linux
# EPROTOTYPE is needed on macOS
# https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE:
raise
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
if multiplexed:
if rp is None:
raise OSError
rp.set_parameter("read_timeout", read_timeout)
return rp
if not conn.is_closed:
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={read_timeout})"
)
conn.timeout = read_timeout
# Receive the response from the server
try:
response = await conn.getresponse(police_officer=self.pool)
except (BaseSSLError, OSError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# Set properties that are used by the pooling layer.
response.retries = retries
response._pool = self
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
# HTTP version
conn._http_vsn_str,
response.status,
response.length_remaining,
)
return response
async def close(self) -> None:
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
# Close all the HTTPConnections in the pool.
await old_pool.clear()
# Close allocated resolver if we own it. (aka. not shared)
if self._own_resolver and self._resolver.is_available():
await self._resolver.close()
def is_same_host(self, url: str) -> bool:
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, _, host, port, *_ = parse_url(url)
scheme = scheme or "http"
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
@typing.overload # type: ignore[override]
async def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | bool | int | None = ...,
redirect: bool = ...,
assert_same_host: bool = ...,
timeout: _TYPE_TIMEOUT = ...,
pool_timeout: int | None = ...,
release_conn: bool | None = ...,
chunked: bool = ...,
body_pos: _TYPE_BODY_POSITION | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[False] = ...,
**response_kw: typing.Any,
) -> AsyncHTTPResponse:
...
@typing.overload
async def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | bool | int | None = ...,
redirect: bool = ...,
assert_same_host: bool = ...,
timeout: _TYPE_TIMEOUT = ...,
pool_timeout: int | None = ...,
release_conn: bool | None = ...,
chunked: bool = ...,
body_pos: _TYPE_BODY_POSITION | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[True],
**response_kw: typing.Any,
) -> ResponsePromise:
...
async def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
redirect: bool = True,
assert_same_host: bool = True,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
pool_timeout: int | None = None,
release_conn: bool | None = None,
chunked: bool = False,
body_pos: _TYPE_BODY_POSITION | None = None,
preload_content: bool = True,
decode_content: bool = True,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = None,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
]
| None = None,
multiplexed: bool = False,
**response_kw: typing.Any,
) -> AsyncHTTPResponse | ResponsePromise:
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method
such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When ``False``, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param bool preload_content:
If True, the response's body will be preloaded into memory.
:param bool decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of ``preload_content``
which defaults to ``True``.
:param bool chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param on_post_connection:
Callable invoked with the connection specifics of the request about
to be sent. See the ``urllib3.ConnectionInfo`` class for more.
:param on_upload_body:
Callable invoked during body upload so that progress can be tracked.
The values are expressed in bytes. The total may be unavailable, in
which case it is set to None. In order, the arguments are:
(total_sent, total_to_be_sent, completed, any_error)
:param multiplexed:
Dispatch the request in a non-blocking way; the response will be
retrieved later with the get_response() method.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
parsed_url = parse_url(url)
destination_scheme = parsed_url.scheme
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = preload_content
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = to_str(_encode_target(url))
else:
url = to_str(parsed_url.url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
http_tunnel_required = connection_requires_http_tunnel(
self.proxy, self.proxy_config, destination_scheme
)
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
# have to copy the headers dict so we can safely change it without those
# changes being reflected in anyone else's copy.
if not http_tunnel_required:
headers = headers.copy() # type: ignore[attr-defined]
headers.update(self.proxy_headers) # type: ignore[union-attr]
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
await self.pool.wait_for_unallocated_or_available_slot()
conn = await self._get_conn(timeout=pool_timeout, heb_timeout=timeout_obj)
conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]
# Is this a closed/new connection that requires CONNECT tunnelling?
if self.proxy is not None and http_tunnel_required and conn.is_closed:
try:
await self._prepare_proxy(conn)
except (BaseSSLError, OSError, SocketTimeout) as e:
self._raise_timeout(
err=e, url=self.proxy.url, timeout_value=conn.timeout
)
raise
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Make the request on the HTTPConnection object
response = await self._make_request( # type: ignore[call-overload,misc]
conn,
method,
url,
body=body,
headers=headers,
retries=retries,
timeout=timeout_obj,
chunked=chunked,
response_conn=response_conn,
preload_content=preload_content,
decode_content=decode_content,
enforce_content_length=True,
on_post_connection=on_post_connection,
on_upload_body=on_upload_body,
multiplexed=multiplexed,
)
# A non-multiplexed connection was established. Fall back to the original behavior.
if not isinstance(response, ResponsePromise):
multiplexed = False
if multiplexed:
response.update_parameters(
{
"method": method,
"url": url,
"body": body,
"headers": headers,
"retries": retries,
"preload_content": preload_content,
"decode_content": decode_content,
"timeout": timeout,
"redirect": redirect,
"response_kw": response_kw,
"pool_timeout": pool_timeout,
"assert_same_host": assert_same_host,
"chunked": chunked,
"body_pos": body_pos,
}
)
release_this_conn = not conn.is_saturated
# Everything went great!
clean_exit = True
except EmptyPoolError:
# Didn't get a connection from the pool, no need to clean up
clean_exit = True
release_this_conn = False
raise
except (
TimeoutError,
OSError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
ProxyError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
new_e: Exception = e
if isinstance(e, (BaseSSLError, CertificateError)):
new_e = SSLError(e)
if isinstance(
new_e,
(
OSError,
NewConnectionError,
TimeoutError,
SSLError,
),
) and (conn and conn.proxy and not conn.has_connected_to_proxy):
new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
elif isinstance(new_e, OSError):
new_e = ProtocolError("Connection aborted.", new_e)
retries = retries.increment(
method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
)
await retries.async_sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
if conn:
await conn.close()
conn = None
release_this_conn = True
elif conn and conn.is_multiplexed is True:
# multiplexing allows us to issue more requests.
release_this_conn = True
if release_this_conn is True and conn is not None:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
await self._put_conn(conn)
if (
clean_exit
and isinstance(response, ResponsePromise)
and self.pool is not None
):
self.pool.memorize(response, conn)
elif release_this_conn is True and self.pool is not None:
await self.pool.kill_cursor()
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return await self.urlopen( # type: ignore[no-any-return,call-overload,misc]
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=multiplexed,
**response_kw,
)
if multiplexed:
assert isinstance(response, ResponsePromise)
return response # actually a response promise!
assert isinstance(response, AsyncHTTPResponse)
if redirect and response.get_redirect_location():
# Handle redirect?
redirect_location = response.get_redirect_location()
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
await response.drain_conn()
raise
return response
await response.drain_conn()
await retries.async_sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return await self.urlopen( # type: ignore[call-overload,no-any-return,misc]
method,
redirect_location,
body=body,
headers=headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=False,
**response_kw,
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
await response.drain_conn()
raise
return response
await response.drain_conn()
await retries.async_sleep(response)
log.debug("Retry: %s", url)
return await self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=False,
**response_kw,
)
return response
|
(host: 'str', port: 'int | None' = None, timeout: '_TYPE_TIMEOUT | None' = <_TYPE_DEFAULT.token: -1>, maxsize: 'int' = 1, block: 'bool' = False, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | bool | int | None' = None, _proxy: 'Url | None' = None, _proxy_headers: 'typing.Mapping[str, str] | None' = None, _proxy_config: 'ProxyConfig | None' = None, resolver: 'AsyncResolverDescription | list[AsyncResolverDescription] | str | list[str] | AsyncBaseResolver | None' = None, happy_eyeballs: 'bool | int' = False, **conn_kw: 'typing.Any')
|
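A minimal usage sketch (not from the indexed source) of the ``release_conn``/``preload_content`` interaction documented above; the import path mirrors the module name recorded in this row, and the host, path, and awaitability of ``close()`` are assumptions.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    # preload_content=False keeps the body on the wire; release_conn defaults
    # to preload_content, so the connection is NOT auto-released here.
    r = await pool.urlopen("GET", "/", preload_content=False)
    while True:
        chunk = await r.read(8192)  # incremental reads
        if not chunk:
            break
    r.release_conn()  # hand the connection back to the pool
    await pool.close()  # assumed to be a coroutine in this async variant

asyncio.run(main())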
709,973
|
urllib3_future._async.connectionpool
|
__init__
| null |
def __init__(
self,
host: str,
port: int | None = None,
timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
maxsize: int = 1,
block: bool = False,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
_proxy_headers: typing.Mapping[str, str] | None = None,
_proxy_config: ProxyConfig | None = None,
resolver: AsyncResolverDescription
| list[AsyncResolverDescription]
| str
| list[str]
| AsyncBaseResolver
| None = None,
happy_eyeballs: bool | int = False,
**conn_kw: typing.Any,
):
AsyncConnectionPool.__init__(self, host, port)
AsyncRequestMethods.__init__(self, headers)
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.happy_eyeballs = happy_eyeballs
self._maxsize = maxsize
if self.QueueCls is not AsyncTrafficPolice and not issubclass(
self.QueueCls, AsyncTrafficPolice
):
warnings.warn(
"ConnectionPool QueueCls no longer support typical queue implementation "
"due to its inability to answer urllib3.future needs to handle concurrent streams "
"in a single connection. You may customize the implementation by passing a subclass of "
"urllib3.util._async.traffic_police.AsyncTrafficPolice if necessary.",
DeprecationWarning,
)
self.QueueCls = AsyncTrafficPolice
self.pool: AsyncTrafficPolice[AsyncHTTPConnection] | None = self.QueueCls(
maxsize
)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
self.proxy_config = _proxy_config
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
self.conn_kw["proxy"] = self.proxy
self.conn_kw["proxy_config"] = self.proxy_config
self._own_resolver = not isinstance(resolver, AsyncBaseResolver)
if resolver is None:
resolver = [AsyncResolverDescription(ProtocolResolver.SYSTEM)]
elif isinstance(resolver, str):
resolver = [AsyncResolverDescription.from_url(resolver)]
elif isinstance(resolver, AsyncResolverDescription):
resolver = [resolver]
self._resolvers: list[AsyncResolverDescription] = []
if not isinstance(resolver, AsyncBaseResolver):
can_resolve_localhost: bool = False
for resolver_description in resolver:
if isinstance(resolver_description, str):
self._resolvers.append(
AsyncResolverDescription.from_url(resolver_description)
)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
continue
self._resolvers.append(resolver_description)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
if not can_resolve_localhost:
self._resolvers.append(
AsyncResolverDescription.from_url(
"system://default?hosts=localhost"
)
)
#: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
for rd in self._resolvers:
if "ca_cert_data" in conn_kw:
if "ca_cert_data" not in rd:
rd["ca_cert_data"] = conn_kw["ca_cert_data"]
if "ca_cert_dir" in conn_kw:
if "ca_cert_dir" not in rd:
rd["ca_cert_dir"] = conn_kw["ca_cert_dir"]
if "ca_certs" in conn_kw:
if "ca_certs" not in rd:
rd["ca_certs"] = conn_kw["ca_certs"]
self._resolver: AsyncBaseResolver = (
AsyncManyResolver(*[r.new() for r in self._resolvers])
if not isinstance(resolver, AsyncBaseResolver)
else resolver
)
self.conn_kw["resolver"] = self._resolver
|
(self, host: str, port: Optional[int] = None, timeout: Union[float, urllib3_future.util.timeout._TYPE_DEFAULT, urllib3_future.util.timeout.Timeout, NoneType] = <_TYPE_DEFAULT.token: -1>, maxsize: int = 1, block: bool = False, headers: Optional[Mapping[str, str]] = None, retries: Union[urllib3_future.util.retry.Retry, bool, int, NoneType] = None, _proxy: Optional[urllib3_future.util.url.Url] = None, _proxy_headers: Optional[Mapping[str, str]] = None, _proxy_config: Optional[urllib3_future._typing.ProxyConfig] = None, resolver: Union[urllib3_future.contrib.resolver._async.factories.AsyncResolverDescription, list[urllib3_future.contrib.resolver._async.factories.AsyncResolverDescription], str, list[str], urllib3_future.contrib.resolver._async.protocols.AsyncBaseResolver, NoneType] = None, happy_eyeballs: bool | int = False, **conn_kw: Any)
|
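A hedged construction sketch for the parameters above: the ``system://`` resolver DSN reuses the scheme that appears verbatim in this constructor, and the host is a placeholder.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool(
        "example.org",
        port=80,
        maxsize=4,                    # up to four pooled connections
        resolver="system://default",  # DSN string, parsed via AsyncResolverDescription.from_url()
        happy_eyeballs=True,          # race connection attempts across address families
    )
    r = await pool.request("GET", "/")
    print(r.status)
    await pool.close()  # assumed awaitable

asyncio.run(main())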
709,975
|
urllib3_future._async.connectionpool
|
_get_conn
|
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
|
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
|
(self, timeout: Optional[float] = None, *, heb_timeout: Optional[urllib3_future.util.timeout.Timeout] = None) -> urllib3_future._async.connection.AsyncHTTPConnection
|
709,977
|
urllib3_future._async.connectionpool
|
_make_request
|
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param response_conn:
Set this to ``None`` if you will handle releasing the connection or
set the connection to have the response release it.
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
|
@typing.overload
async def _make_request(
self,
conn: AsyncHTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | None = ...,
timeout: _TYPE_TIMEOUT = ...,
chunked: bool = ...,
response_conn: AsyncHTTPConnection | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
enforce_content_length: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[False] = ...,
) -> AsyncHTTPResponse:
...
|
(self, conn: 'AsyncHTTPConnection', method: 'str', url: 'str', body: '_TYPE_BODY | None' = None, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | None' = None, timeout: '_TYPE_TIMEOUT' = <_TYPE_DEFAULT.token: -1>, chunked: 'bool' = False, response_conn: 'AsyncHTTPConnection | None' = None, preload_content: 'bool' = True, decode_content: 'bool' = True, enforce_content_length: 'bool' = True, on_post_connection: 'typing.Callable[[ConnectionInfo], typing.Awaitable[None]] | None' = None, on_upload_body: 'typing.Callable[[int, int | None, bool, bool], typing.Awaitable[None]] | None' = None, multiplexed: 'Literal[False] | Literal[True]' = False) -> 'AsyncHTTPResponse | ResponsePromise'
|
709,978
|
urllib3_future._async.connectionpool
|
_new_conn
|
Return a fresh :class:`HTTPConnection`.
|
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
|
(self, *, heb_timeout: Optional[urllib3_future.util.timeout.Timeout] = None) -> urllib3_future._async.connection.AsyncHTTPConnection
|
709,979
|
urllib3_future._async.connectionpool
|
_prepare_proxy
| null |
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
|
(self, conn: urllib3_future._async.connection.AsyncHTTPConnection) -> NoneType
|
709,980
|
urllib3_future._async.connectionpool
|
_put_conn
|
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
|
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
|
(self, conn: urllib3_future._async.connection.AsyncHTTPConnection) -> NoneType
|
709,982
|
urllib3_future._async.connectionpool
|
_validate_conn
|
Called right before a request is made, after the socket is created.
|
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
|
(self, conn: urllib3_future._async.connection.AsyncHTTPConnection) -> NoneType
|
709,983
|
urllib3_future._async.connectionpool
|
close
|
Close all pooled connections and disable the pool.
|
@typing.overload
async def _make_request(
self,
conn: AsyncHTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | None = ...,
timeout: _TYPE_TIMEOUT = ...,
chunked: bool = ...,
response_conn: AsyncHTTPConnection | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
enforce_content_length: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[False] = ...,
) -> AsyncHTTPResponse:
...
|
(self) -> NoneType
|
709,984
|
urllib3_future._async.connectionpool
|
get_response
|
Retrieve the first response available in the pool.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none is available, return None.
|
def _raise_timeout(
self,
err: BaseSSLError | OSError | SocketTimeout,
url: str,
timeout_value: _TYPE_TIMEOUT | None,
) -> None:
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={timeout_value})"
) from err
# See the above comment about EAGAIN in Python 3.
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={timeout_value})"
) from err
|
(self, *, promise: Optional[urllib3_future.backend._base.ResponsePromise] = None) -> urllib3_future._async.response.AsyncHTTPResponse | None
|
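A sketch of the promise workflow this docstring describes, assuming the connection actually negotiates multiplexing (otherwise ``urlopen`` falls back and returns a plain response); host and paths are placeholders.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    # Dispatch two requests without blocking on their responses.
    p1 = await pool.urlopen("GET", "/a", multiplexed=True)
    p2 = await pool.urlopen("GET", "/b", multiplexed=True)
    r1 = await pool.get_response(promise=p1)  # a specific promise...
    r2 = await pool.get_response()            # ...or the first available one
    # get_response() returns None when nothing is pending.
    if r1 is not None and r2 is not None:
        print(r1.status, r2.status)
    await pool.close()  # assumed awaitable

asyncio.run(main())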
709,986
|
urllib3_future._request_methods
|
request
|
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
|
@typing.overload
async def request(
self,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
fields: _TYPE_FIELDS | None = ...,
headers: typing.Mapping[str, str] | None = ...,
json: typing.Any | None = ...,
*,
multiplexed: Literal[True],
**urlopen_kw: typing.Any,
) -> ResponsePromise:
...
|
(self, method: 'str', url: 'str', body: '_TYPE_BODY | None' = None, fields: '_TYPE_FIELDS | None' = None, headers: 'typing.Mapping[str, str] | None' = None, json: 'typing.Any | None' = None, **urlopen_kw: 'typing.Any') -> 'AsyncHTTPResponse | ResponsePromise'
|
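A short sketch of the convenience method (placeholder endpoint); ``fields`` would go to the URL for GET-like methods and to the body for POST-like ones, while ``json=`` builds a JSON body as permitted by the signature below.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    r = await pool.request("POST", "/submit", json={"name": "demo"})  # JSON-encoded body
    print(r.status, await r.json())
    await pool.close()  # assumed awaitable

asyncio.run(main())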
709,987
|
urllib3_future._request_methods
|
request_encode_body
|
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:func:`urllib3.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:func:`urllib.parse.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic the behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
|
@typing.overload
async def request_encode_body(
self,
method: str,
url: str,
fields: _TYPE_FIELDS | None = ...,
headers: typing.Mapping[str, str] | None = ...,
encode_multipart: bool = ...,
multipart_boundary: str | None = ...,
*,
multiplexed: Literal[True],
**urlopen_kw: typing.Any,
) -> ResponsePromise:
...
|
(self, method: 'str', url: 'str', fields: '_TYPE_FIELDS | None' = None, headers: 'typing.Mapping[str, str] | None' = None, encode_multipart: 'bool' = True, multipart_boundary: 'str | None' = None, **urlopen_kw: 'typing.Any') -> 'AsyncHTTPResponse | ResponsePromise'
|
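A sketch exercising the filetuple format documented above; the endpoint and file contents are placeholders.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    fields = {
        "foo": "bar",
        "fakefile": ("foofile.txt", "contents of foofile", "text/plain"),
    }
    # encode_multipart=True (the default) builds a multipart/form-data body.
    r = await pool.request_encode_body("POST", "/upload", fields=fields)
    print(r.status)
    await pool.close()  # assumed awaitable

asyncio.run(main())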
709,988
|
urllib3_future._request_methods
|
request_encode_url
|
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
|
@typing.overload
async def request_encode_url(
self,
method: str,
url: str,
fields: _TYPE_ENCODE_URL_FIELDS | None = ...,
headers: typing.Mapping[str, str] | None = ...,
*,
multiplexed: Literal[True],
**urlopen_kw: typing.Any,
) -> ResponsePromise:
...
|
(self, method: 'str', url: 'str', fields: '_TYPE_ENCODE_URL_FIELDS | None' = None, headers: 'typing.Mapping[str, str] | None' = None, **urlopen_kw: 'typing.Any') -> 'AsyncHTTPResponse | ResponsePromise'
|
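A sketch of URL-encoded fields (placeholder endpoint); the fields end up in the query string.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    # Roughly equivalent to GET /search?q=urllib3&page=1
    r = await pool.request_encode_url(
        "GET", "/search", fields={"q": "urllib3", "page": "1"}
    )
    print(r.status)
    await pool.close()  # assumed awaitable

asyncio.run(main())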
709,989
|
urllib3_future._async.connectionpool
|
urlopen
|
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method
such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, ensures that the host of each request matches the pool's
host, raising HostChangedError otherwise. When ``False``, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param bool preload_content:
If True, the response's body will be preloaded into memory.
:param bool decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of ``preload_content``
which defaults to ``True``.
:param bool chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param on_post_connection:
Callable invoked with the connection specifics of the request about
to be sent. See the ``urllib3.ConnectionInfo`` class for more.
:param on_upload_body:
Callable invoked during body upload so that progress can be tracked.
The values are expressed in bytes. The total may be unavailable, in
which case it is set to None. In order, the arguments are:
(total_sent, total_to_be_sent, completed, any_error)
:param multiplexed:
Dispatch the request in a non-blocking way; the response will be
retrieved later with the get_response() method.
|
@typing.overload
async def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | bool | int | None = ...,
redirect: bool = ...,
assert_same_host: bool = ...,
timeout: _TYPE_TIMEOUT = ...,
pool_timeout: int | None = ...,
release_conn: bool | None = ...,
chunked: bool = ...,
body_pos: _TYPE_BODY_POSITION | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], typing.Awaitable[None]]
| None = ...,
on_upload_body: typing.Callable[
[int, int | None, bool, bool], typing.Awaitable[None]
] = ...,
*,
multiplexed: Literal[True],
**response_kw: typing.Any,
) -> ResponsePromise:
...
|
(self, method: str, url: str, body: Union[bytes, IO[Any], Iterable[bytes], Iterable[str], str, urllib3_future.backend._base.LowLevelResponse, urllib3_future.backend._async._base.AsyncLowLevelResponse, NoneType] = None, headers: Optional[Mapping[str, str]] = None, retries: Union[urllib3_future.util.retry.Retry, bool, int, NoneType] = None, redirect: bool = True, assert_same_host: bool = True, timeout: Union[float, urllib3_future.util.timeout._TYPE_DEFAULT, urllib3_future.util.timeout.Timeout, NoneType] = <_TYPE_DEFAULT.token: -1>, pool_timeout: Optional[int] = None, release_conn: Optional[bool] = None, chunked: bool = False, body_pos: Union[int, urllib3_future.util.request._TYPE_FAILEDTELL, NoneType] = None, preload_content: bool = True, decode_content: bool = True, on_post_connection: Optional[Callable[[urllib3_future.backend._base.ConnectionInfo], Awaitable[NoneType]]] = None, on_upload_body: Optional[Callable[[int, int | None, bool, bool], Awaitable[NoneType]]] = None, multiplexed: bool = False, **response_kw: Any) -> urllib3_future._async.response.AsyncHTTPResponse | urllib3_future.backend._base.ResponsePromise
|
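A sketch of the ``on_upload_body`` progress hook whose argument order the docstring spells out; parameter names and the endpoint are illustrative.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def on_upload_body(sent: int, total: int | None, completed: bool, errored: bool) -> None:
    # total is None when the body size cannot be known upfront (e.g. a generator body).
    print(f"uploaded {sent}/{total if total is not None else '?'} done={completed} error={errored}")

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    r = await pool.urlopen("PUT", "/data", body=b"x" * 65536, on_upload_body=on_upload_body)
    print(r.status)
    await pool.close()  # assumed awaitable

asyncio.run(main())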
709,990
|
urllib3_future._async.response
|
AsyncHTTPResponse
| null |
class AsyncHTTPResponse(HTTPResponse):
def __init__(
self,
body: _TYPE_BODY = "",
headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
status: int = 0,
version: int = 0,
reason: str | None = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: AsyncLowLevelResponse | None = None,
pool: AsyncHTTPConnectionPool | None = None,
connection: AsyncHTTPConnection | None = None,
msg: Message | None = None,
retries: Retry | None = None,
enforce_content_length: bool = True,
request_method: str | None = None,
request_url: str | None = None,
auto_close: bool = True,
police_officer: AsyncTrafficPolice[AsyncHTTPConnection] | None = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
try:
self.status = int(status)
except ValueError:
self.status = 0 # merely for tests, was supported due to broken httplib.
self.version = version
self.reason = reason
self.decode_content = decode_content
self._has_decoded_content = False
self._request_url: str | None = request_url
self._retries: Retry | None = None
self.retries = retries
self.chunked = False
if "transfer-encoding" in self.headers:
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: ContentDecoder | None = None
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: AsyncLowLevelResponse | typing.IO[typing.Any] | None = None # type: ignore[assignment]
self._original_response = original_response # type: ignore[assignment]
self._fp_bytes_read = 0
if msg is not None:
warnings.warn(
"Passing msg=.. is deprecated and no-op in urllib3.future and is scheduled to be removed in a future major.",
DeprecationWarning,
stacklevel=2,
)
self.msg = msg
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool: AsyncHTTPConnectionPool = pool # type: ignore[assignment]
self._connection: AsyncHTTPConnection = connection # type: ignore[assignment]
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
# Are we using the chunked-style of transfer encoding?
self.chunk_left: int | None = None
# Determine length of response
self.length_remaining: int | None = self._init_length(request_method)
# Used to return the correct amount of bytes for partial read()s
self._decoded_buffer = BytesQueueBuffer()
self._police_officer: AsyncTrafficPolice[AsyncHTTPConnection] | None = police_officer # type: ignore[assignment]
if self._police_officer is not None:
self._police_officer.memorize(self, self._connection)
async def readinto(self, b: bytearray) -> int: # type: ignore[override]
temp = await self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
@asynccontextmanager
async def _error_catcher(self) -> typing.AsyncGenerator[None, None]: # type: ignore[override]
"""
Catch low-level Python exceptions and re-raise urllib3
variants instead, so that low-level exceptions are not leaked in the
high-level API.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout as e:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e):
# SSL errors related to framing/MAC get wrapped and reraised here
raise SSLError(e) from e
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except OSError as e:
# This includes IncompleteRead.
raise ProtocolError(f"Connection broken: {e!r}", e) from e
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
await self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
async def drain_conn(self) -> None: # type: ignore[override]
"""
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
"""
try:
await self.read()
except (HTTPError, OSError, BaseSSLError):
pass
async def json(self) -> typing.Any:
"""
Parses the body of the HTTP response as JSON.
To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.
This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.
Read more :ref:`here <json>`.
"""
data = (await self.data).decode("utf-8")
return _json.loads(data)
@property
async def data(self) -> bytes: # type: ignore[override]
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return await self.read(cache_content=True)
return None # type: ignore[return-value]
async def _fp_read(self, amt: int | None = None) -> bytes: # type: ignore[override]
"""
Read a response while capping chunk sizes, because in some known
cases reading more bytes than fit in a 32-bit int at a time via SSL
leads to an overflow error; the cap is applied whenever `amt` or
`self.length_remaining` indicate that a problem may happen.
The known cases:
* 3.8 <= CPython < 3.9.7 because of a bug
https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
* urllib3 injected with pyOpenSSL-backed SSL-support.
* CPython < 3.10 only when `amt` does not fit 32-bit int.
"""
assert self._fp
c_int_max = 2**31 - 1
if (
(amt and amt > c_int_max)
or (self.length_remaining and self.length_remaining > c_int_max)
) and sys.version_info < (3, 10):
buffer = io.BytesIO()
# Besides `max_chunk_amt` being a maximum chunk size, it
# affects memory overhead of reading a response by this
# method in CPython.
# `c_int_max` equal to 2 GiB - 1 byte is the actual maximum
# chunk size that does not lead to an overflow error, but
# 256 MiB is a compromise.
max_chunk_amt = 2**28
while amt is None or amt != 0:
if amt is not None:
chunk_amt = min(amt, max_chunk_amt)
amt -= chunk_amt
else:
chunk_amt = max_chunk_amt
try:
if isinstance(self._fp, AsyncLowLevelResponse):
data = await self._fp.read(chunk_amt)
else:
data = self._fp.read(chunk_amt) # type: ignore[attr-defined]
except ValueError: # Defensive: overly protective
break # Defensive: can also be an indicator that read ended, should not happen.
if not data:
break
buffer.write(data)
del data # to reduce peak memory usage by `max_chunk_amt`.
return buffer.getvalue()
else:
# StringIO doesn't like amt=None
if isinstance(self._fp, AsyncLowLevelResponse):
return await self._fp.read(amt)
return self._fp.read(amt) if amt is not None else self._fp.read() # type: ignore[no-any-return]
async def _raw_read( # type: ignore[override]
self,
amt: int | None = None,
) -> bytes:
"""
Reads `amt` of bytes from the socket.
"""
if self._fp is None:
return None # type: ignore[return-value]
fp_closed = getattr(self._fp, "closed", False)
async with self._error_catcher():
data = (await self._fp_read(amt)) if not fp_closed else b""
if amt is not None and amt != 0 and not data:
# Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
if (
self.enforce_content_length
and self.length_remaining is not None
and self.length_remaining != 0
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
return data
async def read( # type: ignore[override]
self,
amt: int | None = None,
decode_content: bool | None = None,
cache_content: bool = False,
) -> bytes:
try:
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if amt is not None:
cache_content = False
if amt < 0 and len(self._decoded_buffer):
return self._decoded_buffer.get(len(self._decoded_buffer))
if 0 < amt <= len(self._decoded_buffer):
return self._decoded_buffer.get(amt)
if self._police_officer is not None and not self._police_officer.busy:
async with self._police_officer.borrow(self):
data = await self._raw_read(amt)
else:
data = await self._raw_read(amt)
if amt and amt < 0:
amt = len(data)
flush_decoder = False
if amt is None:
flush_decoder = True
elif amt != 0 and not data:
flush_decoder = True
if not data and len(self._decoded_buffer) == 0:
return data
if amt is None:
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
else:
# do not waste memory on buffer when not decoding
if not decode_content:
if self._has_decoded_content:
raise RuntimeError(
"Calling read(decode_content=False) is not supported after "
"read(decode_content=True) was called."
)
return data
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
while len(self._decoded_buffer) < amt and data:
# TODO make sure to initially read enough data to get past the headers
# For example, the GZ file header takes 10 bytes, we don't want to read
# it one byte at a time
if (
self._police_officer is not None
and not self._police_officer.busy
):
async with self._police_officer.borrow(self):
data = await self._raw_read(amt)
else:
data = await self._raw_read(amt)
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
data = self._decoded_buffer.get(amt)
return data
finally:
if (
self._fp
and hasattr(self._fp, "_eot")
and self._fp._eot
and self._police_officer is not None
):
self._police_officer.forget(self)
if self._police_officer.busy:
self._police_officer.release()
self._police_officer = None
async def stream( # type: ignore[override]
self, amt: int | None = 2**16, decode_content: bool | None = None
) -> typing.AsyncGenerator[bytes, None]:
while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0:
data = await self.read(amt=amt, decode_content=decode_content)
if data:
yield data
async def close(self) -> None: # type: ignore[override]
if not self.closed and self._fp:
self._fp.close()
if self._connection:
await self._connection.close()
if not self.auto_close:
io.IOBase.close(self)
async def __aiter__(self) -> typing.AsyncIterator[bytes]:
buffer: list[bytes] = []
async for chunk in self.stream(-1, decode_content=True):
if b"\n" in chunk:
chunks = chunk.split(b"\n")
yield b"".join(buffer) + chunks[0] + b"\n"
for x in chunks[1:-1]:
yield x + b"\n"
if chunks[-1]:
buffer = [chunks[-1]]
else:
buffer = []
else:
buffer.append(chunk)
if buffer:
yield b"".join(buffer)
def __del__(self) -> None:
if not self.closed:
if self._fp:
self._fp.close()
if not self.auto_close:
io.IOBase.close(self)
|
(body: '_TYPE_BODY' = '', headers: 'typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None' = None, status: 'int' = 0, version: 'int' = 0, reason: 'str | None' = None, preload_content: 'bool' = True, decode_content: 'bool' = True, original_response: 'AsyncLowLevelResponse | None' = None, pool: 'AsyncHTTPConnectionPool | None' = None, connection: 'AsyncHTTPConnection | None' = None, msg: 'Message | None' = None, retries: 'Retry | None' = None, enforce_content_length: 'bool' = True, request_method: 'str | None' = None, request_url: 'str | None' = None, auto_close: 'bool' = True, police_officer: 'AsyncTrafficPolice[AsyncHTTPConnection] | None' = None) -> 'None'
|
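A consumption sketch for the class above, exercising its awaitable ``data`` property and ``stream()``; host and paths are placeholders.
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    r = await pool.urlopen("GET", "/", preload_content=False)
    async for chunk in r.stream(4096):  # decoded chunks of up to 4 KiB
        print(len(chunk))
    r2 = await pool.urlopen("GET", "/")
    body = await r2.data  # data is an awaitable property on the async response
    print(len(body))
    await pool.close()  # assumed awaitable

asyncio.run(main())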
709,991
|
urllib3_future._async.response
|
__aiter__
| null |
@property
async def data(self) -> bytes: # type: ignore[override]
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return await self.read(cache_content=True)
return None # type: ignore[return-value]
|
(self) -> AsyncIterator[bytes]
|
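The iterator above re-chunks the stream on newlines; a small sketch of consuming a response line by line (placeholder endpoint).
import asyncio

from urllib3_future._async.connectionpool import AsyncHTTPConnectionPool  # assumed import path

async def main() -> None:
    pool = AsyncHTTPConnectionPool("example.org", port=80)
    r = await pool.urlopen("GET", "/", preload_content=False)
    async for line in r:  # yields newline-delimited byte chunks
        print(line.rstrip(b"\n"))
    await pool.close()  # assumed awaitable

asyncio.run(main())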
709,992
|
urllib3_future._async.response
|
__del__
| null |
def __del__(self) -> None:
if not self.closed:
if self._fp:
self._fp.close()
if not self.auto_close:
io.IOBase.close(self)
|
(self) -> NoneType
|
709,993
|
urllib3_future._async.response
|
__init__
| null |
def __init__(
self,
body: _TYPE_BODY = "",
headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
status: int = 0,
version: int = 0,
reason: str | None = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: AsyncLowLevelResponse | None = None,
pool: AsyncHTTPConnectionPool | None = None,
connection: AsyncHTTPConnection | None = None,
msg: Message | None = None,
retries: Retry | None = None,
enforce_content_length: bool = True,
request_method: str | None = None,
request_url: str | None = None,
auto_close: bool = True,
police_officer: AsyncTrafficPolice[AsyncHTTPConnection] | None = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
try:
self.status = int(status)
except ValueError:
self.status = 0 # merely for tests, was supported due to broken httplib.
self.version = version
self.reason = reason
self.decode_content = decode_content
self._has_decoded_content = False
self._request_url: str | None = request_url
self._retries: Retry | None = None
self.retries = retries
self.chunked = False
if "transfer-encoding" in self.headers:
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: ContentDecoder | None = None
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: AsyncLowLevelResponse | typing.IO[typing.Any] | None = None # type: ignore[assignment]
self._original_response = original_response # type: ignore[assignment]
self._fp_bytes_read = 0
if msg is not None:
warnings.warn(
"Passing msg=.. is deprecated and no-op in urllib3.future and is scheduled to be removed in a future major.",
DeprecationWarning,
stacklevel=2,
)
self.msg = msg
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool: AsyncHTTPConnectionPool = pool # type: ignore[assignment]
self._connection: AsyncHTTPConnection = connection # type: ignore[assignment]
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
# Are we using the chunked-style of transfer encoding?
self.chunk_left: int | None = None
# Determine length of response
self.length_remaining: int | None = self._init_length(request_method)
# Used to return the correct amount of bytes for partial read()s
self._decoded_buffer = BytesQueueBuffer()
self._police_officer: AsyncTrafficPolice[AsyncHTTPConnection] | None = police_officer # type: ignore[assignment]
if self._police_officer is not None:
self._police_officer.memorize(self, self._connection)
|
(self, body: '_TYPE_BODY' = '', headers: 'typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None' = None, status: 'int' = 0, version: 'int' = 0, reason: 'str | None' = None, preload_content: 'bool' = True, decode_content: 'bool' = True, original_response: 'AsyncLowLevelResponse | None' = None, pool: 'AsyncHTTPConnectionPool | None' = None, connection: 'AsyncHTTPConnection | None' = None, msg: 'Message | None' = None, retries: 'Retry | None' = None, enforce_content_length: 'bool' = True, request_method: 'str | None' = None, request_url: 'str | None' = None, auto_close: 'bool' = True, police_officer: 'AsyncTrafficPolice[AsyncHTTPConnection] | None' = None) -> 'None'
|
709,994
|
urllib3_future.response
|
__iter__
| null |
def __iter__(self) -> typing.Iterator[bytes]:
buffer: list[bytes] = []
for chunk in self.stream(-1, decode_content=True):
if b"\n" in chunk:
chunks = chunk.split(b"\n")
yield b"".join(buffer) + chunks[0] + b"\n"
for x in chunks[1:-1]:
yield x + b"\n"
if chunks[-1]:
buffer = [chunks[-1]]
else:
buffer = []
else:
buffer.append(chunk)
if buffer:
yield b"".join(buffer)
|
(self) -> Iterator[bytes]
|
709,996
|
urllib3_future._async.response
|
_error_catcher
|
Catch low-level Python exceptions and re-raise urllib3
variants instead, so that low-level exceptions are not leaked in the
high-level API.
On exit, release the connection back to the pool.
|
@property
async def data(self) -> bytes: # type: ignore[override]
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return await self.read(cache_content=True)
return None # type: ignore[return-value]
|
(self) -> AsyncGenerator[NoneType, NoneType]
|
709,998
|
urllib3_future._async.response
|
_fp_read
|
Read a response while capping chunk sizes, because in some known
cases reading more bytes than fit in a 32-bit int at a time via SSL
leads to an overflow error; the cap is applied whenever `amt` or
`self.length_remaining` indicate that a problem may happen.
The known cases:
* 3.8 <= CPython < 3.9.7 because of a bug
https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
* urllib3 injected with pyOpenSSL-backed SSL-support.
* CPython < 3.10 only when `amt` does not fit 32-bit int.
|
@property
async def data(self) -> bytes: # type: ignore[override]
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return await self.read(cache_content=True)
return None # type: ignore[return-value]
|
(self, amt: Optional[int] = None) -> bytes
|
709,999
|
urllib3_future.response
|
_init_decoder
|
Set up the _decoder attribute if necessary.
|
def _init_decoder(self) -> None:
"""
Set up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
if "content-encoding" not in self.headers:
return
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if encodings:
self._decoder = _get_decoder(content_encoding)
|
(self) -> NoneType
|
710,000
|
urllib3_future.response
|
_init_length
|
Set initial length value for Response content if available.
|
def _init_length(self, request_method: str | None) -> int | None:
"""
Set initial length value for Response content if available.
"""
length: int | None
content_length: str | None = self.headers.get("content-length")
if content_length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning(
"Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked."
)
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
if "," in content_length:
lengths = {int(val) for val in content_length.split(",")}
if len(lengths) > 1:
raise InvalidHeader(
"Content-Length contained multiple "
"unmatching values (%s)" % content_length
)
length = lengths.pop()
else:
length = int(content_length)
except ValueError:
length = None
else:
if length < 0:
length = None
else: # if content_length is None
length = None
# Check for responses that shouldn't include a body
if (
self.status in (204, 304)
or 100 <= self.status < 200
or request_method == "HEAD"
):
length = 0
return length
|
(self, request_method: str | None) -> int | None
|
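A standalone sketch mirroring the multiple-value Content-Length handling above (RFC 7230 sec. 3.3.2): duplicate values must agree, conflicting ones are rejected, and negative or unparsable values yield ``None``. The helper name is illustrative.
def parse_content_length(value: str) -> int | None:
    # "42, 42" collapses to a single length; "42, 64" is invalid.
    try:
        lengths = {int(v) for v in value.split(",")}
    except ValueError:
        return None  # unparsable header, same outcome as the method above
    if len(lengths) > 1:
        raise ValueError(f"conflicting Content-Length values: {value!r}")
    length = lengths.pop()
    return length if length >= 0 else None

assert parse_content_length("42, 42") == 42
assert parse_content_length("-5") is None
assert parse_content_length("abc") is None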
710,001
|
urllib3_future._async.response
|
_raw_read
|
Reads `amt` of bytes from the socket.
|
@property
async def data(self) -> bytes: # type: ignore[override]
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return await self.read(cache_content=True)
return None # type: ignore[return-value]
|
(self, amt: Optional[int] = None) -> bytes
|
710,003
|
urllib3_future._async.response
|
drain_conn
|
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
|
@asynccontextmanager
async def _error_catcher(self) -> typing.AsyncGenerator[None, None]: # type: ignore[override]
"""
Catch low-level Python exceptions and re-raise urllib3
variants instead, so that low-level exceptions are not leaked in the
high-level API.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout as e:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e):
# SSL errors related to framing/MAC get wrapped and reraised here
raise SSLError(e) from e
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except OSError as e:
# This includes IncompleteRead.
raise ProtocolError(f"Connection broken: {e!r}", e) from e
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
await self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
|
(self) -> NoneType
|
710,005
|
urllib3_future.response
|
flush
| null |
def flush(self) -> None:
if (
self._fp is not None
and hasattr(self._fp, "flush")
and not getattr(self._fp, "closed", False)
):
return self._fp.flush() # type: ignore[return-value]
|
(self) -> NoneType
|
710,009
|
urllib3_future.response
|
is_from_promise
|
Determine if this response came from the given promise.
|
def is_from_promise(self, promise: ResponsePromise) -> bool:
"""
Determine if this response came from the given promise.
"""
return (
self._fp is not None
and hasattr(self._fp, "from_promise")
and self._fp.from_promise == promise
)
|
(self, promise: urllib3_future.backend._base.ResponsePromise) -> bool
|
710,011
|
urllib3_future._async.response
|
json
|
Parses the body of the HTTP response as JSON.
To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.
This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.
Read more :ref:`here <json>`.
|
@asynccontextmanager
async def _error_catcher(self) -> typing.AsyncGenerator[None, None]: # type: ignore[override]
"""
Catch low-level Python exceptions and re-raise urllib3
variants instead, so that low-level exceptions are not leaked in the
high-level API.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout as e:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e):
# SSL errors related to framing/MAC get wrapped and reraised here
raise SSLError(e) from e
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except OSError as e:
# This includes IncompleteRead.
raise ProtocolError(f"Connection broken: {e!r}", e) from e
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
await self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
|
(self) -> Any
|
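A short sketch of the documented behavior, assuming ``json()`` is awaitable on the async response and that ``AsyncPoolManager`` is used as in the examples further down in this dump; the URL is a placeholder.
import asyncio
import urllib3_future as urllib3  # assumed import name for urllib3-future

async def main() -> None:
    async with urllib3.AsyncPoolManager() as http:
        resp = await http.request("GET", "https://example.org/data.json")  # placeholder URL
        try:
            payload = await resp.json()
        except (UnicodeDecodeError, ValueError) as e:  # json.JSONDecodeError subclasses ValueError
            print("body was not valid JSON:", e)
        else:
            print(payload)

asyncio.run(main())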
710,015
|
urllib3_future.response
|
release_conn
| null |
def release_conn(self) -> None:
if not self._connection:
return None
if self._police_officer is not None:
if self._police_officer.busy:
self._police_officer.release()
self._connection = None
|
(self) -> NoneType
|
710,017
|
urllib3_future.response
|
supports_chunked_reads
|
Checks if the underlying file-like object looks like a
:class:`http.client.HTTPResponse` object. We do this by testing for
the fp attribute. If it is present we assume it returns raw chunks as
processed by read_chunked().
|
def supports_chunked_reads(self) -> bool:
"""
Checks if the underlying file-like object looks like a
:class:`http.client.HTTPResponse` object. We do this by testing for
the fp attribute. If it is present we assume it returns raw chunks as
processed by read_chunked().
"""
return False
|
(self) -> bool
|
710,019
|
urllib3_future._async.connectionpool
|
AsyncHTTPSConnectionPool
|
Same as :class:`.HTTPConnectionPool`, but HTTPS.
:class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
|
class AsyncHTTPSConnectionPool(AsyncHTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
:class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
"""
scheme = "https"
ConnectionCls: type[AsyncHTTPSConnection] = AsyncHTTPSConnection
def __init__(
self,
host: str,
port: int | None = None,
timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
maxsize: int = 1,
block: bool = False,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
_proxy_headers: typing.Mapping[str, str] | None = None,
key_file: str | None = None,
cert_file: str | None = None,
cert_reqs: int | str | None = None,
key_password: str | None = None,
ca_certs: str | None = None,
ssl_version: int | str | None = None,
ssl_minimum_version: ssl.TLSVersion | None = None,
ssl_maximum_version: ssl.TLSVersion | None = None,
assert_hostname: str | Literal[False] | None = None,
assert_fingerprint: str | None = None,
ca_cert_dir: str | None = None,
ca_cert_data: None | str | bytes = None,
cert_data: str | bytes | None = None,
key_data: str | bytes | None = None,
**conn_kw: typing.Any,
) -> None:
super().__init__(
host,
port,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw,
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ca_cert_data = ca_cert_data
self.cert_data = cert_data
self.key_data = key_data
self.ssl_version = ssl_version
self.ssl_minimum_version = ssl_minimum_version
self.ssl_maximum_version = ssl_maximum_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
async def _prepare_proxy(self, conn: AsyncHTTPSConnection) -> None: # type: ignore[override]
"""Establishes a tunnel connection through HTTP CONNECT."""
if self.proxy and self.proxy.scheme == "https":
tunnel_scheme = "https"
else:
tunnel_scheme = "http"
conn.set_tunnel(
scheme=tunnel_scheme,
host=self._tunnel_host,
port=self.port,
headers=self.proxy_headers,
)
await conn.connect()
async def _new_conn(
self, *, heb_timeout: Timeout | None = None
) -> AsyncHTTPSConnection:
"""
Return a fresh :class:`urllib3.connection.HTTPConnection`.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
self.num_connections += 1
log.debug(
"Starting new HTTPS connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "443",
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap]
raise ImportError(
"Can't connect to HTTPS URL because the SSL module is not available."
)
actual_host: str = self.host
actual_port = self.port
if self.proxy is not None and self.proxy.host is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = None
if self.happy_eyeballs:
# Taking this path establishes the connection (i.e. connects) earlier than usual.
# This is the only place where doing so is convenient.
log.debug(
"Attempting Happy-Eyeball %s:%s",
self.host,
self.port or "443",
)
# we have to get this metric here, as the underlying Connection object
# will have the DNS resolution set to 0s!
dt_pre_resolve = datetime.now(tz=timezone.utc)
ip_addresses = await self._resolver.getaddrinfo(
actual_host,
actual_port,
socket.AF_UNSPEC
if "socket_family" not in self.conn_kw
else self.conn_kw["socket_family"],
socket.SOCK_STREAM,
quic_upgrade_via_dns_rr=True, # we don't know if H3 is actually supported by the underlying Connection,
# we don't care, it will sort it out later.
)
delta_post_resolve = datetime.now(tz=timezone.utc) - dt_pre_resolve
target_pqc = {}
# did the user provide us with a quic capability cache? if so, use it!
if (
"preemptive_quic_cache" in self.conn_kw
and self.conn_kw["preemptive_quic_cache"] is not None
):
target_pqc = self.conn_kw["preemptive_quic_cache"]
# if the resolver hinted that we should use a DGRAM socket, inject it into the quic capability cache.
if any(_[1] == socket.SOCK_DGRAM for _ in ip_addresses):
if (self.host, self.port) not in target_pqc:
target_pqc[(self.host, self.port)] = (self.host, self.port)
# The HEB algorithm only makes sense if the name resolution yields more than one record.
if len(ip_addresses) > 1:
ipv6_addresses = []
ipv4_addresses = []
for ip_address in ip_addresses:
if ip_address[0] == socket.AF_INET6:
ipv6_addresses.append(ip_address)
else:
ipv4_addresses.append(ip_address)
# if we have BOTH IPv4 and IPv6 entries, we want to reorder the records
# so that we can be as fair as possible when spawning the tasks.
if ipv4_addresses and ipv6_addresses:
log.debug(
"Happy-Eyeball Dual-Stack %s:%s",
self.host,
self.port or "443",
)
intermediary_addresses = []
for ipv6_entry, ipv4_entry in zip_longest(
ipv6_addresses, ipv4_addresses
):
if ipv6_entry:
intermediary_addresses.append(ipv6_entry)
if ipv4_entry:
intermediary_addresses.append(ipv4_entry)
ip_addresses = intermediary_addresses
else:
log.debug(
"Happy-Eyeball Single-Stack %s:%s",
self.host,
self.port or "443",
)
challengers = []
max_task = (
4 if isinstance(self.happy_eyeballs, bool) else self.happy_eyeballs
)
if heb_timeout is None:
heb_timeout = self.timeout
override_timeout = (
heb_timeout.connect_timeout
if heb_timeout.connect_timeout is not None
and isinstance(heb_timeout.connect_timeout, (float, int))
else None
)
for ip_address in ip_addresses[:max_task]:
conn_kw = self.conn_kw.copy()
target_solo_addr = (
f"[{ip_address[-1][0]}]"
if ip_address[0] == socket.AF_INET6
else ip_address[-1][0]
)
conn_kw["resolver"] = AsyncResolverDescription.from_url(
f"in-memory://default?hosts={self.host}:{target_solo_addr}"
).new()
conn_kw["socket_family"] = ip_address[0]
conn_kw["preemptive_quic_cache"] = target_pqc
challengers.append(
self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=override_timeout,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
ca_cert_data=self.ca_cert_data,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
ssl_version=self.ssl_version,
ssl_minimum_version=self.ssl_minimum_version,
ssl_maximum_version=self.ssl_maximum_version,
cert_data=self.cert_data,
key_data=self.key_data,
**conn_kw,
)
)
tasks = [
asyncio.create_task(challenger.connect())
for i, challenger in enumerate(challengers)
]
winner_task = None
remnant_tasks = []
pending: set[Task[None]] = set()
# here we'll need at least one task that ended successfully OR every task terminated/completed.
while True:
done, pending = await asyncio.wait(
tasks if not pending else pending,
return_when=asyncio.FIRST_COMPLETED,
timeout=override_timeout,
)
while True:
    done, pending = await asyncio.wait(
        tasks if not pending else pending,
        return_when=asyncio.FIRST_COMPLETED,
        timeout=override_timeout,
    )
    while done:
        finished_task = done.pop()
        if finished_task.exception() is not None:
            # failed attempt: nothing usable to keep or close.
            continue
        if winner_task is None:
            winner_task = finished_task
        else:
            # extra successful connections get closed further below.
            remnant_tasks.append(finished_task)
if winner_task or not pending:
break
# we need to kill the remaining tasks.
for task in pending:
task.cancel()
if winner_task is None:
within_delay_msg: str = (
f" within {override_timeout}s" if override_timeout else ""
)
raise NewConnectionError(
challengers[
0
], # that's a bummer, but it wasn't planned for this algorithm.
f"Failed to establish a new connection: No suitable address to connect to using Happy Eyeballs algorithm for {actual_host}:{actual_port}{within_delay_msg}",
) from tasks[0].exception()
conn = challengers[tasks.index(winner_task)]
# we have to replace the resolution latency metric
if conn.conn_info:
conn.conn_info.resolution_latency = delta_post_resolve
if len(remnant_tasks):
# we may have more than one conn ready; carefully close the ones we will not use.
for disposable_remnant in remnant_tasks:
await challengers[tasks.index(disposable_remnant)].close()
else:
log.debug(
"Happy-Eyeball Ineligible %s:%s",
self.host,
self.port or "443",
)
if conn is None:
conn = self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=self.timeout.connect_timeout,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
ca_cert_data=self.ca_cert_data,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
ssl_version=self.ssl_version,
ssl_minimum_version=self.ssl_minimum_version,
ssl_maximum_version=self.ssl_maximum_version,
cert_data=self.cert_data,
key_data=self.key_data,
**self.conn_kw,
)
await self.pool.put(conn, immediately_unavailable=True)
return conn
async def _validate_conn(self, conn: AsyncHTTPConnection) -> None:
"""
Called right before a request is made, after the socket is created.
"""
await super()._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if conn.is_closed:
await conn.connect()
if not conn.is_verified:
warnings.warn(
(
f"Unverified HTTPS request is being made to host '{conn.host}'. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3future.readthedocs.io/en/latest/advanced-usage.html"
"#tls-warnings"
),
InsecureRequestWarning,
)
|
(host: 'str', port: 'int | None' = None, timeout: '_TYPE_TIMEOUT | None' = <_TYPE_DEFAULT.token: -1>, maxsize: 'int' = 1, block: 'bool' = False, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | bool | int | None' = None, _proxy: 'Url | None' = None, _proxy_headers: 'typing.Mapping[str, str] | None' = None, key_file: 'str | None' = None, cert_file: 'str | None' = None, cert_reqs: 'int | str | None' = None, key_password: 'str | None' = None, ca_certs: 'str | None' = None, ssl_version: 'int | str | None' = None, ssl_minimum_version: 'ssl.TLSVersion | None' = None, ssl_maximum_version: 'ssl.TLSVersion | None' = None, assert_hostname: 'str | Literal[False] | None' = None, assert_fingerprint: 'str | None' = None, ca_cert_dir: 'str | None' = None, ca_cert_data: 'None | str | bytes' = None, cert_data: 'str | bytes | None' = None, key_data: 'str | bytes | None' = None, **conn_kw: 'typing.Any') -> 'None'
|
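A minimal construction sketch for the pool above. The module path matches this record; the host and CA-bundle path are placeholders, and ``close()`` is assumed to mirror the synchronous pool API.
import asyncio
from urllib3_future._async.connectionpool import AsyncHTTPSConnectionPool

async def main() -> None:
    pool = AsyncHTTPSConnectionPool(
        "example.org",  # placeholder host
        port=443,
        maxsize=4,
        ca_certs="/etc/ssl/certs/ca-certificates.crt",  # hypothetical CA bundle path
    )
    resp = await pool.urlopen("GET", "/")
    print(resp.status)
    await pool.close()  # assumed to exist, mirroring the sync HTTPSConnectionPool

asyncio.run(main())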
710,022
|
urllib3_future._async.connectionpool
|
__init__
| null |
def __init__(
self,
host: str,
port: int | None = None,
timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
maxsize: int = 1,
block: bool = False,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
_proxy_headers: typing.Mapping[str, str] | None = None,
key_file: str | None = None,
cert_file: str | None = None,
cert_reqs: int | str | None = None,
key_password: str | None = None,
ca_certs: str | None = None,
ssl_version: int | str | None = None,
ssl_minimum_version: ssl.TLSVersion | None = None,
ssl_maximum_version: ssl.TLSVersion | None = None,
assert_hostname: str | Literal[False] | None = None,
assert_fingerprint: str | None = None,
ca_cert_dir: str | None = None,
ca_cert_data: None | str | bytes = None,
cert_data: str | bytes | None = None,
key_data: str | bytes | None = None,
**conn_kw: typing.Any,
) -> None:
super().__init__(
host,
port,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw,
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ca_cert_data = ca_cert_data
self.cert_data = cert_data
self.key_data = key_data
self.ssl_version = ssl_version
self.ssl_minimum_version = ssl_minimum_version
self.ssl_maximum_version = ssl_maximum_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
|
(self, host: 'str', port: 'int | None' = None, timeout: '_TYPE_TIMEOUT | None' = <_TYPE_DEFAULT.token: -1>, maxsize: 'int' = 1, block: 'bool' = False, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | bool | int | None' = None, _proxy: 'Url | None' = None, _proxy_headers: 'typing.Mapping[str, str] | None' = None, key_file: 'str | None' = None, cert_file: 'str | None' = None, cert_reqs: 'int | str | None' = None, key_password: 'str | None' = None, ca_certs: 'str | None' = None, ssl_version: 'int | str | None' = None, ssl_minimum_version: 'ssl.TLSVersion | None' = None, ssl_maximum_version: 'ssl.TLSVersion | None' = None, assert_hostname: 'str | Literal[False] | None' = None, assert_fingerprint: 'str | None' = None, ca_cert_dir: 'str | None' = None, ca_cert_data: 'None | str | bytes' = None, cert_data: 'str | bytes | None' = None, key_data: 'str | bytes | None' = None, **conn_kw: 'typing.Any') -> 'None'
|
710,027
|
urllib3_future._async.connectionpool
|
_new_conn
|
Return a fresh :class:`urllib3.connection.HTTPConnection`.
|
null
|
(self, *, heb_timeout: Optional[urllib3_future.util.timeout.Timeout] = None) -> urllib3_future._async.connection.AsyncHTTPSConnection
|
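The dual-stack reordering inside ``_new_conn`` is easy to lose in the flattened listing; the standalone re-statement below (function name invented for illustration) shows what the ``zip_longest`` interleaving produces.
from itertools import zip_longest

def interleave_families(ipv6_addresses: list, ipv4_addresses: list) -> list:
    # Mirrors _new_conn's Happy-Eyeballs step: alternate IPv6 and IPv4
    # records so concurrent connect attempts race fairly across families.
    ordered = []
    for v6, v4 in zip_longest(ipv6_addresses, ipv4_addresses):
        if v6 is not None:
            ordered.append(v6)
        if v4 is not None:
            ordered.append(v4)
    return ordered

print(interleave_families(["2001:db8::1", "2001:db8::2"], ["192.0.2.1"]))
# ['2001:db8::1', '192.0.2.1', '2001:db8::2']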
710,028
|
urllib3_future._async.connectionpool
|
_prepare_proxy
|
Establishes a tunnel connection through HTTP CONNECT.
|
async def _prepare_proxy(self, conn: AsyncHTTPSConnection) -> None:  # type: ignore[override]
    """Establishes a tunnel connection through HTTP CONNECT."""
    if self.proxy and self.proxy.scheme == "https":
        tunnel_scheme = "https"
    else:
        tunnel_scheme = "http"
    conn.set_tunnel(
        scheme=tunnel_scheme,
        host=self._tunnel_host,
        port=self.port,
        headers=self.proxy_headers,
    )
    await conn.connect()
|
(self, conn: urllib3_future._async.connection.AsyncHTTPSConnection) -> NoneType
|
710,031
|
urllib3_future._async.connectionpool
|
_validate_conn
|
Called right before a request is made, after the socket is created.
|
async def _validate_conn(self, conn: AsyncHTTPConnection) -> None:
    """
    Called right before a request is made, after the socket is created.
    """
    await super()._validate_conn(conn)
    # Force connect early to allow us to validate the connection.
    if conn.is_closed:
        await conn.connect()
    if not conn.is_verified:
        warnings.warn(
            (
                f"Unverified HTTPS request is being made to host '{conn.host}'. "
                "Adding certificate verification is strongly advised. See: "
                "https://urllib3future.readthedocs.io/en/latest/advanced-usage.html"
                "#tls-warnings"
            ),
            InsecureRequestWarning,
        )
|
(self, conn: urllib3_future._async.connection.AsyncHTTPConnection) -> NoneType
|
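``_validate_conn`` only emits ``InsecureRequestWarning`` for unverified TLS; a common hardening step is to escalate the warning to an error. A sketch, assuming the exception lives under ``urllib3_future.exceptions`` as in upstream urllib3.
import warnings
from urllib3_future.exceptions import InsecureRequestWarning  # assumed module layout

# Turn the "unverified HTTPS request" warning into a hard failure.
warnings.simplefilter("error", InsecureRequestWarning)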
710,039
|
urllib3_future._async.poolmanager
|
AsyncPoolManager
|
Allows for arbitrary async requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3._async.connectionpool.AsyncConnectionPool` instances.
Example:
.. code-block:: python
import urllib3
http = urllib3.AsyncPoolManager(num_pools=2)
resp1 = await http.request("GET", "https://google.com/")
resp2 = await http.request("GET", "https://google.com/mail")
resp3 = await http.request("GET", "https://yahoo.com/")
print(len(http.pools))
# 2
|
class AsyncPoolManager(AsyncRequestMethods):
"""
Allows for arbitrary async requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3._async.connectionpool.AsyncConnectionPool` instances.
Example:
.. code-block:: python
import urllib3
http = urllib3.AsyncPoolManager(num_pools=2)
resp1 = await http.request("GET", "https://google.com/")
resp2 = await http.request("GET", "https://google.com/mail")
resp3 = await http.request("GET", "https://yahoo.com/")
print(len(http.pools))
# 2
"""
proxy: Url | None = None
proxy_config: ProxyConfig | None = None
def __init__(
self,
num_pools: int = 10,
headers: typing.Mapping[str, str] | None = None,
preemptive_quic_cache: QuicPreemptiveCacheType | None = None,
resolver: AsyncResolverDescription
| list[AsyncResolverDescription]
| str
| list[str]
| AsyncBaseResolver
| None = None,
**connection_pool_kw: typing.Any,
) -> None:
super().__init__(headers)
self.connection_pool_kw = connection_pool_kw
self._num_pools = num_pools
self.pools: AsyncTrafficPolice[AsyncHTTPConnectionPool] = AsyncTrafficPolice(
num_pools, concurrency=True
)
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
self._preemptive_quic_cache = preemptive_quic_cache
self._own_resolver = not isinstance(resolver, AsyncBaseResolver)
if resolver is None:
resolver = [AsyncResolverDescription(ProtocolResolver.SYSTEM)]
elif isinstance(resolver, str):
resolver = [AsyncResolverDescription.from_url(resolver)]
elif isinstance(resolver, AsyncResolverDescription):
resolver = [resolver]
self._resolvers: list[AsyncResolverDescription] = []
if not isinstance(resolver, AsyncBaseResolver):
can_resolve_localhost: bool = False
for resolver_description in resolver:
if isinstance(resolver_description, str):
self._resolvers.append(
AsyncResolverDescription.from_url(resolver_description)
)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
continue
self._resolvers.append(resolver_description)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
if not can_resolve_localhost:
self._resolvers.append(
AsyncResolverDescription.from_url(
"system://default?hosts=localhost"
)
)
#: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
for rd in self._resolvers:
if "ca_cert_data" in connection_pool_kw:
if "ca_cert_data" not in rd:
rd["ca_cert_data"] = connection_pool_kw["ca_cert_data"]
if "ca_cert_dir" in connection_pool_kw:
if "ca_cert_dir" not in rd:
rd["ca_cert_dir"] = connection_pool_kw["ca_cert_dir"]
if "ca_certs" in connection_pool_kw:
if "ca_certs" not in rd:
rd["ca_certs"] = connection_pool_kw["ca_certs"]
self._resolver: AsyncBaseResolver = (
AsyncManyResolver(*[r.new() for r in self._resolvers])
if not isinstance(resolver, AsyncBaseResolver)
else resolver
)
async def __aenter__(self: _SelfT) -> _SelfT:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> Literal[False]:
await self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(
self,
scheme: str,
host: str,
port: int,
request_context: dict[str, typing.Any] | None = None,
) -> AsyncHTTPConnectionPool:
"""
Create a new :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls: type[AsyncHTTPConnectionPool] = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Default blocksize to DEFAULT_BLOCKSIZE if missing or explicitly
# set to 'None' in the request_context.
if request_context.get("blocksize") is None:
request_context["blocksize"] = DEFAULT_BLOCKSIZE
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
if not self._resolver.is_available():
self._resolver = self._resolver.recycle()
request_context["resolver"] = self._resolver
# By default, each HttpPool can have up to num_pools connections
if "maxsize" not in request_context:
request_context["maxsize"] = self._num_pools
return pool_cls(host, port, **request_context)
async def clear(self) -> None:
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
await self.pools.clear()
if self._own_resolver and self._resolver.is_available():
await self._resolver.close()
async def connection_from_host(
self,
host: str | None,
port: int | None = None,
scheme: str | None = "http",
pool_kwargs: dict[str, typing.Any] | None = None,
) -> AsyncHTTPConnectionPool:
"""
Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower())
request_context["port"] = port
request_context["host"] = host
return await self.connection_from_context(request_context)
async def connection_from_context(
self, request_context: dict[str, typing.Any]
) -> AsyncHTTPConnectionPool:
"""
Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
if "strict" in request_context:
request_context.pop("strict")
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
if self._preemptive_quic_cache is not None:
request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
return await self.connection_from_pool_key(
pool_key, request_context=request_context
)
async def connection_from_pool_key(
self, pool_key: PoolKey, request_context: dict[str, typing.Any]
) -> AsyncHTTPConnectionPool:
"""
Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
if self.pools.busy:
self.pools.release()
pool = await self.pools.locate(pool_key, block=False)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(scheme, host, port, request_context=request_context)
await self.pools.wait_for_idle_or_available_slot()
await self.pools.put(pool, pool_key, immediately_unavailable=True)
return pool
async def connection_from_url(
self, url: str, pool_kwargs: dict[str, typing.Any] | None = None
) -> AsyncHTTPConnectionPool:
"""
Similar to :func:`urllib3.async_connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return await self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
)
def _merge_pool_kwargs(
self, override: dict[str, typing.Any] | None
) -> dict[str, typing.Any]:
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
base_pool_kwargs.update(
{k: v for k, v in override.items() if v is not None}
)
return {
k: v
for k, v in base_pool_kwargs.items()
if k not in override or override[k] is not None
}
return base_pool_kwargs
def _proxy_requires_url_absolute_form(self, parsed_url: Url) -> bool:
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
)
async def get_response(
self, *, promise: ResponsePromise | None = None
) -> AsyncHTTPResponse | None:
"""
Retrieve the first response available in the pools.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none available, return None.
"""
if promise is not None and not isinstance(promise, ResponsePromise):
raise TypeError(
f"get_response only support ResponsePromise but received {type(promise)} instead. "
f"This may occur if you expected the remote peer to support multiplexing but did not."
)
try:
async with self.pools.borrow(
promise or ResponsePromise, block=False, not_idle_only=True
) as pool:
response = await pool.get_response(promise=promise)
except UnavailableTraffic:
return None
if promise is not None and response is None:
raise ValueError(
"Invoked get_response with promise=... that no connections across pools recognize"
)
if response is None:
return None
from_promise = None
if promise:
from_promise = promise
else:
if (
response._fp
and hasattr(response._fp, "from_promise")
and response._fp.from_promise
):
from_promise = response._fp.from_promise
if from_promise is None:
raise ValueError(
"Internal: Unable to identify originating ResponsePromise from a LowLevelResponse"
)
self.pools.forget(from_promise)
# Retrieve request ctx
method = typing.cast(str, from_promise.get_parameter("method"))
redirect = typing.cast(bool, from_promise.get_parameter("pm_redirect"))
# Handle redirect?
if redirect and response.get_redirect_location():
url = typing.cast(str, from_promise.get_parameter("pm_url"))
body = typing.cast(
typing.Union[_TYPE_BODY, None], from_promise.get_parameter("body")
)
headers = typing.cast(
typing.Union[HTTPHeaderDict, None],
from_promise.get_parameter("headers"),
)
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Union[_TYPE_TIMEOUT, None], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
redirect_location = response.get_redirect_location()
assert isinstance(redirect_location, str)
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(
method, url, response=response, _pool=response._pool
)
except MaxRetryError:
if retries.raise_on_redirect:
await response.drain_conn()
raise
return response
await response.drain_conn()
await retries.async_sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
new_promise = await self.urlopen(
method,
urljoin(url, redirect_location),
True,
body=body,
headers=headers,
retries=retries,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=True,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return await self.get_response(promise=new_promise if promise else None)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if retries.is_retry(method, response.status, has_retry_after):
url = typing.cast(str, from_promise.get_parameter("pm_url"))
body = typing.cast(
typing.Union[_TYPE_BODY, None], from_promise.get_parameter("body")
)
headers = typing.cast(
typing.Union[HTTPHeaderDict, None],
from_promise.get_parameter("headers"),
)
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Union[_TYPE_TIMEOUT, None], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
redirect_location = response.get_redirect_location()
assert isinstance(redirect_location, str)
try:
retries = retries.increment(
method, url, response=response, _pool=response._pool
)
except MaxRetryError:
if retries.raise_on_status:
await response.drain_conn()
raise
return response
await response.drain_conn()
await retries.async_sleep(response)
log.debug("Retry: %s", url)
new_promise = await self.urlopen(
method,
urljoin(url, redirect_location),
True,
body=body,
headers=headers,
retries=retries,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=False,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return await self.get_response(promise=new_promise if promise else None)
return response
@typing.overload # type: ignore[override]
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
*,
multiplexed: Literal[False] = ...,
**kw: typing.Any,
) -> AsyncHTTPResponse:
...
@typing.overload
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
*,
multiplexed: Literal[True],
**kw: typing.Any,
) -> ResponsePromise:
...
async def urlopen(
self, method: str, url: str, redirect: bool = True, **kw: typing.Any
) -> AsyncHTTPResponse | ResponsePromise:
"""
Same as :meth:`urllib3.AsyncHTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3._async.connectionpool.AsyncConnectionPool` can be chosen for it.
"""
u = parse_url(url)
if u.scheme is None:
warnings.warn(
"URLs without a scheme (ie 'https://') are deprecated and will raise an error "
"in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs "
"start with 'https://' or 'http://'. Read more in this issue: "
"https://github.com/urllib3/urllib3/issues/2920",
category=DeprecationWarning,
stacklevel=2,
)
conn = await self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers
if self._proxy_requires_url_absolute_form(u):
response = await conn.urlopen(method, url, **kw)
else:
response = await conn.urlopen(method, u.request_uri, **kw)
self.pools.memorize(response, conn)
self.pools.release()
if "multiplexed" in kw and kw["multiplexed"]:
if isinstance(response, ResponsePromise):
response.set_parameter("pm_redirect", redirect)
response.set_parameter("pm_url", url)
assert isinstance(response, ResponsePromise)
return response
# the established connection is not capable of multiplexed requests
kw["multiplexed"] = False
assert isinstance(response, AsyncHTTPResponse)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = "GET"
kw["body"] = None
kw["headers"] = HTTPHeaderDict(kw["headers"])
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
kw["headers"].discard(should_be_removed_header)
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
new_headers = kw["headers"].copy()
for header in kw["headers"]:
if header.lower() in retries.remove_headers_on_redirect:
new_headers.pop(header, None)
kw["headers"] = new_headers
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
await response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
await response.drain_conn()
return await self.urlopen(method, redirect_location, **kw) # type: ignore[no-any-return]
|
(num_pools: 'int' = 10, headers: 'typing.Mapping[str, str] | None' = None, preemptive_quic_cache: 'QuicPreemptiveCacheType | None' = None, resolver: 'AsyncResolverDescription | list[AsyncResolverDescription] | str | list[str] | AsyncBaseResolver | None' = None, **connection_pool_kw: 'typing.Any') -> 'None'
|
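Building on the docstring example above, a sketch that also pins a hostname through the ``resolver`` parameter; the ``in-memory://`` URL form reuses what ``_new_conn`` constructs internally, the import name is assumed, and the host/address pair is made up.
import asyncio
import urllib3_future as urllib3  # assumed import name for urllib3-future

async def main() -> None:
    async with urllib3.AsyncPoolManager(
        num_pools=4,
        resolver="in-memory://default?hosts=service.internal:192.0.2.10",  # hypothetical host
    ) as http:
        resp = await http.request("GET", "http://service.internal/health")
        print(resp.status, len(http.pools))

asyncio.run(main())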
710,040
|
urllib3_future._async.poolmanager
|
__aenter__
| null |
async def __aenter__(self: _SelfT) -> _SelfT:
    return self
|
(self: ~_SelfT) -> ~_SelfT
|
710,043
|
urllib3_future._async.poolmanager
|
_merge_pool_kwargs
|
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
|
def _merge_pool_kwargs(
self, override: dict[str, typing.Any] | None
) -> dict[str, typing.Any]:
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
base_pool_kwargs.update(
{k: v for k, v in override.items() if v is not None}
)
return {
k: v
for k, v in base_pool_kwargs.items()
if k not in override or override[k] is not None
}
return base_pool_kwargs
|
(self, override: dict[str, typing.Any] | None) -> dict[str, typing.Any]
|
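A standalone re-statement of the merge semantics, useful as a worked example; the function name and the sample dictionaries are invented for illustration.
def merge_pool_kwargs(base: dict, override: dict | None) -> dict:
    # Same contract as _merge_pool_kwargs: copy base, apply non-None
    # overrides, and drop any key explicitly overridden with None.
    merged = dict(base)
    for k, v in (override or {}).items():
        if v is None:
            merged.pop(k, None)
        else:
            merged[k] = v
    return merged

assert merge_pool_kwargs(
    {"maxsize": 10, "retries": 3}, {"retries": None, "timeout": 5.0}
) == {"maxsize": 10, "timeout": 5.0}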
710,044
|
urllib3_future._async.poolmanager
|
_new_pool
|
Create a new :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
|
def _new_pool(
self,
scheme: str,
host: str,
port: int,
request_context: dict[str, typing.Any] | None = None,
) -> AsyncHTTPConnectionPool:
"""
Create a new :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls: type[AsyncHTTPConnectionPool] = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Default blocksize to DEFAULT_BLOCKSIZE if missing or explicitly
# set to 'None' in the request_context.
if request_context.get("blocksize") is None:
request_context["blocksize"] = DEFAULT_BLOCKSIZE
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
if not self._resolver.is_available():
self._resolver = self._resolver.recycle()
request_context["resolver"] = self._resolver
# By default, each HttpPool can have up to num_pools connections
if "maxsize" not in request_context:
request_context["maxsize"] = self._num_pools
return pool_cls(host, port, **request_context)
|
(self, scheme: str, host: str, port: int, request_context: Optional[dict[str, Any]] = None) -> urllib3_future._async.connectionpool.AsyncHTTPConnectionPool
|
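Since the docstring calls ``_new_pool`` out as the customization hook, here is a minimal override sketch; the subclass name and the print statement are illustrative only.
from urllib3_future._async.poolmanager import AsyncPoolManager

class LoggingPoolManager(AsyncPoolManager):  # hypothetical subclass
    def _new_pool(self, scheme, host, port, request_context=None):
        # Observe pool creation, then defer to the stock implementation.
        print(f"creating {scheme} pool for {host}:{port}")
        return super()._new_pool(scheme, host, port, request_context=request_context)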
710,046
|
urllib3_future._async.poolmanager
|
clear
|
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
|
async def clear(self) -> None:
    """
    Empty our store of pools and direct them all to close.
    This will not affect in-flight connections, but they will not be
    re-used after completion.
    """
    await self.pools.clear()
    if self._own_resolver and self._resolver.is_available():
        await self._resolver.close()
|
(self) -> NoneType
|
710,047
|
urllib3_future._async.poolmanager
|
connection_from_context
|
Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
|
async def connection_from_context(
    self, request_context: dict[str, typing.Any]
) -> AsyncHTTPConnectionPool:
    """
    Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the request context.
    ``request_context`` must at least contain the ``scheme`` key and its
    value must be a key in ``key_fn_by_scheme`` instance variable.
    """
    if "strict" in request_context:
        request_context.pop("strict")
    scheme = request_context["scheme"].lower()
    pool_key_constructor = self.key_fn_by_scheme.get(scheme)
    if not pool_key_constructor:
        raise URLSchemeUnknown(scheme)
    pool_key = pool_key_constructor(request_context)
    if self._preemptive_quic_cache is not None:
        request_context["preemptive_quic_cache"] = self._preemptive_quic_cache
    return await self.connection_from_pool_key(
        pool_key, request_context=request_context
    )
|
(self, request_context: dict[str, typing.Any]) -> urllib3_future._async.connectionpool.AsyncHTTPConnectionPool
|
710,048
|
urllib3_future._async.poolmanager
|
connection_from_host
|
Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
|
async def connection_from_host(
    self,
    host: str | None,
    port: int | None = None,
    scheme: str | None = "http",
    pool_kwargs: dict[str, typing.Any] | None = None,
) -> AsyncHTTPConnectionPool:
    """
    Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the host, port, and scheme.
    If ``port`` isn't given, it will be derived from the ``scheme`` using
    ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
    provided, it is merged with the instance's ``connection_pool_kw``
    variable and used to create the new connection pool, if one is
    needed.
    """
    if not host:
        raise LocationValueError("No host specified.")
    request_context = self._merge_pool_kwargs(pool_kwargs)
    request_context["scheme"] = scheme or "http"
    if not port:
        port = port_by_scheme.get(request_context["scheme"].lower())
    request_context["port"] = port
    request_context["host"] = host
    return await self.connection_from_context(request_context)
|
(self, host: str | None, port: Optional[int] = None, scheme: str | None = 'http', pool_kwargs: Optional[dict[str, Any]] = None) -> urllib3_future._async.connectionpool.AsyncHTTPConnectionPool
|
710,049
|
urllib3_future._async.poolmanager
|
connection_from_pool_key
|
Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
|
async def connection_from_pool_key(
    self, pool_key: PoolKey, request_context: dict[str, typing.Any]
) -> AsyncHTTPConnectionPool:
    """
    Get a :class:`urllib3._async.connectionpool.AsyncConnectionPool` based on the provided pool key.
    ``pool_key`` should be a namedtuple that only contains immutable
    objects. At a minimum it must have the ``scheme``, ``host``, and
    ``port`` fields.
    """
    # If the scheme, host, or port doesn't match existing open
    # connections, open a new ConnectionPool.
    if self.pools.busy:
        self.pools.release()
    pool = await self.pools.locate(pool_key, block=False)
    if pool:
        return pool
    # Make a fresh ConnectionPool of the desired type
    scheme = request_context["scheme"]
    host = request_context["host"]
    port = request_context["port"]
    pool = self._new_pool(scheme, host, port, request_context=request_context)
    await self.pools.wait_for_idle_or_available_slot()
    await self.pools.put(pool, pool_key, immediately_unavailable=True)
    return pool
|
(self, pool_key: urllib3_future.poolmanager.PoolKey, request_context: dict[str, typing.Any]) -> urllib3_future._async.connectionpool.AsyncHTTPConnectionPool
|
710,050
|
urllib3_future._async.poolmanager
|
connection_from_url
|
Similar to :func:`urllib3.async_connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
|
async def connection_from_url(
    self, url: str, pool_kwargs: dict[str, typing.Any] | None = None
) -> AsyncHTTPConnectionPool:
    """
    Similar to :func:`urllib3.async_connection_from_url`.
    If ``pool_kwargs`` is not provided and a new pool needs to be
    constructed, ``self.connection_pool_kw`` is used to initialize
    the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
    is provided, it is used instead. Note that if a new pool does not
    need to be created for the request, the provided ``pool_kwargs`` are
    not used.
    """
    u = parse_url(url)
    return await self.connection_from_host(
        u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
    )
|
(self, url: str, pool_kwargs: Optional[dict[str, Any]] = None) -> urllib3_future._async.connectionpool.AsyncHTTPConnectionPool
|
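A short usage sketch for ``connection_from_url``; the import name and URL are placeholders.
import asyncio
import urllib3_future as urllib3  # assumed import name for urllib3-future

async def main() -> None:
    async with urllib3.AsyncPoolManager() as http:
        pool = await http.connection_from_url(
            "https://example.org/any/path",  # placeholder URL
            pool_kwargs={"maxsize": 8},      # only consulted when a new pool is created
        )
        print(type(pool).__name__)

asyncio.run(main())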
710,051
|
urllib3_future._async.poolmanager
|
get_response
|
Retrieve the first response available in the pools.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none available, return None.
|
null
|
(self, *, promise: Optional[urllib3_future.backend._base.ResponsePromise] = None) -> urllib3_future._async.response.AsyncHTTPResponse | None
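A hedged sketch of the multiplexed flow described above (the import path, host, and URLs are assumptions, not part of the source):
import asyncio
from urllib3_future import AsyncPoolManager  # assumed import path

async def main() -> None:
    pm = AsyncPoolManager()
    promise_a = await pm.urlopen("GET", "https://example.com/a", multiplexed=True)
    promise_b = await pm.urlopen("GET", "https://example.com/b", multiplexed=True)
    resp_b = await pm.get_response(promise=promise_b)  # response for one specific promise
    resp_a = await pm.get_response()  # or the first one available; None once exhausted
    print(resp_a.status, resp_b.status)

asyncio.run(main())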
|
710,055
|
urllib3_future._async.poolmanager
|
urlopen
|
Same as :meth:`urllib3.AsyncHTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3._async.connectionpool.AsyncConnectionPool` can be chosen for it.
|
@typing.overload
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
*,
multiplexed: Literal[True],
**kw: typing.Any,
) -> ResponsePromise:
...
|
(self, method: str, url: str, redirect: bool = True, **kw: Any) -> urllib3_future._async.response.AsyncHTTPResponse | urllib3_future.backend._base.ResponsePromise
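A short sketch of the redirect handling this docstring describes (``AsyncPoolManager`` import path and URL are assumptions):
import asyncio
from urllib3_future import AsyncPoolManager  # assumed import path

async def main() -> None:
    pm = AsyncPoolManager()
    # the url must be absolute so an appropriate pool can be chosen
    resp = await pm.urlopen("GET", "https://example.com/old-path", redirect=False)
    print(resp.status, resp.get_redirect_location())

asyncio.run(main())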
|
710,056
|
urllib3_future._async.poolmanager
|
AsyncProxyManager
|
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
    of HTTP they are sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
:param proxy_ssl_context:
The proxy SSL context is used to establish the TLS connection to the
proxy when using HTTPS proxies.
:param use_forwarding_for_https:
(Defaults to False) If set to True will forward requests to the HTTPS
proxy to be made on behalf of the client instead of creating a TLS
tunnel via the CONNECT method. **Enabling this flag means that request
and response headers and content will be visible from the HTTPS proxy**
whereas tunneling keeps request and response headers and content
private. IP address, target hostname, SNI, and port are always visible
to an HTTPS proxy even when this flag is disabled.
:param proxy_assert_hostname:
The hostname of the certificate to verify against.
:param proxy_assert_fingerprint:
The fingerprint of the certificate to verify against.
Example:
.. code-block:: python
import urllib3
proxy = urllib3.AsyncProxyManager("https://localhost:3128/")
resp1 = await proxy.request("GET", "https://google.com/")
resp2 = await proxy.request("GET", "https://httpbin.org/")
print(len(proxy.pools))
# 1
resp3 = await proxy.request("GET", "https://httpbin.org/")
resp4 = await proxy.request("GET", "https://twitter.com/")
print(len(proxy.pools))
# 3
|
class AsyncProxyManager(AsyncPoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
:param proxy_ssl_context:
The proxy SSL context is used to establish the TLS connection to the
proxy when using HTTPS proxies.
:param use_forwarding_for_https:
(Defaults to False) If set to True will forward requests to the HTTPS
proxy to be made on behalf of the client instead of creating a TLS
tunnel via the CONNECT method. **Enabling this flag means that request
and response headers and content will be visible from the HTTPS proxy**
whereas tunneling keeps request and response headers and content
private. IP address, target hostname, SNI, and port are always visible
to an HTTPS proxy even when this flag is disabled.
:param proxy_assert_hostname:
The hostname of the certificate to verify against.
:param proxy_assert_fingerprint:
The fingerprint of the certificate to verify against.
Example:
.. code-block:: python
import urllib3
proxy = urllib3.AsyncProxyManager("https://localhost:3128/")
resp1 = await proxy.request("GET", "https://google.com/")
resp2 = await proxy.request("GET", "https://httpbin.org/")
print(len(proxy.pools))
# 1
resp3 = await proxy.request("GET", "https://httpbin.org/")
resp4 = await proxy.request("GET", "https://twitter.com/")
print(len(proxy.pools))
# 3
"""
def __init__(
self,
proxy_url: str,
num_pools: int = 10,
headers: typing.Mapping[str, str] | None = None,
proxy_headers: typing.Mapping[str, str] | None = None,
proxy_ssl_context: ssl.SSLContext | None = None,
use_forwarding_for_https: bool = False,
proxy_assert_hostname: None | str | Literal[False] = None,
proxy_assert_fingerprint: str | None = None,
**connection_pool_kw: typing.Any,
) -> None:
if isinstance(proxy_url, AsyncHTTPConnectionPool):
str_proxy_url = f"{proxy_url.scheme}://{proxy_url.host}:{proxy_url.port}"
else:
str_proxy_url = proxy_url
proxy = parse_url(str_proxy_url)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
self.proxy_ssl_context = proxy_ssl_context
self.proxy_config = ProxyConfig(
proxy_ssl_context,
use_forwarding_for_https,
proxy_assert_hostname,
proxy_assert_fingerprint,
)
connection_pool_kw["_proxy"] = self.proxy
connection_pool_kw["_proxy_headers"] = self.proxy_headers
connection_pool_kw["_proxy_config"] = self.proxy_config
super().__init__(num_pools, headers, **connection_pool_kw)
async def connection_from_host(
self,
host: str | None,
port: int | None = None,
scheme: str | None = "http",
pool_kwargs: dict[str, typing.Any] | None = None,
) -> AsyncHTTPConnectionPool:
if scheme == "https":
return await super().connection_from_host(
host, port, scheme, pool_kwargs=pool_kwargs
)
return await super().connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs # type: ignore[union-attr]
)
def _set_proxy_headers(
self, url: str, headers: typing.Mapping[str, str] | None = None
) -> typing.Mapping[str, str]:
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {"Accept": "*/*"}
netloc = parse_url(url).netloc
if netloc:
headers_["Host"] = netloc
if headers:
headers_.update(headers)
return headers_
@typing.overload # type: ignore[override]
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
*,
multiplexed: Literal[False] = ...,
**kw: typing.Any,
) -> AsyncHTTPResponse:
...
@typing.overload
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
*,
multiplexed: Literal[True],
**kw: typing.Any,
) -> ResponsePromise:
...
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
**kw: typing.Any,
) -> AsyncHTTPResponse | ResponsePromise:
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
# For connections using HTTP CONNECT, httplib sets the necessary
# headers on the CONNECT to the proxy. If we're not using CONNECT,
# we'll definitely need to set 'Host' at the very least.
headers = kw.get("headers", self.headers)
kw["headers"] = self._set_proxy_headers(url, headers)
return await super().urlopen(method, url, redirect=redirect, **kw) # type: ignore[no-any-return]
|
(proxy_url: 'str', num_pools: 'int' = 10, headers: 'typing.Mapping[str, str] | None' = None, proxy_headers: 'typing.Mapping[str, str] | None' = None, proxy_ssl_context: 'ssl.SSLContext | None' = None, use_forwarding_for_https: 'bool' = False, proxy_assert_hostname: 'None | str | Literal[False]' = None, proxy_assert_fingerprint: 'str | None' = None, **connection_pool_kw: 'typing.Any') -> 'None'
|
710,059
|
urllib3_future._async.poolmanager
|
__init__
| null |
def __init__(
self,
proxy_url: str,
num_pools: int = 10,
headers: typing.Mapping[str, str] | None = None,
proxy_headers: typing.Mapping[str, str] | None = None,
proxy_ssl_context: ssl.SSLContext | None = None,
use_forwarding_for_https: bool = False,
proxy_assert_hostname: None | str | Literal[False] = None,
proxy_assert_fingerprint: str | None = None,
**connection_pool_kw: typing.Any,
) -> None:
if isinstance(proxy_url, AsyncHTTPConnectionPool):
str_proxy_url = f"{proxy_url.scheme}://{proxy_url.host}:{proxy_url.port}"
else:
str_proxy_url = proxy_url
proxy = parse_url(str_proxy_url)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
self.proxy_ssl_context = proxy_ssl_context
self.proxy_config = ProxyConfig(
proxy_ssl_context,
use_forwarding_for_https,
proxy_assert_hostname,
proxy_assert_fingerprint,
)
connection_pool_kw["_proxy"] = self.proxy
connection_pool_kw["_proxy_headers"] = self.proxy_headers
connection_pool_kw["_proxy_config"] = self.proxy_config
super().__init__(num_pools, headers, **connection_pool_kw)
|
(self, proxy_url: 'str', num_pools: 'int' = 10, headers: 'typing.Mapping[str, str] | None' = None, proxy_headers: 'typing.Mapping[str, str] | None' = None, proxy_ssl_context: 'ssl.SSLContext | None' = None, use_forwarding_for_https: 'bool' = False, proxy_assert_hostname: 'None | str | Literal[False]' = None, proxy_assert_fingerprint: 'str | None' = None, **connection_pool_kw: 'typing.Any') -> 'None'
|
710,073
|
urllib3_future._async.poolmanager
|
urlopen
|
Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.
|
@typing.overload
async def urlopen(
self,
method: str,
url: str,
redirect: bool = True,
*,
multiplexed: Literal[True],
**kw: typing.Any,
) -> ResponsePromise:
...
|
(self, method: str, url: str, redirect: bool = True, **kw: Any) -> urllib3_future._async.response.AsyncHTTPResponse | urllib3_future.backend._base.ResponsePromise
|
710,074
|
urllib3_future.contrib.resolver._async.factories
|
AsyncResolverDescription
|
Describe how a BaseResolver must be instantiated.
|
class AsyncResolverDescription(ResolverDescription):
"""Describe how a BaseResolver must be instantiated."""
def new(self) -> AsyncBaseResolver:
kwargs = {**self.kwargs}
if self.server:
kwargs["server"] = self.server
if self.port:
kwargs["port"] = self.port
if self.host_patterns:
kwargs["patterns"] = self.host_patterns
return AsyncResolverFactory.new(
self.protocol,
self.specifier,
self.implementation,
**kwargs,
)
@staticmethod
def from_url(url: str) -> AsyncResolverDescription:
parsed_url = parse_url(url)
schema = parsed_url.scheme
if schema is None:
raise ValueError("Given DNS url is missing a protocol")
specifier = None
implementation = None
if "+" in schema:
schema, specifier = tuple(schema.lower().split("+", 1))
protocol = ProtocolResolver(schema)
kwargs: dict[str, typing.Any] = {}
if parsed_url.path:
kwargs["path"] = parsed_url.path
if parsed_url.auth:
kwargs["headers"] = dict()
if ":" in parsed_url.auth:
username, password = parsed_url.auth.split(":")
username = username.strip("'\"")
password = password.strip("'\"")
kwargs["headers"][
"Authorization"
] = f"Basic {b64encode(f'{username}:{password}'.encode()).decode()}"
else:
kwargs["headers"]["Authorization"] = f"Bearer {parsed_url.auth}"
if parsed_url.query:
parameters = parse_qs(parsed_url.query)
for parameter in parameters:
if not parameters[parameter]:
continue
parameter_insensible = parameter.lower()
if (
isinstance(parameters[parameter], list)
and len(parameters[parameter]) > 1
):
if parameter == "implementation":
raise ValueError("Only one implementation can be passed to URL")
values = []
for e in parameters[parameter]:
if "," in e:
values.extend(e.split(","))
else:
values.append(e)
if parameter_insensible in kwargs:
if isinstance(kwargs[parameter_insensible], list):
kwargs[parameter_insensible].extend(values)
else:
values.append(kwargs[parameter_insensible])
kwargs[parameter_insensible] = values
continue
kwargs[parameter_insensible] = values
continue
value: str = parameters[parameter][0].lower().strip(" ")
if parameter == "implementation":
implementation = value
continue
if "," in value:
list_of_values = value.split(",")
if parameter_insensible in kwargs:
if isinstance(kwargs[parameter_insensible], list):
kwargs[parameter_insensible].extend(list_of_values)
                        else:
                            list_of_values.append(kwargs[parameter_insensible])
                            # assign back so the merged list is kept
                            # (mirrors the multi-value branch above)
                            kwargs[parameter_insensible] = list_of_values
                        continue
kwargs[parameter_insensible] = list_of_values
continue
value_converted: bool | int | float | None = None
if value in ["false", "true"]:
value_converted = True if value == "true" else False
elif value.isdigit():
value_converted = int(value)
elif (
value.count(".") == 1
and value.index(".") > 0
and value.replace(".", "").isdigit()
):
value_converted = float(value)
kwargs[parameter_insensible] = (
value if value_converted is None else value_converted
)
host_patterns = []
if "hosts" in kwargs:
host_patterns = (
kwargs["hosts"].split(",")
if isinstance(kwargs["hosts"], str)
else kwargs["hosts"]
)
del kwargs["hosts"]
return AsyncResolverDescription(
protocol,
specifier,
implementation,
parsed_url.host,
parsed_url.port,
*host_patterns,
**kwargs,
)
|
(protocol: 'ProtocolResolver', specifier: 'str | None' = None, implementation: 'str | None' = None, server: 'str | None' = None, port: 'int | None' = None, *host_patterns: 'str', **kwargs: 'typing.Any') -> 'None'
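A hedged sketch, assuming the DNS url grammar implied by ``from_url`` below (a scheme, an optional ``+specifier``, a host, and query parameters; the exact url is illustrative):
from urllib3_future.contrib.resolver._async.factories import AsyncResolverDescription

desc = AsyncResolverDescription.from_url("doh://dns.example/dns-query?timeout=5")
resolver = desc.new()  # instantiates an AsyncBaseResolver from the description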
|
710,075
|
urllib3_future.contrib.resolver.factories
|
__contains__
| null |
def __contains__(self, item: str) -> bool:
return item in self.kwargs
|
(self, item: str) -> bool
|
710,076
|
urllib3_future.contrib.resolver.factories
|
__init__
| null |
def __init__(
self,
protocol: ProtocolResolver,
specifier: str | None = None,
implementation: str | None = None,
server: str | None = None,
port: int | None = None,
*host_patterns: str,
**kwargs: typing.Any,
) -> None:
self.protocol = protocol
self.specifier = specifier
self.implementation = implementation
self.server = server
self.port = port
self.host_patterns = host_patterns
self.kwargs = kwargs
|
(self, protocol: urllib3_future.contrib.resolver.protocols.ProtocolResolver, specifier: Optional[str] = None, implementation: Optional[str] = None, server: Optional[str] = None, port: Optional[int] = None, *host_patterns: str, **kwargs: Any) -> NoneType
|
710,077
|
urllib3_future.contrib.resolver.factories
|
__setitem__
| null |
def __setitem__(self, key: str, value: typing.Any) -> None:
self.kwargs[key] = value
|
(self, key: str, value: Any) -> NoneType
|
710,078
|
urllib3_future.contrib.resolver._async.factories
|
from_url
| null |
@staticmethod
def from_url(url: str) -> AsyncResolverDescription:
parsed_url = parse_url(url)
schema = parsed_url.scheme
if schema is None:
raise ValueError("Given DNS url is missing a protocol")
specifier = None
implementation = None
if "+" in schema:
schema, specifier = tuple(schema.lower().split("+", 1))
protocol = ProtocolResolver(schema)
kwargs: dict[str, typing.Any] = {}
if parsed_url.path:
kwargs["path"] = parsed_url.path
if parsed_url.auth:
kwargs["headers"] = dict()
if ":" in parsed_url.auth:
username, password = parsed_url.auth.split(":")
username = username.strip("'\"")
password = password.strip("'\"")
kwargs["headers"][
"Authorization"
] = f"Basic {b64encode(f'{username}:{password}'.encode()).decode()}"
else:
kwargs["headers"]["Authorization"] = f"Bearer {parsed_url.auth}"
if parsed_url.query:
parameters = parse_qs(parsed_url.query)
for parameter in parameters:
if not parameters[parameter]:
continue
parameter_insensible = parameter.lower()
if (
isinstance(parameters[parameter], list)
and len(parameters[parameter]) > 1
):
if parameter == "implementation":
raise ValueError("Only one implementation can be passed to URL")
values = []
for e in parameters[parameter]:
if "," in e:
values.extend(e.split(","))
else:
values.append(e)
if parameter_insensible in kwargs:
if isinstance(kwargs[parameter_insensible], list):
kwargs[parameter_insensible].extend(values)
else:
values.append(kwargs[parameter_insensible])
kwargs[parameter_insensible] = values
continue
kwargs[parameter_insensible] = values
continue
value: str = parameters[parameter][0].lower().strip(" ")
if parameter == "implementation":
implementation = value
continue
if "," in value:
list_of_values = value.split(",")
if parameter_insensible in kwargs:
if isinstance(kwargs[parameter_insensible], list):
kwargs[parameter_insensible].extend(list_of_values)
                        else:
                            list_of_values.append(kwargs[parameter_insensible])
                            # assign back so the merged list is kept
                            # (mirrors the multi-value branch above)
                            kwargs[parameter_insensible] = list_of_values
                        continue
kwargs[parameter_insensible] = list_of_values
continue
value_converted: bool | int | float | None = None
if value in ["false", "true"]:
value_converted = True if value == "true" else False
elif value.isdigit():
value_converted = int(value)
elif (
value.count(".") == 1
and value.index(".") > 0
and value.replace(".", "").isdigit()
):
value_converted = float(value)
kwargs[parameter_insensible] = (
value if value_converted is None else value_converted
)
host_patterns = []
if "hosts" in kwargs:
host_patterns = (
kwargs["hosts"].split(",")
if isinstance(kwargs["hosts"], str)
else kwargs["hosts"]
)
del kwargs["hosts"]
return AsyncResolverDescription(
protocol,
specifier,
implementation,
parsed_url.host,
parsed_url.port,
*host_patterns,
**kwargs,
)
|
(url: str) -> urllib3_future.contrib.resolver._async.factories.AsyncResolverDescription
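An illustration of the query-string coercion implemented above ("true"/"false" become booleans, all-digit values become ints, single-dot numerics become floats); the url and the resulting kwargs are assumptions:
from urllib3_future.contrib.resolver._async.factories import AsyncResolverDescription

desc = AsyncResolverDescription.from_url(
    "doh://dns.example/?disable_ipv6=true&timeout=2&rate=0.5"
)
# desc.kwargs is then expected to contain approximately
# {"path": "/", "disable_ipv6": True, "timeout": 2, "rate": 0.5}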
|
710,079
|
urllib3_future.contrib.resolver._async.factories
|
new
| null |
def new(self) -> AsyncBaseResolver:
kwargs = {**self.kwargs}
if self.server:
kwargs["server"] = self.server
if self.port:
kwargs["port"] = self.port
if self.host_patterns:
kwargs["patterns"] = self.host_patterns
return AsyncResolverFactory.new(
self.protocol,
self.specifier,
self.implementation,
**kwargs,
)
|
(self) -> urllib3_future.contrib.resolver._async.protocols.AsyncBaseResolver
|
710,080
|
urllib3_future.response
|
HTTPResponse
|
HTTP Response container.
Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
|
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
CONTENT_DECODERS = ["gzip", "deflate"]
if brotli is not None:
CONTENT_DECODERS += ["br"]
if zstd is not None:
CONTENT_DECODERS += ["zstd"]
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
DECODER_ERROR_CLASSES: tuple[type[Exception], ...] = (IOError, zlib.error)
if brotli is not None:
DECODER_ERROR_CLASSES += (brotli.error,)
if zstd is not None:
DECODER_ERROR_CLASSES += (zstd.ZstdError,)
def __init__(
self,
body: _TYPE_BODY = "",
headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
status: int = 0,
version: int = 0,
reason: str | None = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: LowLevelResponse | None = None,
pool: HTTPConnectionPool | None = None,
connection: HTTPConnection | None = None,
msg: Message | None = None,
retries: Retry | None = None,
enforce_content_length: bool = True,
request_method: str | None = None,
request_url: str | None = None,
auto_close: bool = True,
police_officer: TrafficPolice[HTTPConnection] | None = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
try:
self.status = int(status)
except ValueError:
self.status = 0 # merely for tests, was supported due to broken httplib.
self.version = version
self.reason = reason
self.decode_content = decode_content
self._has_decoded_content = False
self._request_url: str | None = request_url
self._retries: Retry | None = None
self.retries = retries
self.chunked = False
if "transfer-encoding" in self.headers:
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: ContentDecoder | None = None
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: LowLevelResponse | typing.IO[typing.Any] | None = None
self._original_response = original_response
self._fp_bytes_read = 0
if msg is not None:
warnings.warn(
"Passing msg=.. is deprecated and no-op in urllib3.future and is scheduled to be removed in a future major.",
DeprecationWarning,
stacklevel=2,
)
self.msg = msg
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
# Are we using the chunked-style of transfer encoding?
self.chunk_left: int | None = None
# Determine length of response
self.length_remaining: int | None = self._init_length(request_method)
# Used to return the correct amount of bytes for partial read()s
self._decoded_buffer = BytesQueueBuffer()
self._police_officer: TrafficPolice[HTTPConnection] | None = police_officer
if self._police_officer is not None:
self._police_officer.memorize(self, self._connection)
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def is_from_promise(self, promise: ResponsePromise) -> bool:
"""
        Determine if this response came from the given promise.
"""
return (
self._fp is not None
and hasattr(self._fp, "from_promise")
and self._fp.from_promise == promise
)
def get_redirect_location(self) -> str | None | Literal[False]:
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get("location")
return False
def json(self) -> typing.Any:
"""
Parses the body of the HTTP response as JSON.
To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.
This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.
Read more :ref:`here <json>`.
"""
data = self.data.decode("utf-8")
return _json.loads(data)
@property
def retries(self) -> Retry | None:
return self._retries
@retries.setter
def retries(self, retries: Retry | None) -> None:
# Override the request_url if retries has a redirect location.
if retries is not None and retries.history:
self.url = retries.history[-1].redirect_location
self._retries = retries
def _init_decoder(self) -> None:
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
if "content-encoding" not in self.headers:
return
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if encodings:
self._decoder = _get_decoder(content_encoding)
def _decode(
self, data: bytes, decode_content: bool | None, flush_decoder: bool
) -> bytes:
"""
Decode the data passed in and potentially flush the decoder.
"""
if not decode_content:
if self._has_decoded_content:
raise RuntimeError(
"Calling read(decode_content=False) is not supported after "
"read(decode_content=True) was called."
)
return data
try:
if self._decoder:
data = self._decoder.decompress(data)
self._has_decoded_content = True
except self.DECODER_ERROR_CLASSES as e:
content_encoding = self.headers.get("content-encoding", "").lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e,
) from e
if flush_decoder:
data += self._flush_decoder()
return data
def _flush_decoder(self) -> bytes:
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
return self._decoder.decompress(b"") + self._decoder.flush()
return b""
# Compatibility methods for `io` module
def readinto(self, b: bytearray) -> int:
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
# Compatibility method for http.cookiejar
def info(self) -> HTTPHeaderDict:
return self.headers
def geturl(self) -> str | None:
return self.url
def release_conn(self) -> None:
if not self._connection:
return None
if self._police_officer is not None:
if self._police_officer.busy:
self._police_officer.release()
self._connection = None
def drain_conn(self) -> None:
"""
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
"""
try:
self.read()
except (HTTPError, OSError, BaseSSLError):
pass
@property
def data(self) -> bytes:
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return self.read(cache_content=True)
return None # type: ignore[return-value]
@property
def connection(self) -> HTTPConnection | None:
return self._connection
def isclosed(self) -> bool:
return is_fp_closed(self._fp)
def tell(self) -> int:
"""
Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`urllib3.response.HTTPResponse.read`
        if bytes are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def _init_length(self, request_method: str | None) -> int | None:
"""
Set initial length value for Response content if available.
"""
length: int | None
content_length: str | None = self.headers.get("content-length")
if content_length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning(
"Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked."
)
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
if "," in content_length:
lengths = {int(val) for val in content_length.split(",")}
if len(lengths) > 1:
raise InvalidHeader(
"Content-Length contained multiple "
"unmatching values (%s)" % content_length
)
length = lengths.pop()
else:
length = int(content_length)
except ValueError:
length = None
else:
if length < 0:
length = None
else: # if content_length is None
length = None
# Check for responses that shouldn't include a body
if (
self.status in (204, 304)
or 100 <= self.status < 200
or request_method == "HEAD"
):
length = 0
return length
@contextmanager
def _error_catcher(self) -> typing.Generator[None, None, None]:
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout as e:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e):
# SSL errors related to framing/MAC get wrapped and reraised here
raise SSLError(e) from e
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except OSError as e:
# This includes IncompleteRead.
raise ProtocolError(f"Connection broken: {e!r}", e) from e
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
def _fp_read(self, amt: int | None = None) -> bytes:
"""
        Read a response while guarding against a known overflow: in some
        known cases, reading more bytes than fit in a 32-bit int in a
        single call via SSL raises an overflow error, so the read is
        chunked whenever `amt` or `self.length_remaining` indicates a
        problem may happen.
The known cases:
* 3.8 <= CPython < 3.9.7 because of a bug
https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
* urllib3 injected with pyOpenSSL-backed SSL-support.
* CPython < 3.10 only when `amt` does not fit 32-bit int.
"""
assert self._fp
c_int_max = 2**31 - 1
if (
(amt and amt > c_int_max)
or (self.length_remaining and self.length_remaining > c_int_max)
) and sys.version_info < (3, 10):
buffer = io.BytesIO()
# Besides `max_chunk_amt` being a maximum chunk size, it
# affects memory overhead of reading a response by this
# method in CPython.
# `c_int_max` equal to 2 GiB - 1 byte is the actual maximum
# chunk size that does not lead to an overflow error, but
# 256 MiB is a compromise.
max_chunk_amt = 2**28
while amt is None or amt != 0:
if amt is not None:
chunk_amt = min(amt, max_chunk_amt)
amt -= chunk_amt
else:
chunk_amt = max_chunk_amt
try:
data = self._fp.read(chunk_amt)
except ValueError: # Defensive: overly protective
break # Defensive: can also be an indicator that read ended, should not happen.
if not data:
break
buffer.write(data)
del data # to reduce peak memory usage by `max_chunk_amt`.
return buffer.getvalue()
else:
# StringIO doesn't like amt=None
return self._fp.read(amt) if amt is not None else self._fp.read()
def _raw_read(
self,
amt: int | None = None,
) -> bytes:
"""
        Reads `amt` bytes from the socket.
"""
if self._fp is None:
return None # type: ignore[return-value]
fp_closed = getattr(self._fp, "closed", False)
with self._error_catcher():
data = self._fp_read(amt) if not fp_closed else b""
if amt is not None and amt != 0 and not data:
# Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
if (
self.enforce_content_length
and self.length_remaining is not None
and self.length_remaining != 0
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
return data
def read(
self,
amt: int | None = None,
decode_content: bool | None = None,
cache_content: bool = False,
) -> bytes:
"""
Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
try:
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if amt is not None:
cache_content = False
if amt < 0 and len(self._decoded_buffer):
return self._decoded_buffer.get(len(self._decoded_buffer))
if 0 < amt <= len(self._decoded_buffer):
return self._decoded_buffer.get(amt)
if self._police_officer is not None and not self._police_officer.busy:
with self._police_officer.borrow(self):
data = self._raw_read(amt)
else:
data = self._raw_read(amt)
if amt and amt < 0:
amt = len(data)
flush_decoder = False
if amt is None:
flush_decoder = True
elif amt != 0 and not data:
flush_decoder = True
if not data and len(self._decoded_buffer) == 0:
return data
if amt is None:
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
else:
# do not waste memory on buffer when not decoding
if not decode_content:
if self._has_decoded_content:
raise RuntimeError(
"Calling read(decode_content=False) is not supported after "
"read(decode_content=True) was called."
)
return data
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
while len(self._decoded_buffer) < amt and data:
# TODO make sure to initially read enough data to get past the headers
# For example, the GZ file header takes 10 bytes, we don't want to read
# it one byte at a time
if (
self._police_officer is not None
and not self._police_officer.busy
):
with self._police_officer.borrow(self):
data = self._raw_read(amt)
else:
data = self._raw_read(amt)
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
data = self._decoded_buffer.get(amt)
return data
finally:
if (
hasattr(self._fp, "_eot")
and self._fp._eot # type: ignore[union-attr]
and self._police_officer is not None
):
self._police_officer.forget(self)
if self._police_officer.busy:
self._police_officer.release()
self._police_officer = None
def stream(
self, amt: int | None = 2**16, decode_content: bool | None = None
) -> typing.Generator[bytes, None, None]:
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            that much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned. Setting -1 will output chunks as soon as they
arrive.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0:
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
# Overrides from io.IOBase
def readable(self) -> bool:
return True
def close(self) -> None:
if not self.closed and self._fp:
self._fp.close()
if self._connection:
self._connection.close()
if not self.auto_close:
io.IOBase.close(self)
@property
def closed(self) -> bool:
if not self.auto_close:
return io.IOBase.closed.__get__(self) # type: ignore[no-any-return]
elif self._fp is None:
return True
elif hasattr(self._fp, "isclosed"):
return self._fp.isclosed()
elif hasattr(self._fp, "closed"):
return self._fp.closed
else:
return True
def fileno(self) -> int:
if self._fp is None:
raise OSError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise OSError(
"The file-like object this HTTPResponse is wrapped "
"around has no file descriptor"
)
def flush(self) -> None:
if (
self._fp is not None
and hasattr(self._fp, "flush")
and not getattr(self._fp, "closed", False)
):
return self._fp.flush() # type: ignore[return-value]
def supports_chunked_reads(self) -> bool:
"""
Checks if the underlying file-like object looks like a
:class:`http.client.HTTPResponse` object. We do this by testing for
the fp attribute. If it is present we assume it returns raw chunks as
processed by read_chunked().
"""
return False
@property
def url(self) -> str | None:
"""
Returns the URL that was the source of this response.
If the request that generated this response redirected, this method
will return the final redirect location.
"""
return self._request_url
@url.setter
def url(self, url: str) -> None:
self._request_url = url
def __iter__(self) -> typing.Iterator[bytes]:
buffer: list[bytes] = []
for chunk in self.stream(-1, decode_content=True):
if b"\n" in chunk:
chunks = chunk.split(b"\n")
yield b"".join(buffer) + chunks[0] + b"\n"
for x in chunks[1:-1]:
yield x + b"\n"
if chunks[-1]:
buffer = [chunks[-1]]
else:
buffer = []
else:
buffer.append(chunk)
if buffer:
yield b"".join(buffer)
|
(body: '_TYPE_BODY' = '', headers: 'typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None' = None, status: 'int' = 0, version: 'int' = 0, reason: 'str | None' = None, preload_content: 'bool' = True, decode_content: 'bool' = True, original_response: 'LowLevelResponse | None' = None, pool: 'HTTPConnectionPool | None' = None, connection: 'HTTPConnection | None' = None, msg: 'Message | None' = None, retries: 'Retry | None' = None, enforce_content_length: 'bool' = True, request_method: 'str | None' = None, request_url: 'str | None' = None, auto_close: 'bool' = True, police_officer: 'TrafficPolice[HTTPConnection] | None' = None) -> 'None'
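A minimal consumption sketch, assuming the package root exposes a drop-in ``PoolManager`` (the url is illustrative):
import urllib3_future

http = urllib3_future.PoolManager()
resp = http.request("GET", "https://example.com/")
print(resp.status)
print(resp.headers.get("content-type"))
body = resp.data  # body is loaded/decoded on demand and cached on the response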
|
710,081
|
urllib3_future.response
|
__init__
| null |
def __init__(
self,
body: _TYPE_BODY = "",
headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
status: int = 0,
version: int = 0,
reason: str | None = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: LowLevelResponse | None = None,
pool: HTTPConnectionPool | None = None,
connection: HTTPConnection | None = None,
msg: Message | None = None,
retries: Retry | None = None,
enforce_content_length: bool = True,
request_method: str | None = None,
request_url: str | None = None,
auto_close: bool = True,
police_officer: TrafficPolice[HTTPConnection] | None = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
try:
self.status = int(status)
except ValueError:
self.status = 0 # merely for tests, was supported due to broken httplib.
self.version = version
self.reason = reason
self.decode_content = decode_content
self._has_decoded_content = False
self._request_url: str | None = request_url
self._retries: Retry | None = None
self.retries = retries
self.chunked = False
if "transfer-encoding" in self.headers:
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: ContentDecoder | None = None
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: LowLevelResponse | typing.IO[typing.Any] | None = None
self._original_response = original_response
self._fp_bytes_read = 0
if msg is not None:
warnings.warn(
"Passing msg=.. is deprecated and no-op in urllib3.future and is scheduled to be removed in a future major.",
DeprecationWarning,
stacklevel=2,
)
self.msg = msg
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
# Are we using the chunked-style of transfer encoding?
self.chunk_left: int | None = None
# Determine length of response
self.length_remaining: int | None = self._init_length(request_method)
# Used to return the correct amount of bytes for partial read()s
self._decoded_buffer = BytesQueueBuffer()
self._police_officer: TrafficPolice[HTTPConnection] | None = police_officer
if self._police_officer is not None:
self._police_officer.memorize(self, self._connection)
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
|
(self, body: '_TYPE_BODY' = '', headers: 'typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None' = None, status: 'int' = 0, version: 'int' = 0, reason: 'str | None' = None, preload_content: 'bool' = True, decode_content: 'bool' = True, original_response: 'LowLevelResponse | None' = None, pool: 'HTTPConnectionPool | None' = None, connection: 'HTTPConnection | None' = None, msg: 'Message | None' = None, retries: 'Retry | None' = None, enforce_content_length: 'bool' = True, request_method: 'str | None' = None, request_url: 'str | None' = None, auto_close: 'bool' = True, police_officer: 'TrafficPolice[HTTPConnection] | None' = None) -> 'None'
|
710,086
|
urllib3_future.response
|
_fp_read
|
Read a response while guarding against a known overflow: in some
known cases, reading more bytes than fit in a 32-bit int in a
single call via SSL raises an overflow error, so the read is
chunked whenever `amt` or `self.length_remaining` indicates a
problem may happen.
The known cases:
* 3.8 <= CPython < 3.9.7 because of a bug
https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
* urllib3 injected with pyOpenSSL-backed SSL-support.
* CPython < 3.10 only when `amt` does not fit 32-bit int.
|
def _fp_read(self, amt: int | None = None) -> bytes:
"""
    Read a response while guarding against a known overflow: in some
    known cases, reading more bytes than fit in a 32-bit int in a
    single call via SSL raises an overflow error, so the read is
    chunked whenever `amt` or `self.length_remaining` indicates a
    problem may happen.
The known cases:
* 3.8 <= CPython < 3.9.7 because of a bug
https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
* urllib3 injected with pyOpenSSL-backed SSL-support.
* CPython < 3.10 only when `amt` does not fit 32-bit int.
"""
assert self._fp
c_int_max = 2**31 - 1
if (
(amt and amt > c_int_max)
or (self.length_remaining and self.length_remaining > c_int_max)
) and sys.version_info < (3, 10):
buffer = io.BytesIO()
# Besides `max_chunk_amt` being a maximum chunk size, it
# affects memory overhead of reading a response by this
# method in CPython.
# `c_int_max` equal to 2 GiB - 1 byte is the actual maximum
# chunk size that does not lead to an overflow error, but
# 256 MiB is a compromise.
max_chunk_amt = 2**28
while amt is None or amt != 0:
if amt is not None:
chunk_amt = min(amt, max_chunk_amt)
amt -= chunk_amt
else:
chunk_amt = max_chunk_amt
try:
data = self._fp.read(chunk_amt)
except ValueError: # Defensive: overly protective
break # Defensive: can also be an indicator that read ended, should not happen.
if not data:
break
buffer.write(data)
del data # to reduce peak memory usage by `max_chunk_amt`.
return buffer.getvalue()
else:
# StringIO doesn't like amt=None
return self._fp.read(amt) if amt is not None else self._fp.read()
|
(self, amt: Optional[int] = None) -> bytes
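A standalone sketch of the same chunking guard, detached from the library API (function and parameter names are hypothetical):
import io
import typing

def read_in_chunks(fp: typing.IO[bytes], amt: int, max_chunk: int = 2**28) -> bytes:
    # never request more than max_chunk (256 MiB) per call, avoiding the
    # 32-bit int overflow described above
    buffer = io.BytesIO()
    while amt > 0:
        data = fp.read(min(amt, max_chunk))
        if not data:
            break
        buffer.write(data)
        amt -= len(data)
    return buffer.getvalue()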
|
710,089
|
urllib3_future.response
|
_raw_read
|
Reads `amt` bytes from the socket.
|
def _raw_read(
self,
amt: int | None = None,
) -> bytes:
"""
    Reads `amt` bytes from the socket.
"""
if self._fp is None:
return None # type: ignore[return-value]
fp_closed = getattr(self._fp, "closed", False)
with self._error_catcher():
data = self._fp_read(amt) if not fp_closed else b""
if amt is not None and amt != 0 and not data:
# Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
if (
self.enforce_content_length
and self.length_remaining is not None
and self.length_remaining != 0
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
return data
|
(self, amt: Optional[int] = None) -> bytes
|
710,091
|
urllib3_future.response
|
drain_conn
|
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
|
def drain_conn(self) -> None:
"""
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
"""
try:
self.read()
except (HTTPError, OSError, BaseSSLError):
pass
|
(self) -> NoneType
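A hedged usage sketch (the ``PoolManager`` import and url are assumptions): when a body is deliberately left unread, draining it lets the connection return to the pool.
import urllib3_future

http = urllib3_future.PoolManager()
resp = http.request("GET", "https://example.com/large", preload_content=False)
# decided not to read the body after all:
resp.drain_conn()    # read and discard whatever remains
resp.release_conn()  # hand the connection back to the pool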
|
710,100
|
urllib3_future.response
|
read
|
Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
    returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
|
def read(
self,
amt: int | None = None,
decode_content: bool | None = None,
cache_content: bool = False,
) -> bytes:
"""
Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
        returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
try:
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if amt is not None:
cache_content = False
if amt < 0 and len(self._decoded_buffer):
return self._decoded_buffer.get(len(self._decoded_buffer))
if 0 < amt <= len(self._decoded_buffer):
return self._decoded_buffer.get(amt)
if self._police_officer is not None and not self._police_officer.busy:
with self._police_officer.borrow(self):
data = self._raw_read(amt)
else:
data = self._raw_read(amt)
if amt and amt < 0:
amt = len(data)
flush_decoder = False
if amt is None:
flush_decoder = True
elif amt != 0 and not data:
flush_decoder = True
if not data and len(self._decoded_buffer) == 0:
return data
if amt is None:
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
else:
# do not waste memory on buffer when not decoding
if not decode_content:
if self._has_decoded_content:
raise RuntimeError(
"Calling read(decode_content=False) is not supported after "
"read(decode_content=True) was called."
)
return data
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
while len(self._decoded_buffer) < amt and data:
# TODO make sure to initially read enough data to get past the headers
# For example, the GZ file header takes 10 bytes, we don't want to read
# it one byte at a time
if (
self._police_officer is not None
and not self._police_officer.busy
):
with self._police_officer.borrow(self):
data = self._raw_read(amt)
else:
data = self._raw_read(amt)
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
data = self._decoded_buffer.get(amt)
return data
finally:
if (
hasattr(self._fp, "_eot")
and self._fp._eot # type: ignore[union-attr]
and self._police_officer is not None
):
self._police_officer.forget(self)
if self._police_officer.busy:
self._police_officer.release()
self._police_officer = None
|
(self, amt: Optional[int] = None, decode_content: Optional[bool] = None, cache_content: bool = False) -> bytes
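A short sketch of partial reads (the ``PoolManager`` import, url, and amt values are assumptions):
import urllib3_future

http = urllib3_future.PoolManager()
resp = http.request("GET", "https://example.com/", preload_content=False)
head = resp.read(1024)  # up to 1024 decoded bytes; caching is skipped when amt is set
rest = resp.read()      # remainder of the body; flushes the decoder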
|
710,104
|
urllib3_future.response
|
stream
|
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
    that much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned. Setting -1 will output chunks as soon as they
arrive.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
|
def stream(
self, amt: int | None = 2**16, decode_content: bool | None = None
) -> typing.Generator[bytes, None, None]:
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
        that much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned. Setting -1 will output chunks as soon as they
arrive.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0:
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
|
(self, amt: int | None = 65536, decode_content: Optional[bool] = None) -> Generator[bytes, NoneType, NoneType]
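A hedged streaming sketch (the import path, url, and destination file are assumptions):
import urllib3_future

http = urllib3_future.PoolManager()
resp = http.request("GET", "https://example.com/big-file", preload_content=False)
with open("big-file.bin", "wb") as fh:
    for chunk in resp.stream(2**16):  # up to ~64 KiB of decoded data per iteration
        fh.write(chunk)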
|
710,107
|
urllib3_future.backend._base
|
ConnectionInfo
| null |
class ConnectionInfo:
def __init__(self) -> None:
#: Time taken to establish the connection
self.established_latency: timedelta | None = None
#: HTTP protocol used with the remote peer (not the proxy)
self.http_version: HttpVersion | None = None
#: The SSL certificate presented by the remote peer (not the proxy)
self.certificate_der: bytes | None = None
self.certificate_dict: dict[
str, int | tuple[tuple[str, str], ...] | tuple[str, ...] | str
] | None = None
#: The SSL issuer certificate for the remote peer certificate (not the proxy)
self.issuer_certificate_der: bytes | None = None
self.issuer_certificate_dict: dict[
str, int | tuple[tuple[str, str], ...] | tuple[str, ...] | str
] | None = None
        #: The IP address used to reach the remote peer (not the proxy), that was yielded by your resolver.
self.destination_address: tuple[str, int] | None = None
#: The TLS cipher used to secure the exchanges (not the proxy)
self.cipher: str | None = None
        #: The TLS version used (not the proxy)
        self.tls_version: TLSVersion | None = None
        #: The time taken to complete the TLS handshake with the remote peer (not the proxy)
        self.tls_handshake_latency: timedelta | None = None
#: Time taken to resolve a domain name into a reachable IP address.
self.resolution_latency: timedelta | None = None
#: Time taken to encode and send the whole request through the socket.
self.request_sent_latency: timedelta | None = None
def __repr__(self) -> str:
return str(
{
"established_latency": self.established_latency,
"certificate_der": self.certificate_der,
"certificate_dict": self.certificate_dict,
"issuer_certificate_der": self.issuer_certificate_der,
"issuer_certificate_dict": self.issuer_certificate_dict,
"destination_address": self.destination_address,
"cipher": self.cipher,
"tls_version": self.tls_version,
"tls_handshake_latency": self.tls_handshake_latency,
"http_version": self.http_version,
"resolution_latency": self.resolution_latency,
"request_sent_latency": self.request_sent_latency,
}
)
def is_encrypted(self) -> bool:
return self.certificate_der is not None
|
() -> 'None'
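A minimal sketch grounded in the class above: every field defaults to None, and ``is_encrypted()`` simply reflects whether a peer certificate was captured (the DER bytes below are a placeholder, not a real certificate):
from urllib3_future.backend._base import ConnectionInfo

info = ConnectionInfo()
assert info.is_encrypted() is False     # no certificate captured yet
info.certificate_der = b"0\x82\x01..."  # placeholder DER bytes
assert info.is_encrypted() is True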
|
710,108
|
urllib3_future.backend._base
|
__init__
| null |
def __init__(self) -> None:
#: Time taken to establish the connection
self.established_latency: timedelta | None = None
#: HTTP protocol used with the remote peer (not the proxy)
self.http_version: HttpVersion | None = None
#: The SSL certificate presented by the remote peer (not the proxy)
self.certificate_der: bytes | None = None
self.certificate_dict: dict[
str, int | tuple[tuple[str, str], ...] | tuple[str, ...] | str
] | None = None
#: The SSL issuer certificate for the remote peer certificate (not the proxy)
self.issuer_certificate_der: bytes | None = None
self.issuer_certificate_dict: dict[
str, int | tuple[tuple[str, str], ...] | tuple[str, ...] | str
] | None = None
    #: The IP address used to reach the remote peer (not the proxy), that was yielded by your resolver.
self.destination_address: tuple[str, int] | None = None
#: The TLS cipher used to secure the exchanges (not the proxy)
self.cipher: str | None = None
    #: The TLS version used (not the proxy)
    self.tls_version: TLSVersion | None = None
    #: The time taken to complete the TLS handshake with the remote peer (not the proxy)
    self.tls_handshake_latency: timedelta | None = None
#: Time taken to resolve a domain name into a reachable IP address.
self.resolution_latency: timedelta | None = None
#: Time taken to encode and send the whole request through the socket.
self.request_sent_latency: timedelta | None = None
|
(self) -> NoneType
|
710,109
|
urllib3_future.backend._base
|
__repr__
| null |
def __repr__(self) -> str:
return str(
{
"established_latency": self.established_latency,
"certificate_der": self.certificate_der,
"certificate_dict": self.certificate_dict,
"issuer_certificate_der": self.issuer_certificate_der,
"issuer_certificate_dict": self.issuer_certificate_dict,
"destination_address": self.destination_address,
"cipher": self.cipher,
"tls_version": self.tls_version,
"tls_handshake_latency": self.tls_handshake_latency,
"http_version": self.http_version,
"resolution_latency": self.resolution_latency,
"request_sent_latency": self.request_sent_latency,
}
)
|
(self) -> str
|
710,110
|
urllib3_future.backend._base
|
is_encrypted
| null |
def is_encrypted(self) -> bool:
return self.certificate_der is not None
|
(self) -> bool
|
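A minimal usage sketch for the ConnectionInfo telemetry documented above, wired through the ``on_post_connection`` hook that the pool's ``urlopen`` (shown further down in this dump) accepts. The top-level import path and the target host are assumptions for illustration:

# Sketch: inspecting ConnectionInfo once the socket is ready.
# Assumptions: HTTPSConnectionPool is exported at the package top level
# (mirroring urllib3) and "example.org" is an illustrative host.
from urllib3_future import HTTPSConnectionPool

def report(info) -> None:
    # info is the ConnectionInfo instance populated by the pool.
    print("encrypted:", info.is_encrypted())
    print("http version:", info.http_version)
    print("resolution latency:", info.resolution_latency)
    print("tls handshake latency:", info.tls_handshake_latency)

pool = HTTPSConnectionPool("example.org", 443)
response = pool.urlopen("GET", "/", on_post_connection=report)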
710,111
|
urllib3_future.connectionpool
|
HTTPConnectionPool
|
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`http.client.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`http.client.HTTPConnection`.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.ProxyManager`
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
|
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`http.client.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`http.client.HTTPConnection`.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.ProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls: (type[HTTPConnection] | type[HTTPSConnection]) = HTTPConnection
def __init__(
self,
host: str,
port: int | None = None,
timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
maxsize: int = 1,
block: bool = False,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
_proxy_headers: typing.Mapping[str, str] | None = None,
_proxy_config: ProxyConfig | None = None,
resolver: ResolverDescription
| list[ResolverDescription]
| str
| list[str]
| BaseResolver
| None = None,
happy_eyeballs: bool | int = False,
**conn_kw: typing.Any,
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.happy_eyeballs = happy_eyeballs
self._maxsize = maxsize
if self.QueueCls is not TrafficPolice and not issubclass(
self.QueueCls, TrafficPolice
):
warnings.warn(
"ConnectionPool QueueCls no longer support typical queue implementation "
"due to its inability to answer urllib3.future needs to handle concurrent streams "
"in a single connection. You may customize the implementation by passing a subclass of "
"urllib3.util.traffic_police.TrafficPolice if necessary.",
DeprecationWarning,
)
self.QueueCls = TrafficPolice
self.pool: TrafficPolice[HTTPConnection] | None = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
self.proxy_config = _proxy_config
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
self.conn_kw["proxy"] = self.proxy
self.conn_kw["proxy_config"] = self.proxy_config
self._own_resolver = not isinstance(resolver, BaseResolver)
if resolver is None:
resolver = [ResolverDescription(ProtocolResolver.SYSTEM)]
elif isinstance(resolver, str):
resolver = [ResolverDescription.from_url(resolver)]
elif isinstance(resolver, ResolverDescription):
resolver = [resolver]
self._resolvers: list[ResolverDescription] = []
if not isinstance(resolver, BaseResolver):
can_resolve_localhost: bool = False
for resolver_description in resolver:
if isinstance(resolver_description, str):
self._resolvers.append(
ResolverDescription.from_url(resolver_description)
)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
continue
self._resolvers.append(resolver_description)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
if not can_resolve_localhost:
self._resolvers.append(
ResolverDescription.from_url("system://default?hosts=localhost")
)
#: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
for rd in self._resolvers:
if "ca_cert_data" in conn_kw:
if "ca_cert_data" not in rd:
rd["ca_cert_data"] = conn_kw["ca_cert_data"]
if "ca_cert_dir" in conn_kw:
if "ca_cert_dir" not in rd:
rd["ca_cert_dir"] = conn_kw["ca_cert_dir"]
if "ca_certs" in conn_kw:
if "ca_certs" not in rd:
rd["ca_certs"] = conn_kw["ca_certs"]
self._resolver: BaseResolver = (
ManyResolver(*[r.new() for r in self._resolvers])
if not isinstance(resolver, BaseResolver)
else resolver
)
self.conn_kw["resolver"] = self._resolver
@property
def is_idle(self) -> bool:
return self.pool is None or self.pool.bag_only_idle
def _new_conn(self, *, heb_timeout: Timeout | None = None) -> HTTPConnection:
"""
Return a fresh :class:`HTTPConnection`.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = None
if self.happy_eyeballs:
log.debug(
"Attempting Happy-Eyeball %s:%s",
self.host,
self.port or "443",
)
dt_pre_resolve = datetime.now(tz=timezone.utc)
ip_addresses = self._resolver.getaddrinfo(
self.host,
self.port,
socket.AF_UNSPEC
if "socket_family" not in self.conn_kw
else self.conn_kw["socket_family"],
socket.SOCK_STREAM,
quic_upgrade_via_dns_rr=False,
)
delta_post_resolve = datetime.now(tz=timezone.utc) - dt_pre_resolve
if len(ip_addresses) > 1:
ipv6_addresses = []
ipv4_addresses = []
for ip_address in ip_addresses:
if ip_address[0] == socket.AF_INET6:
ipv6_addresses.append(ip_address)
else:
ipv4_addresses.append(ip_address)
if ipv4_addresses and ipv6_addresses:
log.debug(
"Happy-Eyeball Dual-Stack %s:%s",
self.host,
self.port or "443",
)
intermediary_addresses = []
for ipv6_entry, ipv4_entry in zip_longest(
ipv6_addresses, ipv4_addresses
):
if ipv6_entry:
intermediary_addresses.append(ipv6_entry)
if ipv4_entry:
intermediary_addresses.append(ipv4_entry)
ip_addresses = intermediary_addresses
else:
log.debug(
"Happy-Eyeball Single-Stack %s:%s",
self.host,
self.port or "443",
)
challengers = []
max_task = (
4 if isinstance(self.happy_eyeballs, bool) else self.happy_eyeballs
)
if heb_timeout is None:
heb_timeout = self.timeout
override_timeout = (
heb_timeout.connect_timeout
if heb_timeout.connect_timeout is not None
and isinstance(heb_timeout.connect_timeout, (float, int))
else 0.4
)
for ip_address in ip_addresses[:max_task]:
conn_kw = self.conn_kw.copy()
target_solo_addr = (
f"[{ip_address[-1][0]}]"
if ip_address[0] == socket.AF_INET6
else ip_address[-1][0]
)
conn_kw["resolver"] = ResolverDescription.from_url(
f"in-memory://default?hosts={self.host}:{target_solo_addr}"
).new()
conn_kw["socket_family"] = ip_address[0]
challengers.append(
self.ConnectionCls(
host=self.host,
port=self.port,
timeout=override_timeout,
**conn_kw,
)
)
event = threading.Event()
winning_task: Future[None] | None = None
completed_count: int = 0
def _happy_eyeballs_completed(t: Future[None]) -> None:
nonlocal winning_task, event, completed_count
if winning_task is None and t.exception() is None:
winning_task = t
event.set()
return
completed_count += 1
if completed_count >= len(challengers):
event.set()
tpe = ThreadPoolExecutor(max_workers=max_task)
tasks: list[Future[None]] = []
for challenger in challengers:
task = tpe.submit(challenger.connect)
task.add_done_callback(_happy_eyeballs_completed)
tasks.append(task)
event.wait()
for task in tasks:
if task == winning_task:
continue
if task.running():
task.cancel()
else:
challengers[tasks.index(task)].close()
if winning_task is None:
within_delay_msg: str = (
f" within {override_timeout}s" if override_timeout else ""
)
raise NewConnectionError(
challengers[0],
f"Failed to establish a new connection: No suitable address to connect to using Happy Eyeballs for {self.host}:{self.port}{within_delay_msg}",
) from tasks[0].exception()
conn = challengers[tasks.index(winning_task)]
# we have to replace the resolution latency metric
if conn.conn_info:
conn.conn_info.resolution_latency = delta_post_resolve
tpe.shutdown(wait=False)
else:
log.debug(
"Happy-Eyeball Ineligible %s:%s",
self.host,
self.port or "443",
)
if conn is None:
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
**self.conn_kw,
)
self.pool.put(conn, immediately_unavailable=True)
return conn
def _get_conn(
self, timeout: float | None = None, *, heb_timeout: Timeout | None = None
) -> HTTPConnection:
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed.")
try:
conn = self.pool.get(
block=self.block, timeout=timeout, non_saturated_only=True
)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.") from None # Defensive:
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool is empty and a new connection can't be opened due to blocking mode.",
) from None
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
return conn or self._new_conn(heb_timeout=heb_timeout)
def _put_conn(self, conn: HTTPConnection) -> None:
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
if self.pool is not None:
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# Connection never got put back into the pool, close it.
if conn:
if conn.is_idle:
conn.close()
if self.block:
# This should never happen if you got the conn from self._get_conn
raise FullPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from None
else:
# a multiplexed connection may still have in-flight requests not yet converted into responses;
# we shall not discard it until those responses are consumed.
if conn and conn.is_idle is False:
log.warning(
"Connection pool is full, temporary increase, keeping connection, "
"multiplexed and not idle: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
if self.pool.maxsize is not None:
self.pool.maxsize += 1
return self._put_conn(conn)
log.warning(
"Connection pool is full, discarding connection: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
self.num_connections -= 1
return
# Connection never got put back into the pool, close it.
if conn:
conn.close()
self.num_connections -= 1
def _validate_conn(self, conn: HTTPConnection) -> None:
"""
Called right before a request is made, after the socket is created.
"""
if conn.is_closed:
conn.connect()
def _prepare_proxy(self, conn: HTTPConnection) -> None:
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:
"""Helper that always returns a :class:`urllib3.util.Timeout`"""
if timeout is _DEFAULT_TIMEOUT:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(
self,
err: BaseSSLError | OSError | SocketTimeout,
url: str,
timeout_value: _TYPE_TIMEOUT | None,
) -> None:
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={timeout_value})"
) from err
# See the above comment about EAGAIN in Python 3.
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={timeout_value})"
) from err
def get_response(
self, *, promise: ResponsePromise | None = None
) -> HTTPResponse | None:
"""
Retrieve the first response available in the pool.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none is available, returns ``None``.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
if promise is not None and not isinstance(promise, ResponsePromise):
raise TypeError(
f"get_response only support ResponsePromise but received {type(promise)} instead. "
f"This may occur if you expected the remote peer to support multiplexing but did not."
)
try:
with self.pool.borrow(
promise or ResponsePromise,
block=promise is not None,
not_idle_only=promise is None,
) as conn:
response = conn.getresponse(promise=promise, police_officer=self.pool)
except UnavailableTraffic:
return None
if promise is not None and response is None:
raise ValueError(
"Invoked get_response with promise=... that no connection in pool recognize"
)
if response is None:
return None
from_promise = None
if promise:
from_promise = promise
else:
if (
response._fp
and hasattr(response._fp, "from_promise")
and response._fp.from_promise
):
from_promise = response._fp.from_promise
if from_promise is None:
raise ValueError(
"Internal: Unable to identify originating ResponsePromise from a LowLevelResponse"
)
self.pool.forget(from_promise)
# Retrieve request ctx
method = typing.cast(str, from_promise.get_parameter("method"))
redirect = typing.cast(bool, from_promise.get_parameter("redirect"))
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
url = typing.cast(str, from_promise.get_parameter("url"))
body = typing.cast(
typing.Optional[_TYPE_BODY], from_promise.get_parameter("body")
)
headers = typing.cast(HTTPHeaderDict, from_promise.get_parameter("headers"))
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Optional[_TYPE_TIMEOUT], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
new_promise = self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=True,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return self.get_response(promise=new_promise if promise else None)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if retries.is_retry(method, response.status, has_retry_after):
url = typing.cast(str, from_promise.get_parameter("url"))
body = typing.cast(
typing.Optional[_TYPE_BODY], from_promise.get_parameter("body")
)
headers = typing.cast(HTTPHeaderDict, from_promise.get_parameter("headers"))
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Optional[_TYPE_TIMEOUT], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
new_promise = self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=False,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return self.get_response(promise=new_promise if promise else None)
return response
@typing.overload
def _make_request(
self,
conn: HTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | None = ...,
timeout: _TYPE_TIMEOUT = ...,
chunked: bool = ...,
response_conn: HTTPConnection | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
enforce_content_length: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = ...,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None] = ...,
*,
multiplexed: Literal[True],
) -> ResponsePromise:
...
@typing.overload
def _make_request(
self,
conn: HTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | None = ...,
timeout: _TYPE_TIMEOUT = ...,
chunked: bool = ...,
response_conn: HTTPConnection | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
enforce_content_length: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = ...,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None] = ...,
*,
multiplexed: Literal[False] = ...,
) -> HTTPResponse:
...
def _make_request(
self,
conn: HTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | None = None,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
chunked: bool = False,
response_conn: HTTPConnection | None = None,
preload_content: bool = True,
decode_content: bool = True,
enforce_content_length: bool = True,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = None,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None]
| None = None,
multiplexed: Literal[False] | Literal[True] = False,
) -> HTTPResponse | ResponsePromise:
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param response_conn:
Set this to ``None`` if you will handle releasing the connection or
set the connection to have the response release it.
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
try:
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# _validate_conn() starts the connection to an HTTPS proxy
# so we need to wrap errors with 'ProxyError' here too.
except (
OSError,
NewConnectionError,
TimeoutError,
BaseSSLError,
CertificateError,
SSLError,
) as e:
new_e: Exception = e
if isinstance(e, (BaseSSLError, CertificateError)):
new_e = SSLError(e)
# If the connection didn't successfully connect to its proxy,
# wrap the error as a ProxyError.
if isinstance(
new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
) and (conn and conn.proxy and not conn.has_connected_to_proxy):
new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
raise new_e
if on_post_connection is not None and conn.conn_info is not None:
# A second request does not redo handshake or DNS resolution.
if (
hasattr(conn, "_start_last_request")
and conn._start_last_request is not None
):
if conn.conn_info.tls_handshake_latency:
conn.conn_info.tls_handshake_latency = timedelta()
if conn.conn_info.established_latency:
conn.conn_info.established_latency = timedelta()
if conn.conn_info.resolution_latency:
conn.conn_info.resolution_latency = timedelta()
if conn.conn_info.request_sent_latency:
conn.conn_info.request_sent_latency = None
on_post_connection(conn.conn_info)
if conn.is_multiplexed is False and multiplexed is True:
# overruling: the connection cannot multiplex, downgrade to a blocking request
multiplexed = False
try:
rp = conn.request(
method,
url,
body=body,
headers=headers,
chunked=chunked,
preload_content=preload_content,
decode_content=decode_content,
enforce_content_length=enforce_content_length,
on_upload_body=on_upload_body,
)
# We are swallowing BrokenPipeError (errno.EPIPE) since the server is
# legitimately able to close the connection after sending a valid response.
# With this behaviour, the received response is still readable.
except BrokenPipeError as e:
rp = e.promise # type: ignore
except OSError as e:
rp = None
# MacOS/Linux
# EPROTOTYPE is needed on macOS
# https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE:
raise
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
if multiplexed:
if rp is None:
raise OSError
rp.set_parameter("read_timeout", read_timeout)
return rp
if not conn.is_closed:
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={read_timeout})"
)
conn.timeout = read_timeout
# Receive the response from the server
try:
response = conn.getresponse(police_officer=self.pool)
except (BaseSSLError, OSError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# Set properties that are used by the pooling layer.
response.retries = retries
response._pool = self
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
# HTTP version
conn._http_vsn_str,
response.status,
response.length_remaining,
)
return response
def close(self) -> None:
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
# Close all the HTTPConnections in the pool.
old_pool.clear()
# Close allocated resolver if we own it. (aka. not shared)
if self._own_resolver and self._resolver.is_available():
self._resolver.close()
def is_same_host(self, url: str) -> bool:
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, _, host, port, *_ = parse_url(url)
scheme = scheme or "http"
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
@typing.overload # type: ignore[override]
def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | bool | int | None = ...,
redirect: bool = ...,
assert_same_host: bool = ...,
timeout: _TYPE_TIMEOUT = ...,
pool_timeout: int | None = ...,
release_conn: bool | None = ...,
chunked: bool = ...,
body_pos: _TYPE_BODY_POSITION | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = ...,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None] = ...,
*,
multiplexed: Literal[False] = ...,
**response_kw: typing.Any,
) -> HTTPResponse:
...
@typing.overload
def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = ...,
headers: typing.Mapping[str, str] | None = ...,
retries: Retry | bool | int | None = ...,
redirect: bool = ...,
assert_same_host: bool = ...,
timeout: _TYPE_TIMEOUT = ...,
pool_timeout: int | None = ...,
release_conn: bool | None = ...,
chunked: bool = ...,
body_pos: _TYPE_BODY_POSITION | None = ...,
preload_content: bool = ...,
decode_content: bool = ...,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = ...,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None] = ...,
*,
multiplexed: Literal[True],
**response_kw: typing.Any,
) -> ResponsePromise:
...
def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
redirect: bool = True,
assert_same_host: bool = True,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
pool_timeout: int | None = None,
release_conn: bool | None = None,
chunked: bool = False,
body_pos: _TYPE_BODY_POSITION | None = None,
preload_content: bool = True,
decode_content: bool = True,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = None,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None]
| None = None,
multiplexed: bool = False,
**response_kw: typing.Any,
) -> HTTPResponse | ResponsePromise:
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method
such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When ``False``, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param bool preload_content:
If True, the response's body will be preloaded into memory.
:param bool decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of ``preload_content``
which defaults to ``True``.
:param bool chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param on_post_connection:
Callable invoked with the connection specifications for the request
about to be sent. See the ``urllib3.ConnectionInfo`` class for more.
:param on_upload_body:
Callable that will be invoked upon body upload in order to be able to track
the progress. The values are expressed in bytes. The total may not be
available, in which case it is ``None``. In order, arguments are:
(total_sent, total_to_be_sent, completed, any_error)
:param multiplexed:
Dispatch the request in a non-blocking way; the response will be
retrieved later with the get_response() method.
"""
parsed_url = parse_url(url)
destination_scheme = parsed_url.scheme
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = preload_content
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = to_str(_encode_target(url))
else:
url = to_str(parsed_url.url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
http_tunnel_required = connection_requires_http_tunnel(
self.proxy, self.proxy_config, destination_scheme
)
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
# have to copy the headers dict so we can safely change it without those
# changes being reflected in anyone else's copy.
if not http_tunnel_required:
headers = headers.copy() # type: ignore[attr-defined]
headers.update(self.proxy_headers) # type: ignore[union-attr]
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout, heb_timeout=timeout_obj)
conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]
# Is this a closed/new connection that requires CONNECT tunnelling?
if self.proxy is not None and http_tunnel_required and conn.is_closed:
try:
self._prepare_proxy(conn)
except (BaseSSLError, OSError, SocketTimeout) as e:
self._raise_timeout(
err=e, url=self.proxy.url, timeout_value=conn.timeout
)
raise
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Make the request on the HTTPConnection object
response = self._make_request( # type: ignore[call-overload,misc]
conn,
method,
url,
body=body,
headers=headers,
retries=retries,
timeout=timeout_obj,
chunked=chunked,
response_conn=response_conn,
preload_content=preload_content,
decode_content=decode_content,
enforce_content_length=True,
on_post_connection=on_post_connection,
on_upload_body=on_upload_body,
multiplexed=multiplexed,
)
# a non-multiplexed connection was established; fall back to the original behavior.
if not isinstance(response, ResponsePromise):
multiplexed = False
if multiplexed:
response.set_parameter("method", method)
response.set_parameter("url", url)
response.set_parameter("body", body)
response.set_parameter("headers", headers)
response.set_parameter("retries", retries)
response.set_parameter("preload_content", preload_content)
response.set_parameter("decode_content", decode_content)
response.set_parameter("timeout", timeout_obj)
response.set_parameter("redirect", redirect)
response.set_parameter("response_kw", response_kw)
response.set_parameter("pool_timeout", pool_timeout)
response.set_parameter("assert_same_host", assert_same_host)
response.set_parameter("chunked", chunked)
response.set_parameter("body_pos", body_pos)
release_this_conn = not conn.is_saturated
# Everything went great!
clean_exit = True
except EmptyPoolError:
# Didn't get a connection from the pool, no need to clean up
clean_exit = True
release_this_conn = False
raise
except (
TimeoutError,
OSError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
ProxyError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
new_e: Exception = e
if isinstance(e, (BaseSSLError, CertificateError)):
new_e = SSLError(e)
if isinstance(
new_e,
(
OSError,
NewConnectionError,
TimeoutError,
SSLError,
),
) and (conn and conn.proxy and not conn.has_connected_to_proxy):
new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
elif isinstance(new_e, OSError):
new_e = ProtocolError("Connection aborted.", new_e)
retries = retries.increment(
method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
if conn:
conn.close()
conn = None
release_this_conn = True
elif conn and conn.is_multiplexed is True:
# multiplexing allows us to issue more requests.
release_this_conn = True
if release_this_conn is True and conn is not None:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if (
clean_exit
and isinstance(response, ResponsePromise)
and self.pool is not None
):
self.pool.memorize(response, conn)
elif release_this_conn is True and self.pool is not None:
self.pool.kill_cursor()
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen( # type: ignore[no-any-return,call-overload,misc]
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=multiplexed,
**response_kw,
)
if multiplexed:
assert isinstance(response, ResponsePromise)
return response # actually a response promise!
assert isinstance(response, HTTPResponse)
if redirect and response.get_redirect_location():
# Handle redirect?
redirect_location = response.get_redirect_location()
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen( # type: ignore[call-overload,no-any-return,misc]
method,
redirect_location,
body=body,
headers=headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=False,
**response_kw,
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=False,
**response_kw,
)
return response
|
(host: 'str', port: 'int | None' = None, timeout: '_TYPE_TIMEOUT | None' = <_TYPE_DEFAULT.token: -1>, maxsize: 'int' = 1, block: 'bool' = False, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | bool | int | None' = None, _proxy: 'Url | None' = None, _proxy_headers: 'typing.Mapping[str, str] | None' = None, _proxy_config: 'ProxyConfig | None' = None, resolver: 'ResolverDescription | list[ResolverDescription] | str | list[str] | BaseResolver | None' = None, happy_eyeballs: 'bool | int' = False, **conn_kw: 'typing.Any')
|
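The record above documents both the classic blocking flow and the multiplexed promise flow (``urlopen(multiplexed=True)`` followed by ``get_response``). A minimal sketch of both, assuming the pool and promise classes are exported at the package top level (mirroring urllib3) and using an illustrative host:

# Sketch: classic and multiplexed request flows.
# Assumption: top-level exports mirror urllib3; host is illustrative.
from urllib3_future import HTTPSConnectionPool, ResponsePromise

pool = HTTPSConnectionPool("example.org", 443, maxsize=2)

# Classic, blocking flow: returns an HTTPResponse.
response = pool.urlopen("GET", "/")
print(response.status)

# Multiplexed flow: urlopen returns a ResponsePromise when the negotiated
# protocol supports it; per _make_request it silently degrades to a plain
# HTTPResponse otherwise, hence the isinstance check.
result = pool.urlopen("GET", "/", multiplexed=True)
if isinstance(result, ResponsePromise):
    later = pool.get_response(promise=result)  # blocks for this promise
    if later is not None:
        print(later.status)
else:
    print(result.status)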
710,114
|
urllib3_future.connectionpool
|
__init__
| null |
def __init__(
self,
host: str,
port: int | None = None,
timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,
maxsize: int = 1,
block: bool = False,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
_proxy_headers: typing.Mapping[str, str] | None = None,
_proxy_config: ProxyConfig | None = None,
resolver: ResolverDescription
| list[ResolverDescription]
| str
| list[str]
| BaseResolver
| None = None,
happy_eyeballs: bool | int = False,
**conn_kw: typing.Any,
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.happy_eyeballs = happy_eyeballs
self._maxsize = maxsize
if self.QueueCls is not TrafficPolice and not issubclass(
self.QueueCls, TrafficPolice
):
warnings.warn(
"ConnectionPool QueueCls no longer support typical queue implementation "
"due to its inability to answer urllib3.future needs to handle concurrent streams "
"in a single connection. You may customize the implementation by passing a subclass of "
"urllib3.util.traffic_police.TrafficPolice if necessary.",
DeprecationWarning,
)
self.QueueCls = TrafficPolice
self.pool: TrafficPolice[HTTPConnection] | None = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
self.proxy_config = _proxy_config
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
self.conn_kw["proxy"] = self.proxy
self.conn_kw["proxy_config"] = self.proxy_config
self._own_resolver = not isinstance(resolver, BaseResolver)
if resolver is None:
resolver = [ResolverDescription(ProtocolResolver.SYSTEM)]
elif isinstance(resolver, str):
resolver = [ResolverDescription.from_url(resolver)]
elif isinstance(resolver, ResolverDescription):
resolver = [resolver]
self._resolvers: list[ResolverDescription] = []
if not isinstance(resolver, BaseResolver):
can_resolve_localhost: bool = False
for resolver_description in resolver:
if isinstance(resolver_description, str):
self._resolvers.append(
ResolverDescription.from_url(resolver_description)
)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
continue
self._resolvers.append(resolver_description)
if self._resolvers[-1].protocol == ProtocolResolver.SYSTEM:
can_resolve_localhost = True
if not can_resolve_localhost:
self._resolvers.append(
ResolverDescription.from_url("system://default?hosts=localhost")
)
#: We want to automatically forward ca_cert_data, ca_cert_dir, and ca_certs.
for rd in self._resolvers:
if "ca_cert_data" in conn_kw:
if "ca_cert_data" not in rd:
rd["ca_cert_data"] = conn_kw["ca_cert_data"]
if "ca_cert_dir" in conn_kw:
if "ca_cert_dir" not in rd:
rd["ca_cert_dir"] = conn_kw["ca_cert_dir"]
if "ca_certs" in conn_kw:
if "ca_certs" not in rd:
rd["ca_certs"] = conn_kw["ca_certs"]
self._resolver: BaseResolver = (
ManyResolver(*[r.new() for r in self._resolvers])
if not isinstance(resolver, BaseResolver)
else resolver
)
self.conn_kw["resolver"] = self._resolver
|
(self, host: str, port: Optional[int] = None, timeout: Union[float, urllib3_future.util.timeout._TYPE_DEFAULT, urllib3_future.util.timeout.Timeout, NoneType] = <_TYPE_DEFAULT.token: -1>, maxsize: int = 1, block: bool = False, headers: Optional[Mapping[str, str]] = None, retries: Union[urllib3_future.util.retry.Retry, bool, int, NoneType] = None, _proxy: Optional[urllib3_future.util.url.Url] = None, _proxy_headers: Optional[Mapping[str, str]] = None, _proxy_config: Optional[urllib3_future._typing.ProxyConfig] = None, resolver: Union[urllib3_future.contrib.resolver.factories.ResolverDescription, list[urllib3_future.contrib.resolver.factories.ResolverDescription], str, list[str], urllib3_future.contrib.resolver.protocols.BaseResolver, NoneType] = None, happy_eyeballs: bool | int = False, **conn_kw: Any)
|
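The constructor record above parses resolver specifications from DSN-style strings via ``ResolverDescription.from_url`` and gates the connection-racing logic on ``happy_eyeballs``. A configuration sketch restricted to forms visible in this code; the host name is illustrative:

# Sketch: resolver and Happy Eyeballs configuration, limited to the forms
# appearing in this module ("system://..." DSNs, bool/int happy_eyeballs).
from urllib3_future import HTTPConnectionPool  # assumption: top-level export

pool = HTTPConnectionPool(
    "service.internal",           # illustrative host
    80,
    resolver="system://default",  # the OS resolver, as built by default above
    happy_eyeballs=True,          # bool -> race up to 4 connection attempts
)

pool_capped = HTTPConnectionPool(
    "service.internal",
    80,
    happy_eyeballs=2,             # an int caps the number of racers instead
)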
710,116
|
urllib3_future.connectionpool
|
_get_conn
|
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
|
def _get_conn(
self, timeout: float | None = None, *, heb_timeout: Timeout | None = None
) -> HTTPConnection:
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed.")
try:
conn = self.pool.get(
block=self.block, timeout=timeout, non_saturated_only=True
)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.") from None # Defensive:
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool is empty and a new connection can't be opened due to blocking mode.",
) from None
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
return conn or self._new_conn(heb_timeout=heb_timeout)
|
(self, timeout: Optional[float] = None, *, heb_timeout: Optional[urllib3_future.util.timeout.Timeout] = None) -> urllib3_future.connection.HTTPConnection
|
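``_get_timeout`` in the class record above normalizes every accepted timeout form into a ``Timeout`` object. A sketch of the spellings it handles; the import path is assumed to mirror ``urllib3.util``:

# Sketch: the timeout forms normalized by _get_timeout.
from urllib3_future.util import Timeout  # assumption: mirrors urllib3.util

coarse = Timeout.from_float(5.0)        # bare float/int: one budget overall
fine = Timeout(connect=2.5, read=10.0)  # distinct connect/read budgets

# Either object (or the bare 5.0) may be passed as urlopen(timeout=...);
# _get_timeout clones it internally, so the originals are safe to reuse.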
710,118
|
urllib3_future.connectionpool
|
_make_request
|
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param response_conn:
Set this to ``None`` if you will handle releasing the connection or
set the connection to have the response release it.
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
|
def _make_request(
self,
conn: HTTPConnection,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | None = None,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
chunked: bool = False,
response_conn: HTTPConnection | None = None,
preload_content: bool = True,
decode_content: bool = True,
enforce_content_length: bool = True,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = None,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None]
| None = None,
multiplexed: Literal[False] | Literal[True] = False,
) -> HTTPResponse | ResponsePromise:
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param response_conn:
Set this to ``None`` if you will handle releasing the connection or
set the connection to have the response release it.
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
try:
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# _validate_conn() starts the connection to an HTTPS proxy
# so we need to wrap errors with 'ProxyError' here too.
except (
OSError,
NewConnectionError,
TimeoutError,
BaseSSLError,
CertificateError,
SSLError,
) as e:
new_e: Exception = e
if isinstance(e, (BaseSSLError, CertificateError)):
new_e = SSLError(e)
# If the connection didn't successfully connect to its proxy,
# wrap the error as a ProxyError.
if isinstance(
new_e, (OSError, NewConnectionError, TimeoutError, SSLError)
) and (conn and conn.proxy and not conn.has_connected_to_proxy):
new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
raise new_e
if on_post_connection is not None and conn.conn_info is not None:
# A second request does not redo handshake or DNS resolution.
if (
hasattr(conn, "_start_last_request")
and conn._start_last_request is not None
):
if conn.conn_info.tls_handshake_latency:
conn.conn_info.tls_handshake_latency = timedelta()
if conn.conn_info.established_latency:
conn.conn_info.established_latency = timedelta()
if conn.conn_info.resolution_latency:
conn.conn_info.resolution_latency = timedelta()
if conn.conn_info.request_sent_latency:
conn.conn_info.request_sent_latency = None
on_post_connection(conn.conn_info)
if conn.is_multiplexed is False and multiplexed is True:
# overruling: the connection cannot multiplex, downgrade to a blocking request
multiplexed = False
try:
rp = conn.request(
method,
url,
body=body,
headers=headers,
chunked=chunked,
preload_content=preload_content,
decode_content=decode_content,
enforce_content_length=enforce_content_length,
on_upload_body=on_upload_body,
)
# We are swallowing BrokenPipeError (errno.EPIPE) since the server is
# legitimately able to close the connection after sending a valid response.
# With this behaviour, the received response is still readable.
except BrokenPipeError as e:
rp = e.promise # type: ignore
except OSError as e:
rp = None
# MacOS/Linux
# EPROTOTYPE is needed on macOS
# https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE:
raise
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
if multiplexed:
if rp is None:
raise OSError
rp.set_parameter("read_timeout", read_timeout)
return rp
if not conn.is_closed:
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, f"Read timed out. (read timeout={read_timeout})"
)
conn.timeout = read_timeout
# Receive the response from the server
try:
response = conn.getresponse(police_officer=self.pool)
except (BaseSSLError, OSError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# Set properties that are used by the pooling layer.
response.retries = retries
response._pool = self
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
# HTTP version
conn._http_vsn_str,
response.status,
response.length_remaining,
)
return response
|
(self, conn: 'HTTPConnection', method: 'str', url: 'str', body: '_TYPE_BODY | None' = None, headers: 'typing.Mapping[str, str] | None' = None, retries: 'Retry | None' = None, timeout: '_TYPE_TIMEOUT' = <_TYPE_DEFAULT.token: -1>, chunked: 'bool' = False, response_conn: 'HTTPConnection | None' = None, preload_content: 'bool' = True, decode_content: 'bool' = True, enforce_content_length: 'bool' = True, on_post_connection: 'typing.Callable[[ConnectionInfo], None] | None' = None, on_upload_body: 'typing.Callable[[int, int | None, bool, bool], None] | None' = None, multiplexed: 'Literal[False] | Literal[True]' = False) -> 'HTTPResponse | ResponsePromise'
|
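The ``retries`` and ``timeout`` parameters documented above accept either plain scalars or dedicated helper objects. A minimal sketch, assuming ``urllib3_future`` keeps the same ``Retry``/``Timeout`` constructor arguments as urllib3 (the import paths match the signatures shown elsewhere in this document):
from urllib3_future.util.retry import Retry
from urllib3_future.util.timeout import Timeout

# Fine-grained retry control: up to 3 retries with exponential backoff.
retries = Retry(total=3, backoff_factor=0.5)
# Separate connect/read budgets instead of one float applied to both.
timeout = Timeout(connect=2.0, read=10.0)
# retries=False would disable retrying entirely and re-raise any error.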
710,119
|
urllib3_future.connectionpool
|
_new_conn
|
Return a fresh :class:`HTTPConnection`.
|
def _new_conn(self, *, heb_timeout: Timeout | None = None) -> HTTPConnection:
"""
Return a fresh :class:`HTTPConnection`.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = None
if self.happy_eyeballs:
log.debug(
"Attempting Happy-Eyeball %s:%s",
self.host,
self.port or "443",
)
dt_pre_resolve = datetime.now(tz=timezone.utc)
ip_addresses = self._resolver.getaddrinfo(
self.host,
self.port,
socket.AF_UNSPEC
if "socket_family" not in self.conn_kw
else self.conn_kw["socket_family"],
socket.SOCK_STREAM,
quic_upgrade_via_dns_rr=False,
)
delta_post_resolve = datetime.now(tz=timezone.utc) - dt_pre_resolve
if len(ip_addresses) > 1:
ipv6_addresses = []
ipv4_addresses = []
for ip_address in ip_addresses:
if ip_address[0] == socket.AF_INET6:
ipv6_addresses.append(ip_address)
else:
ipv4_addresses.append(ip_address)
if ipv4_addresses and ipv6_addresses:
log.debug(
"Happy-Eyeball Dual-Stack %s:%s",
self.host,
self.port or "443",
)
intermediary_addresses = []
for ipv6_entry, ipv4_entry in zip_longest(
ipv6_addresses, ipv4_addresses
):
if ipv6_entry:
intermediary_addresses.append(ipv6_entry)
if ipv4_entry:
intermediary_addresses.append(ipv4_entry)
ip_addresses = intermediary_addresses
else:
log.debug(
"Happy-Eyeball Single-Stack %s:%s",
self.host,
self.port or "443",
)
challengers = []
max_task = (
4 if isinstance(self.happy_eyeballs, bool) else self.happy_eyeballs
)
if heb_timeout is None:
heb_timeout = self.timeout
override_timeout = (
heb_timeout.connect_timeout
if heb_timeout.connect_timeout is not None
and isinstance(heb_timeout.connect_timeout, (float, int))
else 0.4
)
for ip_address in ip_addresses[:max_task]:
conn_kw = self.conn_kw.copy()
target_solo_addr = (
f"[{ip_address[-1][0]}]"
if ip_address[0] == socket.AF_INET6
else ip_address[-1][0]
)
conn_kw["resolver"] = ResolverDescription.from_url(
f"in-memory://default?hosts={self.host}:{target_solo_addr}"
).new()
conn_kw["socket_family"] = ip_address[0]
challengers.append(
self.ConnectionCls(
host=self.host,
port=self.port,
timeout=override_timeout,
**conn_kw,
)
)
event = threading.Event()
winning_task: Future[None] | None = None
completed_count: int = 0
def _happy_eyeballs_completed(t: Future[None]) -> None:
nonlocal winning_task, event, completed_count
if winning_task is None and t.exception() is None:
winning_task = t
event.set()
return
completed_count += 1
if completed_count >= len(challengers):
event.set()
tpe = ThreadPoolExecutor(max_workers=max_task)
tasks: list[Future[None]] = []
for challenger in challengers:
task = tpe.submit(challenger.connect)
task.add_done_callback(_happy_eyeballs_completed)
tasks.append(task)
event.wait()
for task in tasks:
if task == winning_task:
continue
if task.running():
task.cancel()
else:
challengers[tasks.index(task)].close()
if winning_task is None:
within_delay_msg: str = (
f" within {override_timeout}s" if override_timeout else ""
)
raise NewConnectionError(
challengers[0],
f"Failed to establish a new connection: No suitable address to connect to using Happy Eyeballs for {self.host}:{self.port}{within_delay_msg}",
) from tasks[0].exception()
conn = challengers[tasks.index(winning_task)]
# we have to replace the resolution latency metric
if conn.conn_info:
conn.conn_info.resolution_latency = delta_post_resolve
tpe.shutdown(wait=False)
else:
log.debug(
"Happy-Eyeball Ineligible %s:%s",
self.host,
self.port or "443",
)
if conn is None:
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
**self.conn_kw,
)
self.pool.put(conn, immediately_unavailable=True)
return conn
|
(self, *, heb_timeout: Optional[urllib3_future.util.timeout.Timeout] = None) -> urllib3_future.connection.HTTPConnection
|
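The Happy-Eyeballs branch above races several connection attempts and keeps the first that succeeds. Below is a self-contained sketch of that racing pattern using only the standard library; the host, port, and 0.4s per-attempt budget are placeholders mirroring the code, not part of the library's API:
import socket
from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait

def try_connect(addrinfo):
    family, socktype, proto, _, sockaddr = addrinfo
    sock = socket.socket(family, socktype, proto)
    sock.settimeout(0.4)  # per-attempt budget, like override_timeout above
    sock.connect(sockaddr)
    return sock

candidates = socket.getaddrinfo("example.org", 443, type=socket.SOCK_STREAM)
with ThreadPoolExecutor(max_workers=4) as tpe:
    pending = {tpe.submit(try_connect, c) for c in candidates[:4]}
    winner = None
    while pending and winner is None:
        done, pending = wait(pending, return_when=FIRST_COMPLETED)
        for fut in done:
            if fut.exception() is None and winner is None:
                winner = fut.result()  # first successful socket wins
            elif fut.exception() is None:
                fut.result().close()  # a later success loses the race
# The real implementation additionally cancels or closes still-pending losers.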
710,120
|
urllib3_future.connectionpool
|
_prepare_proxy
| null |
def _prepare_proxy(self, conn: HTTPConnection) -> None:
# Nothing to do for HTTP connections.
pass
|
(self, conn: urllib3_future.connection.HTTPConnection) -> NoneType
|
710,121
|
urllib3_future.connectionpool
|
_put_conn
|
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
|
def _put_conn(self, conn: HTTPConnection) -> None:
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
if self.pool is not None:
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# Connection never got put back into the pool, close it.
if conn:
if conn.is_idle:
conn.close()
if self.block:
# This should never happen if you got the conn from self._get_conn
raise FullPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from None
else:
# multiplexed connection may still have in-flight request not converted into response
# we shall not discard it until responses are consumed.
if conn and conn.is_idle is False:
log.warning(
"Connection pool is full, temporary increase, keeping connection, "
"multiplexed and not idle: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
if self.pool.maxsize is not None:
self.pool.maxsize += 1
return self._put_conn(conn)
log.warning(
"Connection pool is full, discarding connection: %s. Connection pool size: %s",
self.host,
self.pool.qsize(),
)
self.num_connections -= 1
return
# Connection never got put back into the pool, close it.
if conn:
conn.close()
self.num_connections -= 1
|
(self, conn: urllib3_future.connection.HTTPConnection) -> NoneType
|
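The docstring above describes three outcomes when a connection comes back: it is pooled, discarded because the pool is full, or (with ``block=True``) an error is raised. A minimal sketch of that decision flow, using a bare ``queue.LifoQueue`` as a stand-in for the internal pool object (an assumption; the real pool type is private):
import queue

pool: queue.LifoQueue = queue.LifoQueue(maxsize=1)

def put_back(conn, block_mode: bool) -> None:
    try:
        pool.put(conn, block=False)  # happy path: a slot is free
    except queue.Full:
        if block_mode:
            # mirrors FullPoolError: with block=True this "cannot" happen
            raise RuntimeError("pool reached maximum size")
        close = getattr(conn, "close", None)
        if close is not None:
            close()  # pool is full: discard the extra connection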
710,123
|
urllib3_future.connectionpool
|
_validate_conn
|
Called right before a request is made, after the socket is created.
|
def _validate_conn(self, conn: HTTPConnection) -> None:
"""
Called right before a request is made, after the socket is created.
"""
if conn.is_closed:
conn.connect()
|
(self, conn: urllib3_future.connection.HTTPConnection) -> NoneType
|
710,124
|
urllib3_future.connectionpool
|
close
|
Close all pooled connections and disable the pool.
|
def close(self) -> None:
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
# Close all the HTTPConnections in the pool.
old_pool.clear()
# Close allocated resolver if we own it. (aka. not shared)
if self._own_resolver and self._resolver.is_available():
self._resolver.close()
|
(self) -> NoneType
|
710,125
|
urllib3_future.connectionpool
|
get_response
|
Retrieve the first response available in the pool.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none is available, ``None`` is returned.
|
def get_response(
self, *, promise: ResponsePromise | None = None
) -> HTTPResponse | None:
"""
Retrieve the first response available in the pool.
This method should be called after issuing at least one request with ``multiplexed=True``.
If none is available, ``None`` is returned.
"""
if self.pool is None:
raise ClosedPoolError(self, "Pool is closed")
if promise is not None and not isinstance(promise, ResponsePromise):
raise TypeError(
f"get_response only support ResponsePromise but received {type(promise)} instead. "
f"This may occur if you expected the remote peer to support multiplexing but did not."
)
try:
with self.pool.borrow(
promise or ResponsePromise,
block=promise is not None,
not_idle_only=promise is None,
) as conn:
response = conn.getresponse(promise=promise, police_officer=self.pool)
except UnavailableTraffic:
return None
if promise is not None and response is None:
raise ValueError(
"Invoked get_response with promise=... that no connection in pool recognize"
)
if response is None:
return None
from_promise = None
if promise:
from_promise = promise
else:
if (
response._fp
and hasattr(response._fp, "from_promise")
and response._fp.from_promise
):
from_promise = response._fp.from_promise
if from_promise is None:
raise ValueError(
"Internal: Unable to identify originating ResponsePromise from a LowLevelResponse"
)
self.pool.forget(from_promise)
# Retrieve request ctx
method = typing.cast(str, from_promise.get_parameter("method"))
redirect = typing.cast(bool, from_promise.get_parameter("redirect"))
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
url = typing.cast(str, from_promise.get_parameter("url"))
body = typing.cast(
typing.Optional[_TYPE_BODY], from_promise.get_parameter("body")
)
headers = typing.cast(HTTPHeaderDict, from_promise.get_parameter("headers"))
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Optional[_TYPE_TIMEOUT], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
new_promise = self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=True,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return self.get_response(promise=new_promise if promise else None)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
retries = typing.cast(Retry, from_promise.get_parameter("retries"))
if retries.is_retry(method, response.status, has_retry_after):
url = typing.cast(str, from_promise.get_parameter("url"))
body = typing.cast(
typing.Optional[_TYPE_BODY], from_promise.get_parameter("body")
)
headers = typing.cast(HTTPHeaderDict, from_promise.get_parameter("headers"))
preload_content = typing.cast(
bool, from_promise.get_parameter("preload_content")
)
decode_content = typing.cast(
bool, from_promise.get_parameter("decode_content")
)
timeout = typing.cast(
typing.Optional[_TYPE_TIMEOUT], from_promise.get_parameter("timeout")
)
assert_same_host = typing.cast(
bool, from_promise.get_parameter("assert_same_host")
)
pool_timeout = from_promise.get_parameter("pool_timeout")
response_kw = typing.cast(
typing.MutableMapping[str, typing.Any],
from_promise.get_parameter("response_kw"),
)
chunked = typing.cast(bool, from_promise.get_parameter("chunked"))
body_pos = typing.cast(
_TYPE_BODY_POSITION, from_promise.get_parameter("body_pos")
)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
new_promise = self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=False,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=True,
**response_kw,
)
return self.get_response(promise=new_promise if promise else None)
return response
|
(self, *, promise: Optional[urllib3_future.backend._base.ResponsePromise] = None) -> urllib3_future.response.HTTPResponse | None
|
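A hedged usage sketch of the multiplexed flow this method supports: issue several requests without blocking, then collect each response via its promise. It assumes the remote peer negotiates a multiplexed protocol and that ``HTTPSConnectionPool`` is exported at the package root as in urllib3:
from urllib3_future import HTTPSConnectionPool

with HTTPSConnectionPool("example.org") as pool:
    promises = [
        pool.urlopen("GET", f"/resource/{i}", multiplexed=True)
        for i in range(3)
    ]
    # Note: if the peer did not negotiate multiplexing, urlopen returns
    # a plain HTTPResponse instead of a ResponsePromise.
    for promise in promises:
        response = pool.get_response(promise=promise)
        if response is not None:
            print(response.status)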
710,127
|
urllib3_future._request_methods
|
request
|
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
|
def request(
self,
method: str,
url: str,
body: _TYPE_BODY | None = None,
fields: _TYPE_FIELDS | None = None,
headers: typing.Mapping[str, str] | None = None,
json: typing.Any | None = None,
**urlopen_kw: typing.Any,
) -> HTTPResponse | ResponsePromise:
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if json is not None and body is not None:
raise TypeError(
"request got values for both 'body' and 'json' parameters which are mutually exclusive"
)
if json is not None:
if headers is None:
headers = self.headers.copy() # type: ignore
if not ("content-type" in map(str.lower, headers.keys())):
headers["Content-Type"] = "application/json" # type: ignore
body = _json.dumps(json, separators=(",", ":"), ensure_ascii=False).encode(
"utf-8"
)
if body is not None:
urlopen_kw["body"] = body
if method in self._encode_url_methods:
return self.request_encode_url(
method,
url,
fields=fields, # type: ignore[arg-type]
headers=headers,
**urlopen_kw,
)
else:
return self.request_encode_body( # type: ignore[no-any-return]
method,
url,
fields=fields,
headers=headers,
**urlopen_kw,
)
|
(self, method: 'str', url: 'str', body: '_TYPE_BODY | None' = None, fields: '_TYPE_FIELDS | None' = None, headers: 'typing.Mapping[str, str] | None' = None, json: 'typing.Any | None' = None, **urlopen_kw: 'typing.Any') -> 'HTTPResponse | ResponsePromise'
|
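Usage sketch for the ``json`` parameter handled above: the payload is serialized compactly and a Content-Type of application/json is set unless one is already present. ``HTTPSConnectionPool`` as a top-level export is assumed, as in urllib3; the endpoint is a placeholder:
from urllib3_future import HTTPSConnectionPool

with HTTPSConnectionPool("example.org") as pool:
    response = pool.request(
        "POST",
        "/api/items",  # hypothetical endpoint
        json={"name": "widget", "qty": 2},
    )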
710,128
|
urllib3_future._request_methods
|
request_encode_body
|
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:func:`urllib3.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:func:`urllib.parse.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
|
def request_encode_body(
self,
method: str,
url: str,
fields: _TYPE_FIELDS | None = None,
headers: typing.Mapping[str, str] | None = None,
encode_multipart: bool = True,
multipart_boundary: str | None = None,
**urlopen_kw: typing.Any,
) -> HTTPResponse | ResponsePromise:
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:func:`urllib3.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:func:`urllib.parse.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw: dict[str, typing.Any] = {"headers": HTTPHeaderDict(headers)}
body: bytes | str
if fields:
if "body" in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one."
)
if encode_multipart:
body, content_type = encode_multipart_formdata(
fields, boundary=multipart_boundary
)
else:
body, content_type = (
urlencode(fields), # type: ignore[arg-type]
"application/x-www-form-urlencoded",
)
extra_kw["body"] = body
extra_kw["headers"].setdefault("Content-Type", content_type)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw) # type: ignore[no-any-return]
|
(self, method: 'str', url: 'str', fields: '_TYPE_FIELDS | None' = None, headers: 'typing.Mapping[str, str] | None' = None, encode_multipart: 'bool' = True, multipart_boundary: 'str | None' = None, **urlopen_kw: 'typing.Any') -> 'HTTPResponse | ResponsePromise'
|
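A short sketch of the two encodings described above, assuming ``HTTPSConnectionPool`` is a top-level export as in urllib3; paths and field names are placeholders:
from urllib3_future import HTTPSConnectionPool

with HTTPSConnectionPool("example.org") as pool:
    # multipart/form-data (default): required when uploading files.
    pool.request_encode_body(
        "POST",
        "/upload",
        fields={"file": ("report.txt", b"hello", "text/plain")},
    )
    # application/x-www-form-urlencoded: plain key/value form fields.
    pool.request_encode_body(
        "POST",
        "/form",
        fields={"q": "urllib3"},
        encode_multipart=False,
    )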
710,129
|
urllib3_future._request_methods
|
request_encode_url
|
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
|
def request_encode_url(
self,
method: str,
url: str,
fields: _TYPE_ENCODE_URL_FIELDS | None = None,
headers: typing.Mapping[str, str] | None = None,
**urlopen_kw: typing.Any,
) -> HTTPResponse | ResponsePromise:
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if headers is None:
headers = self.headers
extra_kw: dict[str, typing.Any] = {"headers": headers}
extra_kw.update(urlopen_kw)
if fields:
url += "?" + urlencode(fields)
return self.urlopen(method, url, **extra_kw) # type: ignore[no-any-return]
|
(self, method: 'str', url: 'str', fields: '_TYPE_ENCODE_URL_FIELDS | None' = None, headers: 'typing.Mapping[str, str] | None' = None, **urlopen_kw: 'typing.Any') -> 'HTTPResponse | ResponsePromise'
|
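Usage sketch: the fields are appended to the query string, which suits body-less methods. ``HTTPSConnectionPool`` as a top-level export is assumed:
from urllib3_future import HTTPSConnectionPool

with HTTPSConnectionPool("example.org") as pool:
    # Sends GET /search?q=pooling&page=2
    pool.request_encode_url("GET", "/search", fields={"q": "pooling", "page": "2"})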
710,130
|
urllib3_future.connectionpool
|
urlopen
|
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method
such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool's requests is
consistent, raising HostChangedError otherwise. When ``False``, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param bool preload_content:
If True, the response's body will be preloaded into memory.
:param bool decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of ``preload_content``
which defaults to ``True``.
:param bool chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param on_post_connection:
Callable invoked to inform you of the connection specifications used
for the request to be sent. See the ``urllib3.ConnectionInfo`` class for more.
:param on_upload_body:
Callable invoked during body upload so that progress can be tracked.
The values are expressed in bytes; the total may be unavailable, in
which case it is set to None. In order, the arguments are:
(total_sent, total_to_be_sent, completed, any_error)
:param multiplexed:
Dispatch the request in a non-blocking way; the response will then be
retrieved later with the get_response() method.
|
def urlopen(
self,
method: str,
url: str,
body: _TYPE_BODY | None = None,
headers: typing.Mapping[str, str] | None = None,
retries: Retry | bool | int | None = None,
redirect: bool = True,
assert_same_host: bool = True,
timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
pool_timeout: int | None = None,
release_conn: bool | None = None,
chunked: bool = False,
body_pos: _TYPE_BODY_POSITION | None = None,
preload_content: bool = True,
decode_content: bool = True,
on_post_connection: typing.Callable[[ConnectionInfo], None] | None = None,
on_upload_body: typing.Callable[[int, int | None, bool, bool], None]
| None = None,
multiplexed: bool = False,
**response_kw: typing.Any,
) -> HTTPResponse | ResponsePromise:
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method
such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param url:
The URL to perform the request on.
:param body:
Data to send in the request body, either :class:`str`, :class:`bytes`,
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool's requests is
consistent, raising HostChangedError otherwise. When ``False``, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param bool preload_content:
If True, the response's body will be preloaded into memory.
:param bool decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of ``preload_content``
which defaults to ``True``.
:param bool chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param on_post_connection:
Callable invoked to inform you of the connection specifications used
for the request to be sent. See the ``urllib3.ConnectionInfo`` class for more.
:param on_upload_body:
Callable invoked during body upload so that progress can be tracked.
The values are expressed in bytes; the total may be unavailable, in
which case it is set to None. In order, the arguments are:
(total_sent, total_to_be_sent, completed, any_error)
:param multiplexed:
Dispatch the request in a non-blocking way; the response will then be
retrieved later with the get_response() method.
"""
parsed_url = parse_url(url)
destination_scheme = parsed_url.scheme
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = preload_content
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = to_str(_encode_target(url))
else:
url = to_str(parsed_url.url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
http_tunnel_required = connection_requires_http_tunnel(
self.proxy, self.proxy_config, destination_scheme
)
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
# have to copy the headers dict so we can safely change it without those
# changes being reflected in anyone else's copy.
if not http_tunnel_required:
headers = headers.copy() # type: ignore[attr-defined]
headers.update(self.proxy_headers) # type: ignore[union-attr]
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout, heb_timeout=timeout_obj)
conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment]
# Is this a closed/new connection that requires CONNECT tunnelling?
if self.proxy is not None and http_tunnel_required and conn.is_closed:
try:
self._prepare_proxy(conn)
except (BaseSSLError, OSError, SocketTimeout) as e:
self._raise_timeout(
err=e, url=self.proxy.url, timeout_value=conn.timeout
)
raise
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Make the request on the HTTPConnection object
response = self._make_request( # type: ignore[call-overload,misc]
conn,
method,
url,
body=body,
headers=headers,
retries=retries,
timeout=timeout_obj,
chunked=chunked,
response_conn=response_conn,
preload_content=preload_content,
decode_content=decode_content,
enforce_content_length=True,
on_post_connection=on_post_connection,
on_upload_body=on_upload_body,
multiplexed=multiplexed,
)
# it was established a non-multiplexed connection. fallback to original behavior.
if not isinstance(response, ResponsePromise):
multiplexed = False
if multiplexed:
response.set_parameter("method", method)
response.set_parameter("url", url)
response.set_parameter("body", body)
response.set_parameter("headers", headers)
response.set_parameter("retries", retries)
response.set_parameter("preload_content", preload_content)
response.set_parameter("decode_content", decode_content)
response.set_parameter("timeout", timeout_obj)
response.set_parameter("redirect", redirect)
response.set_parameter("response_kw", response_kw)
response.set_parameter("pool_timeout", pool_timeout)
response.set_parameter("assert_same_host", assert_same_host)
response.set_parameter("chunked", chunked)
response.set_parameter("body_pos", body_pos)
release_this_conn = True if not conn.is_saturated else False
# Everything went great!
clean_exit = True
except EmptyPoolError:
# Didn't get a connection from the pool, no need to clean up
clean_exit = True
release_this_conn = False
raise
except (
TimeoutError,
OSError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
ProxyError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
new_e: Exception = e
if isinstance(e, (BaseSSLError, CertificateError)):
new_e = SSLError(e)
if isinstance(
new_e,
(
OSError,
NewConnectionError,
TimeoutError,
SSLError,
),
) and (conn and conn.proxy and not conn.has_connected_to_proxy):
new_e = _wrap_proxy_error(new_e, conn.proxy.scheme)
elif isinstance(new_e, OSError):
new_e = ProtocolError("Connection aborted.", new_e)
retries = retries.increment(
method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
if conn:
conn.close()
conn = None
release_this_conn = True
elif conn and conn.is_multiplexed is True:
# multiplexing allows us to issue more requests.
release_this_conn = True
if release_this_conn is True and conn is not None:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if (
clean_exit
and isinstance(response, ResponsePromise)
and self.pool is not None
):
self.pool.memorize(response, conn)
elif release_this_conn is True and self.pool is not None:
self.pool.kill_cursor()
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen( # type: ignore[no-any-return,call-overload,misc]
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=multiplexed,
**response_kw,
)
if multiplexed:
assert isinstance(response, ResponsePromise)
return response # actually a response promise!
assert isinstance(response, HTTPResponse)
if redirect and response.get_redirect_location():
# Handle redirect?
redirect_location = response.get_redirect_location()
if response.status == 303:
method = "GET"
body = None
headers = HTTPHeaderDict(headers)
for should_be_removed_header in NOT_FORWARDABLE_HEADERS:
headers.discard(should_be_removed_header)
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen( # type: ignore[call-overload,no-any-return,misc]
method,
redirect_location,
body=body,
headers=headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=False,
**response_kw,
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.headers.get("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
preload_content=preload_content,
decode_content=decode_content,
multiplexed=False,
**response_kw,
)
return response
|
(self, method: str, url: str, body: Union[bytes, IO[Any], Iterable[bytes], Iterable[str], str, urllib3_future.backend._base.LowLevelResponse, urllib3_future.backend._async._base.AsyncLowLevelResponse, NoneType] = None, headers: Optional[Mapping[str, str]] = None, retries: Union[urllib3_future.util.retry.Retry, bool, int, NoneType] = None, redirect: bool = True, assert_same_host: bool = True, timeout: Union[float, urllib3_future.util.timeout._TYPE_DEFAULT, urllib3_future.util.timeout.Timeout, NoneType] = <_TYPE_DEFAULT.token: -1>, pool_timeout: Optional[int] = None, release_conn: Optional[bool] = None, chunked: bool = False, body_pos: Union[int, urllib3_future.util.request._TYPE_FAILEDTELL, NoneType] = None, preload_content: bool = True, decode_content: bool = True, on_post_connection: Optional[Callable[[urllib3_future.backend._base.ConnectionInfo], NoneType]] = None, on_upload_body: Optional[Callable[[int, int | None, bool, bool], NoneType]] = None, multiplexed: bool = False, **response_kw: Any) -> urllib3_future.response.HTTPResponse | urllib3_future.backend._base.ResponsePromise
|
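A hedged sketch of the ``preload_content``/``release_conn`` interplay described in the docstring: stream the body without buffering it all, then hand the connection back to the pool explicitly. ``HTTPSConnectionPool`` as a top-level export is assumed; the path is a placeholder:
from urllib3_future import HTTPSConnectionPool

with HTTPSConnectionPool("example.org") as pool:
    r = pool.urlopen("GET", "/large-file", preload_content=False)
    try:
        for chunk in r.stream(64 * 1024):  # read in 64 KiB chunks
            ...
    finally:
        r.release_conn()  # return the connection to the pool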
710,131
|
urllib3_future._collections
|
HTTPHeaderDict
|
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
|
class HTTPHeaderDict(typing.MutableMapping[str, str]):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
_container: typing.MutableMapping[str, list[str]]
def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):
super().__init__()
self._container = {} # 'dict' is insert-ordered in Python 3.7+
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key: str, val: str) -> None:
# avoid a bytes/str comparison by decoding before httplib
self._container[_lower_wrapper(key)] = [key, val]
def __getitem__(self, key: str) -> str:
val = self._container[_lower_wrapper(key)]
return ", ".join(val[1:])
def __delitem__(self, key: str) -> None:
del self._container[_lower_wrapper(key)]
def __contains__(self, key: object) -> bool:
if isinstance(key, str):
return _lower_wrapper(key) in self._container
return False
def setdefault(self, key: str, default: str = "") -> str:
return super().setdefault(key, default)
def __eq__(self, other: object) -> bool:
maybe_constructable = ensure_can_construct_http_header_dict(other)
if maybe_constructable is None:
return False
else:
other_as_http_header_dict = type(self)(maybe_constructable)
return {_lower_wrapper(k): v for k, v in self.itermerged()} == {
_lower_wrapper(k): v for k, v in other_as_http_header_dict.itermerged()
}
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __len__(self) -> int:
return len(self._container)
def __iter__(self) -> typing.Iterator[str]:
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def discard(self, key: str) -> None:
try:
del self[key]
except KeyError:
pass
def add(self, key: str, val: str, *, combine: bool = False) -> None:
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
If this is called with combine=True, instead of adding a new header value
as a distinct item during iteration, this will instead append the value to
any existing header value with a comma. If no existing header value exists
for the key, then the value will simply be added, ignoring the combine parameter.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
>>> list(headers.items())
[('foo', 'bar'), ('foo', 'baz')]
>>> headers.add('foo', 'quz', combine=True)
>>> list(headers.items())
[('foo', 'bar, baz, quz')]
"""
key_lower = _lower_wrapper(key)
new_vals = [key, val]
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
# if there are values here, then there is at least the initial
# key/value pair
if combine:
vals[-1] = vals[-1] + ", " + val
else:
vals.append(val)
def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError(
f"extend() takes at most 1 positional arguments ({len(args)} given)"
)
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, typing.Mapping):
for key, val in other.items():
self.add(key, val)
elif isinstance(other, typing.Iterable):
other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)
for key, value in other:
self.add(key, value)
elif hasattr(other, "keys") and hasattr(other, "__getitem__"):
# THIS IS NOT A TYPESAFE BRANCH
# In this branch, the object has a `keys` attr but is not a Mapping or any of
# the other types indicated in the method signature. We do some stuff with
# it as though it partially implements the Mapping interface, but we're not
# doing that stuff safely AT ALL.
for key in other.keys():
self.add(key, other[key])
for key, value in kwargs.items():
self.add(key, value)
@typing.overload
def getlist(self, key: str) -> list[str]:
...
@typing.overload
def getlist(self, key: str, default: _DT) -> list[str] | _DT:
...
def getlist(
self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed
) -> list[str] | _DT:
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[_lower_wrapper(key)]
except KeyError:
if default is _Sentinel.not_passed:
# _DT is unbound; empty list is instance of List[str]
return []
# _DT is bound; default is instance of _DT
return default
else:
# _DT may or may not be bound; vals[1:] is instance of List[str], which
# meets our external interface requirement of `Union[List[str], _DT]`.
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
# Backwards compatibility for http.cookiejar
get_all = getlist
def __repr__(self) -> str:
return f"{type(self).__name__}({dict(self.itermerged())})"
def _copy_from(self, other: HTTPHeaderDict) -> None:
for key in other:
val = other.getlist(key)
self._container[_lower_wrapper(key)] = [key, *val]
def copy(self) -> HTTPHeaderDict:
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self) -> typing.Iterator[tuple[str, str]]:
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[_lower_wrapper(key)]
for val in vals[1:]:
yield vals[0], val
def itermerged(self) -> typing.Iterator[tuple[str, str]]:
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[_lower_wrapper(key)]
yield val[0], ", ".join(val[1:])
def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]
return HTTPHeaderDictItemView(self)
def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:
if header_name in self:
return potential_value in self._container[_lower_wrapper(header_name)][1:]
return False
|
(headers: 'ValidHTTPHeaderSource | None' = None, **kwargs: 'str')
|
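A brief usage sketch of the container defined above, matching its documented doctest behavior (the import path follows the module name recorded in this entry):
from urllib3_future._collections import HTTPHeaderDict

headers = HTTPHeaderDict()
headers.add("Set-Cookie", "foo=bar")
headers.add("set-cookie", "baz=quxx")  # keys compare case-insensitively
assert headers["SET-cookie"] == "foo=bar, baz=quxx"
assert headers.getlist("set-cookie") == ["foo=bar", "baz=quxx"]
headers["Set-Cookie"] = "reset=1"  # __setitem__ overwrites all values
assert headers.getlist("set-cookie") == ["reset=1"]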
710,132
|
urllib3_future._collections
|
__contains__
| null |
def __contains__(self, key: object) -> bool:
if isinstance(key, str):
return _lower_wrapper(key) in self._container
return False
|
(self, key: object) -> bool
|