code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
from __future__ import annotations

import time
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any

from .exceptions import AuthorizationPending, SlowDown
from .tokens import BearerToken

if TYPE_CHECKING:  # pragma: no cover
    from .client import OAuth2Client


class TokenEndpointPoolingJob(ABC):
    """Base class for Token Endpoint pooling (i.e. polling) jobs.

    This is used for decoupled flows like CIBA or Device Authorization, where the
    client must repeatedly call the Token Endpoint until the user finishes the
    authorization process. This class must be subclassed to implement actual
    BackChannel flows, by providing a `token_request()` implementation.

    This needs an [OAuth2Client][requests_oauth2client.client.OAuth2Client] that will be used
    to pool the token endpoint. The initial pooling `interval` is configurable.

    Args:
        client: the [OAuth2Client][requests_oauth2client.client.OAuth2Client] that will
            be used to pool the token endpoint.
        interval: initial pooling interval, in seconds. If `None`, default to `5`.
        slow_down_interval: when a [SlowDown][requests_oauth2client.exceptions.SlowDown] is received,
            this number of seconds will be added to the pooling interval.
        requests_kwargs: additional parameters for the underlying calls to [requests.request][]
        **token_kwargs: additional parameters for the token request
    """

    def __init__(
        self,
        client: OAuth2Client,
        interval: int | None = None,
        slow_down_interval: int = 5,
        requests_kwargs: dict[str, Any] | None = None,
        **token_kwargs: Any,
    ):
        self.client = client
        # NOTE(review): `interval or 5` also maps an explicit `0` to 5 — presumably
        # deliberate, since a zero-second polling interval would hammer the AS.
        self.interval = interval or 5
        self.slow_down_interval = slow_down_interval
        self.requests_kwargs = requests_kwargs
        self.token_kwargs = token_kwargs

    def __call__(self) -> BearerToken | None:
        """Wrap the actual Token Endpoint call with a pooling interval.

        Everytime this method is called, it will wait for the entire duration of the
        pooling interval before calling
        [token_request()][requests_oauth2client.pooling.TokenEndpointPoolingJob.token_request].
        So you can call it immediately after initiating the BackChannel flow, and it will
        wait before initiating the first call.

        This implements the logic to handle
        [AuthorizationPending][requests_oauth2client.exceptions.AuthorizationPending]
        or [SlowDown][requests_oauth2client.exceptions.SlowDown] requests by the AS.

        Returns:
            a [BearerToken][requests_oauth2client.tokens.BearerToken] if the AS returns one,
            or `None` if the Authorization is still pending.
        """
        time.sleep(self.interval)
        try:
            return self.token_request()
        except SlowDown:
            # the AS asked us to slow down: widen the interval for subsequent calls
            self.interval += self.slow_down_interval
        except AuthorizationPending:
            # the user has not finished authorizing yet: caller should retry later
            pass
        return None

    @abstractmethod
    def token_request(self) -> BearerToken:
        """Abstract method for the token endpoint call.

        This must be implemented by subclasses. This method must raise
        [AuthorizationPending][requests_oauth2client.exceptions.AuthorizationPending]
        to retry after the pooling interval, or
        [SlowDown][requests_oauth2client.exceptions.SlowDown] to increase the pooling
        interval by `slow_down_interval` seconds.

        Returns:
            a [BearerToken][requests_oauth2client.tokens.BearerToken]
        """
        raise NotImplementedError  # pragma: no cover
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/pooling.py
0.936807
0.242441
pooling.py
pypi
from __future__ import annotations

from typing import TYPE_CHECKING, Any

import requests
from typing_extensions import override

from .authorization_request import AuthorizationResponse
from .device_authorization import DeviceAuthorizationResponse
from .exceptions import ExpiredAccessToken
from .tokens import BearerToken

if TYPE_CHECKING:  # pragma: no cover
    from .client import OAuth2Client


class BearerAuth(requests.auth.AuthBase):
    """An Auth Handler that includes a Bearer Token in API calls, as defined in [RFC6750$2.1].

    As a prerequisite to using this `AuthBase`, you have to obtain an access token manually.
    You most likely don't want to do that by yourself, but instead use an instance of
    [OAuth2Client][requests_oauth2client.client.OAuth2Client] to do that for you. See the others
    Auth Handlers in this module, which will automatically obtain access tokens from an OAuth 2.x
    server.

    [RFC6750$2.1]: https://datatracker.ietf.org/doc/html/rfc6750#section-2.1

    Usage:
        ```python
        auth = BearerAuth("my_access_token")
        resp = requests.get("https://my.api.local/resource", auth=auth)
        ```

        The HTTP request will look like:
        ```
        GET /resource HTTP/1.1
        Host: my.api.local
        Authorization: Bearer my_access_token
        ```

    Args:
        token: a [BearerToken][requests_oauth2client.tokens.BearerToken] or a string to use as
            token for this Auth Handler. If `None`, this Auth Handler is a no op.
    """

    def __init__(self, token: str | BearerToken | None = None) -> None:
        # assignment goes through the `token` setter below, which wraps plain strings
        self.token = token  # type: ignore[assignment] # until https://github.com/python/mypy/issues/3004 is fixed

    @property
    def token(self) -> BearerToken | None:
        """Return the [BearerToken] that is used for authorization against the API.

        Returns:
            the configured [BearerToken][requests_oauth2client.tokens.BearerToken] used with this
            AuthHandler.
        """
        return self._token

    @token.setter
    def token(self, token: str | BearerToken | None) -> None:
        """Change the access token used with this AuthHandler.

        Accepts a [BearerToken][requests_oauth2client.tokens.BearerToken] or an access token as
        `str`.

        Args:
            token: an access token to use for this Auth Handler
        """
        # normalize raw strings into BearerToken instances; `None` means "no token"
        if token is not None and not isinstance(token, BearerToken):
            token = BearerToken(token)
        self._token = token

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Implement the usage of Bearer Tokens in requests.

        This will add a properly formatted `Authorization: Bearer <token>` header in the request.

        If the configured token is a instance of BearerToken with an expires_at attribute, raises
        [ExpiredAccessToken][requests_oauth2client.exceptions.ExpiredAccessToken] once the access
        token is expired.

        Args:
            request: a [PreparedRequest][requests.PreparedRequest]

        Returns:
            a [PreparedRequest][requests.PreparedRequest] with an Access Token added in
            Authorization Header
        """
        if self.token is None:
            return request
        if self.token.is_expired():
            raise ExpiredAccessToken(self.token)
        request.headers["Authorization"] = self.token.authorization_header()
        return request


class BaseOAuth2RenewableTokenAuth(BearerAuth):
    """Base class for Bearer Token based Auth Handlers, with on obtainable or renewable token.

    In addition to adding a properly formatted `Authorization` header, this will obtain a new
    token once the current token is expired. Expiration is detected based on the `expires_in`
    hint returned by the AS. A configurable `leeway`, in number of seconds, will make sure that a
    new token is obtained some seconds before the actual expiration is reached. This may help in
    situations where the client, AS and RS have slightly offset clocks.

    Args:
        client: an OAuth2Client
        token: an initial Access Token, if you have one already. In most cases, leave `None`.
        leeway: expiration leeway, in number of seconds
        token_kwargs: additional kwargs to include in token requests
    """

    def __init__(
        self,
        client: OAuth2Client,
        token: None | BearerToken | str = None,
        leeway: int = 20,
        **token_kwargs: Any,
    ) -> None:
        super().__init__(token)
        self.client = client
        self.leeway = leeway
        self.token_kwargs = token_kwargs

    def __call__(
        self, request: requests.PreparedRequest
    ) -> requests.PreparedRequest:  # noqa: D102
        # renew ahead of actual expiry, by `leeway` seconds, to absorb clock skew
        token = self.token
        if token is None or token.is_expired(self.leeway):
            self.renew_token()
        return super().__call__(request)

    def renew_token(self) -> None:
        """Obtain a new Bearer Token.

        This should be implemented by subclasses.
        """
        raise NotImplementedError

    def forget_token(self) -> None:
        """Forget the current token, forcing a renewal on the next usage of this Auth Handler."""
        self.token = None


class OAuth2ClientCredentialsAuth(BaseOAuth2RenewableTokenAuth):
    """An Auth Handler for the Client Credentials grant.

    This [requests AuthBase][requests.auth.AuthBase] automatically gets Access Tokens from an
    OAuth 2.0 Token Endpoint with the Client Credentials grant, and will get a new one once the
    current one is expired.

    Args:
        client: the [OAuth2Client][requests_oauth2client.client.OAuth2Client] to use to obtain
            Access Tokens.
        **token_kwargs: extra kw parameters to pass to the Token Endpoint. May include `scope`,
            `resource`, etc.

    Usage:
        ```python
        client = OAuth2Client(
            token_endpoint="https://my.as.local/token", auth=("client_id", "client_secret")
        )
        oauth2cc = OAuth2ClientCredentialsAuth(client, scope="my_scope")
        resp = requests.post("https://my.api.local/resource", auth=oauth2cc)
        ```
    """

    def renew_token(self) -> None:
        """Obtain a new token for use within this Auth Handler."""
        self.token = self.client.client_credentials(**self.token_kwargs)


class OAuth2AccessTokenAuth(BaseOAuth2RenewableTokenAuth):
    """Authentication Handler for OAuth 2.0 Access Tokens and (optional) Refresh Tokens.

    This [Requests Auth handler][requests.auth.AuthBase] implementation uses an access token as
    Bearer token, and can automatically refresh it when expired, if a refresh token is available.

    Token can be a simple `str` containing a raw access token value, or a
    [BearerToken][requests_oauth2client.tokens.BearerToken] that can contain a refresh_token. If
    a refresh_token and an expiration date are available, this Auth Handler will automatically
    refresh the access token once it is expired.

    Args:
        client: the [OAuth2Client][requests_oauth2client.client.OAuth2Client] to use to refresh
            tokens.
        token: a access token that has been previously obtained
        **token_kwargs: additional kwargs to pass to the token endpoint

    Usage:
        ```python
        client = OAuth2Client(token_endpoint="https://my.as.local/token", auth=("client_id", "client_secret"))
        token = BearerToken(
            access_token="access_token",
            expires_in=600,
            refresh_token="refresh_token")  # obtain a BearerToken any way you see fit, including a refresh token
        oauth2at_auth = OAuth2AccessTokenAuth(client, token, scope="my_scope")
        resp = requests.post("https://my.api.local/resource", auth=oauth2at_auth)
        ```
    """

    def renew_token(self) -> None:
        """Obtain a new token, using the Refresh Token, if available."""
        # without a refresh token there is nothing to do: the expired-token error
        # will surface from BearerAuth.__call__ instead
        if self.token and self.token.refresh_token and self.client is not None:
            self.token = self.client.refresh_token(
                refresh_token=self.token.refresh_token, **self.token_kwargs
            )


class OAuth2AuthorizationCodeAuth(OAuth2AccessTokenAuth):
    """Authentication handler for the Authorization Code grant.

    This [Requests Auth handler][requests.auth.AuthBase] implementation exchanges an
    Authorization Code for an access token, then automatically refreshes it once it is expired.

    Args:
        client: the [OAuth2Client][requests_oauth2client.client.OAuth2Client] to use to obtain
            Access Tokens.
        code: an Authorization Code that has been obtained from the AS.
        **token_kwargs: additional kwargs to pass to the token endpoint

    Usage:
        ```python
        client = OAuth2Client(token_endpoint="https://my.as.local/token", auth=("client_id", "client_secret"))
        code = "my_code"  # you must obtain this code yourself
        resp = requests.post("https://my.api.local/resource", auth=OAuth2AuthorizationCodeAuth(client, code))
        ```
    """

    def __init__(
        self,
        client: OAuth2Client,
        code: str | AuthorizationResponse,
        leeway: int = 20,
        **token_kwargs: Any,
    ) -> None:
        super().__init__(client, token=None, leeway=leeway, **token_kwargs)
        # consumed (set to None) once exchanged for a token
        self.code: str | AuthorizationResponse | None = code

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Implement the Authorization Code grant as an Authentication Handler.

        This exchanges an Authorization Code for an access token and adds it in the request.

        Args:
            request: a [PreparedRequest][requests.PreparedRequest]

        Returns:
            a [PreparedRequest][requests.PreparedRequest] with an Access Token added in
            Authorization Header
        """
        token = self.token
        if token is None or token.is_expired():
            self.exchange_code_for_token()
        return super().__call__(request)

    def exchange_code_for_token(self) -> None:
        """Obtain the initial access token with the authorization_code grant."""
        if self.code:  # pragma: no branch
            self.token = self.client.authorization_code(code=self.code, **self.token_kwargs)
            # authorization codes are single-use: drop it after the exchange
            self.code = None


class OAuth2ResourceOwnerPasswordAuth(BaseOAuth2RenewableTokenAuth):
    """Authentication Handler for the [Resource Owner Password Flow](https://www.rfc-editor.org/rfc/rfc6749#section-4.3).

    This [Requests Auth handler][requests.auth.AuthBase] implementation exchanges the user
    credentials for an Access Token, then automatically obtains a new one once it is expired.

    Note that this flow is considered *deprecated*, and the Authorization Code flow should be
    used whenever possible. Among other bad things, ROPC does not support SSO nor MFA and
    depends on the user typing its credentials directly inside the application instead of on a
    dedicated login page, which makes it totally insecure for 3rd party apps.

    It needs the username and password and an
    [OAuth2Client][requests_oauth2client.client.OAuth2Client] to be able to get a token from
    the AS Token Endpoint just before the first request using this Auth Handler is being sent.

    Args:
        client: the [OAuth2Client][requests_oauth2client.client.OAuth2Client] to use to obtain
            Access Tokens.
        username: the username.
        password: the user password.
        leeway: an amount of time, in seconds.
        **token_kwargs: additional kwargs to pass to the token endpoint.
    """

    def __init__(
        self,
        client: OAuth2Client,
        username: str,
        password: str,
        leeway: int = 20,
        **token_kwargs: Any,
    ):
        super().__init__(client=client, leeway=leeway, **token_kwargs)
        self.username = username
        self.password = password

    @override
    def renew_token(self) -> None:
        """Exchange the user credentials for an Access Token."""
        self.token = self.client.resource_owner_password(
            username=self.username,
            password=self.password,
            **self.token_kwargs,
        )


class OAuth2DeviceCodeAuth(OAuth2AccessTokenAuth):
    """Authentication Handler for the [Device Code Flow](https://www.rfc-editor.org/rfc/rfc8628).

    This [Requests Auth handler][requests.auth.AuthBase] implementation exchanges a Device Code
    for an Access Token, then automatically refreshes it once it is expired.

    It needs a Device Code and an [OAuth2Client][requests_oauth2client.client.OAuth2Client] to
    be able to get a token from the AS Token Endpoint just before the first request using this
    Auth Handler is being sent.

    Args:
        client: the [OAuth2Client][requests_oauth2client.client.OAuth2Client] to use to obtain
            Access Tokens.
        device_code: a Device Code obtained from the AS.
        interval: the interval to use to pool the Token Endpoint, in seconds.
        expires_in: the lifetime of the token, in seconds.
        **token_kwargs: additional kwargs to pass to the token endpoint.

    Usage:
        ```python
        client = OAuth2Client(token_endpoint="https://my.as.local/token", auth=("client_id", "client_secret"))
        device_code = client.device_authorization()
        auth = OAuth2DeviceCodeAuth(client, device_code)
        resp = requests.post("https://my.api.local/resource", auth=auth)
        ```
    """

    def __init__(
        self,
        client: OAuth2Client,
        device_code: str | DeviceAuthorizationResponse,
        leeway: int = 20,
        interval: int = 5,
        expires_in: int = 360,
        **token_kwargs: Any,
    ) -> None:
        super().__init__(client=client, leeway=leeway, token=None, **token_kwargs)
        # consumed (set to None) once exchanged for a token
        self.device_code: str | DeviceAuthorizationResponse | None = device_code
        self.interval = interval
        # NOTE(review): `expires_in` is stored but not consulted by the polling loop
        # below — confirm whether a timeout was intended.
        self.expires_in = expires_in

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Implement the Device Code grant as a request Authentication Handler.

        This exchanges a Device Code for an access token and adds it in HTTP requests.

        Args:
            request: a [requests.PreparedRequest][]

        Returns:
            a [requests.PreparedRequest][] with an Access Token added in Authorization Header
        """
        token = self.token
        if token is None or token.is_expired():
            self.exchange_device_code_for_token()
        return super().__call__(request)

    def exchange_device_code_for_token(self) -> None:
        """Exchange the Device Code for an access token.

        This will poll the Token Endpoint until the user finishes the authorization process.
        """
        # local import to avoid a circular import at module load time
        from .device_authorization import DeviceAuthorizationPoolingJob

        if self.device_code:  # pragma: no branch
            pooling_job = DeviceAuthorizationPoolingJob(
                client=self.client,
                device_code=self.device_code,
                interval=self.interval,
            )
            # each call sleeps for `interval` then tries the token endpoint;
            # it returns None while authorization is still pending
            while self.token is None:
                self.token = pooling_job()
            self.device_code = None
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/auth.py
0.917659
0.505066
auth.py
pypi
from __future__ import annotations from datetime import datetime, timedelta from functools import wraps from typing import Any, Callable from furl import furl # type: ignore[import] def validate_endpoint_uri( uri: str, https: bool = True, no_fragment: bool = True, path: bool = True ) -> None: """Validate that an URI is suitable as an endpoint URI. It checks: - that the scheme is `https` - that no fragment is included - that a path is present Those check can be individually disabled using the parameters `https`, `no_fragment` and `path`. Args: uri: the uri https: if `True`, check that the uri is https no_fragment: if `True`, check that the uri contains no fragment path: if `True`, check that the uri contains a path component Raises: ValueError: if the supplied url is not suitable """ url = furl(uri) if https and url.scheme != "https": raise ValueError("url must use https") if no_fragment and url.fragment: raise ValueError("url must not contain a fragment") if path and (not url.path or url.path == "/"): raise ValueError("url has no path") def accepts_expires_in(f: Callable[..., Any]) -> Callable[..., Any]: """Decorate methods to handle both `expires_at` and `expires_in`. This decorates methods that accept an `expires_at` datetime parameter, to also allow an `expires_in` parameter in seconds. If supplied, `expires_in` will be converted to a datetime `expires_in` seconds in the future, and passed as `expires_at` in the decorated method. Args: f: the method to decorate, with an `expires_at` parameter Returns: a decorated method that accepts either `expires_in` or `expires_at`. """ @wraps(f) def decorator( *args: Any, expires_in: int | None = None, expires_at: datetime | None = None, **kwargs: Any, ) -> Any: if expires_in is None and expires_at is None: return f(*args, **kwargs) if expires_in and isinstance(expires_in, int) and expires_in >= 1: expires_at = datetime.now() + timedelta(seconds=expires_in) return f(*args, expires_at=expires_at, **kwargs) return decorator
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/utils.py
0.925217
0.485783
utils.py
pypi
from __future__ import annotations

import pprint
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING, Any, Callable

import jwskate
from binapy import BinaPy

from .exceptions import (
    ExpiredIdToken,
    InvalidIdToken,
    MismatchingAcr,
    MismatchingAudience,
    MismatchingAzp,
    MismatchingIdTokenAlg,
    MismatchingIssuer,
    MismatchingNonce,
    MissingIdToken,
)
from .utils import accepts_expires_in

if TYPE_CHECKING:
    from .authorization_request import AuthorizationResponse
    from .client import OAuth2Client


class IdToken(jwskate.SignedJwt):
    """Represent an ID Token.

    An ID Token is actually a Signed JWT. If the ID Token is encrypted, it must be decoded
    beforehand.
    """

    @property
    def auth_time(self) -> datetime:
        """The last user authentication time.

        Raises:
            AttributeError: if the ID Token carries no `auth_time` claim.
        """
        auth_time = self.claims.get("auth_time")
        if auth_time:
            return self.timestamp_to_datetime(auth_time)
        raise AttributeError("This ID Token doesn't have an `auth_time` attribute.")


class BearerToken:
    """Represents a Bearer Token as returned by a Token Endpoint.

    This is a wrapper around a Bearer Token and associated parameters, such as expiration date
    and refresh token, as returned by an OAuth 2.x or OIDC 1.0 Token Endpoint.

    All parameters are as returned by a Token Endpoint. The token expiration date can be passed
    as datetime in the `expires_at` parameter, or an `expires_in` parameter, as number of
    seconds in the future, can be passed instead.

    Args:
        access_token: an `access_token`, as returned by the AS.
        expires_at: an expiration date. This method also accepts an `expires_in` hint as
            returned by the AS, if any.
        scope: a `scope`, as returned by the AS, if any.
        refresh_token: a `refresh_token`, as returned by the AS, if any.
        token_type: a `token_type`, as returned by the AS.
        id_token: an `id_token`, as returned by the AS, if any.
        **kwargs: additional parameters as returned by the AS, if any.

    Raises:
        ValueError: if `token_type` is not `Bearer` (case-insensitive).
        InvalidIdToken: if `id_token` parses as neither a signed JWT nor a compact JWE.
    """

    TOKEN_TYPE = "Bearer"

    @accepts_expires_in
    def __init__(
        self,
        access_token: str,
        *,
        expires_at: datetime | None = None,
        scope: str | None = None,
        refresh_token: str | None = None,
        token_type: str = TOKEN_TYPE,
        id_token: str | None = None,
        **kwargs: Any,
    ):
        # case-insensitive comparison: "bearer", "Bearer", "BEARER" are all accepted
        if token_type.title() != self.TOKEN_TYPE.title():
            raise ValueError(f"Token Type is not '{self.TOKEN_TYPE}'!", token_type)
        self.access_token = access_token
        self.expires_at = expires_at
        self.scope = scope
        self.refresh_token = refresh_token
        # the raw id_token compact serialization is parsed eagerly: first as a signed
        # JWT, then as an encrypted JWE, failing loudly if it is neither
        self.id_token: IdToken | jwskate.JweCompact | None = None
        if id_token:
            try:
                self.id_token = IdToken(id_token)
            except jwskate.InvalidJwt:
                try:
                    self.id_token = jwskate.JweCompact(id_token)
                except jwskate.InvalidJwe:
                    raise InvalidIdToken(
                        "ID Token is invalid because it is neither a JWT or a JWE."
                    )
        # any extra token-response members are kept verbatim and exposed via __getattr__
        self.other = kwargs

    def is_expired(self, leeway: int = 0) -> bool | None:
        """Check if the access token is expired.

        Args:
            leeway: If the token expires in the next given number of seconds,
                then consider it expired already.

        Returns:
            `True` if the access token is expired, `False` if it is still valid, `None` if there
            is no expires_in hint.
        """
        if self.expires_at:
            return datetime.now() + timedelta(seconds=leeway) > self.expires_at
        return None

    def authorization_header(self) -> str:
        """Return the appropriate Authorization Header value for this token.

        The value is formatted correctly according to RFC6750.

        Returns:
            the value to use in a HTTP Authorization Header
        """
        return f"Bearer {self.access_token}"

    def validate_id_token(  # noqa: C901
        self, client: OAuth2Client, azr: AuthorizationResponse
    ) -> IdToken:
        """Validate that a token response is valid, and return the ID Token.

        This will validate the id_token as described in
        [OIDC 1.0 $3.1.3.7](https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation).

        If the ID Token was encrypted, this decrypts it and returns the clear-text ID Token.

        Args:
            client: the client whose configuration (expected algs, keys, client_id)
                drives the validation.
            azr: the authorization response carrying the expected issuer, nonce,
                acr_values, state, code and max_age to check against.

        Returns:
            the validated (and decrypted, when applicable) ID Token.

        Raises:
            MissingIdToken: if this token response contains no ID Token.
            InvalidIdToken: on any structural or hash-validation failure.
            MismatchingIdTokenAlg: if the signature alg differs from the configured one.
            MismatchingIssuer: if `iss` does not match the expected issuer.
            MismatchingAudience: if `aud` does not include this client_id.
            MismatchingAzp: if `azp` does not match this client_id.
            MismatchingNonce: if `nonce` does not match the request nonce.
            MismatchingAcr: if `acr` is not one of the requested acr_values.
            ExpiredIdToken: if the ID Token is expired.
        """
        if not self.id_token:
            raise MissingIdToken()

        raw_id_token = self.id_token
        # the token's encryption state must agree with the client configuration
        if (
            isinstance(raw_id_token, jwskate.JweCompact)
            and client.id_token_encrypted_response_alg is None
        ):
            raise InvalidIdToken("ID Token is encrypted while it should be clear-text", self)
        elif (
            isinstance(raw_id_token, IdToken)
            and client.id_token_encrypted_response_alg is not None
        ):
            raise InvalidIdToken("ID Token is clear-text while it should be encrypted", self)

        # decrypt the nested signed JWT when the ID Token arrived as a JWE
        if isinstance(raw_id_token, jwskate.JweCompact):
            enc_jwk = client.id_token_decryption_key
            if enc_jwk is None:
                raise InvalidIdToken(
                    "ID Token is encrypted but client does not have a decryption key", self
                )
            nested_id_token = raw_id_token.decrypt(enc_jwk)
            id_token = IdToken(nested_id_token)
        else:
            id_token = raw_id_token

        # determine which signature algorithm to verify against: the token's `alg`
        # header must be present or the client must have one configured
        if id_token.get_header("alg") is None and client.id_token_signed_response_alg is None:
            raise InvalidIdToken(
                "ID Token does not contain an `alg` parameter to specify the signature algorithm, "
                "and no algorithm has been configured for the client (using param id_token_signed_response_alg`."
            )
        elif (
            client.id_token_signed_response_alg is not None
            and id_token.alg != client.id_token_signed_response_alg
        ):
            raise MismatchingIdTokenAlg(id_token.alg, client.id_token_signed_response_alg)

        id_token_alg = id_token.alg or client.id_token_signed_response_alg

        # claim checks per OIDC Core 3.1.3.7
        if azr.issuer:
            if id_token.issuer != azr.issuer:
                raise MismatchingIssuer(id_token.issuer, azr.issuer, self)
        if id_token.audiences and client.client_id not in id_token.audiences:
            raise MismatchingAudience(id_token.audiences, client.client_id, self)
        if id_token.get_claim("azp") is not None and id_token.azp != client.client_id:
            raise MismatchingAzp(id_token.azp, client.client_id, self)
        if id_token.is_expired():
            raise ExpiredIdToken(id_token)
        if azr.nonce:
            if id_token.nonce != azr.nonce:
                raise MismatchingNonce()
        if azr.acr_values:
            if id_token.acr not in azr.acr_values:
                raise MismatchingAcr(id_token.acr, azr.acr_values)

        hash_function: Callable[[str], str]  # method used to calculate at_hash, s_hash, etc.

        if id_token_alg in jwskate.SignatureAlgs.ALL_SYMMETRIC:
            if not client.client_secret:
                raise InvalidIdToken(
                    "ID Token is symmetrically signed but this client does not have a Client Secret."
                )
            id_token.verify_signature(
                jwskate.SymmetricJwk.from_bytes(client.client_secret), alg=id_token_alg
            )
        elif id_token_alg in jwskate.SignatureAlgs.ALL_ASYMMETRIC:
            if not client.authorization_server_jwks:
                raise InvalidIdToken(
                    "ID Token is asymmetrically signed but the Authorization Server JWKS is not available."
                )
            if id_token.get_header("kid") is None:
                raise InvalidIdToken(
                    "ID Token does not contain a Key ID (kid) to specify the asymmetric key to use for signature verification."
                )
            try:
                verification_jwk = client.authorization_server_jwks.get_jwk_by_kid(id_token.kid)
            except KeyError:
                raise InvalidIdToken(
                    "ID Token is asymmetrically signed but its Key ID is not part of the Authorization Server JWKS."
                )
            if id_token_alg not in verification_jwk.supported_signing_algorithms():
                raise InvalidIdToken(
                    "ID Token is asymmetrically signed but its algorithm is not supported by the verification key."
                )
            id_token.verify_signature(verification_jwk, alg=id_token_alg)

            # the *_hash claims are truncated digests of the access token, code and
            # state, using the hash underlying the signature alg (special-cased for EdDSA)
            # NOTE(review): these hash checks only run on the asymmetric path, since
            # hash_function depends on verification_jwk — confirm symmetric tokens with
            # at_hash/c_hash/s_hash claims are intentionally not hash-checked here.
            alg_class = jwskate.select_alg_class(
                verification_jwk.SIGNATURE_ALGORITHMS, jwk_alg=id_token_alg
            )
            if alg_class == jwskate.EdDsa:
                if verification_jwk.crv == "Ed25519":
                    hash_function = (
                        lambda token: BinaPy(token).to("sha512")[:32].to("b64u").ascii()
                    )
                elif verification_jwk.crv == "Ed448":
                    hash_function = (
                        lambda token: BinaPy(token).to("shake256", 456).to("b64u").ascii()
                    )
            else:
                hash_alg = alg_class.hashing_alg.name
                hash_size = alg_class.hashing_alg.digest_size
                # left-most half of the digest, base64url-encoded, per OIDC Core
                hash_function = (
                    lambda token: BinaPy(token)
                    .to(hash_alg)[: hash_size // 2]
                    .to("b64u")
                    .ascii()
                )

            at_hash = id_token.get_claim("at_hash")
            if at_hash is not None:
                expected_at_hash = hash_function(self.access_token)
                if expected_at_hash != at_hash:
                    raise InvalidIdToken(
                        f"Mismatching 'at_hash' value: expected '{expected_at_hash}', got '{at_hash}'"
                    )

            c_hash = id_token.get_claim("c_hash")
            if c_hash is not None:
                expected_c_hash = hash_function(azr.code)
                if expected_c_hash != c_hash:
                    raise InvalidIdToken(
                        f"Mismatching 'c_hash' value: expected '{expected_c_hash}', got '{c_hash}'"
                    )

            s_hash = id_token.get_claim("s_hash")
            if s_hash is not None:
                if azr.state is None:
                    raise InvalidIdToken(
                        "ID Token has a 's_hash' claim but no state was included in the request."
                    )
                expected_s_hash = hash_function(azr.state)
                if expected_s_hash != s_hash:
                    raise InvalidIdToken(
                        f"Mismatching 's_hash' value (expected '{expected_s_hash}', got '{s_hash}'"
                    )

        if azr.max_age is not None:
            try:
                auth_time = id_token.auth_time
            except AttributeError:
                raise InvalidIdToken(
                    "A `max_age` parameter was included in the authorization request, "
                    "but the ID Token does not contain an `auth_time` claim."
                )
            auth_age = datetime.now(tz=timezone.utc) - auth_time
            # 60 seconds of tolerance on top of the requested max_age
            if auth_age.seconds > azr.max_age + 60:
                raise InvalidIdToken(
                    "User authentication happened too long ago. "
                    "The `auth_time` parameter from the ID Token indicate that the last Authentication Time "
                    f"was at {auth_time} ({auth_age.seconds} sec ago), but the authorization request `max_age` "
                    f"parameter specified that it must be maximum {azr.max_age} sec ago."
                )

        return id_token

    def __str__(self) -> str:
        """Return the access token value, as a string.

        Returns:
            the access token string
        """
        return self.access_token

    def __contains__(self, key: str) -> bool:
        """Check existence of a key in the token response.

        Allows testing like `assert "refresh_token" in token_response`.

        Args:
            key: a key

        Returns:
            `True` if the key exists in the token response, `False` otherwise
        """
        if key == "access_token":
            return True
        elif key == "refresh_token":
            return self.refresh_token is not None
        elif key == "scope":
            return self.scope is not None
        elif key == "token_type":
            return True
        elif key == "expires_in":
            return self.expires_at is not None
        elif key == "id_token":
            return self.id_token is not None
        else:
            return key in self.other

    def __getattr__(self, key: str) -> Any:
        """Return attributes from this BearerToken.

        Allows `token_response.expires_in` or `token_response.any_custom_attribute`.

        Args:
            key: a key

        Returns:
            the associated value in this token response

        Raises:
            AttributeError: if the attribute is not found in this response.
        """
        if key == "expires_in":
            if self.expires_at is None:
                return None
            # expires_in is derived on the fly from the stored expires_at datetime
            return int(self.expires_at.timestamp() - datetime.now().timestamp())
        elif key == "token_type":
            return self.TOKEN_TYPE
        # NOTE(review): an extra attribute with a falsy value (0, "", False) falls
        # through to __getattribute__ and raises AttributeError — confirm intended.
        return self.other.get(key) or super().__getattribute__(key)

    def as_dict(self, expires_at: bool = False) -> dict[str, Any]:
        """Return all attributes from this BearerToken as a `dict`.

        Args:
            expires_at: if `True`, the dict will contain an extra `expires_at` field with the
                token expiration date.

        Returns:
            a `dict` containing this BearerToken's attributes.
        """
        r: dict[str, Any] = {
            "access_token": self.access_token,
            "token_type": self.TOKEN_TYPE,
        }
        if self.expires_at:
            r["expires_in"] = self.expires_in
            if expires_at:
                r["expires_at"] = self.expires_at
        if self.scope:
            r["scope"] = self.scope
        if self.refresh_token:
            r["refresh_token"] = self.refresh_token
        if self.id_token:
            r["id_token"] = str(self.id_token)
        if self.other:
            r.update(self.other)
        return r

    def __repr__(self) -> str:
        """Return a representation of this BearerToken.

        This representation is a pretty formatted `dict` that looks like a Token Endpoint
        response.

        Returns:
            a `str` representation of this BearerToken.
        """
        return pprint.pformat(self.as_dict())

    def __eq__(self, other: object) -> bool:
        """Check if this BearerToken is equal to another.

        It supports comparison with another BearerToken, or with an `access_token` as `str`.

        Args:
            other: another token to compare to

        Returns:
            `True` if equal, `False` otherwise
        """
        if isinstance(other, BearerToken):
            return (
                self.access_token == other.access_token
                and self.refresh_token == other.refresh_token
                and self.expires_at == other.expires_at
                and self.token_type == other.token_type
            )
        elif isinstance(other, str):
            return self.access_token == other
        return super().__eq__(other)


class BearerTokenSerializer:
    """A helper class to serialize Token Response returned by an AS.

    This may be used to store BearerTokens in session or cookies.

    It needs a `dumper` and a `loader` functions that will respectively serialize and
    deserialize BearerTokens. Default implementations are provided: they apply deflate
    compression and base64url encoding to the serialized JSON representation.

    Args:
        dumper: a function to serialize a token into a `str`.
        loader: a function do deserialize a serialized token representation.
    """

    def __init__(
        self,
        dumper: Callable[[BearerToken], str] | None = None,
        loader: Callable[[str], BearerToken] | None = None,
    ):
        self.dumper = dumper or self.default_dumper
        self.loader = loader or self.default_loader

    @staticmethod
    def default_dumper(token: BearerToken) -> str:
        """Serialize a token as JSON, then compress with deflate, then encodes as base64url.

        Args:
            token: the `BearerToken` to serialize

        Returns:
            the serialized value
        """
        # as_dict(True) includes the expires_at datetime, restored by default_loader
        return BinaPy.serialize_to("json", token.as_dict(True)).to("deflate").to("b64u").ascii()

    def default_loader(
        self, serialized: str, token_class: type[BearerToken] = BearerToken
    ) -> BearerToken:
        """Deserialize a BearerToken.

        This does the opposite operations than `default_dumper`.

        Args:
            serialized: the serialized token
            token_class: the class to instantiate with the deserialized attributes

        Returns:
            a BearerToken
        """
        attrs = BinaPy(serialized).decode_from("b64u").decode_from("deflate").parse_from("json")
        expires_at = attrs.get("expires_at")
        if expires_at:
            # JSON stores the expiration as a POSIX timestamp; convert it back
            attrs["expires_at"] = datetime.fromtimestamp(expires_at)
        return token_class(**attrs)

    def dumps(self, token: BearerToken) -> str:
        """Serialize and compress a given token for easier storage.

        Args:
            token: a BearerToken to serialize

        Returns:
            the serialized token, as a str
        """
        return self.dumper(token)

    def loads(self, serialized: str) -> BearerToken:
        """Deserialize a serialized token.

        Args:
            serialized: the serialized token

        Returns:
            the deserialized token
        """
        return self.loader(serialized)
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/tokens.py
0.873147
0.167525
tokens.py
pypi
from __future__ import annotations

from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any

from .pooling import TokenEndpointPoolingJob
from .tokens import BearerToken
from .utils import accepts_expires_in

if TYPE_CHECKING:  # pragma: no cover
    from .client import OAuth2Client


class DeviceAuthorizationResponse:
    """Represent a response returned by the device Authorization Endpoint.

    All parameters are those returned by the AS as response to a Device Authorization Request.

    Args:
        device_code: the `device_code` as returned by the AS.
        user_code: the `user_code` as returned by the AS.
        verification_uri: the `verification_uri` as returned by the AS.
        verification_uri_complete: the `verification_uri_complete` as returned by the AS.
        expires_at: the expiration date for the device_code.
            Also accepts an `expires_in` parameter, as a number of seconds in the future.
        interval: the pooling `interval` as returned by the AS.
        **kwargs: additional parameters as returned by the AS.
    """

    @accepts_expires_in
    def __init__(
        self,
        device_code: str,
        user_code: str,
        verification_uri: str,
        verification_uri_complete: str | None = None,
        expires_at: datetime | None = None,
        interval: int | None = None,
        **kwargs: Any,
    ):
        self.device_code = device_code
        self.user_code = user_code
        self.verification_uri = verification_uri
        self.verification_uri_complete = verification_uri_complete
        self.expires_at = expires_at
        self.interval = interval
        # any non-standard extra attributes returned by the AS
        self.other = kwargs

    def is_expired(self, leeway: int = 0) -> bool | None:
        """Check if the `device_code` within this response is expired.

        Args:
            leeway: a number of seconds subtracted from the current time, to consider the
                code expired slightly before its actual expiration date.

        Returns:
            `True` if the device_code is expired, `False` if it is still valid, `None` if
            there is no `expires_in` hint.
        """
        if self.expires_at:
            # NOTE(review): uses a naive `datetime.now()` — assumes `expires_at` is a
            # naive local-time datetime as well; confirm with `accepts_expires_in`.
            return datetime.now() - timedelta(seconds=leeway) > self.expires_at
        return None


class DeviceAuthorizationPoolingJob(TokenEndpointPoolingJob):
    """A Token Endpoint pooling job for the Device Authorization Flow.

    This periodically checks if the user has finished with his authorization in a Device
    Authorization flow.

    Args:
        client: an OAuth2Client that will be used to pool the token endpoint.
        device_code: a `device_code` as `str` or a `DeviceAuthorizationResponse`.
        interval: The pooling interval to use.
            NOTE(review): the visible code never reads `device_code.interval`, so a
            `DeviceAuthorizationResponse`'s own interval hint is not picked up here —
            when `interval` is `None` the base class default applies; confirm intent.
        slow_down_interval: Number of seconds to add to the pooling interval when the AS
            returns a slow down request.
        requests_kwargs: Additional parameters for the underlying calls to
            [requests.request][].
        **token_kwargs: Additional parameters for the token request.

    Usage:
        ```python
        client = OAuth2Client(
            token_endpoint="https://my.as.local/token", auth=("client_id", "client_secret")
        )
        pool_job = DeviceAuthorizationPoolingJob(client=client, device_code="my_device_code")

        token = None
        while token is None:
            token = pool_job()
        ```
    """

    def __init__(
        self,
        client: OAuth2Client,
        device_code: str | DeviceAuthorizationResponse,
        interval: int | None = None,
        slow_down_interval: int = 5,
        requests_kwargs: dict[str, Any] | None = None,
        **token_kwargs: Any,
    ):
        super().__init__(
            client=client,
            interval=interval,
            slow_down_interval=slow_down_interval,
            requests_kwargs=requests_kwargs,
            **token_kwargs,
        )
        self.device_code = device_code

    def token_request(self) -> BearerToken:
        """Implement the Device Code token request.

        This actually calls [OAuth2Client.device_code(device_code)] on `client`.

        Returns:
            a [BearerToken][requests_oauth2client.tokens.BearerToken]
        """
        return self.client.device_code(
            self.device_code, requests_kwargs=self.requests_kwargs, **self.token_kwargs
        )
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/device_authorization.py
0.953112
0.39065
device_authorization.py
pypi
"""Exception hierarchy for errors returned by the various OAuth 2.x endpoints."""

from __future__ import annotations

from typing import TYPE_CHECKING

from jwskate import InvalidJwt

if TYPE_CHECKING:
    import requests


class OAuth2Error(Exception):
    """Base class for Exceptions raised when a backend endpoint returns an error.

    Args:
        response: the HTTP response containing the error
    """

    def __init__(self, response: requests.Response):
        self.response = response

    @property
    def request(self) -> requests.PreparedRequest:
        """The request leading to the error."""
        return self.response.request


class EndpointError(OAuth2Error):
    """Base class for exceptions raised from backend endpoint errors.

    This contains the error message, description and uri that are returned by the AS in the
    OAuth 2.0 standardised way.

    Args:
        response: the raw requests.PreparedResponse containing the error.
        error: the `error` identifier as returned by the AS.
        description: the `error_description` as returned by the AS.
        uri: the `error_uri` as returned by the AS.
    """

    def __init__(
        self,
        response: requests.Response,
        error: str,
        description: str | None = None,
        uri: str | None = None,
    ):
        super().__init__(response)
        self.error = error
        self.description = description
        self.uri = uri


class InvalidTokenResponse(OAuth2Error):
    """Raised when the Token Endpoint returns a non-standard response."""


class ExpiredAccessToken(RuntimeError):
    """Raised when an expired access token is used."""


class UnknownTokenEndpointError(EndpointError):
    """Raised when an otherwise unknown error is returned by the token endpoint."""


class ServerError(EndpointError):
    """Raised when the token endpoint returns `error = server_error`."""


# --- Token Endpoint errors (standard `error` codes) -------------------------


class TokenEndpointError(EndpointError):
    """Base class for errors that are specific to the token endpoint."""


class InvalidRequest(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = invalid_request`."""


class InvalidClient(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = invalid_client`."""


class InvalidScope(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = invalid_scope`."""


class InvalidTarget(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = invalid_target`."""


class InvalidGrant(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = invalid_grant`."""


class AccessDenied(EndpointError):
    """Raised when the Authorization Server returns `error = access_denied`."""


class UnauthorizedClient(EndpointError):
    """Raised when the Authorization Server returns `error = unauthorized_client`."""


# --- Revocation / Introspection endpoint errors ------------------------------


class RevocationError(EndpointError):
    """Base class for Revocation Endpoint errors."""


class UnsupportedTokenType(RevocationError):
    """Raised when the Revocation endpoint returns `error = unsupported_token_type`."""


class IntrospectionError(EndpointError):
    """Base class for Introspection Endpoint errors."""


class UnknownIntrospectionError(OAuth2Error):
    """Raised when the Introspection Endpoint returns a non-standard error."""


# --- Device Authorization flow errors ----------------------------------------


class DeviceAuthorizationError(EndpointError):
    """Base class for Device Authorization Endpoint errors."""


class AuthorizationPending(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = authorization_pending`."""


class SlowDown(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = slow_down`."""


class ExpiredToken(TokenEndpointError):
    """Raised when the Token Endpoint returns `error = expired_token`."""


class InvalidDeviceAuthorizationResponse(OAuth2Error):
    """Raised when the Device Authorization Endpoint returns a non-standard error response."""


class InvalidIdToken(InvalidJwt):
    """Raised when trying to validate an invalid Id Token value."""


# --- Authorization Endpoint errors --------------------------------------------


class AuthorizationResponseError(Exception):
    """Base class for error responses returned by the Authorization endpoint.

    An `AuthorizationResponseError` contains the error message, description and uri that
    are returned by the AS.

    Args:
        error: the `error` identifier as returned by the AS
        description: the `error_description` as returned by the AS
        uri: the `error_uri` as returned by the AS
    """

    def __init__(self, error: str, description: str | None = None, uri: str | None = None):
        self.error = error
        self.description = description
        self.uri = uri


class InteractionRequired(AuthorizationResponseError):
    """Raised when the Authorization Endpoint returns `error = interaction_required`."""


class LoginRequired(InteractionRequired):
    """Raised when the Authorization Endpoint returns `error = login_required`."""


class AccountSelectionRequired(InteractionRequired):
    """Raised when the Authorization Endpoint returns `error = account_selection_required`."""


class SessionSelectionRequired(InteractionRequired):
    """Raised when the Authorization Endpoint returns `error = session_selection_required`."""


class ConsentRequired(InteractionRequired):
    """Raised when the Authorization Endpoint returns `error = consent_required`."""


class InvalidAuthResponse(Exception):
    """Raised when the Authorization Endpoint returns an invalid response."""


class MissingAuthCode(InvalidAuthResponse):
    """Raised when the Authorization Endpoint does not return the mandatory `code`.

    This happens when the Authorization Endpoint does not return an error, but does not
    return an authorization `code` either.
    """


class MissingIssuer(InvalidAuthResponse):
    """Raised when the Authorization Endpoint does not return an `iss` parameter as expected.

    The Authorization Server advertises its support with a flag
    `authorization_response_iss_parameter_supported` in its discovery document. If it is set
    to `true`, it must include an `iss` parameter in its authorization responses, containing
    its issuer identifier.
    """


class MissingIdToken(InvalidAuthResponse):
    """Raised when the Authorization Endpoint does not return a mandatory ID Token.

    This happens when the Authorization Endpoint does not return an error, but does not
    return an ID Token either.
    """


class MismatchingState(InvalidAuthResponse):
    """Raised on mismatching `state` value.

    This happens when the Authorization Endpoints returns a 'state' parameter that doesn't
    match the value passed in the Authorization Request.
    """


class MismatchingIssuer(InvalidAuthResponse):
    """Raised on mismatching `iss` value.

    This happens when the Authorization Endpoints returns an 'iss' that doesn't match the
    expected value.
    """


class MismatchingNonce(InvalidIdToken):
    """Raised on mismatching `nonce` value in an ID Token.

    This happens when the authorization request includes a `nonce` but the returned ID Token
    include a different value.
    """


class MismatchingAcr(InvalidIdToken):
    """Raised when the returned ID Token doesn't contain one of the requested ACR Values.

    This happens when the authorization request includes an `acr_values` parameter but the
    returned ID Token includes a different value.
    """


class MismatchingAudience(InvalidIdToken):
    """Raised when the ID Token audience does not include the requesting Client ID."""


class MismatchingAzp(InvalidIdToken):
    """Raised when the ID Token Authorized Presenter (azp) claim is not the Client ID."""


class MismatchingIdTokenAlg(InvalidIdToken):
    """Raised when the returned ID Token is signed with an unexpected alg."""


class ExpiredIdToken(InvalidIdToken):
    """Raised when the returned ID Token is expired."""


# --- CIBA / PAR errors --------------------------------------------------------


class BackChannelAuthenticationError(EndpointError):
    """Base class for errors returned by the BackChannel Authentication endpoint."""


class InvalidBackChannelAuthenticationResponse(OAuth2Error):
    """Raised when the BackChannel Authentication endpoint returns a non-standard response."""


class InvalidPushedAuthorizationResponse(OAuth2Error):
    """Raised when the Pushed Authorization Endpoint returns an error."""
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/exceptions.py
0.984906
0.369599
exceptions.py
pypi
from __future__ import annotations

from typing import Any

from flask import session

from ..auth import OAuth2ClientCredentialsAuth
from ..client import OAuth2Client
from ..tokens import BearerToken, BearerTokenSerializer


class FlaskSessionAuthMixin:
    """A Mixin for auth handlers to store their tokens in Flask session.

    Storing tokens in Flask session does ensure that each user of a Flask application has a
    different access token, and that tokens used for backend API access will be persisted
    between multiple requests to the front-end Flask app.

    Args:
        session_key: the key that will be used to store the access token in session.
        serializer: the serializer that will be used to store the access token in session.
    """

    def __init__(
        self,
        session_key: str,
        serializer: BearerTokenSerializer | None = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        # Cooperative multiple inheritance: forward extra args to the sibling auth class.
        super().__init__(*args, **kwargs)
        self.serializer = serializer or BearerTokenSerializer()
        self.session_key = session_key

    @property
    def token(self) -> BearerToken | None:
        """Return the Access Token stored in session.

        Returns:
            The current `BearerToken` for this session, if any.
        """
        serialized_token = session.get(self.session_key)
        if serialized_token is None:
            return None
        return self.serializer.loads(serialized_token)

    @token.setter
    def token(self, token: BearerToken | str | None) -> None:
        """Store an Access Token in session.

        Setting the token to `None` (or any falsy value) removes it from session instead.

        Args:
            token: the token to store
        """
        if isinstance(token, str):
            token = BearerToken(token)  # pragma: no cover

        if token:
            serialized_token = self.serializer.dumps(token)
            session[self.session_key] = serialized_token
        elif session and self.session_key in session:
            session.pop(self.session_key, None)


class FlaskOAuth2ClientCredentialsAuth(FlaskSessionAuthMixin, OAuth2ClientCredentialsAuth):
    """A `requests` Auth handler for CC grant that stores its token in Flask session.

    It will automatically get Access Tokens from an OAuth 2.x AS with the Client Credentials
    grant (and can get a new one once the first one is expired), and stores the retrieved
    token, serialized in Flask `session`, so that each user has a different access token.

    Args:
        client: an OAuth2Client that will be used to retrieve tokens.
        session_key: the key that will be used to store the access token in Flask session
        serializer: a serializer that will be used to serialize the access token in Flask session
        **token_kwargs: additional kwargs for the Token Request
    """
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/flask/auth.py
0.9277
0.182681
auth.py
pypi
from __future__ import annotations

from typing import Any

import requests

from requests_oauth2client import ApiClient, OAuth2Client, OAuth2ClientCredentialsAuth


class Auth0Client(OAuth2Client):
    """An OAuth2Client for an Auth0 tenant.

    Instead of providing each endpoint URL separately, you only have to provide a tenant
    name and all endpoints will be initialized to work with your tenant.

    Args:
        tenant: the tenant name or FQDN. If it doesn't contain a `.` or it ends with `.eu`,
            `.us`, `.au`, or `.jp`, then `.auth0.com` will automatically be suffixed to the
            provided tenant name. A full `https://` issuer URL is also accepted.
        auth: the client credentials, same definition as for
            [OAuth2Client][requests_oauth2client.client.OAuth2Client]
        session: the session to use, same definition as for
            [OAuth2Client][requests_oauth2client.client.OAuth2Client]
    """

    def __init__(
        self,
        tenant: str,
        auth: requests.auth.AuthBase | tuple[str, str] | str | None = None,
        client_id: str | None = None,
        client_secret: str | None = None,
        session: requests.Session | None = None,
    ):
        # A bare tenant name or a region-suffixed one gets the standard Auth0 domain.
        if "." not in tenant or tenant.endswith((".eu", ".us", ".au", ".jp")):
            tenant = f"{tenant}.auth0.com"
        if "://" in tenant:
            if not tenant.startswith("https://"):
                raise ValueError(
                    "Invalid tenant name. It must be a tenant name like 'mytenant.myregion' or a full issuer like 'https://mytenant.myregion.auth0.com'."
                )
            # Fix: strip the scheme (and any trailing slash) from a full issuer URL.
            # Previously the scheme was kept, so the endpoint URLs below were built as
            # 'https://https://...' and were unusable.
            tenant = tenant[len("https://") :].rstrip("/")
        self.tenant = tenant
        token_endpoint = f"https://{tenant}/oauth/token"
        revocation_endpoint = f"https://{tenant}/oauth/revoke"
        userinfo_endpoint = f"https://{tenant}/userinfo"
        jwks_uri = f"https://{tenant}/.well-known/jwks.json"
        super().__init__(
            token_endpoint=token_endpoint,
            revocation_endpoint=revocation_endpoint,
            userinfo_endpoint=userinfo_endpoint,
            jwks_uri=jwks_uri,
            auth=auth,
            client_id=client_id,
            client_secret=client_secret,
            session=session,
        )


class Auth0ManagementApiClient(ApiClient):
    """A wrapper around the Auth0 Management API.

    See [Auth0 Management API v2](https://auth0.com/docs/api/management/v2). You must
    provide the target tenant name and the credentials for a client that is allowed access
    to the Management API.

    Args:
        tenant: the tenant name. Same definition as for
            [Auth0Client][requests_oauth2client.vendor_specific.auth0.Auth0Client]
        auth: client credentials. Same definition as for
            [OAuth2Client][requests_oauth2client.client.OAuth2Client]
        session: requests session. Same definition as for
            [OAuth2Client][requests_oauth2client.client.OAuth2Client]
        **kwargs: additional kwargs to pass to the ApiClient base class

    Usage:
        ```python
        a0mgmt = Auth0ManagementApiClient("mytenant.eu", (client_id, client_secret))
        users = a0mgmt.get("users", params={"page": 0, "per_page": 100})
        ```
    """

    def __init__(
        self,
        tenant: str,
        auth: requests.auth.AuthBase | tuple[str, str] | str | None = None,
        client_id: str | None = None,
        client_secret: str | None = None,
        session: requests.Session | None = None,
        **kwargs: Any,
    ):
        client = Auth0Client(
            tenant, auth=auth, client_id=client_id, client_secret=client_secret, session=session
        )
        # The Management API audience doubles as the base URL for API calls.
        audience = f"https://{client.tenant}/api/v2/"
        api_auth = OAuth2ClientCredentialsAuth(client, audience=audience)
        super().__init__(
            base_url=audience,
            auth=api_auth,
            session=session,
            **kwargs,
        )
/requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/vendor_specific/auth0.py
0.891487
0.19594
auth0.py
pypi
import io
import time
import webbrowser
from typing import List, Optional

import qrcode  # type: ignore
import requests
from requests_oauthlib import OAuth2Session  # type: ignore

from ..exceptions import AuthFlowError
from ..types import Plugin
from ..utils import ServerDetails, make_scope
from .utils import refresh_expired, scope_mismatch


def _make_qr(msg: str) -> str:
    """Render *msg* as an ASCII-art QR code and return it as a string."""
    qr = qrcode.QRCode()
    qr.add_data(msg)
    f = io.StringIO()
    qr.print_ascii(out=f)
    f.seek(0)
    return f.read()


def _prompt_user(partial_url: str, user_code: str, full_url: str) -> None:
    """Open the verification page in a browser and print fallback instructions + QR code."""
    webbrowser.open(full_url)
    # Stole this prompt from AWS SSO
    print(
        f"""\
Attempting to automatically open the SSO authorization page in your default browser.
If the browser does not open or you wish to use a different device to authorize this request, open the following URL:

{partial_url}

Then enter the code:

{user_code}"""
    )
    print(_make_qr(full_url))


def _poll_for_token(
    expires_in: float, interval: float, device_code: str, client_id: str, token_url: str
) -> dict:
    """Poll the token endpoint until the user authorizes, the code expires, or an error occurs.

    Raises:
        AuthFlowError: on server errors, denied/expired/invalid codes, or overall timeout.
    """
    start = time.time()
    res = None
    while (time.time() - start) < expires_in:
        # Sleep at start so we don't hit the server like, 30ms after we begin the process
        time.sleep(interval)

        res = requests.post(
            token_url,
            data={
                "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
                "device_code": device_code,
                "client_id": client_id,
            },
        )
        if res.ok:
            return res.json()

        # Fix: was `> 500`, which let a plain 500 fall through to JSON parsing.
        if res.status_code >= 500:
            raise AuthFlowError("Server error talking to OIDC server")

        try:
            # Can fail if we didn't get back JSON
            data = res.json()
        except ValueError:  # Fix: was a bare `except:` swallowing everything
            res.raise_for_status()
            continue

        # Fix: use .get() — a non-OK response without an `error` key used to KeyError.
        error = data.get("error")
        if error in ("authorization_pending", "slow_down"):
            # slow_down should never happen since we honour the advertised interval.
            continue
        elif error == "expired_token":
            raise AuthFlowError("Device code timed out")
        elif error == "invalid_grant":
            raise AuthFlowError("Device code timed out or was invalid")
        elif error == "access_denied":
            # Fix: replaced the uninformative "Idiot" message.
            raise AuthFlowError("Authorization request was denied")
        else:
            res.raise_for_status()

    # Fix: previously fell through to `res.json()` here, which raised NameError when the
    # loop never ran and a confusing HTTPError on timeout.
    raise AuthFlowError("Device code timed out")


def device_code_flow(
    urls: ServerDetails, client_id: str, scope: List[str], aud: str
) -> dict:
    """Run the OAuth 2.0 Device Authorization Grant (RFC 8628) and return the token dict."""
    res = requests.post(
        urls.device_url,
        data={"client_id": client_id, "scope": make_scope(scope), "audience": aud},
    )
    res.raise_for_status()
    data = res.json()

    _prompt_user(
        data["verification_uri"], data["user_code"], data["verification_uri_complete"]
    )

    token = _poll_for_token(
        data["expires_in"],
        # Fix: `interval` is OPTIONAL per RFC 8628 §3.2, with a default of 5 seconds.
        data.get("interval", 5),
        data["device_code"],
        client_id,
        urls.token_url,
    )
    # Fix: `expires_in` may be absent; .get() avoids a KeyError on such tokens.
    if token.get("expires_in"):
        token["expires_at"] = time.time() + token["expires_in"]
    return token


def make_device_code_session(
    oidc_url: str,
    client_id: str,
    audience: str,
    token: Optional[dict] = None,
    scope: Optional[List[str]] = None,
    *,
    klass=OAuth2Session,
    plugin: Optional[Plugin] = None,
    **kwargs,
):
    """Build an OAuth2Session backed by the device code flow.

    Reuses a token from `plugin` (or the `token` argument) when it is still valid and its
    scope matches; otherwise runs the interactive device code flow and persists the result
    through `plugin.update`.
    """
    auth_server = ServerDetails.discover(oidc_url)
    scope = make_scope(scope)

    def updater(token: dict) -> None:
        # Persist refreshed tokens so they survive process restarts.
        if plugin:
            plugin.update(token)

    token = None if plugin is None else plugin.load()
    if token is None or refresh_expired(token, margin=15) or scope_mismatch(token, scope):
        token = device_code_flow(auth_server, client_id, scope, audience)
        updater(token)

    session = klass(
        auto_refresh_url=auth_server.token_url,
        auto_refresh_kwargs={"client_id": client_id},
        token=token,
        token_updater=updater,
        **kwargs,
    )
    return session
/requests_oidc-0.6.2-py3-none-any.whl/requests_oidc/flows/device_code.py
0.665519
0.184143
device_code.py
pypi
from __future__ import print_function
import base64
import json
import os
from getpass import getpass

import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15

try:
    from urllib import urlopen  # Python 2
except ImportError:
    from urllib.request import urlopen  # Python 3


GITHUB_REPO = 'otetz/requests_proxy_adapter'
TRAVIS_CONFIG_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '.travis.yml')


def load_key(pubkey):
    """Load public RSA key.

    Work around keys with incorrect header/footer format.

    Read more about RSA encryption with cryptography:
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
    """
    try:
        return load_pem_public_key(pubkey.encode(), default_backend())
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196
        # Travis sometimes returns a PKCS#1-labelled PEM header for an SPKI key.
        pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(pubkey.encode(), default_backend())


def encrypt(pubkey, password):
    """Encrypt password using given RSA public key and encode it with base64.

    The encrypted password can only be decrypted by someone with the private key (in this
    case, only Travis). `password` must be `bytes`; the return value is base64 `bytes`.
    """
    key = load_key(pubkey)
    encrypted_password = key.encrypt(password, PKCS1v15())
    return base64.b64encode(encrypted_password)


def fetch_public_key(repo):
    """Download RSA public key Travis will use for this repo.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys

    Raises:
        ValueError: if the API response contains no key (repo not on Travis).
    """
    keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    data = json.loads(urlopen(keyurl).read().decode())
    if 'key' not in data:
        errmsg = "Could not find public key for repo: {}.\n".format(repo)
        errmsg += "Have you already added your GitHub repo to Travis?"
        raise ValueError(errmsg)
    return data['key']


def prepend_line(filepath, line):
    """Rewrite a file adding a line to its beginning."""
    with open(filepath) as f:
        lines = f.readlines()

    lines.insert(0, line)

    with open(filepath, 'w') as f:
        f.writelines(lines)


def load_yaml_config(filepath):
    """Load yaml config file at the given path."""
    with open(filepath) as f:
        # Fix: yaml.load() without an explicit Loader is unsafe and deprecated since
        # PyYAML 5.1; .travis.yml only needs plain YAML, so safe_load is sufficient.
        return yaml.safe_load(f)


def save_yaml_config(filepath, config):
    """Save yaml config file at the given path."""
    with open(filepath, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)


def update_travis_deploy_password(encrypted_password):
    """Put `encrypted_password` into the deploy section of .travis.yml."""
    config = load_yaml_config(TRAVIS_CONFIG_FILE)

    config['deploy']['password'] = dict(secure=encrypted_password)

    save_yaml_config(TRAVIS_CONFIG_FILE, config)

    line = ('# This file was autogenerated and will overwrite'
            ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, line)


def main(args):
    """Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.

    Fetch the Travis public key for the repo, and encrypt the PyPI password with it before
    adding, so that only Travis can decrypt and use the PyPI password.
    """
    public_key = fetch_public_key(args.repo)
    password = args.password or getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(public_key, password.encode()))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")


if '__main__' == __name__:
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')

    args = parser.parse_args()
    main(args)
/requests_proxy_adapter-0.1.1.tar.gz/requests_proxy_adapter-0.1.1/travis_pypi_setup.py
0.611498
0.162214
travis_pypi_setup.py
pypi
import time

from requests import PreparedRequest  # noqa: F401
from requests.adapters import HTTPAdapter
from requests.exceptions import ProxyError
from urllib3 import Retry, HTTPResponse  # noqa: F401

from .exceptions import (
    PrivoxyError4Retry,
    ForwardingFailedError,
    NoServerDataError,
    ConnectionTimeoutError,
    PrivoxyError,
)


class PrivoxyAdapter(HTTPAdapter):
    """
    The transport adapter for Requests to use Privoxy proxy-server with retries when backend
    errors occurred.

    Implements Requests's :class:`HTTPAdapter` API.

    If privoxy backend raises `500 Internal Privoxy Error` in suitable cases make `retries`
    number of internal retries with delay of `retry_wait` seconds.

    :param str proxy_url: Complete URL-address of Privoxy proxy instance (scheme, host & port).
    :param int retry_wait: (optional) Waiting in seconds before next retry if backend raise
        specified errors. Default 1 second.
    :param int retries: (optional) Maximum number of retries. Default 3 times.
    :param kwargs: (optional) Arbitrary keyword arguments for parent class
        :class:`HTTPAdapter`.
    """

    __attrs__ = HTTPAdapter.__attrs__ + ['_proxies', 'retry_wait', 'proxy_url', '_retries',
                                         '_count']

    def __init__(self, proxy_url, retry_wait=1, retries=3, **kwargs):
        self.proxy_url = proxy_url
        # Route both plain and TLS traffic through the same Privoxy instance.
        self._proxies = {
            'http': proxy_url,
            'https': proxy_url,
        }
        self.retry_wait = retry_wait
        self._retries = retries
        # attempt counter for the most recent send() call
        self._count = 0
        super(PrivoxyAdapter, self).__init__(**kwargs)

    def send(self, *args, **kwargs):
        """
        Sends :class:`PreparedRequest` object. Returns Response object.

        Replace `proxies` in kwargs if present with adapter-initialised value.

        :param args: Variable length argument list.
        :param kwargs: Arbitrary keyword arguments.
        :return: Response object
        :rtype: :class:`Response`
        """
        kwargs.pop('proxies', None)
        # Fix: the attempt counter is now reset on every top-level send. Previously
        # `_count` was never reset between requests, so attempts from earlier requests
        # leaked into later ones and, once the counter had passed `retries`, the `==`
        # comparison could never match again, allowing unbounded recursion. An iterative
        # loop also avoids growing the call stack on retries.
        self._count = 0
        while True:
            self._count += 1
            try:
                return super(PrivoxyAdapter, self).send(proxies=self._proxies, *args, **kwargs)
            except PrivoxyError4Retry:
                if self._count >= self._retries:
                    raise PrivoxyError("Too many retries: %d" % self._count)
                time.sleep(self.retry_wait)

    def build_response(self, req, resp):
        """
        Builds a :class:`Response <requests.Response>` object from a urllib3 response.

        Build response are doing by parent class :class:`HTTPAdapter`. This code detect 500
        in response status code and search in text of response specific strings.

        :param PreparedRequest req: The :class:`PreparedRequest` used to generate the
            response.
        :param HTTPResponse resp: The urllib3 :class:`HTTPResponse` object.
        :return: :class:`Response <requests.Response>` object
        :rtype: :class:`Response <requests.Response>`
        """
        r = super(PrivoxyAdapter, self).build_response(req, resp)
        if r.status_code == 500 and r.text is not None and '500 Internal Privoxy Error' in r.text:
            # Map Privoxy's HTML error pages onto specific exception types so that
            # send() can decide which ones are worth retrying.
            if '<code>forwarding-failed</code>' in r.text:
                raise ForwardingFailedError
            if '<code>no-server-data</code>' in r.text:  # pragma: no cover
                raise NoServerDataError
            if '<code>connection-timeout</code>' in r.text:  # pragma: no cover
                raise ConnectionTimeoutError
            if '<title>500 Internal Privoxy Error</title>' in r.text:  # pragma: no cover
                raise PrivoxyError(str(r.text))
            raise ProxyError(r.text)  # pragma: no cover
        return r

    def __repr__(self):
        return '<{class_name} url:{url} retry wait:{wait} s>'.format(
            class_name=self.__class__.__name__, url=self.proxy_url, wait=self.retry_wait)


class RetryPrivoxyAdapter(PrivoxyAdapter):
    """
    The transport adapter for Requests to use Privoxy proxy-server with retries when backend
    errors occurred and retries if errors occured on target site by :class:`Retry` module.

    Implements Requests's :class:`HTTPAdapter` API.

    Extend class :class:`PrivoxyAdapter`

    :param int retries: Total number of retries to allow. Takes precedence over other counts.
    :param float backoff_factor: A backoff factor to apply between attempts after the second
        try (most errors are resolved immediately by a second try without a delay).
    :param set status_forcelist: A set of integer HTTP status codes that we should force a
        retry on.
    :param args: Variable length argument list.
    :param kwargs: Arbitrary keyword arguments.
    """

    __attrs__ = PrivoxyAdapter.__attrs__ + ['retries', 'backoff_factor', 'status_forcelist']

    def __init__(self, retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504),
                 *args, **kwargs):
        self.retries = retries
        self.backoff_factor = backoff_factor
        self.status_forcelist = status_forcelist
        # Delegate per-status retries to urllib3's Retry machinery.
        retry = Retry(total=retries, read=retries, connect=retries,
                      backoff_factor=backoff_factor, status_forcelist=status_forcelist)
        super(RetryPrivoxyAdapter, self).__init__(max_retries=retry, *args, **kwargs)

    def __repr__(self):
        return '<{class_name} url:{url} retries:{retires} backoff factor:{backoff} ' \
               'retry wait:{wait} s status_forcelist:{statuses}>'.format(
                   class_name=self.__class__.__name__, url=self.proxy_url,
                   wait=self.retry_wait, retires=self.retries,
                   backoff=self.backoff_factor, statuses=self.status_forcelist)
/requests_proxy_adapter-0.1.1.tar.gz/requests_proxy_adapter-0.1.1/requests_proxy_adapter/api.py
0.779532
0.162446
api.py
pypi
r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary items. Example:: >>> import requests >>> requests.codes['temporary_redirect'] 307 >>> requests.codes.teapot 418 >>> requests.codes['\o/'] 200 Some codes have multiple names, and both upper- and lower-case versions of the names are allowed. For example, ``codes.ok``, ``codes.OK``, and ``codes.okay`` all correspond to the HTTP status code 200. """ from .structures import LookupDict _codes = { # Informational. 100: ("continue",), 101: ("switching_protocols",), 102: ("processing",), 103: ("checkpoint",), 122: ("uri_too_long", "request_uri_too_long"), 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), 201: ("created",), 202: ("accepted",), 203: ("non_authoritative_info", "non_authoritative_information"), 204: ("no_content",), 205: ("reset_content", "reset"), 206: ("partial_content", "partial"), 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), 208: ("already_reported",), 226: ("im_used",), # Redirection. 300: ("multiple_choices",), 301: ("moved_permanently", "moved", "\\o-"), 302: ("found",), 303: ("see_other", "other"), 304: ("not_modified",), 305: ("use_proxy",), 306: ("switch_proxy",), 307: ("temporary_redirect", "temporary_moved", "temporary"), 308: ( "permanent_redirect", "resume_incomplete", "resume", ), # "resume" and "resume_incomplete" to be removed in 3.0 # Client Error. 
400: ("bad_request", "bad"), 401: ("unauthorized",), 402: ("payment_required", "payment"), 403: ("forbidden",), 404: ("not_found", "-o-"), 405: ("method_not_allowed", "not_allowed"), 406: ("not_acceptable",), 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), 408: ("request_timeout", "timeout"), 409: ("conflict",), 410: ("gone",), 411: ("length_required",), 412: ("precondition_failed", "precondition"), 413: ("request_entity_too_large",), 414: ("request_uri_too_large",), 415: ("unsupported_media_type", "unsupported_media", "media_type"), 416: ( "requested_range_not_satisfiable", "requested_range", "range_not_satisfiable", ), 417: ("expectation_failed",), 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), 421: ("misdirected_request",), 422: ("unprocessable_entity", "unprocessable"), 423: ("locked",), 424: ("failed_dependency", "dependency"), 425: ("unordered_collection", "unordered"), 426: ("upgrade_required", "upgrade"), 428: ("precondition_required", "precondition"), 429: ("too_many_requests", "too_many"), 431: ("header_fields_too_large", "fields_too_large"), 444: ("no_response", "none"), 449: ("retry_with", "retry"), 450: ("blocked_by_windows_parental_controls", "parental_controls"), 451: ("unavailable_for_legal_reasons", "legal_reasons"), 499: ("client_closed_request",), # Server Error. 
500: ("internal_server_error", "server_error", "/o\\", "✗"), 501: ("not_implemented",), 502: ("bad_gateway",), 503: ("service_unavailable", "unavailable"), 504: ("gateway_timeout",), 505: ("http_version_not_supported", "http_version"), 506: ("variant_also_negotiates",), 507: ("insufficient_storage",), 509: ("bandwidth_limit_exceeded", "bandwidth"), 510: ("not_extended",), 511: ("network_authentication_required", "network_auth", "network_authentication"), } codes = LookupDict(name="status_codes") def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) if not title.startswith(("\\", "/")): setattr(codes, title.upper(), code) def doc(code): names = ", ".join(f"``{n}``" for n in _codes[code]) return "* %d: %s" % (code, names) global __doc__ __doc__ = ( __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) if __doc__ is not None else None ) _init()
/requests_qwd-0.1.3.tar.gz/requests_qwd-0.1.3/requests_qwd/status_codes.py
0.846308
0.566258
status_codes.py
pypi
from . import sessions


def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``,
        ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
        for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple
        ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object
        containing additional headers to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable
        GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      >>> req
      <Response [200]>
    """

    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)


def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("get", url, params=params, **kwargs)


def options(url, **kwargs):
    r"""Sends an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("options", url, **kwargs)


def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
        If `allow_redirects` is not provided, it will be set to `False` (as
        opposed to the default :meth:`request` behavior).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs.setdefault("allow_redirects", False)
    return request("head", url, **kwargs)


def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("post", url, data=data, json=json, **kwargs)


def put(url, data=None, json=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Accept ``json`` explicitly: it was already documented above but missing
    # from the signature; this matches :func:`post`, and forwarding ``json=None``
    # is a no-op for existing callers.
    return request("put", url, data=data, json=json, **kwargs)


def patch(url, data=None, json=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Same consistency fix as :func:`put`: ``json`` was documented but not accepted.
    return request("patch", url, data=data, json=json, **kwargs)


def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("delete", url, **kwargs)
/requests_qwd-0.1.3.tar.gz/requests_qwd-0.1.3/requests_qwd/api.py
0.853486
0.411466
api.py
pypi
from collections import OrderedDict

from .compat import Mapping, MutableMapping


class CaseInsensitiveDict(MutableMapping):
    """A ``dict``-like container with case-insensitive string keys.

    All ``MutableMapping`` operations plus ``copy`` and ``lower_items`` are
    supported. Lookups, deletions, and membership tests ignore case, while
    iteration (``iter``, ``keys()``, ``items()``) yields the casing of the
    most recently set key::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    So ``headers['content-encoding']`` finds a ``'Content-Encoding'`` entry
    regardless of how it was stored. Behavior is undefined if two keys passed
    to the constructor, ``.update``, or an equality comparison collide under
    ``.lower()``.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (original key, value)
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Index by the lowercased key, but remember the caller's casing.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the stored (original-case) keys.
        for cased_key, _ in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        for lower_key, (_, value) in self._store.items():
            yield (lower_key, value)

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Normalize both sides to lowercase keys before comparing.
        other_ci = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other_ci.lower_items())

    # Required because MutableMapping does not provide copy()
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))


class LookupDict(dict):
    """Dictionary lookup object."""

    def __init__(self, name=None):
        self.name = name
        super().__init__()

    def __repr__(self):
        return f"<lookup '{self.name}'>"

    def __getitem__(self, key):
        # Deliberate fall-through: missing keys yield None rather than KeyError.
        # Values live on the instance __dict__ (set via setattr), not dict storage.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
/requests_qwd-0.1.3.tar.gz/requests_qwd-0.1.3/requests_qwd/structures.py
0.926893
0.4231
structures.py
pypi
from requests.adapters import BaseAdapter, HTTPAdapter
import time
import threading
import logging

log = logging.getLogger(__name__)

# Use monotonic time if available in `time`, otherwise fall back to the default clock.
now = time.monotonic if hasattr(time, 'monotonic') else time.time


class RateLimitException(Exception):
    """Exception raised when too many requests have been made in this period."""

    def __init__(self, time_remaining_s):
        """Construct a RateLimitException.

        Args:
            time_remaining_s (int): The time remaining before the current
                period ends and new requests can be made.
        """
        # Fix: forward the argument to Exception so that str(e), repr(e) and
        # e.args carry the wait time; previously the base constructor was never
        # called and the exception rendered with an empty message.
        super(RateLimitException, self).__init__(time_remaining_s)
        self.time_remaining_s = time_remaining_s


class RateLimitAdapter(BaseAdapter):
    """Adapter which rate limits requests."""

    def __init__(self, adapter, calls=15, period=900.0):
        """Construct a RateLimitAdapter.

        Args:
            adapter (BaseAdapter): A request adapter on which all requests will be sent.
            calls (int): The maximum number of requests allowed in `period` seconds.
            period (float): The time period during which requests are totalled.
                Once one time period expires and another begins, the number of
                admitted requests is reset.
        """
        super(RateLimitAdapter, self).__init__()

        # Prefix every log line with the concrete adapter class name.
        class _ContextAdapter(logging.LoggerAdapter):
            def process(self, msg, kwargs):
                return "[{0}] {1}".format(self.extra["class_name"], msg), kwargs

        self.log = _ContextAdapter(log, {"class_name": self.__class__.__name__})

        self.adapter = adapter
        self.calls = calls
        self.period = period

        # Get a lock to wrap the sending logic.
        self.send_lock = threading.RLock()

        # Window bookkeeping; a period only starts when the first response of
        # that period is received (see _inner_send).
        self.period_start = 0
        self.period_end = 0
        self.num_calls = 0

    def send(self, request, stream=False, timeout=None, verify=True, cert=None,
             proxies=None):
        """Send PreparedRequest object.  Returns Response object.

        Args:
            request (PreparedRequest): The `PreparedRequest` being sent.
            stream (bool): (optional) Whether to stream the request content.
            timeout: (optional) How long to wait for the server to send data
                before giving up, as a float, or a `(connect timeout, read timeout)`
                tuple.
            verify: (optional) Either a boolean, in which case it controls
                whether we verify the server's TLS certificate, or a string, in
                which case it must be a path to a CA bundle to use.
            cert: (optional) Any user-provided SSL certificate to be trusted.
            proxies: (optional) The proxies dictionary to apply to the request.

        Returns:
            Response object.
        """
        # Retry until a send is admitted, sleeping out the remainder of the
        # current period whenever the call quota is exhausted.
        while True:
            try:
                return self._inner_send(request, stream=stream, timeout=timeout,
                                        verify=verify, cert=cert, proxies=proxies)
            except RateLimitException as e:
                self.log.debug("Retrying send in %f seconds", e.time_remaining_s)
                # Clamp defensively: time.sleep() raises ValueError on negative input.
                time.sleep(max(0.0, e.time_remaining_s))

    def _inner_send(self, request, stream=False, timeout=None, verify=True,
                    cert=None, proxies=None):
        # All window state is read and updated under the lock, so counting and
        # period rollover are consistent across threads.
        with self.send_lock:
            prestart_time = now()

            # Is this the first event in this period?
            if prestart_time > self.period_end:
                first_event = True
                self.num_calls = 0
            else:
                first_event = False

            # Verify if this request is allowed.
            if self.num_calls < self.calls:
                self.log.debug("Allowing send: %d period calls < %d allowed",
                               self.num_calls, self.calls)
                self.num_calls += 1
                resp = self.adapter.send(request, stream=stream, timeout=timeout,
                                         verify=verify, cert=cert, proxies=proxies)

                if first_event:
                    # Now that we have a response, start the period.
                    self.period_start = now()
                    self.period_end = self.period_start + self.period
                    self.log.debug("Period starts at %f, ends %f (prestart %f)",
                                   self.period_start, self.period_end, prestart_time)

                # And return the response.
                return resp
            else:
                # Raise an exception to trigger a retry later.  Non-negative by
                # construction: first_event is False here, so prestart_time <= period_end.
                time_remaining_s = self.period_end - prestart_time
                self.log.debug("Disallowing send: %d period calls >= %d allowed; wait %f seconds",
                               self.num_calls, self.calls, time_remaining_s)
                raise RateLimitException(time_remaining_s)

    def close(self):
        """Clean up adapter specific items."""
        self.adapter.close()


class HTTPRateLimitAdapter(RateLimitAdapter):
    """Adapter which rate limits HTTP requests."""

    def __init__(self, calls=15, period=900.0):
        """Construct an HTTPRateLimitAdapter.

        Args:
            calls (int): The maximum number of requests allowed in `period` seconds.
            period (float): The time period during which requests are totalled.
                Once one time period expires and another begins, the number of
                admitted requests is reset.
        """
        adapter = HTTPAdapter()
        super(HTTPRateLimitAdapter, self).__init__(adapter, calls, period)
/requests_ratelimit_adapter-0.2.0.tar.gz/requests_ratelimit_adapter-0.2.0/requests_ratelimit_adapter/requests_ratelimit_adapter.py
0.925991
0.221856
requests_ratelimit_adapter.py
pypi
# Requests-Ratelimiter [![Build status](https://github.com/JWCook/requests-ratelimiter/workflows/Build/badge.svg)](https://github.com/JWCook/requests-ratelimiter/actions) [![Codecov](https://codecov.io/gh/JWCook/requests-ratelimiter/branch/main/graph/badge.svg)](https://codecov.io/gh/JWCook/requests-ratelimiter) [![Documentation Status](https://img.shields.io/readthedocs/requests-ratelimiter/stable?label=docs)](https://requests-ratelimiter.readthedocs.io) [![PyPI](https://img.shields.io/pypi/v/requests-ratelimiter?color=blue)](https://pypi.org/project/requests-ratelimiter) [![Conda](https://img.shields.io/conda/vn/conda-forge/requests-ratelimiter?color=blue)](https://anaconda.org/conda-forge/requests-ratelimiter) [![PyPI - Python Versions](https://img.shields.io/pypi/pyversions/requests-ratelimiter)](https://pypi.org/project/requests-ratelimiter) [![PyPI - Format](https://img.shields.io/pypi/format/requests-ratelimiter?color=blue)](https://pypi.org/project/requests-ratelimiter) This package is a simple wrapper around [pyrate-limiter](https://pyratelimiter.readthedocs.io) that adds convenient integration with the [requests](https://requests.readthedocs.io) library. Full project documentation can be found at [requests-ratelimiter.readthedocs.io](https://requests-ratelimiter.readthedocs.io). 
# Features * `pyrate-limiter` is a general-purpose rate limiting library that implements the leaky bucket algorithm, supports multiple rate limits, and has optional persistence with SQLite and Redis backends * `requests-ratelimiter` adds some extra conveniences specific to sending HTTP requests with the `requests` library * It can be used as either a [session](https://requests.readthedocs.io/en/latest/user/advanced/#session-objects) or a [transport adapter](https://requests.readthedocs.io/en/latest/user/advanced/#transport-adapters) * It can also be used as a mixin, for compatibility with other `requests`-based libraries * Rate limits are tracked separately per host * Different rate limits can optionally be applied to different hosts # Installation ``` pip install requests-ratelimiter ``` # Usage ## Usage Options There are three ways to use `requests-ratelimiter`: ### Session The simplest option is [`LimiterSession`](https://requests-ratelimiter.readthedocs.io/en/stable/reference.html#requests_ratelimiter.LimiterSession), which can be used as a drop-in replacement for [`requests.Session`](https://requests.readthedocs.io/en/latest/api/#requests.Session). Example: ```python from requests_ratelimiter import LimiterSession from time import time # Apply a rate limit of 5 requests per second to all requests session = LimiterSession(per_second=5) start = time() # Send requests that stay within the defined rate limit for i in range(20): response = session.get('https://httpbin.org/get') print(f'[t+{time()-start:.2f}] Sent request {i+1}') ``` Example output: ```bash [t+0.22] Sent request 1 [t+0.26] Sent request 2 [t+0.30] Sent request 3 [t+0.34] Sent request 4 [t+0.39] Sent request 5 [t+1.24] Sent request 6 [t+1.28] Sent request 7 [t+1.32] Sent request 8 [t+1.37] Sent request 9 [t+1.41] Sent request 10 [t+2.04] Sent request 11 ... 
```

### Adapter
For more advanced usage, [`LimiterAdapter`](https://requests-ratelimiter.readthedocs.io/en/stable/reference.html#requests_ratelimiter.LimiterAdapter) is available to be used as a [transport adapter](https://requests.readthedocs.io/en/latest/user/advanced/#transport-adapters).

Example:
```python
from requests import Session
from requests_ratelimiter import LimiterAdapter

session = Session()

# Apply a rate-limit (5 requests per second) to all requests
adapter = LimiterAdapter(per_second=5)
session.mount('http://', adapter)
session.mount('https://', adapter)

# Send rate-limited requests
for user_id in range(100):
    response = session.get(f'https://api.some_site.com/v1/users/{user_id}')
    print(response.json())
```

### Mixin
Finally, [`LimiterMixin`](https://requests-ratelimiter.readthedocs.io/en/stable/reference.html#requests_ratelimiter.LimiterMixin) is available for advanced use cases in which you want to add rate-limiting features to a custom session or adapter class. See [Custom Session Example](#custom-session-example-requests-cache) below for an example.
## Rate Limit Settings ### Basic Settings The following parameters are available for the most common rate limit intervals: * `per_second`: Max requests per second * `per_minute`: Max requests per minute * `per_hour`: Max requests per hour * `per_day`: Max requests per day * `per_month`: Max requests per month * `burst`: Max number of consecutive requests allowed before applying per-second rate-limiting <!-- TODO: Section explaining burst rate limit --> ### Advanced Settings If you need to define more complex rate limits, you can create a `Limiter` object instead: ```python from pyrate_limiter import Duration, RequestRate, Limiter from requests_ratelimiter import LimiterSession nanocentury_rate = RequestRate(10, Duration.SECOND * 3.156) fortnight_rate = RequestRate(1000, Duration.DAY * 14) trimonthly_rate = RequestRate(10000, Duration.MONTH * 3) limiter = Limiter(nanocentury_rate, fortnight_rate, trimonthly_rate) session = LimiterSession(limiter=limiter) ``` See [pyrate-limiter docs](https://pyratelimiter.readthedocs.io/en/latest/#basic-usage) for more `Limiter` usage details. ## Backends By default, rate limits are tracked in memory and are not persistent. You can optionally use either SQLite or Redis to persist rate limits across threads, processes, and/or application restarts. You can specify which backend to use with the `bucket_class` argument. For example, to use SQLite: ```python from pyrate_limiter import SQLiteBucket from requests_ratelimiter import LimiterSession session = LimiterSession(per_second=5, bucket_class=SQLiteBucket) ``` See [pyrate-limiter docs](https://pyratelimiter.readthedocs.io/en/latest/#backends) for more details. ## Other Features ### Per-Host Rate Limit Tracking With either `LimiterSession` or `LimiterAdapter`, rate limits are tracked separately for each host. 
In other words, requests sent to one host will not count against the rate limit for any other hosts:

```python
session = LimiterSession(per_second=5)

# Make requests for two different hosts
for _ in range(10):
    response = session.get(f'https://httpbin.org/get')
    print(response.json())
    response = session.get(f'https://httpbingo.org/get')
    print(response.json())
```

If you have a case where multiple hosts share the same rate limit, you can disable this behavior with the `per_host` option:
```python
session = LimiterSession(per_second=5, per_host=False)
```

### Per-Host Rate Limit Definitions
With `LimiterAdapter`, you can apply different rate limits to different hosts or URLs:
```python
# Apply a different set of rate limits (2/second and 100/minute) to a specific host
adapter_2 = LimiterAdapter(per_second=2, per_minute=100)
session.mount('https://api.some_site.com', adapter_2)
```

Behavior for matching requests is the same as other transport adapters: `requests` will use the adapter with the most specific (i.e., longest) URL prefix that matches a given request. For example:
```python
session.mount('https://api.some_site.com/v1', adapter_3)
session.mount('https://api.some_site.com/v1/users', adapter_4)

# This request will use adapter_3
session.get('https://api.some_site.com/v1/')

# This request will use adapter_4
session.get('https://api.some_site.com/v1/users/1234')
```

### Rate Limit Error Handling
Sometimes, server-side rate limiting may not behave exactly as documented (or may not be documented at all). Or you might encounter other scenarios where your client-side limit gets out of sync with the server-side limit. Typically, a server will send a `429: Too Many Requests` response for an exceeded rate limit.

When this happens, `requests-ratelimiter` will adjust its request log in an attempt to catch up to the server-side limit.
If a server sends a different status code other than 429 to indicate an exceeded limit, you can set this with `limit_statuses`: ```python session = LimiterSession(per_second=5, limit_statuses=[429, 500]) ``` Or if you would prefer to disable this behavior and handle it yourself: ```python session = LimiterSession(per_second=5, limit_statuses=[]) ``` # Compatibility There are many other useful libraries out there that add features to `requests`, most commonly by extending or modifying [requests.Session](https://requests.readthedocs.io/en/latest/api/#requests.Session) or [requests.HTTPAdapter](https://requests.readthedocs.io/en/latest/api/#requests.adapters.HTTPAdapter). To use `requests-ratelimiter` with one of these libraries, you have a few different options: 1. If the library provides a custom `Session` class, mount a `LimiterAdapter` on it 2. Or use `LimiterMixin` to create a custom `Session` class with features from both libraries 3. If the library provides a custom `Adapter` class, use `LimiterMixin` to create a custom `Adapter` class with features from both libraries ## Custom Session Example: Requests-Cache For example, to combine with [requests-cache](https://github.com/requests-cache/requests-cache), which also includes a separate mixin class: ```python from requests import Session from requests_cache import CacheMixin, RedisCache from requests_ratelimiter import LimiterMixin, RedisBucket class CachedLimiterSession(CacheMixin, LimiterMixin, Session): """Session class with caching and rate-limiting behavior. Accepts arguments for both LimiterSession and CachedSession. """ # Optionally use Redis as both the bucket backend and the cache backend session = CachedLimiterSession( per_second=5, bucket_class=RedisBucket, backend=RedisCache(), ) ``` This example has an extra benefit: cache hits won't count against your rate limit!
/requests_ratelimiter-0.4.1.tar.gz/requests_ratelimiter-0.4.1/README.md
0.650245
0.940298
README.md
pypi
from inspect import signature
from logging import getLogger
from time import time
from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Type, Union
from urllib.parse import urlparse
from uuid import uuid4

from pyrate_limiter import Duration, Limiter, RequestRate
from pyrate_limiter.bucket import AbstractBucket, MemoryListBucket, MemoryQueueBucket
from requests import PreparedRequest, Response, Session
from requests.adapters import HTTPAdapter

if TYPE_CHECKING:
    MIXIN_BASE = Session
else:
    MIXIN_BASE = object

logger = getLogger(__name__)


class LimiterMixin(MIXIN_BASE):
    """Mixin class that adds rate-limiting behavior to requests.

    See :py:class:`.LimiterSession` for parameter details.
    """

    def __init__(
        self,
        per_second: float = 0,
        per_minute: float = 0,
        per_hour: float = 0,
        per_day: float = 0,
        per_month: float = 0,
        burst: float = 1,
        bucket_class: Type[AbstractBucket] = MemoryListBucket,
        bucket_kwargs: Optional[Dict] = None,
        time_function: Optional[Callable[..., float]] = None,
        limiter: Optional[Limiter] = None,
        max_delay: Union[int, float, None] = None,
        per_host: bool = True,
        limit_statuses: Iterable[int] = (429,),
        **kwargs,
    ):
        # Translate request rate values into RequestRate objects.
        # NOTE: ``burst`` scales both the interval and the limit, i.e. up to
        # ``per_second * burst`` requests are allowed per ``burst``-second window.
        rates = [
            RequestRate(limit, interval)
            for interval, limit in {
                Duration.SECOND * burst: per_second * burst,
                Duration.MINUTE: per_minute,
                Duration.HOUR: per_hour,
                Duration.DAY: per_day,
                Duration.MONTH: per_month,
            }.items()
            if limit
        ]

        # If using a persistent backend, we don't want to use monotonic time (the default)
        if bucket_class not in (MemoryListBucket, MemoryQueueBucket) and not time_function:
            time_function = time

        self.limiter = limiter or Limiter(
            *rates,
            bucket_class=bucket_class,
            bucket_kwargs=bucket_kwargs,
            time_function=time_function,
        )
        self.limit_statuses = limit_statuses
        self.max_delay = max_delay
        self.per_host = per_host
        # Fallback bucket name used when per-host tracking is disabled
        self._default_bucket = str(uuid4())

        # If the superclass is an adapter or custom Session, pass along any valid keyword arguments
        session_kwargs = get_valid_kwargs(super().__init__, kwargs)
        super().__init__(**session_kwargs)  # type: ignore  # Base Session doesn't take any kwargs

    # Conveniently, both Session.send() and HTTPAdapter.send() have a mostly consistent signature
    def send(self, request: PreparedRequest, **kwargs) -> Response:
        """Send a request with rate-limiting.

        Raises:
            :py:exc:`.BucketFullException` if this request would result in a delay longer
            than ``max_delay``
        """
        with self.limiter.ratelimit(
            self._bucket_name(request),
            delay=True,
            max_delay=self.max_delay,
        ):
            response = super().send(request, **kwargs)
            if response.status_code in self.limit_statuses:
                # Server says we exceeded its limit; resync the local bucket
                self._fill_bucket(request)
            return response

    def _bucket_name(self, request):
        """Get a bucket name for the given request"""
        return urlparse(request.url).netloc if self.per_host else self._default_bucket

    def _fill_bucket(self, request: PreparedRequest):
        """Partially fill the bucket for the given request, requiring an extra delay until the
        next request.

        This is essentially an attempt to catch up to the actual (server-side) limit if we've
        gotten out of sync. If the server tracks multiple limits, there's no way to know which
        specific limit was exceeded, so the smallest rate will be used.

        For example, if the server allows 60 requests per minute, and we've tracked only 40
        requests but received a 429 response, 20 additional "filler" requests will be added to
        the bucket to attempt to catch up to the server-side limit.

        If the server also has an hourly limit, we don't have enough information to know if
        we've exceeded that limit or how long to delay, so we'll keep delaying in 1-minute
        intervals.
        """
        # Fix: use lazy %-style logging args instead of an f-string, so the
        # message is only formatted when INFO is enabled.
        logger.info('Rate limit exceeded for %s; filling limiter bucket', request.url)
        bucket = self.limiter.bucket_group[self._bucket_name(request)]

        # Determine how many requests we've made within the smallest defined time interval
        now = self.limiter.time_function()
        # NOTE(review): ``_rates`` is private pyrate-limiter state; index 0 is
        # assumed to be the shortest-interval rate (see docstring above) -- confirm
        # against the installed pyrate-limiter version.
        rate = self.limiter._rates[0]
        item_count, _ = bucket.inspect_expired_items(now - rate.interval)

        # TODO: After fixing usage with MemoryQueueBucket on py 3.11, don't add items over capacity
        # capacity = bucket.maxsize() - bucket.size()
        # n_filler_requests = min(capacity, rate.limit - item_count)

        # Add "filler" requests to reach the limit for that interval
        for _ in range(rate.limit - item_count):
            bucket.put(now)


class LimiterSession(LimiterMixin, Session):
    """`Session <https://requests.readthedocs.io/en/latest/user/advanced/#session-objects>`_
    that adds rate-limiting behavior to requests.

    The following parameters also apply to :py:class:`.LimiterMixin` and
    :py:class:`.LimiterAdapter`.

    .. note::
        The ``per_*`` params are aliases for the most common rate limit intervals; for more
        complex rate limits, you can provide a :py:class:`~pyrate_limiter.limiter.Limiter`
        object instead.

    Args:
        per_second: Max requests per second
        per_minute: Max requests per minute
        per_hour: Max requests per hour
        per_day: Max requests per day
        per_month: Max requests per month
        burst: Max number of consecutive requests allowed before applying per-second
            rate-limiting
        bucket_class: Bucket backend class; may be one of
            :py:class:`~pyrate_limiter.bucket.MemoryListBucket` (default),
            :py:class:`~pyrate_limiter.bucket.MemoryQueueBucket`,
            :py:class:`~pyrate_limiter.sqlite_bucket.SQLiteBucket`, or
            :py:class:`~pyrate_limiter.bucket.RedisBucket`
        bucket_kwargs: Bucket backend keyword arguments
        limiter: An existing Limiter object to use instead of the above params
        max_delay: The maximum allowed delay time (in seconds); anything over this will abort
            the request and raise a :py:exc:`.BucketFullException`
        per_host: Track request rate limits separately for each host
        limit_statuses: Alternative HTTP status codes that indicate a rate limit was exceeded
    """


class LimiterAdapter(LimiterMixin, HTTPAdapter):  # type: ignore  # send signature accepts **kwargs
    """`Transport adapter
    <https://requests.readthedocs.io/en/latest/user/advanced/#transport-adapters>`_
    that adds rate-limiting behavior to requests.

    See :py:class:`.LimiterSession` for parameter details.
    """


def get_valid_kwargs(func: Callable, kwargs: Dict) -> Dict:
    """Get the subset of non-None ``kwargs`` that are valid params for ``func``"""
    sig_params = list(signature(func).parameters)
    return {k: v for k, v in kwargs.items() if k in sig_params and v is not None}
/requests_ratelimiter-0.4.1.tar.gz/requests_ratelimiter-0.4.1/requests_ratelimiter/requests_ratelimiter.py
0.885483
0.205575
requests_ratelimiter.py
pypi
import warnings from typing import Any, Dict, List, Optional, Set from requests import Response, Session from urllib3.exceptions import HTTPWarning from urllib3.util.retry import Retry __all__: List[str] = ["SessionPlus"] RETRY_BACKOFF_FACTOR: float = 2 RETRY_STATUS_FORCELIST: Set[int] = { 413, # Client: Payload Too Large 429, # Client: Too Many Requests 500, # Server: Internal Server Error 502, # Server: Bad Gateway 503, # Server: Service Unavailable 504, # Server: Gateway Timeout } RETRY_TOTAL: int = 5 TIMEOUT: float = 10 class SessionPlus(Session): """requests.Session() object with some quality of life enhancements.""" _retry: bool _retry_backof_factor: float _retry_status_forcelist: Set[int] _retry_total: int _status_exceptions: bool _timeout: Optional[float] _verify: bool def __init__( self, retry: bool = False, retry_backoff_factor: float = RETRY_BACKOFF_FACTOR, retry_status_forcelist: Set[int] = RETRY_STATUS_FORCELIST, retry_total: int = RETRY_TOTAL, status_exceptions: bool = False, timeout: Optional[float] = TIMEOUT, verify: bool = True, **kwargs, ): """Instantiate SessionPlus object with timeout enabled. Args: retry (bool): enable/disable retries. Defaults to False retry_backoff_factor (float): used when calculating time between retries. Defaults to 2 retry_status_forcelist (set[int]): status codes to issue retries for. Defaults to [413,429,500,502-504] retry_total (int): total number of retries to attempt. Defaults to 5 status_exceptions (bool): raise exceptions for status codes >=400. Defaults to False timeout (int or None): timeout for HTTP calls. Defaults to 10 verify (bool): enable/disable certificate verification. 
Defaults to True """ super().__init__() self.retry_backoff_factor = retry_backoff_factor self.retry_status_forcelist = retry_status_forcelist self.retry_total = retry_total # load any additional namespaced retry settings as attributes for key, value in kwargs.items(): if key.startswith("retry_"): self.__dict__[key] = value self.retry = retry self.status_exceptions = status_exceptions self.timeout = timeout self.verify = verify @property def retry(self) -> bool: """Property to determine if retries are enabled/disabled.""" return self._retry @retry.setter def retry(self, value: bool): """Set boolean value then call helper function to enable/disable retries.""" self._retry = bool(value) self.update_retry() def update_retry(self): """Re-apply the Retry class with updated variables.""" if self._retry: retry = Retry(**self.retry_settings) else: retry = Retry(total=0, read=False) for adapter in self.adapters.values(): adapter.max_retries = retry @property def retry_backoff_factor(self) -> float: """Property used to determine backoff sleep time between retries.""" return self._retry_backoff_factor @retry_backoff_factor.setter def retry_backoff_factor(self, value: float): """Validate the value is a float.""" self._retry_backoff_factor = float(value) @property def retry_status_forcelist(self) -> Set[int]: """Property used to determine which status codes require a retry.""" return self._retry_status_forcelist @retry_status_forcelist.setter def retry_status_forcelist(self, values: Set[int]): """Validate the value is a list of integers.""" if not isinstance(values, (set, list)): raise ValueError("retry_status_forcelist must be a set or a list of integers") new_set: Set[int] = set(int(x) for x in values) self._retry_status_forcelist = new_set @property def retry_total(self) -> int: """Property to return the total number of retries.""" return self._retry_total @retry_total.setter def retry_total(self, value: int): """Validate the value is an integer.""" self._retry_total = 
int(value) @property def retry_settings(self) -> Dict[str, Any]: """Property to generate the Retry settings dictionary.""" settings: Dict[str, Any] = { "backoff_factor": self._retry_backoff_factor, "status_forcelist": self._retry_status_forcelist, "total": self._retry_total, } for key, value in self.__dict__.items(): if key.startswith("retry_"): settings[key.replace("retry_", "")] = value return settings @property def status_exceptions(self) -> bool: """Property to determine if exceptions should be raised for status codes >=400.""" return self._status_exceptions @status_exceptions.setter def status_exceptions(self, value: bool): """Set the value then modify the response hooks.""" self._status_exceptions = bool(value) entry_index: Optional[int] = None for i, hook in enumerate(self.hooks["response"]): if hook.__name__ == self._status_exception_response_hook.__name__: entry_index = i break if self._status_exceptions and not isinstance(entry_index, int): self.hooks["response"].append(self._status_exception_response_hook) elif not self._status_exceptions and isinstance(entry_index, int): self.hooks["response"].pop(entry_index) def _status_exception_response_hook(self, response: Response, *args, **kwargs): """Set the post-response hook to raise an exception if HTTP status code is >=400. 
Args: response (Response): The object returned after HTTP call is made """ response.raise_for_status() @property def timeout(self) -> Optional[float]: """Property to determine maximum time to wait for HTTP response before raising exception.""" return self._timeout @timeout.setter def timeout(self, value: Optional[float]): """Timeout can be number >0 or None where None disables timeout.""" if isinstance(value, (float, int, str)): value = float(value) if value <= 0.0: raise ValueError("timeout must be a float or integer greater than 0") elif value is not None: raise ValueError("timeout must be a float or integer greater than 0") self._timeout = value @property # type: ignore def verify(self) -> bool: """Property to determine if certificates should be validated or not.""" return self._verify @verify.setter def verify(self, value: bool): """Set the boolean them cycle through each warning and add/remove warnings as needed.""" self._verify = bool(value) key: str = "default" if self._verify else "ignore" pop_filters: List[int] = [] filter_found: bool = False for i, warn in enumerate(warnings.filters): if warn[2] == HTTPWarning: if warn[0] == key: filter_found = True else: pop_filters.append(i) if pop_filters: pop_filters.reverse() for filter_index in pop_filters: warnings.filters.pop(filter_index) # type: ignore if not filter_found: warnings.simplefilter(key, HTTPWarning) # type: ignore def send(self, request, **kwargs): """Send a given PreparedRequest.""" if not kwargs.get("timeout") and self.timeout: kwargs["timeout"] = self.timeout return super().send(request, **kwargs)
/requests_session_plus-1.0.4-py3-none-any.whl/requests_session_plus/__init__.py
0.921741
0.20268
__init__.py
pypi
from requests.utils import super_len from .multipart.encoder import CustomBytesIO, encode_with class StreamingIterator(object): """ This class provides a way of allowing iterators with a known size to be streamed instead of chunked. In requests, if you pass in an iterator it assumes you want to use chunked transfer-encoding to upload the data, which not all servers support well. Additionally, you may want to set the content-length yourself to avoid this but that will not work. The only way to preempt requests using a chunked transfer-encoding and forcing it to stream the uploads is to mimic a very specific interace. Instead of having to know these details you can instead just use this class. You simply provide the size and iterator and pass the instance of StreamingIterator to requests via the data parameter like so: .. code-block:: python from requests_toolbelt import StreamingIterator import requests # Let iterator be some generator that you already have and size be # the size of the data produced by the iterator r = requests.post(url, data=StreamingIterator(size, iterator)) You can also pass file-like objects to :py:class:`StreamingIterator` in case requests can't determize the filesize itself. This is the case with streaming file objects like ``stdin`` or any sockets. Wrapping e.g. files that are on disk with ``StreamingIterator`` is unnecessary, because requests can determine the filesize itself. Naturally, you should also set the `Content-Type` of your upload appropriately because the toolbelt will not attempt to guess that for you. """ def __init__(self, size, iterator, encoding='utf-8'): #: The expected size of the upload self.size = int(size) if self.size < 0: raise ValueError( 'The size of the upload must be a positive integer' ) #: Attribute that requests will check to determine the length of the #: body. 
See bug #80 for more details self.len = self.size #: Encoding the input data is using self.encoding = encoding #: The iterator used to generate the upload data self.iterator = iterator if hasattr(iterator, 'read'): self._file = iterator else: self._file = _IteratorAsBinaryFile(iterator, encoding) def read(self, size=-1): return encode_with(self._file.read(size), self.encoding) class _IteratorAsBinaryFile(object): def __init__(self, iterator, encoding='utf-8'): #: The iterator used to generate the upload data self.iterator = iterator #: Encoding the iterator is using self.encoding = encoding # The buffer we use to provide the correct number of bytes requested # during a read self._buffer = CustomBytesIO() def _get_bytes(self): try: return encode_with(next(self.iterator), self.encoding) except StopIteration: return b'' def _load_bytes(self, size): self._buffer.smart_truncate() amount_to_load = size - super_len(self._buffer) bytes_to_append = True while amount_to_load > 0 and bytes_to_append: bytes_to_append = self._get_bytes() amount_to_load -= self._buffer.append(bytes_to_append) def read(self, size=-1): size = int(size) if size == -1: return b''.join(self.iterator) self._load_bytes(size) return self._buffer.read(size)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/streaming_iterator.py
0.830525
0.437944
streaming_iterator.py
pypi
import requests from ._compat import urljoin class BaseUrlSession(requests.Session): """A Session with a URL that all requests will use as a base. Let's start by looking at a few examples: .. code-block:: python >>> from requests_toolbelt import sessions >>> s = sessions.BaseUrlSession( ... base_url='https://example.com/resource/') >>> r = s.get('sub-resource/', params={'foo': 'bar'}) >>> print(r.request.url) https://example.com/resource/sub-resource/?foo=bar Our call to the ``get`` method will make a request to the URL passed in when we created the Session and the partial resource name we provide. We implement this by overriding the ``request`` method of the Session. Likewise, we override the ``prepare_request`` method so you can construct a PreparedRequest in the same way: .. code-block:: python >>> from requests import Request >>> from requests_toolbelt import sessions >>> s = sessions.BaseUrlSession( ... base_url='https://example.com/resource/') >>> request = Request(method='GET', url='sub-resource/') >>> prepared_request = s.prepare_request(request) >>> r = s.send(prepared_request) >>> print(r.request.url) https://example.com/resource/sub-resource .. note:: The base URL that you provide and the path you provide are **very** important. Let's look at another *similar* example .. code-block:: python >>> from requests_toolbelt import sessions >>> s = sessions.BaseUrlSession( ... base_url='https://example.com/resource/') >>> r = s.get('/sub-resource/', params={'foo': 'bar'}) >>> print(r.request.url) https://example.com/sub-resource/?foo=bar The key difference here is that we called ``get`` with ``/sub-resource/``, i.e., there was a leading ``/``. This changes how we create the URL because we rely on :mod:`urllib.parse.urljoin`. To override how we generate the URL, sub-class this method and override the ``create_url`` method. 
Based on implementation from https://github.com/kennethreitz/requests/issues/2554#issuecomment-109341010 """ base_url = None def __init__(self, base_url=None): if base_url: self.base_url = base_url super(BaseUrlSession, self).__init__() def request(self, method, url, *args, **kwargs): """Send the request after generating the complete URL.""" url = self.create_url(url) return super(BaseUrlSession, self).request( method, url, *args, **kwargs ) def prepare_request(self, request, *args, **kwargs): """Prepare the request after generating the complete URL.""" request.url = self.create_url(request.url) return super(BaseUrlSession, self).prepare_request( request, *args, **kwargs ) def create_url(self, url): """Create the URL based off this partial path.""" return urljoin(self.base_url, url)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/sessions.py
0.810629
0.319785
sessions.py
pypi
from requests.auth import AuthBase, HTTPBasicAuth from requests.compat import urlparse, urlunparse class AuthHandler(AuthBase): """ The ``AuthHandler`` object takes a dictionary of domains paired with authentication strategies and will use this to determine which credentials to use when making a request. For example, you could do the following: .. code-block:: python from requests import HTTPDigestAuth from requests_toolbelt.auth.handler import AuthHandler import requests auth = AuthHandler({ 'https://api.github.com': ('sigmavirus24', 'fakepassword'), 'https://example.com': HTTPDigestAuth('username', 'password') }) r = requests.get('https://api.github.com/user', auth=auth) # => <Response [200]> r = requests.get('https://example.com/some/path', auth=auth) # => <Response [200]> s = requests.Session() s.auth = auth r = s.get('https://api.github.com/user') # => <Response [200]> .. warning:: :class:`requests.auth.HTTPDigestAuth` is not yet thread-safe. If you use :class:`AuthHandler` across multiple threads you should instantiate a new AuthHandler for each thread with a new HTTPDigestAuth instance for each thread. """ def __init__(self, strategies): self.strategies = dict(strategies) self._make_uniform() def __call__(self, request): auth = self.get_strategy_for(request.url) return auth(request) def __repr__(self): return '<AuthHandler({!r})>'.format(self.strategies) def _make_uniform(self): existing_strategies = list(self.strategies.items()) self.strategies = {} for (k, v) in existing_strategies: self.add_strategy(k, v) @staticmethod def _key_from_url(url): parsed = urlparse(url) return urlunparse((parsed.scheme.lower(), parsed.netloc.lower(), '', '', '', '')) def add_strategy(self, domain, strategy): """Add a new domain and authentication strategy. :param str domain: The domain you wish to match against. For example: ``'https://api.github.com'`` :param str strategy: The authentication strategy you wish to use for that domain. 
For example: ``('username', 'password')`` or ``requests.HTTPDigestAuth('username', 'password')`` .. code-block:: python a = AuthHandler({}) a.add_strategy('https://api.github.com', ('username', 'password')) """ # Turn tuples into Basic Authentication objects if isinstance(strategy, tuple): strategy = HTTPBasicAuth(*strategy) key = self._key_from_url(domain) self.strategies[key] = strategy def get_strategy_for(self, url): """Retrieve the authentication strategy for a specified URL. :param str url: The full URL you will be making a request against. For example, ``'https://api.github.com/user'`` :returns: Callable that adds authentication to a request. .. code-block:: python import requests a = AuthHandler({'example.com', ('foo', 'bar')}) strategy = a.get_strategy_for('http://example.com/example') assert isinstance(strategy, requests.auth.HTTPBasicAuth) """ key = self._key_from_url(url) return self.strategies.get(key, NullAuthStrategy()) def remove_strategy(self, domain): """Remove the domain and strategy from the collection of strategies. :param str domain: The domain you wish remove. For example, ``'https://api.github.com'``. .. code-block:: python a = AuthHandler({'example.com', ('foo', 'bar')}) a.remove_strategy('example.com') assert a.strategies == {} """ key = self._key_from_url(domain) if key in self.strategies: del self.strategies[key] class NullAuthStrategy(AuthBase): def __repr__(self): return '<NullAuthStrategy>' def __call__(self, r): return r
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/auth/handler.py
0.877293
0.334997
handler.py
pypi
import io _DEFAULT_CHUNKSIZE = 65536 __all__ = ['tee', 'tee_to_file', 'tee_to_bytearray'] def _tee(response, callback, chunksize, decode_content): for chunk in response.raw.stream(amt=chunksize, decode_content=decode_content): callback(chunk) yield chunk def tee(response, fileobject, chunksize=_DEFAULT_CHUNKSIZE, decode_content=None): """Stream the response both to the generator and a file. This will stream the response body while writing the bytes to ``fileobject``. Example usage: .. code-block:: python resp = requests.get(url, stream=True) with open('save_file', 'wb') as save_file: for chunk in tee(resp, save_file): # do stuff with chunk .. code-block:: python import io resp = requests.get(url, stream=True) fileobject = io.BytesIO() for chunk in tee(resp, fileobject): # do stuff with chunk :param response: Response from requests. :type response: requests.Response :param fileobject: Writable file-like object. :type fileobject: file, io.BytesIO :param int chunksize: (optional), Size of chunk to attempt to stream. :param bool decode_content: (optional), If True, this will decode the compressed content of the response. :raises: TypeError if the fileobject wasn't opened with the right mode or isn't a BytesIO object. """ # We will be streaming the raw bytes from over the wire, so we need to # ensure that writing to the fileobject will preserve those bytes. On # Python3, if the user passes an io.StringIO, this will fail, so we need # to check for BytesIO instead. if not ('b' in getattr(fileobject, 'mode', '') or isinstance(fileobject, io.BytesIO)): raise TypeError('tee() will write bytes directly to this fileobject' ', it must be opened with the "b" flag if it is a file' ' or inherit from io.BytesIO.') return _tee(response, fileobject.write, chunksize, decode_content) def tee_to_file(response, filename, chunksize=_DEFAULT_CHUNKSIZE, decode_content=None): """Stream the response both to the generator and a file. 
This will open a file named ``filename`` and stream the response body while writing the bytes to the opened file object. Example usage: .. code-block:: python resp = requests.get(url, stream=True) for chunk in tee_to_file(resp, 'save_file'): # do stuff with chunk :param response: Response from requests. :type response: requests.Response :param str filename: Name of file in which we write the response content. :param int chunksize: (optional), Size of chunk to attempt to stream. :param bool decode_content: (optional), If True, this will decode the compressed content of the response. """ with open(filename, 'wb') as fd: for chunk in tee(response, fd, chunksize, decode_content): yield chunk def tee_to_bytearray(response, bytearr, chunksize=_DEFAULT_CHUNKSIZE, decode_content=None): """Stream the response both to the generator and a bytearray. This will stream the response provided to the function, add them to the provided :class:`bytearray` and yield them to the user. .. note:: This uses the :meth:`bytearray.extend` by default instead of passing the bytearray into the ``readinto`` method. Example usage: .. code-block:: python b = bytearray() resp = requests.get(url, stream=True) for chunk in tee_to_bytearray(resp, b): # do stuff with chunk :param response: Response from requests. :type response: requests.Response :param bytearray bytearr: Array to add the streamed bytes to. :param int chunksize: (optional), Size of chunk to attempt to stream. :param bool decode_content: (optional), If True, this will decode the compressed content of the response. """ if not isinstance(bytearr, bytearray): raise TypeError('tee_to_bytearray() expects bytearr to be a ' 'bytearray') return _tee(response, bytearr.extend, chunksize, decode_content)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/downloadutils/tee.py
0.760917
0.282017
tee.py
pypi
"""Utilities for dealing with streamed requests.""" import os.path import re from .. import exceptions as exc # Regular expressions stolen from werkzeug/http.py # cd2c97bb0a076da2322f11adce0b2731f9193396 L62-L64 _QUOTED_STRING_RE = r'"[^"\\]*(?:\\.[^"\\]*)*"' _OPTION_HEADER_PIECE_RE = re.compile( r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' % (_QUOTED_STRING_RE, _QUOTED_STRING_RE) ) _DEFAULT_CHUNKSIZE = 512 def _get_filename(content_disposition): for match in _OPTION_HEADER_PIECE_RE.finditer(content_disposition): k, v = match.groups() if k == 'filename': # ignore any directory paths in the filename return os.path.split(v)[1] return None def get_download_file_path(response, path): """ Given a response and a path, return a file path for a download. If a ``path`` parameter is a directory, this function will parse the ``Content-Disposition`` header on the response to determine the name of the file as reported by the server, and return a file path in the specified directory. If ``path`` is empty or None, this function will return a path relative to the process' current working directory. If path is a full file path, return it. :param response: A Response object from requests :type response: requests.models.Response :param str path: Directory or file path. 
:returns: full file path to download as :rtype: str :raises: :class:`requests_toolbelt.exceptions.StreamingError` """ path_is_dir = path and os.path.isdir(path) if path and not path_is_dir: # fully qualified file path filepath = path else: response_filename = _get_filename( response.headers.get('content-disposition', '') ) if not response_filename: raise exc.StreamingError('No filename given to stream response to') if path_is_dir: # directory to download to filepath = os.path.join(path, response_filename) else: # fallback to downloading to current working directory filepath = response_filename return filepath def stream_response_to_file(response, path=None, chunksize=_DEFAULT_CHUNKSIZE): """Stream a response body to the specified file. Either use the ``path`` provided or use the name provided in the ``Content-Disposition`` header. .. warning:: If you pass this function an open file-like object as the ``path`` parameter, the function will not close that file for you. .. warning:: This function will not automatically close the response object passed in as the ``response`` parameter. If a ``path`` parameter is a directory, this function will parse the ``Content-Disposition`` header on the response to determine the name of the file as reported by the server, and return a file path in the specified directory. If no ``path`` parameter is supplied, this function will default to the process' current working directory. .. code-block:: python import requests from requests_toolbelt import exceptions from requests_toolbelt.downloadutils import stream r = requests.get(url, stream=True) try: filename = stream.stream_response_to_file(r) except exceptions.StreamingError as e: # The toolbelt could not find the filename in the # Content-Disposition print(e.message) You can also specify the filename as a string. This will be passed to the built-in :func:`open` and we will read the content into the file. .. 
code-block:: python import requests from requests_toolbelt.downloadutils import stream r = requests.get(url, stream=True) filename = stream.stream_response_to_file(r, path='myfile') If the calculated download file path already exists, this function will raise a StreamingError. Instead, if you want to manage the file object yourself, you need to provide either a :class:`io.BytesIO` object or a file opened with the `'b'` flag. See the two examples below for more details. .. code-block:: python import requests from requests_toolbelt.downloadutils import stream with open('myfile', 'wb') as fd: r = requests.get(url, stream=True) filename = stream.stream_response_to_file(r, path=fd) print('{} saved to {}'.format(url, filename)) .. code-block:: python import io import requests from requests_toolbelt.downloadutils import stream b = io.BytesIO() r = requests.get(url, stream=True) filename = stream.stream_response_to_file(r, path=b) assert filename is None :param response: A Response object from requests :type response: requests.models.Response :param path: *(optional)*, Either a string with the path to the location to save the response content, or a file-like object expecting bytes. :type path: :class:`str`, or object with a :meth:`write` :param int chunksize: (optional), Size of chunk to attempt to stream (default 512B). :returns: The name of the file, if one can be determined, else None :rtype: str :raises: :class:`requests_toolbelt.exceptions.StreamingError` """ pre_opened = False fd = None filename = None if path and callable(getattr(path, 'write', None)): pre_opened = True fd = path filename = getattr(fd, 'name', None) else: filename = get_download_file_path(response, path) if os.path.exists(filename): raise exc.StreamingError("File already exists: %s" % filename) fd = open(filename, 'wb') for chunk in response.iter_content(chunk_size=chunksize): fd.write(chunk) if not pre_opened: fd.close() return filename
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/downloadutils/stream.py
0.711932
0.291914
stream.py
pypi
import multiprocessing import requests from . import thread from .._compat import queue class Pool(object): """Pool that manages the threads containing sessions. :param queue: The queue you're expected to use to which you should add items. :type queue: queue.Queue :param initializer: Function used to initialize an instance of ``session``. :type initializer: collections.Callable :param auth_generator: Function used to generate new auth credentials for the session. :type auth_generator: collections.Callable :param int num_process: Number of threads to create. :param session: :type session: requests.Session """ def __init__(self, job_queue, initializer=None, auth_generator=None, num_processes=None, session=requests.Session): if num_processes is None: num_processes = multiprocessing.cpu_count() or 1 if num_processes < 1: raise ValueError("Number of processes should at least be 1.") self._job_queue = job_queue self._response_queue = queue.Queue() self._exc_queue = queue.Queue() self._processes = num_processes self._initializer = initializer or _identity self._auth = auth_generator or _identity self._session = session self._pool = [ thread.SessionThread(self._new_session(), self._job_queue, self._response_queue, self._exc_queue) for _ in range(self._processes) ] def _new_session(self): return self._auth(self._initializer(self._session())) @classmethod def from_exceptions(cls, exceptions, **kwargs): r"""Create a :class:`~Pool` from an :class:`~ThreadException`\ s. Provided an iterable that provides :class:`~ThreadException` objects, this classmethod will generate a new pool to retry the requests that caused the exceptions. :param exceptions: Iterable that returns :class:`~ThreadException` :type exceptions: iterable :param kwargs: Keyword arguments passed to the :class:`~Pool` initializer. :returns: An initialized :class:`~Pool` object. 
:rtype: :class:`~Pool` """ job_queue = queue.Queue() for exc in exceptions: job_queue.put(exc.request_kwargs) return cls(job_queue=job_queue, **kwargs) @classmethod def from_urls(cls, urls, request_kwargs=None, **kwargs): """Create a :class:`~Pool` from an iterable of URLs. :param urls: Iterable that returns URLs with which we create a pool. :type urls: iterable :param dict request_kwargs: Dictionary of other keyword arguments to provide to the request method. :param kwargs: Keyword arguments passed to the :class:`~Pool` initializer. :returns: An initialized :class:`~Pool` object. :rtype: :class:`~Pool` """ request_dict = {'method': 'GET'} request_dict.update(request_kwargs or {}) job_queue = queue.Queue() for url in urls: job = request_dict.copy() job.update({'url': url}) job_queue.put(job) return cls(job_queue=job_queue, **kwargs) def exceptions(self): """Iterate over all the exceptions in the pool. :returns: Generator of :class:`~ThreadException` """ while True: exc = self.get_exception() if exc is None: break yield exc def get_exception(self): """Get an exception from the pool. :rtype: :class:`~ThreadException` """ try: (request, exc) = self._exc_queue.get_nowait() except queue.Empty: return None else: return ThreadException(request, exc) def get_response(self): """Get a response from the pool. :rtype: :class:`~ThreadResponse` """ try: (request, response) = self._response_queue.get_nowait() except queue.Empty: return None else: return ThreadResponse(request, response) def responses(self): """Iterate over all the responses in the pool. 
:returns: Generator of :class:`~ThreadResponse` """ while True: resp = self.get_response() if resp is None: break yield resp def join_all(self): """Join all the threads to the master thread.""" for session_thread in self._pool: session_thread.join() class ThreadProxy(object): proxied_attr = None def __getattr__(self, attr): """Proxy attribute accesses to the proxied object.""" get = object.__getattribute__ if attr not in self.attrs: response = get(self, self.proxied_attr) return getattr(response, attr) else: return get(self, attr) class ThreadResponse(ThreadProxy): """A wrapper around a requests Response object. This will proxy most attribute access actions to the Response object. For example, if you wanted the parsed JSON from the response, you might do: .. code-block:: python thread_response = pool.get_response() json = thread_response.json() """ proxied_attr = 'response' attrs = frozenset(['request_kwargs', 'response']) def __init__(self, request_kwargs, response): #: The original keyword arguments provided to the queue self.request_kwargs = request_kwargs #: The wrapped response self.response = response class ThreadException(ThreadProxy): """A wrapper around an exception raised during a request. This will proxy most attribute access actions to the exception object. For example, if you wanted the message from the exception, you might do: .. code-block:: python thread_exc = pool.get_exception() msg = thread_exc.message """ proxied_attr = 'exception' attrs = frozenset(['request_kwargs', 'exception']) def __init__(self, request_kwargs, exception): #: The original keyword arguments provided to the queue self.request_kwargs = request_kwargs #: The captured and wrapped exception self.exception = exception def _identity(session_obj): return session_obj __all__ = ['ThreadException', 'ThreadResponse', 'Pool']
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/threaded/pool.py
0.840848
0.266202
pool.py
pypi
import requests
from requests.adapters import HTTPAdapter

from .._compat import poolmanager


class SSLAdapter(HTTPAdapter):
    """
    A HTTPS Adapter for Python Requests that allows the choice of the SSL/TLS
    version negotiated by Requests. This can be used either to enforce the
    choice of high-security TLS versions (where supported), or to work around
    misbehaving servers that fail to correctly negotiate the default TLS
    version being offered.

    Example usage:

        >>> import requests
        >>> import ssl
        >>> from requests_toolbelt import SSLAdapter
        >>> s = requests.Session()
        >>> s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))

    You can replace the chosen protocol with any that are available in the
    default Python SSL module. All subsequent requests that match the adapter
    prefix will use the chosen SSL version instead of the default.

    This adapter will also attempt to change the SSL/TLS version negotiated by
    Requests when using a proxy. However, this may not always be possible:
    prior to Requests v2.4.0 the adapter did not have access to the proxy
    setup code. In earlier versions of Requests, this adapter will not
    function properly when used with proxies.
    """

    # Include ssl_version in the attributes pickled/copied with the adapter.
    __attrs__ = HTTPAdapter.__attrs__ + ['ssl_version']

    def __init__(self, ssl_version=None, **kwargs):
        """Create the adapter.

        :param ssl_version: an ``ssl`` protocol constant (e.g.
            ``ssl.PROTOCOL_TLSv1``) forwarded to urllib3; ``None`` keeps
            urllib3's default.
        :param kwargs: passed through to :class:`requests.adapters.HTTPAdapter`.
        """
        self.ssl_version = ssl_version

        super(SSLAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False,
                         **pool_kwargs):
        # Mirror HTTPAdapter.init_poolmanager's signature and forward any
        # extra pool keyword arguments instead of silently dropping them,
        # so this adapter composes with subclasses and newer requests
        # releases; the pinned ssl_version always wins.
        pool_kwargs['ssl_version'] = self.ssl_version
        self.poolmanager = poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            **pool_kwargs)

    if requests.__build__ >= 0x020400:
        # Earlier versions of requests either don't have this method or,
        # worse, don't allow passing arbitrary keyword arguments. As a
        # result, only conditionally define this method.
        def proxy_manager_for(self, *args, **kwargs):
            # Pin the SSL version on proxied connections too.
            kwargs['ssl_version'] = self.ssl_version
            return super(SSLAdapter, self).proxy_manager_for(*args, **kwargs)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/adapters/ssl.py
0.725551
0.197328
ssl.py
pypi
from OpenSSL.crypto import PKey, X509
from cryptography import x509
from cryptography.hazmat.primitives.serialization import (load_pem_private_key,
                                                          load_der_private_key)
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.backends import default_backend

from datetime import datetime

from requests.adapters import HTTPAdapter
import requests

from .. import exceptions as exc

"""
importing the protocol constants from _ssl instead of ssl because only the
constants are needed and to handle issues caused by importing from ssl on the
2.7.x line.
"""
try:
    from _ssl import PROTOCOL_TLS as PROTOCOL
except ImportError:
    from _ssl import PROTOCOL_SSLv23 as PROTOCOL

# Populated lazily by X509Adapter._import_pyopensslcontext(); stays None when
# the installed requests/urllib3 combination does not provide it.
PyOpenSSLContext = None


class X509Adapter(HTTPAdapter):
    r"""Adapter for use with X.509 certificates.

    Provides an interface for Requests sessions to contact HTTPS urls and
    authenticate with an X.509 cert by implementing the Transport Adapter
    interface. This class will need to be manually instantiated and mounted
    to the session

    :param pool_connections: The number of urllib3 connection pools to
           cache.
    :param pool_maxsize: The maximum number of connections to save in the
            pool.
    :param max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups,
        socket connections and connection timeouts, never to requests where
        data has made it to the server. By default, Requests does not retry
        failed connections. If you need granular control over the
        conditions under which we retry a request, import urllib3's
        ``Retry`` class and pass that instead.
    :param pool_block: Whether the connection pool should block for
            connections.

    :param bytes cert_bytes:
        bytes object containing contents of a cryptography.x509Certificate
        object using the encoding specified by the ``encoding`` parameter.
    :param bytes pk_bytes:
        bytes object containing contents of a object that implements
        ``cryptography.hazmat.primitives.serialization.PrivateFormat``
        using the encoding specified by the ``encoding`` parameter.
    :param password:
        string or utf8 encoded bytes containing the passphrase used for the
        private key. None if unencrypted. Defaults to None.
    :param encoding:
        Enumeration detailing the encoding method used on the ``cert_bytes``
        parameter. Can be either PEM or DER. Defaults to PEM.
    :type encoding:
        :class: `cryptography.hazmat.primitives.serialization.Encoding`

    Usage::

      >>> import requests
      >>> from requests_toolbelt.adapters.x509 import X509Adapter
      >>> s = requests.Session()
      >>> a = X509Adapter(max_retries=3,
              cert_bytes=b'...', pk_bytes=b'...', encoding='...')
      >>> s.mount('https://', a)
    """

    def __init__(self, *args, **kwargs):
        # Resolve PyOpenSSLContext first, then fail fast if the installed
        # requests/urllib3 stack cannot supply it.
        self._import_pyopensslcontext()
        self._check_version()
        cert_bytes = kwargs.pop('cert_bytes', None)
        pk_bytes = kwargs.pop('pk_bytes', None)
        password = kwargs.pop('password', None)
        encoding = kwargs.pop('encoding', Encoding.PEM)

        password_bytes = None

        if cert_bytes is None or not isinstance(cert_bytes, bytes):
            raise ValueError('Invalid cert content provided. '
                             'You must provide an X.509 cert '
                             'formatted as a byte array.')
        if pk_bytes is None or not isinstance(pk_bytes, bytes):
            raise ValueError('Invalid private key content provided. '
                             'You must provide a private key '
                             'formatted as a byte array.')

        if isinstance(password, bytes):
            password_bytes = password
        elif password:
            # Text passphrases are encoded to the utf8 bytes the backends
            # expect.
            password_bytes = password.encode('utf8')

        self.ssl_context = create_ssl_context(cert_bytes, pk_bytes,
                                              password_bytes, encoding)

        super(X509Adapter, self).__init__(*args, **kwargs)

    def init_poolmanager(self, *args, **kwargs):
        # Inject the client-cert SSL context into every connection pool.
        if self.ssl_context:
            kwargs['ssl_context'] = self.ssl_context
        return super(X509Adapter, self).init_poolmanager(*args, **kwargs)

    def proxy_manager_for(self, *args, **kwargs):
        # Inject the client-cert SSL context for proxied connections too.
        if self.ssl_context:
            kwargs['ssl_context'] = self.ssl_context
        return super(X509Adapter, self).proxy_manager_for(*args, **kwargs)

    def _import_pyopensslcontext(self):
        # The `global` declaration makes the `from ... import` below rebind
        # the module-level PyOpenSSLContext name.
        global PyOpenSSLContext

        if requests.__build__ < 0x021200:
            PyOpenSSLContext = None
        else:
            # Requests vendored urllib3 in older releases; fall back to the
            # standalone urllib3 package on newer ones.
            try:
                from requests.packages.urllib3.contrib.pyopenssl \
                    import PyOpenSSLContext
            except ImportError:
                try:
                    from urllib3.contrib.pyopenssl import PyOpenSSLContext
                except ImportError:
                    PyOpenSSLContext = None

    def _check_version(self):
        """Raise :class:`exc.VersionMismatchError` if PyOpenSSLContext is
        unavailable."""
        if PyOpenSSLContext is None:
            raise exc.VersionMismatchError(
                "The X509Adapter requires at least Requests 2.12.0 to be "
                "installed. Version {} was found instead.".format(
                    requests.__version__
                )
            )


def check_cert_dates(cert):
    """Verify that the supplied client cert is not invalid.

    :raises ValueError: if the certificate is expired or not yet valid.
    """
    # NOTE(review): naive-UTC comparison; assumes the cert's validity
    # attributes are naive UTC datetimes as returned by cryptography.
    now = datetime.utcnow()
    if cert.not_valid_after < now or cert.not_valid_before > now:
        raise ValueError('Client certificate expired: Not After: '
                         '{:%Y-%m-%d %H:%M:%SZ} '
                         'Not Before: {:%Y-%m-%d %H:%M:%SZ}'
                         .format(cert.not_valid_after, cert.not_valid_before))


# NOTE(review): the parameter name 'cert_byes' is a typo for 'cert_bytes';
# it is kept as-is for backward compatibility with keyword callers.
def create_ssl_context(cert_byes, pk_bytes, password=None,
                       encoding=Encoding.PEM):
    """Create an SSL Context with the supplied cert/password.

    :param cert_byes: array of bytes containing the cert encoded
           using the method supplied in the ``encoding`` parameter
    :param pk_bytes: array of bytes containing the private key encoded
           using the method supplied in the ``encoding`` parameter
    :param password: array of bytes containing the passphrase to be used
           with the supplied private key. None if unencrypted.
           Defaults to None.
    :param encoding: ``cryptography.hazmat.primitives.serialization.Encoding``
           details the encoding method used on the ``cert_byes`` and
           ``pk_bytes`` parameters. Can be either PEM or DER.
           Defaults to PEM.
    :raises ValueError: on an unknown encoding, unparseable cert/key, or an
           expired certificate.
    """
    backend = default_backend()
    cert = None
    key = None
    if encoding == Encoding.PEM:
        cert = x509.load_pem_x509_certificate(cert_byes, backend)
        key = load_pem_private_key(pk_bytes, password, backend)
    elif encoding == Encoding.DER:
        cert = x509.load_der_x509_certificate(cert_byes, backend)
        key = load_der_private_key(pk_bytes, password, backend)
    else:
        raise ValueError('Invalid encoding provided: Must be PEM or DER')

    if not (cert and key):
        raise ValueError('Cert and key could not be parsed from '
                         'provided data')
    check_cert_dates(cert)
    # Build a PyOpenSSL-backed context and attach the cert/key pair via the
    # underlying OpenSSL context object.
    ssl_context = PyOpenSSLContext(PROTOCOL)
    ssl_context._ctx.use_certificate(X509.from_cryptography(cert))
    ssl_context._ctx.use_privatekey(PKey.from_cryptography_key(key))
    return ssl_context
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/adapters/x509.py
0.756627
0.23518
x509.py
pypi
from requests.adapters import HTTPAdapter

from .._compat import poolmanager, basestring


class SourceAddressAdapter(HTTPAdapter):
    """
    A Source Address Adapter for Python Requests that enables you to choose
    the local address to bind to. This allows you to send your HTTP requests
    from a specific interface and IP address.

    Two address formats are accepted. The first is a string: this will set
    the local IP address to the address given in the string, and will also
    choose a semi-random high port for the local port number.

    The second is a two-tuple of the form (ip address, port): for example,
    ``('10.10.10.10', 8999)``. This will set the local IP address to the
    first element, and the local port to the second element. If ``0`` is
    used as the port number, a semi-random high port will be selected.

    .. warning:: Setting an explicit local port can have negative
                 interactions with connection-pooling in Requests: in
                 particular, it risks the possibility of getting "Address in
                 use" errors. The string-only argument is generally
                 preferred to the tuple-form.

    Example usage:

    .. code-block:: python

        import requests
        from requests_toolbelt.adapters.source import SourceAddressAdapter

        s = requests.Session()
        s.mount('http://', SourceAddressAdapter('10.10.10.10'))
        s.mount('https://', SourceAddressAdapter(('10.10.10.10', 8999)))
    """
    def __init__(self, source_address, **kwargs):
        if isinstance(source_address, basestring):
            # Bare IP string: port 0 lets the OS pick an ephemeral port.
            self.source_address = (source_address, 0)
        elif isinstance(source_address, tuple):
            self.source_address = source_address
        else:
            raise TypeError(
                "source_address must be IP address string or (ip, port) tuple"
            )

        super(SourceAddressAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False,
                         **pool_kwargs):
        # Mirror HTTPAdapter.init_poolmanager's signature and forward any
        # extra pool keyword arguments instead of silently dropping them;
        # the configured source_address always wins.
        pool_kwargs['source_address'] = self.source_address
        self.poolmanager = poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            **pool_kwargs)

    def proxy_manager_for(self, *args, **kwargs):
        # Bind proxied connections to the same local address.
        kwargs['source_address'] = self.source_address
        return super(SourceAddressAdapter, self).proxy_manager_for(
            *args, **kwargs)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/adapters/source.py
0.85223
0.336386
source.py
pypi
"""The implementation of the SocketOptionsAdapter.""" import socket import warnings import sys import requests from requests import adapters from .._compat import connection from .._compat import poolmanager from .. import exceptions as exc class SocketOptionsAdapter(adapters.HTTPAdapter): """An adapter for requests that allows users to specify socket options. Since version 2.4.0 of requests, it is possible to specify a custom list of socket options that need to be set before establishing the connection. Example usage:: >>> import socket >>> import requests >>> from requests_toolbelt.adapters import socket_options >>> s = requests.Session() >>> opts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)] >>> adapter = socket_options.SocketOptionsAdapter(socket_options=opts) >>> s.mount('http://', adapter) You can also take advantage of the list of default options on this class to keep using the original options in addition to your custom options. In that case, ``opts`` might look like:: >>> opts = socket_options.SocketOptionsAdapter.default_options + opts """ if connection is not None: default_options = getattr( connection.HTTPConnection, 'default_socket_options', [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] ) else: default_options = [] warnings.warn(exc.RequestsVersionTooOld, "This version of Requests is only compatible with a " "version of urllib3 which is too old to support " "setting options on a socket. 
This adapter is " "functionally useless.") def __init__(self, **kwargs): self.socket_options = kwargs.pop('socket_options', self.default_options) super(SocketOptionsAdapter, self).__init__(**kwargs) def init_poolmanager(self, connections, maxsize, block=False): if requests.__build__ >= 0x020400: # NOTE(Ian): Perhaps we should raise a warning self.poolmanager = poolmanager.PoolManager( num_pools=connections, maxsize=maxsize, block=block, socket_options=self.socket_options ) else: super(SocketOptionsAdapter, self).init_poolmanager( connections, maxsize, block ) class TCPKeepAliveAdapter(SocketOptionsAdapter): """An adapter for requests that turns on TCP Keep-Alive by default. The adapter sets 4 socket options: - ``SOL_SOCKET`` ``SO_KEEPALIVE`` - This turns on TCP Keep-Alive - ``IPPROTO_TCP`` ``TCP_KEEPINTVL`` 20 - Sets the keep alive interval - ``IPPROTO_TCP`` ``TCP_KEEPCNT`` 5 - Sets the number of keep alive probes - ``IPPROTO_TCP`` ``TCP_KEEPIDLE`` 60 - Sets the keep alive time if the socket library has the ``TCP_KEEPIDLE`` constant The latter three can be overridden by keyword arguments (respectively): - ``interval`` - ``count`` - ``idle`` You can use this adapter like so:: >>> from requests_toolbelt.adapters import socket_options >>> tcp = socket_options.TCPKeepAliveAdapter(idle=120, interval=10) >>> s = requests.Session() >>> s.mount('http://', tcp) """ def __init__(self, **kwargs): socket_options = kwargs.pop('socket_options', SocketOptionsAdapter.default_options) idle = kwargs.pop('idle', 60) interval = kwargs.pop('interval', 20) count = kwargs.pop('count', 5) socket_options = socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) ] # NOTE(Ian): OSX does not have these constants defined, so we # set them conditionally. 
if getattr(socket, 'TCP_KEEPINTVL', None) is not None: socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)] elif sys.platform == 'darwin': # On OSX, TCP_KEEPALIVE from netinet/tcp.h is not exported # by python's socket module TCP_KEEPALIVE = getattr(socket, 'TCP_KEEPALIVE', 0x10) socket_options += [(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval)] if getattr(socket, 'TCP_KEEPCNT', None) is not None: socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, count)] if getattr(socket, 'TCP_KEEPIDLE', None) is not None: socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)] super(TCPKeepAliveAdapter, self).__init__( socket_options=socket_options, **kwargs )
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/adapters/socket_options.py
0.621196
0.185283
socket_options.py
pypi
"""A collection of functions deprecated in requests.utils.""" import re import sys from requests import utils find_charset = re.compile( br'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I ).findall find_pragma = re.compile( br'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I ).findall find_xml = re.compile( br'^<\?xml.*?encoding=["\']*(.+?)["\'>]' ).findall def get_encodings_from_content(content): """Return encodings from given content string. .. code-block:: python import requests from requests_toolbelt.utils import deprecated r = requests.get(url) encodings = deprecated.get_encodings_from_content(r) :param content: bytestring to extract encodings from :type content: bytes :return: encodings detected in the provided content :rtype: list(str) """ encodings = (find_charset(content) + find_pragma(content) + find_xml(content)) if (3, 0) <= sys.version_info < (4, 0): encodings = [encoding.decode('utf8') for encoding in encodings] return encodings def get_unicode_from_response(response): """Return the requested content back in unicode. This will first attempt to retrieve the encoding from the response headers. If that fails, it will use :func:`requests_toolbelt.utils.deprecated.get_encodings_from_content` to determine encodings from HTML elements. .. code-block:: python import requests from requests_toolbelt.utils import deprecated r = requests.get(url) text = deprecated.get_unicode_from_response(r) :param response: Response object to get unicode content from. 
:type response: requests.models.Response """ tried_encodings = set() # Try charset from content-type encoding = utils.get_encoding_from_headers(response.headers) if encoding: try: return str(response.content, encoding) except UnicodeError: tried_encodings.add(encoding.lower()) encodings = get_encodings_from_content(response.content) for _encoding in encodings: _encoding = _encoding.lower() if _encoding in tried_encodings: continue try: return str(response.content, _encoding) except UnicodeError: tried_encodings.add(_encoding) # Fall back: if encoding: try: return str(response.content, encoding, errors='replace') except TypeError: pass return response.text
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/utils/deprecated.py
0.528047
0.171859
deprecated.py
pypi
import collections
import platform
import sys


def user_agent(name, version, extras=None):
    """Return an internet-friendly user_agent string.

    The majority of this code has been wilfully stolen from the equivalent
    function in Requests.

    :param name: The intended name of the user-agent, e.g. "python-requests".
    :param version: The version of the user-agent, e.g. "0.0.1".
    :param extras: List of two-item tuples that are added to the user-agent
        string.
    :returns: Formatted user-agent string
    :rtype: str
    """
    if extras is None:
        extras = []

    return UserAgentBuilder(
        name, version
    ).include_extras(
        extras
    ).include_implementation(
    ).include_system().build()


class UserAgentBuilder(object):
    """Class to provide a greater level of control than :func:`user_agent`.

    This is used by :func:`user_agent` to build its User-Agent string.

    .. code-block:: python

        user_agent_str = UserAgentBuilder(
                name='requests-toolbelt',
                version='17.4.0',
            ).include_implementation(
            ).include_system(
            ).include_extras([
                ('requests', '2.14.2'),
                ('urllib3', '1.21.2'),
            ]).build()

    """

    # Each piece renders as "<name>/<version>".
    format_string = '%s/%s'

    def __init__(self, name, version):
        """Initialize our builder with the name and version of our user agent.

        :param str name: Name of our user-agent.
        :param str version: The version string for user-agent.
        """
        self._pieces = collections.deque([(name, version)])

    def build(self):
        """Finalize the User-Agent string.

        :returns: Formatted User-Agent string.
        :rtype: str
        """
        return " ".join([self.format_string % piece for piece in self._pieces])

    def include_extras(self, extras):
        """Include extra portions of the User-Agent.

        :param list extras: list of tuples of extra-name and extra-version
        :raises ValueError: if any extra is not a two-item tuple
        :returns: the builder, for chaining
        """
        if any(len(extra) != 2 for extra in extras):
            raise ValueError('Extras should be a sequence of two item tuples.')

        self._pieces.extend(extras)
        return self

    def include_implementation(self):
        """Append the implementation string to the user-agent string.

        This adds the information that you're using CPython 2.7.13 to the
        User-Agent.

        :returns: the builder, for chaining
        """
        self._pieces.append(_implementation_tuple())
        return self

    def include_system(self):
        """Append the information about the Operating System.

        :returns: the builder, for chaining
        """
        self._pieces.append(_platform_tuple())
        return self


def _implementation_tuple():
    """Return the tuple of interpreter name and version.

    Returns a string that provides both the name and the version of the
    Python implementation currently running. For example, on CPython 2.7.5
    it will return "CPython/2.7.5".

    This function works best on CPython and PyPy: in particular, it probably
    doesn't work for Jython or IronPython. Future investigation should be
    done to work out the correct shape of the code for those platforms.
    """
    implementation = platform.python_implementation()

    if implementation == 'CPython':
        implementation_version = platform.python_version()
    elif implementation == 'PyPy':
        implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
                                               sys.pypy_version_info.minor,
                                               sys.pypy_version_info.micro)
        # Append the release level (e.g. "beta0") for non-final PyPy builds.
        if sys.pypy_version_info.releaselevel != 'final':
            implementation_version = ''.join([
                implementation_version, sys.pypy_version_info.releaselevel
            ])
    elif implementation == 'Jython':
        implementation_version = platform.python_version()  # Complete Guess
    elif implementation == 'IronPython':
        implementation_version = platform.python_version()  # Complete Guess
    else:
        implementation_version = 'Unknown'

    return (implementation, implementation_version)


def _implementation_string():
    return "%s/%s" % _implementation_tuple()


def _platform_tuple():
    # platform.system()/release() can raise IOError on some restricted
    # environments; degrade gracefully instead of failing the UA build.
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'
    return (p_system, p_release)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/utils/user_agent.py
0.591369
0.290924
user_agent.py
pypi
import contextlib
import io
import os
from uuid import uuid4

import requests

from .._compat import fields


class FileNotSupportedError(Exception):
    """File not supported error."""


class MultipartEncoder(object):

    """

    The ``MultipartEncoder`` object is a generic interface to the engine that
    will create a ``multipart/form-data`` body for you.

    The basic usage is:

    .. code-block:: python

        import requests
        from requests_toolbelt import MultipartEncoder

        encoder = MultipartEncoder({'field': 'value',
                                    'other_field': 'other_value'})
        r = requests.post('https://httpbin.org/post', data=encoder,
                          headers={'Content-Type': encoder.content_type})

    If you do not need to take advantage of streaming the post body, you can
    also do:

    .. code-block:: python

        r = requests.post('https://httpbin.org/post',
                          data=encoder.to_string(),
                          headers={'Content-Type': encoder.content_type})

    If you want the encoder to use a specific order, you can use an
    OrderedDict or more simply, a list of tuples:

    .. code-block:: python

        encoder = MultipartEncoder([('field', 'value'),
                                    ('other_field', 'other_value')])

    .. versionchanged:: 0.4.0

    You can also provide tuples as part values as you would provide them to
    requests' ``files`` parameter.

    .. code-block:: python

        encoder = MultipartEncoder({
            'field': ('file_name', b'{"a": "b"}', 'application/json',
                      {'X-My-Header': 'my-value'})
        })

    .. warning::

        This object will end up directly in :mod:`httplib`. Currently,
        :mod:`httplib` has a hard-coded read size of **8192 bytes**. This
        means that it will loop until the file has been read and your upload
        could take a while. This is **not** a bug in requests. A feature is
        being considered for this object to allow you, the user, to specify
        what size should be returned on a read. If you have opinions on this,
        please weigh in on `this issue`_.

    .. _this issue:
        https://github.com/requests/toolbelt/issues/75
    """

    def __init__(self, fields, boundary=None, encoding='utf-8'):
        #: Boundary value either passed in by the user or created
        self.boundary_value = boundary or uuid4().hex

        # Computed boundary
        self.boundary = '--{}'.format(self.boundary_value)

        #: Encoding of the data being passed in
        self.encoding = encoding

        # Pre-encoded boundary
        self._encoded_boundary = b''.join([
            encode_with(self.boundary, self.encoding),
            encode_with('\r\n', self.encoding)
            ])

        #: Fields provided by the user
        self.fields = fields

        #: Whether or not the encoder is finished
        self.finished = False

        #: Pre-computed parts of the upload
        self.parts = []

        # Pre-computed parts iterator
        self._iter_parts = iter([])

        # The part we're currently working with
        self._current_part = None

        # Cached computation of the body's length
        self._len = None

        # Our buffer
        self._buffer = CustomBytesIO(encoding=encoding)

        # Pre-compute each part's headers
        self._prepare_parts()

        # Load boundary into buffer
        self._write_boundary()

    @property
    def len(self):
        """Length of the multipart/form-data body.

        requests will first attempt to get the length of the body by calling
        ``len(body)`` and then by checking for the ``len`` attribute.

        On 32-bit systems, the ``__len__`` method cannot return anything
        larger than an integer (in C) can hold. If the total size of the body
        is even slightly larger than 4GB users will see an OverflowError. This
        manifested itself in `bug #80`_.

        As such, we now calculate the length lazily as a property.

        .. _bug #80:
            https://github.com/requests/toolbelt/issues/80
        """
        # If _len isn't already calculated, calculate, return, and set it
        return self._len or self._calculate_length()

    def __repr__(self):
        return '<MultipartEncoder: {!r}>'.format(self.fields)

    def _calculate_length(self):
        """
        This uses the parts to calculate the length of the body.

        This returns the calculated length so __len__ can be lazy.
        """
        boundary_len = len(self.boundary)  # Length of --{boundary}
        # boundary length + header length + body length + len('\r\n') * 2
        self._len = sum(
            (boundary_len + total_len(p) + 4) for p in self.parts
            ) + boundary_len + 4
        return self._len

    def _calculate_load_amount(self, read_size):
        """This calculates how many bytes need to be added to the buffer.

        When a consumer reads ``x`` from the buffer, there are two cases to
        satisfy:

            1. Enough data in the buffer to return the requested amount
            2. Not enough data

        This function uses the amount of unread bytes in the buffer and
        determines how much the Encoder has to load before it can return the
        requested amount of bytes.

        :param int read_size: the number of bytes the consumer requests
        :returns: int -- the number of bytes that must be loaded into the
            buffer before the read can be satisfied. This will be strictly
            non-negative
        """
        amount = read_size - total_len(self._buffer)
        return amount if amount > 0 else 0

    def _load(self, amount):
        """Load ``amount`` number of bytes into the buffer."""
        self._buffer.smart_truncate()
        part = self._current_part or self._next_part()
        while amount == -1 or amount > 0:
            written = 0
            if part and not part.bytes_left_to_write():
                written += self._write(b'\r\n')
                written += self._write_boundary()
                part = self._next_part()

            if not part:
                written += self._write_closing_boundary()
                self.finished = True
                break

            written += part.write_to(self._buffer, amount)

            if amount != -1:
                amount -= written

    def _next_part(self):
        try:
            p = self._current_part = next(self._iter_parts)
        except StopIteration:
            p = None
        return p

    def _iter_fields(self):
        _fields = self.fields
        if hasattr(self.fields, 'items'):
            _fields = list(self.fields.items())
        for k, v in _fields:
            file_name = None
            file_type = None
            file_headers = None
            if isinstance(v, (list, tuple)):
                # Tuple values follow requests' ``files`` convention:
                # (name, data[, content_type[, headers]]).
                if len(v) == 2:
                    file_name, file_pointer = v
                elif len(v) == 3:
                    file_name, file_pointer, file_type = v
                else:
                    file_name, file_pointer, file_type, file_headers = v
            else:
                file_pointer = v

            field = fields.RequestField(name=k, data=file_pointer,
                                        filename=file_name,
                                        headers=file_headers)
            field.make_multipart(content_type=file_type)
            yield field

    def _prepare_parts(self):
        """This uses the fields provided by the user and creates Part objects.

        It populates the `parts` attribute and uses that to create a
        generator for iteration.
        """
        enc = self.encoding
        self.parts = [Part.from_field(f, enc) for f in self._iter_fields()]
        self._iter_parts = iter(self.parts)

    def _write(self, bytes_to_write):
        """Write the bytes to the end of the buffer.

        :param bytes bytes_to_write: byte-string (or bytearray) to append to
            the buffer
        :returns: int -- the number of bytes written
        """
        return self._buffer.append(bytes_to_write)

    def _write_boundary(self):
        """Write the boundary to the end of the buffer."""
        return self._write(self._encoded_boundary)

    def _write_closing_boundary(self):
        """Write the bytes necessary to finish a multipart/form-data body."""
        with reset(self._buffer):
            # Overwrite the trailing '\r\n' with '--\r\n' to terminate the
            # body per RFC 2046.
            self._buffer.seek(-2, 2)
            self._buffer.write(b'--\r\n')
        return 2

    def _write_headers(self, headers):
        """Write the current part's headers to the buffer."""
        return self._write(encode_with(headers, self.encoding))

    @property
    def content_type(self):
        return str(
            'multipart/form-data; boundary={}'.format(self.boundary_value)
            )

    def to_string(self):
        """Return the entirety of the data in the encoder.

        .. note::

            This simply reads all of the data it can. If you have started
            streaming or reading data from the encoder, this method will only
            return whatever data is left in the encoder.

        .. note::

            This method affects the internal state of the encoder. Calling
            this method will exhaust the encoder.

        :returns: the multipart message
        :rtype: bytes
        """
        return self.read()

    def read(self, size=-1):
        """Read data from the streaming encoder.

        :param int size: (optional), If provided, ``read`` will return exactly
            that many bytes. If it is not provided, it will return the
            remaining bytes.
        :returns: bytes
        """
        if self.finished:
            return self._buffer.read(size)

        bytes_to_load = size
        if bytes_to_load != -1 and bytes_to_load is not None:
            bytes_to_load = self._calculate_load_amount(int(size))

        self._load(bytes_to_load)
        return self._buffer.read(size)


def IDENTITY(monitor):
    return monitor


class MultipartEncoderMonitor(object):

    """
    An object used to monitor the progress of a :class:`MultipartEncoder`.

    The :class:`MultipartEncoder` should only be responsible for preparing and
    streaming the data. For anyone who wishes to monitor it, they shouldn't be
    using that instance to manage that as well. Using this class, they can
    monitor an encoder and register a callback. The callback receives the
    instance of the monitor.

    To use this monitor, you construct your :class:`MultipartEncoder` as you
    normally would.

    .. code-block:: python

        from requests_toolbelt import (MultipartEncoder,
                                       MultipartEncoderMonitor)
        import requests

        def callback(monitor):
            # Do something with this information
            pass

        m = MultipartEncoder(fields={'field0': 'value0'})
        monitor = MultipartEncoderMonitor(m, callback)
        headers = {'Content-Type': monitor.content_type}
        r = requests.post('https://httpbin.org/post', data=monitor,
                          headers=headers)

    Alternatively, if your use case is very simple, you can use the following
    pattern.

    .. code-block:: python

        from requests_toolbelt import MultipartEncoderMonitor
        import requests

        def callback(monitor):
            # Do something with this information
            pass

        monitor = MultipartEncoderMonitor.from_fields(
            fields={'field0': 'value0'}, callback
            )
        headers = {'Content-Type': monitor.content_type}
        r = requests.post('https://httpbin.org/post', data=monitor,
                          headers=headers)
    """

    def __init__(self, encoder, callback=None):
        #: Instance of the :class:`MultipartEncoder` being monitored
        self.encoder = encoder

        #: Optionally function to call after a read
        self.callback = callback or IDENTITY

        #: Number of bytes already read from the :class:`MultipartEncoder`
        #: instance
        self.bytes_read = 0

        #: Avoid the same problem in bug #80
        self.len = self.encoder.len

    @classmethod
    def from_fields(cls, fields, boundary=None, encoding='utf-8',
                    callback=None):
        encoder = MultipartEncoder(fields, boundary, encoding)
        return cls(encoder, callback)

    @property
    def content_type(self):
        return self.encoder.content_type

    def to_string(self):
        return self.read()

    def read(self, size=-1):
        string = self.encoder.read(size)
        self.bytes_read += len(string)
        self.callback(self)
        return string


def encode_with(string, encoding):
    """Encoding ``string`` with ``encoding`` if necessary.

    :param str string: If string is a bytes object, it will not encode it.
        Otherwise, this function will encode it with the provided encoding.
    :param str encoding: The encoding with which to encode string.
    :returns: encoded bytes object
    """
    if not (string is None or isinstance(string, bytes)):
        return string.encode(encoding)
    return string


def readable_data(data, encoding):
    """Coerce the data to an object with a ``read`` method."""
    if hasattr(data, 'read'):
        return data

    return CustomBytesIO(data, encoding)


def total_len(o):
    # Returns None for objects exposing none of these protocols; callers in
    # this module only pass sized objects.
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            pass
        else:
            return os.fstat(fileno).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())


@contextlib.contextmanager
def reset(buffer):
    """Keep track of the buffer's current position and write to the end.

    This is a context manager meant to be used when adding data to the buffer.
    It eliminates the need for every function to be concerned with the
    position of the cursor in the buffer.
    """
    original_position = buffer.tell()
    buffer.seek(0, 2)
    yield
    buffer.seek(original_position, 0)


def coerce_data(data, encoding):
    """Ensure that every object's __len__ behaves uniformly."""
    if not isinstance(data, CustomBytesIO):
        if hasattr(data, 'getvalue'):
            return CustomBytesIO(data.getvalue(), encoding)

        if hasattr(data, 'fileno'):
            return FileWrapper(data)

        if not hasattr(data, 'read'):
            return CustomBytesIO(data, encoding)

    return data


def to_list(fields):
    if hasattr(fields, 'items'):
        return list(fields.items())
    return list(fields)


class Part(object):
    def __init__(self, headers, body):
        self.headers = headers
        self.body = body
        self.headers_unread = True
        self.len = len(self.headers) + total_len(self.body)

    @classmethod
    def from_field(cls, field, encoding):
        """Create a part from a Request Field generated by urllib3."""
        headers = encode_with(field.render_headers(), encoding)
        body = coerce_data(field.data, encoding)
        return cls(headers, body)

    def bytes_left_to_write(self):
        """Determine if there are bytes left to write.

        :returns: bool -- ``True`` if there are bytes left to write, otherwise
            ``False``
        """
        to_read = 0
        if self.headers_unread:
            to_read += len(self.headers)

        return (to_read + total_len(self.body)) > 0

    def write_to(self, buffer, size):
        """Write the requested amount of bytes to the buffer provided.

        The number of bytes written may exceed size on the first read since we
        load the headers ambitiously.

        :param CustomBytesIO buffer: buffer we want to write bytes to
        :param int size: number of bytes requested to be written to the buffer
        :returns: int -- number of bytes actually written
        """
        written = 0
        if self.headers_unread:
            written += buffer.append(self.headers)
            self.headers_unread = False

        while total_len(self.body) > 0 and (size == -1 or written < size):
            amount_to_read = size
            if size != -1:
                amount_to_read = size - written
            written += buffer.append(self.body.read(amount_to_read))

        return written


class CustomBytesIO(io.BytesIO):
    def __init__(self, buffer=None, encoding='utf-8'):
        buffer = encode_with(buffer, encoding)
        super(CustomBytesIO, self).__init__(buffer)

    def _get_end(self):
        current_pos = self.tell()
        self.seek(0, 2)
        length = self.tell()
        self.seek(current_pos, 0)
        return length

    @property
    def len(self):
        length = self._get_end()
        return length - self.tell()

    def append(self, bytes):
        with reset(self):
            written = self.write(bytes)
        return written

    def smart_truncate(self):
        # Discard already-consumed bytes once at least half the buffer has
        # been read, so the buffer does not grow without bound.
        to_be_read = total_len(self)
        already_read = self._get_end() - to_be_read

        if already_read >= to_be_read:
            old_bytes = self.read()
            self.seek(0, 0)
            self.truncate()
            self.write(old_bytes)
            self.seek(0, 0)  # We want to be at the beginning


class FileWrapper(object):
    def __init__(self, file_object):
        self.fd = file_object

    @property
    def len(self):
        return total_len(self.fd) - self.fd.tell()

    def read(self, length=-1):
        return self.fd.read(length)


class FileFromURLWrapper(object):
    """File from URL wrapper.

    The :class:`FileFromURLWrapper` object gives you the ability to stream
    file from provided URL in chunks by :class:`MultipartEncoder`.
    Provide a stateless solution for streaming file from one server to
    another. You can use the :class:`FileFromURLWrapper` without a session or
    with a session as demonstrated by the examples below:

    .. code-block:: python

        # no session

        import requests
        from requests_toolbelt import MultipartEncoder, FileFromURLWrapper

        url = 'https://httpbin.org/image/png'
        streaming_encoder = MultipartEncoder(
            fields={
                'file': FileFromURLWrapper(url)
            }
        )
        r = requests.post(
            'https://httpbin.org/post', data=streaming_encoder,
            headers={'Content-Type': streaming_encoder.content_type}
        )

    .. code-block:: python

        # using a session

        import requests
        from requests_toolbelt import MultipartEncoder, FileFromURLWrapper

        session = requests.Session()
        url = 'https://httpbin.org/image/png'
        streaming_encoder = MultipartEncoder(
            fields={
                'file': FileFromURLWrapper(url, session=session)
            }
        )
        r = session.post(
            'https://httpbin.org/post', data=streaming_encoder,
            headers={'Content-Type': streaming_encoder.content_type}
        )

    """

    def __init__(self, file_url, session=None):
        self.session = session or requests.Session()
        requested_file = self._request_for_file(file_url)
        self.len = int(requested_file.headers['content-length'])
        self.raw_data = requested_file.raw

    def _request_for_file(self, file_url):
        """Make call for file under provided URL."""
        response = self.session.get(file_url, stream=True)
        content_length = response.headers.get('content-length', None)
        if content_length is None:
            error_msg = (
                "Data from provided URL {url} is not supported. Lack of "
                "content-length Header in requested file response.".format(
                    url=file_url)
            )
            raise FileNotSupportedError(error_msg)
        elif not content_length.isdigit():
            error_msg = (
                "Data from provided URL {url} is not supported. content-length"
                " header value is not a digit.".format(url=file_url)
            )
            raise FileNotSupportedError(error_msg)
        return response

    def read(self, chunk_size):
        """Read file in chunks."""
        chunk_size = chunk_size if chunk_size >= 0 else self.len
        chunk = self.raw_data.read(chunk_size) or b''
        self.len -= len(chunk) if chunk else 0  # left to read
        return chunk
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/multipart/encoder.py
0.744656
0.26709
encoder.py
pypi
import sys
import email.parser

from .encoder import encode_with
from requests.structures import CaseInsensitiveDict


def _split_on_find(content, bound):
    # Split ``content`` around the first occurrence of ``bound``; the
    # separator itself is dropped from both halves. If ``bound`` is absent,
    # find() returns -1 and the split is nonsensical -- callers are expected
    # to have checked for its presence.
    point = content.find(bound)
    return content[:point], content[point + len(bound):]


class ImproperBodyPartContentException(Exception):
    # Raised when a body part lacks the blank line (CR-LF-CR-LF) separating
    # its headers from its content.
    pass


class NonMultipartContentTypeException(Exception):
    # Raised when the payload's Content-Type is not multipart/*.
    pass


def _header_parser(string, encoding):
    # Parse a raw header block into (name, value) pairs, both re-encoded to
    # ``encoding``. The version check keeps Python 2 compatibility: on
    # Python 3 the bytes must be decoded before email.parser can handle them.
    major = sys.version_info[0]
    if major == 3:
        string = string.decode(encoding)
    headers = email.parser.HeaderParser().parsestr(string).items()
    return (
        (encode_with(k, encoding), encode_with(v, encoding))
        for k, v in headers
    )


class BodyPart(object):
    """

    The ``BodyPart`` object is a ``Response``-like interface to an individual
    subpart of a multipart response. It is expected that these will
    generally be created by objects of the ``MultipartDecoder`` class.

    Like ``Response``, there is a ``CaseInsensitiveDict`` object named headers,
    ``content`` to access bytes, ``text`` to access unicode, and ``encoding``
    to access the unicode codec.

    """

    def __init__(self, content, encoding):
        #: Codec name used by :attr:`text` to decode :attr:`content`.
        self.encoding = encoding
        headers = {}
        # Split into header section (if any) and the content
        if b'\r\n\r\n' in content:
            first, self.content = _split_on_find(content, b'\r\n\r\n')
            # An empty header section (part started with the blank line)
            # leaves ``headers`` as the empty dict.
            if first != b'':
                headers = _header_parser(first.lstrip(), encoding)
        else:
            raise ImproperBodyPartContentException(
                'content does not contain CR-LF-CR-LF'
            )
        self.headers = CaseInsensitiveDict(headers)

    @property
    def text(self):
        """Content of the ``BodyPart`` in unicode."""
        return self.content.decode(self.encoding)


class MultipartDecoder(object):
    """

    The ``MultipartDecoder`` object parses the multipart payload of
    a bytestring into a tuple of ``Response``-like ``BodyPart`` objects.

    The basic usage is::

        import requests
        from requests_toolbelt import MultipartDecoder

        response = requests.get(url)
        decoder = MultipartDecoder.from_response(response)
        for part in decoder.parts:
            print(part.headers['content-type'])

    If the multipart content is not from a response, basic usage is::

        from requests_toolbelt import MultipartDecoder

        decoder = MultipartDecoder(content, content_type)
        for part in decoder.parts:
            print(part.headers['content-type'])

    For both these usages, there is an optional ``encoding`` parameter. This is
    a string, which is the name of the unicode codec to use (default is
    ``'utf-8'``).

    """

    def __init__(self, content, content_type, encoding='utf-8'):
        #: Original Content-Type header
        self.content_type = content_type
        #: Response body encoding
        self.encoding = encoding
        #: Parsed parts of the multipart response body
        self.parts = tuple()
        self._find_boundary()
        self._parse_body(content)

    def _find_boundary(self):
        # Pull the boundary parameter out of the Content-Type header.
        # NOTE(review): if no ``boundary=`` parameter is present,
        # ``self.boundary`` is never assigned and ``_parse_body`` will raise
        # AttributeError rather than a decoder-specific error -- confirm
        # whether that is intended.
        ct_info = tuple(x.strip() for x in self.content_type.split(';'))
        mimetype = ct_info[0]
        if mimetype.split('/')[0].lower() != 'multipart':
            raise NonMultipartContentTypeException(
                "Unexpected mimetype in content-type: '{}'".format(mimetype)
            )
        for item in ct_info[1:]:
            attr, value = _split_on_find(
                item,
                '='
            )
            if attr.lower() == 'boundary':
                self.boundary = encode_with(value.strip('"'), self.encoding)

    @staticmethod
    def _fix_first_part(part, boundary_marker):
        # The first part has no leading CR-LF, so the boundary marker is
        # still prefixed to it after the split; strip it off.
        bm_len = len(boundary_marker)
        if boundary_marker == part[:bm_len]:
            return part[bm_len:]
        else:
            return part

    def _parse_body(self, content):
        boundary = b''.join((b'--', self.boundary))

        def body_part(part):
            fixed = MultipartDecoder._fix_first_part(part, boundary)
            return BodyPart(fixed, self.encoding)

        def test_part(part):
            # Filter out the preamble, epilogue, and closing-delimiter
            # fragments produced by the split.
            return (part != b'' and
                    part != b'\r\n' and
                    part[:4] != b'--\r\n' and
                    part != b'--')

        parts = content.split(b''.join((b'\r\n', boundary)))
        self.parts = tuple(body_part(x) for x in parts if test_part(x))

    @classmethod
    def from_response(cls, response, encoding='utf-8'):
        """Build a decoder from a ``requests.Response``.

        NOTE(review): if the response lacks a Content-Type header this passes
        ``None`` through, and ``_find_boundary`` will fail on
        ``None.split`` -- verify callers always have the header.
        """
        content = response.content
        content_type = response.headers.get('content-type', None)
        return cls(content, content_type, encoding)
/requests-toolbelt-1.0.0.tar.gz/requests-toolbelt-1.0.0/requests_toolbelt/multipart/decoder.py
0.635675
0.305905
decoder.py
pypi
from typing import Dict
import toml
from shillelagh.backends.apsw.db import connect
import concurrent.futures


class IOArgument:
    """Credentials and target-sheet URL needed to talk to the Google Sheets API."""

    def __init__(self, gcp_service_account: Dict, sheeturl: str):
        # Parsed GCP service-account key (dict) used to authenticate.
        self.GCP = gcp_service_account
        # Full URL of the Google Sheet to operate on.
        self.sheeturl = sheeturl

    @classmethod
    def instantiate(cls, gcp_service_account: Dict, sheeturl: str):
        """Alternate constructor; simply forwards to ``__init__``."""
        return cls(gcp_service_account, sheeturl)

    def to_dict(self) -> dict:
        """Recursively convert this object to a plain dict.

        Nested attributes exposing ``to_dict`` are converted via that method.
        """
        ret = {}
        for key, value in self.__dict__.items():
            if hasattr(value, 'to_dict'):
                ret[key] = value.to_dict()
            else:
                ret[key] = value
        return ret

    @classmethod
    def fromSecretsTOML(cls, filepath: str = '.secrets.toml'):
        """Build an IOArgument from a secrets TOML file.

        The file must contain a ``gcp_service_account`` table and a
        ``private_gsheets_url`` key (Streamlit secrets layout).
        """
        # Load and parse the TOML file using the `toml` library.
        with open(filepath, "r", encoding="utf-8") as toml_file:
            toml_data = toml.load(toml_file)
        gcp = toml_data['gcp_service_account']
        sheeturl = toml_data['private_gsheets_url']
        return cls.instantiate(gcp, sheeturl)


class GSheetIO:
    """SQL-style query runner over a Google Sheet via shillelagh.

    With ``enable_threading=True``, queries are submitted to a thread pool and
    collected later with :meth:`sync`.
    """

    def __init__(self, io_args: IOArgument = None, enable_threading: bool = False):
        # Fall back to reading .secrets.toml when no args are supplied.
        if io_args is None:
            self.args = IOArgument.fromSecretsTOML()
        else:
            self.args = io_args
        self.threading = enable_threading
        if self.threading:
            # NOTE(review): the executor is entered here and only exited in
            # sync(); after sync() the pool is shut down but self.threading
            # stays True, so a later run_query() would submit to a closed
            # executor -- confirm the intended usage is a single sync() per
            # instance.
            self.executor = concurrent.futures.ThreadPoolExecutor()
            self.executor.__enter__()
            self.running_tasks = []

    @classmethod
    def fromIOArgs(cls, io_args: IOArgument):
        """Alternate constructor from an explicit IOArgument (threading off)."""
        return cls(io_args)

    def __login_to_google__(self):
        """Open an in-memory shillelagh connection authorised with the GCP
        service account and return a fresh cursor."""
        print("\n\n##########################\n\n")
        print("Login to Google API")
        connect_args = {
            "path": ":memory:",
            "adapters": "gsheetsapi",
            "adapter_kwargs": {
                "gsheetsapi": {
                    "service_account_info": {
                        **self.args.GCP
                    }
                }
            }
        }
        conn = connect(**connect_args)
        cursor = conn.cursor()
        print("Login done.")
        return cursor

    def run_query(self, query):
        '''
        Args:
            query: example: 'SELECT * FROM SHEET'. "SHEET" will be replaced by
                the gsheet url internally.

        Returns:
            query results: the executed cursor when threading is disabled,
            otherwise a Future that resolves to the executed cursor.
        '''
        cursor = self.__login_to_google__()
        sheet_url = self.args.sheeturl
        # Substitute the SHEET placeholder with the double-quoted sheet URL.
        query = query.replace('SHEET', f'''"{sheet_url}"''')
        if not self.threading:
            return cursor.execute(query)
        else:
            def lazy(query):
                return cursor.execute(query)
            future = self.executor.submit(lazy, query)
            self.running_tasks.append(future)
            return future

    def sync(self, timeout: int = 10) -> list:
        """Wait for all pending threaded queries and return their results in
        submission order (None for queries that timed out).

        Returns None (implicitly) when threading is disabled.
        """
        if self.threading:
            # Shut the executor down; __exit__ blocks until running futures
            # finish.
            self.executor.__exit__(None, None, None)
            # Store futures and their corresponding index
            futures = {future: idx for idx, future in enumerate(self.running_tasks)}
            # Initialize an empty list of the same length as params
            ret = [None] * len(self.running_tasks)
            for future in concurrent.futures.as_completed(self.running_tasks):
                # Get the index of the completed future
                idx = futures[future]
                # Store the result at the appropriate index in ret
                try:
                    result = future.result(timeout)
                except concurrent.futures.TimeoutError:
                    result = None
                ret[idx] = result
            self.running_tasks.clear()
            return ret

    def get_whole_dataset(self):
        """Fetch every row of the sheet."""
        query = f'SELECT * FROM SHEET'
        dataset = self.run_query(query)
        return dataset

    def add_new_row(self, data: dict):
        """Append one row; dict keys are column names, values the cell values.

        NOTE(review): values are interpolated directly into the SQL string
        with no parameter binding, so quotes in the data will break (or
        inject into) the query -- consider parameterised queries.
        """
        columns_str = ", ".join(data.keys())
        new_values_str = ", ".join([f"\'{str(x)}\'" for x in data.values()])
        query = f'INSERT INTO SHEET ({columns_str}) VALUES ({new_values_str})'
        self.run_query(query)
/requests-toolkit-stable-0.25.1.tar.gz/requests-toolkit-stable-0.25.1/requests_toolkit/gsheet/_io.py
0.498047
0.211254
_io.py
pypi
import asyncio
import random
import threading
from asyncio import Task
from typing import List, Union, Dict, Callable


class BasisLoop:
    """Thin wrapper around an asyncio event loop that tracks created tasks."""

    def __init__(self):
        # Reuse the current event loop when one exists; otherwise create one.
        try:
            self.__loop__ = asyncio.get_event_loop()
        except:
            self.__loop__ = asyncio.new_event_loop()
        # All tasks ever registered on this wrapper, in creation order.
        self.__tasks__ = []

    def get_all_tasks(self):
        """Return every tracked task."""
        return self.__tasks__

    def get_done_tasks(self):
        """Return tracked tasks that have completed."""
        return list(filter(lambda x: x.done(), self.__tasks__))

    def get_cancelled_tasks(self):
        """Return tracked tasks that were cancelled."""
        return list(filter(lambda x: x.cancelled(), self.__tasks__))

    def get_pending_tasks(self):
        """Return tracked tasks that are neither done nor cancelled."""
        return list(filter(lambda x: not x.cancelled() and not x.done(),
                           self.__tasks__))

    def wait_all_done(self):
        """Alias for :meth:`join`."""
        return self.join()

    def join(self):
        """Run the loop until each tracked task completes; return their results
        in registration order."""
        return [self.__loop__.run_until_complete(t) for t in self.__tasks__]

    def clear(self):
        """Forget all tracked tasks (does not cancel them)."""
        self.__tasks__.clear()

    def get_task_statues(self):
        # NOTE(review): relies on the private Task._state attribute -- may
        # break across asyncio versions.
        return [t._state for t in self.__tasks__]

    def sync(self):
        """Alias for :meth:`join`."""
        return self.join()

    @staticmethod
    def __create_coro__(async_func, *args):
        # Call the async function to obtain (but not await) its coroutine.
        return async_func(*args)


class PendingLoop(BasisLoop):
    '''
    example usage:

        async def dumi():
            print(0)
            await asyncio.sleep(1)
            print(1)

        loop = PendingLoop()
        loop.pend_new_task(dumi())  # task not started
        loop.pend_new_task(dumi())  # task not started
        loop.pend_new_task(dumi())  # task not started
        loop.wait_all_done()
    '''

    def __init__(self):
        super().__init__()

    def pend_new_task(self, coro, name=None) -> Task:
        """Schedule ``coro`` on the wrapped loop and track the resulting task.

        :param coro: coroutine object to wrap in a Task.
        :param name: optional task name.
        :returns: the created Task.
        """
        task = self.__loop__.create_task(coro)
        if name is not None:
            task.set_name(name)
        self.__tasks__.append(task)
        return task

    def __pend_dumi_tasks__(self, n_tasks: int) -> List[Task]:
        # Helper used for manual testing: pends n dummy tasks that sleep a
        # random number of seconds.
        async def dumi():
            print('task started')
            t = random.randint(0, 10)
            await asyncio.sleep(t)
            print(f'task finished. Use time {t} seconds.')
        return [self.pend_new_task(dumi()) for _ in range(n_tasks)]


class ThreadPoolLoop(PendingLoop):
    '''
    example usage:

        import random

        async def dumi():
            print('task started')
            t = random.randint(0, 10)
            await asyncio.sleep(t)
            print(f'task finished. Use time {t} seconds.')

        loop = ThreadPoolLoop()
        loop.start_new_task(dumi)  # task started
        loop.start_new_task(dumi)  # task started
        loop.start_new_task(dumi)  # task started
        loop.yield_done(print)
    '''

    def __init__(self):
        super().__init__()

    def start_new_task(self, async_func, *func_args, task_name: str = None):
        """Create the coroutine for ``async_func`` in the default executor,
        wrap it in a tracked Task, and return it.

        NOTE(review): run_in_executor is given ``__create_coro__``, which only
        *creates* the coroutine in the worker thread; the coroutine itself
        still runs on this loop's thread -- confirm this indirection is
        intentional.
        """
        async def afun_ext():
            # Debug wrapper printing the worker-thread identity (only used by
            # the commented-out single-thread verification below).
            print(f"Thread ID: {threading.current_thread().ident}, "
                  f"Thread Name: {threading.current_thread().name}")
            return await async_func(*func_args)

        async def create_task():
            # coro = await self.__loop__.run_in_executor(None, self.__create_coro__, afun_ext)  # verify only one thread is used
            coro = await self.__loop__.run_in_executor(
                None, self.__create_coro__, async_func, *func_args)
            task = self.__loop__.create_task(coro)
            # Attach a display name; a plain attribute is used rather than
            # Task.set_name.
            if task_name is not None:
                task.__name__ = task_name
            else:
                task.__name__ = f'Task {len(self.__tasks__)}'
            self.__tasks__.append(task)
            return task
        return self.__loop__.run_until_complete(create_task())

    async def __yield_done_generator__(self):
        """Async generator that yields completed tasks from a list of tasks."""
        completed = set()
        tasks = self.__tasks__
        while tasks:
            done, tasks = await asyncio.wait(
                tasks, return_when=asyncio.FIRST_COMPLETED)
            for task in done - completed:
                completed.add(task)
                yield task

    def yield_done(self, callback: Union[Callable, Dict]):
        """Block until every tracked task finishes, invoking ``callback`` with
        each result as it completes.

        :param callback: either a single callable, or a dict mapping task
            ``__name__`` to a per-task callable.
        """
        async def wrapper():
            async for t in self.__yield_done_generator__():
                print(f"{t.__name__} finished.")
                if isinstance(callback, dict):
                    callback_ = callback[t.__name__]
                    callback_(t.result())
                else:
                    callback(t.result())
        task = self.__loop__.create_task(wrapper())
        self.__loop__.run_until_complete(task)
/requests-toolkit-stable-0.25.1.tar.gz/requests-toolkit-stable-0.25.1/requests_toolkit/asyncpy/loop.py
0.47658
0.150934
loop.py
pypi
from typing import Union, List


class ChatCompletionConfig:
    """Container for the parameters of an OpenAI Chat Completion request.

    Attribute names mirror the API parameter names so that :meth:`to_dict`
    yields a payload-shaped dict.

    .. note:: Earlier versions stored the sampling temperature under the
        misspelled attribute ``temparature`` (and emitted that key from
        ``to_dict``), which the OpenAI API does not recognise. The attribute
        is now ``temperature``; a read-only ``temparature`` property is kept
        as a backward-compatible alias.
    """

    def __init__(self, *,
                 user_msg: str,
                 user_name: str = None,
                 assistant: str = None,
                 local_system: str = None,
                 temperature: float = 1,
                 top_p: float = 1,
                 n: int = 1,
                 stream: bool = False,
                 stop: Union[str, List[str]] = None,
                 max_tokens: int = 1000,
                 presence_penalty: float = 0,
                 frequency_penalty: float = 0,
                 only_response=True,
                 ):
        '''
        :param user_msg: user message
        :param assistant: external knowledge base. E.g. chat history
        :param temperature: What sampling temperature to use, between 0 and 2.
            Higher values like 0.8 will make the output more random, while
            lower values like 0.2 will make it more focused and deterministic.
            We generally recommend altering this or top_p but not both.
        :param top_p: An alternative to sampling with temperature, called
            nucleus sampling, where the model considers the results of the
            tokens with top_p probability mass. So 0.1 means only the tokens
            comprising the top 10% probability mass are considered. We
            generally recommend altering this or temperature but not both.
        :param n: How many chat completion choices to generate for each input
            message.
        :param stream: If set, partial message deltas will be sent, like in
            ChatGPT. Tokens will be sent as data-only server-sent events as
            they become available, with the stream terminated by a
            data: [DONE] message.
        :param stop: Up to 4 sequences where the API will stop generating
            further tokens.
        :param max_tokens: The maximum number of tokens allowed for the
            generated answer. By default, the number of tokens the model can
            return will be (4096 - prompt tokens).
        :param presence_penalty: Number between -2.0 and 2.0. Positive values
            penalize new tokens based on whether they appear in the text so
            far, increasing the model's likelihood to talk about new topics.
        :param frequency_penalty: Number between -2.0 and 2.0. Positive values
            penalize new tokens based on their existing frequency in the text
            so far, decreasing the model's likelihood to repeat the same line
            verbatim.
        :param user_name: A unique identifier representing your end-user,
            which can help OpenAI to monitor and detect abuse. Learn more.
        :param only_response: if only return the text response from ChatGPT
        :param local_system: local system prompt text
        '''
        self.user_name = user_name
        self.user_msg = user_msg
        self.assistant = assistant
        self.local_system = local_system
        # Fixed: previously stored as the misspelled ``temparature``.
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stream = stream
        self.stop = stop
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.only_response = only_response

    @property
    def temparature(self) -> float:
        """Deprecated read-only alias for :attr:`temperature` (old misspelling)."""
        return self.temperature

    def to_dict(self) -> dict:
        """Recursively convert this config to a plain dict.

        Nested attribute values exposing ``to_dict`` are converted via that
        method; everything else is copied as-is. Only instance attributes are
        included (the ``temparature`` alias property is not).
        """
        ret = {}
        for key, value in self.__dict__.items():
            if hasattr(value, 'to_dict'):
                ret[key] = value.to_dict()
            else:
                ret[key] = value
        return ret
/requests-toolkit-stable-0.25.1.tar.gz/requests-toolkit-stable-0.25.1/requests_toolkit/openpy/config/_chatcompletion.py
0.93423
0.544135
_chatcompletion.py
pypi
import requests
from functools import wraps


class ExpectedKeyError(Exception):
    """
    Exception Class for dictionary keys that do not match what is expected
    from a requests.Response object whose body can be decoded as JSON.

    Attributes:
        found_keys: keys actually present in the response data
        expected_keys: keys the caller declared as required
        message: human-readable summary
    """

    def __init__(self, found_keys, expected_keys,
                 message="The requests.Response did not have the expected dictionary keys."):
        self.found_keys = found_keys
        self.expected_keys = expected_keys
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return f'{self.message}\n -> expected: {self.expected_keys} \n -> found: {self.found_keys}'


def validate_keys(expected_keys: list, suppress_exception_for_extra_found_keys: bool = True):
    """
    Decorator: validates that the keys found by calling .json() on the
    requests.Response returned by the wrapped function match what is expected.

    Parameters:
        expected_keys: list of key names that must be present
        suppress_exception_for_extra_found_keys: when False, keys beyond the
            expected ones also raise ExpectedKeyError

    Returns:
        the original requests.Response object when validation passes

    Raises:
        ExpectedKeyError: on missing (or, optionally, extra) keys
        ValueError: when the response body cannot be decoded as JSON
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: forward *args/**kwargs so decorated callables that take
            # parameters still work (the original wrapper accepted none).
            response = func(*args, **kwargs)
            try:
                response_data = response.json()
            except ValueError as err:
                # BUG FIX: the original wrote `except ValueError("..."):`,
                # which puts an *instance* in the except clause and raises
                # TypeError at catch time instead of catching anything.
                raise ValueError(
                    "requests.response did not return data that can be "
                    "converted to requests_toolkit."
                ) from err
            if isinstance(response_data, list):
                for entry in response_data:
                    found_keys = entry.keys()
                    if set(expected_keys) - found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                    if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            elif isinstance(response_data, dict):
                found_keys = response_data.keys()
                if set(expected_keys) - found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                # CONSISTENCY FIX: honour suppress_exception_for_extra_found_keys
                # in the dict branch too (the original silently ignored it here).
                if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            # Scalar JSON bodies (str/int/None/...) have no keys to validate.
            return response
        return wrapper
    return inner


def handle_http_error(func):
    """
    Decorator: abstracts away handling of bad HTTP status codes by raising
    requests.HTTPError (via Response.raise_for_status) for non-2xx/3xx responses.

    Parameters:
        func: function that returns requests.Response objects

    Returns:
        the requests.Response object when the status code is OK
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: forward *args/**kwargs (the original wrapper accepted none).
        response = func(*args, **kwargs)
        if response.ok:
            return response
        response.raise_for_status()
    return wrapper
/requests_toolkit-0.0.4-py3-none-any.whl/json_toolkit.py
0.861771
0.368718
json_toolkit.py
pypi
import requests
from functools import wraps


class ExpectedKeyError(Exception):
    """
    Exception Class for dictionary keys that do not match what is expected
    from a requests.Response object whose body can be decoded as JSON.

    Attributes:
        found_keys: keys actually present in the response data
        expected_keys: keys the caller declared as required
        message: human-readable summary
    """

    def __init__(self, found_keys, expected_keys,
                 message="The requests.Response did not have the expected dictionary keys."):
        self.found_keys = found_keys
        self.expected_keys = expected_keys
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return f'{self.message}\n -> expected: {self.expected_keys} \n -> found: {self.found_keys}'


def validate_keys(expected_keys: list, suppress_exception_for_extra_found_keys: bool = True):
    """
    Decorator: validates that the keys found by calling .json() on the
    requests.Response returned by the wrapped function match what is expected.

    Parameters:
        expected_keys: list of key names that must be present
        suppress_exception_for_extra_found_keys: when False, keys beyond the
            expected ones also raise ExpectedKeyError

    Returns:
        the original requests.Response object when validation passes

    Raises:
        ExpectedKeyError: on missing (or, optionally, extra) keys
        ValueError: when the response body cannot be decoded as JSON
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: forward *args/**kwargs so decorated callables that take
            # parameters still work (the original wrapper accepted none).
            response = func(*args, **kwargs)
            try:
                response_data = response.json()
            except ValueError as err:
                # BUG FIX: the original wrote `except ValueError("..."):`,
                # which puts an *instance* in the except clause and raises
                # TypeError at catch time instead of catching anything.
                raise ValueError(
                    "requests.response did not return data that can be "
                    "converted to requests_toolkit."
                ) from err
            if isinstance(response_data, list):
                for entry in response_data:
                    found_keys = entry.keys()
                    if set(expected_keys) - found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                    if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            elif isinstance(response_data, dict):
                found_keys = response_data.keys()
                if set(expected_keys) - found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                # CONSISTENCY FIX: honour suppress_exception_for_extra_found_keys
                # in the dict branch too (the original silently ignored it here).
                if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            # Scalar JSON bodies (str/int/None/...) have no keys to validate.
            return response
        return wrapper
    return inner


def handle_http_error(func):
    """
    Decorator: abstracts away handling of bad HTTP status codes by raising
    requests.HTTPError (via Response.raise_for_status) for non-2xx/3xx responses.

    Parameters:
        func: function that returns requests.Response objects

    Returns:
        the requests.Response object when the status code is OK
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: forward *args/**kwargs (the original wrapper accepted none).
        response = func(*args, **kwargs)
        if response.ok:
            return response
        response.raise_for_status()
    return wrapper
/requests_toolkit-0.0.4-py3-none-any.whl/toolkit.py
0.861771
0.368718
toolkit.py
pypi
from functools import wraps


class ExpectedKeyError(Exception):
    """
    Exception Class for dictionary keys that do not match what is expected
    from a requests.Response object whose body can be decoded as JSON.

    Attributes:
        found_keys: keys actually present in the response data
        expected_keys: keys the caller declared as required
        message: human-readable summary
    """

    def __init__(self, found_keys, expected_keys,
                 message="The requests.Response did not have the expected dictionary keys."):
        self.found_keys = found_keys
        self.expected_keys = expected_keys
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return f'{self.message}\n -> expected: {self.expected_keys} \n -> found: {self.found_keys}'


def validate_keys(expected_keys: list, suppress_exception_for_extra_found_keys: bool = True):
    """
    Decorator: validates that the keys found by calling .json() on the
    requests.Response returned by the wrapped function match what is expected.

    Parameters:
        expected_keys: list of key names that must be present
        suppress_exception_for_extra_found_keys: when False, keys beyond the
            expected ones also raise ExpectedKeyError

    Returns:
        the original requests.Response object when validation passes

    Raises:
        ExpectedKeyError: on missing (or, optionally, extra) keys
        ValueError: when the response body cannot be decoded as JSON
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: forward *args/**kwargs so decorated callables that take
            # parameters still work (the original wrapper accepted none).
            response = func(*args, **kwargs)
            try:
                response_data = response.json()
            except ValueError as err:
                # BUG FIX: the original wrote `except ValueError("..."):`,
                # which puts an *instance* in the except clause and raises
                # TypeError at catch time instead of catching anything.
                raise ValueError(
                    "requests.response did not return data that can be "
                    "converted to requests_toolkit."
                ) from err
            if isinstance(response_data, list):
                for entry in response_data:
                    found_keys = entry.keys()
                    if set(expected_keys) - found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                    if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            elif isinstance(response_data, dict):
                found_keys = response_data.keys()
                if set(expected_keys) - found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                # CONSISTENCY FIX: honour suppress_exception_for_extra_found_keys
                # in the dict branch too (the original silently ignored it here).
                if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            # Scalar JSON bodies (str/int/None/...) have no keys to validate.
            return response
        return wrapper
    return inner


def handle_http_error(func):
    """
    Decorator: abstracts away handling of bad HTTP status codes by raising
    requests.HTTPError (via Response.raise_for_status) for non-2xx/3xx responses.

    Parameters:
        func: function that returns requests.Response objects

    Returns:
        the requests.Response object when the status code is OK
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: forward *args/**kwargs (the original wrapper accepted none).
        response = func(*args, **kwargs)
        if response.ok:
            return response
        response.raise_for_status()
    return wrapper
/requests_toolkit-0.0.4-py3-none-any.whl/requests_toolkit.py
0.921225
0.415907
requests_toolkit.py
pypi
import requests
from functools import wraps
from typing import Union


class ExpectedKeyError(Exception):
    """
    Exception Class for dictionary keys that do not match what is expected
    from a requests.Response object whose body can be decoded as JSON.

    Attributes:
        found_keys: keys actually present in the response data
        expected_keys: keys the caller declared as required
        message: human-readable summary
    """

    def __init__(self, found_keys, expected_keys,
                 message="The requests.Response did not have the expected dictionary keys."):
        self.found_keys = found_keys
        self.expected_keys = expected_keys
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return f'{self.message}\n -> expected: {self.expected_keys} \n -> found: {self.found_keys}'


def validate_json_data(response_data: Union[list, dict],
                       expected_data: Union[list, dict],
                       suppress_exception_for_extra_found_keys: bool):
    # Backward-compatible alias target; see traverse_json below.
    return traverse_json(response_data, expected_data, suppress_exception_for_extra_found_keys)


def traverse_json(response_data: Union[list, dict],
                  expected_data: Union[list, dict],
                  suppress_exception_for_extra_found_keys: bool):
    """
    Recursively compare the key structure of ``response_data`` against the
    ``expected_data`` template, raising ExpectedKeyError on mismatch.

    BUG FIX (annotations only): the original annotated these parameters as
    ``list or dict``, which evaluates to plain ``list`` at definition time;
    ``Union[list, dict]`` expresses the intent correctly.

    NOTE(review): the recursion returns from inside the loops, so only the
    first dict element of a list / first expected key of a dict is descended
    into. Preserved as-is — tightening it would change existing behavior.
    """
    if isinstance(expected_data, list):
        for index, element in enumerate(expected_data):
            if isinstance(element, dict):
                found_keys = response_data[index].keys()
                expected_keys = element.keys()
                if set(expected_keys) - found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return traverse_json(
                    response_data=response_data[index],
                    expected_data=expected_data[index],
                    suppress_exception_for_extra_found_keys=suppress_exception_for_extra_found_keys
                )
            else:
                return True
    elif isinstance(expected_data, dict):
        found_keys = response_data.keys()
        expected_keys = expected_data.keys()
        if set(expected_keys) - found_keys:
            raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
        for key in expected_keys:
            return traverse_json(
                response_data=response_data[key],
                expected_data=expected_data[key],
                suppress_exception_for_extra_found_keys=suppress_exception_for_extra_found_keys
            )
    return True


def validate_keys(expected_data: Union[list, dict], suppress_exception_for_extra_found_keys: bool = True):
    """
    Decorator: validates that the keys found by calling .json() on the
    requests.Response returned by the wrapped function match the
    ``expected_data`` template.

    Parameters:
        expected_data: list/dict template describing the required key structure
        suppress_exception_for_extra_found_keys: setting False enforces an
            exact match of found keys and expected keys.

    Returns:
        the original requests.Response object
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: forward *args/**kwargs so decorated callables that take
            # parameters still work (the original wrapper accepted none).
            response = func(*args, **kwargs)
            traverse_json(
                response_data=response.json(),
                expected_data=expected_data,
                suppress_exception_for_extra_found_keys=suppress_exception_for_extra_found_keys
            )
            return response
        return wrapper
    return inner


def handle_http_error(throw_exception: bool = True):
    """
    Decorator factory: abstracts away handling of bad HTTP status codes by
    raising requests.HTTPError for non-2xx/3xx responses.

    Parameters:
        throw_exception: when False, bad statuses are returned instead of raised

    Returns:
        the requests.Response object
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: forward *args/**kwargs (the original wrapper accepted none).
            response = func(*args, **kwargs)
            if not response.ok and throw_exception:
                response.raise_for_status()
            return response
        return wrapper
    return inner
/requests_toolkit-0.0.4-py3-none-any.whl/requests_toolkit/json_tools.py
0.75101
0.409457
json_tools.py
pypi
import requests
from functools import wraps


class ExpectedKeyError(Exception):
    """
    Exception Class for dictionary keys that do not match what is expected
    from a requests.Response object whose body can be decoded as JSON.

    Attributes:
        found_keys: keys actually present in the response data
        expected_keys: keys the caller declared as required
        message: human-readable summary
    """

    def __init__(self, found_keys, expected_keys,
                 message="The requests.Response did not have the expected dictionary keys."):
        self.found_keys = found_keys
        self.expected_keys = expected_keys
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        return f'{self.message}\n -> expected: {self.expected_keys} \n -> found: {self.found_keys}'


def validate_keys(expected_keys: list, suppress_exception_for_extra_found_keys: bool = True):
    """
    Decorator: validates that the keys found by calling .json() on the
    requests.Response returned by the wrapped function match what is expected.

    Parameters:
        expected_keys: list of key names that must be present
        suppress_exception_for_extra_found_keys: when False, keys beyond the
            expected ones also raise ExpectedKeyError

    Returns:
        the original requests.Response object when validation passes

    Raises:
        ExpectedKeyError: on missing (or, optionally, extra) keys
        ValueError: when the response body cannot be decoded as JSON
    """
    def inner(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # BUG FIX: forward *args/**kwargs so decorated callables that take
            # parameters still work (the original wrapper accepted none).
            response = func(*args, **kwargs)
            try:
                response_data = response.json()
            except ValueError as err:
                # BUG FIX: the original wrote `except ValueError("..."):`,
                # which puts an *instance* in the except clause and raises
                # TypeError at catch time instead of catching anything.
                raise ValueError(
                    "requests.response did not return data that can be "
                    "converted to requests_toolkit."
                ) from err
            if isinstance(response_data, list):
                for entry in response_data:
                    found_keys = entry.keys()
                    if set(expected_keys) - found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                    if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                        raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            elif isinstance(response_data, dict):
                found_keys = response_data.keys()
                if set(expected_keys) - found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                # CONSISTENCY FIX: honour suppress_exception_for_extra_found_keys
                # in the dict branch too (the original silently ignored it here).
                if found_keys - set(expected_keys) and not suppress_exception_for_extra_found_keys:
                    raise ExpectedKeyError(found_keys=found_keys, expected_keys=expected_keys)
                return response
            # Scalar JSON bodies (str/int/None/...) have no keys to validate.
            return response
        return wrapper
    return inner


def handle_http_error(func):
    """
    Decorator: abstracts away handling of bad HTTP status codes by raising
    requests.HTTPError (via Response.raise_for_status) for non-2xx/3xx responses.

    Parameters:
        func: function that returns requests.Response objects

    Returns:
        the requests.Response object when the status code is OK
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: forward *args/**kwargs (the original wrapper accepted none).
        response = func(*args, **kwargs)
        if response.ok:
            return response
        response.raise_for_status()
    return wrapper
/requests_toolkit-0.0.4-py3-none-any.whl/json/requests_toolkit.py
0.861771
0.368718
requests_toolkit.py
pypi
[![Python >= 3.6](https://img.shields.io/badge/python->=3.6-red.svg)](https://www.python.org/downloads/) [![](https://badgen.net/github/release/deedy5/requests_tor)](https://github.com/deedy5/requests_tor/releases) [![](https://badge.fury.io/py/requests-tor.svg)](https://pypi.org/project/requests-tor) # requests_tor `Release history:` [https://pypi.org/project/requests-tor/#history](https://pypi.org/project/requests-tor/#history) --- Multithreading requests via [TOR](https://www.torproject.org) with automatic TOR new identity. Wrapper of the [requests](https://docs.python-requests.org) and [stem](https://stem.torproject.org) libraries. Returns [requests.Response](https://docs.python-requests.org/en/latest/api/#requests.Response) object. Masking as Tor Browser by using its default headers: ``` "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8", "Accept-Encoding": "gzip, deflate, br", "Accept-Language": "en-US,en;q=0.5", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:102.0) Gecko/20100101 Firefox/102.0", ``` ### Install ``` pip install -U requests_tor ``` ### Dependencies Download and start [Tor Browser](https://www.torproject.org/download/) or install [Tor](https://community.torproject.org/onion-services/setup/install/) _Notes:_ * In Tor [torrc file](https://support.torproject.org/tbb/tbb-editing-torrc/) control port is disabled by default. Needs to uncomment line `ControlPort 9051` * If you face an error `Authentication failed: unable to read '/run/tor/control.authcookie' ([Errno 13] Permission denied: '/run/tor/control.authcookie')` - needs to add your current user to the tor group. `ps ax o comm,group | grep tor` - command to find tor group (group name will be in the second column, for example `debian-tor`). 
`sudo usermod -a -G debian-tor $USER` - add your current user to tor group * Restart Tor (`/etc/init.d/tor restart`) and re-login --- ### Simple usage ```python from requests_tor import RequestsTor # If you use the Tor browser rt = RequestsTor() OR # If you use the Tor rt = RequestsTor(tor_ports=(9050,), tor_cport=9051) url = 'https://httpbin.org/anything' r = rt.get(url) print(r.text) urls = ['https://foxnews.com', 'https://nbcnews.com', 'https://wsj.com/news/world', 'https://abcnews.go.com', 'https://cbsnews.com', 'https://nytimes.com', 'https://usatoday.com','https://reuters.com/world', 'http://bbc.com/news', 'https://theguardian.com/world', 'https://cnn.com', 'https://apnews.com'] r = rt.get_urls(urls) print(r[-1].text) ``` --- ### Advanced usage [Edit torrc file](https://support.torproject.org/tbb/tbb-editing-torrc/): 1. add [socks ports](https://2019.www.torproject.org/docs/tor-manual.html.en#SocksPort), ``` SocksPort 9000 IsolateDestAddr SocksPort 9001 IsolateDestAddr SocksPort 9002 IsolateDestAddr SocksPort 9003 IsolateDestAddr SocksPort 9004 IsolateDestAddr ``` 2. add password for control port [not necessary]: generate and add in torrc file [HashedControlPassword](https://2019.www.torproject.org/docs/tor-manual.html.en#HashedControlPassword). 
```
HashedControlPassword hashed_password
```
---
```python
from requests_tor import RequestsTor

rt = RequestsTor(tor_ports=(9000, 9001, 9002, 9003, 9004), tor_cport=9151,
                 password=None, autochange_id=5, threads=8)
"""
tor_ports = specify Tor socks ports tuple (default is (9150,), as the default in Tor Browser),
if more than one port is set, the requests will be sent sequentially through each port;
tor_cport = specify Tor control port (default is 9151 for Tor Browser, for Tor use 9051);
password = specify Tor control port password (default is None);
autochange_id = number of requests via one Tor socks port (default=5) to change TOR identity,
specify autochange_id = 0 to turn off autochange Tor identity;
threads = specify threads to download urls list (default=8);
"""
# check your ip
rt.check_ip()
# new Tor identity. Calling this function includes time.sleep(3)
rt.new_id()
# test automatic TOR new identity
rt.test()

# Requests. TOR new identity is executed after (autochange_id * len(tor_ports)) requests.
# GET request.
rt.get(url, params=None, **kwargs)
# POST request.
rt.post(url, data=None, json=None, **kwargs)
# PUT request.
rt.put(url, data=None, **kwargs)
# PATCH request.
rt.patch(url, data=None, **kwargs)
# DELETE request.
rt.delete(url, **kwargs)
# HEAD request.
rt.head(url, **kwargs)
"""
url – URL for the new Request object.
params – dictionary, list of tuples or bytes to send in the query string.
**kwargs – optional arguments that request takes:
    data – (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the request.
    json – (optional) A JSON serializable Python object to send in the body of the Request.
    headers – (optional) Dictionary of HTTP Headers to send with the Request.
    cookies – (optional) Dict or CookieJar object to send with the Request.
    files – (optional) Dictionary of 'name': file-like-objects (or {'name': file-tuple}) for multipart encoding upload.
file-tuple can be a 2-tuple ('filename', fileobj), 3-tuple ('filename', fileobj, 'content_type') or a 4-tuple ('filename', fileobj, ' content_type', custom_headers), where 'content-type' is a string defining the content type of the given file and custom_headers a dict-like object containing additional headers to add for the file. auth – (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. timeout – (optional) How many seconds to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) tuple. allow_redirects (bool) – (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to True. proxies – (optional) Dictionary mapping protocol to the URL of the proxy. verify – (optional) Either a boolean, in which case it controls whether we verify the server’s TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to True. stream – (optional) if False, the response content will be immediately downloaded. cert – (optional) if String, path to ssl client cert file (.pem). If Tuple, (‘cert’, ‘key’) pair. """ ``` ## Examples ### 1. Get url with unique params and headers in request. ```python from requests_tor import RequestsTor rt = RequestsTor(tor_ports=(9000, 9001, 9002, 9003, 9004), autochange_id=5) url = 'https://httpbin.org/anything' params = { "id": 12345, "status": 'passed' } headers = { "Origin": "https://www.foxnews.com", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36" } r = rt.get(url, params=params, headers=headers) print(r.text) ``` ### 2. Get list of urls concurrently. ```python from requests_tor import RequestsTor rt = RequestsTor(tor_ports=(9000, 9001, 9002, 9003, 9004), autochange_id=5) # get urls list concurrently. TOR new identity is executed depending on the number of socksports and # autochange_id parameter. 
In case of 5 socksports and autochange_id=5, after downloading 5*5=25 urls # TOR identity will be changed. It does matter, because calling TOR new identity includes time.sleep(3). # get_urls(urls) can accept params, headers and other arguments from requests library. urls = (f'https://checkip.amazonaws.com' for _ in range(10)) results = rt.get_urls(urls) for r in results: print(r.text) ``` ### 3. Get list of urls concurrently with unique ip for each url ```python from requests_tor import RequestsTor rt = RequestsTor(tor_ports=(9000, 9001, 9002, 9003, 9004), autochange_id=1) urls = (f'https://habr.com/ru/post/{x}' for x in range(1, 51)) r = rt.get_urls(urls) print(r[-1].text) ``` ---
/requests_tor-1.4.tar.gz/requests_tor-1.4/README.md
0.484136
0.919317
README.md
pypi
from datetime import datetime
from typing import Any, Dict, Optional
from uuid import UUID, uuid4

from django.http import HttpRequest, HttpResponse
from django.urls import Resolver404, resolve

from requests_tracker.base_collector import Collector
from requests_tracker.headers.header_collector import HeaderCollector
from requests_tracker.sql.sql_collector import SQLCollector


class MainRequestCollector:
    """Aggregates per-request tracking data (timing, headers, SQL) for one
    Django request/response cycle."""

    request_id: UUID                      # unique id assigned at construction
    request: HttpRequest
    django_view: str                      # dotted path of the resolved view, or "NOT FOUND"
    start_time: datetime
    end_time: Optional[datetime]          # None until wrap_up_request/set_end_time runs
    response: Optional[HttpResponse]      # None while the request is in flight
    sql_collector: SQLCollector
    header_collector: HeaderCollector

    def __init__(self, request: HttpRequest):
        """Start tracking *request*: resolve its view and record the start time."""
        self.request_id = uuid4()
        self.request = request
        try:
            # NOTE(review): relies on the private ResolverMatch._func_path
            # attribute — confirm against the supported Django version.
            self.django_view = resolve(self.request.path)._func_path
        except Resolver404:
            self.django_view = "NOT FOUND"
        # Naive local time, matching set_end_time() below.
        self.start_time = datetime.now()
        self.end_time = None
        self.response = None
        self.sql_collector = SQLCollector()
        self.header_collector = HeaderCollector()

    def wrap_up_request(self, response: HttpResponse) -> None:
        """
        Called after Django has processed the request, before response is returned
        """
        self.set_end_time()
        self.response = response
        self.header_collector.process_request(self.request, self.response)

    @property
    def duration(self) -> Optional[int]:
        """duration in milliseconds, or None while the request is unfinished"""
        return (
            int((self.end_time - self.start_time).total_seconds() * 1000)
            if self.end_time is not None
            else None
        )

    @property
    def finished(self) -> bool:
        # Finished == a response has been recorded via wrap_up_request().
        return self.response is not None

    def set_end_time(self) -> None:
        """Stamp the end of the request with the current local time."""
        self.end_time = datetime.now()

    def get_collectors(self) -> Dict[str, Collector]:
        """Return all Collector-typed attributes, keyed by attribute name,
        refreshing each collector's statistics first."""
        collectors: Dict[str, Collector] = {}
        for attribute_name, attribute_value in self.__dict__.items():
            if isinstance(attribute_value, Collector):
                attribute_value.generate_statistics()
                collectors[attribute_name] = attribute_value
        return collectors

    def get_as_context(self) -> Dict[str, Any]:
        """Build the template-context dict for rendering this request's data;
        the collectors are merged in under their attribute names."""
        return {
            "request": self.request,
            "request_id": self.request_id,
            "django_view": self.django_view,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "duration": self.duration,
            "response": self.response,
            "finished": self.finished,
            **self.get_collectors(),
        }

    def matches_search_filter(self, search: str) -> bool:
        """Case-insensitive match of *search* against the request path, the
        view path, or any collector's own search filter."""
        search = search.lower()
        return (
            search in self.request.path.lower()
            or search in self.django_view.lower()
            or next(
                (
                    True
                    for collector in self.get_collectors().values()
                    if collector.matches_search_filter(search)
                ),
                False,
            )
        )
/requests_tracker-0.3.2.tar.gz/requests_tracker-0.3.2/requests_tracker/main_request_collector.py
0.858941
0.157169
main_request_collector.py
pypi
"""Stack-trace capture utilities with per-async-context caching."""

import inspect
import linecache
from types import FrameType
from typing import Any, Dict, Generator, List, Optional, Tuple

from asgiref.local import Local

from requests_tracker import settings

# Async/thread-local storage for the per-context _StackTraceRecorder instance.
_local_data = Local()

# each tuple is: filename, line_no, func_name, source_line, frame_locals
StackTrace = List[Tuple[str, int, str, str, Optional[Dict[str, Any]]]]


def _stack_frames(*, skip: int = 0) -> Generator[FrameType, None, None]:
    """Yield the live frames of the current call stack, innermost first,
    omitting the first ``skip`` caller frames."""
    skip += 1  # Skip the frame for this generator.
    frame = inspect.currentframe()
    while frame is not None:
        if skip > 0:
            skip -= 1
        else:
            yield frame
        frame = frame.f_back


def _is_excluded_frame(frame: FrameType, excluded_modules: Optional[List[str]]) -> bool:
    """True when the frame's module is one of ``excluded_modules`` or a
    submodule of one; False when the module name is missing or not a str."""
    if not excluded_modules:
        return False
    frame_module = frame.f_globals.get("__name__")
    return (
        any(
            frame_module == excluded_module
            or frame_module.startswith(f"{excluded_module}.")
            for excluded_module in excluded_modules
        )
        if isinstance(frame_module, str)
        else False
    )


def get_stack_trace(*, skip: int = 0) -> StackTrace:
    """
    Return a processed stack trace for the current call stack.

    If the ``ENABLE_STACKTRACES`` setting is False, return an empty
    :class:`list`. Otherwise return a :class:`list` of processed stack frame
    tuples (file name, line number, function name, source line, frame locals)
    for the current call stack. The first entry in the list will be for the
    bottom of the stack and the last entry will be for the top of the stack.

    ``skip`` is an :class:`int` indicating the number of stack frames above the
    frame for this function to omit from the stack trace. The default value of
    ``0`` means that the entry for the caller of this function will be the last
    entry in the returned stack trace.
    """
    config = settings.get_config()
    if not config["ENABLE_STACKTRACES"]:
        return []
    skip += 1  # Skip the frame for this function.
    # Reuse one recorder per async context so its filename cache persists
    # across calls.
    stack_trace_recorder = getattr(_local_data, "stack_trace_recorder", None)
    if stack_trace_recorder is None:
        stack_trace_recorder = _StackTraceRecorder()
        _local_data.stack_trace_recorder = stack_trace_recorder
    return stack_trace_recorder.get_stack_trace(
        excluded_modules=config["HIDE_IN_STACKTRACES"],
        include_locals=config["ENABLE_STACKTRACES_LOCALS"],
        skip=skip,
    )


class _StackTraceRecorder:
    """Builds stack traces while caching source-file lookups per filename."""

    def __init__(self) -> None:
        # Maps a frame's co_filename -> (resolved filename, is_source flag).
        self.filename_cache: Dict[str, Tuple[str, bool]] = {}

    def get_source_file(self, frame: FrameType) -> Tuple[str, bool]:
        """Return ``(filename, is_source)`` for *frame*, where ``is_source``
        is False for frames without retrievable Python source (e.g. C code)."""
        frame_filename = frame.f_code.co_filename
        value = self.filename_cache.get(frame_filename)
        if value is None:
            filename = inspect.getsourcefile(frame)
            if filename is None:
                is_source = False
                filename = frame_filename
            else:
                is_source = True
                # Ensure linecache validity the first time this recorder
                # encounters the filename in this frame.
                linecache.checkcache(filename)
            value = (filename, is_source)
            self.filename_cache[frame_filename] = value
        return value

    def get_stack_trace(
        self,
        *,
        excluded_modules: Optional[List[str]] = None,
        include_locals: bool = False,
        skip: int = 0,
    ) -> StackTrace:
        """Collect the current stack as StackTrace tuples, outermost first.

        Frames from ``excluded_modules`` are dropped; locals are attached
        only when ``include_locals`` is True.
        """
        trace = []
        skip += 1  # Skip the frame for this method.
        for frame in _stack_frames(skip=skip):
            if _is_excluded_frame(frame, excluded_modules):
                continue
            filename, is_source = self.get_source_file(frame)
            line_no = frame.f_lineno
            func_name = frame.f_code.co_name
            if is_source:
                # Module globals let linecache resolve source loaded via
                # loaders (e.g. zipimport) per PEP 302.
                module = inspect.getmodule(frame, filename)
                module_globals = module.__dict__ if module is not None else None
                source_line = linecache.getline(
                    filename, line_no, module_globals
                ).strip()
            else:
                source_line = ""
            frame_locals = frame.f_locals if include_locals else None
            trace.append((filename, line_no, func_name, source_line, frame_locals))
        # Frames were yielded innermost-first; present bottom of stack first.
        trace.reverse()
        return trace
/requests_tracker-0.3.2.tar.gz/requests_tracker-0.3.2/requests_tracker/stack_trace.py
0.844313
0.170854
stack_trace.py
pypi
import os
import re
from pprint import pformat
from typing import Any, Dict, Optional

from django import template
from django.utils.safestring import mark_safe

from requests_tracker.sql.sql_parser import parse_sql

register = template.Library()


@register.filter
def split_and_last(value: str, splitter: str = ".") -> str:
    """Split *value* on *splitter* and return the final segment."""
    *_, last_segment = value.split(splitter)
    return last_segment


@register.filter
def dict_key_index(input_dict: Dict[str, Any], key: str) -> Optional[int]:
    """Return the insertion-order index of *key* in *input_dict*, or None
    when the key is absent."""
    for position, candidate in enumerate(input_dict.keys()):
        if candidate == key:
            return position
    return None


@register.filter
def pprint_datastructure_values(value: Any) -> str:
    """Pretty-print dict/list/tuple values inside a <pre> block; everything
    else is stringified as-is. Output is marked safe for templates."""
    if isinstance(value, (dict, list, tuple)):
        rendered = f"<pre>{pformat(value)}</pre>"
    else:
        rendered = str(value)
    return mark_safe(rendered)


@register.simple_tag
def simplify_sql(sql: str) -> str:
    """Reformat *sql* for a collapsed view: the SELECT column list is replaced
    by an ellipsis between the bolded SELECT and FROM keywords."""
    formatted = parse_sql(sql, align_indent=False)
    column_list_pattern = re.compile(r"SELECT</strong> (...........*?) <strong>FROM")
    return column_list_pattern.sub(
        r"SELECT</strong> &#8226;&#8226;&#8226; <strong>FROM", formatted
    )


@register.simple_tag
def format_sql(sql: str) -> str:
    """Reformat SQL with aligned indents and bolded keywords."""
    return parse_sql(sql, align_indent=True)


@register.filter
def simplify_path(path: str) -> str:
    """Shorten a full Python path relative to site-packages or the running
    Django project's working directory.

    e.g. "/Users/my_user/Documents/django-project/venv/lib/python3.10/
    site-packages/django/contrib/staticfiles/handlers.py"
    => ".../site-packages/django/contrib/staticfiles/handlers.py"
    """
    site_packages_marker = "/site-packages"
    if site_packages_marker in path:
        return "..." + path[path.index(site_packages_marker):]
    project_root = os.getcwd()  # directory of the running django project
    if path.startswith(project_root):
        return "..." + path[len(project_root):]
    return path
/requests_tracker-0.3.2.tar.gz/requests_tracker-0.3.2/requests_tracker/templatetags/format_tags.py
0.638948
0.243036
format_tags.py
pypi
import colorsys
from typing import List

from django import template
from django.template.defaultfilters import stringfilter

from requests_tracker.sql.dataclasses import SQLQueryInfo

register = template.Library()

# HTTP method -> bulma color class; anything unknown maps to "".
_METHOD_CLASSES = {
    "GET": "is-info",
    "POST": "is-success",
    "PUT": "is-warning",
    "PATCH": "is-warning is-light",
    "DELETE": "is-danger",
}


@register.filter("method_class")
@stringfilter
def method_bulma_color_class(method: str) -> str:
    """Takes in HTTP method and returns a bulma class for colorization"""
    return _METHOD_CLASSES.get(method, "")


@register.filter("status_code_class")
@stringfilter
def status_code_bulma_color_class(status_code_str: str) -> str:
    """Takes in HTTP status code and returns a bulma class for colorization"""
    try:
        code = int(status_code_str)
    except ValueError:
        code = 0
    if 100 <= code < 200:
        return "is-info"
    if 200 <= code < 300:
        return "is-success"
    if 300 <= code < 400:
        return "is-link"
    if 400 <= code < 500:
        return "is-warning"
    if code >= 500:
        return "is-danger"
    # Unparseable or < 100 falls through to the neutral class.
    return "is-dark"


@register.simple_tag
def contrast_color_from_number(color_number: int) -> str:
    """Derive a contrasting hex color for *color_number* by cycling hue."""
    base_hue = 0.6  # Blue ish color
    drift = 0.1 * (color_number // 4)  # Shift by 10% for every 4 numbers
    step = 0.25 * color_number  # Cycle the color scheme 25% at a time
    hue = (base_hue + step + drift) % 1  # Only want decimal part
    channels = colorsys.hsv_to_rgb(hue, 0.65, 0.7)
    return "#" + "".join(f"{int(channel * 255):02x}" for channel in channels)


@register.simple_tag
def timeline_bar_styles(
    queries: List[SQLQueryInfo],
    total_sql_time: float,
    current_index: int,
) -> str:
    """Inline CSS for one query's bar on the SQL timeline: width and offset
    are percentages of the total SQL time; color is index-derived."""
    query = queries[current_index]
    width_pct = (query.duration / total_sql_time) * 100
    preceding_time = sum(q.duration for q in queries[:current_index])
    offset_pct = preceding_time / total_sql_time * 100
    bar_color = contrast_color_from_number(current_index + 100)
    return (
        f"width: {width_pct:.3f}%; "
        f"margin-left: {offset_pct:.3f}%; "
        f"background-color: {bar_color};"
    )
/requests_tracker-0.3.2.tar.gz/requests_tracker-0.3.2/requests_tracker/templatetags/style_tags.py
0.765506
0.269049
style_tags.py
pypi
import re import uuid from collections import defaultdict from typing import Dict, List, Optional, Tuple from requests_tracker.base_collector import Collector from requests_tracker.settings import get_config from requests_tracker.sql.dataclasses import PerDatabaseInfo, SQLQueryInfo SimilarQueryGroupsType = Dict[Tuple[str, str], List[SQLQueryInfo]] DuplicateQueryGroupsType = Dict[Tuple[str, Tuple[str, str]], List[SQLQueryInfo]] class SQLCollector(Collector): unfiltered_queries: List[SQLQueryInfo] databases: Dict[str, PerDatabaseInfo] sql_time: float transaction_ids: Dict[str, Optional[str]] def __init__(self) -> None: self.databases = {} self.sql_time = 0 self.unfiltered_queries = [] # synthetic transaction IDs, keyed by DB alias self.transaction_ids = {} @property def queries(self) -> List[SQLQueryInfo]: config = get_config() if ignore_patterns := config.get("IGNORE_SQL_PATTERNS"): return [ query for query in self.unfiltered_queries if not any( bool(re.match(pattern, query.raw_sql)) for pattern in ignore_patterns ) ] return self.unfiltered_queries @property def num_queries(self) -> int: return len(self.queries) def record(self, sql_query_info: SQLQueryInfo) -> None: self.unfiltered_queries.append(sql_query_info) def new_transaction_id(self, alias: str) -> str: """ Generate and return a new synthetic transaction ID for the specified DB alias. """ trans_id = uuid.uuid4().hex self.transaction_ids[alias] = trans_id return trans_id def current_transaction_id(self, alias: str) -> str: """ Return the current synthetic transaction ID for the specified DB alias. """ trans_id = self.transaction_ids.get(alias) # Sometimes it is not possible to detect the beginning of the first transaction, # so current_transaction_id() will be called before new_transaction_id(). In # that case there won't yet be a transaction ID. so it is necessary to generate # one using new_transaction_id(). 
if trans_id is None: trans_id = self.new_transaction_id(alias) return trans_id def generate_statistics(self) -> None: similar_query_groups: SimilarQueryGroupsType = defaultdict(list) duplicate_query_groups: DuplicateQueryGroupsType = defaultdict(list) self.databases = {} self.sql_time = 0 for query in self.queries: alias = query.alias if alias not in self.databases: self.databases[alias] = PerDatabaseInfo( time_spent=query.duration, num_queries=1, ) else: self.databases[alias].time_spent += query.duration self.databases[alias].num_queries += 1 self.sql_time += query.duration similar_query_groups[(query.alias, query.sql)].append(query) duplicate_query_groups[ ( query.alias, ( query.raw_sql, repr(tuple(query.raw_params) if query.raw_params else ()), ), ) ].append(query) similar_counts: Dict[str, int] = defaultdict(int) for (alias, _), query_group in similar_query_groups.items(): count = len(query_group) if count > 1: for query in query_group: query.similar_count = count similar_counts[alias] += count duplicate_counts: Dict[str, int] = defaultdict(int) for (alias, _), query_group in duplicate_query_groups.items(): count = len(query_group) if count > 1: for query in query_group: query.duplicate_count = count duplicate_counts[alias] += count for alias in self.databases: self.databases[alias].similar_count = similar_counts[alias] self.databases[alias].duplicate_count = duplicate_counts[alias] @property def total_similar_queries(self) -> int: return sum(database.similar_count for database in self.databases.values()) @property def total_duplicate_queries(self) -> int: return sum(database.duplicate_count for database in self.databases.values()) def matches_search_filter(self, search: str) -> bool: search = search.lower() return next( (True for query in self.queries if search in query.raw_sql.lower()), False, )
/requests_tracker-0.3.2.tar.gz/requests_tracker-0.3.2/requests_tracker/sql/sql_collector.py
0.857917
0.233149
sql_collector.py
pypi
from .structures import LookupDict _codes = { # Informational. 100: ('continue',), 101: ('switching_protocols',), 102: ('processing',), 103: ('checkpoint',), 122: ('uri_too_long', 'request_uri_too_long'), 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/'), 201: ('created',), 202: ('accepted',), 203: ('non_authoritative_info', 'non_authoritative_information'), 204: ('no_content',), 205: ('reset_content', 'reset'), 206: ('partial_content', 'partial'), 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), 208: ('im_used',), # Redirection. 300: ('multiple_choices',), 301: ('moved_permanently', 'moved', '\\o-'), 302: ('found',), 303: ('see_other', 'other'), 304: ('not_modified',), 305: ('use_proxy',), 306: ('switch_proxy',), 307: ('temporary_redirect', 'temporary_moved', 'temporary'), 308: ('resume_incomplete', 'resume'), # Client Error. 400: ('bad_request', 'bad'), 401: ('unauthorized',), 402: ('payment_required', 'payment'), 403: ('forbidden',), 404: ('not_found', '-o-'), 405: ('method_not_allowed', 'not_allowed'), 406: ('not_acceptable',), 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), 408: ('request_timeout', 'timeout'), 409: ('conflict',), 410: ('gone',), 411: ('length_required',), 412: ('precondition_failed', 'precondition'), 413: ('request_entity_too_large',), 414: ('request_uri_too_large',), 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), 417: ('expectation_failed',), 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), 422: ('unprocessable_entity', 'unprocessable'), 423: ('locked',), 424: ('failed_dependency', 'dependency'), 425: ('unordered_collection', 'unordered'), 426: ('upgrade_required', 'upgrade'), 428: ('precondition_required', 'precondition'), 429: ('too_many_requests', 'too_many'), 431: ('header_fields_too_large', 'fields_too_large'), 444: ('no_response', 'none'), 449: ('retry_with', 
'retry'), 450: ('blocked_by_windows_parental_controls', 'parental_controls'), 499: ('client_closed_request',), # Server Error. 500: ('internal_server_error', 'server_error', '/o\\'), 501: ('not_implemented',), 502: ('bad_gateway',), 503: ('service_unavailable', 'unavailable'), 504: ('gateway_timeout',), 505: ('http_version_not_supported', 'http_version'), 506: ('variant_also_negotiates',), 507: ('insufficient_storage',), 509: ('bandwidth_limit_exceeded', 'bandwidth'), 510: ('not_extended',), } codes = LookupDict(name='status_codes') for (code, titles) in list(_codes.items()): for title in titles: setattr(codes, title, code) if not title.startswith('\\'): setattr(codes, title.upper(), code)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/status_codes.py
0.599954
0.160036
status_codes.py
pypi
from base64 import b64encode from collections import namedtuple from socket import error as SocketError try: from select import poll, POLLIN except ImportError: # `poll` doesn't exist on OSX and other platforms poll = False try: from select import select except ImportError: # `select` doesn't exist on AppEngine. select = False from .packages import six from .exceptions import LocationParseError class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])): """ Datastructure for representing an HTTP URL. Used as a return value for :func:`parse_url`. """ slots = () def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment) @property def hostname(self): """For backwards-compatibility with urlparse. We're nice like that.""" return self.host @property def request_uri(self): """Absolute path including the query string.""" uri = self.path or '/' if self.query is not None: uri += '?' + self.query return uri def split_first(s, delims): """ Given a string and an iterable of delimiters, split on the first found delimiter. Return two split parts and the matched delimiter. If not found, then the first part is the full input string. Example: :: >>> split_first('foo/bar?baz', '?/=') ('foo', 'bar?baz', '/') >>> split_first('foo/bar?baz', '123') ('foo/bar?baz', '', None) Scales linearly with number of delims. Not ideal for large number of delims. """ min_idx = None min_delim = None for d in delims: idx = s.find(d) if idx < 0: continue if min_idx is None or idx < min_idx: min_idx = idx min_delim = d if min_idx is None or min_idx < 0: return s, '', None return s[:min_idx], s[min_idx+1:], min_delim def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. Partly backwards-compatible with :mod:`urlparse`. 
Example: :: >>> parse_url('http://google.com/mail/') Url(scheme='http', host='google.com', port=None, path='/', ...) >>> prase_url('google.com:80') Url(scheme=None, host='google.com', port=80, path=None, ...) >>> prase_url('/foo?bar') Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ # While this code has overlap with stdlib's urlparse, it is much # simplified for our needs and less annoying. # Additionally, this imeplementations does silly things to be optimal # on CPython. scheme = None auth = None host = None port = None path = None fragment = None query = None # Scheme if '://' in url: scheme, url = url.split('://', 1) # Find the earliest Authority Terminator # (http://tools.ietf.org/html/rfc3986#section-3.2) url, path_, delim = split_first(url, ['/', '?', '#']) if delim: # Reassemble the path path = delim + path_ # Auth if '@' in url: auth, url = url.split('@', 1) # IPv6 if url and url[0] == '[': host, url = url[1:].split(']', 1) # Port if ':' in url: _host, port = url.split(':', 1) if not host: host = _host if not port.isdigit(): raise LocationParseError("Failed to parse: %s" % url) port = int(port) elif not host and url: host = url if not path: return Url(scheme, auth, host, port, path, query, fragment) # Fragment if '#' in path: path, fragment = path.split('#', 1) # Query if '?' in path: path, query = path.split('?', 1) return Url(scheme, auth, host, port, path, query, fragment) def get_host(url): """ Deprecated. Use :func:`.parse_url` instead. """ p = parse_url(url) return p.scheme or 'http', p.hostname, p.port def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, basic_auth=None): """ Shortcuts for generating request headers. :param keep_alive: If ``True``, adds 'connection: keep-alive' header. :param accept_encoding: Can be a boolean, list, or string. ``True`` translates to 'gzip,deflate'. List will get joined by comma. String will be used as provided. 
:param user_agent: String representing the user-agent you want, such as "python-urllib3/0.6" :param basic_auth: Colon-separated username:password string for 'authorization: basic ...' auth header. Example: :: >>> make_headers(keep_alive=True, user_agent="Batman/1.0") {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} >>> make_headers(accept_encoding=True) {'accept-encoding': 'gzip,deflate'} """ headers = {} if accept_encoding: if isinstance(accept_encoding, str): pass elif isinstance(accept_encoding, list): accept_encoding = ','.join(accept_encoding) else: accept_encoding = 'gzip,deflate' headers['accept-encoding'] = accept_encoding if user_agent: headers['user-agent'] = user_agent if keep_alive: headers['connection'] = 'keep-alive' if basic_auth: headers['authorization'] = 'Basic ' + \ b64encode(six.b(basic_auth)).decode('utf-8') return headers def is_connection_dropped(conn): """ Returns True if the connection is dropped and should be closed. :param conn: :class:`httplib.HTTPConnection` object. Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us. """ sock = getattr(conn, 'sock', False) if not sock: # Platform-specific: AppEngine return False if not poll: # Platform-specific if not select: # Platform-specific: AppEngine return False try: return select([sock], [], [], 0.0)[0] except SocketError: return True # This version is better on platforms that support it. p = poll() p.register(sock, POLLIN) for (fno, ev) in p.poll(0.0): if fno == sock.fileno(): # Either data is buffered (bad), or the connection is dropped. return True
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/util.py
0.845879
0.151655
util.py
pypi
import gzip import logging import zlib from io import BytesIO from .exceptions import DecodeError from .packages.six import string_types as basestring log = logging.getLogger(__name__) def decode_gzip(data): gzipper = gzip.GzipFile(fileobj=BytesIO(data)) return gzipper.read() def decode_deflate(data): try: return zlib.decompress(data) except zlib.error: return zlib.decompress(data, -zlib.MAX_WBITS) class HTTPResponse(object): """ HTTP Response container. Backwards-compatible to httplib's HTTPResponse but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. Extra parameters for behaviour not present in httplib.HTTPResponse: :param preload_content: If True, the response's body will be preloaded during construction. :param decode_content: If True, attempts to decode specific content-encoding's based on headers (like 'gzip' and 'deflate') will be skipped and raw data will be used instead. :param original_response: When this HTTPResponse wrapper is generated from an httplib.HTTPResponse object, it's convenient to include the original for debug purposes. It's otherwise unused. """ CONTENT_DECODERS = { 'gzip': decode_gzip, 'deflate': decode_deflate, } def __init__(self, body='', headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None): self.headers = headers or {} self.status = status self.version = version self.reason = reason self.strict = strict self._decode_content = decode_content self._body = body if body and isinstance(body, basestring) else None self._fp = None self._original_response = original_response self._pool = pool self._connection = connection if hasattr(body, 'read'): self._fp = body if preload_content and not self._body: self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ Should we redirect and where to? 
:returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in [301, 302, 303, 307]: return self.headers.get('location') return False def release_conn(self): if not self._pool or not self._connection: return self._pool._put_conn(self._connection) self._connection = None @property def data(self): # For backwords-compat with earlier urllib3 0.4 and earlier. if self._body: return self._body if self._fp: return self.read(cache_content=True) def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. If specified, decoding and caching is skipped because we can't decode partial content nor does it make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. (Overridden if ``amt`` is set.) :param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """ content_encoding = self.headers.get('content-encoding') decoder = self.CONTENT_DECODERS.get(content_encoding) if decode_content is None: decode_content = self._decode_content if self._fp is None: return try: if amt is None: # cStringIO doesn't like amt=None data = self._fp.read() else: return self._fp.read(amt) try: if decode_content and decoder: data = decoder(data) except (IOError, zlib.error): raise DecodeError("Received response with content-encoding: %s, but " "failed to decode it." 
% content_encoding) if cache_content: self._body = data return data finally: if self._original_response and self._original_response.isclosed(): self.release_conn() @classmethod def from_httplib(ResponseCls, r, **response_kw): """ Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``. """ # Normalize headers between different versions of Python headers = {} for k, v in r.getheaders(): # Python 3: Header keys are returned capitalised k = k.lower() has_value = headers.get(k) if has_value: # Python 3: Repeating header keys are unmerged. v = ', '.join([has_value, v]) headers[k] = v # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, 'strict', 0) return ResponseCls(body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw) # Backwards-compatibility methods for httplib.HTTPResponse def getheaders(self): return self.headers def getheader(self, name, default=None): return self.headers.get(name, default)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/response.py
0.794185
0.158402
response.py
pypi
import codecs import mimetypes from uuid import uuid4 from io import BytesIO from .packages import six from .packages.six import b writer = codecs.lookup('utf-8')[3] def choose_boundary(): """ Our embarassingly-simple replacement for mimetools.choose_boundary. """ return uuid4().hex def get_content_type(filename): return mimetypes.guess_type(filename)[0] or 'application/octet-stream' def iter_fields(fields): """ Iterate over fields. Supports list of (k, v) tuples and dicts. """ if isinstance(fields, dict): return ((k, v) for k, v in six.iteritems(fields)) return ((k, v) for k, v in fields) def encode_multipart_formdata(fields, boundary=None): """ Encode a dictionary of ``fields`` using the multipart/form-data mime format. :param fields: Dictionary of fields or list of (key, value) field tuples. The key is treated as the field name, and the value as the body of the form-data bytes. If the value is a tuple of two elements, then the first element is treated as the filename of the form-data section. Field names and filenames must be unicode. :param boundary: If not specified, then a random boundary will be generated using :func:`mimetools.choose_boundary`. 
""" body = BytesIO() if boundary is None: boundary = choose_boundary() for fieldname, value in iter_fields(fields): body.write(b('--%s\r\n' % (boundary))) if isinstance(value, tuple): filename, data = value writer(body).write('Content-Disposition: form-data; name="%s"; ' 'filename="%s"\r\n' % (fieldname, filename)) body.write(b('Content-Type: %s\r\n\r\n' % (get_content_type(filename)))) else: data = value writer(body).write('Content-Disposition: form-data; name="%s"\r\n' % (fieldname)) body.write(b'Content-Type: text/plain\r\n\r\n') if isinstance(data, int): data = str(data) # Backwards compatibility if isinstance(data, six.text_type): writer(body).write(data) else: body.write(data) body.write(b'\r\n') body.write(b('--%s--\r\n' % (boundary))) content_type = b('multipart/form-data; boundary=%s' % boundary) return body.getvalue(), content_type
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/filepost.py
0.686265
0.202148
filepost.py
pypi
import logging from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import connection_from_url, port_by_scheme from .request import RequestMethods from .util import parse_url __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] pool_classes_by_scheme = { 'http': HTTPConnectionPool, 'https': HTTPSConnectionPool, } log = logging.getLogger(__name__) class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param \**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: :: >>> manager = PoolManager(num_pools=2) >>> r = manager.urlopen("http://google.com/") >>> r = manager.urlopen("http://google.com/mail") >>> r = manager.urlopen("http://yahoo.com/") >>> len(manager.pools) 2 """ def __init__(self, num_pools=10, **connection_pool_kw): self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme='http'): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. """ port = port or port_by_scheme.get(scheme, 80) pool_key = (scheme, host, port) # If the scheme, host, or port doesn't match existing open connections, # open a new ConnectionPool. 
pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type pool_cls = pool_classes_by_scheme[scheme] pool = pool_cls(host, port, **self.connection_pool_kw) self.pools[pool_key] = pool return pool def connection_from_url(self, url): """ Similar to :func:`urllib3.connectionpool.connection_from_url` but doesn't pass any additional parameters to the :class:`urllib3.connectionpool.ConnectionPool` constructor. Additional parameters are taken from the :class:`.PoolManager` constructor. """ u = parse_url(url) return self.connection_from_host(u.host, port=u.port, scheme=u.scheme) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw['assert_same_host'] = False kw['redirect'] = False response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response if response.status == 303: method = 'GET' log.info("Redirecting %s -> %s" % (url, redirect_location)) kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown return self.urlopen(method, redirect_location, **kw) class ProxyManager(RequestMethods): """ Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method will make requests to any url through the defined proxy. 
""" def __init__(self, proxy_pool): self.proxy_pool = proxy_pool def _set_proxy_headers(self, headers=None): headers = headers or {} # Same headers are curl passes for --proxy1.0 headers['Accept'] = '*/*' headers['Proxy-Connection'] = 'Keep-Alive' return headers def urlopen(self, method, url, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." kw['assert_same_host'] = False kw['headers'] = self._set_proxy_headers(kw.get('headers')) return self.proxy_pool.urlopen(method, url, **kw) def proxy_from_url(url, **pool_kw): proxy_pool = connection_from_url(url, **pool_kw) return ProxyManager(proxy_pool)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/poolmanager.py
0.888982
0.172241
poolmanager.py
pypi
from collections import MutableMapping from threading import Lock try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict __all__ = ['RecentlyUsedContainer'] _Null = object() class RecentlyUsedContainer(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Every time an item is evicted from the container, ``dispose_func(value)`` is called. Callback which will get called """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self._lock = Lock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self._lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self._lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. 
if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self._lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self._lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): with self._lock: # Copy pointers to all values, then wipe the mapping # under Python 2, this copies the list of values twice :-| values = list(self._container.values()) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self._lock: return self._container.keys()
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/_collections.py
0.873431
0.266381
_collections.py
pypi
try: from urllib.parse import urlencode except ImportError: from urllib import urlencode from .filepost import encode_multipart_formdata __all__ = ['RequestMethods'] class RequestMethods(object): """ Convenience mixin for classes who implement a :meth:`urlopen` method, such as :class:`~urllib3.connectionpool.HTTPConnectionPool` and :class:`~urllib3.poolmanager.PoolManager`. Provides behavior for making common types of HTTP request methods and decides which type of request field encoding to use. Specifically, :meth:`.request_encode_url` is for sending requests whose fields are encoded in the URL (such as GET, HEAD, DELETE). :meth:`.request_encode_body` is for sending requests whose fields are encoded in the *body* of the request using multipart or www-orm-urlencoded (such as for POST, PUT, PATCH). :meth:`.request` is for making any kind of request, it will look up the appropriate encoding format and use one of the above two methods to make the request. """ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE']) def urlopen(self, method, url, body=None, headers=None, encode_multipart=True, multipart_boundary=None, **kw): # Abstract raise NotImplemented("Classes extending RequestMethods must implement " "their own ``urlopen`` method.") def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the appropriate encoding of ``fields`` based on the ``method`` used. This is a convenience method that requires the least amount of manual effort. It can be used in most situations, while still having the option to drop down to more specific methods when necessary, such as :meth:`request_encode_url`, :meth:`request_encode_body`, or even the lowest level :meth:`urlopen`. 
""" method = method.upper() if method in self._encode_url_methods: return self.request_encode_url(method, url, fields=fields, headers=headers, **urlopen_kw) else: return self.request_encode_body(method, url, fields=fields, headers=headers, **urlopen_kw) def request_encode_url(self, method, url, fields=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. """ if fields: url += '?' + urlencode(fields) return self.urlopen(method, url, **urlopen_kw) def request_encode_body(self, method, url, fields=None, headers=None, encode_multipart=True, multipart_boundary=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. When ``encode_multipart=True`` (default), then :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the payload with the appropriate content type. Otherwise :meth:`urllib.urlencode` is used with the 'application/x-www-form-urlencoded' content type. Multipart encoding must be used when posting files, and it's reasonably safe to use it in other times too. However, it may break request signing, such as with OAuth. Supports an optional ``fields`` parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data) tuple. For example: :: fields = { 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'nonamefile': ('contents of nonamefile field'), } When uploading a file, providing a filename (the first parameter of the tuple) is optional but recommended to best mimick behavior of browsers. Note that if ``headers`` are supplied, the 'Content-Type' header will be overwritten because it depends on the dynamic random boundary string which is used to compose the body of the request. 
The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ if encode_multipart: body, content_type = encode_multipart_formdata(fields or {}, boundary=multipart_boundary) else: body, content_type = (urlencode(fields or {}), 'application/x-www-form-urlencoded') headers = headers or {} headers.update({'Content-Type': content_type}) return self.urlopen(method, url, body=body, headers=headers, **urlopen_kw)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/request.py
0.735642
0.223843
request.py
pypi
try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. 
dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/urllib3/packages/ordered_dict.py
0.596903
0.266137
ordered_dict.py
pypi
import string import urllib2 from oauthlib.common import quote, unquote UNICODE_ASCII_CHARACTER_SET = (string.ascii_letters.decode('ascii') + string.digits.decode('ascii')) def filter_params(target): """Decorator which filters params to remove non-oauth_* parameters Assumes the decorated method takes a params dict or list of tuples as its first argument. """ def wrapper(params, *args, **kwargs): params = filter_oauth_params(params) return target(params, *args, **kwargs) wrapper.__doc__ = target.__doc__ return wrapper def filter_oauth_params(params): """Removes all non oauth parameters from a dict or a list of params.""" is_oauth = lambda kv: kv[0].startswith(u"oauth_") if isinstance(params, dict): return filter(is_oauth, params.items()) else: return filter(is_oauth, params) def escape(u): """Escape a unicode string in an OAuth-compatible fashion. Per `section 3.6`_ of the spec. .. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6 """ if not isinstance(u, unicode): raise ValueError('Only unicode objects are escapable.') # Letters, digits, and the characters '_.-' are already treated as safe # by urllib.quote(). We need to add '~' to fully support rfc5849. return quote(u, safe='~') def unescape(u): if not isinstance(u, unicode): raise ValueError('Only unicode objects are unescapable.') return unquote(u) def urlencode(query): """Encode a sequence of two-element tuples or dictionary into a URL query string. Operates using an OAuth-safe escape() method, in contrast to urllib.urlencode. 
""" # Convert dictionaries to list of tuples if isinstance(query, dict): query = query.items() return u"&".join([u'='.join([escape(k), escape(v)]) for k, v in query]) def parse_keqv_list(l): """A unicode-safe version of urllib2.parse_keqv_list""" encoded_list = [u.encode('utf-8') for u in l] encoded_parsed = urllib2.parse_keqv_list(encoded_list) return dict((k.decode('utf-8'), v.decode('utf-8')) for k, v in encoded_parsed.items()) def parse_http_list(u): """A unicode-safe version of urllib2.parse_http_list""" encoded_str = u.encode('utf-8') encoded_list = urllib2.parse_http_list(encoded_str) return [s.decode('utf-8') for s in encoded_list] def parse_authorization_header(authorization_header): """Parse an OAuth authorization header into a list of 2-tuples""" auth_scheme = u'OAuth ' if authorization_header.startswith(auth_scheme): authorization_header = authorization_header.replace(auth_scheme, u'', 1) items = parse_http_list(authorization_header) try: return parse_keqv_list(items).items() except ValueError: raise ValueError('Malformed authorization header')
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/oauthlib/oauth1/rfc5849/utils.py
0.690976
0.345105
utils.py
pypi
import json import urlparse from oauthlib.common import add_params_to_uri, add_params_to_qs def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None, scope=None, state=None, **kwargs): """Prepare the authorization grant request URI. The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format as defined by [W3C.REC-html401-19991224]: response_type REQUIRED. Value MUST be set to "code". client_id REQUIRED. The client identifier as described in `Section 2.2`_. redirect_uri OPTIONAL. As described in `Section 3.1.2`_. scope OPTIONAL. The scope of the access request as described by `Section 3.3`_. state RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in `Section 10.12`_. GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1 Host: server.example.com .. _`W3C.REC-html401-19991224`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#ref-W3C.REC-html401-19991224 .. _`Section 2.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-2.2 .. _`Section 3.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.1.2 .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 .. 
_`section 10.12`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-10.12 """ params = [((u'response_type', response_type)), ((u'client_id', client_id))] if redirect_uri: params.append((u'redirect_uri', redirect_uri)) if scope: params.append((u'scope', scope)) if state: params.append((u'state', state)) for k in kwargs: params.append((unicode(k), kwargs[k])) return add_params_to_uri(uri, params) def prepare_token_request(grant_type, body=u'', **kwargs): """Prepare the access token request. The client makes a request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "authorization_code". code REQUIRED. The authorization code received from the authorization server. redirect_uri REQUIRED, if the "redirect_uri" parameter was included in the authorization request as described in `Section 4.1.1`_, and their values MUST be identical. grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb .. _`Section 4.1.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-4.1.1 """ params = [(u'grant_type', grant_type)] for k in kwargs: params.append((unicode(k), kwargs[k])) return add_params_to_qs(body, params) def parse_authorization_code_response(uri, state=None): """Parse authorization grant response URI into a dict. If the resource owner grants the access request, the authorization server issues an authorization code and delivers it to the client by adding the following parameters to the query component of the redirection URI using the "application/x-www-form-urlencoded" format: code REQUIRED. The authorization code generated by the authorization server. The authorization code MUST expire shortly after it is issued to mitigate the risk of leaks. A maximum authorization code lifetime of 10 minutes is RECOMMENDED. The client MUST NOT use the authorization code more than once. 
If an authorization code is used more than once, the authorization server MUST deny the request and SHOULD revoke (when possible) all tokens previously issued based on that authorization code. The authorization code is bound to the client identifier and redirection URI. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. For example, the authorization server redirects the user-agent by sending the following HTTP response: HTTP/1.1 302 Found Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA &state=xyz """ query = urlparse.urlparse(uri).query params = dict(urlparse.parse_qsl(query)) if not u'code' in params: raise KeyError("Missing code parameter in response.") if state and params.get(u'state', None) != state: raise ValueError("Mismatching or missing state in response.") return params def parse_implicit_response(uri, state=None, scope=None): """Parse the implicit token response URI into a dict. If the resource owner grants the access request, the authorization server issues an access token and delivers it to the client by adding the following parameters to the fragment component of the redirection URI using the "application/x-www-form-urlencoded" format: access_token REQUIRED. The access token issued by the authorization server. token_type REQUIRED. The type of the token issued as described in Section 7.1. Value is case insensitive. expires_in RECOMMENDED. The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. scope OPTIONAL, if identical to the scope requested by the client, otherwise REQUIRED. The scope of the access token as described by Section 3.3. state REQUIRED if the "state" parameter was present in the client authorization request. 
The exact value received from the client. HTTP/1.1 302 Found Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA &state=xyz&token_type=example&expires_in=3600 """ fragment = urlparse.urlparse(uri).fragment params = dict(urlparse.parse_qsl(fragment, keep_blank_values=True)) validate_token_parameters(params, scope) if state and params.get(u'state', None) != state: raise ValueError("Mismatching or missing state in params.") return params def parse_token_response(body, scope=None): """Parse the JSON token response body into a dict. The authorization server issues an access token and optional refresh token, and constructs the response by adding the following parameters to the entity body of the HTTP response with a 200 (OK) status code: access_token REQUIRED. The access token issued by the authorization server. token_type REQUIRED. The type of the token issued as described in `Section 7.1`_. Value is case insensitive. expires_in RECOMMENDED. The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. refresh_token OPTIONAL. The refresh token which can be used to obtain new access tokens using the same authorization grant as described in `Section 6`_. scope OPTIONAL, if identical to the scope requested by the client, otherwise REQUIRED. The scope of the access token as described by `Section 3.3`_. The parameters are included in the entity body of the HTTP response using the "application/json" media type as defined by [`RFC4627`_]. The parameters are serialized into a JSON structure by adding each parameter at the highest structure level. Parameter names and string values are included as JSON strings. Numerical values are included as JSON numbers. The order of parameters does not matter and can vary. 
For example: HTTP/1.1 200 OK Content-Type: application/json;charset=UTF-8 Cache-Control: no-store Pragma: no-cache { "access_token":"2YotnFZFEjr1zCsicMWpAA", "token_type":"example", "expires_in":3600, "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA", "example_parameter":"example_value" } .. _`Section 7.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-7.1 .. _`Section 6`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-6 .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 .. _`RFC4627`: http://tools.ietf.org/html/rfc4627 """ params = json.loads(body) validate_token_parameters(params, scope) return params def validate_token_parameters(params, scope=None): """Ensures token precence, token type, expiration and scope in params.""" if not u'access_token' in params: raise KeyError("Missing access token parameter.") if not u'token_type' in params: raise KeyError("Missing token type parameter.") # If the issued access token scope is different from the one requested by # the client, the authorization server MUST include the "scope" response # parameter to inform the client of the actual scope granted. # http://tools.ietf.org/html/draft-ietf-oauth-v2-25#section-3.3 new_scope = params.get(u'scope', None) if scope and new_scope and scope != new_scope: raise Warning("Scope has changed to %s." % new_scope)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/oauthlib/oauth2/draft25/parameters.py
0.723407
0.250271
parameters.py
pypi
from __future__ import absolute_import """ oauthlib.oauth2.draft25.tokens ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains methods for adding two types of access tokens to requests. - Bearer http://tools.ietf.org/html/draft-ietf-oauth-saml2-bearer-08 - MAC http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00 """ from binascii import b2a_base64 import hashlib import hmac from urlparse import urlparse from oauthlib.common import add_params_to_uri, add_params_to_qs from . import utils def prepare_mac_header(token, uri, key, http_method, nonce=None, headers=None, body=None, ext=u'', hash_algorithm=u'hmac-sha-1'): """Add an `MAC Access Authentication`_ signature to headers. Unlike OAuth 1, this HMAC signature does not require inclusion of the request payload/body, neither does it use a combination of client_secret and token_secret but rather a mac_key provided together with the access token. Currently two algorithms are supported, "hmac-sha-1" and "hmac-sha-256", `extension algorithms`_ are not supported. Example MAC Authorization header, linebreaks added for clarity Authorization: MAC id="h480djs93hd8", nonce="1336363200:dj83hs9s", mac="bhCQXTVyfj5cmA9uKkPFx1zeOXM=" .. _`MAC Access Authentication`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01 .. _`extension algorithms`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-7.1 :param uri: Request URI. :param headers: Request headers as a dictionary. :param http_method: HTTP Request method. :param key: MAC given provided by token endpoint. :param algorithm: HMAC algorithm provided by token endpoint. :return: headers dictionary with the authorization field added. 
""" http_method = http_method.upper() host, port = utils.host_from_uri(uri) if hash_algorithm.lower() == u'hmac-sha-1': h = hashlib.sha1 else: h = hashlib.sha256 nonce = nonce or u'{0}:{1}'.format(utils.generate_nonce(), utils.generate_timestamp()) sch, net, path, par, query, fra = urlparse(uri) if query: request_uri = path + u'?' + query else: request_uri = path # Hash the body/payload if body is not None: bodyhash = b2a_base64(h(body).digest())[:-1].decode('utf-8') else: bodyhash = u'' # Create the normalized base string base = [] base.append(nonce) base.append(http_method.upper()) base.append(request_uri) base.append(host) base.append(port) base.append(bodyhash) base.append(ext) base_string = '\n'.join(base) + u'\n' # hmac struggles with unicode strings - http://bugs.python.org/issue5285 if isinstance(key, unicode): key = key.encode('utf-8') sign = hmac.new(key, base_string, h) sign = b2a_base64(sign.digest())[:-1].decode('utf-8') header = [] header.append(u'MAC id="%s"' % token) header.append(u'nonce="%s"' % nonce) if bodyhash: header.append(u'bodyhash="%s"' % bodyhash) if ext: header.append(u'ext="%s"' % ext) header.append(u'mac="%s"' % sign) headers = headers or {} headers[u'Authorization'] = u', '.join(header) return headers def prepare_bearer_uri(token, uri): """Add a `Bearer Token`_ to the request URI. Not recommended, use only if client can't use authorization header or body. http://www.example.com/path?access_token=h480djs93hd8 .. _`Bearer Token`: http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-18 """ return add_params_to_uri(uri, [((u'access_token', token))]) def prepare_bearer_headers(token, headers=None): """Add a `Bearer Token`_ to the request URI. Recommended method of passing bearer tokens. Authorization: Bearer h480djs93hd8 .. 
_`Bearer Token`: http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-18 """ headers = headers or {} headers[u'Authorization'] = u'Bearer %s' % token return headers def prepare_bearer_body(token, body=u''): """Add a `Bearer Token`_ to the request body. access_token=h480djs93hd8 .. _`Bearer Token`: http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-18 """ return add_params_to_qs(body, [((u'access_token', token))])
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/oauthlib/oauth2/draft25/tokens.py
0.865608
0.240429
tokens.py
pypi
from tokens import prepare_bearer_uri, prepare_bearer_headers from tokens import prepare_bearer_body, prepare_mac_header from parameters import prepare_grant_uri, prepare_token_request from parameters import parse_authorization_code_response from parameters import parse_implicit_response, parse_token_response AUTH_HEADER = u'auth_header' URI_QUERY = u'query' BODY = u'body' class Client(object): def __init__(self, client_id, default_redirect_uri=None, token_type=None, access_token=None, refresh_token=None): """Initialize a client with commonly used attributes.""" self.client_id = client_id self.default_redirect_uri = default_redirect_uri self.token_type = token_type self.access_token = access_token self.refresh_token = refresh_token self.token_types = { u'bearer': self._add_bearer_token, u'mac': self._add_mac_token } def add_token(self, uri, http_method=u'GET', body=None, headers=None, token_placement=AUTH_HEADER): """Add token to the request uri, body or authorization header. The access token type provides the client with the information required to successfully utilize the access token to make a protected resource request (along with type-specific attributes). The client MUST NOT use an access token if it does not understand the token type. For example, the "bearer" token type defined in [I-D.ietf-oauth-v2-bearer] is utilized by simply including the access token string in the request: GET /resource/1 HTTP/1.1 Host: example.com Authorization: Bearer mF_9.B5f-4.1JqM while the "mac" token type defined in [I-D.ietf-oauth-v2-http-mac] is utilized by issuing a MAC key together with the access token which is used to sign certain components of the HTTP requests: GET /resource/1 HTTP/1.1 Host: example.com Authorization: MAC id="h480djs93hd8", nonce="274312:dj83hs9s", mac="kDZvddkndxvhGRXZhvuDjEWhGeE=" .. _`I-D.ietf-oauth-v2-bearer`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#ref-I-D.ietf-oauth-v2-bearer .. 
_`I-D.ietf-oauth-v2-http-mac`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#ref-I-D.ietf-oauth-v2-http-mac """ return self.token_types[self.token_type](uri, http_method, body, headers, token_placement) def prepare_refresh_body(self, body=u'', refresh_token=None, scope=None): """Prepare an access token request, using a refresh token. If the authorization server issued a refresh token to the client, the client makes a refresh request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "refresh_token". refresh_token REQUIRED. The refresh token issued to the client. scope OPTIONAL. The scope of the access request as described by Section 3.3. The requested scope MUST NOT include any scope not originally granted by the resource owner, and if omitted is treated as equal to the scope originally granted by the resource owner. """ refresh_token = refresh_token or self.refresh_token return prepare_token_request(u'refresh_token', body=body, scope=scope, refresh_token=refresh_token) def _add_bearer_token(self, uri, http_method=u'GET', body=None, headers=None, token_placement=AUTH_HEADER): """Add a bearer token to the request uri, body or authorization header.""" if token_placement == AUTH_HEADER: headers = prepare_bearer_headers(self.token, headers) if token_placement == URI_QUERY: uri = prepare_bearer_uri(self.token, uri) if token_placement == BODY: body = prepare_bearer_body(self.token, body) return uri, headers, body def _add_mac_token(self, uri, http_method=u'GET', body=None, headers=None, token_placement=AUTH_HEADER): """Add a MAC token to the request authorization header.""" headers = prepare_mac_header(self.token, uri, self.key, http_method, headers=headers, body=body, ext=self.ext, hash_algorithm=self.hash_algorithm) return uri, headers, body def _populate_attributes(self, response): """Add commonly used values such as access_token to 
self.""" if u'access_token' in response: self.access_token = response.get(u'access_token') if u'refresh_token' in response: self.refresh_token = response.get(u'refresh_token') if u'token_type' in response: self.token_type = response.get(u'token_type') if u'expires_in' in response: self.expires_in = response.get(u'expires_in') if u'code' in response: self.code = response.get(u'code') def prepare_request_uri(self, *args, **kwargs): """Abstract method used to create request URIs.""" raise NotImplementedError("Must be implemented by inheriting classes.") def prepare_request_body(self, *args, **kwargs): """Abstract method used to create request bodies.""" raise NotImplementedError("Must be implemented by inheriting classes.") def parse_request_uri_response(self, *args, **kwargs): """Abstract method used to parse redirection responses.""" def parse_request_body_response(self, *args, **kwargs): """Abstract method used to parse JSON responses.""" class WebApplicationClient(Client): """A client utilizing the authorization code grant workflow. A web application is a confidential client running on a web server. Resource owners access the client via an HTML user interface rendered in a user-agent on the device used by the resource owner. The client credentials as well as any access token issued to the client are stored on the web server and are not exposed to or accessible by the resource owner. The authorization code grant type is used to obtain both access tokens and refresh tokens and is optimized for confidential clients. As a redirection-based flow, the client must be capable of interacting with the resource owner's user-agent (typically a web browser) and capable of receiving incoming requests (via redirection) from the authorization server. 
""" def prepare_request_uri(self, uri, redirect_uri=None, scope=None, state=None, **kwargs): """Prepare the authorization code request URI The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format as defined by [`W3C.REC-html401-19991224`_]: response_type REQUIRED. Value MUST be set to "code". client_id REQUIRED. The client identifier as described in `Section 2.2`_. redirect_uri OPTIONAL. As described in `Section 3.1.2`_. scope OPTIONAL. The scope of the access request as described by `Section 3.3`_. state RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in `Section 10.12`_. .. _`W3C.REC-html401-19991224`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#ref-W3C.REC-html401-19991224 .. _`Section 2.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-2.2 .. _`Section 3.1.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.1.2 .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 .. _`Section 10.12`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-10.12 """ redirect_uri = redirect_uri or self.default_redirect_uri return prepare_grant_uri(uri, self.client_id, u'code', redirect_uri=redirect_uri, scope=scope, state=state, **kwargs) def prepare_request_body(self, code, body=u'', redirect_uri=None, **kwargs): """Prepare the access token request body. The client makes a request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "authorization_code". code REQUIRED. The authorization code received from the authorization server. 
redirect_uri REQUIRED, if the "redirect_uri" parameter was included in the authorization request as described in Section 4.1.1, and their values MUST be identical. .. _`Section 4.1.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-4.1.1 """ redirect_uri = redirect_uri or self.default_redirect_uri code = code or self.code return prepare_token_request(u'authorization_code', code=code, body=body, redirect_uri=redirect_uri, **kwargs) def parse_request_uri_response(self, uri, state=None): """Parse the URI query for code and state. If the resource owner grants the access request, the authorization server issues an authorization code and delivers it to the client by adding the following parameters to the query component of the redirection URI using the "application/x-www-form-urlencoded" format: code REQUIRED. The authorization code generated by the authorization server. The authorization code MUST expire shortly after it is issued to mitigate the risk of leaks. A maximum authorization code lifetime of 10 minutes is RECOMMENDED. The client MUST NOT use the authorization code more than once. If an authorization code is used more than once, the authorization server MUST deny the request and SHOULD revoke (when possible) all tokens previously issued based on that authorization code. The authorization code is bound to the client identifier and redirection URI. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. """ response = parse_authorization_code_response(uri, state=state) self._populate_attributes(response) return response def parse_request_body_response(self, body, scope=None): """Parse the JSON response body. If the access token request is valid and authorized, the authorization server issues an access token and optional refresh token as described in `Section 5.1`_. 
If the request client authentication failed or is invalid, the authorization server returns an error response as described in `Section 5.2`_. .. `Section 5.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.1 .. `Section 5.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.2 """ response = parse_token_response(body, scope=scope) self._populate_attributes(response) return response class UserAgentClient(Client): """A public client utilizing the implicit code grant workflow. A user-agent-based application is a public client in which the client code is downloaded from a web server and executes within a user-agent (e.g. web browser) on the device used by the resource owner. Protocol data and credentials are easily accessible (and often visible) to the resource owner. Since such applications reside within the user-agent, they can make seamless use of the user-agent capabilities when requesting authorization. The implicit grant type is used to obtain access tokens (it does not support the issuance of refresh tokens) and is optimized for public clients known to operate a particular redirection URI. These clients are typically implemented in a browser using a scripting language such as JavaScript. As a redirection-based flow, the client must be capable of interacting with the resource owner's user-agent (typically a web browser) and capable of receiving incoming requests (via redirection) from the authorization server. Unlike the authorization code grant type in which the client makes separate requests for authorization and access token, the client receives the access token as the result of the authorization request. The implicit grant type does not include client authentication, and relies on the presence of the resource owner and the registration of the redirection URI. Because the access token is encoded into the redirection URI, it may be exposed to the resource owner and other applications residing on the same device. 
""" def prepare_request_uri(self, uri, redirect_uri=None, scope=None, state=None, **kwargs): """Prepare the implicit grant request URI. The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format: response_type REQUIRED. Value MUST be set to "token". client_id REQUIRED. The client identifier as described in Section 2.2. redirect_uri OPTIONAL. As described in Section 3.1.2. scope OPTIONAL. The scope of the access request as described by Section 3.3. state RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in Section 10.12. """ redirect_uri = redirect_uri or self.default_redirect_uri return prepare_grant_uri(uri, self.client_id, u'token', redirect_uri=redirect_uri, state=state, scope=scope, **kwargs) def parse_request_uri_response(self, uri, state=None, scope=None): """Parse the response URI fragment. If the resource owner grants the access request, the authorization server issues an access token and delivers it to the client by adding the following parameters to the fragment component of the redirection URI using the "application/x-www-form-urlencoded" format: access_token REQUIRED. The access token issued by the authorization server. token_type REQUIRED. The type of the token issued as described in `Section 7.1`_. Value is case insensitive. expires_in RECOMMENDED. The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. 
scope OPTIONAL, if identical to the scope requested by the client, otherwise REQUIRED. The scope of the access token as described by `Section 3.3`_. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. .. _`Section 7.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-7.1 .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 """ response = parse_implicit_response(uri, state=state, scope=scope) self._populate_attributes(response) return response class NativeApplicationClient(Client): """A public client utilizing the client credentials grant workflow. A native application is a public client installed and executed on the device used by the resource owner. Protocol data and credentials are accessible to the resource owner. It is assumed that any client authentication credentials included in the application can be extracted. On the other hand, dynamically issued credentials such as access tokens or refresh tokens can receive an acceptable level of protection. At a minimum, these credentials are protected from hostile servers with which the application may interact with. On some platforms these credentials might be protected from other applications residing on the same device. The client can request an access token using only its client credentials (or other supported means of authentication) when the client is requesting access to the protected resources under its control, or those of another resource owner which has been previously arranged with the authorization server (the method of which is beyond the scope of this specification). The client credentials grant type MUST only be used by confidential clients. Since the client authentication is used as the authorization grant, no additional authorization request is needed. """ def prepare_request_body(self, body=u'', scope=None, **kwargs): """Add the client credentials to the request body. 
The client makes a request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "client_credentials". scope OPTIONAL. The scope of the access request as described by `Section 3.3`_. .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 """ return prepare_token_request(u'client_credentials', body=body, scope=scope, **kwargs) def parse_request_body_response(self, body, scope=None): """Parse the JSON response body. If the access token request is valid and authorized, the authorization server issues an access token as described in `Section 5.1`_. A refresh token SHOULD NOT be included. If the request failed client authentication or is invalid, the authorization server returns an error response as described in `Section 5.2`_. .. `Section 5.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.1 .. `Section 5.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.2 """ response = parse_token_response(body, scope=scope) self._populate_attributes(response) return response class PasswordCredentialsClient(Client): """A public client using the resource owner password and username directly. The resource owner password credentials grant type is suitable in cases where the resource owner has a trust relationship with the client, such as the device operating system or a highly privileged application. The authorization server should take special care when enabling this grant type, and only allow it when other flows are not viable. The grant type is suitable for clients capable of obtaining the resource owner's credentials (username and password, typically using an interactive form). It is also used to migrate existing clients using direct authentication schemes such as HTTP Basic or Digest authentication to OAuth by converting the stored credentials to an access token. 
The method through which the client obtains the resource owner credentials is beyond the scope of this specification. The client MUST discard the credentials once an access token has been obtained. """ def prepare_request_body(self, username, password, body=u'', scope=None, **kwargs): """Add the resource owner password and username to the request body. The client makes a request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to "password". username REQUIRED. The resource owner username. password REQUIRED. The resource owner password. scope OPTIONAL. The scope of the access request as described by `Section 3.3`_. .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 """ return prepare_token_request(u'password', body=body, username=username, password=password, scope=scope, **kwargs) def parse_request_body_response(self, body, scope=None): """Parse the JSON response body. If the access token request is valid and authorized, the authorization server issues an access token and optional refresh token as described in `Section 5.1`_. If the request failed client authentication or is invalid, the authorization server returns an error response as described in `Section 5.2`_. .. `Section 5.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.1 .. `Section 5.2`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-5.2 """ response = parse_token_response(body, scope=scope) self._populate_attributes(response) return response class Server(object): pass
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version0/requests/packages/oauthlib/oauth2/draft25/__init__.py
0.8477
0.207295
__init__.py
pypi
from .structures import LookupDict _codes = { # Informational. 100: ('continue',), 101: ('switching_protocols',), 102: ('processing',), 103: ('checkpoint',), 122: ('uri_too_long', 'request_uri_too_long'), 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), 201: ('created',), 202: ('accepted',), 203: ('non_authoritative_info', 'non_authoritative_information'), 204: ('no_content',), 205: ('reset_content', 'reset'), 206: ('partial_content', 'partial'), 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), 208: ('im_used',), # Redirection. 300: ('multiple_choices',), 301: ('moved_permanently', 'moved', '\\o-'), 302: ('found',), 303: ('see_other', 'other'), 304: ('not_modified',), 305: ('use_proxy',), 306: ('switch_proxy',), 307: ('temporary_redirect', 'temporary_moved', 'temporary'), 308: ('resume_incomplete', 'resume'), # Client Error. 400: ('bad_request', 'bad'), 401: ('unauthorized',), 402: ('payment_required', 'payment'), 403: ('forbidden',), 404: ('not_found', '-o-'), 405: ('method_not_allowed', 'not_allowed'), 406: ('not_acceptable',), 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), 408: ('request_timeout', 'timeout'), 409: ('conflict',), 410: ('gone',), 411: ('length_required',), 412: ('precondition_failed', 'precondition'), 413: ('request_entity_too_large',), 414: ('request_uri_too_large',), 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), 417: ('expectation_failed',), 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), 422: ('unprocessable_entity', 'unprocessable'), 423: ('locked',), 424: ('failed_dependency', 'dependency'), 425: ('unordered_collection', 'unordered'), 426: ('upgrade_required', 'upgrade'), 428: ('precondition_required', 'precondition'), 429: ('too_many_requests', 'too_many'), 431: ('header_fields_too_large', 'fields_too_large'), 444: ('no_response', 'none'), 449: ('retry_with', 
'retry'), 450: ('blocked_by_windows_parental_controls', 'parental_controls'), 499: ('client_closed_request',), # Server Error. 500: ('internal_server_error', 'server_error', '/o\\', '✗'), 501: ('not_implemented',), 502: ('bad_gateway',), 503: ('service_unavailable', 'unavailable'), 504: ('gateway_timeout',), 505: ('http_version_not_supported', 'http_version'), 506: ('variant_also_negotiates',), 507: ('insufficient_storage',), 509: ('bandwidth_limit_exceeded', 'bandwidth'), 510: ('not_extended',), } codes = LookupDict(name='status_codes') for (code, titles) in list(_codes.items()): for title in titles: setattr(codes, title, code) if not title.startswith('\\'): setattr(codes, title.upper(), code)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/status_codes.py
0.602179
0.164248
status_codes.py
pypi
from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') <Response [200]> """ session = sessions.Session() return session.request(method=method, url=url, **kwargs) def get(url, **kwargs): """Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return request('get', url, **kwargs) def options(url, **kwargs): """Sends a OPTIONS request. Returns :class:`Response` object. 
:param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def head(url, **kwargs): """Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('head', url, **kwargs) def post(url, data=None, **kwargs): """Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('post', url, data=data, **kwargs) def put(url, data=None, **kwargs): """Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): """Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): """Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return request('delete', url, **kwargs)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/api.py
0.860046
0.382891
api.py
pypi
import gzip import logging import zlib from io import BytesIO from .exceptions import DecodeError from .packages.six import string_types as basestring log = logging.getLogger(__name__) def decode_gzip(data): gzipper = gzip.GzipFile(fileobj=BytesIO(data)) return gzipper.read() def decode_deflate(data): try: return zlib.decompress(data) except zlib.error: return zlib.decompress(data, -zlib.MAX_WBITS) class HTTPResponse(object): """ HTTP Response container. Backwards-compatible to httplib's HTTPResponse but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. Extra parameters for behaviour not present in httplib.HTTPResponse: :param preload_content: If True, the response's body will be preloaded during construction. :param decode_content: If True, attempts to decode specific content-encoding's based on headers (like 'gzip' and 'deflate') will be skipped and raw data will be used instead. :param original_response: When this HTTPResponse wrapper is generated from an httplib.HTTPResponse object, it's convenient to include the original for debug purposes. It's otherwise unused. """ CONTENT_DECODERS = { 'gzip': decode_gzip, 'deflate': decode_deflate, } def __init__(self, body='', headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None): self.headers = headers or {} self.status = status self.version = version self.reason = reason self.strict = strict self._decode_content = decode_content self._body = body if body and isinstance(body, basestring) else None self._fp = None self._original_response = original_response self._pool = pool self._connection = connection if hasattr(body, 'read'): self._fp = body if preload_content and not self._body: self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ Should we redirect and where to? 
:returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in [301, 302, 303, 307]: return self.headers.get('location') return False def release_conn(self): if not self._pool or not self._connection: return self._pool._put_conn(self._connection) self._connection = None @property def data(self): # For backwords-compat with earlier urllib3 0.4 and earlier. if self._body: return self._body if self._fp: return self.read(cache_content=True) def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. If specified, decoding and caching is skipped because we can't decode partial content nor does it make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. (Overridden if ``amt`` is set.) :param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) 
""" # Note: content-encoding value should be case-insensitive, per RFC 2616 # Section 3.5 content_encoding = self.headers.get('content-encoding', '').lower() decoder = self.CONTENT_DECODERS.get(content_encoding) if decode_content is None: decode_content = self._decode_content if self._fp is None: return try: if amt is None: # cStringIO doesn't like amt=None data = self._fp.read() else: return self._fp.read(amt) try: if decode_content and decoder: data = decoder(data) except (IOError, zlib.error): raise DecodeError("Received response with content-encoding: %s, but " "failed to decode it." % content_encoding) if cache_content: self._body = data return data finally: if self._original_response and self._original_response.isclosed(): self.release_conn() @classmethod def from_httplib(ResponseCls, r, **response_kw): """ Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``. """ # Normalize headers between different versions of Python headers = {} for k, v in r.getheaders(): # Python 3: Header keys are returned capitalised k = k.lower() has_value = headers.get(k) if has_value: # Python 3: Repeating header keys are unmerged. v = ', '.join([has_value, v]) headers[k] = v # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, 'strict', 0) return ResponseCls(body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw) # Backwards-compatibility methods for httplib.HTTPResponse def getheaders(self): return self.headers def getheader(self, name, default=None): return self.headers.get(name, default)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/packages/urllib3/response.py
0.825941
0.157687
response.py
pypi
import codecs
import mimetypes

from uuid import uuid4
from io import BytesIO

from .packages import six
from .packages.six import b

writer = codecs.lookup('utf-8')[3]


def choose_boundary():
    """Return a random hex string usable as a multipart boundary.

    (Our embarrassingly-simple replacement for mimetools.choose_boundary.)
    """
    return uuid4().hex


def get_content_type(filename):
    """Guess the MIME type for *filename*, defaulting to application/octet-stream."""
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'


def iter_fields(fields):
    """Iterate over fields given either as a dict or a list of (k, v) tuples."""
    if isinstance(fields, dict):
        return ((key, val) for key, val in six.iteritems(fields))
    return ((key, val) for key, val in fields)


def encode_multipart_formdata(fields, boundary=None):
    """Encode ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, value) or (key, value, MIME type)
        field tuples.  The key is the field name, the value the body of the
        form-data bytes.  A two-tuple value is ``(filename, data)`` with the
        MIME type guessed from the filename; a three-tuple carries an explicit
        MIME type as its third element.  Field names and filenames must be
        unicode.

    :param boundary:
        If not specified, a random boundary is generated with
        :func:`choose_boundary`.

    :returns: ``(body_bytes, content_type_header_value)`` tuple.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for fieldname, value in iter_fields(fields):
        body.write(b('--%s\r\n' % (boundary)))

        if isinstance(value, tuple):
            # File upload: (filename, data[, content_type])
            if len(value) == 3:
                filename, payload, content_type = value
            else:
                filename, payload = value
                content_type = get_content_type(filename)
            disposition = ('Content-Disposition: form-data; name="%s"; '
                           'filename="%s"\r\n' % (fieldname, filename))
            writer(body).write(disposition)
            body.write(b('Content-Type: %s\r\n\r\n' % (content_type,)))
        else:
            # Plain form field.
            payload = value
            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
                               % (fieldname))
            body.write(b'\r\n')

        if isinstance(payload, int):
            payload = str(payload)  # Backwards compatibility

        if isinstance(payload, six.text_type):
            writer(body).write(payload)
        else:
            body.write(payload)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = b('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/packages/urllib3/filepost.py
0.658747
0.232332
filepost.py
pypi
import logging

from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import connection_from_url, port_by_scheme
from .request import RequestMethods
from .util import parse_url


__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']


pool_classes_by_scheme = {
    'http': HTTPConnectionPool,
    'https': HTTPSConnectionPool,
}

log = logging.getLogger(__name__)


class PoolManager(RequestMethods):
    """Transparently keeps track of the connection pools needed for arbitrary
    requests.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example: ::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2
    """

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # Evicted pools are closed so their sockets are released promptly.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

    def clear(self):
        """Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http'):
        """Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``.
        """
        port = port or port_by_scheme.get(scheme, 80)
        pool_key = (scheme, host, port)

        # Reuse an existing pool for this (scheme, host, port) if we have one.
        cached = self.pools.get(pool_key)
        if cached:
            return cached

        # Otherwise build a fresh ConnectionPool of the desired type.
        pool_cls = pool_classes_by_scheme[scheme]
        new_pool = pool_cls(host, port, **self.connection_pool_kw)
        self.pools[pool_key] = new_pool

        return new_pool

    def connection_from_url(self, url):
        """Similar to :func:`urllib3.connectionpool.connection_from_url` but
        doesn't pass any additional parameters to the
        :class:`urllib3.connectionpool.ConnectionPool` constructor.

        Additional parameters are taken from the :class:`.PoolManager`
        constructor.
        """
        parsed = parse_url(url)
        return self.connection_from_host(parsed.host, port=parsed.port,
                                         scheme=parsed.scheme)

    def urlopen(self, method, url, redirect=True, **kw):
        """Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic; only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        parsed = parse_url(url)
        conn = self.connection_from_host(parsed.host, port=parsed.port,
                                         scheme=parsed.scheme)

        # Redirects are handled here, not by the pool itself.
        kw['assert_same_host'] = False
        kw['redirect'] = False
        kw.setdefault('headers', self.headers)

        response = conn.urlopen(method, parsed.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # RFC: a 303 "See Other" redirect is always followed with a GET.
        if response.status == 303:
            method = 'GET'

        log.info("Redirecting %s -> %s" % (url, redirect_location))
        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
        return self.urlopen(method, redirect_location, **kw)


class ProxyManager(RequestMethods):
    """Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen``
    method will make requests to any url through the defined proxy.
    """

    def __init__(self, proxy_pool):
        self.proxy_pool = proxy_pool

    def _set_proxy_headers(self, headers=None):
        # Always advertise Accept: */*; caller-supplied headers win on clash.
        merged = {'Accept': '*/*'}
        if headers:
            merged.update(headers)
        return merged

    def urlopen(self, method, url, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        kw['assert_same_host'] = False
        kw['headers'] = self._set_proxy_headers(kw.get('headers'))
        return self.proxy_pool.urlopen(method, url, **kw)


def proxy_from_url(url, **pool_kw):
    """Build a :class:`ProxyManager` whose proxy pool is created from *url*."""
    proxy_pool = connection_from_url(url, **pool_kw)
    return ProxyManager(proxy_pool)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/packages/urllib3/poolmanager.py
0.900037
0.176778
poolmanager.py
pypi
try:  # Python 3.3+ — the ABCs moved to collections.abc (removed from
    # `collections` itself in Python 3.10, which broke the original import).
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping
from threading import Lock

try:  # Python 2.7+
    from collections import OrderedDict
except ImportError:
    from .packages.ordered_dict import OrderedDict


__all__ = ['RecentlyUsedContainer']


_Null = object()  # Sentinel: distinguishes "no value" from a stored None.


class RecentlyUsedContainer(MutableMapping):
    """
    Provides a thread-safe dict-like container which maintains up to
    ``maxsize`` keys while throwing away the least-recently-used keys beyond
    ``maxsize``.

    :param maxsize:
        Maximum number of recent elements to retain.

    :param dispose_func:
        Every time an item is evicted from the container,
        ``dispose_func(value)`` is called with the evicted value.
    """

    ContainerCls = OrderedDict

    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func

        self._container = self.ContainerCls()
        self._lock = Lock()

    def __getitem__(self, key):
        # Re-insert the item, moving it to the end of the eviction line.
        with self._lock:
            item = self._container.pop(key)
            self._container[key] = item
            return item

    def __setitem__(self, key, value):
        evicted_value = _Null
        with self._lock:
            # Possibly evict the existing value of 'key'
            evicted_value = self._container.get(key, _Null)
            self._container[key] = value

            # If we didn't evict an existing value, we might have to evict the
            # least recently used item from the beginning of the container.
            if len(self._container) > self._maxsize:
                _key, evicted_value = self._container.popitem(last=False)

        # Dispose outside the lock so a slow or re-entrant dispose_func cannot
        # block other container operations.
        if self.dispose_func and evicted_value is not _Null:
            self.dispose_func(evicted_value)

    def __delitem__(self, key):
        with self._lock:
            value = self._container.pop(key)

        if self.dispose_func:
            self.dispose_func(value)

    def __len__(self):
        with self._lock:
            return len(self._container)

    def __iter__(self):
        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')

    def clear(self):
        with self._lock:
            # Copy pointers to all values, then wipe the mapping
            # under Python 2, this copies the list of values twice :-|
            values = list(self._container.values())
            self._container.clear()

        if self.dispose_func:
            for value in values:
                self.dispose_func(value)

    def keys(self):
        with self._lock:
            # Bug fix: return a snapshot list instead of the live dict view.
            # On Python 3 the original returned a view that escaped the lock
            # and would raise if the container mutated during iteration.
            return list(self._container.keys())
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/packages/urllib3/_collections.py
0.873431
0.266381
_collections.py
pypi
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode

from .filepost import encode_multipart_formdata


__all__ = ['RequestMethods']


class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or
    www-form-urlencoded (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bug fix: the original did ``raise NotImplemented(...)``.
        # ``NotImplemented`` is a constant and is not callable, so calling the
        # abstract method produced a confusing ``TypeError`` instead of the
        # intended ``NotImplementedError``.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
        payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request signing,
        such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example: ::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will be
        overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(fields or {},
                                                           boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                  'application/x-www-form-urlencoded')

        if headers is None:
            headers = self.headers

        headers_ = {'Content-Type': content_type}
        headers_.update(headers)

        return self.urlopen(method, url, body=body, headers=headers_,
                            **urlopen_kw)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/packages/urllib3/request.py
0.764364
0.225715
request.py
pypi
# Vendored backport of collections.OrderedDict for Python < 2.7.
try:
    from thread import get_ident as _get_ident          # Python 2, threaded build
except ImportError:
    try:
        from dummy_thread import get_ident as _get_ident  # Python 2, no threads
    except ImportError:
        # Fix: the py2-only modules are gone on Python 3; fall back to
        # threading so this module stays importable everywhere.
        from threading import get_ident as _get_ident

try:
    from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
    # Python 3: _abcoll does not exist; the view* methods below will then
    # raise NameError if called, matching the original behavior.
    pass


class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the
        # linked list, and the inherited dictionary is updated with the new
        # key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor
        # nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Fix: use ``values()`` (available on both Python 2 and 3) instead
            # of the py2-only ``itervalues()``.  On Python 3 the old code
            # raised AttributeError and silently skipped resetting the
            # sentinel links, leaving the order structure stale after clear().
            for node in self.__map.values():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # __root/__map may not exist yet if clear() runs before __init__.
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # NOTE: deliberately no explicit ``self`` parameter so that a caller's
        # keyword argument named "self" cannot collide with it.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # The shared mutable default is intentional: it is a per-thread
        # recursion guard keyed by (id(self), thread-ident).
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
/requests-transition-1.0.4.0.tar.gz/requests-transition-1.0.4.0/version1/requests/packages/urllib3/packages/ordered_dict.py
0.596903
0.266137
ordered_dict.py
pypi
from __future__ import annotations

import functools
import logging
from typing import (
    Any,
    Hashable,
    Mapping,
    Iterable,
    Callable
)

from frozendict import frozendict


def ignore_unhashable(func):
    """Make a cached (``lru_cache``-wrapped) function tolerate unhashable arguments.

    Source: https://stackoverflow.com/a/64111268/21997874 (MIT License, presumably).

    When caching fails because an argument is unhashable, instead of raising
    the ``TypeError``, log a warning and call the uncached implementation
    (exposed by ``functools.lru_cache`` via ``__wrapped__``) directly.
    """
    uncached = func.__wrapped__
    attributes = functools.WRAPPER_ASSIGNMENTS + ('cache_info', 'cache_clear')

    @functools.wraps(func, assigned=attributes)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError as error:
            # Only swallow the specific "unhashable type" failure; any other
            # TypeError is a genuine bug in the wrapped function.
            if 'unhashable type' in str(error):
                problematic_args = [arg for arg in args
                                    if not isinstance(arg, Hashable)]
                problematic_kwargs = {key: value for key, value in kwargs.items()
                                      if not isinstance(value, Hashable)}

                error_description = ('If one of arguments is unhashable, function cannot be cached. '
                                     'Please do not use caching.\n')
                if problematic_args:
                    # Bug fix: the original appended this label but never
                    # listed the offending positional arguments themselves.
                    error_description += ('problematic argument(s): '
                                          + ', '.join(str(arg) for arg in problematic_args))
                if problematic_args and problematic_kwargs:
                    error_description += '\n'
                if problematic_kwargs:
                    error_description += (
                        ('problematic keyword argument: '
                         if len(problematic_kwargs) == 1
                         else 'problematic keyword arguments: ')
                        + ', '.join(f'{item}: {value}'
                                    for item, value in problematic_kwargs.items())
                    )
                logging.warning(error_description)
                return uncached(*args, **kwargs)
            raise

    wrapper.__uncached__ = uncached  # type: ignore
    return wrapper


def freeze_dict_and_list(alert: bool = True, error: bool = False):
    """Decorator factory converting the most common mutables into hashables.

    Mappings (like ``dict``) become ``frozendict`` and unhashable iterables
    (like ``list``) become ``tuple`` before the wrapped function is called, so
    the call can be cached by e.g. ``lru_cache``.  Any other mutable is passed
    through unchanged; in that case a warning is logged when ``alert`` is
    True, and a ``TypeError`` is raised when ``error`` is True.
    """

    def made_it_hashable(value) -> Any:
        if isinstance(value, Hashable):
            return value
        # Hashable values (including str/bytes) returned above, so the
        # branches below never need to re-check hashability.
        if isinstance(value, Mapping):
            # Mapping is itself Iterable, so it must be tested first or the
            # key/value structure would be collapsed into a tuple of keys.
            return frozendict(value)
        if isinstance(value, Iterable):
            return tuple(value)
        if error:
            # Typo fixes in these user-facing messages: "nether" -> "neither",
            # "your're" -> "you're".
            raise TypeError(f"type of '{value}' {type(value)}, "
                            "which is neither hashable, iterable(like list), nor mapping(like dict).")
        if alert:
            logging.warning(f"type of '{value}' {type(value)}, "
                            "which is neither hashable, iterable(like list), nor mapping(like dict). "
                            "So this thing will not be converted to hashable, that means this function "
                            "cannot be cached if you're using things like lru_cache.")
        return value

    def wrapper(func: Callable):
        def inner(*args, **kwargs):
            # List/dict comprehensions (rather than generator expressions) are
            # used for speed — roughly 1.5-2x faster here.
            new_args = [made_it_hashable(argument) for argument in args]
            new_kwargs = {kwname: made_it_hashable(kwvalue)
                          for kwname, kwvalue in kwargs.items()}
            logging.debug((new_args, new_kwargs))
            return func(*new_args, **new_kwargs)
        return inner

    return wrapper
/requests_utils-0.2.1-py3-none-any.whl/requests_utils/dealing_unhashable_args.py
0.736116
0.364014
dealing_unhashable_args.py
pypi
from concurrent.futures import as_completed, ThreadPoolExecutor
from contextlib import contextmanager
from typing import ContextManager, List, Optional

from .client import ContainerBase, ContainerOptions

TOR_IMAGE = "osminogin/tor-simple:0.4.3.6"


class OnionCircuit(ContainerBase):
    """A TOR Docker Container Object.

    Attributes:
        container_options (ContainerOptions): Container Options for TOR docker instance.
    """

    container_options: ContainerOptions = ContainerOptions(image=TOR_IMAGE)


@contextmanager
def OnionCircuits(  # pylint: disable=invalid-name
    onion_count: int,
    startup_with_threads: bool = False,
    max_threads: int = 2,
    thread_pool_timeout: Optional[int] = None,
    show_log: bool = False,
) -> ContextManager[List[OnionCircuit]]:
    """Context manager which yields a list of started TOR containers.

    Takes care of starting and stopping multiple docker container instances of TOR.

    Args:
        onion_count (int): Number of TOR docker container instances to start.
        startup_with_threads (bool): If True uses threads to start up (and shut
            down) the containers.  (Doc fix: the parameter is named
            ``startup_with_threads``; the old docstring said ``start_with_threads``.)
        max_threads (int): Max number of threads to use to start up the containers.
        thread_pool_timeout (Optional[int]): Timeout for ThreadPoolExecutor.
        show_log (bool): If True shows the containers logs.

    Yields:
        List[OnionCircuit]: A list of started OnionCircuit objects.
    """
    onion_circuits = [OnionCircuit() for _ in range(onion_count)]

    def _run_on_all(method_name: str) -> None:
        # Invoke ``start``/``stop`` on every circuit, fanned out over a thread
        # pool when startup_with_threads is set; future.result() re-raises any
        # per-container exception.
        if startup_with_threads:
            with ThreadPoolExecutor(max_workers=max_threads) as executor:
                futures = [
                    executor.submit(getattr(circuit, method_name), show_log=show_log)
                    for circuit in onion_circuits
                ]
                for future in as_completed(futures, timeout=thread_pool_timeout):
                    future.result()
        else:
            for circuit in onion_circuits:
                getattr(circuit, method_name)(show_log=show_log)

    try:
        _run_on_all("start")
        yield onion_circuits
    finally:
        # Always tear the containers down, even if startup partially failed.
        _run_on_all("stop")
/requests-whaor-0.2.1.tar.gz/requests-whaor-0.2.1/requests_whaor/circuit.py
0.850065
0.21963
circuit.py
pypi
from concurrent.futures import as_completed, ThreadPoolExecutor
from contextlib import contextmanager, ExitStack
import random
import time
from typing import Dict, List, Optional

from loguru import logger
import requests
from requests.exceptions import (  # pylint: disable=redefined-builtin
    ConnectionError,
    ProxyError,
    Timeout,
)

from .balancer import Balancer, OnionBalancer
from .circuit import OnionCircuit, OnionCircuits
from .network import WhaorNet


def pause(sleep: int) -> None:
    """Sleep function with a little logging fun."""
    if random.random() > 0.5:
        logger.debug("Warming things up.")
    else:
        logger.debug("Just chillin for a sec.")

    time.sleep(sleep)  # let things connect


class Requestor:
    """Makes proxied web requests via a rotating proxy TOR network."""

    def __init__(
        self, onions: List[OnionCircuit], onion_balancer: Balancer, timeout: int, max_retries: int
    ) -> None:
        # Fix: __init__ never returns a value, so it is annotated ``-> None``;
        # the previous ``-> "Requestor"`` annotation was incorrect.
        """Requestor __init__ method.

        Args:
            onions (List[OnionCircuit]): List of TOR containers.
            onion_balancer (Balancer): Balancer instances connected to TOR containers
                on the same network.
            timeout (int): Requests timeout.
            max_retries (int): Max number of time to retry on bad response or
                connection error.
        """
        self.timeout = timeout
        self.onions = onions
        self.onion_balancer = onion_balancer
        self.max_retries = max_retries

    @property
    def rotating_proxy(self) -> Dict[str, str]:
        """Rotating proxy frontend input address."""
        return self.onion_balancer.proxies

    def get(
        self, url: str, *args, **kwargs  # noqa: ANN002, ANN003
    ) -> Optional[requests.models.Response]:
        """Overload requests.get method.

        This will pass in the rotating proxy host address and timeout into the
        requests.get method.  Additionally, it provides a way to automatically
        retry on connection failures and bad status_codes.  Each time there is
        a failure it will try a new request with a new ip address.

        Args:
            url (str): url to send the get request.
            *args: arguments to pass to requests.get() method.
            **kwargs: keyword arguments to pass to requests.get() method.

        Returns:
            Response: If a response is found else None.
        """
        retries = self.max_retries

        # These two are always supplied by the Requestor itself.
        kwargs.pop("proxies", None)
        kwargs.pop("timeout", None)

        while retries > 0:
            try:
                # *args placed before the keywords (B026) — same call semantics,
                # unambiguous argument ordering.
                response = requests.get(
                    url, *args, timeout=self.timeout, proxies=self.rotating_proxy, **kwargs
                )

                if response.ok:
                    return response

            except (ProxyError, Timeout, ConnectionError) as error:
                logger.error(error)

            retries -= 1
            logger.debug(f"Retrying {retries} more times.")

        return None

    def restart_onions(self, with_threads: bool = True, max_threads: int = 5) -> None:
        """Restart onion containers.

        This can be useful for changing ip addresses every n requests.

        Args:
            with_threads (bool): if True uses threads to restart the containers.
            max_threads (int): How many threads to use.
        """
        if with_threads:
            with ThreadPoolExecutor(max_workers=max_threads) as executor:
                futures = [executor.submit(onion.restart) for onion in self.onions]
                for future in as_completed(futures):
                    future.result()  # Re-raise any restart failure.
        else:
            for onion in self.onions:
                onion.restart()

        pause(5)


@contextmanager
def RequestsWhaor(  # pylint: disable=invalid-name, too-many-arguments
    onion_count: int = 5,
    start_with_threads: bool = True,
    max_threads: int = 5,
    timeout: int = 5,
    show_log: bool = False,
    max_retries: int = 5,
) -> Requestor:
    """Context manager which starts n amount of tor nodes behind a round robin reverse proxy.

    Args:
        onion_count (int): Number of TOR circuits to spin up.
        start_with_threads (bool): If True uses threads to spin up containers.
        max_threads (int): Max number of threads to use when spinning up containers.
        timeout (int): Requests timeout.
        show_log (bool): If True shows the containers logs.
        max_retries (int): Max number of time to retry on bad response or connection error.

    Yields:
        Requestor: Makes proxied web requests via a rotating proxy TOR network.
    """
    with ExitStack() as stack:
        try:
            network = stack.enter_context(WhaorNet())
            onions = stack.enter_context(
                OnionCircuits(
                    onion_count,
                    startup_with_threads=start_with_threads,
                    max_threads=max_threads,
                    show_log=show_log,
                )
            )
            for onion in onions:
                network.connect_container(onion.container_id, onion.container_name)

            onion_balancer = stack.enter_context(
                OnionBalancer(onions=network.containers, show_log=show_log)
            )
            network.connect_container(onion_balancer.container_id, onion_balancer.container_name)

            pause(5)  # give the balancer and circuits a moment to connect

            logger.info(f"Dashboard Address: {onion_balancer.dashboard_address}")

            pause(1)

            yield Requestor(
                onions=onions,
                onion_balancer=onion_balancer,
                timeout=timeout,
                max_retries=max_retries,
            )
        finally:
            stack.pop_all().close()
/requests-whaor-0.2.1.tar.gz/requests-whaor-0.2.1/requests_whaor/core.py
0.881761
0.159971
core.py
pypi
from contextlib import contextmanager from tempfile import _TemporaryFileWrapper as TemporaryFile from tempfile import NamedTemporaryFile from typing import Any, Dict, Optional from docker.models.volumes import Volume as DockerVolume from docker.types import Mount from jinja2 import Environment, FileSystemLoader from pydantic import validator from .client import Client from .paths import TEMPLATE_DIRECTORY, TEMPORARY_FILES_DIRECTORY class MountFile(Client): """Represents a file to be binded to a docker container. Attributes: template_name (str): Name of the jinja template. target_path (str): Local file system path. volume_driver (str): Type of docker volume. temporary_file (Optional[TemporaryFile]): A mounted instance of a TemporaryFile. mount (Optional[Mount]): An instance of the DockerMount. template_variables (Optional[Dict[Any, Any]]): Jinja template variables. """ template_name: str target_path: str volume_driver: str = "local" temporary_file: Optional[TemporaryFile] mount: Optional[Mount] template_variables: Optional[Dict[Any, Any]] class Config: """Pydantic Configuration.""" arbitrary_types_allowed = True json_encoders = {TemporaryFile: lambda temp: temp.name, DockerVolume: lambda vol: vol.name} @validator("template_name") def _template_must_exist( # pylint: disable=no-self-argument,no-self-use cls, template_name: str ) -> str: """Check if template exists in template directory.""" template_path = TEMPLATE_DIRECTORY / template_name if template_path.exists(): return template_name raise ValueError(f"{template_name} does not exist.") @property def volume_name(self) -> str: """Name of the volume.""" return self.template_name.replace(".", "_") + "_volume" @property def source_path(self) -> str: """Temporary file path name.""" return self.temporary_file.name def _render_template(self) -> str: """Render jinja template.""" env = Environment(loader=FileSystemLoader(TEMPLATE_DIRECTORY), keep_trailing_newline=True) template = env.get_template(self.template_name) if 
self.template_variables: return template.render(self.template_variables) return template.render() def _create_source_file(self) -> None: """Create temp file for storing render template output.""" self.temporary_file = NamedTemporaryFile(dir=TEMPORARY_FILES_DIRECTORY, suffix=".conf") def _generate_source_file(self) -> None: """Generate source file.""" render_data = self._render_template() self._create_source_file() self.temporary_file.write(render_data.encode()) self.temporary_file.seek(0) def start(self) -> None: """Start the volume mount.""" self._generate_source_file() self.mount = Mount( target=self.target_path, source=self.source_path, read_only=True, type="bind" ) def stop(self) -> None: """Stop the volume mount.""" if self.temporary_file: self.temporary_file.close() @contextmanager # pylint: disable=invalid-name def MountPoint( *, template_name: str, target_path: str, template_variables: Optional[Dict[Any, Any]] = None ) -> MountFile: """Context manager which yields a prepared instance of a docker volume. Args: template_name (str): Name of the jinja template. target_path (str): Local file system path. template_variables (Optional[Dict[Any, Any]]): Jinja template variables. """ mount_file = MountFile( template_name=template_name, target_path=target_path, template_variables=template_variables ) try: mount_file.start() yield mount_file finally: mount_file.stop()
/requests-whaor-0.2.1.tar.gz/requests-whaor-0.2.1/requests_whaor/mount.py
0.847527
0.162679
mount.py
pypi
from contextlib import contextmanager from typing import ContextManager, List, Optional from docker.models.containers import Container as DockerContainer from docker.models.networks import Network as DockerNetwork from loguru import logger from .client import Client class Network(Client): """Represents a docker network. Attributes: name (str): Network name. driver (str): Network driver. docker_network (Optional[DockerNetwork]): Holds an instance of a started docker network. """ name: str driver: str docker_network: Optional[DockerNetwork] def connect_container(self, container_id: str, container_name: str) -> None: """Connect container to network and give it a reachable network alias. Args: container_id (str): The containers id. container_name (str): The containers name. """ logger.debug(f"connecting {container_name} to the {self.network_name} network") self.docker_network.connect(container_id, aliases=[container_name]) @property def containers(self) -> List[DockerContainer]: """Return list of Container objects connected to network.""" self.docker_network.reload() return self.docker_network.containers @property def network_name(self) -> str: """Network name.""" return self.docker_network.name @property def network_id(self) -> str: """Network short id.""" return self.docker_network.short_id def start(self) -> None: """Start Docker network.""" client = self.get_client() self.docker_network = client.networks.create(name=self.name, driver=self.driver) logger.debug(f"Network: {self.network_name} {self.network_id} Created.") def stop(self) -> None: """Stop Docker network.""" self.docker_network.reload() if self.docker_network.containers: for container in self.docker_network.containers: self.docker_network.disconnect(container.name) self.docker_network.remove() logger.debug(f"Network: {self.network_name} {self.network_id} Destroyed.") @contextmanager # pylint: disable=invalid-name def WhaorNet(name: str = "whaornet", driver: str = "bridge") -> ContextManager[Network]: 
"""Context manager which yields a network to connect containers to. Args: name (str): Name of network. driver (str): Type of network drivier. Yields: Network: A Docker network. """ whaornet = Network(name=name, driver=driver) try: whaornet.start() yield whaornet finally: whaornet.stop()
/requests-whaor-0.2.1.tar.gz/requests-whaor-0.2.1/requests_whaor/network.py
0.908882
0.18628
network.py
pypi
from contextlib import contextmanager from typing import Dict, List from docker.models.containers import Container from loguru import logger from pydantic import BaseModel as Base from .circuit import OnionCircuit from .client import ContainerBase, ContainerOptions from .mount import MountFile, MountPoint HAPROXY_IMAGE = "haproxy:2.2.3" class HAProxyOptions(Base): """Handles options for HAProxy docker instance. Attributes: max_connections (int): Maximum per-process number of concurrent connections. timeout_client (int): Maximum inactivity time on the client side. timeout_connect (int): Maximum time to wait for a connection attempt to a server to succeed. timeout_queue (int): Maximum time to wait in the queue for a connection slot to be free. timeout_server (int): Maximum inactivity time on the server side. listen_host_port (int): Frontend port to the proxy. backend_name (str): Name of Backend section. dashboard_bind_port (int): Port to open to reach the HAProxy dashboard. dashboard_refresh_rate (int): Refresh rate of the HAProxy dashboard page. onions (List[Container]): Each onion container that is connected to the whaornet. """ max_connections: int = 4096 timeout_client: int = 3600 timeout_connect: int = 1 timeout_queue: int = 5 timeout_server: int = 3600 listen_host_port: int = 8001 backend_name: str = "onions" dashboard_bind_port: int = 9999 dashboard_refresh_rate: int = 2 onions: List[Container] class Config: """Pydantic Configuration.""" arbitrary_types_allowed = True @property def ports(self) -> List[int]: """Ports which will be used to expose on the local network.""" return [self.listen_host_port, self.dashboard_bind_port] class Balancer(ContainerBase): """HAProxy Load Balancer. Attributes: haproxy_options (HAProxyOptions): HAProxy options object. container_options (ContainerOptions): Container options for the HA proxy instance. 
""" haproxy_options: HAProxyOptions container_options: ContainerOptions = ContainerOptions(image=HAPROXY_IMAGE) class Config: """Pydantic Configuration.""" arbitrary_types_allowed = True @property def address(self) -> str: """Return socks5 address to poxy requests through.""" return f"socks5://localhost:{self.haproxy_options.listen_host_port}" @property def dashboard_address(self) -> str: """Return full dashboard address.""" return f"http://localhost:{self.haproxy_options.dashboard_bind_port}" @property def proxies(self) -> Dict[str, str]: """Return proxies to mount onto a requests session.""" return { "http": self.address, "https": self.address, } def add_mount_point(self, mount: MountFile) -> None: """Mount a volume into the HAProxy container. Args: mount (MountFile): File to mount between the container and local file system. """ self.container_options.mounts.append(mount.mount) def display_settings(self) -> None: """Log config settings to stdout.""" logger.debug( "\n===================" "\nOnion Load Balancer" "\n===================" "\n" + self.json(indent=4) ) self.show_follow_logs_command() @contextmanager # pylint: disable=invalid-name def OnionBalancer(onions: List[OnionCircuit], show_log: bool = False) -> Balancer: """Context manager which yields a started instance of an HAProxy docker container. Args: onions (List[OnionCircuit]): List of tor containers to load balance requests across. show_log (bool): If True shows the HAProxies logs on start and stop. Yields: Balancer: A started instance of a HAProxy docker container. 
""" haproxy_options = HAProxyOptions(onions=onions) with MountPoint( template_name="haproxy.cfg", target_path="/usr/local/etc/haproxy/haproxy.cfg", template_variables=haproxy_options.dict(), ) as mount_point: try: balancer = Balancer(haproxy_options=haproxy_options) balancer.add_mount_point(mount_point) for port in haproxy_options.ports: balancer.expose_port(port) balancer.start(show_log=show_log) balancer.display_settings() yield balancer finally: balancer.stop(show_log=show_log)
/requests-whaor-0.2.1.tar.gz/requests-whaor-0.2.1/requests_whaor/balancer.py
0.922124
0.241389
balancer.py
pypi
import sys import asyncio import json from io import BytesIO from urllib.parse import urlparse, urlunparse, urljoin from concurrent.futures import ThreadPoolExecutor from concurrent.futures._base import TimeoutError from functools import partial from typing import Set, Union, List, MutableMapping, Optional, Mapping import requests from pyquery import PyQuery from fake_useragent import UserAgent import lxml from lxml import etree from parse import search as parse_search from parse import findall, Result from w3lib.encoding import html_to_unicode DEFAULT_ENCODING = 'utf-8' DEFAULT_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8' useragent = None # Typing. _XPath = Union[List[str], List['Element'], str, 'Element'] _Result = List['Result'] _XML = Union[str, bytes] _BaseXML = str _UserAgent = str _DefaultEncoding = str _RawXML = bytes _Encoding = str _LXML = etree.Element _Text = str _Search = Result _Containing = Union[str, List[str]] _Links = Set[str] _Attrs = MutableMapping _Find = Union[List['Element'], 'Element'] # Sanity checking. try: assert sys.version_info.major == 3 assert sys.version_info.minor > 5 except AssertionError: raise RuntimeError('Requests-XML requires Python 3.6+!') class BaseParser: """A basic XML/Element Parser, for Humans. :param element: The element from which to base the parsing upon. :param default_encoding: Which encoding to default to. :param xml: XML from which to base the parsing upon (optional). 
""" def __init__(self, *, element, session: 'XMLSession' = None, default_encoding: _DefaultEncoding = DEFAULT_ENCODING, xml: _XML = None) -> None: self.element = element self.session = session or XMLSession() self.default_encoding = default_encoding self._encoding = None self._xml = xml.encode(DEFAULT_ENCODING) if isinstance(xml, str) else xml self._lxml = None self._pq = None self._docinfo = None self._json = None @property def raw_xml(self) -> _RawXML: """Bytes representation of the XML content. (`learn more <http://www.diveintopython3.net/strings.html>`_). """ if self._xml: return self._xml else: return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding) @property def xml(self) -> _BaseXML: """Unicode representation of the XML content (`learn more <http://www.diveintopython3.net/strings.html>`_). """ if self._xml: return self.raw_xml.decode(self.encoding) else: return etree.tostring(self.element, encoding='unicode').strip() @xml.setter def xml(self, xml: str) -> None: self._xml = xml.encode(self.encoding) @raw_xml.setter def raw_xml(self, xml: bytes) -> None: """Property setter for self.html.""" self._xml = xml @property def pq(self) -> PyQuery: """`PyQuery <https://pythonhosted.org/pyquery/>`_ representation of the :class:`Element <Element>` or :class:`HTML <HTML>`. """ if self._pq is None: self._pq = PyQuery(self.raw_xml) return self._pq @property def lxml(self) -> _LXML: """`lxml <http://lxml.de>`_ representation of the :class:`Element <Element>` or :class:`XML <XML>`. """ if self._lxml is None: self._lxml = etree.fromstring(self.raw_xml) return self._lxml @property def text(self) -> _Text: """The text content of the :class:`Element <Element>` or :class:`XML <XML>`. """ return self.pq.text() @property def links(self) -> _Links: """All found links on page, in as–is form. 
Only works for Atom feeds.""" return list(set(x.text for x in self.xpath('//link'))) @property def docinfo(self) -> etree.DocInfo: if self._docinfo is None: self._docinfo = etree.parse(BytesIO(self.raw_xml)).docinfo return self._docinfo @property def xml_version(self) -> _Text: return self.docinfo.xml_version @property def root_tag(self) -> _Text: return self.docinfo.root_name @property def encoding(self) -> _Encoding: """The encoding string to be used, extracted from the XML and :class:`XMLResponse <XMLResponse>` header. """ if self._encoding: return self._encoding # Scan meta tags for charset. if self._xml: self._encoding = html_to_unicode(self.default_encoding, self._xml)[0] return self._encoding if self._encoding else self.default_encoding @encoding.setter def encoding(self, enc: str) -> None: """Property setter for self.encoding.""" self._encoding = enc def json(self, conversion: _Text = 'badgerfish') -> Mapping: """A JSON Representation of the XML. Default is badgerfish. :param conversion: Which conversion method to use. (`learn more <https://github.com/sanand0/xmljson#conventions>`_) """ if not self._json: if conversion is 'badgerfish': from xmljson import badgerfish as serializer elif conversion is 'abdera': from xmljson import abdera as serializer elif conversion is 'cobra': from xmljson import cobra as serializer elif conversion is 'gdata': from xmljson import gdata as serializer elif conversion is 'parker': from xmljson import parker as serializer elif conversion is 'yahoo': from xmljson import yahoo as serializer self._json = json.dumps(serializer.data(etree.fromstring(self.xml))) return self._json def xpath(self, selector: str, *, first: bool = False, _encoding: str = None) -> _XPath: """Given an XPath selector, returns a list of :class:`Element <Element>` objects or a single one. :param selector: XPath Selector to use. :param first: Whether or not to return just the first result. :param _encoding: The encoding format. 
If a sub-selector is specified (e.g. ``//a/@href``), a simple list of results is returned. See W3School's `XPath Examples <https://www.w3schools.com/xml/xpath_examples.asp>`_ for more details. If ``first`` is ``True``, only returns the first :class:`Element <Element>` found. """ selected = self.lxml.xpath(selector) elements = [ Element(element=selection, default_encoding=_encoding or self.encoding) if not isinstance(selection, etree._ElementUnicodeResult) else str(selection) for selection in selected ] return _get_first_or_list(elements, first) def search(self, template: str, first: bool = False) -> _Result: """Search the :class:`Element <Element>` for the given parse template. :param template: The Parse template to use. """ elements = [r for r in findall(template, self.xml)] return _get_first_or_list(elements, first) def find(self, selector: str = '*', containing: _Containing = None, first: bool = False, _encoding: str = None) -> _Find: """Given a simple element name, returns a list of :class:`Element <Element>` objects or a single one. :param selector: Element name to find. :param containing: If specified, only return elements that contain the provided text. :param first: Whether or not to return just the first result. :param _encoding: The encoding format. If ``first`` is ``True``, only returns the first :class:`Element <Element>` found. """ # Convert a single containing into a list. 
if isinstance(containing, str): containing = [containing] encoding = _encoding or self.encoding elements = [ Element(element=found, default_encoding=encoding) for found in self.pq(selector) ] if containing: elements_copy = elements.copy() elements = [] for element in elements_copy: if any([c.lower() in element.text.lower() for c in containing]): elements.append(element) elements.reverse() return _get_first_or_list(elements, first) def apply_stylesheet(self, stylesheet: 'XML') -> 'XML': transform = etree.XSLT(xslt_input=stylesheet.lxml) result = transform(self.lxml) return XML(xml=str(result)) class Element(BaseParser): """An element of HTML. :param element: The element from which to base the parsing upon. :param default_encoding: Which encoding to default to. """ __slots__ = [ 'element', 'default_encoding', '_encoding', '_xml', '_lxml', '_pq', '_attrs', 'session' ] def __init__(self, *, element, default_encoding: _DefaultEncoding = None) -> None: super(Element, self).__init__(element=element, default_encoding=default_encoding) self.element = element self._attrs = None def __repr__(self) -> str: attrs = ['{}={}'.format(attr, repr(self.attrs[attr])) for attr in self.attrs] return "<Element {} {}>".format(repr(self.element.tag), ' '.join(attrs)) @property def attrs(self) -> _Attrs: """Returns a dictionary of the attributes of the :class:`Element <Element>` (`learn more <https://www.w3schools.com/tags/ref_attributes.asp>`_). """ if self._attrs is None: self._attrs = {k: v for k, v in self.element.items()} # Split class and rel up, as there are ussually many of them: for attr in ['class', 'rel']: if attr in self._attrs: self._attrs[attr] = tuple(self._attrs[attr].split()) return self._attrs class XML(BaseParser): """An XML document, ready for parsing. :param xml: XML from which to base the parsing upon (optional). :param default_encoding: Which encoding to default to. 
""" def __init__(self, *, xml: _XML, default_encoding: str = DEFAULT_ENCODING) -> None: # Convert incoming unicode HTML into bytes. if isinstance(xml, str): xml = xml.encode(DEFAULT_ENCODING) super(XML, self).__init__( # Convert unicode HTML to bytes. element=PyQuery(xml)('xml') or PyQuery(f'<xml>{xml}</xml>')('xml'), xml=xml, default_encoding=default_encoding ) def __repr__(self) -> str: return f"<XML element={self.element!r}>" class XMLResponse(requests.Response): """An XML-enabled :class:`requests.Response <requests.Response>` object. Effectively the same, but with an intelligent ``.xml`` property added. """ def __init__(self) -> None: super(XMLResponse, self).__init__() self._xml = None # type: HTML @property def xml(self) -> XML: if not self._xml: self._xml = XML(xml=self.content, default_encoding=self.encoding) return self._xml @classmethod def _from_response(cls, response): xml_r = cls() xml_r.__dict__.update(response.__dict__) return xml_r def user_agent(style=None) -> _UserAgent: """Returns an apparently legit user-agent, if not requested one of a specific style. Defaults to a Chrome-style User-Agent. """ global useragent if (not useragent) and style: useragent = UserAgent() return useragent[style] if style else DEFAULT_USER_AGENT def _get_first_or_list(l, first=False): if first: try: return l[0] except IndexError: return None else: return l class XMLSession(requests.Session): """A consumable session, for cookie persistence and connection pooling, amongst other things. """ def __init__(self, mock_browser=True): super(XMLSession, self).__init__() # Mock a web browser's user agent. if mock_browser: self.headers['User-Agent'] = user_agent() self.hooks = {'response': self._handle_response} @staticmethod def _handle_response(response, **kwargs) -> XMLResponse: """Requests HTTP Response handler. Attaches .html property to class:`requests.Response <requests.Response>` objects. 
""" if not response.encoding: response.encoding = DEFAULT_ENCODING return response def request(self, *args, **kwargs) -> XMLResponse: """Makes an HTTP Request, with mocked User–Agent headers. Returns a class:`HTTPResponse <HTTPResponse>`. """ # Convert Request object into HTTPRequest object. r = super(XMLSession, self).request(*args, **kwargs) return XMLResponse._from_response(r) class AsyncXMLSession(requests.Session): """ An async consumable session. """ def __init__(self, loop=None, workers=None, mock_browser: bool = True, *args, **kwargs): """ Set or create an event loop and a thread pool. :param loop: Asyncio lopp to use. :param workers: Amount of threads to use for executing async calls. If not pass it will default to the number of processors on the machine, multiplied by 5. """ super().__init__(*args, **kwargs) # Mock a web browser's user agent. if mock_browser: self.headers['User-Agent'] = user_agent() self.hooks["response"].append(self.response_hook) self.loop = loop or asyncio.get_event_loop() self.thread_pool = ThreadPoolExecutor(max_workers=workers) @staticmethod def response_hook(response, **kwargs) -> XMLResponse: """ Change response enconding and replace it by a HTMLResponse. """ response.encoding = DEFAULT_ENCODING return XMLResponse._from_response(response) def request(self, *args, **kwargs): """ Partial original request func and run it in a thread. """ func = partial(super().request, *args, **kwargs) return self.loop.run_in_executor(self.thread_pool, func)
/requests-xml-0.2.3.tar.gz/requests-xml-0.2.3/requests_xml.py
0.575469
0.173884
requests_xml.py
pypi
Release History =============== dev --- - \[Short description of non-trivial change.\] 2.31.0 (2023-05-22) ------------------- **Security** - Versions of Requests between v2.3.0 and v2.30.0 are vulnerable to potential forwarding of `Proxy-Authorization` headers to destination servers when following HTTPS redirects. When proxies are defined with user info (https://user:pass@proxy:8080), Requests will construct a `Proxy-Authorization` header that is attached to the request to authenticate with the proxy. In cases where Requests receives a redirect response, it previously reattached the `Proxy-Authorization` header incorrectly, resulting in the value being sent through the tunneled connection to the destination server. Users who rely on defining their proxy credentials in the URL are *strongly* encouraged to upgrade to Requests 2.31.0+ to prevent unintentional leakage and rotate their proxy credentials once the change has been fully deployed. Users who do not use a proxy or do not supply their proxy credentials through the user information portion of their proxy URL are not subject to this vulnerability. Full details can be read in our [Github Security Advisory](https://github.com/psf/requests/security/advisories/GHSA-j8r2-6x86-q33q) and [CVE-2023-32681](https://nvd.nist.gov/vuln/detail/CVE-2023-32681). 2.30.0 (2023-05-03) ------------------- **Dependencies** - ⚠️ Added support for urllib3 2.0. ⚠️ This may contain minor breaking changes so we advise careful testing and reviewing https://urllib3.readthedocs.io/en/latest/v2-migration-guide.html prior to upgrading. Users who wish to stay on urllib3 1.x can pin to `urllib3<2`. 2.29.0 (2023-04-26) ------------------- **Improvements** - Requests now defers chunked requests to the urllib3 implementation to improve standardization. (#6226) - Requests relaxes header component requirements to support bytes/str subclasses. 
(#6356) 2.28.2 (2023-01-12) ------------------- **Dependencies** - Requests now supports charset\_normalizer 3.x. (#6261) **Bugfixes** - Updated MissingSchema exception to suggest https scheme rather than http. (#6188) 2.28.1 (2022-06-29) ------------------- **Improvements** - Speed optimization in `iter_content` with transition to `yield from`. (#6170) **Dependencies** - Added support for chardet 5.0.0 (#6179) - Added support for charset-normalizer 2.1.0 (#6169) 2.28.0 (2022-06-09) ------------------- **Deprecations** - ⚠️ Requests has officially dropped support for Python 2.7. ⚠️ (#6091) - Requests has officially dropped support for Python 3.6 (including pypy3.6). (#6091) **Improvements** - Wrap JSON parsing issues in Request's JSONDecodeError for payloads without an encoding to make `json()` API consistent. (#6097) - Parse header components consistently, raising an InvalidHeader error in all invalid cases. (#6154) - Added provisional 3.11 support with current beta build. (#6155) - Requests got a makeover and we decided to paint it black. (#6095) **Bugfixes** - Fixed bug where setting `CURL_CA_BUNDLE` to an empty string would disable cert verification. All Requests 2.x versions before 2.28.0 are affected. (#6074) - Fixed urllib3 exception leak, wrapping `urllib3.exceptions.SSLError` with `requests.exceptions.SSLError` for `content` and `iter_content`. (#6057) - Fixed issue where invalid Windows registry entries caused proxy resolution to raise an exception rather than ignoring the entry. (#6149) - Fixed issue where entire payload could be included in the error message for JSONDecodeError. (#6036) 2.27.1 (2022-01-05) ------------------- **Bugfixes** - Fixed parsing issue that resulted in the `auth` component being dropped from proxy URLs. (#6028) 2.27.0 (2022-01-03) ------------------- **Improvements** - Officially added support for Python 3.10. (#5928) - Added a `requests.exceptions.JSONDecodeError` to unify JSON exceptions between Python 2 and 3. 
This gets raised in the `response.json()` method, and is backwards compatible as it inherits from previously thrown exceptions. Can be caught from `requests.exceptions.RequestException` as well. (#5856) - Improved error text for misnamed `InvalidSchema` and `MissingSchema` exceptions. This is a temporary fix until exceptions can be renamed (Schema->Scheme). (#6017) - Improved proxy parsing for proxy URLs missing a scheme. This will address recent changes to `urlparse` in Python 3.9+. (#5917) **Bugfixes** - Fixed defect in `extract_zipped_paths` which could result in an infinite loop for some paths. (#5851) - Fixed handling for `AttributeError` when calculating length of files obtained by `Tarfile.extractfile()`. (#5239) - Fixed urllib3 exception leak, wrapping `urllib3.exceptions.InvalidHeader` with `requests.exceptions.InvalidHeader`. (#5914) - Fixed bug where two Host headers were sent for chunked requests. (#5391) - Fixed regression in Requests 2.26.0 where `Proxy-Authorization` was incorrectly stripped from all requests sent with `Session.send`. (#5924) - Fixed performance regression in 2.26.0 for hosts with a large number of proxies available in the environment. (#5924) - Fixed idna exception leak, wrapping `UnicodeError` with `requests.exceptions.InvalidURL` for URLs with a leading dot (.) in the domain. (#5414) **Deprecations** - Requests support for Python 2.7 and 3.6 will be ending in 2022. While we don't have exact dates, Requests 2.27.x is likely to be the last release series providing support. 2.26.0 (2021-07-13) ------------------- **Improvements** - Requests now supports Brotli compression, if either the `brotli` or `brotlicffi` package is installed. (#5783) - `Session.send` now correctly resolves proxy configurations from both the Session and Request. Behavior now matches `Session.request`. (#5681) **Bugfixes** - Fixed a race condition in zip extraction when using Requests in parallel from zip archive. 
(#5707) **Dependencies** - Instead of `chardet`, use the MIT-licensed `charset_normalizer` for Python3 to remove license ambiguity for projects bundling requests. If `chardet` is already installed on your machine it will be used instead of `charset_normalizer` to keep backwards compatibility. (#5797) You can also install `chardet` while installing requests by specifying `[use_chardet_on_py3]` extra as follows: ```shell pip install "requests[use_chardet_on_py3]" ``` Python2 still depends upon the `chardet` module. - Requests now supports `idna` 3.x on Python 3. `idna` 2.x will continue to be used on Python 2 installations. (#5711) **Deprecations** - The `requests[security]` extra has been converted to a no-op install. PyOpenSSL is no longer the recommended secure option for Requests. (#5867) - Requests has officially dropped support for Python 3.5. (#5867) 2.25.1 (2020-12-16) ------------------- **Bugfixes** - Requests now treats `application/json` as `utf8` by default. Resolving inconsistencies between `r.text` and `r.json` output. (#5673) **Dependencies** - Requests now supports chardet v4.x. 2.25.0 (2020-11-11) ------------------- **Improvements** - Added support for NETRC environment variable. (#5643) **Dependencies** - Requests now supports urllib3 v1.26. **Deprecations** - Requests v2.25.x will be the last release series with support for Python 3.5. - The `requests[security]` extra is officially deprecated and will be removed in Requests v2.26.0. 2.24.0 (2020-06-17) ------------------- **Improvements** - pyOpenSSL TLS implementation is now only used if Python either doesn't have an `ssl` module or doesn't support SNI. Previously pyOpenSSL was unconditionally used if available. This applies even if pyOpenSSL is installed via the `requests[security]` extra (#5443) - Redirect resolution should now only occur when `allow_redirects` is True. (#5492) - No longer perform unnecessary Content-Length calculation for requests that won't use it. 
(#5496) 2.23.0 (2020-02-19) ------------------- **Improvements** - Remove defunct reference to `prefetch` in Session `__attrs__` (#5110) **Bugfixes** - Requests no longer outputs password in basic auth usage warning. (#5099) **Dependencies** - Pinning for `chardet` and `idna` now uses major version instead of minor. This hopefully reduces the need for releases every time a dependency is updated. 2.22.0 (2019-05-15) ------------------- **Dependencies** - Requests now supports urllib3 v1.25.2. (note: 1.25.0 and 1.25.1 are incompatible) **Deprecations** - Requests has officially stopped support for Python 3.4. 2.21.0 (2018-12-10) ------------------- **Dependencies** - Requests now supports idna v2.8. 2.20.1 (2018-11-08) ------------------- **Bugfixes** - Fixed bug with unintended Authorization header stripping for redirects using default ports (http/80, https/443). 2.20.0 (2018-10-18) ------------------- **Bugfixes** - Content-Type header parsing is now case-insensitive (e.g. charset=utf8 v Charset=utf8). - Fixed exception leak where certain redirect urls would raise uncaught urllib3 exceptions. - Requests removes Authorization header from requests redirected from https to http on the same hostname. (CVE-2018-18074) - `should_bypass_proxies` now handles URIs without hostnames (e.g. files). **Dependencies** - Requests now supports urllib3 v1.24. **Deprecations** - Requests has officially stopped support for Python 2.6. 2.19.1 (2018-06-14) ------------------- **Bugfixes** - Fixed issue where status\_codes.py's `init` function failed trying to append to a `__doc__` value of `None`. 2.19.0 (2018-06-12) ------------------- **Improvements** - Warn user about possible slowdown when using cryptography version &lt; 1.3.4 - Check for invalid host in proxy URL, before forwarding request to adapter. - Fragments are now properly maintained across redirects. (RFC7231 7.1.2) - Removed use of cgi module to expedite library load time. 
- Added support for SHA-256 and SHA-512 digest auth algorithms. - Minor performance improvement to `Request.content`. - Migrate to using collections.abc for 3.7 compatibility. **Bugfixes** - Parsing empty `Link` headers with `parse_header_links()` no longer return one bogus entry. - Fixed issue where loading the default certificate bundle from a zip archive would raise an `IOError`. - Fixed issue with unexpected `ImportError` on windows system which do not support `winreg` module. - DNS resolution in proxy bypass no longer includes the username and password in the request. This also fixes the issue of DNS queries failing on macOS. - Properly normalize adapter prefixes for url comparison. - Passing `None` as a file pointer to the `files` param no longer raises an exception. - Calling `copy` on a `RequestsCookieJar` will now preserve the cookie policy correctly. **Dependencies** - We now support idna v2.7. - We now support urllib3 v1.23. 2.18.4 (2017-08-15) ------------------- **Improvements** - Error messages for invalid headers now include the header name for easier debugging **Dependencies** - We now support idna v2.6. 2.18.3 (2017-08-02) ------------------- **Improvements** - Running `$ python -m requests.help` now includes the installed version of idna. **Bugfixes** - Fixed issue where Requests would raise `ConnectionError` instead of `SSLError` when encountering SSL problems when using urllib3 v1.22. 2.18.2 (2017-07-25) ------------------- **Bugfixes** - `requests.help` no longer fails on Python 2.6 due to the absence of `ssl.OPENSSL_VERSION_NUMBER`. **Dependencies** - We now support urllib3 v1.22. 2.18.1 (2017-06-14) ------------------- **Bugfixes** - Fix an error in the packaging whereby the `*.whl` contained incorrect data that regressed the fix in v2.17.3. 2.18.0 (2017-06-14) ------------------- **Improvements** - `Response` is now a context manager, so can be used directly in a `with` statement without first having to be wrapped by `contextlib.closing()`. 
**Bugfixes** - Resolve installation failure if multiprocessing is not available - Resolve tests crash if multiprocessing is not able to determine the number of CPU cores - Resolve error swallowing in utils set\_environ generator 2.17.3 (2017-05-29) ------------------- **Improvements** - Improved `packages` namespace identity support, for monkeypatching libraries. 2.17.2 (2017-05-29) ------------------- **Improvements** - Improved `packages` namespace identity support, for monkeypatching libraries. 2.17.1 (2017-05-29) ------------------- **Improvements** - Improved `packages` namespace identity support, for monkeypatching libraries. 2.17.0 (2017-05-29) ------------------- **Improvements** - Removal of the 301 redirect cache. This improves thread-safety. 2.16.5 (2017-05-28) ------------------- - Improvements to `$ python -m requests.help`. 2.16.4 (2017-05-27) ------------------- - Introduction of the `$ python -m requests.help` command, for debugging with maintainers! 2.16.3 (2017-05-27) ------------------- - Further restored the `requests.packages` namespace for compatibility reasons. 2.16.2 (2017-05-27) ------------------- - Further restored the `requests.packages` namespace for compatibility reasons. No code modification (noted below) should be necessary any longer. 2.16.1 (2017-05-27) ------------------- - Restored the `requests.packages` namespace for compatibility reasons. - Bugfix for `urllib3` version parsing. **Note**: code that was written to import against the `requests.packages` namespace previously will have to import code that rests at this module-level now. For example: from requests.packages.urllib3.poolmanager import PoolManager Will need to be re-written to be: from requests.packages import urllib3 urllib3.poolmanager.PoolManager Or, even better: from urllib3.poolmanager import PoolManager 2.16.0 (2017-05-26) ------------------- - Unvendor ALL the things! 2.15.1 (2017-05-26) ------------------- - Everyone makes mistakes. 
2.15.0 (2017-05-26) ------------------- **Improvements** - Introduction of the `Response.next` property, for getting the next `PreparedRequest` from a redirect chain (when `allow_redirects=False`). - Internal refactoring of `__version__` module. **Bugfixes** - Restored once-optional parameter for `requests.utils.get_environ_proxies()`. 2.14.2 (2017-05-10) ------------------- **Bugfixes** - Changed a less-than to an equal-to and an or in the dependency markers to widen compatibility with older setuptools releases. 2.14.1 (2017-05-09) ------------------- **Bugfixes** - Changed the dependency markers to widen compatibility with older pip releases. 2.14.0 (2017-05-09) ------------------- **Improvements** - It is now possible to pass `no_proxy` as a key to the `proxies` dictionary to provide handling similar to the `NO_PROXY` environment variable. - When users provide invalid paths to certificate bundle files or directories Requests now raises `IOError`, rather than failing at the time of the HTTPS request with a fairly inscrutable certificate validation error. - The behavior of `SessionRedirectMixin` was slightly altered. `resolve_redirects` will now detect a redirect by calling `get_redirect_target(response)` instead of directly querying `Response.is_redirect` and `Response.headers['location']`. Advanced users will be able to process malformed redirects more easily. - Changed the internal calculation of elapsed request time to have higher resolution on Windows. - Added `win_inet_pton` as conditional dependency for the `[socks]` extra on Windows with Python 2.7. - Changed the proxy bypass implementation on Windows: the proxy bypass check doesn't use forward and reverse DNS requests anymore - URLs with schemes that begin with `http` but are not `http` or `https` no longer have their host parts forced to lowercase. **Bugfixes** - Much improved handling of non-ASCII `Location` header values in redirects. 
Fewer `UnicodeDecodeErrors` are encountered on Python 2, and Python 3 now correctly understands that Latin-1 is unlikely to be the correct encoding. - If an attempt to `seek` file to find out its length fails, we now appropriately handle that by aborting our content-length calculations. - Restricted `HTTPDigestAuth` to only respond to auth challenges made on 4XX responses, rather than to all auth challenges. - Fixed some code that was firing `DeprecationWarning` on Python 3.6. - The dismayed person emoticon (`/o\\`) no longer has a big head. I'm sure this is what you were all worrying about most. **Miscellaneous** - Updated bundled urllib3 to v1.21.1. - Updated bundled chardet to v3.0.2. - Updated bundled idna to v2.5. - Updated bundled certifi to 2017.4.17. 2.13.0 (2017-01-24) ------------------- **Features** - Only load the `idna` library when we've determined we need it. This will save some memory for users. **Miscellaneous** - Updated bundled urllib3 to 1.20. - Updated bundled idna to 2.2. 2.12.5 (2017-01-18) ------------------- **Bugfixes** - Fixed an issue with JSON encoding detection, specifically detecting big-endian UTF-32 with BOM. 2.12.4 (2016-12-14) ------------------- **Bugfixes** - Fixed regression from 2.12.2 where non-string types were rejected in the basic auth parameters. While support for this behaviour has been re-added, the behaviour is deprecated and will be removed in the future. 2.12.3 (2016-12-01) ------------------- **Bugfixes** - Fixed regression from v2.12.1 for URLs with schemes that begin with "http". These URLs have historically been processed as though they were HTTP-schemed URLs, and so have had parameters added. This was removed in v2.12.2 in an overzealous attempt to resolve problems with IDNA-encoding those URLs. This change was reverted: the other fixes for IDNA-encoding have been judged to be sufficient to return to the behaviour Requests had before v2.12.0. 
2.12.2 (2016-11-30) ------------------- **Bugfixes** - Fixed several issues with IDNA-encoding URLs that are technically invalid but which are widely accepted. Requests will now attempt to IDNA-encode a URL if it can but, if it fails, and the host contains only ASCII characters, it will be passed through optimistically. This will allow users to opt-in to using IDNA2003 themselves if they want to, and will also allow technically invalid but still common hostnames. - Fixed an issue where URLs with leading whitespace would raise `InvalidSchema` errors. - Fixed an issue where some URLs without the HTTP or HTTPS schemes would still have HTTP URL preparation applied to them. - Fixed an issue where Unicode strings could not be used in basic auth. - Fixed an issue encountered by some Requests plugins where constructing a Response object would cause `Response.content` to raise an `AttributeError`. 2.12.1 (2016-11-16) ------------------- **Bugfixes** - Updated setuptools 'security' extra for the new PyOpenSSL backend in urllib3. **Miscellaneous** - Updated bundled urllib3 to 1.19.1. 2.12.0 (2016-11-15) ------------------- **Improvements** - Updated support for internationalized domain names from IDNA2003 to IDNA2008. This updated support is required for several forms of IDNs and is mandatory for .de domains. - Much improved heuristics for guessing content lengths: Requests will no longer read an entire `StringIO` into memory. - Much improved logic for recalculating `Content-Length` headers for `PreparedRequest` objects. - Improved tolerance for file-like objects that have no `tell` method but do have a `seek` method. - Anything that is a subclass of `Mapping` is now treated like a dictionary by the `data=` keyword argument. - Requests now tolerates empty passwords in proxy credentials, rather than stripping the credentials. 
- If a request is made with a file-like object as the body and that request is redirected with a 307 or 308 status code, Requests will now attempt to rewind the body object so it can be replayed. **Bugfixes** - When calling `response.close`, the call to `close` will be propagated through to non-urllib3 backends. - Fixed issue where the `ALL_PROXY` environment variable would be preferred over scheme-specific variables like `HTTP_PROXY`. - Fixed issue where non-UTF8 reason phrases got severely mangled by falling back to decoding using ISO 8859-1 instead. - Fixed a bug where Requests would not correctly correlate cookies set when using custom Host headers if those Host headers did not use the native string type for the platform. **Miscellaneous** - Updated bundled urllib3 to 1.19. - Updated bundled certifi certs to 2016.09.26. 2.11.1 (2016-08-17) ------------------- **Bugfixes** - Fixed a bug when using `iter_content` with `decode_unicode=True` for streamed bodies would raise `AttributeError`. This bug was introduced in 2.11. - Strip Content-Type and Transfer-Encoding headers from the header block when following a redirect that transforms the verb from POST/PUT to GET. 2.11.0 (2016-08-08) ------------------- **Improvements** - Added support for the `ALL_PROXY` environment variable. - Reject header values that contain leading whitespace or newline characters to reduce risk of header smuggling. **Bugfixes** - Fixed occasional `TypeError` when attempting to decode a JSON response that occurred in an error case. Now correctly returns a `ValueError`. - Requests would incorrectly ignore a non-CIDR IP address in the `NO_PROXY` environment variables: Requests now treats it as a specific IP. - Fixed a bug when sending JSON data that could cause us to encounter obscure OpenSSL errors in certain network conditions (yes, really). - Added type checks to ensure that `iter_content` only accepts integers and `None` for chunk sizes. 
- Fixed issue where responses whose body had not been fully consumed would have the underlying connection closed but not returned to the connection pool, which could cause Requests to hang in situations where the `HTTPAdapter` had been configured to use a blocking connection pool. **Miscellaneous** - Updated bundled urllib3 to 1.16. - Some previous releases accidentally accepted non-strings as acceptable header values. This release does not. 2.10.0 (2016-04-29) ------------------- **New Features** - SOCKS Proxy Support! (requires PySocks; `$ pip install requests[socks]`) **Miscellaneous** - Updated bundled urllib3 to 1.15.1. 2.9.2 (2016-04-29) ------------------ **Improvements** - Change built-in CaseInsensitiveDict (used for headers) to use OrderedDict as its underlying datastore. **Bugfixes** - Don't use redirect\_cache if allow\_redirects=False - When passed objects that throw exceptions from `tell()`, send them via chunked transfer encoding instead of failing. - Raise a ProxyError for proxy related connection issues. 2.9.1 (2015-12-21) ------------------ **Bugfixes** - Resolve regression introduced in 2.9.0 that made it impossible to send binary strings as bodies in Python 3. - Fixed errors when calculating cookie expiration dates in certain locales. **Miscellaneous** - Updated bundled urllib3 to 1.13.1. 2.9.0 (2015-12-15) ------------------ **Minor Improvements** (Backwards compatible) - The `verify` keyword argument now supports being passed a path to a directory of CA certificates, not just a single-file bundle. - Warnings are now emitted when sending files opened in text mode. - Added the 511 Network Authentication Required status code to the status code registry. **Bugfixes** - For file-like objects that are not sought to the very beginning, we now send the content length for the number of bytes we will actually read, rather than the total size of the file, allowing partial file uploads. 
- When uploading file-like objects, if they are empty or have no obvious content length we set `Transfer-Encoding: chunked` rather than `Content-Length: 0`. - We correctly receive the response in buffered mode when uploading chunked bodies. - We now handle being passed a query string as a bytestring on Python 3, by decoding it as UTF-8. - Sessions are now closed in all cases (exceptional and not) when using the functional API rather than leaking and waiting for the garbage collector to clean them up. - Correctly handle digest auth headers with a malformed `qop` directive that contains no token, by treating it the same as if no `qop` directive was provided at all. - Minor performance improvements when removing specific cookies by name. **Miscellaneous** - Updated urllib3 to 1.13. 2.8.1 (2015-10-13) ------------------ **Bugfixes** - Update certificate bundle to match `certifi` 2015.9.6.2's weak certificate bundle. - Fix a bug in 2.8.0 where requests would raise `ConnectTimeout` instead of `ConnectionError` - When using the PreparedRequest flow, requests will now correctly respect the `json` parameter. Broken in 2.8.0. - When using the PreparedRequest flow, requests will now correctly handle a Unicode-string method name on Python 2. Broken in 2.8.0. 2.8.0 (2015-10-05) ------------------ **Minor Improvements** (Backwards Compatible) - Requests now supports per-host proxies. This allows the `proxies` dictionary to have entries of the form `{'<scheme>://<hostname>': '<proxy>'}`. Host-specific proxies will be used in preference to the previously-supported scheme-specific ones, but the previous syntax will continue to work. - `Response.raise_for_status` now prints the URL that failed as part of the exception message. - `requests.utils.get_netrc_auth` now takes a `raise_errors` kwarg, defaulting to `False`. When `True`, errors parsing `.netrc` files cause exceptions to be thrown. - Change to bundled projects import logic to make it easier to unbundle requests downstream. 
- Changed the default User-Agent string to avoid leaking data on Linux: now contains only the requests version. **Bugfixes** - The `json` parameter to `post()` and friends will now only be used if neither `data` nor `files` are present, consistent with the documentation. - We now ignore empty fields in the `NO_PROXY` environment variable. - Fixed problem where `httplib.BadStatusLine` would get raised if combining `stream=True` with `contextlib.closing`. - Prevented bugs where we would attempt to return the same connection back to the connection pool twice when sending a Chunked body. - Miscellaneous minor internal changes. - Digest Auth support is now thread safe. **Updates** - Updated urllib3 to 1.12. 2.7.0 (2015-05-03) ------------------ This is the first release that follows our new release process. For more, see [our documentation](https://requests.readthedocs.io/en/latest/community/release-process/). **Bugfixes** - Updated urllib3 to 1.10.4, resolving several bugs involving chunked transfer encoding and response framing. 2.6.2 (2015-04-23) ------------------ **Bugfixes** - Fix regression where compressed data that was sent as chunked data was not properly decompressed. (\#2561) 2.6.1 (2015-04-22) ------------------ **Bugfixes** - Remove VendorAlias import machinery introduced in v2.5.2. - Simplify the PreparedRequest.prepare API: We no longer require the user to pass an empty list to the hooks keyword argument. (c.f. \#2552) - Resolve redirects now receives and forwards all of the original arguments to the adapter. (\#2503) - Handle UnicodeDecodeErrors when trying to deal with a unicode URL that cannot be encoded in ASCII. (\#2540) - Populate the parsed path of the URI field when performing Digest Authentication. (\#2426) - Copy a PreparedRequest's CookieJar more reliably when it is not an instance of RequestsCookieJar. (\#2527) 2.6.0 (2015-03-14) ------------------ **Bugfixes** - CVE-2015-2296: Fix handling of cookies on redirect. 
Previously a cookie without a host value set would use the hostname for the redirected URL exposing requests users to session fixation attacks and potentially cookie stealing. This was disclosed privately by Matthew Daley of [BugFuzz](https://bugfuzz.com). This affects all versions of requests from v2.1.0 to v2.5.3 (inclusive on both ends). - Fix error when requests is an `install_requires` dependency and `python setup.py test` is run. (\#2462) - Fix error when urllib3 is unbundled and requests continues to use the vendored import location. - Include fixes to `urllib3`'s header handling. - Requests' handling of unvendored dependencies is now more restrictive. **Features and Improvements** - Support bytearrays when passed as parameters in the `files` argument. (\#2468) - Avoid data duplication when creating a request with `str`, `bytes`, or `bytearray` input to the `files` argument. 2.5.3 (2015-02-24) ------------------ **Bugfixes** - Revert changes to our vendored certificate bundle. For more context see (\#2455, \#2456, and <https://bugs.python.org/issue23476>) 2.5.2 (2015-02-23) ------------------ **Features and Improvements** - Add sha256 fingerprint support. ([shazow/urllib3\#540](https://github.com/shazow/urllib3/pull/540)) - Improve the performance of headers. ([shazow/urllib3\#544](https://github.com/shazow/urllib3/pull/544)) **Bugfixes** - Copy pip's import machinery. When downstream redistributors remove requests.packages.urllib3 the import machinery will continue to let those same symbols work. Example usage in requests' documentation and 3rd-party libraries relying on the vendored copies of urllib3 will work without having to fallback to the system urllib3. - Attempt to quote parts of the URL on redirect if unquoting and then quoting fails. (\#2356) - Fix filename type check for multipart form-data uploads. (\#2411) - Properly handle the case where a server issuing digest authentication challenges provides both auth and auth-int qop-values. 
(\#2408) - Fix a socket leak. ([shazow/urllib3\#549](https://github.com/shazow/urllib3/pull/549)) - Fix multiple `Set-Cookie` headers properly. ([shazow/urllib3\#534](https://github.com/shazow/urllib3/pull/534)) - Disable the built-in hostname verification. ([shazow/urllib3\#526](https://github.com/shazow/urllib3/pull/526)) - Fix the behaviour of decoding an exhausted stream. ([shazow/urllib3\#535](https://github.com/shazow/urllib3/pull/535)) **Security** - Pulled in an updated `cacert.pem`. - Drop RC4 from the default cipher list. ([shazow/urllib3\#551](https://github.com/shazow/urllib3/pull/551)) 2.5.1 (2014-12-23) ------------------ **Behavioural Changes** - Only catch HTTPErrors in raise\_for\_status (\#2382) **Bugfixes** - Handle LocationParseError from urllib3 (\#2344) - Handle file-like object filenames that are not strings (\#2379) - Unbreak HTTPDigestAuth handler. Allow new nonces to be negotiated (\#2389) 2.5.0 (2014-12-01) ------------------ **Improvements** - Allow usage of urllib3's Retry object with HTTPAdapters (\#2216) - The `iter_lines` method on a response now accepts a delimiter with which to split the content (\#2295) **Behavioural Changes** - Add deprecation warnings to functions in requests.utils that will be removed in 3.0 (\#2309) - Sessions used by the functional API are always closed (\#2326) - Restrict requests to HTTP/1.1 and HTTP/1.0 (stop accepting HTTP/0.9) (\#2323) **Bugfixes** - Only parse the URL once (\#2353) - Allow Content-Length header to always be overridden (\#2332) - Properly handle files in HTTPDigestAuth (\#2333) - Cap redirect\_cache size to prevent memory abuse (\#2299) - Fix HTTPDigestAuth handling of redirects after authenticating successfully (\#2253) - Fix crash with custom method parameter to Session.request (\#2317) - Fix how Link headers are parsed using the regular expression library (\#2271) **Documentation** - Add more references for interlinking (\#2348) - Update CSS for theme (\#2290) - Update width of 
buttons and sidebar (\#2289) - Replace references of Gittip with Gratipay (\#2282) - Add link to changelog in sidebar (\#2273) 2.4.3 (2014-10-06) ------------------ **Bugfixes** - Unicode URL improvements for Python 2. - Re-order JSON param for backwards compat. - Automatically defrag authentication schemes from host/pass URIs. ([\#2249](https://github.com/psf/requests/issues/2249)) 2.4.2 (2014-10-05) ------------------ **Improvements** - FINALLY! Add json parameter for uploads! ([\#2258](https://github.com/psf/requests/pull/2258)) - Support for bytestring URLs on Python 3.x ([\#2238](https://github.com/psf/requests/pull/2238)) **Bugfixes** - Avoid getting stuck in a loop ([\#2244](https://github.com/psf/requests/pull/2244)) - Multiple calls to iter\* fail with unhelpful error. ([\#2240](https://github.com/psf/requests/issues/2240), [\#2241](https://github.com/psf/requests/issues/2241)) **Documentation** - Correct redirection introduction ([\#2245](https://github.com/psf/requests/pull/2245/)) - Added example of how to send multiple files in one request. ([\#2227](https://github.com/psf/requests/pull/2227/)) - Clarify how to pass a custom set of CAs ([\#2248](https://github.com/psf/requests/pull/2248/)) 2.4.1 (2014-09-09) ------------------ - Now has a "security" package extras set, `$ pip install requests[security]` - Requests will now use Certifi if it is available. - Capture and re-raise urllib3 ProtocolError - Bugfix for responses that attempt to redirect to themselves forever (wtf?). 2.4.0 (2014-08-29) ------------------ **Behavioral Changes** - `Connection: keep-alive` header is now sent automatically. **Improvements** - Support for connect timeouts! Timeout now accepts a tuple (connect, read) which is used to set individual connect and read timeouts. - Allow copying of PreparedRequests without headers/cookies. - Updated bundled urllib3 version. - Refactored settings loading from environment -- new Session.merge\_environment\_settings. 
- Handle socket errors in iter\_content. 2.3.0 (2014-05-16) ------------------ **API Changes** - New `Response` property `is_redirect`, which is true when the library could have processed this response as a redirection (whether or not it actually did). - The `timeout` parameter now affects requests with both `stream=True` and `stream=False` equally. - The change in v2.0.0 to mandate explicit proxy schemes has been reverted. Proxy schemes now default to `http://`. - The `CaseInsensitiveDict` used for HTTP headers now behaves like a normal dictionary when referenced as a string or viewed in the interpreter. **Bugfixes** - No longer expose Authorization or Proxy-Authorization headers on redirect. Fix CVE-2014-1829 and CVE-2014-1830 respectively. - Authorization is re-evaluated each redirect. - On redirect, pass url as native strings. - Fall-back to autodetected encoding for JSON when Unicode detection fails. - Headers set to `None` on the `Session` are now correctly not sent. - Correctly honor `decode_unicode` even if it wasn't used earlier in the same response. - Stop advertising `compress` as a supported Content-Encoding. - The `Response.history` parameter is now always a list. - Many, many `urllib3` bugfixes. 2.2.1 (2014-01-23) ------------------ **Bugfixes** - Fixes incorrect parsing of proxy credentials that contain a literal or encoded '\#' character. - Assorted urllib3 fixes. 2.2.0 (2014-01-09) ------------------ **API Changes** - New exception: `ContentDecodingError`. Raised instead of `urllib3` `DecodeError` exceptions. **Bugfixes** - Avoid many many exceptions from the buggy implementation of `proxy_bypass` on OS X in Python 2.6. - Avoid crashing when attempting to get authentication credentials from \~/.netrc when running as a user without a home directory. - Use the correct pool size for pools of connections to proxies. - Fix iteration of `CookieJar` objects. - Ensure that cookies are persisted over redirect. 
- Switch back to using chardet, since it has merged with charade. 2.1.0 (2013-12-05) ------------------ - Updated CA Bundle, of course. - Cookies set on individual Requests through a `Session` (e.g. via `Session.get()`) are no longer persisted to the `Session`. - Clean up connections when we hit problems during chunked upload, rather than leaking them. - Return connections to the pool when a chunked upload is successful, rather than leaking it. - Match the HTTPbis recommendation for HTTP 301 redirects. - Prevent hanging when using streaming uploads and Digest Auth when a 401 is received. - Values of headers set by Requests are now always the native string type. - Fix previously broken SNI support. - Fix accessing HTTP proxies using proxy authentication. - Unencode HTTP Basic usernames and passwords extracted from URLs. - Support for IP address ranges for no\_proxy environment variable - Parse headers correctly when users override the default `Host:` header. - Avoid munging the URL in case of case-sensitive servers. - Looser URL handling for non-HTTP/HTTPS urls. - Accept unicode methods in Python 2.6 and 2.7. - More resilient cookie handling. - Make `Response` objects pickleable. - Actually added MD5-sess to Digest Auth instead of pretending to like last time. - Updated internal urllib3. - Fixed @Lukasa's lack of taste. 2.0.1 (2013-10-24) ------------------ - Updated included CA Bundle with new mistrusts and automated process for the future - Added MD5-sess to Digest Auth - Accept per-file headers in multipart file POST messages. - Fixed: Don't send the full URL on CONNECT messages. - Fixed: Correctly lowercase a redirect scheme. - Fixed: Cookies not persisted when set via functional API. - Fixed: Translate urllib3 ProxyError into a requests ProxyError derived from ConnectionError. - Updated internal urllib3 and chardet. 2.0.0 (2013-09-24) ------------------ **API Changes:** - Keys in the Headers dictionary are now native strings on all Python versions, i.e. 
bytestrings on Python 2, unicode on Python 3. - Proxy URLs now *must* have an explicit scheme. A `MissingSchema` exception will be raised if they don't. - Timeouts now apply to read time if `stream=False`. - `RequestException` is now a subclass of `IOError`, not `RuntimeError`. - Added new method to `PreparedRequest` objects: `PreparedRequest.copy()`. - Added new method to `Session` objects: `Session.update_request()`. This method updates a `Request` object with the data (e.g. cookies) stored on the `Session`. - Added new method to `Session` objects: `Session.prepare_request()`. This method updates and prepares a `Request` object, and returns the corresponding `PreparedRequest` object. - Added new method to `HTTPAdapter` objects: `HTTPAdapter.proxy_headers()`. This should not be called directly, but improves the subclass interface. - `httplib.IncompleteRead` exceptions caused by incorrect chunked encoding will now raise a Requests `ChunkedEncodingError` instead. - Invalid percent-escape sequences now cause a Requests `InvalidURL` exception to be raised. - HTTP 208 no longer uses reason phrase `"im_used"`. Correctly uses `"already_reported"`. - HTTP 226 reason added (`"im_used"`). **Bugfixes:** - Vastly improved proxy support, including the CONNECT verb. Special thanks to the many contributors who worked towards this improvement. - Cookies are now properly managed when 401 authentication responses are received. - Chunked encoding fixes. - Support for mixed case schemes. - Better handling of streaming downloads. - Retrieve environment proxies from more locations. - Minor cookies fixes. - Improved redirect behaviour. - Improved streaming behaviour, particularly for compressed data. - Miscellaneous small Python 3 text encoding bugs. - `.netrc` no longer overrides explicit auth. - Cookies set by hooks are now correctly persisted on Sessions. - Fix problem with cookies that specify port numbers in their host field. - `BytesIO` can be used to perform streaming uploads. 
- More generous parsing of the `no_proxy` environment variable. - Non-string objects can be passed in data values alongside files. 1.2.3 (2013-05-25) ------------------ - Simple packaging fix 1.2.2 (2013-05-23) ------------------ - Simple packaging fix 1.2.1 (2013-05-20) ------------------ - 301 and 302 redirects now change the verb to GET for all verbs, not just POST, improving browser compatibility. - Python 3.3.2 compatibility - Always percent-encode location headers - Fix connection adapter matching to be most-specific first - new argument to the default connection adapter for passing a block argument - prevent a KeyError when there's no link headers 1.2.0 (2013-03-31) ------------------ - Fixed cookies on sessions and on requests - Significantly change how hooks are dispatched - hooks now receive all the arguments specified by the user when making a request so hooks can make a secondary request with the same parameters. This is especially necessary for authentication handler authors - certifi support was removed - Fixed bug where using OAuth 1 with body `signature_type` sent no data - Major proxy work thanks to @Lukasa including parsing of proxy authentication from the proxy url - Fix DigestAuth handling too many 401s - Update vendored urllib3 to include SSL bug fixes - Allow keyword arguments to be passed to `json.loads()` via the `Response.json()` method - Don't send `Content-Length` header by default on `GET` or `HEAD` requests - Add `elapsed` attribute to `Response` objects to time how long a request took. - Fix `RequestsCookieJar` - Sessions and Adapters are now picklable, i.e., can be used with the multiprocessing library - Update charade to version 1.0.3 The change in how hooks are dispatched will likely cause a great deal of issues. 
1.1.0 (2013-01-10) ------------------ - CHUNKED REQUESTS - Support for iterable response bodies - Assume servers persist redirect params - Allow explicit content types to be specified for file data - Make merge\_kwargs case-insensitive when looking up keys 1.0.3 (2012-12-18) ------------------ - Fix file upload encoding bug - Fix cookie behavior 1.0.2 (2012-12-17) ------------------ - Proxy fix for HTTPAdapter. 1.0.1 (2012-12-17) ------------------ - Cert verification exception bug. - Proxy fix for HTTPAdapter. 1.0.0 (2012-12-17) ------------------ - Massive Refactor and Simplification - Switch to Apache 2.0 license - Swappable Connection Adapters - Mountable Connection Adapters - Mutable ProcessedRequest chain - /s/prefetch/stream - Removal of all configuration - Standard library logging - Make Response.json() callable, not property. - Usage of new charade project, which provides python 2 and 3 simultaneous chardet. - Removal of all hooks except 'response' - Removal of all authentication helpers (OAuth, Kerberos) This is not a backwards compatible change. 0.14.2 (2012-10-27) ------------------- - Improved mime-compatible JSON handling - Proxy fixes - Path hack fixes - Case-Insensitive Content-Encoding headers - Support for CJK parameters in form posts 0.14.1 (2012-10-01) ------------------- - Python 3.3 Compatibility - Simplify default accept-encoding - Bugfixes 0.14.0 (2012-09-02) ------------------- - No more iter\_content errors if already downloaded. 0.13.9 (2012-08-25) ------------------- - Fix for OAuth + POSTs - Remove exception eating from dispatch\_hook - General bugfixes 0.13.8 (2012-08-21) ------------------- - Incredible Link header support :) 0.13.7 (2012-08-19) ------------------- - Support for (key, value) lists everywhere. - Digest Authentication improvements. - Ensure proxy exclusions work properly. - Clearer UnicodeError exceptions. - Automatic casting of URLs to strings (fURL and such) - Bugfixes. 
0.13.6 (2012-08-06) ------------------- - Long awaited fix for hanging connections! 0.13.5 (2012-07-27) ------------------- - Packaging fix 0.13.4 (2012-07-27) ------------------- - GSSAPI/Kerberos authentication! - App Engine 2.7 Fixes! - Fix leaking connections (from urllib3 update) - OAuthlib path hack fix - OAuthlib URL parameters fix. 0.13.3 (2012-07-12) ------------------- - Use simplejson if available. - Do not hide SSLErrors behind Timeouts. - Fixed param handling with urls containing fragments. - Significantly improved information in User Agent. - client certificates are ignored when verify=False 0.13.2 (2012-06-28) ------------------- - Zero dependencies (once again)! - New: Response.reason - Sign querystring parameters in OAuth 1.0 - Client certificates no longer ignored when verify=False - Add openSUSE certificate support 0.13.1 (2012-06-07) ------------------- - Allow passing a file or file-like object as data. - Allow hooks to return responses that indicate errors. - Fix Response.text and Response.json for body-less responses. 0.13.0 (2012-05-29) ------------------- - Removal of Requests.async in favor of [grequests](https://github.com/kennethreitz/grequests) - Allow disabling of cookie persistence. - New implementation of safe\_mode - cookies.get now supports default argument - Session cookies not saved when Session.request is called with return\_response=False - Env: no\_proxy support. - RequestsCookieJar improvements. - Various bug fixes. 0.12.1 (2012-05-08) ------------------- - New `Response.json` property. - Ability to add string file uploads. - Fix out-of-range issue with iter\_lines. - Fix iter\_content default size. - Fix POST redirects containing files. 0.12.0 (2012-05-02) ------------------- - EXPERIMENTAL OAUTH SUPPORT! - Proper CookieJar-backed cookies interface with awesome dict-like interface. - Speed fix for non-iterated content chunks. - Move `pre_request` to a more usable place. - New `pre_send` hook. 
- Lazily encode data, params, files. - Load system Certificate Bundle if `certify` isn't available. - Cleanups, fixes. 0.11.2 (2012-04-22) ------------------- - Attempt to use the OS's certificate bundle if `certifi` isn't available. - Infinite digest auth redirect fix. - Multi-part file upload improvements. - Fix decoding of invalid %encodings in URLs. - If there is no content in a response don't throw an error the second time that content is attempted to be read. - Upload data on redirects. 0.11.1 (2012-03-30) ------------------- - POST redirects now break RFC to do what browsers do: Follow up with a GET. - New `strict_mode` configuration to disable new redirect behavior. 0.11.0 (2012-03-14) ------------------- - Private SSL Certificate support - Remove select.poll from Gevent monkeypatching - Remove redundant generator for chunked transfer encoding - Fix: Response.ok raises Timeout Exception in safe\_mode 0.10.8 (2012-03-09) ------------------- - Generate chunked ValueError fix - Proxy configuration by environment variables - Simplification of iter\_lines. - New trust\_env configuration for disabling system/environment hints. - Suppress cookie errors. 0.10.7 (2012-03-07) ------------------- - encode\_uri = False 0.10.6 (2012-02-25) ------------------- - Allow '=' in cookies. 0.10.5 (2012-02-25) ------------------- - Response body with 0 content-length fix. - New async.imap. - Don't fail on netrc. 0.10.4 (2012-02-20) ------------------- - Honor netrc. 0.10.3 (2012-02-20) ------------------- - HEAD requests don't follow redirects anymore. - raise\_for\_status() doesn't raise for 3xx anymore. - Make Session objects picklable. - ValueError for invalid schema URLs. 0.10.2 (2012-01-15) ------------------- - Vastly improved URL quoting. - Additional allowed cookie key values. - Attempted fix for "Too many open files" Error - Replace unicode errors on first pass, no need for second pass. - Append '/' to bare-domain urls before query insertion. 
- Exceptions now inherit from RuntimeError. - Binary uploads + auth fix. - Bugfixes. 0.10.1 (2012-01-23) ------------------- - PYTHON 3 SUPPORT! - Dropped 2.5 Support. (*Backwards Incompatible*) 0.10.0 (2012-01-21) ------------------- - `Response.content` is now bytes-only. (*Backwards Incompatible*) - New `Response.text` is unicode-only. - If no `Response.encoding` is specified and `chardet` is available, `Response.text` will guess an encoding. - Default to ISO-8859-1 (Western) encoding for "text" subtypes. - Removal of decode\_unicode. (*Backwards Incompatible*) - New multiple-hooks system. - New `Response.register_hook` for registering hooks within the pipeline. - `Response.url` is now Unicode. 0.9.3 (2012-01-18) ------------------ - SSL verify=False bugfix (apparent on windows machines). 0.9.2 (2012-01-18) ------------------ - Asynchronous async.send method. - Support for proper chunk streams with boundaries. - session argument for Session classes. - Print entire hook tracebacks, not just exception instance. - Fix response.iter\_lines from pending next line. - Fix but in HTTP-digest auth w/ URI having query strings. - Fix in Event Hooks section. - Urllib3 update. 0.9.1 (2012-01-06) ------------------ - danger\_mode for automatic Response.raise\_for\_status() - Response.iter\_lines refactor 0.9.0 (2011-12-28) ------------------ - verify ssl is default. 0.8.9 (2011-12-28) ------------------ - Packaging fix. 0.8.8 (2011-12-28) ------------------ - SSL CERT VERIFICATION! - Release of Cerifi: Mozilla's cert list. - New 'verify' argument for SSL requests. - Urllib3 update. 0.8.7 (2011-12-24) ------------------ - iter\_lines last-line truncation fix - Force safe\_mode for async requests - Handle safe\_mode exceptions more consistently - Fix iteration on null responses in safe\_mode 0.8.6 (2011-12-18) ------------------ - Socket timeout fixes. - Proxy Authorization support. 0.8.5 (2011-12-14) ------------------ - Response.iter\_lines! 
0.8.4 (2011-12-11) ------------------ - Prefetch bugfix. - Added license to installed version. 0.8.3 (2011-11-27) ------------------ - Converted auth system to use simpler callable objects. - New session parameter to API methods. - Display full URL while logging. 0.8.2 (2011-11-19) ------------------ - New Unicode decoding system, based on over-ridable Response.encoding. - Proper URL slash-quote handling. - Cookies with `[`, `]`, and `_` allowed. 0.8.1 (2011-11-15) ------------------ - URL Request path fix - Proxy fix. - Timeouts fix. 0.8.0 (2011-11-13) ------------------ - Keep-alive support! - Complete removal of Urllib2 - Complete removal of Poster - Complete removal of CookieJars - New ConnectionError raising - Safe\_mode for error catching - prefetch parameter for request methods - OPTION method - Async pool size throttling - File uploads send real names - Vendored in urllib3 0.7.6 (2011-11-07) ------------------ - Digest authentication bugfix (attach query data to path) 0.7.5 (2011-11-04) ------------------ - Response.content = None if there was an invalid response. - Redirection auth handling. 0.7.4 (2011-10-26) ------------------ - Session Hooks fix. 0.7.3 (2011-10-23) ------------------ - Digest Auth fix. 0.7.2 (2011-10-23) ------------------ - PATCH Fix. 0.7.1 (2011-10-23) ------------------ - Move away from urllib2 authentication handling. - Fully Remove AuthManager, AuthObject, &c. - New tuple-based auth system with handler callbacks. 0.7.0 (2011-10-22) ------------------ - Sessions are now the primary interface. - Deprecated InvalidMethodException. - PATCH fix. - New config system (no more global settings). 0.6.6 (2011-10-19) ------------------ - Session parameter bugfix (params merging). 0.6.5 (2011-10-18) ------------------ - Offline (fast) test suite. - Session dictionary argument merging. 0.6.4 (2011-10-13) ------------------ - Automatic decoding of unicode, based on HTTP Headers. - New `decode_unicode` setting. - Removal of `r.read/close` methods. 
- New `r.faw` interface for advanced response usage.\* - Automatic expansion of parameterized headers. 0.6.3 (2011-10-13) ------------------ - Beautiful `requests.async` module, for making async requests w/ gevent. 0.6.2 (2011-10-09) ------------------ - GET/HEAD obeys allow\_redirects=False. 0.6.1 (2011-08-20) ------------------ - Enhanced status codes experience `\o/` - Set a maximum number of redirects (`settings.max_redirects`) - Full Unicode URL support - Support for protocol-less redirects. - Allow for arbitrary request types. - Bugfixes 0.6.0 (2011-08-17) ------------------ - New callback hook system - New persistent sessions object and context manager - Transparent Dict-cookie handling - Status code reference object - Removed Response.cached - Added Response.request - All args are kwargs - Relative redirect support - HTTPError handling improvements - Improved https testing - Bugfixes 0.5.1 (2011-07-23) ------------------ - International Domain Name Support! - Access headers without fetching entire body (`read()`) - Use lists as dicts for parameters - Add Forced Basic Authentication - Forced Basic is default authentication type - `python-requests.org` default User-Agent header - CaseInsensitiveDict lower-case caching - Response.history bugfix 0.5.0 (2011-06-21) ------------------ - PATCH Support - Support for Proxies - HTTPBin Test Suite - Redirect Fixes - settings.verbose stream writing - Querystrings for all methods - URLErrors (Connection Refused, Timeout, Invalid URLs) are treated as explicitly raised `r.requests.get('hwe://blah'); r.raise_for_status()` 0.4.1 (2011-05-22) ------------------ - Improved Redirection Handling - New 'allow\_redirects' param for following non-GET/HEAD Redirects - Settings module refactoring 0.4.0 (2011-05-15) ------------------ - Response.history: list of redirected responses - Case-Insensitive Header Dictionaries! 
- Unicode URLs 0.3.4 (2011-05-14) ------------------ - Urllib2 HTTPAuthentication Recursion fix (Basic/Digest) - Internal Refactor - Bytes data upload Bugfix 0.3.3 (2011-05-12) ------------------ - Request timeouts - Unicode url-encoded data - Settings context manager and module 0.3.2 (2011-04-15) ------------------ - Automatic Decompression of GZip Encoded Content - AutoAuth Support for Tupled HTTP Auth 0.3.1 (2011-04-01) ------------------ - Cookie Changes - Response.read() - Poster fix 0.3.0 (2011-02-25) ------------------ - Automatic Authentication API Change - Smarter Query URL Parameterization - Allow file uploads and POST data together - New Authentication Manager System : - Simpler Basic HTTP System - Supports all built-in urllib2 Auths - Allows for custom Auth Handlers 0.2.4 (2011-02-19) ------------------ - Python 2.5 Support - PyPy-c v1.4 Support - Auto-Authentication tests - Improved Request object constructor 0.2.3 (2011-02-15) ------------------ - New HTTPHandling Methods : - Response.\_\_nonzero\_\_ (false if bad HTTP Status) - Response.ok (True if expected HTTP Status) - Response.error (Logged HTTPError if bad HTTP Status) - Response.raise\_for\_status() (Raises stored HTTPError) 0.2.2 (2011-02-14) ------------------ - Still handles request in the event of an HTTPError. (Issue \#2) - Eventlet and Gevent Monkeypatch support. - Cookie Support (Issue \#1) 0.2.1 (2011-02-14) ------------------ - Added file attribute to POST and PUT requests for multipart-encode file uploads. - Added Request.url attribute for context and redirects 0.2.0 (2011-02-14) ------------------ - Birth! 0.0.1 (2011-02-13) ------------------ - Frustration - Conception
/requests-2.31.0.tar.gz/requests-2.31.0/HISTORY.md
0.829906
0.803444
HISTORY.md
pypi
from .structures import LookupDict

# Canonical and nickname aliases for every HTTP status code Requests knows
# about.  Each value is a tuple of attribute names that will resolve to the
# numeric code on the ``codes`` lookup object below.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}

# Expose every alias as an attribute, e.g. ``codes.ok == 200``.  Upper-case
# twins are added too, except for the ASCII-art aliases that start with a
# backslash or slash (their upper-case form is either identical or invalid
# as an identifier).
codes = LookupDict(name='status_codes')

for status, aliases in _codes.items():
    for alias in aliases:
        setattr(codes, alias, status)
        if not alias.startswith(('\\', '/')):
            setattr(codes, alias.upper(), status)
/requests2-2.16.0.tar.gz/requests2-2.16.0/requests/status_codes.py
0.570092
0.170473
status_codes.py
pypi
from . import sessions


def request(method, url, **kwargs):
    r"""Construct and send a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary or list of tuples ``[(key, value)]``
        (will be form-encoded), bytes, or file-like object to send in the body
        of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or
        ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple``
        can be a 2-tuple ``('filename', fileobj)``, 3-tuple
        ``('filename', fileobj, 'content_type')`` or a 4-tuple
        ``('filename', fileobj, 'content_type', custom_headers)``, where
        ``'content-type'`` is a string defining the content type of the given
        file and ``custom_headers`` a dict-like object containing additional
        headers to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send
        data before giving up, as a float, or a
        :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable
        GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to
        ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the
        proxy.
    :param verify: (optional) Either a boolean, in which case it controls
        whether we verify the server's TLS certificate, or a string, in which
        case it must be a path to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be
        immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # Run the throw-away Session as a context manager so its sockets are
    # closed deterministically; otherwise they linger until garbage
    # collection, which can trigger ResourceWarnings or look like a leak.
    with sessions.Session() as http_session:
        return http_session.request(method=method, url=url, **kwargs)


def get(url, params=None, **kwargs):
    r"""Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)


def options(url, **kwargs):
    r"""Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs.setdefault('allow_redirects', True)
    return request('options', url, **kwargs)


def head(url, **kwargs):
    r"""Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # HEAD historically does not follow redirects by default.
    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)


def post(url, data=None, json=None, **kwargs):
    r"""Send a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or
        file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('post', url, data=data, json=json, **kwargs)


def put(url, data=None, **kwargs):
    r"""Send a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or
        file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('put', url, data=data, **kwargs)


def patch(url, data=None, **kwargs):
    r"""Send a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary (will be form-encoded), bytes, or
        file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('patch', url, data=data, **kwargs)


def delete(url, **kwargs):
    r"""Send a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
/requests2-2.16.0.tar.gz/requests2-2.16.0/requests/api.py
0.875075
0.424531
api.py
pypi
# NOTE(review): ``collections.MutableMapping`` / ``collections.Mapping`` were
# deprecated aliases that Python 3.10 removed outright, so the original
# ``import collections`` + attribute access breaks on modern interpreters.
# Import from ``collections.abc`` with a Python 2 fallback instead.
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2
    from collections import Mapping, MutableMapping

from .compat import OrderedDict


class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of ``MutableMapping`` as well as
    dict's ``copy``. Also provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the case of
    the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``,
    ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys.
    However, querying and contains testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the value of a
    ``'Content-Encoding'`` response header, regardless of how the header name
    was originally stored.

    If the constructor, ``.update``, or equality comparison operations are
    given keys that have equal ``.lower()``s, the behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (original-cased key, value); insertion order
        # is preserved so iteration matches the order keys were set.
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys, not the lowercased lookup keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively.
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required because MutableMapping does not supply one.
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))


class LookupDict(dict):
    """Dictionary lookup object.

    Attribute assignments land in ``__dict__`` and are exposed through item
    access; missing keys resolve to ``None`` instead of raising ``KeyError``.
    """

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return '<lookup \'%s\'>' % (self.name)

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
/requests2-2.16.0.tar.gz/requests2-2.16.0/requests/structures.py
0.872741
0.433981
structures.py
pypi
#   __
#  /__)  _  _     _   _ _/   _
# / (   (- (/ (/ (- _)  /  _)
#          /

"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~

Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:

   >>> import requests
   >>> r = requests.get('https://www.python.org')
   >>> r.status_code
   200
   >>> 'Python is a programming language' in r.content
   True

... or POST:

   >>> payload = dict(key1='value1', key2='value2')
   >>> r = requests.post('http://httpbin.org/post', data=payload)
   >>> print(r.text)
   {
     ...
     "form": {
       "key2": "value2",
       "key1": "value1"
     },
     ...
   }

The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.

:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""

from .__version__ import (
    __title__, __description__, __url__, __version__, __build__, __author__,
    __author_email__, __license__, __copyright__, __cake__,
)

# Fail fast if the urllib3 dependency is outside the tested range.
import urllib3
major, minor, patch = [int(part) for part in urllib3.__version__.split('.')[:3]]

# urllib3 >= 1.21.1, < 1.22
# (kept as assert statements so a -O interpreter skips the check, exactly as
# the original did)
try:
    assert major == 1
    assert minor >= 21
    assert minor <= 22
except AssertionError:
    raise RuntimeError('Requests dependency \'urllib3\' must be version >= 1.21.1, < 1.22!')

# Fail fast if the chardet dependency is outside the tested range.
import chardet
major, minor, patch = [int(part) for part in chardet.__version__.split('.')[:3]]

# chardet >= 3.0.2, < 3.1.0
try:
    assert major == 3
    assert minor < 1
    assert patch >= 2
except AssertionError:
    raise RuntimeError('Requests dependency \'chardet\' must be version >= 3.0.2, < 3.1.0!')

# Attempt to enable urllib3's SNI support, if possible.
try:
    from urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass

import warnings

# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)

from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
    RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError,
    ConnectionError, FileModeWarning, ConnectTimeout, ReadTimeout,
)

# Set default logging handler to avoid "No handler found" warnings.
import logging

try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
/requests2-2.16.0.tar.gz/requests2-2.16.0/requests/__init__.py
0.680242
0.178956
__init__.py
pypi
from .structures import LookupDict

# Alias tuples for each known HTTP status code; every name becomes an
# attribute of the ``codes`` lookup object built below.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('resume_incomplete', 'resume'),

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
}

# Expose every alias (and, where legal as an identifier, its upper-case twin)
# as an attribute, e.g. ``codes.ok == 200``.  Names starting with a backslash
# are ASCII-art aliases and get no upper-case form.
codes = LookupDict(name='status_codes')

for status, aliases in _codes.items():
    for alias in aliases:
        setattr(codes, alias, status)
        if not alias.startswith('\\'):
            setattr(codes, alias.upper(), status)
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/status_codes.py
0.590897
0.163345
status_codes.py
pypi
from . import sessions


def request(method, url, **kwargs):
    r"""Construct and send a :class:`Request <Request>`. Returns
    :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in
        the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of 'name': file-like-objects (or
        {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP
        Auth.
    :param timeout: (optional) Float describing the timeout of the request.
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE
        redirect following is allowed.
    :param proxies: (optional) Dictionary mapping protocol to the URL of the
        proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A
        CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be
        immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.

    Usage::

        >>> import requests
        >>> req = requests.request('GET', 'http://httpbin.org/get')
        <Response [200]>
    """
    # Each top-level call is backed by a fresh, throw-away Session.  This
    # release predates context-managed sessions, so it is simply discarded
    # (and not explicitly closed) once the request completes.
    return sessions.Session().request(method=method, url=url, **kwargs)


def get(url, **kwargs):
    r"""Send a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    kwargs.setdefault('allow_redirects', True)
    return request('get', url, **kwargs)


def options(url, **kwargs):
    r"""Send an OPTIONS request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    kwargs.setdefault('allow_redirects', True)
    return request('options', url, **kwargs)


def head(url, **kwargs):
    r"""Send a HEAD request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # HEAD does not follow redirects unless the caller opts in.
    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)


def post(url, data=None, **kwargs):
    r"""Send a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in
        the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('post', url, data=data, **kwargs)


def put(url, data=None, **kwargs):
    r"""Send a PUT request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in
        the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('put', url, data=data, **kwargs)


def patch(url, data=None, **kwargs):
    r"""Send a PATCH request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in
        the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('patch', url, data=data, **kwargs)


def delete(url, **kwargs):
    r"""Send a DELETE request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('delete', url, **kwargs)
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/api.py
0.856332
0.382228
api.py
pypi
import os import collections from itertools import islice class IteratorProxy(object): """docstring for IteratorProxy""" def __init__(self, i): self.i = i # self.i = chain.from_iterable(i) def __iter__(self): return self.i def __len__(self): if hasattr(self.i, '__len__'): return len(self.i) if hasattr(self.i, 'len'): return self.i.len if hasattr(self.i, 'fileno'): return os.fstat(self.i.fileno()).st_size def read(self, n): return "".join(islice(self.i, None, n)) class CaseInsensitiveDict(collections.MutableMapping): """ A case-insensitive ``dict``-like object. Implements all methods and operations of ``collections.MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. """ def __init__(self, data=None, **kwargs): self._store = dict() if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): # Use the lowercased key for lookups, but store the actual # key alongside the value. 
self._store[key.lower()] = (key, value) def __getitem__(self, key): return self._store[key.lower()][1] def __delitem__(self, key): del self._store[key.lower()] def __iter__(self): return (casedkey for casedkey, mappedvalue in self._store.values()) def __len__(self): return len(self._store) def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ( (lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items() ) def __eq__(self, other): if isinstance(other, collections.Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) # Copy is required def copy(self): return CaseInsensitiveDict(self._store.values()) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" def __init__(self, name=None): self.name = name super(LookupDict, self).__init__() def __repr__(self): return '<lookup \'%s\'>' % (self.name) def __getitem__(self, key): # We allow fall-through here, so values default to None return self.__dict__.get(key, None) def get(self, key, default=None): return self.__dict__.get(key, default)
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/structures.py
0.753285
0.2438
structures.py
pypi
import codecs import mimetypes from uuid import uuid4 from io import BytesIO from .packages import six from .packages.six import b from .fields import RequestField writer = codecs.lookup('utf-8')[3] def choose_boundary(): """ Our embarassingly-simple replacement for mimetools.choose_boundary. """ return uuid4().hex def iter_field_objects(fields): """ Iterate over fields. Supports list of (k, v) tuples and dicts, and lists of :class:`~urllib3.fields.RequestField`. """ if isinstance(fields, dict): i = six.iteritems(fields) else: i = iter(fields) for field in i: if isinstance(field, RequestField): yield field else: yield RequestField.from_tuples(*field) def iter_fields(fields): """ Iterate over fields. .. deprecated :: The addition of `~urllib3.fields.RequestField` makes this function obsolete. Instead, use :func:`iter_field_objects`, which returns `~urllib3.fields.RequestField` objects, instead. Supports list of (k, v) tuples and dicts. """ if isinstance(fields, dict): return ((k, v) for k, v in six.iteritems(fields)) return ((k, v) for k, v in fields) def encode_multipart_formdata(fields, boundary=None): """ Encode a dictionary of ``fields`` using the multipart/form-data MIME format. :param fields: Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). :param boundary: If not specified, then a random boundary will be generated using :func:`mimetools.choose_boundary`. """ body = BytesIO() if boundary is None: boundary = choose_boundary() for field in iter_field_objects(fields): body.write(b('--%s\r\n' % (boundary))) writer(body).write(field.render_headers()) data = field.data if isinstance(data, int): data = str(data) # Backwards compatibility if isinstance(data, six.text_type): writer(body).write(data) else: body.write(data) body.write(b'\r\n') body.write(b('--%s--\r\n' % (boundary))) content_type = str('multipart/form-data; boundary=%s' % boundary) return body.getvalue(), content_type
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/packages/urllib3/filepost.py
0.687945
0.167491
filepost.py
pypi
from collections import MutableMapping try: from threading import RLock except ImportError: # Platform-specific: No threads available class RLock: def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict __all__ = ['RecentlyUsedContainer'] _Null = object() class RecentlyUsedContainer(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Every time an item is evicted from the container, ``dispose_func(value)`` is called. Callback which will get called """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self.lock = RLock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self.lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self.lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. 
if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self.lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self.lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): with self.lock: # Copy pointers to all values, then wipe the mapping # under Python 2, this copies the list of values twice :-| values = list(self._container.values()) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self.lock: return self._container.keys()
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/packages/urllib3/_collections.py
0.794584
0.259148
_collections.py
pypi
try: from urllib.parse import urlencode except ImportError: from urllib import urlencode from .filepost import encode_multipart_formdata __all__ = ['RequestMethods'] class RequestMethods(object): """ Convenience mixin for classes who implement a :meth:`urlopen` method, such as :class:`~urllib3.connectionpool.HTTPConnectionPool` and :class:`~urllib3.poolmanager.PoolManager`. Provides behavior for making common types of HTTP request methods and decides which type of request field encoding to use. Specifically, :meth:`.request_encode_url` is for sending requests whose fields are encoded in the URL (such as GET, HEAD, DELETE). :meth:`.request_encode_body` is for sending requests whose fields are encoded in the *body* of the request using multipart or www-form-urlencoded (such as for POST, PUT, PATCH). :meth:`.request` is for making any kind of request, it will look up the appropriate encoding format and use one of the above two methods to make the request. Initializer parameters: :param headers: Headers to include with all requests, unless other headers are given explicitly. """ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE']) def __init__(self, headers=None): self.headers = headers or {} def urlopen(self, method, url, body=None, headers=None, encode_multipart=True, multipart_boundary=None, **kw): # Abstract raise NotImplemented("Classes extending RequestMethods must implement " "their own ``urlopen`` method.") def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the appropriate encoding of ``fields`` based on the ``method`` used. This is a convenience method that requires the least amount of manual effort. It can be used in most situations, while still having the option to drop down to more specific methods when necessary, such as :meth:`request_encode_url`, :meth:`request_encode_body`, or even the lowest level :meth:`urlopen`. 
""" method = method.upper() if method in self._encode_url_methods: return self.request_encode_url(method, url, fields=fields, headers=headers, **urlopen_kw) else: return self.request_encode_body(method, url, fields=fields, headers=headers, **urlopen_kw) def request_encode_url(self, method, url, fields=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. """ if fields: url += '?' + urlencode(fields) return self.urlopen(method, url, **urlopen_kw) def request_encode_body(self, method, url, fields=None, headers=None, encode_multipart=True, multipart_boundary=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. When ``encode_multipart=True`` (default), then :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the payload with the appropriate content type. Otherwise :meth:`urllib.urlencode` is used with the 'application/x-www-form-urlencoded' content type. Multipart encoding must be used when posting files, and it's reasonably safe to use it in other times too. However, it may break request signing, such as with OAuth. Supports an optional ``fields`` parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. For example: :: fields = { 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', } When uploading a file, providing a filename (the first parameter of the tuple) is optional but recommended to best mimick behavior of browsers. 
Note that if ``headers`` are supplied, the 'Content-Type' header will be overwritten because it depends on the dynamic random boundary string which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ if encode_multipart: body, content_type = encode_multipart_formdata(fields or {}, boundary=multipart_boundary) else: body, content_type = (urlencode(fields or {}), 'application/x-www-form-urlencoded') if headers is None: headers = self.headers headers_ = {'Content-Type': content_type} headers_.update(headers) return self.urlopen(method, url, body=body, headers=headers_, **urlopen_kw)
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/packages/urllib3/request.py
0.764188
0.223462
request.py
pypi
import email.utils import mimetypes from .packages import six def guess_content_type(filename, default='application/octet-stream'): """ Guess the "Content-Type" of a file. :param filename: The filename to guess the "Content-Type" of using :mod:`mimetimes`. :param default: If no "Content-Type" can be guessed, default to `default`. """ if filename: return mimetypes.guess_type(filename)[0] or default return default def format_header_param(name, value): """ Helper function to format and quote a single header parameter. Particularly useful for header parameters which might contain non-ASCII values, like file names. This follows RFC 2231, as suggested by RFC 2388 Section 4.4. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ if not any(ch in value for ch in '"\\\r\n'): result = '%s="%s"' % (name, value) try: result.encode('ascii') except UnicodeEncodeError: pass else: return result if not six.PY3: # Python 2: value = value.encode('utf-8') value = email.utils.encode_rfc2231(value, 'utf-8') value = '%s*=%s' % (name, value) return value class RequestField(object): """ A data container for request body parameters. :param name: The name of this request field. :param data: The data/value body. :param filename: An optional filename of the request field. :param headers: An optional dict-like object of headers to initially use for the field. """ def __init__(self, name, data, filename=None, headers=None): self._name = name self._filename = filename self.data = data self.headers = {} if headers: self.headers = dict(headers) @classmethod def from_tuples(cls, fieldname, value): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. Supports constructing :class:`~urllib3.fields.RequestField` from parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. 
For example: :: 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', Field names and filenames must be unicode. """ if isinstance(value, tuple): if len(value) == 3: filename, data, content_type = value else: filename, data = value content_type = guess_content_type(filename) else: filename = None content_type = None data = value request_param = cls(fieldname, data, filename=filename) request_param.make_multipart(content_type=content_type) return request_param def _render_part(self, name, value): """ Overridable helper function to format a single header parameter. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ return format_header_param(name, value) def _render_parts(self, header_parts): """ Helper function to format and quote a single header. Useful for single headers that are composed of multiple items. E.g., 'Content-Disposition' fields. :param header_parts: A sequence of (k, v) typles or a :class:`dict` of (k, v) to format as `k1="v1"; k2="v2"; ...`. """ parts = [] iterable = header_parts if isinstance(header_parts, dict): iterable = header_parts.items() for name, value in iterable: if value: parts.append(self._render_part(name, value)) return '; '.join(parts) def render_headers(self): """ Renders the headers for this request field. 
""" lines = [] sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] for sort_key in sort_keys: if self.headers.get(sort_key, False): lines.append('%s: %s' % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: lines.append('%s: %s' % (header_name, header_value)) lines.append('\r\n') return '\r\n'.join(lines) def make_multipart(self, content_disposition=None, content_type=None, content_location=None): """ Makes this request field into a multipart request field. This method overrides "Content-Disposition", "Content-Type" and "Content-Location" headers to the request parameter. :param content_type: The 'Content-Type' of the request body. :param content_location: The 'Content-Location' of the request body. """ self.headers['Content-Disposition'] = content_disposition or 'form-data' self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))]) self.headers['Content-Type'] = content_type self.headers['Content-Location'] = content_location
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/packages/urllib3/fields.py
0.753739
0.443661
fields.py
pypi
try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. 
dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
/requests_SSL_v3-2.1.0.tar.gz/requests_SSL_v3-2.1.0/requests/packages/urllib3/packages/ordered_dict.py
0.596903
0.266137
ordered_dict.py
pypi
from requests.adapters import BaseAdapter, HTTPAdapter from requests.compat import urlparse class Helper(object): @classmethod def get_subclasses(helper_class, in_class): """ Get all of the subclasses of in_class (recursively) """ subclasses = in_class.__subclasses__() for subclass in list(subclasses): subclasses.extend(helper_class.get_subclasses(subclass)) return subclasses class BaseAdapterExtension(BaseAdapter): """ A base class for the Adapter Extensions """ SUPPORTED_SCHEMES = [] @classmethod def supports_request(cls, request): """ Determines if this adapter extension is expected this 'type' of request. Returns True if the request.url scheme is supported by this extension. Otherwise returns False. An AdapterExtension should consider overriding this to perform more advanced logic """ # Parse the request URL parsed_url = urlparse(request.url) # Determine the request scheme scheme = parsed_url.scheme # Check the scheme is supported if scheme in cls.SUPPORTED_SCHEMES: return True else: return False class HTTPAdapterExtensionWrapper(HTTPAdapter, BaseAdapterExtension): """ A wrapper for the requests.HTTPAdapter class. Adds the SUPPORTED_SCHEMES information to the HTTPAdapter, making it compatible with the extension framework logic. """ SUPPORTED_SCHEMES = ['http', 'https'] class FAdapterExtension(object): """ A Factory for selecting the appropriate AdapterExtension for a given request. """ @classmethod def get_adapter_extension_class(cls, request): """ Returns the appropraite AdapterExtension to handle the request. If no AdapterExtension supports the request, None is returned. """ # Iterate over the adapters that subclass BaseAdapterExtension for adapter_extension_cls in Helper.get_subclasses(BaseAdapterExtension): # If the adapter supports the request, return the adapter class if adapter_extension_cls.supports_request(request): return adapter_extension_cls return None
/requests_extensions.adapters-0.1.2.zip/requests_extensions.adapters-0.1.2/src/requests_extensions/adapters/core/__init__.py
0.858363
0.182225
__init__.py
pypi
# __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> b'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('https://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key1": "value1", "key2": "value2" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at <https://requests.readthedocs.io>. :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ import urllib3 import chardet import warnings from .exceptions import RequestsDependencyWarning def check_compatibility(urllib3_version, chardet_version): urllib3_version = urllib3_version.split('.') assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: urllib3_version.append('0') # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) # urllib3 >= 1.21.1, <= 1.25 assert major == 1 assert minor >= 21 assert minor <= 25 # Check chardet for compatibility. major, minor, patch = chardet_version.split('.')[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet >= 3.0.2, < 3.1.0 assert major == 3 assert minor < 1 assert patch >= 2 def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: cryptography_version = list(map(int, cryptography_version.split('.'))) except ValueError: return if cryptography_version < [1, 3, 4]: warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) warnings.warn(warning, RequestsDependencyWarning) # Check imported dependencies for compatibility. 
try: check_compatibility(urllib3.__version__, chardet.__version__) except (AssertionError, ValueError): warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " "version!".format(urllib3.__version__, chardet.__version__), RequestsDependencyWarning) # Attempt to enable urllib3's SNI support, if possible try: from urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. from urllib3.exceptions import DependencyWarning warnings.simplefilter('ignore', DependencyWarning) from .__version__ import __title__, __description__, __url__, __version__ from .__version__ import __build__, __author__, __author_email__, __license__ from .__version__ import __copyright__, __cake__ from . import utils from . import packages from .models import Request, Response, PreparedRequest from .api import request, get, head, post, patch, put, delete, options from .sessions import session, Session from .status_codes import codes from .exceptions import ( RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError, ConnectionError, FileModeWarning, ConnectTimeout, ReadTimeout ) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. warnings.simplefilter('default', FileModeWarning, append=True)
/requestsaa-0.1.2-py3-none-any.whl/requests/__init__.py
0.734405
0.272867
__init__.py
pypi
r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary items. Example:: >>> import requests >>> requests.codes['temporary_redirect'] 307 >>> requests.codes.teapot 418 >>> requests.codes['\o/'] 200 Some codes have multiple names, and both upper- and lower-case versions of the names are allowed. For example, ``codes.ok``, ``codes.OK``, and ``codes.okay`` all correspond to the HTTP status code 200. """ from .structures import LookupDict _codes = { # Informational. 100: ("continue",), 101: ("switching_protocols",), 102: ("processing",), 103: ("checkpoint",), 122: ("uri_too_long", "request_uri_too_long"), 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), 201: ("created",), 202: ("accepted",), 203: ("non_authoritative_info", "non_authoritative_information"), 204: ("no_content",), 205: ("reset_content", "reset"), 206: ("partial_content", "partial"), 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), 208: ("already_reported",), 226: ("im_used",), # Redirection. 300: ("multiple_choices",), 301: ("moved_permanently", "moved", "\\o-"), 302: ("found",), 303: ("see_other", "other"), 304: ("not_modified",), 305: ("use_proxy",), 306: ("switch_proxy",), 307: ("temporary_redirect", "temporary_moved", "temporary"), 308: ( "permanent_redirect", "resume_incomplete", "resume", ), # "resume" and "resume_incomplete" to be removed in 3.0 # Client Error. 
400: ("bad_request", "bad"), 401: ("unauthorized",), 402: ("payment_required", "payment"), 403: ("forbidden",), 404: ("not_found", "-o-"), 405: ("method_not_allowed", "not_allowed"), 406: ("not_acceptable",), 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), 408: ("request_timeout", "timeout"), 409: ("conflict",), 410: ("gone",), 411: ("length_required",), 412: ("precondition_failed", "precondition"), 413: ("request_entity_too_large",), 414: ("request_uri_too_large",), 415: ("unsupported_media_type", "unsupported_media", "media_type"), 416: ( "requested_range_not_satisfiable", "requested_range", "range_not_satisfiable", ), 417: ("expectation_failed",), 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), 421: ("misdirected_request",), 422: ("unprocessable_entity", "unprocessable"), 423: ("locked",), 424: ("failed_dependency", "dependency"), 425: ("unordered_collection", "unordered"), 426: ("upgrade_required", "upgrade"), 428: ("precondition_required", "precondition"), 429: ("too_many_requests", "too_many"), 431: ("header_fields_too_large", "fields_too_large"), 444: ("no_response", "none"), 449: ("retry_with", "retry"), 450: ("blocked_by_windows_parental_controls", "parental_controls"), 451: ("unavailable_for_legal_reasons", "legal_reasons"), 499: ("client_closed_request",), # Server Error. 
500: ("internal_server_error", "server_error", "/o\\", "✗"), 501: ("not_implemented",), 502: ("bad_gateway",), 503: ("service_unavailable", "unavailable"), 504: ("gateway_timeout",), 505: ("http_version_not_supported", "http_version"), 506: ("variant_also_negotiates",), 507: ("insufficient_storage",), 509: ("bandwidth_limit_exceeded", "bandwidth"), 510: ("not_extended",), 511: ("network_authentication_required", "network_auth", "network_authentication"), } codes = LookupDict(name="status_codes") def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) if not title.startswith(("\\", "/")): setattr(codes, title.upper(), code) def doc(code): names = ", ".join(f"``{n}``" for n in _codes[code]) return "* %d: %s" % (code, names) global __doc__ __doc__ = ( __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) if __doc__ is not None else None ) _init()
/requestsabc-001A-0.0.3.tar.gz/requestsabc-001A-0.0.3/requestsa/status_codes.py
0.846308
0.566258
status_codes.py
pypi
from . import sessions


def request(method, url, **kwargs):
    r"""Construct a :class:`Request <Request>`, send it, and return the
    :class:`Response <Response>`.

    :param method: method for the new :class:`Request` object: ``GET``,
        ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: any keyword argument accepted by
        :meth:`Session.request` — e.g. ``params``, ``data``, ``json``,
        ``headers``, ``cookies``, ``files``, ``auth``, ``timeout``,
        ``allow_redirects``, ``proxies``, ``verify``, ``stream``, ``cert``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      >>> req
      <Response [200]>
    """
    # A throwaway Session scoped to this one call: the ``with`` block
    # guarantees the session (and its sockets) are closed again, which would
    # otherwise surface as ResourceWarnings or look like a memory leak.
    with sessions.Session() as http:
        return http.request(method=method, url=url, **kwargs)


def get(url, params=None, **kwargs):
    r"""Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("get", url, params=params, **kwargs)


def options(url, **kwargs):
    r"""Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("options", url, **kwargs)


def head(url, **kwargs):
    r"""Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes. If
        `allow_redirects` is not provided, it will be set to `False` (as
        opposed to the default :meth:`request` behavior).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike the other verbs, HEAD does not follow redirects by default.
    kwargs.setdefault("allow_redirects", False)
    return request("head", url, **kwargs)


def post(url, data=None, json=None, **kwargs):
    r"""Send a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("post", url, data=data, json=json, **kwargs)


def put(url, data=None, **kwargs):
    r"""Send a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request` (passed through ``**kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("put", url, data=data, **kwargs)


def patch(url, data=None, **kwargs):
    r"""Send a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request` (passed through ``**kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("patch", url, data=data, **kwargs)


def delete(url, **kwargs):
    r"""Send a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("delete", url, **kwargs)
/requestsabc-001A-0.0.3.tar.gz/requestsabc-001A-0.0.3/requestsa/api.py
0.864953
0.413063
api.py
pypi
from collections import OrderedDict
# This module is Python-3-only (it uses f-strings and bare ``super()``
# below), so take the ABCs straight from the standard library instead of
# routing them through the internal py2/py3 ``.compat`` shim.
from collections.abc import Mapping, MutableMapping


class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of ``MutableMapping`` as well as
    dict's ``copy``. Also provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the case of
    the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``,
    ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys.
    However, querying and contains testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the value of a
    ``'Content-Encoding'`` response header, regardless of how the header name
    was originally stored.

    If the constructor, ``.update``, or equality comparison operations are
    given keys that have equal ``.lower()``s, the behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Internal index: lowercased key -> (original-cased key, value).
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value so iteration can reproduce its casing.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively.
        return dict(self.lower_items()) == dict(other.lower_items())

    # MutableMapping does not provide copy(); dict users expect one.
    def copy(self):
        # ``values()`` yields (cased_key, value) pairs, which the constructor
        # consumes as an iterable of 2-tuples, preserving original casing.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))


class LookupDict(dict):
    """Dictionary lookup object.

    Lookups read the instance ``__dict__`` (i.e. attributes), not the
    underlying dict storage, and missing keys return ``None`` instead of
    raising ``KeyError``.
    """

    def __init__(self, name=None):
        self.name = name
        super().__init__()

    def __repr__(self):
        return f"<lookup '{self.name}'>"

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
/requestsabc-001A-0.0.3.tar.gz/requestsabc-001A-0.0.3/requestsa/structures.py
0.926893
0.4231
structures.py
pypi
from __future__ import absolute_import

import codecs

from io import BytesIO

from .packages import six
from .packages.six import b
from .fields import RequestField

# codecs.lookup() returns (encoder, decoder, StreamReader, StreamWriter);
# index 3 is the incremental UTF-8 StreamWriter class, used below to write
# text into the binary body buffer.
writer = codecs.lookup('utf-8')[3]


def choose_boundary():
    """
    Our embarrassingly-simple replacement for mimetools.choose_boundary.

    We are lazily loading uuid here, because we don't want its issues
    https://bugs.python.org/issue5885
    https://bugs.python.org/issue11063
    to affect our entire library.
    """
    from uuid import uuid4
    return uuid4().hex


def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.
    """
    if isinstance(fields, dict):
        i = six.iteritems(fields)
    else:
        i = iter(fields)

    for field in i:
        if isinstance(field, RequestField):
            yield field
        else:
            # Plain (key, value) pairs are promoted to RequestField objects.
            yield RequestField.from_tuples(*field)


def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    if isinstance(fields, dict):
        return ((k, v) for k, v in six.iteritems(fields))

    return ((k, v) for k, v in fields)


def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME
    format.

    :param fields:
        Dictionary of fields or list of (key,
        :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        body.write(b('--%s\r\n' % (boundary)))

        # Headers are text; route them through the UTF-8 stream writer.
        writer(body).write(field.render_headers())
        data = field.data

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        # Text payloads are UTF-8 encoded via the writer; bytes pass through.
        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
/requestscore-0.0.0.tar.gz/requestscore-0.0.0/requests_core/http_manager/filepost.py
0.696475
0.19477
filepost.py
pypi
from ._collections import HTTPHeaderDict

# Default ports per scheme: when a request's port matches its scheme's
# default, the Host header omits the port.
DEFAULT_PORTS = {"http": 80, "https": 443}


class Request(object):
    """
    The base, common, Request object.

    This object provides a *semantic* representation of a HTTP request. It
    includes all the magical parts of a HTTP request that we have come to know
    and love: it has a method, a target (the path & query portions of a URI),
    some headers, and optionally a body.

    All of urllib3 manipulates these Request objects, passing them around and
    changing them as necessary. The low-level layers know how to send these
    objects.
    """

    def __init__(self, method, target, headers=None, body=None):
        #: The HTTP method in use. Must be a byte string.
        self.method = method

        #: The request target: that is, the path and query portions of the URI.
        self.target = target

        #: The request headers. These are always stored as a HTTPHeaderDict.
        self.headers = HTTPHeaderDict(headers)

        #: The request body. This is allowed to be one of a few kinds of
        #: objects:
        #: - A byte string.
        #: - A "readable" object.
        #: - An iterable of byte strings.
        #: - A text string (not recommended, auto-encoded to UTF-8)
        self.body = body

    def add_host(self, host, port, scheme):
        """
        Add the Host header, as needed.

        This helper method exists to circumvent an ordering problem: the best
        layer to add the Host header is the bottom layer, but it is the layer
        that will add headers last. That means that they will appear at the
        bottom of the header block.

        Proxies, caches, and other intermediaries *hate* it when clients do
        that because the Host header is routing information, and they'd like
        to see it as early as possible. For this reason, this method ensures
        that the Host header will be the first one emitted. It also ensures
        that we do not duplicate the host header: if there already is one, we
        just use that one.

        :param host: host name to place in the Host header.
        :param port: port the connection targets.
        :param scheme: URI scheme (e.g. ``"http"``/``"https"``), used to
            decide whether ``port`` is the default and can be omitted.
        """
        # NOTE(review): the membership test uses a bytes key; this assumes
        # HTTPHeaderDict is keyed by (case-insensitive) bytes — confirm
        # against ._collections.
        if b'host' not in self.headers:
            # Compare with ``==``, not ``is``: identity comparison of ints is
            # unreliable (443 is outside CPython's small-int cache, so an
            # equal port value from a caller is usually a *different* int
            # object), which made the default HTTPS port leak into the Host
            # header. The ``object()`` fallback still never compares equal to
            # an int, so unknown schemes always get the port included.
            if port == DEFAULT_PORTS.get(scheme, object()):
                header = host
            else:
                header = "{}:{}".format(host, port)

            # Rebuild the header block with Host first: intermediaries want
            # routing information as early as possible.
            headers = HTTPHeaderDict(host=header)
            headers._copy_from(self.headers)
            self.headers = headers


class Response(object):
    """
    The abstract low-level Response object that urllib3 works on.

    This is not the high-level helpful Response object that is exposed at the
    higher layers of urllib3: it's just a simple object that just exposes the
    lowest-level HTTP semantics to allow processing by the higher levels.
    """

    def __init__(self, status_code, headers, body, version):
        #: The HTTP status code of the response.
        self.status_code = status_code

        #: The headers on the response, as a HTTPHeaderDict.
        self.headers = HTTPHeaderDict(headers)

        #: The response body. This is an iterable of bytes, and *must* be
        #: iterated if the connection is to be preserved.
        self.body = body

        #: The HTTP version of the response. Stored as a bytestring.
        self.version = version

    @property
    def complete(self):
        """
        If the response can be safely returned to the connection pool,
        returns True.
        """
        return self.body.complete
/requestscore-0.0.0.tar.gz/requestscore-0.0.0/requests_core/http_manager/base.py
0.846276
0.467879
base.py
pypi
from __future__ import absolute_import
from .filepost import encode_multipart_formdata
from .packages import six
from .packages.six.moves.urllib.parse import urlencode

__all__ = ['RequestMethods']


class RequestMethods(object):
    """
    Convenience mixin for classes that implement a :meth:`urlopen` method,
    such as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    It provides the common HTTP verbs and picks the right field encoding:
    :meth:`.request_encode_url` places fields in the URL query string (GET,
    HEAD, DELETE, OPTIONS), while :meth:`.request_encode_body` places them in
    the request body, either multipart or www-form-urlencoded (POST, PUT,
    PATCH). :meth:`.request` dispatches to one of the two based on the
    method.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # Methods whose fields travel in the URL query string.
    _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'}

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        encode_multipart=True,
        multipart_boundary=None,
        **kw
    ):
        # Abstract: concrete subclasses must supply the actual transport.
        raise NotImplementedError(
            "Classes extending RequestMethods must implement "
            "their own ``urlopen`` method."
        )

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen`, encoding ``fields`` as
        appropriate for ``method``.

        This is the least-effort entry point; drop down to
        :meth:`request_encode_url`, :meth:`request_encode_body`, or
        :meth:`urlopen` when finer control is needed.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            encoder = self.request_encode_url
        else:
            encoder = self.request_encode_body
        return encoder(method, url, fields=fields, headers=headers, **urlopen_kw)

    def request_encode_url(
        self, method, url, fields=None, headers=None, **urlopen_kw
    ):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE,
        etc.
        """
        call_kw = {'headers': self.headers if headers is None else headers}
        call_kw.update(urlopen_kw)

        if fields:
            url = '%s?%s' % (url, urlencode(fields))

        return self.urlopen(method, url, **call_kw)

    def request_encode_body(
        self,
        method,
        url,
        fields=None,
        headers=None,
        encode_multipart=True,
        multipart_boundary=None,
        **urlopen_kw
    ):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH,
        etc.

        With ``encode_multipart=True`` (default) the payload is encoded via
        :meth:`urllib3.filepost.encode_multipart_formdata`; otherwise
        ``urllib.urlencode`` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files; it is reasonably
        safe in other cases too, but may break request signing (e.g. OAuth).

        ``fields`` accepts key/value strings AND key/filetuple entries, where
        a filetuple is (filename, data[, MIME type])::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        The filename is optional but recommended, to best mimic browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten, because it depends on the dynamic random boundary
        string composing the body. The boundary can be set explicitly with
        ``multipart_boundary``.
        """
        if headers is None:
            headers = self.headers

        call_kw = {'headers': {}}

        if fields:
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one."
                )

            if encode_multipart:
                body, content_type = encode_multipart_formdata(
                    fields, boundary=multipart_boundary
                )
            else:
                body = urlencode(fields)
                content_type = 'application/x-www-form-urlencoded'

            if isinstance(body, six.text_type):
                body = body.encode('utf-8')

            call_kw['body'] = body
            call_kw['headers'] = {'Content-Type': content_type}

        call_kw['headers'].update(headers)
        call_kw.update(urlopen_kw)
        return self.urlopen(method, url, **call_kw)
/requestscore-0.0.0.tar.gz/requestscore-0.0.0/requests_core/http_manager/request.py
0.823257
0.173778
request.py
pypi
from __future__ import absolute_import

import email.utils
import mimetypes

from .packages import six


def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if filename:
        return mimetypes.guess_type(filename)[0] or default
    return default


def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    # Fast path: simple quoting for values that contain no quote/backslash/
    # newline characters and encode cleanly as ASCII.
    if not any(ch in value for ch in '"\\\r\n'):
        result = '%s="%s"' % (name, value)
        try:
            result.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Not ASCII-safe; fall through to the RFC 2231 extended form.
            pass
        else:
            return result
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    # RFC 2231 extended notation: name*=utf-8''percent-encoded-value
    value = email.utils.encode_rfc2231(value, 'utf-8')
    value = '%s*=%s' % (name, value)
    return value


class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the
        field.
    """

    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        # Copy the caller's headers so later mutation does not alias them.
        self.headers = {}
        if headers:
            self.headers = dict(headers)

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple
        parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(),
                          'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # 2-tuple: infer the content type from the filename.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            filename = None
            content_type = None
            data = value

        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)

        return request_param

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.

        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) typles or a :class:`dict` of (k, v) to
            format as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts

        if isinstance(header_parts, dict):
            iterable = header_parts.items()

        for name, value in iterable:
            # None-valued parts (e.g. a missing filename) are skipped.
            if value is not None:
                parts.append(self._render_part(name, value))

        return '; '.join(parts)

    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []

        # Emit the well-known headers first, in a fixed order...
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))

        # ...then any remaining truthy headers, in dict order.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))

        # Trailing blank line terminates the header block.
        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(
        self,
        content_disposition=None,
        content_type=None,
        content_location=None,
    ):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        self.headers[
            'Content-Disposition'
        ] = content_disposition or 'form-data'
        # Append '; name="..."; filename="..."' (the leading '' produces the
        # separating '; ').
        self.headers['Content-Disposition'] += '; '.join(
            [
                '',
                self._render_parts(
                    (('name', self._name), ('filename', self._filename))
                ),
            ]
        )
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
/requestscore-0.0.0.tar.gz/requestscore-0.0.0/requests_core/http_manager/fields.py
0.809276
0.410579
fields.py
pypi
# Backport of collections.OrderedDict for Python interpreters that lack it
# (the classic ActiveState recipe bundled with vendored packages).

try:
    from thread import get_ident as _get_ident
except ImportError:
    from dummy_thread import get_ident as _get_ident

try:
    # Only available on Python 2.6+; used by the view methods below.
    from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
    pass


class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular
    # dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly
    # linked list. The circular doubly linked list starts and ends with a
    # sentinel element. The sentinel element never gets deleted (this
    # simplifies the algorithm). Each link is stored as a list of length
    # three: [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the
        # linked list, and the inherited dictionary is updated with the new
        # key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor
        # nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the reference cycles in the links before dropping them.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if
        false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # NOTE: ``self`` is pulled out of *args so that a keyword argument
        # literally named 'self' cannot shadow it.
        if len(args) > 2:
            raise TypeError(
                'update() takes at most 2 positional '
                'arguments (%d given)' % (len(args),)
            )
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the
        corresponding value. If key is not found, d is returned if given,
        otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an OrderedDict
        # (directly or indirectly) contains itself; keyed by (id, thread-id).
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the implementation attributes; they are rebuilt on unpickle.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is
        order-sensitive while comparison to a regular mapping is
        order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self) == len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
/requestscore-0.0.0.tar.gz/requestscore-0.0.0/requests_core/http_manager/packages/ordered_dict.py
0.562898
0.282054
ordered_dict.py
pypi
from __future__ import absolute_import import time import logging from collections import namedtuple from itertools import takewhile import email import re from ..exceptions import ( ConnectTimeoutError, MaxRetryError, ProtocolError, ReadTimeoutError, ResponseError, InvalidHeader, ) from ..packages import six log = logging.getLogger(__name__) # Data structure for representing the metadata of requests that result in a retry. RequestHistory = namedtuple( 'RequestHistory', ["method", "url", "error", "status", "redirect_location"] ) class Retry(object): """ Retry configuration. Each retry attempt will create a new Retry object with updated values, so they can be safely reused. Retries can be defined as a default for a pool:: retries = Retry(connect=5, read=2, redirect=5) http = PoolManager(retries=retries) response = http.request('GET', 'http://example.com/') Or per-request (which overrides the default for the pool):: response = http.request('GET', 'http://example.com/', retries=Retry(10)) Retries can be disabled by passing ``False``:: response = http.request('GET', 'http://example.com/', retries=False) Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless retries are disabled, in which case the causing exception will be raised. :param int total: Total number of retries to allow. Takes precedence over other counts. Set to ``None`` to remove this constraint and fall back on other counts. It's a good idea to set this to some sensibly-high value to account for unexpected edge cases and avoid infinite retry loops. Set to ``0`` to fail on the first retry. Set to ``False`` to disable and imply ``raise_on_redirect=False``. :param int connect: How many connection-related errors to retry on. These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. Set to ``0`` to fail on the first retry of this type. :param int read: How many times to retry on read errors. 
These errors are raised after the request was sent to the server, so the request may have side-effects. Set to ``0`` to fail on the first retry of this type. :param int redirect: How many redirects to perform. Limit this to avoid infinite redirect loops. A redirect is a HTTP response with a status code 301, 302, 303, 307 or 308. Set to ``0`` to fail on the first retry of this type. Set to ``False`` to disable and imply ``raise_on_redirect=False``. :param int status: How many times to retry on bad status codes. These are retries made on responses, where status code matches ``status_forcelist``. Set to ``0`` to fail on the first retry of this type. :param iterable method_whitelist: Set of uppercased HTTP method verbs that we should retry on. By default, we only retry on methods which are considered to be idempotent (multiple requests with the same parameters end with the same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. Set to a ``False`` value to retry on any verb. :param iterable status_forcelist: A set of integer HTTP status codes that we should force a retry on. A retry is initiated if the request method is in ``method_whitelist`` and the response status code is in ``status_forcelist``. By default, this is disabled with ``None``. :param float backoff_factor: A backoff factor to apply between attempts after the second try (most errors are resolved immediately by a second try without a delay). urllib3 will sleep for:: {backoff factor} * (2 ^ ({number of total retries} - 1)) seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer than :attr:`Retry.BACKOFF_MAX`. By default, backoff is disabled (set to 0). :param bool raise_on_redirect: Whether, if the number of redirects is exhausted, to raise a MaxRetryError, or to return a response with a response code in the 3xx range. 
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``: whether we should raise an exception, or return a response, if status falls in ``status_forcelist`` range and retries have been exhausted. :param tuple history: The history of the request encountered during each call to :meth:`~Retry.increment`. The list is in the order the requests occurred. Each list item is of class :class:`RequestHistory`. :param bool respect_retry_after_header: Whether to respect Retry-After header on status codes defined as :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. """ DEFAULT_METHOD_WHITELIST = frozenset( ['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'] ) RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) # : Maximum backoff time. BACKOFF_MAX = 120 def __init__( self, total=10, connect=None, read=None, redirect=None, status=None, method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0, raise_on_redirect=True, raise_on_status=True, history=None, respect_retry_after_header=True, ): self.total = total self.connect = connect self.read = read self.status = status if redirect is False or total is False: redirect = 0 raise_on_redirect = False self.redirect = redirect self.status_forcelist = status_forcelist or set() self.method_whitelist = method_whitelist self.backoff_factor = backoff_factor self.raise_on_redirect = raise_on_redirect self.raise_on_status = raise_on_status self.history = history or tuple() self.respect_retry_after_header = respect_retry_after_header def new(self, **kw): params = dict( total=self.total, connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, history=self.history, ) params.update(kw) return type(self)(**params) @classmethod def from_int(cls, retries, redirect=True, default=None): """ 
Backwards-compatibility for the old retries format.""" if retries is None: retries = default if default is not None else cls.DEFAULT if isinstance(retries, Retry): return retries redirect = bool(redirect) and None new_retries = cls(retries, redirect=redirect) log.debug("Converted retries value: %r -> %r", retries, new_retries) return new_retries def get_backoff_time(self): """ Formula for computing the current backoff :rtype: float """ # We want to consider only the last consecutive errors sequence (Ignore redirects). consecutive_errors_len = len( list( takewhile( lambda x: x.redirect_location is None, reversed(self.history), ) ) ) if consecutive_errors_len <= 1: return 0 backoff_value = self.backoff_factor * ( 2 ** (consecutive_errors_len - 1) ) return min(self.BACKOFF_MAX, backoff_value) def parse_retry_after(self, retry_after): # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 if re.match(r"^\s*[0-9]+\s*$", retry_after): seconds = int(retry_after) else: retry_date_tuple = email.utils.parsedate(retry_after) if retry_date_tuple is None: raise InvalidHeader( "Invalid Retry-After header: %s" % retry_after ) retry_date = time.mktime(retry_date_tuple) seconds = retry_date - time.time() if seconds < 0: seconds = 0 return seconds def get_retry_after(self, response): """ Get the value of Retry-After in seconds. """ retry_after = response.getheader("Retry-After") if retry_after is None: return None return self.parse_retry_after(retry_after) def sleep_for_retry(self, response=None): retry_after = self.get_retry_after(response) if retry_after: time.sleep(retry_after) return True return False def _sleep_backoff(self): backoff = self.get_backoff_time() if backoff <= 0: return time.sleep(backoff) def sleep(self, response=None): """ Sleep between retry attempts. This method will respect a server's ``Retry-After`` response header and sleep the duration of the time requested. If that is not present, it will use an exponential backoff. 
By default, the backoff factor is 0 and this method will return immediately. """ if response: slept = self.sleep_for_retry(response) if slept: return self._sleep_backoff() def _is_connection_error(self, err): """ Errors when we're fairly sure that the server did not receive the request, so it should be safe to retry. """ return isinstance(err, ConnectTimeoutError) def _is_read_error(self, err): """ Errors that occur after the request has been started, so we should assume that the server began processing it. """ return isinstance(err, (ReadTimeoutError, ProtocolError)) def _is_method_retryable(self, method): """ Checks if a given HTTP method should be retried upon, depending if it is included on the method whitelist. """ if self.method_whitelist and method.upper( ) not in self.method_whitelist: return False return True def is_retry(self, method, status_code, has_retry_after=False): """ Is this method/status code retryable? (Based on whitelists and control variables such as the number of total retries to allow, whether to respect the Retry-After header, whether this header is present, and whether the returned status code is on the list of status codes to be retried upon on the presence of the aforementioned header) """ if not self._is_method_retryable(method): return False if self.status_forcelist and status_code in self.status_forcelist: return True return ( self.total and self.respect_retry_after_header and has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES) ) def is_exhausted(self): """ Are we out of retries? """ retry_counts = ( self.total, self.connect, self.read, self.redirect, self.status ) retry_counts = list(filter(None, retry_counts)) if not retry_counts: return False return min(retry_counts) < 0 def increment( self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None, ): """ Return a new Retry object with incremented retry counters. 
:param response: A response object, or None, if the server did not return a response. :type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object. """ if self.total is False and error: # Disabled, indicate to re-raise the error. raise six.reraise(type(error), error, _stacktrace) total = self.total if total is not None: total -= 1 connect = self.connect read = self.read redirect = self.redirect status_count = self.status cause = 'unknown' status = None redirect_location = None if error and self._is_connection_error(error): # Connect retry? if connect is False: raise six.reraise(type(error), error, _stacktrace) elif connect is not None: connect -= 1 elif error and self._is_read_error(error): # Read retry? if read is False or not self._is_method_retryable(method): raise six.reraise(type(error), error, _stacktrace) elif read is not None: read -= 1 elif response and response.get_redirect_location(): # Redirect retry? 
if redirect is not None: redirect -= 1 cause = 'too many redirects' redirect_location = response.get_redirect_location() status = response.status else: # Incrementing because of a server error like a 500 in # status_forcelist and a the given method is in the whitelist cause = ResponseError.GENERIC_ERROR if response and response.status: if status_count is not None: status_count -= 1 cause = ResponseError.SPECIFIC_ERROR.format( status_code=response.status ) status = response.status history = self.history + ( RequestHistory(method, url, error, status, redirect_location), ) new_retry = self.new( total=total, connect=connect, read=read, redirect=redirect, status=status_count, history=history, ) if new_retry.is_exhausted(): raise MaxRetryError(_pool, url, error or ResponseError(cause)) log.debug("Incremented Retry for (url='%s'): %r", url, new_retry) return new_retry def __repr__(self): return ( '{cls.__name__}(total={self.total}, connect={self.connect}, ' 'read={self.read}, redirect={self.redirect}, status={self.status})' ).format( cls=type(self), self=self ) # For backwards compatibility (equivalent to pre-v1.9): Retry.DEFAULT = Retry(3)
/requestscore-0.0.0.tar.gz/requestscore-0.0.0/requests_core/http_manager/util/retry.py
0.808219
0.28934
retry.py
pypi
from .socketrequests_ import Socket from .types_ import socketinfo from .socketsession import SocketSession from .errors import InvalidRequestType, InvalidCertificate, InvalidJson, InvalidRequest, InvalidURL from typing import List from ssl import SSLContext def get(url: str, port: int, *, headers: List[str]=None, ssl: bool=False, bytes_amt: int=8192, ssl_certificate: SSLContext=None ) -> socketinfo: """Sends a 'GET' Request to the server Returns: socketinfo: response data """ return Socket(url, port, ssl=ssl, bytes_amt=bytes_amt, ssl_certificate=ssl_certificate).send("GET", headers=headers) def post(url: str, port: int, *, headers: List[str]=None, ssl: bool=False, json: List[str]=None, bytes_amt: int=8192, ssl_certificate: SSLContext=None ) -> socketinfo: """Sends a 'POST' Request to the server Returns: socketinfo: response data """ return Socket(url, port, ssl=ssl, bytes_amt=bytes_amt, ssl_certificate=ssl_certificate).send("POST", headers=headers, json=json) def put(url: str, port: int, *, headers: List[str]=None, ssl: bool=False, json: List[str]=None, bytes_amt: int=8192, ssl_certificate: SSLContext=None ) -> socketinfo: """Sends a 'PUT Request to the server Returns: socketinfo: response data """ return Socket(url, port, ssl=ssl, bytes_amt=bytes_amt, ssl_certificate=ssl_certificate).send("PUT", headers=headers, json=json) def delete(url: str, port: int, *, headers: List[str]=None, ssl: bool=False, json: dict=None, bytes_amt: int=8192, ssl_certificate: SSLContext=None ) -> socketinfo: """Sends a 'DELETE' Request to the server Returns: socketinfo: response data """ return Socket(url, port, ssl=ssl, bytes_amt=bytes_amt, ssl_certificate=ssl_certificate).send("DELETE", headers=headers, json=json)
/requestsocket-0.0.1-py3-none-any.whl/socketrequest/__init__.py
0.574395
0.17113
__init__.py
pypi
# TUTORIAL: How to create your own pip library [![Build Status](https://travis-ci.com/MichaelKim0407/tutorial-pip-package.svg?branch=master)](https://travis-ci.com/MichaelKim0407/tutorial-pip-package) [![Coverage Status](https://coveralls.io/repos/github/MichaelKim0407/tutorial-pip-package/badge.svg?branch=master)](https://coveralls.io/github/MichaelKim0407/tutorial-pip-package?branch=master) Author: Michael Kim <mkim0407@gmail.com> ## Overview The idea of `pip` roots back to the `import` keyword in Python, and that the keyword works for both standard library and user-defined modules. While user-defined modules are often single-use and not very complicated, it can be helpful that they can be reused across different projects without copy-pasting, or even shared with other developers. Before moving on to `pip`, there are several other possible approaches. 1. Add modules to the standard Python library. This is not a good approach because every developer needs different libraries, so increasing the size of the Python distribution is not beneficial. Also, code in the standard library should have a higher standard and have less flexibility when changes are needed. 2. Modify `PYTHONPATH` environment variable. While this can work locally on one machine, modifying the system setup can be problematic when it comes to distribution/deployment, and it has a high chance of messing things up on other parts of the system. ### So what is `pip`? From the [homepage](https://pip.pypa.io/en/stable/): > pip is the package installer for Python. > You can use pip to install packages from the Python Package Index and other indexes. ### `pip` vs `pypi` `pip` is the package installer, while [Python Package Index](https://pypi.org/), or `pypi`, is the package distribution platform that `pip` references *by default*. Because running `pip install {package}` will find the package on `pypi`, download, and then install it, it is easy to confuse them as one integral service. 
However, a package for `pip` does not have to live on `pypi`, as we'll demonstrate in this tutorial, and apparently you can download packages from `pypi` without using `pip`. ### Recommendations for this tutorial It is recommended to create a virtual environment and do everything in it for the purpose of this tutorial, so that you won't mess up your python installation. For Python 3.6+, you may use the `venv` module in the standard library. [HOWTO](https://docs.python.org/3/library/venv.html#creating-virtual-environments) For previous versions of Python, you may use [`virtualenv`](https://virtualenv.pypa.io/en/latest/). After creating the virtual environment, it might be a good idea to update the base packages we are going to use: ```bash $ pip install -U pip setuptools ``` ## Step 1: Create an importable module! Since `pip` is going to install modules that we can `import`, we need to have one ready first. Let's create `my_pip_package.py`: ```python def hello_world(): print("This is my first pip package!") ``` Confirm that it can be imported properly: ```bash $ python -c "import my_pip_package; my_pip_package.hello_world()" This is my first pip package! ``` Checkout the repo at this stage using the [`01-create-module`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/01-create-module) tag. ## Step 2: Create `setup.py` `setup.py` is used to tell `pip` how to install the package. You can find the full documentation [here](https://setuptools.readthedocs.io/en/latest/setuptools.html). For this tutorial we will have the most basic setup ready, and expand upon it. ```python from setuptools import setup from my_pip_package import __version__ setup( name='my_pip_package', version=__version__, url='https://github.com/MichaelKim0407/tutorial-pip-package', author='Michael Kim', author_email='mkim0407@gmail.com', py_modules=['my_pip_package'], ) ``` Change url and author info for yourself. 
Add this to `my_pip_package.py`: ```python __version__ = 'dev' ``` To confirm that `setup.py` works properly: ```bash $ pip install -e . ``` It should install the package and create a folder called `my_pip_package.egg-info`. If you are using version control systems like `git`, make sure to ignore that folder. Now, you should be able to import the package outside of the folder: ```bash $ cd .. $ python -c "import my_pip_package; my_pip_package.hello_world()" This is my first pip package! ``` If you have pushed your code to a git hosting service, you should be able to install it anywhere right now: ```bash $ pip install git+git://github.com/MichaelKim0407/tutorial-pip-package.git#egg=my_pip_package ``` (replace with your own repo url) Note for `pipenv`: you should use `-e` flag so that `pipenv` will pick up dependencies in the lock file. Checkout the repo at this stage using the [`02-setup-py`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/02-setup-py) tag. ## Step 3: Convert to multi-file package This step is optional, if you want to keep everything in one file. However, the setup is slightly different so we'll keep this as a separate step. First, turn the Python module into a package: ```bash $ mkdir my_pip_package $ mv my_pip_package.py my_pip_package/__init__.py ``` Add another Python file in the package, e.g. `math.py`: ```python def add(x, y): return x + y ``` Change the following lines in `setup.py`: `from setuptools import setup` -> `from setuptools import setup, find_packages` `py_modules=['my_pip_package']` -> `packages=find_packages()` Test that everything works: ```bash $ python -c "import my_pip_package; my_pip_package.hello_world()" This is my first pip package! $ python -c "from my_pip_package.math import add; print(add(1, 3))" 4 ``` Checkout the repo at this stage using the [`03-convert-package`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/03-convert-package) tag. 
## Step 4: Adding dependencies If you want to use another `pip` library as dependency, you can specify it in `setup.py`. First, let's add the following code to `math.py`: ```python from returns import returns @returns(int) def div_int(x, y): return x / y ``` The `returns` decorator comes from the `returns-decorator` package (DISCLAIMER: created by the author of this tutorial), which is available on `pypi`. When writing production code you should totally use `//`, but for the sake of demonstration let's use the decorator for now. To specify `returns-decorator` as a dependency, add the following entry to `setup(...)` in `setup.py`: ```python install_requires=[ 'returns-decorator', ], ``` Run `pip install -e .` again to pick up the new dependency. Now verify that it works: ```bash $ python -c "from my_pip_package.math import div_int; print(div_int(3, 2))" 1 ``` You may also specify versions of your dependency, e.g. `returns-decorator>=1.1`. For the full spec, see [PEP 508](https://www.python.org/dev/peps/pep-0508/). Checkout the repo at this stage using the [`04-dependency`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/04-dependency) tag. ## Step 5: Adding optional (extra) dependencies Sometimes certain parts of your code require a specific dependency, but it's not necessarily useful for all use cases. One example would be the `sqlalchemy` library, which supports a variety of SQL dialects, but in most cases anyone using it would only be interested in one dialect. Installing all dependencies is both inefficient and messy, so it's better to let the user decide what exactly is needed. However, it would be cumbersome for the user to install the specific dependencies. This is where extra dependencies some in. For this tutorial, after the last step, let's pretend that we don't want to always install `returns-decorator` unless `math` is used. 
We can replace the `install_requires` with the following: ```python extras_require={ 'math': [ 'returns-decorator', ], }, ``` Note the `s`: `install_requires` is singular but `extras_require` is plural. Now, we can install the extra dependency by appending `[math]` in the installation: ```bash $ pip install -e .[math] ``` or ```bash $ pip install git+git://github.com/MichaelKim0407/tutorial-pip-package.git#egg=my_pip_package[math] ``` However, we are not finished just yet - since we want to add more extra dependencies in the future, it's better to keep them organized. One good habit is to make a `[dev]` extra dependency, which includes all dependencies needed for local development. In `setup.py`: ```python extra_math = [ 'returns-decorator', ] extra_dev = [ *extra_math, ] ``` and in `setup(...)`: ```python extras_require={ 'math': extra_math, 'dev': extra_dev, }, ``` Now we can just run `pip install -e .[dev]` whenever we want to setup a dev environment. Checkout the repo at this stage using the [`05-extra-dependency`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/05-extra-dependency) tag. ## Step 6: Command line entries `pip` allows packages to create command line entries in the `bin/` folder. First, let's make a function that accepts command line arguments in `math.py`, and make the module callable: ```python def cmd_add(args=None): import argparse parser = argparse.ArgumentParser() parser.add_argument('x', type=float) parser.add_argument('y', type=float) parsed_args = parser.parse_args(args) print(add(parsed_args.x, parsed_args.y)) if __name__ == '__main__': cmd_add() ``` Test it out: ```bash $ python my_pip_package/math.py 1.5 3 4.5 ``` Now, add the following entry to `setup(...)`: ```python entry_points={ 'console_scripts': [ 'add=my_pip_package.math:cmd_add', ], }, ``` The syntax is `{cmd entry name}={module path}:{function name}`. Run `pip install -e .[dev]` again to create the command line entry. 
```bash $ add 1.6 4 5.6 ``` The `__name__ == '__main__'` part is not really needed, so let's remove it. Also, since the `add` command requires the `[math]` dependency, let's make it explicit for anyone wishing to use the command: ```python extra_bin = [ *extra_math, ] ``` and ```python extra_requires = { ..., 'bin': extra_bin, } ``` Checkout the repo at this stage using the [`06-command`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/06-command) tag. ## Step 7: Adding tests! If you are developing a package you should probably include tests from the beginning, but since it's a different step in the setup we'll do it now. For this tutorial, we'll be using `pytest` for testing and `pytest-cov` for coverage. Lets include the packages in the extras: ```python extra_test = [ *extra_math, 'pytest>=4', 'pytest-cov>=2', ] ``` and update the `[dev]` extra dependency to include testing: ```python extra_dev = [ *extra_test, ] ``` Run `pip install -e .[dev]` again to pick up the new dependencies. For the sake of length, we'll add to the repo without writing them down here. Run `pytest` to test the package. Once everything's passed, we can move on for coverage test. Create `.coveragerc`: ``` [run] source = my_pip_package ``` And run `pytest --cov` to see coverage. `--cov-report` can also be specified to provide formatting for coverage report. My favorite is `pytest --cov --cov-report term-missing:skip-covered`, which lists all the line numbers that are not covered by tests, while hiding all files that have been completely covered. Lastly, don't forget to ignore the test output in `.gitignore`: ``` .pytest_cache/ .coverage ``` Checkout the repo at this stage using the [`07-tests`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/07-tests) tag. 
## Step 8: Adding tests to CI While testing locally can catch a lot of problems already, running tests automatically is a further step on quality control, especially multiple developers are involved, and it also shows the world that your library is indeed working as intended. For GitHub repos, we'll be using [Travis CI](https://travis-ci.com) to run the CI tests. We'll be using [Coveralls](https://coveralls.io) for coverage reporting. (There is an alternative called [Codecov](https://codecov.io/), however [it has a pretty significant issue for Python](https://github.com/codecov/codecov-python/issues/136).) First, `coveralls` requires an extra dependency, so let's create an extra called `ci`: ```python extra_ci = [ *extra_test, 'python-coveralls', ] ``` Next, add the CI configuration, which should be called `.travis.yml`. Details on how to write it can be found [here](https://docs.travis-ci.com/). See code in repo for how we are doing it. Let's also add the badges to the top of our README file so everyone can see them immediately. The code to embed badges can be found on travis and coveralls. After the CI runs successfully, the badges will be updated. Checkout the repo at this stage using the [`08-ci`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/08-ci) tag. ## Step 9: Releasing on pypi! At this point, your library can already be shared with the world, however it is not on pypi yet. To release on pypi, there are a few things we need to take care of. First, add some classifiers for your package in `setup()`. A full list of classifiers can be found [here](https://pypi.org/pypi?%3Aaction=list_classifiers). Next, change `__version__` to a standard version string, such as `1.0`. Next, change the name of your package, if you followed the tutorial thus far, since `my_pip_package` would be taken by me. Be creative! 
The `name` argument in `setup()` does not need to match the name of the python package, but it's better to keep them the same so that anyone that installs your library won't be confused. You may also want to add a `description` in `setup()`. Once everything is good, we can package the library: ```bash $ python setup.py sdist ``` If should create a `.tar.gz` file under `dist/`. You can unzip the file to inspect its contents. Also, don't forget to add `dist/` to `.gitignore`. The file is now ready to be uploaded to `pypi`. Create an account on `pypi`, and store the credentials in `~/.pypirc`: ``` [pypi] username = password = ``` Finally, to upload the file: ```bash $ twine upload dist/{packaged file}.tar.gz ``` Your package should now show up on `pypi` and installable using `pip install`. It would also be a good idea to create a release on GitHub, and drop the packaged file as an attachment. Checkout the repo at this stage using the [`09-release`](https://github.com/MichaelKim0407/tutorial-pip-package/tree/09-release) tag.
/requestsping-1.1.tar.gz/requestsping-1.1/README.md
0.452778
0.944944
README.md
pypi
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """Multiprocessing pool providing a unique request session per process.""" import itertools import multiprocessing import os import requests __author__ = "miruka" __email__ = "miruka@disroot.org" __license__ = "LGPLv3" __version__ = "1.0.1" def _split_items(iterable, split_in): """Split iterable sequence of items into even subsequences. Args: iterable: Iterable sequence of element, like a list or tuple. split_in (int): Number of even chunks to split the given sequence in. Returns: (tuple, list): Defaults to tuple unless `iterable` was a list. Tuple/list containing a tuple/list for each subsequence. Examples: >>> list(_split_items([i for i in range(1, 11)], 3)): [[1, 4, 7, 10], [2, 5, 8], [3, 6, 9]] """ if not isinstance(iterable, (list, tuple)): iterable = tuple(iterable) for i in range(split_in): yield iterable[i::split_in] def _flatten_or_not(flatten, iterable): return [i for sub in iterable for i in sub] if flatten else iterable class RequestsPool(object): """Multiprocessing pool providing a unique request session per process. Every process started by the `map()` or `starmap()` methods will have a unique and constant request session assigned; this allows safe requests multiprocessing usage. The session will be passed as additional argument to the `map()`/`starmap()` target function. 
Attributes: processes (int): Number of processes to run in parallel, defaults to `os.cpu_count()`. special_func (function): Function that will be run to get a specific object for each process, defaults to `requests.Session`. special_args (tuple): Positional arguments passed to the `special_func`, defaults to `()` (no args). special_kwargs (dict): Keyword arguments passed to the `special_func`, defaults to `{}` (no kwargs). Undocumented additional `multiprocessing.Pool` attributes: initializer (function): Function ran at the start of a `multiprocessing.Pool` worker. Defaults to `None`. initargs (tuple): Arguments passed to `initializer`, defaults to `()`. maxtasksperchild (int): Maximum number of tasks per process? Defaults to `None`. """ def __init__(self, processes=None, special_func=None, special_args=None, special_kwargs=None, initializer=None, initargs=(), maxtasksperchild=None): self.processes = processes or os.cpu_count() self.special_func = special_func or requests.Session self.special_args = special_args or () self.special_kwargs = special_kwargs or {} self.pool = multiprocessing.Pool(processes, initializer, initargs, maxtasksperchild) def __enter__(self): return self def __exit__(self, type_, value, traceback): self.pool.close() def __getstate__(self): """Remove self.pool when pickling to avoid multiprocessing errors.""" self_dict = self.__dict__.copy() del self_dict['pool'] return self_dict def _map_wrap_func(self, func, subsequence): special = self.special_func(*self.special_args, **self.special_kwargs) return [func(item, special) for item in subsequence] def _starmap_wrap_func(self, func, subsequence): special = self.special_func(*self.special_args, **self.special_kwargs) return [func(*item, special) for item in subsequence] def map(self, func, iterable, chunksize=None, flatten=True): """Run _map_wrap_func functions, each with split chunks of iterable. 
Example: Get multiple pages in parallel, here two at a time: >>> URLS = ("https://pypi.org/", "https://git.io", ... "https://gentoo.org") >>> def get_url(url, session): ... return session.get(url) ... >>> with requestspool.RequestsPool(2) as rp: ... print(rp.map(get_url, URLS)) ... [<Response [200]>, <Response [200]>, <Response [200]>] """ return _flatten_or_not(flatten, self.pool.starmap( self._map_wrap_func, itertools.product((func,), _split_items(iterable, self.processes)), chunksize )) def starmap(self, func, iterable, chunksize=None, flatten=True): """Same as map(), but run _map_starmap_func instead. Example: Get three pages in parallel, pass a same timeout parameter to all target function calls, get each processes's results in their own sublist: >>> from itertools import product >>> URLS = ("https://pypi.org/", "https://git.io", ... "https://gentoo.org") >>> def get_url_timeout(url, timeout, session): ... return session.get(url, timeout=timeout) ... >>> with requestspool.RequestsPool(3) as rp: ... print(rp.starmap(get_url_timeout, ... product(URLS, [6]), flatten=False)) ... [[<Response [200]>], [<Response [200]>], [<Response [200]>]] """ return _flatten_or_not(flatten, self.pool.starmap( self._starmap_wrap_func, itertools.product((func,), _split_items(iterable, self.processes)), chunksize ))
/requestspool-1.0.1.tar.gz/requestspool-1.0.1/requestspool.py
0.857336
0.176565
requestspool.py
pypi